From ea2fa5ccc30ce340b825d1974d4f4df43a94a789 Mon Sep 17 00:00:00 2001
From: Xavier Hardy
Date: Sun, 22 Jul 2018 16:48:59 +0100
Subject: [PATCH 1/6] Python 2.6, 2.7, 3.4, 3.5, 3.6, 3.7 compatibility

---
 collectors/0/couchbase.py              |  6 +-
 collectors/0/dfstat.py                 |  6 +-
 collectors/0/docker.py                 | 20 ++++---
 collectors/0/elasticsearch.py          | 22 +++++---
 collectors/0/flume.py                  | 24 +++++---
 collectors/0/g1gc.py                   |  6 +-
 collectors/0/graphite_bridge.py        | 10 +++-
 collectors/0/gstat.py                  | 36 ++++++------
 collectors/0/hadoop_datanode.py        |  2 +-
 collectors/0/hadoop_namenode.py        |  2 +-
 collectors/0/haproxy.py                |  6 +-
 collectors/0/hbase_regionserver.py     |  2 +-
 collectors/0/ifrate.py                 | 26 ++++-----
 collectors/0/ifstat.py                 |  2 +-
 collectors/0/iostat.py                 |  9 +--
 collectors/0/jolokia.py                | 10 ++--
 collectors/0/mongo.py                  | 10 ++--
 collectors/0/mongo3.py                 | 14 +++--
 collectors/0/mountstats.py             | 19 +++++--
 collectors/0/mysql.py                  | 28 ++++++----
 collectors/0/netstat.py                | 22 ++++----
 collectors/0/nfsstat.py                | 18 +++---
 collectors/0/ntpstat.py                |  8 ++-
 collectors/0/postgresql.py             |  8 +--
 collectors/0/postgresql_replication.py | 16 +++---
 collectors/0/procnettcp.py             | 20 ++++---
 collectors/0/procstats.py              | 60 ++++++++++----------
 collectors/0/pxc-collector.py          |  4 +-
 collectors/0/redis_stats.py            | 10 ++--
 collectors/0/riak.py                   | 10 +++-
 collectors/0/smart_stats.py            | 26 +++++----
 collectors/0/sysload.py                | 76 ++++++++++++------------
 collectors/0/tcollector.py             |  2 +-
 collectors/0/tcp_bridge.py             | 14 +++--
 collectors/0/udp_bridge.py             |  4 +-
 collectors/0/varnishstat.py            |  8 +--
 collectors/0/zabbix_bridge.py          | 10 ++--
 collectors/0/zfsiostats.py             | 20 ++++---
 collectors/0/zfsolkernstats.py         |  8 +--
 collectors/0/zookeeper.py              | 10 ++--
 collectors/300/aws_cloudwatch_stats.py | 25 +++++----
 collectors/lib/hadoop_http.py          | 20 ++++---
 collectors/lib/postgresqlutils.py      |  2 +-
 collectors/lib/utils.py                | 20 +++++--
 eos/collectors/eos.py                  |  6 +-
 eos/tcollector_agent.py                |  4 +-
 tcollector.py                          | 69 ++++++++++++-----------
 tests.py                               | 75 +++++++++++++++----------
 48 files changed, 471 insertions(+), 364 deletions(-)

diff --git a/collectors/0/couchbase.py b/collectors/0/couchbase.py
index 1e5d71ba..dabf3da6 100755
--- a/collectors/0/couchbase.py
+++ b/collectors/0/couchbase.py
@@ -87,7 +87,7 @@ def find_conf_file(pid):
   """Returns config file for couchbase-server."""
   try:
     fd = open('/proc/%s/cmdline' % pid)
-  except IOError, e:
+  except IOError as e:
     utils.err("Couchbase (pid %s) went away ? %s" % (pid, e))
     return
   try:
@@ -100,7 +100,7 @@ def find_bindir_path(config_file):
   """Returns the bin directory path"""
   try:
     fd = open(config_file)
-  except IOError, e:
+  except IOError as e:
     utils.err("Error for Config file (%s): %s" % (config_file, e))
     return None
   try:
@@ -142,7 +142,7 @@ def collect_stats(bin_dir, bucket):
     metric = stat.split(":")[0].lstrip(" ")
     value = stat.split(":")[1].lstrip(" \t")
     if metric in KEYS:
-      print ("couchbase.%s %i %s bucket=%s" % (metric, ts, value, bucket))
+      print("couchbase.%s %i %s bucket=%s" % (metric, ts, value, bucket))

 def main():
   utils.drop_privileges()
diff --git a/collectors/0/dfstat.py b/collectors/0/dfstat.py
index 46123520..69d8957b 100755
--- a/collectors/0/dfstat.py
+++ b/collectors/0/dfstat.py
@@ -51,7 +51,7 @@ def main():
   """dfstats main loop"""
   try:
     f_mounts = open("/proc/mounts", "r")
-  except IOError, e:
+  except IOError as e:
     utils.err("error: can't open /proc/mounts: %s" % e)
     return 13 # Ask tcollector to not respawn us

@@ -72,7 +72,7 @@ def main():
       # fs_passno     # Order in which filesystem checks are done at reboot time
       try:
         fs_spec, fs_file, fs_vfstype, fs_mntops, fs_freq, fs_passno = line.split(None)
-      except ValueError, e:
+      except ValueError as e:
         utils.err("error: can't parse line at /proc/mounts: %s" % e)
         continue

@@ -105,7 +105,7 @@ def main():
     fs_spec, fs_file, fs_vfstype = device
     try:
       r = os.statvfs(fs_file)
-    except OSError, e:
+    except OSError as e:
       utils.err("can't get info for mount point: %s: %s" % (fs_file, e))
       continue

diff --git a/collectors/0/docker.py b/collectors/0/docker.py
index d7560317..422b0386 100755
--- a/collectors/0/docker.py
+++ b/collectors/0/docker.py
@@ -2,6 +2,8 @@
 # More informations on https://docs.docker.com/articles/runmetrics/
 """Imports Docker stats from /sys/fs/cgroup."""

+from __future__ import print_function
+
 import os
 import re
 import socket
@@ -60,7 +62,7 @@ def getnameandimage(containerid):
     try:
         r = sock.connect_ex(DOCKER_SOCK)
         if (r != 0):
-            print >>sys.stderr, "Can not connect to %s" % (DOCKER_SOCK)
+            print("Can not connect to %s" % (DOCKER_SOCK), file=sys.stderr)
         else:
             message = 'GET /containers/' + containerid + '/json HTTP/1.1\r\nHost: http\n\n'
             sock.sendall(message)
@@ -79,16 +81,16 @@ def getnameandimage(containerid):
                 try:
                     containernames[containerid] = data["Name"].lstrip('/')
                 except:
-                    print >>sys.stderr, containerid+" has no Name field"
+                    print(containerid+" has no Name field", file=sys.stderr)
                 try:
                     containerimages[containerid] = data["Config"]["Image"].replace(':', '_')
                 except:
-                    print >>sys.stderr, containerid+" has no Image field"
+                    print(containerid+" has no Image field", file=sys.stderr)
             except:
-                print >>sys.stderr, "Can not load json"
+                print("Can not load json", file=sys.stderr)

-    except socket.timeout, e:
-        print >>sys.stderr, "Socket: %s" % (e,)
+    except socket.timeout as e:
+        print("Socket: %s" % (e,), file=sys.stderr)

 def senddata(datatosend, containerid):
     if datatosend:
@@ -97,7 +99,7 @@ def senddata(datatosend, containerid):
             datatosend += " containername="+containernames[containerid]
         if (containerid in containerimages):
             datatosend += " containerimage="+containerimages[containerid]
-        print "docker.%s" % datatosend
+        print("docker.%s" % datatosend)
         sys.stdout.flush()

 def readdockerstats(path, containerid):
@@ -112,8 +114,8 @@ def readdockerstats(path, containerid):
             and ((file_stat in proc_names.keys()) or (file_stat in proc_names_to_agg.keys()))):
             try:
                 f_stat = open(path+"/"+file_stat)
-            except IOError, e:
-                print >>sys.stderr, "Failed to open input file: %s" % (e,)
+            except IOError as e:
+                print("Failed to open input file: %s" % (e,), file=sys.stderr)
                 return 1

             ts = int(time.time())
diff --git a/collectors/0/elasticsearch.py b/collectors/0/elasticsearch.py
index c5fff9c4..70a758bb 100755
--- a/collectors/0/elasticsearch.py
+++ b/collectors/0/elasticsearch.py
@@ -16,7 +16,6 @@
 # Tested with ES 0.16.5, 0.17.x, 0.90.1 .

 import errno
-import httplib
 try:
   import json
 except ImportError:
@@ -30,6 +29,11 @@
 from collectors.lib import utils
 from collectors.etc import elasticsearch_conf

+try:
+  from http.client import HTTPConnection, OK
+except ImportError:
+  from httplib import HTTPConnection, OK
+

 COLLECTION_INTERVAL = 15  # seconds
 DEFAULT_TIMEOUT = 10.0    # seconds
@@ -57,7 +61,7 @@ def request(server, uri, json_in = True):
   """Does a GET request of the given uri on the given HTTPConnection."""
   server.request("GET", uri)
   resp = server.getresponse()
-  if resp.status != httplib.OK:
+  if resp.status != OK:
     raise ESError(resp)
   if json_in:
     return json.loads(resp.read())
@@ -99,10 +103,10 @@ def printmetric(metric, ts, value, tags):
   # Warning, this should be called inside a lock
   if tags:
     tags = " " + " ".join("%s=%s" % (name.replace(" ",""),
                                      value.replace(" ",""))
-                          for name, value in tags.iteritems())
+                          for name, value in tags.items())
   else:
     tags = ""
-  print ("%s %d %s %s"
+  print("%s %d %s %s"
         % (metric, ts, value, tags))
@@ -148,7 +152,7 @@ def _collect_indices(server, metric, tags, lock):
         # now print value
         with lock:
           printmetric(metric + ".cluster.byindex." + headerlist[count], ts, value, newtags)
-      except ValueError, ve:
+      except ValueError:
         # add this as a tag
         newtags[headerlist[count]] = value
       count += 1
@@ -200,11 +204,11 @@ def main(argv):
     return 1

   for conf in elasticsearch_conf.get_servers():
-    server = httplib.HTTPConnection( *conf )
+    server = HTTPConnection( *conf )
     try:
       server.connect()
-    except socket.error, (erno, e):
-      if erno == errno.ECONNREFUSED:
+    except socket.error as exc:
+      if exc.errno == errno.ECONNREFUSED:
         continue
       raise
     servers.append( server )
@@ -222,7 +226,7 @@ def main(argv):
       t.start()
       threads.append(t)
     for thread in threads:
-      t.join()
+      thread.join()
     time.sleep(COLLECTION_INTERVAL)

 if __name__ == "__main__":
diff --git a/collectors/0/flume.py b/collectors/0/flume.py
index 9d023f20..d99206de 100755
--- a/collectors/0/flume.py
+++ b/collectors/0/flume.py
@@ -26,10 +26,11 @@
   Based on the elastichsearch collector

-"""
+"""
+
+from __future__ import print_function

 import errno
-import httplib
 try:
   import json
 except ImportError:
   json = None
@@ -45,6 +46,11 @@ except ImportError:
   flume_conf = None

+try:
+  from http.client import HTTPConnection, OK
+except ImportError:
+  from httplib import HTTPConnection, OK
+
 COLLECTION_INTERVAL = 15 # seconds
 DEFAULT_TIMEOUT = 10.0   # seconds
 FLUME_HOST = "localhost"
 FLUME_PORT = 34545
@@ -54,7 +60,7 @@ FLUME_PORT = 34545
 EXCLUDE = [ 'StartTime', 'StopTime', 'Type' ]

 def err(msg):
-  print >>sys.stderr, msg
+  print(msg, file=sys.stderr)

 class FlumeError(RuntimeError):
   """Exception raised if we don't get a 200 OK from Flume webserver."""
@@ -66,7 +72,7 @@ def request(server, uri):
   """Does a GET request of the given uri on the given HTTPConnection."""
   server.request("GET", uri)
   resp = server.getresponse()
-  if resp.status != httplib.OK:
+  if resp.status != OK:
     raise FlumeError(resp)
   return json.loads(resp.read())

@@ -94,11 +100,11 @@ def main(argv):
   utils.drop_privileges()
   socket.setdefaulttimeout(DEFAULT_TIMEOUT)

-  server = httplib.HTTPConnection(FLUME_HOST, FLUME_PORT)
+  server = HTTPConnection(FLUME_HOST, FLUME_PORT)
   try:
     server.connect()
-  except socket.error, (erno, e):
-    if erno == errno.ECONNREFUSED:
+  except socket.error as exc:
+    if exc.errno == errno.ECONNREFUSED:
       return 13  # No Flume server available, ask tcollector to not respawn us.
     raise
   if json is None:
@@ -108,10 +114,10 @@ def main(argv):
   def printmetric(metric, value, **tags):
     if tags:
       tags = " " + " ".join("%s=%s" % (name, value)
-                            for name, value in tags.iteritems())
+                            for name, value in tags.items())
     else:
       tags = ""
-    print ("flume.%s %d %s %s" % (metric, ts, value, tags))
+    print(("flume.%s %d %s %s" % (metric, ts, value, tags)))

   while True:
     # Get the metrics
diff --git a/collectors/0/g1gc.py b/collectors/0/g1gc.py
index f7ec6795..593db037 100755
--- a/collectors/0/g1gc.py
+++ b/collectors/0/g1gc.py
@@ -161,7 +161,7 @@ def sec2milli(seconds):

 def flush_collector(collector):
     for metric_name, value in collector['data'].items():
-        print metric_name % (collector['timestamp'], value)
+        print(metric_name % (collector['timestamp'], value))

     collector['timestamp'] = None
     collector['data'] = {}
@@ -372,11 +372,11 @@ def process_gc_log(collector):

         if not collector['timestamp'] is None:
             for gen, value in collector['gensize'].items():
-                print "%s.gc.g1.gensize %s %s gen=%s" % (prefix, current_timestamp_in_sec, value, gen)
+                print("%s.gc.g1.gensize %s %s gen=%s" % (prefix, current_timestamp_in_sec, value, gen))

             # publish gc event count metrics
             for event, value in collector['count'].items():
-                print "%s.gc.g1.event.count %s %s event=%s" % (prefix, current_timestamp_in_sec, value, event)
+                print("%s.gc.g1.event.count %s %s event=%s" % (prefix, current_timestamp_in_sec, value, event))

     except Exception:
         exc_type, exc_value, exc_traceback = sys.exc_info()
diff --git a/collectors/0/graphite_bridge.py b/collectors/0/graphite_bridge.py
index 9b74cee7..17cfde35 100755
--- a/collectors/0/graphite_bridge.py
+++ b/collectors/0/graphite_bridge.py
@@ -17,9 +17,13 @@
 import sys

 from collectors.lib import utils
-import SocketServer
 import threading

+try:
+    from socketserver import ThreadingTCPServer, BaseRequestHandler
+except ImportError:
+    from SocketServer import ThreadingTCPServer, BaseRequestHandler
+
 try:
     from collectors.etc import graphite_bridge_conf
 except ImportError:
@@ -29,12 +33,12 @@ except ImportError:
 PORT = 2003
 SIZE = 8192


-class GraphiteServer(SocketServer.ThreadingTCPServer):
+class GraphiteServer(ThreadingTCPServer):
     allow_reuse_address = True
     print_lock = threading.Lock()


-class GraphiteHandler(SocketServer.BaseRequestHandler):
+class GraphiteHandler(BaseRequestHandler):

     def handle_line(self, line):
diff --git a/collectors/0/gstat.py b/collectors/0/gstat.py
index dc881275..0904ec22 100755
--- a/collectors/0/gstat.py
+++ b/collectors/0/gstat.py
@@ -85,7 +85,7 @@ def main():
             ["gstat", "-B", "-d", "-o", "-s", "-I"+str(collection_interval)+"s", "-f"+str(collection_filter)],
             stdout=subprocess.PIPE,
         )
-    except OSError, e:
+    except OSError as e:
         if e.errno == errno.ENOENT:
             # it makes no sense to run this collector here
             sys.exit(13) # we signal tcollector to not run us
@@ -96,7 +96,7 @@ def main():
     while signal_received is None:
         try:
             line = p_gstat.stdout.readline()
-        except (IOError, OSError), e:
+        except (IOError, OSError) as e:
             if e.errno in (errno.EINTR, errno.EAGAIN):
                 break
             raise
@@ -111,22 +111,22 @@ def main():

         fields = line.split()

-        print "disk.queue %s %s disk=%s" % (timestamp, fields[0], fields[17])
-        print "disk.ops.read %s %s disk=%s" % (timestamp, fields[2], fields[17])
-        print "disk.b.read %s %d disk=%s" % (timestamp, float(fields[3])*1024, fields[17])
-        print "disk.bps.read %s %d disk=%s" % (timestamp, float(fields[4])*1024, fields[17])
-        print "disk.ms.read %s %s disk=%s" % (timestamp, float(fields[5]), fields[17])
-        print "disk.ops.write %s %s disk=%s" % (timestamp, fields[6], fields[17])
-        print "disk.b.write %s %d disk=%s" % (timestamp, float(fields[7])*1024, fields[17])
-        print "disk.bps.write %s %d disk=%s" % (timestamp, float(fields[8])*1024, fields[17])
-        print "disk.ms.write %s %s disk=%s" % (timestamp, float(fields[9]), fields[17])
-        print "disk.ops.delete %s %s disk=%s" % (timestamp, fields[10], fields[17])
-        print "disk.b.delete %s %d disk=%s" % (timestamp, float(fields[11])*1024, fields[17])
-        print "disk.bps.delete %s %d disk=%s" % (timestamp, float(fields[12])*1024, fields[17])
-        print "disk.ms.delete %s %s disk=%s" % (timestamp, float(fields[13]), fields[17])
-        print "disk.ops.other %s %s disk=%s" % (timestamp, fields[14], fields[17])
-        print "disk.ms.other %s %s disk=%s" % (timestamp, float(fields[15]), fields[17])
-        print "disk.busy %s %s disk=%s" % (timestamp, fields[16], fields[17])
+        print("disk.queue %s %s disk=%s" % (timestamp, fields[0], fields[17]))
+        print("disk.ops.read %s %s disk=%s" % (timestamp, fields[2], fields[17]))
+        print("disk.b.read %s %d disk=%s" % (timestamp, float(fields[3])*1024, fields[17]))
+        print("disk.bps.read %s %d disk=%s" % (timestamp, float(fields[4])*1024, fields[17]))
+        print("disk.ms.read %s %s disk=%s" % (timestamp, float(fields[5]), fields[17]))
+        print("disk.ops.write %s %s disk=%s" % (timestamp, fields[6], fields[17]))
+        print("disk.b.write %s %d disk=%s" % (timestamp, float(fields[7])*1024, fields[17]))
+        print("disk.bps.write %s %d disk=%s" % (timestamp, float(fields[8])*1024, fields[17]))
+        print("disk.ms.write %s %s disk=%s" % (timestamp, float(fields[9]), fields[17]))
+        print("disk.ops.delete %s %s disk=%s" % (timestamp, fields[10], fields[17]))
+        print("disk.b.delete %s %d disk=%s" % (timestamp, float(fields[11])*1024, fields[17]))
+        print("disk.bps.delete %s %d disk=%s" % (timestamp, float(fields[12])*1024, fields[17]))
+        print("disk.ms.delete %s %s disk=%s" % (timestamp, float(fields[13]), fields[17]))
+        print("disk.ops.other %s %s disk=%s" % (timestamp, fields[14], fields[17]))
+        print("disk.ms.other %s %s disk=%s" % (timestamp, float(fields[15]), fields[17]))
+        print("disk.busy %s %s disk=%s" % (timestamp, fields[16], fields[17]))

         sys.stdout.flush()

diff --git a/collectors/0/hadoop_datanode.py b/collectors/0/hadoop_datanode.py
index e50c122c..45ac7318 100755
--- a/collectors/0/hadoop_datanode.py
+++ b/collectors/0/hadoop_datanode.py
@@ -47,7 +47,7 @@ def emit(self):
     current_time = int(time.time())
     metrics = self.poll()
     for context, metric_name, value in metrics:
-      for k, v in REPLACEMENTS.iteritems():
+      for k, v in REPLACEMENTS.items():
         if any(c.startswith(k) for c in context):
           context = v
       self.emit_metric(context, current_time, metric_name, value)
diff --git a/collectors/0/hadoop_namenode.py b/collectors/0/hadoop_namenode.py
index 5168e3fd..67ec9fac 100755
--- a/collectors/0/hadoop_namenode.py
+++ b/collectors/0/hadoop_namenode.py
@@ -45,7 +45,7 @@ def emit(self):
     current_time = int(time.time())
     metrics = self.poll()
     for context, metric_name, value in metrics:
-      for k, v in REPLACEMENTS.iteritems():
+      for k, v in REPLACEMENTS.items():
         if any(c.startswith(k) for c in context):
           context = v
       self.emit_metric(context, current_time, metric_name, value)
diff --git a/collectors/0/haproxy.py b/collectors/0/haproxy.py
index 84e80fb9..0fd82331 100755
--- a/collectors/0/haproxy.py
+++ b/collectors/0/haproxy.py
@@ -102,7 +102,7 @@ def find_conf_file(pid):
   """Returns the conf file of haproxy."""
   try:
     output = subprocess.check_output(["ps", "--no-headers", "-o", "cmd", pid])
-  except subprocess.CalledProcessError, e:
+  except subprocess.CalledProcessError as e:
     utils.err("HAProxy (pid %s) went away? %s" % (pid, e))
     return None
   return output.split("-f")[1].split()[0]
@@ -111,7 +111,7 @@ def find_sock_file(conf_file):
   """Returns the unix socket file of haproxy."""
   try:
     fd = open(conf_file)
-  except IOError, e:
+  except IOError as e:
     utils.err("Error: %s. Config file path is relative: %s" % (e, conf_file))
     return None
   try:
@@ -182,7 +182,7 @@ def print_metric(line, metric, timestamp):
   value = line[metric]
   if not value:
     value = 0
-  print ("haproxy.%s %i %s source=%s cluster=%s"
+  print("haproxy.%s %i %s source=%s cluster=%s"
         % (METRIC_NAMES[metric],
            timestamp,
            value,
diff --git a/collectors/0/hbase_regionserver.py b/collectors/0/hbase_regionserver.py
index 8cf3ef5a..4b99aa87 100755
--- a/collectors/0/hbase_regionserver.py
+++ b/collectors/0/hbase_regionserver.py
@@ -44,7 +44,7 @@ def emit_region_metric(self, context, current_time, full_metric_name, value):
     metric_name = match.group(4)
     tag_dict = {"namespace": namespace, "table": table, "region": region}

-    if any( not v for k,v in tag_dict.iteritems()):
+    if any( not v for k,v in tag_dict.items()):
       utils.err("Error splitting %s" % full_metric_name)
     else:
       self.emit_metric(context, current_time, metric_name, value, tag_dict)
diff --git a/collectors/0/ifrate.py b/collectors/0/ifrate.py
index f41d6bc4..2f6d7f82 100755
--- a/collectors/0/ifrate.py
+++ b/collectors/0/ifrate.py
@@ -83,7 +83,7 @@ def main():
                 intnum+=1
             else:
                 sys.exit(13) # we signal tcollector to not run us
-    except OSError, e:
+    except OSError as e:
         if e.errno == errno.ENOENT:
             # it makes no sense to run this collector here
             sys.exit(13) # we signal tcollector to not run us
@@ -97,7 +97,7 @@ def main():
             procnum=0
         try:
             line = p_net[procnum].stdout.readline()
-        except (IOError, OSError), e:
+        except (IOError, OSError) as e:
             if e.errno in (errno.EINTR, errno.EAGAIN):
                 break
             raise
@@ -111,20 +111,20 @@ def main():
         if len(fields) == 9:
             if(procnum == 0):
                 timestamp = int(time.time())
-            print ("ifrate.byt.in %s %s int=%s" % (timestamp, int(fields[3])/collection_interval, interfaces[procnum]))
-            print ("ifrate.byt.out %s %s int=%s" % (timestamp, int(fields[6])/collection_interval, interfaces[procnum]))
+            print("ifrate.byt.in %s %s int=%s" % (timestamp, int(fields[3])/collection_interval, interfaces[procnum]))
+            print("ifrate.byt.out %s %s int=%s" % (timestamp, int(fields[6])/collection_interval, interfaces[procnum]))
             if(report_packets):
-                print ("ifrate.pkt.in %s %s int=%s" % (timestamp, int(fields[0])/collection_interval, interfaces[procnum]))
-                print ("ifrate.pkt.out %s %s int=%s" % (timestamp, int(fields[4])/collection_interval, interfaces[procnum]))
+                print("ifrate.pkt.in %s %s int=%s" % (timestamp, int(fields[0])/collection_interval, interfaces[procnum]))
+                print("ifrate.pkt.out %s %s int=%s" % (timestamp, int(fields[4])/collection_interval, interfaces[procnum]))
             if(merge_err_in_out):
-                print ("ifrate.err %s %s int=%s" % (timestamp, (int(fields[1])+int(fields[5]))/collection_interval, interfaces[procnum]))
-                print ("ifrate.drp %s %s int=%s" % (timestamp, (int(fields[2])+int(fields[8]))/collection_interval, interfaces[procnum]))
+                print("ifrate.err %s %s int=%s" % (timestamp, (int(fields[1])+int(fields[5]))/collection_interval, interfaces[procnum]))
+                print("ifrate.drp %s %s int=%s" % (timestamp, (int(fields[2])+int(fields[8]))/collection_interval, interfaces[procnum]))
             else:
-                print ("ifrate.err.in %s %s int=%s" % (timestamp, int(fields[1])/collection_interval, interfaces[procnum]))
-                print ("ifrate.drp.in %s %s int=%s" % (timestamp, int(fields[2])/collection_interval, interfaces[procnum]))
-                print ("ifrate.err.out %s %s int=%s" % (timestamp, int(fields[5])/collection_interval, interfaces[procnum]))
-                print ("ifrate.drp.out %s %s int=%s" % (timestamp, int(fields[8])/collection_interval, interfaces[procnum]))
-            print ("ifrate.col %s %s int=%s" % (timestamp, int(fields[7])/collection_interval, interfaces[procnum]))
+                print("ifrate.err.in %s %s int=%s" % (timestamp, int(fields[1])/collection_interval, interfaces[procnum]))
+                print("ifrate.drp.in %s %s int=%s" % (timestamp, int(fields[2])/collection_interval, interfaces[procnum]))
+                print("ifrate.err.out %s %s int=%s" % (timestamp, int(fields[5])/collection_interval, interfaces[procnum]))
+                print("ifrate.drp.out %s %s int=%s" % (timestamp, int(fields[8])/collection_interval, interfaces[procnum]))
+            print("ifrate.col %s %s int=%s" % (timestamp, int(fields[7])/collection_interval, interfaces[procnum]))

         # analyze next process
         procnum+=1
diff --git a/collectors/0/ifstat.py b/collectors/0/ifstat.py
index c45d385d..8e0aaf93 100755
--- a/collectors/0/ifstat.py
+++ b/collectors/0/ifstat.py
@@ -84,7 +84,7 @@ def direction(i):
             if i >= 8:
                 return "out"
             return "in"

-        for i in xrange(16):
+        for i in range(16):
             print("proc.net.%s %d %s iface=%s direction=%s"
                   % (FIELDS[i], ts, stats[i], intf, direction(i)))
diff --git a/collectors/0/iostat.py b/collectors/0/iostat.py
index 7fbb4281..9c8245f1 100755
--- a/collectors/0/iostat.py
+++ b/collectors/0/iostat.py
@@ -64,6 +64,7 @@
 # when you have to handle mapping of /dev/mapper to dm-N, pulling out
 # swap partitions from /proc/swaps, etc.

+from __future__ import print_function

 import sys
 import time
@@ -197,7 +198,7 @@ def main():
                     tput = ((nr_ios - prev_nr_ios) * float(HZ) / float(itv))
                     util = ((float(stats.get("msec_total")) - float(prev_stats[device].get("msec_total"))) * float(HZ) / float(itv))
                     svctm = 0.0
-                    await = 0.0
+                    await_ = 0.0
                     r_await = 0.0
                     w_await = 0.0

@@ -213,7 +214,7 @@ def main():
                     if wr_ios != prev_wr_ios:
                         w_await = (float(wr_ticks) - float(prev_wr_ticks) ) / float(wr_ios - prev_wr_ios)
                     if nr_ios != prev_nr_ios:
-                        await = (float(rd_ticks) + float(wr_ticks) - float(prev_rd_ticks) - float(prev_wr_ticks)) / float(nr_ios - prev_nr_ios)
+                        await_ = (float(rd_ticks) + float(wr_ticks) - float(prev_rd_ticks) - float(prev_wr_ticks)) / float(nr_ios - prev_nr_ios)

                     print("%s%s %d %.2f dev=%s"
                           % (metric, "svctm", ts, svctm, device))
@@ -221,7 +222,7 @@ def main():
                     print("%s%s %d %.2f dev=%s"
                           % (metric, "w_await", ts, w_await, device))
                     print("%s%s %d %.2f dev=%s"
-                          % (metric, "await", ts, await, device))
+                          % (metric, "await", ts, await_, device))
                     print("%s%s %d %.2f dev=%s"
                           % (metric, "util", ts, float(util/1000.0), device))

@@ -233,7 +234,7 @@ def main():
                     print("%s%s %d %s dev=%s"
                           % (metric, FIELDS_PART[i], ts, values[i+3], device))
                 else:
-                    print >> sys.stderr, "Cannot parse /proc/diskstats line: ", line
+                    print("Cannot parse /proc/diskstats line: ", line, file=sys.stderr)
                     continue

         sys.stdout.flush()
diff --git a/collectors/0/jolokia.py b/collectors/0/jolokia.py
index 3eb536f8..4077db5f 100755
--- a/collectors/0/jolokia.py
+++ b/collectors/0/jolokia.py
@@ -98,19 +98,19 @@ def print_metrics(self, d, metric_prefix, timestamp, tags, not_tags=[]):
         """ Take a dict of attributes and print out numerical metric strings
            Recurse if necessary """
-        for k, v in d.iteritems():
+        for k, v in d.items():
             # Tack on the name of the attribute
             attribute, more_tags = self.parse_attribute(k.lower(), not_tags)
             metric_name = '.'.join([metric_prefix, attribute])
             my_tags = tags + more_tags
             # If numerical
             if utils.is_numeric(v):
-                print "%s %d %s %s" % (metric_name, timestamp, str(v),
-                                       ' '.join(my_tags))
+                print("%s %d %s %s" % (metric_name, timestamp, str(v),
+                                       ' '.join(my_tags)))
             # If a bool, True=1, False=0
             elif type(v) is bool:
-                print "%s %d %s %s" % (metric_name, timestamp, str(int(v)),
-                                       ' '.join(my_tags))
+                print("%s %d %s %s" % (metric_name, timestamp, str(int(v)),
+                                       ' '.join(my_tags)))
             # Or a dict of more attributes, call ourselves again
             elif type(v) is dict:
                 self.print_metrics(v, metric_name, timestamp, my_tags, not_tags)
diff --git a/collectors/0/mongo.py b/collectors/0/mongo.py
index b4e652eb..0f640ff5 100755
--- a/collectors/0/mongo.py
+++ b/collectors/0/mongo.py
@@ -13,6 +13,8 @@
 # of the GNU Lesser General Public License along with this program.  If not,
 # see <http://www.gnu.org/licenses/>.

+from __future__ import print_function
+
 import sys
 import time
 try:
@@ -63,7 +65,7 @@ def main():
   utils.drop_privileges()
   if pymongo is None:
-    print >>sys.stderr, "error: Python module `pymongo' is missing"
+    print("error: Python module `pymongo' is missing", file=sys.stderr)
     return 13

   c = pymongo.Connection(host=HOST, port=PORT)
@@ -74,8 +76,8 @@ def main():
     for base_metric, tags in TAG_METRICS:
       for tag in tags:
-        print 'mongo.%s %d %s type=%s' % (base_metric, ts,
-                                          res[base_metric][tag], tag)
+        print('mongo.%s %d %s type=%s' % (base_metric, ts,
+                                          res[base_metric][tag], tag))
     for metric in METRICS:
       cur = res
       try:
@@ -83,7 +85,7 @@ def main():
           cur = cur[m]
       except KeyError:
         continue
-      print 'mongo.%s %d %s' % (metric, ts, cur)
+      print('mongo.%s %d %s' % (metric, ts, cur))

     sys.stdout.flush()
     time.sleep(INTERVAL)
diff --git a/collectors/0/mongo3.py b/collectors/0/mongo3.py
index 41f490d4..2fb56564 100755
--- a/collectors/0/mongo3.py
+++ b/collectors/0/mongo3.py
@@ -14,6 +14,8 @@
 # of the GNU Lesser General Public License along with this program.  If not,
 # see <http://www.gnu.org/licenses/>.

+from __future__ import print_function
+
 import sys
 import time
 import os
@@ -191,7 +193,7 @@ def runServerStatus(c):
             cur = cur[m]
         except KeyError:
             continue
-        print 'mongo.%s %d %s' % (metric, ts, cur)
+        print('mongo.%s %d %s' % (metric, ts, cur))

     for metric in CONFIG_LOCKS_METRICS:
         cur = res
@@ -201,7 +203,7 @@ def runServerStatus(c):
            cur = cur[m]
        except KeyError:
            continue
        for k, v in cur.items():
-           print 'mongo.%s %d %s mode=%s' % (metric, ts, v, k)
+           print('mongo.%s %d %s mode=%s' % (metric, ts, v, k))

 def runDbStats(c):
     for db_name in DB_NAMES:
@@ -215,7 +217,7 @@ def runDbStats(c):
                cur = cur[m]
            except KeyError:
                continue
-           print 'mongo.db.%s %d %s db=%s' % (metric, ts, cur, db_name)
+           print('mongo.db.%s %d %s db=%s' % (metric, ts, cur, db_name))

        raw_metrics = res['raw']
        for key, value in raw_metrics.items():
@@ -229,7 +231,7 @@ def runDbStats(c):
                    cur = cur[m]
                except KeyError:
                    continue
-               print 'mongo.rs.%s %d %s replica=%s db=%s' % (metric, ts, cur, replica_name, db_name)
+               print('mongo.rs.%s %d %s replica=%s db=%s' % (metric, ts, cur, replica_name, db_name))

 def runReplSetGetStatus(c):
     res = c.admin.command('replSetGetStatus')
@@ -254,7 +256,7 @@ def runReplSetGetStatus(c):
                cur = cur[m]
            except KeyError:
                continue
-           print 'mongo.replica.%s %d %s replica_set=%s replica=%s replica_state=%s replica_health=%s' % (metric, ts, cur, replica_set_name, replica_name, replica_state, replica_health)
+           print('mongo.replica.%s %d %s replica_set=%s replica=%s replica_state=%s replica_health=%s' % (metric, ts, cur, replica_set_name, replica_name, replica_state, replica_health))

 def loadEnv():
     global USER, PASS, INTERVAL, DB_NAMES, CONFIG_CONN, MONGOS_CONN, REPLICA_CONN
@@ -284,7 +286,7 @@ def main():
     utils.drop_privileges()
     if pymongo is None:
-        print >>sys.stderr, "error: Python module `pymongo' is missing"
+        print("error: Python module `pymongo' is missing", file=sys.stderr)
         return 13

     for index, item in enumerate(CONFIG_CONN, start=0):
diff --git a/collectors/0/mountstats.py b/collectors/0/mountstats.py
index 93f6fbdb..66cdaa36 100755
--- a/collectors/0/mountstats.py
+++ b/collectors/0/mountstats.py
@@ -83,7 +83,18 @@
 import socket
 import sys
 import time
-import md5
+
+PY3 = sys.version_info[0] > 2
+if PY3:
+    from hashlib import md5
+
+    def md5_digest(line):
+        return md5(line.encode("utf8")).digest()
+else:
+    import md5
+
+    def md5_digest(line):
+        return md5.new(line).digest()

 COLLECTION_INTERVAL = 10 # seconds

@@ -140,7 +151,7 @@ def main():
            # ( If multiple subdirectories of the same volume are mounted to different places they
            # will show up in mountstats, but will have duplicate data. )
            if field == "events":
-               m = md5.new(line).digest()
+               m = md5_digest(line)
                rpc_metrics[device]['digest'] = m
                if m in rpc_metrics:
                    # metrics already counted, mark as dupe ignore
@@ -176,9 +187,9 @@ def main():
            nfsvol = rpc_metrics[device]['mounts'][0]
            for metric in KEY_METRICS+['other']:
                for field in rpc_metrics[device][metric]:
-                   print "proc.mountstats.%s.%s %d %s nfshost=%s nfsvol=%s" % (metric.lower(), field.lower(), ts, rpc_metrics[device][metric][field], nfshost, nfsvol)
+                   print("proc.mountstats.%s.%s %d %s nfshost=%s nfsvol=%s" % (metric.lower(), field.lower(), ts, rpc_metrics[device][metric][field], nfshost, nfsvol))
            for field in BYTES_FIELDS:
-               print "proc.mountstats.bytes.%s %d %s nfshost=%s nfsvol=%s" % (field.lower(), ts, rpc_metrics[device]['bytes'][field], nfshost, nfsvol)
+               print("proc.mountstats.bytes.%s %d %s nfshost=%s nfsvol=%s" % (field.lower(), ts, rpc_metrics[device]['bytes'][field], nfshost, nfsvol))

        sys.stdout.flush()
        time.sleep(COLLECTION_INTERVAL)
diff --git a/collectors/0/mysql.py b/collectors/0/mysql.py
index 8397da79..c094f10d 100755
--- a/collectors/0/mysql.py
+++ b/collectors/0/mysql.py
@@ -20,6 +20,12 @@
 import sys
 import time

+PY3 = sys.version_info[0] > 2
+if PY3:
+  INTEGER_TYPES = (int, )
+else:
+  INTEGER_TYPES = (int, long)
+
 try:
   import MySQLdb
 except ImportError:
@@ -69,7 +75,7 @@ class DB(object):
     try:
       self.major = int(version[0])
       self.medium = int(version[1])
-    except (ValueError, IndexError), e:
+    except (ValueError, IndexError) as e:
       self.major = self.medium = 0

   def __str__(self):
@@ -90,8 +96,8 @@ class DB(object):
     assert self.cursor, "%s already closed?" % (self,)
     try:
       self.cursor.execute(sql)
-    except MySQLdb.OperationalError, (errcode, msg):
-      if errcode != 2006:  # "MySQL server has gone away"
+    except MySQLdb.OperationalError as exc:
+      if exc.errno != 2006:  # "MySQL server has gone away"
         raise
       self._reconnect()
     return self.cursor.fetchall()
@@ -189,7 +195,7 @@ def find_databases(dbs=None):
      cursor = db.cursor()
      cursor.execute("SELECT VERSION()")
    except (EnvironmentError, EOFError, RuntimeError, socket.error,
-           MySQLdb.MySQLError), e:
+           MySQLdb.MySQLError) as e:
      utils.err("Couldn't connect to %s: %s" % (sockfile, e))
      continue
    version = cursor.fetchone()[0]
@@ -211,7 +217,7 @@ def collectInnodbStatus(db):
  """Collects and prints InnoDB stats about the given DB instance."""
  ts = now()
  def printmetric(metric, value, tags=""):
-    print "mysql.%s %d %s schema=%s%s" % (metric, ts, value, db.dbname, tags)
+    print("mysql.%s %d %s schema=%s%s" % (metric, ts, value, db.dbname, tags))

  innodb_status = db.query("SHOW ENGINE INNODB STATUS")[0][2]
  m = re.search("^(\d{6}\s+\d{1,2}:\d\d:\d\d) INNODB MONITOR OUTPUT$",
@@ -295,7 +301,7 @@ def collect(db):
  ts = now()

  def printmetric(metric, value, tags=""):
-    print "mysql.%s %d %s schema=%s%s" % (metric, ts, value, db.dbname, tags)
+    print("mysql.%s %d %s schema=%s%s" % (metric, ts, value, db.dbname, tags))

  has_innodb = False
  if db.isShowGlobalStatusSafe():
@@ -328,7 +334,7 @@ def collect(db):
        mutex += "." + kind
      wait_count = int(status.split("=", 1)[1])
      waits[mutex] = waits.get(mutex, 0) + wait_count
-    for mutex, wait_count in waits.iteritems():
+    for mutex, wait_count in waits.items():
      printmetric("innodb.locks", wait_count, " mutex=" + mutex)

  ts = now()
@@ -342,7 +348,7 @@ def collect(db):

    if master_host and master_host != "None":
      sbm = slave_status.get("seconds_behind_master")
-      if isinstance(sbm, (int, long)):
+      if isinstance(sbm, INTEGER_TYPES):
        printmetric("slave.seconds_behind_master", sbm)
      printmetric("slave.bytes_executed", slave_status["exec_master_log_pos"])
      printmetric("slave.bytes_relayed", slave_status["read_master_log_pos"])
@@ -355,7 +361,7 @@ def collect(db):
  for row in db.query("SHOW PROCESSLIST"):
    id, user, host, db_, cmd, time, state = row[:7]
    states[cmd] = states.get(cmd, 0) + 1
-  for state, count in states.iteritems():
+  for state, count in states.items():
    state = state.lower().replace(" ", "_")
    printmetric("connection_states", count, " state=%s" % state)

@@ -377,11 +383,11 @@ def main(args):
      last_db_refresh = ts

    errs = []
-    for dbname, db in dbs.iteritems():
+    for dbname, db in dbs.items():
      try:
        collect(db)
      except (EnvironmentError, EOFError, RuntimeError, socket.error,
-             MySQLdb.MySQLError), e:
+             MySQLdb.MySQLError) as e:
        if isinstance(e, IOError) and e[0] == errno.EPIPE:
          # Exit on a broken pipe.  There's no point in continuing
          # because no one will read our stdout anyway.
diff --git a/collectors/0/netstat.py b/collectors/0/netstat.py
index 2c00dbbe..d55400bf 100755
--- a/collectors/0/netstat.py
+++ b/collectors/0/netstat.py
@@ -57,6 +57,8 @@
    - net.stat.tcp.syncookies: SYN cookies (both sent & received).
 """

+from __future__ import print_function
+
 import re
 import resource
 import sys
@@ -76,8 +78,8 @@ def main():
     sockstat = open("/proc/net/sockstat")
     netstat = open("/proc/net/netstat")
     snmp = open("/proc/net/snmp")
-  except IOError, e:
-    print >>sys.stderr, "open failed: %s" % e
+  except IOError as e:
+    print("open failed: %s" % e, file=sys.stderr)
    return 13  # Ask tcollector to not re-start us.

  utils.drop_privileges()
@@ -101,7 +103,7 @@ def main():
  def print_sockstat(metric, value, tags=""):
    # Note: tags must start with ' '
    if value is not None:
-      print "net.sockstat.%s %d %s%s" % (metric, ts, value, tags)
+      print("net.sockstat.%s %d %s%s" % (metric, ts, value, tags))


  # If a line in /proc/net/{netstat,snmp} doesn't start with a word in that
@@ -255,8 +257,8 @@ def main():
    if tags:
      space = " "
    else:
      tags = space = ""
-    print "net.stat.%s.%s %d %s%s%s" % (statstype, metric, ts, value,
-                                        space, tags)
+    print("net.stat.%s.%s %d %s%s%s" % (statstype, metric, ts, value,
+                                        space, tags))
  def parse_stats(stats, filename):
    statsdikt = {}
@@ -278,20 +280,20 @@ def main():
      assert header[0] == data[0], repr((header, data))
      assert len(header) == len(data), repr((header, data))
      if header[0] not in known_statstypes:
-        print >>sys.stderr, ("Unrecoginized line in %s:"
-                             " %r (file=%r)" % (filename, header, stats))
+        print("Unrecoginized line in %s:"
+              " %r (file=%r)" % (filename, header, stats), file=sys.stderr)
        continue
      statstype = header.pop(0)
      data.pop(0)
      stats = dict(zip(header, data))
      statsdikt.setdefault(known_statstypes[statstype], {}).update(stats)
-    for statstype, stats in statsdikt.iteritems():
+    for statstype, stats in statsdikt.items():
      # Undo the kernel's double counting
      if "ListenDrops" in stats:
        stats["ListenDrops"] = int(stats["ListenDrops"]) - int(stats.get("ListenOverflows", 0))
      elif "RcvbufErrors" in stats:
        stats["InErrors"] = int(stats.get("InErrors", 0)) - int(stats["RcvbufErrors"])
-      for stat, (metric, tags) in known_stats[statstype].iteritems():
+      for stat, (metric, tags) in known_stats[statstype].items():
        value = stats.get(stat)
        if value is not None:
          print_netstat(statstype, metric, value, tags)
@@ -306,7 +308,7 @@ def main():
    snmpstats = snmp.read()
    m = re.match(regexp, data)
    if not m:
-      print >>sys.stderr, "Cannot parse sockstat: %r" % data
+      print("Cannot parse sockstat: %r" % data, file=sys.stderr)
      return 13

  # The difference between the first two values is the number of
diff --git a/collectors/0/nfsstat.py b/collectors/0/nfsstat.py
index 1ac12465..94ef1ec6 100755
--- a/collectors/0/nfsstat.py
+++ b/collectors/0/nfsstat.py
@@ -14,6 +14,8 @@
 #
 """Imports NFS stats from /proc."""

+from __future__ import print_function
+
 import sys
 import time

@@ -49,8 +51,8 @@ def main():

  try:
    f_nfs = open("/proc/net/rpc/nfs")
-  except IOError, e:
-    print >>sys.stderr, "Failed to open input file: %s" % (e,)
+  except IOError as e:
+    print("Failed to open input file: %s" % (e,), file=sys.stderr)
    return 13  # Ask tcollector to not re-start us immediately.

  utils.drop_privileges()
@@ -67,19 +69,19 @@ def main():
                % (int(fields[1]), len(fields[2:])))
      for idx, val in enumerate(fields[2:]):
        try:
-          print ("nfs.client.rpc %d %s op=%s version=%s"
+          print("nfs.client.rpc %d %s op=%s version=%s"
                 % (ts, int(val), nfs_client_proc_names[fields[0]][idx],
                    fields[0][4:]))
        except IndexError:
-          print >> sys.stderr, ("Warning: name lookup failed"
-                                " at position %d" % idx)
+          print("Warning: name lookup failed"
+                " at position %d" % idx, file=sys.stderr)
    elif fields[0] == "rpc":
      # RPC
      calls = int(fields[1])
      retrans = int(fields[2])
      authrefrsh = int(fields[3])
-      print "nfs.client.rpc.stats %d %d type=calls" % (ts, calls)
-      print "nfs.client.rpc.stats %d %d type=retrans" % (ts, retrans)
-      print ("nfs.client.rpc.stats %d %d type=authrefrsh"
+      print("nfs.client.rpc.stats %d %d type=calls" % (ts, calls))
+      print("nfs.client.rpc.stats %d %d type=retrans" % (ts, retrans))
+      print("nfs.client.rpc.stats %d %d type=authrefrsh"
            % (ts, authrefrsh))

    sys.stdout.flush()
diff --git a/collectors/0/ntpstat.py b/collectors/0/ntpstat.py
index 88a7b11a..d07ed5e0 100755
--- a/collectors/0/ntpstat.py
+++ b/collectors/0/ntpstat.py
@@ -17,6 +17,8 @@
 #
 # ntp.offset               estimated offset

+from __future__ import print_function
+
 import os
 import socket
 import subprocess
@@ -47,7 +49,7 @@ def main():
    ts = int(time.time())
    try:
      ntp_proc = subprocess.Popen(["ntpq", "-p"], stdout=subprocess.PIPE)
-    except OSError, e:
+    except OSError as e:
      if e.errno == errno.ENOENT:
        # looks like ntpdc is not available, stop using this collector
        sys.exit(13)  # we signal tcollector to stop using this
@@ -64,9 +66,9 @@ def main():
        if fields[0].startswith("*"):
          offset=fields[8]
          continue
-      print ("ntp.offset %d %s" % (ts, offset))
+      print("ntp.offset %d %s" % (ts, offset))
    else:
-      print >> sys.stderr, "ntpq -p, returned %r" % (ntp_proc.returncode)
+      print("ntpq -p, returned %r" % (ntp_proc.returncode), file=sys.stderr)

    sys.stdout.flush()
    time.sleep(collection_interval)
diff --git a/collectors/0/postgresql.py b/collectors/0/postgresql.py
index 1a70311a..ab003acc 100755
--- a/collectors/0/postgresql.py
+++ b/collectors/0/postgresql.py
@@ -66,7 +66,7 @@ def collect(db):
      try:
        if metric in ("stats_reset"):
          continue
-        print ("postgresql.%s %i %s database=%s"
+        print("postgresql.%s %i %s database=%s"
              % (metric, ts, value, database))
      except:
        utils.err("got here")
@@ -79,10 +79,10 @@ def collect(db):
    connections = cursor.fetchall()

    for database, connection in connections:
-      print ("postgresql.connections %i %s database=%s"
+      print("postgresql.connections %i %s database=%s"
           % (ts, connection, database))

-  except (EnvironmentError, EOFError, RuntimeError, socket.error), e:
+  except (EnvironmentError, EOFError, RuntimeError, socket.error) as e:
    if isinstance(e, IOError) and e[0] == errno.EPIPE:
      # exit on a broken pipe.  There is no point in continuing
      # because no one will read our stdout anyway.
@@ -94,7 +94,7 @@ def main(args):

  try:
    db = postgresqlutils.connect()
-  except (Exception), e:
+  except (Exception) as e:
    utils.err("error: Could not initialize collector : %s" % (e))
    return 13 # Ask tcollector to not respawn us

diff --git a/collectors/0/postgresql_replication.py b/collectors/0/postgresql_replication.py
index 747b38d0..d5e99221 100755
--- a/collectors/0/postgresql_replication.py
+++ b/collectors/0/postgresql_replication.py
@@ -52,14 +52,14 @@ def collect(db):
    stats = cursor.fetchall()

    if (stats[0][0] is not None):
-      print ("postgresql.replication.upstream.lag.time %i %s"
+      print("postgresql.replication.upstream.lag.time %i %s"
           % (ts, stats[0][0]))

    if (stats[0][1] is not None):
-      print ("postgresql.replication.upstream.lag.bytes %i %s"
+      print("postgresql.replication.upstream.lag.bytes %i %s"
           % (ts, stats[0][1]))

-    print ("postgresql.replication.recovering %i %i"
+    print("postgresql.replication.recovering %i %i"
         % (ts, stats[0][2]))

    # WAL receiver process running (could be slave only or master / slave combo)
@@ -74,7 +74,7 @@ def collect(db):
        wal_receiver_running = 1;
        break

-    print ("postgresql.replication.walreceiver.running %i %s"
+    print("postgresql.replication.walreceiver.running %i %s"
         % (ts, wal_receiver_running))

    # WAL sender process info (could be master only or master / slave combo)
@@ -84,14 +84,14 @@ def collect(db):
    ts = time.time()
    stats = cursor.fetchall()

-    print ("postgresql.replication.downstream.count %i %i"
+    print("postgresql.replication.downstream.count %i %i"
         % (ts, len(stats)))

    for stat in stats:
-      print ("postgresql.replication.downstream.lag.bytes %i %i client_ip=%s client_port=%s"
+      print("postgresql.replication.downstream.lag.bytes %i %i client_ip=%s client_port=%s"
           % (ts, stat[2], stat[0], stat[1]))

-  except (EnvironmentError, EOFError, RuntimeError, socket.error), e:
+  except (EnvironmentError, EOFError, RuntimeError, socket.error) as e:
    if isinstance(e, IOError) and e[0] == errno.EPIPE:
      # exit on a broken pipe.  There is no point in continuing
      # because no one will read our stdout anyway.
@@ -103,7 +103,7 @@ def main(args):

  try:
    db = postgresqlutils.connect()
-  except (Exception), e:
+  except (Exception) as e:
    utils.err("error: Could not initialize collector : %s" % (e))
    return 13 # Ask tcollector to not respawn us
diff --git a/collectors/0/procnettcp.py b/collectors/0/procnettcp.py
index 1e0f381a..6b7dac9b 100755
--- a/collectors/0/procnettcp.py
+++ b/collectors/0/procnettcp.py
@@ -48,6 +48,8 @@
 # opened/handled the connection.  For connections in time_wait, for
 # example, they will always show user=root.

+from __future__ import print_function
+
 import os
 import pwd
 import sys
@@ -103,7 +105,7 @@
          60020: "hregionserver",
         }

-SERVICES = tuple(set(PORTS.itervalues()))
+SERVICES = tuple(set(PORTS.values()))

 TCPSTATES = {
   "01": "established",
@@ -145,8 +147,8 @@ def main(unused_args):
  """procnettcp main loop"""
  try:              # On some Linux kernel versions, with lots of connections
    os.nice(19)     # this collector can be very CPU intensive.  So be nicer.
-  except OSError, e:
-    print >>sys.stderr, "warning: failed to self-renice:", e
+  except OSError as e:
+    print("warning: failed to self-renice:", e, file=sys.stderr)

  interval = 60
@@ -165,13 +167,13 @@ def main(unused_args):
                    # address size
  try:
    tcp6 = open("/proc/net/tcp6")
-  except IOError, (errno, msg):
-    if errno == 2:  # No such file => IPv6 is disabled.
+  except IOError as exc:
+    if exc.errno == 2:  # No such file => IPv6 is disabled.
      tcp6 = None
    else:
      raise
-  except IOError, e:
-    print >>sys.stderr, "Failed to open input file: %s" % (e,)
+  except IOError as e:
+    print("Failed to open input file: %s" % (e,), file=sys.stderr)
    return 13  # Ask tcollector to not re-start us immediately.

  utils.drop_privileges()
@@ -224,9 +226,9 @@ def main(unused_args):
      key = ("state=%s endpoint=%s service=%s user=%s"
             % (TCPSTATES[state], endpoint, service, user))
      if key in counter:
-        print "proc.net.tcp", ts, counter[key], key
+        print("proc.net.tcp", ts, counter[key], key)
      else:
-        print "proc.net.tcp", ts, "0", key
+        print("proc.net.tcp", ts, "0", key)

    sys.stdout.flush()
    time.sleep(interval)
diff --git a/collectors/0/procstats.py b/collectors/0/procstats.py
index 813cd121..3f602698 100755
--- a/collectors/0/procstats.py
+++ b/collectors/0/procstats.py
@@ -30,8 +30,8 @@ def find_sysfs_numa_stats():
  """Returns a possibly empty list of NUMA stat file names."""
  try:
    nodes = os.listdir(NUMADIR)
-  except OSError, (errno, msg):
-    if errno == 2:  # No such file or directory
+  except OSError as exc:
+    if exc.errno == 2:  # No such file or directory
      return []  # We don't have NUMA stats.
    raise

@@ -40,8 +40,8 @@ def find_sysfs_numa_stats():
  for node in nodes:
    try:
      numastats.append(os.path.join(NUMADIR, node, "numastat"))
-    except OSError, (errno, msg):
-      if errno == 2:  # No such file or directory
+    except OSError as exc:
+      if exc.errno == 2:  # No such file or directory
        continue
      raise
  return numastats
@@ -59,23 +59,23 @@ def print_numa_stats(numafiles):
                      # miss: process wanted another node and got it from
                      # this one instead.
                      ("numa_miss", "miss")):
-      print ("sys.numa.zoneallocs %d %s node=%d type=%s"
+      print("sys.numa.zoneallocs %d %s node=%d type=%s"
            % (ts, stats[stat], node_id, tag))
    # Count this one as a separate metric because we can't sum up hit +
    # miss + foreign, this would result in double-counting of all misses.
    # See `zone_statistics' in the code of the kernel.
    # foreign: process wanted memory from this node but got it from
    # another node.  So maybe this node is out of free pages.
-    print ("sys.numa.foreign_allocs %d %s node=%d"
+    print("sys.numa.foreign_allocs %d %s node=%d"
         % (ts, stats["numa_foreign"], node_id))
    # When is memory allocated to a node that's local or remote to where
    # the process is running.
    for stat, tag in (("local_node", "local"),
                      ("other_node", "remote")):
-      print ("sys.numa.allocation %d %s node=%d type=%s"
+      print("sys.numa.allocation %d %s node=%d type=%s"
           % (ts, stats[stat], node_id, tag))
    # Pages successfully allocated with the interleave policy.
- print ("sys.numa.interleave %d %s node=%d type=hit" + print("sys.numa.interleave %d %s node=%d type=hit" % (ts, stats["interleave_hit"], node_id)) numafile.close() @@ -116,8 +116,8 @@ def main(): for line in f_uptime: m = re.match("(\S+)\s+(\S+)", line) if m: - print "proc.uptime.total %d %s" % (ts, m.group(1)) - print "proc.uptime.now %d %s" % (ts, m.group(2)) + print("proc.uptime.total %d %s" % (ts, m.group(1))) + print("proc.uptime.now %d %s" % (ts, m.group(2))) # proc.meminfo f_meminfo.seek(0) @@ -131,7 +131,7 @@ def main(): else: value = m.group(2) name = re.sub("\W", "_", m.group(1)).lower().strip("_") - print ("proc.meminfo.%s %d %s" + print("proc.meminfo.%s %d %s" % (name, ts, value)) # proc.vmstat @@ -143,7 +143,7 @@ def main(): continue if m.group(1) in ("pgpgin", "pgpgout", "pswpin", "pswpout", "pgfault", "pgmajfault"): - print "proc.vmstat.%s %d %s" % (m.group(1), ts, m.group(2)) + print("proc.vmstat.%s %d %s" % (m.group(1), ts, m.group(2))) # proc.stat f_stat.seek(0) @@ -166,17 +166,17 @@ def main(): # We use zip to ignore fields that don't exist. for value, field_name in zip(fields, cpu_types): - print "proc.stat.cpu%s %d %s type=%s%s" % (metric_percpu, - ts, value, field_name, tags) + print("proc.stat.cpu%s %d %s type=%s%s" % (metric_percpu, + ts, value, field_name, tags)) elif m.group(1) == "intr": - print ("proc.stat.intr %d %s" - % (ts, m.group(2).split()[0])) + print(("proc.stat.intr %d %s" + % (ts, m.group(2).split()[0]))) elif m.group(1) == "ctxt": - print "proc.stat.ctxt %d %s" % (ts, m.group(2)) + print("proc.stat.ctxt %d %s" % (ts, m.group(2))) elif m.group(1) == "processes": - print "proc.stat.processes %d %s" % (ts, m.group(2)) + print("proc.stat.processes %d %s" % (ts, m.group(2))) elif m.group(1) == "procs_blocked": - print "proc.stat.procs_blocked %d %s" % (ts, m.group(2)) + print("proc.stat.procs_blocked %d %s" % (ts, m.group(2))) f_loadavg.seek(0) ts = int(time.time()) @@ -184,16 +184,16 @@ def main(): m = re.match("(\S+)\s+(\S+)\s+(\S+)\s+(\d+)/(\d+)\s+", line) if not m: continue - print "proc.loadavg.1min %d %s" % (ts, m.group(1)) - print "proc.loadavg.5min %d %s" % (ts, m.group(2)) - print "proc.loadavg.15min %d %s" % (ts, m.group(3)) - print "proc.loadavg.runnable %d %s" % (ts, m.group(4)) - print "proc.loadavg.total_threads %d %s" % (ts, m.group(5)) + print("proc.loadavg.1min %d %s" % (ts, m.group(1))) + print("proc.loadavg.5min %d %s" % (ts, m.group(2))) + print("proc.loadavg.15min %d %s" % (ts, m.group(3))) + print("proc.loadavg.runnable %d %s" % (ts, m.group(4))) + print("proc.loadavg.total_threads %d %s" % (ts, m.group(5))) f_entropy_avail.seek(0) ts = int(time.time()) for line in f_entropy_avail: - print "proc.kernel.entropy_avail %d %s" % (ts, line.strip()) + print("proc.kernel.entropy_avail %d %s" % (ts, line.strip())) f_interrupts.seek(0) ts = int(time.time()) @@ -219,7 +219,7 @@ def main(): sys.stderr.write("Unexpected interrupts value %r in" " %r: " % (val, cols)) break - print ("proc.interrupts %s %s type=%s cpu=%s" + print("proc.interrupts %s %s type=%s cpu=%s" % (ts, val, irq_type, i)) f_softirqs.seek(0) @@ -240,7 +240,7 @@ def main(): sys.stderr.write("Unexpected softirq value %r in" " %r: " % (val, cols)) break - print ("proc.softirqs %s %s type=%s cpu=%s" + print("proc.softirqs %s %s type=%s cpu=%s" % (ts, val, irq_type, i)) print_numa_stats(numastats) @@ -251,19 +251,19 @@ def main(): f = f_scaling_min[cpu_no] f.seek(0) for line in f: - print "proc.scaling.min %d %s cpu=%s" % (ts, line.rstrip('\n'), cpu_no) + print("proc.scaling.min %d 
%s cpu=%s" % (ts, line.rstrip('\n'), cpu_no)) ts = int(time.time()) for cpu_no in f_scaling_max.keys(): f = f_scaling_max[cpu_no] f.seek(0) for line in f: - print "proc.scaling.max %d %s cpu=%s" % (ts, line.rstrip('\n'), cpu_no) + print("proc.scaling.max %d %s cpu=%s" % (ts, line.rstrip('\n'), cpu_no)) ts = int(time.time()) for cpu_no in f_scaling_cur.keys(): f = f_scaling_cur[cpu_no] f.seek(0) for line in f: - print "proc.scaling.cur %d %s cpu=%s" % (ts, line.rstrip('\n'), cpu_no) + print("proc.scaling.cur %d %s cpu=%s" % (ts, line.rstrip('\n'), cpu_no)) sys.stdout.flush() time.sleep(COLLECTION_INTERVAL) diff --git a/collectors/0/pxc-collector.py b/collectors/0/pxc-collector.py index 6c0441ea..4a77c0fc 100755 --- a/collectors/0/pxc-collector.py +++ b/collectors/0/pxc-collector.py @@ -52,7 +52,7 @@ def getRow(): result = cursor.fetchall() except: - print "Error: unable to fetch data - Check your configuration!" + print("Error: unable to fetch data - Check your configuration!") sys.exit(13) # Don't respawn collector db.close() @@ -77,7 +77,7 @@ def main(): timestamp = int(time.time()) if row[0] in myMap: result = TSDResult(row[0], row[1], prefix, timestamp) - print result.TSDRow() + print(result.TSDRow()) time.sleep(interval) return 0 else: diff --git a/collectors/0/redis_stats.py b/collectors/0/redis_stats.py index 590b6fa9..cdba5673 100755 --- a/collectors/0/redis_stats.py +++ b/collectors/0/redis_stats.py @@ -57,6 +57,8 @@ http://redis.io/commands/info """ +from __future__ import print_function + import re import subprocess import sys @@ -112,7 +114,7 @@ def main(): def print_stat(metric, value, tags=""): if value is not None: - print "redis.%s %d %s %s" % (metric, ts, value, tags) + print("redis.%s %d %s %s" % (metric, ts, value, tags)) dbre = re.compile("^db\d+$") @@ -137,8 +139,8 @@ def print_stat(metric, value, tags=""): print_stat(key, info[key], tags) # per database metrics - for db in filter(dbre.match, info.keys()): - for db_metric in info[db].keys(): + for db in filter(dbre.match, list(info.keys())): + for db_metric in list(info[db].keys()): print_stat(db_metric, info[db][db_metric], "%s db=%s" % (tags, db)) # get some instant latency information @@ -163,7 +165,7 @@ def scan_for_instances(): ns_proc = subprocess.Popen(["netstat", "-tnlp"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, _ = ns_proc.communicate() if ns_proc.returncode != 0: - print >> sys.stderr, "failed to find instances %r" % ns_proc.returncode + print("failed to find instances %r" % ns_proc.returncode, file=sys.stderr) return {} for line in stdout.split("\n"): diff --git a/collectors/0/riak.py b/collectors/0/riak.py index d2016b5e..dc64624b 100755 --- a/collectors/0/riak.py +++ b/collectors/0/riak.py @@ -44,10 +44,14 @@ import os import sys import time -import urllib2 from collectors.lib import utils +try: + from urllib.request import urlopen +except ImportError: + from urllib2 import urlopen + MAP = { 'vnode_gets_total': ('vnode.requests', 'type=get'), 'vnode_puts_total': ('vnode.requests', 'type=put'), @@ -87,12 +91,12 @@ def main(): def print_stat(metric, value, tags=""): if value is not None: - print "riak.%s %d %s %s" % (metric, ts, value, tags) + print("riak.%s %d %s %s" % (metric, ts, value, tags)) while True: ts = int(time.time()) - req = urllib2.urlopen("http://localhost:8098/stats") + req = urlopen("http://localhost:8098/stats") if req is not None: obj = json.loads(req.read()) for key in obj: diff --git a/collectors/0/smart_stats.py b/collectors/0/smart_stats.py index 735aec51..c268f5c7 
100755 --- a/collectors/0/smart_stats.py +++ b/collectors/0/smart_stats.py @@ -15,6 +15,8 @@ # """SMART disk stats for TSDB""" +from __future__ import print_function + import glob import os import signal @@ -91,8 +93,8 @@ class Alarm(RuntimeError): def alarm_handler(signum, frame): - print >>sys.stderr, ("Program took too long to run, " - "consider increasing its timeout.") + print("Program took too long to run, " + "consider increasing its timeout.", file=sys.stderr) raise Alarm() @@ -126,31 +128,31 @@ def is_adaptec_driver_broken(): if arcconf.returncode == 127: # arcconf doesn't even work on this system, so assume we're safe return False - print >>sys.stderr, ("arcconf unexpected error %s" % arcconf.returncode) + print("arcconf unexpected error %s" % arcconf.returncode, file=sys.stderr) return True for line in arcconf_output.split("\n"): fields = [x for x in line.split(" ") if x] if fields[0] == "Driver" and fields[2] in BROKEN_DRIVER_VERSIONS: - print >>sys.stderr, ("arcconf indicates broken driver version %s" - % fields[2]) + print("arcconf indicates broken driver version %s" + % fields[2], file=sys.stderr) return True return False def is_3ware_driver_broken(drives): # Apparently 3ware controllers can't report SMART stats from SAS drives. WTF. # See also http://sourceforge.net/apps/trac/smartmontools/ticket/161 - for i in reversed(xrange(len(drives))): + for i in reversed(range(len(drives))): drive = drives[i] signal.alarm(COMMAND_TIMEOUT) smart_ctl = subprocess.Popen(SMART_CTL + " -i /dev/" + drive, shell=True, stdout=subprocess.PIPE) smart_output = smart_ctl.communicate()[0] if "supports SMART and is Disabled" in smart_output: - print >>sys.stderr, "SMART is disabled for %s" % drive + print("SMART is disabled for %s" % drive, file=sys.stderr) del drives[i] # We're iterating from the end of the list so this is OK. signal.alarm(0) if not drives: - print >>sys.stderr, "None of the drives support SMART. Are they SAS drives?" + print("None of the drives support SMART. Are they SAS drives?", file=sys.stderr) return True return False @@ -174,17 +176,17 @@ def process_output(drive, smart_output): if len(fields) > 2 and field in ATTRIBUTE_MAP: metric = ATTRIBUTE_MAP[field] value = fields[9].split()[0] - print ("smart.%s %d %s disk=%s" % (metric, ts, value, drive)) + print("smart.%s %d %s disk=%s" % (metric, ts, value, drive)) if is_seagate and metric in ("seek_error_rate", "raw_read_error_rate"): # It appears that some Seagate drives (and possibly some Western # Digital ones too) use the first 16 bits to store error counts, # and the low 32 bits to store operation counts, out of these 48 # bit values. So try to be helpful and extract these here. 
        value = int(value)
-        print ("smart.%s %d %d disk=%s"
+        print("smart.%s %d %d disk=%s"
              % (metric.replace("error_rate", "count"), ts,
                 value & 0xFFFFFFFF, drive))
-        print ("smart.%s %d %d disk=%s"
+        print("smart.%s %d %d disk=%s"
             % (metric.replace("error_rate", "errors"), ts,
                (value & 0xFFFF00000000) >> 32, drive))
  elif line.startswith("ID#"):
@@ -230,7 +232,7 @@ def main():
      if smart_ctl.returncode == 127:
        sys.exit(13)
      else:
-        print >>sys.stderr, "Command exited with: %d" % smart_ctl.returncode
+        print("Command exited with: %d" % smart_ctl.returncode, file=sys.stderr)
    process_output(drive, smart_output)

  sys.stdout.flush()
diff --git a/collectors/0/sysload.py b/collectors/0/sysload.py
index c10d3d73..8e24959c 100755
--- a/collectors/0/sysload.py
+++ b/collectors/0/sysload.py
@@ -47,6 +47,10 @@

 from collectors.lib import utils

+PY3 = sys.version_info[0] > 2
+if PY3:
+    long = int
+
 try:
     from collectors.etc import sysload_conf
 except ImportError:
@@ -114,7 +118,7 @@ def main():
            ["mpstat", str(collection_interval)],
            stdout=subprocess.PIPE,
        )
-    except OSError, e:
+    except OSError as e:
        if e.errno == errno.ENOENT:
            # it makes no sense to run this collector here
            sys.exit(13) # we signal tcollector to not run us
@@ -125,7 +129,7 @@ def main():
    while signal_received is None:
        try:
            line = p_top.stdout.readline()
-        except (IOError, OSError), e:
+        except (IOError, OSError) as e:
            if e.errno in (errno.EINTR, errno.EAGAIN):
                break
            raise
@@ -153,17 +157,17 @@ def main():
            cpusystem=fields[4]
            cpuinterrupt=fields[6]
            cpuidle=fields[-1]
-            print ("cpu.usr %s %s cpu=%s" % (timestamp, float(cpuuser), cpuid))
-            print ("cpu.nice %s %s cpu=%s" % (timestamp, float(cpunice), cpuid))
-            print ("cpu.sys %s %s cpu=%s" % (timestamp, float(cpusystem), cpuid))
-            print ("cpu.irq %s %s cpu=%s" % (timestamp, float(cpuinterrupt), cpuid))
-            print ("cpu.idle %s %s cpu=%s" % (timestamp, float(cpuidle), cpuid))
+            print("cpu.usr %s %s cpu=%s" % (timestamp, float(cpuuser), cpuid))
+            print("cpu.nice %s %s cpu=%s" % (timestamp, float(cpunice), cpuid))
+            print("cpu.sys %s %s cpu=%s" % (timestamp, float(cpusystem), cpuid))
+            print("cpu.irq %s %s cpu=%s" % (timestamp, float(cpuinterrupt), cpuid))
+            print("cpu.idle %s %s cpu=%s" % (timestamp, float(cpuidle), cpuid))

        elif(fields[0] == "averages:"):
            timestamp = int(time.time())
-            print ("load.1m %s %s" % (timestamp, fields[1]))
-            print ("load.5m %s %s" % (timestamp, fields[2]))
-            print ("load.15m %s %s" % (timestamp, fields[3]))
+            print("load.1m %s %s" % (timestamp, fields[1]))
+            print("load.5m %s %s" % (timestamp, fields[2]))
+            print("load.15m %s %s" % (timestamp, fields[3]))

        elif (re.match("[0-9]+ processes:",line)):
            starting=0
@@ -188,14 +192,14 @@ def main():
                    waiting=fields[i-1]
                if(fields[i] == "lock"):
                    lock=fields[i-1]
-            print ("ps.all %s %s" % (timestamp, fields[0]))
-            print ("ps.start %s %s" % (timestamp, starting))
-            print ("ps.run %s %s" % (timestamp, running))
-            print ("ps.sleep %s %s" % (timestamp, sleeping))
-            print ("ps.stop %s %s" % (timestamp, stopped))
-            print ("ps.zomb %s %s" % (timestamp, zombie))
-            print ("ps.wait %s %s" % (timestamp, waiting))
-            print ("ps.lock %s %s" % (timestamp, lock))
+            print("ps.all %s %s" % (timestamp, fields[0]))
+            print("ps.start %s %s" % (timestamp, starting))
+            print("ps.run %s %s" % (timestamp, running))
+            print("ps.sleep %s %s" % (timestamp, sleeping))
+            print("ps.stop %s %s" % (timestamp, stopped))
+            print("ps.zomb %s %s" % (timestamp, zombie))
+            print("ps.wait %s %s" % (timestamp, waiting))
+            print("ps.lock %s %s" % (timestamp, lock))

        elif(fields[0] == "Mem:"):
            active=0
@@ -217,12 +221,12 @@ def main():
                    buf=convert_to_bytes(fields[i-1])
                if(fields[i] == "Free"):
                    free=convert_to_bytes(fields[i-1])
-            print ("mem.active %s %s" % (timestamp, active))
-            print ("mem.inact %s %s" % (timestamp, inact))
-            print ("mem.wired %s %s" % (timestamp, wired))
-            print ("mem.cache %s %s" % (timestamp, cache))
-            print ("mem.buf %s %s" % (timestamp, buf))
-            print ("mem.free %s %s" % (timestamp, free))
+            print("mem.active %s %s" % (timestamp, active))
+            print("mem.inact %s %s" % (timestamp, inact))
+            print("mem.wired %s %s" % (timestamp, wired))
+            print("mem.cache %s %s" % (timestamp, cache))
+            print("mem.buf %s %s" % (timestamp, buf))
+            print("mem.free %s %s" % (timestamp, free))

        elif(fields[0] == "ARC:"):
            total=0
@@ -244,12 +248,12 @@ def main():
                    header=convert_to_bytes(fields[i-1])
                if(fields[i] == "Other"):
                    other=convert_to_bytes(fields[i-1])
-            print ("arc.total %s %s" % (timestamp, total))
-            print ("arc.mru %s %s" % (timestamp, mru))
-            print ("arc.mfu %s %s" % (timestamp, mfu))
-            print ("arc.anon %s %s" % (timestamp, anon))
-            print ("arc.header %s %s" % (timestamp, header))
-            print ("arc.other %s %s" % (timestamp, other))
+            print("arc.total %s %s" % (timestamp, total))
+            print("arc.mru %s %s" % (timestamp, mru))
+            print("arc.mfu %s %s" % (timestamp, mfu))
+            print("arc.anon %s %s" % (timestamp, anon))
+            print("arc.header %s %s" % (timestamp, header))
+            print("arc.other %s %s" % (timestamp, other))

        elif(fields[0] == "Swap:"):
            total=0
@@ -271,12 +275,12 @@ def main():
                    inps=convert_to_bytes(fields[i-1])/collection_interval
                if(fields[i] == "Out"):
                    outps=convert_to_bytes(fields[i-1])/collection_interval
-            print ("swap.total %s %s" % (timestamp, total))
-            print ("swap.used %s %s" % (timestamp, used))
-            print ("swap.free %s %s" % (timestamp, free))
-            print ("swap.inuse %s %s" % (timestamp, inuse))
-            print ("swap.inps %s %s" % (timestamp, inps))
-            print ("swap.outps %s %s" % (timestamp, outps))
+            print("swap.total %s %s" % (timestamp, total))
+            print("swap.used %s %s" % (timestamp, used))
+            print("swap.free %s %s" % (timestamp, free))
+            print("swap.inuse %s %s" % (timestamp, inuse))
+            print("swap.inps %s %s" % (timestamp, inps))
+            print("swap.outps %s %s" % (timestamp, outps))

        sys.stdout.flush()

diff --git a/collectors/0/tcollector.py b/collectors/0/tcollector.py
index ee44720a..02a3a373 100755
--- a/collectors/0/tcollector.py
+++ b/collectors/0/tcollector.py
@@ -114,7 +114,7 @@ def update(self):

    def filter(self, cond):
        """ Return processes for that the function cond evaluates to true. """
-        return filter(cond, self.processes.values())
+        return list(filter(cond, self.processes.values()))

 def collect_tcollect_stats(processes):
    # print a msg and do nothing if the parent process isn't tcollector
diff --git a/collectors/0/tcp_bridge.py b/collectors/0/tcp_bridge.py
index a723d5fb..bd3de3b7 100755
--- a/collectors/0/tcp_bridge.py
+++ b/collectors/0/tcp_bridge.py
@@ -13,17 +13,23 @@
 # see <http://www.gnu.org/licenses/>.
"""Listens on a local TCP socket for incoming Metrics """ +from __future__ import print_function + import socket import os import sys import time from collectors.lib import utils -from thread import * + +try: + from _thread import * +except ImportError: + from thread import * try: from collectors.etc import tcp_bridge_conf except ImportError: - print >> sys.stderr, 'unable to import tcp_bridge_conf' + print('unable to import tcp_bridge_conf', file=sys.stderr) tcp_bridge_conf = None HOST = '127.0.0.1' @@ -42,7 +48,7 @@ def main(): if not (tcp_bridge_conf and tcp_bridge_conf.enabled()): - print >> sys.stderr, 'not enabled, or tcp_bridge_conf unavilable' + print('not enabled, or tcp_bridge_conf unavilable', file=sys.stderr) sys.exit(13) utils.drop_privileges() @@ -103,7 +109,7 @@ def removePut(line): sock.bind((HOST, PORT)) sock.listen(1) - except socket.error, msg: + except socket.error as msg: utils.err('could not open socket: %s' % msg) sys.exit(1) diff --git a/collectors/0/udp_bridge.py b/collectors/0/udp_bridge.py index cfec4537..2daf5b31 100755 --- a/collectors/0/udp_bridge.py +++ b/collectors/0/udp_bridge.py @@ -44,7 +44,7 @@ def removePut(line): else: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind((HOST, PORT)) - except socket.error, msg: + except socket.error as msg: utils.err('could not open socket: %s' % msg) sys.exit(1) @@ -64,7 +64,7 @@ def removePut(line): if not data: utils.err("invalid data") break - print data + print(data) now = int(time.time()) if now > flush_timeout: sys.stdout.flush() diff --git a/collectors/0/varnishstat.py b/collectors/0/varnishstat.py index b2806946..33541dde 100755 --- a/collectors/0/varnishstat.py +++ b/collectors/0/varnishstat.py @@ -68,7 +68,7 @@ def main(): ["varnishstat", "-1", "-f" + fields, "-j"], stdout=subprocess.PIPE, ) - except OSError, e: + except OSError as e: # Die and signal to tcollector not to run this script. sys.stderr.write("Error: %s\n" % e) sys.exit(13) @@ -85,11 +85,11 @@ def main(): else: timestamp = time.time() - for k, v in metrics.iteritems(): + for k, v in metrics.items(): if k != "timestamp" and None == bad_regex.search(k): metric_name = metric_prefix + "." 
+ k - print "%s %d %s %s" % \ - (metric_name, timestamp, v['value'], ",".join(tags)) + print("%s %d %s %s" % \ + (metric_name, timestamp, v['value'], ",".join(tags))) sys.stdout.flush() time.sleep(interval) diff --git a/collectors/0/zabbix_bridge.py b/collectors/0/zabbix_bridge.py index 8be5d03c..3153c0d2 100755 --- a/collectors/0/zabbix_bridge.py +++ b/collectors/0/zabbix_bridge.py @@ -70,14 +70,14 @@ def main(): cachecur.execute('SELECT id, key, host, proxy FROM zabbix_cache WHERE id=?', (itemid,)) row = cachecur.fetchone() if (row is not None): - print "zbx.%s %d %s host=%s proxy=%s" % (row[1], r['clock'], r['value'], row[2], row[3]) + print("zbx.%s %d %s host=%s proxy=%s" % (row[1], r['clock'], r['value'], row[2], row[3])) if ((int(time.time()) - sample_last_ts) > settings['internal_metric_interval']): # Sample internal metrics @ 10s intervals sample_last_ts = int(time.time()) - print "tcollector.zabbix_bridge.log_pos %d %s" % (sample_last_ts, log_pos) - print "tcollector.zabbix_bridge.key_lookup_miss %d %s" % (sample_last_ts, key_lookup_miss) - print "tcollector.zabbix_bridge.timestamp_drift %d %s" % (sample_last_ts, (sample_last_ts - r['clock'])) + print("tcollector.zabbix_bridge.log_pos %d %s" % (sample_last_ts, log_pos)) + print("tcollector.zabbix_bridge.key_lookup_miss %d %s" % (sample_last_ts, key_lookup_miss)) + print("tcollector.zabbix_bridge.timestamp_drift %d %s" % (sample_last_ts, (sample_last_ts - r['clock']))) if ((key_lookup_miss - last_key_lookup_miss) > settings['dbrefresh']): - print "tcollector.zabbix_bridge.key_lookup_miss_reload %d %s" % (sample_last_ts, (key_lookup_miss - last_key_lookup_miss)) + print("tcollector.zabbix_bridge.key_lookup_miss_reload %d %s" % (sample_last_ts, (key_lookup_miss - last_key_lookup_miss))) cachecur.execute('DROP TABLE zabbix_cache') cachecur.execute('CREATE TABLE zabbix_cache AS SELECT * FROM dbfile.zabbix_cache') cachecur.execute('CREATE UNIQUE INDEX uniq_zid on zabbix_cache (id)') diff --git a/collectors/0/zfsiostats.py b/collectors/0/zfsiostats.py index 96d6bb83..a3c0e243 100755 --- a/collectors/0/zfsiostats.py +++ b/collectors/0/zfsiostats.py @@ -39,6 +39,10 @@ import signal import os +PY3 = sys.version_info[0] > 2 +if PY3: + long = int + from collectors.lib import utils try: @@ -147,7 +151,7 @@ def main(): ["zpool", "iostat", "-v", str(collection_interval)], stdout=subprocess.PIPE, ) - except OSError, e: + except OSError as e: if e.errno == errno.ENOENT: # it makes no sense to run this collector here sys.exit(13) # we signal tcollector to not run us @@ -168,7 +172,7 @@ def main(): while signal_received is None: try: line = p_zpool.stdout.readline() - except (IOError, OSError), e: + except (IOError, OSError) as e: if e.errno in (errno.EINTR, errno.EAGAIN): break raise @@ -250,13 +254,13 @@ def main(): for poolname, stats in capacity_stats_pool.items(): fm = "zfs.df.pool.kb.%s %d %s pool=%s" for statname, statnumber in stats.items(): - print fm % (statname, timestamp, statnumber, poolname) + print(fm % (statname, timestamp, statnumber, poolname)) for devicename, stats in capacity_stats_device.items(): fm = "zfs.df.device.kb.%s %d %s device=%s pool=%s" poolname, devicename = devicename.split(" ", 1) for statname, statnumber in stats.items(): - print fm % (statname, timestamp, statnumber, - devicename, poolname) + print(fm % (statname, timestamp, statnumber, + devicename, poolname)) if firstloop: # this flag prevents printing out of the data in the first loop # which is a since-boot summary similar to iostat @@ -266,13 +270,13 @@ def 
main(): for poolname, stats in io_stats_pool.items(): fm = "zfs.io.pool.%s %d %s pool=%s" for statname, statnumber in stats.items(): - print fm % (statname, timestamp, statnumber, poolname) + print(fm % (statname, timestamp, statnumber, poolname)) for devicename, stats in io_stats_device.items(): fm = "zfs.io.device.%s %d %s device=%s pool=%s" poolname, devicename = devicename.split(" ", 1) for statname, statnumber in stats.items(): - print fm % (statname, timestamp, statnumber, - devicename, poolname) + print(fm % (statname, timestamp, statnumber, + devicename, poolname)) sys.stdout.flush() if signal_received is None: diff --git a/collectors/0/zfsolkernstats.py b/collectors/0/zfsolkernstats.py index b5389486..6d580d99 100755 --- a/collectors/0/zfsolkernstats.py +++ b/collectors/0/zfsolkernstats.py @@ -41,7 +41,7 @@ def main(): try: f_slab = open("/proc/spl/kmem/slab", "r") f_arcstats = open("/proc/spl/kstat/zfs/arcstats", "r") - except IOError, e: + except IOError as e: if e.errno == errno.ENOENT: # it makes no sense to run this collector here sys.exit(13) # we signal tcollector to not run us @@ -63,10 +63,10 @@ def main(): typ = typ.group(1) else: typ = name - print ("zfs.mem.slab.size %d %d type=%s objsize=%d" % + print("zfs.mem.slab.size %d %d type=%s objsize=%d" % (ts, size, typ, objsize) ) - print ("zfs.mem.slab.alloc %d %d type=%s objsize=%d" % + print("zfs.mem.slab.alloc %d %d type=%s objsize=%d" % (ts, alloc, typ, objsize) ) @@ -76,7 +76,7 @@ def main(): line = line.split() name, _, data = line data = int(data) - print ("zfs.mem.arc.%s %d %d" % + print("zfs.mem.arc.%s %d %d" % (name, ts, data) ) diff --git a/collectors/0/zookeeper.py b/collectors/0/zookeeper.py index 6da5ac91..6e103eb0 100755 --- a/collectors/0/zookeeper.py +++ b/collectors/0/zookeeper.py @@ -68,7 +68,7 @@ def scan_zk_instances(): except OSError: utils.err("netstat is not in PATH") return instances - except CalledProcessError, err: + except CalledProcessError as err: utils.err("Error: %s" % err) for line in listen_sock.split("\n"): @@ -92,7 +92,7 @@ def scan_zk_instances(): sock.settimeout(0.5) sock.send("ruok\n") data = sock.recv(1024) - except Exception, err: + except Exception as err: utils.err(err) finally: if sock: @@ -100,7 +100,7 @@ def scan_zk_instances(): if data == "imok": instances.append([ip, port, tcp_version]) data = "" - except Exception, err: + except Exception as err: utils.err(err) finally: fd.close() @@ -108,7 +108,7 @@ def scan_zk_instances(): def print_stat(metric, ts, value, tags=""): if value is not None: - print "zookeeper.%s %i %s %s" % (metric, ts, value, tags) + print("zookeeper.%s %i %s %s" % (metric, ts, value, tags)) def connect_socket(tcp_version, port): sock = None @@ -120,7 +120,7 @@ def connect_socket(tcp_version, port): ipaddr = '127.0.0.1' try: sock.connect((ipaddr, port)) - except Exception, err: + except Exception as err: utils.err(err) return sock diff --git a/collectors/300/aws_cloudwatch_stats.py b/collectors/300/aws_cloudwatch_stats.py index 910fae49..903ec2f9 100755 --- a/collectors/300/aws_cloudwatch_stats.py +++ b/collectors/300/aws_cloudwatch_stats.py @@ -6,9 +6,7 @@ import re import json from collections import OrderedDict -import exceptions import threading -import Queue from time import mktime from collectors.lib import utils from collectors.etc import aws_cloudwatch_conf @@ -20,6 +18,11 @@ except ImportError: exit(13) +try: + from queue import Queue +except ImportError: + from Queue import Queue + ILLEGAL_CHARS_REGEX = re.compile('[^a-zA-Z0-9\- _./]') path = 
os.path.dirname(os.path.realpath(__file__)) @@ -41,7 +44,7 @@ 'SampleCount' ]) -sendQueue = Queue.Queue() +sendQueue = Queue() def validate_config(): access_key, secret_access_key = aws_cloudwatch_conf.get_accesskey_secretkey() @@ -57,7 +60,7 @@ def cloudwatch_connect_to_region(region): try: conn = boto.ec2.cloudwatch.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_access_key) except: - print "Unexpected error:", sys.exc_info()[0] + print("Unexpected error:", sys.exc_info()[0]) else: return conn @@ -88,7 +91,7 @@ def format_timestamp(ts): def build_tag_list(metric_name, region, dimensions): tags = "region=" + str(region) - for tagk,tagv in dimensions.iteritems(): + for tagk,tagv in dimensions.items(): tagkey = str(tagk) tagval = str(tagv[0]) tags += " %s=%s" % (tagkey, tagval) @@ -120,10 +123,10 @@ def handle_region(region, statistic): metrics = cloudwatch_list_metrics(region_conn) for metric in metrics: cloudwatch_query_metric(region, metric, statistic) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: # sys.stderr.write("finished region " + region + "," + statistic + "\n") pass - except exceptions.KeyboardInterrupt: + except KeyboardInterrupt: return 0 except: sys.stderr.write("failed region " + region + "," + statistic + "\n") @@ -145,11 +148,11 @@ def send_metrics(): datapoints[timestamp].append(output) sendQueue.task_done() sys.stderr.write("Queue Emptied, sorting output") - for outputs in sorted(datapoints.iteritems(), key=lambda x: x[1]): + for outputs in sorted(datapoints.items(), key=lambda x: x[1]): for output in outputs: for t in output: - print t - except exceptions.KeyboardInterrupt: + print(t) + except KeyboardInterrupt: return 0 # Uses the same code as tcollector here @@ -176,7 +179,7 @@ def main(): t.start() while threading.activeCount() > 1: time.sleep(1) - except exceptions.KeyboardInterrupt: + except KeyboardInterrupt: return 0 except: raise diff --git a/collectors/lib/hadoop_http.py b/collectors/lib/hadoop_http.py index 89f22a4a..c7f84d97 100644 --- a/collectors/lib/hadoop_http.py +++ b/collectors/lib/hadoop_http.py @@ -13,7 +13,7 @@ # see . import sys -import httplib + try: import json except ImportError: @@ -24,6 +24,12 @@ from ordereddict import OrderedDict # Can be easy_install'ed for <= 2.6 from collectors.lib.utils import is_numeric +try: + from http.client import HTTPConnection +except ImportError: + from httplib import HTTPConnection + + EXCLUDED_KEYS = ( "Name", "name" @@ -36,7 +42,7 @@ def __init__(self, service, daemon, host, port, uri="/jmx"): self.port = port self.host = host self.uri = uri - self.server = httplib.HTTPConnection(self.host, self.port) + self.server = HTTPConnection(self.host, self.port) self.server.auto_open = True def request(self): @@ -63,13 +69,13 @@ def poll(self): #split the name string context = bean['name'].split("name=")[1].split(",sub=") # Create a set that keeps the first occurrence - context = OrderedDict.fromkeys(context).keys() + context = list(OrderedDict.fromkeys(context).keys()) # lower case and replace spaces. 
context = [c.lower().replace(" ", "_") for c in context]
 # don't want to include the service or daemon twice
 context = [c for c in context if c != self.service and c != self.daemon]
- for key, value in bean.iteritems():
+ for key, value in bean.items():
 if key in EXCLUDED_KEYS:
 continue
 if not is_numeric(value):
@@ -79,11 +85,11 @@ def poll(self):
 def emit_metric(self, context, current_time, metric_name, value, tag_dict=None):
 if not tag_dict:
- print "%s.%s.%s.%s %d %d" % (self.service, self.daemon, ".".join(context), metric_name, current_time, value)
+ print("%s.%s.%s.%s %d %d" % (self.service, self.daemon, ".".join(context), metric_name, current_time, value))
 else:
- tag_string = " ".join([k + "=" + v for k, v in tag_dict.iteritems()])
+ tag_string = " ".join([k + "=" + v for k, v in tag_dict.items()])
- print "%s.%s.%s.%s %d %d %s" % \
- (self.service, self.daemon, ".".join(context), metric_name, current_time, value, tag_string)
+ print("%s.%s.%s.%s %d %d %s" % \
+ (self.service, self.daemon, ".".join(context), metric_name, current_time, value, tag_string))
 # flush to protect against subclassed collectors that output few metrics not having enough output to trigger
 # buffer flush within 10 mins, which then get killed by TCollector due to "inactivity"
 sys.stdout.flush()
diff --git a/collectors/lib/postgresqlutils.py b/collectors/lib/postgresqlutils.py
index 265fed0d..9e09f4b3 100644
--- a/collectors/lib/postgresqlutils.py
+++ b/collectors/lib/postgresqlutils.py
@@ -57,7 +57,7 @@ def postgres_connect(sockdir):
 "connect_timeout='%s' dbname=postgres"
 % (sockdir, user, password, CONNECT_TIMEOUT))
- except (EnvironmentError, EOFError, RuntimeError, socket.error), e:
+ except (EnvironmentError, EOFError, RuntimeError, socket.error) as e:
 utils.err("Couldn't connect to DB :%s" % (e))
 def connect():
diff --git a/collectors/lib/utils.py b/collectors/lib/utils.py
index 0259e6b7..ebd861fe 100644
--- a/collectors/lib/utils.py
+++ b/collectors/lib/utils.py
@@ -14,12 +14,16 @@
 """Common utility functions shared for Python collectors"""
+from __future__ import print_function
+
 import os
 import stat
 import pwd
 import errno
 import sys
+PY3 = sys.version_info[0] > 2
+
 # If we're running as root and this user exists, we'll drop privileges.
USER = "nobody" @@ -42,17 +46,21 @@ def is_sockfile(path): """Returns whether or not the given path is a socket file.""" try: s = os.stat(path) - except OSError, (no, e): - if no == errno.ENOENT: + except OSError as exc: + if exc.errno == errno.ENOENT: return False - err("warning: couldn't stat(%r): %s" % (path, e)) + err("warning: couldn't stat(%r): %s" % (path, exc)) return None return s.st_mode & stat.S_IFSOCK == stat.S_IFSOCK def err(msg): - print >> sys.stderr, msg + print(msg, file=sys.stderr) -def is_numeric(value): - return isinstance(value, (int, long, float)) +if PY3: + def is_numeric(value): + return isinstance(value, (int, float)) +else: + def is_numeric(value): + return isinstance(value, (int, long, float)) diff --git a/eos/collectors/eos.py b/eos/collectors/eos.py index c3bd436d..f43f5076 100755 --- a/eos/collectors/eos.py +++ b/eos/collectors/eos.py @@ -49,7 +49,7 @@ def printIntfCounters(self): ("discards", {"direction" : "out"}, "outDiscards"), ("discards", {"direction" : "in"}, "inDiscards"), ] - for intf_id, intf_counters in counters["interfaces"].iteritems(): + for intf_id, intf_counters in counters["interfaces"].items(): for counter_name, tags, eos_counter_name in counter_definitions: if eos_counter_name in intf_counters: self.printIntfCounter(counter_name, ts, intf_counters[eos_counter_name], @@ -64,14 +64,14 @@ def printIntfCounters(self): ("alignmentErrors", {}, "alignmentErrors"), ("symbolErrors", {}, "symbolErrors"), ] - for intf_id, intf_error_counters in error_counters["interfaceErrorCounters"].iteritems(): + for intf_id, intf_error_counters in error_counters["interfaceErrorCounters"].items(): for counter_name, tags, eos_counter_name in error_counter_definitions: if eos_counter_name in intf_error_counters: self.printIntfCounter(counter_name, ts, intf_error_counters[eos_counter_name], intf_id, tags) # Print interface bin counters - for intf_id, intf_bin_counters in bin_counters["interfaces"].iteritems(): + for intf_id, intf_bin_counters in bin_counters["interfaces"].items(): for direction in ["in", "out"]: if not intf_bin_counters.get("%sBinsCounters" % direction): continue diff --git a/eos/tcollector_agent.py b/eos/tcollector_agent.py index b67011e9..9046258f 100644 --- a/eos/tcollector_agent.py +++ b/eos/tcollector_agent.py @@ -200,7 +200,7 @@ def _import_tcollector(self): self.get_agent_mgr().status_set("has_tcollector_py", "True") self.module_.LOG = SdkLogger("tcollector") self.module_.setup_logging() - except IOError, e: + except IOError as e: import errno if e.errno != errno.ENOENT: raise @@ -230,7 +230,7 @@ def _socket_at(self, family, socktype, proto): def on_hostname(self, hostname): debug("Hostname changed to", hostname) self.tags_["host"] = hostname - self.sender_thread_.tags = sorted(self.tags_.iteritems()) + self.sender_thread_.tags = sorted(self.tags_.items()) def start(self): tcollector = self.module_ diff --git a/tcollector.py b/tcollector.py index a4e88a4f..688b61d8 100755 --- a/tcollector.py +++ b/tcollector.py @@ -34,13 +34,19 @@ import threading import time import json -import urllib2 import base64 from logging.handlers import RotatingFileHandler -from Queue import Queue -from Queue import Empty -from Queue import Full from optparse import OptionParser +import collections + +PY3 = sys.version_info[0] > 2 +if PY3: + from queue import Queue, Empty, Full + from urllib.request import Request, urlopen + from urllib.error import HTTPError +else: + from Queue import Queue, Empty, Full + from urllib2 import Request, urlopen, HTTPError # global 
variables. @@ -145,8 +151,8 @@ def read(self): self.name, len(out)) for line in out.splitlines(): LOG.warning('%s: %s', self.name, line) - except IOError, (err, msg): - if err != errno.EAGAIN: + except IOError as exc: + if exc.errno != errno.EAGAIN: raise except: LOG.exception('uncaught exception in stderr read') @@ -159,8 +165,8 @@ def read(self): if len(self.buffer): LOG.debug('reading %s, buffer now %d bytes', self.name, len(self.buffer)) - except IOError, (err, msg): - if err != errno.EAGAIN: + except IOError as exc: + if exc.errno != errno.EAGAIN: raise except AttributeError: # sometimes the process goes away in another thread and we don't @@ -471,7 +477,7 @@ def pick_connection(self): # isn't in the blacklist, or until we run out of hosts (i.e. they # are all blacklisted, which typically happens when we lost our # connectivity to the outside world). - for self.current_tsd in xrange(self.current_tsd + 1, len(self.hosts)): + for self.current_tsd in range(self.current_tsd + 1, len(self.hosts)): hostport = self.hosts[self.current_tsd] if hostport not in self.blacklisted_hosts: break @@ -526,7 +532,7 @@ def run(self): self.send_data() errors = 0 # We managed to do a successful iteration. except (ArithmeticError, EOFError, EnvironmentError, LookupError, - ValueError), e: + ValueError) as e: errors += 1 if errors > MAX_UNCAUGHT_EXCEPTIONS: shutdown() @@ -558,7 +564,7 @@ def verify_conn(self): # closing the connection and indicating that we need to reconnect. try: self.tsd.close() - except socket.error, msg: + except socket.error as msg: pass # not handling that self.time_reconnect = time.time() return False @@ -568,7 +574,7 @@ def verify_conn(self): LOG.debug('verifying our TSD connection is alive') try: self.tsd.sendall('version\n') - except socket.error, msg: + except socket.error as msg: self.tsd = None self.blacklist_connection() return False @@ -580,7 +586,7 @@ def verify_conn(self): # connection try: buf = self.tsd.recv(bufsize) - except socket.error, msg: + except socket.error as msg: self.tsd = None self.blacklist_connection() return False @@ -659,7 +665,7 @@ def maintain_conn(self): addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0) - except socket.gaierror, e: + except socket.gaierror as e: # Don't croak on transient DNS resolution issues. if e[0] in (socket.EAI_AGAIN, socket.EAI_NONAME, socket.EAI_NODATA): @@ -674,7 +680,7 @@ def maintain_conn(self): # if we get here it connected LOG.debug('Connection to %s was successful'%(str(sockaddr))) break - except socket.error, msg: + except socket.error as msg: LOG.warning('Connection attempt failed to %s:%d: %s', self.host, self.port, msg) self.tsd.close() @@ -714,11 +720,11 @@ def send_data(self): # try sending again next time. 
try: if self.dryrun: - print out + print(out) else: self.tsd.sendall(out) self.sendq = [] - except socket.error, msg: + except socket.error as msg: LOG.error('failed to send data: %s', msg) try: self.tsd.close() @@ -759,22 +765,22 @@ def send_data_via_http(self): metric_tags[tag_key] = tag_value metric_entry = {} metric_entry["metric"] = metric - metric_entry["timestamp"] = long(timestamp) + metric_entry["timestamp"] = int(timestamp) metric_entry["value"] = float(value) metric_entry["tags"] = dict(self.tags).copy() if len(metric_tags) + len(metric_entry["tags"]) > self.maxtags: metric_tags_orig = set(metric_tags) subset_metric_keys = frozenset(metric_tags[:len(metric_tags[:self.maxtags-len(metric_entry["tags"])])]) - metric_tags = dict((k, v) for k, v in metric_tags.iteritems() if k in subset_metric_keys) + metric_tags = dict((k, v) for k, v in metric_tags.items() if k in subset_metric_keys) LOG.error("Exceeding maximum permitted metric tags - removing %s for metric %s", str(metric_tags_orig - set(metric_tags)), metric) metric_entry["tags"].update(metric_tags) metrics.append(metric_entry) if self.dryrun: - print "Would have sent:\n%s" % json.dumps(metrics, + print("Would have sent:\n%s" % json.dumps(metrics, sort_keys=True, - indent=4) + indent=4)) return if((self.current_tsd == -1) or (len(self.hosts) > 1)): @@ -782,13 +788,13 @@ def send_data_via_http(self): url = self.build_http_url() LOG.debug("Sending metrics to url: %s", url) - req = urllib2.Request(url) + req = Request(url) if self.http_username and self.http_password: req.add_header("Authorization", "Basic %s" % base64.b64encode("%s:%s" % (self.http_username, self.http_password))) req.add_header("Content-Type", "application/json") try: - response = urllib2.urlopen(req, json.dumps(metrics)) + response = urlopen(req, json.dumps(metrics)) LOG.debug("Received response %s %s", response.getcode(), response.read().rstrip('\n')) # clear out the sendq self.sendq = [] @@ -797,7 +803,7 @@ def send_data_via_http(self): # for line in response: # print line, # print - except urllib2.HTTPError, e: + except HTTPError as e: LOG.error("Got error %s %s", e, e.read().rstrip('\n')) # for line in http_error: # print line, @@ -968,7 +974,7 @@ def daemonize(): if os.fork(): os._exit(0) os.chdir("/") - os.umask(022) + os.umask(0o22) os.setsid() os.umask(0) if os.fork(): @@ -980,8 +986,8 @@ def daemonize(): os.dup2(stdout.fileno(), 2) stdin.close() stdout.close() - os.umask(022) - for fd in xrange(3, 1024): + os.umask(0o22) + for fd in range(3, 1024): try: os.close(fd) except OSError: # This FD wasn't opened... @@ -1173,7 +1179,7 @@ def load_config_module(name, options, tags): else: module = reload(name) onload = module.__dict__.get('onload') - if callable(onload): + if isinstance(onload, collections.Callable): try: onload(options, tags) except: @@ -1198,7 +1204,7 @@ def reload_changed_config_modules(modules, options, sender, tags): changed = False # Reload any module that has changed. - for path, (module, timestamp) in modules.iteritems(): + for path, (module, timestamp) in modules.items(): if path not in current_paths: # Module was removed. 
continue mtime = os.path.getmtime(path) @@ -1236,8 +1242,7 @@ def write_pid(pidfile): def all_collectors(): """Generator to return all collectors.""" - - return COLLECTORS.itervalues() + return COLLECTORS.values() # collectors that are not marked dead @@ -1359,7 +1364,7 @@ def spawn_collector(col): stderr=subprocess.PIPE, close_fds=True, preexec_fn=os.setsid) - except OSError, e: + except OSError as e: LOG.error('Failed to spawn collector %s: %s' % (col.filename, e)) return # The following line needs to move below this line because it is used in diff --git a/tests.py b/tests.py index 1c95032a..1348d8ee 100755 --- a/tests.py +++ b/tests.py @@ -20,6 +20,16 @@ import mocks import tcollector +PY3 = sys.version_info[0] > 2 + + +def return_none(x): + return None + + +def always_true(): + return True + class CollectorsTests(unittest.TestCase): @@ -36,7 +46,11 @@ def check_access_rights(top): check_access_rights(pathname) elif S_ISREG(mode): # file, check permissions - self.assertEqual("0100775", oct(os.stat(pathname)[ST_MODE])) + permissions = oct(os.stat(pathname)[ST_MODE]) + if PY3: + self.assertEqual("0o100775", permissions) + else: + self.assertEqual("0100775", permissions) else: # unknown file type pass @@ -116,15 +130,17 @@ def setUp(self): self.udp_bridge = tcollector.COLLECTORS['udp_bridge.py'] # pylint: disable=maybe-no-member self.udp_globals = {} - sys.exit = lambda x: None + sys.exit = return_none + bridge_file = open(self.udp_bridge.filename) try: - execfile(self.udp_bridge.filename, self.udp_globals) + exec(compile(bridge_file.read(), self.udp_bridge.filename, 'exec'), self.udp_globals) finally: + bridge_file.close() sys.exit = self.saved_exit self.udp_globals['socket'] = mocks.Socket() self.udp_globals['sys'] = mocks.Sys() - self.udp_globals['udp_bridge_conf'].enabled = lambda: True + self.udp_globals['udp_bridge_conf'].enabled = always_true self.udp_globals['utils'] = mocks.Utils() def run_bridge_test(self, udpInputLines, stdoutLines, stderrLines): @@ -146,9 +162,10 @@ def run_bridge_test(self, udpInputLines, stdoutLines, stderrLines): sys.stdout = self.saved_stdout def test_populated(self): - self.assertIsInstance(self.udp_bridge, tcollector.Collector) # pylint: disable=maybe-no-member - self.assertIsNone(self.udp_bridge.proc) - self.assertIn('main', self.udp_globals) + # assertIsInstance, assertIn, assertIsNone do not exist in Python 2.6 + self.assertTrue(isinstance(self.udp_bridge, tcollector.Collector), msg="self.udp_bridge not instance of tcollector.Collector") # pylint: disable=maybe-no-member + self.assertEqual(self.udp_bridge.proc, None) + self.assertTrue('main' in self.udp_globals, msg="'main' not in self.udp_globals") def test_single_line_no_put(self): inputLines = [ @@ -159,8 +176,8 @@ def test_single_line_no_put(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_single_line_put(self): inputLines = [ @@ -173,8 +190,8 @@ def test_single_line_put(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_multi_line_no_put(self): inputLines = [ @@ -186,8 +203,8 @@ def test_multi_line_no_put(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - 
self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_multi_line_put(self): inputLines = [ @@ -202,8 +219,8 @@ def test_multi_line_put(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_multi_line_mixed_put(self): inputLines = [ @@ -220,8 +237,8 @@ def test_multi_line_mixed_put(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_multi_line_no_put_cond(self): inputLines = [ @@ -232,8 +249,8 @@ def test_multi_line_no_put_cond(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_multi_line_put_cond(self): inputLines = [ @@ -247,8 +264,8 @@ def test_multi_line_put_cond(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_multi_empty_line_no_put(self): inputLines = [ @@ -261,8 +278,8 @@ def test_multi_empty_line_no_put(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, ['invalid data\n']) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, ['invalid data\n']) def test_multi_empty_line_put(self): inputLines = [ @@ -275,8 +292,8 @@ def test_multi_empty_line_put(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, ['invalid data\n']) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, ['invalid data\n']) def test_multi_empty_line_no_put_cond(self): inputLines = [ @@ -287,8 +304,8 @@ def test_multi_empty_line_no_put_cond(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) def test_multi_empty_line_put_cond(self): inputLines = [ @@ -303,8 +320,8 @@ def test_multi_empty_line_put_cond(self): stdout = [] self.run_bridge_test(inputLines, stdout, stderr) - self.assertEquals(''.join(stdout), expected) - self.assertListEqual(stderr, []) + self.assertEqual(''.join(stdout), expected) + self.assertEqual(stderr, []) if __name__ == '__main__': cdir = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), From 146097ec2ecee41e827515558c33bf48aea6b930 Mon Sep 17 00:00:00 2001 From: Jonathan Creasy Date: Tue, 4 Dec 2018 13:45:04 -0800 Subject: [PATCH 2/6] Added Python 3.x versions to Travis --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 2f95e4fc..757d0ace 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,10 @@ language: python python: - "2.7" + - "3.4" + - "3.5" + - "3.6" + - "3.7" install: - pip install pylint pylint_runner ordereddict mysqlclient requests feedparser prometheus_client script: From 2e1bb8984914366669ab439caad8df4dac90acbb Mon Sep 17 00:00:00 2001 From: 
Jonathan Creasy Date: Tue, 4 Dec 2018 13:30:22 -0800 Subject: [PATCH 3/6] Linter works in 2.7 --- tcollector.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tcollector.py b/tcollector.py index 688b61d8..74741a09 100755 --- a/tcollector.py +++ b/tcollector.py @@ -41,9 +41,9 @@ PY3 = sys.version_info[0] > 2 if PY3: - from queue import Queue, Empty, Full - from urllib.request import Request, urlopen - from urllib.error import HTTPError + from queue import Queue, Empty, Full # pylint: disable=import-error + from urllib.request import Request, urlopen # pylint: disable=maybe-no-member,no-name-in-module,import-error + from urllib.error import HTTPError # pylint: disable=maybe-no-member,no-name-in-module,import-error else: from Queue import Queue, Empty, Full from urllib2 import Request, urlopen, HTTPError From e95c49bc52aee03095a89f6823b6e2164305bb74 Mon Sep 17 00:00:00 2001 From: Jonathan Creasy Date: Tue, 4 Dec 2018 14:14:22 -0800 Subject: [PATCH 4/6] Lint passes on 3.6 --- collectors/0/docker_engine.py | 3 ++- collectors/0/flume.py | 10 +++++----- collectors/0/hbase_regionserver.py | 5 ++--- collectors/0/mountstats.py | 2 +- collectors/0/mysql.py | 2 +- collectors/etc/docker_engine_conf.py | 2 +- collectors/lib/docker_engine/docker_metrics.py | 10 ++++------ collectors/lib/utils.py | 2 +- eos/tcollector_agent.py | 2 +- tcollector.py | 9 +++++++-- 10 files changed, 25 insertions(+), 22 deletions(-) diff --git a/collectors/0/docker_engine.py b/collectors/0/docker_engine.py index a4bb2b49..9ea4b812 100755 --- a/collectors/0/docker_engine.py +++ b/collectors/0/docker_engine.py @@ -13,6 +13,7 @@ # see . """Imports Docker stats from the docker-api""" +from __future__ import print_function import sys from collectors.etc import docker_engine_conf @@ -32,7 +33,7 @@ def main(): cli = DockerMetrics(METRICS_PATH) for m in cli.get_endpoint(): - print m.get_metric_lines() + print(m.get_metric_lines()) if __name__ == "__main__": diff --git a/collectors/0/flume.py b/collectors/0/flume.py index d99206de..d7202cca 100755 --- a/collectors/0/flume.py +++ b/collectors/0/flume.py @@ -125,11 +125,11 @@ def printmetric(metric, value, **tags): stats = flume_metrics(server) for metric in stats: - (component, name) = metric.split(".") - tags = {component.lower(): name} - for key,value in stats[metric].items(): - if key not in EXCLUDE: - printmetric(key.lower(), value, **tags) + (component, name) = metric.split(".") + tags = {component.lower(): name} + for key,value in stats[metric].items(): + if key not in EXCLUDE: + printmetric(key.lower(), value, **tags) time.sleep(COLLECTION_INTERVAL) diff --git a/collectors/0/hbase_regionserver.py b/collectors/0/hbase_regionserver.py index 4b99aa87..85d64a1b 100755 --- a/collectors/0/hbase_regionserver.py +++ b/collectors/0/hbase_regionserver.py @@ -33,7 +33,7 @@ def __init__(self): super(HBaseRegionserver, self).__init__("hbase", "regionserver", "localhost", 60030) def emit_region_metric(self, context, current_time, full_metric_name, value): - match = REGION_METRIC_PATTERN.match(full_metric_name) + match = REGION_METRIC_PATTERN.match(full_metric_name) if not match: utils.err("Error splitting %s" % full_metric_name) return @@ -74,11 +74,10 @@ def main(args): utils.err("This collector requires the `json' Python module.") return 13 # Ask tcollector not to respawn us hbase_service = HBaseRegionserver() + while True: hbase_service.emit() time.sleep(15) - return 0 - if __name__ == "__main__": import sys diff --git a/collectors/0/mountstats.py 
b/collectors/0/mountstats.py index 66cdaa36..ad31bddf 100755 --- a/collectors/0/mountstats.py +++ b/collectors/0/mountstats.py @@ -91,7 +91,7 @@ def md5_digest(line): return md5(line.encode("utf8")).digest() else: - import md5 + import md5 # pylint: disable=import-error def md5_digest(line): return md5.new(line).digest() diff --git a/collectors/0/mysql.py b/collectors/0/mysql.py index c094f10d..b9f59272 100755 --- a/collectors/0/mysql.py +++ b/collectors/0/mysql.py @@ -24,7 +24,7 @@ if PY3: INTEGER_TYPES = (int, ) else: - INTEGER_TYPES = (int, long) + INTEGER_TYPES = (int, long) # pylint: disable=undefined-variable try: import MySQLdb diff --git a/collectors/etc/docker_engine_conf.py b/collectors/etc/docker_engine_conf.py index 4648c57c..2e10f6fc 100644 --- a/collectors/etc/docker_engine_conf.py +++ b/collectors/etc/docker_engine_conf.py @@ -13,7 +13,7 @@ # see . def enabled(): - return False + return True def get_config(): """Configuration for the Docker engine (Prometeus) collector """ diff --git a/collectors/lib/docker_engine/docker_metrics.py b/collectors/lib/docker_engine/docker_metrics.py index 9a276253..88b940a0 100644 --- a/collectors/lib/docker_engine/docker_metrics.py +++ b/collectors/lib/docker_engine/docker_metrics.py @@ -12,14 +12,12 @@ # of the GNU Lesser General Public License along with this program. If not, # see . +from __future__ import print_function import time - import requests from prometheus_client.parser import text_string_to_metric_families - from collectors.lib.docker_engine.metric import Metric - class DockerMetrics(object): def __init__(self, url): self._url = url @@ -30,10 +28,10 @@ def get_endpoint(self): ret = [] r = requests.get(self._url) if r.status_code != 200: - print "Error %s: %s" % (r.status_code, r.text) + print("Error %s: %s" % (r.status_code, r.text)) else: - for line in r.iter_lines(): - if not line.startswith("#"): + for line in r.iter_lines(decode_unicode=True): + if not line.startswith('#'): ret.extend(self.eval_prometheus_line(self.event_time, line)) return ret diff --git a/collectors/lib/utils.py b/collectors/lib/utils.py index ebd861fe..031c27e3 100644 --- a/collectors/lib/utils.py +++ b/collectors/lib/utils.py @@ -63,4 +63,4 @@ def is_numeric(value): return isinstance(value, (int, float)) else: def is_numeric(value): - return isinstance(value, (int, long, float)) + return isinstance(value, (int, long, float)) # pylint: disable=undefined-variable diff --git a/eos/tcollector_agent.py b/eos/tcollector_agent.py index 9046258f..9c1d3c07 100644 --- a/eos/tcollector_agent.py +++ b/eos/tcollector_agent.py @@ -225,7 +225,7 @@ def _get_tsd_port(self): def _socket_at(self, family, socktype, proto): vrf = self.get_agent_mgr().agent_option("vrf") or "" fd = self.vrf_mgr_.socket_at(family, socktype, proto, vrf) - return socket._socketobject(_sock=socket.fromfd(fd, family, socktype, proto)) + return socket._socketobject(_sock=socket.fromfd(fd, family, socktype, proto)) # pylint: disable=no-member def on_hostname(self, hostname): debug("Hostname changed to", hostname) diff --git a/tcollector.py b/tcollector.py index 74741a09..81f46848 100755 --- a/tcollector.py +++ b/tcollector.py @@ -41,12 +41,14 @@ PY3 = sys.version_info[0] > 2 if PY3: + import importlib from queue import Queue, Empty, Full # pylint: disable=import-error from urllib.request import Request, urlopen # pylint: disable=maybe-no-member,no-name-in-module,import-error from urllib.error import HTTPError # pylint: disable=maybe-no-member,no-name-in-module,import-error + else: from Queue import 
Queue, Empty, Full - from urllib2 import Request, urlopen, HTTPError + from urllib2 import Request, urlopen, HTTPError # pylint: disable=maybe-no-member,no-name-in-module,import-error # global variables. @@ -1177,7 +1179,10 @@ def load_config_module(name, options, tags): # Strip the trailing .py module = __import__(name[:-3], d, d) else: - module = reload(name) + if PY3: + module = importlib.reload(name) # pylint: disable=undefined-variable + else: + module = reload(name) # pylint: disable=undefined-variable onload = module.__dict__.get('onload') if isinstance(onload, collections.Callable): try: From c9aefeaf51e003e58fa2c07cedd52aa6736bf2d5 Mon Sep 17 00:00:00 2001 From: Jonathan Creasy Date: Tue, 4 Dec 2018 14:19:45 -0800 Subject: [PATCH 5/6] Lint passes on 3.6 --- tcollector.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tcollector.py b/tcollector.py index 81f46848..3236fc4c 100755 --- a/tcollector.py +++ b/tcollector.py @@ -47,10 +47,9 @@ from urllib.error import HTTPError # pylint: disable=maybe-no-member,no-name-in-module,import-error else: - from Queue import Queue, Empty, Full + from Queue import Queue, Empty, Full # pylint: disable=maybe-no-member,no-name-in-module,import-error from urllib2 import Request, urlopen, HTTPError # pylint: disable=maybe-no-member,no-name-in-module,import-error - # global variables. COLLECTORS = {} GENERATION = 0 @@ -1180,7 +1179,7 @@ def load_config_module(name, options, tags): module = __import__(name[:-3], d, d) else: if PY3: - module = importlib.reload(name) # pylint: disable=undefined-variable + module = importlib.reload(name) # pylint: disable=no-member,undefined-variable else: module = reload(name) # pylint: disable=undefined-variable onload = module.__dict__.get('onload') From 4953acdd7ad8ecdaffcb7cde79d5d3841831a8c0 Mon Sep 17 00:00:00 2001 From: Jonathan Creasy Date: Tue, 4 Dec 2018 14:22:34 -0800 Subject: [PATCH 6/6] Lint passes on 3.6 --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 757d0ace..b958c04f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,6 @@ python: - "3.4" - "3.5" - "3.6" - - "3.7" install: - pip install pylint pylint_runner ordereddict mysqlclient requests feedparser prometheus_client script:
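
Taken together, the six patches apply one small set of 2.x/3.x compatibility idioms over and over: a PY3 flag derived from sys.version_info, try/except fallbacks for renamed stdlib modules, print_function with file=sys.stderr in place of "print >>", "except ... as e" in place of the 2.x-only comma syntax, and .items() in place of .iteritems(). A minimal, self-contained sketch of the combined pattern follows; it is illustrative only (compat_demo and is_numeric here are invented stand-ins, not the tcollector code above):

    # Sketch of the compatibility idioms used throughout this series.
    # Standard library only; names are illustrative.
    from __future__ import print_function  # makes print() a function on 2.6+

    import sys

    PY3 = sys.version_info[0] > 2

    try:
        from queue import Queue, Empty   # Python 3 module name
    except ImportError:
        from Queue import Queue, Empty   # Python 2 module name

    if PY3:
        long = int  # 3.x unified int/long; alias keeps old call sites working

    def is_numeric(value):
        # int already covers the former long type on Python 3
        return isinstance(value, (int, float) if PY3 else (int, long, float))

    def compat_demo(tags):
        # .items() exists on both versions (a list on 2.x, a view on 3.x),
        # unlike .iteritems(), which Python 3 removed.
        for key, value in tags.items():
            # print(..., file=sys.stderr) replaces "print >> sys.stderr, ..."
            print("%s=%s" % (key, value), file=sys.stderr)
        try:
            Queue(maxsize=1).get_nowait()
        except Empty as e:  # "as e" replaces the 2.x-only "except Empty, e"
            print("queue empty: %r" % (e,), file=sys.stderr)

The same shape explains why the remaining patches are mostly pylint pragmas: once the renamed modules are guarded by try/except or a PY3 branch, each interpreter's linter still flags the other version's import, so the suppressions are needed for the shared file to pass on both.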