diff --git a/checks.d/postgres.py b/checks.d/postgres.py
index a33016650e..c26debfce3 100644
--- a/checks.d/postgres.py
+++ b/checks.d/postgres.py
@@ -177,7 +177,7 @@ class PostgreSql(AgentCheck):
             ('schemaname', 'schema')
         ],
         'metrics': {
-            'pg_stat_user_tables': ('postgresql.total_tables', GAUGE),
+            'pg_stat_user_tables': ('postgresql.table.count', GAUGE),
         },
         'relation': False,
         'query': """
@@ -218,6 +218,28 @@ class PostgreSql(AgentCheck):
 """
     }
 
+    STATIO_METRICS = {
+        'descriptors': [
+            ('relname', 'table'),
+        ],
+        'metrics': {
+            'heap_blks_read'  : ('postgresql.heap_blocks_read', RATE),
+            'heap_blks_hit'   : ('postgresql.heap_blocks_hit', RATE),
+            'idx_blks_read'   : ('postgresql.index_blocks_read', RATE),
+            'idx_blks_hit'    : ('postgresql.index_blocks_hit', RATE),
+            'toast_blks_read' : ('postgresql.toast_blocks_read', RATE),
+            'toast_blks_hit'  : ('postgresql.toast_blocks_hit', RATE),
+            'tidx_blks_read'  : ('postgresql.toast_index_blocks_read', RATE),
+            'tidx_blks_hit'   : ('postgresql.toast_index_blocks_hit', RATE),
+        },
+        'query': """
+SELECT relname,
+       %s
+  FROM pg_statio_user_tables
+ WHERE relname = ANY(%s)""",
+        'relation': True,
+    }
+
     def __init__(self, name, init_config, agentConfig, instances=None):
         AgentCheck.__init__(self, name, init_config, agentConfig, instances)
         self.dbs = {}
@@ -227,6 +249,7 @@ def __init__(self, name, init_config, agentConfig, instances=None):
         self.db_instance_metrics = []
         self.db_bgw_metrics = []
         self.replication_metrics = {}
+        self.custom_metrics = {}
 
     def _get_version(self, key, db):
         if key not in self.versions:
@@ -263,20 +286,18 @@ def _get_instance_metrics(self, key, db):
         metrics = self.instance_metrics.get(key)
 
         if metrics is None:
-
             # Hack to make sure that if we have multiple instances that connect to
             # the same host, port, we don't collect metrics twice
             # as it will result in https://github.com/DataDog/dd-agent/issues/1211
             sub_key = key[:2]
             if sub_key in self.db_instance_metrics:
-                self.instance_metrics[key] = {}
-                self.log.debug("Not collecting instance metrics for key: {0} as"\
+                self.instance_metrics[key] = None
+                self.log.debug("Not collecting instance metrics for key: {0} as"
                     " they are already collected by another instance".format(key))
-                return {}
+                return None
 
             self.db_instance_metrics.append(sub_key)
 
-
             if self._is_9_2_or_above(key, db):
                 self.instance_metrics[key] = dict(self.COMMON_METRICS, **self.NEWER_92_METRICS)
             else:
@@ -293,16 +314,15 @@ def _get_bgw_metrics(self, key, db):
         metrics = self.bgw_metrics.get(key)
 
         if metrics is None:
-
             # Hack to make sure that if we have multiple instances that connect to
             # the same host, port, we don't collect metrics twice
             # as it will result in https://github.com/DataDog/dd-agent/issues/1211
             sub_key = key[:2]
             if sub_key in self.db_bgw_metrics:
-                self.bgw_metrics[key] = {}
-                self.log.debug("Not collecting bgw metrics for key: {0} as"\
+                self.bgw_metrics[key] = None
+                self.log.debug("Not collecting bgw metrics for key: {0} as"
                     " they are already collected by another instance".format(key))
-                return {}
+                return None
 
             self.db_bgw_metrics.append(sub_key)
 
@@ -334,26 +354,38 @@ def _collect_stats(self, key, db, instance_tags, relations, custom_metrics):
         If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml
         """
-        self.DB_METRICS['metrics'] = self._get_instance_metrics(key, db)
-        self.BGW_METRICS['metrics'] = self._get_bgw_metrics(key, db)
-
         metric_scope = [
-            self.DB_METRICS,
             self.CONNECTION_METRICS,
-            self.BGW_METRICS,
             self.LOCK_METRICS,
-            self.COUNT_METRICS
+            self.COUNT_METRICS,
         ]
 
+        # These are added only once per PG server, hence the checks below
+        db_instance_metrics = self._get_instance_metrics(key, db)
+        bgw_instance_metrics = self._get_bgw_metrics(key, db)
+
+        if db_instance_metrics is not None:
+            # FIXME: constants shouldn't be modified
+            self.DB_METRICS['metrics'] = db_instance_metrics
+            metric_scope.append(self.DB_METRICS)
+
+        if bgw_instance_metrics is not None:
+            # FIXME: constants shouldn't be modified
+            self.BGW_METRICS['metrics'] = bgw_instance_metrics
+            metric_scope.append(self.BGW_METRICS)
+
         # Do we need relation-specific metrics?
         if relations:
             metric_scope += [
                 self.REL_METRICS,
                 self.IDX_METRICS,
-                self.SIZE_METRICS
+                self.SIZE_METRICS,
+                self.STATIO_METRICS
             ]
 
         replication_metrics = self._get_replication_metrics(key, db)
         if replication_metrics is not None:
+            # FIXME: constants shouldn't be modified
             self.REPLICATION_METRICS['metrics'] = replication_metrics
             metric_scope.append(self.REPLICATION_METRICS)
@@ -364,10 +396,8 @@ def _collect_stats(self, key, db, instance_tags, relations, custom_metrics):
         for scope in full_metric_scope:
             if scope == self.REPLICATION_METRICS or not self._is_above(key, db, [9,0,0]):
                 log_func = self.log.debug
-                warning_func = self.log.debug
             else:
                 log_func = self.log.warning
-                warning_func = self.warning
 
             # build query
             cols = scope['metrics'].keys()  # list of metrics to query, in some order
@@ -394,10 +424,11 @@ def _collect_stats(self, key, db, instance_tags, relations, custom_metrics):
 
             if scope in custom_metrics and len(results) > MAX_CUSTOM_RESULTS:
                 self.warning(
-                    "Query: {0} returned more than {1} results ({2})Truncating").format(
-                    query, MAX_CUSTOM_RESULTS, len(results))
+                    "Query: {0} returned more than {1} results ({2}). Truncating".format(
+                        query, MAX_CUSTOM_RESULTS, len(results)))
                 results = results[:MAX_CUSTOM_RESULTS]
 
+            # FIXME this cramps my style
             if scope == self.DB_METRICS:
                 self.gauge("postgresql.db.count", len(results),
                     tags=[t for t in instance_tags if not t.startswith("db:")])
@@ -482,7 +513,12 @@ def get_connection(self, key, host, port, user, password, dbname, use_cached=Tru
             self.dbs[key] = connection
             return connection
 
-    def _process_customer_metrics(self,custom_metrics):
+    def _get_custom_metrics(self, custom_metrics, key):
+        # Return pre-processed custom metrics from the cache when available
+        if key in self.custom_metrics:
+            return self.custom_metrics[key]
+
+        # Otherwise pre-process custom metrics and verify the definitions
         required_parameters = ("descriptors", "metrics", "query", "relation")
 
         for m in custom_metrics:
@@ -493,14 +529,17 @@
 
             self.log.debug("Metric: {0}".format(m))
 
-            for k, v in m['metrics'].items():
-                if v[1].upper() not in ['RATE','GAUGE','MONOTONIC']:
-                    raise CheckException("Collector method {0} is not known."\
-                        "Known methods are RATE,GAUGE,MONOTONIC".format(
-                        v[1].upper()))
+            for ref, (_, mtype) in m['metrics'].iteritems():
+                cap_mtype = mtype.upper()
+                if cap_mtype not in ('RATE', 'GAUGE', 'MONOTONIC'):
+                    raise CheckException("Collector method {0} is not known."
+                        " Known methods are RATE, GAUGE, MONOTONIC".format(cap_mtype))
 
-                m['metrics'][k][1] = getattr(PostgreSql, v[1].upper())
-                self.log.debug("Method: %s" % (str(v[1])))
+                m['metrics'][ref][1] = getattr(PostgreSql, cap_mtype)
+                self.log.debug("Method: %s" % (str(mtype)))
+
+        self.custom_metrics[key] = custom_metrics
+        return custom_metrics
 
     def check(self, instance):
         host = instance.get('host', '')
@@ -510,8 +549,6 @@ def check(self, instance):
         tags = instance.get('tags', [])
         dbname = instance.get('dbname', None)
         relations = instance.get('relations', [])
-        custom_metrics = instance.get('custom_metrics') or []
-        self._process_customer_metrics(custom_metrics)
 
         if relations and not dbname:
             self.warning('"dbname" parameter must be set when using the "relations" parameter.')
@@ -521,6 +558,8 @@
 
         key = (host, port, dbname)
 
+        custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', []), key)
+
         # Clean up tags in case there was a None entry in the instance
        # e.g. if the yaml contains tags: but no actual tags
         if tags is None:
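A note on how a scope like the new STATIO_METRICS becomes SQL: the first %s in the query template takes the comma-joined keys of scope['metrics'] (column identifiers cannot be bound as query parameters), while the relation list behind ANY(%s) can be bound, since psycopg2 adapts Python lists to SQL arrays. A minimal sketch of that expansion, assuming psycopg2 and the CI credentials used elsewhere in this diff; the check's actual plumbing in _collect_stats may differ:

    # Illustrative only: expand a metric scope's query template by hand.
    import psycopg2

    STATIO_QUERY = """
    SELECT relname,
           %s
      FROM pg_statio_user_tables
     WHERE relname = ANY(%s)"""

    metrics = {'heap_blks_read': None, 'heap_blks_hit': None}  # trimmed scope
    relations = ['persons']                                    # from the instance config

    conn = psycopg2.connect(host='localhost', port=15432, user='datadog',
                            password='datadog', dbname='datadog_test')
    cursor = conn.cursor()

    # Interpolate column names as text, keep a real placeholder for the array.
    query = STATIO_QUERY % (', '.join(metrics.keys()), '%s')
    cursor.execute(query, (relations,))  # psycopg2 adapts the list to an array

    for row in cursor.fetchall():
        print(row)  # one tuple per relation: (relname, heap_blks_read, heap_blks_hit)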
+ " Known methods are RATE, GAUGE, MONOTONIC".format(cap_mtype)) - m['metrics'][k][1] = getattr(PostgreSql, v[1].upper()) - self.log.debug("Method: %s" % (str(v[1]))) + m['metrics'][ref][1] = getattr(PostgreSql, cap_mtype) + self.log.debug("Method: %s" % (str(mtype))) + + self.custom_metrics[key] = custom_metrics + return custom_metrics def check(self, instance): host = instance.get('host', '') @@ -510,8 +549,6 @@ def check(self, instance): tags = instance.get('tags', []) dbname = instance.get('dbname', None) relations = instance.get('relations', []) - custom_metrics = instance.get('custom_metrics') or [] - self._process_customer_metrics(custom_metrics) if relations and not dbname: self.warning('"dbname" parameter must be set when using the "relations" parameter.') @@ -521,6 +558,8 @@ def check(self, instance): key = (host, port, dbname) + custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', []), key) + # Clean up tags in case there was a None entry in the instance # e.g. if the yaml contains tags: but no actual tags if tags is None: diff --git a/ci/postgres.rb b/ci/postgres.rb index 18e85fc181..48e9452434 100644 --- a/ci/postgres.rb +++ b/ci/postgres.rb @@ -46,26 +46,13 @@ def pg_rootdir sleep_for 5 sh %(#{pg_rootdir}/bin/psql\ -p 15432 -U $USER\ - -c "CREATE USER datadog WITH PASSWORD 'datadog'"\ - postgres) + postgres < $TRAVIS_BUILD_DIR/ci/resources/postgres/postgres.sql) sh %(#{pg_rootdir}/bin/psql\ - -p 15432 -U $USER\ - -c "GRANT SELECT ON pg_stat_database TO datadog"\ - postgres) - sh %(#{pg_rootdir}/bin/psql\ - -p 15432 -U $USER\ - -c "CREATE DATABASE datadog_test"\ - postgres) - sh %(#{pg_rootdir}/bin/psql\ - -p 15432 -U $USER\ - -c "GRANT ALL PRIVILEGES ON DATABASE datadog_test TO datadog"\ - postgres) + -p 15432 -U datadog\ + datadog_test < $TRAVIS_BUILD_DIR/ci/resources/postgres/datadog_test.sql) sh %(#{pg_rootdir}/bin/psql\ -p 15432 -U datadog\ - -c "CREATE TABLE persons (personid INT, lastname VARCHAR(255), firstname VARCHAR(255), address VARCHAR(255), city VARCHAR(255))"\ - datadog_test) - # For pg_stat_user_table to return stuff - sleep_for 5 + dogs < $TRAVIS_BUILD_DIR/ci/resources/postgres/dogs.sql) end task :script => ['ci:common:script'] do diff --git a/ci/resources/postgres/datadog_test.sql b/ci/resources/postgres/datadog_test.sql new file mode 100644 index 0000000000..6f81f9342e --- /dev/null +++ b/ci/resources/postgres/datadog_test.sql @@ -0,0 +1,5 @@ +CREATE TABLE persons (personid SERIAL, lastname VARCHAR(255), firstname VARCHAR(255), address VARCHAR(255), city VARCHAR(255)); +INSERT INTO persons (lastname, firstname, address, city) VALUES ('Cavaille', 'Leo', 'Midtown', 'New York'), ('Someveryveryveryveryveryveryveryveryveryverylongname', 'something', 'Avenue des Champs Elysees', 'Beautiful city of lights'); +SELECT * FROM persons; +SELECT * FROM persons; +SELECT * FROM persons; diff --git a/ci/resources/postgres/dogs.sql b/ci/resources/postgres/dogs.sql new file mode 100644 index 0000000000..4b2b883717 --- /dev/null +++ b/ci/resources/postgres/dogs.sql @@ -0,0 +1,7 @@ +CREATE TABLE breed (id SERIAL, name VARCHAR(255)); +CREATE TABLE kennel (id SERIAL, address VARCHAR(255)); +INSERT INTO kennel (address) VALUES ('Midtown, New York'), ('Boston'); +SELECT * FROM kennel; +CREATE INDEX breed_names ON breed(name); +INSERT INTO breed (name) VALUES ('Labrador Retriver'), ('German Shepherd'), ('Yorkshire Terrier'), ('Golden Retriever'), ('Bulldog'); +SELECT * FROM breed WHERE name = 'Labrador'; diff --git a/ci/resources/postgres/postgres.sql 
diff --git a/tests/common.py b/tests/common.py
index ea1af81892..12ffaa8914 100644
--- a/tests/common.py
+++ b/tests/common.py
@@ -285,7 +285,7 @@ def assertMetricTagPrefix(self, metric_name, tag_prefix, count=None, at_least=1)
         log.debug("Looking for a tag starting with `{0}:` on metric {1}".format(tag_prefix, metric_name))
         if count is not None:
             log.debug(" * should have exactly {0} data points".format(count))
-        if at_least is not None:
+        elif at_least is not None:
             log.debug(" * should have at least {0} data points".format(at_least))
 
         candidates = []
@@ -314,7 +314,7 @@ def assertMetricTag(self, metric_name, tag, count=None, at_least=1):
         log.debug("Looking for tag {0} on metric {1}".format(tag, metric_name))
         if count is not None:
             log.debug(" * should have exactly {0} data points".format(count))
-        if at_least is not None:
+        elif at_least is not None:
             log.debug(" * should have at least {0} data points".format(at_least))
 
         candidates = []
@@ -348,6 +348,8 @@ def assertServiceCheck(self, service_check_name, status=None, tags=None,
             log.debug(" * tagged with {0}".format(tags))
         if count is not None:
             log.debug(" * should have exactly {0} statuses".format(count))
+        elif at_least is not None:
+            log.debug(" * should have at least {0} statuses".format(at_least))
         candidates = []
         for sc in self.service_checks:
             if sc['check'] == service_check_name:
diff --git a/tests/test_postgres.py b/tests/test_postgres.py
new file mode 100644
index 0000000000..0f2553659f
--- /dev/null
+++ b/tests/test_postgres.py
@@ -0,0 +1,218 @@
+# stdlib
+import time
+
+# 3p
+from nose.plugins.attrib import attr
+
+# project
+from checks import AgentCheck
+from tests.common import AgentCheckTest
+
+
+@attr(requires='postgres')
+class TestPostgres(AgentCheckTest):
+    CHECK_NAME = 'postgres'
+
+    def test_checks(self):
+        host = 'localhost'
+        port = 15432
+        dbname = 'datadog_test'
+
+        instances = [
+            {
+                'host': host,
+                'port': port,
+                'username': 'datadog',
+                'password': 'datadog',
+                'dbname': dbname,
+                'relations': ['persons'],
+                'custom_metrics': [{
+                    'descriptors': [('datname', 'customdb')],
+                    'metrics': {
+                        'numbackends': ['custom.numbackends', 'Gauge'],
+                    },
+                    'query': "SELECT datname, %s FROM pg_stat_database WHERE datname = 'datadog_test' LIMIT(1)",
+                    'relation': False,
+                }]
+            },
+            {
+                'host': host,
+                'port': port,
+                'username': 'datadog',
+                'password': 'datadog',
+                'dbname': 'dogs',
+                'relations': ['breed', 'kennel']
+            }
+        ]
+
+        self.run_check_twice(dict(instances=instances))
+
+        # Useful to get the server version
+        # FIXME: Not great, should have a function like that available
+        key = (host, port, dbname)
+        db = self.check.dbs[key]
+
+        # Testing DB_METRICS scope
+        COMMON_METRICS = [
+            'postgresql.connections',
+            'postgresql.commits',
+            'postgresql.rollbacks',
+            'postgresql.disk_read',
+            'postgresql.buffer_hit',
+            'postgresql.rows_returned',
+            'postgresql.rows_fetched',
+            'postgresql.rows_inserted',
+            'postgresql.rows_updated',
+            'postgresql.rows_deleted',
+            'postgresql.database_size',
+        ]
+
+        for mname in COMMON_METRICS:
+            for db_name in ('datadog_test', 'dogs'):
+                self.assertMetric(mname, count=1, tags=['db:%s' % db_name])
+
+        NEWER_92_METRICS = [
+            'postgresql.deadlocks',
+            'postgresql.temp_bytes',
+            'postgresql.temp_files',
+        ]
+
+        if self.check._is_9_2_or_above(key, db):
+            for mname in NEWER_92_METRICS:
+                for db_name in ('datadog_test', 'dogs'):
+                    self.assertMetric(mname, count=1, tags=['db:%s' % db_name])
+
+        # Testing BGW_METRICS scope
+        COMMON_BGW_METRICS = [
+            'postgresql.bgwriter.checkpoints_timed',
+            'postgresql.bgwriter.checkpoints_requested',
+            'postgresql.bgwriter.buffers_checkpoint',
+            'postgresql.bgwriter.buffers_clean',
+            'postgresql.bgwriter.maxwritten_clean',
+            'postgresql.bgwriter.buffers_backend',
+            'postgresql.bgwriter.buffers_alloc',
+        ]
+
+        for mname in COMMON_BGW_METRICS:
+            self.assertMetric(mname, count=1)
+
+        NEWER_91_BGW_METRICS = [
+            'postgresql.bgwriter.buffers_backend_fsync',
+        ]
+
+        if self.check._is_9_1_or_above(key, db):
+            for mname in NEWER_91_BGW_METRICS:
+                self.assertMetric(mname, count=1)
+
+        NEWER_92_BGW_METRICS = [
+            'postgresql.bgwriter.write_time',
+            'postgresql.bgwriter.sync_time',
+        ]
+
+        if self.check._is_9_2_or_above(key, db):
+            for mname in NEWER_92_BGW_METRICS:
+                self.assertMetric(mname, count=1)
+
+        # FIXME: Test postgresql.locks
+
+        # Relation-specific metrics
+        RELATION_METRICS = [
+            'postgresql.seq_scans',
+            'postgresql.seq_rows_read',
+            'postgresql.index_scans',
+            'postgresql.index_rows_fetched',
+            'postgresql.rows_inserted',
+            'postgresql.rows_updated',
+            'postgresql.rows_deleted',
+            'postgresql.rows_hot_updated',
+            'postgresql.live_rows',
+            'postgresql.dead_rows',
+        ]
+
+        SIZE_METRICS = [
+            'postgresql.table_size',
+            'postgresql.index_size',
+            'postgresql.total_size',
+        ]
+
+        STATIO_METRICS = [
+            'postgresql.heap_blocks_read',
+            'postgresql.heap_blocks_hit',
+            'postgresql.index_blocks_read',
+            'postgresql.index_blocks_hit',
+            'postgresql.toast_blocks_read',
+            'postgresql.toast_blocks_hit',
+            'postgresql.toast_index_blocks_read',
+            'postgresql.toast_index_blocks_hit',
+        ]
+
+        for inst in instances:
+            for rel in inst.get('relations', []):
+                expected_tags = ['db:%s' % inst['dbname'], 'table:%s' % rel]
+                for mname in RELATION_METRICS:
+                    count = 1
+                    # We only build a test index and stimulate it on breed
+                    # in the dogs DB, so the other index metrics shouldn't be
+                    # here.
+                    if 'index' in mname and rel != 'breed':
+                        count = 0
+                    self.assertMetric(mname, count=count, tags=expected_tags)
+
+                for mname in SIZE_METRICS:
+                    self.assertMetric(mname, count=1, tags=expected_tags)
+
+                for mname in STATIO_METRICS:
+                    at_least = None
+                    count = 1
+                    if '.index' in mname and rel != 'breed':
+                        count = 0
+                    # FIXME: toast metrics are not reliable; more setup is
+                    # needed to get values here
+                    if 'toast' in mname:
+                        at_least = 0  # an easy way to mark a flaky metric w/o impacting coverage
+                        count = None
+                    self.assertMetric(mname, count=count, at_least=at_least, tags=expected_tags)
+
+        # Index metrics
+        IDX_METRICS = [
+            'postgresql.index_scans',
+            'postgresql.index_rows_read',
+            'postgresql.index_rows_fetched',
+        ]
+
+        # we have a single index defined!
+        expected_tags = ['db:dogs', 'table:breed', 'index:breed_names']
+        for mname in IDX_METRICS:
+            self.assertMetric(mname, count=1, tags=expected_tags)
+
+        # Instance-level connection metrics
+        CONNECTION_METRICS = [
+            'postgresql.max_connections',
+            'postgresql.percent_usage_connections',
+        ]
+        for mname in CONNECTION_METRICS:
+            self.assertMetric(mname, count=1)
+
+        # db-level connections
+        for inst in instances:
+            expected_tags = ['db:%s' % inst['dbname']]
+            self.assertMetric('postgresql.connections', count=1, tags=expected_tags)
+
+        # By-schema metrics
+        self.assertMetric('postgresql.table.count', value=2, count=1, tags=['schema:public'])
+        self.assertMetric('postgresql.db.count', value=2, count=1)
+
+        # Our custom metric
+        self.assertMetric('custom.numbackends', value=1, tags=['customdb:datadog_test'])
+
+        # Test service checks
+        self.assertServiceCheck('postgres.can_connect',
+            count=1, status=AgentCheck.OK,
+            tags=['host:localhost', 'port:15432', 'db:datadog_test']
+        )
+        self.assertServiceCheck('postgres.can_connect',
+            count=1, status=AgentCheck.OK,
+            tags=['host:localhost', 'port:15432', 'db:dogs']
+        )
+
+        self.coverage_report()
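The at_least = 0 trick in the STATIO loop above leans on the count/at_least contract from tests/common.py, whose debug logging is made mutually exclusive earlier in this diff: count=N demands exactly N data points, otherwise at_least=N enforces a floor. With at_least=0 the assertion can never fail, yet the metric still counts as seen for coverage_report(). A toy distillation of that contract, not the actual AgentCheckTest code:

    # Hypothetical distillation of assertMetric's count/at_least semantics.
    def assert_occurrences(points, count=None, at_least=1):
        if count is not None:
            assert len(points) == count, \
                "expected exactly %s points, got %s" % (count, len(points))
        else:
            assert len(points) >= at_least, \
                "expected at least %s points, got %s" % (at_least, len(points))

    assert_occurrences([42], count=1)   # stable metric: exact match required
    assert_occurrences([], at_least=0)  # flaky toast metric: passes even with no data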
diff --git a/tests/test_postgresql.py b/tests/test_postgresql.py
deleted file mode 100644
index b4b8b5cc9c..0000000000
--- a/tests/test_postgresql.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import os
-import unittest
-from tests.common import load_check, AgentCheckTest
-
-from nose.plugins.attrib import attr
-
-import time
-from pprint import pprint
-
-
-# postgres version: (expected metrics, expected tagged metrics per database)
-METRICS = {
-    '9.4.1': (40, 26),
-    '9.3.6': (40, 26),
-    '9.2.10': (40, 26),
-    '9.1.15': (35, 23),
-    '9.0.19': (34, 23),
-}
-
-@attr(requires='postgres')
-class TestPostgres(AgentCheckTest):
-
-    CHECK_NAME = "postgres"
-
-    def test_checks(self):
-        host = 'localhost'
-        port = 15432
-        dbname = 'datadog_test'
-
-        config = {
-            'instances': [
-                {
-                    'host': host,
-                    'port': port,
-                    'username': 'datadog',
-                    'password': 'datadog',
-                    'dbname': dbname,
-                    'relations': ['persons'],
-                    'custom_metrics': [
-                        {
-                            "descriptors": [
-                                ("datname", "customdb")
-                            ],
-                            "metrics": {
-                                "numbackends": ["custom.numbackends", "Gauge"],
-                            },
-                            "query": "SELECT datname, %s FROM pg_stat_database WHERE datname = 'datadog_test' LIMIT(1)",
-                            "relation": False,
-                        }]
-                },
-                {
-                    'host': host,
-                    'port': port,
-                    'username': 'datadog',
-                    'password': 'datadog',
-                    'dbname': 'postgres'
-                }
-            ]
-        }
-        agentConfig = {
-            'version': '0.1',
-            'api_key': 'toto'
-        }
-
-        self.check = load_check('postgres', config, agentConfig)
-
-        self.check.run()
-
-        # FIXME: Not great, should have a function like that available
-        key = (host, port, dbname)
-        db = self.check.dbs[key]
-
-        metrics = self.check.get_metrics()
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.connections']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.dead_rows']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.live_rows']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.table_size']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.index_size']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.total_size']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.max_connections']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.percent_usage_connections']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.total_tables']) == 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.db.count']) == 1, pprint(metrics))
-
-        # Rate metrics, need 2 collection rounds
-        time.sleep(1)
-        self.check.run()
-        metrics = self.check.get_metrics()
-
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.checkpoints_timed']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.checkpoints_requested']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.buffers_checkpoint']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.buffers_clean']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.maxwritten_clean']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.buffers_backend']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.buffers_alloc']) >= 1, pprint(metrics))
-
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.commits']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.rollbacks']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.disk_read']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.buffer_hit']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.rows_returned']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.rows_fetched']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.rows_inserted']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.rows_updated']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.rows_deleted']) >= 1, pprint(metrics))
-
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.seq_scans']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.seq_rows_read']) >= 1, pprint(metrics))
-        self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.rows_hot_updated']) >= 1, pprint(metrics))
-
-        if self.check._is_9_1_or_above(key, db):
-            self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.buffers_backend_fsync']) >= 1, pprint(metrics))
-
-        if self.check._is_9_2_or_above(key, db):
-            self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.write_time']) >= 1, pprint(metrics))
-            self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.bgwriter.sync_time']) >= 1, pprint(metrics))
-            self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.deadlocks']) >= 1, pprint(metrics))
-            self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.temp_bytes']) >= 1, pprint(metrics))
-            self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.temp_files']) >= 1, pprint(metrics))
-
-        # Service checks
-        service_checks = self.check.get_service_checks()
-        service_checks_count = len(service_checks)
-        self.assertTrue(type(service_checks) == type([]))
-        self.assertTrue(service_checks_count > 0)
-        self.assertEquals(len([sc for sc in service_checks if sc['check'] == "postgres.can_connect"]), 4, service_checks)
-        # Assert that all service checks have the proper tags: host, port and db
-        self.assertEquals(len([sc for sc in service_checks if "host:localhost" in sc['tags']]), service_checks_count, service_checks)
-        self.assertEquals(len([sc for sc in service_checks if "port:%s" % config['instances'][0]['port'] in sc['tags']]), service_checks_count, service_checks)
-        self.assertEquals(len([sc for sc in service_checks if "db:%s" % config['instances'][0]['dbname'] in sc['tags']]), service_checks_count/2, service_checks)
-
-        time.sleep(1)
-        self.check.run()
-        metrics = self.check.get_metrics()
-
-        self.assertEquals(len([m for m in metrics if 'table:persons' in str(m[3].get('tags', []))]), 11, metrics)
-
-        pg_version_array = self.check._get_version(key, db)
-        pg_version = '.'.join(str(x) for x in pg_version_array)
-        exp_metrics = METRICS[pg_version][0]
-        exp_db_tagged_metrics = METRICS[pg_version][1]
-        self.assertEquals(len(metrics), exp_metrics, metrics)
-        self.assertEquals(len([m for m in metrics if 'db:datadog_test' in str(m[3].get('tags', []))]), exp_db_tagged_metrics, metrics)
-
-        self.metrics = metrics
-        self.assertMetric("custom.numbackends")
-
-if __name__ == '__main__':
-    unittest.main()