Revert new query type (#1967)
* Revert "Comment out example custom metric (#1965)"

This reverts commit b7d74c4.

* Revert "Fix example yaml (#1963)"

This reverts commit 54a7d07.

* Revert "support object_name metric identifiers (#1679)"

This reverts commit 149da55.
ofek committed Jul 30, 2018
1 parent 99883e3 commit 1467fd0
Showing 2 changed files with 50 additions and 87 deletions.
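
For orientation, here is a minimal sketch of the two custom-metric shapes this revert distinguishes, written as the Python dicts the check receives from the custom_metrics section of init_config. Values are illustrative; only the object_name entry is taken from the example block removed below.

# Still accepted after this revert: metrics identified by counter_name.
# (name/counter values here are hypothetical)
counter_name_metric = {
    'name': 'sqlserver.clr.execution',
    'counter_name': 'CLR Execution',
    'instance_name': 'ALL',
    'tag_by': 'db',
}

# No longer accepted after this revert: metrics identified by object_name.
# (values copied from the example block removed from conf.yaml.example below)
object_name_metric = {
    'name': 'sqlserver.cache.hit_ratio',
    'object_name': 'SQLServer:Columnstore',
    'instance_name': 'ALL',
    'tag_by': 'db',
}
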
23 changes: 7 additions & 16 deletions sqlserver/datadog_checks/sqlserver/data/conf.yaml.example
@@ -36,24 +36,15 @@ init_config:
# instance_name: ALL
# tag_by: db

# You can also query by the object_name. Since there may be multiple instances
# associated with these queries, they also require instance_name set to ALL and
# a tag_by identifier.
#
# - name: sqlserver.cache.hit_ratio
# object_name: SQLServer:Columnstore
# instance_name: ALL
# tag_by : db

# the default table from which counters are drawn is the sys.dm_os_performance_counters
# table. This integration also supports
# sys.dm_os_wait_stats
# sys.dm_os_memory_clerks
# sys.dm_io_virtual_file_stats

# to report a metric drawn from one of the additional tables, specify the table in
# the counter definition, as well as the counter columns to be reported

# - name: sqlserver.LCK_M_S
# table: sys.dm_os_wait_stats
# counter_name: LCK_M_S
@@ -77,7 +68,7 @@ init_config:
# As well as capturing from the DMV you can also capture from a custom proc
# Please note this feature will produce a number of custom metrics that might affect your billing
# This feature is also only available when using the adodbapi driver (set by default)
# To use this feature, specify in your instance the procedure to execute :
# stored_procedure: ProcedureToExecute
#
# The proc should return this table
@@ -90,11 +81,11 @@ init_config:
# )
#
# NOTE: SET NOCOUNT to ON inside proc to avoid extra resultsets that prevent valid query results
#
# You can also specify:
# ignore_missing_database : if DB doesn't exist on the server then don't do the check. Default False
# proc_only_if : run this SQL before each call to stored_procedure. Only if it returns 1 then call the proc
# proc_only_if_database : the database to run the proc_only_if SQL in. Optional.
# Defaults to database attribute
#
# The proc_only_if guard condition is useful for HA scenarios where a database can move between servers.
@@ -120,7 +111,7 @@ instances:
# Optional, specify a custom connection string to be used
# Ex: "ApplicationIntent=ReadWrite" or "MultiSubnetFailover=True"
# connection_string: "my_connection_string"

# Optional, timeout in seconds for the connection and each command run
# command_timeout: 30
# database: my_database # Optional, defaults to "master"
@@ -137,4 +128,4 @@ instances:
# stored_procedure: GetDatadogMetrics
# proc_only_if: SELECT CASE CONVERT(sysname,DatabasePropertyEx('MyDB','Updateability')) WHEN 'READ_WRITE' THEN 1 ELSE 0 END
# proc_only_if_database: master
# ignore_missing_database: True
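
To make the stored-procedure options described above concrete, here is a rough sketch of the guard-then-call flow in plain DB-API terms. The connection details are placeholders, pyodbc is used only to keep the sketch self-contained (the check defaults to adodbapi), and the proc's return schema is abbreviated in this diff, so rows are left untyped.

import pyodbc  # stand-in driver for this sketch; the check defaults to adodbapi

conn = pyodbc.connect('DSN=my_sqlserver;UID=datadog;PWD=placeholder')  # placeholder DSN
cursor = conn.cursor()

# proc_only_if: run this SQL first; only call the proc when it returns 1.
cursor.execute(
    "SELECT CASE CONVERT(sysname, DatabasePropertyEx('MyDB', 'Updateability')) "
    "WHEN 'READ_WRITE' THEN 1 ELSE 0 END"
)
(should_run,) = cursor.fetchone()

if should_run == 1:
    # stored_procedure: the proc should SET NOCOUNT ON and return a single
    # result set of metric rows, which the check submits to Datadog.
    cursor.execute('EXEC GetDatadogMetrics')
    for row in cursor.fetchall():
        print(row)
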
114 changes: 43 additions & 71 deletions sqlserver/datadog_checks/sqlserver/sqlserver.py
@@ -13,7 +13,6 @@
from collections import defaultdict
# 3rd party
import adodbapi
from six import iteritems
try:
import pyodbc
except ImportError:
@@ -38,20 +37,20 @@
# Queries
COUNTER_TYPE_QUERY = '''select distinct cntr_type
from sys.dm_os_performance_counters
where {metric_type} = ?;'''
where counter_name = ?;'''

BASE_NAME_QUERY = '''select distinct {metric_type}
BASE_NAME_QUERY = '''select distinct counter_name
from sys.dm_os_performance_counters
where ({metric_type}=? or {metric_type}=?
or {metric_type}=?) and cntr_type=%s;''' % PERF_LARGE_RAW_BASE
where (counter_name=? or counter_name=?
or counter_name=?) and cntr_type=%s;''' % PERF_LARGE_RAW_BASE

INSTANCES_QUERY = '''select instance_name
from sys.dm_os_performance_counters
where {metric_type}=? and instance_name!='_Total';'''
where counter_name=? and instance_name!='_Total';'''

VALUE_AND_BASE_QUERY = '''select {metric_type}, cntr_type, cntr_value, instance_name
VALUE_AND_BASE_QUERY = '''select counter_name, cntr_type, cntr_value, instance_name
from sys.dm_os_performance_counters
where {metric_type} in (%s)
where counter_name in (%s)
order by cntr_type;'''

DATABASE_EXISTS_QUERY = 'select name from sys.databases;'
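
A sketch, not part of this commit, of how these reverted constants are consumed. It assumes the module-level names shown above and mirrors get_sql_type further down: run COUNTER_TYPE_QUERY for a counter and, for the two ratio types, resolve the companion "base" counter with BASE_NAME_QUERY.

# Sketch only: mirrors the post-revert flow in get_sql_type below.
def lookup_counter_type(cursor, counter_name):
    # Ask sys.dm_os_performance_counters which cntr_type this counter reports.
    cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
    (sql_type,) = cursor.fetchone()

    base_name = None
    if sql_type in (PERF_AVERAGE_BULK, PERF_RAW_LARGE_FRACTION):
        # Ratio counters need a companion "base" row; the real code tries a few
        # candidate spellings, of which only the last appears in this diff.
        candidates = (
            counter_name + " base",
            counter_name + " Base",
            counter_name.replace("Avg ", "") + " base",
        )
        cursor.execute(BASE_NAME_QUERY, candidates)
        base_name = cursor.fetchone().counter_name.strip()
    return sql_type, base_name
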
@@ -69,10 +68,6 @@ class SQLConnectionError(Exception):
pass


def format_query(query, metric_type='counter_name'):
return query.format(metric_type=metric_type)


class SQLServer(AgentCheck):

SERVICE_CHECK_NAME = 'sqlserver.can_connect'
@@ -193,12 +188,11 @@ def _make_metric_list_to_collect(self, instance, custom_metrics):
metrics_to_collect = []
for name, counter_name, instance_name in self.METRICS:
try:
sql_type, base_name = self.get_sql_type(instance, counter_name)
cfg = {}
cfg['name'] = name
cfg['counter_name'] = counter_name
cfg['instance_name'] = instance_name
cfg['metric_type'] = 'counter_name'
sql_type, base_name = self.get_sql_type(instance, cfg)

metrics_to_collect.append(self.typed_metric(instance,
cfg,
@@ -220,22 +214,14 @@ def _make_metric_list_to_collect(self, instance, custom_metrics):
self.log.error('%s has an invalid table name: %s', row['name'], db_table)
continue

if 'counter_name' in row:
row['metric_type'] = 'counter_name'
elif 'object_name' in row:
row['metric_type'] = 'object_name'
else:
self.log.error('Field counter_name or object_name is required for {}'.format(row['name']))
continue

if db_table == DEFAULT_PERFORMANCE_TABLE:
user_type = row.get('type')
if user_type is not None and user_type not in VALID_METRIC_TYPES:
self.log.error('%s has an invalid metric type: %s', row['name'], user_type)
sql_type = None
try:
if user_type is None:
sql_type, base_name = self.get_sql_type(instance, row)
sql_type, base_name = self.get_sql_type(instance, row['counter_name'])
except Exception:
self.log.warning("Can't load the metric %s, ignoring", row['name'], exc_info=True)
continue
@@ -260,20 +246,20 @@ def _make_metric_list_to_collect(self, instance, custom_metrics):

instance_key = self._conn_key(instance, self.DEFAULT_DB_KEY)
self.instances_metrics[instance_key] = metrics_to_collect
simple_metrics = defaultdict(list)
fraction_metrics = defaultdict(list)
simple_metrics = []
fraction_metrics = []
wait_stat_metrics = []
vfs_metrics = []
clerk_metrics = []
self.log.debug("metrics to collect %s", str(metrics_to_collect))
for m in metrics_to_collect:
if type(m) is SqlSimpleMetric:
self.log.debug("Adding simple metric %s", m.sql_name)
simple_metrics[m.metric_type].append(m.sql_name)
simple_metrics.append(m.sql_name)
elif type(m) is SqlFractionMetric or type(m) is SqlIncrFractionMetric:
self.log.debug("Adding fraction metric %s", m.sql_name)
fraction_metrics[m.metric_type].append(m.sql_name)
fraction_metrics[m.metric_type].append(m.base_name)
fraction_metrics.append(m.sql_name)
fraction_metrics.append(m.base_name)
elif type(m) is SqlOsWaitStat:
self.log.debug("Adding SqlOsWaitStat metric %s", m.sql_name)
wait_stat_metrics.append(m.sql_name)
@@ -422,18 +408,15 @@ def get_cursor(self, instance, db_key, db_name=None):
cursor = conn.cursor()
return cursor

def get_sql_type(self, instance, cfg):
def get_sql_type(self, instance, counter_name):
'''
Return the type of the performance counter so that we can report it to
Datadog correctly
If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and
PERF_AVERAGE_BULK), the name of the base counter will also be returned
'''
with self.get_managed_cursor(instance, self.DEFAULT_DB_KEY) as cursor:
metric_type = cfg['metric_type']
counter_name = cfg[metric_type]

cursor.execute(format_query(COUNTER_TYPE_QUERY, metric_type), (counter_name,))
cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
(sql_type,) = cursor.fetchone()
if sql_type == PERF_LARGE_RAW_BASE:
self.log.warning("Metric %s is of type Base and shouldn't be reported this way",
@@ -449,7 +432,7 @@ def get_sql_type(self, instance, cfg):
counter_name.replace("Avg ", "") + " base"
)
try:
cursor.execute(format_query(BASE_NAME_QUERY, metric_type), candidates)
cursor.execute(BASE_NAME_QUERY, candidates)
base_name = cursor.fetchone().counter_name.strip()
self.log.debug("Got base metric: %s for metric: %s", base_name, counter_name)
except Exception as e:
@@ -663,8 +646,7 @@ def __init__(self, connector, cfg_instance, base_name,
self.connector = connector
self.cfg_instance = cfg_instance
self.datadog_name = cfg_instance['name']
self.metric_type = cfg_instance.get('metric_type', 'counter_name')
self.sql_name = cfg_instance.get(self.metric_type, '')
self.sql_name = cfg_instance.get('counter_name', '')
self.base_name = base_name
self.report_function = report_function
self.instance = cfg_instance.get('instance_name', '')
@@ -681,22 +663,17 @@ def fetch_metrics(self, cursor, tags):
class SqlSimpleMetric(SqlServerMetric):

@classmethod
def fetch_all_values(cls, cursor, metric_lists, logger):
rows = []

for metric_type, counters_list in iteritems(metric_lists):
placeholder = '?'
placeholders = ', '.join(placeholder for unused in counters_list)
query_base = '''
select {metric_type}, instance_name, cntr_value
from sys.dm_os_performance_counters where {metric_type} in (%s)
''' % placeholders
query_base = format_query(query_base, metric_type)

logger.debug("query base: %s", query_base)
cursor.execute(query_base, counters_list)
rows.extend(cursor.fetchall())
def fetch_all_values(cls, cursor, counters_list, logger):
placeholder = '?'
placeholders = ', '.join(placeholder for unused in counters_list)
query_base = '''
select counter_name, instance_name, cntr_value
from sys.dm_os_performance_counters where counter_name in (%s)
''' % placeholders

logger.debug("query base: %s", query_base)
cursor.execute(query_base, counters_list)
rows = cursor.fetchall()
return rows

def fetch_metric(self, cursor, rows, tags):
@@ -723,29 +700,24 @@ def fetch_metric(self, cursor, rows, tags):
class SqlFractionMetric(SqlServerMetric):

@classmethod
def fetch_all_values(cls, cursor, metric_lists, logger):
all_results = {}

for metric_type, counters_list in iteritems(metric_lists):
placeholder = '?'
placeholders = ', '.join(placeholder for unused in counters_list)
query_base = format_query(VALUE_AND_BASE_QUERY % placeholders, metric_type)

logger.debug("query base: %s, %s", query_base, str(counters_list))
cursor.execute(query_base, counters_list)
rows = cursor.fetchall()
results = defaultdict(list)
for counter_name, cntr_type, cntr_value, instance_name in rows:
rowlist = [cntr_type, cntr_value, instance_name.strip()]
logger.debug("Adding new rowlist %s", str(rowlist))
results[counter_name.strip()].append(rowlist)
all_results.update(results)

return all_results
def fetch_all_values(cls, cursor, counters_list, logger):
placeholder = '?'
placeholders = ', '.join(placeholder for unused in counters_list)
query_base = VALUE_AND_BASE_QUERY % placeholders

logger.debug("query base: %s, %s", query_base, str(counters_list))
cursor.execute(query_base, counters_list)
rows = cursor.fetchall()
results = defaultdict(list)
for counter_name, cntr_type, cntr_value, instance_name in rows:
rowlist = [cntr_type, cntr_value, instance_name.strip()]
logger.debug("Adding new rowlist %s", str(rowlist))
results[counter_name.strip()].append(rowlist)
return results

def set_instances(self, cursor):
if self.instance == ALL_INSTANCES:
cursor.execute(format_query(INSTANCES_QUERY, self.metric_type), (self.sql_name,))
cursor.execute(INSTANCES_QUERY, (self.sql_name,))
self.instances = [row.instance_name for row in cursor.fetchall()]
else:
self.instances = [self.instance]
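
Sketched with hypothetical counter names, the net effect of the revert on how metric name lists are built and passed to fetch_all_values: the per-identifier dicts go away and flat lists of counter_name values come back.

# Post-revert: _make_metric_list_to_collect builds flat lists of counter names.
simple_metrics = ['Buffer cache hit ratio']                               # hypothetical counters
fraction_metrics = ['Buffer cache hit ratio', 'Buffer cache hit ratio base']

# Reverted form (removed by this commit): the same lists were dicts keyed by the
# identifier column, e.g. {'counter_name': [...], 'object_name': [...]}, and
# fetch_all_values iterated over each identifier type in turn.
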
