diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py
index d3b1d479c..ce1217d5b 100755
--- a/delfin/api/v1/storages.py
+++ b/delfin/api/v1/storages.py
@@ -37,7 +37,7 @@ CONF = cfg.CONF
 
 telemetry_opts = [
-    cfg.IntOpt('performance_collection_interval', default=300,
+    cfg.IntOpt('performance_collection_interval', default=900,
               help='default interval (in sec) for performance collection'),
 ]
 
@@ -127,7 +127,6 @@ def create(self, req, body):
             msg = _('Failed to trigger performance monitoring for storage: '
                     '%(storage)s. Error: %(err)s') % {'storage': storage['id'],
                                                       'err': six.text_type(e)}
-
             LOG.error(msg)
 
         return storage_view.build_storage(storage)
diff --git a/delfin/common/constants.py b/delfin/common/constants.py
index 246235b79..bda7be3b6 100644
--- a/delfin/common/constants.py
+++ b/delfin/common/constants.py
@@ -292,7 +292,6 @@ class SecurityLevel(object):
     "writeRequests"
 ]
 
-
 BLOCK_SIZE = 4096
 
 
@@ -307,3 +306,4 @@ class TelemetryCollection(object):
     PERFORMANCE_TASK_METHOD = "delfin.task_manager.scheduler.schedulers." \
                               "telemetry.performance_collection_handler." \
                               "PerformanceCollectionHandler"
+    PERIODIC_JOB_INTERVAL = 300
diff --git a/delfin/task_manager/scheduler/schedule_manager.py b/delfin/task_manager/scheduler/schedule_manager.py
index 438bdc8e9..6f2c3e693 100644
--- a/delfin/task_manager/scheduler/schedule_manager.py
+++ b/delfin/task_manager/scheduler/schedule_manager.py
@@ -20,27 +20,20 @@ from oslo_utils import uuidutils
 
 from delfin import context
+from delfin.common import constants
 from delfin.task_manager.scheduler import scheduler
 from delfin.task_manager.scheduler.schedulers.telemetry import telemetry_job
 
 LOG = log.getLogger(__name__)
 CONF = cfg.CONF
 
-telemetry_opts = [
-    cfg.IntOpt('periodic_task_schedule_interval', default=180,
-               help='default interval (in sec) for the periodic scan for '
-                    'failed task scheduling'),
-]
-CONF.register_opts(telemetry_opts, "TELEMETRY")
-telemetry = CONF.TELEMETRY
-
 
 class SchedulerManager(object):
 
     def __init__(self):
         self.schedule_instance = scheduler.Scheduler.get_instance()
 
     def start(self):
-        """ Initialise the schedulers for collection and failed tasks
+        """ Initialise the schedulers for periodic job creation
         """
         ctxt = context.get_admin_context()
         try:
@@ -49,7 +42,7 @@ def start(self):
             periodic_scheduler_job_id = uuidutils.generate_uuid()
             self.schedule_instance.add_job(
                 telemetry_job.TelemetryJob(ctxt), 'interval', args=[ctxt],
-                seconds=telemetry.periodic_task_schedule_interval,
+                seconds=constants.TelemetryCollection.PERIODIC_JOB_INTERVAL,
                 next_run_time=datetime.now(), id=periodic_scheduler_job_id)
 
         except Exception as e:
diff --git a/delfin/task_manager/scheduler/schedulers/telemetry/performance_collection_handler.py b/delfin/task_manager/scheduler/schedulers/telemetry/performance_collection_handler.py
index 8ec1f4295..6f23030d3 100644
--- a/delfin/task_manager/scheduler/schedulers/telemetry/performance_collection_handler.py
+++ b/delfin/task_manager/scheduler/schedulers/telemetry/performance_collection_handler.py
@@ -31,16 +31,15 @@ def __init__(self):
     def __call__(self, ctx, task_id):
         # Handles performance collection from driver and dispatch
        try:
-
             task = db.task_get(ctx, task_id)
             LOG.debug('Collecting performance metrics for task id: %s'
                       % task['id'])
             current_time = int(datetime.utcnow().timestamp())
             db.task_update(ctx, task_id, {'last_run_time': current_time})
 
-            # Times (starttime and endtime) are epoch time in miliseconds
-            start_time = current_time * 1000
-            end_time = start_time + task['interval'] * 10000
+            # Times are epoch time in milliseconds
+            end_time = current_time * 1000
+            start_time = end_time - (task['interval'] * 1000)
             self.task_rpcapi.collect_telemetry(ctx, task['storage_id'],
                                                telemetry.TelemetryTask.
                                                __module__ + '.' +
@@ -51,8 +50,7 @@ def __call__(self, ctx, task_id):
             LOG.error("Failed to collect performance metrics for "
                       "task id :{0}, reason:{1}".format(task_id,
                                                         six.text_type(e)))
-
-            # Add this entry to failed task for the retry process
         else:
             LOG.debug("Performance collection done for storage id :{0}"
-                      " and task id:{1}".format(task['storage_id'], task_id))
+                      ",task id :{1} and interval(in sec):{2}"
+                      .format(task['storage_id'], task_id, task['interval']))
diff --git a/delfin/task_manager/scheduler/schedulers/telemetry/telemetry_job.py b/delfin/task_manager/scheduler/schedulers/telemetry/telemetry_job.py
index 8d21580ad..4d7b5c84d 100644
--- a/delfin/task_manager/scheduler/schedulers/telemetry/telemetry_job.py
+++ b/delfin/task_manager/scheduler/schedulers/telemetry/telemetry_job.py
@@ -55,7 +55,7 @@ def __call__(self, ctx):
             # method indicates the specific collection task to be triggered
             collection_class = importutils.import_class(task['method'])
 
-            # Create periodic task
+            # Create periodic job
             self.schedule.add_job(
                 collection_class(), 'interval', args=[ctx, task_id],
                 seconds=task['interval'],
diff --git a/etc/delfin/delfin.conf b/etc/delfin/delfin.conf
index e0f4f4d83..844cc4232 100644
--- a/etc/delfin/delfin.conf
+++ b/etc/delfin/delfin.conf
@@ -12,7 +12,6 @@ db_backend = sqlalchemy
 
 [TELEMETRY]
performance_collection_interval = 300
-periodic_task_schedule_interval = 180
 
 [KAFKA_EXPORTER]
 kafka_topic_name = "delfin-kafka"
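
Note (illustrative, not part of the patch): a minimal sketch of the collection window computed by the revised PerformanceCollectionHandler above, assuming interval_seconds is the task interval in seconds. The helper name collection_window is hypothetical and only mirrors the patched arithmetic.

    from datetime import datetime

    def collection_window(interval_seconds):
        # Epoch times in milliseconds: the window ends "now" and spans one
        # interval, matching end_time = current_time * 1000 and
        # start_time = end_time - (task['interval'] * 1000) in the hunk above.
        end_time = int(datetime.utcnow().timestamp()) * 1000
        start_time = end_time - (interval_seconds * 1000)
        return start_time, end_time

    # With the new default performance_collection_interval of 900, each run
    # would cover the 15-minute window ending at the current time.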