diff --git a/py/sight/demo/w1.py b/py/sight/demo/w1.py
index 817bf25..098375e 100644
--- a/py/sight/demo/w1.py
+++ b/py/sight/demo/w1.py
@@ -39,6 +39,8 @@ def warn(*args, **kwargs):
 from sight.sight import Sight
 from sight.widgets.decision import decision
 from sight.widgets.decision import proposal
+from helpers.logs.logs_handler import logger as logging
+
 FLAGS = flags.FLAGS
@@ -214,10 +216,10 @@ async def propose_actions_wrapper(sight: Sight, question_label: str) -> None:
           # both base and treatment are considerred to be same dict here
           propose_actions(sight, question_label, sample, sample)))
 
-  print("waiting for all get outcome to finish.....")
+  logging.info("waiting for all get outcome to finish.....")
   diff_time_series = await asyncio.gather(*tasks)
-  print("all get outcome are finished.....")
-  print(f'Combine Series : {diff_time_series}')
+  logging.info("all get outcome are finished.....")
+  logging.info('Combine Series : %s', diff_time_series)
 
 
 def driver(sight: Sight) -> None:
@@ -229,7 +231,7 @@ def driver(sight: Sight) -> None:
   for _ in range(1):
     next_point = decision.decision_point(get_question_label(), sight)
-    print('next_point : ', next_point)
+    logging.info('next_point : %s', next_point)
 
   # using next_points to propose actions
   asyncio.run(
@@ -274,6 +276,10 @@ def main(argv: Sequence[str]) -> None:
       sight.set_object_code_loc(sight_obj, frame)
       sight.log_object(sight_obj, True)
 
+  # this thread polls the server for the outcomes of proposed actions
+  decision.init_sight_polling_thread(sight.id,
+                                     get_question_label_to_propose_actions())
+
   decision.run(sight=sight, question_label=get_question_label(), driver_fn=driver)
diff --git a/py/sight/utility.py b/py/sight/utility.py
index 9bfdfc0..69ef6f0 100644
--- a/py/sight/utility.py
+++ b/py/sight/utility.py
@@ -29,9 +29,11 @@ from sight.utils.proto_conversion import convert_proto_to_dict
 from sight.widgets.decision.resource_lock import RWLockDictWrapper
 from sight_service.proto import service_pb2
+from helpers.logs.logs_handler import logger as logging
 
-POLL_LIMIT = 300  # POLL_TIME_INTERVAL th part of second
-POLL_TIME_INTERVAL = 6  # seconds
+
+POLL_LIMIT = 10  # max number of empty polls before the polling thread exits
+POLL_TIME_INTERVAL = 2  # seconds
 
 global_outcome_mapping = RWLockDictWrapper()
@@ -90,7 +92,7 @@ def poll_network_batch_outcome(sight_id, question_label):
       # print("pending action ids : ", pending_action_ids)
       if len(pending_action_ids):
         counter = POLL_LIMIT
-        print(f'BATCH POLLING THE IDS FOR => {len(pending_action_ids)}')
+        logging.info('BATCH POLLING THE IDS FOR => %s', len(pending_action_ids))
         # print(f'BATCH POLLING THE IDS FOR => {pending_action_ids}')
         outcome_of_action_ids = get_all_outcomes(sight_id, question_label,
                                                  pending_action_ids)
@@ -103,8 +105,8 @@ def poll_network_batch_outcome(sight_id, question_label):
         global_outcome_mapping.update(new_dict)
       else:
-        print(
-            f'Not sending request as no pending ids ...=> {pending_action_ids} with counter => {counter}'
+        logging.info(
+            'Not sending request as no pending ids ...=> %s with counter => %s', pending_action_ids, counter
         )
         if counter <= 0:
           return
diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py
index be7402e..b04beac 100644
--- a/py/sight/widgets/decision/decision.py
+++ b/py/sight/widgets/decision/decision.py
@@ -221,7 +221,7 @@ def init_sight_polling_thread(sight_id, question_label):
   # print
   status_update_thread = threading.Thread(target=poll_network_batch_outcome,
                                           args=(sight_id, question_label))
-  print('*************** starting thread ************')
+  logging.info('*************** starting thread ************')
   status_update_thread.start()
diff --git a/py/sight/widgets/decision/trials.py b/py/sight/widgets/decision/trials.py
index f6061d3..3d667df 100644
--- a/py/sight/widgets/decision/trials.py
+++ b/py/sight/widgets/decision/trials.py
@@ -115,11 +115,12 @@ def launch(
   response = service.call(lambda s, meta: s.Launch(req, 300, metadata=meta))
 
   # start polling thread, fetching outcome from server for proposed actions
-  if (decision_configuration.optimizer_type == sight_pb2.
-      DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER and
-      response.display_string == "Worklist Scheduler SUCCESS!"):
-    decision.init_sight_polling_thread(sight.id,
-                                       decision_configuration.question_label)
+  # as we now await the response for this worklist-scheduler proposal, this thread is no longer started here
+  # if (decision_configuration.optimizer_type == sight_pb2.
+  #     DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER and
+  #     response.display_string == "Worklist Scheduler SUCCESS!"):
+  #   decision.init_sight_polling_thread(sight.id,
+  #                                      decision_configuration.question_label)
 
   logging.info('##### Launch response=%s #####', response)
   logging.debug('<<<<<<<<<  Out %s method of %s file.', method_name, _file_name)
diff --git a/sight_service/service_root.py b/sight_service/service_root.py
index bf7862f..70e48ff 100644
--- a/sight_service/service_root.py
+++ b/sight_service/service_root.py
@@ -307,14 +307,16 @@ def Close(self, request, context):
     logging.info('request in close is : %s', request)
 
     with self.optimizers.instances_lock.gen_rlock():
-      # there is an issue with this : even one of the worker calls the close,
+      # fixed now - there was an issue with this: even if one of the workers calls close,
       # this will call the close on the optimizer - need to fix this
+      # logging.info("request => %s", request)
      if request.HasField("question_label"):
        instance = self.optimizers.get_instance(request.client_id,
                                                request.question_label)
        # print('*********lenght of instances : ', len(instances))
        if instance:
          # for question, obj in instances.items():
+          # logging.info('instance found : %s', instance)
          obj = instance.close(request)
      else:
        logging.info(
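
Note on the logging changes above: `helpers.logs.logs_handler.logger` is assumed to expose the same `info(msg, *args)` API as a stdlib `logging.Logger`, so the patched call sites should use lazy %-formatting rather than f-strings. A minimal, self-contained sketch of the pattern (the variable values are illustrative stand-ins):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    pending_action_ids = [101, 102, 103]  # stand-in for the real pending ids
    counter = 10

    # The message template must NOT be an f-string: the %s placeholders are
    # filled in by the logging framework, and only if the record is actually
    # emitted at the current log level.
    logger.info('BATCH POLLING THE IDS FOR => %s', len(pending_action_ids))
    logger.info('Not sending request as no pending ids ...=> %s with counter => %s',
                pending_action_ids, counter)

On the constants: if poll_network_batch_outcome decrements `counter` once per POLL_TIME_INTERVAL sleep (the decrement itself is outside these hunks), the idle window before the polling thread exits drops from 300 * 6 s = 30 min to 10 * 2 s = 20 s.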