From c535982913366a2fa2e412f2dc8906086987e8d2 Mon Sep 17 00:00:00 2001
From: Starofall
Date: Mon, 16 Jan 2017 22:16:58 +0100
Subject: [PATCH] Make definition more general / add dependencies

---
 experiment-specification/definition.py | 34 +++++++++++++-------------
 rtxlib/report.py                       |  2 +-
 setup.py                               |  4 ++-
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/experiment-specification/definition.py b/experiment-specification/definition.py
index 176eb5c..9a98062 100644
--- a/experiment-specification/definition.py
+++ b/experiment-specification/definition.py
@@ -59,38 +59,38 @@
     # If we use the Spark preprocessor, we have to define this sparkConfig
     "spark": {
         # currently we only support "local_jar"
-        "submit_mode": "local_jar",
-        # name of the spark jobs jar (located in the experiment's folder)
-        "job_file": "CrowdNavSpark-assembly-1.0.jar",
-        # the class of the script to start
-        "job_class": "crowdnav.Main"
+        "submit_mode": "",
+        # name of the spark jobs jar (located in the experiment's folder) - e.g. "assembly-1.0.jar"
+        "job_file": "",
+        # the class of the script to start - e.g. "crowdnav.Main"
+        "job_class": ""
     },
     # If we use KafkaProducer as a ChangeProvider, we have to define this kafkaProducerConfig
     "kafka_producer": {
-        # Where we can connect to kafka
-        "kafka_uri": "sparfka:9092",
+        # Where we can connect to kafka - e.g. kafka:9092
+        "kafka_uri": "",
         # The topic to listen to
-        "topic": "crowd-nav-commands",
+        "topic": "",
         # The serializer we want to use for kafka messages
         # Currently only "JSON" is supported
-        "serializer": "JSON",
+        "serializer": "",
     },
     # If we use KafkaConsumer as a DataProvider, we have to define this kafkaConsumerConfig
     "kafka_consumer": {
         # Where we can connect to kafka
-        "kafka_uri": "sparfka:9092",
+        "kafka_uri": "",
         # The topic to listen to
-        "topic": "crowd-nav-trips",
+        "topic": "",
         # The serializer we want to use for kafka messages
         # Currently only "JSON" is supported
-        "serializer": "JSON",
+        "serializer": "",
     },
 }
 
 # If we use ExecutionStrategy "self_optimizer" ->
 self_optimizer = {
-    # Currently only Gauss Process
-    "method": "gauss_process",
+    # Currently only "gauss_process" is supported
+    "method": "",
     # If new changes are not instantly visible, we want to ignore some results after state changes
     "ignore_first_n_results": 1000,
     # How many samples of data to receive for one run
@@ -98,7 +98,7 @@
     # The variables to modify
     "knobs": {
         # defines a [from-to] interval that will be used by the optimizer
-        "exploration_percentage": [0.0, 1.0]
+        "variable_name": [0.0, 1.0]
     }
 }
@@ -107,7 +107,7 @@
     {
         # Variable that is changed in the process
         "knobs": {
-            "exploration_percentage": 0.0
+            "variable_name": 0.0
         },
         # If new changes are not instantly visible, we want to ignore some results after state changes
         "ignore_first_n_results": 1000,
@@ -117,7 +117,7 @@
     {
         # Variable that is changed in the process
         "knobs": {
-            "exploration_percentage": 0.1
+            "variable_name": 0.1
         },
         # If new changes are not instantly visible, we want to ignore some results after state changes
         "ignore_first_n_results": 1000,
diff --git a/rtxlib/report.py b/rtxlib/report.py
index f9e6400..a1437d8 100644
--- a/rtxlib/report.py
+++ b/rtxlib/report.py
@@ -1,5 +1,5 @@
 from colorama import Fore
-from dbalib import info, error
+from rtxlib import info, error
 
 import pandas as pd
 import seaborn as sns
diff --git a/setup.py b/setup.py
index bacfc2d..287713d 100644
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,8 @@
         'colorama',
         'kafka-python',
         'scikit-optimize',
-        'flask'
+        'flask',
+        'pandas',
+        'seaborn'
     ]
 )
\ No newline at end of file