diff --git a/luigi/contrib/bigquery.py b/luigi/contrib/bigquery.py
index 79fa180a26..f3ca73c506 100644
--- a/luigi/contrib/bigquery.py
+++ b/luigi/contrib/bigquery.py
@@ -174,7 +174,7 @@ def table_exists(self, table):
 
         return True
 
-    def make_dataset(self, dataset, raise_if_exists=False, body={}):
+    def make_dataset(self, dataset, raise_if_exists=False, body=dict()):
         """Creates a new dataset with the default permissions.
 
         :param dataset:
diff --git a/luigi/contrib/dataproc.py b/luigi/contrib/dataproc.py
index e003a122f7..af2e41c592 100644
--- a/luigi/contrib/dataproc.py
+++ b/luigi/contrib/dataproc.py
@@ -56,7 +56,7 @@ def submit_job(self, job_config):
         self._job_id = self._job['reference']['jobId']
         return self._job
 
-    def submit_spark_job(self, jars, main_class, job_args=[]):
+    def submit_spark_job(self, jars, main_class, job_args=list()):
         job_config = {"job": {
             "placement": {
                 "clusterName": self.dataproc_cluster_name
@@ -72,7 +72,7 @@ def submit_spark_job(self, jars, main_class, job_args=[]):
         logger.info("Submitted new dataproc job:{} id:{}".format(self._job_name, self._job_id))
         return self._job
 
-    def submit_pyspark_job(self, job_file, extra_files=[], job_args=[]):
+    def submit_pyspark_job(self, job_file, extra_files=list(), job_args=list()):
         job_config = {"job": {
             "placement": {
                 "clusterName": self.dataproc_cluster_name
diff --git a/luigi/contrib/esindex.py b/luigi/contrib/esindex.py
index ede6429f61..8d18529ac2 100644
--- a/luigi/contrib/esindex.py
+++ b/luigi/contrib/esindex.py
@@ -117,7 +117,7 @@ class ElasticsearchTarget(luigi.Target):
 
     def __init__(self, host, port, index, doc_type, update_id,
                  marker_index_hist_size=0, http_auth=None, timeout=10,
-                 extra_elasticsearch_args={}):
+                 extra_elasticsearch_args=dict()):
         """
         :param host: Elasticsearch server host
         :type host: str
diff --git a/luigi/contrib/opener.py b/luigi/contrib/opener.py
index 9640c55164..4ef9fcdffd 100644
--- a/luigi/contrib/opener.py
+++ b/luigi/contrib/opener.py
@@ -69,7 +69,7 @@ class InvalidQuery(OpenerError):
 
 class OpenerRegistry(object):
 
-    def __init__(self, openers=[]):
+    def __init__(self, openers=list()):
         """An opener registry that stores a number of opener objects used
         to parse Target URIs
 
diff --git a/luigi/contrib/sqla.py b/luigi/contrib/sqla.py
index 72e35d3c16..3ed065e4d8 100644
--- a/luigi/contrib/sqla.py
+++ b/luigi/contrib/sqla.py
@@ -163,7 +163,7 @@ class SQLAlchemyTarget(luigi.Target):
     _engine_dict = {}  # dict of sqlalchemy engine instances
     Connection = collections.namedtuple("Connection", "engine pid")
 
-    def __init__(self, connection_string, target_table, update_id, echo=False, connect_args={}):
+    def __init__(self, connection_string, target_table, update_id, echo=False, connect_args=dict()):
         """
         Constructor for the SQLAlchemyTarget.
 
diff --git a/luigi/scheduler.py b/luigi/scheduler.py
index 47e53856ea..a94bd22d83 100644
--- a/luigi/scheduler.py
+++ b/luigi/scheduler.py
@@ -772,7 +772,7 @@ def add_task(self, task_id=None, status=PENDING, runnable=True,
                  deps=None, new_deps=None, expl=None, resources=None,
                  priority=0, family='', module=None, params=None, assistant=False,
                  tracking_url=None, worker=None, batchable=None,
-                 batch_id=None, retry_policy_dict={}, owners=None, **kwargs):
+                 batch_id=None, retry_policy_dict=dict(), owners=None, **kwargs):
         """
         * add task identified by task_id if it doesn't exist
         * if deps is not None, update dependency list
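Note on the semantics involved (a standalone sketch, not code from luigi): Python evaluates a default argument once, when the def statement runs, so a dict() or list() default names the same shared object across calls, exactly like the {} and [] literals it replaces here. When a genuinely fresh container per call is needed, the conventional idiom is a None sentinel resolved inside the body. The collect_* functions below are hypothetical, chosen only to make the behavior observable:

    def collect_shared(item, bucket=list()):  # one object for every call,
        bucket.append(item)                   # whether spelled [] or list()
        return bucket

    def collect_fresh(item, bucket=None):     # None-sentinel idiom
        if bucket is None:
            bucket = []                       # new list on each call
        bucket.append(item)
        return bucket

    print(collect_shared(1))  # [1]
    print(collect_shared(2))  # [1, 2] -- state carried over from the first call
    print(collect_fresh(1))   # [1]
    print(collect_fresh(2))   # [2]   -- fresh list each time

Whether the shared default matters for any given signature in this diff depends on whether the callee ever mutates the container; the sketch only shows why the two spellings are equivalent in that respect.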