diff --git a/setup.py b/setup.py
index 1f6e4d17..44a49208 100755
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
         "pendulum==1.2.0",
         "singer-python==5.9.0",
         "sqlalchemy<2.0.0",
-        "pyodbc==4.0.35",
+        "pyodbc==4.0.26",
         "backoff==1.8.0",
         "MarkupSafe==2.0.1",
         "jinja2==2.11.3",
diff --git a/tap_mssql/sync_strategies/full_table.py b/tap_mssql/sync_strategies/full_table.py
index 83344a16..d8529c4a 100755
--- a/tap_mssql/sync_strategies/full_table.py
+++ b/tap_mssql/sync_strategies/full_table.py
@@ -85,7 +85,7 @@ def sync_table(mssql_conn, config, catalog_entry, state, columns, stream_version
     ):
         singer.write_message(activate_version_message)
 
-    with mssql_conn.connect().execution_options(stream_results=True) as open_conn:
+    with mssql_conn.connect() as open_conn:
         LOGGER.info("Generating select_sql")
         params = {}
 
@@ -101,10 +101,14 @@ def sync_table(mssql_conn, config, catalog_entry, state, columns, stream_version
         query_df = df = pd.DataFrame(columns=columns) #TODO: delete?
         time_extracted = utils.now() #TODO: delete?
 
+        conn = mssql_conn.connect().execution_options(stream_results=True)
+
+        csv_saved = 0
+
         chunk_size = config.get("fastsync_batch_rows") #TODO: update this so that its not required (if not set, fastsync disabled)
         files = []
-        csv_saved = 0
-        for chunk_dataframe in pd.read_sql(select_sql, open_conn, chunksize=chunk_size):
+
+        for chunk_dataframe in pd.read_sql(select_sql, conn, chunksize=chunk_size):
             csv_saved += 1
 
             filename = gen_export_filename(table=table_stream)