From dfb24fdaf35fffbd2e104aff85e6db0b186978a4 Mon Sep 17 00:00:00 2001
From: Drew Banin
Date: Tue, 23 May 2017 14:52:27 -0400
Subject: [PATCH] colorized dbt output (#441)

colorize dbt output
---
 dbt/config.py            |  12 +-
 dbt/logger.py            |  36 +++++-
 dbt/main.py              |   7 +-
 dbt/runner.py            | 230 +++++++++------------------------------
 dbt/task/archive.py      |   5 +-
 dbt/task/run.py          |   4 +-
 dbt/task/test.py         |   4 +-
 dbt/ui/__init__.py       |   0
 dbt/ui/colors.py         |   9 ++
 dbt/ui/printer.py        | 221 +++++++++++++++++++++++++++++++++++++
 dbt/utils.py             |  17 ---
 requirements.txt         |   1 +
 setup.py                 |   1 +
 test/unit/test_config.py |  36 ++++--
 14 files changed, 365 insertions(+), 218 deletions(-)
 create mode 100644 dbt/ui/__init__.py
 create mode 100644 dbt/ui/colors.py
 create mode 100644 dbt/ui/printer.py

diff --git a/dbt/config.py b/dbt/config.py
index c365e6290d2..32f769e02a9 100644
--- a/dbt/config.py
+++ b/dbt/config.py
@@ -25,14 +25,12 @@ def read_profile(profiles_dir):
 def read_config(profiles_dir):
     profile = read_profile(profiles_dir)
 
-    return profile.get('config')
+    return profile.get('config', {})
 
 
-def send_anonymous_usage_stats(profiles_dir):
-    config = read_config(profiles_dir)
+def send_anonymous_usage_stats(config):
+    return config.get('send_anonymous_usage_stats', True)
 
-    if config is not None \
-       and not config.get("send_anonymous_usage_stats", True):
-        return False
 
-    return True
+def colorize_output(config):
+    return config.get('use_colors', True)
diff --git a/dbt/logger.py b/dbt/logger.py
index 8050109e064..5f9a94d955b 100644
--- a/dbt/logger.py
+++ b/dbt/logger.py
@@ -1,8 +1,11 @@
 import dbt.clients.system
+import dbt.compat
 import logging
 import os
 import sys
 
+import colorama
+
 # disable logs from other modules, excepting CRITICAL logs
 logging.getLogger('botocore').setLevel(logging.CRITICAL)
 logging.getLogger('contracts').setLevel(logging.CRITICAL)
@@ -10,8 +13,26 @@
 logging.getLogger('urllib3').setLevel(logging.CRITICAL)
 logging.getLogger('snowflake.connector').setLevel(logging.CRITICAL)
 
+
+# Colorama needs some help on Windows because we're using logger.info
+# instead of print(). If the Windows env doesn't have a TERM var set,
+# then we should override the logging stream to use the colorama
+# converter. If the TERM var is set (as with Git Bash), then it's safe
+# to send escape characters and no log handler injection is needed.
+colorama_stdout = sys.stdout
+colorama_wrap = True
+
+if sys.platform == 'win32' and not os.environ.get('TERM'):
+    colorama_wrap = False
+    colorama_stdout = colorama.AnsiToWin32(sys.stdout).stream
+
+elif sys.platform == 'win32':
+    colorama_wrap = False
+
+colorama.init(wrap=colorama_wrap)
+
 # create a global console logger for dbt
-stdout_handler = logging.StreamHandler(sys.stdout)
+stdout_handler = logging.StreamHandler(colorama_stdout)
 stdout_handler.setFormatter(logging.Formatter('%(message)s'))
 stdout_handler.setLevel(logging.INFO)
 
@@ -26,6 +47,16 @@ def make_log_dir_if_missing(log_dir):
     dbt.clients.system.make_directory(log_dir)
 
 
+class ColorFilter(logging.Filter):
+    def filter(self, record):
+        subbed = dbt.compat.to_string(record.msg)
+        for escape_sequence in dbt.ui.colors.COLORS.values():
+            subbed = subbed.replace(escape_sequence, '')
+        record.msg = subbed
+
+        return True
+
+
 def initialize_logger(debug_mode=False, path=None):
     global initialized, logger, stdout_handler
 
@@ -49,6 +80,9 @@ def initialize_logger(debug_mode=False, path=None):
             backupCount=7,
         )
 
+        color_filter = ColorFilter()
+        logdir_handler.addFilter(color_filter)
+
         logdir_handler.setFormatter(
             logging.Formatter('%(asctime)-18s: %(message)s'))
         logdir_handler.setLevel(logging.DEBUG)
diff --git a/dbt/main.py b/dbt/main.py
index 78367412647..3238638461b 100644
--- a/dbt/main.py
+++ b/dbt/main.py
@@ -19,6 +19,7 @@
 import dbt.tracking
 import dbt.config as config
 import dbt.adapters.cache as adapter_cache
+import dbt.ui.printer
 
 
 def main(args=None):
@@ -39,11 +40,15 @@ def handle(args):
 
     # this needs to happen after args are parsed so we can determine the
     # correct profiles.yml file
-    if not config.send_anonymous_usage_stats(parsed.profiles_dir):
+    profile_config = config.read_config(parsed.profiles_dir)
+    if not config.send_anonymous_usage_stats(profile_config):
         dbt.tracking.do_not_track()
     else:
         dbt.tracking.initialize_tracking()
 
+    if dbt.config.colorize_output(profile_config):
+        dbt.ui.printer.use_colors()
+
     res = run_from_args(parsed)
     dbt.tracking.flush()
 
diff --git a/dbt/runner.py b/dbt/runner.py
index cf2d759246f..156e0e5ec31 100644
--- a/dbt/runner.py
+++ b/dbt/runner.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
 import hashlib
 import psycopg2
 import os
@@ -19,15 +17,17 @@
 import dbt.schema
 import dbt.graph.selector
 import dbt.model
+import dbt.ui.printer
 
 from multiprocessing.dummy import Pool as ThreadPool
 
+
 ABORTED_TRANSACTION_STRING = ("current transaction is aborted, commands "
                               "ignored until end of transaction block")
 
-
-def get_timestamp():
-    return time.strftime("%H:%M:%S")
+INTERNAL_ERROR_STRING = """This is an error in dbt. Please try again. If \
+the error persists, open an issue at https://github.com/fishtown-analytics/dbt
+""".strip()
 
 
 def get_hash(model):
@@ -42,138 +42,34 @@ def is_enabled(model):
     return model.get('config', {}).get('enabled') is True
 
 
-def print_timestamped_line(msg):
-    logger.info("{} | {}".format(get_timestamp(), msg))
-
-
-def print_fancy_output_line(msg, status, index, total, execution_time=None):
-    prefix = "{timestamp} | {index} of {total} {message}".format(
-        timestamp=get_timestamp(),
-        index=index,
-        total=total,
-        message=msg)
-    justified = prefix.ljust(80, ".")
-
-    if execution_time is None:
-        status_time = ""
-    else:
-        status_time = " in {execution_time:0.2f}s".format(
-            execution_time=execution_time)
-
-    output = "{justified} [{status}{status_time}]".format(
-        justified=justified, status=status, status_time=status_time)
-
-    logger.info(output)
-
-
-def print_skip_line(model, schema, relation, index, num_models):
-    msg = 'SKIP relation {}.{}'.format(schema, relation)
-    print_fancy_output_line(msg, 'SKIP', index, num_models)
-
-
-def print_counts(flat_nodes):
-    counts = {}
-
-    for node in flat_nodes:
-        t = node.get('resource_type')
-
-        if node.get('resource_type') == NodeType.Model:
-            t = '{} {}'.format(get_materialization(node), t)
-
-        counts[t] = counts.get(t, 0) + 1
-
-    stat_line = ", ".join(
-        ["{} {}s".format(v, k) for k, v in counts.items()])
-
-    logger.info("")
-    print_timestamped_line("Running {}".format(stat_line))
-    print_timestamped_line("")
-
-
-def print_start_line(node, schema_name, index, total):
+def print_start_line(node, schema, index, total):
     if is_type(node, NodeType.Model):
-        print_model_start_line(node, schema_name, index, total)
+        dbt.ui.printer.print_model_start_line(node, schema, index, total)
 
     if is_type(node, NodeType.Test):
-        print_test_start_line(node, schema_name, index, total)
+        dbt.ui.printer.print_test_start_line(node, schema, index, total)
     if is_type(node, NodeType.Archive):
-        print_archive_start_line(node, index, total)
-
-
-def print_test_start_line(model, schema_name, index, total):
-    msg = "START test {name}".format(
-        name=model.get('name'))
-
-    print_fancy_output_line(msg, 'RUN', index, total)
-
-
-def print_model_start_line(model, schema_name, index, total):
-    msg = "START {model_type} model {schema}.{relation}".format(
-        model_type=get_materialization(model),
-        schema=schema_name,
-        relation=model.get('name'))
+        dbt.ui.printer.print_archive_start_line(node, index, total)
 
-    print_fancy_output_line(msg, 'RUN', index, total)
-
-def print_archive_start_line(model, index, total):
-    cfg = model.get('config', {})
-    msg = "START archive {source_schema}.{source_table} --> "\
-          "{target_schema}.{target_table}".format(**cfg)
-
-    print_fancy_output_line(msg, 'RUN', index, total)
-
-
-def print_result_line(result, schema_name, index, total):
+def print_result_line(result, schema, index, total):
     node = result.node
 
     if is_type(node, NodeType.Model):
-        print_model_result_line(result, schema_name, index, total)
+        dbt.ui.printer.print_model_result_line(result, schema, index, total)
     elif is_type(node, NodeType.Test):
-        print_test_result_line(result, schema_name, index, total)
+        dbt.ui.printer.print_test_result_line(result, schema, index, total)
     elif is_type(node, NodeType.Archive):
-        print_archive_result_line(result, index, total)
-
-
-def print_test_result_line(result, schema_name, index, total):
-    model = result.node
-    info = 'PASS'
-
-    if result.errored:
-        info = "ERROR"
-    elif result.status > 0:
-        info = 'FAIL {}'.format(result.status)
-        result.fail = True
-    elif result.status == 0:
-        info = 'PASS'
-    else:
-        raise RuntimeError("unexpected status: {}".format(result.status))
-
-    print_fancy_output_line(
-        "{info} {name}".format(
-            info=info,
-            name=model.get('name')),
-        info,
-        index,
-        total,
-        result.execution_time)
+        dbt.ui.printer.print_archive_result_line(result, index, total)
 
 
-def print_archive_result_line(result, index, total):
-    model = result.node
-    info = 'OK archived'
-
-    if result.errored:
-        info = 'ERROR archiving'
-
-    cfg = model.get('config', {})
+def print_results_line(results, execution_time):
+    nodes = [r.node for r in results]
+    stat_line = dbt.ui.printer.get_counts(nodes)
 
-    print_fancy_output_line(
-        "{info} {source_schema}.{source_table} --> "
-        "{target_schema}.{target_table}".format(info=info, **cfg),
-        result.status,
-        index,
-        total,
-        result.execution_time)
+    dbt.ui.printer.print_timestamped_line("")
+    dbt.ui.printer.print_timestamped_line(
+        "Finished running {stat_line} in {execution_time:0.2f}s."
+        .format(stat_line=stat_line, execution_time=execution_time))
 
 
 def execute_test(profile, test):
@@ -199,45 +95,6 @@ def execute_test(profile, test):
     return row[0]
 
 
-def print_model_result_line(result, schema_name, index, total):
-    model = result.node
-    info = 'OK created'
-
-    if result.errored:
-        info = 'ERROR creating'
-
-    print_fancy_output_line(
-        "{info} {model_type} model {schema}.{relation}".format(
-            info=info,
-            model_type=get_materialization(model),
-            schema=schema_name,
-            relation=model.get('name')),
-        result.status,
-        index,
-        total,
-        result.execution_time)
-
-
-def print_results_line(results, execution_time):
-    stats = {}
-
-    for result in results:
-        t = result.node.get('resource_type')
-
-        if result.node.get('resource_type') == NodeType.Model:
-            t = '{} {}'.format(get_materialization(result.node), t)
-
-        stats[t] = stats.get(t, 0) + 1
-
-    stat_line = ", ".join(
-        ["{} {}s".format(ct, t) for t, ct in stats.items()])
-
-    print_timestamped_line("")
-    print_timestamped_line(
-        "Finished running {stat_line} in {execution_time:0.2f}s."
-        .format(stat_line=stat_line, execution_time=execution_time))
-
-
 def execute_model(profile, model, existing):
     adapter = get_adapter(profile)
     schema = adapter.get_default_schema(profile)
@@ -545,8 +402,10 @@ def safe_execute_node(self, data):
                 dbt.exceptions.ProgrammingException,
                 psycopg2.ProgrammingError,
                 psycopg2.InternalError) as e:
-            error = "Error executing {filepath}\n{error}".format(
-                filepath=node.get('build_path'), error=str(e).strip())
+
+            prefix = "Error executing {}\n".format(node.get('build_path'))
+            error = "{}{}".format(dbt.ui.printer.red(prefix), str(e).strip())
+
             status = "ERROR"
             logger.debug(error)
             if type(e) == psycopg2.InternalError and \
                     ABORTED_TRANSACTION_STRING == e.diag.message_primary:
                 return RunModelResult(
                     node, error=ABORTED_TRANSACTION_STRING,
                     status="SKIP")
 
         except dbt.exceptions.InternalException as e:
-            error = ("Internal error executing {filepath}\n\n{error}"
-                     "\n\nThis is an error in dbt. Please try again. If "
-                     "the error persists, open an issue at "
-                     "https://github.com/fishtown-analytics/dbt").format(
-                filepath=node.get('build_path'),
-                error=str(e).strip())
+
+            build_path = node.get('build_path')
+            prefix = 'Internal error executing {}'.format(build_path)
+
+            error = "{prefix}\n{error}\n\n{note}".format(
+                prefix=dbt.ui.printer.red(prefix),
+                error=str(e).strip(),
+                note=INTERNAL_ERROR_STRING)
+
             logger.debug(error)
+            status = "ERROR"
 
         except Exception as e:
-            error = ("Unhandled error while executing {filepath}\n{error}"
-                     .format(
-                         filepath=node.get('build_path'),
-                         error=str(e).strip()))
+
+            prefix = "Unhandled error while executing {filepath}".format(
+                filepath=node.get('build_path'))
+
+            error = "{prefix}\n{error}".format(
+                prefix=dbt.ui.printer.red(prefix),
+                error=str(e).strip())
+
             logger.debug(error)
+
             raise e
 
         finally:
@@ -670,7 +538,12 @@ def execute_nodes(self, flat_graph, node_dependency_list, on_failure,
         pool = ThreadPool(num_threads)
 
         if should_execute:
-            print_counts(flat_nodes)
+            stat_line = dbt.ui.printer.get_counts(flat_nodes)
+            full_line = "Running {}".format(stat_line)
+
+            logger.info("")
+            dbt.ui.printer.print_timestamped_line(full_line)
+            dbt.ui.printer.print_timestamped_line("")
 
         start_time = time.time()
 
@@ -685,8 +558,9 @@ def get_idx(node):
         for node_list in node_dependency_list:
             for i, node in enumerate([node for node in node_list
                                       if node.get('skip')]):
-                print_skip_line(node, schema_name, node.get('name'),
-                                get_idx(node), num_nodes)
+                node_name = node.get('name')
+                dbt.ui.printer.print_skip_line(node, schema_name, node_name,
+                                               get_idx(node), num_nodes)
 
                 node_result = RunModelResult(node, skip=True)
                 node_results.append(node_result)
diff --git a/dbt/task/archive.py b/dbt/task/archive.py
index b62b8497bdf..e338458a195 100644
--- a/dbt/task/archive.py
+++ b/dbt/task/archive.py
@@ -1,5 +1,6 @@
 from dbt.runner import RunManager
 from dbt.logger import GLOBAL_LOGGER as logger  # noqa
+import dbt.ui.printer
 
 
 class ArchiveTask:
@@ -14,4 +15,6 @@ def run(self):
             self.args
         )
 
-        runner.run_archives(['*'], [])
+        results = runner.run_archives(['*'], [])
+
+        logger.info(dbt.ui.printer.get_run_status_line(results))
diff --git a/dbt/task/run.py b/dbt/task/run.py
index 6be95709b64..3b43fab7d37 100644
--- a/dbt/task/run.py
+++ b/dbt/task/run.py
@@ -2,7 +2,7 @@
 
 from dbt.logger import GLOBAL_LOGGER as logger
 from dbt.runner import RunManager
-import dbt.utils
+import dbt.ui.printer
 
 
 class RunTask:
@@ -17,4 +17,4 @@ def run(self):
 
         results = runner.run_models(self.args.models, self.args.exclude)
 
-        logger.info(dbt.utils.get_run_status_line(results))
+        logger.info(dbt.ui.printer.get_run_status_line(results))
diff --git a/dbt/task/test.py b/dbt/task/test.py
index 3605b0b8f74..621a94cabe5 100644
--- a/dbt/task/test.py
+++ b/dbt/task/test.py
@@ -1,6 +1,6 @@
 from dbt.runner import RunManager
 from dbt.logger import GLOBAL_LOGGER as logger  # noqa
-import dbt.utils
+import dbt.ui.printer
 
 
 class TestTask:
@@ -36,6 +36,6 @@ def run(self):
         else:
             raise RuntimeError("unexpected")
 
-        logger.info(dbt.utils.get_run_status_line(results))
+        logger.info(dbt.ui.printer.get_run_status_line(results))
 
         return results
diff --git a/dbt/ui/__init__.py b/dbt/ui/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dbt/ui/colors.py b/dbt/ui/colors.py
new file mode 100644
index 00000000000..798005f10cf
--- /dev/null
+++ b/dbt/ui/colors.py
@@ -0,0 +1,9 @@
+
+import colorama
+
+COLORS = {
+    'red': colorama.Fore.RED,
+    'green': colorama.Fore.GREEN,
+    'yellow': colorama.Fore.YELLOW,
+    'reset_all': colorama.Style.RESET_ALL
+}
diff --git a/dbt/ui/printer.py b/dbt/ui/printer.py
new file mode 100644
index 00000000000..af088a7d881
--- /dev/null
+++ b/dbt/ui/printer.py
@@ -0,0 +1,221 @@
+
+from dbt.logger import GLOBAL_LOGGER as logger
+from dbt.utils import get_materialization, NodeType
+import dbt.ui.colors
+
+import time
+
+USE_COLORS = False
+
+COLOR_FG_RED = dbt.ui.colors.COLORS['red']
+COLOR_FG_GREEN = dbt.ui.colors.COLORS['green']
+COLOR_FG_YELLOW = dbt.ui.colors.COLORS['yellow']
+COLOR_RESET_ALL = dbt.ui.colors.COLORS['reset_all']
+
+
+def use_colors():
+    global USE_COLORS
+    USE_COLORS = True
+
+
+def get_timestamp():
+    return time.strftime("%H:%M:%S")
+
+
+def color(text, color_code):
+    if USE_COLORS:
+        return "{}{}{}".format(color_code, text, COLOR_RESET_ALL)
+    else:
+        return text
+
+
+def green(text):
+    return color(text, COLOR_FG_GREEN)
+
+
+def yellow(text):
+    return color(text, COLOR_FG_YELLOW)
+
+
+def red(text):
+    return color(text, COLOR_FG_RED)
+
+
+def print_timestamped_line(msg):
+    logger.info("{} | {}".format(get_timestamp(), msg))
+
+
+def print_fancy_output_line(msg, status, index, total, execution_time=None):
+    prefix = "{timestamp} | {index} of {total} {message}".format(
+        timestamp=get_timestamp(),
+        index=index,
+        total=total,
+        message=msg)
+
+    justified = prefix.ljust(80, ".")
+
+    if execution_time is None:
+        status_time = ""
+    else:
+        status_time = " in {execution_time:0.2f}s".format(
+            execution_time=execution_time)
+
+    status_txt = status
+
+    output = "{justified} [{status}{status_time}]".format(
+        justified=justified, status=status_txt, status_time=status_time)
+
+    logger.info(output)
+
+
+def print_skip_line(model, schema, relation, index, num_models):
+    msg = 'SKIP relation {}.{}'.format(schema, relation)
+    print_fancy_output_line(msg, yellow('SKIP'), index, num_models)
+
+
+def get_counts(flat_nodes):
+    counts = {}
+
+    for node in flat_nodes:
+        t = node.get('resource_type')
+
+        if node.get('resource_type') == NodeType.Model:
+            t = '{} {}'.format(get_materialization(node), t)
+
+        counts[t] = counts.get(t, 0) + 1
+
+    stat_line = ", ".join(
+        ["{} {}s".format(v, k) for k, v in counts.items()])
+
+    return stat_line
+
+
+def print_test_start_line(model, schema_name, index, total):
+    msg = "START test {name}".format(
+        name=model.get('name'))
+
+    run = 'RUN'
+    print_fancy_output_line(msg, run, index, total)
+
+
+def print_model_start_line(model, schema_name, index, total):
+    msg = "START {model_type} model {schema}.{relation}".format(
+        model_type=get_materialization(model),
+        schema=schema_name,
+        relation=model.get('name'))
+
+    run = 'RUN'
+    print_fancy_output_line(msg, run, index, total)
+
+
+def print_archive_start_line(model, index, total):
+    cfg = model.get('config', {})
+    msg = "START archive {source_schema}.{source_table} --> "\
+          "{target_schema}.{target_table}".format(**cfg)
+
+    run = 'RUN'
+    print_fancy_output_line(msg, run, index, total)
+
+
+def print_test_result_line(result, schema_name, index, total):
+    model = result.node
+    info = 'PASS'
+
+    if result.errored:
+        info = "ERROR"
+        color = red
+
+    elif result.status > 0:
+        info = 'FAIL {}'.format(result.status)
+        color = red
+
+        result.fail = True
+    elif result.status == 0:
+        info = 'PASS'
+        color = green
+
+    else:
+        raise RuntimeError("unexpected status: {}".format(result.status))
+
+    print_fancy_output_line(
+        "{info} {name}".format(info=info, name=model.get('name')),
+        color(info),
+        index,
+        total,
+        result.execution_time)
+
+
+def get_printable_result(result, success, error):
+    if result.errored:
+        info = 'ERROR {}'.format(error)
+        status = red(result.status)
+    else:
+        info = 'OK {}'.format(success)
+        status = green(result.status)
+
+    return info, status
+
+
+def print_archive_result_line(result, index, total):
+    model = result.node
+
+    info, status = get_printable_result(result, 'archived', 'archiving')
+    cfg = model.get('config', {})
+
+    print_fancy_output_line(
+        "{info} {source_schema}.{source_table} --> "
+        "{target_schema}.{target_table}".format(info=info, **cfg),
+        status,
+        index,
+        total,
+        result.execution_time)
+
+
+def print_model_result_line(result, schema_name, index, total):
+    model = result.node
+
+    info, status = get_printable_result(result, 'created', 'creating')
+
+    print_fancy_output_line(
+        "{info} {model_type} model {schema}.{relation}".format(
+            info=info,
+            model_type=get_materialization(model),
+            schema=schema_name,
+            relation=model.get('name')),
+        status,
+        index,
+        total,
+        result.execution_time)
+
+
+def interpret_run_result(result):
+    if result.errored or result.failed:
+        return 'error'
+    elif result.skipped:
+        return 'skip'
+    else:
+        return 'pass'
+
+
+def get_run_status_line(results):
+    stats = {
+        'error': 0,
+        'skip': 0,
+        'pass': 0,
+        'total': 0,
+    }
+
+    for r in results:
+        result_type = interpret_run_result(r)
+        stats[result_type] += 1
+        stats['total'] += 1
+
+    if stats['error'] == 0:
+        message = green('Completed successfully')
+    else:
+        message = red('Completed with errors')
+
+    stats_line = "Done. PASS={pass} ERROR={error} SKIP={skip} TOTAL={total}"
+    stats_line = stats_line.format(**stats)
+
+    return "{}\n{}".format(message, stats_line)
diff --git a/dbt/utils.py b/dbt/utils.py
index 52e34842250..53045513105 100644
--- a/dbt/utils.py
+++ b/dbt/utils.py
@@ -209,7 +209,6 @@ def dependency_projects(project):
 
 
 def split_path(path):
-    norm = os.path.normpath(path)
     return path.split(os.sep)
 
 
@@ -276,22 +275,6 @@ def get_pseudo_hook_path(hook_name):
     return os.path.join(*path_parts)
 
 
-def get_run_status_line(results):
-    total = len(results)
-    errored = len([r for r in results if r.errored or r.failed])
-    skipped = len([r for r in results if r.skipped])
-    passed = total - errored - skipped
-
-    return (
-        "Done. PASS={passed} ERROR={errored} SKIP={skipped} TOTAL={total}"
-        .format(
-            total=total,
-            passed=passed,
-            errored=errored,
-            skipped=skipped
-        ))
-
-
 def get_nodes_by_tags(nodes, match_tags, resource_type):
     matched_nodes = []
     for node in nodes:
diff --git a/requirements.txt b/requirements.txt
index de5be026061..4ef8c490c92 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,3 +9,4 @@ snowplow-tracker==0.7.2
 celery==3.1.23
 voluptuous==0.9.3
 snowflake-connector-python==1.3.12
+colorama==0.3.9
diff --git a/setup.py b/setup.py
index b60f4272346..fd171657779 100644
--- a/setup.py
+++ b/setup.py
@@ -40,5 +40,6 @@
         'celery==3.1.23',
         'voluptuous==0.10.5',
         'snowflake-connector-python==1.3.16',
+        'colorama==0.3.9'
     ],
 )
diff --git a/test/unit/test_config.py b/test/unit/test_config.py
index 67556b6353a..e15ab695501 100644
--- a/test/unit/test_config.py
+++ b/test/unit/test_config.py
@@ -17,15 +17,15 @@ def set_up_empty_config(self):
         with open(profiles_path, 'w') as f:
             f.write(yaml.dump({}))
 
-    def set_up_config_options(self, send_anonymous_usage_stats=False):
+    def set_up_config_options(self, **kwargs):
         profiles_path = '{}/profiles.yml'.format(TMPDIR)
 
+        config = {
+            'config': kwargs
+        }
+
         with open(profiles_path, 'w') as f:
-            f.write(yaml.dump({
-                'config': {
-                    'send_anonymous_usage_stats': send_anonymous_usage_stats
-                }
-            }))
+            f.write(yaml.dump(config))
 
     def tearDown(self):
         profiles_path = '{}/profiles.yml'.format(TMPDIR)
@@ -37,12 +37,30 @@ def tearDown(self):
 
     def test__implicit_opt_in(self):
         self.set_up_empty_config()
-        self.assertTrue(dbt.config.send_anonymous_usage_stats(TMPDIR))
+        config = dbt.config.read_config(TMPDIR)
+        self.assertTrue(dbt.config.send_anonymous_usage_stats(config))
 
     def test__explicit_opt_out(self):
         self.set_up_config_options(send_anonymous_usage_stats=False)
-        self.assertFalse(dbt.config.send_anonymous_usage_stats(TMPDIR))
+        config = dbt.config.read_config(TMPDIR)
+        self.assertFalse(dbt.config.send_anonymous_usage_stats(config))
 
     def test__explicit_opt_in(self):
         self.set_up_config_options(send_anonymous_usage_stats=True)
-        self.assertTrue(dbt.config.send_anonymous_usage_stats(TMPDIR))
+        config = dbt.config.read_config(TMPDIR)
+        self.assertTrue(dbt.config.send_anonymous_usage_stats(config))
+
+    def test__implicit_colors(self):
+        self.set_up_empty_config()
+        config = dbt.config.read_config(TMPDIR)
+        self.assertTrue(dbt.config.colorize_output(config))
+
+    def test__explicit_colors_opt_out(self):
+        self.set_up_config_options(use_colors=False)
+        config = dbt.config.read_config(TMPDIR)
+        self.assertFalse(dbt.config.colorize_output(config))
+
+    def test__explicit_colors_opt_in(self):
+        self.set_up_config_options(use_colors=True)
+        config = dbt.config.read_config(TMPDIR)
+        self.assertTrue(dbt.config.colorize_output(config))
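
Notes (illustrative only, not part of the commit). A minimal sketch of how the new dbt.ui.printer helpers behave once this patch is applied, assuming dbt is importable and colorama 0.3.9 is installed. Colors are off until use_colors() is called, which handle() in dbt/main.py does when colorize_output() returns True:

    import dbt.ui.printer as printer

    # Nothing is colorized until use_colors() flips the module-level
    # USE_COLORS flag, mirroring what dbt.main.handle() does when
    # colorize_output() returns True.
    printer.use_colors()

    print(printer.green('PASS'))    # '\x1b[32mPASS\x1b[0m'
    print(printer.red('ERROR'))     # '\x1b[31mERROR\x1b[0m'
    print(printer.yellow('SKIP'))   # '\x1b[33mSKIP\x1b[0m'

To opt out, a user sets use_colors: false under the top-level config key of profiles.yml; colorize_output() reads that key with a default of True.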
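
A sketch of the summary line the tasks now log via get_run_status_line(). FakeResult is a hypothetical stand-in for the result objects produced by dbt.runner, carrying only the errored/failed/skipped attributes that interpret_run_result() inspects:

    import dbt.ui.printer as printer

    class FakeResult(object):
        # Hypothetical stub; real runs produce dbt.runner.RunModelResult.
        def __init__(self, errored=False, failed=False, skipped=False):
            self.errored = errored
            self.failed = failed
            self.skipped = skipped

    results = [FakeResult(), FakeResult(), FakeResult(skipped=True)]
    print(printer.get_run_status_line(results))
    # Completed successfully
    # Done. PASS=2 ERROR=0 SKIP=1 TOTAL=3

With colors enabled and at least one error, the first line would instead be the red 'Completed with errors'.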
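
The ColorFilter attached to the dbt.log file handler keeps ANSI escapes out of log files even when console colors are on. A standalone sketch of the same substitution it performs, using the COLORS table from dbt/ui/colors.py:

    import colorama

    COLORS = {
        'red': colorama.Fore.RED,
        'green': colorama.Fore.GREEN,
        'yellow': colorama.Fore.YELLOW,
        'reset_all': colorama.Style.RESET_ALL
    }

    msg = COLORS['green'] + 'Completed successfully' + COLORS['reset_all']
    for escape_sequence in COLORS.values():
        msg = msg.replace(escape_sequence, '')

    print(msg)  # 'Completed successfully' -- no escape codes reach dbt.log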