diff --git a/pyproject.toml b/pyproject.toml index b66c474a6b8..fad1b990731 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -241,7 +241,6 @@ ignore = [ "B011", # 7 occurences [*] assert-false "PT015", # 7 occurences [ ] pytest-assert-always-false "N815", # 7 occurences [ ] mixed-case-variable-in-class-scope - "A001", # 6 occurences [ ] builtin-variable-shadowing "PT006", # 6 occurences [*] pytest-parametrize-names-wrong-type "RET504", # 6 occurences [*] unnecessary-assign "N803", # 6 occurences [ ] invalid-argument-name @@ -254,12 +253,7 @@ ignore = [ "B006", # 4 occurences [*] mutable-argument-default "PIE794", # 4 occurences [*] duplicate-class-field-definition "PTH103", # 4 occurences [ ] os-makedirs - "E711", # 4 occurences [*] none-comparison - "E721", # 4 occurences [ ] type-comparison "PLW2901", # 4 occurences [ ] redefined-loop-name - "RUF003", # 4 occurences [ ] ambiguous-unicode-character-comment - "B904", # 3 occurences [ ] raise-without-from-inside-except - "A002", # 3 occurences [ ] builtin-argument-shadowing "PTH112", # 3 occurences [ ] os-path-isdir "W291", # 3 occurences [*] trailing-whitespace "RUF017", # 3 occurences [*] quadratic-list-summation @@ -268,34 +262,23 @@ ignore = [ "S605", # 2 occurences [ ] start-process-with-a-shell "C408", # 2 occurences [*] unnecessary-collection-call "C416", # 2 occurences [*] unnecessary-comprehension - "FIX003", # 2 occurences [ ] line-contains-xxx "PIE810", # 2 occurences [*] multiple-starts-ends-with "T203", # 2 occurences [*] p-print - "ARG005", # 2 occurences [ ] unused-lambda-argument "PTH100", # 2 occurences [ ] os-path-abspath "PTH109", # 2 occurences [ ] os-getcwd - "D210", # 2 occurences [ ] surrounding-whitespace "FURB129", # 2 occurences [*] readlines-in-for - "RUF002", # 2 occurences [ ] ambiguous-unicode-character-docstring - "ASYNC251", # 1 occurences [ ] blocking-sleep-in-async-function - "S108", # 1 occurences [ ] hardcoded-temp-file - "S110", # 1 occurences [ ] try-except-pass "C400", # 1 
occurences [*] unnecessary-generator-list "C401", # 1 occurences [*] unnecessary-generator-set - "SIM105", # 1 occurences [ ] suppressible-exception - "TD005", # 1 occurences [ ] missing-todo-description - "PTH113", # 1 occurences [ ] os-path-isfile - "F403", # 1 occurences [ ] undefined-local-with-import-star - "PLC0206", # 1 occurences [ ] dict-index-missing-items - "PLR0133", # 1 occurences [ ] comparison-of-constant - "RUF006", # 1 occurences [ ] asyncio-dangling-task - # keep those rules + # keep those exceptions + "FIX003", # line-contains-xxx: freedom of speech! + "FIX004", # line-contains-hack: freedom of speech! "PLR1730", # if-stmt-min-max: not clear that it makes the code easier to read "RET506", # superfluous-else-raise: requires a slightly higher cognitive effort to understand the code "RET507", # superfluous-else-continue : requires a slightly higher cognitive effort to understand the code "RET508", # superfluous-else-break: requires a slightly higher cognitive effort to understand the code "RET505", # superfluous-else-return: requires a slightly higher cognitive effort to understand the code + "S108", # hardcoded-temp-file: test code may contain weird things ] "utils/build/*" = ["ALL"] "lib-injection/*" = ["ALL"] diff --git a/tests/appsec/rasp/utils.py b/tests/appsec/rasp/utils.py index 1bffabb8966..28d9e12786b 100644 --- a/tests/appsec/rasp/utils.py +++ b/tests/appsec/rasp/utils.py @@ -84,20 +84,20 @@ def find_series(is_metrics: bool, namespace, metric): return series -def validate_metric(name, type, metric): +def validate_metric(name, metric_type, metric): return ( metric.get("metric") == name and metric.get("type") == "count" - and f"rule_type:{type}" in metric.get("tags", ()) + and f"rule_type:{metric_type}" in metric.get("tags", ()) and any(s.startswith("waf_version:") for s in metric.get("tags", ())) ) -def validate_metric_variant(name, type, variant, metric): +def validate_metric_variant(name, metric_type, variant, metric): return ( 
metric.get("metric") == name and metric.get("type") == "count" - and f"rule_type:{type}" in metric.get("tags", ()) + and f"rule_type:{metric_type}" in metric.get("tags", ()) and f"rule_variant:{variant}" in metric.get("tags", ()) and any(s.startswith("waf_version:") for s in metric.get("tags", ())) ) diff --git a/tests/appsec/waf/test_addresses.py b/tests/appsec/waf/test_addresses.py index 0dd40d46527..508e78320aa 100644 --- a/tests/appsec/waf/test_addresses.py +++ b/tests/appsec/waf/test_addresses.py @@ -338,8 +338,8 @@ def test_basic(self): for r in self.requests: try: interfaces.library.assert_waf_attack(r, address="grpc.server.request.message") - except: - raise ValueError(f"Basic attack #{self.requests.index(r)} not detected") + except Exception as e: + raise ValueError(f"Basic attack #{self.requests.index(r)} not detected") from e @rfc("https://datadoghq.atlassian.net/wiki/spaces/APS/pages/2278064284/gRPC+Protocol+Support") diff --git a/tests/appsec/waf/test_reports.py b/tests/appsec/waf/test_reports.py index dad8fcd74d9..03167ddc1e4 100644 --- a/tests/appsec/waf/test_reports.py +++ b/tests/appsec/waf/test_reports.py @@ -107,10 +107,10 @@ def validate_rules_monitoring_span_tags(span): raise Exception("if there are rule errors, there should be rule error details too") try: json.loads(meta[expected_rules_errors_meta_tag]) - except ValueError: + except ValueError as e: raise Exception( f"rule error details should be valid JSON but was `{meta[expected_rules_errors_meta_tag]}`" - ) + ) from e return True diff --git a/tests/appsec/waf/test_telemetry.py b/tests/appsec/waf/test_telemetry.py index 71c1fea4a83..e5ad0f353cc 100644 --- a/tests/appsec/waf/test_telemetry.py +++ b/tests/appsec/waf/test_telemetry.py @@ -17,7 +17,7 @@ def _setup(self): r_blocked = weblog.get( "/", headers={"x-forwarded-for": "80.80.80.80", "user-agent": "dd-test-scanner-log-block"}, - # XXX: hack to prevent rid inhibiting the dd-test-scanner-log-block rule + # Hack to prevent rid inhibiting 
the dd-test-scanner-log-block rule rid_in_user_agent=False, ) Test_TelemetryMetrics.__common_setup_done = True diff --git a/tests/auto_inject/test_auto_inject_install.py b/tests/auto_inject/test_auto_inject_install.py index a60503c5a16..1896dbe825c 100644 --- a/tests/auto_inject/test_auto_inject_install.py +++ b/tests/auto_inject/test_auto_inject_install.py @@ -149,8 +149,8 @@ def test_crash_no_zombie(self, virtual_machine): @scenarios.installer_auto_injection class TestInstallerAutoInjectManual(base.AutoInjectBaseTest): # Note: uninstallation of a single installer package is not available today - #  on the installer. As we can't only uninstall the injector, we are skipping - #  the uninstall test today + # on the installer. As we can't only uninstall the injector, we are skipping + # the uninstall test today @parametrize_virtual_machines( bugs=[ {"vm_name": "AlmaLinux_8_arm64", "weblog_variant": "test-app-python-alpine", "reason": "APMON-1576"}, diff --git a/tests/auto_inject/utils.py b/tests/auto_inject/utils.py index dc36cd23db3..83ec2497be7 100644 --- a/tests/auto_inject/utils.py +++ b/tests/auto_inject/utils.py @@ -147,8 +147,8 @@ def _test_uninstall(self, virtual_machine): start_weblog_command = virtual_machine._vm_provision.weblog_installation.remote_command else: # Container stop_weblog_command = "sudo -E docker-compose -f docker-compose.yml down" - #   On older Docker versions, the network recreation can hang. The solution is to restart Docker. - #   https://github.com/docker-archive/classicswarm/issues/1931 + # On older Docker versions, the network recreation can hang. The solution is to restart Docker. 
+ # https://github.com/docker-archive/classicswarm/issues/1931 start_weblog_command = "sudo systemctl restart docker && sudo -E docker-compose -f docker-compose.yml up --wait --wait-timeout 120" install_command = "sudo datadog-installer apm instrument" diff --git a/tests/debugger/test_debugger_exception_replay.py b/tests/debugger/test_debugger_exception_replay.py index c90986a02b0..6c3cc4d16e1 100644 --- a/tests/debugger/test_debugger_exception_replay.py +++ b/tests/debugger/test_debugger_exception_replay.py @@ -406,8 +406,7 @@ def setup_exception_replay_rockpaperscissors(self): shapes = {"rock": False, "paper": False, "scissors": False} while not all(shapes.values()) and retries < _max_retries: - for shape in shapes: - shape_found = shapes[shape] + for shape, shape_found in shapes.items(): logger.debug(f"{shape} found: {shape_found}, retry #{retries}") if shape_found: diff --git a/tests/docker_ssi/test_docker_ssi.py b/tests/docker_ssi/test_docker_ssi.py index 171c4f52a97..b1c846b0d88 100644 --- a/tests/docker_ssi/test_docker_ssi.py +++ b/tests/docker_ssi/test_docker_ssi.py @@ -112,7 +112,7 @@ def test_telemetry_abort(self): inject_result = False break - assert inject_result != None, "No telemetry data found for inject.success, inject.skip or inject.error" + assert inject_result is not None, "No telemetry data found for inject.success, inject.skip or inject.error" # The injector detected by itself that the version is not supported if inject_result == False: diff --git a/tests/fuzzer/core.py b/tests/fuzzer/core.py index daf4ad5d43f..d2db0610198 100644 --- a/tests/fuzzer/core.py +++ b/tests/fuzzer/core.py @@ -10,7 +10,6 @@ from logging.handlers import RotatingFileHandler import os import signal -import time import aiohttp from yarl import URL @@ -95,7 +94,7 @@ def __init__( self.dump_on_status = dump_on_status self.enable_response_dump = False - self.systematic_exporter = _RequestDumper() if systematic_export else lambda x: 0 + self.systematic_exporter = 
_RequestDumper() if systematic_export else lambda _: 0 self.total_metric = AccumulatedMetric("#", format_string="#{value}", display_length=7, has_raw_value=False) self.memory_metric = NumericalMetric("Mem") @@ -154,7 +153,7 @@ async def wait_for_first_response(self): self.logger.info(f"First response received after {i} attempts") return - time.sleep(1) + await asyncio.sleep(1) raise Exception("Server does not respond") finally: @@ -164,10 +163,10 @@ def run_forever(self): self.logger.info("") self.logger.info("=" * 80) - asyncio.ensure_future(self._run(), loop=self.loop) + task = asyncio.ensure_future(self._run(), loop=self.loop) self.loop.add_signal_handler(signal.SIGINT, self.perform_armageddon) self.logger.info("Starting event loop") - self.loop.run_forever() + self.loop.run_until_complete(task) def perform_armageddon(self): self.finished = True @@ -258,7 +257,7 @@ async def _run(self): task = self.loop.create_task(self._process(session, request)) tasks.add(task) task.add_done_callback(tasks.remove) - task.add_done_callback(lambda t: self.sem.release()) + task.add_done_callback(lambda _: self.sem.release()) request_id += 1 diff --git a/tests/fuzzer/corpus.py b/tests/fuzzer/corpus.py index a77cb7a5a12..6259098cc2d 100644 --- a/tests/fuzzer/corpus.py +++ b/tests/fuzzer/corpus.py @@ -5,6 +5,7 @@ import os import sys import json +from pathlib import Path from tests.fuzzer.tools.random_strings import get_random_unicode as gru @@ -165,9 +166,9 @@ def _load_dir(base_dirname): if filename.endswith(".json") or filename.endswith(".dump"): _load_file(os.path.join(base_dirname, filename)) - if os.path.isfile(source): + if Path(source).is_file(): _load_file(source) - elif os.path.isdir(source): + elif Path(source).is_dir(): _load_dir(source) else: raise ValueError(f"{source} is not a file or a dir") diff --git a/tests/fuzzer/request_mutator.py b/tests/fuzzer/request_mutator.py index 478e45a5421..79f4f3ff1cb 100644 --- a/tests/fuzzer/request_mutator.py +++ 
b/tests/fuzzer/request_mutator.py @@ -90,7 +90,7 @@ def _mutate_item(item): item = random.choice((True, False)) else: - # TODO + # TODO: other use cases pass return item diff --git a/tests/integrations/test_db_integrations_sql.py b/tests/integrations/test_db_integrations_sql.py index a7efd66f56a..43934bcb867 100644 --- a/tests/integrations/test_db_integrations_sql.py +++ b/tests/integrations/test_db_integrations_sql.py @@ -273,7 +273,7 @@ class Test_MsSql(_BaseDatadogDbIntegrationTestClass): @missing_feature(library="nodejs", reason="Not implemented yet") def test_db_mssql_instance_name(self): """The Microsoft SQL Server instance name connecting to. This name is used to determine the port of a named instance. - This value should be set only if it’s specified on the mssql connection string. + This value should be set only if it's specified on the mssql connection string. """ for db_operation, span in self.get_spans(): diff --git a/tests/integrations/test_dsm.py b/tests/integrations/test_dsm.py index 074bd255521..839a3348d43 100644 --- a/tests/integrations/test_dsm.py +++ b/tests/integrations/test_dsm.py @@ -471,7 +471,7 @@ def test_dsmcontext_injection_base64(self): @features.datastreams_monitoring_support_for_base64_encoding @scenarios.integrations class Test_DsmContext_Extraction_Base64: - """Verify DSM context is extracted using "dd-pathway-ctx-base64" """ + """Verify DSM context is extracted using dd-pathway-ctx-base64""" def setup_dsmcontext_extraction_base64(self): topic = "dsm-injection-topic" diff --git a/tests/integrations/test_inferred_proxy.py b/tests/integrations/test_inferred_proxy.py index 5a83a1bc738..596eb813239 100644 --- a/tests/integrations/test_inferred_proxy.py +++ b/tests/integrations/test_inferred_proxy.py @@ -8,7 +8,7 @@ @features.aws_api_gateway_inferred_span_creation @scenarios.integrations class Test_AWS_API_Gateway_Inferred_Span_Creation: - """Verify DSM context is extracted using "dd-pathway-ctx-base64" """ + """Verify DSM context is 
extracted using dd-pathway-ctx-base64""" start_time = round(time.time() * 1e3) start_time_ns = start_time * 1e6 diff --git a/tests/integrations/test_open_telemetry.py b/tests/integrations/test_open_telemetry.py index d9825ce8a8d..2c26e0c1c92 100644 --- a/tests/integrations/test_open_telemetry.py +++ b/tests/integrations/test_open_telemetry.py @@ -162,7 +162,7 @@ class Test_MsSql(_BaseOtelDbIntegrationTestClass): ) def test_db_mssql_instance_name(self): """The Microsoft SQL Server instance name connecting to. This name is used to determine the port of a named instance. - This value should be set only if it’s specified on the mssql connection string. + This value should be set only if it's specified on the mssql connection string. """ for db_operation, request in self.get_requests(): span = self.get_span_from_agent(request) diff --git a/tests/parametric/conftest.py b/tests/parametric/conftest.py index bee7ae7de08..539c2415214 100644 --- a/tests/parametric/conftest.py +++ b/tests/parametric/conftest.py @@ -111,9 +111,9 @@ def __init__(self, base_url: str, pytest_request: None): def _url(self, path: str) -> str: return urllib.parse.urljoin(self._base_url, path) - def _write_log(self, type, json_trace): + def _write_log(self, log_type, json_trace): with open(self.log_path, "a") as log: - log.write(f"\n{type}>>>>\n") + log.write(f"\n{log_type}>>>>\n") log.write(json.dumps(json_trace)) def traces(self, clear=False, **kwargs): @@ -131,15 +131,14 @@ def set_remote_config(self, path, payload): def get_remote_config(self): resp = self._session.get(self._url("/v0.7/config")) resp_json = resp.json() - list = [] + result = [] if resp_json and resp_json["target_files"]: target_files = resp_json["target_files"] for target in target_files: path = target["path"] msg = json.loads(str(base64.b64decode(target["raw"]), encoding="utf-8")) - dict = {"path": path, "msg": msg} - list.append(dict) - return list + result.append({"path": path, "msg": msg}) + return result def 
add_remote_config(self, path, payload): current_rc = self.get_remote_config() @@ -181,18 +180,18 @@ def _build_config_path_response(config: list): client_configs = [] target_files = [] targets_tmp = {} - for dict in config: - client_configs.append(dict["path"]) - dict["msg_enc"] = bytes(json.dumps(dict["msg"]), encoding="utf-8") + for item in config: + client_configs.append(item["path"]) + item["msg_enc"] = bytes(json.dumps(item["msg"]), encoding="utf-8") tf = { - "path": dict["path"], - "raw": str(base64.b64encode(dict["msg_enc"]), encoding="utf-8"), + "path": item["path"], + "raw": str(base64.b64encode(item["msg_enc"]), encoding="utf-8"), } target_files.append(tf) - targets_tmp[dict["path"]] = { + targets_tmp[item["path"]] = { "custom": {"c": [""], "v": 0}, - "hashes": {"sha256": hashlib.sha256(dict["msg_enc"]).hexdigest()}, - "length": len(dict["msg_enc"]), + "hashes": {"sha256": hashlib.sha256(item["msg_enc"]).hexdigest()}, + "length": len(item["msg_enc"]), } data = { @@ -520,7 +519,7 @@ def docker_network(test_id: str) -> Generator[str, None, None]: # It's possible (why?) of having some container not stopped. # If it happen, failing here makes stdout tough to understance. 
# Let's ignore this, later calls will clean the mess - pass + logger.info("Failed to remove network, ignoring the error") @pytest.fixture diff --git a/tests/parametric/test_headers_tracecontext.py b/tests/parametric/test_headers_tracecontext.py index ded74209f66..65bfe215edb 100644 --- a/tests/parametric/test_headers_tracecontext.py +++ b/tests/parametric/test_headers_tracecontext.py @@ -931,7 +931,7 @@ def test_tracestate_w3c_context_leak(self, test_agent, test_library): ) assert case1["meta"].get("_dd.p.tid") == "3333333333333333" - assert case2["meta"].get("_dd.p.tid") == None + assert case2["meta"].get("_dd.p.tid") is None @temporary_enable_optin_tracecontext() def test_tracestate_all_allowed_characters(self, test_agent, test_library): diff --git a/tests/parametric/test_library_tracestats.py b/tests/parametric/test_library_tracestats.py index dd4e3300a74..5b569ce4e4d 100644 --- a/tests/parametric/test_library_tracestats.py +++ b/tests/parametric/test_library_tracestats.py @@ -105,33 +105,33 @@ def test_distinct_aggregationkeys_TS003(self, library_env, test_agent, test_libr name = "name" resource = "resource" service = "service" - type = "http" + span_type = "http" http_status_code = "200" origin = "rum" with test_library: # Baseline - with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=type) as span: + with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=span_type) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) # Unique Name with test_library.dd_start_span( - name="unique-name", resource=resource, service=service, typestr=type + name="unique-name", resource=resource, service=service, typestr=span_type ) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) # Unique Resource with test_library.dd_start_span( - name=name, resource="unique-resource", service=service, 
typestr=type + name=name, resource="unique-resource", service=service, typestr=span_type ) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) # Unique Service with test_library.dd_start_span( - name=name, resource=resource, service="unique-service", typestr=type + name=name, resource=resource, service="unique-service", typestr=span_type ) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) @@ -144,12 +144,12 @@ def test_distinct_aggregationkeys_TS003(self, library_env, test_agent, test_libr span.set_meta(key="http.status_code", val=http_status_code) # Unique Synthetics - with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=type) as span: + with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=span_type) as span: span.set_meta(key="_dd.origin", val="synthetics") span.set_meta(key="http.status_code", val=http_status_code) # Unique HTTP Status Code - with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=type) as span: + with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=span_type) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val="400") @@ -397,16 +397,16 @@ def test_metrics_computed_after_span_finsh_TS009(self, library_env, test_agent, name = "name" resource = "resource" service = "service" - type = "http" + span_type = "http" http_status_code = "200" origin = "synthetics" with test_library: - with test_library.dd_start_span(name=name, service=service, resource=resource, typestr=type) as span: + with test_library.dd_start_span(name=name, service=service, resource=resource, typestr=span_type) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) - with test_library.dd_start_span(name=name, service=service, 
resource=resource, typestr=type) as span2: + with test_library.dd_start_span(name=name, service=service, resource=resource, typestr=span_type) as span2: span2.set_meta(key="_dd.origin", val=origin) span2.set_meta(key="http.status_code", val=http_status_code) diff --git a/tests/test_config_consistency.py b/tests/test_config_consistency.py index 7c6ce6c3c97..8234ed0100a 100644 --- a/tests/test_config_consistency.py +++ b/tests/test_config_consistency.py @@ -205,7 +205,7 @@ def test_status_code_500(self): client_span = _get_span_by_tags(spans, tags={"span.kind": "client", "http.status_code": "500"}) assert client_span, spans - assert client_span.get("error") == None or client_span.get("error") == 0 + assert client_span.get("error") is None or client_span.get("error") == 0 @scenarios.tracing_config_nondefault diff --git a/tests/test_data_integrity.py b/tests/test_data_integrity.py index a7297f06f46..8a1b1a8eb16 100644 --- a/tests/test_data_integrity.py +++ b/tests/test_data_integrity.py @@ -62,8 +62,8 @@ def validator(data): if header.lower() == "x-datadog-trace-count": try: trace_count = int(value) - except ValueError: - raise ValueError(f"'x-datadog-trace-count' request header is not an integer: {value}") + except ValueError as e: + raise ValueError(f"'x-datadog-trace-count' request header is not an integer: {value}") from e if trace_count != len(data["request"]["content"]): raise ValueError("x-datadog-trace-count request header didn't match the number of traces") diff --git a/tests/test_distributed.py b/tests/test_distributed.py index ea149d2e141..c050c4401ed 100644 --- a/tests/test_distributed.py +++ b/tests/test_distributed.py @@ -209,7 +209,7 @@ def test_span_links_omit_tracestate_from_conflicting_contexts(self): links = _retrieve_span_links(span) assert len(links) == 1 link1 = links[0] - assert link1.get("tracestate") == None + assert link1.get("tracestate") is None def _retrieve_span_links(span): diff --git a/tests/test_graphql.py b/tests/test_graphql.py index 
ee8812ead2f..fb9bddcf2c8 100644 --- a/tests/test_graphql.py +++ b/tests/test_graphql.py @@ -54,12 +54,12 @@ def test_execute_error_span_event(self): attributes = event["attributes"] - assert type(attributes["message"]) == str - assert type(attributes["type"]) == str - assert type(attributes["stacktrace"]) == str + assert isinstance(attributes["message"], str) + assert isinstance(attributes["type"], str) + assert isinstance(attributes["stacktrace"], str) for path in attributes["path"]: - assert type(path) == str + assert isinstance(path, str) for location in attributes["locations"]: assert len(location.split(":")) == 2 diff --git a/tests/test_library_conf.py b/tests/test_library_conf.py index 60d8d172fd6..24e285eee2a 100644 --- a/tests/test_library_conf.py +++ b/tests/test_library_conf.py @@ -3,7 +3,29 @@ # Copyright 2021 Datadog, Inc. from utils import weblog, interfaces, scenarios, features, missing_feature -from utils._context.header_tag_vars import * +from utils._context.header_tag_vars import ( + CONFIG_COLON_LEADING, + CONFIG_COLON_TRAILING, + HEADER_NAME_COLON_LEADING, + HEADER_NAME_COLON_TRAILING, + HEADER_NAME_LONG, + HEADER_NAME_SHORT, + HEADER_NAME_WHITESPACE_HEADER, + HEADER_NAME_WHITESPACE_TAG, + HEADER_NAME_WHITESPACE_VAL_LONG, + HEADER_NAME_WHITESPACE_VAL_SHORT, + HEADER_VAL_BASIC, + HEADER_VAL_WHITESPACE_VAL_LONG, + HEADER_VAL_WHITESPACE_VAL_SHORT, + TAG_COLON_LEADING, + TAG_COLON_TRAILING, + TAG_LONG, + TAG_SHORT, + TAG_WHITESPACE_HEADER, + TAG_WHITESPACE_TAG, + TAG_WHITESPACE_VAL_LONG, + TAG_WHITESPACE_VAL_SHORT, +) from utils import remote_config as rc import json @@ -252,8 +274,8 @@ def get_rc_params(self, header_tags): "service_target": {"service": "weblog", "env": "system-tests"}, "lib_config": header_tags, } - id = hash(json.dumps(config)) - return f"datadog/2/APM_TRACING/{id}/config", config + rc_id = hash(json.dumps(config)) + return f"datadog/2/APM_TRACING/{rc_id}/config", config # The Datadog specific tracecontext flags to mark flags are 
set diff --git a/tests/test_the_test/test_json_report.py b/tests/test_the_test/test_json_report.py index db721d246ac..352a66d0961 100644 --- a/tests/test_the_test/test_json_report.py +++ b/tests/test_the_test/test_json_report.py @@ -141,7 +141,7 @@ def test_logs(self): class Test_Mock: def test_mock(self): """Mock test doc""" - assert 1 == 1 + assert 1 == 1 # noqa: PLR0133 @missing_feature(True, reason="not yet done") @features.app_client_configuration_change_event