diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py
index 40ba6531573..6d95455e98f 100644
--- a/core/dbt/contracts/graph/manifest.py
+++ b/core/dbt/contracts/graph/manifest.py
@@ -15,12 +15,13 @@ from dbt.contracts.graph.parsed import (
     ParsedNode,
     ParsedMacro,
     ParsedDocumentation,
     ParsedNodePatch,
-    ParsedSourceDefinition
+    ParsedMacroPatch, ParsedSourceDefinition
 )
 from dbt.contracts.graph.compiled import CompileResultNode
 from dbt.contracts.util import Writable, Replaceable
 from dbt.exceptions import (
-    raise_duplicate_resource_name, InternalException, raise_compiler_error
+    raise_duplicate_resource_name, InternalException, raise_compiler_error,
+    warn_or_error
 )
 from dbt.include.global_project import PACKAGES
 from dbt.logger import GLOBAL_LOGGER as logger
@@ -31,6 +32,7 @@ import dbt.utils
 
 
 NodeEdgeMap = Dict[str, List[str]]
+MacroKey = Tuple[str, str]
 
 
 @dataclass
@@ -137,6 +139,8 @@ class SourceFile(JsonSchemaMixin):
     sources: List[str] = field(default_factory=list)
     # any node patches in this file. The entries are names, not unique ids!
     patches: List[str] = field(default_factory=list)
+    # any macro patches in this file. The entries are package, name pairs.
+    macro_patches: List[MacroKey] = field(default_factory=list)
 
     @property
     def search_key(self) -> Optional[str]:
@@ -580,7 +584,26 @@ def add_nodes(self, new_nodes):
                 raise_duplicate_resource_name(node, self.nodes[unique_id])
             self.nodes[unique_id] = node
 
-    def patch_nodes(self, patches: MutableMapping[str, ParsedNodePatch]):
+    def patch_macros(
+        self, patches: MutableMapping[MacroKey, ParsedMacroPatch]
+    ) -> None:
+        for macro in self.macros.values():
+            key = (macro.package_name, macro.name)
+            patch = patches.pop(key, None)
+            if not patch:
+                continue
+            macro.patch(patch)
+
+        if patches:
+            for patch in patches.values():
+                warn_or_error(
+                    f'WARNING: Found documentation for macro "{patch.name}" '
+                    f'which was not found'
+                )
+
+    def patch_nodes(
+        self, patches: MutableMapping[str, ParsedNodePatch]
+    ) -> None:
         """Patch nodes with the given dict of patches. Note that this consumes
         the input!
         This relies on the fact that all nodes have unique _name_ fields, not
@@ -593,7 +616,7 @@ def patch_nodes(self, patches: MutableMapping[str, ParsedNodePatch]):
         for node in self.nodes.values():
             if node.resource_type == NodeType.Source:
                 continue
-            # we know this because of the check above
+            # appease mypy - we know this because of the check above
             assert not isinstance(node, ParsedSourceDefinition)
             patch = patches.pop(node.name, None)
             if not patch:
@@ -629,7 +652,7 @@ def patch_nodes(self, patches: MutableMapping[str, ParsedNodePatch]):
                 # since patches aren't nodes, we can't use the existing
                 # target_not_found warning
                 logger.debug((
-                    'WARNING: Found documentation for model "{}" which was '
+                    'WARNING: Found documentation for resource "{}" which was '
                     'not found or is disabled').format(patch.name)
                 )
 
diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py
index 72c87bdd0fc..432bb642539 100644
--- a/core/dbt/contracts/graph/parsed.py
+++ b/core/dbt/contracts/graph/parsed.py
@@ -454,18 +454,27 @@ def json_schema(cls, embeddable: bool = False) -> Dict[str, Any]:
         return schema
 
 
-# The parsed node update is only the 'patch', not the test. The test became a
-# regular parsed node. Note that description and columns must be present, but
-# may be empty.
 @dataclass
-class ParsedNodePatch(HasYamlMetadata, Replaceable):
+class ParsedPatch(HasYamlMetadata, Replaceable):
     name: str
     description: str
-    columns: Dict[str, ColumnInfo]
     docrefs: List[Docref]
     meta: Dict[str, Any]
 
 
+# The parsed node update is only the 'patch', not the test. The test became a
+# regular parsed node. Note that description and columns must be present, but
+# may be empty.
+@dataclass
+class ParsedNodePatch(ParsedPatch):
+    columns: Dict[str, ColumnInfo]
+
+
+@dataclass
+class ParsedMacroPatch(ParsedPatch):
+    pass
+
+
 @dataclass
 class MacroDependsOn(JsonSchemaMixin, Replaceable):
     macros: List[str] = field(default_factory=list)
@@ -479,6 +488,10 @@ class ParsedMacro(UnparsedMacro, HasUniqueID):
     tags: List[str] = field(default_factory=list)
     # TODO: is this ever populated?
     depends_on: MacroDependsOn = field(default_factory=MacroDependsOn)
+    docrefs: List[Docref] = field(default_factory=list)
+    description: str = field(default='')
+    meta: Dict[str, Any] = field(default_factory=dict)
+    patch_path: Optional[str] = None
 
     def local_vars(self):
         return {}
@@ -490,6 +503,15 @@ def generator(self) -> Callable[[Dict[str, Any]], Callable]:
         """
         return MacroGenerator(self)
 
+    def patch(self, patch: ParsedMacroPatch):
+        self.patch_path: Optional[str] = patch.original_file_path
+        self.description = patch.description
+        self.docrefs = patch.docrefs
+        self.meta = patch.meta
+        if dbt.flags.STRICT_MODE:
+            assert isinstance(self, JsonSchemaMixin)
+            self.to_dict(validate=True)
+
 
 @dataclass
 class ParsedDocumentation(UnparsedDocumentationFile, HasUniqueID):
diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py
index 4d3cc905c73..c34b2758cd9 100644
--- a/core/dbt/contracts/graph/unparsed.py
+++ b/core/dbt/contracts/graph/unparsed.py
@@ -7,7 +7,7 @@
 from dataclasses import dataclass, field
 from datetime import timedelta
-from typing import Optional, List, Union, Dict, Any
+from typing import Optional, List, Union, Dict, Any, Sequence
 
 
 @dataclass
@@ -59,12 +59,19 @@ class UnparsedRunHook(UnparsedNode):
 
 
 @dataclass
-class NamedTested(JsonSchemaMixin, Replaceable):
+class HasDocs(JsonSchemaMixin, Replaceable):
     name: str
     description: str = ''
     meta: Dict[str, Any] = field(default_factory=dict)
     data_type: Optional[str] = None
-    tests: Optional[List[Union[Dict[str, Any], str]]] = None
+
+
+TestDef = Union[Dict[str, Any], str]
+
+
+@dataclass
+class HasTests(HasDocs):
+    tests: Optional[List[TestDef]] = None
 
     def __post_init__(self):
         if self.tests is None:
@@ -72,18 +79,18 @@ class NamedTested(JsonSchemaMixin, Replaceable):
 
 
 @dataclass
-class UnparsedColumn(NamedTested):
+class UnparsedColumn(HasTests):
     tags: List[str] = field(default_factory=list)
 
 
 @dataclass
-class ColumnDescription(JsonSchemaMixin, Replaceable):
-    columns: List[UnparsedColumn] = field(default_factory=list)
+class HasColumnDocs(JsonSchemaMixin, Replaceable):
+    columns: Sequence[HasDocs] = field(default_factory=list)
 
 
 @dataclass
-class NodeDescription(NamedTested):
-    pass
+class HasColumnTests(HasColumnDocs):
+    columns: Sequence[UnparsedColumn] = field(default_factory=list)
 
 
 @dataclass
@@ -94,9 +101,18 @@ class HasYamlMetadata(JsonSchemaMixin):
 
 
 @dataclass
-class UnparsedNodeUpdate(ColumnDescription, NodeDescription, HasYamlMetadata):
-    def __post_init__(self):
-        NodeDescription.__post_init__(self)
+class UnparsedAnalysisUpdate(HasColumnDocs, HasDocs, HasYamlMetadata):
+    pass
+
+
+@dataclass
+class UnparsedNodeUpdate(HasColumnTests, HasTests, HasYamlMetadata):
+    pass
+
+
+@dataclass
+class UnparsedMacroUpdate(HasDocs, HasYamlMetadata):
+    pass
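The mixin split above is the heart of the contracts change: HasDocs carries name, description, and meta; HasTests layers optional tests on top of it; and the HasColumnDocs/HasColumnTests pair does the same for columns, so each documentable resource type opts into exactly the capabilities it supports. A quick sketch of the resulting shapes, with illustrative field values; from_dict comes from hologram's JsonSchemaMixin, the same entry point the parsers in this diff call:

# Macros are docs-only: HasDocs + HasYamlMetadata, with no tests/columns.
from dbt.contracts.graph.unparsed import (
    UnparsedMacroUpdate, UnparsedNodeUpdate
)

macro_update = UnparsedMacroUpdate.from_dict({
    'name': 'test_nothing',
    'description': "{{ doc('macro_info') }}",
    'original_file_path': 'models/schema.yml',
    'yaml_key': 'macros',
    'package_name': 'test',
})
assert not hasattr(macro_update, 'tests')

# Models keep the full surface: columns and tests are both accepted.
node_update = UnparsedNodeUpdate.from_dict({
    'name': 'my_model',
    'tests': ['unique'],
    'columns': [{'name': 'id', 'tests': ['not_null']}],
    'original_file_path': 'models/schema.yml',
    'yaml_key': 'models',
    'package_name': 'test',
})
assert node_update.tests == ['unique']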
 
 
 class TimePeriod(StrEnum):
@@ -205,7 +221,7 @@ class Quoting(JsonSchemaMixin, Mergeable):
 
 
 @dataclass
-class UnparsedSourceTableDefinition(ColumnDescription, NodeDescription):
+class UnparsedSourceTableDefinition(HasColumnTests, HasTests):
     loaded_at_field: Optional[str] = None
     identifier: Optional[str] = None
     quoting: Quoting = field(default_factory=Quoting)
@@ -217,9 +233,6 @@ class UnparsedSourceTableDefinition(ColumnDescription, NodeDescription):
     )
     tags: List[str] = field(default_factory=list)
 
-    def __post_init__(self):
-        NodeDescription.__post_init__(self)
-
 
 @dataclass
 class UnparsedSourceDefinition(JsonSchemaMixin, Replaceable):
diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py
index a2ace5536f8..73a23ae8a47 100644
--- a/core/dbt/exceptions.py
+++ b/core/dbt/exceptions.py
@@ -1,6 +1,6 @@
 import builtins
 import functools
-from typing import NoReturn
+from typing import NoReturn, Optional
 
 from dbt.logger import GLOBAL_LOGGER as logger
 from dbt.node_types import NodeType
@@ -425,7 +425,9 @@ def doc_invalid_args(model, args):
             model)
 
 
-def doc_target_not_found(model, target_doc_name, target_doc_package):
+def doc_target_not_found(
+    model, target_doc_name: str, target_doc_package: Optional[str]
+) -> NoReturn:
     target_package_string = ''
 
     if target_doc_package is not None:
@@ -708,17 +710,26 @@ def raise_patch_targets_not_found(patches):
     )
 
 
-def raise_duplicate_patch_name(name, patch_1, patch_2):
+def raise_duplicate_patch_name(patch_1, patch_2):
+    name = patch_1.name
     raise_compiler_error(
-        'dbt found two schema.yml entries for the same model named {0}. '
-        'Models and their associated columns may only be described a single '
-        'time. To fix this, remove the model entry for for {0} in one of '
-        'these files:\n - {1}\n - {2}'
-        .format(
-            name,
-            patch_1.original_file_path,
-            patch_2.original_file_path,
-        )
+        f'dbt found two schema.yml entries for the same resource named '
+        f'{name}. Resources and their associated columns may only be '
+        f'described a single time. To fix this, remove the resource entry '
+        f'for {name} in one of these files:\n - '
+        f'{patch_1.original_file_path}\n - {patch_2.original_file_path}'
     )
+
+
+def raise_duplicate_macro_patch_name(patch_1, patch_2):
+    package_name = patch_1.package_name
+    name = patch_1.name
+    raise_compiler_error(
+        f'dbt found two schema.yml entries for the same macro in package '
+        f'{package_name} named {name}. Macros may only be described a single '
+        f'time. To fix this, remove the macros entry for {name} in one '
+        f'of these files:'
+        f'\n - {patch_1.original_file_path}\n - {patch_2.original_file_path}'
+    )
diff --git a/core/dbt/node_types.py b/core/dbt/node_types.py
index 2f503316a57..0eb3b60db03 100644
--- a/core/dbt/node_types.py
+++ b/core/dbt/node_types.py
@@ -43,6 +43,8 @@ def documentable(cls) -> List['NodeType']:
             cls.Seed,
             cls.Snapshot,
             cls.Source,
+            cls.Macro,
+            cls.Analysis,
         ]
 
     def pluralize(self) -> str:
diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py
index f7edef92d55..7b816dcb872 100644
--- a/core/dbt/parser/manifest.py
+++ b/core/dbt/parser/manifest.py
@@ -309,6 +309,7 @@ def create_manifest(self) -> Manifest:
             files=self.results.files,
         )
         manifest.patch_nodes(self.results.patches)
+        manifest.patch_macros(self.results.macro_patches)
         manifest = ParserUtils.process_sources(
             manifest, self.root_project.project_name
         )
diff --git a/core/dbt/parser/results.py b/core/dbt/parser/results.py
index 801c725a9f6..fd128c95c2c 100644
--- a/core/dbt/parser/results.py
+++ b/core/dbt/parser/results.py
@@ -3,16 +3,20 @@
 
 from hologram import JsonSchemaMixin
 
-from dbt.contracts.graph.manifest import SourceFile, RemoteFile, FileHash
+from dbt.contracts.graph.manifest import (
+    SourceFile, RemoteFile, FileHash, MacroKey
+)
 from dbt.contracts.graph.parsed import (
     ParsedNode, HasUniqueID, ParsedMacro, ParsedDocumentation,
     ParsedNodePatch, ParsedSourceDefinition, ParsedAnalysisNode,
     ParsedHookNode, ParsedRPCNode, ParsedModelNode, ParsedSeedNode,
     ParsedTestNode, ParsedSnapshotNode,
+    ParsedMacroPatch,
 )
 from dbt.contracts.util import Writable, Replaceable
 from dbt.exceptions import (
     raise_duplicate_resource_name, raise_duplicate_patch_name,
-    CompilationException, InternalException, raise_compiler_error
+    raise_duplicate_macro_patch_name, CompilationException, InternalException,
+    raise_compiler_error,
 )
 from dbt.node_types import NodeType
 from dbt.ui import printer
@@ -54,6 +58,7 @@ class ParseResult(JsonSchemaMixin, Writable, Replaceable):
     sources: MutableMapping[str, ParsedSourceDefinition] = dict_field()
     docs: MutableMapping[str, ParsedDocumentation] = dict_field()
     macros: MutableMapping[str, ParsedMacro] = dict_field()
+    macro_patches: MutableMapping[MacroKey, ParsedMacroPatch] = dict_field()
     patches: MutableMapping[str, ParsedNodePatch] = dict_field()
     files: MutableMapping[str, SourceFile] = dict_field()
     disabled: MutableMapping[str, List[ParsedNode]] = dict_field()
@@ -120,14 +125,25 @@ def add_doc(self, source_file: SourceFile, doc: ParsedDocumentation):
         self.docs[doc.unique_id] = doc
         self.get_file(source_file).docs.append(doc.unique_id)
 
-    def add_patch(self, source_file: SourceFile, patch: ParsedNodePatch):
+    def add_patch(
+        self, source_file: SourceFile, patch: ParsedNodePatch
+    ) -> None:
         # matches can't be overwritten
         if patch.name in self.patches:
-            raise_duplicate_patch_name(patch.name, patch,
-                                       self.patches[patch.name])
+            raise_duplicate_patch_name(patch, self.patches[patch.name])
         self.patches[patch.name] = patch
         self.get_file(source_file).patches.append(patch.name)
 
+    def add_macro_patch(
+        self, source_file: SourceFile, patch: ParsedMacroPatch
+    ) -> None:
+        # macros are fully namespaced
+        key = (patch.package_name, patch.name)
+        if key in self.macro_patches:
+            raise_duplicate_macro_patch_name(patch, self.macro_patches[key])
+        self.macro_patches[key] = patch
+        self.get_file(source_file).macro_patches.append(key)
+
     def _get_disabled(
         self, unique_id: str, match_file: SourceFile
     ) -> List[ParsedNode]:
@@ -218,10 +234,19 @@ def sanitized_update(
             )
             self.add_patch(source_file, patch)
             patched = True
-
         if patched:
             self.get_file(source_file).patches.sort()
 
+        macro_patched = False
+        for key in old_file.macro_patches:
+            macro_patch = _expect_value(
+                key, old_result.macro_patches, old_file, "macro_patches"
+            )
+            self.add_macro_patch(source_file, macro_patch)
+            macro_patched = True
+        if macro_patched:
+            self.get_file(source_file).macro_patches.sort()
+
         return True
 
     def has_file(self, source_file: SourceFile) -> bool:
@@ -239,12 +264,13 @@ def rpc(cls):
         return cls(FileHash.empty(), FileHash.empty(), {})
 
 
-T = TypeVar('T')
+K_T = TypeVar('K_T')
+V_T = TypeVar('V_T')
 
 
 def _expect_value(
-    key: str, src: Mapping[str, T], old_file: SourceFile, name: str
-) -> T:
+    key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str
+) -> V_T:
     if key not in src:
         raise CompilationException(
             'Expected to find "{}" in cached "result.{}" based '
diff --git a/core/dbt/parser/schema_test_builders.py b/core/dbt/parser/schema_test_builders.py
index 75d38fe09c7..7afa4ae6a92 100644
--- a/core/dbt/parser/schema_test_builders.py
+++ b/core/dbt/parser/schema_test_builders.py
@@ -1,12 +1,15 @@
 import hashlib
 import re
 from dataclasses import dataclass
-from typing import Generic, TypeVar, Dict, Any, Tuple, Optional, List, Union
+from typing import (
+    Generic, TypeVar, Dict, Any, Tuple, Optional, List, Sequence
+)
 
 from dbt.clients.jinja import get_rendered
 from dbt.contracts.graph.unparsed import (
     UnparsedNodeUpdate, UnparsedSourceDefinition,
-    UnparsedSourceTableDefinition, UnparsedColumn
+    UnparsedSourceTableDefinition, UnparsedColumn, UnparsedMacroUpdate,
+    UnparsedAnalysisUpdate, TestDef
 )
 from dbt.exceptions import raise_compiler_error
 from dbt.parser.search import FileBlock
@@ -79,24 +82,36 @@ def name(self) -> str:
         return '{0.name}_{1.name}'.format(self.source, self.table)
 
     @property
-    def columns(self) -> List[UnparsedColumn]:
+    def columns(self) -> Sequence[UnparsedColumn]:
         if self.table.columns is None:
             return []
         else:
             return self.table.columns
 
     @property
-    def tests(self) -> List[Union[Dict[str, Any], str]]:
+    def tests(self) -> List[TestDef]:
         if self.table.tests is None:
             return []
         else:
             return self.table.tests
 
 
-NodeTarget = UnparsedNodeUpdate
+Testable = TypeVar('Testable', SourceTarget, UnparsedNodeUpdate)
 
+ColumnTarget = TypeVar(
+    'ColumnTarget',
+    SourceTarget,
+    UnparsedNodeUpdate,
+    UnparsedAnalysisUpdate,
+)
 
-Target = TypeVar('Target', NodeTarget, SourceTarget)
+Target = TypeVar(
+    'Target',
+    SourceTarget,
+    UnparsedNodeUpdate,
+    UnparsedMacroUpdate,
+    UnparsedAnalysisUpdate,
+)
 
 
 @dataclass
@@ -107,6 +122,27 @@ class TargetBlock(YamlBlock, Generic[Target]):
     def name(self):
         return self.target.name
 
+    @property
+    def columns(self):
+        return []
+
+    @property
+    def tests(self) -> List[TestDef]:
+        return []
+
+    @classmethod
+    def from_yaml_block(
+        cls, src: YamlBlock, target: Target
+    ) -> 'TargetBlock[Target]':
+        return cls(
+            file=src.file,
+            data=src.data,
+            target=target,
+        )
+
+
+@dataclass
+class TargetColumnsBlock(TargetBlock[ColumnTarget], Generic[ColumnTarget]):
     @property
     def columns(self):
         if self.target.columns is None:
@@ -114,8 +150,11 @@ def columns(self):
         else:
             return self.target.columns
 
+
+@dataclass
+class TestBlock(TargetColumnsBlock[Testable], Generic[Testable]):
     @property
-    def tests(self) -> List[Union[Dict[str, Any], str]]:
+    def tests(self) -> List[TestDef]:
         if self.target.tests is None:
             return []
         else:
@@ -123,8 +162,8 @@ def tests(self) -> List[Union[Dict[str, Any], str]]:
             return self.target.tests
 
     @classmethod
     def from_yaml_block(
-        cls, src: YamlBlock, target: Target
-    ) -> 'TargetBlock[Target]':
+        cls, src: YamlBlock, target: Testable
+    ) -> 'TestBlock[Testable]':
         return cls(
             file=src.file,
             data=src.data,
@@ -133,15 +172,15 @@ def from_yaml_block(
 
 
 @dataclass
-class SchemaTestBlock(TargetBlock):
+class SchemaTestBlock(TestBlock[Testable], Generic[Testable]):
     test: Dict[str, Any]
     column_name: Optional[str]
     tags: List[str]
 
     @classmethod
-    def from_target_block(
+    def from_test_block(
         cls,
-        src: TargetBlock,
+        src: TestBlock,
         test: Dict[str, Any],
         column_name: Optional[str],
         tags: List[str],
@@ -156,7 +195,7 @@ def from_target_block(
         )
 
 
-class TestBuilder(Generic[Target]):
+class TestBuilder(Generic[Testable]):
     """An object to hold assorted test settings and perform basic parsing
 
     Test names have the following pattern:
@@ -174,7 +213,7 @@ class TestBuilder(Generic[Target]):
     def __init__(
         self,
         test: Dict[str, Any],
-        target: Target,
+        target: Testable,
        package_name: str,
        render_ctx: Dict[str, Any],
        column_name: str = None,
@@ -182,7 +221,7 @@ def __init__(
         test_name, test_args = self.extract_test_args(test, column_name)
         self.args: Dict[str, Any] = test_args
         self.package_name: str = package_name
-        self.target: Target = target
+        self.target: Testable = target
 
         match = self.TEST_NAME_PATTERN.match(test_name)
         if match is None:
@@ -277,7 +316,7 @@ def macro_name(self) -> str:
         return macro_name
 
     def describe_test_target(self) -> str:
-        if isinstance(self.target, NodeTarget):
+        if isinstance(self.target, UnparsedNodeUpdate):
             fmt = "model('{0}')"
         elif isinstance(self.target, SourceTarget):
             fmt = "source('{0.source}', '{0.table}')"
@@ -288,7 +327,7 @@ def describe_test_target(self) -> str:
         raise NotImplementedError('describe_test_target not implemented!')
 
     def get_test_name(self) -> Tuple[str, str]:
-        if isinstance(self.target, NodeTarget):
+        if isinstance(self.target, UnparsedNodeUpdate):
             name = self.name
         elif isinstance(self.target, SourceTarget):
             name = 'source_' + self.name
@@ -310,7 +349,7 @@ def build_raw_sql(self) -> str:
         )
 
     def build_model_str(self):
-        if isinstance(self.target, NodeTarget):
+        if isinstance(self.target, UnparsedNodeUpdate):
             fmt = "ref('{0.name}')"
         elif isinstance(self.target, SourceTarget):
             fmt = "source('{0.source.name}', '{0.table.name}')"
diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py
index 7732777f4df..63b8c729b00 100644
--- a/core/dbt/parser/schemas.py
+++ b/core/dbt/parser/schemas.py
@@ -2,7 +2,9 @@
 import os
 
 from abc import abstractmethod
-from typing import Iterable, Dict, Any, Union, List, Optional, Generic, TypeVar
+from typing import (
+    Iterable, Dict, Any, Union, List, Optional, Generic, TypeVar, Type
+)
 
 from hologram import ValidationError
 
@@ -18,10 +20,12 @@
     ColumnInfo,
     Docref,
     ParsedTestNode,
+    ParsedMacroPatch,
 )
 from dbt.contracts.graph.unparsed import (
     UnparsedSourceDefinition, UnparsedNodeUpdate, UnparsedColumn,
-    UnparsedSourceTableDefinition, FreshnessThreshold
+    UnparsedMacroUpdate, UnparsedAnalysisUpdate,
+    UnparsedSourceTableDefinition, FreshnessThreshold,
 )
 from dbt.context.parser import docs
 from dbt.exceptions import (
@@ -32,13 +36,18 @@
 from dbt.parser.base import SimpleParser
 from dbt.parser.search import FileBlock, FilesystemSearcher
 from dbt.parser.schema_test_builders import (
-    TestBuilder, SourceTarget, NodeTarget, Target,
-    SchemaTestBlock, TargetBlock, YamlBlock,
+    TestBuilder, SourceTarget, Target, SchemaTestBlock, TargetBlock, YamlBlock,
+    TestBlock,
 )
 from dbt.utils import get_pseudo_test_path, coerce_dict_str
 
 
-UnparsedSchemaYaml = Union[UnparsedSourceDefinition, UnparsedNodeUpdate]
+UnparsedSchemaYaml = Union[
+    UnparsedSourceDefinition,
+    UnparsedNodeUpdate,
+    UnparsedAnalysisUpdate,
+    UnparsedMacroUpdate,
+]
 
 TestDef = Union[str, Dict[str, Any]]
 
@@ -104,7 +113,7 @@ class SchemaParser(SimpleParser[SchemaTestBlock, ParsedTestNode]):
     - read_yaml_{models,sources}: read in yaml as a dictionary, then
         validate it against the basic structures required so we can start
         parsing (NodeTarget, SourceTarget)
-        - these return potentially many Targets per yaml block, since earch
+        - these return potentially many Targets per yaml block, since each
           source can have multiple tables
     - parse_target_{model,source}: Read in the underlying target, parse and
         return a list of all its tests (model and column tests), collect
@@ -163,17 +172,9 @@ def _yaml_from_file(
         )
         return None
 
-    def parse_column(
-        self, block: TargetBlock, column: UnparsedColumn, refs: ParserRef
+    def parse_column_tests(
+        self, block: TestBlock, column: UnparsedColumn
     ) -> None:
-        column_name = column.name
-        description = column.description
-        data_type = column.data_type
-        meta = column.meta
-        collect_docrefs(block.target, refs, column_name, description)
-
-        refs.add(column, description, data_type, meta)
-
         if not column.tests:
             return
 
@@ -235,11 +236,10 @@ def parse_node(self, block: SchemaTestBlock) -> ParsedTestNode:
 
     def parse_test(
         self,
-        target_block: TargetBlock,
+        target_block: TestBlock,
         test: TestDef,
         column: Optional[UnparsedColumn],
     ) -> None:
-
         if isinstance(test, str):
             test = {test: {}}
 
@@ -250,7 +250,7 @@ def parse_test(
             column_name = column.name
             column_tags = column.tags
 
-        block = SchemaTestBlock.from_target_block(
+        block = SchemaTestBlock.from_test_block(
             src=target_block,
             test=test,
             column_name=column_name,
@@ -267,14 +267,12 @@ def parse_test(
             )
             raise CompilationException(msg) from exc
 
-    def parse_tests(self, block: TargetBlock) -> ParserRef:
-        refs = ParserRef()
+    def parse_tests(self, block: TestBlock) -> None:
         for column in block.columns:
-            self.parse_column(block, column, refs)
+            self.parse_column_tests(block, column)
 
         for test in block.tests:
             self.parse_test(block, test, None)
-        return refs
 
     def parse_file(self, block: FileBlock) -> None:
         dct = self._yaml_from_file(block.file)
@@ -285,17 +283,113 @@ def parse_file(self, block: FileBlock) -> None:
 
             self._parse_format_version(yaml_block)
 
-            parser: YamlParser
+            parser: YamlDocsReader
             for key in NodeType.documentable():
+                plural = key.pluralize()
                 if key == NodeType.Source:
-                    parser = SourceParser(self, yaml_block, key.pluralize())
-                    parser.parse()
+                    parser = SourceParser(self, yaml_block, plural)
+                elif key == NodeType.Macro:
+                    parser = MacroPatchParser(self, yaml_block, plural)
+                elif key == NodeType.Analysis:
+                    parser = AnalysisPatchParser(self, yaml_block, plural)
                 else:
-                    parser = NodeParser(self, yaml_block, key.pluralize())
-                    parser.parse()
+                    parser = TestablePatchParser(self, yaml_block, plural)
+                for test_block in parser.parse():
+                    self.parse_tests(test_block)
+
+
+Parsed = TypeVar(
+    'Parsed',
+    ParsedSourceDefinition, ParsedNodePatch, ParsedMacroPatch
+)
+NodeTarget = TypeVar(
+    'NodeTarget',
+    UnparsedNodeUpdate, UnparsedAnalysisUpdate
+)
+NonSourceTarget = TypeVar(
+    'NonSourceTarget',
+    UnparsedNodeUpdate, UnparsedAnalysisUpdate, UnparsedMacroUpdate
+)
+
+
+class YamlDocsReader(Generic[Target, Parsed]):
+    def __init__(
+        self, schema_parser: SchemaParser, yaml: YamlBlock, key: str
+    ) -> None:
+        self.schema_parser = schema_parser
+        self.key = key
+        self.yaml = yaml
+
+    @property
+    def results(self):
+        return self.schema_parser.results
+
+    @property
+    def project(self):
+        return self.schema_parser.project
+
+    @property
+    def default_database(self):
+        return self.schema_parser.default_database
 
-Parsed = TypeVar('Parsed', ParsedSourceDefinition, ParsedNodePatch)
 
+    @property
+    def root_project(self):
+        return self.schema_parser.root_project
+
+    def get_key_dicts(self) -> Iterable[Dict[str, Any]]:
+        data = self.yaml.data.get(self.key, [])
+        if not isinstance(data, list):
+            raise CompilationException(
+                '{} must be a list, got {} instead: ({})'
+                .format(self.key, type(data), _trimmed(str(data)))
+            )
+        path = self.yaml.path.original_file_path
+
+        for entry in data:
+            if coerce_dict_str(entry) is not None:
+                yield entry
+            else:
+                msg = error_context(
+                    path, self.key, data, 'expected a dict with string keys'
+                )
+                raise CompilationException(msg)
+
+    def parse_docs(self, block: TargetBlock) -> ParserRef:
+        refs = ParserRef()
+        for column in block.columns:
+            column_name = column.name
+            description = column.description
+            data_type = column.data_type
+            meta = column.meta
+            collect_docrefs(block.target, refs, column_name, description)
+
+            refs.add(column, description, data_type, meta)
+        return refs
+
+    @abstractmethod
+    def get_unparsed_target(self) -> Iterable[Target]:
+        raise NotImplementedError('get_unparsed_target is abstract')
+
+    @abstractmethod
+    def get_block(self, node: Target) -> TargetBlock:
+        raise NotImplementedError('get_block is abstract')
+
+    @abstractmethod
+    def parse_patch(
+        self, block: TargetBlock[Target], refs: ParserRef
+    ) -> None:
+        raise NotImplementedError('parse_patch is abstract')
+
+    def parse(self) -> List[TestBlock]:
+        node: Target
+        test_blocks: List[TestBlock] = []
+        for node in self.get_unparsed_target():
+            node_block = self.get_block(node)
+            if isinstance(node_block, TestBlock):
+                test_blocks.append(node_block)
+            refs = self.parse_docs(node_block)
+            self.parse_patch(node_block, refs)
+        return test_blocks
 
 
 class YamlParser(Generic[Target, Parsed]):
@@ -340,29 +434,49 @@ def get_key_dicts(self) -> Iterable[Dict[str, Any]]:
             )
             raise CompilationException(msg)
 
+    def parse_docs(self, block: TargetBlock) -> ParserRef:
+        refs = ParserRef()
+        for column in block.columns:
+            column_name = column.name
+            description = column.description
+            data_type = column.data_type
+            meta = column.meta
+            collect_docrefs(block.target, refs, column_name, description)
+
+            refs.add(column, description, data_type, meta)
+        return refs
+
     def parse(self):
         node: Target
         for node in self.get_unparsed_target():
             node_block = TargetBlock.from_yaml_block(self.yaml, node)
-            refs = self.schema_parser.parse_tests(node_block)
-            self.parse_with_refs(node_block, refs)
+            refs = self.parse_docs(node_block)
+            self.parse_tests(node_block)
+            self.parse_patch(node_block, refs)
+
+    def parse_tests(self, target: TargetBlock[Target]) -> None:
+        # some yaml parsers just don't have tests (macros, analyses)
+        pass
 
     @abstractmethod
     def get_unparsed_target(self) -> Iterable[Target]:
         raise NotImplementedError('get_unparsed_target is abstract')
 
     @abstractmethod
-    def parse_with_refs(
+    def parse_patch(
        self, block: TargetBlock[Target], refs: ParserRef
     ) -> None:
-        raise NotImplementedError('parse_with_refs is abstract')
+        raise NotImplementedError('parse_patch is abstract')
 
 
-class SourceParser(YamlParser[SourceTarget, ParsedSourceDefinition]):
+class SourceParser(YamlDocsReader[SourceTarget, ParsedSourceDefinition]):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._renderer = ConfigRenderer(self.root_project.cli_vars)
 
+    def get_block(self, node: SourceTarget) -> TestBlock:
+        return TestBlock.from_yaml_block(self.yaml, node)
+
     def get_unparsed_target(self) -> Iterable[SourceTarget]:
         path = self.yaml.path.original_file_path
 
@@ -393,7 +507,7 @@ def _calculate_freshness(
         else:
             return None
 
-    def parse_with_refs(
+    def parse_patch(
         self, block: TargetBlock[SourceTarget], refs: ParserRef
     ) -> None:
         source = block.target.source
@@ -445,30 +559,46 @@ def parse_with_refs(
         self.results.add_source(self.yaml.file, result)
 
 
-class NodeParser(YamlParser[NodeTarget, ParsedNodePatch]):
-    def get_unparsed_target(self) -> Iterable[NodeTarget]:
+class NonSourceParser(
+    YamlDocsReader[NonSourceTarget, Parsed], Generic[NonSourceTarget, Parsed]
+):
+    def collect_docrefs(
+        self, block: TargetBlock[NonSourceTarget], refs: ParserRef
+    ) -> str:
+        description = block.target.description
+        collect_docrefs(block.target, refs, None, description)
+        return description
+
+    @abstractmethod
+    def _target_type(self) -> Type[NonSourceTarget]:
+        raise NotImplementedError('_target_type not implemented')
+
+    def get_unparsed_target(self) -> Iterable[NonSourceTarget]:
         path = self.yaml.path.original_file_path
 
         for data in self.get_key_dicts():
             data.update({
                 'original_file_path': path,
                 'yaml_key': self.key,
-                'package_name': self.schema_parser.project.project_name,
+                'package_name': self.project.project_name,
             })
             try:
-                model = UnparsedNodeUpdate.from_dict(data)
+                model = self._target_type().from_dict(data)
             except (ValidationError, JSONValidationException) as exc:
                 msg = error_context(path, self.key, data, exc)
                 raise CompilationException(msg) from exc
             else:
                 yield model
 
-    def parse_with_refs(
-        self, block: TargetBlock[UnparsedNodeUpdate], refs: ParserRef
-    ) -> None:
-        description = block.target.description
-        collect_docrefs(block.target, refs, None, description)
 
+class NodePatchParser(
+    NonSourceParser[NodeTarget, ParsedNodePatch],
+    Generic[NodeTarget]
+):
+    def parse_patch(
+        self, block: TargetBlock[NodeTarget], refs: ParserRef
+    ) -> None:
+        description = self.collect_docrefs(block, refs)
         result = ParsedNodePatch(
             name=block.target.name,
             original_file_path=block.target.original_file_path,
@@ -480,3 +610,43 @@ def parse_with_refs(
             meta=block.target.meta,
         )
         self.results.add_patch(self.yaml.file, result)
+
+
+class TestablePatchParser(NodePatchParser[UnparsedNodeUpdate]):
+    def get_block(self, node: UnparsedNodeUpdate) -> TestBlock:
+        return TestBlock.from_yaml_block(self.yaml, node)
+
+    def _target_type(self) -> Type[UnparsedNodeUpdate]:
+        return UnparsedNodeUpdate
+
+
+class AnalysisPatchParser(NodePatchParser[UnparsedAnalysisUpdate]):
+    def get_block(self, node: UnparsedAnalysisUpdate) -> TargetBlock:
+        return TargetBlock.from_yaml_block(self.yaml, node)
+
+    def _target_type(self) -> Type[UnparsedAnalysisUpdate]:
+        return UnparsedAnalysisUpdate
+
+
+class MacroPatchParser(NonSourceParser[UnparsedMacroUpdate, ParsedMacroPatch]):
+    def get_block(self, node: UnparsedMacroUpdate) -> TargetBlock:
+        return TargetBlock.from_yaml_block(self.yaml, node)
+
+    def _target_type(self) -> Type[UnparsedMacroUpdate]:
+        return UnparsedMacroUpdate
+
+    def parse_patch(
+        self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef
+    ) -> None:
+        description = self.collect_docrefs(block, refs)
+
+        result = ParsedMacroPatch(
+            name=block.target.name,
+            original_file_path=block.target.original_file_path,
+            yaml_key=block.target.yaml_key,
+            package_name=block.target.package_name,
+            description=description,
+            docrefs=refs.docrefs,
+            meta=block.target.meta,
+        )
+        self.results.add_macro_patch(self.yaml.file, result)
diff --git a/core/dbt/parser/util.py b/core/dbt/parser/util.py
index e626a788e16..7e31a3e5135 100644
--- a/core/dbt/parser/util.py
+++ b/core/dbt/parser/util.py
@@ -1,9 +1,13 @@
-from typing import Optional
+from typing import Optional, Union
 
 import dbt.exceptions
 import dbt.utils
 from dbt.node_types import NodeType
-from dbt.contracts.graph.parsed import ColumnInfo
+from dbt.contracts.graph.manifest import Manifest
+from dbt.contracts.graph.parsed import (
+    ColumnInfo, ParsedNode, ParsedMacro, ParsedSourceDefinition,
+    ParsedDocumentation,
+)
 from dbt.config import RuntimeConfig
 from dbt.flags import SINGLE_THREADED_HANDLER
 
@@ -35,15 +39,19 @@ def do_docs(*args: str):
     return do_docs
 
 
+class Disabled:
+    pass
+
+
 class ParserUtils:
-    DISABLED = object()
+    DISABLED = Disabled()
 
     @classmethod
     def resolve_source(
-        cls, manifest, target_source_name: Optional[str],
-        target_table_name: Optional[str], current_project: str,
+        cls, manifest: Manifest, target_source_name: str,
+        target_table_name: str, current_project: str,
         node_package: str
-    ):
+    ) -> Optional[ParsedSourceDefinition]:
         candidate_targets = [current_project, node_package, None]
         target_source = None
         for candidate in candidate_targets:
@@ -62,7 +70,7 @@ def resolve_ref(
         cls, manifest, target_model_name: Optional[str],
         target_model_package: Optional[str], current_project: str,
         node_package: str
-    ):
+    ) -> Optional[Union[ParsedNode, Disabled]]:
         if target_model_package is not None:
             return manifest.find_refable_by_name(
                 target_model_name,
@@ -99,7 +107,7 @@ def resolve_ref(
     def resolve_doc(
         cls, manifest, target_doc_name: str,
         target_doc_package: Optional[str], current_project: str,
         node_package: str
-    ):
+    ) -> Optional[ParsedDocumentation]:
         """Resolve the given documentation. This follows the same algorithm
         as resolve_ref except the is_enabled checks are unnecessary as docs
         are always enabled.
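One detail in the util.py hunks above deserves a note: replacing the bare object() sentinel with an instance of a dedicated Disabled class is what makes the new Optional[Union[ParsedNode, Disabled]] return annotation checkable, because isinstance gives mypy a type guard it can narrow with, while an identity comparison against an object() sentinel does not. A minimal self-contained illustration of that narrowing; use_ref is a hypothetical stand-in, not a function from this diff:

from typing import Optional, Union


class Disabled:
    pass


DISABLED = Disabled()


def use_ref(target: Optional[Union[str, Disabled]]) -> str:
    # both checks are type guards mypy understands, so past this point
    # target must be a str
    if target is None or isinstance(target, Disabled):
        return '<missing or disabled>'
    return target.upper()


assert use_ref(DISABLED) == '<missing or disabled>'
assert use_ref('my_model') == 'MY_MODEL'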
@@ -131,7 +139,12 @@ def _get_node_column(cls, node, column_name):
         return column
 
     @classmethod
-    def process_docs_for_node(cls, manifest, current_project: str, node):
+    def process_docs_for_node(
+        cls,
+        manifest: Manifest,
+        current_project: str,
+        node: ParsedNode,
+    ) -> None:
         for docref in node.docrefs:
             column_name = docref.column_name
 
@@ -150,7 +163,26 @@ def process_docs_for_node(cls, manifest, current_project: str, node):
             obj.description = dbt.clients.jinja.get_rendered(raw, context)
 
     @classmethod
-    def process_docs_for_source(cls, manifest, current_project: str, source):
+    def process_docs_for_macro(
+        cls,
+        manifest: Manifest,
+        current_project: str,
+        macro: ParsedMacro,
+    ) -> None:
+        for docref in macro.docrefs:
+            context = {
+                'doc': docs(macro, manifest, current_project)
+            }
+            raw = macro.description or ''
+            macro.description = dbt.clients.jinja.get_rendered(raw, context)
+
+    @classmethod
+    def process_docs_for_source(
+        cls,
+        manifest: Manifest,
+        current_project: str,
+        source: ParsedSourceDefinition
+    ) -> None:
         context = {
             'doc': docs(source, manifest, current_project),
         }
@@ -175,6 +207,8 @@ def process_docs(cls, manifest, current_project: str):
                 cls.process_docs_for_source(manifest, current_project, node)
             else:
                 cls.process_docs_for_node(manifest, current_project, node)
+        for macro in manifest.macros.values():
+            cls.process_docs_for_macro(manifest, current_project, macro)
         return manifest
 
     @classmethod
@@ -197,13 +231,13 @@ def process_refs_for_node(cls, manifest, current_project: str, node):
                 current_project,
                 node.package_name)
 
-            if target_model is None or target_model is cls.DISABLED:
+            if target_model is None or isinstance(target_model, Disabled):
                 # This may raise. Even if it doesn't, we don't want to add
                 # this node to the graph b/c there is no destination node
                 node.config.enabled = False
                 dbt.utils.invalid_ref_fail_unless_test(
                     node, target_model_name, target_model_package,
-                    disabled=(target_model is cls.DISABLED)
+                    disabled=isinstance(target_model, Disabled)
                 )
                 continue
diff --git a/test/integration/029_docs_generate_tests/ref_models/docs.md b/test/integration/029_docs_generate_tests/ref_models/docs.md
index 1918e825b44..61200373fba 100644
--- a/test/integration/029_docs_generate_tests/ref_models/docs.md
+++ b/test/integration/029_docs_generate_tests/ref_models/docs.md
@@ -25,3 +25,7 @@ My table
 {% docs column_info %}
 An ID field
 {% enddocs %}
+
+{% docs macro_info %}
+My custom test that I wrote that does nothing
+{% enddocs %}
diff --git a/test/integration/029_docs_generate_tests/ref_models/schema.yml b/test/integration/029_docs_generate_tests/ref_models/schema.yml
index 3cc4c7aac10..d0350ec4ff5 100644
--- a/test/integration/029_docs_generate_tests/ref_models/schema.yml
+++ b/test/integration/029_docs_generate_tests/ref_models/schema.yml
@@ -29,3 +29,9 @@ sources:
       columns:
         - name: id
           description: "{{ doc('column_info') }}"
+
+macros:
+  - name: test_nothing
+    description: "{{ doc('macro_info') }}"
+    meta:
+      some_key: 100
diff --git a/test/integration/029_docs_generate_tests/test_docs_generate.py b/test/integration/029_docs_generate_tests/test_docs_generate.py
index d2a8b4d63f0..5a8652ad425 100644
--- a/test/integration/029_docs_generate_tests/test_docs_generate.py
+++ b/test/integration/029_docs_generate_tests/test_docs_generate.py
@@ -810,7 +810,17 @@ def verify_catalog(self, expected):
         actual = catalog['nodes']
         self.assertEqual(expected, actual)
 
-    def verify_manifest_macros(self, manifest):
+    def verify_manifest_macros(self, manifest, expected=None):
+        self.assertIn('macros', manifest)
+        if expected is None:
+            self._verify_generic_macro_structure(manifest)
+            return
+        for unique_id, expected_macro in expected.items():
+            self.assertIn(unique_id, manifest['macros'])
+            actual_macro = manifest['macros'][unique_id]
+            self.assertEqual(expected_macro, actual_macro)
+
+    def _verify_generic_macro_structure(self, manifest):
         # just test a known global macro to avoid having to update this every
         # time they change.
         self.assertIn('macro.dbt.column_list', manifest['macros'])
@@ -820,7 +830,7 @@ def verify_manifest_macros(self, manifest):
             {
                 'path', 'original_file_path', 'package_name', 'raw_sql',
                 'root_path', 'name', 'unique_id', 'tags', 'resource_type',
-                'depends_on'
+                'depends_on', 'meta', 'description', 'patch_path', 'docrefs',
             }
         )
         # Don't compare the sql, just make sure it exists
@@ -842,6 +852,10 @@ def verify_manifest_macros(self, manifest):
                 'tags': [],
                 'resource_type': 'macro',
                 'depends_on': {'macros': []},
+                'description': '',
+                'patch_path': None,
+                'docrefs': [],
+                'meta': {},
             },
             without_sql,
         )
@@ -1204,6 +1218,7 @@ def expected_seeded_manifest(self, model_database=None):
                     'nodes': [],
                     'sources': [],
                     'patches': [],
+                    'macro_patches': [],
                 },
                 normalize('models/model.sql'): {
                     'path': self._path_to('models', 'model.sql'),
@@ -1213,6 +1228,7 @@ def expected_seeded_manifest(self, model_database=None):
                     'nodes': ['model.test.model'],
                     'sources': [],
                     'patches': [],
+                    'macro_patches': [],
                 },
                 normalize('seed/seed.csv'): {
                     'path': self._path_to('seed', 'seed.csv'),
@@ -1223,8 +1239,9 @@ def expected_seeded_manifest(self, model_database=None):
                     'docs': [],
                     'macros': [],
                     'nodes': ['seed.test.seed'],
-                    'patches': [],
                     'sources': [],
+                    'patches': [],
+                    'macro_patches': [],
                 },
                 normalize('models/readme.md'): {
                     'path': self._path_to('models', 'readme.md'),
@@ -1232,8 +1249,9 @@ def expected_seeded_manifest(self, model_database=None):
                     'docs': [],
                     'macros': [],
                     'nodes': [],
-                    'patches': [],
                     'sources': [],
+                    'patches': [],
+                    'macro_patches': [],
                 },
                 normalize('models/schema.yml'): {
                     'path': self._path_to('models', 'schema.yml'),
@@ -1241,8 +1259,9 @@ def expected_seeded_manifest(self, model_database=None):
                     'docs': [],
                     'macros': [],
                     'nodes': ['test.test.unique_model_id', 'test.test.not_null_model_id', 'test.test.test_nothing_model_'],
-                    'patches': ['model'],
                     'sources': [],
+                    'patches': ['model'],
+                    'macro_patches': [],
                 },
                 normalize('seed/schema.yml'): {
                     'path': self._path_to('seed', 'schema.yml'),
@@ -1250,8 +1269,9 @@ def expected_seeded_manifest(self, model_database=None):
                     'docs': [],
                     'macros': [],
                     'nodes': [],
-                    'patches': ['seed'],
                     'sources': [],
+                    'patches': ['seed'],
+                    'macro_patches': [],
                 },
             },
         }
@@ -1280,6 +1300,9 @@ def expected_postgres_references_manifest(self, model_database=None):
         column_info = LineIndifferent(
             '{% docs column_info %}\nAn ID field\n{% enddocs %}'
         )
+        macro_info = LineIndifferent(
+            '{% docs macro_info %}\nMy custom test that I wrote that does nothing\n{% enddocs %}'
+        )
 
         return {
             'nodes': {
@@ -1703,6 +1726,16 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'root_path': self.test_root_dir,
                     'unique_id': 'test.view_summary',
                 },
+                'test.macro_info': {
+                    'block_contents': 'My custom test that I wrote that does nothing',
+                    'file_contents': macro_info,
+                    'name': 'macro_info',
+                    'original_file_path': docs_path,
+                    'package_name': 'test',
+                    'path': 'docs.md',
+                    'root_path': self.test_root_dir,
+                    'unique_id': 'test.macro_info',
+                },
             },
             'child_map': {
                 'model.test.ephemeral_copy': ['model.test.ephemeral_summary'],
@@ -1733,6 +1766,7 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'patches': [],
                     'path': self._path_to('macros', 'dummy_test.sql'),
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('ref_models/view_summary.sql'): {
                     'checksum': self._checksum_file('ref_models/view_summary.sql'),
@@ -1742,6 +1776,7 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'patches': [],
                     'path': self._path_to('ref_models', 'view_summary.sql'),
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('ref_models/ephemeral_summary.sql'): {
                     'checksum': self._checksum_file('ref_models/ephemeral_summary.sql'),
@@ -1751,6 +1786,7 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'patches': [],
                     'path': self._path_to('ref_models', 'ephemeral_summary.sql'),
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('ref_models/ephemeral_copy.sql'): {
                     'checksum': self._checksum_file('ref_models/ephemeral_copy.sql'),
@@ -1760,6 +1796,7 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'patches': [],
                     'path': self._path_to('ref_models', 'ephemeral_copy.sql'),
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('seed/seed.csv'): {
                     'checksum': {
@@ -1772,6 +1809,7 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'patches': [],
                     'path': self._path_to('seed', 'seed.csv'),
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('ref_models/docs.md'): {
                     'checksum': self._checksum_file('ref_models/docs.md'),
@@ -1783,12 +1821,14 @@ def expected_postgres_references_manifest(self, model_database=None):
                         'test.source_info',
                         'test.table_info',
                         'test.column_info',
+                        'test.macro_info',
                     ],
                     'macros': [],
                     'nodes': [],
                     'patches': [],
                     'path': self._path_to('ref_models', 'docs.md'),
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('ref_models/schema.yml'): {
                     'checksum': self._checksum_file('ref_models/schema.yml'),
@@ -1798,6 +1838,7 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'patches': ['ephemeral_summary', 'view_summary'],
                     'path': self._path_to('ref_models', 'schema.yml'),
                     'sources': ['source.test.my_source.my_table'],
+                    'macro_patches': [['test', 'test_nothing']],
                 },
                 normalize('seed/schema.yml'): {
                     'path': self._path_to('seed', 'schema.yml'),
@@ -1807,8 +1848,35 @@ def expected_postgres_references_manifest(self, model_database=None):
                     'nodes': [],
                     'patches': ['seed'],
                     'sources': [],
+                    'macro_patches': [],
                 },
             },
+            'macros': {
+                'macro.test.test_nothing': {
+                    'name': 'test_nothing',
+                    'depends_on': {'macros': []},
+                    'description': 'My custom test that I wrote that does nothing',
+                    'raw_sql': AnyStringWith('macro test_nothing'),
+                    'original_file_path': self.dir('macros/dummy_test.sql'),
+                    'path': self.dir('macros/dummy_test.sql'),
+                    'package_name': 'test',
+                    'docrefs': [
+                        {
+                            'column_name': None,
+                            'documentation_name': 'macro_info',
+                            'documentation_package': '',
+                        },
+                    ],
+                    'meta': {
+                        'some_key': 100,
+                    },
+                    'patch_path': self.dir('ref_models/schema.yml'),
+                    'resource_type': 'macro',
+                    'unique_id': 'macro.test.test_nothing',
+                    'tags': [],
+                    'root_path': self.test_root_dir,
+                }
+            }
         }
 
     def expected_bigquery_complex_manifest(self):
@@ -2217,6 +2285,7 @@ def expected_bigquery_complex_manifest(self):
                     'docs': [],
                     'nodes': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('bq_models/clustered.sql'): {
                     'checksum': self._checksum_file('bq_models/clustered.sql'),
@@ -2226,6 +2295,7 @@ def expected_bigquery_complex_manifest(self):
                     'docs': [],
                     'macros': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('bq_models/multi_clustered.sql'): {
                     'checksum': self._checksum_file('bq_models/multi_clustered.sql'),
@@ -2235,6 +2305,7 @@ def expected_bigquery_complex_manifest(self):
                     'docs': [],
                     'macros': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('bq_models/nested_table.sql'): {
                     'checksum': self._checksum_file('bq_models/nested_table.sql'),
@@ -2244,6 +2315,7 @@ def expected_bigquery_complex_manifest(self):
                     'docs': [],
                     'macros': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('bq_models/nested_view.sql'): {
                     'checksum': self._checksum_file('bq_models/nested_view.sql'),
@@ -2253,6 +2325,7 @@ def expected_bigquery_complex_manifest(self):
                     'docs': [],
                     'macros': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('seed/seed.csv'): {
                     'checksum': {
@@ -2265,6 +2338,7 @@ def expected_bigquery_complex_manifest(self):
                     'docs': [],
                     'macros': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('bq_models/schema.yml'): {
                     'checksum': self._checksum_file('bq_models/schema.yml'),
@@ -2274,6 +2348,7 @@ def expected_bigquery_complex_manifest(self):
                     'docs': [],
                     'macros': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('seed/schema.yml'): {
                     'path': self._path_to('seed', 'schema.yml'),
@@ -2283,6 +2358,7 @@ def expected_bigquery_complex_manifest(self):
                     'nodes': [],
                     'patches': ['seed'],
                     'sources': [],
+                    'macro_patches': [],
                 },
             },
         }
@@ -2509,6 +2585,7 @@ def expected_redshift_incremental_view_manifest(self):
                     'nodes': [],
                     'patches': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('rs_models/model.sql'): {
                     'checksum': self._checksum_file('rs_models/model.sql'),
@@ -2518,6 +2595,7 @@ def expected_redshift_incremental_view_manifest(self):
                     'nodes': ['model.test.model'],
                     'patches': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('seed/seed.csv'): {
                     'checksum': {
@@ -2530,6 +2608,7 @@ def expected_redshift_incremental_view_manifest(self):
                     'nodes': ['seed.test.seed'],
                     'patches': [],
                     'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('rs_models/schema.yml'): {
                     'checksum': self._checksum_file('rs_models/schema.yml'),
@@ -2538,7 +2617,8 @@ def expected_redshift_incremental_view_manifest(self):
                     'macros': [],
                     'nodes': [],
                     'patches': ['model'],
-                    'sources': []
+                    'sources': [],
+                    'macro_patches': [],
                 },
                 normalize('seed/schema.yml'): {
                     'path': self._path_to('seed', 'schema.yml'),
@@ -2548,6 +2628,7 @@ def expected_redshift_incremental_view_manifest(self):
                     'nodes': [],
                     'patches': ['seed'],
                     'sources': [],
+                    'macro_patches': [],
                 },
             },
         }
@@ -2579,7 +2660,7 @@ def verify_manifest(self, expected_manifest):
 
         for key in manifest_keys:
             if key == 'macros':
-                self.verify_manifest_macros(manifest)
+                self.verify_manifest_macros(manifest, expected_manifest.get('macros'))
             elif key == 'generated_at':
                 self.assertBetween(manifest['generated_at'],
                                    start=self.generate_start_time)
diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py
index e060116e114..384892d7582 100644
--- a/test/unit/test_contracts_graph_parsed.py
+++ b/test/unit/test_contracts_graph_parsed.py
@@ -1459,7 +1459,10 @@ def test_ok(self):
             'resource_type': 'macro',
             'unique_id': 'macro.test.foo',
             'tags': [],
-            'depends_on': {'macros': []}
+            'depends_on': {'macros': []},
+            'meta': {},
+            'description': 'my macro description',
+            'docrefs': [],
         }
         macro = ParsedMacro(
             name='foo',
@@ -1471,7 +1474,10 @@ def test_ok(self):
             resource_type=NodeType.Macro,
             unique_id='macro.test.foo',
             tags=[],
-            depends_on=MacroDependsOn()
+            depends_on=MacroDependsOn(),
+            meta={},
+            description='my macro description',
+            docrefs=[],
         )
         self.assert_symmetric(macro, macro_dict)
         self.assertEqual(macro.local_vars(), {})
diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py
index 55d6e141b4b..d86becc501e 100644
--- a/test/unit/test_parser.py
+++ b/test/unit/test_parser.py
@@ -11,7 +11,9 @@
     ModelParser, MacroParser, DataTestParser, SchemaParser, ParserUtils,
     ParseResult, SnapshotParser, AnalysisParser
 )
-from dbt.parser.schemas import NodeParser, SourceParser
+from dbt.parser.schemas import (
+    TestablePatchParser, SourceParser, AnalysisPatchParser, MacroPatchParser
+)
 from dbt.parser.search import FileBlock
 from dbt.parser.schema_test_builders import YamlBlock
 
@@ -191,8 +193,14 @@ def yaml_block_for(self, test_yml: str, filename: str):
 
 class SchemaParserSourceTest(SchemaParserTest):
     def test__read_basic_source(self):
         block = self.yaml_block_for(SINGLE_TABLE_SOURCE, 'test_one.yml')
-        NodeParser(self.parser, block, 'models').parse()
-        SourceParser(self.parser, block, 'sources').parse()
+        analysis_blocks = AnalysisPatchParser(self.parser, block, 'analyses').parse()
+        model_blocks = TestablePatchParser(self.parser, block, 'models').parse()
+        source_blocks = SourceParser(self.parser, block, 'sources').parse()
+        macro_blocks = MacroPatchParser(self.parser, block, 'macros').parse()
+        self.assertEqual(len(analysis_blocks), 0)
+        self.assertEqual(len(model_blocks), 0)
+        self.assertEqual(len(source_blocks), 1)
+        self.assertEqual(len(macro_blocks), 0)
         self.assertEqual(len(list(self.parser.results.patches)), 0)
         self.assertEqual(len(list(self.parser.results.nodes)), 0)
         results = list(self.parser.results.sources.values())
@@ -229,11 +237,16 @@ def test__parse_basic_source(self):
 
     def test__read_basic_source_tests(self):
         block = self.yaml_block_for(SINGLE_TABLE_SOURCE_TESTS, 'test_one.yml')
-        NodeParser(self.parser, block, 'models').parse()
+        analysis_blocks = AnalysisPatchParser(self.parser, block, 'analyses').parse()
+        model_blocks = TestablePatchParser(self.parser, block, 'models').parse()
+        source_blocks = SourceParser(self.parser, block, 'sources').parse()
+        macro_blocks = MacroPatchParser(self.parser, block, 'macros').parse()
+        self.assertEqual(len(analysis_blocks), 0)
+        self.assertEqual(len(model_blocks), 0)
+        self.assertEqual(len(source_blocks), 1)
+        self.assertEqual(len(macro_blocks), 0)
         self.assertEqual(len(list(self.parser.results.nodes)), 0)
-        SourceParser(self.parser, block, 'sources').parse()
         self.assertEqual(len(list(self.parser.results.patches)), 0)
-        self.assertEqual(len(list(self.parser.results.nodes)), 2)
         results = list(self.parser.results.sources.values())
         self.assertEqual(len(results), 1)
         self.assertEqual(results[0].source_name, 'my_source')
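Taken together, these changes wire macro documentation end to end: the schema parser reads a macros: entry out of schema.yml into an UnparsedMacroUpdate, MacroPatchParser turns it into a ParsedMacroPatch keyed by (package, name), ParseResult records it, and Manifest.patch_macros applies it to the matching ParsedMacro. A condensed sketch of that flow using only the APIs added in this diff; the surrounding results, source_file, and manifest objects are assumed to already exist in a parsing session:

from dbt.contracts.graph.parsed import ParsedMacroPatch

patch = ParsedMacroPatch(
    name='test_nothing',
    description='My custom test that I wrote that does nothing',
    docrefs=[],
    meta={'some_key': 100},
    original_file_path='ref_models/schema.yml',
    yaml_key='macros',
    package_name='test',
)

# ParseResult keys macro patches by MacroKey, a (package, name) tuple,
# because macro names are only unique within a package.
results.add_macro_patch(source_file, patch)
assert ('test', 'test_nothing') in results.macro_patches

# Manifest.patch_macros consumes the mapping, copying description, meta,
# docrefs, and patch_path onto the matching ParsedMacro and warning about
# any patches left over.
manifest.patch_macros(results.macro_patches)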