diff --git a/RELEASE.md b/RELEASE.md
index 29031d4cf2..482119df40 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -11,6 +11,7 @@ Please follow the established format:
 ## Major features and improvements
 
 - Drop support for `python=3.8`. (#1747)
+- Migrate Viz to use `pydantic>=2`. (#1743)
 
 ## Bug fixes and other changes
 
diff --git a/package/features/steps/lower_requirements.txt b/package/features/steps/lower_requirements.txt
index 1fcded74bd..c8cf3c5225 100644
--- a/package/features/steps/lower_requirements.txt
+++ b/package/features/steps/lower_requirements.txt
@@ -1,5 +1,5 @@
 ipython==7.0.0
-fastapi==0.73.0
+fastapi==0.100.0
 fsspec==2021.4
 aiofiles==22.1.0
 uvicorn[standard]==0.22.0
@@ -13,4 +13,3 @@ strawberry-graphql==0.192.0
 networkx==2.5
 orjson==3.9
 secure==0.3.0
-pydantic==1.10
\ No newline at end of file
diff --git a/package/kedro_viz/api/rest/responses.py b/package/kedro_viz/api/rest/responses.py
index 08fb3c76bd..67b1528480 100644
--- a/package/kedro_viz/api/rest/responses.py
+++ b/package/kedro_viz/api/rest/responses.py
@@ -1,4 +1,5 @@
 """`kedro_viz.api.rest.responses` defines REST response types."""
+
 # pylint: disable=missing-class-docstring,invalid-name
 import abc
 import logging
@@ -10,7 +11,7 @@ from fastapi.encoders import jsonable_encoder
 from fastapi.responses import JSONResponse, ORJSONResponse
 from kedro.io.core import get_protocol_and_path
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict
 
 from kedro_viz.api.rest.utils import get_package_version
 from kedro_viz.data_access import data_access_manager
@@ -35,8 +36,7 @@ class APIErrorMessage(BaseModel):
 
 
 class BaseAPIResponse(BaseModel, abc.ABC):
-    class Config:
-        orm_mode = True
+    model_config = ConfigDict(from_attributes=True)
 
 
 class BaseGraphNodeAPIResponse(BaseAPIResponse):
@@ -47,14 +47,13 @@ class BaseGraphNodeAPIResponse(BaseAPIResponse):
     type: str
 
     # If a node is a ModularPipeline node, this value will be None, hence Optional.
-    modular_pipelines: Optional[List[str]]
+    modular_pipelines: Optional[List[str]] = None
 
 
 class TaskNodeAPIResponse(BaseGraphNodeAPIResponse):
     parameters: Dict
-
-    class Config:
-        schema_extra = {
+    model_config = ConfigDict(
+        json_schema_extra={
             "example": {
                 "id": "6ab908b8",
                 "name": "split_data_node",
@@ -78,15 +77,15 @@ class Config:
                 },
             }
         }
+    )
 
 
 class DataNodeAPIResponse(BaseGraphNodeAPIResponse):
-    layer: Optional[str]
-    dataset_type: Optional[str]
-    stats: Optional[Dict]
-
-    class Config:
-        schema_extra = {
+    layer: Optional[str] = None
+    dataset_type: Optional[str] = None
+    stats: Optional[Dict] = None
+    model_config = ConfigDict(
+        json_schema_extra={
             "example": {
                 "id": "d7b83b05",
                 "name": "master_table",
@@ -99,6 +98,7 @@ class Config:
                 "stats": {"rows": 10, "columns": 2, "file_size": 2300},
             }
         }
+    )
 
 
 NodeAPIResponse = Union[
@@ -108,15 +108,14 @@ class Config:
 
 
 class TaskNodeMetadataAPIResponse(BaseAPIResponse):
-    code: Optional[str]
-    filepath: Optional[str]
-    parameters: Optional[Dict]
+    code: Optional[str] = None
+    filepath: Optional[str] = None
+    parameters: Optional[Dict] = None
     inputs: List[str]
     outputs: List[str]
-    run_command: Optional[str]
-
-    class Config:
-        schema_extra = {
+    run_command: Optional[str] = None
+    model_config = ConfigDict(
+        json_schema_extra={
             "example": {
                 "code": "def split_data(data: pd.DataFrame, parameters: Dict) -> Tuple:",
                 "filepath": "proj/src/new_kedro_project/pipelines/data_science/nodes.py",
@@ -126,39 +125,39 @@ class Config:
                 "run_command": "kedro run --to-nodes=split_data",
             }
         }
+    )
 
 
 class DataNodeMetadataAPIResponse(BaseAPIResponse):
-    filepath: Optional[str]
+    filepath: Optional[str] = None
     type: str
-    run_command: Optional[str]
-    preview: Optional[Union[Dict, str]]
-    preview_type: Optional[str]
-    stats: Optional[Dict]
-
-    class Config:
-        schema_extra = {
+    run_command: Optional[str] = None
+    preview: Optional[Union[Dict, str]] = None
+    preview_type: Optional[str] = None
+    stats: Optional[Dict] = None
+    model_config = ConfigDict(
+        json_schema_extra={
             "example": {
                 "filepath": "/my-kedro-project/data/03_primary/master_table.csv",
                 "type": "kedro_datasets.pandas.csv_dataset.CSVDataset",
                 "run_command": "kedro run --to-outputs=master_table",
             }
         }
+    )
 
 
 class TranscodedDataNodeMetadataAPIReponse(BaseAPIResponse):
     filepath: str
     original_type: str
     transcoded_types: List[str]
-    run_command: Optional[str]
-    stats: Optional[Dict]
+    run_command: Optional[str] = None
+    stats: Optional[Dict] = None
 
 
 class ParametersNodeMetadataAPIResponse(BaseAPIResponse):
     parameters: Dict
-
-    class Config:
-        schema_extra = {
+    model_config = ConfigDict(
+        json_schema_extra={
             "example": {
                 "parameters": {
                     "test_size": 0.2,
@@ -176,6 +175,7 @@ class Config:
                 }
             }
         }
+    )
 
 
 NodeMetadataAPIResponse = Union[
@@ -197,7 +197,7 @@ class NamedEntityAPIResponse(BaseAPIResponse):
     """
 
     id: str
-    name: Optional[str]
+    name: Optional[str] = None
 
 
 class ModularPipelineChildAPIResponse(BaseAPIResponse):
@@ -266,15 +266,15 @@ class PackageCompatibilityAPIResponse(BaseAPIResponse):
     package_name: str
     package_version: str
     is_compatible: bool
-
-    class Config:
-        schema_extra = {
+    model_config = ConfigDict(
+        json_schema_extra={
             "example": {
                 "package_name": "fsspec",
                 "package_version": "2023.9.1",
                 "is_compatible": True,
             }
         }
+    )
 
 
 class EnhancedORJSONResponse(ORJSONResponse):
diff --git a/package/kedro_viz/integrations/kedro/data_loader.py b/package/kedro_viz/integrations/kedro/data_loader.py
index 3ffd608bca..dccf95d8af 100644
--- a/package/kedro_viz/integrations/kedro/data_loader.py
+++ b/package/kedro_viz/integrations/kedro/data_loader.py
@@ -2,6 +2,7 @@ load data from a Kedro project. It takes care of making sure viz can
 load data from projects created in a range of Kedro versions.
 """
+
 # pylint: disable=import-outside-toplevel, protected-access
 
 import json
diff --git a/package/kedro_viz/models/experiment_tracking.py b/package/kedro_viz/models/experiment_tracking.py
index 7512984621..d662a3fead 100644
--- a/package/kedro_viz/models/experiment_tracking.py
+++ b/package/kedro_viz/models/experiment_tracking.py
@@ -1,12 +1,13 @@
 """kedro_viz.models.experiment_tracking` defines data models to represent run data and
 tracking datasets."""
-# pylint: disable=too-few-public-methods,protected-access,missing-class-docstring,missing-function-docstring
+# pylint: disable=too-few-public-methods,protected-access,missing-function-docstring
 import logging
 from dataclasses import dataclass, field
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict
 
 from kedro.io import Version
+from pydantic import ConfigDict
 from sqlalchemy import Column
 from sqlalchemy.orm import declarative_base  # type: ignore
 from sqlalchemy.sql.schema import ForeignKey
@@ -35,9 +36,7 @@ class RunModel(Base):  # type: ignore
 
     id = Column(String, primary_key=True, index=True)
     blob = Column(JSON)
-
-    class Config:
-        orm_mode = True
+    model_config = ConfigDict(from_attributes=True)
 
 
 class UserRunDetailsModel(Base):  # type: ignore
@@ -50,9 +49,7 @@ class UserRunDetailsModel(Base):  # type: ignore
     bookmark = Column(Boolean, default=False)
     title = Column(String)
     notes = Column(String)
-
-    class Config:
-        orm_mode = True
+    model_config = ConfigDict(from_attributes=True)
 
 
 class TrackingDatasetGroup(str, Enum):
diff --git a/package/kedro_viz/models/flowchart.py b/package/kedro_viz/models/flowchart.py
index 8e1f04fa4e..c7e0b4aed7 100644
--- a/package/kedro_viz/models/flowchart.py
+++ b/package/kedro_viz/models/flowchart.py
@@ -1,6 +1,6 @@
 """`kedro_viz.models.flowchart` defines data models to represent Kedro entities
 in a viz graph."""
-# pylint: disable=protected-access, missing-function-docstring
+# pylint: disable=protected-access, missing-function-docstring, too-many-lines
 import abc
 import hashlib
 import inspect
@@ -12,7 +12,14 @@ from kedro.pipeline.node import Node as KedroNode
 from kedro.pipeline.pipeline import TRANSCODING_SEPARATOR, _strip_transcoding
-from pydantic import BaseModel, Field, root_validator, validator
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    Field,
+    ValidationInfo,
+    field_validator,
+    model_validator,
+)
 
 from kedro_viz.models.utils import get_dataset_type
 
@@ -47,12 +54,17 @@ class NamedEntity(BaseModel):
     """
 
     id: str
-    name: Optional[str] = Field(None, description="The name of the registered pipeline")
+    name: Optional[str] = Field(
+        default=None,
+        validate_default=True,
+        description="The name of the registered pipeline",
+    )
 
-    @validator("name", always=True)
-    def set_name(cls, _, values):
-        assert "id" in values
-        return values["id"]
+    @field_validator("name")
+    @classmethod
+    def set_name(cls, _, info: ValidationInfo):
+        assert "id" in info.data
+        return info.data["id"]
 
 
 class RegisteredPipeline(NamedEntity):
@@ -128,16 +140,16 @@ class GraphNode(BaseModel, abc.ABC):
     # For example, node(func, namespace="uk.de") means this node belongs
     # to the modular pipeline "uk" and "uk.de"
     namespace: Optional[str] = Field(
-        None, description="The original namespace on this node"
+        default=None,
+        validate_default=True,
+        description="The original namespace on this node",
     )
     modular_pipelines: Optional[List[str]] = Field(
-        None, description="The modular_pipelines this node belongs to"
+        default=None,
+        validate_default=True,
+        description="The modular_pipelines this node belongs to",
     )
-
-    class Config:
-        """Pydantic Config for GraphNode"""
-
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
     @staticmethod
     def _hash(value: str):
@@ -319,7 +331,9 @@ class TaskNode(GraphNode):
     """
 
     modular_pipelines: List[str] = Field(
-        [], description="The modular pipelines this node belongs to"
+        default=[],
+        validate_default=True,
+        description="The modular pipelines this node belongs to",
     )
     parameters: Dict = Field(
         {}, description="A dictionary of parameter values for the task node"
@@ -328,18 +342,21 @@ class TaskNode(GraphNode):
     # The type for Task node
     type: str = GraphNodeType.TASK.value
 
-    @root_validator(pre=True)
+    @model_validator(mode="before")
+    @classmethod
     def check_kedro_obj_exists(cls, values):
         assert "kedro_obj" in values
         return values
 
-    @validator("namespace", always=True)
-    def set_namespace(cls, _, values):
-        return values["kedro_obj"].namespace
+    @field_validator("namespace")
+    @classmethod
+    def set_namespace(cls, _, info: ValidationInfo):
+        return info.data["kedro_obj"].namespace
 
-    @validator("modular_pipelines", always=True)
-    def set_modular_pipelines(cls, _, values):
-        return cls._expand_namespaces(values["kedro_obj"].namespace)
+    @field_validator("modular_pipelines")
+    @classmethod
+    def set_modular_pipelines(cls, _, info: ValidationInfo):
+        return cls._expand_namespaces(info.data["kedro_obj"].namespace)
 
 
 def _extract_wrapped_func(func: FunctionType) -> FunctionType:
@@ -430,25 +447,38 @@ class TaskNodeMetadata(GraphNodeMetadata):
 
     task_node: TaskNode = Field(..., exclude=True)
 
-    # Source code of the node's function
-    code: Optional[str]
+    code: Optional[str] = Field(
+        default=None,
+        validate_default=True,
+        description="Source code of the node's function",
+    )
 
-    # Path to the file where the node is defined
-    filepath: Optional[str]
+    filepath: Optional[str] = Field(
+        default=None,
+        validate_default=True,
+        description="Path to the file where the node is defined",
+    )
 
     parameters: Optional[Dict] = Field(
-        None, description="The parameters of the node, if available"
+        default=None,
+        validate_default=True,
+        description="The parameters of the node, if available",
    )
     run_command: Optional[str] = Field(
-        None, description="The command to run the pipeline to this node"
+        default=None,
+        validate_default=True,
+        description="The command to run the pipeline to this node",
     )
 
-    inputs: Optional[List[str]] = Field(None, description="The inputs to the TaskNode")
+    inputs: Optional[List[str]] = Field(
+        default=None, validate_default=True, description="The inputs to the TaskNode"
+    )
     outputs: Optional[List[str]] = Field(
-        None, description="The outputs from the TaskNode"
+        default=None, validate_default=True, description="The outputs from the TaskNode"
     )
 
-    @root_validator(pre=True)
+    @model_validator(mode="before")
+    @classmethod
     def check_task_node_exists(cls, values):
         assert "task_node" in values
         cls.set_task_and_kedro_node(values["task_node"])
@@ -459,7 +489,8 @@ def set_task_and_kedro_node(cls, task_node):
         cls.task_node = task_node
         cls.kedro_node = cast(KedroNode, task_node.kedro_obj)
 
-    @validator("code", always=True)
+    @field_validator("code")
+    @classmethod
     def set_code(cls, code):
         # this is required to handle partial, curry functions
         if inspect.isfunction(cls.kedro_node.func):
@@ -468,7 +499,8 @@ def set_code(cls, code):
 
         return None
 
-    @validator("filepath", always=True)
+    @field_validator("filepath")
+    @classmethod
     def set_filepath(cls, filepath):
         # this is required to handle partial, curry functions
         if inspect.isfunction(cls.kedro_node.func):
@@ -488,11 +520,13 @@ def set_filepath(cls, filepath):
 
         return None
 
-    @validator("parameters", always=True)
+    @field_validator("parameters")
+    @classmethod
     def set_parameters(cls, _):
         return cls.task_node.parameters
 
-    @validator("run_command", always=True)
+    @field_validator("run_command")
+    @classmethod
     def set_run_command(cls, _):
         # if a node doesn't have a user-supplied `_name` attribute,
         # a human-readable run command `kedro run --to-nodes/nodes` is not available
@@ -503,11 +537,13 @@ def set_run_command(cls, _):
 
         return None
 
-    @validator("inputs", always=True)
+    @field_validator("inputs")
+    @classmethod
     def set_inputs(cls, _):
         return cls.kedro_node.inputs
 
-    @validator("outputs", always=True)
+    @field_validator("outputs")
+    @classmethod
     def set_outputs(cls, _):
         return cls.kedro_node.outputs
 
@@ -533,15 +569,21 @@ class DataNode(GraphNode):
     )
 
     stats: Optional[Dict] = Field(None, description="The statistics for the data node.")
 
-    # The concrete type of the underlying kedro_obj.
-    dataset_type: Optional[str]
+    dataset_type: Optional[str] = Field(
+        default=None,
+        validate_default=True,
+        description="The concrete type of the underlying kedro_obj",
+    )
 
     modular_pipelines: List[str] = Field(
-        [], description="The modular pipelines this node belongs to"
+        default=[],
+        validate_default=True,
+        description="The modular pipelines this node belongs to",
     )
-    # The metadata for data node
-    viz_metadata: Optional[Dict]
+    viz_metadata: Optional[Dict] = Field(
+        default=None, validate_default=True, description="The metadata for data node"
+    )
 
     run_command: Optional[str] = Field(
         None, description="The command to run the pipeline to this node"
@@ -550,37 +592,42 @@ class DataNode(GraphNode):
     # The type for data node
     type: str = GraphNodeType.DATA.value
 
-    @root_validator(pre=True)
+    @model_validator(mode="before")
+    @classmethod
     def check_kedro_obj_exists(cls, values):
         assert "kedro_obj" in values
         return values
 
-    @validator("dataset_type", always=True)
-    def set_dataset_type(cls, _, values):
-        kedro_obj = values.get("kedro_obj")
+    @field_validator("dataset_type")
+    @classmethod
+    def set_dataset_type(cls, _, info: ValidationInfo):
+        kedro_obj = cast(AbstractDataset, info.data.get("kedro_obj"))
         return get_dataset_type(kedro_obj)
 
-    @validator("namespace", always=True)
-    def set_namespace(cls, _, values):
-        assert "name" in values
+    @field_validator("namespace")
+    @classmethod
+    def set_namespace(cls, _, info: ValidationInfo):
+        assert "name" in info.data
         # the modular pipelines that a data node belongs to
         # are derived from its namespace, which in turn
         # is derived from the dataset's name.
-        name = values.get("name")
-        return cls._get_namespace(name)
+        name = info.data.get("name")
+        return cls._get_namespace(str(name))
 
-    @validator("modular_pipelines", always=True)
-    def set_modular_pipelines(cls, _, values):
-        assert "name" in values
+    @field_validator("modular_pipelines")
+    @classmethod
+    def set_modular_pipelines(cls, _, info: ValidationInfo):
+        assert "name" in info.data
 
-        name = values.get("name")
-        namespace = cls._get_namespace(name)
+        name = info.data.get("name")
+        namespace = cls._get_namespace(str(name))
         return cls._expand_namespaces(namespace)
 
-    @validator("viz_metadata", always=True)
-    def set_viz_metadata(cls, _, values):
-        kedro_obj = values.get("kedro_obj")
+    @field_validator("viz_metadata")
+    @classmethod
+    def set_viz_metadata(cls, _, info: ValidationInfo):
+        kedro_obj = cast(AbstractDataset, info.data.get("kedro_obj"))
 
         if hasattr(kedro_obj, "metadata") and kedro_obj.metadata:
             return kedro_obj.metadata.get("kedro-viz", None)
@@ -629,7 +676,9 @@ class TranscodedDataNode(GraphNode):
     )
 
     modular_pipelines: List[str] = Field(
-        [], description="The modular pipelines this node belongs to"
+        default=[],
+        validate_default=True,
+        description="The modular pipelines this node belongs to",
     )
 
     run_command: Optional[str] = Field(
@@ -643,22 +692,24 @@ class TranscodedDataNode(GraphNode):
     # The type for data node
     type: str = GraphNodeType.DATA.value
 
-    @validator("namespace", always=True)
-    def set_namespace(cls, _, values):
-        assert "name" in values
+    @field_validator("namespace")
+    @classmethod
+    def set_namespace(cls, _, info: ValidationInfo):
+        assert "name" in info.data
         # the modular pipelines that a data node belongs to
         # are derived from its namespace, which in turn
         # is derived from the dataset's name.
-        name = values.get("name")
-        return cls._get_namespace(name)
+        name = info.data.get("name")
+        return cls._get_namespace(str(name))
 
-    @validator("modular_pipelines", always=True)
-    def set_modular_pipelines(cls, _, values):
-        assert "name" in values
+    @field_validator("modular_pipelines")
+    @classmethod
+    def set_modular_pipelines(cls, _, info: ValidationInfo):
+        assert "name" in info.data
 
-        name = values.get("name")
-        namespace = cls._get_namespace(name)
+        name = info.data.get("name")
+        namespace = cls._get_namespace(str(name))
         return cls._expand_namespaces(namespace)
 
     def has_metadata(self) -> bool:
@@ -677,25 +728,42 @@ class DataNodeMetadata(GraphNodeMetadata):
 
     data_node: DataNode = Field(..., exclude=True)
 
-    # The type of the data node
-    type: Optional[str]
+    type: Optional[str] = Field(
+        default=None, validate_default=True, description="The type of the data node"
+    )
 
-    # The path to the actual data file for the underlying dataset
-    filepath: Optional[str]
+    filepath: Optional[str] = Field(
+        default=None,
+        validate_default=True,
+        description="The path to the actual data file for the underlying dataset",
+    )
 
     run_command: Optional[str] = Field(
-        None, description="Command to run the pipeline to this node"
+        default=None,
+        validate_default=True,
+        description="Command to run the pipeline to this node",
     )
+
     preview: Optional[Union[Dict, str]] = Field(
-        None, description="Preview data for the underlying data node"
+        default=None,
+        validate_default=True,
+        description="Preview data for the underlying data node",
     )
     preview_type: Optional[str] = Field(
-        None, description="Type of preview for the dataset"
+        default=None,
+        validate_default=True,
+        description="Type of preview for the dataset",
     )
-    stats: Optional[Dict] = Field(None, description="The statistics for the data node.")
the data node.") - @root_validator(pre=True) + stats: Optional[Dict] = Field( + default=None, + validate_default=True, + description="The statistics for the data node.", + ) + + @model_validator(mode="before") + @classmethod def check_data_node_exists(cls, values): assert "data_node" in values cls.set_data_node_and_dataset(values["data_node"]) @@ -710,22 +778,26 @@ def set_data_node_and_dataset(cls, data_node): # does not arise: https://github.com/kedro-org/kedro-viz/pull/573. cls.dataset.release() - @validator("type", always=True) + @field_validator("type") + @classmethod def set_type(cls, _): return cls.data_node.dataset_type - @validator("filepath", always=True) + @field_validator("filepath") + @classmethod def set_filepath(cls, _): dataset_description = cls.dataset._describe() return _parse_filepath(dataset_description) - @validator("run_command", always=True) + @field_validator("run_command") + @classmethod def set_run_command(cls, _): if not cls.data_node.is_free_input: return f"kedro run --to-outputs={cls.data_node.name}" return None - @validator("preview", always=True) + @field_validator("preview") + @classmethod def set_preview(cls, _): if cls.data_node.is_preview_disabled() or not hasattr(cls.dataset, "preview"): return None @@ -747,7 +819,8 @@ def set_preview(cls, _): ) return None - @validator("preview_type", always=True) + @field_validator("preview_type") + @classmethod def set_preview_type(cls, _): if cls.data_node.is_preview_disabled() or not hasattr(cls.dataset, "preview"): return None @@ -764,7 +837,8 @@ def set_preview_type(cls, _): ) return None - @validator("stats", always=True) + @field_validator("stats") + @classmethod def set_stats(cls, _): return cls.data_node.stats @@ -781,55 +855,71 @@ class TranscodedDataNodeMetadata(GraphNodeMetadata): transcoded_data_node: TranscodedDataNode = Field(..., exclude=True) - # The path to the actual data file for the underlying dataset. # Only available if the dataset has filepath set. - filepath: Optional[str] + filepath: Optional[str] = Field( + default=None, + validate_default=True, + description="The path to the actual data file for the underlying dataset", + ) run_command: Optional[str] = Field( - None, description="Command to run the pipeline to this node" + default=None, + validate_default=True, + description="Command to run the pipeline to this node", ) original_type: Optional[str] = Field( - None, + default=None, + validate_default=True, description="The dataset type of the underlying transcoded data node original version", ) transcoded_types: Optional[List[str]] = Field( - None, description="The list of all dataset types for the transcoded versions" + default=None, + validate_default=True, + description="The list of all dataset types for the transcoded versions", ) # Statistics for the underlying data node stats: Optional[Dict] = Field( - None, description="The statistics for the transcoded data node metadata." 
+        default=None,
+        validate_default=True,
+        description="The statistics for the transcoded data node metadata.",
     )
 
-    @root_validator(pre=True)
+    @model_validator(mode="before")
+    @classmethod
     def check_transcoded_data_node_exists(cls, values):
         assert "transcoded_data_node" in values
         cls.transcoded_data_node = values["transcoded_data_node"]
         return values
 
-    @validator("filepath", always=True)
+    @field_validator("filepath")
+    @classmethod
     def set_filepath(cls, _):
         dataset_description = cls.transcoded_data_node.original_version._describe()
         return _parse_filepath(dataset_description)
 
-    @validator("run_command", always=True)
+    @field_validator("run_command")
+    @classmethod
     def set_run_command(cls, _):
         if not cls.transcoded_data_node.is_free_input:
             return f"kedro run --to-outputs={cls.transcoded_data_node.original_name}"
         return None
 
-    @validator("original_type", always=True)
+    @field_validator("original_type")
+    @classmethod
     def set_original_type(cls, _):
         return get_dataset_type(cls.transcoded_data_node.original_version)
 
-    @validator("transcoded_types", always=True)
+    @field_validator("transcoded_types")
+    @classmethod
     def set_transcoded_types(cls, _):
         return [
             get_dataset_type(transcoded_version)
             for transcoded_version in cls.transcoded_data_node.transcoded_versions
         ]
 
-    @validator("stats", always=True)
+    @field_validator("stats")
+    @classmethod
     def set_stats(cls, _):
         return cls.transcoded_data_node.stats
 
@@ -854,7 +944,8 @@ class ParametersNode(GraphNode):
     # The type for Parameters Node
     type: str = GraphNodeType.PARAMETERS.value
 
-    @root_validator(pre=True)
+    @model_validator(mode="before")
+    @classmethod
     def check_kedro_obj_and_name_exists(cls, values):
         assert "kedro_obj" in values
         assert "name" in values
@@ -888,7 +979,6 @@ def parameter_name(self) -> str:
 
     @property
     def parameter_value(self) -> Any:
         """Load the parameter value from the underlying dataset"""
-        self.kedro_obj: AbstractDataset
         if not (self.kedro_obj and hasattr(self.kedro_obj, "load")):
             return None
@@ -917,16 +1007,20 @@ class ParametersNodeMetadata(GraphNodeMetadata):
 
     parameters_node: ParametersNode = Field(..., exclude=True)
     parameters: Optional[Dict] = Field(
-        None, description="The parameters dictionary for the parameters metadata node"
+        default=None,
+        validate_default=True,
+        description="The parameters dictionary for the parameters metadata node",
    )
 
-    @root_validator(pre=True)
+    @model_validator(mode="before")
+    @classmethod
     def check_parameters_node_exists(cls, values):
         assert "parameters_node" in values
         cls.parameters_node = values["parameters_node"]
         return values
 
-    @validator("parameters", always=True)
+    @field_validator("parameters")
+    @classmethod
     def set_parameters(cls, _):
         if cls.parameters_node.is_single_parameter():
             return {
diff --git a/package/requirements.txt b/package/requirements.txt
index 166b36365e..6765378883 100644
--- a/package/requirements.txt
+++ b/package/requirements.txt
@@ -1,8 +1,7 @@
 packaging~=23.0
 kedro>=0.18.0
 ipython>=7.0.0, <9.0
-fastapi>=0.73.0,<0.200.0
-pydantic<2
+fastapi>=0.100.0,<0.200.0
 fsspec>=2021.4, <2024.1
 s3fs>=2021.4, <2024.1
 aiofiles>=22.1.0
diff --git a/package/tests/conftest.py b/package/tests/conftest.py
index e788bfc389..cc77364d74 100644
--- a/package/tests/conftest.py
+++ b/package/tests/conftest.py
@@ -258,7 +258,7 @@ def mock_http_response():
     class MockHTTPResponse(BaseModel, frozen=True):
         data: dict
 
-        def json(self):
+        def model_dump_json(self, **kwargs):
             return self.data
 
     return MockHTTPResponse
diff --git a/package/tests/test_data_access/test_repositories/test_graph.py b/package/tests/test_data_access/test_repositories/test_graph.py
index 7485360058..1249460cc1 100644
--- a/package/tests/test_data_access/test_repositories/test_graph.py
+++ b/package/tests/test_data_access/test_repositories/test_graph.py
@@ -48,7 +48,6 @@ def test_filter_by_node_is(self):
         repo = GraphEdgesRepository()
         for edge in [ab, bc, cd, da]:
             repo.add_edge(edge)
-        # pylint: disable=unhashable-member
         assert set(repo.get_edges_by_node_ids({"a", "b", "d"})) == {
             ab,
             da,
diff --git a/package/tests/test_models/test_flowchart.py b/package/tests/test_models/test_flowchart.py
index 87cc505701..d9f7a75235 100644
--- a/package/tests/test_models/test_flowchart.py
+++ b/package/tests/test_models/test_flowchart.py
@@ -281,7 +281,7 @@ def identity(x):
         assert task_node_metadata.filepath == str(
             Path(__file__).relative_to(Path.cwd().parent).expanduser()
         )
-        assert task_node_metadata.parameters == {}
+        assert not task_node_metadata.parameters
         assert (
             task_node_metadata.run_command
             == "kedro run --to-nodes=namespace.identity_node"
@@ -306,7 +306,7 @@ def identity(x):
         assert task_node_metadata.filepath == str(
             Path(__file__).relative_to(Path.cwd().parent).expanduser()
         )
-        assert task_node_metadata.parameters == {}
+        assert not task_node_metadata.parameters
         assert task_node_metadata.run_command == "kedro run --to-nodes=identity_node"
 
     def test_task_node_metadata_no_run_command(self):
@@ -342,7 +342,7 @@ def decorated(x):
         assert task_node_metadata.filepath == str(
             Path(__file__).relative_to(Path.cwd().parent).expanduser()
         )
-        assert task_node_metadata.parameters == {}
+        assert not task_node_metadata.parameters
 
     def test_task_node_metadata_with_partial_func(self):
         kedro_node = node(
@@ -357,7 +357,7 @@ def test_task_node_metadata_with_partial_func(self):
         assert task_node.name == "<partial>"
         assert task_node_metadata.code is None
         assert task_node_metadata.filepath is None
-        assert task_node_metadata.parameters == {}
+        assert not task_node_metadata.parameters
         assert task_node_metadata.inputs == ["x"]
         assert task_node_metadata.outputs == ["y"]
 
@@ -375,8 +375,8 @@ def test_data_node_metadata(self):
         assert data_node_metadata.preview_type == "TablePreview"
         assert data_node_metadata.filepath == "/tmp/dataset.csv"
         assert data_node_metadata.run_command == "kedro run --to-outputs=dataset"
-        assert data_node_metadata.stats["rows"] == 10
-        assert data_node_metadata.stats["columns"] == 2
+        assert data_node_metadata.stats.get("rows") == 10
+        assert data_node_metadata.stats.get("columns") == 2
 
     def test_get_preview_args(self):
         metadata = {"kedro-viz": {"preview_args": {"nrows": 3}}}
@@ -473,8 +473,8 @@ def test_transcoded_data_node_metadata(self):
         assert transcoded_data_node_metadata.transcoded_types == [
             "pandas.csv_dataset.CSVDataset"
         ]
-        assert transcoded_data_node_metadata.stats["rows"] == 10
-        assert transcoded_data_node_metadata.stats["columns"] == 2
+        assert transcoded_data_node_metadata.stats.get("rows") == 10
+        assert transcoded_data_node_metadata.stats.get("columns") == 2
 
     def test_partitioned_data_node_metadata(self):
         dataset = PartitionedDataset(path="partitioned/", dataset="pandas.CSVDataset")
diff --git a/package/tests/test_services/test_modular_pipelines.py b/package/tests/test_services/test_modular_pipelines.py
index 458706a5bd..ee555a8c77 100644
--- a/package/tests/test_services/test_modular_pipelines.py
+++ b/package/tests/test_services/test_modular_pipelines.py
@@ -13,7 +13,6 @@ def test_expand_tree_no_nested_key():
     assert expanded_tree[modular_pipeline_id].name == "data_science"
 
 
-# pylint: disable=unhashable-member
 def test_expanded_tree_with_nested_key():
     modular_pipeline_id = "uk.data_science.model_training"
     modular_pipeline_node = GraphNode.create_modular_pipeline_node(modular_pipeline_id)
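
---

For reviewers unfamiliar with pydantic 2: the change repeated throughout this diff is the standard v1 to v2 migration pattern. `class Config` becomes `model_config = ConfigDict(...)` (`orm_mode` is renamed `from_attributes`), and `@validator(..., always=True)`, which received a `values` dict, becomes `@field_validator` plus `validate_default=True` on the field, with previously validated fields exposed via `ValidationInfo.data`. A minimal sketch of the before/after behaviour, using an illustrative model rather than one from this codebase:

```python
from typing import Optional

from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator


class Entity(BaseModel):
    # v1: `class Config: orm_mode = True` -> v2: ConfigDict(from_attributes=True)
    model_config = ConfigDict(from_attributes=True)

    id: str
    # In v1, `@validator("name", always=True)` also ran on the default value;
    # in v2 the field itself must opt in with `validate_default=True`.
    name: Optional[str] = Field(default=None, validate_default=True)

    @field_validator("name")
    @classmethod
    def set_name(cls, value: Optional[str], info: ValidationInfo) -> str:
        # v1 validators received a `values` dict of previously validated
        # fields; v2 exposes them as `info.data` instead.
        return value or info.data["id"]


print(Entity(id="6ab908b8").name)  # -> "6ab908b8"
```

Without `validate_default=True`, a v2 `field_validator` never fires for a field left at its default, which is why every derived field in this diff (`name`, `namespace`, `modular_pipelines`, and so on) gains that flag alongside `default=None`.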