diff --git a/CHANGELOG.md b/CHANGELOG.md
index 112514a8..bfcb8168 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,12 +4,13 @@
 
 ### Added
 
--   :sparkles: Implement missing PipelineML filtering functionalities to let kedro display resume hints and avoid breaking kedro-viz ([#377, Calychas](https://github.com/Galileo-Galilei/kedro-mlflow/pull/377), [#601, Calychas](https://github.com/Galileo-Galilei/kedro-mlflow/pull/601))
--   :sparkles: Sanitize parameters name with unsupported characters to avoid mlflow errors when logging ([#595, pascalwhoop](https://github.com/Galileo-Galilei/kedro-mlflow/pull/595))
+-   :sparkles: Implement missing ``PipelineML`` filtering functionalities to let ``kedro`` display resume hints and avoid breaking ``kedro-viz`` ([#377, Calychas](https://github.com/Galileo-Galilei/kedro-mlflow/pull/377), [#601, Calychas](https://github.com/Galileo-Galilei/kedro-mlflow/pull/601))
+-   :sparkles: Sanitize parameter names containing unsupported characters to avoid ``mlflow`` errors when logging ([#595, pascalwhoop](https://github.com/Galileo-Galilei/kedro-mlflow/pull/595))
 
 ### Changed
 
--   :pushpin: :sparkles: Removed lower and upper bounds for ``python`` versions of to be constantly in sync with ``kedro`` and make migrations faster [#603](https://github.com/Galileo-Galilei/kedro-mlflow/issues/603)
+-   :pushpin: :sparkles: Removed lower and upper bounds for ``python`` versions to stay constantly in sync with ``kedro`` and make migrations faster ([#603](https://github.com/Galileo-Galilei/kedro-mlflow/issues/603))
+-   :heavy_plus_sign: :heavy_minus_sign: Removed [``pytest-lazy-fixture``](https://pypi.org/project/pytest-lazy-fixture/) in favor of [``pytest-lazy-fixtures``](https://pypi.org/project/pytest-lazy-fixtures/), which is less widely used but actively maintained. The former has [not been updated in two years](https://github.com/TvoroG/pytest-lazy-fixture/issues/63) and is [not compatible with recent ``pytest`` versions](https://github.com/TvoroG/pytest-lazy-fixture/issues). ([#524](https://github.com/Galileo-Galilei/kedro-mlflow/issues/524), [#604](https://github.com/Galileo-Galilei/kedro-mlflow/issues/604))
 
 ## [0.13.2] - 2024-10-15
 
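Reviewer note (not part of the patch): the "Sanitize parameter names" entry above describes the behaviour but not what the sanitization looks like. The sketch below is a hypothetical illustration only; the helper name and regex are assumptions rather than the code from PR #595, and it assumes ``mlflow`` accepts only alphanumerics, underscores, dashes, periods, spaces and slashes in param keys.

```python
import re


def _sanitize_param_name(name: str) -> str:
    """Hypothetical helper: replace characters mlflow rejects in a param key."""
    # Keep alphanumerics, "_", "-", ".", " " and "/"; map everything else to "_".
    return re.sub(r"[^A-Za-z0-9_\-. /]", "_", name)


# Illustrative param name (not taken from the project):
print(_sanitize_param_name("params:model[depth]"))  # -> "params_model_depth_"
```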
diff --git a/setup.py b/setup.py
index b1dff703..5356e4da 100644
--- a/setup.py
+++ b/setup.py
@@ -46,9 +46,9 @@ def _parse_requirements(path, encoding="utf-8"):
             "myst-parser>=0.17.2,<4.1.0",
         ],
         "test": [
-            "pytest>=5.4.0, <8.0.0",  # pytest==8.0.0 breaks pytest-lazy-fixture : https://github.com/TvoroG/pytest-lazy-fixture/issues/65
+            "pytest>=5.4.0, <9.0.0",  # pytest==8.0.0 breaks pytest-lazy-fixture : https://github.com/TvoroG/pytest-lazy-fixture/issues/65
             "pytest-cov>=2.8.0, <6.0.0",
-            "pytest-lazy-fixture>=0.6.0, <1.0.0",
+            "pytest-lazy-fixtures>=1.0.0, <2.0.0",
             "pytest-mock>=3.1.0, <4.0.0",
             "ruff>=0.5.0,<0.8.0",  # ensure consistency with pre-commit
             "scikit-learn>=0.23.0, <1.6.0",
diff --git a/tests/config/test_get_mlflow_config.py b/tests/config/test_get_mlflow_config.py
index f90b5cd8..e3a2415b 100644
--- a/tests/config/test_get_mlflow_config.py
+++ b/tests/config/test_get_mlflow_config.py
@@ -12,6 +12,7 @@
 from kedro.framework.project import _IsSubclassValidator, _ProjectSettings
 from kedro.framework.session import KedroSession
 from kedro.framework.startup import bootstrap_project
+from pytest_lazy_fixtures import lf
 
 
 def _write_yaml(filepath, config):
@@ -199,7 +200,7 @@ def fake_project(tmp_path, local_logging_config):
     "project_settings",
     [
         "",
-        pytest.lazy_fixture("mock_settings_omega_config_loader_class"),
+        lf("mock_settings_omega_config_loader_class"),
     ],
 )
 def test_mlflow_config_correctly_set(kedro_project, project_settings):
diff --git a/tests/framework/cli/test_cli_modelify.py b/tests/framework/cli/test_cli_modelify.py
index 34c7661e..6d445d7c 100644
--- a/tests/framework/cli/test_cli_modelify.py
+++ b/tests/framework/cli/test_cli_modelify.py
@@ -12,6 +12,7 @@
 from kedro.framework.cli.starters import TEMPLATE_PATH
 from kedro.framework.session import KedroSession
 from kedro.framework.startup import bootstrap_project
+from pytest_lazy_fixtures import lf
 
 from kedro_mlflow.framework.cli.cli import (
     modelify as cli_modelify,  # import after changing the path to avoid registering the project, else import pippeliens does not work!
@@ -228,10 +229,10 @@ def register_pipelines():
 @pytest.mark.parametrize(
     "example_repo,artifacts_list,inside_subdirectory",
     [
-        (pytest.lazy_fixture("kp_for_modelify"), ["trained_model"], False),
-        (pytest.lazy_fixture("kp_for_modelify"), ["trained_model"], True),
+        (lf("kp_for_modelify"), ["trained_model"], False),
+        (lf("kp_for_modelify"), ["trained_model"], True),
         (
-            pytest.lazy_fixture("kp_for_modelify_with_parameters"),
+            lf("kp_for_modelify_with_parameters"),
             ["trained_model", "params:my_param"],
             False,
         ),
diff --git a/tests/framework/hooks/test_hook_pipeline_ml.py b/tests/framework/hooks/test_hook_pipeline_ml.py
index 7c954568..82c8d0b4 100644
--- a/tests/framework/hooks/test_hook_pipeline_ml.py
+++ b/tests/framework/hooks/test_hook_pipeline_ml.py
@@ -12,6 +12,7 @@
 from kedro_datasets.pickle import PickleDataset
 from mlflow.models import infer_signature
 from mlflow.tracking import MlflowClient
+from pytest_lazy_fixtures import lf
 
 from kedro_mlflow.framework.hooks.mlflow_hook import MlflowHook
 from kedro_mlflow.pipeline import pipeline_ml_factory
@@ -236,8 +237,8 @@ def dummy_catalog_dataset_factory(tmp_path):
 @pytest.mark.parametrize(
     "pipeline_to_run",
     [
-        (pytest.lazy_fixture("dummy_pipeline")),
-        (pytest.lazy_fixture("dummy_pipeline_ml")),
+        (lf("dummy_pipeline")),
+        (lf("dummy_pipeline_ml")),
     ],
 )
 def test_mlflow_hook_save_pipeline_ml(
@@ -502,10 +503,10 @@ def test_mlflow_hook_save_pipeline_ml_with_parameters(
     "model_signature,expected_signature",
     (
         [None, None],
-        ["auto", pytest.lazy_fixture("dummy_signature")],
+        ["auto", lf("dummy_signature")],
         [
-            pytest.lazy_fixture("dummy_signature"),
-            pytest.lazy_fixture("dummy_signature"),
+            lf("dummy_signature"),
+            lf("dummy_signature"),
         ],
     ),
 )
diff --git a/tests/io/artifacts/test_mlflow_artifact_dataset.py b/tests/io/artifacts/test_mlflow_artifact_dataset.py
index 85bdf079..8d67ee60 100644
--- a/tests/io/artifacts/test_mlflow_artifact_dataset.py
+++ b/tests/io/artifacts/test_mlflow_artifact_dataset.py
@@ -8,7 +8,7 @@
 from kedro_datasets.pandas import CSVDataset
 from kedro_datasets.partitions import PartitionedDataset
 from kedro_datasets.pickle import PickleDataset
-from pytest_lazyfixture import lazy_fixture
+from pytest_lazy_fixtures import lf
 
 from kedro_mlflow.io.artifacts import MlflowArtifactDataset
 
@@ -28,17 +28,17 @@ def df2():
 @pytest.mark.parametrize(
     "dataset,extension,data,artifact_path",
     [
-        (CSVDataset, ".csv", lazy_fixture("df1"), None),
-        ("pandas.CSVDataset", ".csv", lazy_fixture("df1"), None),
-        (PickleDataset, ".pkl", lazy_fixture("df1"), None),
-        ("pickle.PickleDataset", ".pkl", lazy_fixture("df1"), None),
-        (CSVDataset, ".csv", lazy_fixture("df1"), "artifact_dir"),
-        ("pandas.CSVDataset", ".csv", lazy_fixture("df1"), "artifact_dir"),
-        (PickleDataset, ".pkl", lazy_fixture("df1"), "artifact_dir"),
+        (CSVDataset, ".csv", lf("df1"), None),
+        ("pandas.CSVDataset", ".csv", lf("df1"), None),
+        (PickleDataset, ".pkl", lf("df1"), None),
+        ("pickle.PickleDataset", ".pkl", lf("df1"), None),
+        (CSVDataset, ".csv", lf("df1"), "artifact_dir"),
+        ("pandas.CSVDataset", ".csv", lf("df1"), "artifact_dir"),
+        (PickleDataset, ".pkl", lf("df1"), "artifact_dir"),
         (
             "pickle.PickleDataset",
             ".pkl",
-            lazy_fixture("df1"),
+            lf("df1"),
             "artifact_dir",
         ),
     ],
diff --git a/tests/io/metrics/test_mlflow_metrics_dataset.py b/tests/io/metrics/test_mlflow_metrics_dataset.py
index d177af99..d47120fe 100644
--- a/tests/io/metrics/test_mlflow_metrics_dataset.py
+++ b/tests/io/metrics/test_mlflow_metrics_dataset.py
@@ -4,7 +4,7 @@
 import pytest
 from kedro.io import DatasetError
 from mlflow.tracking import MlflowClient
-from pytest_lazyfixture import lazy_fixture
+from pytest_lazy_fixtures import lf
 
 from kedro_mlflow.io.metrics import MlflowMetricsHistoryDataset
 
@@ -67,12 +67,12 @@ def metrics3():
 @pytest.mark.parametrize(
     "data, prefix",
     [
-        (lazy_fixture("metrics"), None),
-        (lazy_fixture("metrics"), "test"),
-        (lazy_fixture("metrics2"), None),
-        (lazy_fixture("metrics2"), "test"),
-        (lazy_fixture("metrics3"), None),
-        (lazy_fixture("metrics3"), "test"),
+        (lf("metrics"), None),
+        (lf("metrics"), "test"),
+        (lf("metrics2"), None),
+        (lf("metrics2"), "test"),
+        (lf("metrics3"), None),
+        (lf("metrics3"), "test"),
     ],
 )
 def test_mlflow_metrics_dataset_saved_and_logged(mlflow_client, data, prefix):
diff --git a/tests/io/models/test_mlflow_model_local_filesystem_dataset.py b/tests/io/models/test_mlflow_model_local_filesystem_dataset.py
index a8e4f964..757f9df5 100644
--- a/tests/io/models/test_mlflow_model_local_filesystem_dataset.py
+++ b/tests/io/models/test_mlflow_model_local_filesystem_dataset.py
@@ -6,6 +6,7 @@
 from kedro.io import DataCatalog, MemoryDataset
 from kedro.pipeline import Pipeline, node
 from kedro_datasets.pickle import PickleDataset
+from pytest_lazy_fixtures import lf
 from sklearn.linear_model import LinearRegression
 
 from kedro_mlflow.io.models import MlflowModelLocalFileSystemDataset
@@ -150,8 +151,8 @@ def test_save_load_local(linreg_path, linreg_model, versioned):
 @pytest.mark.parametrize(
     "pipeline",
     [
-        (pytest.lazy_fixture("pipeline_ml_obj")),  # must work for PipelineML
-        (pytest.lazy_fixture("pipeline_inference")),  # must work for Pipeline
+        (lf("pipeline_ml_obj")),  # must work for PipelineML
+        (lf("pipeline_inference")),  # must work for Pipeline
     ],
 )
 def test_pyfunc_flavor_python_model_save_and_load(
diff --git a/tests/io/models/test_mlflow_model_tracking_dataset.py b/tests/io/models/test_mlflow_model_tracking_dataset.py
index ff7bcb56..4e57ca18 100644
--- a/tests/io/models/test_mlflow_model_tracking_dataset.py
+++ b/tests/io/models/test_mlflow_model_tracking_dataset.py
@@ -7,6 +7,7 @@
 from kedro.io.core import DatasetError
 from kedro.pipeline import Pipeline, node
 from kedro_datasets.pickle import PickleDataset
+from pytest_lazy_fixtures import lf
 from sklearn.linear_model import LinearRegression
 
 from kedro_mlflow.io.models import MlflowModelTrackingDataset
@@ -265,8 +266,8 @@ def test_load_without_run_id_nor_active_run(tracking_uri):
 @pytest.mark.parametrize(
     "pipeline",
     [
-        (pytest.lazy_fixture("pipeline_ml_obj")),  # must work for PipelineML
-        (pytest.lazy_fixture("pipeline_inference")),  # must work for Pipeline
+        (lf("pipeline_ml_obj")),  # must work for PipelineML
+        (lf("pipeline_inference")),  # must work for Pipeline
     ],
 )
 def test_pyfunc_flavor_python_model_save_and_load(
diff --git a/tests/mlflow/test_kedro_pipeline_model.py b/tests/mlflow/test_kedro_pipeline_model.py
index 696ee76a..673497b5 100644
--- a/tests/mlflow/test_kedro_pipeline_model.py
+++ b/tests/mlflow/test_kedro_pipeline_model.py
@@ -7,6 +7,7 @@
 from kedro.io import DataCatalog, MemoryDataset
 from kedro.pipeline import Pipeline, node
 from kedro_datasets.pickle import PickleDataset
+from pytest_lazy_fixtures import lf
 from sklearn.linear_model import LinearRegression
 
 from kedro_mlflow.io.models import MlflowModelLocalFileSystemDataset
@@ -475,26 +476,26 @@ def predict_fun(model, data):
     "pipeline,catalog,input_name,result",
     [
         (
-            pytest.lazy_fixture("pipeline_inference_dummy"),
-            pytest.lazy_fixture("dummy_catalog"),
+            lf("pipeline_inference_dummy"),
+            lf("dummy_catalog"),
             "raw_data",
             {"raw_data", "model"},
         ),
         (
-            pytest.lazy_fixture("pipeline_inference_with_intermediary_artifacts"),
-            pytest.lazy_fixture("catalog_with_encoder"),
+            lf("pipeline_inference_with_intermediary_artifacts"),
+            lf("catalog_with_encoder"),
             "raw_data",
             {"raw_data", "model", "encoder"},
         ),
         (
-            pytest.lazy_fixture("pipeline_inference_with_inputs_artifacts"),
-            pytest.lazy_fixture("catalog_with_stopwords"),
+            lf("pipeline_inference_with_inputs_artifacts"),
+            lf("catalog_with_stopwords"),
             "data",
             {"data", "model", "stopwords_from_nltk"},
         ),
         (
-            pytest.lazy_fixture("pipeline_inference_with_parameters"),
-            pytest.lazy_fixture("catalog_with_parameters"),
+            lf("pipeline_inference_with_parameters"),
+            lf("catalog_with_parameters"),
             "data",
             {
                 "data",
@@ -548,26 +549,26 @@ def test_catalog_extraction_unpersisted_inference_input(pipeline_inference_dummy
     "pipeline,catalog,input_name,result",
     [
         (
-            pytest.lazy_fixture("pipeline_inference_dummy"),
-            pytest.lazy_fixture("dummy_catalog"),
+            lf("pipeline_inference_dummy"),
+            lf("dummy_catalog"),
             "raw_data",
             pd.DataFrame([1, 2, 3]),
         ),
         (
-            pytest.lazy_fixture("pipeline_inference_with_intermediary_artifacts"),
-            pytest.lazy_fixture("catalog_with_encoder"),
+            lf("pipeline_inference_with_intermediary_artifacts"),
+            lf("catalog_with_encoder"),
             "raw_data",
             pd.DataFrame([1, 2, 3]),
         ),
         (
-            pytest.lazy_fixture("pipeline_inference_with_inputs_artifacts"),
-            pytest.lazy_fixture("catalog_with_stopwords"),
+            lf("pipeline_inference_with_inputs_artifacts"),
+            lf("catalog_with_stopwords"),
             "data",
             pd.DataFrame([1, 2, 3]),
         ),
         (
-            pytest.lazy_fixture("pipeline_inference_with_parameters"),
-            pytest.lazy_fixture("catalog_with_parameters"),
+            lf("pipeline_inference_with_parameters"),
+            lf("catalog_with_parameters"),
             "data",
             pd.DataFrame([0, 1, 1]),
         ),
diff --git a/tests/pipeline/test_pipeline_ml.py b/tests/pipeline/test_pipeline_ml.py
index 63d3763e..1b120dc7 100644
--- a/tests/pipeline/test_pipeline_ml.py
+++ b/tests/pipeline/test_pipeline_ml.py
@@ -2,6 +2,7 @@
 from kedro.io import DataCatalog, MemoryDataset
 from kedro.pipeline import Pipeline, node, pipeline
 from kedro_datasets.pandas import CSVDataset
+from pytest_lazy_fixtures import lf
 
 from kedro_mlflow.pipeline import pipeline_ml_factory
 from kedro_mlflow.pipeline.pipeline_ml import KedroMlflowPipelineMLError, PipelineML
@@ -475,8 +476,8 @@ def test_pipeline_ml_filtering(
 @pytest.mark.parametrize(
     "pipeline_ml_obj",
     [
-        pytest.lazy_fixture("pipeline_ml_with_tag"),
-        pytest.lazy_fixture("pipeline_ml_with_intermediary_artifacts"),
+        lf("pipeline_ml_with_tag"),
+        lf("pipeline_ml_with_intermediary_artifacts"),
     ],
 )
 @pytest.mark.parametrize(
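Reviewer note (not part of the patch): every test change above is the same mechanical substitution, replacing ``pytest.lazy_fixture(...)`` / ``lazy_fixture(...)`` with ``lf(...)`` from ``pytest-lazy-fixtures``. A minimal, self-contained sketch of the new pattern follows; the fixture name ``df1`` mirrors the one in ``test_mlflow_artifact_dataset.py``, while the test itself is illustrative only.

```python
import pandas as pd
import pytest
from pytest_lazy_fixtures import lf


@pytest.fixture
def df1():
    return pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})


# `lf("df1")` is resolved to the fixture's value when the test runs,
# which is what `pytest.lazy_fixture("df1")` used to do before pytest 8.
@pytest.mark.parametrize("data", [lf("df1")])
def test_dataframe_is_not_empty(data):
    assert not data.empty
```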