From bfca775142fef27cb9e703df8287225394e82d31 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Wed, 23 Aug 2023 02:43:41 +0000 Subject: [PATCH 01/14] def dtensor_from_fn first edition --- build/test/auto_parallel/test_dist_tensor.py | 157 +++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 build/test/auto_parallel/test_dist_tensor.py diff --git a/build/test/auto_parallel/test_dist_tensor.py b/build/test/auto_parallel/test_dist_tensor.py new file mode 100644 index 00000000000000..a53d32f6cd6620 --- /dev/null +++ b/build/test/auto_parallel/test_dist_tensor.py @@ -0,0 +1,157 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +import paddle +import paddle.distributed as dist + +class TestDistTensor(unittest.TestCase): + def test_dist_tensor_creation(self): + shape = [10, 5] + mesh = dist.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]) + dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) + + # create dist tensor using numpy + dist_tensor_with_numpy = dist.shard_tensor( + np.ones(shape, dtype=np.float32), dist_attr=dist_attr + ) + + # create dist tensor using tensor + dist_tensor_with_tensor = dist.shard_tensor( + paddle.ones(shape), dist_attr=dist_attr + ) + + # create normal tensor + tensor = paddle.ones(shape) + + # test dist tensor properties + self.assertEqual(dist_tensor_with_numpy.shape, shape) + self.assertEqual(dist_tensor_with_tensor.shape, shape) + self.assertEqual(dist_tensor_with_numpy.is_dist(), True) + self.assertEqual(dist_tensor_with_tensor.is_dist(), True) + self.assertEqual(tensor.is_dist(), False) + self.assertEqual( + str(dist_tensor_with_numpy), str(dist_tensor_with_tensor) + ) + self.assertEqual(dist_tensor_with_numpy.dist_attr, dist_attr) + self.assertEqual(dist_tensor_with_tensor.dist_attr, dist_attr) + + +class TestDistributedTensor(unittest.TestCase): + def test_dtensor_from_fn(self): + # Define a function for generating a tensor + def generate_tensor_ones(): + return paddle.ones(shape=[2, 3]) + + def generate_tensor_zeros(): + return paddle.zeros(shape=[2, 3]) + + def generate_tensor_random(): + return paddle.random(shape=[2, 3]) + + def generate_tensor_range(): + return paddle.range(start=1, end=7).reshape([2, 3]) + + + # Create a distributed attribute + mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) + dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) + + # Test with generate_tensor_ones() + # Call the function dtensor_from_fn with dist_attr parameter + result = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3]) + + # Verify the result + self.assertIsInstance(result, paddle.Tensor) + self.assertEqual(result.shape, [2, 3]) + self.assertEqual(result.dist_attr, dist_attr) + + # Test with generate_tensor_zeros() + result_zeros = dist.dtensor_from_fn(paddle.zeros, dist_attr=dist_attr, shape=[2, 3]) + self.assertIsInstance(result_zeros, paddle.Tensor) + 
self.assertEqual(result_zeros.shape, [2, 3]) + self.assertEqual(result_zeros.dist_attr, dist_attr) + + # Test with generate_tensor_random() + result_random = dist.dtensor_from_fn(paddle.random, dist_attr=dist_attr, shape=[2, 3]) + self.assertIsInstance(result_random, paddle.Tensor) + self.assertEqual(result_random.shape, [2, 3]) + self.assertEqual(result_random.dist_attr, dist_attr) + + # Test with generate_tensor_range() + result_range = dist.dtensor_from_fn(paddle.range, dist_attr=dist_attr, shape=[2, 3]) + self.assertIsInstance(result_range, paddle.Tensor) + self.assertEqual(result_range.shape, [2, 3]) + self.assertEqual(result_range.dist_attr, dist_attr) + +""" + # Additional assertions + self.assertTrue((result.numpy() == 1).all()) # Check tensor values + + # Test with another function + def generate_tensor_zeros(): + return paddle.zeros(shape=[4, 2]) + + # Call the function dtensor_from_fn with dist_attr parameter and another function + result_zeros = dist.dtensor_from_fn(generate_tensor_zeros, dist_attr=dist_attr) + + # Verify the result + self.assertIsInstance(result_zeros, paddle.Tensor) + self.assertEqual(result_zeros.shape, [4, 2]) + self.assertEqual(result_zeros.dist_attr, dist_attr) + + # Additional assertions + self.assertTrue((result_zeros.numpy() == 0).all()) # Check tensor values + + # Test static mode (NotImplementedError should be raised) + with self.assertRaises(NotImplementedError): + with paddle.static.program_guard(paddle.static.Program()): + # Call the function dtensor_from_fn with dist_attr parameter + result = dist.dtensor_from_fn(generate_tensor, dist_attr) + + def test_dtensor_from_fn_additional_args(self): + # Define a function for generating a tensor with additional arguments + def generate_tensor_with_args(shape, value): + return paddle.full(shape=shape, fill_value=value) + + # Create a distributed attribute + mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) + dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) + + + # Test dynamic mode + with paddle.fluid.dygraph.guard(): + # Call the function dtensor_from_fn with additional arguments and dist_attr parameter + result = dist.dtensor_from_fn(generate_tensor_with_args, dist_attr, shape=[2, 3], value=5) + + # Verify the result + self.assertIsInstance(result, paddle.Tensor) + self.assertEqual(result.shape, [2, 3]) + self.assertEqual(result.dist_attr, dist_attr) + self.assertTrue((result.numpy() == 5).all()) + + # Additional assertions + self.assertEqual(result.numpy().sum(), 30) # Check tensor sum + + # Test static mode (NotImplementedError should be raised) + with self.assertRaises(NotImplementedError): + with paddle.static.program_guard(paddle.static.Program()): + # Call the function dtensor_from_fn with additional arguments and dist_attr parameter + result = dist.dtensor_from_fn(generate_tensor_with_args, dist_attr, shape=[2, 3], value=5) +""" +if __name__ == "__main__": + unittest.main() \ No newline at end of file From d20a0328f8f2746b506798187f2b940870de2dc1 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Wed, 23 Aug 2023 02:45:02 +0000 Subject: [PATCH 02/14] dtensor_from_fn first edition --- python/paddle/distributed/__init__.py | 2 + .../paddle/distributed/auto_parallel/api.py | 39 +++++++++++++++++- test/auto_parallel/test_dist_tensor.py | 41 ++++++++++++++++++- 3 files changed, 79 insertions(+), 3 deletions(-) diff --git a/python/paddle/distributed/__init__.py b/python/paddle/distributed/__init__.py index 183f307607c362..fe914bbb3422a5 100644 --- 
a/python/paddle/distributed/__init__.py +++ b/python/paddle/distributed/__init__.py @@ -66,6 +66,7 @@ from .auto_parallel import shard_op # noqa: F401 from .auto_parallel.api import shard_tensor # noqa: F401 +from .auto_parallel.api import dtensor_from_fn # noqa: F401 from .fleet import BoxPSDataset # noqa: F401 @@ -126,4 +127,5 @@ "ProcessMesh", "DistAttr", "shard_tensor", + "dtensor_from_fn", ] diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py index b25799d058ad2f..acbc96a748d7ed 100644 --- a/python/paddle/distributed/auto_parallel/api.py +++ b/python/paddle/distributed/auto_parallel/api.py @@ -24,7 +24,7 @@ class DistAttr(core.TensorDistAttr): """ DistAttr specifies how tensors are distributed or sliced on ProcessMesh. - + Args: mesh(paddle.distributed.ProcessMesh): The `ProcessMesh` object describes the Cartesian topology of the used processes. sharding_specs(list[str|None]): The specification describing how to shard the Tensor. @@ -124,3 +124,40 @@ def shard_tensor( raise NotImplementedError( "The `paddle.distributed.shard_tensor` for static mode will be implemented later." ) + +def dtensor_from_fn( + fn, dist_attr, *args, **kwargs +): + """ + Construct a Distributed Tensor from a function of arguments. + + Args: + fn (callable): A callable function that takes arguments of Distributed Tensor and returns tensor. + dist_attr(paddle.distributed.DistAttr): Specify how tensors are distributed or sliced on ProcessMesh. + *args: A list of arguments to be passed to the ``fn`` function. + **kwargs: A list of arguments to be passed to the ``fn`` function. + + Retruns: + Tensor: A Tensor constructed from ``fn`` with distributed attributes. + + Examples: + + .. code-block:: python + + import paddle + import paddle.distribute as dist + + def generate_tensor(): + return paddle.ones(shape=[2, 3]) + + # Create a distributed attribute + mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) + dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) + + # Call the function dtensor_from_fn with dist_attr parameter + d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3]) + + print(d_tensor) + """ + tensor = fn(*args, **kwargs) + return shard_tensor(tensor, dist_attr=dist_attr) diff --git a/test/auto_parallel/test_dist_tensor.py b/test/auto_parallel/test_dist_tensor.py index 61705a322e2d60..680f07a90b755a 100644 --- a/test/auto_parallel/test_dist_tensor.py +++ b/test/auto_parallel/test_dist_tensor.py @@ -19,7 +19,6 @@ import paddle import paddle.distributed as dist - class TestDistTensor(unittest.TestCase): def test_dist_tensor_creation(self): shape = [10, 5] @@ -51,6 +50,44 @@ def test_dist_tensor_creation(self): self.assertEqual(dist_tensor_with_numpy.dist_attr, dist_attr) self.assertEqual(dist_tensor_with_tensor.dist_attr, dist_attr) + +class TestDistributedTensor(unittest.TestCase): + def test_dtensor_from_fn(self): + # Define a function for generating a tensor + def generate_tensor_ones(): + return paddle.ones(shape=[2, 3]) + + def generate_tensor_zeros(): + return paddle.zeros(shape=[2, 3]) + + def generate_tensor_random(): + return paddle.rand(shape=[2, 3]) + + + # Create a distributed attribute + mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) + dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) + + # Test with generate_tensor_ones() + # Call the function dtensor_from_fn with dist_attr parameter + result = dist.dtensor_from_fn(paddle.ones, 
dist_attr=dist_attr, shape=[2, 3]) + + # Verify the result + self.assertIsInstance(result, paddle.Tensor) + self.assertEqual(result.shape, [2, 3]) + self.assertEqual(result.dist_attr, dist_attr) + + # Test with generate_tensor_zeros() + result_zeros = dist.dtensor_from_fn(paddle.zeros, dist_attr=dist_attr, shape=[2, 3]) + self.assertIsInstance(result_zeros, paddle.Tensor) + self.assertEqual(result_zeros.shape, [2, 3]) + self.assertEqual(result_zeros.dist_attr, dist_attr) + + # Test with generate_tensor_random() + result_random = dist.dtensor_from_fn(paddle.rand, dist_attr=dist_attr, shape=[2, 3]) + self.assertIsInstance(result_random, paddle.Tensor) + self.assertEqual(result_random.shape, [2, 3]) + self.assertEqual(result_random.dist_attr, dist_attr) if __name__ == "__main__": - unittest.main() + unittest.main() \ No newline at end of file From 3902f7d1e3cea95aeff7449344af34ca8afc0283 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Thu, 24 Aug 2023 02:28:12 +0000 Subject: [PATCH 03/14] Delete file /home/Paddle/build/test/auto_parallel/test_dist_tensor.py --- build/test/auto_parallel/test_dist_tensor.py | 157 ------------------- 1 file changed, 157 deletions(-) delete mode 100644 build/test/auto_parallel/test_dist_tensor.py diff --git a/build/test/auto_parallel/test_dist_tensor.py b/build/test/auto_parallel/test_dist_tensor.py deleted file mode 100644 index a53d32f6cd6620..00000000000000 --- a/build/test/auto_parallel/test_dist_tensor.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
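# For orientation while reading this series (a sketch, not code from the
# patches themselves; it assumes a Paddle build with PATCH 02/14 applied):
# dtensor_from_fn is a thin wrapper that builds a tensor with
# fn(*args, **kwargs) and then shards it, so d1 and d2 below are constructed
# identically. Mesh and sharding specs mirror the surrounding tests.
import paddle
import paddle.distributed as dist

mesh = dist.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=["x", "y"])

# One step: pass the creation function and its kwargs to the wrapper.
d1 = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3])

# Two steps: what the wrapper does internally (see api.py in PATCH 02/14).
d2 = dist.shard_tensor(paddle.ones(shape=[2, 3]), dist_attr=dist_attr)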
- -import unittest - -import numpy as np - -import paddle -import paddle.distributed as dist - -class TestDistTensor(unittest.TestCase): - def test_dist_tensor_creation(self): - shape = [10, 5] - mesh = dist.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]) - dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) - - # create dist tensor using numpy - dist_tensor_with_numpy = dist.shard_tensor( - np.ones(shape, dtype=np.float32), dist_attr=dist_attr - ) - - # create dist tensor using tensor - dist_tensor_with_tensor = dist.shard_tensor( - paddle.ones(shape), dist_attr=dist_attr - ) - - # create normal tensor - tensor = paddle.ones(shape) - - # test dist tensor properties - self.assertEqual(dist_tensor_with_numpy.shape, shape) - self.assertEqual(dist_tensor_with_tensor.shape, shape) - self.assertEqual(dist_tensor_with_numpy.is_dist(), True) - self.assertEqual(dist_tensor_with_tensor.is_dist(), True) - self.assertEqual(tensor.is_dist(), False) - self.assertEqual( - str(dist_tensor_with_numpy), str(dist_tensor_with_tensor) - ) - self.assertEqual(dist_tensor_with_numpy.dist_attr, dist_attr) - self.assertEqual(dist_tensor_with_tensor.dist_attr, dist_attr) - - -class TestDistributedTensor(unittest.TestCase): - def test_dtensor_from_fn(self): - # Define a function for generating a tensor - def generate_tensor_ones(): - return paddle.ones(shape=[2, 3]) - - def generate_tensor_zeros(): - return paddle.zeros(shape=[2, 3]) - - def generate_tensor_random(): - return paddle.random(shape=[2, 3]) - - def generate_tensor_range(): - return paddle.range(start=1, end=7).reshape([2, 3]) - - - # Create a distributed attribute - mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) - dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) - - # Test with generate_tensor_ones() - # Call the function dtensor_from_fn with dist_attr parameter - result = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3]) - - # Verify the result - self.assertIsInstance(result, paddle.Tensor) - self.assertEqual(result.shape, [2, 3]) - self.assertEqual(result.dist_attr, dist_attr) - - # Test with generate_tensor_zeros() - result_zeros = dist.dtensor_from_fn(paddle.zeros, dist_attr=dist_attr, shape=[2, 3]) - self.assertIsInstance(result_zeros, paddle.Tensor) - self.assertEqual(result_zeros.shape, [2, 3]) - self.assertEqual(result_zeros.dist_attr, dist_attr) - - # Test with generate_tensor_random() - result_random = dist.dtensor_from_fn(paddle.random, dist_attr=dist_attr, shape=[2, 3]) - self.assertIsInstance(result_random, paddle.Tensor) - self.assertEqual(result_random.shape, [2, 3]) - self.assertEqual(result_random.dist_attr, dist_attr) - - # Test with generate_tensor_range() - result_range = dist.dtensor_from_fn(paddle.range, dist_attr=dist_attr, shape=[2, 3]) - self.assertIsInstance(result_range, paddle.Tensor) - self.assertEqual(result_range.shape, [2, 3]) - self.assertEqual(result_range.dist_attr, dist_attr) - -""" - # Additional assertions - self.assertTrue((result.numpy() == 1).all()) # Check tensor values - - # Test with another function - def generate_tensor_zeros(): - return paddle.zeros(shape=[4, 2]) - - # Call the function dtensor_from_fn with dist_attr parameter and another function - result_zeros = dist.dtensor_from_fn(generate_tensor_zeros, dist_attr=dist_attr) - - # Verify the result - self.assertIsInstance(result_zeros, paddle.Tensor) - self.assertEqual(result_zeros.shape, [4, 2]) - self.assertEqual(result_zeros.dist_attr, dist_attr) - - # Additional assertions - 
self.assertTrue((result_zeros.numpy() == 0).all()) # Check tensor values - - # Test static mode (NotImplementedError should be raised) - with self.assertRaises(NotImplementedError): - with paddle.static.program_guard(paddle.static.Program()): - # Call the function dtensor_from_fn with dist_attr parameter - result = dist.dtensor_from_fn(generate_tensor, dist_attr) - - def test_dtensor_from_fn_additional_args(self): - # Define a function for generating a tensor with additional arguments - def generate_tensor_with_args(shape, value): - return paddle.full(shape=shape, fill_value=value) - - # Create a distributed attribute - mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) - dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) - - - # Test dynamic mode - with paddle.fluid.dygraph.guard(): - # Call the function dtensor_from_fn with additional arguments and dist_attr parameter - result = dist.dtensor_from_fn(generate_tensor_with_args, dist_attr, shape=[2, 3], value=5) - - # Verify the result - self.assertIsInstance(result, paddle.Tensor) - self.assertEqual(result.shape, [2, 3]) - self.assertEqual(result.dist_attr, dist_attr) - self.assertTrue((result.numpy() == 5).all()) - - # Additional assertions - self.assertEqual(result.numpy().sum(), 30) # Check tensor sum - - # Test static mode (NotImplementedError should be raised) - with self.assertRaises(NotImplementedError): - with paddle.static.program_guard(paddle.static.Program()): - # Call the function dtensor_from_fn with additional arguments and dist_attr parameter - result = dist.dtensor_from_fn(generate_tensor_with_args, dist_attr, shape=[2, 3], value=5) -""" -if __name__ == "__main__": - unittest.main() \ No newline at end of file From 50fd5a951ac0432722d3547026bf18ce64716511 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Mon, 28 Aug 2023 02:40:20 +0000 Subject: [PATCH 04/14] polish code format --- .../paddle/distributed/auto_parallel/api.py | 34 ++++---------- test/auto_parallel/test_dist_tensor.py | 47 ++++++++++++------- 2 files changed, 39 insertions(+), 42 deletions(-) diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py index 1bcce2f5a882d7..7769eac884dc27 100644 --- a/python/paddle/distributed/auto_parallel/api.py +++ b/python/paddle/distributed/auto_parallel/api.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import paddle from paddle.distributed.auto_parallel.interface import ( shard_tensor as shard_tensor_static, @@ -26,7 +27,7 @@ class DistAttr(core.TensorDistAttr): """ DistAttr specifies how tensors are distributed or sliced on ProcessMesh. - + Args: mesh(paddle.distributed.ProcessMesh): The `ProcessMesh` object describes the Cartesian topology of the used processes. sharding_specs(list[str|None]): The specification describing how to shard the Tensor. 
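# A concrete reading of the DistAttr docstring above (sketch only, assuming
# the semantics it states): sharding_specs carries one entry per tensor axis,
# naming the mesh dimension that axis is split along, or None to replicate
# the axis on every process of the mesh.
import paddle
import paddle.distributed as dist

mesh = dist.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])

# Axis 0 split along mesh dim "x"; axis 1 replicated.
row_sharded = dist.DistAttr(mesh=mesh, sharding_specs=["x", None])

# Axis 0 split along "x", axis 1 along "y": fully sharded.
fully_sharded = dist.DistAttr(mesh=mesh, sharding_specs=["x", "y"])

d = dist.shard_tensor(paddle.ones([10, 5]), dist_attr=fully_sharded)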
@@ -69,19 +70,6 @@ def __init__(self, mesh, sharding_specs): self.process_mesh = mesh self.dims_mapping = dims_mapping - self.mark_annotated("process_mesh") - self.mark_annotated("dims_mapping") - - @property - def sharding_specs(self): - """ - Get sharding_specs of the dist_attr - - Returns: - list[str]: sharding_specs - """ - return self._sharding_specs - def shard_tensor( data, dtype=None, place=None, stop_gradient=True, dist_attr=None @@ -143,31 +131,27 @@ def shard_tensor( data, dist_attr.process_mesh, dist_attr.sharding_specs ) -def dtensor_from_fn( - fn, dist_attr, *args, **kwargs -): + +def dtensor_from_fn(fn, dist_attr, *args, **kwargs): """ - Construct a Distributed Tensor from a function of arguments. + Construct a Distributed Tensor from a paddle api function of arguments. Args: - fn (callable): A callable function that takes arguments of Distributed Tensor and returns tensor. + fn (callable): A paddle api function that takes arguments of *args, **kwargs and returns tensor. dist_attr(paddle.distributed.DistAttr): Specify how tensors are distributed or sliced on ProcessMesh. *args: A list of arguments to be passed to the ``fn`` function. **kwargs: A list of arguments to be passed to the ``fn`` function. Retruns: - Tensor: A Tensor constructed from ``fn`` with distributed attributes. + Tensor: A Tensor constructed from ``fn`` with distributed attributes. Examples: - .. code-block:: python + .. code-block:: python import paddle import paddle.distribute as dist - def generate_tensor(): - return paddle.ones(shape=[2, 3]) - # Create a distributed attribute mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) @@ -176,6 +160,6 @@ def generate_tensor(): d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3]) print(d_tensor) - """ + """ tensor = fn(*args, **kwargs) return shard_tensor(tensor, dist_attr=dist_attr) diff --git a/test/auto_parallel/test_dist_tensor.py b/test/auto_parallel/test_dist_tensor.py index aae178b5d4e7d6..c6660c3ae63b66 100644 --- a/test/auto_parallel/test_dist_tensor.py +++ b/test/auto_parallel/test_dist_tensor.py @@ -20,6 +20,7 @@ import paddle.distributed as dist import paddle.nn.functional as F + class TestDistTensor(unittest.TestCase): def test_dist_tensor_creation(self): shape = [10, 5] @@ -51,27 +52,18 @@ def test_dist_tensor_creation(self): self.assertEqual(dist_tensor_with_numpy.dist_attr, dist_attr) self.assertEqual(dist_tensor_with_tensor.dist_attr, dist_attr) - + class TestDistributedTensor(unittest.TestCase): def test_dtensor_from_fn(self): - # Define a function for generating a tensor - def generate_tensor_ones(): - return paddle.ones(shape=[2, 3]) - - def generate_tensor_zeros(): - return paddle.zeros(shape=[2, 3]) - - def generate_tensor_random(): - return paddle.rand(shape=[2, 3]) - - # Create a distributed attribute mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) - + # Test with generate_tensor_ones() # Call the function dtensor_from_fn with dist_attr parameter - result = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3]) + result = dist.dtensor_from_fn( + paddle.ones, dist_attr=dist_attr, shape=[2, 3] + ) # Verify the result self.assertIsInstance(result, paddle.Tensor) @@ -79,17 +71,38 @@ def generate_tensor_random(): self.assertEqual(result.dist_attr, dist_attr) # Test with generate_tensor_zeros() - result_zeros = 
dist.dtensor_from_fn(paddle.zeros, dist_attr=dist_attr, shape=[2, 3]) + result_zeros = dist.dtensor_from_fn( + paddle.zeros, dist_attr=dist_attr, shape=[2, 3] + ) self.assertIsInstance(result_zeros, paddle.Tensor) self.assertEqual(result_zeros.shape, [2, 3]) self.assertEqual(result_zeros.dist_attr, dist_attr) # Test with generate_tensor_random() - result_random = dist.dtensor_from_fn(paddle.rand, dist_attr=dist_attr, shape=[2, 3]) + result_random = dist.dtensor_from_fn( + paddle.rand, dist_attr=dist_attr, shape=[2, 3] + ) self.assertIsInstance(result_random, paddle.Tensor) self.assertEqual(result_random.shape, [2, 3]) self.assertEqual(result_random.dist_attr, dist_attr) + # Test with invalid sharding_specs length + with self.assertRaises(AssertionError): + invalid_dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x']) + dist.dtensor_from_fn( + paddle.ones, dist_attr=invalid_dist_attr, shape=[2, 3] + ) + + # Test exceptions when running in static mode + paddle.enable_static() + with self.assertRaises(NotImplementedError): + with paddle.static.program_guard(paddle.static.Program()): + dist.dtensor_from_fn( + paddle.ones, dist_attr=dist_attr, shape=[2, 3] + ) + paddle.disable_static() + + class TestDistTensorForDygraphAPI(unittest.TestCase): def check_tensor_eq(self, a, b): np1 = a.numpy() @@ -122,4 +135,4 @@ def test_relu_api_for_dist_tensor(self): if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() From 3953a5e02341cce1014f06e7d5669e7ddc6e73b2 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Tue, 29 Aug 2023 08:20:40 +0000 Subject: [PATCH 05/14] fix sample code formatting issues --- python/paddle/distributed/auto_parallel/api.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py index 7769eac884dc27..09a8bb00747536 100644 --- a/python/paddle/distributed/auto_parallel/api.py +++ b/python/paddle/distributed/auto_parallel/api.py @@ -149,17 +149,17 @@ def dtensor_from_fn(fn, dist_attr, *args, **kwargs): .. 
code-block:: python

-            import paddle
-            import paddle.distribute as dist
+            >>>import paddle
+            >>>import paddle.distribute as dist

-            # Create a distributed attribute
-            mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
-            dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y'])
+            >>># Create a distributed attribute
+            >>>mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
+            >>>dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y'])

-            # Call the function dtensor_from_fn with dist_attr parameter
-            d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3])
+            >>># Call the function dtensor_from_fn with dist_attr parameter
+            >>>d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3])

-            print(d_tensor)
+            >>>print(d_tensor)
     """
     tensor = fn(*args, **kwargs)
     return shard_tensor(tensor, dist_attr=dist_attr)

From 1d510ea743c1c8c93e9b9cecb56e6feedb694281 Mon Sep 17 00:00:00 2001
From: yangxiaoyu14
Date: Wed, 30 Aug 2023 02:22:37 +0000
Subject: [PATCH 06/14] change sample codes ' >>>' to '>>> '

---
 python/paddle/distributed/auto_parallel/api.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py
index 09a8bb00747536..3ae0db7226c424 100644
--- a/python/paddle/distributed/auto_parallel/api.py
+++ b/python/paddle/distributed/auto_parallel/api.py
@@ -149,17 +149,17 @@ def dtensor_from_fn(fn, dist_attr, *args, **kwargs):

         .. code-block:: python

-            >>>import paddle
-            >>>import paddle.distribute as dist
+            >>> import paddle
+            >>> import paddle.distribute as dist

-            >>># Create a distributed attribute
-            >>>mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
-            >>>dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y'])
+            >>> # Create a distributed attribute
+            >>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
+            >>> dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y'])

-            >>># Call the function dtensor_from_fn with dist_attr parameter
-            >>>d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3])
+            >>> # Call the function dtensor_from_fn with dist_attr parameter
+            >>> d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3])

-            >>>print(d_tensor)
+            >>> print(d_tensor)
     """
     tensor = fn(*args, **kwargs)
     return shard_tensor(tensor, dist_attr=dist_attr)

From 47564d2d164e46d08cbe93ff150498ee4d6d2e3c Mon Sep 17 00:00:00 2001
From: yangxiaoyu14
Date: Thu, 31 Aug 2023 05:30:35 +0000
Subject: [PATCH 07/14] Add static graph unit test

---
 .../paddle/distributed/auto_parallel/api.py | 26 +++++----
 test/auto_parallel/test_dist_tensor.py      | 54 +++++++++++--------
 2 files changed, 48 insertions(+), 32 deletions(-)

diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py
index 3ae0db7226c424..88733a5266e06a 100644
--- a/python/paddle/distributed/auto_parallel/api.py
+++ b/python/paddle/distributed/auto_parallel/api.py
@@ -69,6 +69,17 @@ def __init__(self, mesh, sharding_specs):
         self.process_mesh = mesh
         self.dims_mapping = dims_mapping

+        self.mark_annotated("process_mesh")
+        self.mark_annotated("dims_mapping")
+
+    @property
+    def sharding_specs(self):
+        """
+        Get sharding_specs of the dist_attr
+        Returns:
+            list[str]: sharding_specs
+        """
+        return self._sharding_specs

 def shard_tensor(
@@ -134,13 +145,13 @@ def shard_tensor(


 def dtensor_from_fn(fn, dist_attr, 
*args, **kwargs): """ - Construct a Distributed Tensor from a paddle api function of arguments. + Construct a Distributed Tensor from a function of arguments. Args: - fn (callable): A paddle api function that takes arguments of *args, **kwargs and returns tensor. - dist_attr(paddle.distributed.DistAttr): Specify how tensors are distributed or sliced on ProcessMesh. - *args: A list of arguments to be passed to the ``fn`` function. - **kwargs: A list of arguments to be passed to the ``fn`` function. + fn (callable): A callable function that takes arguments of Distributed Tensor and returns tensor. + dist_attr (paddle.distributed.DistAttr): Specify how tensors are distributed or sliced on ProcessMesh. + *args (tuple): A tuple of arguments to be passed to the ``fn`` function. + **kwargs (dict): A dict of arguments to be passed to the ``fn`` function. Retruns: Tensor: A Tensor constructed from ``fn`` with distributed attributes. @@ -150,15 +161,12 @@ def dtensor_from_fn(fn, dist_attr, *args, **kwargs): .. code-block:: python >>> import paddle - >>> import paddle.distribute as dist - + >>> import paddle.distributed as dist >>> # Create a distributed attribute >>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) >>> dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) - >>> # Call the function dtensor_from_fn with dist_attr parameter >>> d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3]) - >>> print(d_tensor) """ tensor = fn(*args, **kwargs) diff --git a/test/auto_parallel/test_dist_tensor.py b/test/auto_parallel/test_dist_tensor.py index c6660c3ae63b66..40615e56b8c052 100644 --- a/test/auto_parallel/test_dist_tensor.py +++ b/test/auto_parallel/test_dist_tensor.py @@ -53,38 +53,46 @@ def test_dist_tensor_creation(self): self.assertEqual(dist_tensor_with_tensor.dist_attr, dist_attr) -class TestDistributedTensor(unittest.TestCase): - def test_dtensor_from_fn(self): +class TestDistTensorFromFn(unittest.TestCase): + def run_dtensor_from_fn(self): # Create a distributed attribute - mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) - dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) + mesh = dist.ProcessMesh([0, 1], dim_names=["x"]) + dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=[None]) - # Test with generate_tensor_ones() # Call the function dtensor_from_fn with dist_attr parameter result = dist.dtensor_from_fn( - paddle.ones, dist_attr=dist_attr, shape=[2, 3] + paddle.ones, dist_attr=dist_attr, shape=[1] ) # Verify the result - self.assertIsInstance(result, paddle.Tensor) - self.assertEqual(result.shape, [2, 3]) - self.assertEqual(result.dist_attr, dist_attr) + if paddle.in_dynamic_mode(): + self.assertIsInstance(result, paddle.Tensor) + self.assertEqual(result.shape, [1]) + else: + self.assertIsInstance(result, paddle.fluid.framework.Variable) + self.assertEqual(result.shape, (1,)) # Test with generate_tensor_zeros() result_zeros = dist.dtensor_from_fn( - paddle.zeros, dist_attr=dist_attr, shape=[2, 3] + paddle.zeros, dist_attr=dist_attr, shape=[1] ) - self.assertIsInstance(result_zeros, paddle.Tensor) - self.assertEqual(result_zeros.shape, [2, 3]) - self.assertEqual(result_zeros.dist_attr, dist_attr) + if paddle.in_dynamic_mode(): + self.assertIsInstance(result, paddle.Tensor) + self.assertEqual(result.shape, [1]) + else: + self.assertIsInstance(result, paddle.fluid.framework.Variable) + self.assertEqual(result.shape, (1,)) # Test with generate_tensor_random() result_random = dist.dtensor_from_fn( 
- paddle.rand, dist_attr=dist_attr, shape=[2, 3] + paddle.rand, dist_attr=dist_attr, shape=[1] ) - self.assertIsInstance(result_random, paddle.Tensor) - self.assertEqual(result_random.shape, [2, 3]) - self.assertEqual(result_random.dist_attr, dist_attr) + if paddle.in_dynamic_mode(): + self.assertIsInstance(result, paddle.Tensor) + self.assertEqual(result.shape, [1]) + else: + self.assertIsInstance(result, paddle.fluid.framework.Variable) + self.assertEqual(result.shape, (1,)) # Test with invalid sharding_specs length with self.assertRaises(AssertionError): @@ -93,13 +101,13 @@ def test_dtensor_from_fn(self): paddle.ones, dist_attr=invalid_dist_attr, shape=[2, 3] ) - # Test exceptions when running in static mode + def test_dynamic_mode(self): + self.run_dtensor_from_fn() + + # Test exceptions when running in static mode + def test_static_mode(self): paddle.enable_static() - with self.assertRaises(NotImplementedError): - with paddle.static.program_guard(paddle.static.Program()): - dist.dtensor_from_fn( - paddle.ones, dist_attr=dist_attr, shape=[2, 3] - ) + self.run_dtensor_from_fn() paddle.disable_static() From dd6c04f28a30bdbd91a9772ea607d71ef0120037 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Fri, 1 Sep 2023 03:24:20 +0000 Subject: [PATCH 08/14] modify the Indent of Sample Code --- python/paddle/distributed/auto_parallel/api.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py index 88733a5266e06a..0a061fb6b35a81 100644 --- a/python/paddle/distributed/auto_parallel/api.py +++ b/python/paddle/distributed/auto_parallel/api.py @@ -160,14 +160,14 @@ def dtensor_from_fn(fn, dist_attr, *args, **kwargs): .. code-block:: python - >>> import paddle - >>> import paddle.distributed as dist - >>> # Create a distributed attribute - >>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"]) - >>> dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y']) - >>> # Call the function dtensor_from_fn with dist_attr parameter - >>> d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[2, 3]) - >>> print(d_tensor) + >>> import paddle + >>> import paddle.distribute as dist + >>> # Create a distributed attribute + >>> mesh = dist.ProcessMesh([0, 1], dim_names=["x"]) + >>> dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=[None]) + >>> # Call the function dtensor_from_fn with dist_attr parameter + >>> d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[1]) + >>> print(d_tensor) """ tensor = fn(*args, **kwargs) return shard_tensor(tensor, dist_attr=dist_attr) From ff9928df2d26b8b048d156d1d231c25ea323381d Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Fri, 1 Sep 2023 10:30:21 +0000 Subject: [PATCH 09/14] complete the sample code modification according to ZhongKai's suggestion --- .../paddle/distributed/auto_parallel/api.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py index 0a061fb6b35a81..a8bb5b7ec44f5b 100644 --- a/python/paddle/distributed/auto_parallel/api.py +++ b/python/paddle/distributed/auto_parallel/api.py @@ -158,16 +158,15 @@ def dtensor_from_fn(fn, dist_attr, *args, **kwargs): Examples: - .. 
code-block:: python - - >>> import paddle - >>> import paddle.distribute as dist - >>> # Create a distributed attribute - >>> mesh = dist.ProcessMesh([0, 1], dim_names=["x"]) - >>> dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=[None]) - >>> # Call the function dtensor_from_fn with dist_attr parameter - >>> d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[1]) - >>> print(d_tensor) + .. code-block:: python + >>> import paddle + >>> import paddle.distributed as dist + >>> # Create a distributed attribute + >>> mesh = dist.ProcessMesh([0, 1], dim_names=["x"]) + >>> dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=[None]) + >>> # Call the function dtensor_from_fn with dist_attr parameter + >>> d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr=dist_attr, shape=[1]) + >>> print(d_tensor) """ tensor = fn(*args, **kwargs) return shard_tensor(tensor, dist_attr=dist_attr) From a9dd254ca490d6e60ccfb0fbc2378a22eb3bf005 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Mon, 4 Sep 2023 11:32:50 +0000 Subject: [PATCH 10/14] modify according to the review --- test/auto_parallel/test_dist_tensor.py | 33 ++++++++++++++++---------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/test/auto_parallel/test_dist_tensor.py b/test/auto_parallel/test_dist_tensor.py index 40615e56b8c052..87ee854bca9c4e 100644 --- a/test/auto_parallel/test_dist_tensor.py +++ b/test/auto_parallel/test_dist_tensor.py @@ -61,38 +61,47 @@ def run_dtensor_from_fn(self): # Call the function dtensor_from_fn with dist_attr parameter result = dist.dtensor_from_fn( - paddle.ones, dist_attr=dist_attr, shape=[1] + paddle.ones, dist_attr=dist_attr, shape=[16] ) - # Verify the result if paddle.in_dynamic_mode(): + dist_attr.dynamic_dims = [] self.assertIsInstance(result, paddle.Tensor) - self.assertEqual(result.shape, [1]) + self.assertEqual(result.shape, [16]) + self.assertEqual(result.dist_attr, dist_attr) else: + dist_attr.dynamic_dims = [0] self.assertIsInstance(result, paddle.fluid.framework.Variable) - self.assertEqual(result.shape, (1,)) + self.assertEqual(result.shape, (16,)) + self.assertEqual(result.dist_attr, dist_attr) - # Test with generate_tensor_zeros() result_zeros = dist.dtensor_from_fn( - paddle.zeros, dist_attr=dist_attr, shape=[1] + paddle.zeros, dist_attr=dist_attr, shape=[16] ) if paddle.in_dynamic_mode(): + dist_attr.dynamic_dims = [] self.assertIsInstance(result, paddle.Tensor) - self.assertEqual(result.shape, [1]) + self.assertEqual(result.shape, [16]) + self.assertEqual(result.dist_attr, dist_attr) else: + dist_attr.dynamic_dims = [0] self.assertIsInstance(result, paddle.fluid.framework.Variable) - self.assertEqual(result.shape, (1,)) + self.assertEqual(result.shape, (16,)) + self.assertEqual(result.dist_attr, dist_attr) - # Test with generate_tensor_random() result_random = dist.dtensor_from_fn( - paddle.rand, dist_attr=dist_attr, shape=[1] + paddle.rand, dist_attr=dist_attr, shape=[16] ) if paddle.in_dynamic_mode(): + dist_attr.dynamic_dims = [] self.assertIsInstance(result, paddle.Tensor) - self.assertEqual(result.shape, [1]) + self.assertEqual(result.shape, [16]) + self.assertEqual(result.dist_attr, dist_attr) else: + dist_attr.dynamic_dims = [0] self.assertIsInstance(result, paddle.fluid.framework.Variable) - self.assertEqual(result.shape, (1,)) + self.assertEqual(result.shape, (16,)) + self.assertEqual(result.dist_attr, dist_attr) # Test with invalid sharding_specs length with self.assertRaises(AssertionError): From 832d36b8e40fa31898b3a1969df54631d903dc6a Mon Sep 
17 00:00:00 2001 From: yangxiaoyu14 Date: Wed, 6 Sep 2023 02:42:30 +0000 Subject: [PATCH 11/14] change fluid.Variable to static.Variable --- test/auto_parallel/test_dist_tensor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/auto_parallel/test_dist_tensor.py b/test/auto_parallel/test_dist_tensor.py index 87ee854bca9c4e..669aa1219352a1 100644 --- a/test/auto_parallel/test_dist_tensor.py +++ b/test/auto_parallel/test_dist_tensor.py @@ -71,7 +71,7 @@ def run_dtensor_from_fn(self): self.assertEqual(result.dist_attr, dist_attr) else: dist_attr.dynamic_dims = [0] - self.assertIsInstance(result, paddle.fluid.framework.Variable) + self.assertIsInstance(result, paddle.static.Variable) self.assertEqual(result.shape, (16,)) self.assertEqual(result.dist_attr, dist_attr) @@ -85,7 +85,7 @@ def run_dtensor_from_fn(self): self.assertEqual(result.dist_attr, dist_attr) else: dist_attr.dynamic_dims = [0] - self.assertIsInstance(result, paddle.fluid.framework.Variable) + self.assertIsInstance(result, paddle.static.Variable) self.assertEqual(result.shape, (16,)) self.assertEqual(result.dist_attr, dist_attr) @@ -99,7 +99,7 @@ def run_dtensor_from_fn(self): self.assertEqual(result.dist_attr, dist_attr) else: dist_attr.dynamic_dims = [0] - self.assertIsInstance(result, paddle.fluid.framework.Variable) + self.assertIsInstance(result, paddle.static.Variable) self.assertEqual(result.shape, (16,)) self.assertEqual(result.dist_attr, dist_attr) From bf0598bbaedea99f5bdcf44cc9aaff041b2aecd6 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Thu, 7 Sep 2023 07:03:26 +0000 Subject: [PATCH 12/14] modify according to zhongkai's review --- .../ir/generated/pd_ops.parsed.yaml | 487 ++++++++++++++++++ .../ir/generated/pd_ops_backward.parsed.yaml | 39 ++ .../paddle/distributed/auto_parallel/api.py | 1 + 3 files changed, 527 insertions(+) create mode 100644 paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml create mode 100644 paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml b/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml new file mode 100644 index 00000000000000..b95740f24c1715 --- /dev/null +++ b/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml @@ -0,0 +1,487 @@ +- name: add_n_ + inputs: + - typename: Tensor[] + name: inputs + optional: false + no_need_buffer: false + data_transform: {} + attrs: [] + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [add_n] + param: [inputs] + backend: null + layout: null + data_type: null + dispatch: {add_n: null} + force_backend: null + infer_meta: + func: AddNInferMeta + param: [inputs] + inplace: null + view: null + backward: add_n_grad +- name: add_n_with_kernel + inputs: + - typename: Tensor[] + name: inputs + optional: false + no_need_buffer: false + data_transform: {} + attrs: [] + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [add_n] + param: [inputs] + backend: null + layout: null + data_type: null + dispatch: {add_n: null} + force_backend: null + infer_meta: + func: AddNInferMeta + param: [inputs] + inplace: null + view: null + backward: add_n_grad +- name: assert + inputs: + - typename: Tensor + name: cond + optional: false + no_need_buffer: false + data_transform: {} + - typename: 
Tensor[] + name: data + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: int64_t, name: summarize, default_value: '-1'} + outputs: [] + no_need_buffer: null + data_transform: null + kernel: + func: [assert] + param: [cond, data, summarize] + backend: null + layout: null + data_type: + ordered: false + candidates: [cond] + to_complex_flag: [false] + dispatch: {assert: null} + force_backend: null + inplace: null + view: null + backward: null +- name: assign_value + inputs: [] + attrs: + - {typename: 'int[]', name: shape} + - {typename: DataType, name: dtype} + - {typename: 'Scalar[]', name: values, data_type: 'std::vector'} + - {typename: Place, name: place, default_value: '{}'} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [assign_value] + param: [shape, dtype, values] + backend: + ordered: true + candidates: [place] + layout: null + data_type: + ordered: false + candidates: [dtype] + to_complex_flag: [false] + dispatch: {assign_value: null} + force_backend: null + infer_meta: + func: AssignValueInferMeta + param: [shape, dtype] + inplace: null + view: null + backward: null +- name: embedding_grad_sparse + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + - typename: Tensor + name: weight + optional: false + no_need_buffer: false + data_transform: {} + - typename: Tensor + name: out_grad + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: int64_t, name: padding_idx, default_value: '-1'} + - {typename: bool, name: sparse, default_value: 'false'} + outputs: + - {typename: SelectedRows, name: weight_grad, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [embedding_sparse_grad] + param: [x, weight, out_grad, padding_idx, sparse] + backend: null + layout: null + data_type: + ordered: false + candidates: [weight] + to_complex_flag: [false] + dispatch: {embedding_sparse_grad: null} + force_backend: null + infer_meta: + func: EmbeddingGradSparseInferMeta + param: [weight] + inplace: null + view: null + backward: null +- name: feed + inputs: [] + attrs: + - {typename: str, name: name} + - {typename: int, name: col} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + inplace: null + view: null + backward: null +- name: fetch + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: str, name: name} + - {typename: int, name: col} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [fetch] + param: [x] + backend: null + layout: null + data_type: null + dispatch: {fetch: null} + force_backend: null + infer_meta: + func: UnchangedInferMeta + param: [x] + inplace: null + view: null + backward: null +- name: load_combine + inputs: [] + attrs: + - {typename: str, name: file_path} + - {typename: bool, name: load_as_fp16} + - {typename: bool, name: model_from_memory} + outputs: + - {typename: 'Tensor[]', name: Out, optional: true, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [load_combine] + param: [file_path, load_as_fp16, model_from_memory] + backend: null + layout: null + data_type: null + dispatch: {load_combine: null} + force_backend: null + inplace: 
null + view: null + backward: null +- name: lod_array_length + inputs: + - typename: Tensor[] + name: x + optional: false + no_need_buffer: false + data_transform: {} + attrs: [] + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + inplace: null + view: null + backward: null +- name: print + inputs: + - typename: Tensor + name: in + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: int, name: first_n} + - {typename: str, name: message} + - {typename: int, name: summarize} + - {typename: bool, name: print_tensor_name, default_value: 'true'} + - {typename: bool, name: print_tensor_type, default_value: 'true'} + - {typename: bool, name: print_tensor_shape, default_value: 'true'} + - {typename: bool, name: print_tensor_layout, default_value: 'true'} + - {typename: bool, name: print_tensor_lod, default_value: 'true'} + - {typename: str, name: print_phase, default_value: '"BOTH"'} + - {typename: bool, name: is_forward, default_value: 'true'} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [print_kernel] + param: [in, first_n, message, summarize, print_tensor_name, print_tensor_type, + print_tensor_shape, print_tensor_layout, print_tensor_lod, print_phase, is_forward] + backend: null + layout: null + data_type: null + dispatch: {print_kernel: null} + force_backend: null + infer_meta: + func: UnchangedInferMeta + param: [in] + inplace: null + view: null + backward: null +- name: recv_v2 + inputs: [] + attrs: + - {typename: 'int[]', name: out_shape, default_value: '{}'} + - {typename: DataType, name: dtype, default_value: 'DataType::FLOAT32'} + - {typename: int, name: peer, default_value: '0'} + - {typename: int, name: ring_id, default_value: '0'} + - {typename: bool, name: use_calc_stream, default_value: 'false'} + - {typename: bool, name: dynamic_shape, default_value: 'false'} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [recv_v2] + param: [ring_id, dynamic_shape, peer, out_shape, dtype, use_calc_stream] + backend: null + layout: null + data_type: + ordered: false + candidates: [dtype] + to_complex_flag: [false] + dispatch: {recv_v2: null} + force_backend: null + infer_meta: + func: RecvV2InferMeta + param: [peer, dtype, out_shape] + inplace: null + view: null + backward: null +- name: save_combine + inputs: + - typename: Tensor[] + name: x + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: str, name: file_path} + - {typename: bool, name: overwrite} + - {typename: bool, name: save_as_fp16} + - {typename: bool, name: save_to_memory} + outputs: + - {typename: Tensor, name: out, optional: true, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [save_combine_tensor] + param: [x, file_path, overwrite, save_as_fp16, save_to_memory] + backend: null + layout: null + data_type: null + dispatch: {save_combine_tensor: null} + force_backend: null + inplace: null + view: null + backward: null +- name: send_v2 + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: int, name: ring_id, default_value: '0'} + - {typename: int, name: peer, default_value: '0'} + - {typename: bool, name: use_calc_stream, default_value: 'false'} + - {typename: bool, name: 
dynamic_shape, default_value: 'false'} + outputs: [] + no_need_buffer: null + data_transform: null + kernel: + func: [send_v2] + param: [x, ring_id, dynamic_shape, peer, use_calc_stream] + backend: null + layout: null + data_type: null + dispatch: {send_v2: null} + force_backend: null + infer_meta: + func: SendV2InferMeta + param: [peer, ring_id] + inplace: null + view: null + backward: null +- name: set_value + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: 'int64_t[]', name: starts} + - {typename: 'int64_t[]', name: ends} + - {typename: 'int64_t[]', name: steps} + - {typename: 'int64_t[]', name: axes} + - {typename: 'int64_t[]', name: decrease_axes} + - {typename: 'int64_t[]', name: none_axes} + - {typename: 'int64_t[]', name: shape} + - {typename: 'Scalar[]', name: values, data_type: 'std::vector'} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [set_value] + param: [x, starts, ends, steps, axes, decrease_axes, none_axes, shape, values] + backend: null + layout: null + data_type: null + dispatch: {set_value: null} + force_backend: null + infer_meta: + func: SetValueInferMeta + param: [x] + inplace: {out: x} + view: null + backward: set_value_grad +- name: set_value_with_tensor + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + - typename: Tensor + name: values + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: 'int64_t[]', name: starts} + - {typename: 'int64_t[]', name: ends} + - {typename: 'int64_t[]', name: steps} + - {typename: 'int64_t[]', name: axes} + - {typename: 'int64_t[]', name: decrease_axes} + - {typename: 'int64_t[]', name: none_axes} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [set_value_with_tensor] + param: [x, values, starts, ends, steps, axes, decrease_axes, none_axes] + backend: null + layout: null + data_type: null + dispatch: {set_value_with_tensor: null} + force_backend: null + infer_meta: + func: SetValueInferMeta + param: [x] + inplace: {out: x} + view: null + backward: set_value_grad +- name: shadow_feed + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + attrs: [] + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [shadow_feed] + param: [x] + backend: null + layout: null + data_type: null + dispatch: {shadow_feed: null} + force_backend: null + infer_meta: + func: UnchangedInferMeta + param: [x] + inplace: null + view: null + backward: null +- name: share_buffer_ + inputs: + - typename: Tensor[] + name: x + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: 'bool[]', name: share_dims_and_dtype, default_value: '{}'} + outputs: + - {typename: 'Tensor[]', name: out, size: x.size(), optional: false, intermediate: false} + - {typename: 'Tensor[]', name: xout, size: x.size(), optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + inplace: null + view: null + backward: null +- name: write_to_array + inputs: + - typename: Tensor + name: i + optional: false + no_need_buffer: false + data_transform: {} + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + 
attrs: [] + outputs: + - {typename: 'Tensor[]', name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + inplace: null + view: null + backward: write_to_array_grad diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml b/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml new file mode 100644 index 00000000000000..fe24d850a534d0 --- /dev/null +++ b/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml @@ -0,0 +1,39 @@ +- name: set_value_grad + inputs: + - typename: Tensor + name: out_grad + optional: false + no_need_buffer: false + data_transform: {} + - typename: Tensor + name: values + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: 'int64_t[]', name: starts} + - {typename: 'int64_t[]', name: ends} + - {typename: 'int64_t[]', name: steps} + - {typename: 'int64_t[]', name: axes} + - {typename: 'int64_t[]', name: decrease_axes} + - {typename: 'int64_t[]', name: none_axes} + outputs: + - {typename: Tensor, name: x_grad, optional: false, intermediate: false} + - {typename: Tensor, name: values_grad, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + kernel: + func: [set_value_grad] + param: [out_grad, starts, ends, steps, axes, decrease_axes, none_axes] + backend: null + layout: null + data_type: null + dispatch: {set_value_grad: null} + force_backend: null + infer_meta: + func: SetValueGradInferMeta + param: [out_grad, values] + inplace: null + view: null + backward: null + forward: null diff --git a/python/paddle/distributed/auto_parallel/api.py b/python/paddle/distributed/auto_parallel/api.py index a8bb5b7ec44f5b..680b9cc95bc2b0 100644 --- a/python/paddle/distributed/auto_parallel/api.py +++ b/python/paddle/distributed/auto_parallel/api.py @@ -159,6 +159,7 @@ def dtensor_from_fn(fn, dist_attr, *args, **kwargs): Examples: .. 
code-block:: python + >>> import paddle >>> import paddle.distributed as dist >>> # Create a distributed attribute From d545aed26d73d0e818133fdc76b4a86137ac3249 Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Mon, 11 Sep 2023 03:10:27 +0000 Subject: [PATCH 13/14] According to Yifan's suggestion, pull the latest code to resolve conflicts --- test/auto_parallel/test_dist_tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/auto_parallel/test_dist_tensor.py b/test/auto_parallel/test_dist_tensor.py index 17d3b7a46be1e0..0e4ab8e57d9006 100644 --- a/test/auto_parallel/test_dist_tensor.py +++ b/test/auto_parallel/test_dist_tensor.py @@ -55,7 +55,7 @@ def test_dist_tensor_creation(self): class TestDistTensorFromFn(unittest.TestCase): def run_dtensor_from_fn(self): - # Create a distributed attribute + # Create a dist_attr mesh = dist.ProcessMesh([0, 1], dim_names=["x"]) dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=[None]) From f064d24226d7e28c1c79cf18727eaef8c80f7e7d Mon Sep 17 00:00:00 2001 From: yangxiaoyu14 Date: Mon, 11 Sep 2023 08:41:08 +0000 Subject: [PATCH 14/14] remove paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml and paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml --- .../ir/generated/pd_ops.parsed.yaml | 487 ------------------ .../ir/generated/pd_ops_backward.parsed.yaml | 39 -- 2 files changed, 526 deletions(-) delete mode 100644 paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml delete mode 100644 paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml b/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml deleted file mode 100644 index b95740f24c1715..00000000000000 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops.parsed.yaml +++ /dev/null @@ -1,487 +0,0 @@ -- name: add_n_ - inputs: - - typename: Tensor[] - name: inputs - optional: false - no_need_buffer: false - data_transform: {} - attrs: [] - outputs: - - {typename: Tensor, name: out, optional: false, intermediate: false} - no_need_buffer: null - data_transform: null - kernel: - func: [add_n] - param: [inputs] - backend: null - layout: null - data_type: null - dispatch: {add_n: null} - force_backend: null - infer_meta: - func: AddNInferMeta - param: [inputs] - inplace: null - view: null - backward: add_n_grad -- name: add_n_with_kernel - inputs: - - typename: Tensor[] - name: inputs - optional: false - no_need_buffer: false - data_transform: {} - attrs: [] - outputs: - - {typename: Tensor, name: out, optional: false, intermediate: false} - no_need_buffer: null - data_transform: null - kernel: - func: [add_n] - param: [inputs] - backend: null - layout: null - data_type: null - dispatch: {add_n: null} - force_backend: null - infer_meta: - func: AddNInferMeta - param: [inputs] - inplace: null - view: null - backward: add_n_grad -- name: assert - inputs: - - typename: Tensor - name: cond - optional: false - no_need_buffer: false - data_transform: {} - - typename: Tensor[] - name: data - optional: false - no_need_buffer: false - data_transform: {} - attrs: - - {typename: int64_t, name: summarize, default_value: '-1'} - outputs: [] - no_need_buffer: null - data_transform: null - kernel: - func: [assert] - param: [cond, data, summarize] - backend: null - layout: null - data_type: - ordered: false - candidates: [cond] - to_complex_flag: [false] - dispatch: {assert: null} - force_backend: 
-  inplace: null
-  view: null
-  backward: null
-- name: assign_value
-  inputs: []
-  attrs:
-  - {typename: 'int[]', name: shape}
-  - {typename: DataType, name: dtype}
-  - {typename: 'Scalar[]', name: values, data_type: 'std::vector'}
-  - {typename: Place, name: place, default_value: '{}'}
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [assign_value]
-    param: [shape, dtype, values]
-    backend:
-      ordered: true
-      candidates: [place]
-    layout: null
-    data_type:
-      ordered: false
-      candidates: [dtype]
-      to_complex_flag: [false]
-    dispatch: {assign_value: null}
-    force_backend: null
-  infer_meta:
-    func: AssignValueInferMeta
-    param: [shape, dtype]
-  inplace: null
-  view: null
-  backward: null
-- name: embedding_grad_sparse
-  inputs:
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  - typename: Tensor
-    name: weight
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  - typename: Tensor
-    name: out_grad
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: int64_t, name: padding_idx, default_value: '-1'}
-  - {typename: bool, name: sparse, default_value: 'false'}
-  outputs:
-  - {typename: SelectedRows, name: weight_grad, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [embedding_sparse_grad]
-    param: [x, weight, out_grad, padding_idx, sparse]
-    backend: null
-    layout: null
-    data_type:
-      ordered: false
-      candidates: [weight]
-      to_complex_flag: [false]
-    dispatch: {embedding_sparse_grad: null}
-    force_backend: null
-  infer_meta:
-    func: EmbeddingGradSparseInferMeta
-    param: [weight]
-  inplace: null
-  view: null
-  backward: null
-- name: feed
-  inputs: []
-  attrs:
-  - {typename: str, name: name}
-  - {typename: int, name: col}
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  inplace: null
-  view: null
-  backward: null
-- name: fetch
-  inputs:
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: str, name: name}
-  - {typename: int, name: col}
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [fetch]
-    param: [x]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {fetch: null}
-    force_backend: null
-  infer_meta:
-    func: UnchangedInferMeta
-    param: [x]
-  inplace: null
-  view: null
-  backward: null
-- name: load_combine
-  inputs: []
-  attrs:
-  - {typename: str, name: file_path}
-  - {typename: bool, name: load_as_fp16}
-  - {typename: bool, name: model_from_memory}
-  outputs:
-  - {typename: 'Tensor[]', name: Out, optional: true, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [load_combine]
-    param: [file_path, load_as_fp16, model_from_memory]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {load_combine: null}
-    force_backend: null
-  inplace: null
-  view: null
-  backward: null
-- name: lod_array_length
-  inputs:
-  - typename: Tensor[]
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs: []
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  inplace: null
-  view: null
-  backward: null
-- name: print
-  inputs:
-  - typename: Tensor
-    name: in
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: int, name: first_n}
-  - {typename: str, name: message}
-  - {typename: int, name: summarize}
-  - {typename: bool, name: print_tensor_name, default_value: 'true'}
-  - {typename: bool, name: print_tensor_type, default_value: 'true'}
-  - {typename: bool, name: print_tensor_shape, default_value: 'true'}
-  - {typename: bool, name: print_tensor_layout, default_value: 'true'}
-  - {typename: bool, name: print_tensor_lod, default_value: 'true'}
-  - {typename: str, name: print_phase, default_value: '"BOTH"'}
-  - {typename: bool, name: is_forward, default_value: 'true'}
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [print_kernel]
-    param: [in, first_n, message, summarize, print_tensor_name, print_tensor_type,
-      print_tensor_shape, print_tensor_layout, print_tensor_lod, print_phase, is_forward]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {print_kernel: null}
-    force_backend: null
-  infer_meta:
-    func: UnchangedInferMeta
-    param: [in]
-  inplace: null
-  view: null
-  backward: null
-- name: recv_v2
-  inputs: []
-  attrs:
-  - {typename: 'int[]', name: out_shape, default_value: '{}'}
-  - {typename: DataType, name: dtype, default_value: 'DataType::FLOAT32'}
-  - {typename: int, name: peer, default_value: '0'}
-  - {typename: int, name: ring_id, default_value: '0'}
-  - {typename: bool, name: use_calc_stream, default_value: 'false'}
-  - {typename: bool, name: dynamic_shape, default_value: 'false'}
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [recv_v2]
-    param: [ring_id, dynamic_shape, peer, out_shape, dtype, use_calc_stream]
-    backend: null
-    layout: null
-    data_type:
-      ordered: false
-      candidates: [dtype]
-      to_complex_flag: [false]
-    dispatch: {recv_v2: null}
-    force_backend: null
-  infer_meta:
-    func: RecvV2InferMeta
-    param: [peer, dtype, out_shape]
-  inplace: null
-  view: null
-  backward: null
-- name: save_combine
-  inputs:
-  - typename: Tensor[]
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: str, name: file_path}
-  - {typename: bool, name: overwrite}
-  - {typename: bool, name: save_as_fp16}
-  - {typename: bool, name: save_to_memory}
-  outputs:
-  - {typename: Tensor, name: out, optional: true, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [save_combine_tensor]
-    param: [x, file_path, overwrite, save_as_fp16, save_to_memory]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {save_combine_tensor: null}
-    force_backend: null
-  inplace: null
-  view: null
-  backward: null
-- name: send_v2
-  inputs:
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: int, name: ring_id, default_value: '0'}
-  - {typename: int, name: peer, default_value: '0'}
-  - {typename: bool, name: use_calc_stream, default_value: 'false'}
-  - {typename: bool, name: dynamic_shape, default_value: 'false'}
-  outputs: []
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [send_v2]
-    param: [x, ring_id, dynamic_shape, peer, use_calc_stream]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {send_v2: null}
-    force_backend: null
-  infer_meta:
-    func: SendV2InferMeta
-    param: [peer, ring_id]
-  inplace: null
-  view: null
-  backward: null
-- name: set_value
-  inputs:
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: 'int64_t[]', name: starts}
-  - {typename: 'int64_t[]', name: ends}
-  - {typename: 'int64_t[]', name: steps}
-  - {typename: 'int64_t[]', name: axes}
-  - {typename: 'int64_t[]', name: decrease_axes}
-  - {typename: 'int64_t[]', name: none_axes}
-  - {typename: 'int64_t[]', name: shape}
-  - {typename: 'Scalar[]', name: values, data_type: 'std::vector'}
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [set_value]
-    param: [x, starts, ends, steps, axes, decrease_axes, none_axes, shape, values]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {set_value: null}
-    force_backend: null
-  infer_meta:
-    func: SetValueInferMeta
-    param: [x]
-  inplace: {out: x}
-  view: null
-  backward: set_value_grad
-- name: set_value_with_tensor
-  inputs:
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  - typename: Tensor
-    name: values
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: 'int64_t[]', name: starts}
-  - {typename: 'int64_t[]', name: ends}
-  - {typename: 'int64_t[]', name: steps}
-  - {typename: 'int64_t[]', name: axes}
-  - {typename: 'int64_t[]', name: decrease_axes}
-  - {typename: 'int64_t[]', name: none_axes}
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [set_value_with_tensor]
-    param: [x, values, starts, ends, steps, axes, decrease_axes, none_axes]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {set_value_with_tensor: null}
-    force_backend: null
-  infer_meta:
-    func: SetValueInferMeta
-    param: [x]
-  inplace: {out: x}
-  view: null
-  backward: set_value_grad
-- name: shadow_feed
-  inputs:
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs: []
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [shadow_feed]
-    param: [x]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {shadow_feed: null}
-    force_backend: null
-  infer_meta:
-    func: UnchangedInferMeta
-    param: [x]
-  inplace: null
-  view: null
-  backward: null
-- name: share_buffer_
-  inputs:
-  - typename: Tensor[]
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: 'bool[]', name: share_dims_and_dtype, default_value: '{}'}
-  outputs:
-  - {typename: 'Tensor[]', name: out, size: x.size(), optional: false, intermediate: false}
-  - {typename: 'Tensor[]', name: xout, size: x.size(), optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  inplace: null
-  view: null
-  backward: null
-- name: write_to_array
-  inputs:
-  - typename: Tensor
-    name: i
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs: []
-  outputs:
-  - {typename: 'Tensor[]', name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  inplace: null
-  view: null
-  backward: write_to_array_grad
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml b/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml
deleted file mode 100644
index fe24d850a534d0..00000000000000
--- a/paddle/fluid/ir/dialect/paddle_dialect/ir/generated/pd_ops_backward.parsed.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-- name: set_value_grad
-  inputs:
-  - typename: Tensor
-    name: out_grad
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  - typename: Tensor
-    name: values
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs:
-  - {typename: 'int64_t[]', name: starts}
-  - {typename: 'int64_t[]', name: ends}
-  - {typename: 'int64_t[]', name: steps}
-  - {typename: 'int64_t[]', name: axes}
-  - {typename: 'int64_t[]', name: decrease_axes}
-  - {typename: 'int64_t[]', name: none_axes}
-  outputs:
-  - {typename: Tensor, name: x_grad, optional: false, intermediate: false}
-  - {typename: Tensor, name: values_grad, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  kernel:
-    func: [set_value_grad]
-    param: [out_grad, starts, ends, steps, axes, decrease_axes, none_axes]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {set_value_grad: null}
-    force_backend: null
-  infer_meta:
-    func: SetValueGradInferMeta
-    param: [out_grad, values]
-  inplace: null
-  view: null
-  backward: null
-  forward: null
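
Usage note: the following is a minimal sketch of how the dtensor_from_fn API added by this series might be called, based on the signature shown in the api.py hunk above (dist.dtensor_from_fn(fn, dist_attr, *args, **kwargs)) and the mesh/dist_attr setup from test_dist_tensor.py. The variable names and the choice of paddle.ones as the factory function are illustrative only, not part of the patch.

    import paddle
    import paddle.distributed as dist

    # Describe a 1-D process mesh over two ranks; sharding_specs=[None]
    # replicates the tensor across the mesh instead of sharding it along "x".
    mesh = dist.ProcessMesh([0, 1], dim_names=["x"])
    dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=[None])

    # dtensor_from_fn forwards *args/**kwargs (here, shape=[2, 3]) to the
    # factory function and wraps its output as a distributed tensor.
    d_tensor = dist.dtensor_from_fn(paddle.ones, dist_attr, shape=[2, 3])

    print(d_tensor.shape)  # expected: [2, 3]

Any tensor factory that accepts keyword arguments (e.g. paddle.zeros) should work the same way, since the helper simply forwards the extra arguments to fn before attaching dist_attr to the result.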