From cf4bacc4aa542b81cbb16a876a0dda04f360755e Mon Sep 17 00:00:00 2001
From: megemini
Date: Tue, 2 Jul 2024 23:18:34 +0800
Subject: [PATCH 1/2] [Fix] type ignore

---
 python/paddle/amp/debugging.py | 2 +-
 python/paddle/base/layers/math_op_patch.py | 2 +-
 python/paddle/distributed/communication/stream/gather.py | 2 +-
 python/paddle/distributed/parallel.py | 2 +-
 python/paddle/optimizer/lbfgs.py | 6 +++---
 python/paddle/tensor/attribute.py | 2 +-
 python/paddle/tensor/creation.py | 2 +-
 python/paddle/vision/transforms/transforms.py | 2 +-
 8 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/python/paddle/amp/debugging.py b/python/paddle/amp/debugging.py
index 1b6e575cdbec9..8e4232b508f59 100644
--- a/python/paddle/amp/debugging.py
+++ b/python/paddle/amp/debugging.py
@@ -88,7 +88,7 @@ def check_layer_numerics(func):
             ...         return x @ self._w + self._b
             ...
             >>> dtype = 'float32'
-            >>> x = paddle.rand([10, 2, 2], dtype=dtype) # type: ignore
+            >>> x = paddle.rand([10, 2, 2], dtype=dtype) # type: ignore[var-annotated]
             >>> model = MyLayer(dtype)
             >>> x[0] = float(0)
             >>> loss = model(x)

diff --git a/python/paddle/base/layers/math_op_patch.py b/python/paddle/base/layers/math_op_patch.py
index 1dd3c19b44d9a..dfd4c802d89f9 100644
--- a/python/paddle/base/layers/math_op_patch.py
+++ b/python/paddle/base/layers/math_op_patch.py
@@ -352,7 +352,7 @@ def astype(self, dtype):
                 >>> import paddle
                 >>> import numpy as np
-                >>> x = np.ones([2, 2], np.float32)
+                >>> x = np.ones([2, 2], np.float32) # type: ignore[var-annotated]
                 >>> with base.dygraph.guard():
                 ...     original_variable = paddle.to_tensor(x)
                 ...     print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))

diff --git a/python/paddle/distributed/communication/stream/gather.py b/python/paddle/distributed/communication/stream/gather.py
index c0405ec696bc0..45b86b0215e0f 100644
--- a/python/paddle/distributed/communication/stream/gather.py
+++ b/python/paddle/distributed/communication/stream/gather.py
@@ -83,7 +83,7 @@ def gather(
             >>> import paddle.distributed as dist
             >>> dist.init_parallel_env()
-            >>> gather_list = []
+            >>> gather_list = [] # type: ignore[var-annotated]
             >>> if dist.get_rank() == 0:
             ...     data = paddle.to_tensor([1, 2, 3])
             ...     dist.stream.gather(data, gather_list, dst=0)

diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py
index 0d905b4f5d985..791f8834c37a6 100644
--- a/python/paddle/distributed/parallel.py
+++ b/python/paddle/distributed/parallel.py
@@ -334,7 +334,7 @@ class DataParallel(layers.Layer):
             ...     model = paddle.DataParallel(model)
             ...     opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
             ...     for step in range(10):
-            ...         x_data = numpy.random.randn(2, 2).astype(numpy.float32)
+            ...         x_data = numpy.random.randn(2, 2).astype(numpy.float32) # type: ignore[var-annotated]
             ...         x = paddle.to_tensor(x_data)
             ...         x.stop_gradient = False
             ...         # step 1 : skip gradient synchronization by 'no_sync'

diff --git a/python/paddle/optimizer/lbfgs.py b/python/paddle/optimizer/lbfgs.py
index a0198048ecfea..5a41e119f08bf 100644
--- a/python/paddle/optimizer/lbfgs.py
+++ b/python/paddle/optimizer/lbfgs.py
@@ -399,10 +399,10 @@ class LBFGS(Optimizer):
             >>> paddle.disable_static()
             >>> np.random.seed(0)
-            >>> np_w = np.random.rand(1).astype(np.float32) # type: ignore
-            >>> np_x = np.random.rand(1).astype(np.float32) # type: ignore
+            >>> np_w = np.random.rand(1).astype(np.float32) # type: ignore[var-annotated]
+            >>> np_x = np.random.rand(1).astype(np.float32) # type: ignore[var-annotated]

-            >>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)] # type: ignore
+            >>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)] # type: ignore[var-annotated]
             >>> # y = 2x
             >>> targets = [2 * x for x in inputs]

diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py
index 2a0f4f5df2eed..d4d35bcb1e05a 100644
--- a/python/paddle/tensor/attribute.py
+++ b/python/paddle/tensor/attribute.py
@@ -102,7 +102,7 @@ def shape(input: Tensor) -> Tensor:
             >>> exe = paddle.static.Executor(paddle.CPUPlace())
             >>> exe.run(paddle.static.default_startup_program())
-            >>> img = np.ones((3, 100, 100)).astype(np.float32) # type: ignore
+            >>> img = np.ones((3, 100, 100)).astype(np.float32) # type: ignore[var-annotated]
             >>> res = exe.run(paddle.static.default_main_program(), feed={'x':img}, fetch_list=[output])
             >>> print(res)

diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 506525d1e2e49..8e6635a641f62 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -2483,7 +2483,7 @@ def assign(x: TensorLike, output: paddle.Tensor | None = None) -> paddle.Tensor:
             [2.5 2.5]]
             >>> array = np.array([[1, 1], [3, 4], [1, 3]]).astype(
             ...     np.int64
-            ... ) # type: ignore
+            ... ) # type: ignore[var-annotated]
             >>> result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             >>> paddle.assign(array, result1)
             >>> print(result1.numpy())

diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py
index bf5fc470e87f3..2e25cbc76e164 100644
--- a/python/paddle/vision/transforms/transforms.py
+++ b/python/paddle/vision/transforms/transforms.py
@@ -232,7 +232,7 @@ class BaseTransform(_Transform[_InputT, _RetT]):
             ...     else:
             ...         raise TypeError("Unexpected type {}".format(type(img)))
             ...
-            >>> class CustomRandomFlip(BaseTransform): # type: ignore
+            >>> class CustomRandomFlip(BaseTransform): # type: ignore[type-arg]
             ...     def __init__(self, prob=0.5, keys=None):
             ...         super().__init__(keys)
             ...         self.prob = prob

From c41ee501dd4ae75aaf57b0886666fbcf2d7ce686 Mon Sep 17 00:00:00 2001
From: megemini
Date: Wed, 3 Jul 2024 02:04:20 +0800
Subject: [PATCH 2/2] [Fix] ignore type

---
 python/paddle/amp/debugging.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/amp/debugging.py b/python/paddle/amp/debugging.py
index 8e4232b508f59..8b4340a2c4935 100644
--- a/python/paddle/amp/debugging.py
+++ b/python/paddle/amp/debugging.py
@@ -88,7 +88,7 @@ def check_layer_numerics(func):
             ...         return x @ self._w + self._b
             ...
             >>> dtype = 'float32'
-            >>> x = paddle.rand([10, 2, 2], dtype=dtype) # type: ignore[var-annotated]
+            >>> x = paddle.rand([10, 2, 2], dtype=dtype) # type: ignore[arg-type]
             >>> model = MyLayer(dtype)
             >>> x[0] = float(0)
             >>> loss = model(x)
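
Note on the change (an illustration, not applied by `git am`): the series narrows
bare `# type: ignore` comments to error-code-scoped ones such as
`# type: ignore[var-annotated]`. A minimal sketch of the difference, assuming mypy
as the checker; the file name and variables below are hypothetical, not from the
Paddle tree:

    # demo_ignore.py -- hypothetical demo file
    gather_list = []  # type: ignore
    # Bare ignore: mypy suppresses *every* error code on this line, so an
    # unrelated mistake introduced later would also go unreported.

    result_list = []  # type: ignore[var-annotated]
    # Scoped ignore: only "Need type annotation for ..." (var-annotated) is
    # suppressed; any other error on the same line is still reported.

Running `mypy --enable-error-code ignore-without-code demo_ignore.py` flags the
bare form but accepts the scoped one, which is presumably the kind of check this
series is meant to satisfy.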