diff --git a/python/paddle/amp/debugging.py b/python/paddle/amp/debugging.py
index 1b6e575cdbec9..8b4340a2c4935 100644
--- a/python/paddle/amp/debugging.py
+++ b/python/paddle/amp/debugging.py
@@ -88,7 +88,7 @@ def check_layer_numerics(func):
             ...         return x @ self._w + self._b
             ...
             >>> dtype = 'float32'
-            >>> x = paddle.rand([10, 2, 2], dtype=dtype)  # type: ignore
+            >>> x = paddle.rand([10, 2, 2], dtype=dtype)  # type: ignore[arg-type]
             >>> model = MyLayer(dtype)
             >>> x[0] = float(0)
             >>> loss = model(x)
diff --git a/python/paddle/base/layers/math_op_patch.py b/python/paddle/base/layers/math_op_patch.py
index 1dd3c19b44d9a..dfd4c802d89f9 100644
--- a/python/paddle/base/layers/math_op_patch.py
+++ b/python/paddle/base/layers/math_op_patch.py
@@ -352,7 +352,7 @@ def astype(self, dtype):
             >>> import paddle
             >>> import numpy as np
-            >>> x = np.ones([2, 2], np.float32)
+            >>> x = np.ones([2, 2], np.float32)  # type: ignore[var-annotated]
             >>> with base.dygraph.guard():
             ...     original_variable = paddle.to_tensor(x)
             ...     print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
diff --git a/python/paddle/distributed/communication/stream/gather.py b/python/paddle/distributed/communication/stream/gather.py
index c0405ec696bc0..45b86b0215e0f 100644
--- a/python/paddle/distributed/communication/stream/gather.py
+++ b/python/paddle/distributed/communication/stream/gather.py
@@ -83,7 +83,7 @@ def gather(
             >>> import paddle.distributed as dist
             >>> dist.init_parallel_env()
-            >>> gather_list = []
+            >>> gather_list = []  # type: ignore[var-annotated]
             >>> if dist.get_rank() == 0:
             ...     data = paddle.to_tensor([1, 2, 3])
             ...     dist.stream.gather(data, gather_list, dst=0)
diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py
index 0d905b4f5d985..791f8834c37a6 100644
--- a/python/paddle/distributed/parallel.py
+++ b/python/paddle/distributed/parallel.py
@@ -334,7 +334,7 @@ class DataParallel(layers.Layer):
             ...     model = paddle.DataParallel(model)
             ...     opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
             ...     for step in range(10):
-            ...         x_data = numpy.random.randn(2, 2).astype(numpy.float32)
+            ...         x_data = numpy.random.randn(2, 2).astype(numpy.float32)  # type: ignore[var-annotated]
             ...         x = paddle.to_tensor(x_data)
             ...         x.stop_gradient = False
             ...         # step 1 : skip gradient synchronization by 'no_sync'
diff --git a/python/paddle/optimizer/lbfgs.py b/python/paddle/optimizer/lbfgs.py
index a0198048ecfea..5a41e119f08bf 100644
--- a/python/paddle/optimizer/lbfgs.py
+++ b/python/paddle/optimizer/lbfgs.py
@@ -399,10 +399,10 @@ class LBFGS(Optimizer):
             >>> paddle.disable_static()
             >>> np.random.seed(0)
-            >>> np_w = np.random.rand(1).astype(np.float32)  # type: ignore
-            >>> np_x = np.random.rand(1).astype(np.float32)  # type: ignore
+            >>> np_w = np.random.rand(1).astype(np.float32)  # type: ignore[var-annotated]
+            >>> np_x = np.random.rand(1).astype(np.float32)  # type: ignore[var-annotated]

-            >>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)]  # type: ignore
+            >>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)]  # type: ignore[var-annotated]
             >>> # y = 2x
             >>> targets = [2 * x for x in inputs]
diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py
index 2a0f4f5df2eed..d4d35bcb1e05a 100644
--- a/python/paddle/tensor/attribute.py
+++ b/python/paddle/tensor/attribute.py
@@ -102,7 +102,7 @@ def shape(input: Tensor) -> Tensor:
             >>> exe = paddle.static.Executor(paddle.CPUPlace())
             >>> exe.run(paddle.static.default_startup_program())

-            >>> img = np.ones((3, 100, 100)).astype(np.float32)  # type: ignore
+            >>> img = np.ones((3, 100, 100)).astype(np.float32)  # type: ignore[var-annotated]
             >>> res = exe.run(paddle.static.default_main_program(), feed={'x':img}, fetch_list=[output])
             >>> print(res)
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 506525d1e2e49..8e6635a641f62 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -2483,7 +2483,7 @@ def assign(x: TensorLike, output: paddle.Tensor | None = None) -> paddle.Tensor:
              [2.5 2.5]]
             >>> array = np.array([[1, 1], [3, 4], [1, 3]]).astype(
             ...     np.int64
-            ... )  # type: ignore
+            ... )  # type: ignore[var-annotated]
             >>> result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             >>> paddle.assign(array, result1)
             >>> print(result1.numpy())
diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py
index bf5fc470e87f3..2e25cbc76e164 100644
--- a/python/paddle/vision/transforms/transforms.py
+++ b/python/paddle/vision/transforms/transforms.py
@@ -232,7 +232,7 @@ class BaseTransform(_Transform[_InputT, _RetT]):
             ...     else:
             ...         raise TypeError("Unexpected type {}".format(type(img)))
             ...
-            >>> class CustomRandomFlip(BaseTransform):  # type: ignore
+            >>> class CustomRandomFlip(BaseTransform):  # type: ignore[type-arg]
             ...     def __init__(self, prob=0.5, keys=None):
             ...         super().__init__(keys)
             ...         self.prob = prob