diff --git a/_typos.toml b/_typos.toml
index 07da2cd966f08..3ed9daf72b856 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -134,13 +134,6 @@ defind = 'defind'
 defeine = 'defeine'
 defition = 'defition'
 defination = 'defination'
-delet = 'delet'
-dependecies = 'dependecies'
-dependecy = 'dependecy'
-decprecated = 'decprecated'
-derivated = 'derivated'
-descripor = 'descripor'
-deserailize = 'deserailize'
 Destory = 'Destory'
 DEIVCE = 'DEIVCE'
 dictionnary = 'dictionnary'
diff --git a/paddle/common/flags.cc b/paddle/common/flags.cc
index 6ea2442d3070a..1efa3c64f7ab1 100644
--- a/paddle/common/flags.cc
+++ b/paddle/common/flags.cc
@@ -1521,8 +1521,8 @@ PHI_DEFINE_EXPORTED_bool(use_shm_cache,
  * Since Version: 2.6.2
  * Value Range: bool, default=false
  * Example:
- * Note: . If True, mmap_allocator will use file descripor to open shared memory
- * operation.
+ * Note: . If True, mmap_allocator will use file descriptor to open shared
+ * memory operation.
  */
 PHI_DEFINE_EXPORTED_bool(dataloader_use_file_descriptor,
                          false,
diff --git a/paddle/fluid/eager/autograd_meta.h b/paddle/fluid/eager/autograd_meta.h
index 11476d011b8b9..0b98f796d8af4 100644
--- a/paddle/fluid/eager/autograd_meta.h
+++ b/paddle/fluid/eager/autograd_meta.h
@@ -56,7 +56,7 @@ using AbstractAutogradMeta = paddle::AbstractAutogradMeta;
  *
  *
  **/
-// No other AutogradMeta class should be derivated from AbstractAutogradMeta.
+// No other AutogradMeta class should be derived from AbstractAutogradMeta.
 // It's only used by
 class AutogradMeta : public AbstractAutogradMeta {
  public:
diff --git a/paddle/fluid/framework/ir/lock_free_optimize_pass.h b/paddle/fluid/framework/ir/lock_free_optimize_pass.h
index cece1d1a015f7..aed92a30195e8 100644
--- a/paddle/fluid/framework/ir/lock_free_optimize_pass.h
+++ b/paddle/fluid/framework/ir/lock_free_optimize_pass.h
@@ -30,7 +30,7 @@ class Graph;

 /*
  * Remove the sum op of all gradients of the backward op.
- * And remove the dependecies of the optimizer related to the
+ * And remove the dependencies of the optimizer related to the
  * same backward op.
  *
  * Before this pass:
diff --git a/paddle/phi/infermeta/spmd_rules/reshape.cc b/paddle/phi/infermeta/spmd_rules/reshape.cc
index f881812ac3b51..b7367509514b2 100644
--- a/paddle/phi/infermeta/spmd_rules/reshape.cc
+++ b/paddle/phi/infermeta/spmd_rules/reshape.cc
@@ -313,7 +313,7 @@ SpmdInfo ReshapeInferSpmdReverse(const DistMetaTensor& x,
   return {{x_dist_attr}, {out_dist_attr_dst}};
 }

-// FIXME(dev): XShape will be decprecated in the future, so we
+// FIXME(dev): XShape will be deprecated in the future, so we
 // need unify inferSpmd into ReshapeInferSpmd function.
 SpmdInfo ReshapeInferSpmdDynamic(const DistMetaTensor& x,
                                  const std::vector& shape) {
diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py
index d870f2014afcb..4b7814af7f53e 100644
--- a/python/paddle/distributed/passes/auto_parallel_sharding.py
+++ b/python/paddle/distributed/passes/auto_parallel_sharding.py
@@ -1303,7 +1303,7 @@ def _overlap_grad_comm(
                 )
                 idx += 1

-            # NOTE(Ruibiao): Why add dependecy here?
+            # NOTE(Ruibiao): Why add dependency here?
             # It is hack to delay GC for coalesce_var, which significantly reduce memory usage.
             # With the pattern of reduce_sum + scale, the coalesce_var is used by the reduce_sum
             # op on the comm-stream, and then released by the scale op on the comp-stream. Since
diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py
index 1e8de775acd53..f2e336b135519 100644
--- a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py
+++ b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py
@@ -869,7 +869,7 @@ def _build_trainer_programs(self, compiled_config):
             # for startup program
             _startup = worker.fake_init_ops_pass(_startup, compiled_config)
             _startup = worker.init_from_server_pass(_startup, compiled_config)
-            _startup = worker.delet_extra_optimizes_pass(
+            _startup = worker.delete_extra_optimizes_pass(
                 _startup, compiled_config
             )
         else:
diff --git a/test/cpp/inference/api/trt_dynamic_shape_test.cc b/test/cpp/inference/api/trt_dynamic_shape_test.cc
index 517765d2930f5..71cd80bcc2559 100644
--- a/test/cpp/inference/api/trt_dynamic_shape_test.cc
+++ b/test/cpp/inference/api/trt_dynamic_shape_test.cc
@@ -295,7 +295,7 @@ TEST(AnalysisPredictor, trt_dynamic) { TestDynamic(true); }
 TEST(AnalysisPredictor, trt_memory_serialize) {
   // serailize
   TestDynamic(true, true, true);
-  // deserailize
+  // deserialize
   TestDynamic(true, false, true);
 }
 TEST(AnalysisPredictor, trt_dynamic2) { TestDynamic2(); }