diff --git a/_typos.toml b/_typos.toml
index 9f359a9e3b6e8..615776ef8f295 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -318,18 +318,6 @@ Multiplie = 'Multiplie'
 Muti = 'Muti'
 muti = 'muti'
 mutexs = 'mutexs'
-nams = 'nams'
-namess = 'namess'
-neccessary = 'neccessary'
-Neet = 'Neet'
-neet = 'neet'
-neeed = 'neeed'
-nedd = 'nedd'
-neighor = 'neighor'
-netwrok = 'netwrok'
-normlize = 'normlize'
-noraml = 'noraml'
-numer = 'numer'
 occured = 'occured'
 Ocurred = 'Ocurred'
 occures = 'occures'
diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
index e5de5909e053a..ae028086bcce8 100644
--- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -64,7 +64,7 @@ static int BuildFusion(Graph* graph,
 #define SET_IN(Key, node__) op_desc.SetInput(#Key, {node__->Name()});
   SET_IN(Ids, input);
   SET_IN(WeightH, weight_h);
-  // Neet to have this passed as We need Wc data for peephole connections
+  // Need to have this passed as We need Wc data for peephole connections
   SET_IN(Bias, bias);
 #undef SET_IN
diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.h b/paddle/fluid/framework/new_executor/new_executor_defs.h
index 4ae278141ca36..3f2c88afb102a 100644
--- a/paddle/fluid/framework/new_executor/new_executor_defs.h
+++ b/paddle/fluid/framework/new_executor/new_executor_defs.h
@@ -350,7 +350,7 @@ static constexpr char kMemcpyH2D[] = "memcpy_h2d";
 static constexpr char kMemcpyD2H[] = "memcpy_d2h";
 static constexpr char kFetchVarName[] = "fetch";

-// static_ref_ is the numer of last live ops calculated to statically after
+// static_ref_ is the number of last live ops calculated to statically after
 // `build` the Instructions. dynamic_ref_ is the runtime version ref which will
 // be decreased by one dynamically after the execution of an op (in last ops
 // list). var_ is the related variable
@@ -381,7 +381,7 @@ class VarRefInfo {
   Variable* var_;
 };

-// static_dep_ is the numer of dependencies (ops that must run before it) of
+// static_dep_ is the number of dependencies (ops that must run before it) of
 // each op which is calculated to statically. static_dep_ is the runtime
 // version dep which will be decreased by one dynamically after the execution of
 // one dependency op.
diff --git a/paddle/fluid/operators/controlflow/pylayer_op.cc b/paddle/fluid/operators/controlflow/pylayer_op.cc
index ab8b0dc6ca42d..cbc2bf6cce809 100644
--- a/paddle/fluid/operators/controlflow/pylayer_op.cc
+++ b/paddle/fluid/operators/controlflow/pylayer_op.cc
@@ -219,7 +219,7 @@ class PyLayerBackwardOp : public PyLayerOp {

     core_->Run({}, false);

-    // NOTE: It's neccessary. The reason of associating `inside_grads` and
+    // NOTE: It's necessary. The reason of associating `inside_grads` and
     // `outside_grads` at runtime `RunImpl` instead of `assign` op at block is
     // that the Var name of grad_op's outputs may be changed in the
     // `append_backward` function (e.g. `_addup_repetitive_outputs_`).
diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h
index c584b8306b854..2dd89fd355c8f 100644
--- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h
+++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h
@@ -139,7 +139,7 @@ inline ExprVec GetSliceDims(const ExprVec &in_dims,
   for (size_t i = 0; i < axes.size(); ++i) {
     auto out_dim = ends[i] - starts[i];
     int64_t axis = axes[i];
-    // If in_dims[axis] or ends[i] have symbol, nedd get Min(in_dims[axis] -
+    // If in_dims[axis] or ends[i] have symbol, need get Min(in_dims[axis] -
     // start[i], ends[i] - start[i] )
     if (!out_dim.isa() &&
         (!in_dims[axis].isa() || !ends[i].isa())) {
diff --git a/paddle/phi/infermeta/spmd_rules/stack.cc b/paddle/phi/infermeta/spmd_rules/stack.cc
index 96f4e11e208b3..be583117efe7b 100644
--- a/paddle/phi/infermeta/spmd_rules/stack.cc
+++ b/paddle/phi/infermeta/spmd_rules/stack.cc
@@ -53,7 +53,7 @@ SpmdInfo StackInferSpmd(const std::vector& x, int axis) {
   auto non_empty_index =
       all_empty ? 0 : non_empty_iter - tensor_shapes.begin();
   auto ndim = tensor_shapes[non_empty_index].size();
-  // normlize dim
+  // normalize dim
   auto dim = axis < 0 ? static_cast(ndim) + axis + 1 : axis;
   std::vector input_attrs;
   std::transform(
diff --git a/paddle/phi/kernels/gpu/grid_sample_grad_kernel.cu b/paddle/phi/kernels/gpu/grid_sample_grad_kernel.cu
index 2b6ceff59afa7..7c5108ef2f110 100644
--- a/paddle/phi/kernels/gpu/grid_sample_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/grid_sample_grad_kernel.cu
@@ -535,7 +535,7 @@ __global__ void GridSampler3DCudaBackwardKernel(const int nthreads,
       auto iy_nearest = static_cast(std::round(iy));
       auto iz_nearest = static_cast(std::round(iz));

-      // assign nearest neighor pixel value to output pixel
+      // assign nearest neighbor pixel value to output pixel
       int gOut_offset = n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
       T* gInp_ptr_NC = grad_input + n * inp_sN;
       for (int c = 0; c < out_c;
diff --git a/paddle/phi/kernels/gpu/grid_sample_kernel.cu b/paddle/phi/kernels/gpu/grid_sample_kernel.cu
index 8499e371d10cf..9280748ea6319 100644
--- a/paddle/phi/kernels/gpu/grid_sample_kernel.cu
+++ b/paddle/phi/kernels/gpu/grid_sample_kernel.cu
@@ -306,7 +306,7 @@ __global__ void GridSample3DCudaKernel(const int nthreads,
       int iy_nearest = static_cast(std::round(iy));
       int iz_nearest = static_cast(std::round(iz));

-      // assign nearest neighor pixel value to output pixel
+      // assign nearest neighbor pixel value to output pixel
       auto inp_ptr_NC = input + n * inp_sN;
       auto out_ptr_NCDHW =
           output + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
diff --git a/python/paddle/audio/functional/window.py b/python/paddle/audio/functional/window.py
index 2962cb0fbed0e..d0725ea9b7a74 100644
--- a/python/paddle/audio/functional/window.py
+++ b/python/paddle/audio/functional/window.py
@@ -213,7 +213,7 @@ def _taylor(
     signs[1::2] = -1
     m2 = ma * ma
     for mi in range(len(ma)):
-        numer = signs[mi] * paddle.prod(
+        number = signs[mi] * paddle.prod(
             1 - m2[mi] / s2 / (A**2 + (ma - 0.5) ** 2)
         )
         if mi == 0:
@@ -227,7 +227,7 @@ def _taylor(
                 * paddle.prod(1 - m2[mi] / m2[mi + 1 :])
             )

-        Fm[mi] = numer / denom
+        Fm[mi] = number / denom

     def W(n):
         return 1 + 2 * paddle.matmul(
diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py
index 74ee7e0bfdb3c..5d5e962c6695d 100644
--- a/python/paddle/distributed/auto_parallel/process_mesh.py
+++ b/python/paddle/distributed/auto_parallel/process_mesh.py
@@ -355,7 +355,7 @@ def __ne__(self, other: ProcessMesh | core.ProcessMesh) -> None:
         return not self.__eq__(other)

     def __str__(self) -> str:
-        str = f"shape {self.shape}, process_ids {self.process_ids}, dim_nams {self.dim_names}"
+        str = f"shape {self.shape}, process_ids {self.process_ids}, dim_names {self.dim_names}"
         return str

     def __hash__(self) -> int:
diff --git a/python/paddle/jit/api.py b/python/paddle/jit/api.py
index eb9c2ae31ee40..65311c303f5f6 100644
--- a/python/paddle/jit/api.py
+++ b/python/paddle/jit/api.py
@@ -1435,7 +1435,7 @@ def save(
         if combine_params:
             if use_pir_api():
                 # NOTE(Ruting): concrete_program has been pruned when init partialProgramLayer,
-                # so we do not neet to prune again.
+                # so we do not need to prune again.
                 for var in concrete_program.main_program.list_vars():
                     if var.persistable:
diff --git a/test/dygraph_to_static/predictor_utils.py b/test/dygraph_to_static/predictor_utils.py
index acc9c9b0c3838..57d7c9d52ed97 100644
--- a/test/dygraph_to_static/predictor_utils.py
+++ b/test/dygraph_to_static/predictor_utils.py
@@ -74,7 +74,7 @@ def _get_analysis_outputs(self, config):
         Args:
             config (AnalysisConfig): predictor configs
         Returns:
-            outs (numpy array): forward netwrok prediction outputs
+            outs (numpy array): forward network prediction outputs
         '''
         predictor = create_paddle_predictor(config)
         names = predictor.get_input_names()
diff --git a/test/legacy_test/test_dataset_consistency_inspection.py b/test/legacy_test/test_dataset_consistency_inspection.py
index 192b6ef7a611d..90128f1fca039 100644
--- a/test/legacy_test/test_dataset_consistency_inspection.py
+++ b/test/legacy_test/test_dataset_consistency_inspection.py
@@ -445,7 +445,7 @@ def test_var_consistency_insepection(self):
                 )
             )

-        # context_feat_namess
+        # context_feat_names
         for feat_name in range(len_sparse_query + 16, len_sparse_query + 18):
             slot_data.append(
                 paddle.static.data(
@@ -471,7 +471,7 @@ def test_var_consistency_insepection(self):
                 )
             )

-        # neg context_feat_namess
+        # neg context_feat_names
         for feat_name in range(len_sparse_query + 33, len_sparse_query + 35):
             slot_data.append(
                 paddle.static.data(
diff --git a/test/legacy_test/test_log_normal.py b/test/legacy_test/test_log_normal.py
index 3140d4751b52f..4f792f0e06d6f 100644
--- a/test/legacy_test/test_log_normal.py
+++ b/test/legacy_test/test_log_normal.py
@@ -22,7 +22,7 @@
 paddle.seed(10)


-def log_noraml_mean(mean, std):
+def log_normal_mean(mean, std):
     return np.exp(mean + np.power(std, 2) / 2.0)


@@ -152,7 +152,7 @@ def test_api(self):
         mean = np.mean(ret, axis=0, keepdims=True)
         var = np.var(ret, axis=0, keepdims=True)

-        mean_ref = log_noraml_mean(self.mean, self.std)
+        mean_ref = log_normal_mean(self.mean, self.std)
         var_ref = log_normal_var(self.mean, self.std)
         np.testing.assert_allclose(mean_ref, mean, rtol=0.2, atol=0.2)
         np.testing.assert_allclose(var_ref, var, rtol=0.2, atol=0.2)
diff --git a/test/legacy_test/test_log_normal_inplace.py b/test/legacy_test/test_log_normal_inplace.py
index 0412646a1c053..48d9af89176ba 100644
--- a/test/legacy_test/test_log_normal_inplace.py
+++ b/test/legacy_test/test_log_normal_inplace.py
@@ -21,7 +21,7 @@
 from paddle import base


-def log_noraml_mean(mean, std):
+def log_normal_mean(mean, std):
     return np.exp(mean + np.power(std, 2) / 2.0)


@@ -106,7 +106,7 @@ def test_log_normal_inplace_op_distribution(self):
         tensor.log_normal_(self.mean, self.std)
         mean = np.mean(tensor.numpy())
         var = np.var(tensor.numpy())
-        mean_ref = log_noraml_mean(self.mean, self.std)
+        mean_ref = log_normal_mean(self.mean, self.std)
         var_ref = log_normal_var(self.mean, self.std)
         np.testing.assert_allclose(mean_ref, mean, rtol=0.2, atol=0.2)
         np.testing.assert_allclose(var_ref, var, rtol=0.2, atol=0.2)
diff --git a/tools/jetson_infer_op.py b/tools/jetson_infer_op.py
index a5a72278e1d36..823ba3246ea66 100644
--- a/tools/jetson_infer_op.py
+++ b/tools/jetson_infer_op.py
@@ -118,7 +118,7 @@ def add_import_skip_return(file, pattern_import, pattern_skip, pattern_return):
         match_obj = pattern_2.search(line)
         if match_obj is not None:
             file_data += (
-                "@skip_check_grad_ci(reason='jetson do n0t neeed this !')\n"
+                "@skip_check_grad_ci(reason='jetson do n0t need this !')\n"
             )
             print("### add @skip_check_grad_ci ####")