[CodeStyle][Typos][N-[1-5],N-[9,10],N-[12-14]] Fix typo (`namess`, `nams`, `neccessary`, `Neet`, `neet`, `neeed`, `nedd`, `neighor`, `netwrok`, `normlize`, `noraml`, `numer`) (#70283)

* fix typos

* Update _typos.toml

* fix typos
yinfan98 authored Dec 20, 2024
1 parent 4a6d084 commit e871b7d
Showing 16 changed files with 20 additions and 32 deletions.
12 changes: 0 additions & 12 deletions _typos.toml
@@ -318,18 +318,6 @@ Multiplie = 'Multiplie'
Muti = 'Muti'
muti = 'muti'
mutexs = 'mutexs'
-nams = 'nams'
-namess = 'namess'
-neccessary = 'neccessary'
-Neet = 'Neet'
-neet = 'neet'
-neeed = 'neeed'
-nedd = 'nedd'
-neighor = 'neighor'
-netwrok = 'netwrok'
-normlize = 'normlize'
-noraml = 'noraml'
-numer = 'numer'
occured = 'occured'
Ocurred = 'Ocurred'
occures = 'occures'
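
Note on the deletions above: each `word = 'word'` entry in `_typos.toml` is an allowlist item that tells the `typos` checker to accept that spelling, so removing an entry re-enables detection of that word and is what drives the source fixes in the rest of this commit. A minimal sketch of the config shape, assuming the file follows the usual typos-cli layout (the section header and sample entries below are illustrative, not copied from the repository):

    # Illustrative typos-cli config sketch; the section name is an assumption.
    [default.extend-words]
    # Mapping a flagged word to itself whitelists it, so the checker stays silent.
    Muti = "Muti"
    mutexs = "mutexs"
    # Deleting a line such as `neet = 'neet'` lets `typos` report `neet` again.
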
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -64,7 +64,7 @@ static int BuildFusion(Graph* graph,
#define SET_IN(Key, node__) op_desc.SetInput(#Key, {node__->Name()});
SET_IN(Ids, input);
SET_IN(WeightH, weight_h);
-// Neet to have this passed as We need Wc data for peephole connections
+// Need to have this passed as We need Wc data for peephole connections
SET_IN(Bias, bias);
#undef SET_IN

4 changes: 2 additions & 2 deletions paddle/fluid/framework/new_executor/new_executor_defs.h
@@ -350,7 +350,7 @@ static constexpr char kMemcpyH2D[] = "memcpy_h2d";
static constexpr char kMemcpyD2H[] = "memcpy_d2h";
static constexpr char kFetchVarName[] = "fetch";

-// static_ref_ is the numer of last live ops calculated to statically after
+// static_ref_ is the number of last live ops calculated to statically after
// `build` the Instructions. dynamic_ref_ is the runtime version ref which will
// be decreased by one dynamically after the execution of an op (in last ops
// list). var_ is the related variable
@@ -381,7 +381,7 @@ class VarRefInfo {
Variable* var_;
};

-// static_dep_ is the numer of dependencies (ops that must run before it) of
+// static_dep_ is the number of dependencies (ops that must run before it) of
// each op which is calculated to statically. static_dep_ is the runtime
// version dep which will be decreased by one dynamically after the execution of
// one dependency op.
2 changes: 1 addition & 1 deletion paddle/fluid/operators/controlflow/pylayer_op.cc
@@ -219,7 +219,7 @@ class PyLayerBackwardOp : public PyLayerOp {

core_->Run({}, false);

-// NOTE: It's neccessary. The reason of associating `inside_grads` and
+// NOTE: It's necessary. The reason of associating `inside_grads` and
// `outside_grads` at runtime `RunImpl` instead of `assign` op at block is
// that the Var name of grad_op's outputs may be changed in the
// `append_backward` function (e.g. `_addup_repetitive_outputs_`).
@@ -139,7 +139,7 @@ inline ExprVec GetSliceDims(const ExprVec &in_dims,
for (size_t i = 0; i < axes.size(); ++i) {
auto out_dim = ends[i] - starts[i];
int64_t axis = axes[i];
-// If in_dims[axis] or ends[i] have symbol, nedd get Min(in_dims[axis] -
+// If in_dims[axis] or ends[i] have symbol, need get Min(in_dims[axis] -
// start[i], ends[i] - start[i] )
if (!out_dim.isa<int64_t>() &&
(!in_dims[axis].isa<int64_t>() || !ends[i].isa<int64_t>())) {
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/spmd_rules/stack.cc
@@ -53,7 +53,7 @@ SpmdInfo StackInferSpmd(const std::vector<DistMetaTensor>& x, int axis) {

auto non_empty_index = all_empty ? 0 : non_empty_iter - tensor_shapes.begin();
auto ndim = tensor_shapes[non_empty_index].size();
-// normlize dim
+// normalize dim
auto dim = axis < 0 ? static_cast<int64_t>(ndim) + axis + 1 : axis;
std::vector<TensorDistAttr> input_attrs;
std::transform(
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/grid_sample_grad_kernel.cu
@@ -535,7 +535,7 @@ __global__ void GridSampler3DCudaBackwardKernel(const int nthreads,
auto iy_nearest = static_cast<int>(std::round(iy));
auto iz_nearest = static_cast<int>(std::round(iz));

-// assign nearest neighor pixel value to output pixel
+// assign nearest neighbor pixel value to output pixel
int gOut_offset = n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
T* gInp_ptr_NC = grad_input + n * inp_sN;
for (int c = 0; c < out_c;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/grid_sample_kernel.cu
@@ -306,7 +306,7 @@ __global__ void GridSample3DCudaKernel(const int nthreads,
int iy_nearest = static_cast<int>(std::round(iy));
int iz_nearest = static_cast<int>(std::round(iz));

-// assign nearest neighor pixel value to output pixel
+// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input + n * inp_sN;
auto out_ptr_NCDHW =
output + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
4 changes: 2 additions & 2 deletions python/paddle/audio/functional/window.py
@@ -213,7 +213,7 @@ def _taylor(
signs[1::2] = -1
m2 = ma * ma
for mi in range(len(ma)):
-numer = signs[mi] * paddle.prod(
+number = signs[mi] * paddle.prod(
1 - m2[mi] / s2 / (A**2 + (ma - 0.5) ** 2)
)
if mi == 0:
@@ -227,7 +227,7 @@ def _taylor(
* paddle.prod(1 - m2[mi] / m2[mi + 1 :])
)

-Fm[mi] = numer / denom
+Fm[mi] = number / denom

def W(n):
return 1 + 2 * paddle.matmul(
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/process_mesh.py
@@ -355,7 +355,7 @@ def __ne__(self, other: ProcessMesh | core.ProcessMesh) -> None:
return not self.__eq__(other)

def __str__(self) -> str:
str = f"shape {self.shape}, process_ids {self.process_ids}, dim_nams {self.dim_names}"
str = f"shape {self.shape}, process_ids {self.process_ids}, dim_names {self.dim_names}"
return str

def __hash__(self) -> int:
2 changes: 1 addition & 1 deletion python/paddle/jit/api.py
@@ -1435,7 +1435,7 @@ def save(
if combine_params:
if use_pir_api():
# NOTE(Ruting): concrete_program has been pruned when init partialProgramLayer,
-# so we do not neet to prune again.
+# so we do not need to prune again.

for var in concrete_program.main_program.list_vars():
if var.persistable:
2 changes: 1 addition & 1 deletion test/dygraph_to_static/predictor_utils.py
@@ -74,7 +74,7 @@ def _get_analysis_outputs(self, config):
Args:
config (AnalysisConfig): predictor configs
Returns:
-outs (numpy array): forward netwrok prediction outputs
+outs (numpy array): forward network prediction outputs
'''
predictor = create_paddle_predictor(config)
names = predictor.get_input_names()
4 changes: 2 additions & 2 deletions test/legacy_test/test_dataset_consistency_inspection.py
@@ -445,7 +445,7 @@ def test_var_consistency_insepection(self):
)
)

-# context_feat_namess
+# context_feat_names
for feat_name in range(len_sparse_query + 16, len_sparse_query + 18):
slot_data.append(
paddle.static.data(
@@ -471,7 +471,7 @@
)
)

-# neg context_feat_namess
+# neg context_feat_names
for feat_name in range(len_sparse_query + 33, len_sparse_query + 35):
slot_data.append(
paddle.static.data(
4 changes: 2 additions & 2 deletions test/legacy_test/test_log_normal.py
@@ -22,7 +22,7 @@
paddle.seed(10)


-def log_noraml_mean(mean, std):
+def log_normal_mean(mean, std):
return np.exp(mean + np.power(std, 2) / 2.0)


@@ -152,7 +152,7 @@ def test_api(self):

mean = np.mean(ret, axis=0, keepdims=True)
var = np.var(ret, axis=0, keepdims=True)
-mean_ref = log_noraml_mean(self.mean, self.std)
+mean_ref = log_normal_mean(self.mean, self.std)
var_ref = log_normal_var(self.mean, self.std)
np.testing.assert_allclose(mean_ref, mean, rtol=0.2, atol=0.2)
np.testing.assert_allclose(var_ref, var, rtol=0.2, atol=0.2)
4 changes: 2 additions & 2 deletions test/legacy_test/test_log_normal_inplace.py
@@ -21,7 +21,7 @@
from paddle import base


-def log_noraml_mean(mean, std):
+def log_normal_mean(mean, std):
return np.exp(mean + np.power(std, 2) / 2.0)


@@ -106,7 +106,7 @@ def test_log_normal_inplace_op_distribution(self):
tensor.log_normal_(self.mean, self.std)
mean = np.mean(tensor.numpy())
var = np.var(tensor.numpy())
-mean_ref = log_noraml_mean(self.mean, self.std)
+mean_ref = log_normal_mean(self.mean, self.std)
var_ref = log_normal_var(self.mean, self.std)
np.testing.assert_allclose(mean_ref, mean, rtol=0.2, atol=0.2)
np.testing.assert_allclose(var_ref, var, rtol=0.2, atol=0.2)
2 changes: 1 addition & 1 deletion tools/jetson_infer_op.py
@@ -118,7 +118,7 @@ def add_import_skip_return(file, pattern_import, pattern_skip, pattern_return):
match_obj = pattern_2.search(line)
if match_obj is not None:
file_data += (
"@skip_check_grad_ci(reason='jetson do n0t neeed this !')\n"
"@skip_check_grad_ci(reason='jetson do n0t need this !')\n"
)
print("### add @skip_check_grad_ci ####")

