[CodeStyle][Typos][E-[11-20]] Fix typos(enfore, entrys, `envirnment`, `environnement`, `epoches`, `EPOCHES`, `epslion`, `eqaul`, `Errorr`, `exmaple`, `expection`, `excption`) (#70447)
enkilee authored Dec 25, 2024
1 parent 93206c5 commit 967948f
Showing 14 changed files with 23 additions and 34 deletions.
13 changes: 1 addition & 12 deletions _typos.toml
@@ -20,6 +20,7 @@ clen = 'clen'
cll = 'cll'
dout = "dout"
eles = 'eles'
entrys = 'entrys'
grad = "grad"
kinf = 'kinf'
kow = 'kow'
@@ -97,18 +98,6 @@ doubel = 'doubel'
dobule = 'dobule'
Dowloading = 'Dowloading'
downsteram = 'downsteram'
enfore = 'enfore'
entrys = 'entrys'
envirnment = 'envirnment'
environnement = 'environnement'
epoches = 'epoches'
EPOCHES = 'EPOCHES'
epslion = 'epslion'
eqaul = 'eqaul'
Errorr = 'Errorr'
exmaple = 'exmaple'
expection = 'expection'
excption = 'excption'
execuate = 'execuate'
exsit = 'exsit'
exsits = 'exsits'
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/pir/compilation_task.cc
@@ -93,7 +93,7 @@ void GroupCompilationContext::PrepareModuleBuilder() {
/**
* For functions belonging to different broadcast groups, int args and the name
* of the tensor args may be variate, but the number of the tensor args should
* be fixed. So we need to unify the tensor args and symbol args. For exmaple,
* be fixed. So we need to unify the tensor args and symbol args. For example,
* func1(_var, _var_1, S4, S5); func2(_var, _var_2, S1) would be unified to
* func1(_var, _var_1, S4, S5, S1); func2(_var, _var_2, S4, S5, S1).
*/
2 changes: 1 addition & 1 deletion paddle/cinn/utils/multi_threading.cc
@@ -100,7 +100,7 @@ void parallel_run(const WorkerFuncType& fn,
} catch (::common::EnforceNotMet& ex) {
LOG(ERROR) << ex.error_str();
PADDLE_THROW(
::common::errors::Fatal("Parallel compile Paddle enfore error"));
::common::errors::Fatal("Parallel compile Paddle enforce error"));
} catch (const std::exception& e) {
LOG(ERROR) << "Parallel compile error " << e.what();
PADDLE_THROW(::common::errors::Fatal("Parallel compile std::exception"));
2 changes: 1 addition & 1 deletion paddle/fluid/platform/device/ipu/ipu_utils.h
@@ -104,7 +104,7 @@ const popart::DataType OnnxDType2PopartType(const ONNXDataType type);
const ONNXDataType VarType2OnnxDType(const VarType::Type type);
// VarType::Type to String in Popart
const std::string VarType2PopartStr(const VarType::Type type);
// Get bool from envirnment varaible
// Get bool from environment varaible
const bool GetBoolEnv(const std::string& str);
// Request number of ipus must be pow(2, n)
const int RequestIpus(const int num_ipus);
2 changes: 1 addition & 1 deletion paddle/phi/backends/gpu/rocm/miopen_helper.h
@@ -28,7 +28,7 @@ limitations under the License. */
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/enforce.h"

// MIOPEN do not have epslion definition
// MIOPEN do not have epsilon definition
#define CUDNN_BN_MIN_EPSILON 1e-05

COMMON_DECLARE_bool(cudnn_deterministic);
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/fusion.cc
@@ -4345,7 +4345,7 @@ void CrossAttentionXPUInferMeta(
common::errors::InvalidArgument(
"The dim of input_kv should be 3! But received ",
input_kv_dims.size()));
// sequece length of q and k/v not requied to be eqaul
// sequece length of q and k/v not required to be equal
// but batch size and dim should be the same
PADDLE_ENFORCE_EQ(
input_q_dims[0],
4 changes: 2 additions & 2 deletions python/paddle/distributed/auto_parallel/high_level_api.py
@@ -309,7 +309,7 @@ def to_distributed(
>>> from paddle.distributed import to_distributed
>>> from paddle.distributed.auto_parallel.high_level_api import ToDistributedConfig
>>> EPOCHES = 1
>>> EPOCHS = 1
>>> VOCAB_SIZE = 8000
>>> BATCH_NUM = 2
>>> BATCH_SIZE = 4
@@ -670,7 +670,7 @@ def to_distributed(
... config=dist_config,
... )
>>> for epoch in range(EPOCHES):
>>> for epoch in range(EPOCHS):
... dist_model.train()
... for i, data in enumerate(dist_loader()):
... inputs, labels = data
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/launch_utils.py
@@ -508,7 +508,7 @@ def start_local_trainers(
"PADDLE_WORLD_DEVICE_IDS": ",".join(res),
}

# The following three environnement variables are used for auto mapping
# The following three environment variables are used for auto mapping
if current_env.get("PADDLE_CLUSTER_TOPO_PATH", None) is not None:
proc_env["PADDLE_CLUSTER_TOPO_PATH"] = current_env[
"PADDLE_CLUSTER_TOPO_PATH"
2 changes: 1 addition & 1 deletion python/paddle/io/dataloader/batch_sampler.py
@@ -374,7 +374,7 @@ def set_epoch(self, epoch: int) -> None:
as seeds of random numbers. By default, users may not set this, all
replicas (workers) use a different random ordering for each epoch.
If set same number at each epoch, this sampler will yield the same
ordering at all epoches.
ordering at all epochs.
Arguments:
epoch (int): Epoch number.
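For readers unfamiliar with this API, a minimal usage sketch follows; the dataset and hyperparameters are illustrative, assuming the standard paddle.io.DistributedBatchSampler and DataLoader APIs. As the docstring above describes, passing the epoch index to set_epoch reseeds the shuffle, so the ordering stays reproducible yet still differs from one epoch to the next.

import paddle
from paddle.io import DataLoader, DistributedBatchSampler

# Any map-style dataset works; MNIST is used here only for illustration.
dataset = paddle.vision.datasets.MNIST(mode='train')
sampler = DistributedBatchSampler(dataset, batch_size=64, shuffle=True)
loader = DataLoader(dataset, batch_sampler=sampler)

for epoch in range(3):
    # Reseed shuffling with the epoch index: same seed -> same ordering,
    # so using the epoch number gives a new ordering every epoch.
    sampler.set_epoch(epoch)
    for images, labels in loader:
        pass  # training step goes here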
@@ -24,7 +24,7 @@
from paddle.distributed import to_distributed
from paddle.distributed.auto_parallel.high_level_api import ToDistributedConfig

EPOCHES = 1
EPOCHS = 1
VOCAB_SIZE = 8000
BATCH_NUM = 2
BATCH_SIZE = 4
@@ -627,7 +627,7 @@ def test_to_distributed_api(self):
dist_config,
)

for epoch in range(EPOCHES):
for epoch in range(EPOCHS):
dist_model.train()
for i, data in enumerate(dist_loader()):
inputs, labels = data
2 changes: 1 addition & 1 deletion test/cpp/pir/core/paddle_fatal_test.cc
@@ -25,7 +25,7 @@ class FatalClass {

void throw_exception_in_func() {
FatalClass test_case;
PADDLE_THROW(::common::errors::External("throw excption in func"));
PADDLE_THROW(::common::errors::External("throw exception in func"));
}

void terminate_in_func() { FatalClass test_case; }
@@ -24,7 +24,7 @@
paddle.enable_static()


class TestDataNormOpErrorr(unittest.TestCase):
class TestDataNormOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x2 = paddle.static.data(name='x2', shape=[-1, 3, 4], dtype="int32")
8 changes: 4 additions & 4 deletions test/legacy_test/test_imperative_data_loader_exception.py
@@ -42,7 +42,7 @@ def test_not_capacity(self):
):
base.io.DataLoader.from_generator()

def test_single_process_with_thread_expection(self):
def test_single_process_with_thread_exception(self):
def error_sample_genarator(batch_num):
def __reader__():
for _ in range(batch_num):
@@ -60,13 +60,13 @@ def __reader__():
exception = None
try:
for _ in loader():
print("test_single_process_with_thread_expection")
print("test_single_process_with_thread_exception")
except core.EnforceNotMet as ex:
self.assertIn("Blocking queue is killed", str(ex))
exception = ex
self.assertIsNotNone(exception)

def test_multi_process_with_process_expection(self):
def test_multi_process_with_process_exception(self):
def error_sample_genarator(batch_num):
def __reader__():
for _ in range(batch_num):
@@ -84,7 +84,7 @@ def __reader__():
exception = None
try:
for _ in loader():
print("test_multi_process_with_thread_expection")
print("test_multi_process_with_thread_exception")
except core.EnforceNotMet as ex:
exception = ex
self.assertIsNotNone(exception)
10 changes: 5 additions & 5 deletions test/legacy_test/test_imperative_deepcf.py
@@ -156,7 +156,7 @@ def setUp(self):

self.batch_size = int(os.environ.get('BATCH_SIZE', 128))
self.num_batches = int(os.environ.get('NUM_BATCHES', 5))
self.num_epoches = int(os.environ.get('NUM_EPOCHES', 1))
self.num_epochs = int(os.environ.get('NUM_EPOCHS', 1))

def get_data(self):
user_ids = []
@@ -277,7 +277,7 @@ def test_deefcf(self):
else base.CUDAPlace(0)
)
exe.run(startup)
for e in range(self.num_epoches):
for e in range(self.num_epochs):
sys.stderr.write(f'epoch {e}\n')
for slice in range(
0, self.batch_size * self.num_batches, self.batch_size
@@ -307,7 +307,7 @@ def test_deefcf(self):

deepcf = DeepCF(num_users, num_items, matrix)
adam = paddle.optimizer.Adam(0.01, parameters=deepcf.parameters())
for e in range(self.num_epoches):
for e in range(self.num_epochs):
sys.stderr.write(f'epoch {e}\n')
for slice in range(
0, self.batch_size * self.num_batches, self.batch_size
@@ -343,7 +343,7 @@ def test_deefcf(self):
deepcf2 = DeepCF(num_users, num_items, matrix)
adam2 = paddle.optimizer.Adam(0.01, parameters=deepcf2.parameters())
base.set_flags({'FLAGS_sort_sum_gradient': True})
for e in range(self.num_epoches):
for e in range(self.num_epochs):
sys.stderr.write(f'epoch {e}\n')
for slice in range(
0, self.batch_size * self.num_batches, self.batch_size
@@ -379,7 +379,7 @@ def test_deefcf(self):
deepcf = DeepCF(num_users, num_items, matrix)
adam = paddle.optimizer.Adam(0.01, parameters=deepcf.parameters())

for e in range(self.num_epoches):
for e in range(self.num_epochs):
sys.stderr.write(f'epoch {e}\n')
for slice in range(
0, self.batch_size * self.num_batches, self.batch_size