
Commit c0c87b0

Fix typo elemenewist_add_op -> elementwise_add_op [fluid_ops] (#74245)
1 parent 5f14c9a commit c0c87b0

10 files changed: +20 additions, -30 deletions

paddle/fluid/pybind/compiled_program.cc

Lines changed: 2 additions & 2 deletions
@@ -387,7 +387,7 @@ void BindCompiledProgram(pybind11::module &m) { // NOLINT
             self.fuse_gemm_epilogue_ = b;
           },
           R"DOC((bool, optional): fuse_gemm_epilogue indicate whether
-                to fuse matmul_op, elemenewist_add_op and activation_op,
+                to fuse matmul_op, elementwise_add_op and activation_op,
                 it may make the execution faster. Default is False.

                 Examples:

@@ -410,7 +410,7 @@ void BindCompiledProgram(pybind11::module &m) { // NOLINT
             PADDLE_ENFORCE_NE(self.IsFinalized(),
                               true,
                               common::errors::PreconditionNotMet(
-                                  "BuildStrategy has been finlaized, cannot be "
+                                  "BuildStrategy has been finalized, cannot be "
                                   "configured again."));
             self.fuse_dot_product_attention_ = b;
           },
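
For context, a minimal sketch of how this option is typically enabled from Python (assuming the public paddle.static.BuildStrategy / CompiledProgram API; the snippet is not part of this diff):

import paddle

paddle.enable_static()

build_strategy = paddle.static.BuildStrategy()
# Request fusion of matmul_op, elementwise_add_op and activation_op
# into a single GEMM-epilogue kernel; defaults to False.
build_strategy.fuse_gemm_epilogue = True

compiled_program = paddle.static.CompiledProgram(
    paddle.static.default_main_program(), build_strategy=build_strategy
)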

paddle/phi/backends/device_ext.h

Lines changed: 4 additions & 4 deletions
@@ -395,7 +395,7 @@ struct C_DeviceInterface {
                               size_t size);

   /**
-   * @brief Asynchonrize memory copy from host to device
+   * @brief Asynchronize memory copy from host to device
    *
    * @param[C_Device] device Core fill it with a physical id
    * @param[C_Stream] stream

@@ -410,7 +410,7 @@ struct C_DeviceInterface {
                               size_t size);

   /**
-   * @brief Asynchonrize memory copy from device to host
+   * @brief Asynchronize memory copy from device to host
    *
    * @param[C_Device] device Core fill it with a physical id
    * @param[C_Stream] stream

@@ -425,7 +425,7 @@ struct C_DeviceInterface {
                               size_t size);

   /**
-   * @brief Asynchonrize memory copy from device to device
+   * @brief Asynchronize memory copy from device to device
    *
    * @param[C_Device] device Core fill it with a physical id
    * @param[C_Stream] stream

@@ -440,7 +440,7 @@ struct C_DeviceInterface {
                               size_t size);

   /**
-   * @brief Peer asynchonrize memory copy from host to device
+   * @brief Peer asynchronize memory copy from host to device
    *
    * @param[C_Device] device Core fill it with a physical id
    * @param[C_Stream] stream

paddle/phi/backends/onednn/axpy_handler.h

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ class OneDNNAXPYHandler {
   // Private implementation idiom to hide dependency on oneDNN headers.
   class Impl;
   // We need custom deleter, since the compiler is unable to parameterize
-  // an allocator's default deleter due to incomple type.
+  // an allocator's default deleter due to incomplete type.
   std::unique_ptr<Impl, void (*)(Impl*)> pimpl_;
 };
 }  // namespace funcs

paddle/phi/backends/onednn/matmul_utils.h

Lines changed: 6 additions & 6 deletions
@@ -59,7 +59,7 @@ class MatmulOneDNNHandler : public OneDNNHandlerNoCachingT<XT, dnnl::matmul> {
     std::vector<int64_t> x_strides(x_dims.size() - 3, 1);
     std::vector<int64_t> y_strides(x_dims.size() - 3, 1);
     std::vector<int64_t> out_strides(x_dims.size() - 3, 1);
-    std::vector<int64_t> out_ddims(x_dims.size() - 3, 1);
+    std::vector<int64_t> out_dims(x_dims.size() - 3, 1);

     x_strides.reserve(x_dims.size());
     y_strides.reserve(x_dims.size());

@@ -78,20 +78,20 @@ class MatmulOneDNNHandler : public OneDNNHandlerNoCachingT<XT, dnnl::matmul> {
     }

     out_strides.insert(out_strides.end(), {M * N, N, 1});
-    out_ddims.insert(out_ddims.end(),
-                     {std::max(x_dims[MB_idx], y_dims[MB_idx]), M, N});
+    out_dims.insert(out_dims.end(),
+                    {std::max(x_dims[MB_idx], y_dims[MB_idx]), M, N});

     for (int i = x_dims.size() - 4; i >= 0; --i) {
-      out_ddims[i] = std::max(x_dims[i], y_dims[i]);
+      out_dims[i] = std::max(x_dims[i], y_dims[i]);
       x_strides[i] = x_dims[i + 1] * x_strides[i + 1];
       y_strides[i] = y_dims[i + 1] * y_strides[i + 1];

-      out_strides[i] = out_ddims[i + 1] * out_strides[i + 1];
+      out_strides[i] = out_dims[i + 1] * out_strides[i + 1];
     }

     auto x_md = memory::desc(x_dims, OneDNNGetDataType<XT>(), x_strides);
     auto y_md = memory::desc(y_dims, OneDNNGetDataType<YT>(), y_strides);
-    auto out_md = memory::desc(out_ddims, OneDNNGetDataType<OT>(), out_strides);
+    auto out_md = memory::desc(out_dims, OneDNNGetDataType<OT>(), out_strides);

     this->AcquireForwardPrimitiveDescriptor(x_md, y_md, out_md);
   }
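
The renamed out_dims/out_strides loop above derives the broadcasted output shape and its row-major strides for a batched matmul. An illustrative-only Python sketch of the same logic (plain Python, not the Paddle/oneDNN API), assuming x has shape [..., M, K] and y has shape [..., K, N]:

def matmul_out_dims_and_strides(x_dims, y_dims, M, N):
    batch_rank = len(x_dims) - 2
    # Broadcast every batch axis: the output dim is the max of the two inputs.
    out_dims = [max(a, b) for a, b in zip(x_dims[:batch_rank], y_dims[:batch_rank])]
    out_dims += [M, N]
    # Row-major strides: the innermost axis is contiguous (stride 1),
    # each outer stride is the product of the inner dims.
    out_strides = [1] * len(out_dims)
    for i in range(len(out_dims) - 2, -1, -1):
        out_strides[i] = out_dims[i + 1] * out_strides[i + 1]
    return out_dims, out_strides

# x: [2, 1, 4, 3], y: [1, 5, 3, 6] -> dims [2, 5, 4, 6], strides [120, 24, 6, 1]
print(matmul_out_dims_and_strides([2, 1, 4, 3], [1, 5, 3, 6], M=4, N=6))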

paddle/phi/core/dense_tensor.cc

Lines changed: 1 addition & 2 deletions
@@ -230,8 +230,7 @@ void DenseTensor::set_meta(const DenseTensorMeta& meta) {
    1. Designed behaviour: DenseTensor constructed with its underlying storage_
       initialized
    2. Legacy behaviour(fluid): DenseTensor constructed using default
-      constructor, where
-                              storage_ won't be initialized until the first
+      constructor, where storage_ won't be initialized until the first
       call to mutable_data(place)
 */
 void DenseTensor::ResizeAndAllocate(const DDim& dims) {

paddle/phi/core/framework/var_type_helper.h

Lines changed: 1 addition & 2 deletions
@@ -281,8 +281,7 @@ extern inline paddle::framework::proto::VarType::Type ToRealType(
     default:
       PADDLE_THROW(common::errors::Unimplemented(
           "Unknown complex value data type (%s), now only support complex64 "
-          "and "
-          "complex128.",
+          "and complex128.",
           VarDataTypeToString(t)));
   }
 }

paddle/phi/kernels/funcs/fft_fill_conj.h

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ HOSTDEVICE inline bool is_conj_part(const int64_t dst_idx,

 // FFTFillConjFunctor fill the destination tensor with source tensor and
 // conjugate symmetry element of source tensor .
-// Use framework::ForRange to iterate destination element with
+// Use phi::ForRange to iterate destination element with
 // supporting different device
 template <typename C>
 struct FFTFillConjFunctor {

paddle/phi/kernels/funcs/sequence_padding.cu

Lines changed: 0 additions & 8 deletions
@@ -164,14 +164,6 @@ class UnpaddingDenseTensorFunctor<phi::GPUContext, T> {
                                               pad_seq_len,
                                               step_width,
                                               layout);
-    /*
-    if (!norm_by_times && seq_num == 1UL && pad_seq_len == max_seq_len) {
-      paddle::framework::TensorCopy(pad_tensor, dev_ctx.GetPlace(), dev_ctx,
-                                    seq_tensor);
-      seq_tensor->Resize(seq_tensor_dims);
-      return;
-    }
-    */

     const int kBlockSize = 512;
paddle/phi/kernels/funcs/strided_slice.h

Lines changed: 2 additions & 2 deletions
@@ -529,9 +529,9 @@ void StridedSliceGradCompute(const Context& dev_ctx,
                              const std::vector<int>& infer_flags,
                              const std::vector<int>& decrease_axis,
                              TensorArray* x_grad) {
-  // Note(weixin):Since the shape of `framework::GradVarName("Input")` of
+  // Note(weixin):Since the shape of `x_grad` of
   // StridedSliceGrad cannot be calculated by
-  // `framework::GradVarName("Output")`, the dim of "Input" is used to
+  // `out_grad`, the dim of "x" is used to
   // calculate the output shape. when set it to inplace OP, there may be
   // some problems.
   const int64_t size = x.size();

test/legacy_test/test_fold_op.py

Lines changed: 2 additions & 2 deletions
@@ -229,7 +229,7 @@ def test_padding_shape():
             )

         def test_dilations_shape():
-            # dialtions_size must be 2
+            # dilations_size must be 2
             x = paddle.randn(shape=[2, 6, 6], dtype="float32")
             out = fold(
                 x,

@@ -239,7 +239,7 @@ def test_dilations_shape():
             )

         def test_strides_shape():
-            # strids_size must be 2
+            # strides_size must be 2
             x = paddle.randn(shape=[2, 6, 6], dtype="float32")
             out = fold(
                 x,
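
Both tests exercise the argument-length checks of fold. For reference, a minimal valid call under those constraints (assuming the documented paddle.nn.functional.fold signature; the shapes below are illustrative, not taken from this test file):

import paddle
import paddle.nn.functional as F

# x has shape [N, C * kh * kw, L]: N=2, C=3, a 2x2 kernel, L=12 sliding blocks.
x = paddle.randn([2, 3 * 2 * 2, 12], dtype="float32")
out = F.fold(
    x,
    output_sizes=[4, 5],   # (4-2+1) * (5-2+1) = 12 blocks, matching L
    kernel_sizes=[2, 2],
    strides=[1, 1],        # must have exactly 2 elements (or be a single int)
    dilations=[1, 1],      # likewise length 2
)
print(out.shape)           # [2, 3, 4, 5]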
