Skip to content

Commit

Permalink
polish some details
Browse files Browse the repository at this point in the history
  • Loading branch information
chenwhql committed Oct 26, 2021
1 parent 5fb285c commit 558a848
Show file tree
Hide file tree
Showing 5 changed files with 2 additions and 9 deletions.
1 change: 0 additions & 1 deletion paddle/fluid/operators/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,6 @@ if(WITH_UNITY_BUILD)
endif()

set(OP_HEADER_DEPS ${OP_HEADER_DEPS} pten)
-#set(OP_HEADER_DEPS ${OP_HEADER_DEPS} pten_utils)
register_operators(EXCLUDES
py_layer_op py_func_op warpctc_op dgc_op load_combine_op lstm_op run_program_op eye_op
recurrent_op save_combine_op sparse_attention_op sync_batch_norm_op spectral_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS})
Expand Down
2 changes: 1 addition & 1 deletion paddle/pten/core/tensor_meta.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ inline bool DenseTensorMeta::valid() const noexcept {
bool valid{true};
valid = valid && (type != DataType::UNDEFINED);
valid = valid && (layout != DataLayout::UNDEFINED);
-  valid = valid && (is_scalar || product(dims));
+  valid = valid && (is_scalar || product(dims) >= 0);
return valid;
}

Expand Down
2 changes: 0 additions & 2 deletions paddle/pten/kernels/cuda/math.cu
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,6 @@ void Sign(const CUDAContext& dev_ctx, const DenseTensor& x, DenseTensor* out) {

template <typename T>
void Mean(const CUDAContext& dev_ctx, const DenseTensor& x, DenseTensor* out) {
-  VLOG(1) << "chenweihang: call new pt mean kernel.";
-  // eigen::Mean<CUDAContext, T>(dev_ctx, x, out);
auto size_prob = x.numel();
const T* x_data = x.data<T>();
T* out_data = out->mutable_data<T>();
Expand Down
4 changes: 0 additions & 4 deletions paddle/pten/kernels/functions/eigen/sign.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,7 @@ namespace eigen {

template <typename DevCtx, typename T>
void Sign(const DevCtx& dev_ctx, const DenseTensor& x, DenseTensor* out) {
-  VLOG(1) << "enter module::Sign";
-  // out->mutable_data<T>(x.place());
out->mutable_data<T>();

-  VLOG(1) << "module::Sign, calc by eigen.";
// TODO(chenweihang): if we design new tensor, we should support
// the low-level calc functor use new tensor as input,
// which may be a big project!
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ def _decorate_compare_fused_all_reduce(self, model, use_device):
use_device,
init_feed_dict=init_data,
optimizer=self.optimizer,
-            fuse_all_optimizer_ops=True)
+            fuse_all_optimizer_ops=False)

def test_simple_fc_with_fuse_all_reduce(self):
self._decorate_compare_fused_all_reduce(simple_fc_net, DeviceType.CUDA)
Expand Down

0 comments on commit 558a848

Please sign in to comment.