Feature/support int64 for sum #5832

Merged · 2 commits · Nov 23, 2017
Changes from all commits
4 changes: 4 additions & 0 deletions paddle/operators/math/selected_rows_functor.cc
@@ -145,6 +145,8 @@ struct SelectedRowsAddTo<platform::CPUPlace, T> {
 
 template struct SelectedRowsAddTo<platform::CPUPlace, float>;
 template struct SelectedRowsAddTo<platform::CPUPlace, double>;
+template struct SelectedRowsAddTo<platform::CPUPlace, int>;
+template struct SelectedRowsAddTo<platform::CPUPlace, int64_t>;
 
 template <typename T>
 struct SelectedRowsAddToTensor<platform::CPUPlace, T> {
@@ -175,6 +177,8 @@ struct SelectedRowsAddToTensor<platform::CPUPlace, T> {
 
 template struct SelectedRowsAddToTensor<platform::CPUPlace, float>;
 template struct SelectedRowsAddToTensor<platform::CPUPlace, double>;
+template struct SelectedRowsAddToTensor<platform::CPUPlace, int>;
+template struct SelectedRowsAddToTensor<platform::CPUPlace, int64_t>;
 
 }  // namespace math
 }  // namespace operators
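The `template struct ...;` lines above are explicit instantiations: the functor's member definitions live in this .cc file rather than in a header, so every element type a caller can request has to be listed here, or the build fails at link time with undefined symbols. A minimal sketch of the pattern, with illustrative (non-Paddle) file and type names:

```cpp
// add_to.h — declaration only; the definition is hidden in the .cc file.
#include <cstddef>

template <typename T>
struct AddTo {
  void operator()(const T* src, T* dst, std::size_t n) const;
};

// add_to.cc — definition plus explicit instantiations. A type missing from
// this list still compiles at the call site but fails to link, which is why
// int64_t support for sum starts with extending these lists.
#include <cstdint>

template <typename T>
void AddTo<T>::operator()(const T* src, T* dst, std::size_t n) const {
  for (std::size_t i = 0; i < n; ++i) dst[i] += src[i];
}

template struct AddTo<float>;
template struct AddTo<double>;
template struct AddTo<int>;      // new, mirroring this PR
template struct AddTo<int64_t>;  // new, mirroring this PR
```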
4 changes: 4 additions & 0 deletions paddle/operators/math/selected_rows_functor.cu
@@ -173,6 +173,8 @@ struct SelectedRowsAddTo<platform::GPUPlace, T> {
 
 template struct SelectedRowsAddTo<platform::GPUPlace, float>;
 template struct SelectedRowsAddTo<platform::GPUPlace, double>;
+template struct SelectedRowsAddTo<platform::GPUPlace, int>;
+template struct SelectedRowsAddTo<platform::GPUPlace, int64_t>;
 
 namespace {
 template <typename T, int block_size>
@@ -223,6 +225,8 @@ struct SelectedRowsAddToTensor<platform::GPUPlace, T> {
 
 template struct SelectedRowsAddToTensor<platform::GPUPlace, float>;
 template struct SelectedRowsAddToTensor<platform::GPUPlace, double>;
+template struct SelectedRowsAddToTensor<platform::GPUPlace, int>;
+template struct SelectedRowsAddToTensor<platform::GPUPlace, int64_t>;
 
 }  // namespace math
 }  // namespace operators
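The GPU build needs the same instantiation list in the .cu translation unit, and there the new types demand more than a linker symbol: the scatter-add kernels in this file accumulate selected rows into a dense tensor, and because row indices can repeat, each element write has to be an atomic add. A rough sketch of that access pattern (kernel name and signature are illustrative, not this file's actual kernel):

```cuda
#include <cstdint>

// One block per selected row. Row ids may repeat, so a plain dst[i] += src[i]
// would race; CudaAtomicAdd therefore needs an overload for every element
// type instantiated here — which is what the cuda_helper.h change further
// below provides for int and int64_t.
template <typename T>
__global__ void ScatterAddRows(const T* rows, const int64_t* row_ids, T* dense,
                               int64_t row_width) {
  const T* src = rows + blockIdx.x * row_width;
  T* dst = dense + row_ids[blockIdx.x] * row_width;
  for (int64_t i = threadIdx.x; i < row_width; i += blockDim.x) {
    CudaAtomicAdd(&dst[i], src[i]);  // assumes Paddle's CudaAtomicAdd wrapper
  }
}
```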
4 changes: 3 additions & 1 deletion paddle/operators/sum_op.cc
@@ -176,4 +176,6 @@ namespace ops = paddle::operators;
 REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker,
                   ops::SumOpVarTypeInference);
 REGISTER_OP_CPU_KERNEL(sum, ops::SumKernel<paddle::platform::CPUPlace, float>,
-                       ops::SumKernel<paddle::platform::CPUPlace, double>);
+                       ops::SumKernel<paddle::platform::CPUPlace, double>,
+                       ops::SumKernel<paddle::platform::CPUPlace, int>,
+                       ops::SumKernel<paddle::platform::CPUPlace, int64_t>);
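REGISTER_OP_CPU_KERNEL binds one SumKernel instantiation per supported data type to the sum operator; at runtime the framework picks the kernel whose type matches the inputs, so a dtype missing from this list simply cannot be summed. A toy sketch of that registration-plus-dispatch idea (deliberately simplified, not Paddle's real macro machinery):

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <typeindex>
#include <typeinfo>
#include <utility>

// Map (op name, element type) -> type-erased kernel.
using KernelFn = std::function<void(const void* in, void* out, std::size_t n)>;
static std::map<std::pair<std::string, std::type_index>, KernelFn> g_kernels;

template <typename T>
void RegisterSumKernel() {
  g_kernels[{"sum", std::type_index(typeid(T))}] =
      [](const void* in, void* out, std::size_t n) {
        const T* x = static_cast<const T*>(in);
        T* acc = static_cast<T*>(out);  // caller zero-initializes this
        for (std::size_t i = 0; i < n; ++i) *acc += x[i];
      };
}

// Before this PR only float/double were registered for sum; the two extra
// calls are the analogue of the int/int64_t kernels added above.
void RegisterAll() {
  RegisterSumKernel<float>();
  RegisterSumKernel<double>();
  RegisterSumKernel<int>();
  RegisterSumKernel<int64_t>();
}
```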
4 changes: 3 additions & 1 deletion paddle/operators/sum_op.cu
@@ -14,4 +14,6 @@ limitations under the License. */
 
 namespace ops = paddle::operators;
 REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel<paddle::platform::GPUPlace, float>,
-                       ops::SumKernel<paddle::platform::GPUPlace, double>);
+                       ops::SumKernel<paddle::platform::GPUPlace, double>,
+                       ops::SumKernel<paddle::platform::GPUPlace, int>,
+                       ops::SumKernel<paddle::platform::GPUPlace, int64_t>);
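The GPU registration mirrors the CPU one line for line, but it is not independent of the rest of the PR: the integer GPU kernels only work end to end because the SelectedRows paths above rely on atomic adds, which is presumably why the registration is paired with the new atomic overloads in cuda_helper.h below.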
10 changes: 10 additions & 0 deletions paddle/platform/cuda_helper.h
@@ -31,6 +31,16 @@ constexpr int PADDLE_CUDA_NUM_THREADS = 512;
 
 // For atomicAdd.
 USE_CUDA_ATOMIC(Add, float);
+USE_CUDA_ATOMIC(Add, int);
+USE_CUDA_ATOMIC(Add, unsigned int);
+USE_CUDA_ATOMIC(Add, unsigned long long int);
+
+CUDA_ATOMIC_WRAPPER(Add, int64_t) {
+  static_assert(sizeof(int64_t) == sizeof(long long int),
+                "long long should be int64");
+  return CudaAtomicAdd(reinterpret_cast<unsigned long long int*>(address),
+                       static_cast<unsigned long long int>(val));
+}
 
 #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
 USE_CUDA_ATOMIC(Add, double);
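The int64_t wrapper uses a standard CUDA trick: there is no native atomicAdd overload for signed 64-bit integers, but there is one for unsigned long long int, and two's-complement addition produces identical bits whether the operands are read as signed or unsigned, so reinterpreting the pointer is safe whenever the two types have the same size — exactly what the static_assert guards. A self-contained sketch of the same trick (function names are illustrative):

```cuda
#include <cstdint>
#include <cstdio>

// Stand-alone version of the int64_t atomic add from cuda_helper.h.
__device__ int64_t AtomicAddInt64(int64_t* address, int64_t val) {
  static_assert(sizeof(int64_t) == sizeof(unsigned long long int),
                "int64_t must be the same size as unsigned long long");
  return static_cast<int64_t>(
      atomicAdd(reinterpret_cast<unsigned long long int*>(address),
                static_cast<unsigned long long int>(val)));
}

__global__ void SumInt64(const int64_t* in, int64_t* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) AtomicAddInt64(out, in[i]);
}

int main() {
  const int n = 1024;
  int64_t *in, *out;
  cudaMallocManaged(&in, n * sizeof(int64_t));
  cudaMallocManaged(&out, sizeof(int64_t));
  for (int i = 0; i < n; ++i) in[i] = i;  // expected sum: 1023 * 1024 / 2
  *out = 0;
  SumInt64<<<(n + 255) / 256, 256>>>(in, out, n);
  cudaDeviceSynchronize();
  printf("%lld\n", static_cast<long long>(*out));  // prints 523776
  cudaFree(in);
  cudaFree(out);
  return 0;
}
```

Note that the double overload just above stays behind the __CUDA_ARCH__ >= 600 guard because hardware atomicAdd on double only exists from compute capability 6.0 (Pascal) onward.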