Merge branch 'develop' into F403/fix
SigureMo committed Oct 19, 2022
2 parents 7240f08 + d00b7d8 commit dd59e21
Showing 665 changed files with 16,929 additions and 6,129 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -72,7 +72,9 @@ tools/nvcc_lazy

# these files (directories) are generated before build system generation
paddle/fluid/operators/generated_op.cc
paddle/fluid/operators/generated_sparse_op.cc
paddle/phi/ops/compat/generated_sig.cc
paddle/phi/ops/compat/generated_sparse_sig.cc
paddle/phi/api/yaml/parsed_apis/
python/paddle/utils/code_gen/
paddle/fluid/pybind/tmp_eager_op_function_impl.h
1 change: 1 addition & 0 deletions AUTHORS.md
@@ -83,6 +83,7 @@ This is an incomplete list of authors of [Paddle](https://github.com/PaddlePaddl
| xushaoyong | Shao-Yong Xu |
| Yancey1989 | Xu Yan |
| zhaopu7 | Pu Zhao |
| zhiqiu | Qiu-Liang Chen |
| zhouxiao-coder | Xiao Zhou |
| Zrachel | Rui-Qing Zhang |
| jeng1220 | Bai-Cheng(Ryan) Jeng (NVIDIA) |
6 changes: 2 additions & 4 deletions CMakeLists.txt
@@ -47,6 +47,7 @@ find_package(CUDA QUIET)
find_package(MKL CONFIG QUIET)
option(WITH_ONEMKL "Compile PaddlePaddle with oneMKL" OFF)
option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND})
option(WITH_MPI "Compile PaddlePaddle with MPI" OFF)
option(WITH_TENSORRT "Compile PaddlePaddle with NVIDIA TensorRT" OFF)
option(WITH_XPU "Compile PaddlePaddle with BAIDU KUNLUN XPU" OFF)
option(WITH_XPU_KP "Compile PaddlePaddle with BAIDU XPU compiler " OFF)
@@ -336,7 +337,7 @@ endif()

if(LINUX
AND NOT WITH_CUSTOM_DEVICE
AND NOT ON_INFER)
AND WITH_PYTHON)
set(WITH_CUSTOM_DEVICE
ON
CACHE BOOL "Enable Custom Device when compiling for Linux" FORCE)
@@ -485,9 +486,6 @@ if(WITH_DISTRIBUTE)
ON
CACHE STRING "Enable GLOO when compiling WITH_DISTRIBUTE=ON." FORCE)
endif()
set(WITH_MPI
ON
CACHE STRING "Enable MPI when compiling WITH_DISTRIBUTE=ON." FORCE)
if(WITH_ASCEND_CL AND NOT WITH_ARM_BRPC)
# disable WITH_PSCORE for NPU before include third_party
message(
4 changes: 2 additions & 2 deletions cmake/external/xpu.cmake
@@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so")
if(NOT DEFINED XPU_BASE_URL)
set(XPU_BASE_URL_WITHOUT_DATE
"https://baidu-kunlun-product.su.bcebos.com/KL-SDK/klsdk-dev")
set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220928")
set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20221016")
else()
set(XPU_BASE_URL "${XPU_BASE_URL}")
endif()
@@ -19,7 +19,7 @@ endif()
if(NOT DEFINED XPU_XDNN_BASE_URL)
set(XPU_XDNN_BASE_URL_WITHOUT_DATE
"https://klx-sdk-release-public.su.bcebos.com/xdnn/dev")
set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220928")
set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20221016")
else()
set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL}")
endif()
7 changes: 0 additions & 7 deletions cmake/operators.cmake
@@ -511,13 +511,6 @@ function(op_library TARGET)
# Append first implemented MKLDNN activation operator
if(${MKLDNN_FILE} STREQUAL "activation_mkldnn_op")
file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(softplus, MKLDNN);\n")
elseif(${MKLDNN_FILE} STREQUAL "conv_mkldnn_op")
file(APPEND ${pybind_file}
"USE_OP_DEVICE_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN, FP32);\n")
file(APPEND ${pybind_file}
"USE_OP_DEVICE_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN, S8);\n")
file(APPEND ${pybind_file}
"USE_OP_DEVICE_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN, U8);\n")
elseif(${MKLDNN_FILE} STREQUAL "fc_mkldnn_op")
file(APPEND ${pybind_file}
"USE_OP_DEVICE_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, FP32);\n")
4 changes: 3 additions & 1 deletion paddle/fluid/distributed/CMakeLists.txt
@@ -42,7 +42,9 @@ set(DISTRIBUTE_COMPILE_FLAGS
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
endif()

if(LINUX)
add_subdirectory(rpc)
endif()
add_subdirectory(common)
add_subdirectory(ps)
add_subdirectory(test)
8 changes: 8 additions & 0 deletions paddle/fluid/distributed/collective/reducer.cc
@@ -254,6 +254,10 @@ static void ConcatTensorsWithType(
ConcatTensorsForAllReduce<DeviceContext, double>()(
context, dense_tensors_, p_dense_contents);
break;
case phi::DataType::BFLOAT16:
ConcatTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
context, dense_tensors_, p_dense_contents);
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Data type (%s) is not supported when it concats tensors for "
@@ -281,6 +285,10 @@ static void SplitTensorsWithType(const DeviceContext &context,
SplitTensorsForAllReduce<DeviceContext, double>()(
context, p_dense_contents, p_dense_tensors);
break;
case phi::DataType::BFLOAT16:
SplitTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
context, p_dense_contents, p_dense_tensors);
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Data type (%s) is not supported when it splits tensors for "
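For context on the reducer.cc hunks above: both ConcatTensorsWithType and SplitTensorsWithType switch on the tensor's data type and invoke a templated functor, so BFLOAT16 support is added by inserting one more case at each dispatch site. Below is a minimal, self-contained C++ sketch of that dispatch pattern; every name in it (DataType, bfloat16, ConcatForAllReduce, ConcatWithType) is an illustrative stand-in, not Paddle's actual API.

#include <cstdint>
#include <iostream>
#include <stdexcept>

enum class DataType { FLOAT32, FLOAT64, BFLOAT16 };

// Stand-in for platform::bfloat16: just 16 bits of storage.
struct bfloat16 {
  uint16_t value;
};

// Stand-in for ConcatTensorsForAllReduce<DeviceContext, T>; the real functor
// concatenates dense tensors into one contiguous buffer before all-reduce.
template <typename T>
struct ConcatForAllReduce {
  void operator()() const {
    std::cout << "concat as " << sizeof(T) << "-byte elements\n";
  }
};

// Mirrors the shape of ConcatTensorsWithType: switch on dtype and pick the
// matching specialization. The BFLOAT16 case is the kind this commit adds.
void ConcatWithType(DataType dtype) {
  switch (dtype) {
    case DataType::FLOAT32:
      ConcatForAllReduce<float>()();
      break;
    case DataType::FLOAT64:
      ConcatForAllReduce<double>()();
      break;
    case DataType::BFLOAT16:
      ConcatForAllReduce<bfloat16>()();
      break;
    default:
      throw std::runtime_error("unsupported data type for all-reduce concat");
  }
}

int main() {
  ConcatWithType(DataType::BFLOAT16);  // prints: concat as 2-byte elements
  return 0;
}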