Merge branch 'develop' into add_multigammaln_api
GreatV committed Nov 1, 2023
2 parents 4b17276 + 347a565 commit 7d50471
Showing 1,242 changed files with 68,621 additions and 13,793 deletions.
3 changes: 3 additions & 0 deletions .flake8
@@ -26,6 +26,9 @@ per-file-ignores =
# These files need tabs for testing.
test/dygraph_to_static/test_error.py:E101,W191

# Ignore compare with True in sot unittest
test/sot/test_dup_top.py:E712

# temp ignore base directory
python/paddle/base/*:
E712,
2 changes: 1 addition & 1 deletion cmake/cinn/external/absl.cmake
@@ -5,7 +5,7 @@ set(ABSL_INSTALL_DIR ${THIRD_PARTY_PATH}/install/absl)
set(ABSL_PREFIX_DIR ${THIRD_PARTY_PATH}/absl)
set(ABSL_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})

set(ABSL_REPOSITORY "https://github.com/abseil/abseil-cpp.git")
set(ABSL_REPOSITORY "${GIT_URL}/abseil/abseil-cpp.git")
set(ABSL_TAG "20210324.2")

set(OPTIONAL_ARGS
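Many of the changes in this commit follow the same pattern as the absl hunk above: a hard-coded https://github.com prefix is replaced with the configurable ${GIT_URL} variable so third-party fetches can be redirected to a mirror at configure time. The sketch below shows one common way such a variable is wired up; the GIT_URL name comes from the diff, but the default value and cache wiring here are assumptions rather than Paddle's actual definition.

```
# Hypothetical sketch: let users point third-party downloads at a mirror.
# The GIT_URL name appears in the diff; this default is an assumption.
if(NOT DEFINED GIT_URL)
  set(GIT_URL
      "https://github.com"
      CACHE STRING "Base URL used to fetch third-party repositories")
endif()

# Dependency scripts then reference the prefix instead of a fixed host:
set(ABSL_REPOSITORY "${GIT_URL}/abseil/abseil-cpp.git")
```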
2 changes: 1 addition & 1 deletion cmake/cinn/external/jitify.cmake
@@ -7,7 +7,7 @@ include(ExternalProject)

# clone jitify to Paddle/third_party
set(JITIFY_SOURCE_DIR ${PADDLE_SOURCE_DIR}/third_party/jitify)
set(JITIFY_URL https://github.com/NVIDIA/jitify.git)
set(JITIFY_URL ${GIT_URL}/NVIDIA/jitify.git)
set(JITIFY_TAG 57de649139c866eb83acacfe50c92ad7c6278776)

ExternalProject_Add(
2 changes: 1 addition & 1 deletion cmake/external/brpc.cmake
@@ -40,7 +40,7 @@ include_directories(${BRPC_INCLUDE_DIR})

# clone brpc to Paddle/third_party
set(BRPC_SOURCE_DIR ${PADDLE_SOURCE_DIR}/third_party/brpc)
set(BRPC_URL https://github.com/apache/brpc.git)
set(BRPC_URL ${GIT_URL}/apache/brpc.git)
set(BRPC_TAG 1.4.0)

# Reference https://stackoverflow.com/questions/45414507/pass-a-list-of-prefix-paths-to-externalproject-add-in-cmake-args
2 changes: 1 addition & 1 deletion cmake/external/cudnn-frontend.cmake
@@ -34,7 +34,7 @@ if((NOT DEFINED CUDNN_FRONTEND_NAME) OR (NOT DEFINED CUDNN_FRONTEND_URL))
"cudnn-frontend"
CACHE STRING "" FORCE)
set(CUDNN_FRONTEND_URL
"https://github.com/NVIDIA/cudnn-frontend/archive/refs/tags/${CUDNN_FRONTEND_VER}.tar.gz"
"${GIT_URL}/NVIDIA/cudnn-frontend/archive/refs/tags/${CUDNN_FRONTEND_VER}.tar.gz"
CACHE STRING "" FORCE)
endif()
set(CUDNN_FRONTEND_CACHE_FILENAME "${CUDNN_FRONTEND_VER}.tar.gz")
2 changes: 1 addition & 1 deletion cmake/external/dirent.cmake
@@ -25,7 +25,7 @@ if((NOT DEFINED DIRENT_NAME) OR (NOT DEFINED DIRENT_URL))
"dirent"
CACHE STRING "" FORCE)
set(DIRENT_URL
"https://github.com/tronkko/dirent/archive/refs/tags/1.23.2.tar.gz"
"${GIT_URL}/tronkko/dirent/archive/refs/tags/1.23.2.tar.gz"
CACHE STRING "" FORCE)
set(DIRENT_CACHE_FILENAME "1.23.2.tar.gz")
endif()
4 changes: 1 addition & 3 deletions cmake/external/jemalloc.cmake
@@ -5,9 +5,7 @@ set(JEMALLOC_DOWNLOAD_DIR
set(JEMALLOC_PROJECT "extern_jemalloc")
set(JEMALLOC_BUILD ${THIRD_PARTY_PATH}/jemalloc/src/extern_jemalloc)
set(JEMALLOC_PREFIX_DIR ${THIRD_PARTY_PATH}/jemalloc)
set(JEMALLOC_URL
https://github.com/jemalloc/jemalloc/releases/download/5.1.0/jemalloc-5.1.0.tar.bz2
)
set(JEMALLOC_URL https://paddle-ci.gz.bcebos.com/jemalloc-5.1.0.tar.bz2)
set(JEMALLOC_INSTALL ${THIRD_PARTY_PATH}/install/jemalloc)
set(JEMALLOC_INCLUDE_DIR ${JEMALLOC_INSTALL}/include)

5 changes: 2 additions & 3 deletions cmake/external/libxsmm.cmake
@@ -31,9 +31,8 @@ set(LIBXSMMNOBLAS_LIB "${LIBXSMM_LIBRARY_DIR}/libxsmmnoblas.a")
file(GLOB LIBXSMM_SOURCE_FILE_LIST ${LIBXSMM_SOURCE_DIR})
list(LENGTH LIBXSMM_SOURCE_FILE_LIST RES_LEN)
if(RES_LEN EQUAL 0)
execute_process(
COMMAND ${GIT_EXECUTABLE} clone -b ${LIBXSMM_TAG}
"https://github.com/hfp/libxsmm.git" ${LIBXSMM_SOURCE_DIR})
execute_process(COMMAND ${GIT_EXECUTABLE} clone -b ${LIBXSMM_TAG}
"${GIT_URL}/hfp/libxsmm.git" ${LIBXSMM_SOURCE_DIR})
else()
# check git tag
execute_process(
6 changes: 3 additions & 3 deletions cmake/external/onnxruntime.cmake
@@ -44,19 +44,19 @@ set(ONNXRUNTIME_DOWNLOAD_DIR

if(WIN32)
set(ONNXRUNTIME_URL
"https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-win-x64-${ONNXRUNTIME_VERSION}.zip"
"${GIT_URL}/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-win-x64-${ONNXRUNTIME_VERSION}.zip"
)
set(ONNXRUNTIME_URL_MD5 f21d6bd1feef15935a5f4e1007797593)
set(ONNXRUNTIME_CACHE_EXTENSION "zip")
elseif(APPLE)
set(ONNXRUNTIME_URL
"https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-osx-x86_64-${ONNXRUNTIME_VERSION}.tgz"
"${GIT_URL}/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-osx-x86_64-${ONNXRUNTIME_VERSION}.tgz"
)
set(ONNXRUNTIME_URL_MD5 6a6f6b7df97587da59976042f475d3f4)
set(ONNXRUNTIME_CACHE_EXTENSION "tgz")
else()
set(ONNXRUNTIME_URL
"https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz"
"${GIT_URL}/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz"
)
set(ONNXRUNTIME_URL_MD5 ce3f2376854b3da4b483d6989666995a)
set(ONNXRUNTIME_CACHE_EXTENSION "tgz")
19 changes: 11 additions & 8 deletions cmake/external/openblas.cmake
@@ -19,12 +19,16 @@ set(CBLAS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/openblas)
set(CBLAS_SOURCE_DIR ${PADDLE_SOURCE_DIR}/third_party/openblas)
set(CBLAS_TAG v0.3.7)

# OpenBLAS support Raptor Lake from v0.3.22
if(UNIX
AND NOT APPLE
AND NOT WITH_ROCM
# Why use v0.3.18? The IDG business line encountered a random openblas error,
# which can be resolved after upgrading openblas.
# And why compile it only when gcc > 8.2? Please refer to
# https://github.com/spack/spack/issues/19932#issuecomment-733452619
# v0.3.18 only supports gcc >= 8.3 or gcc >= 7.4
if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 8.2
AND NOT WITH_XPU)
set(CBLAS_TAG v0.3.23)
# We only compile with openblas 0.3.18 when gcc >= 8.3
set(CBLAS_TAG v0.3.18)
endif()

if(APPLE AND WITH_ARM)
@@ -42,9 +46,8 @@ endif()
file(GLOB CBLAS_SOURCE_FILE_LIST ${CBLAS_SOURCE_DIR})
list(LENGTH CBLAS_SOURCE_FILE_LIST RES_LEN)
if(RES_LEN EQUAL 0)
execute_process(
COMMAND ${GIT_EXECUTABLE} clone -b ${CBLAS_TAG}
"https://github.com/xianyi/OpenBLAS.git" ${CBLAS_SOURCE_DIR})
execute_process(COMMAND ${GIT_EXECUTABLE} clone -b ${CBLAS_TAG}
"${GIT_URL}/xianyi/OpenBLAS.git" ${CBLAS_SOURCE_DIR})
else()
# check git tag
execute_process(
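The libxsmm and OpenBLAS hunks above share the same bootstrap idiom: if the directory under third_party is empty, clone the dependency at the pinned tag; otherwise check that the existing checkout matches the expected tag (the else() branch is truncated in this view). The sketch below illustrates one way such a tag check could look; the describe-based comparison and the error message are assumptions, not the code Paddle actually ships.

```
# Hypothetical sketch of the truncated "check git tag" branch.
execute_process(
  COMMAND ${GIT_EXECUTABLE} -C ${CBLAS_SOURCE_DIR} describe --tags
  OUTPUT_VARIABLE CURRENT_TAG
  OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT CURRENT_TAG STREQUAL CBLAS_TAG)
  message(
    FATAL_ERROR
      "third_party/openblas is at '${CURRENT_TAG}', expected '${CBLAS_TAG}'; "
      "update the checkout or remove the directory and re-run cmake.")
endif()
```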
6 changes: 3 additions & 3 deletions cmake/external/paddle2onnx.cmake
@@ -71,19 +71,19 @@ endif()

if(WIN32)
set(PADDLE2ONNX_URL
"https://github.com/PaddlePaddle/Paddle2ONNX/releases/download/v${PADDLE2ONNX_VERSION}/paddle2onnx-win-x64-${PADDLE2ONNX_VERSION}.zip"
"${GIT_URL}/PaddlePaddle/Paddle2ONNX/releases/download/v${PADDLE2ONNX_VERSION}/paddle2onnx-win-x64-${PADDLE2ONNX_VERSION}.zip"
)
set(PADDLE2ONNX_URL_MD5 "122b864cb57338191a7e9ef5f607c4ba")
set(PADDLE2ONNX_CACHE_EXTENSION "zip")
elseif(APPLE)
set(PADDLE2ONNX_URL
"https://github.com/PaddlePaddle/Paddle2ONNX/releases/download/v${PADDLE2ONNX_VERSION}/paddle2onnx-osx-x86_64-${PADDLE2ONNX_VERSION}.tgz"
"${GIT_URL}/PaddlePaddle/Paddle2ONNX/releases/download/v${PADDLE2ONNX_VERSION}/paddle2onnx-osx-x86_64-${PADDLE2ONNX_VERSION}.tgz"
)
set(PADDLE2ONNX_URL_MD5 "32a4381ff8441b69d58ef0fd6fd919eb")
set(PADDLE2ONNX_CACHE_EXTENSION "tgz")
else()
set(PADDLE2ONNX_URL
"https://github.com/PaddlePaddle/Paddle2ONNX/releases/download/v${PADDLE2ONNX_VERSION}/paddle2onnx-linux-x64-${PADDLE2ONNX_VERSION}.tgz"
"${GIT_URL}/PaddlePaddle/Paddle2ONNX/releases/download/v${PADDLE2ONNX_VERSION}/paddle2onnx-linux-x64-${PADDLE2ONNX_VERSION}.tgz"
)
set(PADDLE2ONNX_URL_MD5 "3fbb074987ba241327797f76514e937f")
set(PADDLE2ONNX_CACHE_EXTENSION "tgz")
2 changes: 1 addition & 1 deletion cmake/external/protobuf.cmake
@@ -244,7 +244,7 @@ function(build_protobuf TARGET_NAME BUILD_FOR_HOST)
set(PROTOBUF_TAG 01a05a53f40ca2ac5f0af10c6cc0810bee39b792)
else()
if(WITH_PSLIB)
set(PROTOBUF_REPOSITORY "https://github.com/google/protobuf.git")
set(PROTOBUF_REPOSITORY "${GIT_URL}/google/protobuf.git")
set(PROTOBUF_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546")
else()
set(PROTOBUF_REPOSITORY ${GIT_URL}/protocolbuffers/protobuf.git)
2 changes: 1 addition & 1 deletion cmake/external/xpu.cmake
@@ -24,7 +24,7 @@ set(XPU_XFT_LIB_NAME "libxft.so")
set(XPU_XPTI_LIB_NAME "libxpti.so")

if(NOT DEFINED XPU_BASE_DATE)
set(XPU_BASE_DATE "20230926")
set(XPU_BASE_DATE "20231023")
endif()
set(XPU_XCCL_BASE_VERSION "1.0.53.6")
if(NOT DEFINED XPU_XFT_BASE_VERSION)
16 changes: 10 additions & 6 deletions cmake/generic.cmake
@@ -499,12 +499,15 @@ function(cc_test_run TARGET_NAME)
NAME ${TARGET_NAME}
COMMAND ${cc_test_COMMAND} ${cc_test_ARGS}
WORKING_DIRECTORY ${cc_test_DIR})
set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT
FLAGS_cpu_deterministic=true)
set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT
FLAGS_init_allocated_mem=true)
set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT
FLAGS_cudnn_deterministic=true)
set_property(
TEST ${TARGET_NAME}
PROPERTY
ENVIRONMENT
FLAGS_cpu_deterministic=true
FLAGS_init_allocated_mem=true
FLAGS_cudnn_deterministic=true
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${PADDLE_BINARY_DIR}/python/paddle/libs:${PADDLE_BINARY_DIR}/python/paddle/base
)
# No unit test should exceed 2 minutes.
if(WIN32)
set_tests_properties(${TARGET_NAME} PROPERTIES TIMEOUT 150)
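The cc_test_run change above is more than a formatting cleanup: calling set_property(TEST ... PROPERTY ENVIRONMENT ...) repeatedly without APPEND overwrites the property each time, so in the old code only the last assignment (FLAGS_cudnn_deterministic=true) survived. Merging everything into a single call keeps all entries and lets LD_LIBRARY_PATH be set alongside the FLAGS_* variables. A minimal illustration of the difference, using a made-up test target rather than anything from the diff:

```
# "demo_test" is a placeholder target for illustration only.
add_test(NAME demo_test COMMAND demo_test_binary)

# Overwrites: after these two calls only FLAGS_b=1 remains.
set_property(TEST demo_test PROPERTY ENVIRONMENT "FLAGS_a=1")
set_property(TEST demo_test PROPERTY ENVIRONMENT "FLAGS_b=1")

# Keeps both entries: either append to the existing list...
set_property(TEST demo_test APPEND PROPERTY ENVIRONMENT "FLAGS_a=1" "FLAGS_b=1")
# ...or set them all in one call, as the new cc_test_run code does.
set_property(TEST demo_test PROPERTY ENVIRONMENT "FLAGS_a=1" "FLAGS_b=1")
```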
@@ -726,6 +729,7 @@ function(nv_test TARGET_NAME)
# 2. cuda_add_executable does not support ccache.
# Reference: https://cmake.org/cmake/help/v3.10/module/FindCUDA.html
add_executable(${TARGET_NAME} ${nv_test_SRCS})
target_compile_definitions(${TARGET_NAME} PUBLIC STATIC_PADDLE)
get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
target_link_libraries(${TARGET_NAME} ${nv_test_DEPS}
${os_dependency_modules} paddle_gtest_main phi)
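The new target_compile_definitions line in nv_test marks STATIC_PADDLE as a PUBLIC definition, meaning it is applied to the target itself and also propagated through its usage requirements to anything that links against it. A small illustration of PUBLIC versus PRIVATE with made-up targets (none of these names come from the diff):

```
# Placeholder targets for illustration only.
add_library(demo_core demo_core.cc)
add_executable(demo_app demo_app.cc)
target_link_libraries(demo_app PRIVATE demo_core)

# PRIVATE: only demo_core's own sources see DEMO_INTERNAL.
target_compile_definitions(demo_core PRIVATE DEMO_INTERNAL)

# PUBLIC: demo_core and every dependent (here demo_app) see DEMO_FEATURE.
target_compile_definitions(demo_core PUBLIC DEMO_FEATURE)
```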
5 changes: 5 additions & 0 deletions cmake/hip.cmake
@@ -118,6 +118,11 @@ list(APPEND HIP_CXX_FLAGS -Wno-unused-value)
list(APPEND HIP_CXX_FLAGS -Wno-braced-scalar-init)
list(APPEND HIP_CXX_FLAGS -Wno-return-type)
list(APPEND HIP_CXX_FLAGS -Wno-pragma-once-outside-header)
list(APPEND HIP_CXX_FLAGS -Wno-deprecated-builtins)
list(APPEND HIP_CXX_FLAGS -Wno-switch)
list(APPEND HIP_CXX_FLAGS -Wno-literal-conversion)
list(APPEND HIP_CXX_FLAGS -Wno-constant-conversion)
list(APPEND HIP_CXX_FLAGS -Wno-defaulted-function-deleted)

if(WITH_CINN)
list(APPEND HIP_CXX_FLAGS -std=c++14)
3 changes: 3 additions & 0 deletions cmake/operators.cmake
@@ -684,6 +684,9 @@ function(prune_pybind_h)
list(APPEND op_list "load_combine")
list(APPEND op_list "tensorrt_engine")

# TODO(ming1753): conditional_block_infer is temporarily reserved here to avoid link errors in functions of standalone_executor
list(APPEND op_list "conditional_block_infer")

# add fused_op in op_list
list(APPEND op_list "fc")
list(APPEND op_list "conv2d_fusion")
121 changes: 121 additions & 0 deletions paddle/cinn/README.md
@@ -0,0 +1,121 @@
```
___ ___ ___
/\__\ /\ \ /\ \
/:/ / ___ \:\ \ \:\ \
/:/ / /\__\ \:\ \ \:\ \
/:/ / ___ /:/__/ _____\:\ \ _____\:\ \
/:/__/ /\__\/::\ \ /::::::::\__\/::::::::\__\
\:\ \ /:/ /\/\:\ \__\:\~~\~~\/__/\:\~~\~~\/__/
\:\ /:/ / \:\/\__\\:\ \ \:\ \
\:\/:/ / \::/ / \:\ \ \:\ \
\::/ / /:/ / \:\__\ \:\__\
\/__/ \/__/ \/__/ \/__/
```


# CINN: Compiler Infrastructure for Neural Networks

The project CINN is a machine learning compiler and executor for multiple hardware backends.
It is designed to provide multiple layers of APIs to make tensor computation easier to define, faster to execute, and more convenient to extend with hardware backends.
Currently, it targets x86 CPUs and Nvidia GPUs.

This project is under active development.

## How it works

CINN lowers a traditional DNN model into a two-level intermediate representation (IR): the high-level IR (HLIR) and the CINN IR.

The HLIR is used to define domain-specific computations and to perform overall optimizations on the IR graph;
the CINN IR represents the computation semantics and is finally lowered to a hardware backend.

Both levels of IR share similar SSA-graph, analysis, and optimization facilities.
Schedule transforms are applied to the CINN IR to perform optimizations.

For more details, you can refer to:
https://github.com/PaddlePaddle/docs/tree/develop/docs/guides/cinn

## Getting Started

### Compile

Clone PaddlePaddle first.

```
git clone https://github.com/PaddlePaddle/Paddle.git
cd Paddle
mkdir build
cd build
```

Build Paddle with CINN:

```
cmake .. -DCINN_ONLY=OFF -DWITH_CINN=ON -DWITH_GPU=ON
```

Build CINN only:

```
cmake .. -DCINN_ONLY=ON -DWITH_CINN=ON -DWITH_GPU=ON
```

Then build:

```
make -j
```
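The -D switches used above are ordinary CMake cache options, so they can also be changed on a later cmake invocation without editing any files. As a rough sketch (the descriptions and defaults below are assumptions, not Paddle's actual declarations):

```
option(WITH_CINN "Compile PaddlePaddle with the CINN compiler" OFF)
option(CINN_ONLY "Build only the CINN compiler, without the rest of Paddle" OFF)
option(WITH_GPU "Compile with NVIDIA GPU (CUDA) support" OFF)
```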

### Install

Install Paddle with CINN:

```
pip install python/dist/paddlepaddle_gpu-xxx.whl
```

Install CINN only:

```
pip install python/dist/cinn_gpu-xxx.whl
```

Then you can import paddle in a Python environment and check whether a Paddle build with CINN support is installed.

```
import paddle
paddle.is_compiled_with_cinn()
```

### Concepts

There are two levels of APIs in CINN: the higher level is HLIR and the lower level is CINN IR; both define a number of core concepts.

In HLIR

- `frontend::Program`, the program that defines a machine learning computation,
- `hlir::framework::Tensor`, a multi-dimensional array that manages a memory buffer,
- `hlir::framework::Program`, the final executable program at runtime, holding the basic executable elements,
- `hlir::framework::Graph`, the graph representing the structure of a model; each node in the graph represents an operator (conv2d, relu, mul, etc.),
- `hlir::framework::GraphCompiler`, the compiler that transforms the graph representation (hlir::framework::Graph) of a model into an executable program (hlir::framework::Program).

In CINN IR

- `Compute`, the method to define a computation,
- `Lower`, the method to lower a computation to the corresponding IR,
- `LoweredFunc`, the function defined in CINN IR,
- `Var`, a scalar variable,
- `Expr`, an expression that can represent any CINN IR node (there is no separate Statement node),

## License

CINN is licensed under the [Apache 2.0 license](LICENSE).

## Acknowledgement

CINN learned a lot from the following projects:

- [Halide](https://github.com/halide/Halide): Referenced the design of most IR nodes,
- [TVM](https://github.com/apache/tvm): We learned many ideas including the semantics of some schedule primitives, TOPI, NNVM, and so on,
- [tiramisu](https://github.com/Tiramisu-Compiler): The isl usage, polyhedral compilation, schedule primitive implementation, and so on,
- [tensorflow/xla](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/compiler/xla): Referenced the semantics of the primitive operations.
19 changes: 10 additions & 9 deletions paddle/cinn/auto_schedule/analysis/analyze_ir_test.cc
@@ -20,6 +20,7 @@
#include <sstream>
#include <vector>

#include "paddle/cinn/ast_gen_ius/tensor_group.h"
#include "paddle/cinn/common/context.h"
#include "paddle/cinn/ir/ir.h"
#include "paddle/cinn/ir/ir_base.h"
@@ -49,9 +50,9 @@ TEST(AnalyzeIr, AnalyzeScheduleBlockReadWriteBuffer_SimpleAssign) {
ir::Tensor B = lang::Compute(
{M, N}, [&](Var i, Var j) { return A(i, j); }, "B");

poly::StageMap stages = poly::CreateStages({A, B});
std::vector<ir::LoweredFunc> funcs = lang::LowerVec(
"SimpleAssign", stages, {A, B}, {}, {}, nullptr, target, true);
ast_gen_ius::TensorGroup tensor_group({A, B});
std::vector<ir::LoweredFunc> funcs =
lang::LowerToAstVec("SimpleAssign", {A, B}, &tensor_group, target);

ASSERT_FALSE(funcs.empty());
ir::Expr ast_expr = funcs[0]->body;
@@ -115,9 +116,9 @@ TEST(AnalyzeIr, AnalyzeScheduleBlockReadWriteBuffer_AddDiffShape) {
ir::Tensor C = lang::Compute(
{M, N}, [&](Var i, Var j) { return A(i) + B(j); }, "C");

poly::StageMap stages = poly::CreateStages({C});
std::vector<ir::LoweredFunc> funcs = lang::LowerVec(
"AddDiffShape", stages, {C}, {}, {}, nullptr, target, true);
ast_gen_ius::TensorGroup tensor_group({C});
std::vector<ir::LoweredFunc> funcs =
lang::LowerToAstVec("AddDiffShape", {C}, &tensor_group, target);

ir::Expr ast_expr = funcs[0]->body;
VLOG(6) << "Expr before MultiLevelTiling: ";
@@ -169,9 +170,9 @@ TEST(AnalyzeIr, ContainsNodeType) {
ir::Tensor B = lang::Compute(
{M, N}, [&](Var i, Var j) { return A(i, j); }, "B");

poly::StageMap stages = poly::CreateStages({A, B});
std::vector<ir::LoweredFunc> funcs = lang::LowerVec(
"SimpleAssign", stages, {A, B}, {}, {}, nullptr, target, true);
ast_gen_ius::TensorGroup tensor_group({A, B});
std::vector<ir::LoweredFunc> funcs =
lang::LowerToAstVec("SimpleAssign", {A, B}, &tensor_group, target);

ASSERT_FALSE(funcs.empty());
ir::Expr ast_expr = funcs[0]->body;