From 0ba1b3e10bcf0c3a59b592da79b9d848b59ae745 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Mon, 6 Apr 2020 02:50:18 +0000 Subject: [PATCH 01/20] cpp standard --- 3rdparty/dmlc-core | 2 +- 3rdparty/mshadow/guide/Makefile | 2 +- 3rdparty/mshadow/guide/mshadow-ps/Makefile | 2 +- 3rdparty/mshadow/make/mshadow.mk | 4 +- 3rdparty/mshadow/test/Makefile | 2 +- CMakeLists.txt | 42 ++------ Makefile | 22 +++-- amalgamation/Makefile | 7 +- amalgamation/amalgamation.py | 2 +- ci/docker/Dockerfile.build.armv6 | 3 +- ci/docker/Dockerfile.build.armv7 | 3 +- ci/docker/Dockerfile.build.armv8 | 7 +- ci/docker/Dockerfile.build.jetson | 5 - ci/docker/Dockerfile.build.ubuntu_cpu_r | 3 + ci/docker/Dockerfile.build.ubuntu_cpu_scala | 3 + ci/docker/Dockerfile.build.ubuntu_gpu_cu100 | 3 + ci/docker/Dockerfile.build.ubuntu_gpu_cu101 | 3 + ci/docker/Dockerfile.build.ubuntu_gpu_cu102 | 3 + ci/docker/Dockerfile.build.ubuntu_gpu_cu90 | 3 + ci/docker/Dockerfile.build.ubuntu_gpu_cu92 | 3 + ci/docker/install/android_arm64_openblas.sh | 2 +- ci/docker/install/android_armv7_openblas.sh | 2 +- ci/docker/install/centos7_base.sh | 4 + ci/docker/install/centos7_core.sh | 4 + ci/docker/install/ubuntu_gcc8.sh | 2 +- ci/docker/runtime_functions.sh | 99 ++++++++++++++----- cpp-package/example/Makefile | 2 +- cpp-package/example/example.mk | 4 +- cpp-package/example/feature_extract/Makefile | 4 +- cpp-package/example/inference/Makefile | 2 +- cpp-package/example/inference/inference.mk | 4 +- .../image-classification/predict-cpp/Makefile | 6 +- example/multi_threaded_inference/Makefile | 2 +- example/rnn/large_word_lm/setup.py | 2 +- python/setup.py | 4 +- src/operator/fusion/fused_op.cu | 2 +- tests/cpp/unittest.mk | 24 ++--- 37 files changed, 170 insertions(+), 123 deletions(-) diff --git a/3rdparty/dmlc-core b/3rdparty/dmlc-core index 14bf7e884328..5df8305fe699 160000 --- a/3rdparty/dmlc-core +++ b/3rdparty/dmlc-core @@ -1 +1 @@ -Subproject commit 14bf7e884328eb97bfde160ec6f64c20f5337459 +Subproject commit 5df8305fe699d3b503d10c60a231ab0223142407 diff --git a/3rdparty/mshadow/guide/Makefile b/3rdparty/mshadow/guide/Makefile index bad7a8e94b1d..c8b828c3834b 100644 --- a/3rdparty/mshadow/guide/Makefile +++ b/3rdparty/mshadow/guide/Makefile @@ -4,7 +4,7 @@ export CXX = g++ export NVCC =nvcc include config.mk include ../make/mshadow.mk -export CFLAGS = -Wall -O3 -std=c++11 -I../ $(MSHADOW_CFLAGS) +export CFLAGS = -Wall -O3 -std=c++17 -I../ $(MSHADOW_CFLAGS) export LDFLAGS= -lm $(MSHADOW_LDFLAGS) export NVCCFLAGS = -O3 --use_fast_math -ccbin $(CXX) $(MSHADOW_NVCCFLAGS) diff --git a/3rdparty/mshadow/guide/mshadow-ps/Makefile b/3rdparty/mshadow/guide/mshadow-ps/Makefile index 58d64a304009..e16f0a059ad8 100644 --- a/3rdparty/mshadow/guide/mshadow-ps/Makefile +++ b/3rdparty/mshadow/guide/mshadow-ps/Makefile @@ -4,7 +4,7 @@ export CXX = g++ export NVCC =nvcc include config.mk include ../../make/mshadow.mk -export CFLAGS = -Wall -O3 -std=c++11 -fopenmp -I../../ $(MSHADOW_CFLAGS) +export CFLAGS = -Wall -O3 -std=c++17 -fopenmp -I../../ $(MSHADOW_CFLAGS) export LDFLAGS= -lm $(MSHADOW_LDFLAGS) export NVCCFLAGS = -O3 --use_fast_math -ccbin $(CXX) $(MSHADOW_NVCCFLAGS) diff --git a/3rdparty/mshadow/make/mshadow.mk b/3rdparty/mshadow/make/mshadow.mk index 86155eaaadcf..cce3db5fe093 100644 --- a/3rdparty/mshadow/make/mshadow.mk +++ b/3rdparty/mshadow/make/mshadow.mk @@ -149,13 +149,13 @@ else endif ifeq ($(USE_DIST_PS),1) -MSHADOW_CFLAGS += -DMSHADOW_DIST_PS=1 -std=c++11 \ +MSHADOW_CFLAGS += -DMSHADOW_DIST_PS=1 -std=c++17 \ -I$(PS_PATH)/src -I$(PS_THIRD_PATH)/include PS_LIB = $(addprefix $(PS_PATH)/build/, libps.a libps_main.a) \ $(addprefix $(PS_THIRD_PATH)/lib/, libgflags.a libzmq.a libprotobuf.a \ libglog.a libz.a libsnappy.a) # -L$(PS_THIRD_PATH)/lib -lgflags -lzmq -lprotobuf -lglog -lz -lsnappy -MSHADOW_NVCCFLAGS += --std=c++11 +MSHADOW_NVCCFLAGS += --std=c++14 else MSHADOW_CFLAGS+= -DMSHADOW_DIST_PS=0 endif diff --git a/3rdparty/mshadow/test/Makefile b/3rdparty/mshadow/test/Makefile index dc2d0552deb4..ec11128e949f 100644 --- a/3rdparty/mshadow/test/Makefile +++ b/3rdparty/mshadow/test/Makefile @@ -20,7 +20,7 @@ test: test.cu test_tblob: test_tblob.cc $(BIN) : - $(CXX) $(CFLAGS) -std=c++0x -o $@ $(filter %.cpp %.o %.c %.cc, $^) $(LDFLAGS) + $(CXX) $(CFLAGS) -std=c++17 -o $@ $(filter %.cpp %.o %.c %.cc, $^) $(LDFLAGS) $(OBJ) : $(CXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c, $^) ) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9dce131473b6..4cf24bbcae53 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,6 +7,9 @@ if(CMAKE_CROSSCOMPILING) endif() project(mxnet C CXX) +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS ON) # GNU extensions used by src/operator/random/shuffle_op.cc if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME AND EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/config.cmake) # Load config.cmake only if mxnet is not compiled as a dependency of another project @@ -59,7 +62,6 @@ option(USE_PLUGIN_CAFFE "Use Caffe Plugin" OFF) option(USE_CPP_PACKAGE "Build C++ Package" OFF) option(USE_MXNET_LIB_NAMING "Use MXNet library naming conventions." ON) option(USE_GPROF "Compile with gprof (profiling) flag" OFF) -option(USE_CXX14_IF_AVAILABLE "Build with C++14 if the compiler supports it" OFF) option(USE_VTUNE "Enable use of Intel Amplifier XE (VTune)" OFF) # one could set VTUNE_ROOT for search path option(USE_TVM_OP "Enable use of TVM operator build system." OFF) option(ENABLE_CUDA_RTC "Build with CUDA runtime compilation support" ON) @@ -98,14 +100,7 @@ if(USE_CUDA) "Please fix your cuda installation: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#mandatory-post") endif() enable_language(CUDA) - set(CMAKE_CUDA_STANDARD 11) - include(CheckCXXCompilerFlag) - if(USE_CXX14_IF_AVAILABLE) - check_cxx_compiler_flag("-std=c++14" SUPPORT_CXX14) - if (SUPPORT_CXX14) - set(CMAKE_CUDA_STANDARD 14) - endif() - endif() + set(CMAKE_CUDA_STANDARD 14) set(CMAKE_CUDA_STANDARD_REQUIRED ON) endif() @@ -153,24 +148,21 @@ add_definitions(-DDMLC_MODERN_THREAD_LOCAL=0) # disable stack trace in exception by default. add_definitions(-DDMLC_LOG_STACK_TRACE_SIZE=0) +add_definitions(-DDMLC_USE_CXX11) +add_definitions(-DDMLC_STRICT_CXX11) +add_definitions(-DDMLC_USE_CXX14) +add_definitions(-DMSHADOW_IN_CXX11) if(MSVC) add_definitions(-DWIN32_LEAN_AND_MEAN) - add_definitions(-DDMLC_USE_CXX11) add_definitions(-D_SCL_SECURE_NO_WARNINGS) add_definitions(-D_CRT_SECURE_NO_WARNINGS) add_definitions(-DMXNET_EXPORTS) add_definitions(-DNNVM_EXPORTS) - add_definitions(-DDMLC_STRICT_CXX11) add_definitions(-DNOMINMAX) set(CMAKE_C_FLAGS "/MP") set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} /bigobj") else() include(CheckCXXCompilerFlag) - if(USE_CXX14_IF_AVAILABLE) - check_cxx_compiler_flag("-std=c++14" SUPPORT_CXX14) - endif() - check_cxx_compiler_flag("-std=c++11" SUPPORT_CXX11) - check_cxx_compiler_flag("-std=c++0x" SUPPORT_CXX0X) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-sign-compare") if(CMAKE_BUILD_TYPE STREQUAL "Debug") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g") @@ -184,21 +176,7 @@ else() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3") endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_C_FLAGS}") - if(SUPPORT_CXX14) - add_definitions(-DDMLC_USE_CXX11=1) - add_definitions(-DDMLC_USE_CXX14=1) - add_definitions(-DMSHADOW_IN_CXX11) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") - elseif(SUPPORT_CXX11) - add_definitions(-DDMLC_USE_CXX11=1) - add_definitions(-DMSHADOW_IN_CXX11) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - elseif(SUPPORT_CXX0X) - add_definitions(-DDMLC_USE_CXX11=1) - add_definitions(-DMSHADOW_IN_CXX11) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") - endif() -endif(MSVC) +endif() if(NOT mxnet_LINKER_LIBS) set(mxnet_LINKER_LIBS "") @@ -530,8 +508,6 @@ if(USE_PLUGIN_CAFFE) endif() if(NOT DEFINED CAFFE_PATH) if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/caffe) - # Need newer FindCUDA.cmake that correctly handles -std=c++11 - cmake_minimum_required(VERSION 3.3) set(CAFFE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/caffe) else() set(CAFFE_PATH $ENV{CAFFE_PATH}) diff --git a/Makefile b/Makefile index e5d6bb288134..b8634140b4be 100644 --- a/Makefile +++ b/Makefile @@ -92,6 +92,8 @@ include $(DMLC_CORE)/make/dmlc.mk # all tge possible warning tread WARNFLAGS= -Wall -Wsign-compare CFLAGS = -DMSHADOW_FORCE_STREAM $(WARNFLAGS) +# C++ standard +CFLAGS+= -DDMLC_USE_CXX11=1 -DDMLC_USE_CXX11=1 -DDMLC_USE_CXX14=1 # use old thread local implementation in DMLC-CORE CFLAGS += -DDMLC_MODERN_THREAD_LOCAL=0 # disable stack trace in exception by default. @@ -131,9 +133,9 @@ endif # -L/usr/local/lib ifeq ($(DEBUG), 1) - NVCCFLAGS += -std=c++11 -Xcompiler -D_FORCE_INLINES -g -G -O0 -ccbin $(CXX) $(MSHADOW_NVCCFLAGS) + NVCCFLAGS += -std=c++14 -Xcompiler -D_FORCE_INLINES -g -G -O0 -ccbin $(CXX) $(MSHADOW_NVCCFLAGS) else - NVCCFLAGS += -std=c++11 -Xcompiler -D_FORCE_INLINES -O3 -ccbin $(CXX) $(MSHADOW_NVCCFLAGS) + NVCCFLAGS += -std=c++14 -Xcompiler -D_FORCE_INLINES -O3 -ccbin $(CXX) $(MSHADOW_NVCCFLAGS) endif # CFLAGS for segfault logger @@ -567,7 +569,7 @@ ALLX_DEP= $(ALL_DEP) build/src/%.o: src/%.cc | mkldnn @mkdir -p $(@D) - $(CXX) -std=c++11 -c $(CFLAGS) -MMD -c $< -o $@ + $(CXX) -std=c++17 -c $(CFLAGS) -MMD -c $< -o $@ build/src/%_gpu.o: src/%.cu | mkldnn @mkdir -p $(@D) @@ -578,12 +580,12 @@ build/src/%_gpu.o: src/%.cu | mkldnn # Use CXX to generate dependency instead. build/plugin/%_gpu.o: plugin/%.cu @mkdir -p $(@D) - $(CXX) -std=c++11 $(CFLAGS) -MM -MT build/plugin/$*_gpu.o $< >build/plugin/$*_gpu.d + $(CXX) -std=c++17 $(CFLAGS) -MM -MT build/plugin/$*_gpu.o $< >build/plugin/$*_gpu.d $(NVCC) -c -o $@ $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS)" $< build/plugin/%.o: plugin/%.cc | mkldnn @mkdir -p $(@D) - $(CXX) -std=c++11 -c $(CFLAGS) -MMD -c $< -o $@ + $(CXX) -std=c++17 -c $(CFLAGS) -MMD -c $< -o $@ %_gpu.o: %.cu @mkdir -p $(@D) @@ -592,7 +594,7 @@ build/plugin/%.o: plugin/%.cc | mkldnn %.o: %.cc $(CORE_INC) @mkdir -p $(@D) - $(CXX) -std=c++11 -c $(CFLAGS) -MMD -Isrc/operator -c $< -o $@ + $(CXX) -std=c++17 -c $(CFLAGS) -MMD -Isrc/operator -c $< -o $@ # Set install path for libmxnet.so on Mac OS ifeq ($(UNAME_S), Darwin) @@ -653,7 +655,7 @@ bin/im2rec: tools/im2rec.cc $(ALLX_DEP) $(BIN) : @mkdir -p $(@D) - $(CXX) $(CFLAGS) -std=c++11 -o $@ $(filter %.cpp %.o %.c %.a %.cc, $^) $(LDFLAGS) + $(CXX) $(CFLAGS) -std=c++17 -o $@ $(filter %.cpp %.o %.c %.a %.cc, $^) $(LDFLAGS) # CPP Package ifeq ($(USE_CPP_PACKAGE), 1) @@ -686,13 +688,13 @@ extension_libs: $(EXT_LIBS) build/libcustomop_lib.so: @mkdir -p $(@D) - $(CXX) -shared -fPIC -std=c++11 example/extensions/lib_custom_op/gemm_lib.cc -o $@ -I include/mxnet + $(CXX) -shared -fPIC -std=c++17 example/extensions/lib_custom_op/gemm_lib.cc -o $@ -I include/mxnet build/libcustomop_gpu_lib.so: @mkdir -p $(@D) - $(NVCC) -shared -std=c++11 -Xcompiler -fPIC example/extensions/lib_custom_op/relu_lib.cu -o $@ -I include/mxnet + $(NVCC) -shared -std=c++14 -Xcompiler -fPIC example/extensions/lib_custom_op/relu_lib.cu -o $@ -I include/mxnet build/libsubgraph_lib.so: @mkdir -p $(@D) - $(CXX) -shared -fPIC -std=c++11 example/extensions/lib_subgraph/subgraph_lib.cc -o $@ -I include/mxnet + $(CXX) -shared -fPIC -std=c++17 example/extensions/lib_subgraph/subgraph_lib.cc -o $@ -I include/mxnet # Cython build cython: diff --git a/amalgamation/Makefile b/amalgamation/Makefile index 701c1f155e47..55aad1d470a2 100644 --- a/amalgamation/Makefile +++ b/amalgamation/Makefile @@ -50,7 +50,8 @@ endif DEFS+=-DMSHADOW_USE_CUDA=0 -DMSHADOW_USE_MKL=0 -DMSHADOW_RABIT_PS=0 -DMSHADOW_DIST_PS=0 -DDMLC_LOG_STACK_TRACE=0 DEFS+=-DMSHADOW_FORCE_STREAM -DMXNET_USE_OPENCV=0 -DMXNET_PREDICT_ONLY=1 -CFLAGS=-std=c++11 -Wno-unknown-pragmas -Wall $(DEFS) +DEFS+=-DDMLC_USE_CXX11=1 -DDMLC_USE_CXX11=1 -DDMLC_USE_CXX14=1 +CFLAGS=-std=c++17 -Wno-unknown-pragmas -Wall $(DEFS) # if architecture of the CPU supports F16C instruction set, enable USE_F16C for fast fp16 computation on CPU ifeq ($(USE_F16C), 1) @@ -63,7 +64,7 @@ ifneq ($(MIN), 1) CFLAGS += -I${OPENBLAS_ROOT} -I${OPENBLAS_ROOT}/include LDFLAGS+= -L${OPENBLAS_ROOT} -L${OPENBLAS_ROOT}/lib - # Define which blas is installed. Uses OpenBLAS by default. +# Define which blas is installed. Uses OpenBLAS by default. ifeq ($(USE_BLAS), atlas) LDFLAGS += -lcblas else ifeq ($(USE_BLAS), blas) @@ -120,7 +121,7 @@ else endif libmxnet_predict.js: mxnet_predict-all.cc - ${EMCC} -std=c++11 -O2 $(DEFS) -DMSHADOW_USE_SSE=0 -D__MXNET_JS__ -o $@ $+ \ + ${EMCC} -std=c++17 -O2 $(DEFS) -DMSHADOW_USE_SSE=0 -D__MXNET_JS__ -o $@ $+ \ -s EXPORTED_FUNCTIONS="['_MXPredCreate', \ '_MXPredGetOutputShape', \ '_MXPredSetInput', \ diff --git a/amalgamation/amalgamation.py b/amalgamation/amalgamation.py index 5f825de77483..cb961c699fe8 100644 --- a/amalgamation/amalgamation.py +++ b/amalgamation/amalgamation.py @@ -30,7 +30,7 @@ 'opencv2/opencv.hpp', 'sys/stat.h', 'sys/types.h', 'cuda.h', 'cuda_fp16.h', 'omp.h', 'onnx/onnx.pb.h', 'execinfo.h', 'packet/sse-inl.h', 'emmintrin.h', 'thrust/device_vector.h', 'cusolverDn.h', 'internal/concurrentqueue_internal_debug.h', 'relacy/relacy_std.hpp', - 'relacy_shims.h', 'ittnotify.h', 'shared_mutex', 'nvToolsExt.h', 'dmlc/build_config.h', + 'relacy_shims.h', 'ittnotify.h', 'nvToolsExt.h', 'dmlc/build_config.h', 'sys/isa_defs.h' ] diff --git a/ci/docker/Dockerfile.build.armv6 b/ci/docker/Dockerfile.build.armv6 index e6a7ffe758b9..98414d8c2aff 100644 --- a/ci/docker/Dockerfile.build.armv6 +++ b/ci/docker/Dockerfile.build.armv6 @@ -21,7 +21,8 @@ FROM dockcross/linux-armv6 ENV ARCH armv6l -ENV HOSTCC gcc +ENV HOSTCC gcc-6 +ENV HOSTCXX g++-6 ENV TARGET ARMV6 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.armv7 b/ci/docker/Dockerfile.build.armv7 index bad9ab214050..f0cdd90322f0 100644 --- a/ci/docker/Dockerfile.build.armv7 +++ b/ci/docker/Dockerfile.build.armv7 @@ -21,7 +21,8 @@ FROM dockcross/linux-armv7 ENV ARCH armv7l -ENV HOSTCC gcc +ENV HOSTCC gcc-7 +ENV HOSTCXX g++-7 ENV TARGET ARMV7 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.armv8 b/ci/docker/Dockerfile.build.armv8 index bd2373180f0b..2f918e55d991 100644 --- a/ci/docker/Dockerfile.build.armv8 +++ b/ci/docker/Dockerfile.build.armv8 @@ -21,15 +21,12 @@ FROM dockcross/linux-arm64 ENV ARCH aarch64 -ENV HOSTCC gcc +ENV HOSTCC gcc-7 +ENV HOSTCXX g++-7 ENV TARGET ARMV8 WORKDIR /work/deps -# gh issue #11567 https://github.com/apache/incubator-mxnet/issues/11567 -#RUN sed -i '\#deb http://cdn-fastly.deb.debian.org/debian-security jessie/updates main#d' /etc/apt/sources.list -#RUN sed -i 's/cdn-fastly.//' /etc/apt/sources.list - COPY install/ubuntu_arm.sh /work/ RUN /work/ubuntu_arm.sh diff --git a/ci/docker/Dockerfile.build.jetson b/ci/docker/Dockerfile.build.jetson index e31ee43a93d8..55575d17970f 100644 --- a/ci/docker/Dockerfile.build.jetson +++ b/ci/docker/Dockerfile.build.jetson @@ -28,11 +28,6 @@ ENV ARCH aarch64 ENV HOSTCC gcc ENV TARGET ARMV8 -# gh issue #11567 https://github.com/apache/incubator-mxnet/issues/11567 -#RUN sed -i '\#deb http://cdn-fastly.deb.debian.org/debian-security jessie/updates main#d' /etc/apt/sources.list -#RUN sed -i 's/cdn-fastly.//' /etc/apt/sources.list - - WORKDIR /work/deps COPY install/ubuntu_arm.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_r b/ci/docker/Dockerfile.build.ubuntu_cpu_r index 264d34cd6422..2354cb3b66d6 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_r +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_r @@ -28,6 +28,9 @@ RUN /work/ubuntu_core.sh COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + COPY install/ubuntu_r.sh /work/ COPY install/r.gpg /work/ RUN /work/ubuntu_r.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_scala b/ci/docker/Dockerfile.build.ubuntu_cpu_scala index 38874d290e1d..d0ce47784e27 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_scala +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_scala @@ -28,6 +28,9 @@ RUN /work/ubuntu_core.sh COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + COPY install/ubuntu_python.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_python.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 index e35c64eeca5d..3a2fb7e825aa 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 @@ -46,6 +46,9 @@ RUN /work/ubuntu_perl.sh COPY install/ubuntu_clang.sh /work/ RUN /work/ubuntu_clang.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + COPY install/ubuntu_tvm.sh /work/ RUN /work/ubuntu_tvm.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 index aa62fbc6307e..49dcd672917b 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 @@ -46,6 +46,9 @@ RUN /work/ubuntu_perl.sh COPY install/ubuntu_clang.sh /work/ RUN /work/ubuntu_clang.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + COPY install/ubuntu_tvm.sh /work/ RUN /work/ubuntu_tvm.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 index 8badadbb1bdb..9ab16514cf50 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 @@ -46,6 +46,9 @@ RUN /work/ubuntu_perl.sh COPY install/ubuntu_clang.sh /work/ RUN /work/ubuntu_clang.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + COPY install/ubuntu_tvm.sh /work/ RUN /work/ubuntu_tvm.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 index cc50e7e55191..c8fa9221508b 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 @@ -46,6 +46,9 @@ RUN /work/ubuntu_perl.sh COPY install/ubuntu_clang.sh /work/ RUN /work/ubuntu_clang.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + COPY install/ubuntu_tvm.sh /work/ RUN /work/ubuntu_tvm.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 index 40a4f44abeb5..4ba33006b28a 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 @@ -46,6 +46,9 @@ RUN /work/ubuntu_perl.sh COPY install/ubuntu_clang.sh /work/ RUN /work/ubuntu_clang.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + COPY install/ubuntu_tvm.sh /work/ RUN /work/ubuntu_tvm.sh diff --git a/ci/docker/install/android_arm64_openblas.sh b/ci/docker/install/android_arm64_openblas.sh index 1c3014f6cca9..f9647d969bf2 100755 --- a/ci/docker/install/android_arm64_openblas.sh +++ b/ci/docker/install/android_arm64_openblas.sh @@ -24,7 +24,7 @@ set -ex pushd . git clone https://github.com/xianyi/OpenBLAS.git cd OpenBLAS -make -j$(nproc) TARGET=ARMV8 ARM_SOFTFP_ABI=1 HOSTCC=gcc NOFORTRAN=1 libs +make -j$(nproc) TARGET=ARMV8 ARM_SOFTFP_ABI=1 HOSTCC=gcc-7 HOSTCXX=g++-7 NOFORTRAN=1 libs # Can't be run (utility not compiled for the target platform) #make install cp *.h /usr/include diff --git a/ci/docker/install/android_armv7_openblas.sh b/ci/docker/install/android_armv7_openblas.sh index 55c098909654..8642df6d9450 100755 --- a/ci/docker/install/android_armv7_openblas.sh +++ b/ci/docker/install/android_armv7_openblas.sh @@ -24,7 +24,7 @@ set -ex pushd . git clone https://github.com/xianyi/OpenBLAS.git cd OpenBLAS -make TARGET=ARMV7 HOSTCC=gcc NOFORTRAN=1 ARM_SOFTFP_ABI=1 -j$(nproc) libs +make TARGET=ARMV7 HOSTCC=gcc-7 HOSTCXX=g++-7 NOFORTRAN=1 ARM_SOFTFP_ABI=1 -j$(nproc) libs #make PREFIX=${CROSS_ROOT} TARGET=ARMV7 HOSTCC=gcc NOFORTRAN=1 ARM_SOFTFP_ABI=1 install cp *.h ${CROSS_ROOT}/include cp libopenblas*.a ${CROSS_ROOT}/lib diff --git a/ci/docker/install/centos7_base.sh b/ci/docker/install/centos7_base.sh index c5f860e6e7a7..bf289fe034fc 100755 --- a/ci/docker/install/centos7_base.sh +++ b/ci/docker/install/centos7_base.sh @@ -34,6 +34,10 @@ yum -y install protobuf-compiler yum -y install protobuf-devel yum -y install zeromq-devel +# gcc7 +yum -y install centos-release-scl +yum -y install devtoolset-7 + # Centos 7 only provides ninja-build ln -s /usr/bin/ninja-build /usr/bin/ninja diff --git a/ci/docker/install/centos7_core.sh b/ci/docker/install/centos7_core.sh index fbdb239cf0c2..1d858dd0a2f4 100755 --- a/ci/docker/install/centos7_core.sh +++ b/ci/docker/install/centos7_core.sh @@ -40,6 +40,10 @@ yum -y install wget yum -y install unzip yum -y install ninja-build +# gcc7 +yum -y install centos-release-scl +yum -y install devtoolset-7 + # Centos 7 only provides ninja-build ln -s /usr/bin/ninja-build /usr/bin/ninja diff --git a/ci/docker/install/ubuntu_gcc8.sh b/ci/docker/install/ubuntu_gcc8.sh index cd31f8213c1a..e0f2986e101f 100755 --- a/ci/docker/install/ubuntu_gcc8.sh +++ b/ci/docker/install/ubuntu_gcc8.sh @@ -20,4 +20,4 @@ sudo add-apt-repository ppa:jonathonf/gcc-8.0 sudo add-apt-repository ppa:jonathonf/gcc-7.3 sudo apt-get update || true -sudo apt-get install -y gcc-8 g++-8 +sudo apt-get install -y gcc-8 g++-8 gcc-7 g++-7 diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index e171767d51f3..158775c10aed 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -159,7 +159,7 @@ gather_licenses() { build_ubuntu_cpu_release() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_MKLDNN=ON \ -DUSE_CUDA=OFF \ @@ -170,7 +170,7 @@ build_ubuntu_cpu_release() { build_ubuntu_cpu_native_release() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_MKLDNN=OFF \ -DUSE_CUDA=OFF \ @@ -181,7 +181,7 @@ build_ubuntu_cpu_native_release() { build_ubuntu_gpu_release() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_MKLDNN=ON \ -DUSE_DIST_KVSTORE=ON \ @@ -218,6 +218,8 @@ build_jetson() { set -ex pushd . + export CC=gcc-6 + export CXX=g++-6 cp make/crosscompile.jetson.mk ./config.mk make -j$(nproc) @@ -325,7 +327,7 @@ build_android_armv7() { build_android_armv8() { set -ex cd /work/build - cmake\ + cmake \ -DANDROID=ON \ -DUSE_CUDA=OFF\ -DUSE_SSE=OFF\ @@ -341,6 +343,7 @@ build_android_armv8() { build_centos7_cpu() { set -ex cd /work/build + source /opt/rh/devtoolset-7/enable cmake \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ -DENABLE_TESTCOVERAGE=ON \ @@ -355,9 +358,7 @@ build_centos7_cpu() { build_centos7_cpu_make() { set -ex cd /work/mxnet - export CC="ccache gcc" - export CXX="ccache g++" - build_ccache_wrappers + source /opt/rh/devtoolset-7/enable make \ DEV=1 \ USE_LAPACK=1 \ @@ -372,6 +373,7 @@ build_centos7_cpu_make() { build_centos7_mkldnn() { set -ex cd /work/build + source /opt/rh/devtoolset-7/enable cmake \ -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_MKLDNN=ON \ @@ -383,6 +385,7 @@ build_centos7_mkldnn() { build_centos7_gpu() { set -ex cd /work/build + source /opt/rh/devtoolset-7/enable cmake \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ -DUSE_MKL_IF_AVAILABLE=OFF \ @@ -401,7 +404,7 @@ build_ubuntu_cpu() { build_ubuntu_cpu_openblas() { set -ex cd /work/build - CXXFLAGS="-Wno-error=strict-overflow" cmake \ + CXXFLAGS="-Wno-error=strict-overflow" CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ -DENABLE_TESTCOVERAGE=ON \ -DUSE_TVM_OP=ON \ @@ -417,8 +420,8 @@ build_ubuntu_cpu_openblas() { build_ubuntu_cpu_openblas_make() { set -ex - export CC="gcc" - export CXX="g++" + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers make \ DEV=1 \ @@ -436,7 +439,7 @@ build_ubuntu_cpu_openblas_make() { build_ubuntu_cpu_mkl() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ -DENABLE_TESTCOVERAGE=ON \ -DUSE_MKLDNN=OFF \ @@ -451,7 +454,7 @@ build_ubuntu_cpu_mkl() { build_ubuntu_cpu_cmake_debug() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_BUILD_TYPE=Debug \ -DENABLE_TESTCOVERAGE=ON \ -DUSE_CUDA=OFF \ @@ -468,7 +471,7 @@ build_ubuntu_cpu_cmake_debug() { build_ubuntu_cpu_cmake_no_tvm_op() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_CUDA=OFF \ -DUSE_TVM_OP=OFF \ -DUSE_MKL_IF_AVAILABLE=OFF \ @@ -626,6 +629,8 @@ build_ubuntu_cpu_clang100_mkldnn() { build_ubuntu_cpu_mkldnn_make() { set -ex + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers make \ @@ -640,7 +645,7 @@ build_ubuntu_cpu_mkldnn_make() { build_ubuntu_cpu_mkldnn() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ -DENABLE_TESTCOVERAGE=ON \ -DUSE_MKL_IF_AVAILABLE=OFF \ @@ -655,7 +660,7 @@ build_ubuntu_cpu_mkldnn() { build_ubuntu_cpu_mkldnn_mkl() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ -DENABLE_TESTCOVERAGE=ON \ -DUSE_MKLDNN=ON \ @@ -675,6 +680,9 @@ build_ubuntu_gpu_tensorrt() { set -ex + export CC=gcc-7 + export CXX=g++-7 + # Build ONNX pushd . echo "Installing ONNX." @@ -726,7 +734,7 @@ build_ubuntu_gpu_mkldnn() { set -ex cd /work/build # Set CMAKE_AR and CMAKE_RANLIB due to Ubuntu 16.04 default binutils 4GB limitation - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_AR=/usr/local/bin/ar \ -DCMAKE_RANLIB=/usr/local/bin/ranlib \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ @@ -743,7 +751,7 @@ build_ubuntu_gpu_mkldnn_nocudnn() { set -ex cd /work/build # Set CMAKE_AR and CMAKE_RANLIB due to Ubuntu 16.04 default binutils 4GB limitation - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_AR=/usr/local/bin/ar \ -DCMAKE_RANLIB=/usr/local/bin/ranlib \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ @@ -761,7 +769,7 @@ build_ubuntu_gpu_cuda101_cudnn7() { set -ex cd /work/build # Set CMAKE_AR and CMAKE_RANLIB due to Ubuntu 16.04 default binutils 4GB limitation - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_AR=/usr/local/bin/ar \ -DCMAKE_RANLIB=/usr/local/bin/ranlib \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ @@ -780,6 +788,8 @@ build_ubuntu_gpu_cuda101_cudnn7() { build_ubuntu_gpu_cuda101_cudnn7_make() { set -ex + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers make \ DEV=1 \ @@ -799,6 +809,8 @@ build_ubuntu_gpu_cuda101_cudnn7_make() { build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() { set -ex + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers make \ DEV=1 \ @@ -821,7 +833,7 @@ build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op() { set -ex cd /work/build # Set CMAKE_AR and CMAKE_RANLIB due to Ubuntu 16.04 default binutils 4GB limitation - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DCMAKE_AR=/usr/local/bin/ar \ -DCMAKE_RANLIB=/usr/local/bin/ranlib \ -DCMAKE_BUILD_TYPE="RelWithDebInfo" \ @@ -840,6 +852,8 @@ build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op() { build_ubuntu_amalgamation() { set -ex # Amalgamation can not be run with -j nproc + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers make -C amalgamation/ clean make -C amalgamation/ \ @@ -849,6 +863,8 @@ build_ubuntu_amalgamation() { build_ubuntu_amalgamation_min() { set -ex # Amalgamation can not be run with -j nproc + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers make -C amalgamation/ clean make -C amalgamation/ \ @@ -859,7 +875,7 @@ build_ubuntu_amalgamation_min() { build_ubuntu_gpu_cmake() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_SIGNAL_HANDLER=ON \ -DUSE_CUDA=ON \ -DUSE_CUDNN=ON \ @@ -880,7 +896,7 @@ build_ubuntu_gpu_cmake() { build_ubuntu_gpu_cmake_no_rtc() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_SIGNAL_HANDLER=ON \ -DUSE_CUDA=ON \ -DUSE_CUDNN=ON \ @@ -902,7 +918,7 @@ build_ubuntu_gpu_cmake_no_rtc() { build_ubuntu_gpu_cmake_no_tvm_op() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_SIGNAL_HANDLER=ON \ -DUSE_CUDA=ON \ -DUSE_CUDNN=ON \ @@ -923,7 +939,7 @@ build_ubuntu_gpu_cmake_no_tvm_op() { build_ubuntu_cpu_large_tensor() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_SIGNAL_HANDLER=ON \ -DUSE_CUDA=OFF \ -DUSE_CUDNN=OFF \ @@ -938,7 +954,7 @@ build_ubuntu_cpu_large_tensor() { build_ubuntu_gpu_large_tensor() { set -ex cd /work/build - cmake \ + CC=gcc-7 CXX=g++-7 cmake \ -DUSE_SIGNAL_HANDLER=ON \ -DUSE_CUDA=ON \ -DUSE_CUDNN=ON \ @@ -1105,6 +1121,7 @@ unittest_ubuntu_python3_quantization_gpu() { unittest_centos7_cpu_scala() { set -ex + source /opt/rh/devtoolset-7/enable cd /work/mxnet scala_prepare cd scala-package @@ -1144,6 +1161,8 @@ unittest_ubuntu_cpu_R() { mkdir -p /tmp/r-site-library # build R packages in parallel mkdir -p ~/.R/ + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers echo "MAKEFLAGS = -j"$(nproc) > ~/.R/Makevars # make -j not supported @@ -1159,8 +1178,10 @@ unittest_ubuntu_minimal_R() { mkdir -p /tmp/r-site-library # build R packages in parallel mkdir -p ~/.R/ - build_ccache_wrappers echo "MAKEFLAGS = -j"$(nproc) > ~/.R/Makevars + export CC=gcc-7 + export CXX=g++-7 + build_ccache_wrappers # make -j not supported make -f R-package/Makefile rpkg \ R_LIBS=/tmp/r-site-library @@ -1188,6 +1209,8 @@ unittest_ubuntu_gpu_R() { mkdir -p /tmp/r-site-library # build R packages in parallel mkdir -p ~/.R/ + export CC=gcc-7 + export CXX=g++-7 build_ccache_wrappers echo "MAKEFLAGS = -j"$(nproc) > ~/.R/Makevars # make -j not supported @@ -1232,6 +1255,7 @@ unittest_ubuntu_cpu_julia10() { unittest_centos7_cpu() { set -ex + source /opt/rh/devtoolset-7/enable cd /work/mxnet python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_unittest.xml --verbose tests/python/unittest python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_train.xml --verbose tests/python/train @@ -1239,6 +1263,7 @@ unittest_centos7_cpu() { unittest_centos7_gpu() { set -ex + source /opt/rh/devtoolset-7/enable cd /work/mxnet export CUDNN_VERSION=${CUDNN_VERSION:-7.0.3} export DMLC_LOG_STACK_TRACE_DEPTH=10 @@ -1469,6 +1494,8 @@ nightly_test_large_vector() { nightly_test_amalgamation() { set -ex export DMLC_LOG_STACK_TRACE_DEPTH=10 + export CC=gcc-7 + export CXX=g++-7 # Amalgamation can not be run with -j nproc make -C amalgamation/ clean make -C amalgamation/ ${1} ${2} @@ -1479,6 +1506,8 @@ nightly_test_javascript() { set -ex export LLVM=/work/deps/emscripten-fastcomp/build/bin export DMLC_LOG_STACK_TRACE_DEPTH=10 + export CC=gcc-7 + export CXX=g++-7 # This part is needed to run emcc correctly cd /work/deps/emscripten ./emcc @@ -1598,8 +1627,8 @@ build_docs_setup() { build_ubuntu_cpu_docs() { set -ex - export CC="gcc" - export CXX="g++" + export CC="gcc-7" + export CXX="g++-7" build_ccache_wrappers make \ DEV=1 \ @@ -1975,6 +2004,8 @@ build_static_scala_cpu() { scala_prepare export MAVEN_PUBLISH_OS_TYPE=linux-x86_64-cpu export mxnet_variant=cpu + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/scala/build.sh popd } @@ -1983,6 +2014,8 @@ build_static_python_cpu() { set -ex pushd . export mxnet_variant=cpu + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/python/build.sh popd } @@ -1991,6 +2024,8 @@ build_static_python_cu101() { set -ex pushd . export mxnet_variant=cu101 + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/python/build.sh popd } @@ -2000,6 +2035,8 @@ build_static_python_cpu_cmake() { pushd . export mxnet_variant=cpu export CMAKE_STATICBUILD=1 + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/python/build.sh popd } @@ -2009,6 +2046,8 @@ build_static_python_cu101_cmake() { pushd . export mxnet_variant=cu101 export CMAKE_STATICBUILD=1 + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/python/build.sh popd } @@ -2017,6 +2056,8 @@ publish_scala_build() { set -ex pushd . scala_prepare + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/scala/build.sh popd } @@ -2025,6 +2066,8 @@ publish_scala_test() { set -ex pushd . scala_prepare + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/scala/test.sh popd } @@ -2033,6 +2076,8 @@ publish_scala_deploy() { set -ex pushd . scala_prepare + export CC=gcc-7 + export CXX=g++-7 ./ci/publish/scala/deploy.sh popd } diff --git a/cpp-package/example/Makefile b/cpp-package/example/Makefile index 237ab96a3e32..d42cf455386c 100644 --- a/cpp-package/example/Makefile +++ b/cpp-package/example/Makefile @@ -46,7 +46,7 @@ debug: CPPEX_CFLAGS += -DDEBUG -g debug: prebuild all $(CPPEX_EXE):% : %.cpp - $(CXX) -std=c++11 $(CFLAGS) $(CPPEX_CFLAGS) -o build/$@ $(filter %.cpp %.a, $^) $(CPPEX_EXTRA_LDFLAGS) + $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o build/$@ $(filter %.cpp %.a, $^) $(CPPEX_EXTRA_LDFLAGS) ifeq ($(UNAME_S), Darwin) install_name_tool -add_rpath @loader_path build/$@ install_name_tool -add_rpath $(MXNET_LIB_PATH) build/$@ diff --git a/cpp-package/example/example.mk b/cpp-package/example/example.mk index ef99d7426414..cf92e4076d18 100644 --- a/cpp-package/example/example.mk +++ b/cpp-package/example/example.mk @@ -30,8 +30,8 @@ cpp-package-example-all: cpp-package-all $(CPPEX_EXE) build/cpp-package/example/% : cpp-package/example/%.cpp lib/libmxnet.so $(CPP_PACKAGE_OP_H_FILE) @mkdir -p $(@D) - $(CXX) -std=c++11 $(CFLAGS) $(CPPEX_CFLAGS) -MM -MT cpp-package/example/$* $< >build/cpp-package/example//$*.d - $(CXX) -std=c++11 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(LDFLAGS) $(CPPEX_EXTRA_LDFLAGS) + $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -MM -MT cpp-package/example/$* $< >build/cpp-package/example//$*.d + $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(LDFLAGS) $(CPPEX_EXTRA_LDFLAGS) cpp-package-example-clean: rm -rf build/cpp-package/example/* diff --git a/cpp-package/example/feature_extract/Makefile b/cpp-package/example/feature_extract/Makefile index 193eaa7e850b..084b60632729 100644 --- a/cpp-package/example/feature_extract/Makefile +++ b/cpp-package/example/feature_extract/Makefile @@ -27,12 +27,12 @@ LDFLAGS=$(COMMFLAGS) -L ../../../lib -lmxnet $(BLAS) $(CUDA) -lgomp -pthread all: feature_extract prepare_data_with_opencv feature_extract: ./feature_extract.cpp - $(CXX) -c -std=c++11 $(CFLAGS) $^ + $(CXX) -c -std=c++17 $(CFLAGS) $^ $(CXX) $(basename $@).o -o $@ $(LDFLAGS) -rm -f $(basename $@).o prepare_data_with_opencv: ./prepare_data_with_opencv.cpp - $(CXX) -c -std=c++11 $(OPENCV_CFLAGS) $^ + $(CXX) -c -std=c++17 $(OPENCV_CFLAGS) $^ $(CXX) $(basename $@).o -o $@ $(OPENCV_LDFLAGS) -rm -f $(basename $@).o diff --git a/cpp-package/example/inference/Makefile b/cpp-package/example/inference/Makefile index 5efe6cfb68e5..a0ec819e3749 100644 --- a/cpp-package/example/inference/Makefile +++ b/cpp-package/example/inference/Makefile @@ -34,7 +34,7 @@ debug: all $(CPPEX_EXE):% : %.cpp - $(CXX) -std=c++0x $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(CPPEX_EXTRA_LDFLAGS) + $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(CPPEX_EXTRA_LDFLAGS) clean: rm -f $(CPPEX_EXE) diff --git a/cpp-package/example/inference/inference.mk b/cpp-package/example/inference/inference.mk index b03055395f21..7708db6e029a 100644 --- a/cpp-package/example/inference/inference.mk +++ b/cpp-package/example/inference/inference.mk @@ -30,8 +30,8 @@ cpp-package-inference-example-all: cpp-package-all $(CPPEX_EXE) build/cpp-package/example/% : cpp-package/example/inference/%.cpp lib/libmxnet.so $(CPP_PACKAGE_OP_H_FILE) @mkdir -p $(@D) - $(CXX) -std=c++11 $(CFLAGS) $(CPPEX_CFLAGS) -MM -MT cpp-package/example/inference/$* $< >build/cpp-package/example/$*.d - $(CXX) -std=c++11 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(LDFLAGS) $(CPPEX_EXTRA_LDFLAGS) + $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -MM -MT cpp-package/example/inference/$* $< >build/cpp-package/example/$*.d + $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(LDFLAGS) $(CPPEX_EXTRA_LDFLAGS) cpp-package-inference-example-clean: rm -rf build/cpp-package/example/inference* diff --git a/example/image-classification/predict-cpp/Makefile b/example/image-classification/predict-cpp/Makefile index 5c084119b966..05f1afc53821 100644 --- a/example/image-classification/predict-cpp/Makefile +++ b/example/image-classification/predict-cpp/Makefile @@ -1,5 +1,5 @@ # Special thanks to https://github.com/pertusa for the Makefile -CFLAGS=-std=c++11 -Wno-unknown-pragmas -Wall +CFLAGS=-std=c++17 -Wno-unknown-pragmas -Wall # Added for openblas # export OPENBLAS_ROOT=/usr/local/opt/openblas @@ -22,8 +22,8 @@ image-classification-predict: image-classification-predict.o image-classification-predict.o: image-classification-predict.cc g++ -O3 -c image-classification-predict.cc ${CFLAGS} - -clean: + +clean: rm image-classification-predict rm -f *.d *.o diff --git a/example/multi_threaded_inference/Makefile b/example/multi_threaded_inference/Makefile index 3189738fbfff..a58928b12759 100644 --- a/example/multi_threaded_inference/Makefile +++ b/example/multi_threaded_inference/Makefile @@ -16,7 +16,7 @@ # under the License. -CFLAGS=-std=c++11 -g -Wno-unknown-pragmas -Wall -DMXNET_USE_CUDA=1 -DMXNET_USE_CUDNN=1 -DMXNET_USE_MKLDNN=1 +CFLAGS=-std=c++17 -g -Wno-unknown-pragmas -Wall -DMXNET_USE_CUDA=1 -DMXNET_USE_CUDNN=1 -DMXNET_USE_MKLDNN=1 export MXNET_ROOT = `pwd`/../.. export CPP_PACKAGE = $(MXNET_ROOT)/cpp-package diff --git a/example/rnn/large_word_lm/setup.py b/example/rnn/large_word_lm/setup.py index 09c4fb0965a9..54404c183ed4 100644 --- a/example/rnn/large_word_lm/setup.py +++ b/example/rnn/large_word_lm/setup.py @@ -24,5 +24,5 @@ setup(ext_modules = cythonize(Extension(extension_name, sources=sources, language="c++", - extra_compile_args=["-std=c++11"], + extra_compile_args=["-std=c++17"], include_dirs=[numpy.get_include()]))) diff --git a/python/setup.py b/python/setup.py index dcd84cef1ea1..ccfccb3f3f74 100644 --- a/python/setup.py +++ b/python/setup.py @@ -94,7 +94,7 @@ def config_cython(): libraries=libraries, extra_link_args=extra_link_args, language="c++")) - + path = "mxnet/_ffi/_cython" for fn in os.listdir(path): if not fn.endswith(".pyx"): @@ -105,7 +105,7 @@ def config_cython(): include_dirs=["../include/", "../3rdparty/tvm/nnvm/include"], library_dirs=library_dirs, libraries=libraries, - extra_compile_args=["-std=c++11"], + extra_compile_args=["-std=c++17"], extra_link_args=extra_link_args, language="c++")) diff --git a/src/operator/fusion/fused_op.cu b/src/operator/fusion/fused_op.cu index 7f86c056cf13..00887240aa56 100644 --- a/src/operator/fusion/fused_op.cu +++ b/src/operator/fusion/fused_op.cu @@ -601,7 +601,7 @@ CUfunction FusedOp::CompileCode(const std::string &code, std::string gpu_arch_arg = "--gpu-architecture=compute_" + std::to_string(sm_arch); const char *opts[] = {gpu_arch_arg.c_str(), - "--std=c++11"}; + "--std=c++14"}; const std::string kernel_name_demangled = "FusedKernel_" + kernel_name; NVRTC_CALL(nvrtcAddNameExpression(program, (kernel_name_demangled).c_str())); diff --git a/tests/cpp/unittest.mk b/tests/cpp/unittest.mk index 2c674b9ec9d7..8534db91b52a 100644 --- a/tests/cpp/unittest.mk +++ b/tests/cpp/unittest.mk @@ -42,38 +42,38 @@ endif .PHONY: runtest testclean gtest-all.o : $(GTEST_SRCS_) - $(CXX) -std=c++11 $(CPPFLAGS) -I$(GTEST_INC) -I$(GTEST_DIR) $(CXXFLAGS) -c $(GTEST_DIR)/src/gtest-all.cc + $(CXX) -std=c++17 $(CPPFLAGS) -I$(GTEST_INC) -I$(GTEST_DIR) $(CXXFLAGS) -c $(GTEST_DIR)/src/gtest-all.cc gtest.a : gtest-all.o $(AR) $(ARFLAGS) $@ $^ build/tests/cpp/%.o : tests/cpp/%.cc | mkldnn @mkdir -p $(@D) - $(CXX) -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/$* $< > build/tests/cpp/$*.d - $(CXX) -c -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/$*.o $(filter %.cc %.a, $^) + $(CXX) -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/$* $< > build/tests/cpp/$*.d + $(CXX) -c -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/$*.o $(filter %.cc %.a, $^) build/tests/cpp/operator/%.o : tests/cpp/operator/%.cc | mkldnn @mkdir -p $(@D) - $(CXX) -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/operator/$* $< > build/tests/cpp/operator/$*.d - $(CXX) -c -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/operator/$*.o $(filter %.cc %.a, $^) + $(CXX) -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/operator/$* $< > build/tests/cpp/operator/$*.d + $(CXX) -c -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/operator/$*.o $(filter %.cc %.a, $^) build/tests/cpp/storage/%.o : tests/cpp/storage/%.cc | mkldnn @mkdir -p $(@D) - $(CXX) -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/storage/$* $< > build/tests/cpp/storage/$*.d - $(CXX) -c -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/storage/$*.o $(filter %.cc %.a, $^) + $(CXX) -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/storage/$* $< > build/tests/cpp/storage/$*.d + $(CXX) -c -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/storage/$*.o $(filter %.cc %.a, $^) build/tests/cpp/engine/%.o : tests/cpp/engine/%.cc | mkldnn @mkdir -p $(@D) - $(CXX) -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/engine/$* $< > build/tests/cpp/engine/$*.d - $(CXX) -c -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/engine/$*.o $(filter %.cc %.a, $^) + $(CXX) -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/engine/$* $< > build/tests/cpp/engine/$*.d + $(CXX) -c -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/engine/$*.o $(filter %.cc %.a, $^) build/tests/cpp/thread_safety/%.o : tests/cpp/thread_safety/%.cc | mkldnn @mkdir -p $(@D) - $(CXX) -std=c++11 $(TEST_CFLAGS) $(TEST_CPPFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/thread_safety/$* $< > build/tests/cpp/thread_safety/$*.d - $(CXX) -c -std=c++11 $(TEST_CFLAGS) $(TEST_CPPFLAGS) -I$(GTEST_INC) -o build/tests/cpp/thread_safety/$*.o $(filter %.cc %.a, $^) + $(CXX) -std=c++17 $(TEST_CFLAGS) $(TEST_CPPFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/thread_safety/$* $< > build/tests/cpp/thread_safety/$*.d + $(CXX) -c -std=c++17 $(TEST_CFLAGS) $(TEST_CPPFLAGS) -I$(GTEST_INC) -o build/tests/cpp/thread_safety/$*.o $(filter %.cc %.a, $^) $(TEST): $(TEST_OBJ) lib/libmxnet.so $(TEST_LIB_DEP) - $(CXX) -std=c++11 $(TEST_CFLAGS) -I$(GTEST_INC) -o $@ $^ $(TEST_LDFLAGS) + $(CXX) -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -o $@ $^ $(TEST_LDFLAGS) runtest: $(TEST) LD_LIBRARY_PATH=$(shell pwd)/lib:$(LD_LIBRARY_PATH) $(TEST) From 70478e5ed908ae50d307b96b19d4743ba183261a Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Mon, 6 Apr 2020 17:39:59 +0000 Subject: [PATCH 02/20] Remove leftover files of Cuda 7 and Cuda 8 support --- cd/mxnet_lib/mxnet_lib_pipeline.groovy | 3 +- cd/python/pypi/pypi_package.sh | 2 +- cd/utils/docker_tag.sh | 2 +- cd/utils/mxnet_base_image.sh | 3 - ci/docker/Dockerfile.build.ubuntu_gpu_cu80 | 79 --------------------- ci/docker/runtime_functions.sh | 8 +-- ci/jenkins/Jenkins_steps.groovy | 14 ++-- ci/jenkins/Jenkinsfile_clang | 4 +- tests/jenkins/run_test_pip_installations.sh | 12 ++-- tools/pip/doc/CPU_ADDITIONAL.md | 5 +- tools/pip/doc/CU100_ADDITIONAL.md | 7 +- tools/pip/doc/CU101_ADDITIONAL.md | 7 +- tools/pip/doc/CU102_ADDITIONAL.md | 6 +- tools/pip/doc/CU75_ADDITIONAL.md | 38 ---------- tools/pip/doc/CU80_ADDITIONAL.md | 38 ---------- tools/pip/doc/CU90_ADDITIONAL.md | 6 +- tools/pip/doc/CU92_ADDITIONAL.md | 6 +- tools/pip/doc/NATIVE_ADDITIONAL.md | 8 ++- tools/pip/doc/PYPI_README.md | 2 +- tools/pip/setup.py | 9 +-- tools/setup_gpu_build_tools.sh | 59 +-------------- 21 files changed, 55 insertions(+), 263 deletions(-) delete mode 100644 ci/docker/Dockerfile.build.ubuntu_gpu_cu80 delete mode 100644 tools/pip/doc/CU75_ADDITIONAL.md delete mode 100644 tools/pip/doc/CU80_ADDITIONAL.md diff --git a/cd/mxnet_lib/mxnet_lib_pipeline.groovy b/cd/mxnet_lib/mxnet_lib_pipeline.groovy index 0310dd991651..d09bacd4de8e 100644 --- a/cd/mxnet_lib/mxnet_lib_pipeline.groovy +++ b/cd/mxnet_lib/mxnet_lib_pipeline.groovy @@ -42,8 +42,7 @@ def get_pipeline(mxnet_variant, build_fn) { } } - // Add quantization tests for all cu variants except cu80 - if (mxnet_variant.startsWith('cu') && !mxnet_variant.startsWith('cu80')) { + if (mxnet_variant.startsWith('cu')) { tests["${mxnet_variant}: Quantization Python 3"] = { stage("${mxnet_variant}: Quantization Python 3") { timeout(time: max_time, unit: 'MINUTES') { diff --git a/cd/python/pypi/pypi_package.sh b/cd/python/pypi/pypi_package.sh index fafd88e9742b..f9a0b1eb6906 100755 --- a/cd/python/pypi/pypi_package.sh +++ b/cd/python/pypi/pypi_package.sh @@ -18,7 +18,7 @@ set -ex -# variant = cpu, native, cu80, cu100, etc. +# variant = cpu, native, cu92, cu100, etc. export mxnet_variant=${1:?"Please specify the mxnet variant"} # Due to this PR: https://github.com/apache/incubator-mxnet/pull/14899 diff --git a/cd/utils/docker_tag.sh b/cd/utils/docker_tag.sh index e77cbe7856bf..b56e119f0130 100755 --- a/cd/utils/docker_tag.sh +++ b/cd/utils/docker_tag.sh @@ -24,7 +24,7 @@ is_release=${RELEASE_BUILD:-false} version=${VERSION:-nightly} # The docker tags will be in the form _(_mkl) -# Eg. nightly_cpu, 1.4.0_cpu_mkl, nightly_gpu_cu80_mkl, etc. +# Eg. nightly_cpu, 1.4.0_cpu_mkl, nightly_gpu_cu92_mkl, etc. if [[ ${mxnet_variant} == "cpu" ]]; then tag_suffix="cpu" diff --git a/cd/utils/mxnet_base_image.sh b/cd/utils/mxnet_base_image.sh index c87db661818c..1667d4c6f62a 100755 --- a/cd/utils/mxnet_base_image.sh +++ b/cd/utils/mxnet_base_image.sh @@ -21,9 +21,6 @@ mxnet_variant=${1:?"Please specify the mxnet variant as the first parameter"} case ${mxnet_variant} in - cu80*) - echo "nvidia/cuda:8.0-cudnn7-runtime-ubuntu16.04" - ;; cu90*) echo "nvidia/cuda:9.0-cudnn7-runtime-ubuntu16.04" ;; diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 deleted file mode 100644 index 30971b0a5c6e..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 +++ /dev/null @@ -1,79 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU - -FROM nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - -COPY install/ubuntu_perl.sh /work/ -RUN /work/ubuntu_perl.sh - -COPY install/ubuntu_clang.sh /work/ -RUN /work/ubuntu_clang.sh - -COPY install/ubuntu_tvm.sh /work/ -RUN /work/ubuntu_tvm.sh - -COPY install/ubuntu_llvm.sh /work/ -RUN /work/ubuntu_llvm.sh - -COPY install/ubuntu_caffe.sh /work/ -RUN /work/ubuntu_caffe.sh - -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - -COPY install/ubuntu_docs.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_docs.sh - -COPY install/ubuntu_tutorials.sh /work/ -RUN /work/ubuntu_tutorials.sh - -COPY install/ubuntu_binutils.sh /work/ -RUN /work/ubuntu_binutils.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 158775c10aed..123c6f1724ab 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -553,10 +553,10 @@ build_ubuntu_gpu_clang10_werror() { ninja } -build_ubuntu_cpu_clang39() { +build_ubuntu_cpu_clang6() { set -ex cd /work/build - CXX=clang++-3.9 CC=clang-3.9 cmake \ + CXX=clang++-6.0 CC=clang-6.0 cmake \ -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_MKLDNN=OFF \ -DUSE_CUDA=OFF \ @@ -601,10 +601,10 @@ build_ubuntu_cpu_clang_tidy() { $CLANG_TIDY -p /work/build -j $(nproc) -clang-tidy-binary clang-tidy-6.0 /work/mxnet/src } -build_ubuntu_cpu_clang39_mkldnn() { +build_ubuntu_cpu_clang6_mkldnn() { set -ex cd /work/build - CXX=clang++-3.9 CC=clang-3.9 cmake \ + CXX=clang++-6.0 CC=clang-6.0 cmake \ -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_MKLDNN=ON \ -DUSE_CUDA=OFF \ diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index e5ce8de24485..ef942f551bc6 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -422,13 +422,13 @@ def compile_centos7_gpu() { }] } -def compile_unix_clang_3_9_cpu() { - return ['CPU: Clang 3.9': { +def compile_unix_clang_6_cpu() { + return ['CPU: Clang 6': { node(NODE_LINUX_CPU) { ws('workspace/build-cpu-clang39') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang39', false) + utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang6', false) } } } @@ -462,13 +462,13 @@ def compile_unix_clang_tidy_cpu() { }] } -def compile_unix_clang_3_9_mkldnn_cpu() { - return ['CPU: Clang 3.9 MKLDNN': { +def compile_unix_clang_6_mkldnn_cpu() { + return ['CPU: Clang 6 MKLDNN': { node(NODE_LINUX_CPU) { - ws('workspace/build-cpu-mkldnn-clang39') { + ws('workspace/build-cpu-mkldnn-clang6') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang39_mkldnn', false) + utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang6_mkldnn', false) } } } diff --git a/ci/jenkins/Jenkinsfile_clang b/ci/jenkins/Jenkinsfile_clang index 28c40915acd7..1365b31b701d 100644 --- a/ci/jenkins/Jenkinsfile_clang +++ b/ci/jenkins/Jenkinsfile_clang @@ -34,10 +34,10 @@ utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_ utils.main_wrapper( core_logic: { utils.parallel_stage('Build', [ - custom_steps.compile_unix_clang_3_9_cpu(), + custom_steps.compile_unix_clang_6_cpu(), custom_steps.compile_unix_clang_10_cpu(), custom_steps.compile_unix_clang_tidy_cpu(), - custom_steps.compile_unix_clang_3_9_mkldnn_cpu(), + custom_steps.compile_unix_clang_6_mkldnn_cpu(), custom_steps.compile_unix_clang_10_mkldnn_cpu() ]) } diff --git a/tests/jenkins/run_test_pip_installations.sh b/tests/jenkins/run_test_pip_installations.sh index 44788bfaf772..f2b4b245be5c 100755 --- a/tests/jenkins/run_test_pip_installations.sh +++ b/tests/jenkins/run_test_pip_installations.sh @@ -29,8 +29,8 @@ fi WORKSPACE=$( echo "$1" | tr '[:upper:]' '[:lower:]' ) -PYTHON_VERSIONS=('2.7' '3.4' '3.6' '3.5') -DEVICES=('pip_cu75' 'pip_cu80' 'pip_cpu') +PYTHON_VERSIONS=('3.5' '3.6') +DEVICES=('pip_cu92' 'pip_cu101' 'pip_cpu') CI_BUILD_DIR=tests/ci_build/pip_tests # build Docker images and test pip installation for each device @@ -61,10 +61,10 @@ for DEV in "${DEVICES[@]}"; do DOCKER_CMD="virtualenv -p \"/usr/bin/${PYTHON}\" ${PYTHON}; source \"${PYTHON}/bin/activate\"; cd ${WORKSPACE};" if [[ "${DEV}" == *"cpu"* ]]; then DOCKER_CMD="${DOCKER_CMD} pip install mxnet --pre; python tests/python/train/test_conv.py" - elif [[ "${DEV}" == *"cu75"* ]]; then - DOCKER_CMD="${DOCKER_CMD} pip install mxnet-cu75 --pre; python tests/python/train/test_conv.py --gpu" - elif [[ "${DEV}" == *"cu80"* ]]; then - DOCKER_CMD="${DOCKER_CMD} pip install mxnet-cu80 --pre; python tests/python/train/test_conv.py --gpu" + elif [[ "${DEV}" == *"cu92"* ]]; then + DOCKER_CMD="${DOCKER_CMD} pip install mxnet-cu92 --pre -f pip install --pre mxnet -f https://dist.mxnet.io/python/cu92; python tests/python/train/test_conv.py --gpu" + elif [[ "${DEV}" == *"cu101"* ]]; then + DOCKER_CMD="${DOCKER_CMD} pip install mxnet-cu101 --pre -f https://dist.mxnet.io/python/cu101; python tests/python/train/test_conv.py --gpu" fi ${DOCKER_BINARY} run --rm -v ${WORKSPACE}:${WORKSPACE} -w ${WORKSPACE} ${DOCKER_TAG} bash -c "tests/jenkins/run_as_user.sh `id -u` `id -un` `id -g` `id -un` '${DOCKER_CMD}'" done diff --git a/tools/pip/doc/CPU_ADDITIONAL.md b/tools/pip/doc/CPU_ADDITIONAL.md index 07a95d5e451b..34c21268ea2d 100644 --- a/tools/pip/doc/CPU_ADDITIONAL.md +++ b/tools/pip/doc/CPU_ADDITIONAL.md @@ -18,13 +18,12 @@ Prerequisites ------------- This package supports Linux, Mac OSX, and Windows platforms. You may also want to check: -- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.2 support. +- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. -- [mxnet-cu80](https://pypi.python.org/pypi/mxnet-cu80/) with CUDA-8.0 support. -- [mxnet-cu75](https://pypi.python.org/pypi/mxnet-cu75/) with CUDA-7.5 support. +- [mxnet](https://pypi.python.org/pypi/mxnet/). - [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. diff --git a/tools/pip/doc/CU100_ADDITIONAL.md b/tools/pip/doc/CU100_ADDITIONAL.md index 2e607d766ed8..1a33feb1607f 100644 --- a/tools/pip/doc/CU100_ADDITIONAL.md +++ b/tools/pip/doc/CU100_ADDITIONAL.md @@ -18,12 +18,13 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: -- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.2 support. +- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. +- [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. -- [mxnet-cu80](https://pypi.python.org/pypi/mxnet-cu80/) with CUDA-8.0 support. -- [mxnet-cu75](https://pypi.python.org/pypi/mxnet-cu75/) with CUDA-7.5 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). diff --git a/tools/pip/doc/CU101_ADDITIONAL.md b/tools/pip/doc/CU101_ADDITIONAL.md index 278c39942141..75b35dbd3de6 100644 --- a/tools/pip/doc/CU101_ADDITIONAL.md +++ b/tools/pip/doc/CU101_ADDITIONAL.md @@ -18,12 +18,13 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: -- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.2 support. +- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. +- [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. -- [mxnet-cu80](https://pypi.python.org/pypi/mxnet-cu80/) with CUDA-8.0 support. -- [mxnet-cu75](https://pypi.python.org/pypi/mxnet-cu75/) with CUDA-7.5 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). diff --git a/tools/pip/doc/CU102_ADDITIONAL.md b/tools/pip/doc/CU102_ADDITIONAL.md index 81829690da29..5a8c87a6f5d7 100644 --- a/tools/pip/doc/CU102_ADDITIONAL.md +++ b/tools/pip/doc/CU102_ADDITIONAL.md @@ -18,11 +18,13 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. +- [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. -- [mxnet-cu80](https://pypi.python.org/pypi/mxnet-cu80/) with CUDA-8.0 support. -- [mxnet-cu75](https://pypi.python.org/pypi/mxnet-cu75/) with CUDA-7.5 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). diff --git a/tools/pip/doc/CU75_ADDITIONAL.md b/tools/pip/doc/CU75_ADDITIONAL.md deleted file mode 100644 index ae382f96ba35..000000000000 --- a/tools/pip/doc/CU75_ADDITIONAL.md +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - - - - - - - - - - - -**CUDA 7.5 package for MXNet is no longer maintained for new releases.** - -Prerequisites -------------- -This package supports Linux only, up to 1.2.1. You may also want to check: -- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.2 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. -- [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. -- [mxnet-cu80](https://pypi.python.org/pypi/mxnet-cu80/) with CUDA-8.0 support. -- [mxnet](https://pypi.python.org/pypi/mxnet/). - -To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). - -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. - -Installation ------------- -To install: -```bash -pip install mxnet-cu75 -``` diff --git a/tools/pip/doc/CU80_ADDITIONAL.md b/tools/pip/doc/CU80_ADDITIONAL.md deleted file mode 100644 index 5ce06d764e42..000000000000 --- a/tools/pip/doc/CU80_ADDITIONAL.md +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - - - - - - - - - - - -**CUDA 8.0 package for MXNet is no longer maintained for new releases.** - -Prerequisites -------------- -This package supports Linux and Windows platforms. You may also want to check: -- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.2 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. -- [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. -- [mxnet-cu75](https://pypi.python.org/pypi/mxnet-cu75/) with CUDA-7.5 support. -- [mxnet](https://pypi.python.org/pypi/mxnet/). - -To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). - -To install for other platforms (e.g. Raspberry Pi/ARM) or other versions of CUDA, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. - -Installation ------------- -To install: -```bash -pip install mxnet-cu80 -``` diff --git a/tools/pip/doc/CU90_ADDITIONAL.md b/tools/pip/doc/CU90_ADDITIONAL.md index 770914b5a1a0..3f51c50520f2 100644 --- a/tools/pip/doc/CU90_ADDITIONAL.md +++ b/tools/pip/doc/CU90_ADDITIONAL.md @@ -21,10 +21,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. +- [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. -- [mxnet-cu80](https://pypi.python.org/pypi/mxnet-cu80/) with CUDA-8.0 support. -- [mxnet-cu75](https://pypi.python.org/pypi/mxnet-cu75/) with CUDA-7.5 support. +- [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). diff --git a/tools/pip/doc/CU92_ADDITIONAL.md b/tools/pip/doc/CU92_ADDITIONAL.md index 7aec9a1aeb67..0b87c76974c3 100644 --- a/tools/pip/doc/CU92_ADDITIONAL.md +++ b/tools/pip/doc/CU92_ADDITIONAL.md @@ -19,10 +19,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. +- [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. +- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. -- [mxnet-cu80](https://pypi.python.org/pypi/mxnet-cu80/) with CUDA-8.0 support. -- [mxnet-cu75](https://pypi.python.org/pypi/mxnet-cu75/) with CUDA-7.5 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). diff --git a/tools/pip/doc/NATIVE_ADDITIONAL.md b/tools/pip/doc/NATIVE_ADDITIONAL.md index 902464c7ab6e..f73a1f22ac89 100644 --- a/tools/pip/doc/NATIVE_ADDITIONAL.md +++ b/tools/pip/doc/NATIVE_ADDITIONAL.md @@ -18,9 +18,13 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: -- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.2 support. +- [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. +- [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. -- [mxnet](https://pypi.python.org/pypi/mxnet/) CPU build with MKLDNN. +- [mxnet-cu90](https://pypi.python.org/pypi/mxnet-cu90/) with CUDA-9.0 support. +- [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). diff --git a/tools/pip/doc/PYPI_README.md b/tools/pip/doc/PYPI_README.md index d323a5545f22..c39d6d6fb5b3 100644 --- a/tools/pip/doc/PYPI_README.md +++ b/tools/pip/doc/PYPI_README.md @@ -17,7 +17,7 @@ Apache MXNet (Incubating) Python Package ======================================== -[Apache MXNet](http://beta.mxnet.io) is a deep learning framework designed for both *efficiency* and *flexibility*. +[Apache MXNet](https://mxnet.apache.org/) is a deep learning framework designed for both *efficiency* and *flexibility*. It allows you to mix the flavours of deep learning programs together to maximize the efficiency and your productivity. For feature requests on the PyPI package, suggestions, and issue reports, create an issue by clicking [here](https://github.com/apache/incubator-mxnet/issues/new). diff --git a/tools/pip/setup.py b/tools/pip/setup.py index d01051713f2f..ce49f3c1ce7a 100644 --- a/tools/pip/setup.py +++ b/tools/pip/setup.py @@ -145,12 +145,9 @@ def skip_markdown_comments(md): libraries.append('CUDA-9.1') elif variant.startswith('CU90'): libraries.append('CUDA-9.0') - elif variant.startswith('CU80'): - libraries.append('CUDA-8.0') - elif variant.startswith('CU75'): - libraries.append('CUDA-7.5') - if variant.endswith('MKL'): - libraries.append('MKLDNN') + +if variant != 'native': + libraries.append('MKLDNN') short_description += ' This version uses {0}.'.format(' and '.join(libraries)) diff --git a/tools/setup_gpu_build_tools.sh b/tools/setup_gpu_build_tools.sh index bba37108b98b..6c5f655f8df9 100755 --- a/tools/setup_gpu_build_tools.sh +++ b/tools/setup_gpu_build_tools.sh @@ -18,7 +18,7 @@ # under the License. # This script installs the tools and libraries for CUDA GPU on Ubuntu. -# Usage: VARIANT=cu92mkl; DEPS_PATH=$HOME; setup_gpu_build_tools.sh $VARIANT $DEPS_PATH; +# Usage: VARIANT=cu102mkl; DEPS_PATH=$HOME; setup_gpu_build_tools.sh $VARIANT $DEPS_PATH; # It installs the tools into DEPS_PATH as specified by the second argument, and will set # the following environment variables: # PATH, CPLUS_INCLUDE_PATH, C_INCLUDE_PATH, LIBRARY_PATH, LD_LIBRARY_PATH, NVCC @@ -63,18 +63,6 @@ elif [[ $VARIANT == cu90* ]]; then LIBCUDA_VERSION='384.145-0ubuntu1' LIBCUDNN_VERSION='7.6.5.32-1+cuda9.0' LIBNCCL_VERSION='2.5.6-1+cuda9.0' -elif [[ $VARIANT == cu80* ]]; then - CUDA_VERSION='8.0.61-1' - CUDA_PATCH_VERSION='8.0.61.2-1' - LIBCUDA_VERSION='375.88-0ubuntu1' - LIBCUDNN_VERSION='7.2.1.38-1+cuda8.0' - LIBNCCL_VERSION='2.3.4-1+cuda8.0' -elif [[ $VARIANT == cu75* ]]; then - CUDA_VERSION='7.5-18' - CUDA_PATCH_VERSION='7.5-18' - LIBCUDA_VERSION='375.88-0ubuntu1' - LIBCUDNN_VERSION='6.0.21-1+cuda7.5' - LIBNCCL_VERSION='' fi if [[ $VARIANT == cu* ]]; then CUDA_MAJOR_VERSION=$(echo $CUDA_VERSION | tr '-' '.' | cut -d. -f1,2) @@ -246,51 +234,6 @@ elif [[ $VARIANT == cu90* ]]; then "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ ) -elif [[ $VARIANT == cu80* ]]; then - cuda_files=( \ - "cuda-core-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cudart-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-misc-headers-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - ) - ml_files=( \ - "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ - "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ - ) -elif [[ $VARIANT == cu75* ]]; then - cuda_files=( \ - "cuda-core-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-curand-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-misc-headers-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - ) - ml_files=( \ - "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ - ) fi From 8063182b39c10fc3fc7bb7474c0fbe59b27c757b Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Mon, 6 Apr 2020 22:32:10 +0000 Subject: [PATCH 03/20] thrust 1.9.8 for clang10 --- ci/docker/Dockerfile.build.ubuntu_gpu_cu101 | 3 +++ ci/docker/install/thrust.sh | 29 +++++++++++++++++++++ ci/docker/runtime_functions.sh | 4 +++ 3 files changed, 36 insertions(+) create mode 100755 ci/docker/install/thrust.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 index 49dcd672917b..9a582c4cfd4f 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 @@ -73,6 +73,9 @@ ENV CUDNN_VERSION=7.6.5.32 COPY install/ubuntu_cudnn.sh /work/ RUN /work/ubuntu_cudnn.sh +COPY install/thrust.sh /work/ +RUN /work/thrust.sh + COPY install/ubuntu_binutils.sh /work/ RUN /work/ubuntu_binutils.sh diff --git a/ci/docker/install/thrust.sh b/ci/docker/install/thrust.sh new file mode 100755 index 000000000000..b307604dcd85 --- /dev/null +++ b/ci/docker/install/thrust.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Install Thrust 1.9.8 to be shipped with Cuda 11. +# Fixes https://github.com/thrust/thrust/issues/1072 for Clang 10 +# This file can be deleted when using Cuda 11 on CI + +set -ex + +cd /usr/local +git clone https://github.com/thrust/thrust.git +cd thrust +git checkout 1.9.8 diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 123c6f1724ab..9817efb7323d 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -541,6 +541,10 @@ build_ubuntu_gpu_clang10_werror() { # Disable cpp package as OpWrapperGenerator.py dlopens libmxnet.so, # requiring presence of cuda driver libraries that are missing on CI host export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda-10.1/targets/x86_64-linux/lib/stubs + # Workaround https://github.com/thrust/thrust/issues/1072 + # Can be deleted on Cuda 11 + export CXXFLAGS="-I/usr/local/thrust" + # Set CMAKE_AR and CMAKE_RANLIB due to Ubuntu 16.04 default binutils 4GB limitation CXX=clang++-10 CC=clang-10 cmake \ -DCMAKE_AR=/usr/local/bin/ar \ From 6d1d1220bb4b59b51640729ad7593d11b477fb44 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 7 Apr 2020 01:36:27 +0000 Subject: [PATCH 04/20] compiler warnings --- 3rdparty/mshadow/mshadow/logging.h | 5 +++++ 3rdparty/mshadow/mshadow/packet-inl.h | 4 ++++ Makefile | 4 +++- ci/docker/runtime_functions.sh | 2 -- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/3rdparty/mshadow/mshadow/logging.h b/3rdparty/mshadow/mshadow/logging.h index 5fc56aff3bae..6aede0d69725 100644 --- a/3rdparty/mshadow/mshadow/logging.h +++ b/3rdparty/mshadow/mshadow/logging.h @@ -204,7 +204,12 @@ class LogMessageFatal { ~LogMessageFatal() MSHADOW_THROW_EXCEPTION { // throwing out of destructor is evil // hopefully we can do it here +#pragma GCC diagnostic push +#if __GNUC__ >= 7 +#pragma GCC diagnostic ignored "-Wterminate" +#endif throw Error(log_stream_.str()); +#pragma GCC diagnostic pop } private: diff --git a/3rdparty/mshadow/mshadow/packet-inl.h b/3rdparty/mshadow/mshadow/packet-inl.h index 58cbc4005aaf..69a41b50e08a 100644 --- a/3rdparty/mshadow/mshadow/packet-inl.h +++ b/3rdparty/mshadow/mshadow/packet-inl.h @@ -74,7 +74,11 @@ inline void* AlignedMallocPitch(size_t *out_pitch, if (res == NULL) { LOG(FATAL) << "AlignedMallocPitch failed"; } +#if __GNUC__ >= 6 +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif return res; +#pragma GCC diagnostic pop } /*! diff --git a/Makefile b/Makefile index b8634140b4be..ed19e69390bd 100644 --- a/Makefile +++ b/Makefile @@ -101,7 +101,9 @@ CFLAGS += -DDMLC_LOG_STACK_TRACE_SIZE=0 CFLAGS += -DDMLC_LOG_FATAL_THROW=1 ifeq ($(DEV), 1) - CFLAGS += -g -Werror + # Excluded from Werror: + # 1) variables used in '#pragma omp parallel' are considered unused + CFLAGS += -g -Werror -Wno-error=unused-variable -Wno-error=maybe-uninitialized -Wno-error=unused-function NVCCFLAGS += -Werror cross-execution-space-call endif diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 9817efb7323d..ae7613513df7 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -796,7 +796,6 @@ build_ubuntu_gpu_cuda101_cudnn7_make() { export CXX=g++-7 build_ccache_wrappers make \ - DEV=1 \ USE_BLAS=openblas \ USE_MKLDNN=0 \ USE_CUDA=1 \ @@ -817,7 +816,6 @@ build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() { export CXX=g++-7 build_ccache_wrappers make \ - DEV=1 \ USE_BLAS=openblas \ USE_MKLDNN=1 \ USE_CUDA=1 \ From cc0966b43e159ca8e30ab48dfb7127866942344f Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 7 Apr 2020 05:18:20 +0000 Subject: [PATCH 05/20] Disable broken test_init.test_rsp_const_init --- perl-package/AI-MXNet/t/test_init.t | 5 +++-- src/c_api/c_api_executor.cc | 2 +- tests/python/unittest/test_init.py | 6 +++++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/perl-package/AI-MXNet/t/test_init.t b/perl-package/AI-MXNet/t/test_init.t index bf811f8584b7..c697e99bce0f 100644 --- a/perl-package/AI-MXNet/t/test_init.t +++ b/perl-package/AI-MXNet/t/test_init.t @@ -17,7 +17,8 @@ use strict; use warnings; -use Test::More tests => 7; +# use Test::More tests => 7; https://github.com/apache/incubator-mxnet/issues/17988 +use Test::More tests => 4; use AI::MXNet qw(mx); sub test_default_init @@ -71,7 +72,7 @@ sub test_rsp_const_init $check_rsp_const_init->(mx->initializer->One(), 1); } -test_rsp_const_init(); +# test_rsp_const_init(); https://github.com/apache/incubator-mxnet/issues/17988 test_default_init(); test_variable_init(); test_aux_init(); diff --git a/src/c_api/c_api_executor.cc b/src/c_api/c_api_executor.cc index afc64f73de7c..41193b5966ef 100644 --- a/src/c_api/c_api_executor.cc +++ b/src/c_api/c_api_executor.cc @@ -809,7 +809,7 @@ int _SimpleBindImpl(SymbolHandle symbol_handle, ret->ret_vec_charp.push_back(ret->ret_vec_str.back().c_str()); } *shared_buffer_len = shared_buffer_map.size(); - *updated_shared_buffer_handle_list = &(ret->ret_handles[nd_idx]); + *updated_shared_buffer_handle_list = &(ret->ret_handles.at(nd_idx)); *updated_shared_buffer_name_list = &(ret->ret_vec_charp[0]); } diff --git a/tests/python/unittest/test_init.py b/tests/python/unittest/test_init.py index 6d8830c1d089..290f84b18781 100644 --- a/tests/python/unittest/test_init.py +++ b/tests/python/unittest/test_init.py @@ -15,9 +15,12 @@ # specific language governing permissions and limitations # under the License. +import json +import unittest + import mxnet as mx import numpy as np -import json + def test_default_init(): data = mx.sym.Variable('data') @@ -45,6 +48,7 @@ def test_aux_init(): assert (mod.get_params()[1]['bn_moving_var'].asnumpy() == 1).all() assert (mod.get_params()[1]['bn_moving_mean'].asnumpy() == 0).all() +@unittest.skip("rsp const init is broken: https://github.com/apache/incubator-mxnet/issues/17988") def test_rsp_const_init(): def check_rsp_const_init(init, val): shape = (10, 10) From 435f8b11b54d9a113a3ec7e9d4f254815621def2 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 7 Apr 2020 06:40:49 +0000 Subject: [PATCH 06/20] Disable tests invoking NumpyBooleanAssignForwardCPU --- src/operator/numpy/np_boolean_mask_assign.cc | 2 +- tests/python/unittest/test_numpy_ndarray.py | 1 + tests/python/unittest/test_numpy_op.py | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/operator/numpy/np_boolean_mask_assign.cc b/src/operator/numpy/np_boolean_mask_assign.cc index e01ebb7c6c24..d5ab00835638 100644 --- a/src/operator/numpy/np_boolean_mask_assign.cc +++ b/src/operator/numpy/np_boolean_mask_assign.cc @@ -220,7 +220,7 @@ void NumpyBooleanAssignForwardCPU(const nnvm::NodeAttrs& attrs, // If there's no True in mask, return directly if (valid_num == 0) return; - const TShape& vshape = inputs[2].shape_; + const TShape& vshape = inputs.at(2).shape_; if (inputs.size() == 3U) { // tensor case diff --git a/tests/python/unittest/test_numpy_ndarray.py b/tests/python/unittest/test_numpy_ndarray.py index 3ce53c6a6e80..0f57947b6c1f 100644 --- a/tests/python/unittest/test_numpy_ndarray.py +++ b/tests/python/unittest/test_numpy_ndarray.py @@ -1125,6 +1125,7 @@ def test_np_multinomial(): @unittest.skipUnless(is_op_runnable(), "Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np +@unittest.skip("NumpyBooleanAssignForwardCPU broken: https://github.com/apache/incubator-mxnet/issues/17990") def test_np_ndarray_boolean_indexing(): def test_single_bool_index(): # adapted from numpy's test_indexing.py diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 063c3b7a58a6..030129e42397 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -1366,6 +1366,7 @@ def hybrid_forward(self, F, a): @with_seed() @use_np +@unittest.skip("NumpyBooleanAssignForwardCPU broken: https://github.com/apache/incubator-mxnet/issues/17990") def test_npx_batch_dot(): ctx = mx.context.current_context() dtypes = ['float32', 'float64'] @@ -1485,6 +1486,7 @@ def gt_grad_batch_dot_numpy(lhs, rhs, ograd, transpose_a, transpose_b, lhs_req, @with_seed() @use_np +@unittest.skip("NumpyBooleanAssignForwardCPU broken: https://github.com/apache/incubator-mxnet/issues/17990") def test_npi_boolean_assign(): class TestBooleanAssignScalar(HybridBlock): def __init__(self, val, start_axis): From 36986dfbd750f5168de6ce8b5f47c921523726b2 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 7 Apr 2020 07:46:17 +0000 Subject: [PATCH 07/20] Fix out of bounds access in SoftmaxGradOpType --- src/operator/nn/softmax-inl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/operator/nn/softmax-inl.h b/src/operator/nn/softmax-inl.h index f8a3fe429c53..1b5e9921a62c 100644 --- a/src/operator/nn/softmax-inl.h +++ b/src/operator/nn/softmax-inl.h @@ -713,7 +713,7 @@ static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs, } return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 && - (*out_attrs)[1] != -1 && (*in_attrs)[1] != -1; + (!softmax_use_length(attrs) || ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1)); } else { CHECK_EQ(in_attrs->size(), 2U); int out_dtype = (*in_attrs)[1]; From d9d6a299eeb56c9f10ecbaaca7e3667531baf065 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 7 Apr 2020 04:55:14 +0000 Subject: [PATCH 08/20] Use CentOS 7 for staticbuilds CentOS 7 fullfills the requirements for PEP 599 manylinux-2014 and provides a C++17 toolchain. --- cd/mxnet_lib/mxnet_lib_pipeline.groovy | 5 +- cd/mxnet_lib/static/Jenkins_pipeline.groovy | 4 +- cd/python/docker/Jenkins_pipeline.groovy | 5 +- cd/python/pypi/Jenkins_pipeline.groovy | 5 +- ci/docker/Dockerfile.build.ubuntu_build_cuda | 5 + ci/docker/Dockerfile.build.ubuntu_gpu_cu100 | 87 ------------------ ci/docker/Dockerfile.build.ubuntu_gpu_cu102 | 88 ------------------ ci/docker/Dockerfile.build.ubuntu_gpu_cu90 | 88 ------------------ ci/docker/Dockerfile.build.ubuntu_gpu_cu92 | 87 ------------------ ci/docker/Dockerfile.publish.centos7_cpu | 41 +++++++++ ...u => Dockerfile.publish.centos7_gpu_cu100} | 29 +++--- ...u => Dockerfile.publish.centos7_gpu_cu101} | 28 +++--- .../Dockerfile.publish.centos7_gpu_cu102 | 39 ++++++++ ...pu => Dockerfile.publish.centos7_gpu_cu90} | 28 +++--- ...pu => Dockerfile.publish.centos7_gpu_cu92} | 28 +++--- ci/docker/install/centos7_base.sh | 4 + ci/docker/install/centos7_core.sh | 4 + ci/docker/install/ubuntu_publish.sh | 92 ------------------- ci/docker/install/ubuntu_scala.sh | 26 +----- ci/docker/runtime_functions.sh | 26 ++---- ci/jenkins/Jenkins_steps.groovy | 46 +++++----- ci/publish/Jenkinsfile | 2 +- ci/publish/README.md | 7 +- tools/dependencies/README.md | 1 - .../dependencies/make_shared_dependencies.sh | 3 +- tools/dependencies/zmq.sh | 6 ++ tools/staticbuild/build.sh | 2 +- tools/staticbuild/build_lib.sh | 10 +- tools/staticbuild/build_lib_cmake.sh | 10 +- 29 files changed, 210 insertions(+), 596 deletions(-) delete mode 100644 ci/docker/Dockerfile.build.ubuntu_gpu_cu100 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_gpu_cu102 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_gpu_cu90 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_gpu_cu92 create mode 100644 ci/docker/Dockerfile.publish.centos7_cpu rename ci/docker/{Dockerfile.publish.test.ubuntu1404_gpu => Dockerfile.publish.centos7_gpu_cu100} (69%) rename ci/docker/{Dockerfile.publish.ubuntu1404_cpu => Dockerfile.publish.centos7_gpu_cu101} (69%) create mode 100644 ci/docker/Dockerfile.publish.centos7_gpu_cu102 rename ci/docker/{Dockerfile.publish.ubuntu1404_gpu => Dockerfile.publish.centos7_gpu_cu90} (69%) rename ci/docker/{Dockerfile.publish.test.ubuntu1404_cpu => Dockerfile.publish.centos7_gpu_cu92} (69%) delete mode 100755 ci/docker/install/ubuntu_publish.sh diff --git a/cd/mxnet_lib/mxnet_lib_pipeline.groovy b/cd/mxnet_lib/mxnet_lib_pipeline.groovy index d09bacd4de8e..0c49bfa8e2c8 100644 --- a/cd/mxnet_lib/mxnet_lib_pipeline.groovy +++ b/cd/mxnet_lib/mxnet_lib_pipeline.groovy @@ -75,10 +75,9 @@ def get_stash(mxnet_variant) { // The environment corresponds to the docker files in the 'docker' directory def get_environment(mxnet_variant) { if (mxnet_variant.startsWith("cu")) { - // Remove 'mkl' suffix from variant to properly format test environment - return "ubuntu_gpu_${mxnet_variant.replace('mkl', '')}" + return "publish.centos7_gpu_${mxnet_variant}" } - return "ubuntu_cpu" + return "publish.centos7_cpu" } // Returns the variant appropriate jenkins node test in which diff --git a/cd/mxnet_lib/static/Jenkins_pipeline.groovy b/cd/mxnet_lib/static/Jenkins_pipeline.groovy index abbafdbef075..61d18083e314 100644 --- a/cd/mxnet_lib/static/Jenkins_pipeline.groovy +++ b/cd/mxnet_lib/static/Jenkins_pipeline.groovy @@ -46,9 +46,7 @@ def build(mxnet_variant) { node(NODE_LINUX_CPU) { ws("workspace/mxnet_${libtype}/${mxnet_variant}/${env.BUILD_NUMBER}") { ci_utils.init_git() - // Compiling in Ubuntu14.04 due to glibc issues. - // This should be updates once we have clarity on this issue. - ci_utils.docker_run('publish.ubuntu1404_cpu', "build_static_libmxnet ${mxnet_variant}", false) + ci_utils.docker_run('publish.centos7_cpu', "build_static_libmxnet ${mxnet_variant}", false) ci_utils.pack_lib("mxnet_${mxnet_variant}", libmxnet_pipeline.get_stash(mxnet_variant)) } } diff --git a/cd/python/docker/Jenkins_pipeline.groovy b/cd/python/docker/Jenkins_pipeline.groovy index 2911a6571288..693acc540874 100644 --- a/cd/python/docker/Jenkins_pipeline.groovy +++ b/cd/python/docker/Jenkins_pipeline.groovy @@ -32,10 +32,9 @@ def get_pipeline(mxnet_variant) { // The environment corresponds to the docker files in the 'docker' directory def get_environment(mxnet_variant) { if (mxnet_variant.startsWith("cu")) { - // Remove 'mkl' suffix from variant to properly format test environment - return "ubuntu_gpu_${mxnet_variant.replace('mkl', '')}" + return "publish.centos7_gpu_${mxnet_variant}" } - return "ubuntu_cpu" + return "publish.centos7_cpu" } diff --git a/cd/python/pypi/Jenkins_pipeline.groovy b/cd/python/pypi/Jenkins_pipeline.groovy index 125eb2c5c200..dfd864fa1a3b 100644 --- a/cd/python/pypi/Jenkins_pipeline.groovy +++ b/cd/python/pypi/Jenkins_pipeline.groovy @@ -35,11 +35,10 @@ def get_pipeline(mxnet_variant) { } def get_environment(mxnet_variant) { - def environment = "ubuntu_cpu" if (mxnet_variant.startsWith('cu')) { - environment = "ubuntu_gpu_${mxnet_variant}".replace("mkl", "") + return "publish.centos7_gpu_${mxnet_variant}" } - return environment + return "publish.centos7_cpu" } def build(mxnet_variant) { diff --git a/ci/docker/Dockerfile.build.ubuntu_build_cuda b/ci/docker/Dockerfile.build.ubuntu_build_cuda index 07f67d178c2c..7623048d6909 100644 --- a/ci/docker/Dockerfile.build.ubuntu_build_cuda +++ b/ci/docker/Dockerfile.build.ubuntu_build_cuda @@ -42,8 +42,12 @@ COPY install/ubuntu_perl.sh /work/ RUN /work/ubuntu_perl.sh COPY install/ubuntu_clang.sh /work/ RUN /work/ubuntu_clang.sh +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh COPY install/ubuntu_binutils.sh /work/ RUN /work/ubuntu_binutils.sh +COPY install/thrust.sh /work/ +RUN /work/thrust.sh ENV CUDNN_VERSION=7.6.5.32 COPY install/ubuntu_cudnn.sh /work/ @@ -51,6 +55,7 @@ RUN /work/ubuntu_cudnn.sh # Special case because the CPP-Package requires the CUDA runtime libs # and not only stubs (which are provided by the base image) +# This prevents usage of this image for actual GPU tests with Docker. COPY install/ubuntu_nvidia.sh /work/ RUN /work/ubuntu_nvidia.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 deleted file mode 100644 index 3a2fb7e825aa..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 +++ /dev/null @@ -1,87 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU - -FROM nvidia/cuda:10.0-devel-ubuntu16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - -COPY install/ubuntu_perl.sh /work/ -RUN /work/ubuntu_perl.sh - -COPY install/ubuntu_clang.sh /work/ -RUN /work/ubuntu_clang.sh - -COPY install/ubuntu_gcc8.sh /work/ -RUN /work/ubuntu_gcc8.sh - -COPY install/ubuntu_tvm.sh /work/ -RUN /work/ubuntu_tvm.sh - -COPY install/ubuntu_llvm.sh /work/ -RUN /work/ubuntu_llvm.sh - -COPY install/ubuntu_caffe.sh /work/ -RUN /work/ubuntu_caffe.sh - -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - -COPY install/ubuntu_docs.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_docs.sh - -COPY install/ubuntu_tutorials.sh /work/ -RUN /work/ubuntu_tutorials.sh - -ENV CUDA_VERSION=10.0.130 -ENV CUDNN_VERSION=7.6.5.32 -COPY install/ubuntu_cudnn.sh /work/ -RUN /work/ubuntu_cudnn.sh - -COPY install/ubuntu_binutils.sh /work/ -RUN /work/ubuntu_binutils.sh - -# Always last -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 deleted file mode 100644 index 9ab16514cf50..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 +++ /dev/null @@ -1,88 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU - -FROM nvidia/cuda:10.2-devel-ubuntu16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - -COPY install/ubuntu_perl.sh /work/ -RUN /work/ubuntu_perl.sh - -COPY install/ubuntu_clang.sh /work/ -RUN /work/ubuntu_clang.sh - -COPY install/ubuntu_gcc8.sh /work/ -RUN /work/ubuntu_gcc8.sh - -COPY install/ubuntu_tvm.sh /work/ -RUN /work/ubuntu_tvm.sh - -COPY install/ubuntu_llvm.sh /work/ -RUN /work/ubuntu_llvm.sh - -COPY install/ubuntu_caffe.sh /work/ -RUN /work/ubuntu_caffe.sh - -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - -COPY install/ubuntu_docs.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_docs.sh - -COPY install/ubuntu_tutorials.sh /work/ -RUN /work/ubuntu_tutorials.sh - -ENV CUDA_VERSION=10.2.89 -ENV CUDNN_VERSION=7.6.5.32 -COPY install/ubuntu_cudnn.sh /work/ -RUN /work/ubuntu_cudnn.sh - -COPY install/ubuntu_binutils.sh /work/ -RUN /work/ubuntu_binutils.sh - -# Always last -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/compat diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 deleted file mode 100644 index c8fa9221508b..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 +++ /dev/null @@ -1,88 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU - -FROM nvidia/cuda:9.0-devel-ubuntu16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - -COPY install/ubuntu_perl.sh /work/ -RUN /work/ubuntu_perl.sh - -COPY install/ubuntu_clang.sh /work/ -RUN /work/ubuntu_clang.sh - -COPY install/ubuntu_gcc8.sh /work/ -RUN /work/ubuntu_gcc8.sh - -COPY install/ubuntu_tvm.sh /work/ -RUN /work/ubuntu_tvm.sh - -COPY install/ubuntu_llvm.sh /work/ -RUN /work/ubuntu_llvm.sh - -COPY install/ubuntu_caffe.sh /work/ -RUN /work/ubuntu_caffe.sh - -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - -COPY install/ubuntu_docs.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_docs.sh - -COPY install/ubuntu_tutorials.sh /work/ -RUN /work/ubuntu_tutorials.sh - -ENV CUDA_VERSION=9.0.176 -ENV CUDNN_VERSION=7.6.5.32 -COPY install/ubuntu_cudnn.sh /work/ -RUN /work/ubuntu_cudnn.sh - -COPY install/ubuntu_binutils.sh /work/ -RUN /work/ubuntu_binutils.sh - -# Always last -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 deleted file mode 100644 index 4ba33006b28a..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 +++ /dev/null @@ -1,87 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU - -FROM nvidia/cuda:9.2-devel-ubuntu16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - -COPY install/ubuntu_perl.sh /work/ -RUN /work/ubuntu_perl.sh - -COPY install/ubuntu_clang.sh /work/ -RUN /work/ubuntu_clang.sh - -COPY install/ubuntu_gcc8.sh /work/ -RUN /work/ubuntu_gcc8.sh - -COPY install/ubuntu_tvm.sh /work/ -RUN /work/ubuntu_tvm.sh - -COPY install/ubuntu_llvm.sh /work/ -RUN /work/ubuntu_llvm.sh - -COPY install/ubuntu_caffe.sh /work/ -RUN /work/ubuntu_caffe.sh - -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - -COPY install/ubuntu_docs.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_docs.sh - -COPY install/ubuntu_tutorials.sh /work/ -RUN /work/ubuntu_tutorials.sh - -ENV CUDA_VERSION=9.2.148 -ENV CUDNN_VERSION=7.6.5.32 -COPY install/ubuntu_cudnn.sh /work/ -RUN /work/ubuntu_cudnn.sh - -COPY install/ubuntu_binutils.sh /work/ -RUN /work/ubuntu_binutils.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.publish.centos7_cpu b/ci/docker/Dockerfile.publish.centos7_cpu new file mode 100644 index 000000000000..2010238cb71d --- /dev/null +++ b/ci/docker/Dockerfile.publish.centos7_cpu @@ -0,0 +1,41 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Dockerfile to build and run MXNet on CentOS 7 for CPU + +FROM centos:7 + +WORKDIR /work/deps + +COPY install/centos7_base.sh /work/ +RUN /work/centos7_base.sh +COPY install/centos7_ccache.sh /work/ +RUN /work/centos7_ccache.sh +COPY install/centos7_python.sh /work/ +RUN /work/centos7_python.sh +COPY install/centos7_scala.sh /work/ +RUN /work/centos7_scala.sh + +ARG USER_ID=0 +COPY install/centos7_adduser.sh /work/ +RUN /work/centos7_adduser.sh + +ENV PYTHONPATH=./python/ +WORKDIR /work/mxnet + +COPY runtime_functions.sh /work/ diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1404_gpu b/ci/docker/Dockerfile.publish.centos7_gpu_cu100 similarity index 69% rename from ci/docker/Dockerfile.publish.test.ubuntu1404_gpu rename to ci/docker/Dockerfile.publish.centos7_gpu_cu100 index 854dd68a63c1..78398c97dac8 100644 --- a/ci/docker/Dockerfile.publish.test.ubuntu1404_gpu +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu100 @@ -15,26 +15,25 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# -# Dockerfile to run MXNet on Ubuntu 14.04 for GPU -# Use CPU with setup_gpu script -FROM ubuntu:14.04 +FROM nvidia/cuda:10.0-cudnn7-devel-centos7 WORKDIR /work/deps -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_scala.sh /work/ -RUN /work/ubuntu_scala.sh +COPY install/centos7_base.sh /work/ +RUN /work/centos7_base.sh +COPY install/centos7_ccache.sh /work/ +RUN /work/centos7_ccache.sh +COPY install/centos7_python.sh /work/ +RUN /work/centos7_python.sh +COPY install/centos7_scala.sh /work/ +RUN /work/centos7_scala.sh ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ +COPY install/centos7_adduser.sh /work/ +RUN /work/centos7_adduser.sh +ENV PYTHONPATH=./python/ WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib + +COPY runtime_functions.sh /work/ diff --git a/ci/docker/Dockerfile.publish.ubuntu1404_cpu b/ci/docker/Dockerfile.publish.centos7_gpu_cu101 similarity index 69% rename from ci/docker/Dockerfile.publish.ubuntu1404_cpu rename to ci/docker/Dockerfile.publish.centos7_gpu_cu101 index 8ccc41b2143c..0a7fd33b8bf1 100644 --- a/ci/docker/Dockerfile.publish.ubuntu1404_cpu +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu101 @@ -15,25 +15,25 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# -# Dockerfile to build and run MXNet on Ubuntu 14.04 for CPU -FROM ubuntu:14.04 +FROM nvidia/cuda:10.1-cudnn7-devel-centos7 WORKDIR /work/deps -COPY install/ubuntu_publish.sh /work/ -RUN /work/ubuntu_publish.sh - -COPY install/ubuntu_binutils.sh /work/ -RUN /work/ubuntu_binutils.sh +COPY install/centos7_base.sh /work/ +RUN /work/centos7_base.sh +COPY install/centos7_ccache.sh /work/ +RUN /work/centos7_ccache.sh +COPY install/centos7_python.sh /work/ +RUN /work/centos7_python.sh +COPY install/centos7_scala.sh /work/ +RUN /work/centos7_scala.sh ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ +COPY install/centos7_adduser.sh /work/ +RUN /work/centos7_adduser.sh +ENV PYTHONPATH=./python/ WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib + +COPY runtime_functions.sh /work/ diff --git a/ci/docker/Dockerfile.publish.centos7_gpu_cu102 b/ci/docker/Dockerfile.publish.centos7_gpu_cu102 new file mode 100644 index 000000000000..b7549ffbb668 --- /dev/null +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu102 @@ -0,0 +1,39 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +FROM nvidia/cuda:10.2-cudnn7-devel-centos7 + +WORKDIR /work/deps + +COPY install/centos7_base.sh /work/ +RUN /work/centos7_base.sh +COPY install/centos7_ccache.sh /work/ +RUN /work/centos7_ccache.sh +COPY install/centos7_python.sh /work/ +RUN /work/centos7_python.sh +COPY install/centos7_scala.sh /work/ +RUN /work/centos7_scala.sh + +ARG USER_ID=0 +COPY install/centos7_adduser.sh /work/ +RUN /work/centos7_adduser.sh + +ENV PYTHONPATH=./python/ +WORKDIR /work/mxnet + +COPY runtime_functions.sh /work/ diff --git a/ci/docker/Dockerfile.publish.ubuntu1404_gpu b/ci/docker/Dockerfile.publish.centos7_gpu_cu90 similarity index 69% rename from ci/docker/Dockerfile.publish.ubuntu1404_gpu rename to ci/docker/Dockerfile.publish.centos7_gpu_cu90 index 3a005cadecea..a562d2580a89 100644 --- a/ci/docker/Dockerfile.publish.ubuntu1404_gpu +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu90 @@ -15,25 +15,25 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# -# Dockerfile to run MXNet on Ubuntu 14.04 for GPU -FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu14.04 +FROM nvidia/cuda:9.0-cudnn7-devel-centos7 WORKDIR /work/deps -COPY install/ubuntu_publish.sh /work/ -RUN /work/ubuntu_publish.sh - -COPY install/ubuntu_binutils.sh /work/ -RUN /work/ubuntu_binutils.sh +COPY install/centos7_base.sh /work/ +RUN /work/centos7_base.sh +COPY install/centos7_ccache.sh /work/ +RUN /work/centos7_ccache.sh +COPY install/centos7_python.sh /work/ +RUN /work/centos7_python.sh +COPY install/centos7_scala.sh /work/ +RUN /work/centos7_scala.sh ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ +COPY install/centos7_adduser.sh /work/ +RUN /work/centos7_adduser.sh +ENV PYTHONPATH=./python/ WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib + +COPY runtime_functions.sh /work/ diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1404_cpu b/ci/docker/Dockerfile.publish.centos7_gpu_cu92 similarity index 69% rename from ci/docker/Dockerfile.publish.test.ubuntu1404_cpu rename to ci/docker/Dockerfile.publish.centos7_gpu_cu92 index 035837686554..c6dd32399b67 100644 --- a/ci/docker/Dockerfile.publish.test.ubuntu1404_cpu +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu92 @@ -15,25 +15,25 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# -# Dockerfile to build and run MXNet on Ubuntu 14.04 for CPU -FROM ubuntu:14.04 +FROM nvidia/cuda:9.2-cudnn7-devel-centos7 WORKDIR /work/deps -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_scala.sh /work/ -RUN /work/ubuntu_scala.sh +COPY install/centos7_base.sh /work/ +RUN /work/centos7_base.sh +COPY install/centos7_ccache.sh /work/ +RUN /work/centos7_ccache.sh +COPY install/centos7_python.sh /work/ +RUN /work/centos7_python.sh +COPY install/centos7_scala.sh /work/ +RUN /work/centos7_scala.sh ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ +COPY install/centos7_adduser.sh /work/ +RUN /work/centos7_adduser.sh +ENV PYTHONPATH=./python/ WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib + +COPY runtime_functions.sh /work/ diff --git a/ci/docker/install/centos7_base.sh b/ci/docker/install/centos7_base.sh index bf289fe034fc..72896cbc42ad 100755 --- a/ci/docker/install/centos7_base.sh +++ b/ci/docker/install/centos7_base.sh @@ -30,9 +30,13 @@ yum -y install make yum -y install unzip yum -y install ninja-build yum -y install gcc-gfortran +yum -y install automake +yum -y install autoconf +yum -y install libtool yum -y install protobuf-compiler yum -y install protobuf-devel yum -y install zeromq-devel +yum -y install patchelf # gcc7 yum -y install centos-release-scl diff --git a/ci/docker/install/centos7_core.sh b/ci/docker/install/centos7_core.sh index 1d858dd0a2f4..7f1c3d70aebc 100755 --- a/ci/docker/install/centos7_core.sh +++ b/ci/docker/install/centos7_core.sh @@ -39,6 +39,10 @@ yum -y install make yum -y install wget yum -y install unzip yum -y install ninja-build +yum -y install automake +yum -y install autoconf +yum -y install libtool +yum -y install patchelf # gcc7 yum -y install centos-release-scl diff --git a/ci/docker/install/ubuntu_publish.sh b/ci/docker/install/ubuntu_publish.sh deleted file mode 100755 index 4690a2c3dfad..000000000000 --- a/ci/docker/install/ubuntu_publish.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Build on Ubuntu 14.04 LTS for LINUX CPU/GPU -set -ex - -# replace https with http to force apt-get update to use http -# nvidia-docker no longer supports ubuntu 14.04 -# refer https://github.com/apache/incubator-mxnet/issues/18005 -sudo sed -i 's/https/http/g' /etc/apt/sources.list.d/*.list -apt-get update -apt-get install -y software-properties-common -add-apt-repository ppa:ubuntu-toolchain-r/test -y -add-apt-repository ppa:openjdk-r/ppa -y # Java lib -apt-get update -apt-get install -y git \ - cmake3 \ - ninja-build \ - libcurl4-openssl-dev \ - unzip \ - gcc-4.8 \ - g++-4.8 \ - gfortran \ - gfortran-4.8 \ - binutils \ - nasm \ - libtool \ - curl \ - wget \ - sudo \ - gnupg \ - gnupg2 \ - gnupg-agent \ - pandoc \ - python3-pip \ - automake \ - pkg-config \ - openjdk-8-jdk - -curl -o apache-maven-3.3.9-bin.tar.gz -L http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \ - || curl -o apache-maven-3.3.9-bin.tar.gz -L https://search.maven.org/remotecontent?filepath=org/apache/maven/apache-maven/3.3.9/apache-maven-3.3.9-bin.tar.gz - -tar xzf apache-maven-3.3.9-bin.tar.gz -mkdir /usr/local/maven -mv apache-maven-3.3.9/ /usr/local/maven/ -update-alternatives --install /usr/bin/mvn mvn /usr/local/maven/apache-maven-3.3.9/bin/mvn 1 -update-ca-certificates -f - -# patchelf available starting Ubuntu 16.04; compile from source for Ubuntu 14.04 -mkdir /usr/local/patchelf -cd /usr/local/patchelf -curl -L -o patchelf-0.10.tar.gz https://github.com/NixOS/patchelf/archive/0.10.tar.gz -tar xzf patchelf-0.10.tar.gz -cd /usr/local/patchelf/patchelf-0.10 -./bootstrap.sh -./configure -make -sudo make install -cd / - -apt-get install -y python python-pip python3 python3-pip - -# the version of the pip shipped with ubuntu may be too lower, install a recent version here -# Restrict pip version to <19 due to use of Python 3.4 on Ubuntu 14.04 -python3 -m pip install --upgrade 'pip<19' - -# Restrict numpy version to <1.18 due to use of Python 3.4 on Ubuntu 14.04 -python3 -m pip install --upgrade --ignore-installed nose cpplint==1.3.0 pylint==2.3.1 'numpy>1.16.0,<1.18' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 - -# CMake 3.13.2+ is required -mkdir /opt/cmake && cd /opt/cmake -wget -nv https://cmake.org/files/v3.13/cmake-3.13.5-Linux-x86_64.sh -sh cmake-3.13.5-Linux-x86_64.sh --prefix=/opt/cmake --skip-license -ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake -rm cmake-3.13.5-Linux-x86_64.sh -cmake --version diff --git a/ci/docker/install/ubuntu_scala.sh b/ci/docker/install/ubuntu_scala.sh index d223b8e173ae..355e978e075c 100755 --- a/ci/docker/install/ubuntu_scala.sh +++ b/ci/docker/install/ubuntu_scala.sh @@ -21,33 +21,11 @@ # the whole docker cache for the image set -ex -cd "$(dirname "$0")" -# install libraries for mxnet's scala package on ubuntu -echo 'Installing Scala...' -# Ubuntu 14.04 -if [[ $(lsb_release -r | grep 14.04) ]]; then - add-apt-repository -y ppa:openjdk-r/ppa -fi - -# All Ubuntu apt-get update || true apt-get install -y \ openjdk-8-jdk \ openjdk-8-jre \ software-properties-common \ - scala - -# Ubuntu 14.04 -if [[ $(lsb_release -r | grep 14.04) ]]; then - curl -o apache-maven-3.3.9-bin.tar.gz -L http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \ - || curl -o apache-maven-3.3.9-bin.tar.gz -L https://search.maven.org/remotecontent?filepath=org/apache/maven/apache-maven/3.3.9/apache-maven-3.3.9-bin.tar.gz - - tar xzf apache-maven-3.3.9-bin.tar.gz - mkdir /usr/local/maven - mv apache-maven-3.3.9/ /usr/local/maven/ - update-alternatives --install /usr/bin/mvn mvn /usr/local/maven/apache-maven-3.3.9/bin/mvn 1 - update-ca-certificates -f -else - apt-get install -y maven -fi + scala \ + maven diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index ae7613513df7..03f8b2c1c3d8 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -1943,6 +1943,7 @@ checkout() { build_static_libmxnet() { set -ex pushd . + source /opt/rh/devtoolset-7/enable local mxnet_variant=${1:?"This function requires a python command as the first argument"} source tools/staticbuild/build.sh ${mxnet_variant} popd @@ -1952,6 +1953,7 @@ build_static_libmxnet() { cd_package_pypi() { set -ex pushd . + source /opt/rh/devtoolset-7/enable local mxnet_variant=${1:?"This function requires a python command as the first argument"} ./cd/python/pypi/pypi_package.sh ${mxnet_variant} popd @@ -2006,8 +2008,7 @@ build_static_scala_cpu() { scala_prepare export MAVEN_PUBLISH_OS_TYPE=linux-x86_64-cpu export mxnet_variant=cpu - export CC=gcc-7 - export CXX=g++-7 + source /opt/rh/devtoolset-7/enable ./ci/publish/scala/build.sh popd } @@ -2016,8 +2017,7 @@ build_static_python_cpu() { set -ex pushd . export mxnet_variant=cpu - export CC=gcc-7 - export CXX=g++-7 + source /opt/rh/devtoolset-7/enable ./ci/publish/python/build.sh popd } @@ -2026,8 +2026,8 @@ build_static_python_cu101() { set -ex pushd . export mxnet_variant=cu101 - export CC=gcc-7 - export CXX=g++-7 + export USE_SYSTEM_CUDA=1 + source /opt/rh/devtoolset-7/enable ./ci/publish/python/build.sh popd } @@ -2037,8 +2037,7 @@ build_static_python_cpu_cmake() { pushd . export mxnet_variant=cpu export CMAKE_STATICBUILD=1 - export CC=gcc-7 - export CXX=g++-7 + source /opt/rh/devtoolset-7/enable ./ci/publish/python/build.sh popd } @@ -2048,8 +2047,8 @@ build_static_python_cu101_cmake() { pushd . export mxnet_variant=cu101 export CMAKE_STATICBUILD=1 - export CC=gcc-7 - export CXX=g++-7 + export USE_SYSTEM_CUDA=1 + source /opt/rh/devtoolset-7/enable ./ci/publish/python/build.sh popd } @@ -2058,8 +2057,7 @@ publish_scala_build() { set -ex pushd . scala_prepare - export CC=gcc-7 - export CXX=g++-7 + source /opt/rh/devtoolset-7/enable ./ci/publish/scala/build.sh popd } @@ -2068,8 +2066,6 @@ publish_scala_test() { set -ex pushd . scala_prepare - export CC=gcc-7 - export CXX=g++-7 ./ci/publish/scala/test.sh popd } @@ -2078,8 +2074,6 @@ publish_scala_deploy() { set -ex pushd . scala_prepare - export CC=gcc-7 - export CXX=g++-7 ./ci/publish/scala/deploy.sh popd } diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index ef942f551bc6..eddfd5bcd901 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -740,7 +740,7 @@ def test_static_scala_cpu() { ws('workspace/ut-publish-scala-cpu') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run("publish.ubuntu1404_cpu", 'build_static_scala_cpu', false) + utils.docker_run("publish.centos7_cpu", 'build_static_scala_cpu', false) } } } @@ -748,12 +748,12 @@ def test_static_scala_cpu() { } def test_static_python_cpu() { - return ['Static build CPU 14.04 Python' : { + return ['Static build CPU CentOS7 Python' : { node(NODE_LINUX_CPU) { ws('workspace/ut-publish-python-cpu') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run("publish.ubuntu1404_cpu", 'build_static_python_cpu', false) + utils.docker_run("publish.centos7_cpu", 'build_static_python_cpu', false) } } } @@ -761,25 +761,25 @@ def test_static_python_cpu() { } def test_static_python_cpu_cmake() { - return ['Static build CPU 14.04 Python with CMake' : { - node(NODE_LINUX_CPU) { - ws('workspace/ut-publish-python-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git() - utils.docker_run("publish.ubuntu1404_cpu", 'build_static_python_cpu_cmake', false) - } - } + return ['Static build CPU CentOS7 Python with CMake' : { + node(NODE_LINUX_CPU) { + ws('workspace/ut-publish-python-cpu') { + timeout(time: max_time, unit: 'MINUTES') { + utils.init_git() + utils.docker_run("publish.centos7_cpu", 'build_static_python_cpu_cmake', false) + } } - }] + } + }] } def test_static_python_gpu() { - return ['Static build GPU 14.04 Python' : { + return ['Static build GPU CentOS7 Python' : { node(NODE_LINUX_GPU) { ws('workspace/ut-publish-python-gpu') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run("publish.ubuntu1404_gpu", 'build_static_python_cu101', true) + utils.docker_run("publish.centos7_gpu_cu101", 'build_static_python_cu101', true) } } } @@ -787,16 +787,16 @@ def test_static_python_gpu() { } def test_static_python_gpu_cmake() { - return ['Static build GPU 14.04 Python' : { - node(NODE_LINUX_GPU) { - ws('workspace/ut-publish-python-gpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git() - utils.docker_run("publish.ubuntu1404_gpu", 'build_static_python_cu101_cmake', true) - } - } + return ['Static build GPU CentOS7 Python with CMake' : { + node(NODE_LINUX_GPU) { + ws('workspace/ut-publish-python-gpu') { + timeout(time: max_time, unit: 'MINUTES') { + utils.init_git() + utils.docker_run("publish.centos7_gpu_cu101", 'build_static_python_cu101_cmake', true) + } } - }] + } + }] } def test_unix_python3_cpu() { diff --git a/ci/publish/Jenkinsfile b/ci/publish/Jenkinsfile index ed09b4c2ef0f..366758d85665 100644 --- a/ci/publish/Jenkinsfile +++ b/ci/publish/Jenkinsfile @@ -57,7 +57,7 @@ for (x in labels) { toBuild["Scala Build ${label}"] = wrapStep(nodeMap['cpu'], "build-scala-${label}") { withEnv(["MAVEN_PUBLISH_OS_TYPE=${scalaOSMap[label]}", "mxnet_variant=${scalaVariantMap[label]}"]) { utils.init_git() - utils.docker_run("publish.ubuntu1404_cpu", 'publish_scala_build', false, '500m', 'MAVEN_PUBLISH_OS_TYPE mxnet_variant') + utils.docker_run("publish.centos7_cpu", 'publish_scala_build', false, '500m', 'MAVEN_PUBLISH_OS_TYPE mxnet_variant') utils.pack_lib("scala_${label}", mx_scala_pub, false) } } diff --git a/ci/publish/README.md b/ci/publish/README.md index cdd70ce82258..3d315a9a57ec 100644 --- a/ci/publish/README.md +++ b/ci/publish/README.md @@ -30,11 +30,14 @@ Currently, we are supporting tests in the following systems: - Ubuntu 18.04 - Cent OS 7 -All packages are currently built in `Ubuntu 14.04`. All Dockerfile used for publishing are available in `ci/docker/` with prefix `Dockerfile.publish`. +All packages are currently built in `Cent OS 7` with Developer Toolset 7. +Developer Toolset 7 provides `GCC 7` with C++17 support on `Cent OS 7`, enabling +us to build binaries that support all major Linux distributions released after +2014 (cf. Python Enhancement Proposals 599). All Dockerfile used for publishing +are available in `ci/docker/` with prefix `Dockerfile.publish`. Apart from that, the script used to create the environment and publish are available under `ci/docker/install`: -- `ubuntu_publish.sh` installs all required dependencies for Ubuntu 14.04 for publishing - `ubuntu_base.sh` installs minimum dependencies required to run the published packages ## Scala publishing diff --git a/tools/dependencies/README.md b/tools/dependencies/README.md index ec1e80088895..c45f33328bbc 100644 --- a/tools/dependencies/README.md +++ b/tools/dependencies/README.md @@ -228,7 +228,6 @@ Please run performance test aginast the MXNet you build before raising the PR. - [ ] Python/setup.py - [ ] tools/pip/setup.py - [ ] ci/docker/install/requirements -- [ ] ci/docker/install/ubuntu_publish.sh - [ ] ci/docker/install/ubuntu_python.sh - [ ] ci/qemu/mxnet_requirements.txt - [ ] docs/install/requirements.txt diff --git a/tools/dependencies/make_shared_dependencies.sh b/tools/dependencies/make_shared_dependencies.sh index 9c86c11024d5..96d3561d446b 100755 --- a/tools/dependencies/make_shared_dependencies.sh +++ b/tools/dependencies/make_shared_dependencies.sh @@ -65,4 +65,5 @@ source $DIR/cityhash.sh source $DIR/zmq.sh source $DIR/lz4.sh -export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$(dirname $(find $DEPS_PATH -type f -name 'libprotoc*' | grep protobuf | head -n 1)):$DEPS_PATH/lib +export LIBRARY_PATH=${LIBRARY_PATH}:$(dirname $(find $DEPS_PATH -type f -name 'libprotoc*' | grep protobuf | head -n 1)):$DEPS_PATH/lib:$DEPS_PATH/lib64 +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$(dirname $(find $DEPS_PATH -type f -name 'libprotoc*' | grep protobuf | head -n 1)):$DEPS_PATH/lib:$DEPS_PATH/lib64 diff --git a/tools/dependencies/zmq.sh b/tools/dependencies/zmq.sh index 11d7063200b5..33ea628d53bb 100755 --- a/tools/dependencies/zmq.sh +++ b/tools/dependencies/zmq.sh @@ -37,5 +37,11 @@ if [[ ! -f $DEPS_PATH/lib/libzmq.a ]]; then -D BUILD_SHARED_LIBS=OFF .. $MAKE $MAKE install + + if [[ ! -f $DEPS_PATH/lib/libzmq.a ]]; then + mkdir -p $DEPS_PATH/lib + cp $DEPS_PATH/lib64/*zmq* $DEPS_PATH/lib + fi + popd fi diff --git a/tools/staticbuild/build.sh b/tools/staticbuild/build.sh index f33ce9d711bc..e5fd24368ed3 100755 --- a/tools/staticbuild/build.sh +++ b/tools/staticbuild/build.sh @@ -54,7 +54,7 @@ export FC="gfortran" export PKG_CONFIG_PATH=$DEPS_PATH/lib/pkgconfig:$DEPS_PATH/lib64/pkgconfig:$DEPS_PATH/lib/x86_64-linux-gnu/pkgconfig:$PKG_CONFIG_PATH export CPATH=$DEPS_PATH/include:$CPATH -if [[ $PLATFORM == 'linux' && $VARIANT == cu* ]]; then +if [[ -z "$USE_SYSTEM_CUDA" && $PLATFORM == 'linux' && $VARIANT == cu* ]]; then source tools/setup_gpu_build_tools.sh $VARIANT $DEPS_PATH fi diff --git a/tools/staticbuild/build_lib.sh b/tools/staticbuild/build_lib.sh index 6cceced6f27a..989070ac7078 100755 --- a/tools/staticbuild/build_lib.sh +++ b/tools/staticbuild/build_lib.sh @@ -40,14 +40,8 @@ $MAKE DEPS_PATH=$DEPS_PATH mkldnn $MAKE DEPS_PATH=$DEPS_PATH if [[ $PLATFORM == 'linux' ]]; then - if [[ -f /usr/lib/gcc/x86_64-linux-gnu/4.8/libgfortran.so ]]; then - cp -L /usr/lib/gcc/x86_64-linux-gnu/4.8/libgfortran.so lib/libgfortran.so.3 - elif [[ -f /usr/lib/x86_64-linux-gnu/libgfortran.so.3 ]]; then - cp -L /usr/lib/x86_64-linux-gnu/libgfortran.so.3 lib/libgfortran.so.3 - else - cp -L /usr/lib/x86_64-linux-gnu/libgfortran.so.4 lib/libgfortran.so.4 - fi - cp -L /usr/lib/x86_64-linux-gnu/libquadmath.so.0 lib/libquadmath.so.0 + cp -L $(ldd lib/libmxnet.so | grep libgfortran | awk '{print $3}') lib/ + cp -L $(ldd lib/libmxnet.so | grep libquadmath | awk '{print $3}') lib/ fi # Print the linked objects on libmxnet.so diff --git a/tools/staticbuild/build_lib_cmake.sh b/tools/staticbuild/build_lib_cmake.sh index 6a4bbec7afcf..5261b2a6942a 100755 --- a/tools/staticbuild/build_lib_cmake.sh +++ b/tools/staticbuild/build_lib_cmake.sh @@ -39,14 +39,8 @@ rm -rf lib; mkdir lib; if [[ $PLATFORM == 'linux' ]]; then cp -L build/libmxnet.so lib/libmxnet.so cp -L staticdeps/lib/libopenblas.so lib/libopenblas.so.0 - if [[ -f /usr/lib/gcc/x86_64-linux-gnu/4.8/libgfortran.so ]]; then - cp -L /usr/lib/gcc/x86_64-linux-gnu/4.8/libgfortran.so lib/libgfortran.so.3 - elif [[ -f /usr/lib/x86_64-linux-gnu/libgfortran.so.3 ]]; then - cp -L /usr/lib/x86_64-linux-gnu/libgfortran.so.3 lib/libgfortran.so.3 - else - cp -L /usr/lib/x86_64-linux-gnu/libgfortran.so.4 lib/libgfortran.so.4 - fi - cp -L /usr/lib/x86_64-linux-gnu/libquadmath.so.0 lib/libquadmath.so.0 + cp -L $(ldd lib/libmxnet.so | grep libgfortran | awk '{print $3}') lib/ + cp -L $(ldd lib/libmxnet.so | grep libquadmath | awk '{print $3}') lib/ elif [[ $PLATFORM == 'darwin' ]]; then cp -L build/libmxnet.dylib lib/libmxnet.dylib fi From cc74ec48fbd6ab302ca7981b765c3846e32c0ac6 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Wed, 8 Apr 2020 19:15:39 +0000 Subject: [PATCH 09/20] Fix MKLDNNFCBackward --- src/operator/nn/mkldnn/mkldnn_fully_connected.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/operator/nn/mkldnn/mkldnn_fully_connected.cc b/src/operator/nn/mkldnn/mkldnn_fully_connected.cc index 1cf9e2269b60..6e8a1505e15e 100644 --- a/src/operator/nn/mkldnn/mkldnn_fully_connected.cc +++ b/src/operator/nn/mkldnn/mkldnn_fully_connected.cc @@ -299,7 +299,9 @@ void MKLDNNFCBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx, MKLDNNStream::Get()->RegisterPrimArgs( mkldnn::inner_product_backward_weights(ipBwdWeights_pd), args); CommitOutput(in_grad[fullc::kWeight], in_grad_weight); - CommitOutput(in_grad[fullc::kBias], in_grad_bias); + if (!param.no_bias) { + CommitOutput(in_grad[fullc::kBias], in_grad_bias); + } } if (req[fullc::kData]) { mkldnn::inner_product_backward_data::primitive_desc ipBwdData_pd = GetFCBwdData( From 8f648c3ae85818e580b8b626732b6769a634da86 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Fri, 10 Apr 2020 21:59:49 +0000 Subject: [PATCH 10/20] Cleanup random.h --- 3rdparty/mshadow/mshadow/base.h | 24 ------- 3rdparty/mshadow/mshadow/random.h | 103 +----------------------------- include/mxnet/base.h | 12 ---- 3 files changed, 1 insertion(+), 138 deletions(-) diff --git a/3rdparty/mshadow/mshadow/base.h b/3rdparty/mshadow/mshadow/base.h index a99838422348..cdca74b04f84 100755 --- a/3rdparty/mshadow/mshadow/base.h +++ b/3rdparty/mshadow/mshadow/base.h @@ -119,18 +119,6 @@ typedef unsigned __int64 uint64_t; #define MSHADOW_OLD_CUDA 0 #endif -/*! - * \brief macro to decide existence of c++11 compiler - */ -#ifndef MSHADOW_IN_CXX11 - #if (defined(__GXX_EXPERIMENTAL_CXX0X__) ||\ - __cplusplus >= 201103L || defined(_MSC_VER)) - #define MSHADOW_IN_CXX11 1 - #else - #define MSHADOW_IN_CXX11 0 - #endif -#endif - /*! \brief whether use SSE */ #ifndef MSHADOW_USE_SSE #define MSHADOW_USE_SSE 1 @@ -207,13 +195,6 @@ extern "C" { /*! \brief cpu force inline */ #define MSHADOW_CINLINE MSHADOW_FORCE_INLINE -#if defined(__GXX_EXPERIMENTAL_CXX0X) ||\ - defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L - #define MSHADOW_CONSTEXPR constexpr -#else - #define MSHADOW_CONSTEXPR const -#endif - /*! * \brief default data type for tensor string * in code release, change it to default_real_t @@ -231,13 +212,8 @@ extern "C" { #define MSHADOW_USE_GLOG DMLC_USE_GLOG #endif // MSHADOW_USE_GLOG -#if DMLC_USE_CXX11 #define MSHADOW_THROW_EXCEPTION noexcept(false) #define MSHADOW_NO_EXCEPTION noexcept(true) -#else -#define MSHADOW_THROW_EXCEPTION -#define MSHADOW_NO_EXCEPTION -#endif #if defined(_MSC_VER) #define MSHADOW_ALIGNED(x) __declspec(align(x)) diff --git a/3rdparty/mshadow/mshadow/random.h b/3rdparty/mshadow/mshadow/random.h index e6e620cc32e0..259dbe163227 100644 --- a/3rdparty/mshadow/mshadow/random.h +++ b/3rdparty/mshadow/mshadow/random.h @@ -14,14 +14,7 @@ #include "./base.h" #include "./tensor.h" #include "./tensor_container.h" - -#if MSHADOW_IN_CXX11 -#include // use cxx11 random by default -#endif - -#if _MSC_VER -#define rand_r(x) rand() -#endif +#include namespace mshadow { @@ -52,9 +45,7 @@ class Random { * \param seed seed of prng */ inline void Seed(int seed) { -#if MSHADOW_IN_CXX11 rnd_engine_.seed(seed); -#endif this->rseed_ = static_cast(seed); } /*! @@ -71,9 +62,6 @@ class Random { inline void set_stream(Stream *stream) { } -// These samplers are only avail in C++11. -#if MSHADOW_IN_CXX11 - /*! * \brief get some random integer * \return integer as unsigned @@ -226,7 +214,6 @@ class Random { return static_cast(dist_poisson(rnd_engine_));}); } } -#endif /*! * \brief return a temporal expression storing standard gaussian random variables @@ -270,98 +257,10 @@ class Random { } private: -#if MSHADOW_IN_CXX11 /*! \brief use c++11 random engine. */ std::mt19937 rnd_engine_; /*! \brief random number seed used in random engine */ uint64_t rseed_; - -#else - - /*! \brief random number seed used by PRNG */ - unsigned rseed_; - // functions - template - inline void SampleUniform(Tensor *dst, - DType a = 0.0f, DType b = 1.0f) { - if (dst->CheckContiguous()) { - this->GenUniform(dst->dptr_, dst->shape_.Size(), a, b); - } else { - Tensor mat = dst->FlatTo2D(); - for (index_t i = 0; i < mat.size(0); ++i) { - this->GenUniform(mat[i].dptr_, mat.size(1), a, b); - } - } - } - template - inline void SampleGaussian(Tensor *dst, - DType mu = 0.0f, DType sigma = 1.0f) { - if (sigma <= 0.0f) { - *dst = mu; return; - } - if (dst->CheckContiguous()) { - this->GenGaussian(dst->dptr_, dst->shape_.Size(), mu, sigma); - } else { - Tensor mat = dst->FlatTo2D(); - for (index_t i = 0; i < mat.size(0); ++i) { - this->GenGaussian(mat[i].dptr_, mat.size(1), mu, sigma); - } - } - } - inline void GenUniform(float *dptr, index_t size, float a, float b) { - for (index_t j = 0; j < size; ++j) { - dptr[j] = static_cast(RandNext()) * (b - a) + a; - } - } - inline void GenUniform(double *dptr, index_t size, double a, double b) { - for (index_t j = 0; j < size; ++j) { - dptr[j] = static_cast(RandNext()) * (b - a) + a; - } - } - inline void GenGaussian(float *dptr, index_t size, float mu, float sigma) { - this->GenGaussianX(dptr, size, mu, sigma); - } - inline void GenGaussian(double *dptr, index_t size, double mu, double sigma) { - this->GenGaussianX(dptr, size, mu, sigma); - } - inline void GenGaussianX(DType *dptr, index_t size, DType mu, DType sigma) { - DType g1 = 0.0f, g2 = 0.0f; - for (index_t j = 0; j < size; ++j) { - if ((j & 1) == 0) { - this->SampleNormal2D(&g1, &g2); - dptr[j] = mu + g1 * sigma; - } else { - dptr[j] = mu + g2 * sigma; - } - } - } - /*! \brief get next random number from rand */ - inline DType RandNext(void) { - return static_cast(rand_r(&rseed_)) / - (static_cast(RAND_MAX) + 1.0f); - } - /*! \brief return a real numer uniform in (0,1) */ - inline DType RandNext2(void) { - return (static_cast(rand_r(&rseed_)) + 1.0f) / - (static_cast(RAND_MAX) + 2.0f); - } - /*! - * \brief sample iid xx,yy ~N(0,1) - * \param xx first gaussian output - * \param yy second gaussian output - */ - inline void SampleNormal2D(DType *xx_, DType *yy_) { - DType &xx = *xx_, &yy = *yy_; - DType x, y, s; - do { - x = 2.0f * RandNext2() - 1.0f; - y = 2.0f * RandNext2() - 1.0f; - s = x * x + y * y; - } while (s >= 1.0f || s == 0.0f); - DType t = std::sqrt(-2.0f * std::log(s) / s); - xx = x * t; yy = y * t; - } -#endif /*! \brief temporal space used to store random numbers */ TensorContainer buffer_; }; // class Random diff --git a/include/mxnet/base.h b/include/mxnet/base.h index 860028393e49..aa0021d543a0 100644 --- a/include/mxnet/base.h +++ b/include/mxnet/base.h @@ -38,18 +38,6 @@ #include "tuple.h" -/*! - * \brief define compatible keywords in g++ - * Used to support g++-4.6 and g++4.7 - */ -#if DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__) -#if __GNUC__ == 4 && __GNUC_MINOR__ < 8 -#error "Currently we need g++ 4.8 or higher to fully support c++11 features" -#define override -#define final -#endif -#endif - /*! * \brief define dllexport for Visual Studio */ From 104074a3ebf442d1660d50e101afd68db9fa89a1 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Fri, 10 Apr 2020 19:01:06 +0000 Subject: [PATCH 11/20] Support platforms without rand_r --- .../multi_threaded_inference.cc | 5 +- src/operator/contrib/dgl_graph.cc | 73 +++++++++++++------ src/operator/nn/mkldnn/mkldnn_rnn.cc | 4 +- src/operator/rnn-inl.h | 20 +++-- src/operator/rnn.cc | 1 + src/operator/rnn_impl.h | 29 ++++---- tests/cpp/engine/threaded_engine_test.cc | 14 ++-- tests/cpp/thread_safety/thread_safety_test.cc | 16 ++-- 8 files changed, 106 insertions(+), 56 deletions(-) diff --git a/example/multi_threaded_inference/multi_threaded_inference.cc b/example/multi_threaded_inference/multi_threaded_inference.cc index e90d55307e53..8b1864feea93 100644 --- a/example/multi_threaded_inference/multi_threaded_inference.cc +++ b/example/multi_threaded_inference/multi_threaded_inference.cc @@ -34,6 +34,7 @@ #include #include #include "mxnet-cpp/MxNetCpp.h" +#include const float DEFAULT_MEAN = 117.0; @@ -248,7 +249,9 @@ void run_inference(const std::string& model_name, const std::vector distribution(0, 5); + int sleep_time = distribution(generator); std::this_thread::sleep_for(std::chrono::seconds(sleep_time)); } int num_output = 0; diff --git a/src/operator/contrib/dgl_graph.cc b/src/operator/contrib/dgl_graph.cc index 428899791a5d..89bee8abf655 100644 --- a/src/operator/contrib/dgl_graph.cc +++ b/src/operator/contrib/dgl_graph.cc @@ -24,6 +24,9 @@ #include #include #include +#include +#include + #include "../elemwise_op_common.h" #include "../../imperative/imperative_utils.h" #include "../subgraph_op_common.h" @@ -41,7 +44,9 @@ typedef int64_t dgl_id_t; */ class ArrayHeap { public: - explicit ArrayHeap(const std::vector& prob) { + explicit ArrayHeap(const std::vector& prob, unsigned int seed) { + generator_ = std::mt19937(seed); + distribution_ = std::uniform_real_distribution(0.0, 1.0); vec_size_ = prob.size(); bit_len_ = ceil(log2(vec_size_)); limit_ = 1 << bit_len_; @@ -86,8 +91,8 @@ class ArrayHeap { /* * Sample from arrayHeap */ - size_t Sample(unsigned int* seed) { - float xi = heap_[1] * (rand_r(seed)%100/101.0); + size_t Sample() { + float xi = heap_[1] * distribution_(generator_); int i = 1; while (i < limit_) { i = i << 1; @@ -102,10 +107,10 @@ class ArrayHeap { /* * Sample a vector by given the size n */ - void SampleWithoutReplacement(size_t n, std::vector* samples, unsigned int* seed) { + void SampleWithoutReplacement(size_t n, std::vector* samples) { // sample n elements for (size_t i = 0; i < n; ++i) { - samples->at(i) = this->Sample(seed); + samples->at(i) = this->Sample(); this->Delete(samples->at(i)); } } @@ -115,6 +120,8 @@ class ArrayHeap { int bit_len_; // bit size int limit_; std::vector heap_; + std::mt19937 generator_; + std::uniform_real_distribution distribution_; }; struct NeighborSampleParam : public dmlc::Parameter { @@ -402,10 +409,12 @@ static bool CSRNeighborNonUniformSampleType(const nnvm::NodeAttrs& attrs, static void RandomSample(size_t set_size, size_t num, std::vector* out, - unsigned int* seed) { + unsigned int seed) { + std::mt19937 generator(seed); std::unordered_set sampled_idxs; + std::uniform_int_distribution distribution(0, set_size - 1); while (sampled_idxs.size() < num) { - sampled_idxs.insert(rand_r(seed) % set_size); + sampled_idxs.insert(distribution(generator)); } out->clear(); for (auto it = sampled_idxs.begin(); it != sampled_idxs.end(); it++) { @@ -441,7 +450,7 @@ static void GetUniformSample(const dgl_id_t* val_list, const size_t max_num_neighbor, std::vector* out_ver, std::vector* out_edge, - unsigned int* seed) { + unsigned int seed) { // Copy ver_list to output if (ver_len <= max_num_neighbor) { for (size_t i = 0; i < ver_len; ++i) { @@ -485,7 +494,7 @@ static void GetNonUniformSample(const float* probability, const size_t max_num_neighbor, std::vector* out_ver, std::vector* out_edge, - unsigned int* seed) { + unsigned int seed) { // Copy ver_list to output if (ver_len <= max_num_neighbor) { for (size_t i = 0; i < ver_len; ++i) { @@ -500,8 +509,8 @@ static void GetNonUniformSample(const float* probability, for (size_t i = 0; i < ver_len; ++i) { sp_prob[i] = probability[col_list[i]]; } - ArrayHeap arrayHeap(sp_prob); - arrayHeap.SampleWithoutReplacement(max_num_neighbor, &sp_index, seed); + ArrayHeap arrayHeap(sp_prob, seed); + arrayHeap.SampleWithoutReplacement(max_num_neighbor, &sp_index); out_ver->resize(max_num_neighbor); out_edge->resize(max_num_neighbor); for (size_t i = 0; i < max_num_neighbor; ++i) { @@ -536,8 +545,8 @@ static void SampleSubgraph(const NDArray &csr, const float* probability, int num_hops, size_t num_neighbor, - size_t max_num_vertices) { - unsigned int time_seed = time(nullptr); + size_t max_num_vertices, + unsigned int random_seed) { size_t num_seeds = seed_arr.shape().Size(); CHECK_GE(max_num_vertices, num_seeds); @@ -594,7 +603,7 @@ static void SampleSubgraph(const NDArray &csr, num_neighbor, &tmp_sampled_src_list, &tmp_sampled_edge_list, - &time_seed); + random_seed); } else { // non-uniform-sample GetNonUniformSample(probability, val_list + *(indptr + dst_id), @@ -603,7 +612,7 @@ static void SampleSubgraph(const NDArray &csr, num_neighbor, &tmp_sampled_src_list, &tmp_sampled_edge_list, - &time_seed); + random_seed); } CHECK_EQ(tmp_sampled_src_list.size(), tmp_sampled_edge_list.size()); size_t pos = neighbor_list.size(); @@ -720,12 +729,15 @@ static void CSRNeighborUniformSampleComputeExCPU(const nnvm::NodeAttrs& attrs, const std::vector& inputs, const std::vector& req, const std::vector& outputs) { - const NeighborSampleParam& params = - nnvm::get(attrs.parsed); + const NeighborSampleParam& params = nnvm::get(attrs.parsed); int num_subgraphs = inputs.size() - 1; CHECK_EQ(outputs.size(), 3 * num_subgraphs); + mshadow::Stream *s = ctx.get_stream(); + mshadow::Random *prnd = ctx.requested[0].get_random(s); + unsigned int seed = prnd->GetRandInt(); + #pragma omp parallel for for (int i = 0; i < num_subgraphs; i++) { SampleSubgraph(inputs[0], // graph_csr @@ -737,7 +749,12 @@ static void CSRNeighborUniformSampleComputeExCPU(const nnvm::NodeAttrs& attrs, nullptr, // probability params.num_hops, params.num_neighbor, - params.max_num_vertices); + params.max_num_vertices, +#if defined(_OPENMP) + seed + omp_get_thread_num()); +#else + seed); +#endif } } @@ -798,6 +815,9 @@ of max_num_vertices, and the valid number of vertices is the same as the ones in .set_attr("FInferShape", CSRNeighborUniformSampleShape) .set_attr("FInferType", CSRNeighborUniformSampleType) .set_attr("FComputeEx", CSRNeighborUniformSampleComputeExCPU) +.set_attr("FResourceRequest", [](const NodeAttrs& attrs) { + return std::vector{ResourceRequest::kRandom}; +}) .add_argument("csr_matrix", "NDArray-or-Symbol", "csr matrix") .add_argument("seed_arrays", "NDArray-or-Symbol[]", "seed vertices") .set_attr("key_var_num_args", "num_args") @@ -811,14 +831,17 @@ static void CSRNeighborNonUniformSampleComputeExCPU(const nnvm::NodeAttrs& attrs const std::vector& inputs, const std::vector& req, const std::vector& outputs) { - const NeighborSampleParam& params = - nnvm::get(attrs.parsed); + const NeighborSampleParam& params = nnvm::get(attrs.parsed); int num_subgraphs = inputs.size() - 2; CHECK_EQ(outputs.size(), 4 * num_subgraphs); const float* probability = inputs[1].data().dptr(); + mshadow::Stream *s = ctx.get_stream(); + mshadow::Random *prnd = ctx.requested[0].get_random(s); + unsigned int seed = prnd->GetRandInt(); + #pragma omp parallel for for (int i = 0; i < num_subgraphs; i++) { float* sub_prob = outputs[i+2*num_subgraphs].data().dptr(); @@ -831,7 +854,12 @@ static void CSRNeighborNonUniformSampleComputeExCPU(const nnvm::NodeAttrs& attrs probability, params.num_hops, params.num_neighbor, - params.max_num_vertices); + params.max_num_vertices, +#if defined(_OPENMP) + seed + omp_get_thread_num()); +#else + seed); +#endif } } @@ -897,6 +925,9 @@ of max_num_vertices, and the valid number of vertices is the same as the ones in .set_attr("FInferShape", CSRNeighborNonUniformSampleShape) .set_attr("FInferType", CSRNeighborNonUniformSampleType) .set_attr("FComputeEx", CSRNeighborNonUniformSampleComputeExCPU) +.set_attr("FResourceRequest", [](const NodeAttrs& attrs) { + return std::vector{ResourceRequest::kRandom}; +}) .add_argument("csr_matrix", "NDArray-or-Symbol", "csr matrix") .add_argument("probability", "NDArray-or-Symbol", "probability vector") .add_argument("seed_arrays", "NDArray-or-Symbol[]", "seed vertices") diff --git a/src/operator/nn/mkldnn/mkldnn_rnn.cc b/src/operator/nn/mkldnn/mkldnn_rnn.cc index c830080cee6d..c33ad484ddda 100644 --- a/src/operator/nn/mkldnn/mkldnn_rnn.cc +++ b/src/operator/nn/mkldnn/mkldnn_rnn.cc @@ -995,7 +995,7 @@ void MKLDNNRnnOp::Forward(const OpContext &ctx, const std::vector &inputs, const std::vector &req, const std::vector &outputs) { - TmpMemMgr::Get()->Init(ctx.requested[0]); + TmpMemMgr::Get()->Init(ctx.requested[1]); // In the `autograd.record()` context, RNNOp is required to run into // forward_training mode. const bool is_training = (ctx.is_train || ctx.need_grad); @@ -1132,7 +1132,7 @@ void MKLDNNRnnOp::Backward(const OpContext& ctx, const std::vector& req, const std::vector& outputs) { using tag = mkldnn::memory::format_tag; - TmpMemMgr::Get()->Init(ctx.requested[0]); + TmpMemMgr::Get()->Init(ctx.requested[1]); const RNNParam& default_param = full_param_.default_param; const int data_dtype = inputs[rnn_enum::kData].dtype(); const int w_dtype = inputs[rnn_enum::kParams].dtype(); diff --git a/src/operator/rnn-inl.h b/src/operator/rnn-inl.h index 63f30b17d1b9..24c9985e7346 100644 --- a/src/operator/rnn-inl.h +++ b/src/operator/rnn-inl.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -293,23 +294,24 @@ void RNNForwardTraining(DType* ws, DType* hy_ptr, DType* cy_ptr, const float dropout, - int mode) { + int mode, + std::mt19937 &rnd_engine) { // NOLINT(runtime/references) switch (mode) { case rnn_enum::kLstm: LstmForwardTraining(ws, rs, state_outputs, num_layers, direction, seq_length, batch_size, input_size, state_size, x_ptr, hx_ptr, cx_ptr, - w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, dropout); + w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, dropout, rnd_engine); break; case rnn_enum::kGru: GruForwardTraining(ws, rs, state_outputs, num_layers, direction, seq_length, batch_size, input_size, state_size, x_ptr, hx_ptr, - w_ptr, y_ptr, hy_ptr, dropout); + w_ptr, y_ptr, hy_ptr, dropout, rnd_engine); break; case rnn_enum::kRnnTanh: case rnn_enum::kRnnRelu: VanillaRNNForwardTraining(ws, rs, state_outputs, num_layers, direction, seq_length, batch_size, input_size, state_size, x_ptr, hx_ptr, - w_ptr, y_ptr, hy_ptr, dropout, mode); + w_ptr, y_ptr, hy_ptr, dropout, mode, rnd_engine); break; default: LOG(FATAL) << "unknown RNN mode " << mode; @@ -842,7 +844,8 @@ class RNNOp { } #endif // MXNET_USE_CUDNN == 1 && defined(__CUDACC__) - if (ctx_.dev_type == kCPU) { +#if !defined(__CUDACC__) // cuda doesn't support C++17 + if constexpr (std::is_same::value) { int projection_size = 0; if (param_.projection_size.has_value()) { projection_size = param_.projection_size.value(); @@ -860,6 +863,9 @@ class RNNOp { DType* work_cpu_space = static_cast(temp_cpu_space_.data().dptr_); if (ctx.is_train || ctx.need_grad) { + mshadow::Random *prnd = ctx.requested[0].get_random(s); + std::mt19937 &rnd_engine = prnd->GetRndEngine(); + // allocate reserve space if (param_.projection_size.has_value()) { LOG(FATAL) << "No training support for LSTM with projection on CPU currently."; @@ -894,7 +900,8 @@ class RNNOp { hy_ptr, cy_ptr, param_.p, - param_.mode); + param_.mode, + rnd_engine); } else { RNNForwardInference(work_cpu_space, param_.state_outputs, @@ -916,6 +923,7 @@ class RNNOp { param_.mode); } } +#endif } void Backward(const OpContext &ctx, diff --git a/src/operator/rnn.cc b/src/operator/rnn.cc index ecd38a88736d..7ff8a2f993bc 100644 --- a/src/operator/rnn.cc +++ b/src/operator/rnn.cc @@ -184,6 +184,7 @@ static std::vector RNNResourceEx(const NodeAttrs& attrs, const } #endif } else { + request.emplace_back(ResourceRequest::kRandom); #if MXNET_USE_MKLDNN == 1 request.emplace_back(ResourceRequest::kTempSpace); #endif diff --git a/src/operator/rnn_impl.h b/src/operator/rnn_impl.h index 5acf4eb7b3bd..779ac8839d6c 100644 --- a/src/operator/rnn_impl.h +++ b/src/operator/rnn_impl.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -139,7 +140,8 @@ void LstmForwardTraining(DType* ws, DType* y_ptr, DType* hy_ptr, DType* cy_ptr, - const float dropout) { + const float dropout, + std::mt19937 &rnd_engine) { // NOLINT(runtime/references) DType* dropout_random = rs; DType* rs2 = dropout_random + (L - 1) * D * T * N * H; const int total_layers = D * L; @@ -149,7 +151,6 @@ void LstmForwardTraining(DType* ws, const index_t r_size = D * T * N * H * 6; const index_t y_offset = T * N * H * 5; const index_t cell_size = N * H; - unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn) int idx = 0; // state & cell state's idx; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (int i = 0; i < L; ++i) { @@ -174,10 +175,9 @@ void LstmForwardTraining(DType* ws, w_ptr += w_size; b_ptr += b_size; if (dropout > 0.0f) { - #pragma omp parallel for num_threads(omp_threads) + std::uniform_real_distribution distribution(0, 1); for (index_t j = 0; j < T * N * H * D; j++) { - int rand_data = rand_r(&seed_); - if (static_cast(rand_data % 1000) < static_cast(1000 * dropout)) { + if (distribution(rnd_engine) < dropout) { dropout_random[i * T * N * H * D + j] = 0; y.dptr_[j] = 0; } else { @@ -1000,7 +1000,8 @@ void GruForwardTraining(DType* ws, DType* w_ptr, DType* y_ptr, DType* hy_ptr, - const float dropout) { + const float dropout, + std::mt19937 &rnd_engine) { // NOLINT(runtime/references) DType* wx = w_ptr; DType* wh = wx + I * H * 3; DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) @@ -1021,18 +1022,15 @@ void GruForwardTraining(DType* ws, DType* bx_l = bx; DType* bh_l = bh; DType* y_tmp = x_ptr; - unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn) for (int l = 0; l < L; l++) { if (l != 0) { y_tmp = y_l; y_l = y_l + T * N * H * D; } if (dropout > 0.0f && l > 0) { - const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); - #pragma omp parallel for num_threads(omp_threads) + std::uniform_real_distribution distribution(0, 1); for (index_t i = 0; i < T * N * I; i++) { - int rand_data = rand_r(&seed_); - if (static_cast(rand_data % 1000) < static_cast(1000 * dropout)) { + if (distribution(rnd_engine) < dropout) { dropout_random[(l - 1) * T * N * I + i] = 0; y_tmp[i] = 0; } else { @@ -1889,7 +1887,8 @@ void VanillaRNNForwardTraining(DType* ws, DType* y_ptr, DType* hy_ptr, const float dropout, - int mode) { + int mode, + std::mt19937 &rnd_engine) { // NOLINT(runtime/references) DType* wx = w_ptr; DType* wh = wx + I * H; DType* bx = wh + H * H + (D - 1) * (H * H + I * H) @@ -1908,17 +1907,15 @@ void VanillaRNNForwardTraining(DType* ws, DType* bh_l = bh; DType* y_tmp = x_ptr; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); - unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn) for (int l = 0; l < L; l++) { if (l != 0) { y_tmp = y_l; y_l = y_l + T * N * H * D; } if (dropout > 0.0f && l > 0) { - #pragma omp parallel for num_threads(omp_threads) + std::uniform_real_distribution distribution(0, 1); for (index_t i = 0; i < T * N * I; i++) { - int rand_data = rand_r(&seed_); - if (static_cast(rand_data % 1000) < static_cast(1000 * dropout)) { + if (distribution(rnd_engine) < dropout) { dropout_random[(l - 1) * T * N * I + i] = 0; y_tmp[i] = 0; } else { diff --git a/tests/cpp/engine/threaded_engine_test.cc b/tests/cpp/engine/threaded_engine_test.cc index cea92a01e799..e1e3a53e656c 100644 --- a/tests/cpp/engine/threaded_engine_test.cc +++ b/tests/cpp/engine/threaded_engine_test.cc @@ -35,6 +35,7 @@ #include #include #include +#include #include "../src/engine/engine_impl.h" #include "../include/test_util.h" @@ -62,15 +63,18 @@ void GenerateWorkload(int num_workloads, int num_var, std::vector* workloads) { workloads->clear(); workloads->resize(num_workloads); + static thread_local std::mt19937 generator; + std::uniform_int_distribution distribution_var(0, num_var - 1); + std::uniform_int_distribution distribution_time(min_time, max_time - 1); + std::uniform_int_distribution distribution_read(min_read, max_read - 1); for (int i = 0; i < num_workloads; ++i) { auto& wl = workloads->at(i); - wl.write = rand_r(&seed_) % num_var; - int r = rand_r(&seed_); - int num_read = min_read + (r % (max_read - min_read)); + wl.write = distribution_var(generator); + int num_read = distribution_read(generator); for (int j = 0; j < num_read; ++j) { - wl.reads.push_back(rand_r(&seed_) % num_var); + wl.reads.push_back(distribution_var(generator)); } - wl.time = min_time + rand_r(&seed_) % (max_time - min_time); + wl.time = distribution_time(generator); } } diff --git a/tests/cpp/thread_safety/thread_safety_test.cc b/tests/cpp/thread_safety/thread_safety_test.cc index 1f811d8c3fd7..9566adfd9d13 100644 --- a/tests/cpp/thread_safety/thread_safety_test.cc +++ b/tests/cpp/thread_safety/thread_safety_test.cc @@ -25,15 +25,17 @@ #if MXNET_USE_CPP_PACKAGE == 1 #include #include -#include #include -#include +#include #include #include +#include +#include #include "../src/engine/engine_impl.h" #include "../src/imperative/imperative_utils.h" #include "../include/test_util.h" #include "mxnet-cpp/MxNetCpp.h" + /* * Prepares input data for the ops/models used in this file */ @@ -298,8 +300,10 @@ void run_inference(const std::string& model, unsigned next = num; for (size_t i = 0; i < num_inf_per_thread; ++i) { if (random_sleep) { - int sleep_time = rand_r(&next) % 5; - std::this_thread::sleep_for(std::chrono::seconds(sleep_time)); + static thread_local std::mt19937 generator; + std::uniform_int_distribution distribution(0, 5); + int sleep_time = distribution(generator); + std::this_thread::sleep_for(std::chrono::seconds(sleep_time)); } int num_output = 0; const int *stypes; @@ -479,7 +483,9 @@ void run_inference_unsupported(const std::string& model, unsigned next = num; for (size_t i = 0; i < num_inf_per_thread; ++i) { if (random_sleep) { - int sleep_time = rand_r(&next) % 5; + static thread_local std::mt19937 generator; + std::uniform_int_distribution distribution(0, 5); + int sleep_time = distribution(generator); std::this_thread::sleep_for(std::chrono::seconds(sleep_time)); } int num_output = 0; From 36c87c3521a227d978e06f95a1a1447121feb967 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Wed, 8 Apr 2020 21:59:50 +0000 Subject: [PATCH 12/20] Update edge toolchain --- CMakeLists.txt | 2 +- ci/build.py | 11 +- ci/docker/Dockerfile.build.android_armv7 | 94 +++----- ci/docker/Dockerfile.build.android_armv8 | 92 +++----- ci/docker/Dockerfile.build.armv6 | 46 ++-- ci/docker/Dockerfile.build.armv7 | 55 +++-- ci/docker/Dockerfile.build.armv8 | 53 +++-- ci/docker/Dockerfile.build.jetson | 91 ++++---- ci/docker/install/android_armv7_openblas.sh | 31 --- ci/docker/install/android_ndk.sh | 38 --- ci/docker/install/arm64_openblas.sh | 35 --- ci/docker/install/deb_ubuntu_ccache.sh | 26 +-- ci/docker/install/ubuntu_arm.sh | 28 --- ci/docker/runtime_functions.sh | 81 ++++--- .../aarch64-linux-gnu-toolchain.cmake} | 24 +- .../arm-linux-gnueabihf-toolchain.cmake} | 21 +- cmake/upstream/FindCUDAToolkit.cmake | 205 ++++++++++++----- make/crosscompile.jetson.mk | 216 ------------------ src/operator/random/shuffle_op.cc | 2 +- 19 files changed, 434 insertions(+), 717 deletions(-) delete mode 100755 ci/docker/install/android_armv7_openblas.sh delete mode 100755 ci/docker/install/android_ndk.sh delete mode 100755 ci/docker/install/arm64_openblas.sh delete mode 100755 ci/docker/install/ubuntu_arm.sh rename ci/docker/{install/android_arm64_openblas.sh => toolchains/aarch64-linux-gnu-toolchain.cmake} (64%) mode change 100755 => 100644 rename ci/docker/{install/arm_openblas.sh => toolchains/arm-linux-gnueabihf-toolchain.cmake} (65%) mode change 100755 => 100644 delete mode 100644 make/crosscompile.jetson.mk diff --git a/CMakeLists.txt b/CMakeLists.txt index 4cf24bbcae53..1ca92ff19a93 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -180,7 +180,7 @@ endif() if(NOT mxnet_LINKER_LIBS) set(mxnet_LINKER_LIBS "") -endif(NOT mxnet_LINKER_LIBS) +endif() if(USE_GPROF) message(STATUS "Using GPROF") diff --git a/ci/build.py b/ci/build.py index a21ec44942a8..cbc41218f042 100755 --- a/ci/build.py +++ b/ci/build.py @@ -70,7 +70,8 @@ def get_docker_binary(use_nvidia_docker: bool) -> str: return "nvidia-docker" if use_nvidia_docker else "docker" -def build_docker(platform: str, docker_binary: str, registry: str, num_retries: int, no_cache: bool) -> str: +def build_docker(platform: str, docker_binary: str, registry: str, num_retries: int, no_cache: bool, + cache_intermediate: bool) -> str: """ Build a container for the given platform :param platform: Platform @@ -104,6 +105,8 @@ def build_docker(platform: str, docker_binary: str, registry: str, num_retries: "--build-arg", "GROUP_ID={}".format(os.getgid())] if no_cache: cmd.append("--no-cache") + if cache_intermediate: + cmd.append("--rm=false") elif registry: cmd.extend(["--cache-from", tag]) cmd.extend(["-t", tag, get_dockerfiles_path()]) @@ -330,6 +333,9 @@ def main() -> int: parser.add_argument("--no-cache", action="store_true", help="passes --no-cache to docker build") + parser.add_argument("--cache-intermediate", action="store_true", + help="passes --rm=false to docker build") + parser.add_argument("-e", "--environment", nargs="*", default=[], help="Environment variables for the docker container. " "Specify with a list containing either names or name=value") @@ -361,7 +367,8 @@ def main() -> int: load_docker_cache(tag=tag, docker_registry=args.docker_registry) if not args.run_only: build_docker(platform=platform, docker_binary=docker_binary, registry=args.docker_registry, - num_retries=args.docker_build_retries, no_cache=args.no_cache) + num_retries=args.docker_build_retries, no_cache=args.no_cache, + cache_intermediate=args.cache_intermediate) else: logging.info("Skipping docker build step.") diff --git a/ci/docker/Dockerfile.build.android_armv7 b/ci/docker/Dockerfile.build.android_armv7 index 2c923a015b63..96ca04e9f5e6 100644 --- a/ci/docker/Dockerfile.build.android_armv7 +++ b/ci/docker/Dockerfile.build.android_armv7 @@ -18,62 +18,41 @@ # # Dockerfile to build MXNet for Android ARMv7 -FROM dockcross/base -MAINTAINER Pedro Larroy "pllarroy@amazon.com" - -# The cross-compiling emulator -RUN apt-get update && apt-get install -y \ - unzip - -ENV CROSS_TRIPLE=arm-linux-androideabi -ENV CROSS_ROOT=/usr/${CROSS_TRIPLE} -ENV AS=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-as \ - AR=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-ar \ - CC=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-gcc \ - CPP=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-cpp \ - CXX=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-g++ \ - LD=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-ld - -ENV ANDROID_NDK_REVISION 17b -ENV ANDROID_NDK_API 27 -ENV ANDROID_NDK_ARCH arm -WORKDIR /work/deps -COPY install/android_ndk.sh /work/deps -RUN /work/deps/android_ndk.sh - -ENV DEFAULT_DOCKCROSS_IMAGE dockcross/android-arm - -# Build-time metadata as defined at http://label-schema.org -ARG BUILD_DATE -ARG IMAGE -ARG VCS_REF -ARG VCS_URL -LABEL org.label-schema.build-date=$BUILD_DATE \ - org.label-schema.name=$IMAGE \ - org.label-schema.vcs-ref=$VCS_REF \ - org.label-schema.vcs-url=$VCS_URL \ - org.label-schema.schema-version="1.0" - - -ENV CC=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-clang -ENV CXX=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-clang++ - -WORKDIR /work/deps - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh -WORKDIR /work -COPY install/ubuntu_arm.sh /work/ -RUN /work/ubuntu_arm.sh - -COPY install/arm_openblas.sh /work/ -COPY install/android_armv7_openblas.sh /work/deps -RUN /work/deps/android_armv7_openblas.sh - -ENV OpenBLAS_HOME=${CROSS_ROOT} -ENV OpenBLAS_DIR=${CROSS_ROOT} - -WORKDIR /work +FROM ubuntu:20.04 + +ENV ARCH=armv7l \ + HOSTCC=gcc \ + HOSTCXX=g++ \ + TARGET=ARMV7 + +WORKDIR /usr/local + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + ninja-build \ + cmake \ + ccache \ + git \ + curl \ + unzip \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -o android-ndk-r19-linux-x86_64.zip -L https://dl.google.com/android/repository/android-ndk-r19-linux-x86_64.zip && \ + unzip android-ndk-r19-linux-x86_64.zip && \ + rm android-ndk-r19-linux-x86_64.zip +ENV CMAKE_TOOLCHAIN_FILE=/usr/local/android-ndk-r19/build/cmake/android.toolchain.cmake + +RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ + mkdir /usr/local/openblas-android && \ + cd /usr/local/OpenBLAS && \ + export TOOLCHAIN=/usr/local/android-ndk-r19/toolchains/llvm/prebuilt/linux-x86_64 && \ + make NOFORTRAN=1 ARM_SOFTFP_ABI=1 \ + LDFLAGS="-L/usr/local/android-ndk-r19/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x -lm" \ + CC=$TOOLCHAIN/bin/armv7a-linux-androideabi16-clang AR=$TOOLCHAIN/bin/arm-linux-androideabi-ar && \ + make PREFIX=/usr/local/openblas-android install && \ + cd /usr/local && \ + rm -rf OpenBLAS +ENV OpenBLAS_HOME=/usr/local/openblas-android ARG USER_ID=0 ARG GROUP_ID=0 @@ -81,5 +60,4 @@ COPY install/ubuntu_adduser.sh /work/ RUN /work/ubuntu_adduser.sh COPY runtime_functions.sh /work/ -WORKDIR /work/mxnet - +WORKDIR /work/build diff --git a/ci/docker/Dockerfile.build.android_armv8 b/ci/docker/Dockerfile.build.android_armv8 index ca62288129bb..81adc80edf14 100644 --- a/ci/docker/Dockerfile.build.android_armv8 +++ b/ci/docker/Dockerfile.build.android_armv8 @@ -18,62 +18,41 @@ # # Dockerfile to build MXNet for Android ARM64/ARMv8 -FROM dockcross/base -MAINTAINER Pedro Larroy "pllarroy@amazon.com" - -RUN apt-get update && apt-get install -y \ - unzip - -WORKDIR /work/deps - -# Build x86 dependencies. -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -# Setup Android cross-compilation environment. -ENV CROSS_TRIPLE=aarch64-linux-android -ENV CROSS_ROOT=/usr/${CROSS_TRIPLE} -ENV AS=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-as \ - AR=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-ar \ - CC=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-gcc \ - CPP=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-cpp \ - CXX=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-g++ \ - LD=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-ld - - -ENV DEFAULT_DOCKCROSS_IMAGE dockcross/android-arm - -# Build-time metadata as defined at http://label-schema.org -ARG BUILD_DATE -ARG IMAGE -ARG VCS_REF -ARG VCS_URL -LABEL org.label-schema.build-date=$BUILD_DATE \ - org.label-schema.name=$IMAGE \ - org.label-schema.vcs-ref=$VCS_REF \ - org.label-schema.vcs-url=$VCS_URL \ - org.label-schema.schema-version="1.0" - -ENV ARCH aarch64 -ENV ANDROID_NDK_REVISION 17b -ENV ANDROID_NDK_API 27 -ENV ANDROID_NDK_ARCH arm64 -WORKDIR /work/deps -COPY install/android_ndk.sh /work/deps -RUN /work/deps/android_ndk.sh - - -WORKDIR /work/deps -COPY install/android_ndk.sh /work/ -RUN /work/android_ndk.sh - -ENV CC=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-clang -ENV CXX=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-clang++ - -# Build ARM dependencies. -COPY install/android_arm64_openblas.sh /work/ -RUN /work/android_arm64_openblas.sh -ENV CPLUS_INCLUDE_PATH /work/deps/OpenBLAS +FROM ubuntu:20.04 + +ENV ARCH=aarch64 \ + HOSTCC=gcc \ + HOSTCXX=g++ \ + TARGET=ARMV8 + +WORKDIR /usr/local + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + ninja-build \ + cmake \ + ccache \ + git \ + curl \ + unzip \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -o android-ndk-r19-linux-x86_64.zip -L https://dl.google.com/android/repository/android-ndk-r19-linux-x86_64.zip && \ + unzip android-ndk-r19-linux-x86_64.zip && \ + rm android-ndk-r19-linux-x86_64.zip +ENV CMAKE_TOOLCHAIN_FILE=/usr/local/android-ndk-r19/build/cmake/android.toolchain.cmake + +RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ + mkdir /usr/local/openblas-android && \ + cd /usr/local/OpenBLAS && \ + export TOOLCHAIN=/usr/local/android-ndk-r19/toolchains/llvm/prebuilt/linux-x86_64 && \ + make NOFORTRAN=1 \ + LDFLAGS="-L/usr/local/android-ndk-r21/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/lib/gcc/aarch64-linux-android/4.9.x -lm" \ + CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang AR=$TOOLCHAIN/bin/aarch64-linux-android-ar && \ + make PREFIX=/usr/local/openblas-android install && \ + cd /usr/local && \ + rm -rf OpenBLAS +ENV OpenBLAS_HOME=/usr/local/openblas-android ARG USER_ID=0 ARG GROUP_ID=0 @@ -81,5 +60,4 @@ COPY install/ubuntu_adduser.sh /work/ RUN /work/ubuntu_adduser.sh COPY runtime_functions.sh /work/ - WORKDIR /work/build diff --git a/ci/docker/Dockerfile.build.armv6 b/ci/docker/Dockerfile.build.armv6 index 98414d8c2aff..02e16da11616 100644 --- a/ci/docker/Dockerfile.build.armv6 +++ b/ci/docker/Dockerfile.build.armv6 @@ -18,26 +18,42 @@ # # Dockerfile to build MXNet for ARMv6 -FROM dockcross/linux-armv6 +FROM ubuntu:20.04 -ENV ARCH armv6l -ENV HOSTCC gcc-6 -ENV HOSTCXX g++-6 -ENV TARGET ARMV6 +ENV ARCH=armv6l \ + HOSTCC=gcc \ + HOSTCXX=g++ \ + TARGET=ARMV6 -WORKDIR /work/deps +WORKDIR /usr/local -COPY install/ubuntu_arm.sh /work/ -RUN /work/ubuntu_arm.sh +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + ninja-build \ + cmake \ + ccache \ + git \ + curl \ + zip \ + python3 \ + python3-pip \ + && rm -rf /var/lib/apt/lists/* -COPY install/arm_openblas.sh /work/ -RUN /work/arm_openblas.sh +# We use a toolchain from toolchains.bootlin.com instead of Debian / Ubunut +# crossbuild-essential-armel toolchain, as the latter targets ARM architecture +# versions 4T, 5T, and 6, whereas we only wish to target ARMV6 and like to use +# ARMV6 specific features. https://wiki.debian.org/ArmEabiPort +RUN curl -o armv6-eabihf--glibc--stable-2020.02-2.tar.bz2 -L https://toolchains.bootlin.com/downloads/releases/toolchains/armv6-eabihf/tarballs/armv6-eabihf--glibc--stable-2020.02-2.tar.bz2 && \ + tar xf armv6-eabihf--glibc--stable-2020.02-2.tar.bz2 && \ + rm armv6-eabihf--glibc--stable-2020.02-2.tar.bz2 +ENV CMAKE_TOOLCHAIN_FILE=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/share/buildroot/toolchainfile.cmake -ENV OpenBLAS_HOME=${CROSS_ROOT} -ENV OpenBLAS_DIR=${CROSS_ROOT} - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh +RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ + cd /usr/local/OpenBLAS && \ + make NOFORTRAN=1 CC=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/bin/arm-linux-gcc && \ + make PREFIX=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/arm-buildroot-linux-gnueabihf/sysroot install && \ + cd /usr/local && \ + rm -rf OpenBLAS ARG USER_ID=0 ARG GROUP_ID=0 diff --git a/ci/docker/Dockerfile.build.armv7 b/ci/docker/Dockerfile.build.armv7 index f0cdd90322f0..a9cc6d1e83a4 100644 --- a/ci/docker/Dockerfile.build.armv7 +++ b/ci/docker/Dockerfile.build.armv7 @@ -16,28 +16,39 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build MXNet for Android ARMv7 - -FROM dockcross/linux-armv7 - -ENV ARCH armv7l -ENV HOSTCC gcc-7 -ENV HOSTCXX g++-7 -ENV TARGET ARMV7 - -WORKDIR /work/deps - -COPY install/ubuntu_arm.sh /work/ -RUN /work/ubuntu_arm.sh - -COPY install/arm_openblas.sh /work/ -RUN /work/arm_openblas.sh - -ENV OpenBLAS_HOME=${CROSS_ROOT} -ENV OpenBLAS_DIR=${CROSS_ROOT} - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh +# Dockerfile to build MXNet for ARMv7 + +FROM ubuntu:20.04 + +ENV ARCH=armv7l \ + HOSTCC=gcc \ + HOSTCXX=g++ \ + TARGET=ARMV7 + +WORKDIR /usr/local + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + ninja-build \ + cmake \ + ccache \ + git \ + curl \ + zip \ + python3 \ + python3-pip \ + crossbuild-essential-armhf \ + && rm -rf /var/lib/apt/lists/* + +COPY toolchains/arm-linux-gnueabihf-toolchain.cmake /usr/local +ENV CMAKE_TOOLCHAIN_FILE=/usr/local/arm-linux-gnueabihf-toolchain.cmake + +RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ + cd /usr/local/OpenBLAS && \ + make NOFORTRAN=1 CC=arm-linux-gnueabihf-gcc && \ + make PREFIX=/usr/local/arm-linux-gnueabihf install && \ + cd /usr/local && \ + rm -rf OpenBLAS ARG USER_ID=0 ARG GROUP_ID=0 diff --git a/ci/docker/Dockerfile.build.armv8 b/ci/docker/Dockerfile.build.armv8 index 2f918e55d991..adf6873fb40c 100644 --- a/ci/docker/Dockerfile.build.armv8 +++ b/ci/docker/Dockerfile.build.armv8 @@ -18,26 +18,37 @@ # # Dockerfile to build MXNet for ARM64/ARMv8 -FROM dockcross/linux-arm64 - -ENV ARCH aarch64 -ENV HOSTCC gcc-7 -ENV HOSTCXX g++-7 -ENV TARGET ARMV8 - -WORKDIR /work/deps - -COPY install/ubuntu_arm.sh /work/ -RUN /work/ubuntu_arm.sh - -COPY install/arm_openblas.sh /work/ -RUN /work/arm_openblas.sh - -ENV OpenBLAS_HOME=${CROSS_ROOT} -ENV OpenBLAS_DIR=${CROSS_ROOT} - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh +FROM ubuntu:20.04 + +ENV ARCH=aarch64 \ + HOSTCC=gcc \ + HOSTCXX=g++ \ + TARGET=ARMV8 + +WORKDIR /usr/local + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + ninja-build \ + cmake \ + ccache \ + git \ + curl \ + zip \ + python3 \ + python3-pip \ + crossbuild-essential-arm64 \ + && rm -rf /var/lib/apt/lists/* + +COPY toolchains/aarch64-linux-gnu-toolchain.cmake /usr +ENV CMAKE_TOOLCHAIN_FILE=/usr/aarch64-linux-gnu-toolchain.cmake + +RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ + cd /usr/local/OpenBLAS && \ + make NOFORTRAN=1 CC=aarch64-linux-gnu-gcc && \ + make PREFIX=/usr/aarch64-linux-gnu install && \ + cd /usr/local && \ + rm -rf OpenBLAS ARG USER_ID=0 ARG GROUP_ID=0 @@ -45,4 +56,4 @@ COPY install/ubuntu_adduser.sh /work/ RUN /work/ubuntu_adduser.sh COPY runtime_functions.sh /work/ -WORKDIR /work/build +WORKDIR /work/mxnet diff --git a/ci/docker/Dockerfile.build.jetson b/ci/docker/Dockerfile.build.jetson index 55575d17970f..93fe5e0a5b0d 100644 --- a/ci/docker/Dockerfile.build.jetson +++ b/ci/docker/Dockerfile.build.jetson @@ -20,63 +20,58 @@ # This script assumes /work/mxnet exists and contains the mxnet code you wish to compile and # that /work/build exists and is the target for your output. -FROM nvidia/cuda:9.0-cudnn7-devel as cudabuilder +FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04 -FROM dockcross/linux-arm64 +ENV ARCH=aarch64 \ + HOSTCC=gcc \ + TARGET=ARMV8 -ENV ARCH aarch64 -ENV HOSTCC gcc -ENV TARGET ARMV8 +WORKDIR /usr/local -WORKDIR /work/deps +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + ninja-build \ + git \ + curl \ + zip \ + unzip \ + python3 \ + python3-pip \ + awscli \ + crossbuild-essential-arm64 \ + && rm -rf /var/lib/apt/lists/* -COPY install/ubuntu_arm.sh /work/ -RUN /work/ubuntu_arm.sh - -COPY install/arm_openblas.sh /work/ -RUN /work/arm_openblas.sh - -ENV OpenBLAS_HOME=${CROSS_ROOT} -ENV OpenBLAS_DIR=${CROSS_ROOT} +# cmake on Ubuntu 18.04 is too old +RUN python3 -m pip install cmake +# ccache on Ubuntu 18.04 is too old to support Cuda correctly COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh -# Setup CUDA build env (including configuring and copying nvcc) -COPY --from=cudabuilder /usr/local/cuda /usr/local/cuda -ENV TARGET_ARCH aarch64 -ENV TARGET_OS linux +COPY toolchains/aarch64-linux-gnu-toolchain.cmake /usr +ENV CMAKE_TOOLCHAIN_FILE=/usr/aarch64-linux-gnu-toolchain.cmake + +RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ + cd /usr/local/OpenBLAS && \ + make NOFORTRAN=1 CC=aarch64-linux-gnu-gcc && \ + make PREFIX=/usr/aarch64-linux-gnu install && \ + cd /usr/local && \ + rm -rf OpenBLAS -# Install ARM depedencies based on Jetpack 3.3 -RUN JETPACK_DOWNLOAD_PREFIX=https://developer.download.nvidia.com/devzone/devcenter/mobile/jetpack_l4t/3.3/lw.xd42/JetPackL4T_33_b39 && \ - CUDA_REPO_PREFIX=/var/cuda-repo-9-0-local && \ - ARM_CUDA_INSTALLER_PACKAGE=cuda-repo-l4t-9-0-local_9.0.252-1_arm64.deb && \ - ARM_CUDNN_INSTALLER_PACKAGE=libcudnn7_7.1.5.14-1+cuda9.0_arm64.deb && \ - ARM_CUDNN_DEV_INSTALLER_PACKAGE=libcudnn7-dev_7.1.5.14-1+cuda9.0_arm64.deb && \ - ARM_LICENSE_INSTALLER=cuda-license-9-0_9.0.252-1_arm64.deb && \ - ARM_CUBLAS_INSTALLER=cuda-cublas-9-0_9.0.252-1_arm64.deb && \ - ARM_NVINFER_INSTALLER_PACKAGE=libnvinfer4_4.1.3-1+cuda9.0_arm64.deb && \ - ARM_NVINFER_DEV_INSTALLER_PACKAGE=libnvinfer-dev_4.1.3-1+cuda9.0_arm64.deb && \ - dpkg --add-architecture arm64 && \ - wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDA_INSTALLER_PACKAGE && \ - wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDNN_INSTALLER_PACKAGE && \ - wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDNN_DEV_INSTALLER_PACKAGE && \ - wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_NVINFER_INSTALLER_PACKAGE && \ - wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_NVINFER_DEV_INSTALLER_PACKAGE && \ - dpkg -i --force-architecture $ARM_CUDA_INSTALLER_PACKAGE && \ - apt-key add $CUDA_REPO_PREFIX/7fa2af80.pub && \ - dpkg -i --force-architecture $ARM_CUDNN_INSTALLER_PACKAGE && \ - dpkg -i --force-architecture $ARM_CUDNN_DEV_INSTALLER_PACKAGE && \ - dpkg -i --force-architecture $CUDA_REPO_PREFIX/$ARM_LICENSE_INSTALLER && \ - dpkg -i --force-architecture $CUDA_REPO_PREFIX/$ARM_CUBLAS_INSTALLER && \ - dpkg -i --force-architecture $ARM_NVINFER_INSTALLER_PACKAGE && \ - dpkg -i --force-architecture $ARM_NVINFER_DEV_INSTALLER_PACKAGE && \ - apt update -y || true && apt install -y cuda-libraries-dev-9-0 libcudnn7-dev libnvinfer-dev -RUN ln -s /usr/include/aarch64-linux-gnu/cudnn_v7.h /usr/include/aarch64-linux-gnu/cudnn.h -ENV PATH $PATH:/usr/local/cuda/bin -ENV NVCCFLAGS "-m64" -ENV CUDA_ARCH "-gencode arch=compute_53,code=sm_53 -gencode arch=compute_62,code=sm_62" -ENV NVCC /usr/local/cuda/bin/nvcc +# Install aarch64 cross depedencies based on Jetpack 4.3 +# Manually downloaded using SDK Manager tool and placed in a private S3 bucket. +# We're not allowed to redistribute these files and there is no public version. +RUN aws s3 cp s3://mxnet-ci-prod-private-slave-data/nvidia/sdkm_downloads/cuda-repo-ubuntu1804-10-0-local-10.0.326-410.108_1.0-1_amd64.deb . && \ + dpkg -i cuda-repo-ubuntu1804-10-0-local-10.0.326-410.108_1.0-1_amd64.deb && \ + rm cuda-repo-ubuntu1804-10-0-local-10.0.326-410.108_1.0-1_amd64.deb && \ + apt-key add /var/cuda-repo-10-0-local-10.0.326-410.108/7fa2af80.pub && \ + aws s3 cp s3://mxnet-ci-prod-private-slave-data/nvidia/sdkm_downloads/cuda-repo-cross-aarch64-10-0-local-10.0.326_1.0-1_all.deb . && \ + dpkg -i cuda-repo-cross-aarch64-10-0-local-10.0.326_1.0-1_all.deb && \ + rm cuda-repo-cross-aarch64-10-0-local-10.0.326_1.0-1_all.deb && \ + apt-get update && \ + apt-get install -y -f && \ + apt-get install -y cuda-cross-aarch64 cuda-cross-aarch64-10-0 && \ + rm -rf /var/lib/apt/lists/* ARG USER_ID=0 ARG GROUP_ID=0 diff --git a/ci/docker/install/android_armv7_openblas.sh b/ci/docker/install/android_armv7_openblas.sh deleted file mode 100755 index 8642df6d9450..000000000000 --- a/ci/docker/install/android_armv7_openblas.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image - -set -ex -pushd . -git clone https://github.com/xianyi/OpenBLAS.git -cd OpenBLAS -make TARGET=ARMV7 HOSTCC=gcc-7 HOSTCXX=g++-7 NOFORTRAN=1 ARM_SOFTFP_ABI=1 -j$(nproc) libs -#make PREFIX=${CROSS_ROOT} TARGET=ARMV7 HOSTCC=gcc NOFORTRAN=1 ARM_SOFTFP_ABI=1 install -cp *.h ${CROSS_ROOT}/include -cp libopenblas*.a ${CROSS_ROOT}/lib -popd diff --git a/ci/docker/install/android_ndk.sh b/ci/docker/install/android_ndk.sh deleted file mode 100755 index cb83aa65639a..000000000000 --- a/ci/docker/install/android_ndk.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image - -set -ex -pushd . -# This environment variable comes from the docker file -echo "Downloading android SDK rev ${ANDROID_NDK_REVISION}" -curl -O https://dl.google.com/android/repository/android-ndk-r${ANDROID_NDK_REVISION}-linux-x86_64.zip && \ -unzip ./android-ndk-r${ANDROID_NDK_REVISION}-linux-x86_64.zip && \ -cd android-ndk-r${ANDROID_NDK_REVISION} && \ -./build/tools/make_standalone_toolchain.py \ - --stl=libc++ \ - --arch ${ANDROID_NDK_ARCH}\ - --api ${ANDROID_NDK_API}\ - --install-dir=${CROSS_ROOT} && \ - -find ${CROSS_ROOT} -exec chmod a+r '{}' \; && \ -find ${CROSS_ROOT} -executable -exec chmod a+x '{}' \; -popd diff --git a/ci/docker/install/arm64_openblas.sh b/ci/docker/install/arm64_openblas.sh deleted file mode 100755 index 88f2e98cd65b..000000000000 --- a/ci/docker/install/arm64_openblas.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image - -set -ex -pushd . -wget -nv https://api.github.com/repos/xianyi/OpenBLAS/git/refs/heads/master -O openblas_version.json -echo "Using openblas:" -cat openblas_version.json -git clone https://github.com/xianyi/OpenBLAS.git -cd OpenBLAS -make -j$(nproc) TARGET=ARMV8 -make install -ln -s /opt/OpenBLAS/lib/libopenblas.so /usr/lib/libopenblas.so -ln -s /opt/OpenBLAS/lib/libopenblas.a /usr/lib/libopenblas.a -ln -s /opt/OpenBLAS/lib/libopenblas.a /usr/lib/liblapack.a -popd diff --git a/ci/docker/install/deb_ubuntu_ccache.sh b/ci/docker/install/deb_ubuntu_ccache.sh index cdc9354e220f..ef913ba36e55 100755 --- a/ci/docker/install/deb_ubuntu_ccache.sh +++ b/ci/docker/install/deb_ubuntu_ccache.sh @@ -23,7 +23,7 @@ set -ex pushd . -apt update || true +apt update apt install -y \ autoconf \ gperf \ @@ -32,31 +32,9 @@ apt install -y \ mkdir -p /work/deps cd /work/deps -# Unset ARM toolchain cross-compilation configuration on dockcross -unset ARCH -unset DEFAULT_DOCKCROSS_IMAGE -unset CROSS_TRIPLE -unset CC -unset AS -unset AR -unset FC -unset CXX -unset CROSS_ROOT -unset CROSS_COMPILE -unset PKG_CONFIG_PATH -unset CMAKE_TOOLCHAIN_FILE -unset CPP -unset LD -export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - git clone --recursive https://github.com/ccache/ccache.git cd ccache -git checkout v3.7.8 -# Backport cuda related fixes: https://github.com/ccache/ccache/pull/381 -git config user.name "MXNet CI" -git config user.email "MXNetCI@example.com" -git cherry-pick --strategy-option=theirs c4fffda031034f930df2cf188878b8f9160027df -git cherry-pick 0dec5c2df3e3ebc1fbbf33f74c992bef6264f37a +git checkout v3.7.9 ./autogen.sh ./configure --disable-man diff --git a/ci/docker/install/ubuntu_arm.sh b/ci/docker/install/ubuntu_arm.sh deleted file mode 100755 index 608d0362f138..000000000000 --- a/ci/docker/install/ubuntu_arm.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -ex - -apt update || true -apt install -y \ - unzip \ - python3 \ - python3-pip - -pip3 install setuptools diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 03f8b2c1c3d8..4e7b0db1b648 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -216,15 +216,22 @@ build_dynamic_libmxnet() { build_jetson() { set -ex - pushd . - - export CC=gcc-6 - export CXX=g++-6 - cp make/crosscompile.jetson.mk ./config.mk - make -j$(nproc) - - build_wheel /work/mxnet/python /work/mxnet/lib - popd + cd /work/build + cmake \ + -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} \ + -DUSE_CUDA=ON \ + -DMXNET_CUDA_ARCH="5.2" \ + -DENABLE_CUDA_RTC=OFF \ + -DSUPPORT_F16C=OFF \ + -DUSE_OPENCV=OFF \ + -DUSE_OPENMP=ON \ + -DUSE_LAPACK=OFF \ + -DUSE_SIGNAL_HANDLER=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DUSE_MKL_IF_AVAILABLE=OFF \ + -G Ninja /work/mxnet + ninja + build_wheel } # @@ -252,7 +259,7 @@ build_armv6() { -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_LAPACK=OFF \ -DBUILD_CPP_EXAMPLES=OFF \ - -Dmxnet_LINKER_LIBS=-lgfortran \ + -Dmxnet_LINKER_LIBS=-latomic \ -G Ninja /work/mxnet ninja @@ -279,7 +286,6 @@ build_armv7() { -DUSE_MKL_IF_AVAILABLE=OFF \ -DUSE_LAPACK=OFF \ -DBUILD_CPP_EXAMPLES=OFF \ - -Dmxnet_LINKER_LIBS=-lgfortran \ -G Ninja /work/mxnet ninja @@ -289,14 +295,15 @@ build_armv7() { build_armv8() { cd /work/build cmake \ - -DUSE_CUDA=OFF\ - -DSUPPORT_F16C=OFF\ - -DUSE_OPENCV=OFF\ + -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} \ + -DUSE_CUDA=OFF \ + -DSUPPORT_F16C=OFF \ + -DUSE_OPENCV=OFF \ -DUSE_OPENMP=ON \ - -DUSE_LAPACK=OFF\ - -DUSE_SIGNAL_HANDLER=ON\ - -DCMAKE_BUILD_TYPE=Release\ - -DUSE_MKL_IF_AVAILABLE=OFF\ + -DUSE_LAPACK=OFF \ + -DUSE_SIGNAL_HANDLER=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DUSE_MKL_IF_AVAILABLE=OFF \ -G Ninja /work/mxnet ninja build_wheel @@ -311,15 +318,18 @@ build_android_armv7() { set -ex cd /work/build cmake \ - -DANDROID=ON\ - -DUSE_CUDA=OFF\ - -DUSE_SSE=OFF\ - -DSUPPORT_F16C=OFF\ - -DUSE_LAPACK=OFF\ - -DUSE_OPENCV=OFF\ - -DUSE_OPENMP=OFF\ - -DUSE_SIGNAL_HANDLER=ON\ - -DUSE_MKL_IF_AVAILABLE=OFF\ + -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} \ + -DANDROID_ABI="armeabi-v7a" \ + -DANDROID_STL="c++_shared" \ + -DANDROID=ON \ + -DUSE_CUDA=OFF \ + -DUSE_SSE=OFF \ + -DSUPPORT_F16C=OFF \ + -DUSE_LAPACK=OFF \ + -DUSE_OPENCV=OFF \ + -DUSE_OPENMP=OFF \ + -DUSE_SIGNAL_HANDLER=ON \ + -DUSE_MKL_IF_AVAILABLE=OFF \ -G Ninja /work/mxnet ninja } @@ -328,14 +338,17 @@ build_android_armv8() { set -ex cd /work/build cmake \ + -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} \ + -DANDROID_ABI="arm64-v8a" \ + -DANDROID_STL="c++_shared" \ -DANDROID=ON \ - -DUSE_CUDA=OFF\ - -DUSE_SSE=OFF\ - -DUSE_LAPACK=OFF\ - -DUSE_OPENCV=OFF\ - -DUSE_OPENMP=OFF\ - -DUSE_SIGNAL_HANDLER=ON\ - -DUSE_MKL_IF_AVAILABLE=OFF\ + -DUSE_CUDA=OFF \ + -DUSE_SSE=OFF \ + -DUSE_LAPACK=OFF \ + -DUSE_OPENCV=OFF \ + -DUSE_OPENMP=OFF \ + -DUSE_SIGNAL_HANDLER=ON \ + -DUSE_MKL_IF_AVAILABLE=OFF \ -G Ninja /work/mxnet ninja } diff --git a/ci/docker/install/android_arm64_openblas.sh b/ci/docker/toolchains/aarch64-linux-gnu-toolchain.cmake old mode 100755 new mode 100644 similarity index 64% rename from ci/docker/install/android_arm64_openblas.sh rename to ci/docker/toolchains/aarch64-linux-gnu-toolchain.cmake index f9647d969bf2..3780415c4b15 --- a/ci/docker/install/android_arm64_openblas.sh +++ b/ci/docker/toolchains/aarch64-linux-gnu-toolchain.cmake @@ -1,5 +1,3 @@ -#!/usr/bin/env bash - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -17,16 +15,14 @@ # specific language governing permissions and limitations # under the License. -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR "aarch64") +set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc) +set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++) +set(CMAKE_CUDA_HOST_COMPILER aarch64-linux-gnu-gcc) +set(CMAKE_FIND_ROOT_PATH "/usr/aarch64-linux-gnu") -set -ex -pushd . -git clone https://github.com/xianyi/OpenBLAS.git -cd OpenBLAS -make -j$(nproc) TARGET=ARMV8 ARM_SOFTFP_ABI=1 HOSTCC=gcc-7 HOSTCXX=g++-7 NOFORTRAN=1 libs -# Can't be run (utility not compiled for the target platform) -#make install -cp *.h /usr/include -cp libopenblas.a /usr/local/lib -popd +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) diff --git a/ci/docker/install/arm_openblas.sh b/ci/docker/toolchains/arm-linux-gnueabihf-toolchain.cmake old mode 100755 new mode 100644 similarity index 65% rename from ci/docker/install/arm_openblas.sh rename to ci/docker/toolchains/arm-linux-gnueabihf-toolchain.cmake index fa2e5cae9cba..62038ecee16a --- a/ci/docker/install/arm_openblas.sh +++ b/ci/docker/toolchains/arm-linux-gnueabihf-toolchain.cmake @@ -1,5 +1,3 @@ -#!/usr/bin/env bash - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -17,14 +15,13 @@ # specific language governing permissions and limitations # under the License. -set -ex - -git clone --recursive -b v0.2.20 https://github.com/xianyi/OpenBLAS.git - -cd OpenBLAS -make -j$(nproc) -PREFIX=${CROSS_ROOT} make install - -cd .. +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR "armv7l") +set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc) +set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++) +set(CMAKE_FIND_ROOT_PATH "/usr/arm-linux-gnueabihf" "/usr/local/arm-linux-gnueabihf") -rm -rf OpenBLAS +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) diff --git a/cmake/upstream/FindCUDAToolkit.cmake b/cmake/upstream/FindCUDAToolkit.cmake index d37c44d9c782..fee4f3f4f698 100644 --- a/cmake/upstream/FindCUDAToolkit.cmake +++ b/cmake/upstream/FindCUDAToolkit.cmake @@ -132,6 +132,7 @@ of the following libraries that are part of the CUDAToolkit: - :ref:`cuRAND` - :ref:`cuSOLVER` - :ref:`cuSPARSE` +- :ref:`cuPTI` - :ref:`NPP` - :ref:`nvBLAS` - :ref:`nvGRAPH` @@ -149,7 +150,6 @@ CUDA Runtime Library The CUDA Runtime library (cudart) are what most applications will typically need to link against to make any calls such as `cudaMalloc`, and `cudaFree`. -They are an explicit dependency of almost every library. Targets Created: @@ -230,6 +230,18 @@ Targets Created: - ``CUDA::cusparse`` - ``CUDA::cusparse_static`` +.. _`cuda_toolkit_cupti`: + +cupti +""""" + +The `NVIDIA CUDA Profiling Tools Interface `_. + +Targets Created: + +- ``CUDA::cupti`` +- ``CUDA::cupti_static`` + .. _`cuda_toolkit_NPP`: NPP @@ -361,8 +373,6 @@ Targets Created: - ``CUDA::nvml`` -.. _`cuda_toolkit_opencl`: - .. _`cuda_toolkit_nvToolsExt`: nvToolsExt @@ -375,6 +385,8 @@ Targets Created: - ``CUDA::nvToolsExt`` +.. _`cuda_toolkit_opencl`: + OpenCL """""" @@ -436,6 +448,11 @@ Result variables The path to the CUDA Toolkit library directory that contains the CUDA Runtime library ``cudart``. +``CUDAToolkit_TARGET_DIR`` + The path to the CUDA Toolkit directory including the target architecture + when cross-compiling. When not cross-compiling this will be equivalant to + ``CUDAToolkit_ROOT_DIR``. + ``CUDAToolkit_NVCC_EXECUTABLE`` The path to the NVIDIA CUDA compiler ``nvcc``. Note that this path may **not** be the same as @@ -487,6 +504,7 @@ if(CMAKE_CUDA_COMPILER_LOADED AND NOT CUDAToolkit_BIN_DIR) get_filename_component(cuda_dir "${CMAKE_CUDA_COMPILER}" DIRECTORY) # use the already detected cuda compiler set(CUDAToolkit_BIN_DIR "${cuda_dir}" CACHE PATH "") + mark_as_advanced(CUDAToolkit_BIN_DIR) unset(cuda_dir) endif() @@ -641,6 +659,7 @@ endif() if(NOT CUDAToolkit_BIN_DIR AND CUDAToolkit_NVCC_EXECUTABLE) get_filename_component(cuda_dir "${CUDAToolkit_NVCC_EXECUTABLE}" DIRECTORY) set(CUDAToolkit_BIN_DIR "${cuda_dir}" CACHE PATH "" FORCE) + mark_as_advanced(CUDAToolkit_BIN_DIR) unset(cuda_dir) endif() @@ -669,8 +688,47 @@ endif() get_filename_component(CUDAToolkit_ROOT_DIR ${CUDAToolkit_BIN_DIR} DIRECTORY ABSOLUTE) -# Now that we have the real ROOT_DIR, find components inside it. -list(APPEND CMAKE_PREFIX_PATH ${CUDAToolkit_ROOT_DIR}) +# Handle cross compilation +if(CMAKE_CROSSCOMPILING) + if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a") + # Support for NVPACK + set (CUDAToolkit_TARGET_NAME "armv7-linux-androideabi") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") + # Support for arm cross compilation + set(CUDAToolkit_TARGET_NAME "armv7-linux-gnueabihf") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") + # Support for aarch64 cross compilation + if (ANDROID_ARCH_NAME STREQUAL "arm64") + set(CUDAToolkit_TARGET_NAME "aarch64-linux-androideabi") + else() + set(CUDAToolkit_TARGET_NAME "aarch64-linux") + endif (ANDROID_ARCH_NAME STREQUAL "arm64") + elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") + set(CUDAToolkit_TARGET_NAME "x86_64-linux") + endif() + + if (EXISTS "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") + set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") + # add known CUDA target root path to the set of directories we search for programs, libraries and headers + list(PREPEND CMAKE_FIND_ROOT_PATH "${CUDAToolkit_TARGET_DIR}") + + # Mark that we need to pop the root search path changes after we have + # found all cuda libraries so that searches for our cross-compilation + # libraries work when another cuda sdk is in CMAKE_PREFIX_PATH or + # PATh + set(_CUDAToolkit_Pop_ROOT_PATH True) + endif() +else() + # Not cross compiling + set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}") + # Now that we have the real ROOT_DIR, find components inside it. + list(APPEND CMAKE_PREFIX_PATH ${CUDAToolkit_ROOT_DIR}) + + # Mark that we need to pop the prefix path changes after we have + # found the cudart library. + set(_CUDAToolkit_Pop_Prefix True) +endif() + # Find the include/ directory find_path(CUDAToolkit_INCLUDE_DIR @@ -680,14 +738,17 @@ find_path(CUDAToolkit_INCLUDE_DIR # And find the CUDA Runtime Library libcudart find_library(CUDA_CUDART NAMES cudart - PATH_SUFFIXES lib64 lib/x64 + PATH_SUFFIXES lib64 lib64/stubs lib/x64 ) if (NOT CUDA_CUDART AND NOT CUDAToolkit_FIND_QUIETLY) message(STATUS "Unable to find cudart library.") endif() unset(CUDAToolkit_ROOT_DIR) -list(REMOVE_AT CMAKE_PREFIX_PATH -1) +if(_CUDAToolkit_Pop_Prefix) + list(REMOVE_AT CMAKE_PREFIX_PATH -1) + unset(_CUDAToolkit_Pop_Prefix) +endif() #----------------------------------------------------------------------------- # Perform version comparison and validate all required variables are set. @@ -702,6 +763,10 @@ find_package_handle_standard_args(CUDAToolkit VERSION_VAR CUDAToolkit_VERSION ) +mark_as_advanced(CUDA_CUDART + CUDAToolkit_INCLUDE_DIR + CUDAToolkit_NVCC_EXECUTABLE + ) #----------------------------------------------------------------------------- # Construct result variables @@ -714,78 +779,103 @@ endif() # Construct import targets if(CUDAToolkit_FOUND) - function(find_and_add_cuda_import_lib lib_name) + function(_CUDAToolkit_find_and_add_import_lib lib_name) + cmake_parse_arguments(arg "" "" "ALT;DEPS;EXTRA_PATH_SUFFIXES" ${ARGN}) - if(ARGC GREATER 1) - set(search_names ${ARGN}) - else() - set(search_names ${lib_name}) - endif() + set(search_names ${lib_name} ${arg_ALT}) find_library(CUDA_${lib_name}_LIBRARY NAMES ${search_names} - PATHS ${CUDAToolkit_LIBRARY_DIR} + HINTS ${CUDAToolkit_LIBRARY_DIR} ENV CUDA_PATH - PATH_SUFFIXES nvidia/current lib64 lib/x64 lib + PATH_SUFFIXES nvidia/current lib64 lib64/stubs lib/x64 lib lib/stubs stubs + ${arg_EXTRA_PATH_SUFFIXES} ) + mark_as_advanced(CUDA_${lib_name}_LIBRARY) - if (NOT CUDA::${lib_name} AND CUDA_${lib_name}_LIBRARY) + if (NOT TARGET CUDA::${lib_name} AND CUDA_${lib_name}_LIBRARY) add_library(CUDA::${lib_name} IMPORTED INTERFACE) target_include_directories(CUDA::${lib_name} SYSTEM INTERFACE "${CUDAToolkit_INCLUDE_DIRS}") target_link_libraries(CUDA::${lib_name} INTERFACE "${CUDA_${lib_name}_LIBRARY}") + foreach(dep ${arg_DEPS}) + if(TARGET CUDA::${dep}) + target_link_libraries(CUDA::${lib_name} INTERFACE CUDA::${dep}) + endif() + endforeach() endif() endfunction() - function(add_cuda_link_dependency lib_name) - foreach(dependency IN LISTS ${ARGN}) - target_link_libraries(CUDA::${lib_name} INTERFACE CUDA::${dependency}) - endforeach() - endfunction() + if(NOT TARGET CUDA::toolkit) + add_library(CUDA::toolkit IMPORTED INTERFACE) + target_include_directories(CUDA::toolkit SYSTEM INTERFACE "${CUDAToolkit_INCLUDE_DIRS}") + target_link_directories(CUDA::toolkit INTERFACE "${CUDAToolkit_LIBRARY_DIR}") + endif() - add_library(CUDA::toolkit IMPORTED INTERFACE) - target_include_directories(CUDA::toolkit SYSTEM INTERFACE "${CUDAToolkit_INCLUDE_DIRS}") - target_link_directories(CUDA::toolkit INTERFACE "${CUDAToolkit_LIBRARY_DIR}") + _CUDAToolkit_find_and_add_import_lib(cuda_driver ALT cuda) + _CUDAToolkit_find_and_add_import_lib(cudart) + _CUDAToolkit_find_and_add_import_lib(cudart_static) - find_and_add_cuda_import_lib(cuda_driver cuda) + # setup dependencies that are required for cudart_static when building + # on linux. These are generally only required when using the CUDA toolkit + # when CUDA language is disabled + if(NOT TARGET CUDA::cudart_static_deps + AND TARGET CUDA::cudart_static) - find_and_add_cuda_import_lib(cudart) - find_and_add_cuda_import_lib(cudart_static) + add_library(CUDA::cudart_static_deps IMPORTED INTERFACE) + target_link_libraries(CUDA::cudart_static INTERFACE CUDA::cudart_static_deps) - foreach (cuda_lib cublas cufft cufftw curand cusolver cusparse nvgraph nvjpeg) - find_and_add_cuda_import_lib(${cuda_lib}) - add_cuda_link_dependency(${cuda_lib} cudart) + if(UNIX AND (CMAKE_C_COMPILER OR CMAKE_CXX_COMPILER)) + find_package(Threads REQUIRED) + target_link_libraries(CUDA::cudart_static_deps INTERFACE Threads::Threads ${CMAKE_DL_LIBS}) + endif() - find_and_add_cuda_import_lib(${cuda_lib}_static) - add_cuda_link_dependency(${cuda_lib}_static cudart_static) + if(UNIX AND NOT APPLE) + # On Linux, you must link against librt when using the static cuda runtime. + find_library(CUDAToolkit_rt_LIBRARY rt) + mark_as_advanced(CUDAToolkit_rt_LIBRARY) + if(NOT CUDAToolkit_rt_LIBRARY) + message(WARNING "Could not find librt library, needed by CUDA::cudart_static") + else() + target_link_libraries(CUDA::cudart_static_deps INTERFACE ${CUDAToolkit_rt_LIBRARY}) + endif() + endif() + endif() + + _CUDAToolkit_find_and_add_import_lib(culibos) # it's a static library + foreach (cuda_lib cublas cufft curand cusparse nppc nvjpeg) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS culibos) endforeach() + # cuFFTW depends on cuFFT + _CUDAToolkit_find_and_add_import_lib(cufftw DEPS cufft) + _CUDAToolkit_find_and_add_import_lib(cufftw DEPS cufft_static) + # cuSOLVER depends on cuBLAS, and cuSPARSE - add_cuda_link_dependency(cusolver cublas cusparse) - add_cuda_link_dependency(cusolver_static cublas_static cusparse) + _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublas cusparse) + _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cublas_static cusparse_static culibos) # nvGRAPH depends on cuRAND, and cuSOLVER. - add_cuda_link_dependency(nvgraph curand cusolver) - add_cuda_link_dependency(nvgraph_static curand_static cusolver_static) - - find_and_add_cuda_import_lib(nppc) - find_and_add_cuda_import_lib(nppc_static) - - add_cuda_link_dependency(nppc cudart) - add_cuda_link_dependency(nppc_static cudart_static culibos) + _CUDAToolkit_find_and_add_import_lib(nvgraph DEPS curand cusolver) + _CUDAToolkit_find_and_add_import_lib(nvgraph_static DEPS curand_static cusolver_static) # Process the majority of the NPP libraries. foreach (cuda_lib nppial nppicc nppidei nppif nppig nppim nppist nppitc npps nppicom nppisu) - find_and_add_cuda_import_lib(${cuda_lib}) - find_and_add_cuda_import_lib(${cuda_lib}_static) - add_cuda_link_dependency(${cuda_lib} nppc) - add_cuda_link_dependency(${cuda_lib}_static nppc_static) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib} DEPS nppc) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS nppc_static) endforeach() - find_and_add_cuda_import_lib(nvrtc) - add_cuda_link_dependency(nvrtc cuda_driver) + _CUDAToolkit_find_and_add_import_lib(cupti + EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ + ../extras/CUPTI/lib/) + _CUDAToolkit_find_and_add_import_lib(cupti_static + EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ + ../extras/CUPTI/lib/) + + _CUDAToolkit_find_and_add_import_lib(nvrtc DEPS cuda_driver) - find_and_add_cuda_import_lib(nvml nvidia-ml nvml) + _CUDAToolkit_find_and_add_import_lib(nvml ALT nvidia-ml nvml) if(WIN32) # nvtools can be installed outside the CUDA toolkit directory @@ -798,17 +888,12 @@ if(CUDAToolkit_FOUND) PATH_SUFFIXES lib/x64 lib ) endif() - find_and_add_cuda_import_lib(nvToolsExt nvToolsExt nvToolsExt64) + _CUDAToolkit_find_and_add_import_lib(nvToolsExt ALT nvToolsExt64) - add_cuda_link_dependency(nvToolsExt cudart) - - find_and_add_cuda_import_lib(OpenCL) - - find_and_add_cuda_import_lib(culibos) - if(TARGET CUDA::culibos) - foreach (cuda_lib cublas cufft cusparse curand nvjpeg) - add_cuda_link_dependency(${cuda_lib}_static culibos) - endforeach() - endif() + _CUDAToolkit_find_and_add_import_lib(OpenCL) +endif() +if(_CUDAToolkit_Pop_ROOT_PATH) + list(REMOVE_AT CMAKE_FIND_ROOT_PATH 0) + unset(_CUDAToolkit_Pop_ROOT_PATH) endif() diff --git a/make/crosscompile.jetson.mk b/make/crosscompile.jetson.mk deleted file mode 100644 index 880e2cf5b466..000000000000 --- a/make/crosscompile.jetson.mk +++ /dev/null @@ -1,216 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet -# -# If you want to change the configuration, please use the following -# steps. Assume you are on the root directory of mxnet. First copy the this -# file so that any local changes will be ignored by git -# -# $ cp make/config.mk . -# -# Next modify the according entries, and then compile by -# -# $ make -# -# or build in parallel with 8 threads -# -# $ make -j8 -#------------------------------------------------------------------------------- - -#--------------------- -# For cross compilation we only explictily set a compiler when one is not already present. -#-------------------- - -ifndef CC -export CC = gcc -endif -ifndef CXX -export CXX = g++ -endif -ifndef NVCC -export NVCC = nvcc -endif - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on segfault signal handler to log the stack trace -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS = -L${CROSS_ROOT}/lib -L/usr/lib/aarch64-linux-gnu/ - -# the additional compile flags you want to add -ADD_CFLAGS = -I${CROSS_ROOT}/include -I/usr/include/aarch64-linux-gnu/ - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# whether use CUDA during compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = /usr/local/cuda-9.0/targets/aarch64-linux - -# whether to enable CUDA runtime compilation -ENABLE_CUDA_RTC = 0 - -# whether use CuDNN R3 library -USE_CUDNN = 1 - -#whether to use NCCL library -USE_NCCL = 0 -#add the path to NCCL library -USE_NCCL_PATH = NONE - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 0 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -#whether use libjpeg-turbo for image decode without OpenCV wrapper -USE_LIBJPEG_TURBO = 0 -#add the path to libjpeg-turbo library -USE_LIBJPEG_TURBO_PATH = NONE - -# use openmp for parallelization -USE_OPENMP = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 0 - -# whether use NNPACK library -USE_NNPACK = 0 - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -UNAME_S := $(shell uname -s) -USE_BLAS = openblas - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL only for BLAS, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -USE_SSE=0 - -# Turn off F16C instruction set support -USE_F16C=0 - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 0 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 0 - -#---------------------------- -# performance settings -#---------------------------- -# Use operator tuning -USE_OPERATOR_TUNING = 1 - -# Use gperftools if found -# Disable because of #8968 -USE_GPERFTOOLS = 0 - -# path to gperftools (tcmalloc) library in case of a non-standard installation -USE_GPERFTOOLS_PATH = - -# Use JEMalloc if found, and not using gperftools -USE_JEMALLOC = 1 - -# path to jemalloc library in case of a non-standard installation -USE_JEMALLOC_PATH = - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - -#---------------------------- -# other features -#---------------------------- - -# Create C++ interface package -USE_CPP_PACKAGE = 0 - -# Use int64_t type to represent the total number of elements in the tensor -# This will cause performance degradation reported in issue #14496 -# Set to 1 for large tensor with tensor size greater than INT32_MAX i.e. 2147483647 -# Note: the size of each dimension is still bounded by INT32_MAX -USE_INT64_TENSOR_SIZE = 0 - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/src/operator/random/shuffle_op.cc b/src/operator/random/shuffle_op.cc index 3f94cca530c3..c81d90689d58 100644 --- a/src/operator/random/shuffle_op.cc +++ b/src/operator/random/shuffle_op.cc @@ -23,7 +23,7 @@ * \brief Operator to shuffle elements of an NDArray */ #if ((__GNUC__ > 4 && !defined(__clang__major__)) || (__clang_major__ > 4 && __linux__)) && \ - defined(_OPENMP) + defined(_OPENMP) && !defined(__ANDROID__) #define USE_GNU_PARALLEL_SHUFFLE #endif From b1dd13cbb9b64717bb403097f2f83c42d77930ed Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Sat, 11 Apr 2020 22:59:51 +0000 Subject: [PATCH 13/20] Greatly simplify qemu setup --- ci/README.md | 95 +---- ci/dev_menu.py | 2 +- ci/docker/Dockerfile.build.android_armv7 | 4 +- ci/docker/Dockerfile.build.android_armv8 | 4 +- ci/docker/Dockerfile.build.armv6 | 4 +- ci/docker/Dockerfile.build.armv7 | 4 +- ci/docker/Dockerfile.build.armv8 | 4 +- ...t.arm_qemu => Dockerfile.build.test.armv7} | 32 +- ...rm_qemu.sh => Dockerfile.build.test.armv8} | 38 +- ci/docker/install/ubuntu_arm_qemu_bin.sh | 40 -- ci/docker/qemu/README.md | 18 - ci/docker/qemu/runtime_functions.py | 134 ------- ci/docker/qemu/vmcontrol.py | 360 ------------------ ci/docker/runtime_functions.sh | 12 + ci/jenkins/Jenkins_steps.groovy | 40 +- ci/jenkins/Jenkinsfile_edge | 7 +- ci/qemu/README.md | 92 ----- ci/qemu/copy.sh | 23 -- ci/qemu/init.sh | 23 -- ci/qemu/initrd_modif/inittab | 38 -- ci/qemu/install.sh | 32 -- ci/qemu/mxnet_requirements.txt | 7 - ci/qemu/preseed.cfg | 68 ---- ci/qemu/preseed.sh | 29 -- ci/qemu/run.sh | 33 -- ci/qemu/test_requirements.txt | 3 - 26 files changed, 108 insertions(+), 1038 deletions(-) rename ci/docker/{Dockerfile.build.test.arm_qemu => Dockerfile.build.test.armv7} (67%) rename ci/docker/{install/ubuntu_arm_qemu.sh => Dockerfile.build.test.armv8} (60%) mode change 100755 => 100644 delete mode 100755 ci/docker/install/ubuntu_arm_qemu_bin.sh delete mode 100644 ci/docker/qemu/README.md delete mode 100755 ci/docker/qemu/runtime_functions.py delete mode 100644 ci/docker/qemu/vmcontrol.py delete mode 100644 ci/qemu/README.md delete mode 100755 ci/qemu/copy.sh delete mode 100755 ci/qemu/init.sh delete mode 100644 ci/qemu/initrd_modif/inittab delete mode 100755 ci/qemu/install.sh delete mode 100644 ci/qemu/mxnet_requirements.txt delete mode 100644 ci/qemu/preseed.cfg delete mode 100755 ci/qemu/preseed.sh delete mode 100755 ci/qemu/run.sh delete mode 100644 ci/qemu/test_requirements.txt diff --git a/ci/README.md b/ci/README.md index 155a0104a125..7172bd955491 100644 --- a/ci/README.md +++ b/ci/README.md @@ -111,90 +111,37 @@ significantly. You can set this directory explicitly by setting CCACHE_DIR envir variable. All ccache instances are currently set to be 10 Gigabytes max in size. -## Testing with QEMU -To run the unit tests under qemu: -``` -./build.py -p armv7 && ./build.py -p test.arm_qemu ./runtime_functions.py run_ut_py3_qemu -``` - -To get a shell on the container and debug issues with the emulator itself, we build the container -and then execute it interactively. We can afterwards use port 2222 on the host to connect with SSH. - - -``` -ci/build.py -p test.arm_qemu -b && docker run -p2222:2222 -ti mxnetci/build.test.arm_qemu -``` +## Testing with ARM / Edge devices with QEMU -Then from another terminal: +We build on [QEMU](https://www.qemu.org/) and Linux [Kernel Support for +miscellaneous Binary +Formats](https://www.kernel.org/doc/html/v5.6/admin-guide/binfmt-misc.html) for +testing MXNet on edge devices. Test can be invoked with the same syntax as for +non-virtualized platforms: ``` -ssh -o StrictHostKeyChecking=no -p 2222 qemu@localhost +./build.py -p armv7 +./build.py -p test.armv7 /work/runtime_functions.sh unittest_ubuntu_python3_armv7 ``` -There are two pre-configured users: `root` and `qemu` both without passwords. - - -### Example of reproducing a test result with QEMU on ARM - - -You might want to enable a debug build first: - -``` -$ git diff -diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh -index 39631f9..666ceea 100755 ---- a/ci/docker/runtime_functions.sh -+++ b/ci/docker/runtime_functions.sh -@@ -172,6 +172,7 @@ build_armv7() { - -DUSE_LAPACK=OFF \ - -DBUILD_CPP_EXAMPLES=OFF \ - -Dmxnet_LINKER_LIBS=-lgfortran \ -+ -DCMAKE_BUILD_TYPE=Debug \ - -G Ninja /work/mxnet - - ninja -v +For the test step to succeed, you must run Linux kernel 4.8 or later and have qemu installed. +On Debian and Ubuntu systems, run the following command to install the dependencies: ``` +sudo apt install binfmt-support qemu-user-static -Then we build the project for armv7, the test container and start QEMU inside docker: - -``` -ci/build.py -p armv7 -ci/build.py -p test.arm_qemu -b && docker run -p2222:2222 -ti mxnetci/build.test.arm_qemu +# Use qemu-binfmt-conf.sh to register all binary types with the kernel +wget https://raw.githubusercontent.com/qemu/qemu/stable-4.1/scripts/qemu-binfmt-conf.sh +chmod +x qemu-binfmt-conf.sh +sudo ./qemu-binfmt-conf.sh --persistent yes --qemu-suffix "-static" --qemu-path "/usr/bin" --systemd ALL ``` - - -At this point we copy artifacts and sources to the VM, in another terminal (host) do the following: +If you run into segmentation faults at the beginning of the emulated tests, you +probably have a ancient version of Qemu on your system (or found a bug in +upstream Qemu). In that situation, you can rely on the +`multiarch/qemu-user-static` Docker project to register a set of up-to-date Qemu +binaries from their Docker image with your kernel: ``` -# Copy mxnet sources to the VM -rsync --delete -e 'ssh -p2222' --exclude='.git/' -zvaP ./ qemu@localhost:mxnet - - -# Ssh into the vm -ssh -p2222 qemu@localhost - -cd mxnet - -# Execute a single failing C++ test -build/tests/mxnet_unit_tests --gtest_filter="ACTIVATION_PERF.ExecuteBidirectional" - -# To install MXNet: -sudo pip3 install --upgrade --force-reinstall build/mxnet-1.3.1-py2.py3-none-any.whl - -# Execute a single python test: - -nosetests-3.4 -v -s tests/python/unittest/test_ndarray.py - - -# Debug with cgdb -sudo apt install -y libstdc++6-6-dbg -cgdb build/tests/mxnet_unit_tests - -(gdb) !pwd -/home/qemu/mxnet -(gdb) set substitute-path /work /home/qemu -(gdb) set substitute-path /build/gcc-6-6mK9AW/gcc-6-6.3.0/build/arm-linux-gnueabihf/libstdc++-v3/include/ /usr/include/c++/6/ -(gdb) r --gtest_filter="ACTIVATION_PERF.ExecuteBidirectional" +docker run --rm --privileged multiarch/qemu-user-static --reset -p yes ``` diff --git a/ci/dev_menu.py b/ci/dev_menu.py index e9f031e1b171..962e4ecfe03f 100755 --- a/ci/dev_menu.py +++ b/ci/dev_menu.py @@ -167,7 +167,7 @@ def provision_virtualenv(venv_path=DEFAULT_PYENV): ('[Docker] Python3 ARMv7 unittests (QEMU)', [ "ci/build.py -p armv7", - "ci/build.py -p test.arm_qemu ./runtime_functions.py run_ut_py3_qemu" + "ci/build.py -p test.armv7 /work/runtime_functions.sh unittest_ubuntu_python3_armv7" ]), ('Clean (RESET HARD) repository (Warning! erases local changes / DATA LOSS)', Confirm("ci/docker/runtime_functions.sh clean_repo")) diff --git a/ci/docker/Dockerfile.build.android_armv7 b/ci/docker/Dockerfile.build.android_armv7 index 96ca04e9f5e6..8d9fb6481e2e 100644 --- a/ci/docker/Dockerfile.build.android_armv7 +++ b/ci/docker/Dockerfile.build.android_armv7 @@ -46,10 +46,10 @@ RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ mkdir /usr/local/openblas-android && \ cd /usr/local/OpenBLAS && \ export TOOLCHAIN=/usr/local/android-ndk-r19/toolchains/llvm/prebuilt/linux-x86_64 && \ - make NOFORTRAN=1 ARM_SOFTFP_ABI=1 \ + make NOFORTRAN=1 ARM_SOFTFP_ABI=1 NO_SHARED=1 \ LDFLAGS="-L/usr/local/android-ndk-r19/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x -lm" \ CC=$TOOLCHAIN/bin/armv7a-linux-androideabi16-clang AR=$TOOLCHAIN/bin/arm-linux-androideabi-ar && \ - make PREFIX=/usr/local/openblas-android install && \ + make PREFIX=/usr/local/openblas-android NO_SHARED=1 install && \ cd /usr/local && \ rm -rf OpenBLAS ENV OpenBLAS_HOME=/usr/local/openblas-android diff --git a/ci/docker/Dockerfile.build.android_armv8 b/ci/docker/Dockerfile.build.android_armv8 index 81adc80edf14..a78113a33bae 100644 --- a/ci/docker/Dockerfile.build.android_armv8 +++ b/ci/docker/Dockerfile.build.android_armv8 @@ -46,10 +46,10 @@ RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ mkdir /usr/local/openblas-android && \ cd /usr/local/OpenBLAS && \ export TOOLCHAIN=/usr/local/android-ndk-r19/toolchains/llvm/prebuilt/linux-x86_64 && \ - make NOFORTRAN=1 \ + make NOFORTRAN=1 NO_SHARED=1 \ LDFLAGS="-L/usr/local/android-ndk-r21/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/lib/gcc/aarch64-linux-android/4.9.x -lm" \ CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang AR=$TOOLCHAIN/bin/aarch64-linux-android-ar && \ - make PREFIX=/usr/local/openblas-android install && \ + make PREFIX=/usr/local/openblas-android NO_SHARED=1 install && \ cd /usr/local && \ rm -rf OpenBLAS ENV OpenBLAS_HOME=/usr/local/openblas-android diff --git a/ci/docker/Dockerfile.build.armv6 b/ci/docker/Dockerfile.build.armv6 index 02e16da11616..83186369d829 100644 --- a/ci/docker/Dockerfile.build.armv6 +++ b/ci/docker/Dockerfile.build.armv6 @@ -50,8 +50,8 @@ ENV CMAKE_TOOLCHAIN_FILE=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/share/ RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ cd /usr/local/OpenBLAS && \ - make NOFORTRAN=1 CC=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/bin/arm-linux-gcc && \ - make PREFIX=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/arm-buildroot-linux-gnueabihf/sysroot install && \ + make NOFORTRAN=1 NO_SHARED=1 CC=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/bin/arm-linux-gcc && \ + make PREFIX=/usr/local/armv6-eabihf--glibc--stable-2020.02-2/arm-buildroot-linux-gnueabihf/sysroot NO_SHARED=1 install && \ cd /usr/local && \ rm -rf OpenBLAS diff --git a/ci/docker/Dockerfile.build.armv7 b/ci/docker/Dockerfile.build.armv7 index a9cc6d1e83a4..d207d79485ae 100644 --- a/ci/docker/Dockerfile.build.armv7 +++ b/ci/docker/Dockerfile.build.armv7 @@ -45,8 +45,8 @@ ENV CMAKE_TOOLCHAIN_FILE=/usr/local/arm-linux-gnueabihf-toolchain.cmake RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ cd /usr/local/OpenBLAS && \ - make NOFORTRAN=1 CC=arm-linux-gnueabihf-gcc && \ - make PREFIX=/usr/local/arm-linux-gnueabihf install && \ + make NOFORTRAN=1 NO_SHARED=1 CC=arm-linux-gnueabihf-gcc && \ + make PREFIX=/usr/local/arm-linux-gnueabihf NO_SHARED=1 install && \ cd /usr/local && \ rm -rf OpenBLAS diff --git a/ci/docker/Dockerfile.build.armv8 b/ci/docker/Dockerfile.build.armv8 index adf6873fb40c..d318cc2f02d4 100644 --- a/ci/docker/Dockerfile.build.armv8 +++ b/ci/docker/Dockerfile.build.armv8 @@ -45,8 +45,8 @@ ENV CMAKE_TOOLCHAIN_FILE=/usr/aarch64-linux-gnu-toolchain.cmake RUN git clone --recursive -b v0.3.9 https://github.com/xianyi/OpenBLAS.git && \ cd /usr/local/OpenBLAS && \ - make NOFORTRAN=1 CC=aarch64-linux-gnu-gcc && \ - make PREFIX=/usr/aarch64-linux-gnu install && \ + make NOFORTRAN=1 NO_SHARED=1 CC=aarch64-linux-gnu-gcc && \ + make PREFIX=/usr/aarch64-linux-gnu NO_SHARED=1 install && \ cd /usr/local && \ rm -rf OpenBLAS diff --git a/ci/docker/Dockerfile.build.test.arm_qemu b/ci/docker/Dockerfile.build.test.armv7 similarity index 67% rename from ci/docker/Dockerfile.build.test.arm_qemu rename to ci/docker/Dockerfile.build.test.armv7 index 5dc610a524b0..d49e7a5582c1 100644 --- a/ci/docker/Dockerfile.build.test.arm_qemu +++ b/ci/docker/Dockerfile.build.test.armv7 @@ -16,22 +16,21 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to test MXNet on Ubuntu 20.04 ARMv7 CPU -FROM ubuntu:16.04 +FROM arm32v7/ubuntu:20.04 -WORKDIR /work +WORKDIR /usr/local -RUN apt-get update -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_arm_qemu.sh /work -RUN /work/ubuntu_arm_qemu.sh - -COPY install/ubuntu_arm_qemu_bin.sh /work -RUN /work/ubuntu_arm_qemu_bin.sh +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python3 \ + python3-pip \ + python3-numpy \ + python3-scipy \ + python3-nose \ + python3-nose-timer \ + python3-requests \ + && rm -rf /var/lib/apt/lists/* ARG USER_ID=0 ARG GROUP_ID=0 @@ -39,9 +38,4 @@ COPY install/ubuntu_adduser.sh /work/ RUN /work/ubuntu_adduser.sh COPY runtime_functions.sh /work/ -COPY qemu/* /work/ - -# SSH to the Qemu VM -EXPOSE 2222/tcp - -CMD ["./runtime_functions.py","run_qemu_interactive"] +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/install/ubuntu_arm_qemu.sh b/ci/docker/Dockerfile.build.test.armv8 old mode 100755 new mode 100644 similarity index 60% rename from ci/docker/install/ubuntu_arm_qemu.sh rename to ci/docker/Dockerfile.build.test.armv8 index 79ab67bfdbe6..bee4d85c6a97 --- a/ci/docker/install/ubuntu_arm_qemu.sh +++ b/ci/docker/Dockerfile.build.test.armv8 @@ -1,5 +1,4 @@ -#!/usr/bin/env bash - +# -*- mode: dockerfile -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -16,22 +15,27 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +# +# Dockerfile to test MXNet on Ubuntu 20.04 ARMv8 CPU + +FROM arm64v8/ubuntu:20.04 -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image +WORKDIR /usr/local -set -exuo pipefail +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python3 \ + python3-pip \ + python3-numpy \ + python3-scipy \ + python3-nose \ + python3-nose-timer \ + python3-requests \ + && rm -rf /var/lib/apt/lists/* -apt-get install -y \ - cmake \ - curl \ - wget \ - git \ - qemu \ - qemu-system-arm \ - unzip \ - bzip2 \ - vim-nox \ - toilet +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh -pip3 install ipython +COPY runtime_functions.sh /work/ +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/install/ubuntu_arm_qemu_bin.sh b/ci/docker/install/ubuntu_arm_qemu_bin.sh deleted file mode 100755 index d4f81185c169..000000000000 --- a/ci/docker/install/ubuntu_arm_qemu_bin.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image - -set -exuo pipefail - -# -# This disk image and kernels for virtual testing with QEMU is generated with some manual OS -# installation steps with the scripts and documentation found in the ci/qemu/ folder. -# -# The image has a base Debian OS and MXNet runtime dependencies installed. -# The root password is empty and there's a "qemu" user without password. SSH access is enabled as -# well. -# -# See also: ci/qemu/README.md -# - -REMOTE="https://s3-us-west-2.amazonaws.com/mxnet-ci-prod-slave-data" -curl -f ${REMOTE}/vda_debian_stretch.qcow2.bz2 | bunzip2 > vda.qcow2 -curl -f ${REMOTE}/vmlinuz -o vmlinuz -curl -f ${REMOTE}/initrd.img -o initrd.img - diff --git a/ci/docker/qemu/README.md b/ci/docker/qemu/README.md deleted file mode 100644 index c06b34562b57..000000000000 --- a/ci/docker/qemu/README.md +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - - - - -These are files used in the docker container that runs QEMU diff --git a/ci/docker/qemu/runtime_functions.py b/ci/docker/qemu/runtime_functions.py deleted file mode 100755 index 5a57cb8dae6a..000000000000 --- a/ci/docker/qemu/runtime_functions.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python3 - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# -*- coding: utf-8 -*- -"""Runtime functions to use in docker / testing""" - -__author__ = 'Pedro Larroy' -__version__ = '0.1' - -import os -import sys -import subprocess -import argparse -import logging -from subprocess import call, check_call, Popen, DEVNULL, PIPE -import time -import sys -import types -import glob -import vmcontrol -from vmcontrol import qemu_ssh, qemu_provision, qemu_rsync_to_host, VM - -def activate_this(base): - import site - import os - import sys - if sys.platform == 'win32': - site_packages = os.path.join(base, 'Lib', 'site-packages') - else: - site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages') - prev_sys_path = list(sys.path) - sys.real_prefix = sys.prefix - sys.prefix = base - # Move the added items to the front of the path: - new_sys_path = [] - for item in list(sys.path): - if item not in prev_sys_path: - new_sys_path.append(item) - sys.path.remove(item) - sys.path[:0] = new_sys_path - - - - -def run_ut_py3_qemu(): - """Run unit tests in the emulator and copy the results back to the host through the mounted - volume in /mxnet""" - from vmcontrol import VM - with VM() as vm: - qemu_provision(vm.ssh_port) - logging.info("execute tests") - qemu_ssh(vm.ssh_port, "./runtime_functions.py", "run_ut_python3_qemu_internal") - qemu_rsync_to_host(vm.ssh_port, "*.xml", "mxnet") - logging.info("copied to host") - logging.info("tests finished, vm shutdown.") - vm.shutdown() - -def run_ut_python3_qemu_internal(): - """this runs inside the vm""" - pkg = glob.glob('mxnet_dist/*.whl')[0] - logging.info("=== NOW Running inside QEMU ===") - logging.info("PIP Installing %s", pkg) - check_call(['sudo', 'pip3', 'install', pkg]) - logging.info("PIP Installing mxnet/test_requirements.txt") - check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt']) - logging.info("Running tests in mxnet/tests/python/unittest/") - check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_engine.py']) - # Example to run a single unit test: - # check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_ndarray.py:test_ndarray_fluent']) - - - -def run_qemu_interactive(): - vm = VM(interactive=True) - vm.detach() - vm.start() - vm.wait() - logging.info("QEMU finished") - -################################ - -def parsed_args(): - parser = argparse.ArgumentParser(description="""python runtime functions""", epilog="") - parser.add_argument('command',nargs='*', - help="Name of the function to run with arguments") - args = parser.parse_args() - return (args, parser) - -def script_name() -> str: - return os.path.split(sys.argv[0])[1] - -def chdir_to_script_directory(): - # We need to be in the same directory than the script so the commands in the dockerfiles work as - # expected. But the script can be invoked from a different path - base = os.path.split(os.path.realpath(__file__))[0] - os.chdir(base) - -def main(): - logging.getLogger().setLevel(logging.INFO) - logging.basicConfig(format='{}: %(asctime)-15s %(message)s'.format(script_name())) - chdir_to_script_directory() - - # Run function with name passed as argument - (args, parser) = parsed_args() - logging.info("%s", args.command) - if args.command: - fargs = args.command[1:] - globals()[args.command[0]](*fargs) - return 0 - else: - parser.print_help() - fnames = [x for x in globals() if type(globals()[x]) is types.FunctionType] - print('\nAvailable functions: {}'.format(' '.join(fnames))) - return 1 - -if __name__ == '__main__': - sys.exit(main()) - diff --git a/ci/docker/qemu/vmcontrol.py b/ci/docker/qemu/vmcontrol.py deleted file mode 100644 index 31ef4d2550c3..000000000000 --- a/ci/docker/qemu/vmcontrol.py +++ /dev/null @@ -1,360 +0,0 @@ -#!/usr/bin/env python3 - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# -*- coding: utf-8 -*- -"""Utilities to control a guest VM, used for virtual testing with QEMU""" - -__author__ = 'Pedro Larroy' -__version__ = '0.1' - -import os -import sys -import subprocess -import argparse -import logging -from subprocess import call, check_call, Popen, DEVNULL, PIPE -import time -import sys -import multiprocessing -import shlex - -################################################### -# -# Virtual testing with QEMU -# -# We start QEMU instances that have a local port in the host redirected to the ssh port. -# -# The VMs are provisioned after boot, tests are run and then they are stopped -# -QEMU_SSH_PORT=2222 -QEMU_RAM=4096 - -QEMU_RUN=""" -qemu-system-arm -M virt -m {ram} \ - -kernel vmlinuz \ - -initrd initrd.img \ - -append 'root=/dev/vda1' \ - -drive if=none,file=vda.qcow2,format=qcow2,id=hd \ - -device virtio-blk-device,drive=hd \ - -netdev user,id=mynet,hostfwd=tcp::{ssh_port}-:22 \ - -device virtio-net-device,netdev=mynet \ - -display none -nographic -""" - -QEMU_RUN_INTERACTIVE=""" -qemu-system-arm -M virt -m {ram} \ - -kernel vmlinuz \ - -initrd initrd.img \ - -append 'root=/dev/vda1' \ - -drive if=none,file=vda.qcow2,format=qcow2,id=hd \ - -device virtio-blk-device,drive=hd \ - -netdev user,id=mynet,hostfwd=tcp::{ssh_port}-:22 \ - -device virtio-net-device,netdev=mynet \ - -nographic -""" - -def retry(target_exception, tries=4, delay_s=1, backoff=2): - """Retry calling the decorated function using an exponential backoff. - - http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ - original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry - - :param target_exception: the exception to check. may be a tuple of - exceptions to check - :type target_exception: Exception or tuple - :param tries: number of times to try (not retry) before giving up - :type tries: int - :param delay_s: initial delay between retries in seconds - :type delay_s: int - :param backoff: backoff multiplier e.g. value of 2 will double the delay - each retry - :type backoff: int - """ - import time - from functools import wraps - - def decorated_retry(f): - @wraps(f) - def f_retry(*args, **kwargs): - mtries, mdelay = tries, delay_s - while mtries > 1: - try: - return f(*args, **kwargs) - except target_exception as e: - logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay) - time.sleep(mdelay) - mtries -= 1 - mdelay *= backoff - return f(*args, **kwargs) - - return f_retry # true decorator - - return decorated_retry - - - - -class VMError(RuntimeError): - pass - -class VM: - """Control of the virtual machine""" - def __init__(self, ssh_port=QEMU_SSH_PORT, ram=QEMU_RAM, interactive=False): - self.log = logging.getLogger(VM.__name__) - self.ssh_port = ssh_port - self.timeout_s = 300 - self.qemu_process = None - self._detach = False - self._interactive = interactive - self.ram = ram - - def __enter__(self): - self.start() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if not self._detach: - self.shutdown() - self.terminate() - - def start(self): - sys.stderr.flush() - call(['toilet', '-f', 'smbraille', 'Starting QEMU']) - sys.stdout.flush() - self.log.info("Starting VM, ssh port redirected to localhost:%s (inside docker, not exposed by default)", self.ssh_port) - if self.is_running(): - raise VMError("VM is running, shutdown first") - if self._interactive: - self.qemu_process = Popen(shlex.split(QEMU_RUN_INTERACTIVE.format(ssh_port=self.ssh_port, ram=self.ram))) - return - else: - self.log.info("Starting in non-interactive mode. Terminal output is disabled.") - self.qemu_process = Popen(shlex.split(QEMU_RUN.format(ssh_port=self.ssh_port, ram=self.ram)), stdout=DEVNULL, stdin=DEVNULL, stderr=PIPE) - def keep_waiting(): - return self.is_running() - - logging.info("waiting for ssh to be open in the VM (timeout {}s)".format(self.timeout_s)) - ssh_working = wait_ssh_open('127.0.0.1', self.ssh_port, keep_waiting, self.timeout_s) - - if not self.is_running(): - (_, stderr) = self.qemu_process.communicate() - raise VMError("VM failed to start, retcode: {}, stderr: {}".format( self.retcode(), stderr.decode())) - - if not ssh_working: - if self.is_running(): - self.log.error("VM running but SSH is not working") - self.terminate() - raise VMError("SSH is not working after {} seconds".format(self.timeout_s)) - self.log.info("VM is online and SSH is up") - - def is_running(self): - return self.qemu_process and self.qemu_process.poll() is None - - def retcode(self): - if self.qemu_process: - return self.qemu_process.poll() - else: - raise RuntimeError('qemu process was not started') - - def terminate(self): - if self.qemu_process: - logging.info("send term signal") - self.qemu_process.terminate() - time.sleep(3) - logging.info("send kill signal") - self.qemu_process.kill() - self.qemu_process.wait() - self.qemu_process = None - else: - logging.warn("VM.terminate: QEMU process not running") - - def detach(self): - self._detach = True - - def shutdown(self): - if self.qemu_process: - logging.info("Shutdown via ssh") - # ssh connection will be closed with an error - call(["ssh", "-o", "StrictHostKeyChecking=no", "-p", str(self.ssh_port), "qemu@localhost", - "sudo", "poweroff"]) - ret = self.qemu_process.wait(timeout=90) - self.log.info("VM on port %s has shutdown (exit code %d)", self.ssh_port, ret) - self.qemu_process = None - - def wait(self): - if self.qemu_process: - self.qemu_process.wait() - - def __del__(self): - if self.is_running and not self._detach: - logging.info("VM destructor hit") - self.terminate() - - -def qemu_ssh(ssh_port=QEMU_SSH_PORT, *args): - check_call(["ssh", "-o", "ServerAliveInterval=5", "-o", "StrictHostKeyChecking=no", "-p{}".format(ssh_port), "qemu@localhost", *args]) - - -def qemu_rsync(ssh_port, local_path, remote_path): - check_call(['rsync', '-e', 'ssh -o StrictHostKeyChecking=no -p{}'.format(ssh_port), '-a', local_path, 'qemu@localhost:{}'.format(remote_path)]) - -def qemu_rsync_to_host(ssh_port, remote_path, local_path): - check_call(['rsync', '-e', 'ssh -o StrictHostKeyChecking=no -p{}'.format(ssh_port), '-va', 'qemu@localhost:{}'.format(remote_path), local_path]) - - -@retry(subprocess.CalledProcessError) -def qemu_provision(ssh_port=QEMU_SSH_PORT): - import glob - logging.info("Provisioning the VM with artifacts and sources") - - artifact = glob.glob('/work/mxnet/build/*.whl') - for x in artifact: - qemu_rsync(ssh_port, x, 'mxnet_dist/') - qemu_rsync(ssh_port, '/work/runtime_functions.py','') - qemu_rsync(ssh_port, '/work/vmcontrol.py','') - qemu_rsync(ssh_port, 'mxnet/tests', 'mxnet') - qemu_rsync(ssh_port, 'mxnet/ci/qemu/test_requirements.txt', 'mxnet/test_requirements.txt') - logging.info("Provisioning completed successfully.") - - -def wait_ssh_open(server, port, keep_waiting=None, timeout=None): - """ Wait for network service to appear - @param server: host to connect to (str) - @param port: port (int) - @param timeout: in seconds, if None or 0 wait forever - @return: True of False, if timeout is None may return only True or - throw unhandled network exception - """ - import socket - import errno - import time - log = logging.getLogger('wait_ssh_open') - sleep_s = 1 - if timeout: - from time import time as now - # time module is needed to calc timeout shared between two exceptions - end = now() + timeout - - while True: - log.debug("Sleeping for %s second(s)", sleep_s) - time.sleep(sleep_s) - s = socket.socket() - try: - if keep_waiting and not keep_waiting(): - log.debug("keep_waiting() is set and evaluates to False") - return False - - if timeout: - next_timeout = end - now() - if next_timeout < 0: - log.debug("connect time out") - return False - else: - log.debug("connect timeout %d s", next_timeout) - s.settimeout(next_timeout) - - log.debug("connect %s:%d", server, port) - s.connect((server, port)) - ret = s.recv(1024).decode() - if ret and ret.startswith('SSH'): - s.close() - log.info("wait_ssh_open: port %s:%s is open and ssh is ready", server, port) - return True - else: - log.debug("Didn't get the SSH banner") - s.close() - - except ConnectionError as err: - log.debug("ConnectionError %s", err) - if sleep_s == 0: - sleep_s = 1 - else: - sleep_s *= 2 - - except socket.gaierror as err: - log.debug("gaierror %s",err) - return False - - except socket.timeout as err: - # this exception occurs only if timeout is set - if timeout: - return False - - except TimeoutError as err: - # catch timeout exception from underlying network library - # this one is different from socket.timeout - raise - - -def wait_port_open(server, port, timeout=None): - """ Wait for network service to appear - @param server: host to connect to (str) - @param port: port (int) - @param timeout: in seconds, if None or 0 wait forever - @return: True of False, if timeout is None may return only True or - throw unhandled network exception - """ - import socket - import errno - import time - sleep_s = 0 - if timeout: - from time import time as now - # time module is needed to calc timeout shared between two exceptions - end = now() + timeout - - while True: - logging.debug("Sleeping for %s second(s)", sleep_s) - time.sleep(sleep_s) - s = socket.socket() - try: - if timeout: - next_timeout = end - now() - if next_timeout < 0: - return False - else: - s.settimeout(next_timeout) - - logging.info("connect %s %d", server, port) - s.connect((server, port)) - - except ConnectionError as err: - logging.debug("ConnectionError %s", err) - if sleep_s == 0: - sleep_s = 1 - - except socket.gaierror as err: - logging.debug("gaierror %s",err) - return False - - except socket.timeout as err: - # this exception occurs only if timeout is set - if timeout: - return False - - except TimeoutError as err: - # catch timeout exception from underlying network library - # this one is different from socket.timeout - raise - - else: - s.close() - logging.info("wait_port_open: port %s:%s is open", server, port) - return True - diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 4e7b0db1b648..c321a0ae0ad4 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -1409,6 +1409,18 @@ test_ubuntu_cpu_python3() { popd } +# QEMU based ARM tests +unittest_ubuntu_python3_arm() { + set -ex + export PYTHONPATH=./python/ + export MXNET_MKLDNN_DEBUG=0 # Ignored if not present + export MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0 + export MXNET_SUBGRAPH_VERBOSE=0 + export MXNET_ENABLE_CYTHON=0 + export DMLC_LOG_STACK_TRACE_DEPTH=10 + python3 -m nose --verbose tests/python/unittest/test_engine.py +} + # Functions that run the nightly Tests: #Runs Apache RAT Check on MXNet Source for License Headers diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index eddfd5bcd901..6b720abbcad6 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -27,9 +27,6 @@ mx_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmo mx_lib_cython = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, python/mxnet/_cy3/*.so, build/3rdparty/openmp/runtime/src/libomp.so, python/mxnet/_ffi/_cy3/*.so' mx_lib_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a' -// Python wheels -mx_pip = 'build/*.whl' - // mxnet cmake libraries, in cmake builds we do not produce a libnvvm static library by default. mx_cmake_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so' mx_cmake_lib_no_tvm_op = 'build/libmxnet.so, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so' @@ -502,27 +499,28 @@ def compile_armv8_jetson_gpu() { }] } -def compile_armv7_cpu() { - return ['ARMv7':{ +def compile_armv6_cpu() { + return ['ARMv6':{ node(NODE_LINUX_CPU) { - ws('workspace/build-ARMv7') { + ws('workspace/build-ARMv6') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run('armv7', 'build_armv7', false) - utils.pack_lib('armv7', mx_pip) + utils.docker_run('armv6', 'build_armv6', false) + utils.pack_lib('armv6', mx_lib) } } } }] } -def compile_armv6_cpu() { - return ['ARMv6':{ +def compile_armv7_cpu() { + return ['ARMv7':{ node(NODE_LINUX_CPU) { - ws('workspace/build-ARMv6') { + ws('workspace/build-ARMv7') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run('armv6', 'build_armv6', false) + utils.docker_run('armv7', 'build_armv7', false) + utils.pack_lib('armv7', mx_lib) } } } @@ -536,6 +534,7 @@ def compile_armv8_cpu() { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() utils.docker_run('armv8', 'build_armv8', false) + utils.pack_lib('armv8', mx_lib) } } } @@ -1431,8 +1430,21 @@ def test_qemu_armv7_cpu() { node(NODE_LINUX_CPU) { ws('workspace/ut-armv7-qemu') { timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init('armv7', mx_pip) - sh "ci/build.py --docker-registry ${env.DOCKER_CACHE_REGISTRY} -p test.arm_qemu ./runtime_functions.py run_ut_py3_qemu" + utils.unpack_and_init('armv7', mx_lib) + utils.docker_run('test.armv7', 'unittest_ubuntu_python3_arm', false) + } + } + } + }] +} + +def test_qemu_armv8_cpu() { + return ['ARMv8 QEMU': { + node(NODE_LINUX_CPU) { + ws('workspace/ut-armv8-qemu') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('armv8', mx_lib) + utils.docker_run('test.armv8', 'unittest_ubuntu_python3_arm', false) } } } diff --git a/ci/jenkins/Jenkinsfile_edge b/ci/jenkins/Jenkinsfile_edge index 9d8e01399d7c..9e2abf558dd2 100644 --- a/ci/jenkins/Jenkinsfile_edge +++ b/ci/jenkins/Jenkinsfile_edge @@ -40,11 +40,12 @@ core_logic: { custom_steps.compile_armv8_cpu(), custom_steps.compile_armv8_android_cpu(), custom_steps.compile_armv7_android_cpu() - ]) + ]) utils.parallel_stage('Tests', [ - custom_steps.test_qemu_armv7_cpu() - ]) + custom_steps.test_qemu_armv7_cpu(), + custom_steps.test_qemu_armv8_cpu() + ]) } , failure_handler: { diff --git a/ci/qemu/README.md b/ci/qemu/README.md deleted file mode 100644 index 4beca4a03690..000000000000 --- a/ci/qemu/README.md +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - - - - - - - - -# QEMU base image creation - -This folder contains scripts and configuration to create a QEMU virtual drive with a debian system. - -The order of execution is: -- `init.sh` to download the installation kernel and ramdisk -- `preseed.sh` to preseed the debian installer so it doesn't ask questions -- `copy.sh` to extract the kernel and ramdisk from the installed system -- `run.sh` to boot the system and fine tune the image - -# Description of the process: - -# Preparing the base image - -First, an installation is made using installer kernel and initrd by using the scripts above. - -# After installation, we extract initrd and kernel from the installation drive - -The commands look like this: - -`virt-copy-out -a hda.qcow2 /boot/initrd.img-4.15.0-30-generic-lpae .` - -In the same way for the kernel. - -Then we install packages and dependencies on the qemu image: - -apt install -y sudo python3-dev virtualenv wget libgfortran3 libopenblas-base rsync build-essential -libopenblas-dev libomp5 - -We enable sudo and passwordless logins: - -Add file `/etc/sudoers.d/01-qemu` -With content: -``` -qemu ALL=(ALL) NOPASSWD: ALL -``` - -Edit: `/etc/ssh/sshd_config` - -And set the following options: -``` -PermitEmptyPasswords yes -PasswordAuthentication yes -PermitRootLogin yes -``` - -Disable root and user passwords with `passwd -d` - -Edit ` /etc/pam.d/common-auth` - -Replace `auth [success=1 default=ignore] pam_unix.so nullok_secure` by -``` -auth [success=1 default=ignore] pam_unix.so nullok -``` - -As root to install system wide: - -``` -wget -nv https://bootstrap.pypa.io/get-pip.py -python3 get-pip.py -apt-get clean -``` - -Afterwards install mxnet python3 deps: - -``` -pip3 install -r mxnet_requirements.txt -``` - - -To access qemu control console from tmux: `ctrl-a a c` - -# CI and Testing - -Formally, [runtime_functions.py](https://github.com/apache/incubator-mxnet/blob/master/ci/docker/qemu/runtime_functions.py) would [run](https://github.com/apache/incubator-mxnet/blob/8beea18e3d9835f90b59d3f9de8f9945ac819423/ci/docker/qemu/runtime_functions.py#L81) *pip install -r [mxnet/tests/requirements.txt](https://github.com/apache/incubator-mxnet/blob/master/tests/requirements.txt)*. If the requirements change, there can be an unfortunate side-effect that there are no wheel files for Raspberry Pi for the new requirement. This would trigger a build from source on the emulator, which can take a long time and cause job timeouts. Therefore, we no longer install the `tests/requirements.txt` requirements, but rather rely on [test_requirements.txt](https://github.com/apache/incubator-mxnet/blob/master/ci/qemu/test_requirements.txt) to maintain the requirements for the qemu tests. Should any requirements changes lead to a job time out, it is incumbent on the submitter to update the image to include the requirement and unblock ci. diff --git a/ci/qemu/copy.sh b/ci/qemu/copy.sh deleted file mode 100755 index f39a9d083509..000000000000 --- a/ci/qemu/copy.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -exuo pipefail - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Extract kernel from image - -set -ex -virt-copy-out -a vda.qcow2 /boot/vmlinuz-3.16.0-6-armmp-lpae /boot/initrd.img-3.16.0-6-armmp-lpae . diff --git a/ci/qemu/init.sh b/ci/qemu/init.sh deleted file mode 100755 index 1698cb10f272..000000000000 --- a/ci/qemu/init.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -exuo pipefail - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Download the installer and ramdisk for intallation -set -ex -wget -O installer-vmlinuz http://http.us.debian.org/debian/dists/jessie/main/installer-armhf/current/images/netboot/vmlinuz -wget -O installer-initrd.gz http://http.us.debian.org/debian/dists/jessie/main/installer-armhf/current/images/netboot/initrd.gz diff --git a/ci/qemu/initrd_modif/inittab b/ci/qemu/initrd_modif/inittab deleted file mode 100644 index 064512595fbc..000000000000 --- a/ci/qemu/initrd_modif/inittab +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# /etc/inittab -# busybox init configuration for debian-installer - -# main rc script -::sysinit:/sbin/reopen-console /sbin/debian-installer-startup - -# main setup program -::respawn:/sbin/reopen-console /sbin/debian-installer - -# convenience shells -tty2::askfirst:-/bin/sh -tty3::askfirst:-/bin/sh - -# logging -#tty4::respawn:/usr/bin/tail -f /var/log/syslog - -# Stuff to do before rebooting -::ctrlaltdel:/sbin/shutdown > /dev/null 2>&1 - -# re-exec init on receipt of SIGHUP/SIGUSR1 -::restart:/sbin/init diff --git a/ci/qemu/install.sh b/ci/qemu/install.sh deleted file mode 100755 index 8531b033d074..000000000000 --- a/ci/qemu/install.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -ex -rm -f vda.qcow2 -sudo ./preseed.sh -qemu-img create -f qcow2 vda.qcow2 10G -qemu-system-arm -M virt -m 1024 \ - -kernel installer-vmlinuz \ - -append BOOT_DEBUG=2,DEBIAN_FRONTEND=noninteractive \ - -initrd installer-initrd_automated.gz \ - -drive if=none,file=vda.qcow2,format=qcow2,id=hd \ - -device virtio-blk-device,drive=hd \ - -netdev user,id=mynet \ - -device virtio-net-device,netdev=mynet \ - -nographic -no-reboot diff --git a/ci/qemu/mxnet_requirements.txt b/ci/qemu/mxnet_requirements.txt deleted file mode 100644 index 2ab0fd9612e5..000000000000 --- a/ci/qemu/mxnet_requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -urllib3<1.23,>=1.21.1 -requests<2.19.0,>=2.18.4 -graphviz<0.9.0,>=0.8.1 -numpy>1.16.0,<2.0.0 -mock -nose -nose-timer diff --git a/ci/qemu/preseed.cfg b/ci/qemu/preseed.cfg deleted file mode 100644 index 23a8fc3baebf..000000000000 --- a/ci/qemu/preseed.cfg +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -d-i debian-installer/locale string en_US -d-i keyboard-configuration/xkb-keymap select us -d-i netcfg/get_hostname string debian-qemu -d-i netcfg/get_domain string lab -d-i passwd/root-login boolean true -d-i passwd/root-password password debian -d-i passwd/root-password-again password debian -d-i clock-setup/utc boolean true -d-i mirror/country string US -d-i mirror/https/proxy string -d-i mirror/http/proxy string -d-i mirror/ftp/proxy string -d-i mirror/http/countries select US -d-i mirror/http/hostname string ftp.us.debian.org -d-i mirror/http/mirror select ftp.us.debian.org -d-i localechooser/preferred-locale select en_US.UTF-8 -apt-mirror-setup apt-setup/use_mirror boolean false -apt-mirror-setup apt-setup/mirror/error select Retry -d-i passwd/username string qemu -d-i passwd/user-password password qemu -d-i passwd/user-password-again password qemu -user-setup-udeb passwd/username string qemu -user-setup-udeb passwd/user-fullname string qemu -d-i time/zone string GMT -d-i partman-auto/choose_recipe select atomic -#partman-auto partman-auto/select_disk select /var/lib/partman/devices/=dev=vda -#partman-auto partman-auto/automatically_partition select -#partman-target partman-target/no_root error -#partman-auto partman-auto/init_automatically_partition select 50some_device__________regular -#partman-auto partman-auto/disk string vda -#partman-auto partman-auto/expert_recipe string \ -# boot-root :: \ -# 100 10000 1000000000 ext4 \ -# $primary{ } \ -# lv_name{ root } \ -# method{ format } \ -# format{ } \ -# use_filesystem{ } \ -# filesystem{ ext4 } \ -# mountpoint{ / } . -# -#d-i partman-partitioning/confirm_write_new_label boolean true -#d-i partman/choose_partition select finish -#d-i partman/confirm boolean true -#d-i partman/confirm_nooverwrite boolean true -#partman-base partman/choose_partition select 90finish__________finish -#partman-basicfilesystems partman-basicfilesystems/swap_check_failed boolean -d-i popularity-contest/participate boolean false -d-i tasksel/first multiselect SSH server, standard system utilities -d-i debian-installer/main-menu select Finish the installation -d-i debian-installer/exit/poweroff boolean true diff --git a/ci/qemu/preseed.sh b/ci/qemu/preseed.sh deleted file mode 100755 index ad005548fbbe..000000000000 --- a/ci/qemu/preseed.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -exuo pipefail - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -ex -rm -rf initrd -mkdir -p initrd -cd initrd -gunzip -c ../installer-initrd.gz | cpio -i -cp ../preseed.cfg . -cp ../initrd_modif/inittab etc/inittab -cp ../initrd_modif/S10syslog lib/debian-installer-startup.d/S10syslog -find . | cpio --create --format 'newc' | gzip -c > ../installer-initrd_automated.gz -echo "Done!" diff --git a/ci/qemu/run.sh b/ci/qemu/run.sh deleted file mode 100755 index eeff4e1fdccb..000000000000 --- a/ci/qemu/run.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -exuo pipefail - - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -ex -disk=${1:-vda.qcow2} -qemu-system-arm -M virt -m 1024 \ - -kernel vmlinuz-3.16.0-6-armmp-lpae \ - -initrd initrd.img-3.16.0-6-armmp-lpae \ - -smp 4 \ - -append 'root=/dev/vda1' \ - -drive if=none,file=$disk,format=qcow2,id=hd \ - -device virtio-blk-device,drive=hd \ - -netdev user,id=mynet,hostfwd=tcp::2222-:22 \ - -device virtio-net-device,netdev=mynet \ - -nographic -# -display none diff --git a/ci/qemu/test_requirements.txt b/ci/qemu/test_requirements.txt deleted file mode 100644 index 77037d89c673..000000000000 --- a/ci/qemu/test_requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -mock -nose -nose-timer \ No newline at end of file From 80fadd10a3c859977aa6be747b7552366a387acf Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Sun, 12 Apr 2020 01:14:47 +0000 Subject: [PATCH 14/20] Remove unused functions in Jenkins_steps.groovy --- ci/jenkins/Jenkins_steps.groovy | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index 6b720abbcad6..eb4c0099579f 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -1451,31 +1451,6 @@ def test_qemu_armv8_cpu() { }] } -// This is for running on PRs -def docs_website() { - return ['Docs': { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - timeout(time: max_time, unit: 'MINUTES') { - - unstash 'jekyll-artifacts' - unstash 'python-artifacts' - utils.docker_run('ubuntu_cpu_jekyll', 'build_docs_small', false) - - master_url = utils.get_jenkins_master_url() - if ( master_url == 'jenkins.mxnet-ci.amazon-ml.com') { - // TODO: Make sure this scripts publish the website from the right folder - sh "ci/other/ci_deploy_doc.sh ${env.BRANCH_NAME} ${env.BUILD_NUMBER}" - } else { - print "Skipping staging documentation publishing since we are not running in prod. Host: {$master_url}" - } - } - } - } - }] -} - - // This creates the MXNet binary needed for generating different docs sets def compile_unix_lite() { return ['MXNet lib': { From 0cc758066b8840750e434afe6be2989e9e7877b0 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Sun, 12 Apr 2020 19:26:18 +0000 Subject: [PATCH 15/20] Skip quantized_elemwise_mul due QuantizedElemwiseMulOpShape bug --- tests/python/quantization/test_quantization.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py index 8c6100d50765..4e42a5dfcf60 100644 --- a/tests/python/quantization/test_quantization.py +++ b/tests/python/quantization/test_quantization.py @@ -353,6 +353,10 @@ def check_quantized_elemwise_mul(data_shape, qtype): if is_test_for_native_cpu(): print('skipped testing quantized_elemwise_mul for native cpu since it is not supported yet') return + if is_test_for_mkldnn(): + print('skipped testing quantized_elemwise_mul for mkldnn due to ' + 'https://github.com/apache/incubator-mxnet/issues/18034') + return elif qtype != 'int8': print('skipped testing quantized_elemwise_mul for not supported data type') return From 941b7ab79a6a50f5d5405badd8937b35bf365995 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Mon, 13 Apr 2020 19:27:04 +0000 Subject: [PATCH 16/20] Fix R package installation https://github.com/apache/incubator-mxnet/issues/18042 --- ci/docker/Dockerfile.build.ubuntu_build_cuda | 6 +++--- ci/docker/Dockerfile.build.ubuntu_cpu | 8 ++++---- ci/docker/Dockerfile.build.ubuntu_cpu_julia | 8 ++++---- ci/docker/Dockerfile.build.ubuntu_gpu_cu101 | 8 ++++---- ci/docker/Dockerfile.build.ubuntu_nightly_cpu | 8 ++++---- ci/docker/Dockerfile.build.ubuntu_nightly_gpu | 8 ++++---- ci/docker/install/ubuntu_r.sh | 5 ++++- 7 files changed, 27 insertions(+), 24 deletions(-) diff --git a/ci/docker/Dockerfile.build.ubuntu_build_cuda b/ci/docker/Dockerfile.build.ubuntu_build_cuda index 7623048d6909..4f5d07a40236 100644 --- a/ci/docker/Dockerfile.build.ubuntu_build_cuda +++ b/ci/docker/Dockerfile.build.ubuntu_build_cuda @@ -35,9 +35,6 @@ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ COPY install/sbt.gpg /work/ RUN /work/ubuntu_scala.sh -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh COPY install/ubuntu_perl.sh /work/ RUN /work/ubuntu_perl.sh COPY install/ubuntu_clang.sh /work/ @@ -48,6 +45,9 @@ COPY install/ubuntu_binutils.sh /work/ RUN /work/ubuntu_binutils.sh COPY install/thrust.sh /work/ RUN /work/thrust.sh +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh ENV CUDNN_VERSION=7.6.5.32 COPY install/ubuntu_cudnn.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu b/ci/docker/Dockerfile.build.ubuntu_cpu index b1eb89bb3f36..3c17b748e3ab 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu +++ b/ci/docker/Dockerfile.build.ubuntu_cpu @@ -39,10 +39,6 @@ RUN /work/ubuntu_scala.sh COPY install/ubuntu_clojure.sh /work/ RUN /work/ubuntu_clojure.sh -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - COPY install/ubuntu_perl.sh /work/ RUN /work/ubuntu_perl.sh @@ -64,6 +60,10 @@ RUN /work/ubuntu_caffe.sh COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh + COPY install/ubuntu_docs.sh /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_julia b/ci/docker/Dockerfile.build.ubuntu_cpu_julia index b1eb89bb3f36..3c17b748e3ab 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_julia +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_julia @@ -39,10 +39,6 @@ RUN /work/ubuntu_scala.sh COPY install/ubuntu_clojure.sh /work/ RUN /work/ubuntu_clojure.sh -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - COPY install/ubuntu_perl.sh /work/ RUN /work/ubuntu_perl.sh @@ -64,6 +60,10 @@ RUN /work/ubuntu_caffe.sh COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh + COPY install/ubuntu_docs.sh /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 index 9a582c4cfd4f..aa2fdba837e3 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 @@ -36,10 +36,6 @@ COPY install/ubuntu_scala.sh /work/ COPY install/sbt.gpg /work/ RUN /work/ubuntu_scala.sh -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - COPY install/ubuntu_perl.sh /work/ RUN /work/ubuntu_perl.sh @@ -79,6 +75,10 @@ RUN /work/thrust.sh COPY install/ubuntu_binutils.sh /work/ RUN /work/ubuntu_binutils.sh +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh + # Always last ARG USER_ID=0 ARG GROUP_ID=0 diff --git a/ci/docker/Dockerfile.build.ubuntu_nightly_cpu b/ci/docker/Dockerfile.build.ubuntu_nightly_cpu index 5717df1b9130..49a665e57c33 100644 --- a/ci/docker/Dockerfile.build.ubuntu_nightly_cpu +++ b/ci/docker/Dockerfile.build.ubuntu_nightly_cpu @@ -36,10 +36,6 @@ COPY install/ubuntu_scala.sh /work/ COPY install/sbt.gpg /work/ RUN /work/ubuntu_scala.sh -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - COPY install/ubuntu_perl.sh /work/ RUN /work/ubuntu_perl.sh @@ -52,6 +48,10 @@ RUN /work/ubuntu_caffe.sh COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh + COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_nightly_gpu b/ci/docker/Dockerfile.build.ubuntu_nightly_gpu index 5e812c433b43..82d049792c1b 100644 --- a/ci/docker/Dockerfile.build.ubuntu_nightly_gpu +++ b/ci/docker/Dockerfile.build.ubuntu_nightly_gpu @@ -36,10 +36,6 @@ COPY install/ubuntu_scala.sh /work/ COPY install/sbt.gpg /work/ RUN /work/ubuntu_scala.sh -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - COPY install/ubuntu_perl.sh /work/ RUN /work/ubuntu_perl.sh @@ -58,6 +54,10 @@ RUN /work/ubuntu_caffe.sh COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh + COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/install/ubuntu_r.sh b/ci/docker/install/ubuntu_r.sh index b7ddea78f90a..44ebf7c0799e 100755 --- a/ci/docker/install/ubuntu_r.sh +++ b/ci/docker/install/ubuntu_r.sh @@ -44,4 +44,7 @@ apt-get install -y --allow-unauthenticated \ r-base-dev \ texinfo \ texlive \ - texlive-fonts-extra + texlive-fonts-extra + +# Delete cran repository as it requires --allow-unauthenticated +find /etc/apt -name "*.list" | xargs sed -i 's/.*cran\.rstudio.com.*//' From 006ad0a925fdbfb2ebd935d417126eadad46cf3f Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Mon, 13 Apr 2020 20:59:47 +0000 Subject: [PATCH 17/20] Fix centos ccache --- ci/docker/install/centos7_ccache.sh | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/ci/docker/install/centos7_ccache.sh b/ci/docker/install/centos7_ccache.sh index 19f7cefec3ad..955287b228e8 100755 --- a/ci/docker/install/centos7_ccache.sh +++ b/ci/docker/install/centos7_ccache.sh @@ -23,19 +23,17 @@ set -ex pushd . -yum -y install autoconf libb2-devel libzstd-devel +yum -y install autoconf libb2-devel libzstd-devel gperf mkdir -p /work/deps cd /work/deps git clone --recursive https://github.com/ccache/ccache.git cd ccache -# Checkout a fixed & tested pre-release commit of ccache 4 -# ccache 4 contains fixes for caching nvcc output: https://github.com/ccache/ccache/pull/381 -git checkout 2e7154e67a5dd56852dae29d4c418d4ddc07c230 +git checkout v3.7.9 ./autogen.sh -CXXFLAGS="-Wno-missing-field-initializers" ./configure --disable-man +./configure --disable-man make -j$(nproc) make install From 24344c563e1282db8d18de8b5b51a44f5cf3a116 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 14 Apr 2020 00:52:22 +0000 Subject: [PATCH 18/20] Fix GPU Makefile staticbuild on CentOS7 --- make/staticbuild/linux_cu100.mk | 4 + make/staticbuild/linux_cu101.mk | 4 + make/staticbuild/linux_cu102.mk | 4 + make/staticbuild/linux_cu75.mk | 167 ------------------------------- make/staticbuild/linux_cu80.mk | 170 -------------------------------- make/staticbuild/linux_cu90.mk | 4 + make/staticbuild/linux_cu91.mk | 4 + make/staticbuild/linux_cu92.mk | 4 + 8 files changed, 24 insertions(+), 337 deletions(-) delete mode 100644 make/staticbuild/linux_cu75.mk delete mode 100644 make/staticbuild/linux_cu80.mk diff --git a/make/staticbuild/linux_cu100.mk b/make/staticbuild/linux_cu100.mk index 862c1f56f8ae..0bd7d9ae61dd 100644 --- a/make/staticbuild/linux_cu100.mk +++ b/make/staticbuild/linux_cu100.mk @@ -66,7 +66,11 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda +ifdef USE_SYSTEM_CUDA +USE_CUDA_PATH = /usr/local/cuda-10.0 +else USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-10.0 +endif # whether to use CuDNN library USE_CUDNN = 1 diff --git a/make/staticbuild/linux_cu101.mk b/make/staticbuild/linux_cu101.mk index 6161431454ba..2715d264d995 100644 --- a/make/staticbuild/linux_cu101.mk +++ b/make/staticbuild/linux_cu101.mk @@ -66,7 +66,11 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda +ifdef USE_SYSTEM_CUDA +USE_CUDA_PATH = /usr/local/cuda-10.1 +else USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-10.1 +endif # whether to use CuDNN library USE_CUDNN = 1 diff --git a/make/staticbuild/linux_cu102.mk b/make/staticbuild/linux_cu102.mk index 4bc649fb5423..c328e688c56b 100644 --- a/make/staticbuild/linux_cu102.mk +++ b/make/staticbuild/linux_cu102.mk @@ -66,7 +66,11 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda +ifdef USE_SYSTEM_CUDA +USE_CUDA_PATH = /usr/local/cuda-10.2 +else USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-10.2 +endif # whether to use CuDNN library USE_CUDNN = 1 diff --git a/make/staticbuild/linux_cu75.mk b/make/staticbuild/linux_cu75.mk deleted file mode 100644 index e263794600df..000000000000 --- a/make/staticbuild/linux_cu75.mk +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-7.5 - -# whether use CuDNN R3 library -USE_CUDNN = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu80.mk b/make/staticbuild/linux_cu80.mk deleted file mode 100644 index a42220d3d467..000000000000 --- a/make/staticbuild/linux_cu80.mk +++ /dev/null @@ -1,170 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-8.0 - -# whether to use CuDNN library -USE_CUDNN = 1 - -# whether to use NCCL library -USE_NCCL = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu90.mk b/make/staticbuild/linux_cu90.mk index c46c10f6358b..bd1db8014f19 100644 --- a/make/staticbuild/linux_cu90.mk +++ b/make/staticbuild/linux_cu90.mk @@ -66,7 +66,11 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda +ifdef USE_SYSTEM_CUDA +USE_CUDA_PATH = /usr/local/cuda-9.0 +else USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.0 +endif # whether to use CuDNN library USE_CUDNN = 1 diff --git a/make/staticbuild/linux_cu91.mk b/make/staticbuild/linux_cu91.mk index b2a33d7e36c8..a7d1c59ba7a9 100644 --- a/make/staticbuild/linux_cu91.mk +++ b/make/staticbuild/linux_cu91.mk @@ -66,7 +66,11 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda +ifdef USE_SYSTEM_CUDA +USE_CUDA_PATH = /usr/local/cuda-9.1 +else USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.1 +endif # whether to use CuDNN library USE_CUDNN = 1 diff --git a/make/staticbuild/linux_cu92.mk b/make/staticbuild/linux_cu92.mk index bbaa4bfcd772..0a308894d0bb 100644 --- a/make/staticbuild/linux_cu92.mk +++ b/make/staticbuild/linux_cu92.mk @@ -66,7 +66,11 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda +ifdef USE_SYSTEM_CUDA +USE_CUDA_PATH = /usr/local/cuda-9.2 +else USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.2 +endif # whether to use CuDNN library USE_CUDNN = 1 From 7aaae05c146d456e1c34902985175b50cd1a650a Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 14 Apr 2020 03:52:08 +0000 Subject: [PATCH 19/20] CentOS7 NCCL --- Makefile | 4 +++ .../Dockerfile.publish.centos7_gpu_cu100 | 4 +++ .../Dockerfile.publish.centos7_gpu_cu101 | 4 +++ .../Dockerfile.publish.centos7_gpu_cu102 | 4 +++ ci/docker/Dockerfile.publish.centos7_gpu_cu90 | 4 +++ ci/docker/Dockerfile.publish.centos7_gpu_cu92 | 4 +++ ci/docker/install/centos7_nccl.sh | 35 +++++++++++++++++++ ci/docker/runtime_functions.sh | 1 + cmake/Modules/FindNCCL.cmake | 10 ++++-- config/distribution/linux_cu100.cmake | 3 +- config/distribution/linux_cu101.cmake | 3 +- config/distribution/linux_cu102.cmake | 3 +- config/distribution/linux_cu75.cmake | 35 ------------------- config/distribution/linux_cu80.cmake | 35 ------------------- config/distribution/linux_cu90.cmake | 3 +- config/distribution/linux_cu91.cmake | 3 +- config/distribution/linux_cu92.cmake | 3 +- 17 files changed, 80 insertions(+), 78 deletions(-) create mode 100755 ci/docker/install/centos7_nccl.sh delete mode 100644 config/distribution/linux_cu75.cmake delete mode 100644 config/distribution/linux_cu80.cmake diff --git a/Makefile b/Makefile index ed19e69390bd..f51199589354 100644 --- a/Makefile +++ b/Makefile @@ -541,7 +541,11 @@ ifeq ($(USE_CUDA), 1) CFLAGS += -I$(USE_NCCL_PATH)/include LDFLAGS += -L$(USE_NCCL_PATH)/lib endif + ifdef USE_SYSTEM_CUDA + LDFLAGS += -lnccl_static + else LDFLAGS += -lnccl + endif CFLAGS += -DMXNET_USE_NCCL=1 else CFLAGS += -DMXNET_USE_NCCL=0 diff --git a/ci/docker/Dockerfile.publish.centos7_gpu_cu100 b/ci/docker/Dockerfile.publish.centos7_gpu_cu100 index 78398c97dac8..f9469fcb186f 100644 --- a/ci/docker/Dockerfile.publish.centos7_gpu_cu100 +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu100 @@ -28,6 +28,10 @@ COPY install/centos7_python.sh /work/ RUN /work/centos7_python.sh COPY install/centos7_scala.sh /work/ RUN /work/centos7_scala.sh +ENV SHORT_CUDA_VERSION=10.0 +ENV SHORT_NCCL_VERSION=2.4.8 +COPY install/centos7_nccl.sh /work/ +RUN /work/centos7_nccl.sh ARG USER_ID=0 COPY install/centos7_adduser.sh /work/ diff --git a/ci/docker/Dockerfile.publish.centos7_gpu_cu101 b/ci/docker/Dockerfile.publish.centos7_gpu_cu101 index 0a7fd33b8bf1..00be436c0412 100644 --- a/ci/docker/Dockerfile.publish.centos7_gpu_cu101 +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu101 @@ -28,6 +28,10 @@ COPY install/centos7_python.sh /work/ RUN /work/centos7_python.sh COPY install/centos7_scala.sh /work/ RUN /work/centos7_scala.sh +ENV SHORT_CUDA_VERSION=10.1 +ENV SHORT_NCCL_VERSION=2.4.8 +COPY install/centos7_nccl.sh /work/ +RUN /work/centos7_nccl.sh ARG USER_ID=0 COPY install/centos7_adduser.sh /work/ diff --git a/ci/docker/Dockerfile.publish.centos7_gpu_cu102 b/ci/docker/Dockerfile.publish.centos7_gpu_cu102 index b7549ffbb668..27a625e4641d 100644 --- a/ci/docker/Dockerfile.publish.centos7_gpu_cu102 +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu102 @@ -28,6 +28,10 @@ COPY install/centos7_python.sh /work/ RUN /work/centos7_python.sh COPY install/centos7_scala.sh /work/ RUN /work/centos7_scala.sh +ENV SHORT_CUDA_VERSION=10.2 +ENV SHORT_NCCL_VERSION=2.4.8 +COPY install/centos7_nccl.sh /work/ +RUN /work/centos7_nccl.sh ARG USER_ID=0 COPY install/centos7_adduser.sh /work/ diff --git a/ci/docker/Dockerfile.publish.centos7_gpu_cu90 b/ci/docker/Dockerfile.publish.centos7_gpu_cu90 index a562d2580a89..23217148f87c 100644 --- a/ci/docker/Dockerfile.publish.centos7_gpu_cu90 +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu90 @@ -28,6 +28,10 @@ COPY install/centos7_python.sh /work/ RUN /work/centos7_python.sh COPY install/centos7_scala.sh /work/ RUN /work/centos7_scala.sh +ENV SHORT_CUDA_VERSION=9.0 +ENV SHORT_NCCL_VERSION=2.4.8 +COPY install/centos7_nccl.sh /work/ +RUN /work/centos7_nccl.sh ARG USER_ID=0 COPY install/centos7_adduser.sh /work/ diff --git a/ci/docker/Dockerfile.publish.centos7_gpu_cu92 b/ci/docker/Dockerfile.publish.centos7_gpu_cu92 index c6dd32399b67..75277f0f1fd2 100644 --- a/ci/docker/Dockerfile.publish.centos7_gpu_cu92 +++ b/ci/docker/Dockerfile.publish.centos7_gpu_cu92 @@ -28,6 +28,10 @@ COPY install/centos7_python.sh /work/ RUN /work/centos7_python.sh COPY install/centos7_scala.sh /work/ RUN /work/centos7_scala.sh +ENV SHORT_CUDA_VERSION=9.2 +ENV SHORT_NCCL_VERSION=2.4.8 +COPY install/centos7_nccl.sh /work/ +RUN /work/centos7_nccl.sh ARG USER_ID=0 COPY install/centos7_adduser.sh /work/ diff --git a/ci/docker/install/centos7_nccl.sh b/ci/docker/install/centos7_nccl.sh new file mode 100755 index 000000000000..7a14f104b328 --- /dev/null +++ b/ci/docker/install/centos7_nccl.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -ex + + +if [ -z ${SHORT_CUDA_VERSION} ]; then + echo "Error: SHORT_CUDA_VERSION environment variable undefined" + exit 1 +fi +if [ -z ${SHORT_NCCL_VERSION} ]; then + echo "Error: SHORT_NCCL_VERSION environment variable undefined" + exit 1 +fi + +curl -fsSL https://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm -O +rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm +yum check-update || true # exit code 100 in case of available updates +yum install -y libnccl-${SHORT_NCCL_VERSION}-1+cuda${SHORT_CUDA_VERSION} libnccl-devel-${SHORT_NCCL_VERSION}-1+cuda${SHORT_CUDA_VERSION} libnccl-static-${SHORT_NCCL_VERSION}-1+cuda${SHORT_CUDA_VERSION} diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index c321a0ae0ad4..dc119bb10256 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -1969,6 +1969,7 @@ build_static_libmxnet() { set -ex pushd . source /opt/rh/devtoolset-7/enable + export USE_SYSTEM_CUDA=1 local mxnet_variant=${1:?"This function requires a python command as the first argument"} source tools/staticbuild/build.sh ${mxnet_variant} popd diff --git a/cmake/Modules/FindNCCL.cmake b/cmake/Modules/FindNCCL.cmake index 1bd901cf374e..a94020baf8e3 100644 --- a/cmake/Modules/FindNCCL.cmake +++ b/cmake/Modules/FindNCCL.cmake @@ -45,8 +45,14 @@ find_path(NCCL_INCLUDE_DIRS $ENV{NCCL_DIR}/include ) +if(CMAKE_BUILD_TYPE STREQUAL "Distribution" AND UNIX) + set(NCCL_LIB_NAME "nccl_static") +else() + set(NCCL_LIB_NAME "nccl") +endif() + find_library(NCCL_LIBRARIES - NAMES nccl + NAMES ${NCCL_LIB_NAME} HINTS ${NCCL_LIB_DIR} ${NCCL_ROOT_DIR} @@ -68,7 +74,7 @@ if (UNIX) ) find_library(NCCL_LIBRARIES - NAMES nccl + NAMES ${NCCL_LIB_NAME} PATHS ${search_paths} PATH_SUFFIXES lib ) diff --git a/config/distribution/linux_cu100.cmake b/config/distribution/linux_cu100.cmake index bdbec7e63005..250f494d0963 100644 --- a/config/distribution/linux_cu100.cmake +++ b/config/distribution/linux_cu100.cmake @@ -20,7 +20,8 @@ set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") diff --git a/config/distribution/linux_cu101.cmake b/config/distribution/linux_cu101.cmake index fd773e88193b..ab11bcf69067 100644 --- a/config/distribution/linux_cu101.cmake +++ b/config/distribution/linux_cu101.cmake @@ -22,7 +22,8 @@ set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") diff --git a/config/distribution/linux_cu102.cmake b/config/distribution/linux_cu102.cmake index 9f740f543ecb..9e2848c7fed6 100644 --- a/config/distribution/linux_cu102.cmake +++ b/config/distribution/linux_cu102.cmake @@ -20,7 +20,8 @@ set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") diff --git a/config/distribution/linux_cu75.cmake b/config/distribution/linux_cu75.cmake deleted file mode 100644 index 91ef97150519..000000000000 --- a/config/distribution/linux_cu75.cmake +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") -set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo") - -set(CUDACXX "/usr/local/cuda-7.5/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;3.5;5.0;5.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu80.cmake b/config/distribution/linux_cu80.cmake deleted file mode 100644 index 6b98538e6c89..000000000000 --- a/config/distribution/linux_cu80.cmake +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") -set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo") - -set(CUDACXX "/usr/local/cuda-8.0/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;6.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu90.cmake b/config/distribution/linux_cu90.cmake index 1932a320f615..e4249cd609c8 100644 --- a/config/distribution/linux_cu90.cmake +++ b/config/distribution/linux_cu90.cmake @@ -20,7 +20,8 @@ set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") diff --git a/config/distribution/linux_cu91.cmake b/config/distribution/linux_cu91.cmake index 36e10a624e40..a239ada43454 100644 --- a/config/distribution/linux_cu91.cmake +++ b/config/distribution/linux_cu91.cmake @@ -20,7 +20,8 @@ set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") diff --git a/config/distribution/linux_cu92.cmake b/config/distribution/linux_cu92.cmake index 285daccdabc0..74f31c8ae031 100644 --- a/config/distribution/linux_cu92.cmake +++ b/config/distribution/linux_cu92.cmake @@ -20,7 +20,8 @@ set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") From c33c85ba530a118ddfa1bbacc3f64f821c0fd1f9 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Tue, 14 Apr 2020 07:05:51 +0000 Subject: [PATCH 20/20] CentOS7 staticbuild fix link with libculibos --- make/staticbuild/linux_cu100.mk | 4 ++++ make/staticbuild/linux_cu101.mk | 4 ++++ make/staticbuild/linux_cu102.mk | 4 ++++ make/staticbuild/linux_cu90.mk | 4 ++++ make/staticbuild/linux_cu91.mk | 4 ++++ make/staticbuild/linux_cu92.mk | 4 ++++ 6 files changed, 24 insertions(+) diff --git a/make/staticbuild/linux_cu100.mk b/make/staticbuild/linux_cu100.mk index 0bd7d9ae61dd..855485c5b6df 100644 --- a/make/staticbuild/linux_cu100.mk +++ b/make/staticbuild/linux_cu100.mk @@ -37,7 +37,11 @@ DEBUG = 0 USE_SIGNAL_HANDLER = 1 # the additional link flags you want to add +ifdef USE_SYSTEM_CUDA +ADD_LDFLAGS += -L$(DEPS_PATH)/lib -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +else ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +endif # the additional compile flags you want to add ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections diff --git a/make/staticbuild/linux_cu101.mk b/make/staticbuild/linux_cu101.mk index 2715d264d995..7bbde85bee11 100644 --- a/make/staticbuild/linux_cu101.mk +++ b/make/staticbuild/linux_cu101.mk @@ -37,7 +37,11 @@ DEBUG = 0 USE_SIGNAL_HANDLER = 1 # the additional link flags you want to add +ifdef USE_SYSTEM_CUDA +ADD_LDFLAGS += -L$(DEPS_PATH)/lib -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +else ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +endif # the additional compile flags you want to add ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections diff --git a/make/staticbuild/linux_cu102.mk b/make/staticbuild/linux_cu102.mk index c328e688c56b..963842a19cff 100644 --- a/make/staticbuild/linux_cu102.mk +++ b/make/staticbuild/linux_cu102.mk @@ -37,7 +37,11 @@ DEBUG = 0 USE_SIGNAL_HANDLER = 1 # the additional link flags you want to add +ifdef USE_SYSTEM_CUDA +ADD_LDFLAGS += -L$(DEPS_PATH)/lib -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +else ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +endif # the additional compile flags you want to add ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections diff --git a/make/staticbuild/linux_cu90.mk b/make/staticbuild/linux_cu90.mk index bd1db8014f19..1d0669ef82b6 100644 --- a/make/staticbuild/linux_cu90.mk +++ b/make/staticbuild/linux_cu90.mk @@ -37,7 +37,11 @@ DEBUG = 0 USE_SIGNAL_HANDLER = 1 # the additional link flags you want to add +ifdef USE_SYSTEM_CUDA +ADD_LDFLAGS += -L$(DEPS_PATH)/lib -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +else ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +endif # the additional compile flags you want to add ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections diff --git a/make/staticbuild/linux_cu91.mk b/make/staticbuild/linux_cu91.mk index a7d1c59ba7a9..89b35b10f6fa 100644 --- a/make/staticbuild/linux_cu91.mk +++ b/make/staticbuild/linux_cu91.mk @@ -37,7 +37,11 @@ DEBUG = 0 USE_SIGNAL_HANDLER = 1 # the additional link flags you want to add +ifdef USE_SYSTEM_CUDA +ADD_LDFLAGS += -L$(DEPS_PATH)/lib -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +else ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +endif # the additional compile flags you want to add ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections diff --git a/make/staticbuild/linux_cu92.mk b/make/staticbuild/linux_cu92.mk index 0a308894d0bb..2cbbdd25eeaf 100644 --- a/make/staticbuild/linux_cu92.mk +++ b/make/staticbuild/linux_cu92.mk @@ -37,7 +37,11 @@ DEBUG = 0 USE_SIGNAL_HANDLER = 1 # the additional link flags you want to add +ifdef USE_SYSTEM_CUDA +ADD_LDFLAGS += -L$(DEPS_PATH)/lib -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +else ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections +endif # the additional compile flags you want to add ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections