
Disable -DUSE_TVM_OP on GPU builds #18204

Merged 1 commit on May 1, 2020
46 changes: 0 additions & 46 deletions ci/docker/runtime_functions.sh
@@ -723,7 +723,6 @@ build_ubuntu_gpu_mkldnn() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=ON \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CPP_PACKAGE=ON \
@@ -737,7 +736,6 @@ build_ubuntu_gpu_mkldnn_nocudnn() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=ON \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=OFF \
@@ -752,7 +750,6 @@ build_ubuntu_gpu_cuda101_cudnn7() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=ON \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=ON \
@@ -775,7 +772,6 @@ build_ubuntu_gpu_cuda101_cudnn7_make() {
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
USE_TVM_OP=1 \
USE_CPP_PACKAGE=1 \
USE_DIST_KVSTORE=1 \
CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
@@ -795,7 +791,6 @@ build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() {
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
USE_TVM_OP=0 \
USE_CPP_PACKAGE=1 \
USE_DIST_KVSTORE=1 \
CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
@@ -805,23 +800,6 @@ build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() {
make cython PYTHON=python3
}

build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op() {
set -ex
cd /work/build
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=OFF \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=ON \
-DUSE_MKLDNN=OFF \
-DBUILD_CYTHON_MODULES=ON \
-DUSE_DIST_KVSTORE=ON \
-G Ninja /work/mxnet
ninja
}

build_ubuntu_amalgamation() {
set -ex
# Amalgamation can not be run with -j nproc
@@ -852,7 +830,6 @@ build_ubuntu_gpu_cmake() {
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=ON \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=OFF \
@@ -873,7 +850,6 @@ build_ubuntu_gpu_cmake_no_rtc() {
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=ON \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=ON \
@@ -888,27 +864,6 @@ build_ubuntu_gpu_cmake_no_rtc() {
ninja
}

build_ubuntu_gpu_cmake_no_tvm_op() {
set -ex
cd /work/build
CC=gcc-7 CXX=g++-7 cmake \
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=OFF \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=OFF \
-DUSE_DIST_KVSTORE=ON \
-DCMAKE_BUILD_TYPE=Release \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DBUILD_CYTHON_MODULES=1 \
-G Ninja \
/work/mxnet

ninja
}

build_ubuntu_cpu_large_tensor() {
set -ex
cd /work/build
@@ -931,7 +886,6 @@ build_ubuntu_gpu_large_tensor() {
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=ON \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=OFF \
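Taken together, the hunks above remove -DUSE_TVM_OP=ON / USE_TVM_OP=1 from every GPU build function and delete the dedicated *_no_tvm_op variants, so the GPU CI builds now compile without TVM-generated operators by default. For a developer who still wants them in a local GPU build, the sketch below shows how the flag could be passed explicitly; it is a minimal, hypothetical invocation modeled on build_ubuntu_gpu_cuda101_cudnn7() above, and the compiler version, container paths, and $CI_CMAKE_CUDA_ARCH variable are assumptions carried over from the CI environment rather than requirements.

# Hypothetical local GPU build with TVM operators re-enabled, modeled on
# build_ubuntu_gpu_cuda101_cudnn7() above. gcc-7, /work paths, and
# $CI_CMAKE_CUDA_ARCH are assumptions taken from the CI container.
set -ex
cd /work/build
CC=gcc-7 CXX=g++-7 cmake \
    -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
    -DUSE_MKL_IF_AVAILABLE=OFF \
    -DUSE_TVM_OP=ON \
    -DUSE_CUDA=ON \
    -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
    -DUSE_CUDNN=ON \
    -G Ninja /work/mxnet
ninja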
43 changes: 0 additions & 43 deletions ci/jenkins/Jenkins_steps.groovy
@@ -289,20 +289,6 @@ def compile_unix_full_gpu_mkldnn_cpp_test() {
}]
}

def compile_unix_full_gpu_no_tvm_op() {
return ['GPU: CUDA10.1+cuDNN7 TVM_OP OFF': {
node(NODE_LINUX_CPU) {
ws('workspace/build-gpu-no-tvm-op') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op', false)
utils.pack_lib('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
}
}
}
}]
}

def compile_unix_cmake_gpu() {
return ['GPU: CMake': {
node(NODE_LINUX_CPU) {
@@ -317,19 +303,6 @@ def compile_unix_cmake_gpu() {
}]
}

def compile_unix_cmake_gpu_no_tvm_op() {
return ['GPU: CMake TVM_OP OFF': {
node(NODE_LINUX_CPU) {
ws('workspace/build-cmake-gpu-no-tvm-op') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_cmake_no_tvm_op', false)
}
}
}
}]
}

def compile_unix_cmake_gpu_no_rtc() {
return ['GPU: CMake CUDA RTC OFF': {
node(NODE_LINUX_CPU) {
@@ -841,22 +814,6 @@ def test_unix_python3_gpu() {
}]
}

def test_unix_python3_gpu_no_tvm_op() {
return ['Python3: GPU TVM_OP OFF': {
node(NODE_LINUX_GPU) {
ws('workspace/ut-python3-gpu-no-tvm-op') {
try {
utils.unpack_and_init('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
python3_gpu_ut_cython('ubuntu_gpu_cu101')
utils.publish_test_coverage()
} finally {
utils.collect_test_results_unix('tests_gpu.xml', 'tests_python3_gpu.xml')
}
}
}
}]
}

def test_unix_python3_quantize_gpu() {
return ['Python3: Quantize GPU': {
node(NODE_LINUX_GPU_P3) {
3 changes: 0 additions & 3 deletions ci/jenkins/Jenkinsfile_unix_gpu
@@ -41,8 +41,6 @@ core_logic: {
custom_steps.compile_unix_cmake_gpu(),
custom_steps.compile_unix_tensorrt_gpu(),
custom_steps.compile_unix_int64_gpu(),
custom_steps.compile_unix_full_gpu_no_tvm_op(),
custom_steps.compile_unix_cmake_gpu_no_tvm_op(),
custom_steps.compile_unix_cmake_gpu_no_rtc(),
custom_steps.compile_unix_full_gpu_mkldnn_cpp_test()
])
@@ -61,7 +59,6 @@ core_logic: {
custom_steps.test_unix_scala_gpu(),
// TODO(szha): fix and reenable the hanging issue. tracked in #18098
// custom_steps.test_unix_distributed_kvstore_gpu(),
custom_steps.test_unix_python3_gpu_no_tvm_op(),
custom_steps.test_unix_capi_cpp_package(),
])
}