diff --git a/cd/mxnet_lib/Jenkins_pipeline.groovy b/cd/mxnet_lib/Jenkins_pipeline.groovy
index ac372c621206..31da48928c71 100644
--- a/cd/mxnet_lib/Jenkins_pipeline.groovy
+++ b/cd/mxnet_lib/Jenkins_pipeline.groovy
@@ -57,7 +57,7 @@ def build(mxnet_variant) {
       ci_utils.init_git()
       // Compiling in Ubuntu14.04 due to glibc issues.
       // This should be updates once we have clarity on this issue.
-      ci_utils.docker_run('publish.ubuntu1404_cpu', "build_static_libmxnet ${mxnet_variant}", false)
+      ci_utils.docker_run('centos7_cpu', "build_static_libmxnet ${mxnet_variant}", false)
       ci_utils.pack_lib("mxnet_${mxnet_variant}", libmxnet_pipeline.get_stash(mxnet_variant))
     }
   }
diff --git a/cd/mxnet_lib/mxnet_lib_pipeline.groovy b/cd/mxnet_lib/mxnet_lib_pipeline.groovy
index 9c154a911f97..d9fb3d810bc5 100644
--- a/cd/mxnet_lib/mxnet_lib_pipeline.groovy
+++ b/cd/mxnet_lib/mxnet_lib_pipeline.groovy
@@ -79,9 +79,9 @@ def get_environment(mxnet_variant) {
     return "centos7_aarch64_cpu"
   } else if (mxnet_variant.startsWith("cu")) {
     // Remove 'mkl' suffix from variant to properly format test environment
-    return "ubuntu_gpu_${mxnet_variant.replace('mkl', '')}"
+    return "centos7_gpu_${mxnet_variant.replace('mkl', '')}"
   }
-  return "ubuntu_cpu"
+  return "centos7_cpu"
 }
 
 // Returns the variant appropriate jenkins node test in which
diff --git a/cd/python/docker/Jenkins_pipeline.groovy b/cd/python/docker/Jenkins_pipeline.groovy
index 46fc64714cc3..847756adef9c 100644
--- a/cd/python/docker/Jenkins_pipeline.groovy
+++ b/cd/python/docker/Jenkins_pipeline.groovy
@@ -45,9 +45,9 @@ def get_environment(mxnet_variant) {
     return "centos7_aarch64_cpu"
   }
   if (mxnet_variant.startsWith('cu')) {
-    return "ubuntu_gpu_${mxnet_variant}"
+    return "centos7_gpu_${mxnet_variant}"
   }
-  return "ubuntu_cpu"
+  return "centos7_cpu"
 }
 
 
diff --git a/cd/python/pypi/Jenkins_pipeline.groovy b/cd/python/pypi/Jenkins_pipeline.groovy
index 8b2d37ff3090..4e642e08fe53 100644
--- a/cd/python/pypi/Jenkins_pipeline.groovy
+++ b/cd/python/pypi/Jenkins_pipeline.groovy
@@ -51,9 +51,9 @@ def get_environment(mxnet_variant) {
     return "centos7_aarch64_cpu"
   }
   if (mxnet_variant.startsWith('cu')) {
-    return "ubuntu_gpu_${mxnet_variant}"
+    return "centos7_gpu_${mxnet_variant}"
   }
-  return "ubuntu_cpu"
+  return "centos7_cpu"
 }
 
 def build(mxnet_variant) {
diff --git a/ci/Jenkinsfile_docker_cache b/ci/Jenkinsfile_docker_cache
index f90bf0459f03..1be54a33d91e 100644
--- a/ci/Jenkinsfile_docker_cache
+++ b/ci/Jenkinsfile_docker_cache
@@ -38,6 +38,7 @@ core_logic: {
       timeout(time: total_timeout, unit: 'MINUTES') {
         utils.init_git()
         sh "ci/docker_cache.py --docker-registry ${env.DOCKER_ECR_REGISTRY}"
+        sh "cd ci && python3 ./docker_login.py --secret-name ${env.DOCKERHUB_SECRET_NAME} && docker-compose -f docker/docker-compose.yml build --parallel && docker-compose -f docker/docker-compose.yml push && docker logout"
       }
     }
   }
diff --git a/ci/Jenkinsfile_utils.groovy b/ci/Jenkinsfile_utils.groovy
index 3f774f052703..a758c72211cc 100644
--- a/ci/Jenkinsfile_utils.groovy
+++ b/ci/Jenkinsfile_utils.groovy
@@ -147,9 +147,10 @@ def collect_test_results_windows(original_file_name, new_file_name) {
 }
 
 
-def docker_run(platform, function_name, use_nvidia, shared_mem = '500m', env_vars = "") {
-  def command = "ci/build.py %ENV_VARS% --docker-registry ${env.DOCKER_ECR_REGISTRY} %USE_NVIDIA% --platform %PLATFORM% --docker-build-retries 3 --shm-size %SHARED_MEM% /work/runtime_functions.sh %FUNCTION_NAME%"
+def docker_run(platform, function_name, use_nvidia, shared_mem = '500m', env_vars = "", build_args = "") {
+  def command = "ci/build.py %ENV_VARS% %BUILD_ARGS% --docker-registry ${env.DOCKER_CACHE_REGISTRY} %USE_NVIDIA% --platform %PLATFORM% --docker-build-retries 3 --shm-size %SHARED_MEM% /work/runtime_functions.sh %FUNCTION_NAME%"
   command = command.replaceAll('%ENV_VARS%', env_vars.length() > 0 ? "-e ${env_vars}" : '')
+  command = command.replaceAll('%BUILD_ARGS%', build_args.length() > 0 ? "${build_args}" : '')
   command = command.replaceAll('%USE_NVIDIA%', use_nvidia ? '--nvidiadocker' : '')
   command = command.replaceAll('%PLATFORM%', platform)
   command = command.replaceAll('%FUNCTION_NAME%', function_name)
diff --git a/ci/build.py b/ci/build.py
index 645eb96875e9..a316225bae29 100755
--- a/ci/build.py
+++ b/ci/build.py
@@ -38,18 +38,28 @@
 from subprocess import check_call, check_output
 from typing import *
 
+import yaml
+
 from util import *
 
+DOCKER_COMPOSE_WHITELIST = ('centos7_cpu', 'centos7_gpu_cu92', 'centos7_gpu_cu100',
+                            'centos7_gpu_cu101', 'centos7_gpu_cu102', 'centos7_gpu_cu110',
+                            'centos7_gpu_cu112')
+
+# Files for docker compose
+DOCKER_COMPOSE_FILES = set(('docker/build.centos7',))
 
 def get_dockerfiles_path():
     return "docker"
 
 
-def get_platforms(path: str = get_dockerfiles_path()) -> List[str]:
+def get_platforms(path: str = get_dockerfiles_path(), legacy_only=False) -> List[str]:
     """Get a list of architectures given our dockerfiles"""
     dockerfiles = glob.glob(os.path.join(path, "Dockerfile.*"))
-    dockerfiles = list(filter(lambda x: x[-1] != '~', dockerfiles))
-    files = list(map(lambda x: re.sub(r"Dockerfile.(.*)", r"\1", x), dockerfiles))
+    dockerfiles = set(filter(lambda x: x[-1] != '~', dockerfiles))
+    files = set(map(lambda x: re.sub(r"Dockerfile.(.*)", r"\1", x), dockerfiles))
+    if legacy_only:
+        files = files - DOCKER_COMPOSE_FILES
     platforms = list(map(lambda x: os.path.split(x)[1], sorted(files)))
     return platforms
 
@@ -79,6 +89,11 @@ def _hash_file(ctx, filename):
 
 def get_docker_tag(platform: str, registry: str) -> str:
     """:return: docker tag to be used for the container"""
+    if platform in DOCKER_COMPOSE_WHITELIST:
+        with open("docker/docker-compose.yml", "r") as f:
+            compose_config = yaml.load(f.read(), yaml.SafeLoader)
+            return compose_config["services"][platform]["image"].replace('${DOCKER_CACHE_REGISTRY}', registry)
+
     platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform)
     if not registry:
         registry = "mxnet_local"
@@ -106,41 +121,58 @@ def build_docker(platform: str, registry: str, num_retries: int, no_cache: bool,
     :return: Id of the top level image
     """
     tag = get_docker_tag(platform=platform, registry=registry)
-    logging.info("Building docker container tagged '%s'", tag)
-    #
-    # We add a user with the same group as the executing non-root user so files created in the
-    # container match permissions of the local user. Same for the group.
-    #
-    # These variables are used in the docker files to create user and group with these ids.
-    # see: docker/install/ubuntu_adduser.sh
-    #
-    # cache-from is needed so we use the cached images tagged from the remote via
-    # docker pull see: docker_cache.load_docker_cache
-    #
-    # This also prevents using local layers for caching: https://github.com/moby/moby/issues/33002
-    # So to use local caching, we should omit the cache-from by using --no-dockerhub-cache argument to this
-    # script.
-    #
-    # This doesn't work with multi head docker files.
- # - cmd = ["docker", "build", - "-f", get_dockerfile(platform), - "--build-arg", "USER_ID={}".format(os.getuid()), - "--build-arg", "GROUP_ID={}".format(os.getgid())] - if no_cache: - cmd.append("--no-cache") - if cache_intermediate: - cmd.append("--rm=false") - elif registry: - cmd.extend(["--cache-from", tag]) - cmd.extend(["-t", tag, get_dockerfiles_path()]) + + # docker-compose + if platform in DOCKER_COMPOSE_WHITELIST: + logging.info('Building docker container tagged \'%s\' based on ci/docker/docker-compose.yml', tag) + # We add a user with the same group as the executing non-root user so files created in the + # container match permissions of the local user. Same for the group. + cmd = ['docker-compose', '-f', 'docker/docker-compose.yml', 'build', + "--build-arg", "USER_ID={}".format(os.getuid()), + "--build-arg", "GROUP_ID={}".format(os.getgid())] + if cache_intermediate: + cmd.append('--no-rm') + cmd.append(platform) + else: + logging.info("Building docker container tagged '%s'", tag) + # + # We add a user with the same group as the executing non-root user so files created in the + # container match permissions of the local user. Same for the group. + # + # These variables are used in the docker files to create user and group with these ids. + # see: docker/install/ubuntu_adduser.sh + # + # cache-from is needed so we use the cached images tagged from the remote via + # docker pull see: docker_cache.load_docker_cache + # + # This also prevents using local layers for caching: https://github.com/moby/moby/issues/33002 + # So to use local caching, we should omit the cache-from by using --no-dockerhub-cache argument to this + # script. + # + # This doesn't work with multi head docker files. + # + cmd = ["docker", "build", + "-f", get_dockerfile(platform), + "--build-arg", "USER_ID={}".format(os.getuid()), + "--build-arg", "GROUP_ID={}".format(os.getgid())] + if no_cache: + cmd.append("--no-cache") + if cache_intermediate: + cmd.append("--rm=false") + elif registry: + cmd.extend(["--cache-from", tag]) + cmd.extend(["-t", tag, get_dockerfiles_path()]) + + env = os.environ.copy() + env["DOCKER_CACHE_REGISTRY"] = registry @retry(subprocess.CalledProcessError, tries=num_retries) - def run_cmd(): + def run_cmd(env=None): logging.info("Running command: '%s'", ' '.join(cmd)) - check_call(cmd) + check_call(cmd, env=env) + + run_cmd(env=env) - run_cmd() # Get image id by reading the tag. It's guaranteed (except race condition) that the tag exists. 
     # check_call would have failed
     image_id = _get_local_image_id(docker_tag=tag)
@@ -258,9 +290,19 @@ def list_platforms() -> str:
     return "\nSupported platforms:\n{}".format('\n'.join(get_platforms()))
 
 
-def load_docker_cache(tag, docker_registry) -> None:
+def load_docker_cache(platform, tag, docker_registry) -> None:
     """Imports tagged container from the given docker registry"""
     if docker_registry:
+        if platform in DOCKER_COMPOSE_WHITELIST:
+            env = os.environ.copy()
+            env["DOCKER_CACHE_REGISTRY"] = docker_registry
+            cmd = ['docker-compose', '-f', 'docker/docker-compose.yml', 'pull', platform]
+            logging.info("Running command: 'DOCKER_CACHE_REGISTRY=%s %s'", docker_registry, ' '.join(cmd))
+            check_call(cmd, env=env)
+            return
+
+        env = os.environ.copy()
+        env["DOCKER_CACHE_REGISTRY"] = docker_registry
         # noinspection PyBroadException
         try:
             import docker_cache
@@ -363,8 +405,8 @@ def main() -> int:
     elif args.platform:
         platform = args.platform
         tag = get_docker_tag(platform=platform, registry=args.docker_registry)
-        if args.docker_registry:
-            load_docker_cache(tag=tag, docker_registry=args.docker_registry)
+        if args.docker_registry:
+            load_docker_cache(platform=platform, tag=tag, docker_registry=args.docker_registry)
         if not args.run_only:
             build_docker(platform=platform, registry=args.docker_registry,
                          num_retries=args.docker_build_retries, no_cache=args.no_cache,
@@ -409,7 +451,7 @@ def main() -> int:
         logging.info("Artifacts will be produced in the build/ directory.")
         for platform in platforms:
             tag = get_docker_tag(platform=platform, registry=args.docker_registry)
-            load_docker_cache(tag=tag, docker_registry=args.docker_registry)
+            load_docker_cache(platform=platform, tag=tag, docker_registry=args.docker_registry)
             build_docker(platform, registry=args.docker_registry, num_retries=args.docker_build_retries,
                          no_cache=args.no_cache, cache_intermediate=args.cache_intermediate)
diff --git a/ci/docker/Dockerfile.build.centos7_cpu b/ci/docker/Dockerfile.build.centos7
similarity index 60%
rename from ci/docker/Dockerfile.build.centos7_cpu
rename to ci/docker/Dockerfile.build.centos7
index 0cfa5a9f6e47..0114e9ba5bdb 100644
--- a/ci/docker/Dockerfile.build.centos7_cpu
+++ b/ci/docker/Dockerfile.build.centos7
@@ -16,9 +16,22 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-# Dockerfile to build and run MXNet on CentOS 7 for CPU
+#
+# Dockerfile for CentOS 7 based builds.
+# Via the CentOS 7 Dockerfiles, we ensure MXNet continues to run fine on older systems.
+#
+# See docker-compose.yml for supported BASE_IMAGE ARGs and targets.
 
-FROM centos:7
+####################################################################################################
+# The Dockerfile uses a dynamic BASE_IMAGE (for example centos:7,
+# nvidia/cuda:10.2-cudnn7-devel-centos7 etc).
+# On top of BASE_IMAGE we install all dependencies shared by all MXNet build
+# environments into a "base" target. At the end of this file, we specialize
+# "base" for specific usecases. The target built by docker can be selected via
+# "--target" option or docker-compose.yml
+####################################################################################################
+ARG BASE_IMAGE
+FROM $BASE_IMAGE AS base
 
 WORKDIR /work/deps
 
@@ -39,3 +52,4 @@ ENV PYTHONPATH=./python/
 WORKDIR /work/mxnet
 
 COPY runtime_functions.sh /work/
+
diff --git a/ci/docker/Dockerfile.build.centos7_gpu b/ci/docker/Dockerfile.build.centos7_gpu
deleted file mode 100644
index 7e49e88b3a52..000000000000
--- a/ci/docker/Dockerfile.build.centos7_gpu
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- mode: dockerfile -*-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# Dockerfile to build and run MXNet on CentOS 7 for GPU
-
-FROM nvidia/cuda:10.1-devel-centos7
-
-WORKDIR /work/deps
-
-COPY install/centos7_core.sh /work/
-RUN /work/centos7_core.sh
-COPY install/centos7_ccache.sh /work/
-RUN /work/centos7_ccache.sh
-COPY install/centos7_python.sh /work/
-RUN /work/centos7_python.sh
-
-ENV CUDNN_VERSION=7.6.0.64
-COPY install/centos7_cudnn.sh /work/
-RUN /work/centos7_cudnn.sh
-
-ARG USER_ID=0
-COPY install/centos7_adduser.sh /work/
-RUN /work/centos7_adduser.sh
-
-ENV PYTHONPATH=./python/
-WORKDIR /work/mxnet
-
-COPY runtime_functions.sh /work/
diff --git a/ci/docker/docker-compose.yml b/ci/docker/docker-compose.yml
new file mode 100644
index 000000000000..0784987187a5
--- /dev/null
+++ b/ci/docker/docker-compose.yml
@@ -0,0 +1,94 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# We use the cache_from feature introduced in file format version 3.4 (released 2017-11-01)
+version: "3.4"
+
+# For simplicity, only the centos7_cpu is commented. But the comments apply to
+# all other services as well.
+services:
+  ###################################################################################################
+  # Dockerfile.build.centos7 based images used for building on CentOS7. On
+  # CentOS7, we respectively test the oldest supported toolchain and dependency
+  # versions
+  ###################################################################################################
+  centos7_cpu:
+    # The resulting image will be named build.centos7_cpu:latest and will be
+    # pushed to the dockerhub user specified in the environment variable
+    # ${DOCKER_CACHE_REGISTRY} (typically "mxnetci") under this name
+    image: ${DOCKER_CACHE_REGISTRY}/build.centos7_cpu:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.centos7
+      # Use "base" target declared in Dockerfile.build.centos7 as "build.centos7_cpu:latest"
+      target: base
+      args:
+        # BASE_IMAGE is used to dynamically specify the FROM image in Dockerfile.build.centos7
+        BASE_IMAGE: centos:7
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.centos7_cpu:latest
+  centos7_gpu_cu100:
+    image: ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu100:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.centos7
+      target: base
+      args:
+        BASE_IMAGE: nvidia/cuda:10.0-cudnn7-devel-centos7
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu100:latest
+  centos7_gpu_cu101:
+    image: ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu101:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.centos7
+      target: base
+      args:
+        BASE_IMAGE: nvidia/cuda:10.1-cudnn7-devel-centos7
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu101:latest
+  centos7_gpu_cu102:
+    image: ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu102:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.centos7
+      target: base
+      args:
+        BASE_IMAGE: nvidia/cuda:10.2-cudnn8-devel-centos7
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu102:latest
+  centos7_gpu_cu110:
+    image: ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu110:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.centos7
+      target: base
+      args:
+        BASE_IMAGE: nvidia/cuda:11.0-cudnn8-devel-centos7
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu110:latest
+  centos7_gpu_cu112:
+    image: ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu112:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.centos7
+      target: base
+      args:
+        BASE_IMAGE: nvidia/cuda:11.2.1-cudnn8-devel-centos7
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.centos7_gpu_cu112:latest
\ No newline at end of file
diff --git a/ci/docker/install/centos7_core.sh b/ci/docker/install/centos7_core.sh
index ae5cb719d38a..9b2452e78bcc 100755
--- a/ci/docker/install/centos7_core.sh
+++ b/ci/docker/install/centos7_core.sh
@@ -39,6 +39,11 @@ yum -y install make
 yum -y install wget
 yum -y install unzip
 yum -y install ninja-build
+yum -y install automake
+yum -y install patchelf
+yum -y install nasm
+yum -y install libtool
+yum -y install dpkg-dev
 
 # CMake 3.13.2+ is required
 mkdir /opt/cmake && cd /opt/cmake
diff --git a/ci/docker/install/requirements b/ci/docker/install/requirements
index 1abdc0051a82..210914ac129f 100644
--- a/ci/docker/install/requirements
+++ b/ci/docker/install/requirements
@@ -32,4 +32,4 @@ astroid==2.3.3  # pylint and astroid need to be aligned
 requests<2.19.0,>=2.18.4
 scipy==1.2.1
 setuptools
-coverage
+coverage
\ No newline at end of file
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 8e1d31d6a0c5..c0047ce27a6d 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -1244,8 +1244,8 @@ unittest_ubuntu_cpu_julia10() {
 unittest_centos7_cpu() {
     set -ex
     cd /work/mxnet
-    python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_unittest.xml --verbose tests/python/unittest
-    python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_train.xml --verbose tests/python/train
+    python3 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_unittest.xml --verbose tests/python/unittest
+    python3 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_train.xml --verbose tests/python/train
 }
 
 unittest_centos7_gpu() {
@@ -1253,7 +1253,7 @@
     cd /work/mxnet
     export CUDNN_VERSION=${CUDNN_VERSION:-7.0.3}
     export DMLC_LOG_STACK_TRACE_DEPTH=10
-    python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_gpu.xml --verbose tests/python/gpu
+    python3 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_gpu.xml --verbose tests/python/gpu
 }
 
 integrationtest_ubuntu_cpu_onnx() {
diff --git a/ci/docker_cache.py b/ci/docker_cache.py
index da01314f5f8d..0ac96e76ece0 100755
--- a/ci/docker_cache.py
+++ b/ci/docker_cache.py
@@ -75,6 +75,13 @@ def _build_save_container(platform, registry, load_cache) -> Optional[str]:
     :param load_cache: Load cache before building
     :return: Platform if failed, None otherwise
     """
+    # docker-compose
+    if platform in build_util.DOCKER_COMPOSE_WHITELIST:
+        build_util.build_docker(platform=platform, registry=registry, num_retries=10, no_cache=False)
+        push_cmd = ['docker-compose', 'push', platform]
+        subprocess.check_call(push_cmd)
+        return None
+
     docker_tag = build_util.get_docker_tag(platform=platform, registry=registry)
 
     # Preload cache
@@ -205,7 +212,7 @@ def script_name() -> str:
 
     args = parser.parse_args()
 
-    platforms = build_util.get_platforms()
+    platforms = build_util.get_platforms(legacy_only=True)
 
     if "dkr.ecr" in args.docker_registry:
         _ecr_login(args.docker_registry)
diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy
index da6a74a50fd3..9f96c7ad5e6d 100644
--- a/ci/jenkins/Jenkins_steps.groovy
+++ b/ci/jenkins/Jenkins_steps.groovy
@@ -356,7 +356,7 @@ def compile_centos7_gpu() {
       ws('workspace/build-centos7-gpu') {
         timeout(time: max_time, unit: 'MINUTES') {
           utils.init_git()
-          utils.docker_run('centos7_gpu', 'build_centos7_gpu', false)
+          utils.docker_run('centos7_gpu_cu101', 'build_centos7_gpu', false)
           utils.pack_lib('centos7_gpu', mx_lib)
         }
       }
@@ -642,7 +642,7 @@ def test_static_scala_cpu() {
       ws('workspace/ut-publish-scala-cpu') {
         timeout(time: max_time, unit: 'MINUTES') {
           utils.init_git()
-          utils.docker_run("publish.ubuntu1404_cpu", 'build_static_scala_cpu', false)
+          utils.docker_run('publish.ubuntu1404_cpu', 'build_static_scala_cpu', false)
         }
       }
     }
@@ -655,7 +655,7 @@ def test_static_python_cpu() {
       ws('workspace/ut-publish-python-cpu') {
         timeout(time: max_time, unit: 'MINUTES') {
           utils.init_git()
-          utils.docker_run("publish.ubuntu1404_cpu", 'build_static_python_cpu', false)
+          utils.docker_run('publish.ubuntu1404_cpu', 'build_static_python_cpu', false)
         }
       }
     }
@@ -668,7 +668,7 @@ def test_static_python_cpu_cmake() {
      ws('workspace/ut-publish-python-cpu') {
        timeout(time: max_time, unit: 'MINUTES') {
          utils.init_git()
-         utils.docker_run("publish.ubuntu1404_cpu", 'build_static_python_cpu_cmake', false)
+         utils.docker_run('publish.ubuntu1404_cpu', 'build_static_python_cpu_cmake', false)
        }
      }
    }
@@ -681,7 +681,7 @@ def test_static_python_gpu() {
      ws('workspace/ut-publish-python-gpu') {
        timeout(time: max_time, unit: 'MINUTES') {
          utils.init_git()
-         utils.docker_run("publish.ubuntu1404_gpu", 'build_static_python_cu101', true)
+         utils.docker_run('publish.ubuntu1404_gpu', 'build_static_python_cu101', true)
        }
      }
    }
@@ -694,7 +694,7 @@ def test_static_python_gpu_cmake() {
      ws('workspace/ut-publish-python-gpu') {
        timeout(time: max_time, unit: 'MINUTES') {
          utils.init_git()
-         utils.docker_run("publish.ubuntu1404_gpu", 'build_static_python_cu101_cmake', true)
+         utils.docker_run('publish.ubuntu1404_gpu', 'build_static_python_cu101_cmake', true)
        }
      }
    }
@@ -1251,7 +1251,7 @@ def test_centos7_python3_gpu() {
       timeout(time: max_time, unit: 'MINUTES') {
         try {
           utils.unpack_and_init('centos7_gpu', mx_lib)
-          utils.docker_run('centos7_gpu', 'unittest_centos7_gpu', true)
+          utils.docker_run('centos7_gpu_cu101', 'unittest_centos7_gpu', true)
           utils.publish_test_coverage()
         } finally {
           utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_centos7_gpu.xml')
diff --git a/tests/README.md b/tests/README.md
index de5d8107a790..997ea36c3e28 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -62,13 +62,20 @@ Ninja is a build tool (like make) that prioritizes building speed. If you will b
 
 ## Runing Python Tests Within Docker
 
-1. To run tests inside docker run the following comamdn
-    ```
-    ci/build.py --platform {PLATFORM} /work/runtime_functions.sh {RUNTIME_FUNCTION}
-    ```
+To run tests inside docker, you first need to install `docker` and `docker-compose` on your machine.
+
+On Ubuntu you may install them via `sudo apt-get install docker.io docker-compose`
+and set them up via `sudo usermod $(whoami) -G docker -a`.
+
+Then, to run tests inside docker, run the following command:
+
+```
+ci/build.py --platform {PLATFORM} /work/runtime_functions.sh {RUNTIME_FUNCTION}
+```
+
 An example for running python tests would be
 ```
-ci/build.py --platform build_ubuntu_cpu_mkldnn /work/runtime_functions.sh unittest_ubuntu_python3_cpu PYTHONPATH=./python/ nosetests-2.7 tests/python/unittest
+ci/build.py --platform build_ubuntu_cpu_mkldnn /work/runtime_functions.sh unittest_ubuntu_python3_cpu
 ```