Merged in SIM-2897 (pull request apache#2)
SIM-2897: Add packaging for TVM

Approved-by: Jeffrey Uong
Spenser Gilliland committed May 5, 2020
2 parents da3e676 + cbf9ba9 commit 8d2b334
Showing 6 changed files with 294 additions and 284 deletions.
Jenkinsfile: 332 changes (49 additions & 283 deletions)
@@ -1,290 +1,56 @@
#!groovy
// -*- mode: groovy -*-

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/

// Docker env used for testing
// Different images may have different version tags
// because some of them are more stable than others.
//
// Docker images are maintained by the PMC, cached in dockerhub
// and remain relatively stable over time.
// Flow for upgrading the docker env (needs a committer):
//
// - Send PR to upgrade build script in the repo
// - Build the new docker image
// - Tag the docker image with a new version and push to tvmai
// - Update the version in the Jenkinsfile, send a PR
// - Fix any issues wrt the new image version in the PR
// - Merge the PR; we are now on the new version
// - Tag the new version as the latest
// - Periodically clean up the old versions on local workers
//

// Hashtag in the source to build current CI docker builds
//
//

ci_lint = "tvmai/ci-lint:v0.60"
ci_gpu = "tvmai/ci-gpu:v0.61"
ci_cpu = "tvmai/ci-cpu:v0.61"
ci_i386 = "tvmai/ci-i386:v0.52"

// tvm libraries
tvm_runtime = "build/libtvm_runtime.so, build/config.cmake"
tvm_lib = "build/libtvm.so, " + tvm_runtime
// LLVM upstream lib
tvm_multilib = "build/libtvm.so, " +
"build/libvta_tsim.so, " +
"build/libvta_fsim.so, " +
"build/libtvm_topi.so, " +
tvm_runtime

// command to start a docker container
docker_run = 'docker/bash.sh'
// timeout in minutes
max_time = 120

def per_exec_ws(folder) {
return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder
}

// initialize source codes
def init_git() {
// Add more info about job node
sh """
echo "INFO: NODE_NAME=${NODE_NAME} EXECUTOR_NUMBER=${EXECUTOR_NUMBER}"
"""
checkout scm
retry(5) {
timeout(time: 2, unit: 'MINUTES') {
sh 'git submodule update --init'
}
}
}

def init_git_win() {
checkout scm
retry(5) {
timeout(time: 2, unit: 'MINUTES') {
bat 'git submodule update --init'
}
}
}

library('sima-jenkins-lib')

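// main(): packaging pipeline for this fork. Builds a Docker image that bundles
// prebuilt mla (.deb) and n2a_compiler (.whl) artifacts, builds TVM and its
// Python bindings inside that image, and publishes the resulting wheel.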
def main() {
def job_name = env.JOB_NAME.split('/')[1]
def currentBranchName = env.CHANGE_ID ? env.CHANGE_BRANCH : env.BRANCH_NAME

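// Job parameters select which branch's mla / n2a_compiler artifacts to copy in;
// both default to the branch under build (the source branch for PR builds).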
properties([
parameters([
string(name: "COPY_MLA_BRANCH_PKG", defaultValue: currentBranchName, description: 'Copy specified mla pkg'),
string(name: "COPY_N2A_COMPILER_BRANCH_PKG", defaultValue: currentBranchName, description: 'Copy specified n2a_compiler pkg')
]),
])

node("docker") {
stage("Checkout") {
utils.checkoutBitbucket()
}

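// Build the image from docker/Dockerfile. The closure stages the mla .deb and
// the n2a_compiler wheel into the build context (presumably before the image
// build runs) so the Dockerfile's ADD steps can pick them up.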
def image
stage("DockerBuild") {
image = utils.dockerBuild("docker/Dockerfile", 'simaai/' + job_name, "docker_creds", "docker_build.log", { ->
utils.getPackage('sima-ai','mla', params.COPY_MLA_BRANCH_PKG, '*.deb')
utils.getPackage('sima-ai','n2a_compiler', params.COPY_N2A_COMPILER_BRANCH_PKG, '*.whl')
sh "ls -alh"
})
}

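// Push the freshly built image to the registry while, in parallel, building TVM
// and the Python wheel inside a container started from it (32 GB RAM, 8 CPUs).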
parallel push: {
stage("DockerPush") {
image['post']()
}
}, build: {
image["image"].inside("-m 32g -c 8") {
utils.cmakeBuild("build", "-DCMAKE_CXX_COMPILER_LAUNCHER=ccache", {}, { src_dir ->
stage("Python Bindings") {
sh """#!/bin/bash -ex
cd ..
make cython
cd python
python3 setup.py bdist_wheel
"""
}
}, "../sima-regres.cmake", "clean all")
stage("Package") {
archiveArtifacts('python/dist/*.whl')
utils.uploadPythonPackages('jenkins_user', 'sima-pypi', 'python/dist/*.whl', 3)
}
}
}
}
}

stage("Sanity Check") {
timeout(time: max_time, unit: 'MINUTES') {
node('CPU') {
ws(per_exec_ws("tvm/sanity")) {
init_git()
sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"
}
}
}
}

// Run make. First try an incremental make from a previous workspace in the hope
// of accelerating the compilation. If something goes wrong, clean the workspace
// and build from scratch.
def make(docker_type, path, make_flag) {
timeout(time: max_time, unit: 'MINUTES') {
try {
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
// always run the C++ unit tests after a build
sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
} catch (exc) {
echo 'Incremental compilation failed. Fall back to build from scratch'
sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}"
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
}
}
}

// pack libraries for later use
def pack_lib(name, libs) {
sh """
echo "Packing ${libs} into ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
stash includes: libs, name: name
}


// unpack libraries saved before
def unpack_lib(name, libs) {
unstash name
sh """
echo "Unpacked ${libs} from ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
}

stage('Build') {
parallel 'BUILD: GPU': {
node('GPUBUILD') {
ws(per_exec_ws("tvm/build-gpu")) {
init_git()
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh"
make(ci_gpu, 'build', '-j2')
pack_lib('gpu', tvm_multilib)
// compiler test
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh"
make(ci_gpu, 'build2', '-j2')
}
}
},
'BUILD: CPU': {
node('CPU') {
ws(per_exec_ws("tvm/build-cpu")) {
init_git()
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh"
make(ci_cpu, 'build', '-j2')
pack_lib('cpu', tvm_lib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh"
}
}
}
},
'BUILD : i386': {
node('CPU') {
ws(per_exec_ws("tvm/build-i386")) {
init_git()
sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh"
make(ci_i386, 'build', '-j2')
pack_lib('i386', tvm_multilib)
}
}
}
}

stage('Unit Test') {
parallel 'python3: GPU': {
node('TensorCore') {
ws(per_exec_ws("tvm/ut-python-gpu")) {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration.sh"
}
}
}
},
'python3: i386': {
node('CPU') {
ws(per_exec_ws("tvm/ut-python-i386")) {
init_git()
unpack_lib('i386', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh"
}
}
}
},
'java: GPU': {
node('GPU') {
ws(per_exec_ws("tvm/ut-java")) {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh"
}
}
}
}
}

stage('Integration Test') {
parallel 'topi: GPU': {
node('GPU') {
ws(per_exec_ws("tvm/topi-python-gpu")) {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh"
}
}
}
},
'frontend: GPU': {
node('GPU') {
ws(per_exec_ws("tvm/frontend-python-gpu")) {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
}
}
}
},
'docs: GPU': {
node('GPU') {
ws(per_exec_ws("tvm/docs-python-gpu")) {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh"
}
pack_lib('mydocs', 'docs.tgz')
}
}
}
}

/*
stage('Build packages') {
parallel 'conda CPU': {
node('CPU') {
sh "${docker_run} tvmai/conda-cpu ./conda/build_cpu.sh
}
},
'conda cuda': {
node('CPU') {
sh "${docker_run} tvmai/conda-cuda90 ./conda/build_cuda.sh
sh "${docker_run} tvmai/conda-cuda100 ./conda/build_cuda.sh
}
}
// Here we could upload the packages to anaconda for releases
// and/or the master branch
}
*/

stage('Deploy') {
node('doc') {
ws(per_exec_ws("tvm/deploy-docs")) {
if (env.BRANCH_NAME == "master") {
unpack_lib('mydocs', 'docs.tgz')
sh "cp docs.tgz /var/docs/docs.tgz"
sh "tar xf docs.tgz -C /var/docs"
}
}
}
}
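// utils.job_wrapper comes from sima-jenkins-lib and presumably wraps the
// pipeline body with shared setup, error handling, and notifications.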
utils.job_wrapper( {
main()
})
docker/.gitignore: 2 changes (2 additions & 0 deletions)
@@ -0,0 +1,2 @@
*.deb
*.whl
docker/Dockerfile: 42 changes (42 additions & 0 deletions)
@@ -0,0 +1,42 @@
FROM simaai/ubuntu:latest

ENV DEBIAN_FRONTEND=noninteractive
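# Core build tooling; ccache speeds up repeated C++ rebuilds of TVM.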
RUN apt-get update && apt-get -y install \
build-essential ccache \
wget curl awscli cmake unzip && \
rm -rf /var/lib/apt/lists/*
RUN \
sed -i 's/# \(.*multiverse$\)/\1/g' /etc/apt/sources.list && \
apt-get -y update && \
apt-get install -y python3-pip python3-dev && \
rm -rf /var/lib/apt/lists/* && \
python3 -m pip install --upgrade pip

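# SonarCloud scanner tooling (see docker/install_sonar.sh below).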
ADD install_sonar.sh /tmp/
RUN /tmp/install_sonar.sh
ENV PATH=${PATH}:/opt/sonar-scanner/bin

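# Trust the Let's Encrypt X3 cross-signed certificate so HTTPS downloads from
# Let's Encrypt-secured hosts verify on this base image.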
RUN \
curl https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt \
-o /usr/local/share/ca-certificates/letsencryptx3.crt && \
update-ca-certificates

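# Install the prebuilt MLA package; the .deb is staged into the build context
# by the Jenkins pipeline (via utils.getPackage).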
ARG MLA_BRANCH="master"
ARG MLA_VERSION="0.0.1"
ADD mla-${MLA_VERSION}-Linux.deb /tmp
RUN apt-get update && apt-get install -y /tmp/mla-${MLA_VERSION}-Linux.deb && rm -rf /var/lib/apt/lists/*

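# Install the prebuilt n2a compiler wheel, likewise staged by the pipeline.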
ARG N2A_COMPILER_BRANCH="master"
ARG N2A_COMPILER_VERSION="0.0.1"
ADD sima_mlc-${N2A_COMPILER_VERSION}_${N2A_COMPILER_BRANCH}-py3-none-any.whl /tmp
RUN pip3 install /tmp/sima_mlc-${N2A_COMPILER_VERSION}_${N2A_COMPILER_BRANCH}-py3-none-any.whl

RUN apt-get update && \
apt-get install -y \
libopenblas-dev \
cython && \
rm -rf /var/lib/apt/lists/* && \
pip3 install twine && \
pip3 install --upgrade keyrings.alt


docker/install_sonar.sh: 13 changes (13 additions & 0 deletions)
@@ -0,0 +1,13 @@
#!/bin/bash -ex

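# Install the SonarCloud build wrapper (onto PATH in /usr/local/bin) and the
# sonar-scanner CLI (symlinked to /opt/sonar-scanner).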
wget -nv https://sonarcloud.io/static/cpp/build-wrapper-linux-x86.zip
unzip build-wrapper-linux-x86.zip
mv build-wrapper-linux-x86/* /usr/local/bin/
rm -rf build-wrapper-linux-x86*

wget -nv https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-3.3.0.1492-linux.zip
unzip sonar-scanner-cli-3.3.0.1492-linux.zip
mv sonar-scanner-3.3.0.1492-linux /opt/
ln -sf /opt/sonar-scanner-3.3.0.1492-linux /opt/sonar-scanner
rm -rf sonar-scanner-cli-3.3.0.1492-linux.zip

