Upgrade to CUDA 10.0 #5649

Merged · 4 commits · May 11, 2020
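This change drops CUDA 9.0 from CI and makes CUDA 10.0 the new baseline: the cuda9.0 build job is removed from the pipeline, the shared Python wheel is now built and stashed by the CUDA 10.0 job (the stash xgboost_whl_cuda9 becomes xgboost_whl_cuda10 everywhere it is unstashed), and the deterministic-histogram GPU test is refactored into a run_cls helper so it can assert both that deterministic runs match and that non-deterministic runs differ.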
11 changes: 5 additions & 6 deletions Jenkinsfile
@@ -64,7 +64,6 @@ pipeline {
         'build-cpu': { BuildCPU() },
         'build-cpu-rabit-mock': { BuildCPUMock() },
         'build-cpu-non-omp': { BuildCPUNonOmp() },
-        'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
         'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
         'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
         'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
@@ -251,10 +250,10 @@ def BuildCUDA(args) {
     ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
     ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64
     """
-    // Stash wheel for CUDA 9.0 target
-    if (args.cuda_version == '9.0') {
+    // Stash wheel for CUDA 10.0 target
+    if (args.cuda_version == '10.0') {
         echo 'Stashing Python wheel...'
-        stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
+        stash name: 'xgboost_whl_cuda10', includes: 'python-package/dist/*.whl'
         path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
         s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
         echo 'Stashing C++ test executable (testxgboost)...'
@@ -298,7 +297,7 @@ def BuildJVMDoc() {

 def TestPythonCPU() {
   node('linux && cpu') {
-    unstash name: 'xgboost_whl_cuda9'
+    unstash name: 'xgboost_whl_cuda10'
     unstash name: 'srcs'
     unstash name: 'xgboost_cli'
     echo "Test Python CPU"
@@ -315,7 +314,7 @@ def TestPythonCPU() {
 def TestPythonGPU(args) {
   nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
   node(nodeReq) {
-    unstash name: 'xgboost_whl_cuda9'
+    unstash name: 'xgboost_whl_cuda10'
     unstash name: 'srcs'
     echo "Test Python GPU: CUDA ${args.cuda_version}"
     def container_type = "gpu"
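The stash rename is the heart of the Jenkinsfile change: BuildCUDA builds one Python wheel that TestPythonCPU and TestPythonGPU later unstash, so moving the stash from the 9.0 job to the 10.0 job moves the CUDA baseline of every downstream test in lock-step. The rename_whl.py step retags that wheel with the commit id and the manylinux2010_x86_64 platform tag before the S3 upload. As a hypothetical sketch only (the real tests/ci_build/rename_whl.py is not shown in this diff and may differ), a PEP 427-style rename taking the same three arguments could look like:

    # Hypothetical sketch, not the actual tests/ci_build/rename_whl.py.
    # Wheel filenames follow PEP 427:
    #   {name}-{version}-{python tag}-{abi tag}-{platform tag}.whl
    import os
    import sys

    def rename_whl(wheel_path, commit_id, platform_tag):
        directory, basename = os.path.split(wheel_path)
        name, version, python_tag, abi_tag, _ = basename[:-len('.whl')].split('-')
        # Embed the commit as a PEP 440 local version label and retag the platform,
        # e.g. xgboost-1.1.0-py2.py3-none-any.whl ->
        #      xgboost-1.1.0+<commit>-py2.py3-none-manylinux2010_x86_64.whl
        new_name = '-'.join([name, version + '+' + commit_id,
                             python_tag, abi_tag, platform_tag]) + '.whl'
        os.rename(wheel_path, os.path.join(directory, new_name))
        return new_name

    if __name__ == '__main__':
        print(rename_whl(sys.argv[1], sys.argv[2], sys.argv[3]))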
37 changes: 22 additions & 15 deletions tests/python-gpu/test_gpu_basic_models.py
@@ -12,25 +12,15 @@
 class TestGPUBasicModels(unittest.TestCase):
     cputest = test_bm.TestModels()

-    def test_eta_decay_gpu_hist(self):
-        self.cputest.run_eta_decay('gpu_hist')
-
-    def test_deterministic_gpu_hist(self):
-        kRows = 1000
-        kCols = 64
-        kClasses = 4
-        # Create large values to force rounding.
-        X = np.random.randn(kRows, kCols) * 1e4
-        y = np.random.randint(0, kClasses, size=kRows)
-
+    def run_cls(self, X, y, deterministic):
         cls = xgb.XGBClassifier(tree_method='gpu_hist',
-                                deterministic_histogram=True,
+                                deterministic_histogram=deterministic,
                                 single_precision_histogram=True)
         cls.fit(X, y)
         cls.get_booster().save_model('test_deterministic_gpu_hist-0.json')

         cls = xgb.XGBClassifier(tree_method='gpu_hist',
-                                deterministic_histogram=True,
+                                deterministic_histogram=deterministic,
                                 single_precision_histogram=True)
         cls.fit(X, y)
         cls.get_booster().save_model('test_deterministic_gpu_hist-1.json')
@@ -40,7 +30,24 @@ def test_deterministic_gpu_hist(self):
         with open('test_deterministic_gpu_hist-1.json', 'r') as fd:
             model_1 = fd.read()

-        assert hash(model_0) == hash(model_1)
-
         os.remove('test_deterministic_gpu_hist-0.json')
         os.remove('test_deterministic_gpu_hist-1.json')
+
+        return hash(model_0), hash(model_1)
+
+    def test_eta_decay_gpu_hist(self):
+        self.cputest.run_eta_decay('gpu_hist')
+
+    def test_deterministic_gpu_hist(self):
+        kRows = 1000
+        kCols = 64
+        kClasses = 4
+        # Create large values to force rounding.
+        X = np.random.randn(kRows, kCols) * 1e4
+        y = np.random.randint(0, kClasses, size=kRows) * 1e4
+
+        model_0, model_1 = self.run_cls(X, y, True)
+        assert model_0 == model_1
+
+        model_0, model_1 = self.run_cls(X, y, False)
+        assert model_0 != model_1
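The refactor turns the old one-sided check into a helper exercised in both directions: with deterministic_histogram=True the two training runs must serialize to identical models, and with it False they are expected to differ. The test scales the data by 1e4 ("large values to force rounding") because single-precision accumulation only exposes run-to-run ordering differences when the addends are large enough to make each rounding step visible. A minimal NumPy-only sketch of that non-associativity (illustrative, not part of the PR):

    import numpy as np

    # Summing the same float32 values in two different orders usually gives two
    # different results, because float addition rounds after every step. GPU
    # histogram accumulation faces the same issue when the order of its adds
    # varies between runs, which is what deterministic_histogram guards against.
    rng = np.random.RandomState(0)
    values = (rng.randn(1000) * 1e4).astype(np.float32)  # large values force rounding

    forward = np.float32(0)
    for v in values:
        forward += v

    backward = np.float32(0)
    for v in values[::-1]:
        backward += v

    print(forward, backward)    # typically two slightly different sums
    print(forward == backward)  # typically False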