Skip to content

[SYCL][CUDA] Fix and cleanup more CUDA LIT fails #1303

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Apr 9, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion sycl/plugins/cuda/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ message(STATUS "Including the PI API CUDA backend.")
# we only require the CUDA driver API to be used
# CUDA_CUDA_LIBRARY variable defines the path to libcuda.so, the CUDA Driver API library.

find_package(CUDA 10.0 REQUIRED)
find_package(CUDA 10.1 REQUIRED)

add_library(cudadrv SHARED IMPORTED)

Expand Down
4 changes: 3 additions & 1 deletion sycl/test/aot/gpu.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// REQUIRES: ocloc, gpu
// UNSUPPORTED: cuda
// CUDA is not compatible with SPIR.

// RUN: %clangxx -fsycl -fsycl-targets=spir64_gen-unknown-unknown-sycldevice -Xsycl-target-backend=spir64_gen-unknown-unknown-sycldevice "-device skl" %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// XFAIL: cuda

//==----- gpu.cpp - AOT compilation for gen devices using GEN compiler ------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
Expand Down
2 changes: 2 additions & 0 deletions sycl/test/aot/multiple-devices.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
//===------------------------------------------------------------------------===//

// REQUIRES: opencl-aot, ocloc, aoc, cpu, gpu, accelerator
// UNSUPPORTED: cuda
// CUDA is not compatible with SPIR.

// 1-command compilation case
// Targeting CPU, GPU, FPGA
Expand Down
9 changes: 5 additions & 4 deletions sycl/test/basic_tests/buffer/subbuffer.cpp
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// XFAIL: cuda
// TODO: Fix CUDA implementation.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// XFAIL: cuda
// TODO: cuda fail due to unimplemented param_name 4121 in cuda_piDeviceGetInfo

//
//==---------- subbuffer.cpp --- sub-buffer basic test ---------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
Expand Down
6 changes: 3 additions & 3 deletions sycl/test/basic_tests/handler/handler_copy_with_offset.cpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// RUN: %clangxx -fsycl %s -o %t.out
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
Expand Down Expand Up @@ -48,7 +48,7 @@ int main() {

vector_class<char> Expected{'x', 'x', '0', '1', '2', '3', 'x', 'x'};
if (DataRaw != Expected)
throw std::runtime_error("Check of hadler.copy(ptr, acc) was failed");
throw std::runtime_error("Check of handler.copy(ptr, acc) was failed");
}

{
Expand All @@ -71,7 +71,7 @@ int main() {
}
vector_class<char> Expected{'2', '3', '4', '5', 'x', 'x', 'x', 'x'};
if (DataRaw != Expected)
throw std::runtime_error("Check of hadler.copy(acc, ptr) was failed");
throw std::runtime_error("Check of handler.copy(acc, ptr) was failed");
}
return 0;
}
2 changes: 1 addition & 1 deletion sycl/test/basic_tests/handler/interop_task.cpp
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// REQUIRES: opencl
// RUN: %clangxx -fsycl %s -o %t.out -L %opencl_libs_dir -lOpenCL
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// REQUIRES: opencl

//==------- interop_task.cpp -----------------------------------------------==//
//
Expand Down
32 changes: 11 additions & 21 deletions sycl/test/basic_tests/kernel_info.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,28 +12,18 @@
//===----------------------------------------------------------------------===//

#include <CL/sycl.hpp>
#include <cassert>

using namespace cl::sycl;

void check(bool condition, const char *conditionString, const char *filename,
const long line) noexcept {
if (!condition) {
std::cerr << "CHECK failed in " << filename << "#" << line << " "
<< conditionString << "\n";
std::abort();
}
}

#define CHECK(CONDITION) check(CONDITION, #CONDITION, __FILE__, __LINE__)

int main() {
queue q;

buffer<int, 1> buf(range<1>(1));
program prg(q.get_context());

prg.build_with_kernel_type<class SingleTask>();
CHECK(prg.has_kernel<class SingleTask>());
assert(prg.has_kernel<class SingleTask>());
kernel krn = prg.get_kernel<class SingleTask>();

q.submit([&](handler &cgh) {
Expand All @@ -42,26 +32,26 @@ int main() {
});

const string_class krnName = krn.get_info<info::kernel::function_name>();
CHECK(!krnName.empty());
assert(!krnName.empty());
const cl_uint krnArgCount = krn.get_info<info::kernel::num_args>();
CHECK(krnArgCount > 0);
assert(krnArgCount > 0);
const context krnCtx = krn.get_info<info::kernel::context>();
CHECK(krnCtx == q.get_context());
assert(krnCtx == q.get_context());
const program krnPrg = krn.get_info<info::kernel::program>();
CHECK(krnPrg == prg);
assert(krnPrg == prg);
const cl_uint krnRefCount = krn.get_info<info::kernel::reference_count>();
CHECK(krnRefCount > 0);
assert(krnRefCount > 0);
const string_class krnAttr = krn.get_info<info::kernel::attributes>();
CHECK(krnAttr.empty());
assert(krnAttr.empty());

device dev = q.get_device();
const size_t wgSize =
krn.get_work_group_info<info::kernel_work_group::work_group_size>(dev);
CHECK(wgSize > 0);
assert(wgSize > 0);
const size_t prefWGSizeMult = krn.get_work_group_info<
info::kernel_work_group::preferred_work_group_size_multiple>(dev);
CHECK(prefWGSizeMult > 0);
assert(prefWGSizeMult > 0);
const cl_ulong prvMemSize =
krn.get_work_group_info<info::kernel_work_group::private_mem_size>(dev);
CHECK(prvMemSize == 0);
assert(prvMemSize == 0);
}
6 changes: 4 additions & 2 deletions sycl/test/basic_tests/parallel_for_range.cpp
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
// RUN: %clangxx -fsycl %s -o %t.out
// XFAIL: cuda
// CUDA exposes broken hierarchical parallelism.

// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// XFAIL: cuda

#include <CL/sycl.hpp>

Expand Down
3 changes: 0 additions & 3 deletions sycl/test/basic_tests/sampler/sampler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,6 @@
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out

// TODO: Image support in CUDA backend
// XFAIL: cuda

//==--------------- sampler.cpp - SYCL sampler basic test ------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
Expand Down
2 changes: 1 addition & 1 deletion sycl/test/fpga_tests/fpga_pipes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does it pass on a CUDA device?

Copy link
Contributor Author

@bjoernknafla bjoernknafla Apr 9, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would you prefer to mark all the FPGA tests as not supported by CUDA? I am not entirely sure whether the test semantics only work for accelerators anyway, in which case I could also quickly add REQUIRES: accelerator to all of them.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm fine with this change actually.
We probably should have some common guidance for test behavior in unsupported environments and align all the tests in a separate PR.
+@MrSidims


//==------------- fpga_pipes.cpp - SYCL FPGA pipes test --------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
Expand Down
2 changes: 1 addition & 1 deletion sycl/test/fpga_tests/fpga_queue.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

//==------------- fpga_queue.cpp - SYCL FPGA queues test -------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
Expand Down
8 changes: 5 additions & 3 deletions sycl/test/function-pointers/fp-as-kernel-arg.cpp
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
// RUN: %clangxx -Xclang -fsycl-allow-func-ptr -std=c++14 -fsycl %s -o %t.out -L %opencl_libs_dir -lOpenCL
// UNSUPPORTED: windows
// UNSUPPORTED: cuda
// CUDA does not support the function pointer as kernel argument extension.

// RUN: %clangxx -Xclang -fsycl-allow-func-ptr -std=c++14 -fsycl %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// FIXME: This test should use runtime early exit once correct check for
// corresponding extension is implemented
// UNSUPPORTED: windows
// XFAIL: cuda

#include <CL/sycl.hpp>

Expand Down
8 changes: 5 additions & 3 deletions sycl/test/function-pointers/pass-fp-through-buffer.cpp
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
// RUN: %clangxx -Xclang -fsycl-allow-func-ptr -std=c++14 -fsycl %s -o %t.out -L %opencl_libs_dir -lOpenCL
// UNSUPPORTED: windows
// UNSUPPORTED: cuda
// CUDA does not support the function pointer as kernel argument extension.

// RUN: %clangxx -Xclang -fsycl-allow-func-ptr -std=c++14 -fsycl %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// FIXME: This test should use runtime early exit once correct check for
// corresponding extension is implemented
// UNSUPPORTED: windows
// XFAIL: cuda

#include <CL/sycl.hpp>

Expand Down
4 changes: 3 additions & 1 deletion sycl/test/group-algorithm/all_of.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

#include <CL/sycl.hpp>
#include <algorithm>
Expand Down
4 changes: 3 additions & 1 deletion sycl/test/group-algorithm/any_of.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

#include <CL/sycl.hpp>
#include <algorithm>
Expand Down
4 changes: 3 additions & 1 deletion sycl/test/group-algorithm/broadcast.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

#include <CL/sycl.hpp>
#include <algorithm>
Expand Down
4 changes: 3 additions & 1 deletion sycl/test/group-algorithm/exclusive_scan.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

#include <CL/sycl.hpp>
#include <algorithm>
Expand Down
4 changes: 3 additions & 1 deletion sycl/test/group-algorithm/inclusive_scan.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

#include <CL/sycl.hpp>
#include <algorithm>
Expand Down
3 changes: 3 additions & 0 deletions sycl/test/group-algorithm/leader.cpp
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
Expand Down
4 changes: 3 additions & 1 deletion sycl/test/group-algorithm/none_of.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

#include <CL/sycl.hpp>
#include <algorithm>
Expand Down
4 changes: 3 additions & 1 deletion sycl/test/group-algorithm/reduce.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
// UNSUPPORTED: cuda
// OpenCL C 2.x alike work-group functions not yet supported by CUDA.
//
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

#include <CL/sycl.hpp>
#include <algorithm>
Expand Down
3 changes: 0 additions & 3 deletions sycl/test/hier_par/hier_par_wgscope.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,6 @@
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out

// TODO: ptxas fatal : Unresolved extern function '__spirv_ControlBarrier'
// UNSUPPORTED: cuda

// This test checks correctness of hierarchical kernel execution when there is
// code and data in the work group scope.

Expand Down
3 changes: 3 additions & 0 deletions sycl/test/kernel_from_file/hw.cpp
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
// UNSUPPORTED: cuda
// CUDA does not support SPIR-V.

//-fsycl-targets=%sycl_triple
// RUN: %clangxx -fsycl-device-only -fno-sycl-use-bitcode -Xclang -fsycl-int-header=%t.h -c %s -o %t.spv -I %sycl_include -Xclang -verify-ignore-unexpected=note,warning -Wno-sycl-strict
// RUN: %clangxx -include %t.h -g %s -o %t.out -lsycl -I %sycl_include -Xclang -verify-ignore-unexpected=note,warning
Expand Down
2 changes: 1 addition & 1 deletion sycl/test/linear_id/linear-sub_group.cpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// RUN: %clangxx -fsycl %s -o %t.out
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
Expand Down
3 changes: 2 additions & 1 deletion sycl/test/linear_id/opencl-interop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,10 @@
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// UNSUPPORTED: cuda

//==---------------- opencl-interop.cpp - SYCL linear id test --------------==//
//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Expand Down
2 changes: 1 addition & 1 deletion sycl/test/lit.cfg.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,8 @@

config.substitutions.append( ('%sycl_libs_dir', config.sycl_libs_dir ) )
config.substitutions.append( ('%sycl_include', config.sycl_include ) )
config.substitutions.append( ('%opencl_libs_dir', config.opencl_libs_dir) )
config.substitutions.append( ('%sycl_source_dir', config.sycl_source_dir) )
config.substitutions.append( ('%opencl_libs_dir', config.opencl_libs_dir) )
config.substitutions.append( ('%opencl_include_dir', config.opencl_include_dir) )
config.substitutions.append( ('%cuda_toolkit_include', config.cuda_toolkit_include) )

Expand Down
4 changes: 2 additions & 2 deletions sycl/test/lit.site.cfg.py.in
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,15 @@
import sys

config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
config.sycl_tools_dir = lit_config.params.get('SYCL_TOOLS_DIR', "@LLVM_TOOLS_DIR@")
config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@"
config.sycl_tools_dir = lit_config.params.get('SYCL_TOOLS_DIR', "@LLVM_TOOLS_DIR@")
config.sycl_include = lit_config.params.get('SYCL_INCLUDE', "@SYCL_INCLUDE@")
config.sycl_obj_root = "@SYCL_BINARY_DIR@"
config.sycl_source_dir = "@SYCL_SOURCE_DIR@/source"
config.opencl_libs_dir = os.path.dirname("@OpenCL_LIBRARIES@")
config.sycl_libs_dir = lit_config.params.get('SYCL_LIBS_DIR', "@LLVM_LIBS_DIR@")
config.target_triple = "@TARGET_TRIPLE@"
config.host_triple = "@LLVM_HOST_TRIPLE@"
config.opencl_libs_dir = os.path.dirname("@OpenCL_LIBRARIES@")
config.opencl_include_dir = "@OpenCL_INCLUDE_DIR@"
config.cuda_toolkit_include = "@CUDA_TOOLKIT_INCLUDE@"

Expand Down
1 change: 0 additions & 1 deletion sycl/test/ordered_queue/ordered_dmemll.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
// RUN: %clangxx -fsycl %s -o %t1.out -L %opencl_libs_dir -lOpenCL
// RUN: %CPU_RUN_PLACEHOLDER %t1.out
// RUN: %GPU_RUN_PLACEHOLDER %t1.out
// XFAIL: cuda
//==----------- ordered_dmemll.cpp - Device Memory Linked List test --------==//
// It uses an ordered queue where explicit waiting is not necessary between
// kernels
Expand Down
2 changes: 2 additions & 0 deletions sycl/test/ordered_queue/ordered_queue.cpp
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
// REQUIRES: opencl
//
// RUN: %clangxx -fsycl %s -o %t.out -L %opencl_libs_dir -lOpenCL
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
//==---------- ordered_queue.cpp - SYCL ordered queue test -----------------==//
Expand Down
Loading