Skip to content

Commit

Permalink
Added cudaGetLastError() calls to reset benchmarking kernel errors (issue 88). (#173)
Browse files Browse the repository at this point in the history

* Create and use NVBENCH_CUDA_CALL_RESET_ERROR.

* Moved cudaGetLastError() call to NVBENCH_CUDA_CALL macro

---------

Co-authored-by: Sergey Pavlov <psvvsp89@gmail.com>
  • Loading branch information
psvvsp and Sergey Pavlov authored May 31, 2024
1 parent 088c9ee commit a171514
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 0 deletions.
2 changes: 2 additions & 0 deletions nvbench/cuda_call.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,14 @@
#include <string>

/// Throws a std::runtime_error if `call` doesn't return `cudaSuccess`.
/// Before throwing, resets CUDA's sticky error state with cudaGetLastError()
/// so that subsequent CUDA API calls are not poisoned by the already-handled
/// failure (see issue 88: benchmarking kernel errors leaking across runs).
#define NVBENCH_CUDA_CALL(call)                                                           \
  do                                                                                      \
  {                                                                                       \
    /* Parenthesize the argument for macro hygiene. */                                    \
    const cudaError_t nvbench_cuda_call_error = (call);                                   \
    if (nvbench_cuda_call_error != cudaSuccess)                                           \
    {                                                                                     \
      /* Clear the error flag; throw_error reports the captured code instead. */         \
      cudaGetLastError();                                                                 \
      nvbench::cuda_call::throw_error(__FILE__, __LINE__, #call, nvbench_cuda_call_error); \
    }                                                                                     \
  } while (false)
Expand Down
1 change: 1 addition & 0 deletions testing/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ set(test_srcs
named_values.cu
option_parser.cu
range.cu
reset_error.cu
ring_buffer.cu
runner.cu
state.cu
Expand Down
30 changes: 30 additions & 0 deletions testing/reset_error.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#include <nvbench/cuda_call.cuh>

#include "test_asserts.cuh"


namespace
{
// Trivial kernel used by this test purely as an error source: main() launches
// it with null buffers, so the write below faults and leaves the CUDA context
// holding an asynchronous error (surfaced at the next synchronizing call).
__global__ void multiply5(const int32_t *__restrict__ in, int32_t *__restrict__ out)
{
  const auto tid = blockDim.x * blockIdx.x + threadIdx.x;
  out[tid] = in[tid] * 5;
}
} // namespace

// Regression test for issue 88: NVBENCH_CUDA_CALL must reset CUDA's sticky
// error state (via cudaGetLastError()) when the wrapped call fails, so later
// CUDA API calls are not poisoned by an already-reported error.
int main()
{
  // Deliberately launch with null pointers: the kernel's write through the
  // null pointer leaves the context with an asynchronous error.
  multiply5<<<256, 256>>>(nullptr, nullptr);

  try
  {
    // The synchronization surfaces the asynchronous error; the macro is
    // expected to throw std::runtime_error for it.
    NVBENCH_CUDA_CALL(cudaStreamSynchronize(0));
    // Unreachable if the macro behaves correctly.
    ASSERT(false);
  }
  catch (const std::runtime_error &)
  {
    // The macro must have cleared the error flag before throwing, so the
    // very next query reports success.
    ASSERT(cudaGetLastError() == cudaError_t::cudaSuccess);
  }

  return 0;
}

0 comments on commit a171514

Please sign in to comment.