From d71f0dd93a8bc17348385a2d0577a27525f5baf8 Mon Sep 17 00:00:00 2001
From: Cliff Burdick <30670611+cliffburdick@users.noreply.github.com>
Date: Thu, 16 Jan 2025 08:33:15 -0800
Subject: [PATCH] Fixing warnings issues by clang-19, both host and device
 (#825)

---
 CMakeLists.txt | 16 ++++--
 examples/black_scholes.cu | 2 +-
 examples/convolution.cu | 1 -
 examples/mvdr_beamformer.h | 6 +-
 examples/recursive_filter.cu | 3 -
 include/matx/core/half.h | 4 +-
 include/matx/core/half_complex.h | 6 +-
 include/matx/core/operator_utils.h | 2 -
 include/matx/core/print.h | 2 +-
 include/matx/core/storage.h | 4 +-
 include/matx/core/tensor.h | 6 +-
 include/matx/core/tensor_utils.h | 2 +-
 include/matx/core/utils.h | 6 +-
 include/matx/executors/host.h | 4 +-
 include/matx/generators/chirp.h | 4 +-
 include/matx/kernels/channelize_poly.cuh | 10 ++--
 include/matx/kernels/filter.cuh | 58 ++++++++++----------
 include/matx/operators/all.h | 8 +--
 include/matx/operators/any.h | 8 +--
 include/matx/operators/argmax.h | 12 ++--
 include/matx/operators/argmin.h | 10 ++--
 include/matx/operators/argminmax.h | 10 ++--
 include/matx/operators/cgsolve.h | 10 ++--
 include/matx/operators/channelize_poly.h | 8 +--
 include/matx/operators/chol.h | 8 +--
 include/matx/operators/clone.h | 2 +-
 include/matx/operators/conv.h | 8 +--
 include/matx/operators/corr.h | 8 +--
 include/matx/operators/cov.h | 8 +--
 include/matx/operators/cumsum.h | 8 +--
 include/matx/operators/det.h | 8 +--
 include/matx/operators/eig.h | 8 +--
 include/matx/operators/einsum.h | 2 +-
 include/matx/operators/find.h | 8 +--
 include/matx/operators/find_idx.h | 8 +--
 include/matx/operators/frexp.h | 8 +--
 include/matx/operators/hist.h | 8 +--
 include/matx/operators/lu.h | 8 +--
 include/matx/operators/matvec.h | 8 +--
 include/matx/operators/mean.h | 8 +--
 include/matx/operators/median.h | 8 +--
 include/matx/operators/outer.h | 8 +--
 include/matx/operators/pinv.h | 8 +--
 include/matx/operators/prod.h | 8 +--
 include/matx/operators/pwelch.h | 8 +--
 include/matx/operators/qr.h | 8 +--
 include/matx/operators/reduce.h | 8 +--
 include/matx/operators/resample_poly.h | 8 +--
 include/matx/operators/softmax.h | 8 +--
 include/matx/operators/sort.h | 8 +--
 include/matx/operators/stdd.h | 8 +--
 include/matx/operators/sum.h | 8 +--
 include/matx/operators/svd.h | 12 ++--
 include/matx/operators/trace.h | 10 ++--
 include/matx/operators/transpose.h | 8 +--
 include/matx/operators/unique.h | 8 +--
 include/matx/operators/var.h | 8 +--
 include/matx/transforms/cgsolve.h | 1 -
 include/matx/transforms/channelize_poly.h | 18 +-----
 include/matx/transforms/chol/chol_cuda.h | 4 +-
 include/matx/transforms/cub.h | 29 ++++------
 include/matx/transforms/eig/eig_cuda.h | 8 +--
 include/matx/transforms/fft/fft_common.h | 1 -
 include/matx/transforms/inverse.h | 2 +-
 include/matx/transforms/lu/lu_cuda.h | 4 +-
 include/matx/transforms/matmul/matmul_cuda.h | 2 +-
 include/matx/transforms/matvec.h | 8 +--
 include/matx/transforms/outer.h | 8 +--
 include/matx/transforms/qr/qr_cuda.h | 7 +--
 include/matx/transforms/reduce.h | 14 ++---
 include/matx/transforms/resample_poly.h | 1 -
 include/matx/transforms/svd/svd_cuda.h | 10 ++--
 test/00_io/FileIOTests.cu | 3 -
 test/00_operators/OperatorTests.cu | 40 --------------
 test/00_operators/ReductionTests.cu | 1 -
 test/00_solver/Cholesky.cu | 3 -
 test/00_solver/Det.cu | 2 -
 test/00_solver/Pinv.cu | 4 --
 test/00_solver/SVD.cu | 9 +--
 test/00_transform/ChannelizePoly.cu | 2 -
 test/00_transform/FFT.cu | 2 +-
 81 files changed, 278 insertions(+), 365 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 62efeb8aa..511ab3fad 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -134,8 +134,10 @@ rapids_cpm_cccl(
 target_link_libraries(matx INTERFACE CCCL::CCCL)
 
-# Set flags for compiling tests faster
-set(MATX_CUDA_FLAGS ${CMAKE_CUDA_FLAGS} --threads 0 -ftemplate-backtrace-limit=0)
+# Set flags for compiling tests faster (only for nvcc)
+if (NOT CMAKE_CUDA_COMPILER_ID STREQUAL "Clang")
+  set(MATX_CUDA_FLAGS ${CMAKE_CUDA_FLAGS} --threads 0 -ftemplate-backtrace-limit=0)
+endif()
 
 # Hack because CMake doesn't have short circult evaluation
 if (NOT CMAKE_BUILD_TYPE OR "${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
@@ -165,9 +167,13 @@ if (NOT ${IS_NVCPP} GREATER -1)
   endif()
 endif()
 
-
-
-set(WARN_FLAGS ${WARN_FLAGS} $<$:-Werror all-warnings>)
+if (CMAKE_CUDA_COMPILER_ID STREQUAL "Clang")
+message((STATUS "Using Clang compiler"))
+  # Workaround for clang bug: https://github.com/llvm/llvm-project/issues/58491
+  set(WARN_FLAGS ${WARN_FLAGS} $<$:-Wno-unused-command-line-argument>)
+else()
+  set(WARN_FLAGS ${WARN_FLAGS} $<$:-Werror all-warnings>)
+endif()
 set(WARN_FLAGS ${WARN_FLAGS} $<$:-Werror>)
 
 # CUTLASS slows down compile times when used, so leave it as optional for now
diff --git a/examples/black_scholes.cu b/examples/black_scholes.cu
index 2a8ab47a2..cb7b94fa1 100644
--- a/examples/black_scholes.cu
+++ b/examples/black_scholes.cu
@@ -61,7 +61,7 @@ private:
 
 public:
   BlackScholes(O out, I1 K, I1 V, I1 S, I1 r, I1 T)
-      : out_(out), K_(K), V_(V), S_(S), r_(r), T_(T) {}
+      : out_(out), V_(V), S_(S), K_(K), r_(r), T_(T) {}
 
   __device__ inline void operator()(index_t idx)
   {
diff --git a/examples/convolution.cu b/examples/convolution.cu
index ac41759bb..dc54186c4 100644
--- a/examples/convolution.cu
+++ b/examples/convolution.cu
@@ -39,7 +39,6 @@ using namespace matx;
 int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
 {
   MATX_ENTER_HANDLER();
-  typedef cuda::std::complex complex;
 
   uint32_t iterations = 10;
   constexpr index_t numSamples = 1638400;
diff --git a/examples/mvdr_beamformer.h b/examples/mvdr_beamformer.h
index 350239f5c..d19d1cbd6 100644
--- a/examples/mvdr_beamformer.h
+++ b/examples/mvdr_beamformer.h
@@ -164,9 +164,9 @@ class MVDRBeamformer {
   auto GetCovMatInvView() { return invCovMatView; }
 
 private:
-  index_t num_beams_;
-  index_t num_el_;
-  index_t data_len_;
+  [[maybe_unused]] index_t num_beams_;
+  [[maybe_unused]] index_t num_el_;
+  [[maybe_unused]] index_t data_len_;
   index_t snap_len_;
   cuda::std::complex load_coeff_ = {0.1f, 0.f};
diff --git a/examples/recursive_filter.cu b/examples/recursive_filter.cu
index fbeedec4d..79eb07bbb 100644
--- a/examples/recursive_filter.cu
+++ b/examples/recursive_filter.cu
@@ -40,8 +40,6 @@ using namespace matx;
 int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
 {
   MATX_ENTER_HANDLER();
-  using complex = cuda::std::complex;
-
   cudaDeviceProp prop;
   cudaGetDeviceProperties(&prop, 0);
 
@@ -70,7 +68,6 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
   cudaEventCreate(&start);
   cudaEventCreate(&stop);
 
-  using OutType = float;
   using InType = float;
   using FilterType = float;
diff --git a/include/matx/core/half.h b/include/matx/core/half.h
index 0273b7425..20dd8b372 100644
--- a/include/matx/core/half.h
+++ b/include/matx/core/half.h
@@ -417,7 +417,7 @@ __MATX_HOST__ __MATX_DEVICE__ __MATX_INLINE__ bool
 operator==(const T &lhs, const matxHalf &rhs)
 {
   matxHalf tmp{lhs};
-  return lhs == tmp;
+  return rhs == tmp;
 }
 
 /**
@@ -464,7 +464,7 @@ __MATX_HOST__ __MATX_DEVICE__ __MATX_INLINE__ bool
 operator!=(const T &lhs, const matxHalf &rhs)
 {
   matxHalf tmp{lhs};
-  return !(lhs == tmp);
+  return !(rhs == tmp);
 }
 
 /**
diff --git a/include/matx/core/half_complex.h b/include/matx/core/half_complex.h
index 6ac4566bb..884475dc7 100644
--- a/include/matx/core/half_complex.h
+++ b/include/matx/core/half_complex.h
@@ -515,7 +515,7 @@ __MATX_HOST__ __MATX_DEVICE__ __MATX_INLINE__ bool
 operator==(const T &lhs, const matxHalfComplex &rhs)
 {
   matxHalfComplex tmp{lhs};
-  return lhs == tmp;
+  return rhs == tmp;
 }
 
 /**
@@ -562,7 +562,7 @@ __MATX_HOST__ __MATX_DEVICE__ __MATX_INLINE__ bool
 operator!=(const T &lhs, const matxHalfComplex &rhs)
 {
   matxHalfComplex tmp{lhs};
-  return !(lhs == tmp);
+  return !(rhs == tmp);
 }
 
 
@@ -853,7 +853,7 @@ pow(const T &x, const matxHalfComplex &y)
 {
   cuda::std::complex tmp{static_cast(y.real()), static_cast(y.imag())};
-  tmp = cuda::std::pow(y, pow);
+  tmp = cuda::std::pow(x, pow);
   return {static_cast(tmp.real()), static_cast(tmp.imag())};
 }
 
diff --git a/include/matx/core/operator_utils.h b/include/matx/core/operator_utils.h
index 6f1f1f189..95375c77c 100644
--- a/include/matx/core/operator_utils.h
+++ b/include/matx/core/operator_utils.h
@@ -132,8 +132,6 @@ namespace matx {
   template 
   __MATX_INLINE__ auto GetSupportedTensor(const Op &in, const ValidFunc &fn, matxMemorySpace_t space, cudaStream_t stream = 0) {
-    constexpr int RANK = Op::Rank();
-
     if constexpr (is_matx_transform_op()) {
       // We can assume that if a transform is passed to the input then PreRun has already completed
       // on the transform and we can use the internal pointer
diff --git a/include/matx/core/print.h b/include/matx/core/print.h
index d7c7ee51f..6d6180732 100644
--- a/include/matx/core/print.h
+++ b/include/matx/core/print.h
@@ -653,7 +653,7 @@ namespace matx {
    */
   template 0 && sizeof...(Args) == 0), bool> = true>
-  void fprint(FILE* fp, const Op &op, Args... dims) {
+  void fprint(FILE* fp, const Op &op, [[maybe_unused]] Args... dims) {
     cuda::std::array arr = {0};
     auto tp = cuda::std::tuple_cat(arr);
     cuda::std::apply([&](auto &&...args) { fprint(fp, op, args...); }, tp);
diff --git a/include/matx/core/storage.h b/include/matx/core/storage.h
index 17260f459..d34febaf6 100644
--- a/include/matx/core/storage.h
+++ b/include/matx/core/storage.h
@@ -406,7 +406,7 @@ namespace matx
      */
     void SetData(T *const data) noexcept
     {
-      data_.reset(data_, [](auto){});
+      data_.reset(data, [](auto){});
     }
 
     /**
@@ -423,7 +423,7 @@ namespace matx
      *
      * @param size Size in bytes to allocate
      */
-    __MATX_INLINE__ T* allocate(size_t size)
+    __MATX_INLINE__ T* allocate([[maybe_unused]] size_t size)
     {
       MATX_THROW(matxInvalidParameter, "Cannot call allocate on a smart pointer storage type");
     }
diff --git a/include/matx/core/tensor.h b/include/matx/core/tensor.h
index 00227a656..f042d18b5 100644
--- a/include/matx/core/tensor.h
+++ b/include/matx/core/tensor.h
@@ -967,7 +967,7 @@ class tensor_t : public detail::tensor_impl_t {
   Reset(T *const data, T *const ldata) noexcept
   {
     storage_.SetData(data);
-    this->SetData(data);
+    this->SetData(ldata);
   }
 
 
@@ -1074,7 +1074,7 @@ class tensor_t : public detail::tensor_impl_t {
   __MATX_INLINE__ __MATX_HOST__ bool IsManagedPointer() {
     bool managed;
-    const CUresult retval = cuPointerGetAttribute(&managed, CU_POINTER_ATTRIBUTE_IS_MANAGED, (CUdeviceptr)this->Data());
+    [[maybe_unused]] const CUresult retval = cuPointerGetAttribute(&managed, CU_POINTER_ATTRIBUTE_IS_MANAGED, (CUdeviceptr)this->Data());
     MATX_ASSERT(retval == CUDA_SUCCESS, matxNotSupported);
     return managed;
   }
@@ -1453,7 +1453,7 @@ class tensor_t : public detail::tensor_impl_t {
     // Determine where this memory resides
     auto kind = GetPointerKind(this->Data());
-    auto mem_res = cuPointerGetAttributes(sizeof(attr)/sizeof(attr[0]), attr, data, reinterpret_cast(this->Data()));
+    [[maybe_unused]] auto mem_res = cuPointerGetAttributes(sizeof(attr)/sizeof(attr[0]), attr, data, reinterpret_cast(this->Data()));
     MATX_ASSERT_STR_EXP(mem_res, CUDA_SUCCESS, matxCudaError, "Error returned from cuPointerGetAttributes");
     if (kind == MATX_INVALID_MEMORY) {
       if (mem_type == CU_MEMORYTYPE_DEVICE) {
diff --git a/include/matx/core/tensor_utils.h b/include/matx/core/tensor_utils.h
index 3d5341881..067f70c52 100644
--- a/include/matx/core/tensor_utils.h
+++ b/include/matx/core/tensor_utils.h
@@ -155,7 +155,7 @@ namespace matx
   __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ auto BlockToIdx(const Op &op, index_t abs, int nb_dims) {
     using l_stride_type = index_t;
     using l_shape_type = index_t;
-    constexpr int RANK = op.Rank();
+    constexpr int RANK = Op::Rank();
     cuda::std::array indices{0};
     for (int idx = 0; idx < RANK - nb_dims; idx++) {
diff --git a/include/matx/core/utils.h b/include/matx/core/utils.h
index 80f377029..816432100 100644
--- a/include/matx/core/utils.h
+++ b/include/matx/core/utils.h
@@ -134,9 +134,9 @@ __MATX_HOST__ __MATX_DEVICE__ __MATX_INLINE__ auto madd( const T1 &x, const T2 &
     //__half2 Y = make_half2(y.real(), y.imag());
     //__half2 Z = make_half2(z.real(), z.imag());
-    const __half2 &X = *reinterpret_cast(&x);
-    const __half2 &Y = *reinterpret_cast(&y);
-    const __half2 &Z = *reinterpret_cast(&z);
+    [[maybe_unused]] const __half2 &X = *reinterpret_cast(&x);
+    [[maybe_unused]] const __half2 &Y = *reinterpret_cast(&y);
+    [[maybe_unused]] const __half2 &Z = *reinterpret_cast(&z);
 
 #if 1
 #ifdef __CUDA_ARCH__
diff --git a/include/matx/executors/host.h b/include/matx/executors/host.h
index e7ad02698..cb65a7842 100644
--- a/include/matx/executors/host.h
+++ b/include/matx/executors/host.h
@@ -58,7 +58,7 @@ enum class ThreadsMode {
 struct HostExecParams {
   HostExecParams(int threads = 1) : threads_(threads) {}
-  HostExecParams(cpu_set_t cpu_set) : cpu_set_(cpu_set), threads_(1) {
+  HostExecParams(cpu_set_t cpu_set) : threads_(1), cpu_set_(cpu_set) {
     MATX_ASSERT_STR(false, matxNotSupported, "CPU affinity not supported yet");
   }
 
@@ -66,7 +66,9 @@ struct HostExecParams {
 
 private:
   int threads_;
+MATX_IGNORE_WARNING_PUSH_CLANG("-Wunused-private-field")
   cpu_set_t cpu_set_ {0};
+MATX_IGNORE_WARNING_POP_CLANG
 };
 
 /**
diff --git a/include/matx/generators/chirp.h b/include/matx/generators/chirp.h
index c329f1aa6..c7f110b02 100644
--- a/include/matx/generators/chirp.h
+++ b/include/matx/generators/chirp.h
@@ -67,8 +67,8 @@ namespace matx
       inline __MATX_HOST__ __MATX_DEVICE__ Chirp(SpaceOp sop, FreqType f0, space_type t1, FreqType f1, ChirpMethod method) :
         sop_(sop),
         f0_(f0),
+        f1_(f1),
         t1_(t1),
-        f1_(f1),
         method_(method) {}
 
@@ -109,8 +109,8 @@ namespace matx
       inline __MATX_HOST__ __MATX_DEVICE__ ComplexChirp(SpaceOp sop, FreqType f0, space_type t1, FreqType f1, ChirpMethod method) :
         sop_(sop),
         f0_(f0),
-        t1_(t1),
         f1_(f1),
+        t1_(t1),
         method_(method) {}
 
diff --git a/include/matx/kernels/channelize_poly.cuh b/include/matx/kernels/channelize_poly.cuh
index 455205ee6..18402c255 100644
--- a/include/matx/kernels/channelize_poly.cuh
+++ b/include/matx/kernels/channelize_poly.cuh
@@ -290,22 +290,22 @@ __global__ void ChannelizePoly1D_Smem(OutType output, InType input, FilterType f
     if (outdims[OutElemRank] <= last_elem) {
       const filter_t *h = h_start;
       output_t accum { 0 };
-      const int first_end = cuda::std::min(cached_input_ind_tail + filter_phase_len - 1, smem_input_height - 1);
+      const uint32_t first_end = cuda::std::min(cached_input_ind_tail + filter_phase_len - 1, smem_input_height - 1);
       // The footprint of samples involved in the convolution may wrap from the end
       // to the beginning of smem_input. The prologue below handles the samples from
       // the current tail to the end of smem_input and the epilogue starts back at the
       // beginning of smem_input.
-      const int prologue_count = (first_end - cached_input_ind_tail + 1);
-      const int epilogue_count = (prologue_count < filter_phase_len) ? filter_phase_len - prologue_count : 0;
+      const uint32_t prologue_count = (first_end - cached_input_ind_tail + 1);
+      const uint32_t epilogue_count = (prologue_count < filter_phase_len) ?
filter_phase_len - prologue_count : 0; const input_t *sample = smem_input + cached_input_ind_tail * num_channels + (num_channels - 1 - chan); // Apply the filter h in reverse order below to flip the filter for convolution - for (int k = 0; k < prologue_count; k++) { + for (uint32_t k = 0; k < prologue_count; k++) { accum += (*h) * (*sample); sample += num_channels; h -= num_channels; } sample = smem_input + (num_channels - 1 - chan); - for (int k = 0; k < epilogue_count; k++) { + for (uint32_t k = 0; k < epilogue_count; k++) { accum += (*h) * (*sample); sample += num_channels; h -= num_channels; diff --git a/include/matx/kernels/filter.cuh b/include/matx/kernels/filter.cuh index a40ccef68..b6aedcad3 100644 --- a/include/matx/kernels/filter.cuh +++ b/include/matx/kernels/filter.cuh @@ -11,17 +11,17 @@ namespace matx { namespace detail_filter { - constexpr size_t MAX_BATCHES = 10000; - constexpr size_t BLOCK_SIZE_RECURSIVE = 1024; - constexpr size_t CORR_COLS = BLOCK_SIZE_RECURSIVE; - constexpr size_t MAX_BLOCKS_PER_BATCH = 1000; - constexpr size_t RECURSIVE_VALS_PER_THREAD = 8; - constexpr size_t MAX_NON_RECURSIVE_COEFFS = 4; - constexpr size_t MAX_RECURSIVE_COEFFS = 4; - constexpr size_t WARP_SIZE = 32; + constexpr uint32_t MAX_BATCHES = 10000; + constexpr uint32_t BLOCK_SIZE_RECURSIVE = 1024; + constexpr uint32_t CORR_COLS = BLOCK_SIZE_RECURSIVE; + constexpr uint32_t MAX_BLOCKS_PER_BATCH = 1000; + constexpr uint32_t RECURSIVE_VALS_PER_THREAD = 8; + constexpr uint32_t MAX_NON_RECURSIVE_COEFFS = 4; + constexpr uint32_t MAX_RECURSIVE_COEFFS = 4; + constexpr uint32_t WARP_SIZE = 32; using COMPLEX_TYPE = cuComplex; - constexpr size_t RECURSIVE_CHUNK_SIZE = BLOCK_SIZE_RECURSIVE * RECURSIVE_VALS_PER_THREAD; - constexpr size_t MAX_SIGNAL_LEN_PER_BATCH = + constexpr uint32_t RECURSIVE_CHUNK_SIZE = BLOCK_SIZE_RECURSIVE * RECURSIVE_VALS_PER_THREAD; + constexpr uint32_t MAX_SIGNAL_LEN_PER_BATCH = (BLOCK_SIZE_RECURSIVE * RECURSIVE_VALS_PER_THREAD * MAX_BLOCKS_PER_BATCH); }; using namespace detail_filter; @@ -121,7 +121,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( } } - if (static_cast(threadIdx.x) < num_non_recursive - 1) { + if (threadIdx.x < num_non_recursive - 1) { if (chunk_id == 0) { if constexpr (is_cuda_complex_v) { s_exch[threadIdx.x] = make_cuFloatComplex(0.0, 0.0); @@ -138,7 +138,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( } // Copy all correction coefficients - for (int32_t i = 0; i < CORR_COLS * num_recursive; i += blockDim.x) { + for (uint32_t i = 0; i < CORR_COLS * num_recursive; i += blockDim.x) { s_corr[i + threadIdx.x] = d_corr[i + threadIdx.x]; } @@ -166,7 +166,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( } if ((blockIdx.x * RECURSIVE_VALS_PER_THREAD + threadIdx.x * r) < - len) { // Make sure this value is within bounds of the signal + static_cast(len)) { // Make sure this value is within bounds of the signal if constexpr (is_cuda_complex_v) { vals[r] = cuCmulf(vals[r], r_nonr[0]); } @@ -215,7 +215,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( // 1->2 #pragma unroll - for (int32_t r = 0; r < RECURSIVE_VALS_PER_THREAD; r++) { + for (uint32_t r = 0; r < RECURSIVE_VALS_PER_THREAD; r++) { if constexpr (is_cuda_complex_v) { *reinterpret_cast(&tmp[0]) = __shfl_sync(~0, *reinterpret_cast(&vals[r]), 0, 2); @@ -256,7 +256,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( for (uint32_t r = 0; r < 
RECURSIVE_VALS_PER_THREAD; r++) { // Load all of the values we need from other threads in the warp #pragma unroll - for (int32_t rec = 0; rec < cuda::std::min(num_recursive, static_cast(wl)); rec++) { + for (uint32_t rec = 0; rec < cuda::std::min(num_recursive, static_cast(wl)); rec++) { if constexpr (is_cuda_complex_v) { *reinterpret_cast(&tmp[rec + 1]) = __shfl_sync(~0, *reinterpret_cast(&vals[r]), @@ -268,10 +268,10 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( } if ((blockIdx.x * RECURSIVE_VALS_PER_THREAD + threadIdx.x * r) < - len) { // Make sure this value is within bounds of the signal + static_cast(len)) { // Make sure this value is within bounds of the signal // Now apply those values #pragma unroll - for (int32_t rec = 0; rec < cuda::std::min(num_recursive, static_cast(wl)); rec++) { + for (uint32_t rec = 0; rec < cuda::std::min(num_recursive, static_cast(wl)); rec++) { if constexpr (is_cuda_complex_v) { vals[r] = cuCaddf(vals[r], @@ -304,7 +304,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( // while correcting int32_t sub_group_base; int32_t sub_group_idx; - int32_t cor_size = 32; + uint32_t cor_size = 32; int32_t dcor = 2 * cor_size - 1; int32_t cor_log2 = 5; #pragma unroll @@ -317,9 +317,9 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( __syncthreads(); // Pick off the last num_recursive threads in the block - if (sub_group_idx < 0 && (-sub_group_idx <= num_recursive)) { + if (sub_group_idx < 0 && (-sub_group_idx <= static_cast(num_recursive))) { #pragma unroll - for (int32_t r = 0; r < RECURSIVE_VALS_PER_THREAD; r++) { + for (uint32_t r = 0; r < RECURSIVE_VALS_PER_THREAD; r++) { s_exch[num_recursive * (sub_group_base + r * (BLOCK_SIZE_RECURSIVE / 2) / cor_size) - sub_group_idx - 1] = vals[r]; @@ -335,7 +335,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( if ((blockIdx.x * RECURSIVE_VALS_PER_THREAD + threadIdx.x * vpt) < len) { // Make sure this value is within bounds of the signal #pragma unroll - for (int32_t r = 0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { tmp[r] = s_exch[(sub_group_base + vpt * ((BLOCK_SIZE_RECURSIVE / 2) / cor_size)) * num_recursive + @@ -384,7 +384,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( if ((blockIdx.x * RECURSIVE_VALS_PER_THREAD + threadIdx.x * block_idx) < len) { // Make sure this value is within bounds of the signal #pragma unroll - for (int32_t r = 0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { if constexpr (is_cuda_complex_v) { vals[block_idx] = cuCaddf( vals[block_idx], @@ -458,7 +458,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( int to_check = chunk_id - threadIdx.x - 1; int lstatus = STATUS_FLAG_PARTIAL_COMPLETE; int full_complete; - int last_full; + unsigned int last_full; if (threadIdx.x < 32) { // Keep looping until we have both a full carry present, and all the @@ -481,7 +481,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( // them if (threadIdx.x < last_full) { #pragma unroll - for (int32_t r = 0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { if constexpr (is_cuda_complex_v) { *reinterpret_cast( &s_exch[threadIdx.x * num_recursive + r]) = @@ -506,7 +506,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( // value to all threads if (threadIdx.x == 0) { #pragma unroll - for (int32_t r = 
0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { if constexpr (is_cuda_complex_v) { *reinterpret_cast(&tmp[r]) = *reinterpret_cast( @@ -522,13 +522,13 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( } // Reuse the non-recursive registers here to save some shared memory - for (int32_t r = 0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { r_nonr[r] = d_last_corrections[r]; } // Roll up the lookback values for (uint32_t lookback = 0; lookback < last_full; lookback++) { - for (int32_t r = 0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { if constexpr (is_cuda_complex_v) { tmp[r] = cuCaddf(s_exch[lookback * num_recursive + r], cuCmulf(tmp[r], r_nonr[r])); @@ -540,7 +540,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( } } - for (int32_t r = 0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { s_exch[num_recursive - r - 1] = tmp[r]; } } @@ -553,7 +553,7 @@ __global__ __launch_bounds__(BLOCK_SIZE_RECURSIVE, 1) void RecursiveFilter( block_idx = 0; do { #pragma unroll - for (int32_t r = 0; r < num_recursive; r++) { + for (uint32_t r = 0; r < num_recursive; r++) { if ((blockIdx.x * RECURSIVE_VALS_PER_THREAD + threadIdx.x * r) < len) { if constexpr (is_cuda_complex_v) { vals[block_idx] = cuCaddf( diff --git a/include/matx/operators/all.h b/include/matx/operators/all.h index f56f8336c..8f01b02c4 100644 --- a/include/matx/operators/all.h +++ b/include/matx/operators/all.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/any.h b/include/matx/operators/any.h index 1d76e9688..daaf2ac24 100644 --- a/include/matx/operators/any.h +++ b/include/matx/operators/any.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. 
Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/argmax.h b/include/matx/operators/argmax.h index b3fe7fda0..9186afc1a 100644 --- a/include/matx/operators/argmax.h +++ b/include/matx/operators/argmax.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // @@ -75,14 +75,14 @@ namespace detail { } template - __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept + __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) const noexcept { if constexpr (is_matx_op()) { a_.PreRun(std::forward(shape), std::forward(ex)); } } - constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const + constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size([[maybe_unused]] int dim) const { return 0; } diff --git a/include/matx/operators/argmin.h b/include/matx/operators/argmin.h index 6311dc799..48e423101 100644 --- a/include/matx/operators/argmin.h +++ b/include/matx/operators/argmin.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // @@ -82,7 +82,7 @@ namespace detail { } } - constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const + constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size([[maybe_unused]] int dim) const { return 0; } diff --git a/include/matx/operators/argminmax.h b/include/matx/operators/argminmax.h index 2a680b72e..95f711197 100644 --- a/include/matx/operators/argminmax.h +++ b/include/matx/operators/argminmax.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // @@ -82,7 +82,7 @@ namespace detail { } } - constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const + constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size([[maybe_unused]] int dim) const { return 0; } diff --git a/include/matx/operators/cgsolve.h b/include/matx/operators/cgsolve.h index 21eab1bcf..db1f508e4 100644 --- a/include/matx/operators/cgsolve.h +++ b/include/matx/operators/cgsolve.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. 
Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // @@ -95,7 +95,7 @@ namespace matx } template - __MATX_INLINE__ void InnerPreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept + __MATX_INLINE__ void InnerPreRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) const noexcept { if constexpr (is_matx_op()) { a_.PreRun(std::forward(shape), std::forward(ex)); diff --git a/include/matx/operators/channelize_poly.h b/include/matx/operators/channelize_poly.h index 3f6e374f5..82c3c1070 100644 --- a/include/matx/operators/channelize_poly.h +++ b/include/matx/operators/channelize_poly.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/chol.h b/include/matx/operators/chol.h index 5d7dc33d5..0c4cb67c5 100644 --- a/include/matx/operators/chol.h +++ b/include/matx/operators/chol.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// diff --git a/include/matx/operators/clone.h b/include/matx/operators/clone.h index b99bd83af..3010d03cd 100644 --- a/include/matx/operators/clone.h +++ b/include/matx/operators/clone.h @@ -57,7 +57,7 @@ namespace matx __MATX_INLINE__ CloneOp(const T &op, cuda::std::array shape) : op_(op) { static_assert(T::Rank() < CRank, "Cloning rank must be higher than input operator rank"); - const index_t num_keep = std::count_if(shape.begin(), shape.end(), [](index_t i) { return i == matxKeepDim; }); + [[maybe_unused]] const index_t num_keep = std::count_if(shape.begin(), shape.end(), [](index_t i) { return i == matxKeepDim; }); MATX_ASSERT_STR(num_keep == T::Rank(), matxInvalidParameter, "Number of matxKeepDim in a clone must match input operator rank"); diff --git a/include/matx/operators/conv.h b/include/matx/operators/conv.h index 62f3a2f8e..37645bab6 100644 --- a/include/matx/operators/conv.h +++ b/include/matx/operators/conv.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/corr.h b/include/matx/operators/corr.h index 5156e9068..8edc0823d 100644 --- a/include/matx/operators/corr.h +++ b/include/matx/operators/corr.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// diff --git a/include/matx/operators/cov.h b/include/matx/operators/cov.h index dc3d42093..b1e613113 100644 --- a/include/matx/operators/cov.h +++ b/include/matx/operators/cov.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/cumsum.h b/include/matx/operators/cumsum.h index 79f89fbda..735418f2d 100644 --- a/include/matx/operators/cumsum.h +++ b/include/matx/operators/cumsum.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/det.h b/include/matx/operators/det.h index ecdf7758e..edcd3861f 100644 --- a/include/matx/operators/det.h +++ b/include/matx/operators/det.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. 
Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/eig.h b/include/matx/operators/eig.h index ddde7cde6..d7ab5cfc2 100644 --- a/include/matx/operators/eig.h +++ b/include/matx/operators/eig.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/einsum.h b/include/matx/operators/einsum.h index a0edb55e3..5d545c71f 100644 --- a/include/matx/operators/einsum.h +++ b/include/matx/operators/einsum.h @@ -109,7 +109,7 @@ namespace detail { // Size is not relevant in einsum() since there are multiple return values and it // is not allowed to be called in larger expressions - constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const + constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size([[maybe_unused]] int dim) const { return 0; } diff --git a/include/matx/operators/find.h b/include/matx/operators/find.h index 425a31745..c4e02b29b 100644 --- a/include/matx/operators/find.h +++ b/include/matx/operators/find.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/find_idx.h b/include/matx/operators/find_idx.h index c7d5580f7..a8ae2ead1 100644 --- a/include/matx/operators/find_idx.h +++ b/include/matx/operators/find_idx.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/frexp.h b/include/matx/operators/frexp.h index 41e5093fb..eba9f3093 100644 --- a/include/matx/operators/frexp.h +++ b/include/matx/operators/frexp.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// diff --git a/include/matx/operators/hist.h b/include/matx/operators/hist.h index 6eef7486a..73f8f95aa 100644 --- a/include/matx/operators/hist.h +++ b/include/matx/operators/hist.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/lu.h b/include/matx/operators/lu.h index 99df6cf13..28d3f6e46 100644 --- a/include/matx/operators/lu.h +++ b/include/matx/operators/lu.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/matvec.h b/include/matx/operators/matvec.h index 222402c24..d318c4a2a 100644 --- a/include/matx/operators/matvec.h +++ b/include/matx/operators/matvec.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. 
Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/mean.h b/include/matx/operators/mean.h index c63714031..45e46828f 100644 --- a/include/matx/operators/mean.h +++ b/include/matx/operators/mean.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // mean rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must remeanuce the above cOpBright notice, +// 2. Redistributions in binary form must remeanuce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote meanucts derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/median.h b/include/matx/operators/median.h index a1d201a3f..60ec4442e 100644 --- a/include/matx/operators/median.h +++ b/include/matx/operators/median.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // median rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must remedianuce the above cOpBright notice, +// 2. Redistributions in binary form must remedianuce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote medianucts derived from // this software without specific prior written permission. 
// diff --git a/include/matx/operators/outer.h b/include/matx/operators/outer.h index 56cd07aa6..7dc500eb5 100644 --- a/include/matx/operators/outer.h +++ b/include/matx/operators/outer.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/pinv.h b/include/matx/operators/pinv.h index a591d3e4e..1ada44e95 100644 --- a/include/matx/operators/pinv.h +++ b/include/matx/operators/pinv.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/prod.h b/include/matx/operators/prod.h index 136861ee7..1700a5c35 100644 --- a/include/matx/operators/prod.h +++ b/include/matx/operators/prod.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // prod rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. 
Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/pwelch.h b/include/matx/operators/pwelch.h index 63599140a..fd1fd1f82 100644 --- a/include/matx/operators/pwelch.h +++ b/include/matx/operators/pwelch.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2023, NVIDIA Corporation +// Copyright (c) 2023, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/qr.h b/include/matx/operators/qr.h index 0f9032380..d64c92cbd 100644 --- a/include/matx/operators/qr.h +++ b/include/matx/operators/qr.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// diff --git a/include/matx/operators/reduce.h b/include/matx/operators/reduce.h index 655f8ad66..cdccfa254 100644 --- a/include/matx/operators/reduce.h +++ b/include/matx/operators/reduce.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/resample_poly.h b/include/matx/operators/resample_poly.h index 542f80081..0a6808b27 100644 --- a/include/matx/operators/resample_poly.h +++ b/include/matx/operators/resample_poly.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/softmax.h b/include/matx/operators/softmax.h index f2ae69667..c798754b0 100644 --- a/include/matx/operators/softmax.h +++ b/include/matx/operators/softmax.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. 
Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/sort.h b/include/matx/operators/sort.h index 75f9157e9..25ae45455 100644 --- a/include/matx/operators/sort.h +++ b/include/matx/operators/sort.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/stdd.h b/include/matx/operators/stdd.h index 3092abacc..1f06c324c 100644 --- a/include/matx/operators/stdd.h +++ b/include/matx/operators/stdd.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// diff --git a/include/matx/operators/sum.h b/include/matx/operators/sum.h index d95b25951..eec0d9b7f 100644 --- a/include/matx/operators/sum.h +++ b/include/matx/operators/sum.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // sum rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must resumuce the above cOpBright notice, +// 2. Redistributions in binary form must resumuce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote sumucts derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/svd.h b/include/matx/operators/svd.h index 014a990cb..9968be46a 100644 --- a/include/matx/operators/svd.h +++ b/include/matx/operators/svd.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// @@ -185,7 +185,7 @@ namespace detail { } } - constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const + constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size([[maybe_unused]] int dim) const { return 0; } @@ -264,7 +264,7 @@ namespace detail { } } - constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const + constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size([[maybe_unused]] int dim) const { return 0; } diff --git a/include/matx/operators/trace.h b/include/matx/operators/trace.h index f5c599f51..59004a478 100644 --- a/include/matx/operators/trace.h +++ b/include/matx/operators/trace.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // @@ -104,7 +104,7 @@ namespace detail { matxFree(ptr); } - constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const + constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size([[maybe_unused]] int dim) const { return 1; } diff --git a/include/matx/operators/transpose.h b/include/matx/operators/transpose.h index aaa2bd778..f727d9730 100644 --- a/include/matx/operators/transpose.h +++ b/include/matx/operators/transpose.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
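
Note on the Size() hunks in svd.h and trace.h above: these rank-0 helper operators must keep the common Size(int dim) signature, so the unused parameter is annotated rather than removed. A minimal sketch of the idea, with illustrative names that are not taken from MatX:

    // clang flags the unused parameter under -Wunused-parameter (enabled by
    // -Wextra), and the build promotes warnings to errors with -Werror.
    // [[maybe_unused]] keeps the uniform interface while silencing the
    // diagnostic on both the host and device compilation passes.
    struct ScalarOp {
      constexpr long long Size([[maybe_unused]] int dim) const { return 0; }
    };

    static_assert(ScalarOp{}.Size(0) == 0, "rank-0 operators report size 0");
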
// diff --git a/include/matx/operators/unique.h b/include/matx/operators/unique.h index ccbae31e3..313f6441c 100644 --- a/include/matx/operators/unique.h +++ b/include/matx/operators/unique.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/operators/var.h b/include/matx/operators/var.h index 7550d33ed..ce223a607 100644 --- a/include/matx/operators/var.h +++ b/include/matx/operators/var.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// diff --git a/include/matx/transforms/cgsolve.h b/include/matx/transforms/cgsolve.h index 0f389ca9e..9d8f6d416 100644 --- a/include/matx/transforms/cgsolve.h +++ b/include/matx/transforms/cgsolve.h @@ -94,7 +94,6 @@ namespace matx for(int i = 0 ; i < SRANK; i++) { scalar_shape[i] = X.Size(i); } - value_type N = value_type(X.Size(SRANK)); // Construct temporary scalars auto r0r0 = make_tensor(scalar_shape, MATX_ASYNC_DEVICE_MEMORY, stream); diff --git a/include/matx/transforms/channelize_poly.h b/include/matx/transforms/channelize_poly.h index 2e164ba3a..f4716558b 100644 --- a/include/matx/transforms/channelize_poly.h +++ b/include/matx/transforms/channelize_poly.h @@ -63,11 +63,6 @@ inline void matxChannelizePoly1DInternal(OutType o, const InType &i, { #ifdef __CUDACC__ MATX_NVTX_START("", matx::MATX_NVTX_LOG_INTERNAL) - - using input_t = typename InType::value_type; - using filter_t = typename FilterType::value_type; - - index_t filter_len = filter.Size(FilterType::Rank()-1); const index_t num_channels = o.Size(OutType::Rank()-1); const index_t nout_per_channel = o.Size(OutType::Rank()-2); @@ -92,7 +87,6 @@ inline size_t matxChannelizePoly1DInternal_SmemSizeBytes(const OutType &o, const index_t filter_len = filter.Size(FilterType::Rank()-1); const index_t num_channels = o.Size(OutType::Rank()-1); - const index_t nout_per_channel = o.Size(OutType::Rank()-2); const index_t filter_phase_len = (filter_len + num_channels - 1) / num_channels; size_t smem_size = sizeof(filter_t)*(num_channels)*(filter_phase_len) + @@ -126,11 +120,6 @@ inline void matxChannelizePoly1DInternal_Smem(OutType o, const InType &i, const #ifdef __CUDACC__ MATX_NVTX_START("", matx::MATX_NVTX_LOG_INTERNAL) - using input_t = typename InType::value_type; - using filter_t = typename FilterType::value_type; - - index_t filter_len = filter.Size(FilterType::Rank()-1); - const index_t num_channels = o.Size(OutType::Rank()-1); const index_t nout_per_channel = o.Size(OutType::Rank()-2); const int num_batches = static_cast(TotalSize(i)/i.Size(i.Rank() - 1)); @@ -153,11 +142,6 @@ inline void matxChannelizePoly1DInternal_FusedChan(OutType o, const InType &i, { #ifdef __CUDACC__ MATX_NVTX_START("", matx::MATX_NVTX_LOG_INTERNAL) - - using input_t = typename InType::value_type; - using filter_t = typename FilterType::value_type; - - index_t filter_len = filter.Size(FilterType::Rank()-1); const index_t num_channels = o.Size(OutType::Rank()-1); const index_t nout_per_channel = o.Size(OutType::Rank()-2); @@ -260,7 +244,7 @@ inline void channelize_poly_impl(OutType out, const InType &in, const FilterType MATX_ASSERT_STR(out.Size(i) == in.Size(i), matxInvalidDim, "channelize_poly: input/output must have matched batch sizes"); } - const index_t num_elem_per_channel = (in.Size(IN_RANK-1) + num_channels - 1) / num_channels; + [[maybe_unused]] const index_t num_elem_per_channel = (in.Size(IN_RANK-1) + num_channels - 1) / num_channels; MATX_ASSERT_STR(out.Size(OUT_RANK-1) == num_channels, matxInvalidDim, "channelize_poly: output size OUT_RANK-1 mismatch"); diff --git a/include/matx/transforms/chol/chol_cuda.h b/include/matx/transforms/chol/chol_cuda.h index e2478acd0..1c43fe3a4 100644 --- a/include/matx/transforms/chol/chol_cuda.h +++ b/include/matx/transforms/chol/chol_cuda.h @@ -111,7 +111,7 @@ class matxDnCholCUDAPlan_t : matxDnCUDASolver_t { void GetWorkspaceSize() override { - cusolverStatus_t ret = cusolverDnXpotrf_bufferSize(this->handle, this->dn_params, params.uplo, + [[maybe_unused]] cusolverStatus_t ret = 
cusolverDnXpotrf_bufferSize(this->handle, this->dn_params, params.uplo, params.n, MatXTypeToCudaType(), params.A, params.n, MatXTypeToCudaType(), &this->dspace, @@ -158,7 +158,7 @@ class matxDnCholCUDAPlan_t : matxDnCUDASolver_t { // At this time cuSolver does not have a batched 64-bit cholesky interface. // Change this to use the batched version once available. for (size_t i = 0; i < this->batch_a_ptrs.size(); i++) { - auto ret = cusolverDnXpotrf( + [[maybe_unused]] auto ret = cusolverDnXpotrf( this->handle, this->dn_params, uplo, params.n, MatXTypeToCudaType(), this->batch_a_ptrs[i], params.n, MatXTypeToCudaType(), reinterpret_cast(this->d_workspace) + i * this->dspace, this->dspace, diff --git a/include/matx/transforms/cub.h b/include/matx/transforms/cub.h index ecddbf59b..f2d698dfa 100644 --- a/include/matx/transforms/cub.h +++ b/include/matx/transforms/cub.h @@ -730,7 +730,7 @@ inline void ExecSort(OutputTensor &a_out, return cub::DeviceSegmentedReduce::Reduce(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(out_base)), begin, end, cparams_.reduce_op, cparams_.init, stream); }; - auto rv = ReduceInput(ft, out_base, in_base); + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in cub::DeviceSegmentedReduce::Reduce"); } else { @@ -738,7 +738,7 @@ inline void ExecSort(OutputTensor &a_out, return cub::DeviceReduce::Reduce(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(in_base)), cparams_.reduce_op, cparams_.init, stream); }; - auto rv = ReduceInput(ft, out_base, in_base); + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in cub::DeviceReduce::Reduce"); } @@ -780,14 +780,14 @@ inline void ExecSort(OutputTensor &a_out, auto ft = [&](auto &&in, auto &&out, auto &&begin, auto &&end) { return cub::DeviceSegmentedReduce::Sum(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(out_base)), begin, end, stream); }; - auto rv = ReduceInput(ft, out_base, in_base); + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in cub::DeviceSegmentedReduce::Sum"); } else { auto ft = [&](auto &&in, auto &&out, [[maybe_unused]] auto &&unused1, [[maybe_unused]] auto &&unused2) { return cub::DeviceReduce::Sum(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(in_base)), stream); }; - auto rv = ReduceInput(ft, out_base, in_base); + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in ub::DeviceReduce::Sum"); } #endif @@ -826,14 +826,14 @@ inline void ExecSort(OutputTensor &a_out, auto ft = [&](auto &&in, auto &&out, auto &&begin, auto &&end) { return cub::DeviceSegmentedReduce::Min(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(out_base)), begin, end, stream); }; - auto rv = ReduceInput(ft, out_base, in_base); + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in cub::DeviceSegmentedReduce::Min"); } else { auto ft = [&](auto &&in, auto &&out, [[maybe_unused]] auto &&unused1, [[maybe_unused]] auto &&unused2) { return cub::DeviceReduce::Min(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(in_base)), stream); }; - auto rv = ReduceInput(ft, out_base, in_base); + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in 
cub::DevicdReduce::Min"); } #endif @@ -872,7 +872,7 @@ inline void ExecSort(OutputTensor &a_out, auto ft = [&](auto &&in, auto &&out, auto &&begin, auto &&end) { return cub::DeviceSegmentedReduce::Max(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(out_base)), begin, end, stream); }; - auto rv = ReduceInput(ft, out_base, in_base); + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in cub::DeviceSegmentedReduce::Max"); } else { @@ -880,8 +880,7 @@ inline void ExecSort(OutputTensor &a_out, return cub::DeviceReduce::Max(d_temp, temp_storage_bytes, in, out, static_cast(TotalSize(in_base)), stream); }; - auto rv = ReduceInput(ft, out_base, in_base); - + [[maybe_unused]] auto rv = ReduceInput(ft, out_base, in_base); MATX_ASSERT_STR_EXP(rv, cudaSuccess, matxCudaError, "Error in cub::DeviceReduce::Max"); } #endif @@ -1889,8 +1888,6 @@ void sort_impl(OutputTensor &a_out, const InputOperator &a, #ifdef __CUDACC__ MATX_NVTX_START("", matx::MATX_NVTX_LOG_API) - cudaStream_t stream = exec.getStream(); - using a_type = typename InputOperator::value_type; a_type *out_ptr = nullptr; detail::tensor_impl_t tmp_in; @@ -2080,9 +2077,9 @@ void hist_impl(OutputTensor &a_out, const InputOperator &a, #ifdef __CUDACC__ MATX_NVTX_START("", matx::MATX_NVTX_LOG_API) - using param_type = typename detail::HistEvenParams_t; detail::HistEvenParams_t hp{lower, upper, num_levels}; #ifndef MATX_DISABLE_CUB_CACHE + using param_type = typename detail::HistEvenParams_t; auto params = detail::matxCubPlan_t; auto cparams = detail::SelectParams_t{sel, num_found}; cudaStream_t stream = exec.getStream(); #ifndef MATX_DISABLE_CUB_CACHE - + using param_type = typename detail::SelectParams_t; // Get cache or new Sort plan if it doesn't exist auto params = detail::matxCubPlan_t; auto cparams = detail::SelectParams_t{sel, num_found}; #ifndef MATX_DISABLE_CUB_CACHE - + using param_type = typename detail::SelectParams_t; // Get cache or new Sort plan if it doesn't exist auto params = detail::matxCubPlan_t; auto cparams = detail::UniqueParams_t{num_found}; #ifndef MATX_DISABLE_CUB_CACHE + using param_type = typename detail::UniqueParams_t; // Get cache or new Sort plan if it doesn't exist auto params = detail::matxCubPlan_t 11701 || (CUSOLVER_VERSION == 11701 && CUSOLVER_VER_BUILD >=2) // Use vector mode for a larger workspace size that works for both modes - cusolverStatus_t ret = cusolverDnXsyevBatched_bufferSize( + [[maybe_unused]] cusolverStatus_t ret = cusolverDnXsyevBatched_bufferSize( this->handle, this->dn_params, CUSOLVER_EIG_MODE_VECTOR, params.uplo, params.n, MatXTypeToCudaType(), params.A, params.n, MatXTypeToCudaType(), params.W, MatXTypeToCudaType(), &this->dspace, &this->hspace, params.batch_size); #else - cusolverStatus_t ret = cusolverDnXsyevd_bufferSize( + [[maybe_unused]] cusolverStatus_t ret = cusolverDnXsyevd_bufferSize( this->handle, this->dn_params, CUSOLVER_EIG_MODE_VECTOR, params.uplo, params.n, MatXTypeToCudaType(), params.A, params.n, MatXTypeToCudaType(), params.W, @@ -192,7 +192,7 @@ class matxDnEigCUDAPlan_t : matxDnCUDASolver_t { cusolverDnSetStream(this->handle, stream); #if CUSOLVER_VERSION > 11701 || ( CUSOLVER_VERSION == 11701 && CUSOLVER_VER_BUILD >=2) - auto ret = cusolverDnXsyevBatched( + [[maybe_unused]] auto ret = cusolverDnXsyevBatched( this->handle, this->dn_params, jobz, uplo, params.n, MatXTypeToCudaType(), out.Data(), params.n, MatXTypeToCudaType(), w.Data(), MatXTypeToCudaType(), @@ -211,7 +211,7 @@ class 
matxDnEigCUDAPlan_t : matxDnCUDASolver_t { // Older cuSolver versions do not support batching with cusolverDnXsyevd for (size_t i = 0; i < this->batch_a_ptrs.size(); i++) { - auto ret = cusolverDnXsyevd( + [[maybe_unused]] auto ret = cusolverDnXsyevd( this->handle, this->dn_params, jobz, uplo, params.n, MatXTypeToCudaType(), this->batch_a_ptrs[i], params.n, MatXTypeToCudaType(), this->batch_w_ptrs[i], MatXTypeToCudaType(), diff --git a/include/matx/transforms/fft/fft_common.h b/include/matx/transforms/fft/fft_common.h index fe5641a52..6e86ac9a7 100644 --- a/include/matx/transforms/fft/fft_common.h +++ b/include/matx/transforms/fft/fft_common.h @@ -131,7 +131,6 @@ namespace detail { // Create a new shape where n is the size of the last dimension auto shape = i.Shape(); *(shape.end() - 1) = act_fft_size; - auto tot = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); // Make a new buffer large enough for our input if constexpr (is_cuda_executor_v) { diff --git a/include/matx/transforms/inverse.h b/include/matx/transforms/inverse.h index f69944356..87d919389 100644 --- a/include/matx/transforms/inverse.h +++ b/include/matx/transforms/inverse.h @@ -206,7 +206,7 @@ class matxInversePlan_t { cuda::std::array a_idx{0}; cuda::std::array a_inv_idx{0}; constexpr int batch_offset = 2; - auto a_shape = a.Shape(); + // Get total number of batches for (size_t iter = 0; iter < params.batch_size; iter++) { if (use_input_workbuf) { diff --git a/include/matx/transforms/lu/lu_cuda.h b/include/matx/transforms/lu/lu_cuda.h index 861f8aaf5..6b4e3cc69 100644 --- a/include/matx/transforms/lu/lu_cuda.h +++ b/include/matx/transforms/lu/lu_cuda.h @@ -113,7 +113,7 @@ class matxDnLUCUDAPlan_t : matxDnCUDASolver_t { void GetWorkspaceSize() override { - cusolverStatus_t ret = cusolverDnXgetrf_bufferSize(this->handle, this->dn_params, params.m, + [[maybe_unused]] cusolverStatus_t ret = cusolverDnXgetrf_bufferSize(this->handle, this->dn_params, params.m, params.n, MatXTypeToCudaType(), params.A, params.m, MatXTypeToCudaType(), &this->dspace, @@ -164,7 +164,7 @@ class matxDnLUCUDAPlan_t : matxDnCUDASolver_t { // At this time cuSolver does not have a batched 64-bit LU interface. Change // this to use the batched version once available. 
for (size_t i = 0; i < this->batch_a_ptrs.size(); i++) { - auto ret = cusolverDnXgetrf( + [[maybe_unused]] auto ret = cusolverDnXgetrf( this->handle, this->dn_params, params.m, params.n, MatXTypeToCudaType(), this->batch_a_ptrs[i], params.m, this->batch_piv_ptrs[i], MatXTypeToCudaType(), diff --git a/include/matx/transforms/matmul/matmul_cuda.h b/include/matx/transforms/matmul/matmul_cuda.h index b1e42bc10..74dd6c8b1 100644 --- a/include/matx/transforms/matmul/matmul_cuda.h +++ b/include/matx/transforms/matmul/matmul_cuda.h @@ -832,7 +832,7 @@ class MatMulCUDAHandle_t { } if constexpr (RANK <= 3) { - auto res = cublasLtMatmul( + [[maybe_unused]] auto res = cublasLtMatmul( ltHandle, operationDesc, &salpha, (void *)a_adj.Data(), Adesc, (void *)b_adj.Data(), Bdesc, &sbeta, (void *)c_adj.Data(), Cdesc, (void *)c_adj.Data(), Cdesc, &heuristicResult.algo, workspace, diff --git a/include/matx/transforms/matvec.h b/include/matx/transforms/matvec.h index ff65d4578..202f0de00 100644 --- a/include/matx/transforms/matvec.h +++ b/include/matx/transforms/matvec.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/transforms/outer.h b/include/matx/transforms/outer.h index de967cff6..5598a35cb 100644 --- a/include/matx/transforms/outer.h +++ b/include/matx/transforms/outer.h @@ -1,20 +1,20 @@ //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // -// COpBright (c) 2021, NVIDIA Corporation +// Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // -// 1. Redistributions of source code must retain the above cOpBright notice, this +// 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // -// 2. Redistributions in binary form must reproduce the above cOpBright notice, +// 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // -// 3. Neither the name of the cOpBright holder nor the names of its +// 3. 
Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // diff --git a/include/matx/transforms/qr/qr_cuda.h b/include/matx/transforms/qr/qr_cuda.h index 076a539f0..afc7b2523 100644 --- a/include/matx/transforms/qr/qr_cuda.h +++ b/include/matx/transforms/qr/qr_cuda.h @@ -55,7 +55,6 @@ namespace detail { const int RANK = AType::Rank(); index_t m = A.Size(RANK-2); - index_t n = A.Size(RANK-1); cuda::std::array uShape; for(int i = 0; i < RANK-2; i++) { @@ -302,7 +301,7 @@ class matxDnQRCUDAPlan_t : matxDnCUDASolver_t { void GetWorkspaceSize() override { - cusolverStatus_t ret = cusolverDnXgeqrf_bufferSize( + [[maybe_unused]] cusolverStatus_t ret = cusolverDnXgeqrf_bufferSize( this->handle, this->dn_params, params.m, params.n, MatXTypeToCudaType(), params.A, params.m, MatXTypeToCudaType(), params.tau, MatXTypeToCudaType(), &this->dspace, &this->hspace); @@ -353,7 +352,7 @@ class matxDnQRCUDAPlan_t : matxDnCUDASolver_t { // At this time cuSolver does not have a batched 64-bit LU interface. Change // this to use the batched version once available. for (size_t i = 0; i < this->batch_a_ptrs.size(); i++) { - auto ret = cusolverDnXgeqrf( + [[maybe_unused]] auto ret = cusolverDnXgeqrf( this->handle, this->dn_params, params.m, params.n, MatXTypeToCudaType(), this->batch_a_ptrs[i], params.m, MatXTypeToCudaType(), this->batch_tau_ptrs[i], MatXTypeToCudaType(), @@ -370,7 +369,7 @@ class matxDnQRCUDAPlan_t : matxDnCUDASolver_t { // This will block. Figure this out later cudaStreamSynchronize(stream); - for (const auto& info : h_info) { + for ([[maybe_unused]] const auto& info : h_info) { MATX_ASSERT_STR_EXP(info, 0, matxSolverError, ("Parameter " + std::to_string(-info) + " had an illegal value in cuSolver Xgeqrf").c_str()); } diff --git a/include/matx/transforms/reduce.h b/include/matx/transforms/reduce.h index 19d3e4f27..ea238abea 100644 --- a/include/matx/transforms/reduce.h +++ b/include/matx/transforms/reduce.h @@ -190,6 +190,7 @@ __MATX_DEVICE__ __MATX_INLINE__ __nv_bfloat16 atomicMax(__nv_bfloat16 *addr, __n unsigned int *address_as_other; int offset; + MATX_IGNORE_WARNING_PUSH_CLANG("-Wcast-align") // We need to move our pointer back if ((uintptr_t)addr & 0x10) { address_as_other = (unsigned int *)(reinterpret_cast(addr) - 2); @@ -199,6 +200,7 @@ __MATX_DEVICE__ __MATX_INLINE__ __nv_bfloat16 atomicMax(__nv_bfloat16 *addr, __n address_as_other = (unsigned int *)(addr); offset = 0; } + MATX_IGNORE_WARNING_POP_CLANG unsigned short assumed; old.i = *address_as_other; @@ -249,6 +251,7 @@ __MATX_DEVICE__ __MATX_INLINE__ __half atomicMax(__half *addr, __half val) unsigned int *address_as_other; int offset; + MATX_IGNORE_WARNING_PUSH_CLANG("-Wcast-align") // We need to move our pointer back to align to a 2b boundary if ((uintptr_t)addr & 0x10) { address_as_other = (unsigned int *)(reinterpret_cast(addr) - 2); @@ -258,6 +261,7 @@ __MATX_DEVICE__ __MATX_INLINE__ __half atomicMax(__half *addr, __half val) address_as_other = (unsigned int *)(addr); offset = 0; } + MATX_IGNORE_WARNING_POP_CLANG unsigned short assumed; old.i = *address_as_other; @@ -366,7 +370,6 @@ __MATX_DEVICE__ __MATX_INLINE__ void atomicAll(float *addr, float val) { unsigned int *address_as_uint = (unsigned int *)addr; unsigned int old = *address_as_uint, assumed; - unsigned int val_uint = __float_as_uint(val); // nan should be ok here but should verify while (val == 0.0 && old != 0.0) { @@ -1046,7 
+1049,7 @@ __global__ void matxReduceKernel(OutType dest, const InType in, extern __shared__ char smemc_[]; value_type *smem_ = reinterpret_cast(smemc_); - int s2_size, soff; + unsigned int s2_size, soff; value_type *smem; // if blockDim.x > 32 we need a 2 stage reduction @@ -1118,7 +1121,7 @@ __global__ void matxReduceKernel(OutType dest, const InType in, // Compute offset index based on rank difference #pragma unroll - for (int r = 0; r < InType::Rank() - DRANK; r++) { + for (int r = 0; r < (int)(InType::Rank() - DRANK); r++) { indices[InType::Rank() - r - 1] = indices[InType::Rank() - (DRANK + 1 + r)]; } @@ -1529,7 +1532,6 @@ void __MATX_INLINE__ mean_impl(OutType dest, const InType &in, [[maybe_unused]] static_assert(OutType::Rank() < InType::Rank(), "reduction dimensions must be <= Rank of input"); using inner_type = typename inner_op_type_t::type; - inner_type scale = 1; auto ft = [&](auto &&lin, auto &&lout, [[maybe_unused]] auto &&lbegin, [[maybe_unused]] auto &&lend) { if constexpr (OutType::Rank() == 0) { @@ -2031,7 +2033,6 @@ void __MATX_INLINE__ max_impl(OutType dest, const InType &in, [[maybe_unused]] c } else { const index_t BATCHES = TotalSize(dest); - const index_t els = lend[0] - lbegin[0]; for (index_t b = 0; b < BATCHES; b++) { lout[b] = *std::max_element(lin + lbegin[b], lin + lend[b]); } @@ -2110,7 +2111,6 @@ void __MATX_INLINE__ argmax_impl(OutType dest, TensorIndexType &idest, const InT } else { const index_t BATCHES = TotalSize(dest); - const index_t els = lend[0] - lbegin[0]; for (index_t b = 0; b < BATCHES; b++) { lout[b] = cuda::std::max_element(lin + lbegin[b], lin + lend[b]) - lin; } @@ -2180,7 +2180,6 @@ void __MATX_INLINE__ min_impl(OutType dest, const InType &in, [[maybe_unused]] c } else { const index_t BATCHES = TotalSize(dest); - const index_t els = lend[0] - lbegin[0]; for (index_t b = 0; b < BATCHES; b++) { lout[b] = *std::min_element(lin + lbegin[b], lin + lend[b]); } @@ -2262,7 +2261,6 @@ void __MATX_INLINE__ argmin_impl(OutType dest, TensorIndexType &idest, const InT } else { const index_t BATCHES = TotalSize(dest); - const index_t els = lend[0] - lbegin[0]; for (index_t b = 0; b < BATCHES; b++) { lout[b] = cuda::std::min_element(lin + lbegin[b], lin + lend[b]) - lin; } diff --git a/include/matx/transforms/resample_poly.h b/include/matx/transforms/resample_poly.h index a3443a8b3..e7b596d9b 100644 --- a/include/matx/transforms/resample_poly.h +++ b/include/matx/transforms/resample_poly.h @@ -53,7 +53,6 @@ inline void matxResamplePoly1DInternal(OutType &o, const InType &i, #ifdef __CUDACC__ MATX_NVTX_START("", matx::MATX_NVTX_LOG_INTERNAL) - using input_t = typename InType::value_type; using filter_t = typename FilterType::value_type; using output_t = typename OutType::value_type; using shape_type = typename OutType::shape_type; diff --git a/include/matx/transforms/svd/svd_cuda.h b/include/matx/transforms/svd/svd_cuda.h index 720249e8f..10a275dca 100644 --- a/include/matx/transforms/svd/svd_cuda.h +++ b/include/matx/transforms/svd/svd_cuda.h @@ -556,8 +556,8 @@ static __MATX_INLINE__ SVDMethod GetCUDASVDMethod(const ATensor &a) { SVDMethod method = detail::SVDMethod::GESVD; if (a.Rank() != 2) { - if (a.Size(RANK-2) <= 32 && - a.Size(RANK-1) <= 32) { + if (m <= 32 && + n <= 32) { if constexpr (is_tensor_view_v) { #if !defined(MATX_INDEX_32_BIT) if (a.Stride(0) < std::numeric_limits::max()) { @@ -663,7 +663,7 @@ class matxDnSVDCUDAPlan_t : matxDnCUDASolver_t { void GetWorkspaceSize() override { - cusolverStatus_t ret; + [[maybe_unused]] cusolverStatus_t 
ret; // Use all mode for a larger workspace size that works for all modes if (params.method == SVDMethod::GESVD) { @@ -746,7 +746,7 @@ class matxDnSVDCUDAPlan_t : matxDnCUDASolver_t { { MATX_NVTX_START("", matx::MATX_NVTX_LOG_INTERNAL) - cusolverStatus_t ret; + [[maybe_unused]] cusolverStatus_t ret; // Batch size checks for(int i = 0 ; i < RANK-2; i++) { @@ -756,7 +756,7 @@ class matxDnSVDCUDAPlan_t : matxDnCUDASolver_t { } // Inner size checks - int64_t k = cuda::std::min(params.m, params.n); + [[maybe_unused]] int64_t k = cuda::std::min(params.m, params.n); if (jobz == 'S') { MATX_ASSERT_STR((u.Size(RANK-1) == k) && (u.Size(RANK-2) == params.m), matxInvalidSize, "U must be ... x m x min(m,n)"); MATX_ASSERT_STR((vt.Size(RANK-1) == params.n) && (vt.Size(RANK-2) == k), matxInvalidSize, "VT must be ... x min(m,n) x n"); diff --git a/test/00_io/FileIOTests.cu b/test/00_io/FileIOTests.cu index abf64d6f7..69c634344 100644 --- a/test/00_io/FileIOTests.cu +++ b/test/00_io/FileIOTests.cu @@ -89,7 +89,6 @@ TYPED_TEST(FileIoTestsNonComplexFloatTypes, SmallCSVWrite) { MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; - using ExecType = cuda::std::tuple_element_t<1, TypeParam>; tensor_t Avs{{10, 2}}; this->pb->NumpyToTensorView(this->Av, this->small_csv.c_str()); @@ -106,7 +105,6 @@ TYPED_TEST(FileIoTestsNonComplexFloatTypes, MATRead) { MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; - using ExecType = cuda::std::tuple_element_t<1, TypeParam>; auto t = make_tensor({1,10}); // example-begin read_mat-test-1 @@ -123,7 +121,6 @@ TYPED_TEST(FileIoTestsNonComplexFloatTypes, MATWrite) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; - using ExecType = cuda::std::tuple_element_t<1, TypeParam>; auto t = make_tensor({2,3}); auto t2 = make_tensor({2,3}); t.SetVals({{1,2,3},{4,5,6}}); diff --git a/test/00_operators/OperatorTests.cu b/test/00_operators/OperatorTests.cu index 23c88d04a..1a350981c 100644 --- a/test/00_operators/OperatorTests.cu +++ b/test/00_operators/OperatorTests.cu @@ -1021,7 +1021,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexNonHalfAllExecs, SliceAndReshape) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; @@ -1660,7 +1659,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, RemapOp) TYPED_TEST(OperatorTestsNumericAllExecs, RemapRankZero) { MATX_ENTER_HANDLER(); - using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; ExecType exec{}; @@ -1735,7 +1733,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, RealImagOp) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -1763,7 +1760,6 @@ TYPED_TEST(OperatorTestsAllExecs, OperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -1864,7 +1860,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, OperatorFuncsR2C) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using 
inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -1888,7 +1883,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, OperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -1953,7 +1947,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, NDOperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; @@ -2017,7 +2010,6 @@ TYPED_TEST(OperatorTestsNumericNonComplexAllExecs, OperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -2086,7 +2078,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, OperatorFuncDivComplex) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -2108,7 +2099,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, IsNanInf) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; @@ -2160,7 +2150,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, OperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -2230,7 +2219,6 @@ TYPED_TEST(OperatorTestsIntegralAllExecs, OperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -2254,7 +2242,6 @@ TYPED_TEST(OperatorTestsBooleanAllExecs, OperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; auto tiv0 = make_tensor({}); @@ -2308,7 +2295,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, OperatorFuncs) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; @@ -2347,7 +2333,6 @@ TYPED_TEST(OperatorTestsAllExecs, Flatten) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; @@ -2378,7 +2363,6 @@ TYPED_TEST(OperatorTestsNumericNoHalfAllExecs, AdvancedOperators) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; index_t count = 100; @@ -2448,7 +2432,6 @@ 
TYPED_TEST(OperatorTestsNumericNonComplexAllExecs, AdvancedOperators) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; index_t count = 10; @@ -2516,7 +2499,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, ComplexTypeCompatibility) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; index_t count = 10; @@ -2524,10 +2506,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, ComplexTypeCompatibility) tensor_t fview({count}); tensor_t dview({count}); - using data_type = - typename std::conditional_t, float, - typename TestType::value_type>; - // Multiply by scalar for (index_t i = 0; i < count; i++) { fview(i) = static_cast(i); @@ -2624,7 +2602,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, SquareCopyTranspose) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; @@ -2669,7 +2646,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, NonSquareTranspose) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; index_t count = 100; @@ -2702,7 +2678,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, Transpose3D) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; ExecType exec{}; @@ -2739,7 +2714,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, TransposeVsTransposeMatrix) MATX_ENTER_HANDLER(); using TestType = cuda::std::tuple_element_t<0, TypeParam>; using ExecType = cuda::std::tuple_element_t<1, TypeParam>; - using inner_type = typename inner_op_type_t::type; // example-begin transpose-test-1 // ExecType is an executor type (e.g. matx::cudaExecutor for executing on the GPU). 
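
The test hunks in this region are all the same fix: locally declared type aliases (using ExecType = ..., using inner_type = ...) that are never referenced afterwards are deleted. clang diagnoses these under -Wunused-local-typedef (gcc: -Wunused-local-typedefs), while nvcc accepted them silently, which is presumably how they accumulated. A self-contained illustration, not MatX code:

    #include <type_traits>

    template <typename T>
    void check_type() {
      // Never used below: clang warns "unused type alias 'value_type'",
      // and -Werror turns that into a failed build. The fix in the hunks
      // above is simply to delete the alias.
      using value_type = std::remove_cv_t<T>;
      static_assert(!std::is_reference_v<T>, "T must not be a reference");
    }

    int main() { check_type<int>(); }
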
@@ -2790,7 +2764,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, CloneAndAdd)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -2852,7 +2825,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, Reshape)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
   index_t count = 10;
@@ -2927,7 +2899,6 @@ TYPED_TEST(OperatorTestsNumericNonComplexAllExecs, Overlap)
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -3008,7 +2979,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, Broadcast)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -3341,7 +3311,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, Concatenate)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
   index_t i, j;
@@ -3477,7 +3446,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, Stack)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -3523,7 +3491,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, HermitianTranspose)
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
   index_t count0 = 100;
@@ -3560,7 +3527,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, PlanarTransform)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
   index_t m = 10;
@@ -3592,7 +3558,6 @@ TYPED_TEST(OperatorTestsComplexTypesAllExecs, InterleavedTransform)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -3629,7 +3594,6 @@ TYPED_TEST(OperatorTestsAllExecs, RepMat)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -3685,7 +3649,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, Sphere2Cart)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -4011,7 +3974,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, ShiftOp)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -4169,7 +4131,6 @@ TYPED_TEST(OperatorTestsNumericAllExecs, Reverse)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
@@ -4473,7 +4434,6 @@ TYPED_TEST(OperatorTestsFloatNonComplexAllExecs, Legendre)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   ExecType exec{};
diff --git a/test/00_operators/ReductionTests.cu b/test/00_operators/ReductionTests.cu
index 57e0a9bf7..fe94517db 100644
--- a/test/00_operators/ReductionTests.cu
+++ b/test/00_operators/ReductionTests.cu
@@ -1441,7 +1441,6 @@ TYPED_TEST(ReductionTestsFloatNonComplexNonHalfAllExecs, Trace)
   MATX_ENTER_HANDLER();
   index_t count = 10;
-  TestType c = GenerateData();
   // example-begin trace-test-1
   auto t2 = make_tensor({count, count});
diff --git a/test/00_solver/Cholesky.cu b/test/00_solver/Cholesky.cu
index 94edc35b0..82851cf99 100644
--- a/test/00_solver/Cholesky.cu
+++ b/test/00_solver/Cholesky.cu
@@ -75,7 +75,6 @@ TYPED_TEST(CholSolverTestNonHalfFloatTypes, CholeskyBasic)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   const cuda::std::array dims {
     16,
@@ -123,7 +122,6 @@ TYPED_TEST(CholSolverTestNonHalfFloatTypes, CholeskyBasicBatched)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   const cuda::std::array dims {
     16,
@@ -173,7 +171,6 @@ TYPED_TEST(CholSolverTestNonHalfFloatTypes, CholeskyWindowed)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   const cuda::std::array dims {
     50,
diff --git a/test/00_solver/Det.cu b/test/00_solver/Det.cu
index 7d4b53a89..7d47dc3ce 100644
--- a/test/00_solver/Det.cu
+++ b/test/00_solver/Det.cu
@@ -85,7 +85,6 @@ TYPED_TEST(DetSolverTestFloatTypes, DeterminantBasic)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   auto Av = make_tensor({m, m});
   auto detv = make_tensor({});
@@ -114,7 +113,6 @@ TYPED_TEST(DetSolverTestFloatTypes, DeterminantBasicBatched)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   constexpr int batches = 10;
diff --git a/test/00_solver/Pinv.cu b/test/00_solver/Pinv.cu
index 4f8f3564d..50b5a7df4 100644
--- a/test/00_solver/Pinv.cu
+++ b/test/00_solver/Pinv.cu
@@ -75,7 +75,6 @@ TYPED_TEST(PinvSolverTestFloatTypes, PinvBasic)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   constexpr cuda::std::array sizes {
     std::pair{100, 50},
@@ -105,7 +104,6 @@ TYPED_TEST(PinvSolverTestFloatTypes, PinvRankDeficient)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   constexpr cuda::std::array sizes {
     std::pair{100, 50},
@@ -133,7 +131,6 @@ TYPED_TEST(PinvSolverTestFloatTypes, PinvBasicBatched)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   constexpr int m = 100;
   constexpr int n = 50;
@@ -156,7 +153,6 @@ TYPED_TEST(PinvSolverTestFloatTypes, PinvBatchedRankDeficient)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using inner_type = typename inner_op_type_t::type;
   constexpr int m = 100;
   constexpr int n = 50;
diff --git a/test/00_solver/SVD.cu b/test/00_solver/SVD.cu
index 2cbb9b485..7c3789438 100644
--- a/test/00_solver/SVD.cu
+++ b/test/00_solver/SVD.cu
@@ -88,7 +88,6 @@ TYPED_TEST(SVDSolverTestNonHalfTypes, SVDBasic)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   using value_type = typename inner_op_type_t::type;
   constexpr index_t m = 100;
   constexpr index_t n = 50;
@@ -143,7 +142,6 @@ TYPED_TEST(SVDSolverTestNonHalfTypes, SVDMLeqN)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   using value_type = typename inner_op_type_t::type;
   constexpr index_t m = 50;
   constexpr index_t n = 100;
@@ -196,7 +194,6 @@ TYPED_TEST(SVDSolverTestNonHalfTypes, SVDReducedMode)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   using value_type = typename inner_op_type_t::type;
   constexpr cuda::std::array sizes {
@@ -255,7 +252,6 @@ TYPED_TEST(SVDSolverTestNonHalfTypes, SVDHostAlgoQR)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   using value_type = typename inner_op_type_t::type;
   constexpr index_t m = 100;
   constexpr index_t n = 50;
@@ -308,7 +304,6 @@ TYPED_TEST(SVDSolverTestNonHalfTypes, SVDBasicBatched)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   using value_type = typename inner_op_type_t::type;
   constexpr index_t batches = 10;
@@ -367,8 +362,7 @@ TYPED_TEST(SVDSolverTestNonHalfTypes, SVDBasicBatched)
 TYPED_TEST(SVDSolverTestNonHalfTypes, SVDBasicBatchedSmallMGTN)
 {
   MATX_ENTER_HANDLER();
-  using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
+  using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using value_type = typename inner_op_type_t::type;
   constexpr index_t batches = 10;
@@ -427,7 +421,6 @@ TYPED_TEST(SVDSolverTestNonHalfTypes, SVDBasicBatchedSmallMEQN)
 {
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using ExecType = cuda::std::tuple_element_t<1, TypeParam>;
   using value_type = typename inner_op_type_t::type;
   constexpr index_t batches = 10;
diff --git a/test/00_transform/ChannelizePoly.cu b/test/00_transform/ChannelizePoly.cu
index b3f82e529..670308864 100644
--- a/test/00_transform/ChannelizePoly.cu
+++ b/test/00_transform/ChannelizePoly.cu
@@ -382,7 +382,6 @@ TYPED_TEST(ChannelizePolyTestNonHalfFloatTypes, Operators)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using InnerType = typename test_types::inner_type::type;
   using ComplexType = typename test_types::complex_type::type;
   const index_t a_len = 2500;
@@ -421,7 +420,6 @@ TYPED_TEST(ChannelizePolyTestDoubleType, Harris2003)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
-  using InnerType = typename test_types::inner_type::type;
   using ComplexType = typename test_types::complex_type::type;
   cudaStream_t stream = 0;
diff --git a/test/00_transform/FFT.cu b/test/00_transform/FFT.cu
index b4bb32ed2..09070809d 100644
--- a/test/00_transform/FFT.cu
+++ b/test/00_transform/FFT.cu
@@ -661,7 +661,7 @@ TYPED_TEST(FFTTestComplexNonHalfTypesAllExecs, FFT1DSizeChecks)
   MATX_ENTER_HANDLER();
   using TestType = cuda::std::tuple_element_t<0, TypeParam>;
   using ComplexType = TestType;
-  using RealType = typename TestType::value_type;
+  //using RealType = typename TestType::value_type;
   const index_t N = 16;
   auto tc = make_tensor({N});
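
The test-suite hunks above all apply the same fix: local type aliases that are never referenced are removed (or, in FFT.cu, commented out), since clang-19 diagnoses them as unused. As a minimal illustration only -- the function, alias names, and header below are not taken from this patch -- the diagnostic and the two usual remedies look like this:

// Hypothetical sketch, not part of the patch: clang-19 flags unreferenced
// local aliases under -Wunused-local-typedef.
#include <cuda/std/type_traits>

template <typename T>
__host__ __device__ void scale_by_two(T *data, int n)
{
  // using inner = typename cuda::std::remove_cv<T>::type;  // never used -> warning; deleting it is the fix used above
  using inner [[maybe_unused]] = typename cuda::std::remove_cv<T>::type;  // alternative: keep it but mark it unused
  for (int i = 0; i < n; i++) {
    data[i] = data[i] * T(2);
  }
}

Deleting the alias is the lighter-touch fix used throughout these tests; the [[maybe_unused]] form is only worth keeping when the alias is still referenced in some instantiations or debug paths.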