fix constructor order, friend class, inconsistent function, inconsistent class/struct #1725

Open · wants to merge 6 commits into base: develop
31 changes: 31 additions & 0 deletions .github/workflows/msvc_clang.yml
@@ -0,0 +1,31 @@
name: Windows-MSVC-CLANG

on:
  push:
    branches:
      - 'master'
      - 'develop'
      - 'release/**'
    tags:
      - '**'
  pull_request:
    types: [opened,synchronize]

concurrency:
  group: ${{ github.workflow }}-${{ (github.head_ref && github.ref) || github.run_id }}
  cancel-in-progress: true

jobs:
  windows_ref:
    name: msvc/clang
    runs-on: [windows-latest]
    steps:
      - name: Checkout the latest code (shallow clone)
        uses: actions/checkout@v3
      - name: configure
        run: |
          mkdir build
          cd build
          cmake -T ClangCL -DBUILD_SHARED_LIBS=OFF -DGINKGO_BUILD_CUDA=OFF -DGINKGO_BUILD_OMP=OFF ..
          cmake --build . -j4 --config Release
          ctest . -C Release --output-on-failure
2 changes: 1 addition & 1 deletion accessor/scaled_reduced_row_major.hpp
@@ -156,7 +156,7 @@ class scaled_reduced_row_major
"Only Dimensionality <= 32 is currently supported");

// Allow access to both `scalar_` and `compute_mask_scalar_index()`
friend class detail::enable_write_scalar<
friend struct detail::enable_write_scalar<
dimensionality, scaled_reduced_row_major, scalar_type>;
friend class range<scaled_reduced_row_major>;

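For context on this change: the friend declaration's class-key is made to agree with the class-key of `detail::enable_write_scalar`, which is presumably defined with `struct`. A mismatched class-key is legal C++, but it typically draws MSVC's C4099 and Clang's -Wmismatched-tags, the kind of warning the new MSVC/Clang CI job is meant to surface. A minimal standalone sketch of the pattern (hypothetical names, not taken from this diff):

```cpp
// Minimal sketch of a class-key mismatch; the names are hypothetical.
template <typename T>
struct enable_write {};  // defined with the 'struct' class-key

class accessor {
    // 'friend class enable_write<int>;' would also compile, but the mismatched
    // class-key ('class' vs. 'struct') commonly triggers MSVC C4099 and
    // Clang's -Wmismatched-tags. Matching the definition keeps builds clean.
    friend struct enable_write<int>;

    int scalar_ = 0;
};

int main()
{
    accessor a;
    (void)a;
    return 0;
}
```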
4 changes: 2 additions & 2 deletions benchmark/spmv/spmv_common.hpp
@@ -44,8 +44,8 @@ struct SpmvBenchmark : Benchmark<spmv_benchmark_state<Generator>> {
bool do_print = true)
: name{"spmv"},
formats{std::move(formats)},
generator{generator},
do_print{do_print}
do_print{do_print},
generator{generator}
{}

const std::string& get_name() const override { return name; }
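The reordered initializer list above now follows the declaration order of the members. Members are always initialized in declaration order, whatever order the initializer list is written in, so a mismatched list compiles but draws -Wreorder from GCC and Clang and can obscure dependencies between initializers. A minimal sketch with hypothetical names (not the real SpmvBenchmark layout):

```cpp
#include <string>
#include <utility>

// Hypothetical benchmark-like struct; it only loosely mirrors the diff above.
struct benchmark {
    std::string name;  // declared first
    bool do_print;     // declared second
    int generator;     // declared third

    // Initialization happens in declaration order (name, do_print, generator)
    // regardless of how this list is written. Writing the list in that same
    // order avoids GCC/Clang's -Wreorder and keeps any dependency of one
    // initializer on an earlier member easy to see.
    benchmark(std::string name_, int generator_, bool do_print_)
        : name{std::move(name_)}, do_print{do_print_}, generator{generator_}
    {}
};

int main()
{
    benchmark b{"spmv", 42, true};
    return b.do_print ? 0 : 1;
}
```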
2 changes: 1 addition & 1 deletion benchmark/utils/iteration_control.hpp
@@ -63,7 +63,7 @@
class IterationControl {
using IndexType = unsigned int; //!< to be compatible with GFLAGS type

class run_control;
struct run_control;

public:
/**
14 changes: 7 additions & 7 deletions common/cuda_hip/base/batch_multi_vector_kernels.cpp
@@ -30,8 +30,8 @@ constexpr auto default_block_size = 256;

template <typename ValueType>
void scale(std::shared_ptr<const DefaultExecutor> exec,
const batch::MultiVector<ValueType>* const alpha,
batch::MultiVector<ValueType>* const x)
const batch::MultiVector<ValueType>* alpha,
batch::MultiVector<ValueType>* x)
{
const auto num_blocks = x->get_num_batch_items();
const auto alpha_ub = get_batch_struct(alpha);
@@ -61,9 +61,9 @@ GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(

template <typename ValueType>
void add_scaled(std::shared_ptr<const DefaultExecutor> exec,
const batch::MultiVector<ValueType>* const alpha,
const batch::MultiVector<ValueType>* const x,
batch::MultiVector<ValueType>* const y)
const batch::MultiVector<ValueType>* alpha,
const batch::MultiVector<ValueType>* x,
batch::MultiVector<ValueType>* y)
{
const auto num_blocks = x->get_num_batch_items();
const size_type nrhs = x->get_common_size()[1];
@@ -127,8 +127,8 @@ GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(

template <typename ValueType>
void compute_norm2(std::shared_ptr<const DefaultExecutor> exec,
const batch::MultiVector<ValueType>* const x,
batch::MultiVector<remove_complex<ValueType>>* const result)
const batch::MultiVector<ValueType>* x,
batch::MultiVector<remove_complex<ValueType>>* result)
{
const auto num_blocks = x->get_num_batch_items();
const auto num_rhs = x->get_common_size()[1];
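The kernel definitions above drop the trailing `const` from pointer parameters. Top-level `const` on a parameter is not part of a function's type, so the declarations and definitions already referred to the same functions; removing it only makes the definitions textually match the kernel declarations. A small sketch of the equivalence, using plain functions rather than the actual Ginkgo kernels:

```cpp
// Declaration as it might appear in a header: no top-level const on parameters.
void scale(const double* alpha, double* x, int n);

// Definition. Writing 'const double* const alpha, double* const x, const int n'
// here would define the very same function; top-level const only forbids
// reassigning the parameter variables inside the body and is invisible to
// callers, which is why dropping it is purely a consistency cleanup.
void scale(const double* alpha, double* x, int n)
{
    for (int i = 0; i < n; ++i) {
        x[i] *= *alpha;
    }
}

int main()
{
    double v[2]{1.0, 2.0};
    const double a{3.0};
    scale(&a, v, 2);
    return (v[0] == 3.0 && v[1] == 6.0) ? 0 : 1;
}
```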
12 changes: 6 additions & 6 deletions common/cuda_hip/matrix/csr_kernels.template.cpp
@@ -1823,9 +1823,9 @@ void extract_diagonal(std::shared_ptr<const DefaultExecutor> exec,


template <typename ValueType, typename IndexType>
void check_diagonal_entries_exist(
std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* const mtx, bool& has_all_diags)
void check_diagonal_entries_exist(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* mtx,
bool& has_all_diags)
{
const auto num_diag = static_cast<IndexType>(
std::min(mtx->get_size()[0], mtx->get_size()[1]));
@@ -1846,9 +1846,9 @@

template <typename ValueType, typename IndexType>
void add_scaled_identity(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Dense<ValueType>* const alpha,
const matrix::Dense<ValueType>* const beta,
matrix::Csr<ValueType, IndexType>* const mtx)
const matrix::Dense<ValueType>* alpha,
const matrix::Dense<ValueType>* beta,
matrix::Csr<ValueType, IndexType>* mtx)
{
const auto nrows = mtx->get_size()[0];
if (nrows == 0) {
38 changes: 18 additions & 20 deletions common/cuda_hip/matrix/fbcsr_kernels.template.cpp
@@ -294,8 +294,8 @@ __global__ void __launch_bounds__(default_block_size)

template <typename ValueType, typename IndexType>
void fallback_transpose(const std::shared_ptr<const DefaultExecutor> exec,
const matrix::Fbcsr<ValueType, IndexType>* const input,
matrix::Fbcsr<ValueType, IndexType>* const output)
const matrix::Fbcsr<ValueType, IndexType>* input,
matrix::Fbcsr<ValueType, IndexType>* output)
{
const auto in_num_row_blocks = input->get_num_block_rows();
const auto out_num_row_blocks = output->get_num_block_rows();
@@ -353,8 +353,8 @@ void fill_in_dense(std::shared_ptr<const DefaultExecutor> exec,

template <typename ValueType, typename IndexType>
void convert_to_csr(const std::shared_ptr<const DefaultExecutor> exec,
const matrix::Fbcsr<ValueType, IndexType>* const source,
matrix::Csr<ValueType, IndexType>* const result)
const matrix::Fbcsr<ValueType, IndexType>* source,
matrix::Csr<ValueType, IndexType>* result)
{
constexpr auto warps_per_block = default_block_size / config::warp_size;
const auto num_blocks =
@@ -373,8 +373,7 @@ void convert_to_csr(const std::shared_ptr<const DefaultExecutor> exec,
template <typename ValueType, typename IndexType>
void is_sorted_by_column_index(
std::shared_ptr<const DefaultExecutor> exec,
const matrix::Fbcsr<ValueType, IndexType>* const to_check,
bool* const is_sorted)
const matrix::Fbcsr<ValueType, IndexType>* to_check, bool* is_sorted)
{
*is_sorted = true;
auto gpu_array = array<bool>(exec, 1);
@@ -396,7 +395,7 @@

template <typename ValueType, typename IndexType>
void sort_by_column_index(const std::shared_ptr<const DefaultExecutor> exec,
matrix::Fbcsr<ValueType, IndexType>* const to_sort)
matrix::Fbcsr<ValueType, IndexType>* to_sort)
GKO_NOT_IMPLEMENTED;


@@ -412,8 +411,8 @@ namespace {
template <typename ValueType>
void dense_transpose(std::shared_ptr<const DefaultExecutor> exec,
const size_type nrows, const size_type ncols,
const size_type orig_stride, const ValueType* const orig,
const size_type trans_stride, ValueType* const trans)
const size_type orig_stride, const ValueType* orig,
const size_type trans_stride, ValueType* trans)
{
if (nrows == 0) {
return;
@@ -439,9 +438,8 @@

template <typename ValueType, typename IndexType>
void spmv(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Fbcsr<ValueType, IndexType>* const a,
const matrix::Dense<ValueType>* const b,
matrix::Dense<ValueType>* const c)
const matrix::Fbcsr<ValueType, IndexType>* a,
const matrix::Dense<ValueType>* b, matrix::Dense<ValueType>* c)
{
if (c->get_size()[0] == 0 || c->get_size()[1] == 0) {
// empty output: nothing to do
@@ -494,11 +492,11 @@

template <typename ValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Dense<ValueType>* const alpha,
const matrix::Fbcsr<ValueType, IndexType>* const a,
const matrix::Dense<ValueType>* const b,
const matrix::Dense<ValueType>* const beta,
matrix::Dense<ValueType>* const c)
const matrix::Dense<ValueType>* alpha,
const matrix::Fbcsr<ValueType, IndexType>* a,
const matrix::Dense<ValueType>* b,
const matrix::Dense<ValueType>* beta,
matrix::Dense<ValueType>* c)
{
if (c->get_size()[0] == 0 || c->get_size()[1] == 0) {
// empty output: nothing to do
@@ -556,7 +554,7 @@ namespace {
template <int mat_blk_sz, typename ValueType, typename IndexType>
void transpose_blocks_impl(syn::value_list<int, mat_blk_sz>,
std::shared_ptr<const DefaultExecutor> exec,
matrix::Fbcsr<ValueType, IndexType>* const mat)
matrix::Fbcsr<ValueType, IndexType>* mat)
{
constexpr int subwarp_size = config::warp_size;
const auto nbnz = mat->get_num_stored_blocks();
@@ -579,8 +577,8 @@ GKO_ENABLE_IMPLEMENTATION_SELECTION(select_transpose_blocks,

template <typename ValueType, typename IndexType>
void transpose(const std::shared_ptr<const DefaultExecutor> exec,
const matrix::Fbcsr<ValueType, IndexType>* const orig,
matrix::Fbcsr<ValueType, IndexType>* const trans)
const matrix::Fbcsr<ValueType, IndexType>* orig,
matrix::Fbcsr<ValueType, IndexType>* trans)
{
#ifdef GKO_COMPILING_CUDA
if (sparselib::is_supported<ValueType, IndexType>::value) {
8 changes: 3 additions & 5 deletions common/cuda_hip/reorder/rcm_kernels.cpp
@@ -614,11 +614,9 @@ void sort_levels(std::shared_ptr<const DefaultExecutor> exec,

template <typename IndexType>
void compute_permutation(std::shared_ptr<const DefaultExecutor> exec,
const IndexType num_rows,
const IndexType* const row_ptrs,
const IndexType* const col_idxs,
IndexType* const permutation,
IndexType* const inv_permutation,
const IndexType num_rows, const IndexType* row_ptrs,
const IndexType* col_idxs, IndexType* permutation,
IndexType* inv_permutation,
const gko::reorder::starting_strategy strategy)
{
if (num_rows == 0) {
6 changes: 3 additions & 3 deletions common/unified/matrix/dense_kernels.template.cpp
@@ -730,9 +730,9 @@ void get_imag(std::shared_ptr<const DefaultExecutor> exec,

template <typename ValueType, typename ScalarType>
void add_scaled_identity(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Dense<ScalarType>* const alpha,
const matrix::Dense<ScalarType>* const beta,
matrix::Dense<ValueType>* const mtx)
const matrix::Dense<ScalarType>* alpha,
const matrix::Dense<ScalarType>* beta,
matrix::Dense<ValueType>* mtx)
{
run_kernel(
exec,
2 changes: 1 addition & 1 deletion core/base/dense_cache.cpp
@@ -32,7 +32,7 @@ void DenseCache<ValueType>::init_from(
}


#define GKO_DECLARE_DENSE_CACHE(_type) class DenseCache<_type>
#define GKO_DECLARE_DENSE_CACHE(_type) struct DenseCache<_type>
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_CACHE);


2 changes: 1 addition & 1 deletion core/base/device_matrix_data.cpp
@@ -156,7 +156,7 @@ device_matrix_data<ValueType, IndexType>::empty_out()


#define GKO_DECLARE_DEVICE_MATRIX_DATA(ValueType, IndexType) \
struct device_matrix_data<ValueType, IndexType>
class device_matrix_data<ValueType, IndexType>
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_DEVICE_MATRIX_DATA);


4 changes: 2 additions & 2 deletions core/base/perturbation.cpp
@@ -89,9 +89,9 @@ Perturbation<ValueType>::Perturbation(std::shared_ptr<const LinOp> scalar,
std::shared_ptr<const LinOp> projector)
: EnableLinOp<Perturbation>(basis->get_executor(),
gko::dim<2>{basis->get_size()[0]}),
scalar_{std::move(scalar)},
basis_{std::move(basis)},
projector_{std::move(projector)}
projector_{std::move(projector)},
scalar_{std::move(scalar)}
{
this->validate_perturbation();
}
2 changes: 1 addition & 1 deletion core/base/segmented_array.cpp
@@ -178,7 +178,7 @@ segmented_array<T>& segmented_array<T>::operator=(segmented_array&& other)
}


#define GKO_DECLARE_SEGMENTED_ARRAY(_type) class segmented_array<_type>
#define GKO_DECLARE_SEGMENTED_ARRAY(_type) struct segmented_array<_type>

GKO_INSTANTIATE_FOR_EACH_POD_TYPE(GKO_DECLARE_SEGMENTED_ARRAY);

2 changes: 1 addition & 1 deletion core/distributed/index_map.cpp
@@ -176,7 +176,7 @@ index_map<LocalIndexType, GlobalIndexType>::index_map(
}


#define GKO_DECLARE_INDEX_MAP(_ltype, _gtype) class index_map<_ltype, _gtype>
#define GKO_DECLARE_INDEX_MAP(_ltype, _gtype) struct index_map<_ltype, _gtype>

GKO_INSTANTIATE_FOR_EACH_LOCAL_GLOBAL_INDEX_TYPE(GKO_DECLARE_INDEX_MAP);

2 changes: 1 addition & 1 deletion core/log/batch_logger.cpp
@@ -63,7 +63,7 @@ log_data<ValueType>::log_data(std::shared_ptr<const Executor> exec,
}
}

#define GKO_DECLARE_LOG_DATA(_type) class log_data<_type>
#define GKO_DECLARE_LOG_DATA(_type) struct log_data<_type>

GKO_INSTANTIATE_FOR_EACH_NON_COMPLEX_VALUE_TYPE(GKO_DECLARE_LOG_DATA);

4 changes: 2 additions & 2 deletions core/matrix/csr.cpp
@@ -1029,8 +1029,8 @@ void Csr<ValueType, IndexType>::inv_scale_impl(const LinOp* alpha)


template <typename ValueType, typename IndexType>
void Csr<ValueType, IndexType>::add_scaled_identity_impl(const LinOp* const a,
const LinOp* const b)
void Csr<ValueType, IndexType>::add_scaled_identity_impl(const LinOp* a,
const LinOp* b)
{
bool has_diags{false};
this->get_executor()->run(
7 changes: 3 additions & 4 deletions core/matrix/dense.cpp
@@ -1906,8 +1906,7 @@ void Dense<ValueType>::get_imag(ptr_param<real_type> result) const


template <typename ValueType>
void Dense<ValueType>::add_scaled_identity_impl(const LinOp* const a,
const LinOp* const b)
void Dense<ValueType>::add_scaled_identity_impl(const LinOp* a, const LinOp* b)
{
precision_dispatch_real_complex<ValueType>(
[this](auto dense_alpha, auto dense_beta, auto dense_x) {
@@ -2019,8 +2018,8 @@ Dense<ValueType>::Dense(std::shared_ptr<const Executor> exec,
const dim<2>& size, array<value_type> values,
size_type stride)
: EnableLinOp<Dense>(exec, size),
values_{exec, std::move(values)},
stride_{stride}
stride_{stride},
values_{exec, std::move(values)}
{
if (size[0] > 0 && size[1] > 0) {
GKO_ENSURE_IN_BOUNDS((size[0] - 1) * stride + size[1] - 1,
10 changes: 5 additions & 5 deletions core/matrix/ell.cpp
@@ -375,10 +375,10 @@ Ell<ValueType, IndexType>::Ell(std::shared_ptr<const Executor> exec,
size_type num_stored_elements_per_row,
size_type stride)
: EnableLinOp<Ell>(exec, size),
num_stored_elements_per_row_(num_stored_elements_per_row),
stride_(stride == 0 ? size[0] : stride),
values_(exec, stride_ * num_stored_elements_per_row),
col_idxs_(exec, stride_ * num_stored_elements_per_row),
num_stored_elements_per_row_(num_stored_elements_per_row)
col_idxs_(exec, stride_ * num_stored_elements_per_row)
{}


@@ -389,10 +389,10 @@ Ell<ValueType, IndexType>::Ell(std::shared_ptr<const Executor> exec,
size_type num_stored_elements_per_row,
size_type stride)
: EnableLinOp<Ell>(exec, size),
values_{exec, std::move(values)},
col_idxs_{exec, std::move(col_idxs)},
num_stored_elements_per_row_{num_stored_elements_per_row},
stride_{stride}
stride_{stride},
values_{exec, std::move(values)},
col_idxs_{exec, std::move(col_idxs)}
{
GKO_ASSERT_EQ(num_stored_elements_per_row_ * stride_, values_.get_size());
GKO_ASSERT_EQ(num_stored_elements_per_row_ * stride_, col_idxs_.get_size());
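The Ell constructors are the case where initializer order is more than a warning: `values_` and `col_idxs_` are sized using `stride_`, which is only well-defined because `stride_` is declared, and therefore initialized, before them. Writing the initializer list in declaration order makes that dependency visible. A reduced sketch of the hazard, with hypothetical names rather than the real Ell members:

```cpp
#include <cstddef>
#include <vector>

// Reduced sketch; not the actual Ell class.
struct ell_like {
    std::size_t stride_;          // declared first, so initialized first
    std::vector<double> values_;  // sized from stride_, declared after it

    // Safe only because stride_ precedes values_ in the declaration order.
    // If the declarations were swapped, values_ would read an uninitialized
    // stride_ here, even if this list were still written "stride_ first".
    ell_like(std::size_t rows, std::size_t nnz_per_row, std::size_t stride)
        : stride_(stride == 0 ? rows : stride),
          values_(stride_ * nnz_per_row)
    {}
};

int main()
{
    ell_like e{10, 3, 0};
    return e.values_.size() == 30 ? 0 : 1;
}
```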