export API #1824

Merged Feb 24, 2022 (30 commits).
Commits
43479df  export API (boeschf, Feb 1, 2022)
6a2cf5d  Merge remote-tracking branch 'upstream/master' into export (boeschf, Feb 1, 2022)
1e7e681  absolute paths in cmake (boeschf, Feb 1, 2022)
aa0bc09  cleanup (boeschf, Feb 1, 2022)
dd710df  more exports (boeschf, Feb 1, 2022)
6ce00da  some more exports (boeschf, Feb 2, 2022)
c9b0e9e  mpi exports (boeschf, Feb 2, 2022)
f1077e9  export backends (boeschf, Feb 2, 2022)
634b84f  unit tests, and other exports (boeschf, Feb 2, 2022)
1bdcab7  reverted inclusion of mpi headers (boeschf, Feb 2, 2022)
feddd05  proper installation (boeschf, Feb 2, 2022)
ccce912  compiler/platform (boeschf, Feb 3, 2022)
b109fe0  cmake cleanup (boeschf, Feb 3, 2022)
f578fe4  typo (boeschf, Feb 3, 2022)
5a02548  ci + shared libs (boeschf, Feb 8, 2022)
a8c4042  Merge remote-tracking branch 'upstream/master' into export (boeschf, Feb 8, 2022)
f2e3917  load shared lib (boeschf, Feb 8, 2022)
98d9966  export friend function test_invariants (boeschf, Feb 8, 2022)
988e58f  export friend print operator (boeschf, Feb 10, 2022)
6784f1e  tests without macos (boeschf, Feb 10, 2022)
f38aece  tests without macos (boeschf, Feb 10, 2022)
7946024  removed extra declarations (boeschf, Feb 10, 2022)
c979596  visibility for tests (boeschf, Feb 16, 2022)
86c1f15  cpp examples (boeschf, Feb 16, 2022)
c23fe96  test only shared variant (boeschf, Feb 16, 2022)
e58d435  yaml typo (boeschf, Feb 16, 2022)
ff39071  yaml typo (boeschf, Feb 16, 2022)
ec94c00  Merge remote-tracking branch 'upsteam/master' into export (boeschf, Feb 16, 2022)
4919245  test all variants (boeschf, Feb 16, 2022)
ff79a0c  comments (boeschf, Feb 24, 2022)
12 changes: 11 additions & 1 deletion .github/workflows/test-everything.yml
@@ -83,6 +83,7 @@ jobs:
mpi: "ON",
simd: "OFF"
}
variant: [shared, static]
env:
CC: ${{ matrix.config.cc }}
CXX: ${{ matrix.config.cxx }}
@@ -149,13 +150,22 @@ jobs:
mpic++ --show
mpicc --show
echo $PYTHONPATH
- name: Build arbor
- if: ${{ matrix.variant == 'static' }}
name: Build arbor
run: |
mkdir build
cd build
cmake .. -DCMAKE_CXX_COMPILER=$CXX -DCMAKE_C_COMPILER=$CC -DARB_WITH_PYTHON=ON -DARB_VECTORIZE=${{ matrix.config.simd }} -DPython3_EXECUTABLE=`which python` -DARB_WITH_MPI=${{ matrix.config.mpi }} -DARB_USE_BUNDLED_LIBS=ON -DARB_WITH_NEUROML=ON
make -j4 tests examples pyarb html
cd -
- if: ${{ matrix.variant == 'shared' }}
name: Build arbor
run: |
mkdir build
cd build
cmake .. -DCMAKE_CXX_COMPILER=$CXX -DCMAKE_C_COMPILER=$CC -DARB_WITH_PYTHON=ON -DARB_VECTORIZE=${{ matrix.config.simd }} -DPython3_EXECUTABLE=`which python` -DARB_WITH_MPI=${{ matrix.config.mpi }} -DARB_USE_BUNDLED_LIBS=ON -DARB_WITH_NEUROML=ON -DBUILD_SHARED_LIBS=ON
make -j4 tests examples pyarb html
cd -
- name: Install arbor
run: |
cd build
1 change: 1 addition & 0 deletions .gitignore
@@ -24,6 +24,7 @@ __pycache__
*.swq
*.swm
*.swl
*~

.cache

7 changes: 5 additions & 2 deletions CMakeLists.txt
@@ -172,8 +172,11 @@ set(CMAKE_CXX_EXTENSIONS OFF)
# Data and internal scripts go here
set(ARB_INSTALL_DATADIR ${CMAKE_INSTALL_FULL_DATAROOTDIR}/arbor)
# Derived paths for arbor-build-catalogue
file(RELATIVE_PATH ARB_REL_DATADIR ${CMAKE_INSTALL_FULL_BINDIR} ${CMAKE_INSTALL_FULL_DATAROOTDIR}/arbor)
file(RELATIVE_PATH ARB_REL_PACKAGEDIR ${CMAKE_INSTALL_FULL_BINDIR} ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/arbor)
get_filename_component(absolute_full_bindir ${CMAKE_INSTALL_FULL_BINDIR} REALPATH)
get_filename_component(absolute_full_datarootdir ${CMAKE_INSTALL_FULL_DATAROOTDIR} REALPATH)
get_filename_component(absolute_full_libdir ${CMAKE_INSTALL_FULL_LIBDIR} REALPATH)
file(RELATIVE_PATH ARB_REL_DATADIR ${absolute_full_bindir} ${absolute_full_datarootdir}/arbor)
file(RELATIVE_PATH ARB_REL_PACKAGEDIR ${absolute_full_bindir} ${absolute_full_libdir}/cmake/arbor)

# Interface library `arbor-config-defs` collects configure-time defines
# for arbor, arborenv, arborio, of the form ARB_HAVE_XXX. These
5 changes: 5 additions & 0 deletions arbor/CMakeLists.txt
@@ -136,5 +136,10 @@ endif()

set_target_properties(arbor PROPERTIES CUDA_RESOLVE_DEVICE_SYMBOLS ON)

export_visibility(arbor)

install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/arbor/export.hpp
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/arbor)

install(TARGETS arbor EXPORT arbor-targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
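The generated include/arbor/export.hpp is installed here, but its contents are not part of the diff. As a rough sketch only, assuming the usual CMake export-header conventions rather than the exact output of export_visibility(arbor), such a header defines the ARB_ARBOR_API macro used throughout the rest of this PR roughly as follows:

#pragma once

// Illustrative approximation; the real arbor/export.hpp generated by
// export_visibility(arbor) may differ in macro names and structure.
#if defined(_WIN32) || defined(__CYGWIN__)
#  if defined(arbor_EXPORTS)               // defined while building the DLL
#    define ARB_ARBOR_API __declspec(dllexport)
#  else                                    // defined for consumers of the DLL
#    define ARB_ARBOR_API __declspec(dllimport)
#  endif
#else                                      // ELF / Mach-O shared objects
#  define ARB_ARBOR_API __attribute__((visibility("default")))
#endif

Assuming the build also defaults to hidden visibility, only symbols carrying this macro stay visible to the unit tests, examples and pyarb module that link against the shared libarbor, which is why the remaining files in this PR add ARB_ARBOR_API to classes and free functions.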

4 changes: 2 additions & 2 deletions arbor/assert.cpp
@@ -6,7 +6,7 @@

namespace arb {

void abort_on_failed_assertion(
void ARB_ARBOR_API abort_on_failed_assertion(
const char* assertion,
const char* file,
int line,
@@ -22,7 +22,7 @@ void abort_on_failed_assertion(
std::abort();
}

void ignore_failed_assertion(
void ARB_ARBOR_API ignore_failed_assertion(
const char* assertion,
const char* file,
int line,
3 changes: 2 additions & 1 deletion arbor/backends/gpu/forest.hpp
@@ -2,14 +2,15 @@

#include <vector>

#include <arbor/export.hpp>
#include "tree.hpp"

namespace arb {
namespace gpu {

using size_type = int;

struct forest {
struct ARB_ARBOR_API forest {
forest(const std::vector<size_type>& p, const std::vector<size_type>& cell_cv_divs);

void optimize();
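Here the annotation sits on the struct rather than on individual functions: a class-level ARB_ARBOR_API gives every member of the class default visibility. A small, hypothetical comparison (compile-only sketch, names not taken from Arbor):

#include <arbor/export.hpp>

// Class-level export: all member functions (and static data members) are
// exported in one annotation, as done for forest, ion_state, istim_state and
// shared_state in this PR.
struct ARB_ARBOR_API exported_as_a_whole {
    void f();
    void g();
};

// Per-symbol export: only f() stays visible; g() remains hidden when the
// library defaults to hidden visibility.
struct exported_selectively {
    ARB_ARBOR_API void f();
    void g();
};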
2 changes: 1 addition & 1 deletion arbor/backends/gpu/matrix_assemble.cu
@@ -154,7 +154,7 @@ void assemble_matrix_interleaved(

} // namespace kernels

void assemble_matrix_flat(
ARB_ARBOR_API void assemble_matrix_flat(
fvm_value_type* d,
fvm_value_type* rhs,
const fvm_value_type* invariant_d,
8 changes: 4 additions & 4 deletions arbor/backends/gpu/matrix_fine.cu
@@ -245,7 +245,7 @@ void solve_matrix_fine(

} // namespace kernels

void gather(
ARB_ARBOR_API void gather(
const fvm_value_type* from,
fvm_value_type* to,
const fvm_index_type* p,
@@ -257,7 +257,7 @@ void gather(
kernels::gather<<<griddim, blockdim>>>(from, to, p, n);
}

void scatter(
ARB_ARBOR_API void scatter(
const fvm_value_type* from,
fvm_value_type* to,
const fvm_index_type* p,
@@ -269,7 +269,7 @@ void scatter(
kernels::scatter<<<griddim, blockdim>>>(from, to, p, n);
}

void assemble_matrix_fine(
ARB_ARBOR_API void assemble_matrix_fine(
fvm_value_type* d,
fvm_value_type* rhs,
const fvm_value_type* invariant_d,
@@ -308,7 +308,7 @@ void assemble_matrix_fine(
// num_levels = [3, 2, 3, ...]
// num_cells = [2, 3, ...]
// num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size()
void solve_matrix_fine(
ARB_ARBOR_API void solve_matrix_fine(
fvm_value_type* rhs,
fvm_value_type* d, // diagonal values
const fvm_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD)
9 changes: 5 additions & 4 deletions arbor/backends/gpu/matrix_fine.hpp
@@ -1,5 +1,6 @@
#include <arbor/fvm_types.hpp>

#include <arbor/export.hpp>
#include <ostream>

namespace arb {
@@ -13,19 +14,19 @@ struct level_metadata {
};

// C wrappers around kernels
void gather(
ARB_ARBOR_API void gather(
const fvm_value_type* from,
fvm_value_type* to,
const fvm_index_type* p,
unsigned n);

void scatter(
ARB_ARBOR_API void scatter(
const fvm_value_type* from,
fvm_value_type* to,
const fvm_index_type* p,
unsigned n);

void assemble_matrix_fine(
ARB_ARBOR_API void assemble_matrix_fine(
fvm_value_type* d,
fvm_value_type* rhs,
const fvm_value_type* invariant_d,
@@ -39,7 +40,7 @@ void assemble_matrix_fine(
const fvm_index_type* perm,
unsigned n);

void solve_matrix_fine(
ARB_ARBOR_API void solve_matrix_fine(
fvm_value_type* rhs,
fvm_value_type* d, // diagonal values
const fvm_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD)
2 changes: 1 addition & 1 deletion arbor/backends/gpu/matrix_solve.cu
@@ -86,7 +86,7 @@ void solve_matrix_interleaved(

} // namespace kernels

void solve_matrix_flat(
ARB_ARBOR_API void solve_matrix_flat(
fvm_value_type* rhs,
fvm_value_type* d,
const fvm_value_type* u,
5 changes: 3 additions & 2 deletions arbor/backends/gpu/matrix_state_flat.hpp
@@ -1,5 +1,6 @@
#pragma once

#include <arbor/export.hpp>
#include <arbor/fvm_types.hpp>

#include "memory/memory.hpp"
@@ -13,15 +14,15 @@ namespace gpu {

// CUDA implementation entry points:

void solve_matrix_flat(
ARB_ARBOR_API void solve_matrix_flat(
fvm_value_type* rhs,
fvm_value_type* d,
const fvm_value_type* u,
const fvm_index_type* p,
const fvm_index_type* cell_cv_divs,
int num_mtx);

void assemble_matrix_flat(
ARB_ARBOR_API void assemble_matrix_flat(
fvm_value_type* d,
fvm_value_type* rhs,
const fvm_value_type* invariant_d,
3 changes: 2 additions & 1 deletion arbor/backends/gpu/multi_event_stream.hpp
@@ -2,6 +2,7 @@

// Indexed collection of pop-only event queues --- CUDA back-end implementation.

#include <arbor/export.hpp>
#include <arbor/arbexcept.hpp>
#include <arbor/common_types.hpp>
#include <arbor/fvm_types.hpp>
@@ -18,7 +19,7 @@ namespace arb {
namespace gpu {

// Base class provides common implementations across event types.
class multi_event_stream_base {
class ARB_ARBOR_API multi_event_stream_base {
public:
using size_type = cell_size_type;
using value_type = fvm_value_type;
2 changes: 1 addition & 1 deletion arbor/backends/gpu/shared_state.cpp
@@ -459,7 +459,7 @@ void shared_state::take_samples(const sample_event_stream::state& s, array& samp
}

// Debug interface
std::ostream& operator<<(std::ostream& o, shared_state& s) {
ARB_ARBOR_API std::ostream& operator<<(std::ostream& o, shared_state& s) {
o << " cv_to_intdom " << s.cv_to_intdom << "\n";
o << " time " << s.time << "\n";
o << " time_to " << s.time_to << "\n";
8 changes: 4 additions & 4 deletions arbor/backends/gpu/shared_state.hpp
@@ -28,7 +28,7 @@ namespace gpu {
* Xo_ cao external calcium concentration
*/

struct ion_state {
struct ARB_ARBOR_API ion_state {
iarray node_index_; // Instance to CV map.
array iX_; // (A/m²) current density
array eX_; // (mV) reversal potential
@@ -62,7 +62,7 @@ struct ion_state {
void reset();
};

struct istim_state {
struct ARB_ARBOR_API istim_state {
// Immutable data (post construction/initialization):
iarray accu_index_; // Instance to accumulator index (accu_stim_ index) map.
iarray accu_to_cv_; // Accumulator index to CV map.
@@ -99,7 +99,7 @@ struct istim_state {
istim_state() = default;
};

struct shared_state {
struct ARB_ARBOR_API shared_state {
struct mech_storage {
array data_;
iarray indices_;
@@ -202,7 +202,7 @@ struct shared_state {
};

// For debugging only
std::ostream& operator<<(std::ostream& o, shared_state& s);
ARB_ARBOR_API std::ostream& operator<<(std::ostream& o, shared_state& s);

} // namespace gpu
} // namespace arb
2 changes: 1 addition & 1 deletion arbor/backends/gpu/stimulus.cu
@@ -52,7 +52,7 @@ void istim_add_current_impl(int n, istim_pp pp) {

} // namespace kernel

void istim_add_current_impl(int n, const istim_pp& pp) {
ARB_ARBOR_API void istim_add_current_impl(int n, const istim_pp& pp) {
constexpr unsigned block_dim = 128;
const unsigned grid_dim = impl::block_count(n, block_dim);
if (!grid_dim) return;
3 changes: 2 additions & 1 deletion arbor/backends/gpu/stimulus.hpp
@@ -1,5 +1,6 @@
#pragma once

#include <arbor/export.hpp>
#include <arbor/fvm_types.hpp>

namespace arb {
@@ -25,7 +26,7 @@ struct istim_pp {
fvm_value_type* current_density;
};

void istim_add_current_impl(int n, const istim_pp& pp);
ARB_ARBOR_API void istim_add_current_impl(int n, const istim_pp& pp);

} // namespace gpu
} // namespace arb
2 changes: 1 addition & 1 deletion arbor/backends/multicore/shared_state.cpp
@@ -347,7 +347,7 @@ void shared_state::take_samples(
}

// (Debug interface only.)
std::ostream& operator<<(std::ostream& out, const shared_state& s) {
ARB_ARBOR_API std::ostream& operator<<(std::ostream& out, const shared_state& s) {
using io::csv;

out << "n_intdom " << s.n_intdom << "\n";
9 changes: 5 additions & 4 deletions arbor/backends/multicore/shared_state.hpp
@@ -7,6 +7,7 @@
#include <utility>
#include <vector>

#include <arbor/export.hpp>
#include <arbor/assert.hpp>
#include <arbor/common_types.hpp>
#include <arbor/fvm_types.hpp>
@@ -38,7 +39,7 @@ namespace multicore {
* Xo_ cao external calcium concentration
*/

struct ion_state {
struct ARB_ARBOR_API ion_state {
unsigned alignment = 1; // Alignment and padding multiple.

iarray node_index_; // Instance to CV map.
@@ -73,7 +74,7 @@ struct ion_state {
void reset();
};

struct istim_state {
struct ARB_ARBOR_API istim_state {
unsigned alignment = 1; // Alignment and padding multiple.

// Immutable data (post initialization):
@@ -105,7 +106,7 @@ struct istim_state {
istim_state() = default;
};

struct shared_state {
struct ARB_ARBOR_API shared_state {
struct mech_storage {
array data_;
iarray indices_;
@@ -207,7 +208,7 @@ struct shared_state {
};

// For debugging only:
std::ostream& operator<<(std::ostream& o, const shared_state& s);
ARB_ARBOR_API std::ostream& operator<<(std::ostream& o, const shared_state& s);


} // namespace multicore
2 changes: 1 addition & 1 deletion arbor/cable_cell_param.cpp
@@ -12,7 +12,7 @@

namespace arb {

void check_global_properties(const cable_cell_global_properties& G) {
ARB_ARBOR_API void check_global_properties(const cable_cell_global_properties& G) {
auto& param = G.default_parameters;

if (!param.init_membrane_potential) {
8 changes: 4 additions & 4 deletions arbor/common_types_io.cpp
@@ -4,7 +4,7 @@

namespace arb {

std::ostream& operator<<(std::ostream& o, lid_selection_policy policy) {
ARB_ARBOR_API std::ostream& operator<<(std::ostream& o, lid_selection_policy policy) {
switch (policy) {
case lid_selection_policy::round_robin:
return o << "round_robin";
@@ -14,11 +14,11 @@ std::ostream& operator<<(std::ostream& o, lid_selection_policy policy) {
return o;
}

std::ostream& operator<<(std::ostream& o, arb::cell_member_type m) {
ARB_ARBOR_API std::ostream& operator<<(std::ostream& o, arb::cell_member_type m) {
return o << m.gid << ':' << m.index;
}

std::ostream& operator<<(std::ostream& o, arb::cell_kind k) {
ARB_ARBOR_API std::ostream& operator<<(std::ostream& o, arb::cell_kind k) {
o << "cell_kind::";
switch (k) {
case arb::cell_kind::spike_source:
@@ -33,7 +33,7 @@ std::ostream& operator<<(std::ostream& o, arb::cell_kind k) {
return o;
}

std::ostream& operator<<(std::ostream& o, arb::backend_kind k) {
ARB_ARBOR_API std::ostream& operator<<(std::ostream& o, arb::backend_kind k) {
o << "backend_kind::";
switch (k) {
case arb::backend_kind::multicore:
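These streaming operators illustrate why the definitions in the .cpp files need the annotation as well: a program built against the installed headers and linked to the shared libarbor only resolves the symbols if they were exported with default visibility. A hypothetical consumer sketch, assuming the operator declarations are reachable through <arbor/common_types.hpp> and that include and library paths for an installed Arbor are already set up:

// Hypothetical consumer of the installed shared library, e.g.
//   g++ -std=c++17 print_ids.cpp -larbor     (paths omitted / assumed)
#include <iostream>

#include <arbor/common_types.hpp>

int main() {
    arb::cell_member_type m{42, 3};        // gid 42, index 3
    // Resolves against the ARB_ARBOR_API-annotated definition in
    // common_types_io.cpp above.
    std::cout << m << "\n";                // prints: 42:3
    std::cout << arb::cell_kind::spike_source << "\n";
    return 0;
}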