Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Standard conformance fixes #190

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 8 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,10 @@ Optional dependencies are:

Compilers tested include:

* gfortran-9.4.0
* ifort-2021.4
* ifx-2021.4
* flang-new 20.0.0
* gfortran 13.2.0, 14.0.1
* ifort 2021.13.1
* ifx 2024.2.1

### Building with fpm

Expand All @@ -85,7 +86,7 @@ Once installed, use the compiler wrappers `caf` and `cafrun` to build and execut
in parallel, respectively:

```
fpm build --compiler caf --profile release
fpm build --compiler caf --profile release --flag "-cpp -DPARALLEL"
```

#### Testing with fpm
Expand All @@ -107,7 +108,7 @@ See the [Fortran Package Manager](https://github.com/fortran-lang/fpm) for more
```
mkdir build
cd build
cmake .. -DSERIAL=1
cmake ..
make
```

Expand All @@ -122,7 +123,7 @@ in parallel, respectively:


```
FC=caf cmake ..
FC=caf cmake .. -DPARALLEL=1
make
cafrun -n 4 bin/mnist # run MNIST example on 4 cores
```
Expand All @@ -139,7 +140,7 @@ FC=ifort cmake ..
for a parallel build of neural-fortran, or

```
FC=ifort cmake .. -DSERIAL=1
FC=ifort cmake ..
```

for a serial build.
Expand Down
38 changes: 20 additions & 18 deletions cmake/compilers.cmake
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
# compiler flags for gfortran
if(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")

if(SERIAL)
message(STATUS "Configuring to build with -fcoarray=single")
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:-fcoarray=single>")
if(PARALLEL)
message(STATUS "Configuring to build with -fcoarray=shared")
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:-fcoarray=shared>")
add_compile_definitions(PARALLEL)
else()
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:-fcoarray=lib>")
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:-fcoarray=single>")
endif()

if(BLAS)
Expand All @@ -14,21 +15,22 @@ if(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
message(STATUS "Configuring build to use BLAS from ${BLAS}")
endif()

add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Debug>>:-fcheck=bounds;-fbacktrace>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Release>>:-Ofast;-fno-frontend-optimize;-fno-backtrace>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Debug>>:-cpp;-fcheck=bounds;-fbacktrace>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Release>>:-cpp;-Ofast;-fno-frontend-optimize;-fno-backtrace>")

elseif(CMAKE_Fortran_COMPILER_ID MATCHES "^Intel")
# compiler flags for ifort

if(SERIAL)
message(STATUS "Configuring to build with -coarray=single")
if(PARALLEL)
message(STATUS "Configuring to build with -coarray=shared")
if(WIN32)
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:/Qcoarray:single>")
add_link_options("$<$<COMPILE_LANGUAGE:Fortran>:/Qcoarray:single>")
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:/Qcoarray:shared>")
add_link_options("$<$<COMPILE_LANGUAGE:Fortran>:/Qcoarray:shared>")
else()
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:-coarray=single>")
add_link_options("$<$<COMPILE_LANGUAGE:Fortran>:-coarray=single>")
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:-coarray=shared>")
add_link_options("$<$<COMPILE_LANGUAGE:Fortran>:-coarray=shared>")
endif()
add_compile_definitions(PARALLEL)
else()
if(WIN32)
add_compile_options("$<$<COMPILE_LANGUAGE:Fortran>:/Qcoarray:shared>")
Expand All @@ -40,16 +42,16 @@ elseif(CMAKE_Fortran_COMPILER_ID MATCHES "^Intel")
endif()

if(WIN32)
string(APPEND CMAKE_Fortran_FLAGS " /assume:byterecl")
string(APPEND CMAKE_Fortran_FLAGS " /assume:byterecl /fpp")
else()
string(APPEND CMAKE_Fortran_FLAGS " -assume byterecl")
string(APPEND CMAKE_Fortran_FLAGS " -assume byterecl -fpp")
endif()
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Debug>>:-check;-traceback>")
# add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Release>>:-O3>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Debug>>:-fpp;-check;-traceback>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Release>>:-fpp;-O3>")

elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "Cray")
# compiler flags for Cray ftn
string(APPEND CMAKE_Fortran_FLAGS " -h noomp")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Debug>>:-O0;-g>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Release>>:-O3>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Debug>>:-e Z;-O0;-g>")
add_compile_options("$<$<AND:$<COMPILE_LANGUAGE:Fortran>,$<CONFIG:Release>>:-e Z;-O3>")
endif()
8 changes: 4 additions & 4 deletions cmake/options.cmake
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
option(SERIAL "Serial execution")
option(PARALLEL "Parallel execution")
option(${PROJECT_NAME}_BUILD_TESTING "build ${PROJECT_NAME} tests" true)
option(${PROJECT_NAME}_BUILD_EXAMPLES "build ${PROJECT_NAME} examples" true)

Expand All @@ -8,10 +8,10 @@ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

if(SERIAL)
message(STATUS "Configuring build for serial execution")
else()
if(PARALLEL)
message(STATUS "Configuring build for parallel execution")
else()
message(STATUS "Configuring build for serial execution; configure with -DPARALLEL=1 for a parallel build")
endif()

# --- Generally useful CMake project options
Expand Down
5 changes: 2 additions & 3 deletions example/cnn_mnist.f90
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,8 @@ program cnn_mnist
optimizer=sgd(learning_rate=3.) &
)

if (this_image() == 1) &
print '(a,i2,a,f5.2,a)', 'Epoch ', n, ' done, Accuracy: ', accuracy( &
net, validation_images, label_digits(validation_labels)) * 100, ' %'
print '(a,i2,a,f5.2,a)', 'Epoch ', n, ' done, Accuracy: ', accuracy( &
net, validation_images, label_digits(validation_labels)) * 100, ' %'

end do epochs

Expand Down
12 changes: 5 additions & 7 deletions example/dense_mnist.f90
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,8 @@ program dense_mnist

call net % print_info()

if (this_image() == 1) &
print '(a,f5.2,a)', 'Initial accuracy: ', accuracy( &
net, validation_images, label_digits(validation_labels)) * 100, ' %'
print '(a,f5.2,a)', 'Initial accuracy: ', accuracy( &
net, validation_images, label_digits(validation_labels)) * 100, ' %'

epochs: do n = 1, num_epochs

Expand All @@ -44,10 +43,9 @@ program dense_mnist
! 2 metrics; 1st is default loss function (quadratic), other is Pearson corr.
output_metrics = net % evaluate(validation_images, label_digits(validation_labels), metric=corr())
mean_metrics = sum(output_metrics, 1) / size(output_metrics, 1)
if (this_image() == 1) &
print '(a,i2,3(a,f6.3))', 'Epoch ', n, ' done, Accuracy: ', &
accuracy(net, validation_images, label_digits(validation_labels)) * 100, &
'%, Loss: ', mean_metrics(1), ', Pearson correlation: ', mean_metrics(2)
print '(a,i2,3(a,f6.3))', 'Epoch ', n, ' done, Accuracy: ', &
accuracy(net, validation_images, label_digits(validation_labels)) * 100, &
'%, Loss: ', mean_metrics(1), ', Pearson correlation: ', mean_metrics(2)
end block

end do epochs
Expand Down
2 changes: 1 addition & 1 deletion src/nf/nf_activation.f90
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,7 @@ pure function eval_3d_celu_prime(self, x) result(res)
end where
end function

pure function get_activation_by_name(activation_name) result(res)
function get_activation_by_name(activation_name) result(res)
! Workaround to get activation_function with some
! hardcoded default parameters by its name.
! Need this function since we get only activation name
Expand Down
2 changes: 1 addition & 1 deletion src/nf/nf_conv2d_layer.f90
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ module nf_conv2d_layer
end type conv2d_layer

interface conv2d_layer
pure module function conv2d_layer_cons(filters, kernel_size, activation) &
module function conv2d_layer_cons(filters, kernel_size, activation) &
result(res)
!! `conv2d_layer` constructor function
integer, intent(in) :: filters
Expand Down
2 changes: 1 addition & 1 deletion src/nf/nf_conv2d_layer_submodule.f90
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

contains

pure module function conv2d_layer_cons(filters, kernel_size, activation) result(res)
module function conv2d_layer_cons(filters, kernel_size, activation) result(res)
implicit none
integer, intent(in) :: filters
integer, intent(in) :: kernel_size
Expand Down
2 changes: 1 addition & 1 deletion src/nf/nf_dense_layer.f90
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ module nf_dense_layer
end type dense_layer

interface dense_layer
elemental module function dense_layer_cons(output_size, activation) &
module function dense_layer_cons(output_size, activation) &
result(res)
!! This function returns the `dense_layer` instance.
integer, intent(in) :: output_size
Expand Down
4 changes: 3 additions & 1 deletion src/nf/nf_dense_layer_submodule.f90
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

contains

elemental module function dense_layer_cons(output_size, activation) &
module function dense_layer_cons(output_size, activation) &
result(res)
integer, intent(in) :: output_size
class(activation_function), intent(in) :: activation
Expand Down Expand Up @@ -129,7 +129,9 @@ module subroutine init(self, input_shape)
self % weights = self % weights / self % input_size

! Broadcast weights to all other images, if any.
#ifdef PARALLEL
call co_broadcast(self % weights, 1)
#endif

allocate(self % biases(self % output_size))
self % biases = 0
Expand Down
14 changes: 7 additions & 7 deletions src/nf/nf_layer_constructors.f90
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ module nf_layer_constructors

interface input

pure module function input1d(layer_size) result(res)
module function input1d(layer_size) result(res)
!! 1-d input layer constructor.
!!
!! This layer is for inputting 1-d data to the network.
Expand All @@ -35,7 +35,7 @@ pure module function input1d(layer_size) result(res)
!! Resulting layer instance
end function input1d

pure module function input3d(layer_shape) result(res)
module function input3d(layer_shape) result(res)
!! 3-d input layer constructor.
!!
!! This layer is for inputting 3-d data to the network.
Expand All @@ -62,7 +62,7 @@ end function input3d

interface

pure module function dense(layer_size, activation) result(res)
module function dense(layer_size, activation) result(res)
!! Dense (fully-connected) layer constructor.
!!
!! This layer is a building block for dense, fully-connected networks,
Expand All @@ -85,7 +85,7 @@ pure module function dense(layer_size, activation) result(res)
!! Resulting layer instance
end function dense

pure module function flatten() result(res)
module function flatten() result(res)
!! Flatten (3-d -> 1-d) layer constructor.
!!
!! Use this layer to chain layers with 3-d outputs to layers with 1-d
Expand All @@ -106,7 +106,7 @@ pure module function flatten() result(res)
!! Resulting layer instance
end function flatten

pure module function conv2d(filters, kernel_size, activation) result(res)
module function conv2d(filters, kernel_size, activation) result(res)
!! 2-d convolutional layer constructor.
!!
!! This layer is for building 2-d convolutional network.
Expand All @@ -133,7 +133,7 @@ pure module function conv2d(filters, kernel_size, activation) result(res)
!! Resulting layer instance
end function conv2d

pure module function maxpool2d(pool_size, stride) result(res)
module function maxpool2d(pool_size, stride) result(res)
!! 2-d maxpooling layer constructor.
!!
!! This layer is for downscaling other layers, typically `conv2d`.
Expand All @@ -155,7 +155,7 @@ pure module function maxpool2d(pool_size, stride) result(res)
!! Resulting layer instance
end function maxpool2d

pure module function reshape(output_shape) result(res)
module function reshape(output_shape) result(res)
!! Rank-1 to rank-any reshape layer constructor.
!! Currently implemented is only rank-3 for the output of the reshape.
!!
Expand Down
14 changes: 7 additions & 7 deletions src/nf/nf_layer_constructors_submodule.f90
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@

contains

pure module function conv2d(filters, kernel_size, activation) result(res)
module function conv2d(filters, kernel_size, activation) result(res)
integer, intent(in) :: filters
integer, intent(in) :: kernel_size
class(activation_function), intent(in), optional :: activation
Expand All @@ -40,7 +40,7 @@ pure module function conv2d(filters, kernel_size, activation) result(res)
end function conv2d


pure module function dense(layer_size, activation) result(res)
module function dense(layer_size, activation) result(res)
integer, intent(in) :: layer_size
class(activation_function), intent(in), optional :: activation
type(layer) :: res
Expand All @@ -63,14 +63,14 @@ pure module function dense(layer_size, activation) result(res)
end function dense


pure module function flatten() result(res)
module function flatten() result(res)
type(layer) :: res
res % name = 'flatten'
allocate(res % p, source=flatten_layer())
end function flatten


pure module function input1d(layer_size) result(res)
module function input1d(layer_size) result(res)
integer, intent(in) :: layer_size
type(layer) :: res
res % name = 'input'
Expand All @@ -81,7 +81,7 @@ pure module function input1d(layer_size) result(res)
end function input1d


pure module function input3d(layer_shape) result(res)
module function input3d(layer_shape) result(res)
integer, intent(in) :: layer_shape(3)
type(layer) :: res
res % name = 'input'
Expand All @@ -91,7 +91,7 @@ pure module function input3d(layer_shape) result(res)
res % initialized = .true.
end function input3d

pure module function maxpool2d(pool_size, stride) result(res)
module function maxpool2d(pool_size, stride) result(res)
integer, intent(in) :: pool_size
integer, intent(in), optional :: stride
integer :: stride_
Expand Down Expand Up @@ -119,7 +119,7 @@ pure module function maxpool2d(pool_size, stride) result(res)

end function maxpool2d

pure module function reshape(output_shape) result(res)
module function reshape(output_shape) result(res)
integer, intent(in) :: output_shape(:)
type(layer) :: res

Expand Down
2 changes: 1 addition & 1 deletion src/nf/nf_metrics.f90
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ end function metric_interface

contains

pure module function corr_eval(true, predicted) result(res)
pure function corr_eval(true, predicted) result(res)
!! Pearson correlation function:
!!
real, intent(in) :: true(:)
Expand Down
Loading
Loading