This PR aims to solve issue 41. #43

Merged 2 commits on Sep 18, 2023
2 changes: 1 addition & 1 deletion .github/workflows/build.yml
@@ -32,7 +32,7 @@ jobs:
- uses: r-lib/actions/setup-pandoc@v2
- uses: r-lib/actions/setup-r-dependencies@v2
with:
extra-packages: any::rcmdcheck, any::covr
extra-packages: any::rcmdcheck, any::covr, vdiffr@1.0.5
needs: check, coverage
- name: Miniconda + Tensorflow + Keras installation
run: |
2 changes: 1 addition & 1 deletion .github/workflows/pkgdown.yaml
@@ -32,7 +32,7 @@ jobs:

- uses: r-lib/actions/setup-r-dependencies@v2
with:
extra-packages: any::pkgdown, local::.
extra-packages: any::pkgdown, local::., vdiffr@1.0.5
needs: website

- name: Build site
1 change: 0 additions & 1 deletion NAMESPACE
@@ -7,7 +7,6 @@ S3method(plot_taylor_and_activation_potentials,keras.engine.training.Model)
S3method(predict,nn2poly)
export(add_constraints)
export(change_string_to_function)
export(check_weight_constraints)
export(eval_poly)
export(nn2poly)
export(obtain_derivatives_list)
6 changes: 1 addition & 5 deletions R/check_weight_constraints.R
@@ -5,12 +5,8 @@
#' @param maxnorm List of 2 elements: the name of the used norm and its max value.
#'
#' @return List across all layers with a vector containing the norms of each weight vector.
#' @export
#'
check_weight_constraints <- function(weights, maxnorm) {
print("The used norm is:")
print(maxnorm)

#### Compute the norm for the full matrix ####

n_weights <- length(weights)
@@ -40,7 +36,7 @@ check_weight_constraints <- function(weights, maxnorm) {
norm <- pracma::Norm(weights[[i]][, j], 2)
weights_norms[[i]][j] <- norm
} else {
print("Checking the l1_norm in case of no chosen norm")
print("Imprecise norm. Computing the l1 norm...")
norm <- pracma::Norm(weights[[i]][, j], 1)
weights_norms[[i]][j] <- norm
}
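For reference, a minimal sketch of the per-column norm computation this function relies on, assuming only that the pracma package is installed (the weight values below are made up):

library(pracma)

# Hypothetical weight column of a single neuron (illustrative values only)
w_col <- c(0.3, -0.4, 0.5)

# 2-norm, as in the branch above that calls pracma::Norm(..., 2)
Norm(w_col, 2)  # sqrt(0.3^2 + 0.4^2 + 0.5^2) ~= 0.7071

# l1 norm, the fallback computed when no known norm is chosen
Norm(w_col, 1)  # 0.3 + 0.4 + 0.5 = 1.2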
16 changes: 16 additions & 0 deletions R/check_weight_dimensions.R
@@ -0,0 +1,16 @@
#' Check the dimensions of a neural network's list of weight matrices
#'
#' @param weights_matrices_list List of weight matrices of a given neural network.
#'
#' @return `TRUE` if the dimensions are correct, `FALSE` otherwise.
#'
check_weights_dimensions <- function(weights_matrices_list) {
for (matrix_index in 2:length(weights_matrices_list)) {
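# Each weight matrix is expected to carry an extra bias row, so the row
# count of the current matrix must equal the previous layer's column count plus one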
nrows_current <- nrow(weights_matrices_list[[matrix_index]])
ncols_prev <- ncol(weights_matrices_list[[matrix_index - 1]])

if (nrows_current != ncols_prev + 1)
return(FALSE)
}
return(TRUE)
}
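As a quick illustration of the convention this helper enforces, a sketch with made-up shapes (assuming check_weights_dimensions() as defined above is loaded):

# 2 inputs (+ bias row) -> 3 neurons, then 3 neurons (+ bias row) -> 1 output
w1 <- matrix(0, nrow = 2 + 1, ncol = 3)
w2 <- matrix(0, nrow = 3 + 1, ncol = 1)
check_weights_dimensions(list(w1, w2))      # TRUE: 4 rows == 3 columns + 1

w2_bad <- matrix(0, nrow = 3, ncol = 1)     # bias row missing
check_weights_dimensions(list(w1, w2_bad))  # FALSE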
29 changes: 17 additions & 12 deletions R/nn2poly.R
@@ -46,21 +46,26 @@ NULL
#' @export
#'
nn2poly <- function(object,
q_taylor_vector,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL,
q_taylor_vector = NULL,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL,
...) {
UseMethod("nn2poly")
}

#' @export
nn2poly.default <- function(object, # weights_list and af_string_list
q_taylor_vector,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL,
q_taylor_vector = NULL,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL,
...) {
if (!check_weights_dimensions(object)) {
stop("The list of weights has incorrect dimensions.
Please, check the right dimmensions in the documentation.",
call. = FALSE)
}

result <- nn2poly_algorithm(
weights_list = object,
@@ -76,10 +81,10 @@ nn2poly.default <- function(object, # weights_list and af_string_list

#' @export
nn2poly.keras.engine.training.Model <- function(object,
q_taylor_vector,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL,
q_taylor_vector = NULL,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL,
...) {

model_parameters <- get_model_parameters(object)
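From the caller's side, the new guard should fail fast on a malformed weights list. A minimal sketch, with made-up shapes and activation names rather than the packaged example data:

library(nn2poly)

# First layer: 2 inputs (+ bias row) -> 3 neurons, so the second matrix
# should have 3 + 1 = 4 rows; here it has 5 on purpose
object <- list(matrix(0, nrow = 3, ncol = 3),
               matrix(0, nrow = 5, ncol = 1))
names(object) <- c("tanh", "linear")

# Expected to stop with "The list of weights has incorrect dimensions."
try(nn2poly(object, q_taylor_vector = c(2, 1)))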
14 changes: 10 additions & 4 deletions R/nn2poly_algorithm.R
@@ -48,10 +48,10 @@
#'
nn2poly_algorithm <- function(weights_list,
af_string_list,
q_taylor_vector,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL) {
q_taylor_vector = NULL,
all_partitions = NULL,
store_coeffs = FALSE,
forced_max_Q = NULL) {

# Obtain number of variables (dimension p)
p <- dim(weights_list[[1]])[1] - 1
@@ -74,6 +74,12 @@ nn2poly_algorithm <- function(weights_list,
results <- vector(mode = "list", length = 2 * L)
}

# Create a default q_taylor_vector if it is not given by the user
if (is.null(q_taylor_vector)) {
# 8 for the non-linear layers and 1 for the linear ones
q_taylor_vector <- ifelse(af_string_list == "linear", 1, 8)
}

# Obtain all the derivatives up to the desired Taylor degree at each layer
af_derivatives_list <- obtain_derivatives_list(
af_string_list = af_string_list,
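To make the new default concrete, a small sketch of what the ifelse() call above yields for a hypothetical set of activations (the names are illustrative only):

af_string_list <- c("tanh", "softplus", "linear")

# Taylor truncation order 8 for non-linear activations, 1 for linear ones
q_taylor_vector <- ifelse(af_string_list == "linear", 1, 8)
q_taylor_vector  # 8 8 1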
17 changes: 17 additions & 0 deletions man/check_weights_dimensions.Rd


2 changes: 1 addition & 1 deletion man/nn2poly.Rd


2 changes: 1 addition & 1 deletion man/nn2poly_algorithm.Rd


80 changes: 80 additions & 0 deletions tests/testthat/test-add_constraints.R
@@ -57,3 +57,83 @@ test_that("Test that the 'keep_old_weights' parameter works as intended", {
expect_false(identical(nn_weights, constrained_nn_weights2))

})

test_that("The function works over an already trained network and the connstraints
are fulfiled after training for the constrained network", {
tensorflow::set_random_seed(42)

# load the example
nn2poly_example <- nn2poly_example0

# save some of the variables of the example to improve readability
train_x <- nn2poly_example$train_x
train_y <- nn2poly_example$train_y
af_string_list <- nn2poly_example$af_string_list
q_taylor_vector <- nn2poly_example$q_taylor_vector

# save the number of input variables as the number of columns in train_x
p <- ncol(train_x)

# save the training data as a single variable
train <- cbind(train_x, train_y)


nn <- keras::keras_model_sequential()
keras::`%>%`(nn, keras::layer_dense(units = 2,
activation = "tanh",
input_shape = 2))
keras::`%>%`(nn, keras::layer_dense(units = 3,
activation = "softplus"))
keras::`%>%`(nn, keras::layer_dense(units = 2,
activation = "linear"))

# compile the model
keras::compile(nn,
loss = "mse",
optimizer = keras::optimizer_adam(),
metrics = "mse")

# train the model
keras::fit(nn,
train_x,
train_y,
verbose = 0,
epochs = 5,
validation_split = 0.2
)

constrained_nn <- add_constraints(nn, constraint_type = "l1_norm")



# compile the model
keras::compile(constrained_nn,
loss = "mse",
optimizer = keras::optimizer_adam(),
metrics = "mse")

# train the model
keras::fit(constrained_nn,
train_x,
train_y,
verbose = 0,
epochs = 5,
validation_split = 0.2
)

constrained_nn_parameters <- get_model_parameters(constrained_nn)
constrained_nn_weights <- constrained_nn_parameters$weights_list

# compute the l1-norm for all the weight matrices
weights_l1_norm <- check_weight_constraints(constrained_nn_weights,
maxnorm = list("l1_norm"))

# check if the condition is fulfilled for every matrix
expect_true(
all(
sapply(weights_l1_norm[-length(weights_l1_norm)], # skip the output layer
function(l1_norm) all(l1_norm < 1))
)
)

})
49 changes: 48 additions & 1 deletion tests/testthat/test-nn2poly.R
@@ -30,6 +30,34 @@ test_that("nn2poly_algorithm:
})


test_that("nn2poly_algorithm:
Check algorithm against precomputed example but with the default q_taylor_vector", {
nn2poly_example <- nn2poly_example0

# Get the needed data
object <- nn2poly_example$weights_list
names(object) <- nn2poly_example$af_string_list

result <- nn2poly(
object = object,
store_coeffs = TRUE,
forced_max_Q = 3

)

# Output polynomial order is 3, since forced_max_Q = 3 caps the maximum
# order even with the default q_taylor_vector:
n_terms <- length(result[[length(result)]]$labels)
order <- length(result[[length(result)]]$labels[[n_terms]])
expect_equal(order, 3)

# Desired coefficient is output y at layer 2, neuron 1,
# coefficient "1,1"
label <- result[[4]]$labels[[4]]
coeff <- result[[4]]$values[1,4]
expect_equal(label,c(1,1))
expect_equal(round(coeff,4),-2.2147)
})

test_that("nn2poly_algorithm:
Check that the algorithm provides a correct value for a certain
@@ -56,7 +84,7 @@ test_that("nn2poly_algorithm:
order <- length(result[[length(result)]][[1]][[n_terms]])
expect_equal(order, 2)

# Desired coeffcient is output y at layer 2, neuron 1,
# Desired coefficient is output y at layer 2, neuron 1,
# coefficient "1,1"
label <- result[[4]][[1]][[4]]
coeff <- result[[4]]$values[1,4]
@@ -113,3 +141,22 @@ test_that("nn2poly for a constrained keras.engine.training.Model object", {
expect_equal(result$labels[[6]], c(2,2))
})

test_that("Check that it throws an error when the dimensions of the weights list
are not right.", {
nn2poly_example <- nn2poly_example0

# Get the needed data
object <- nn2poly_example$weights_list
names(object) <- nn2poly_example$af_string_list
object[[2]] <- rbind(object[[2]], c(1,1))

q_taylor_vector <- nn2poly_example$q_taylor_vector

expect_error(
nn2poly(
object = object,
q_taylor_vector = q_taylor_vector,
store_coeffs = TRUE
)
)
})