-
Notifications
You must be signed in to change notification settings - Fork 0
/
learning_cnn.R
109 lines (96 loc) · 3.88 KB
/
learning_cnn.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
#https://towardsdatascience.com/how-to-implement-deep-learning-in-r-using-keras-and-tensorflow-82d135ae4889
# Environment setup ----
# Installs the R tensorflow bindings from GitHub and a conda-based
# "nightly" TensorFlow build, then loads keras on top of it.
# NOTE(review): installing packages at script run time re-installs on every
# execution — consider guarding with requireNamespace() checks.
devtools::install_github("rstudio/tensorflow")
library(tensorflow)
# Creates/uses a conda environment; "nightly" pins an unstable TF build —
# presumably needed at the time of writing, verify it is still required.
install_tensorflow(method = "conda", version="nightly")
#install_tensorflow() #<- apparently this works now?!??
library(keras)
# Point reticulate/keras at the conda env created by install_tensorflow()
use_condaenv('r-tensorflow')
? dataset_cifar10 #to see the help file for details of dataset
# Data preparation ----
# CIFAR-10: 50k training / 10k test colour images, 32x32x3, 10 classes.
cifar <- dataset_cifar10()

# Scale pixel intensities from [0, 255] to [0, 1].
train_x <- cifar[["train"]][["x"]] / 255
# One-hot encode the integer class labels (keras helper to_categorical()
# turns a class vector into a binary class matrix).
train_y <- to_categorical(cifar[["train"]][["y"]], num_classes = 10)

# Same preprocessing for the held-out test split.
test_x <- cifar[["test"]][["x"]] / 255
test_y <- to_categorical(cifar[["test"]][["y"]], num_classes = 10)

# Sanity check: dimensions and sample counts of both splits.
dim(train_x)
cat("No of training samples\t",
    dim(train_x)[[1]],
    "\tNo of test samples\t",
    dim(test_x)[[1]])
#a linear stack of layers
# Model architecture ----
# A small VGG-style CNN: two conv/conv/pool/dropout stages followed by a
# fully connected classifier head.
model <- keras_model_sequential()

model %>%
  # Stage 1: 32 3x3 filters; "same" padding keeps the 32x32 spatial size.
  layer_conv_2d(
    filter = 32,
    kernel_size = c(3, 3),
    padding = "same",
    input_shape = c(32, 32, 3)
  ) %>%
  layer_activation("relu") %>%
  # Second conv (no padding, so spatial size shrinks to 30x30).
  layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
  layer_activation("relu") %>%
  # Max pooling halves the feature-map resolution, cutting computation.
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  # Dropout regularises against overfitting.
  layer_dropout(0.25) %>%
  # Stage 2: same conv/conv/pool/dropout pattern.
  layer_conv_2d(
    filter = 32,
    kernel_size = c(3, 3),
    padding = "same"
  ) %>%
  layer_activation("relu") %>%
  layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
  layer_activation("relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(0.25) %>%
  # Classifier head: flatten feature maps into a vector.
  layer_flatten() %>%
  layer_dense(512) %>%
  layer_activation("relu") %>%
  layer_dropout(0.5) %>%
  # Output layer: one unit per class.
  layer_dense(10) %>%
  # Softmax turns logits into per-class probabilities (as consumed by the
  # categorical cross-entropy loss).
  layer_activation("softmax")
# Optimizer and compilation ----
# Adam (Adaptive Moment Estimation) with a small learning rate;
# `decay` applies learning-rate decay over each update.
# FIX: the `lr` argument is deprecated in current keras releases — the
# supported name is `learning_rate` (value unchanged: 1e-4).
opt <- optimizer_adam(learning_rate = 0.0001, decay = 1e-6)
model %>%
  compile(
    loss = "categorical_crossentropy",
    optimizer = opt,
    metrics = "accuracy"
  )
# Summary of the model and its architecture
summary(model)
# Training ----
# Toggle: train on raw images, or with real-time data augmentation.
data_augmentation <- TRUE
if (!data_augmentation) {
  # Plain training on the normalised images.
  model %>% fit(
    train_x, train_y,
    batch_size = 32,
    epochs = 80,
    validation_data = list(test_x, test_y),
    shuffle = TRUE
  )
} else {
  # Augmentation: feature-wise normalisation plus random rotations,
  # shifts and horizontal flips.
  gen_images <- image_data_generator(
    featurewise_center = TRUE,
    featurewise_std_normalization = TRUE,
    rotation_range = 20,
    width_shift_range = 0.30,
    height_shift_range = 0.30,
    horizontal_flip = TRUE
  )
  # The featurewise_* options require fitting the generator's internal
  # statistics to sample data first.
  gen_images %>% fit_image_data_generator(train_x)
  # NOTE(review): fit_generator() is deprecated in recent keras versions
  # (fit() accepts generators directly); kept here for compatibility with
  # the TF build this script installs.
  # NOTE(review): save_to_dir must be an existing, writable directory —
  # the hard-coded path below is machine-specific; adjust or drop it.
  model %>% fit_generator(
    flow_images_from_data(
      train_x, train_y, gen_images,
      batch_size = 32,
      save_to_dir = "F:/PROJECTS/CNNcifarimages/"
    ),
    # FIX: derive steps from the actual training-set size instead of the
    # hard-coded 50000, so the script survives a different-sized split.
    steps_per_epoch = as.integer(dim(train_x)[[1]] / 32),
    epochs = 80,
    validation_data = list(test_x, test_y)
  )
}
#use save_to_dir argument to specify the directory to save the
#images generated by the Model and to visually check the Model's
#output and ability to classify images.