Setup

library(keras3)

Prepare the data

# Model / data parameters
num_classes <- 10
input_shape <- c(28, 28, 1)

# Load the data and split it between train and test sets
c(c(x_train, y_train), c(x_test, y_test)) %<-% dataset_mnist()

# Scale pixel intensities from [0, 255] down to [0, 1], then append a
# trailing channel axis so each image has shape (28, 28, 1) — the shape
# the convolutional layers below expect.
x_train <- op_expand_dims(x_train / 255, -1)
x_test <- op_expand_dims(x_test / 255, -1)


dim(x_train)
## [1] 60000    28    28     1
dim(x_test)
## [1] 10000    28    28     1
# One-hot encode the integer labels (0-9) as 10-column indicator matrices,
# matching the categorical cross-entropy loss used at compile time.
y_train <- to_categorical(y_train, num_classes)
y_test <- to_categorical(y_test, num_classes)

Build the model

# Two conv + max-pool feature-extraction stages followed by a
# dropout-regularised softmax classification head. keras3 layer functions
# return the model they were applied to, so the whole pipeline can be
# assigned in one expression.
model <-
  keras_model_sequential(input_shape = input_shape) |>
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = "relu") |>
  layer_max_pooling_2d(pool_size = c(2, 2)) |>
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = "relu") |>
  layer_max_pooling_2d(pool_size = c(2, 2)) |>
  layer_flatten() |>
  layer_dropout(rate = 0.5) |>
  layer_dense(units = num_classes, activation = "softmax")

# Print the layer-by-layer architecture; the model is small
# (~35k parameters, all trainable).
summary(model)
## Model: "sequential"
## ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
## ┃ Layer (type)                    ┃ Output Shape           ┃       Param # 
## ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
##  conv2d (Conv2D)                  (None, 26, 26, 32)                320 
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
##  max_pooling2d (MaxPooling2D)     (None, 13, 13, 32)                  0 
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
##  conv2d_1 (Conv2D)                (None, 11, 11, 64)             18,496 
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
##  max_pooling2d_1 (MaxPooling2D)   (None, 5, 5, 64)                    0 
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
##  flatten (Flatten)                (None, 1600)                        0 
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
##  dropout (Dropout)                (None, 1600)                        0 
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
##  dense (Dense)                    (None, 10)                     16,010 
## └─────────────────────────────────┴────────────────────────┴───────────────┘
##  Total params: 34,826 (136.04 KB)
##  Trainable params: 34,826 (136.04 KB)
##  Non-trainable params: 0 (0.00 B)

Train the model

# Training hyperparameters
batch_size <- 128
epochs <- 15

# Categorical cross-entropy pairs with the one-hot labels produced above;
# accuracy is tracked as the reporting metric. compile() modifies the
# model in place, so no reassignment is needed.
compile(
  model,
  loss = "categorical_crossentropy",
  optimizer = "adam",
  metrics = "accuracy"
)

# Train, holding out 10% of the training set for per-epoch validation.
fit(
  model,
  x_train, y_train,
  batch_size = batch_size,
  epochs = epochs,
  validation_split = 0.1
)
## Epoch 1/15
## 422/422 - 5s - 11ms/step - accuracy: 0.8895 - loss: 0.3636 - val_accuracy: 0.9787 - val_loss: 0.0792
## Epoch 2/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9664 - loss: 0.1111 - val_accuracy: 0.9850 - val_loss: 0.0550
## Epoch 3/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9743 - loss: 0.0824 - val_accuracy: 0.9882 - val_loss: 0.0441
## Epoch 4/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9786 - loss: 0.0695 - val_accuracy: 0.9895 - val_loss: 0.0400
## Epoch 5/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9804 - loss: 0.0626 - val_accuracy: 0.9900 - val_loss: 0.0355
## Epoch 6/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9824 - loss: 0.0558 - val_accuracy: 0.9912 - val_loss: 0.0333
## Epoch 7/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9835 - loss: 0.0501 - val_accuracy: 0.9918 - val_loss: 0.0310
## Epoch 8/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9851 - loss: 0.0479 - val_accuracy: 0.9922 - val_loss: 0.0310
## Epoch 9/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9862 - loss: 0.0444 - val_accuracy: 0.9920 - val_loss: 0.0300
## Epoch 10/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9863 - loss: 0.0438 - val_accuracy: 0.9912 - val_loss: 0.0294
## Epoch 11/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9873 - loss: 0.0394 - val_accuracy: 0.9913 - val_loss: 0.0304
## Epoch 12/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9875 - loss: 0.0371 - val_accuracy: 0.9927 - val_loss: 0.0287
## Epoch 13/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9891 - loss: 0.0346 - val_accuracy: 0.9920 - val_loss: 0.0292
## Epoch 14/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9891 - loss: 0.0343 - val_accuracy: 0.9922 - val_loss: 0.0284
## Epoch 15/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9895 - loss: 0.0320 - val_accuracy: 0.9920 - val_loss: 0.0282

Evaluate the trained model

# Measure final performance on the held-out test set; returns a named
# list with the compiled loss and metrics.
score <- evaluate(model, x_test, y_test, verbose = 0)
score
## $accuracy
## [1] 0.9914
##
## $loss
## [1] 0.02402576


rstudio/keras documentation built on July 8, 2024, 3:07 p.m.