library(keras3)
# Model / data parameters
num_classes <- 10
input_shape <- c(28, 28, 1)

# Load the data and split it between train and test sets
c(c(x_train, y_train), c(x_test, y_test)) %<-% dataset_mnist()

# Scale pixel intensities from [0, 255] to the [0, 1] range
x_train <- x_train / 255
x_test <- x_test / 255

# Make sure images have shape (28, 28, 1): add a trailing channel axis
x_train <- op_expand_dims(x_train, -1)
x_test <- op_expand_dims(x_test, -1)

# Confirm the training array now has shape (60000, 28, 28, 1)
dim(x_train)
## [1] 60000 28 28 1
# Confirm the test array also carries the trailing channel dimension
dim(x_test)
## [1] 10000 28 28 1
# Convert class vectors (digit labels 0-9) to binary class matrices
# (one-hot encoding) for use with categorical crossentropy
y_train <- to_categorical(y_train, num_classes)
y_test <- to_categorical(y_test, num_classes)
# Build the model: two conv/max-pool stages, then flatten, dropout
# for regularization, and a softmax classification head.
model <- keras_model_sequential(input_shape = input_shape)
model |>
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = "relu") |>
  layer_max_pooling_2d(pool_size = c(2, 2)) |>
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = "relu") |>
  layer_max_pooling_2d(pool_size = c(2, 2)) |>
  layer_flatten() |>
  layer_dropout(rate = 0.5) |>
  layer_dense(units = num_classes, activation = "softmax")

# Print the layer-by-layer architecture and parameter counts
summary(model)
## Model: "sequential"
## ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
## ┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
## ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
## │ conv2d (Conv2D)                 │ (None, 26, 26, 32)     │           320 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ max_pooling2d (MaxPooling2D)    │ (None, 13, 13, 32)     │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ conv2d_1 (Conv2D)               │ (None, 11, 11, 64)     │        18,496 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ max_pooling2d_1 (MaxPooling2D)  │ (None, 5, 5, 64)       │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ flatten (Flatten)               │ (None, 1600)           │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ dropout (Dropout)               │ (None, 1600)           │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ dense (Dense)                   │ (None, 10)             │        16,010 │
## └─────────────────────────────────┴────────────────────────┴───────────────┘
## Total params: 34,826 (136.04 KB)
## Trainable params: 34,826 (136.04 KB)
## Non-trainable params: 0 (0.00 B)
# Training configuration
batch_size <- 128
epochs <- 15

# One-hot targets + softmax output -> categorical crossentropy loss
model |> compile(
  loss = "categorical_crossentropy",
  optimizer = "adam",
  metrics = "accuracy"
)

# Hold out 10% of the training data for per-epoch validation
model |> fit(
  x_train, y_train,
  batch_size = batch_size,
  epochs = epochs,
  validation_split = 0.1
)
## Epoch 1/15 ## 422/422 - 4s - 10ms/step - accuracy: 0.8895 - loss: 0.3636 - val_accuracy: 0.9787 - val_loss: 0.0793 ## Epoch 2/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9666 - loss: 0.1112 - val_accuracy: 0.9852 - val_loss: 0.0550 ## Epoch 3/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9743 - loss: 0.0824 - val_accuracy: 0.9882 - val_loss: 0.0439 ## Epoch 4/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9786 - loss: 0.0695 - val_accuracy: 0.9897 - val_loss: 0.0399 ## Epoch 5/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9803 - loss: 0.0626 - val_accuracy: 0.9900 - val_loss: 0.0354 ## Epoch 6/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9823 - loss: 0.0557 - val_accuracy: 0.9912 - val_loss: 0.0332 ## Epoch 7/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9837 - loss: 0.0499 - val_accuracy: 0.9918 - val_loss: 0.0310 ## Epoch 8/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9849 - loss: 0.0481 - val_accuracy: 0.9920 - val_loss: 0.0310 ## Epoch 9/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9861 - loss: 0.0444 - val_accuracy: 0.9917 - val_loss: 0.0302 ## Epoch 10/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9863 - loss: 0.0439 - val_accuracy: 0.9913 - val_loss: 0.0297 ## Epoch 11/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9872 - loss: 0.0393 - val_accuracy: 0.9917 - val_loss: 0.0306 ## Epoch 12/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9874 - loss: 0.0372 - val_accuracy: 0.9922 - val_loss: 0.0289 ## Epoch 13/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9888 - loss: 0.0344 - val_accuracy: 0.9918 - val_loss: 0.0293 ## Epoch 14/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9890 - loss: 0.0343 - val_accuracy: 0.9918 - val_loss: 0.0282 ## Epoch 15/15 ## 422/422 - 1s - 2ms/step - accuracy: 0.9894 - loss: 0.0322 - val_accuracy: 0.9915 - val_loss: 0.0284
# Evaluate the trained model on the held-out test set
# (verbose = 0 suppresses the progress bar)
score <- model |> evaluate(x_test, y_test, verbose = 0)
score
## $accuracy ## [1] 0.9912 ## ## $loss ## [1] 0.02448307
# NOTE: the two lines below are website-embed boilerplate left over from
# scraping this example; commented out so the script remains valid R.
# Add the following code to your website.
# For more information on customizing the embed code, read Embedding Snippets.