```r
BiocStyle::markdown()
```
Last modified: `r as.POSIXlt(file.info("rMiW_02_BioImageDbs.Rmd")$mtime, format="%Y-%m-%d %H:%M:%S", tz="Japan")`
Compiled: `r as.POSIXlt(x=Sys.time(), format="%Y-%m-%d %H:%M:%S", tz="Japan")`
```r
library(ExperimentHub)
library(rMiW)
library(EBImage)
library(keras)

packageVersion("keras")
packageVersion("tensorflow")
```
```r
BiocManager::install(version = "3.14")
```
```r
# On macOS
library(reticulate)
#reticulate::install_miniconda(force = T)
#reticulate::use_python("~/Library/r-miniconda/envs/r-reticulate/bin/python")
reticulate::py_config()

# Install pydot
#reticulate::py_install("pydot")

# On CentOS
library(reticulate)
#reticulate::install_miniconda(force = T)
#reticulate::use_python("~/.local/share/r-miniconda/envs/r-reticulate/bin/python")
reticulate::py_config()
```
```r
# For CPU
keras::install_keras()
```
install.packages("R.cache") #system("open ~/Library/Caches/org.R-project.R/R/") R.cache::clearCache("~/Library/Caches/org.R-project.R/R/ExperimentHub")
```r
knitr::purl("./rMiW/vignettes/rMiW_02_BioImageDbs.Rmd", output="./rMiW/vignettes/rMiW_02_BioImageDbs.R")
```
For more information about the BioImageDbs package, please check the "Providing Bioimage Dataset for ExperimentHub" document.
Please also check the metadata (CSV) of BioImageDbs on GitHub.
```r
# Description: Providing Bioimage Dataset for ExperimentHub
browseURL("https://bioconductor.org/packages/release/data/experiment/vignettes/BioImageDbs/inst/doc/BioImageDbs.html")

# Metadata (CSV) for BioImageDbs
browseURL("https://github.com/kumeS/BioImageDbs/blob/main/inst/extdata/v02/metadata_v02.csv")
```
Via the ExperimentHub function, we can obtain the supervised image data as a list of R arrays and their metadata.
Here is an example of a search query for BioImageDbs (currently, snapshotDate(): 2021-10-18 for Bioconductor 3.14).
```r
# Set up the ExperimentHub instance
eh <- ExperimentHub::ExperimentHub()

# All entries of BioImageDbs
AnnotationHub::query(eh, c("BioImageDbs"))

# Query with LM_id0001 (Light Microscopy ID 0001)
AnnotationHub::query(eh, c("BioImageDbs", "LM_id0001"))

# Check the 4D tensor of LM_id0001
(qr <- AnnotationHub::query(eh, c("BioImageDbs", "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary")))

# Select metadata using `qr$`
# Show titles
qr$title

# Show a description
qr$description[3]
```
Note: some small .rds datasets currently do not work via ExperimentHub; they will be removed in a future release. As a workaround, the data can be downloaded directly:
file <- system.file("script", "gdrive_download.sh", package="rMiW") system(paste0("source ", file, " ; gdrive_download 1J-wR0icTCpFgeKPP0iF4cyzD-b1m3tOO ./output.Rds")) ImgData <- readRDS("output.Rds") str(ImgData)
We use `[]` to access the metadata and `[[]]` to retrieve the data instance itself.
Once the data has been downloaded, it can be loaded from the local cache (~/Library/Caches/org.R-project.R/R/).
```r
# Access metadata
qr[3]

# Show metadata
qr[3]$title
qr[3]$description

# Download the dataset of LM_id0001 (LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds)
ImgData <- qr[[3]]
str(ImgData)
#List of 2
# $ Train:List of 2
#  ..$ Train_Original          : num [1:84, 1:512, 1:512, 1] 0.518 0.455 0.455 0.447 0.439 ...
#  ..$ Train_GroundTruth_Binary: num [1:84, 1:512, 1:512, 1] 0 0 0 0 0 0 0 0 0 0 ...
# $ Test :List of 2
#  ..$ Test_Original           : num [1:84, 1:512, 1:512, 1] 0.604 0.467 0.459 0.435 0.408 ...
#  ..$ Test_GroundTruth_Binary : num [1:84, 1:512, 1:512, 1] 0 1 1 1 1 1 1 0 0 0 ...

# Show an image
EBImage::display(EBImage::Image(ImgData$Train$Train_Original[1,,,]), method="raster")
```
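The cache directory can also be queried programmatically through the hub options; a minimal sketch, assuming default ExperimentHub cache settings:

```r
# Query the local cache directory used by ExperimentHub
# (assumes the default cache configuration)
ExperimentHub::getExperimentHubOption("CACHE")
```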
`LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds` is a list of 4D arrays with binary labels for the image segmentation of human HeLa cells on a flat glass.
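As a quick sanity check, we can confirm in plain R that the images are normalized and the labels are binary, matching the ranges shown by `str()` above:

```r
# Pixel intensities should lie within [0, 1]
range(ImgData$Train$Train_Original)

# Ground-truth labels should contain only 0 and 1
unique(as.vector(ImgData$Train$Train_GroundTruth_Binary[1,,,]))
```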
Here we retrieve a GIF animation and check the visualization of the dataset.
```r
# Access metadata
qr[2]

# Show metadata
qr[2]$title
qr[2]$description

# Get the GIF animation
GifData <- qr[[2]]
str(GifData) # Data path
magick::image_read(GifData)
```
`LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif` is an animation file (.gif) of the training dataset of `LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds`.
Currently, only `magick::image_read` is supported for viewing GIF animation files.
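If you want to keep a local copy, the animation can also be written to disk with magick; a short sketch (the output filename here is arbitrary):

```r
# Save the animation to a local file (filename is arbitrary)
gif <- magick::image_read(GifData)
magick::image_write(gif, path = "train_dataset.gif", format = "gif")
```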
We will use the image dimensions (Width, Height, Channel (gray)) for the model construction.
```r
# Dimensions of ImgData
# Image number, Width, Height, Channel (gray)
str(ImgData)
dim(ImgData$Train$Train_Original)

# Use Width, Height, Channel (gray)
ImgShape <- dim(ImgData$Train$Train_Original)[-1]
ImgShape
#[1] 512 512   1
```
We will build the U-Net model with dropout layers.
```r
model <- rMiW::unet2D_v01(shape = ImgShape)
model
```
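To show the general pattern such a model follows, here is a minimal 2D U-Net-style sketch (encoder, bottleneck, decoder with skip connections, dropout). This is an illustration only, not the exact architecture of `rMiW::unet2D_v01`; all layer sizes below are assumptions:

```r
library(keras)

# Minimal U-Net-style sketch (NOT the rMiW::unet2D_v01 architecture)
inputs <- layer_input(shape = c(512, 512, 1))

# Encoder: conv + dropout, then downsample
c1 <- inputs %>%
  layer_conv_2d(filters = 16, kernel_size = 3, activation = "relu", padding = "same") %>%
  layer_dropout(rate = 0.1)
p1 <- c1 %>% layer_max_pooling_2d(pool_size = 2)

c2 <- p1 %>%
  layer_conv_2d(filters = 32, kernel_size = 3, activation = "relu", padding = "same") %>%
  layer_dropout(rate = 0.1)
p2 <- c2 %>% layer_max_pooling_2d(pool_size = 2)

# Bottleneck
b <- p2 %>%
  layer_conv_2d(filters = 64, kernel_size = 3, activation = "relu", padding = "same")

# Decoder: upsample and concatenate the matching encoder output (skip connection)
u2 <- b %>% layer_conv_2d_transpose(filters = 32, kernel_size = 2, strides = 2, padding = "same")
c3 <- layer_concatenate(list(u2, c2)) %>%
  layer_conv_2d(filters = 32, kernel_size = 3, activation = "relu", padding = "same")

u1 <- c3 %>% layer_conv_2d_transpose(filters = 16, kernel_size = 2, strides = 2, padding = "same")
c4 <- layer_concatenate(list(u1, c1)) %>%
  layer_conv_2d(filters = 16, kernel_size = 3, activation = "relu", padding = "same")

# Per-pixel binary output via a 1x1 convolution with sigmoid
outputs <- c4 %>%
  layer_conv_2d(filters = 1, kernel_size = 1, activation = "sigmoid")

sketch_model <- keras_model(inputs = inputs, outputs = outputs)
```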
Here we visualize the U-Net network.
```r
rMiW::plot_model(model=model)

# OR use plot_model from tensorflow
rMiW::Py_plot_model(model=model)
EBImage::display(EBImage::readImage("Model.png"))
```
```r
# Alternatively, run this if the above does not work.
source("https://gist.githubusercontent.com/kumeS/41fed511efb45bd55d468d4968b0f157/raw/b7205c6285422e5166f70b770e1e8674d65f5ea2/DL_plot_modi_v1.2.R")
plot_model_modi(model=model)
```
Here we will choose the optimizer and loss function.
model <- model %>% keras::compile( optimizer = keras::optimizer_rmsprop(learning_rate = 0.01), loss = rMiW::bce_dice_loss, metrics = rMiW::dice_coef )
Check the reference pages at keras.rstudio.com for details.
We should use `drop=FALSE` to avoid changing the array shape when subsetting.
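A tiny plain-R demonstration of why this matters (the array here is a made-up stand-in for the image arrays):

```r
# Without drop = FALSE, R silently drops dimensions of extent 1
a <- array(0, dim = c(3, 4, 4, 1))
dim(a[1:2, , , ])                # 2 4 4   -- channel dimension lost
dim(a[1:2, , , , drop = FALSE]) # 2 4 4 1 -- 4D shape preserved
```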
```r
# Create training data
X <- ImgData$Train$Train_Original[1:20,,,,drop=FALSE]
str(X)
Y <- ImgData$Train$Train_GroundTruth_Binary[1:20,,,,drop=FALSE]
str(Y)

history <- model %>%
  keras::fit(x = X, y = Y,
             batch_size = 2,
             epochs = 2,
             verbose = 1)
```
verbose: verbosity mode (0 = silent, 1 = progress bar, 2 = one line per epoch).
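The history object returned by `keras::fit` can also be plotted directly to inspect the loss and metric curves (a standard keras feature; the output varies by run):

```r
# Plot the training curves recorded in the history object
plot(history)
```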
Training Speed (3rd Nov 2021):
##Skip execution for this chunk. file <- system.file("extdata", "Fit_01.png", package="rMiW") EBImage::display(EBImage::resize(EBImage::readImage(files = file), w = 500), method = "raster")
##Skip execution for this chunk. file <- system.file("extdata", "Fit_02.png", package="rMiW") EBImage::display(EBImage::resize(EBImage::readImage(files = file), w = 500), method = "raster")
We save the model with `save_model_hdf5()`.
model %>% keras::save_model_hdf5("model_v01.h5") #Model weights as R arrays keras::get_weights(model)[[1]]
The `save_model_hdf5` function saves all information about the model: the weight values, the model's configuration (architecture), and the optimizer configuration.
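If only the weights are needed (for example, to transfer them into a freshly built model of the same architecture), keras also offers weight-only saving; a minimal sketch (the filename is arbitrary):

```r
# Save and restore only the weights;
# the architecture itself must be rebuilt separately
model %>% keras::save_model_weights_hdf5("model_v01_weights.h5")
model %>% keras::load_model_weights_hdf5("model_v01_weights.h5")
```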
We will load the saved model, then re-compile and fit it.
We can confirm that training resumes from where the saved model left off.
#Re-read model file <- system.file("extdata", "model_v01.h5", package="rMiW") #Re-load modelR <- keras::load_model_hdf5(file, compile=F) summary(modelR) keras::get_weights(modelR)[[1]] #Re-compile modelR <- modelR %>% keras::compile( optimizer = keras::optimizer_rmsprop(learning_rate = 0.01), loss = rMiW::bce_dice_loss, metrics = rMiW::dice_coef ) #Re-fit: Do not re-fit in this section if(F){ history <- modelR %>% keras::fit(x = X, y = Y, batch_size = 2, epochs = 1, verbose = 1) }
#Re-read model file <- system.file("extdata", "model_v02.h5", package="rMiW") #Re-load modelR2 <- keras::load_model_hdf5(file, compile=F) summary(modelR2) keras::get_weights(modelR2)[[1]] #Re-compile modelR2 <- modelR2 %>% keras::compile( optimizer = keras::optimizer_rmsprop(learning_rate = 0.01), loss = rMiW::bce_dice_loss, metrics = rMiW::dice_coef )
Here we evaluate the model using the `keras::evaluate` function.
## Model evaluation Score <- modelR %>% keras::evaluate(X, Y, verbose = 1) cat(paste0('Train loss:', round(Score[[1]], 4), '\nTrain accuracy:', round(Score[[2]], 4))) #model_v01 (training: 60 epochs) #Train loss:1.3279 #Train accuracy:0.8672
The model is used to predict the binarization at the pixel level.
```r
Y_pred <- rMiW::model.pred(model=modelR, x=X)
```
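If the predictions are per-pixel probabilities, they can be thresholded into a binary mask; a sketch assuming `Y_pred` holds probabilities (the 0.5 cutoff is an arbitrary choice, not a value prescribed by rMiW):

```r
# Convert per-pixel probabilities into a binary mask
# (assumes Y_pred holds probabilities; 0.5 cutoff is arbitrary)
Y_bin <- (Y_pred > 0.5) * 1
str(Y_bin)
```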
We use the `ImageView2D` function for visualization.
```r
for(n in 1:20){
  #n <- 2
  rMiW::ImageView2D(X, Y_pred, ImgN=n)
}
```
We can also visualize the results with the `ImageView2D_pred` function.
```r
# Image: 2
ImageView2D_pred(ImgArray_x=X, ImgArray_y=Y, ImgArray_pred=Y_pred, ImgN=2)

# Image: 6
ImageView2D_pred(ImgArray_x=X, ImgArray_y=Y, ImgArray_pred=Y_pred, ImgN=6)

# Image: All
for(N in 1:20){
  ImageView2D_pred(ImgArray_x=X, ImgArray_y=Y, ImgArray_pred=Y_pred, ImgN=N)
}
```
```r
# Data: held-out images 21-40
Test_X <- ImgData$Train$Train_Original[21:40,,,,drop=FALSE]
str(Test_X)
Test_Y <- ImgData$Train$Train_GroundTruth_Binary[21:40,,,,drop=FALSE]
str(Test_Y)

## Model evaluation
Score <- modelR %>% keras::evaluate(Test_X, Test_Y, verbose = 1)
cat(paste0('Test loss:', round(Score[[1]], 4),
           '\nTest Dice coefficient:', round(Score[[2]], 4)))
#model_v01 (training: 60 epochs)
#Test loss:1.1545
#Test Dice coefficient:0.8758
```
We use the `ImageView2D` function for visualization.
```r
Test_Y_pred <- rMiW::model.pred(model=modelR, x=Test_X)

# Visualization
for(N in 1:20){
  ImageView2D_pred(ImgArray_x=Test_X, ImgArray_y=Test_Y, ImgArray_pred=Test_Y_pred, ImgN=N)
}
```
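To inspect performance per image rather than as an aggregate, a plain-R Dice coefficient can be computed for each image pair; this helper is illustrative only, not part of rMiW, and the 0.5 cutoff is an assumption:

```r
# Illustrative per-image Dice coefficient (not an rMiW function)
dice_r <- function(truth, pred, cutoff = 0.5) {
  t <- truth > cutoff
  p <- pred > cutoff
  2 * sum(t & p) / (sum(t) + sum(p))
}

# Dice score for the first held-out image
dice_r(Test_Y[1,,,1], Test_Y_pred[1,,,1])
```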
Here we evaluate the longer-trained model (model_v02) using the `keras::evaluate` function.
```r
## Model evaluation
Score <- modelR2 %>% keras::evaluate(X, Y, verbose = 1)
cat(paste0('Train loss:', round(Score[[1]], 4),
           '\nTrain Dice coefficient:', round(Score[[2]], 4)))
#model_v02 (training: 2000 epochs)
#Train loss:0.0051
#Train Dice coefficient:0.9978
```
The model is used to predict the binarization at the pixel level.
```r
Y_pred <- rMiW::model.pred(model=modelR2, x=X)
```
We use the `ImageView2D` function for visualization.
```r
for(n in 1:20){
  #n <- 2
  rMiW::ImageView2D(X, Y_pred, ImgN=n)
}
```
We can also visualize the results with the `ImageView2D_pred` function.
```r
# Image: 2
ImageView2D_pred(ImgArray_x=X, ImgArray_y=Y, ImgArray_pred=Y_pred, ImgN=2)

# Image: 6
ImageView2D_pred(ImgArray_x=X, ImgArray_y=Y, ImgArray_pred=Y_pred, ImgN=6)

# Image: All
for(N in 1:20){
  ImageView2D_pred(ImgArray_x=X, ImgArray_y=Y, ImgArray_pred=Y_pred, ImgN=N)
}
```
```r
# Data: held-out images 21-40
Test_X <- ImgData$Train$Train_Original[21:40,,,,drop=FALSE]
str(Test_X)
Test_Y <- ImgData$Train$Train_GroundTruth_Binary[21:40,,,,drop=FALSE]
str(Test_Y)

## Model evaluation
Score <- modelR2 %>% keras::evaluate(Test_X, Test_Y, verbose = 1)
cat(paste0('Test loss:', round(Score[[1]], 4),
           '\nTest Dice coefficient:', round(Score[[2]], 4)))
#model_v02 (training: 2000 epochs)
#Test loss:0.8893
#Test Dice coefficient:0.9292
```
We use the `ImageView2D` function for visualization.
```r
Test_Y_pred <- rMiW::model.pred(model=modelR2, x=Test_X)

# Visualization
for(N in 1:20){
  ImageView2D_pred(ImgArray_x=Test_X, ImgArray_y=Test_Y, ImgArray_pred=Test_Y_pred, ImgN=N)
}
```
```r
sessionInfo()
```