library(imager)
library(dplyr)
library(ImageAlignR)
if (!"ShoeSampleData" %in% installed.packages()) {
  devtools::install_github("srvanderplas/ShoeData")
}

knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#",
  cache = TRUE,
  fig.width = 5, fig.height = 9
)
# Paths to the sample shoe scans shipped with ShoeSampleData
imlinks <- system.file(package = "ShoeSampleData", "extdata/") %>%
  list.files(full.names = TRUE) %>%
  sort()

# Preprocess a scan: trim the scanner border, binarize, remove small specks,
# crop to the print, drop everything below row ylim, and pad the result with
# 20 pixels of background on each side.
clean_shoe_img <- function(im, ylim = 3500) {
  im %>%
    crop.borders(nPix = 170) %>%
    grayscale() %>%
    threshold() %>%
    clean(5) %>%
    autocrop() %>%
    crop.bbox(., Yc(.) < ylim) %>%
    autocrop() %>%
    pad(20, axes = "xy", val = max(.))
}

# Clean each scan; the seventh image needs a slightly higher ylim cutoff
imgs <- purrr::map2(imlinks[1:8], c(rep(3500, 6), 3750, 3500),
                    function(x, y) load.image(x) %>% clean_shoe_img(ylim = y)) %>%
  as.imlist()
plot(imgs)

We need to resize the images so that they are the same size:

imgdims <- sapply(imgs, dim)

# A blank canvas (all background) as wide and as tall as the largest image in each dimension
canonical_image <- matrix(1, nrow = max(imgdims[1, ]), ncol = max(imgdims[2, ])) %>%
  as.cimg()

imgs <- lapply(imgs, function(.) images_resize(., canonical_image, scale = FALSE)[[1]]) %>%
  as.imlist()

We can then overlay the images to see how far apart they are:

par(mfrow = c(1, 4))
plot(imgs[[1]] + imgs[[2]], main = "Initial\nImages 1 and 2", axes = FALSE)
plot(imgs[[3]] + imgs[[4]], main = "Check In 1\nImages 3 and 4", axes = FALSE)
plot(imgs[[5]] + imgs[[6]], main = "Check In 2\nImages 5 and 6", axes = FALSE)
plot(imgs[[7]] + imgs[[8]], main = "Check In 3\nImages 7 and 8", axes = FALSE)

We'll align images within each timepoint; this removes the variability due to the individual collecting the data, as well as the variability due to wear over time.

Step 1: Keypoint Detection

hkp <- purrr::map(imgs, harris_keypoints, sigma = 3)

plots <- purrr::map(hkp, function(x) ggplot2::qplot(x$centers$mx, -x$centers$my, colour = I("red")))

gridExtra::grid.arrange(grobs = plots, ncol = 4)
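
For intuition, the Harris measure that underlies these keypoints can be sketched directly with imager's gradient tools. The function below is only an illustration of the standard Harris corner response; harris_keypoints presumably combines a response like this with non-maximum suppression and clustering to produce the centers plotted above.

# Illustrative sketch of the Harris corner response (not ImageAlignR's
# implementation): corners are pixels where the smoothed structure tensor
# has two large eigenvalues.
harris_response <- function(im, sigma = 3, k = 0.04) {
  g   <- imager::imgradient(im, "xy")
  Ixx <- imager::isoblur(g$x^2, sigma)
  Iyy <- imager::isoblur(g$y^2, sigma)
  Ixy <- imager::isoblur(g$x * g$y, sigma)
  (Ixx * Iyy - Ixy^2) - k * (Ixx + Iyy)^2  # det(M) - k * trace(M)^2
}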

Step 2: Image Orientation

Calculating the dominant orientations for each image as a whole produces:

angles <- purrr::map(imgs, oriented_gradients, sigma = 2, show_plot = FALSE)
angles
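
A dominant orientation summarizes where the image gradients point, weighted by how strong they are. The sketch below illustrates one way to compute such an angle; oriented_gradients is ImageAlignR's own implementation and may differ in its smoothing, binning, and the units of the angles it returns, so treat this as illustrative only.

# Illustrative sketch: magnitude-weighted histogram of gradient directions,
# returning the center (in radians) of the most heavily weighted bin.
dominant_angle <- function(im, sigma = 2, nbins = 36) {
  g   <- imager::imgradient(imager::isoblur(im, sigma), "xy")
  gx  <- as.vector(g$x)
  gy  <- as.vector(g$y)
  ang <- atan2(gy, gx)                      # gradient direction, in [-pi, pi]
  mag <- sqrt(gx^2 + gy^2)                  # gradient magnitude
  bin <- pmin(nbins, floor((ang + pi) / (2 * pi) * nbins) + 1)
  w   <- tapply(mag, bin, sum)              # total magnitude falling in each bin
  -pi + (as.numeric(names(w)[which.max(w)]) - 0.5) * 2 * pi / nbins
}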

Step 3: Feature Detection

For each dominant orientation, we pull features from a 40x40 pixel area around each keypoint. These features will be used to identify points of similarity between the two images in a pair.

This step takes by far the longest.
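
For intuition, a single oriented descriptor of this kind can be sketched as: rotate the image about the keypoint by its dominant orientation, then flatten the 40x40 neighbourhood into a feature vector. The helper below is hypothetical and for illustration only; descriptor_orientation does the real extraction, and I am assuming here that the angle is in degrees and that keypoints sit far enough from the image border.

# Hypothetical illustration -- not ImageAlignR's descriptor_orientation.
# Rotating about the keypoint makes descriptors comparable across orientations;
# the 40x40 patch centred on the keypoint is then flattened into a vector.
patch_descriptor <- function(im, x, y, theta_deg, half = 20) {
  rot <- imager::imrotate(im, -theta_deg, cx = x, cy = y, boundary = 1)
  as.vector(rot[(x - half):(x + half - 1), (y - half):(y + half - 1), 1, 1])
}

The get_kpf helper below builds the actual descriptors, one for every combination of keypoint and dominant orientation: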

# Pair every keypoint center with every dominant angle, run descriptor_orientation
# on each combination, and stack the resulting feature vectors row-wise.
get_kpf <- function(angles, hkp, im) {
  kpa <- tibble::tibble(angle = angles, v = list(hkp$centers)) %>%
    tidyr::unnest(v) %>%
    dplyr::rename(theta = angle, x = mx, y = my) %>%
    mutate(idx = 1:n()) %>%
    rowwise() %>%
    tidyr::nest(-theta, -idx, .key = "v") %>%  # one single-row data frame per keypoint
    select(-idx)
  purrr::pmap(list(theta = kpa$theta, v = kpa$v), descriptor_orientation, im = im) %>%
    do.call("rbind", .)
}

kpf <- purrr::pmap(list(angles = angles, hkp = hkp, im = imgs), get_kpf)

Step 4: Match points

Match points are calculated using the K nearest neighbors algorithm, combined with some thresholding by distance.

hkp_centers <- lapply(hkp, function(x) x$centers)
match_idxs <- list(c(1, 2), c(3, 4), c(5, 6), c(7, 8))

match_points <- purrr::map(match_idxs,
  ~knn_points(kpf[[.[1]]], kpf[[.[2]]],
              hkp_centers[[.[1]]], hkp_centers[[.[2]]],
              ratio = .85, show_plot = TRUE))
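
The ratio argument presumably plays the role of a Lowe-style ratio test: a match is kept only if a descriptor's nearest neighbour in the other image is clearly closer than its second-nearest neighbour. A minimal sketch of that idea (illustration only, not knn_points' actual implementation; it assumes the rows of the descriptor matrices correspond to keypoints):

# Illustrative ratio-test matching of two descriptor matrices (rows = keypoints)
ratio_match <- function(desc_a, desc_b, ratio = 0.85) {
  matches <- lapply(seq_len(nrow(desc_a)), function(i) {
    d  <- sqrt(colSums((t(desc_b) - desc_a[i, ])^2))  # distances to every row of desc_b
    nn <- order(d)[1:2]                               # nearest and second-nearest neighbour
    if (d[nn[1]] < ratio * d[nn[2]]) c(a = i, b = nn[1]) else NULL
  })
  do.call(rbind, matches)                             # matrix of matched index pairs
}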

Step 5: RANSAC

RANSAC is then used to find the set of matched points that are consistent with a single homography.
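
The core of RANSAC is simple: repeatedly fit a transformation to a small random sample of the matches, and keep the fit that the largest number of matches agrees with (the inliers). The sketch below illustrates that loop with an affine fit for brevity; ImageAlignR's ransac estimates a full homography, and its interface and defaults differ.

# Illustrative RANSAC loop (simplified: affine fit instead of a full homography).
# pts_a, pts_b: n x 2 matrices of matched (x, y) coordinates.
ransac_affine <- function(pts_a, pts_b, n_iter = 1000, tol = 3) {
  best <- list(inliers = integer(0))
  for (i in seq_len(n_iter)) {
    s <- sample(nrow(pts_a), 3)                              # minimal sample for an affine map
    M <- tryCatch(solve(cbind(pts_a[s, ], 1), pts_b[s, ]),   # fit [x y 1] %*% M = [x' y']
                  error = function(e) NULL)
    if (is.null(M)) next                                     # degenerate (collinear) sample
    err <- sqrt(rowSums((cbind(pts_a, 1) %*% M - pts_b)^2))  # reprojection error, all matches
    inl <- which(err < tol)
    if (length(inl) > length(best$inliers)) best <- list(M = M, inliers = inl)
  }
  best
}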

ransac_points <- purrr::map(match_points, ~ransac(.$points_a, .$points_b, N = 5000))
par(mfrow = c(1, 4))
for (i in seq_along(match_idxs)) {
  idx1 <- match_idxs[[i]][1]
  idx2 <- match_idxs[[i]][2]
  plot(imgs[[idx1]], main = sprintf("Image %d \n+ Image %d", idx1, idx2), axes = FALSE)
  hkp[[idx1]]$centers %$% points(mx, my, col = "orange")  # all detected keypoints
  points(match_points[[i]]$points_a[ransac_points[[i]]$inliers, ],
         col = "purple", pch = 16)  # matches retained as RANSAC inliers
}

Step 6: Image Warping

The homography can be used to warp one image onto the other:

map_fcns <- purrr::map(ransac_points, function(.) map_affine_gen(.$homography))


imgs_warp <- purrr::map2(imgs[c(1, 3, 5, 7)], map_fcns, .f = imwarp,
                         direction = "backward", boundary = "neumann") %>%
  as.imlist()

imgs_unwarp <- imgs[c(2, 4, 6, 8)]
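
For reference, a 3x3 homography H maps image coordinates in homogeneous form: (x, y, 1) is multiplied by H and the result is divided by its third component. A generic sketch of that mapping is below; map_affine_gen wraps a mapping of this kind into the per-pixel function that imwarp applies, though the details here are my assumption rather than ImageAlignR's code.

# Generic sketch: apply a 3x3 homography H to vectors of x and y coordinates.
apply_homography <- function(H, x, y) {
  p <- H %*% rbind(x, y, 1)                # homogeneous coordinates, one column per point
  list(x = p[1, ] / p[3, ], y = p[2, ] / p[3, ])
}

With direction = "backward", imwarp computes, for every pixel of the output image, the input location it should be sampled from.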

We can then overlay the two images:

# Mask that is TRUE only where both the warped and unwarped images are nonzero
# (used as the green channel below)
blank_channel <- purrr::map2(imgs_warp, imgs_unwarp, ~as.cimg(.x > 0 & .y > 0)) %>%
  as.imlist()

# Stack the warped image, the joint mask, and the unwarped image as the R, G, B channels
overlaid_images <- purrr::pmap(list(r = imgs_warp, g = blank_channel, b = imgs_unwarp),
                               function(r, g, b) imappend(imlist(r, g, b), axis = "c")) %>%
  as.imlist()

par(mfrow = c(1, 4))
for (i in seq_along(overlaid_images)) {
  plot(overlaid_images[[i]],
       main = sprintf("Image %d warp\n + Image %d", match_idxs[[i]][1], match_idxs[[i]][2]),
       axes = FALSE)
}

Areas that are in the first image only are shown in red; areas in the second image only are shown in blue. Areas in both images are shown in black.


