An example showing the advantage of being able to collect many expressions and pack them into a single extend_se() node. This example may seem extreme or unnatural, but once you expose a system to enough users you see far more extreme use cases than you would at first expect. We have in fact seen many tens of columns added to a data mart in one large irregular block (that is, not the same transform for each column) by building up long pipelines, so this simplified example is relevant to production deployments.
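
To fix ideas, here is a minimal local sketch (it assumes the rquery and rqdatatable packages attached below; the column names are made up for illustration). extend_se() accepts a whole named vector of expressions and packs them into a single operator node:

d0 <- data.frame(x = 1:3)
ops0 <- local_td(d0) %.>%
  extend_se(., c(y = "x + 1", z = "x * 2"))
d0 %.>% ops0
# both new columns are produced by one extend node,
# which translates to a single SELECT layer in SQL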

First set up our packages, database connection, and remote table.

library("dplyr")
library("rquery")
library("microbenchmark")
library("ggplot2")
library("WVPlots")
library("rqdatatable")
library("cdata")

use_spark <- TRUE
# connect
if(use_spark) {
  conf <- sparklyr::spark_config()
  conf$spark.yarn.am.cores <- 2
  conf$spark.executor.cores <- 2
  mem_size <- "4G"
  conf$spark.executor.memory <- mem_size
  conf$spark.yarn.am.memory <- mem_size 
  conf$`sparklyr.shell.driver-memory` <- mem_size
  conf$`sparklyr.shell.executor-memory` <- mem_size
  conf$`spark.yarn.executor.memoryOverhead` <- mem_size
  con <- sparklyr::spark_connect(version='2.2.0', 
                                 master = "local",
                                 config = conf)
} else {
  con <- DBI::dbConnect(RPostgreSQL::PostgreSQL(),
                        host = 'localhost',
                        port = 5432,
                        user = 'johnmount',
                        password = '')
}

# configure rquery connection options
dbopts <- rq_connection_tests(con)
db_hdl <- rquery_db_info(
  connection = con,
  is_dbi = TRUE,
  connection_options = dbopts)
print(db_hdl)

nrow <- 1000000
td <- rq_copy_to(db_hdl, 
                 "d",
                 data.frame(x = seq_len(nrow)),
                 overwrite = TRUE,
                 temporary = TRUE)

# dplyr handle to the same remote table
tbl <- dplyr::tbl(con, "d")

ncol <- 100

rquery torture function: add 100 columns to a 1,000,000-row table.

rquery_fn <- function(db_hdl, td, ncol, return_sql = FALSE) {
  expressions <- paste0("x + ", seq_len(ncol))
  names(expressions) <- paste0("x_", seq_len(ncol))
  ops <- td %.>%
    extend_se(., expressions) %.>%
    select_rows_nse(., x == 3)
  if(return_sql) {
    return(to_sql(ops, db_hdl))
  }
  # force execution
  db_hdl %.>% ops
}

cat(rquery_fn(db_hdl, td, 5, return_sql = TRUE))

rquery_fn(db_hdl, td, 5)

The row-selection step is there to cut down the in-memory cost of bringing the result back to R. Obviously we could optimize this example away by moving the filter earlier in the pipeline. We ask the reader to take it as a stand-in for a more complicated (and nastier) real-world example where such optimizations are not available.
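
For reference, the "optimized away" variant would simply move the filter ahead of the column additions. A sketch (re-using the expressions vector built inside rquery_fn; shown for illustration, not benchmarked):

# illustrative only: filter first, then add the columns
ops_filtered_first <- td %.>%
  select_rows_nse(., x == 3) %.>%
  extend_se(., expressions)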

Same torture for dplyr.

dplyr_fn <- function(tbl, ncol, return_sql = FALSE) {
  pipeline <- tbl
  xvar <- rlang::sym("x")
  for(i in seq_len(ncol)) {
    res_i <- rlang::sym(paste0("x_", i))
    # splice in i's current value with !!: a bare i would be captured
    # lazily, and every column would see the final loop value at SQL build time
    pipeline <- pipeline %>%
      mutate(., !!res_i := !!xvar + !!i)
  }
  pipeline <- pipeline %>%
    filter(., x == 3)
  if(return_sql) {
    return(dbplyr::remote_query(pipeline))
  }
  # force execution
  pipeline %>% collect(.)
}

cat(dplyr_fn(tbl, 5, return_sql = TRUE))

dplyr_fn(tbl, 5)

We can also collect expressions efficiently using seplyr (seplyr is a thin wrapper over dplyr, so seplyr's mutate_se() method is essentially a demonstration of how to do the same thing directly with rlang).
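
To make that concrete, here is a rough sketch of the idea behind seplyr::mutate_se() (this is not seplyr's actual implementation, which adds checking and expression-dependency handling; the sketch assumes only rlang and dplyr):

mutate_se_sketch <- function(tbl, expressions) {
  # parse each right-hand side string into an unevaluated expression,
  # keeping the vector's names as the new column names
  exprs <- lapply(expressions, rlang::parse_expr)
  # splice all the expressions into a single mutate() call
  tbl %>% mutate(!!!exprs)
}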

seplyr_fn <- function(tbl, ncol, return_sql = FALSE) {
  expressions <- paste0("x + ", seq_len(ncol))
  names(expressions) <- paste0("x_", seq_len(ncol))
  pipeline <- tbl %>%
    seplyr::mutate_se(., expressions) %>%
    filter(., x == 3)
  if(return_sql) {
    return(dbplyr::remote_query(pipeline))
  }
  # force execution
  pipeline %>% collect(.)
}

cat(seplyr_fn(tbl, 5, return_sql = TRUE))

seplyr_fn(tbl, 5)

Time the functions. The timings will not be exact, given issues such as cluster state and query caching.

timings <- microbenchmark(
  rquery = rquery_fn(db_hdl, td, ncol),
  dplyr = dplyr_fn(tbl, ncol),
  seplyr = seplyr_fn(tbl, ncol),
  times = 10L)

saveRDS(timings, "CollectExprs_timings.RDS")

Present the results.

print(timings)
#autoplot(timings)

timings <- as.data.frame(timings)
timings$seconds <- timings$time/10^9 # microbenchmark reports time in nanoseconds
timings$method <- factor(timings$expr)
timings$method <- reorder(timings$method, timings$seconds)
WVPlots::ScatterBoxPlotH(timings, "seconds", "method", "task time by method")

tratio <- timings %.>%
  project_nse(., 
              groupby = "method", 
              mean_seconds = mean(seconds)) %.>%
  pivot_to_rowrecs(., 
                   columnToTakeKeysFrom = "method", 
                   columnToTakeValuesFrom = "mean_seconds", 
                   rowKeyColumns = NULL) %.>%
  extend_nse(.,
             ratio = dplyr/rquery)

tratio[]

ratio_str <- sprintf("%.2g", tratio$ratio)

rquery is about `r ratio_str` times faster than dplyr for this task, at this scale, for this data implementation and configuration (we have also seen an over 8 times difference for this example on PostgreSQL).

if(use_spark) {
  sparklyr::spark_disconnect(con)
} else {
  DBI::dbDisconnect(con)
}

