knitr::opts_chunk$set(echo = TRUE) # mandatory libraries library(tidyverse) library(tidyr) # speed-enhancing libraries library(data.table) # report-enhancing libraries library(DT) # libraries just for introduction/study site sections library(leaflet) library(dataRetrieval) library(dplyr) library(plotly) # default values input_folder <- paste0(system.file("extdata", package = "HyMETT"),"/") site <- '01013500' # example site # Decimal Lat and long. lat <- 47.2375 long <- -68.58278 # .drop = c('V1') # extra column of row numbers to drop during CSV import
#------- Import all necessary data ## GOF - annual gof <- paste0(input_folder, site, '_GOF.csv') %>% data.table::fread(input = ., data.table = F) %>% tibble::as_tibble() ## Streamflow - Daily daily_sim <- paste0(input_folder, site, '_mod_daily.csv') %>% data.table::fread(input = ., data.table = F) %>% dplyr::mutate(date = as.Date(Date)) %>% tibble::as_tibble() daily_obs <- paste0(input_folder, site, '_obs_daily.csv') %>% data.table::fread(input = ., data.table = F) %>% dplyr::mutate(date = as.Date(Date)) %>% tibble::as_tibble() ## Streamflow - Annual annual_sim <- paste0(input_folder, site, '_mod_annual.csv') %>% data.table::fread(input = ., data.table = F) %>% tibble::as_tibble() annual_obs <- paste0(input_folder, site, '_obs_annual.csv') %>% data.table::fread(input = ., data.table = F) %>% tibble::as_tibble() ## Trends - Annual annual_trend_comp <- paste0(input_folder, site, '_com_trend.csv') %>% data.table::fread(input = ., data.table = F) %>% tibble::as_tibble() annual_trend_sim <- paste0(input_folder, site, '_mod_trend.csv') %>% data.table::fread(input = ., data.table = F) %>% tibble::as_tibble() annual_trend_obs <- paste0(input_folder, site, '_obs_trend.csv') %>% data.table::fread(input = ., data.table = F) %>% tibble::as_tibble()
This is an automated report for a single site generated with the HyMETT tool in R. The site included in this report is:
cat(site)
This report provides goodness-of-fit measurements between modeled and observed streamflow at this gauged site. Measures of model skill applied here are percent bias, volumetric efficiency (VE), Spearman's rho correlation, and normalized root mean square error (NRMSE). These metrics are applied to numerous statistics that summarize streamflow at daily, monthly, annual, and period-of-record timesteps. The streamflow statistics include the mean, minima, maxima, percent of annual flow, standard deviation, percentiles, and multi-day high flows (Q).
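The streamflow statistics summarized in this report are pre-computed by HyMETT and imported in the setup chunk above. As an illustration only, the sketch below shows how a daily series could be reduced to a few of these annual statistics; the data frame `daily`, its columns `date` and `value`, and the output column names are placeholders rather than HyMETT's actual interface.

```r
# Illustrative sketch only -- not the HyMETT implementation.
# Assumes a data frame `daily` with columns `date` (Date) and `value` (discharge, cfs).
library(dplyr)

annual_stats <- daily %>%
  # USGS water year: October-December dates belong to the following water year
  dplyr::mutate(WY = ifelse(lubridate::month(date) >= 10,
                            lubridate::year(date) + 1,
                            lubridate::year(date))) %>%
  dplyr::group_by(WY) %>%
  dplyr::summarise(annual_mean = mean(value, na.rm = TRUE),
                   annual_min  = min(value, na.rm = TRUE),
                   annual_max  = max(value, na.rm = TRUE),
                   annual_sd   = sd(value, na.rm = TRUE),
                   annual_10_percentile = quantile(value, 0.10, na.rm = TRUE),
                   annual_90_percentile = quantile(value, 0.90, na.rm = TRUE),
                   .groups = 'drop')
```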
## Map site location via leaflet # Attempt to retrieve site coordinates from NWIS (may fail without internet access) site_coord <- tryCatch({ dataRetrieval::readNWISsite(siteNumbers = site) }, error = function(e){ NULL }) # Fall back to the example coordinates defined in the setup chunk if retrieval failed if (is.null(site_coord)){ site_coord <- data.frame(site_no = site, dec_lat_va = lat, dec_long_va = long) } # Create map object if coordinates are available if (!is.null(site_coord)){ leaflet::leaflet(width = "100%") %>% leaflet::addTiles() %>% leaflet::addMarkers(data = site_coord, lng = ~dec_long_va, lat = ~dec_lat_va, popup = ~site_no) } # Create text warning if site coordinates were not available coord_warn <- ifelse(is.null(site_coord), "Warning: Study site coordinates were not retrievable through the 'dataRetrieval' R package.", "") # (warning is inserted in the text below)
`r coord_warn`
# Show mean values over POR tibble::tibble(Site = site, `Mean Daily Q - Observed [cfs]` = round(mean(daily_obs$value, na.rm = T), 2), `Mean Daily Q - Simulated [cfs]` = round(mean(daily_sim$value, na.rm = T), 2)) %>% DT::datatable(., options = list(dom = 't', ordering = F))
\newpage
# Assign categories to GOF metrics gof_cat <- gof %>% dplyr::mutate(category = NA) %>% dplyr::mutate(category = ifelse(grepl('mean', `annual_stat`, ignore.case = T), 'Mean', category)) %>% dplyr::mutate(category = ifelse(grepl('max', `annual_stat`, ignore.case = T), 'Max', category)) %>% dplyr::mutate(category = ifelse(grepl('min', `annual_stat`, ignore.case = T), 'Min', category)) %>% dplyr::mutate(category = ifelse(grepl('percent_annual', `annual_stat`, ignore.case = T), 'Percent Annual', category)) %>% dplyr::mutate(category = ifelse(grepl('CVD', `annual_stat`, ignore.case = T), 'CVD', category)) %>% dplyr::mutate(category = ifelse(grepl('percentile', `annual_stat`, ignore.case = T), 'Percentile', category)) %>% dplyr::mutate(category = ifelse(grepl('sd', `annual_stat`, ignore.case = T), 'SD', category)) %>% dplyr::mutate(category = ifelse(grepl('q', `annual_stat`, ignore.case = T), 'Q', category)) %>% dplyr::mutate_if(is.numeric, round, digits = 2)
gof_cat %>% dplyr::filter(category == 'Mean') %>% dplyr::select(-category) %>% dplyr::rename(Month = `annual_stat`) %>% dplyr::mutate(Month = gsub('_mean', '', Month)) %>% dplyr::mutate(Month = tools::toTitleCase(Month)) %>% dplyr::mutate(Month = ifelse(Month == 'may', 'May', Month)) %>% DT::datatable(data = ., caption = 'Monthly mean error statistics', fillContainer = F, options = list(dom = 't', pageLength = 13))
gof_cat %>% dplyr::filter(category == 'Min') %>% dplyr::select(-category) %>% dplyr::rename(Month = `annual_stat`) %>% dplyr::mutate(Month = gsub('_min', '', Month)) %>% dplyr::mutate(Month = tools::toTitleCase(Month)) %>% dplyr::mutate(Month = ifelse(Month == 'may', 'May', Month)) %>% DT::datatable(data = ., caption = 'Monthly minima error statistics', fillContainer = F, options = list(dom = 't', pageLength = 13))
gof_cat %>% dplyr::filter(category == 'Max') %>% dplyr::select(-category) %>% dplyr::rename(Month = `annual_stat`) %>% dplyr::mutate(Month = gsub('_max', '', Month)) %>% dplyr::mutate(Month = tools::toTitleCase(Month)) %>% dplyr::mutate(Month = ifelse(Month == 'may', 'May', Month)) %>% DT::datatable(data = ., caption = 'Monthly maxima error statistics', fillContainer = F, options = list(dom = 't', pageLength = 13))
gof_cat %>% dplyr::filter(category == 'Percent Annual') %>% dplyr::select(-category) %>% dplyr::rename(Month = `annual_stat`) %>% dplyr::mutate(Month = gsub('_percent_annual', '', Month)) %>% dplyr::mutate(Month = tools::toTitleCase(Month)) %>% dplyr::mutate(Month = ifelse(Month == 'may', 'May', Month)) %>% DT::datatable(data = ., caption = 'Monthly percent annual error statistics', fillContainer = F, options = list(dom = 't', pageLength = 13))
gof_cat %>% dplyr::filter(category == 'Percentile') %>% dplyr::select(-category) %>% dplyr::rename(`Annual Percentile` = `annual_stat`) %>% dplyr::mutate(`Annual Percentile` = gsub('_percentile', '', `Annual Percentile`)) %>% dplyr::mutate(`Annual Percentile` = gsub('annual_', '', `Annual Percentile`)) %>% dplyr::mutate(`Annual Percentile` = ifelse(`Annual Percentile` == 1, '1st', paste0(`Annual Percentile`, 'th'))) %>% DT::datatable(data = ., caption = 'Annual percentiles error statistics', fillContainer = F, options = list(dom = 't', pageLength = 13))
gof_cat %>% dplyr::filter(category == 'SD') %>% dplyr::select(-category) %>% dplyr::rename(Period = `annual_stat`) %>% dplyr::mutate(Period = gsub('_sd', '', Period)) %>% dplyr::mutate(Period = tools::toTitleCase(Period)) %>% dplyr::mutate(Period = ifelse(Period == 'may', 'May', Period)) %>% DT::datatable(data = ., caption = 'Standard deviation error statistics', fillContainer = F, options = list(dom = 't', pageLength = 13))
gof_cat %>% dplyr::filter(category == 'Q') %>% dplyr::select(-category) %>% dplyr::rename(`Q(day)` = `annual_stat`) %>% dplyr::mutate(`Q(day)` = gsub('high_', '', `Q(day)`)) %>% dplyr::mutate(`Q(day)` = gsub('q', 'Q', `Q(day)`)) %>% dplyr::mutate(`Q(day)` = gsub('_jd', ' (Julian day)', `Q(day)`)) %>% DT::datatable(data = ., caption = '1, 3, 7, and 30 day flow error statistics', fillContainer = F, options = list(dom = 't', pageLength = 13))
gof_cat %>% dplyr::filter(category == 'CVD') %>% dplyr::select(-category) %>% dplyr::rename(`CVD` = `annual_stat`) %>% DT::datatable(data = ., caption = 'CVD error statistics', fillContainer = F, options = list(dom = 't', pageLength = 12))
Percent Bias: Average percent bias at each basin was computed as the sum of the differences between modeled and observed values across years (the sum of residuals), divided by the sum of the observed values, and converted to a percentage.
Volumetric Efficiency (VE): An overall goodness-of-fit metric similar to the more commonly used Nash-Sutcliffe efficiency (NSE). VE is used here because it weights small and large values evenly, whereas NSE gives more weight to larger values. A value of 1 indicates a perfect match between the observed and simulated values. Lighter colors are closer to one and represent a better fit between simulated and observed values.
Spearman's rho: Spearman's rank correlation coefficient is a non-parametric measure of statistical dependence between the rankings of two variables. Lighter colors represent higher Spearman's rho values (better correlation between the observed and simulated values), while darker colors represent lower values (poorer correlation).
Normalized Root Mean Square Error (NRMSE): Lighter colors are closer to zero, reflecting less difference between simulated and observed values, while darker colors represent higher NRMSE values and greater difference between simulated and observed values.
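For reference, the four metrics above can be computed directly from paired observed and simulated values. The function below is an illustrative sketch, not the HyMETT code; in particular, the NRMSE normalization (the observed mean here) is an assumption and may differ from the convention used in the tables above.

```r
# Illustrative sketch of the four error metrics; `obs` and `sim` are paired
# vectors of observed and simulated values for a single statistic.
gof_metrics <- function(obs, sim) {
  keep <- stats::complete.cases(obs, sim)  # drop pairs with a missing value
  obs  <- obs[keep]
  sim  <- sim[keep]
  c(percent_bias = 100 * sum(sim - obs) / sum(obs),
    VE           = 1 - sum(abs(sim - obs)) / sum(obs),
    spearman_rho = stats::cor(obs, sim, method = 'spearman'),
    NRMSE        = sqrt(mean((sim - obs)^2)) / mean(obs))  # normalized by the observed mean
}

# Example with the annual means imported above:
# gof_metrics(annual_obs$annual_mean, annual_sim$annual_mean)
```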
\newpage
#----- Daily data # Get daily Q dailyQ <- dplyr::full_join(daily_sim[, c('Date', 'value')], daily_obs[, c('Date', 'value')], by = 'Date') %>% dplyr::rename(Simulated = value.x) %>% dplyr::rename(Observed = value.y) # Plot daily obs v sim ggplot2::ggplot(dailyQ, aes(x = Observed, y = Simulated)) + ggplot2::geom_point(alpha = 0.10) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(0, max(dailyQ[,-1], na.rm = T)), ylim = c(0, max(dailyQ[,-1], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Observed [cfs]', y = 'Simulated [cfs]', title = 'Daily Streamflow', subtitle = paste0('Site #', site), caption = paste0('n = ', formatC(nrow(dailyQ), big.mark = ',')))
#----- Monthly data # Average daily values to monthly monthlyQ <- dailyQ %>% dplyr::mutate(yearmon = substr(Date, 1, 7)) %>% dplyr::group_by(yearmon) %>% dplyr::summarise(Simulated = mean(Simulated, na.rm = T), Observed = mean(Observed, na.rm = T)) %>% dplyr::ungroup() # Plot monthly obs v sim ggplot2::ggplot(monthlyQ, aes(x = Observed, y = Simulated)) + ggplot2::geom_point(alpha = 0.40) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(0, max(monthlyQ[,-1], na.rm = T)), ylim = c(0, max(monthlyQ[,-1], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Observed [cfs]', y = 'Simulated [cfs]', title = 'Monthly Streamflow', subtitle = paste0('Site #', site), caption = paste0('n = ', formatC(nrow(monthlyQ), big.mark = ',')))
#------- Annual data annualQ <- dplyr::full_join(annual_sim[,c('WY', 'annual_mean')], annual_obs[,c('WY', 'annual_mean')], by = 'WY') %>% dplyr::rename(Simulated = annual_mean.x) %>% dplyr::rename(Observed = annual_mean.y) # Plot annual obs v sim ggplot2::ggplot(annualQ, aes(x = Observed, y = Simulated)) + ggplot2::geom_point(alpha = 0.50) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(0, max(annualQ[,-1], na.rm = T)), ylim = c(0, max(annualQ[,-1], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Observed [cfs]', y = 'Simulated [cfs]', title = 'Annual* Mean Streamflow', subtitle = paste0('Site #', site), caption = paste0(paste0('n = ', formatC(nrow(annualQ), big.mark = ',')), '\n*by water year'))
#----- Annual maxima # Select the maximum daily value in each water year (October-December dates belong to the following water year) annualMax <- dailyQ %>% dplyr::mutate(waterYear = ifelse(test = lubridate::month(Date) >= 10, yes = lubridate::year(Date) + 1, no = lubridate::year(Date))) %>% dplyr::group_by(waterYear) %>% dplyr::summarise(Simulated = max(Simulated, na.rm = T), Observed = max(Observed, na.rm = T)) %>% dplyr::ungroup() # Plot annual obs v sim ggplot2::ggplot(annualMax, aes(x = Observed, y = Simulated)) + ggplot2::geom_point(alpha = 0.40) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(0, max(annualMax[,-1], na.rm = T)), ylim = c(0, max(annualMax[,-1], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Observed [cfs]', y = 'Simulated [cfs]', title = 'Annual* 1-Day Peak Flow', subtitle = paste0('Site #', site), caption = paste0(paste0('n = ', formatC(nrow(annualMax), big.mark = ',')), '\n*by water year'))
#----- Daily Q3 # Join the simulated and observed 3-day (Q3) flow series dailyQ3 <- dplyr::full_join(daily_sim[,c('Date', 'Q3')], daily_obs[,c('Date', 'Q3')], by = 'Date') %>% dplyr::rename(Simulated = Q3.x) %>% dplyr::rename(Observed = Q3.y) %>% tidyr::drop_na() # Plot daily obs v sim ggplot2::ggplot(dailyQ3, aes(x = Observed, y = Simulated)) + ggplot2::geom_point(alpha = 0.40) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(0, max(dailyQ3[,-1], na.rm = T)), ylim = c(0, max(dailyQ3[,-1], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Observed [cfs]', y = 'Simulated [cfs]', title = 'Daily Q3', subtitle = paste0('Site #', site), caption = paste0('n = ', formatC(nrow(dailyQ3), big.mark = ',')))
#----- Daily Q7 # Join the simulated and observed 7-day (Q7) flow series dailyQ7 <- dplyr::full_join(daily_sim[,c('Date', 'Q7')], daily_obs[,c('Date', 'Q7')], by = 'Date') %>% dplyr::rename(Simulated = Q7.x) %>% dplyr::rename(Observed = Q7.y) %>% tidyr::drop_na() # Plot daily obs v sim ggplot2::ggplot(dailyQ7, aes(x = Observed, y = Simulated)) + ggplot2::geom_point(alpha = 0.40) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(0, max(dailyQ7[,-1], na.rm = T)), ylim = c(0, max(dailyQ7[,-1], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Observed [cfs]', y = 'Simulated [cfs]', title = 'Daily Q7', subtitle = paste0('Site #', site), caption = paste0('n = ', formatC(nrow(dailyQ7), big.mark = ',')))
#----- Daily Q30 # Join the simulated and observed 30-day (Q30) flow series dailyQ30 <- dplyr::full_join(daily_sim[,c('Date', 'Q30')], daily_obs[,c('Date', 'Q30')], by = 'Date') %>% dplyr::rename(Simulated = Q30.x) %>% dplyr::rename(Observed = Q30.y) %>% tidyr::drop_na() # Plot daily obs v sim ggplot2::ggplot(dailyQ30, aes(x = Observed, y = Simulated)) + ggplot2::geom_point(alpha = 0.40) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(0, max(dailyQ30[,-1], na.rm = T)), ylim = c(0, max(dailyQ30[,-1], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Observed [cfs]', y = 'Simulated [cfs]', title = 'Daily Q30', subtitle = paste0('Site #', site), caption = paste0('n = ', formatC(nrow(dailyQ30), big.mark = ',')))
#----- Daily Q30 (log scale) # Join the simulated and observed 30-day (Q30) flow series and log10-transform dailyQ30 <- dplyr::full_join(daily_sim[,c('Date', 'Q30')], daily_obs[,c('Date', 'Q30')], by = 'Date') %>% dplyr::rename(Simulated = Q30.x) %>% dplyr::rename(Observed = Q30.y) %>% tidyr::drop_na() dailyQ30$Observed_log <- log10(dailyQ30$Observed) dailyQ30$Simulated_log <- log10(dailyQ30$Simulated) # Plot daily obs v sim on log10-transformed values ggplot2::ggplot(dailyQ30, aes(x = Observed_log, y = Simulated_log)) + ggplot2::geom_point(alpha = 0.40) + ggplot2::theme_bw() + ggplot2::coord_cartesian(xlim = c(min(dailyQ30[,c(4,5)], na.rm = T), max(dailyQ30[,c(4,5)], na.rm = T)), ylim = c(min(dailyQ30[,c(4,5)], na.rm = T), max(dailyQ30[,c(4,5)], na.rm = T))) + ggplot2::geom_abline(slope = 1, col = 'red') + ggplot2::labs(x = 'Log Observed', y = 'Log Simulated', title = 'Log Daily Q30', subtitle = paste0('Site #', site), caption = paste0('n = ', formatC(nrow(dailyQ30), big.mark = ',')))
Observed versus simulated values. The red line is the 1:1 line, representing a perfect match.
#----- Daily hydrograph # Stack observed and modeled daily Q into long format dailyQlog <- dplyr::bind_rows(daily_sim %>% dplyr::select(Date, value) %>% dplyr::mutate(Source = 'Modeled'), daily_obs %>% dplyr::select(Date, value) %>% dplyr::mutate(Source = 'Observed')) %>% dplyr::mutate(Source = factor(Source, levels = c("Observed", "Modeled"))) # Plot daily obs v sim p <- ggplot2::ggplot(dailyQlog, aes(x = Date, y = value, color = Source)) + ggplot2::geom_line() + ggplot2::theme_bw() + ggplot2::scale_y_continuous(trans = 'log10') + ggplot2::labs(x = 'Date', y = 'Q [cfs] (log10 scale)', title = 'Daily Streamflow (log10)', subtitle = paste0('Site #', site), caption = paste0('n days = ', formatC(length(unique(dailyQlog$Date)), big.mark = ','))) plotly::ggplotly(p)
Streamflow hydrograph. The y-axis shows discharge on a log10 scale.
dailyQlog %>% dplyr::mutate(month = lubridate::month(Date)) %>% dplyr::mutate(month = month.abb[month]) %>% dplyr::mutate(month = factor(month, levels = month.abb)) %>% ggplot2::ggplot(., aes(x = month, y = value, fill = Source)) + ggplot2::geom_boxplot() + ggplot2::theme_bw() + ggplot2::labs(x = "Month", y = 'Q [cfs]', title = 'Streamflow by Month', subtitle = paste0('Site #', site))
Boxplots comparing the monthly distributions of observed and modeled streamflow across the period of record.
dailyQlog %>% dplyr::mutate(MOY = lubridate::month(Date)) %>% dplyr::mutate(month = month.abb[MOY]) %>% dplyr::mutate(Source = as.character(Source)) %>% dplyr::group_by(Source, month) %>% dplyr::rename(Month = month) %>% dplyr::summarise(MOY = dplyr::first(MOY), value = round(mean(value, na.rm = T), 2)) %>% tidyr::pivot_wider(id_cols = c(MOY, Month), names_from = Source, values_from = value) %>% dplyr::arrange(MOY) %>% dplyr::select(MOY, Month, Observed, Modeled) %>% dplyr::select(-MOY) %>% dplyr::rename(`Observed [cfs]` = Observed) %>% dplyr::rename(`Modeled [cfs]` = Modeled) %>% DT::datatable(data = ., caption = paste0('Mean daily streamflow for each month for years ', min(lubridate::year(dailyQlog$Date)), ' - ', max(lubridate::year(dailyQlog$Date))), fillContainer = F, options = list(dom = 't', pageLength = 12))
dailyQlog %>% dplyr::mutate(MOY = lubridate::month(Date)) %>% dplyr::mutate(month = month.abb[MOY]) %>% dplyr::mutate(Source = as.character(Source)) %>% dplyr::group_by(Source, month) %>% dplyr::rename(Month = month) %>% dplyr::summarise(MOY = dplyr::first(MOY), value = round(quantile(value, 0.01, na.rm = T), 2)) %>% tidyr::pivot_wider(id_cols = c(MOY, Month), names_from = Source, values_from = value) %>% dplyr::arrange(MOY) %>% dplyr::select(MOY, Month, Observed, Modeled) %>% dplyr::select(-MOY) %>% dplyr::rename(`Observed [cfs]` = Observed) %>% dplyr::rename(`Modeled [cfs]` = Modeled) %>% DT::datatable(data = ., caption = paste0('1st percentile of daily streamflow for each month for years ', min(lubridate::year(dailyQlog$Date)), ' - ', max(lubridate::year(dailyQlog$Date))), fillContainer = F, options = list(dom = 't', pageLength = 12))
dailyQlog %>% dplyr::mutate(MOY = lubridate::month(Date)) %>% dplyr::mutate(month = month.abb[MOY]) %>% dplyr::mutate(Source = as.character(Source)) %>% dplyr::group_by(Source, month) %>% dplyr::rename(Month = month) %>% dplyr::summarise(MOY = dplyr::first(MOY), value = round(quantile(value, 0.10, na.rm = T), 2)) %>% tidyr::pivot_wider(id_cols = c(MOY, Month), names_from = Source, values_from = value) %>% dplyr::arrange(MOY) %>% dplyr::select(MOY, Month, Observed, Modeled) %>% dplyr::select(-MOY) %>% dplyr::rename(`Observed [cfs]` = Observed) %>% dplyr::rename(`Modeled [cfs]` = Modeled) %>% DT::datatable(data = ., caption = paste0('10th percentile of daily streamflow for each month for years ', min(lubridate::year(dailyQlog$Date)), ' - ', max(lubridate::year(dailyQlog$Date))), fillContainer = F, options = list(dom = 't', pageLength = 12))
dailyQlog %>% dplyr::mutate(MOY = lubridate::month(Date)) %>% dplyr::mutate(month = month.abb[MOY]) %>% dplyr::mutate(Source = as.character(Source)) %>% dplyr::group_by(Source, month) %>% dplyr::rename(Month = month) %>% dplyr::summarise(MOY = dplyr::first(MOY), value = round(quantile(value, 0.50, na.rm = T), 2)) %>% tidyr::pivot_wider(id_cols = c(MOY, Month), names_from = Source, values_from = value) %>% dplyr::arrange(MOY) %>% dplyr::select(MOY, Month, Observed, Modeled) %>% dplyr::select(-MOY) %>% dplyr::rename(`Observed [cfs]` = Observed) %>% dplyr::rename(`Modeled [cfs]` = Modeled) %>% DT::datatable(data = ., caption = paste0('50th percentile (median) of daily streamflow for each month for years ', min(lubridate::year(dailyQlog$Date)), ' - ', max(lubridate::year(dailyQlog$Date))), fillContainer = F, options = list(dom = 't', pageLength = 12))
dailyQlog %>% dplyr::mutate(MOY = lubridate::month(Date)) %>% dplyr::mutate(month = month.abb[MOY]) %>% dplyr::mutate(Source = as.character(Source)) %>% dplyr::group_by(Source, month) %>% dplyr::rename(Month = month) %>% dplyr::summarise(MOY = dplyr::first(MOY), value = round(quantile(value, 0.90, na.rm = T), 2)) %>% tidyr::pivot_wider(id_cols = c(MOY, Month), names_from = Source, values_from = value) %>% dplyr::arrange(MOY) %>% dplyr::select(MOY, Month, Observed, Modeled) %>% dplyr::select(-MOY) %>% dplyr::rename(`Observed [cfs]` = Observed) %>% dplyr::rename(`Modeled [cfs]` = Modeled) %>% DT::datatable(data = ., caption = paste0('90th percentile of daily streamflow for each month for years ', min(lubridate::year(dailyQlog$Date)), ' - ', max(lubridate::year(dailyQlog$Date))), fillContainer = F, options = list(dom = 't', pageLength = 12))
dailyQlog %>% dplyr::mutate(MOY = lubridate::month(Date)) %>% dplyr::mutate(month = month.abb[MOY]) %>% dplyr::mutate(Source = as.character(Source)) %>% dplyr::group_by(Source, month) %>% dplyr::rename(Month = month) %>% dplyr::summarise(MOY = dplyr::first(MOY), value = round(quantile(value, 0.99, na.rm = T), 2)) %>% tidyr::pivot_wider(id_cols = c(MOY, Month), names_from = Source, values_from = value) %>% dplyr::arrange(MOY) %>% dplyr::select(MOY, Month, Observed, Modeled) %>% dplyr::select(-MOY) %>% dplyr::rename(`Observed [cfs]` = Observed) %>% dplyr::rename(`Modeled [cfs]` = Modeled) %>% DT::datatable(data = ., caption = paste0('99th percentile of daily streamflow for each month for years ', min(lubridate::year(dailyQlog$Date)), ' - ', max(lubridate::year(dailyQlog$Date))), fillContainer = F, options = list(dom = 't', pageLength = 12))
# Calculate percent exceedance for the simulated and observed series fdc_sim <- dailyQ$Simulated %>% sort(., decreasing = T) %>% tibble::tibble(x = 100/length(.) * (1:length(.)), value = ., Source = 'Simulated') fdc_obs <- dailyQ$Observed %>% sort(., decreasing = T) %>% tibble::tibble(x = 100/length(.) * (1:length(.)), value = ., Source = 'Observed') fdc <- dplyr::bind_rows(fdc_sim, fdc_obs) %>% dplyr::mutate(Source = factor(Source, levels = c('Observed', 'Simulated'))) # plot ggplot2::ggplot(fdc, aes(x = x, y = value, color = Source)) + ggplot2::geom_line(size = 1.25) + ggplot2::theme_bw() + ggplot2::labs(x = 'Exceedance Probability (%)', y = 'Q [cfs]', title = 'Flow Duration Curve', subtitle = paste0('Site #', site), caption = paste0('n = ', formatC(nrow(dailyQ), big.mark = ',')))
(This section is still under development. The plan is to eventually add tables displaying period-of-record statistics.)
Trend tests were completed using Sen's slope [40] for magnitude, except in cases when the annual series had "zero-flow" status for any observed or modeled time series for a given basin; in those cases, the time series was converted to a binomial series, and a logistic regression [41] was fit with year as the explanatory variable. The predicted probabilities from the model (the dependent variable) were used to quantify the change in the probability of a zero-flow year over time. Sen's slope was computed using the R function "GeneralMannKendall.R", authored by Benjamin Renard of Irstea, UR Riverly, Lyon, France, and available for download from Dudley et al. [42]. Logistic regression was completed using the "glm" function in the R "stats" package with family = binomial(link = "logit"). Correlations between basin characteristics for study basins and project results were computed with Kendall's tau (Hodgkins et al., 2020).
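The sketch below illustrates the two approaches described above. It is not the "GeneralMannKendall.R" code, and the `annual_min` column referenced in the zero-flow example is an assumption rather than a column confirmed in the imported tables.

```r
# Sen's slope: the median of all pairwise slopes between years (illustrative).
sens_slope <- function(year, value) {
  idx    <- utils::combn(seq_along(value), 2)   # all pairs of indices (i < j)
  slopes <- (value[idx[2, ]] - value[idx[1, ]]) / (year[idx[2, ]] - year[idx[1, ]])
  stats::median(slopes, na.rm = TRUE)
}
# sens_slope(annual_obs$WY, annual_obs$annual_mean)

# Zero-flow case: logistic regression of a binomial zero-flow indicator on year;
# the predicted probabilities quantify the change in the probability of a
# zero-flow year over time.
# df  <- dplyr::mutate(annual_obs, zero_flow = as.integer(annual_min == 0))
# fit <- stats::glm(zero_flow ~ WY, data = df, family = binomial(link = "logit"))
# predict(fit, type = "response")
```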
trend_comp <- purrr::map(site, function(site){ paste0(input_folder, site, '_com_trend.csv') %>% data.table::fread(input = ., data.table = F) %>% tibble::as_tibble() %>% # add site number dplyr::mutate(site = site) %>% dplyr::select(site, dplyr::everything()) %>% # add statistic categories dplyr::mutate(category = NA) %>% dplyr::mutate(category = ifelse(grepl('mean', annual_stat, ignore.case = T), 'Mean', category)) %>% dplyr::mutate(category = ifelse(grepl('max', annual_stat, ignore.case = T), 'Max', category)) %>% dplyr::mutate(category = ifelse(grepl('min', annual_stat, ignore.case = T), 'Min', category)) %>% dplyr::mutate(category = ifelse(grepl('percent_annual', annual_stat, ignore.case = T), 'Percent Annual', category)) %>% dplyr::mutate(category = ifelse(grepl('CVD', annual_stat, ignore.case = T), 'CVD', category)) %>% dplyr::mutate(category = ifelse(grepl('percentile', annual_stat, ignore.case = T), 'Percentile', category)) %>% dplyr::mutate(category = ifelse(grepl('sd', annual_stat, ignore.case = T), 'SD', category)) %>% dplyr::mutate(category = ifelse(grepl('q', annual_stat, ignore.case = T), 'Q', category)) %>% dplyr::mutate_if(is.numeric, round, digits = 2) %>% # transform to long format tidyr::pivot_longer(cols = sen_slope_diff:val_perc_change_diff) }) %>% dplyr::bind_rows() # Keep the percent-change differences and clamp extreme values to +/- 100 for display trend_difference <- trend_comp %>% dplyr::filter(name == "val_perc_change_diff") trend_difference$value[trend_difference$value > 100] <- 100 trend_difference$value[trend_difference$value < -100] <- -100 trend_plot <- ggplot2::ggplot(trend_difference, aes(x = value, y = annual_stat)) + ggplot2::geom_point(aes(color = site)) + ggplot2::theme_bw() + ggplot2::facet_grid(rows = ggplot2::vars(category), scales = 'free_y') + ggplot2::labs(title = 'Trend Differences') + ggplot2::xlim(-100, 100) print(trend_plot)
stat_list <- colnames(annual_obs)[2:length(colnames(annual_obs))] # Limit to a subset of the annual statistics for this report stat_list <- stat_list[2:6] for (stat in stat_list){ cat("#### ", stat, " \n") # Slice observed trend data trend_data_obs <- filter(annual_trend_obs, annual_stat == stat) # Slice observed discharge data discharge_data_obs <- annual_obs %>% select('WY', stat) discharge_data_obs <- dplyr::rename(discharge_data_obs, "Obs" = stat) # Slice simulated trend data trend_data_sim <- filter(annual_trend_sim, annual_stat == stat) # Slice simulated discharge data discharge_data_sim <- annual_sim %>% select('WY', stat) discharge_data_sim <- dplyr::rename(discharge_data_sim, "Sim" = stat) trend_data_diff <- filter(annual_trend_comp, annual_stat == stat) df <- merge(discharge_data_obs, discharge_data_sim, by = "WY") df <- melt(as.data.table(df), id = "WY") p <- ggplot2::ggplot(df, aes(x = WY, y = value, colour = variable)) + geom_point() + scale_color_manual(values = c("blue", "red")) + geom_abline(intercept = trend_data_obs$intercept, slope = trend_data_obs$sen_slope, color = "blue") + geom_abline(intercept = trend_data_sim$intercept, slope = trend_data_sim$sen_slope, color = 'red') + labs(title = paste0("Trends for ", stat), y = stat) + theme(axis.title.y = element_blank(), legend.title = element_blank()) print(p) cat(paste("\n\n Observed Trend Change:", trend_data_obs$percent_change_q, "% \n\n", "Simulated Trend Change:", trend_data_sim$percent_change_q, "% \n\n", "Observed vs. Simulated Trend Difference:", trend_data_diff$percent_change_q_difference, "% \n\n")) cat("\n\n") }
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('mean', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_mean', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('mean', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_mean', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen) %>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = 'Month') # Generate table DT::datatable(data = sen, caption = 'Trend in monthly means measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('min', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_min', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('min', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_min', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen) %>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = 'Month') # Generate table DT::datatable(data = sen, caption = 'Trend in monthly minima measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('max', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_max', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('max', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_max', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen) %>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = 'Month') # Generate table DT::datatable(data = sen, caption = 'Trend in monthly maxima measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('percent_annual', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_percent_annual', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('percent_annual', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_percent_annual', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Month = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen) %>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = 'Month') # Generate table DT::datatable(data = sen, caption = 'Trend in monthly percent-of-annual measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('percentile', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_percentile', '', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('annual_', '', annual_stat)) %>% dplyr::mutate(annual_stat = paste0(annual_stat, 'th')) %>% dplyr::mutate(annual_stat = gsub('1th', '1st', annual_stat)) %>% dplyr::rename(Percentile = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('percentile', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_percentile', '', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('annual_', '', annual_stat)) %>% dplyr::mutate(annual_stat = paste0(annual_stat, 'th')) %>% dplyr::mutate(annual_stat = gsub('1th', '1st', annual_stat)) %>% dplyr::rename(Percentile = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen)%>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = 'Percentile') # Generate table DT::datatable(data = sen, caption = 'Trend in annual percentiles measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('_sd', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_sd', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Period = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('_sd', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_sd', '', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::mutate(annual_stat = gsub('may', 'May', annual_stat)) %>% dplyr::rename(Period = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen)%>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = 'Period') # Generate table DT::datatable(data = sen, caption = 'Trend in annual and monthly standard deviation measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('_q', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('high_', '', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_jd', ' (Julian day)', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::rename(`Q(days)` = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('_q', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('high_', '', annual_stat)) %>% dplyr::mutate(annual_stat = gsub('_jd', ' (Julian day)', annual_stat)) %>% dplyr::mutate(annual_stat = tools::toTitleCase(annual_stat)) %>% dplyr::rename(`Q(days)` = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen)%>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = "Q(days)") # Generate table DT::datatable(data = sen, caption = 'Trend in annual multi-day flows measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
# Format data sen_sim <- annual_trend_sim %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('CVD', annual_stat)) %>% dplyr::rename(CVD = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Simulated = Sen)%>% dplyr::rename(p_sim = p) sen_obs <- annual_trend_obs %>% dplyr::select(annual_stat, sen_slope, p_value) %>% dplyr::filter(grepl('CVD', annual_stat)) %>% dplyr::rename(CVD = annual_stat) %>% dplyr::rename(Sen = sen_slope) %>% dplyr::rename(p = p_value) %>% dplyr::mutate(Sen = round(Sen, 2)) %>% dplyr::mutate(p = round(p, 3)) %>% dplyr::rename(Observed = Sen)%>% dplyr::rename(p_obs = p) sen <- dplyr::full_join(sen_obs, sen_sim, by = "CVD") # Generate table DT::datatable(data = sen, caption = 'Trend in CVD measured as Sen slope. Values that are assumed to be insignificant through a binary p-values test (using an alpha value of 0.05) are colored grey.', fillContainer = F, options = list( dom = 't', pageLength = 13, columnDefs = list(list(visible = F, targets = c(3,5))) )) %>% DT::formatStyle(columns = 'Observed', valueColumns = 'p_obs', color = DT::styleInterval(c(0.05), c('black', 'darkgrey'))) %>% DT::formatStyle(columns = 'Simulated', valueColumns = 'p_sim', color = DT::styleInterval(c(0.05), c('black', 'darkgrey')))
(This section is still in development. The current plan is to auto-generate a readable text summary of the error statistics to follow the tables and figures above.)
This report was generated with the HyMETT R package, which is in the public domain. HyMETT is a product of the United States Geological Survey, an agency of the United States Department of the Interior.