```{r, include = FALSE}
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
```
The functions in this package allow you to perform a sequence of steps resulting in an MSE (Management Strategy Evaluation) style workflow. This is not a standalone package; it must be used in conjunction with the `hydradata` package and the `hydra_sim` model (compiled from its tpl file).
Decisions need to be made regarding how long to run the model in the absence of fishing (the number of years). This decision is based on how long the model takes to reach some kind of equilibrium (if it does at all). An equilibrium may not be reached if a species' stock-recruitment relationship is highly overcompensatory; this may result in highly cyclical behavior, possibly around an average. It is recommended that the user specify the total length of the model run.
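One informal way to judge whether a no-fishing run has settled is to compare the average biomass over the final years of the run against the preceding block of years. The sketch below is illustrative only; it assumes you have a matrix of annual biomass (years in rows, species in columns) extracted from a Hydra run, and the `is_near_equilibrium` helper and its tolerance are hypothetical, not part of hydramse.

```{r eval=FALSE, echo=TRUE}
# Hypothetical helper: compare the mean biomass of the last `window` years
# to the mean of the preceding `window` years, species by species.
is_near_equilibrium <- function(biomass, window = 10, tol = 0.01) {
  nYrs <- nrow(biomass)
  lastBlock <- colMeans(biomass[(nYrs - window + 1):nYrs, , drop = FALSE])
  prevBlock <- colMeans(biomass[(nYrs - 2 * window + 1):(nYrs - window), , drop = FALSE])
  relChange <- abs(lastBlock - prevBlock) / prevBlock
  all(relChange < tol)   # TRUE if every species changed by less than tol between blocks
}
```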
Once specified, the underlying data used to create Hydra's data files (dat and pin) need to be extended to accommodate these additional years of data.
```{r echo=TRUE, eval=FALSE}
newData <- hydramse::lengthen_hydra_data(hydradata::hydraDataList, nYrs = 150)
```
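A quick way to confirm the extension worked is to compare the structure of the original and lengthened lists. This is only a sanity check and assumes nothing beyond `newData` mirroring the shape of `hydradata::hydraDataList`.

```{r eval=FALSE, echo=TRUE}
# compare the top-level structure before and after lengthening (sanity check only)
str(hydradata::hydraDataList, max.level = 1)
str(newData, max.level = 1)
```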
Set up the locations for output (create them if necessary):

```{r echo=TRUE, eval=FALSE}
outDirForDatPin <- here::here("darwin")         # folder for dumping Hydra output prior to analysis
outPath <- here::here("successfulSims")         # folder containing parameters of successful model runs
if (!dir.exists(outDirForDatPin)) {dir.create(outDirForDatPin)}
if (!dir.exists(outPath)) {dir.create(outPath)}
```
We also need to set the name of Hydra's executable file:

```{r echo=TRUE, eval=FALSE}
hydraVersion <- "hydra_sim"
```
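The code below also references `pathToTPL`, the directory containing the compiled `hydra_sim` executable. It is not defined elsewhere in this vignette, so a hypothetical definition and existence check is sketched here; the location shown is an assumption and should be replaced with wherever the executable actually lives on your machine.

```{r eval=FALSE, echo=TRUE}
# Hypothetical location of the compiled Hydra executable; adjust to your setup
pathToTPL <- here::here("hydra")
exeFile <- file.path(pathToTPL, hydraVersion)
if (!file.exists(exeFile) && !file.exists(paste0(exeFile, ".exe"))) {
  stop("Hydra executable not found in: ", pathToTPL)
}
```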
Methods used in the simulation study can be found here. The code below should be wrapped in a loop so that many candidate parameter sets can be simulated and tested; a minimal loop sketch is given after the assessment step below.
```{r eval=FALSE, echo=TRUE}
ipass <- 0
simulationRules <- hydramse::darwinRules
darwinD <- hydramse::darwinData

# simulate a set of parameters
simulatedValues <- hydramse::simulate_parameters(darwinD, simulationRules, SRFunctionChoice = NULL)
# combine the simulated values with the extended time series data for darwinian use
hydraD <- hydramse::update_hydra_data(hydradata::hydraDataList, simulatedValues) # update with simulated values
hydraD <- hydramse::update_hydra_data(hydraD, noFishingData)                     # update with no-fishing data

# create dat and pin files
inputOptions <- hydradata::setup_default_inputs(outDir = outDirForDatPin) # gets options for Hydra runs
inputOptions$scenarioFlag <- "darwin"
hydraD$flagMSE <- 2                              # reduces output from the Hydra run
hydraD$recStochastic <- hydraD$recStochastic * 0 # no stochasticity used in the model run
hydraD <- hydradata::create_datpin_files(inputOptions, hydraD) # creates the dat and pin files

# run Hydra
# specify the paths to the input files and the Hydra model executable
datPath <- paste0(outDirForDatPin, "/", inputOptions$outputFilename, ".dat")
pinPath <- paste0(outDirForDatPin, "/", inputOptions$outputFilename, ".pin")
exePath <- paste(pathToTPL, hydraVersion, sep = "/")
hydramse::run_single_hydra(iseed = 1, exePath = exePath, datPath = datPath, pinPath = pinPath)

# move output files to the darwin folder
if (Sys.info()['sysname'] == "Windows") {
  shell(paste0("move *.text ", paste0(outDirForDatPin, "/")), intern = TRUE)
} else if (Sys.info()['sysname'] == "Linux") {
  system(paste0("mv *.text ", paste0(outDirForDatPin, "/")), intern = TRUE)
}
```
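As an alternative to shelling out with `move`/`mv`, the same file relocation can be done portably in base R. This is a sketch, assuming the Hydra output files end in `.text` and sit in the current working directory.

```{r eval=FALSE, echo=TRUE}
# move Hydra output files to the darwin folder using base R (platform independent)
outFiles <- list.files(pattern = "\\.text$", full.names = TRUE)
ok <- file.rename(outFiles, file.path(outDirForDatPin, basename(outFiles)))
if (!all(ok)) warning("Some output files could not be moved to ", outDirForDatPin)
```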
## Assess the validity of the parameter set

Once the model has run, the output needs to be processed and compared to historic data. If all tests pass, the realization is saved for use in an MSE.

```r
# process output from the model run (1 file)
output <- hydramse::process_darwin_output(outDirForDatPin)
biomass <- output$biomass
catch <- output$catch

# Do these simulations satisfy the rules (historical equivalence)?
# No-fishing biomass rule: assumes equilibrium is reached
rule1 <- hydramse::rule1_biomass(biomass, nYrsFishing, stockRecruitData$historicBounds, simulationRules)
if (rule1$pass == FALSE) {next}
# Fishing biomass rule: uses the mean of the last 10 years
rule2 <- hydramse::rule2_biomass(biomass, nYrs, stockRecruitData$historicBounds, simulationRules)
if (rule2$pass == FALSE) {next}
# Catch rule
rule3 <- hydramse::rule3_landings(catch, nYrs, stockRecruitData$historicBounds, simulationRules)
if (rule3$pass == FALSE) {next}

ipass <- ipass + 1
# save all data to an RDS file for use in an MSE
darwinRes <- list(hydraDarwin = hydraD, simulatedValues = simulatedValues,
                  simulationRules = simulationRules, inputOptions = inputOptions)
saveRDS(darwinRes, file = paste0(outPath, "/success", ipass, ".RDS"))
```
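The simulation, run, and assessment steps above are intended to sit inside a single loop. The skeleton below is a minimal sketch of that structure, not part of hydramse; the number of attempts (`nAttempts`) is an arbitrary placeholder and the numbered comments stand in for the code blocks shown above.

```r
nAttempts <- 1000   # hypothetical cap on the number of candidate parameter sets to try
ipass <- 0
for (iattempt in 1:nAttempts) {
  # 1. simulate a candidate parameter set and build hydraD (see above)
  # 2. write the dat/pin files and run Hydra
  # 3. process the output and apply rules 1-3; `next` skips to the next attempt on failure
  # 4. on success, increment ipass and save the accepted parameter set
}
```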
Each parameter set that passes the tests is then used in an MSE-type study; details can be found in the publication (Beet & Fogarty, 2020). The parameter set can be read from the saved RDS file or passed straight from the darwin study. Here we assume it is read from the RDS file. This simulated data set is used to create all of the dat and pin files required for each scenario/exploitation rate combination.
```{r eval=FALSE, echo=TRUE}
rootFolder <- "define_the_folder_where_output_for_all_scenario_runs_be_stored"
darwinRes <- readRDS(file = paste0(outPath, "/success1.RDS")) # read in the RDS
dataToUse <- hydramse::update_hydra_data(hydradata::hydraDataList, darwinRes$simulatedValues) # sets the simulated values in the hydra model data

hcrTypes <- c("Fixed", "Ramp")                      # harvest control rule types
hcrLevels <- c("Complex", "HTSpecies", "LTSpecies") # harvest control rule levels
scenarios <- apply(expand.grid(hcrTypes, hcrLevels), 1, paste, collapse = "") # create harvest control rule combinations

dataToUse <- hydradata::set_hcr(dataToUse, minMaxExploitations = c(0.05, 0.4), increment = 0.05)
dataToUse$flagMSE <- 1
exploitationRates <- round(100 * dataToUse$exploitationOptions[dataToUse$Nthresholds, ])

# create folder structure for the run (given the exploitation rates).
# This creates folders in the user's working directory
folderStructure <- hydramse::create_folder_setup(rootFolder, exploitationRates = exploitationRates)
scenarios <- unique(apply(as.matrix(subset(folderStructure, select = c("hcrType", "hcrLevels"))), 1,
                          function(x) paste0(x, collapse = "")))
folderDirs <- apply(folderStructure, 1, function(x) paste0(x, collapse = "")) # vector of dirs
outputScenarioDirs <- here::here(rootFolder, paste0("Exploitation", folderDirs))

for (iscenario in 1:length(outputScenarioDirs)) {
  outputScenarioDir <- outputScenarioDirs[iscenario]
  print(outputScenarioDir)

  # set up scenario types to run
  if (grepl("LT", folderStructure$hcrLevels[iscenario])) speciesFlag <- "low"
  if (grepl("HT", folderStructure$hcrLevels[iscenario])) speciesFlag <- "high"
  if (grepl("omplex", folderStructure$hcrLevels[iscenario])) speciesFlag <- "none"

  scenarioInputs <- hydradata::setup_default_inputs(outDir = outputScenarioDir,
                                                    scenarioFlag = "assessment",
                                                    temperatureFlag = "mean",
                                                    scenarioType = folderStructure$hcrType[iscenario],
                                                    maxExploitationRate = as.numeric(folderStructure$exploitationRate[iscenario]),
                                                    assessmentSpeciesFlag = speciesFlag)
  # create dat and pin files for the scenario
  dataToPrint <- hydradata::create_datpin_files(listOfParameters = scenarioInputs, dataList = dataToUse)
}
```
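To verify that the scenario folders and their dat/pin files were written as expected, a quick check using base R only (a sketch; it assumes `rootFolder` sits under the project root, as implied by the use of `here::here()` above):

```{r eval=FALSE, echo=TRUE}
# count the dat/pin files written into each scenario folder
scenarioDirs <- list.dirs(here::here(rootFolder), recursive = FALSE)
sapply(scenarioDirs, function(d) length(list.files(d, pattern = "\\.(dat|pin)$")))
```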
Once the folder structure has been created and the dat and pin files saved into each scenario's respective folder, the model can be run any number of times for each scenario. The model can be run in series or in parallel, depending on the number of cores available on your machine or server.
```{r eval=FALSE, echo=TRUE}
#nCores <- parallel::detectCores() - 1
#cl <- parallel::makeCluster(nCores)

nSims <- 100 # set the number of simulations for each scenario
exePath <- paste(pathToTPL, hydraVersion, sep = "/")
for (iscenario in 1:dim(folderStructure)[1]) {
  datpinPath <- here::here(rootFolder, paste0("Exploitation", paste0(folderStructure[iscenario, ], collapse = "")))
  datPath <- paste0(datpinPath, "/", hydraVersion, ".dat")
  pinPath <- paste0(datpinPath, "/", hydraVersion, ".pin")

  #parallel::parLapply(cl, 1:nSims, hydramse::run_single_hydra, exePath = exePath, datPath = datPath, pinPath = pinPath)
  lapply(1:nSims, hydramse::run_single_hydra, exePath = exePath, datPath = datPath, pinPath = pinPath)

  # move output files to the assessment folder
  if (Sys.info()['sysname'] == "Windows") {
    shell(paste0("move *.txt ", paste0(datpinPath, "/indices")), intern = TRUE)
  } else if (Sys.info()['sysname'] == "Linux") {
    system(paste0("mv *.txt ", paste0(datpinPath, "/indices")), intern = TRUE)
  }
}
#parallel::stopCluster(cl)
```
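If sufficient cores are available, the commented lines above can be enabled so that each scenario's replicates run in parallel. A minimal sketch of that variant for a single scenario is shown below, assuming `nSims`, `exePath`, `datPath`, and `pinPath` are defined as above.

```{r eval=FALSE, echo=TRUE}
# run the nSims replicates for one scenario on a local cluster (sketch)
nCores <- parallel::detectCores() - 1
cl <- parallel::makeCluster(nCores)
parallel::parLapply(cl, 1:nSims, hydramse::run_single_hydra,
                    exePath = exePath, datPath = datPath, pinPath = pinPath)
parallel::stopCluster(cl)
```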
## Process the output

nSims output files are created for each scenario run and are stored in the "indices" folder under each scenario. These files are processed for all scenarios and exported to RDS files for plotting. The output files from Hydra contain only the variables listed in the `indices` variable below; this speeds up run time. If indices from outside the model are to be included, they need to be read in and passed to `process_model_runs()`.

```r
indices <- c("index_LFI_Catch", "index_LFI_Biomass", "index_stdev_catch", "avByr",
             "est_catch_biomass", "index_status_species", "index_status_guild")

# indices from outside the model: revenue data (price per pound)
rD <- read.csv("revenuePricePerPound2012.csv", header = TRUE)
otherData <- list()
otherData$name <- rD[, 1]
otherData$ppp <- rD[, 2]

hydramse::process_model_runs(dataToUse, indices, scenarios, rootFolder, outputScenarioDirs, otherData, outputType = "indices")
```
For each of the harvest control rule type/level combinations:

1. A series of boxplots is made to compare biomass, catch, and species/guild status against exploitation rate.
1. Equivalent line plots are also made.
1. Production function plots are made for each guild.
1. Radar plots are made to compare the performance of each of the indices.
Still in Development
```{r, eval=FALSE, echo=TRUE}
hydramse::plot_box_whiskers(here::here(), rootFolder, inputFile = "species_bio_rate.rds")
```