knitr::opts_chunk$set(echo = TRUE)
# Including an R Markdown document as a vignette:
# https://kbroman.org/pkg_primer/pages/vignettes.html
# devtools::build_vignettes()
# For the header metadata: https://rdrr.io/cran/summarytools/f/vignettes/rmarkdown.Rmd
require(ReenforcinateR) # install_github("delaneyj1786/REINFORCINATOR")
require(tidyverse)
The 'reinforcement' data set contains 10 behaviors and 1 reinforcer, where the target behavior is 'o' and the reinforcer is 'A'.
reinforcement
require(janitor)
reinforcement %>% tabyl(BEH)
The recounter will classify events into before or after categories based on when reinforcement occurs.
The recounter produces two outputs. The descriptive list contains a number of descriptive statistics:
recounter(reinforcement,BEH,"o","A")$descriptive_statistics
The recounted data frame is a data set that can be used in regression models and with the contingency table functions (i.e., contingency_table_builders):
recounted_reinforcement<-recounter(reinforcement,BEH,"o","A")$recounted_data_frame
recounted_reinforcement %>% tabyl(recount_recode_stream)
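As a minimal sketch of the regression use (not a package function), the recounted data frame can be passed to a logistic regression comparing the probability of the target behavior before versus after reinforcement. This assumes the recount_recode_numeric and recount_sequence columns used in the plotting code later in this vignette, with reinforcer rows apparently coded "R".

# Sketch of a regression on the recounted data (column names assumed):
# recount_recode_numeric = 1 if the event is the target behavior, 0 otherwise
# recount_sequence       = before/after label, with reinforcer rows coded "R"
model_df <- recounted_reinforcement %>%
  dplyr::filter(recount_sequence != "R")

before_after_model <- glm(recount_recode_numeric ~ recount_sequence,
                          family = binomial, data = model_df)
summary(before_after_model)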
The function tables_recount_table produces two lists: a process list for computing various statistics (e.g., row-wise probabilities, F tables without margins) and an output list that provides contingency tables of frequencies and probabilities.
tables_recount_table(recounted_reinforcement)$process_list
tables_recount_table(recounted_reinforcement)$output_list
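The same before/after contingency table can also be rebuilt by hand from the recounted data frame and fed to a standard association test. This is a sketch rather than a package function; it assumes the recount_sequence and recount_recode_stream columns shown elsewhere in this vignette.

# Sketch: rebuild the before/after frequency table and test for association.
freq_tab <- recounted_reinforcement %>%
  dplyr::filter(recount_sequence != "R") %>%
  dplyr::count(recount_sequence, recount_recode_stream) %>%
  tidyr::pivot_wider(names_from = recount_recode_stream,
                     values_from = n, values_fill = 0)
freq_tab
chisq.test(as.matrix(freq_tab[, -1]))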
The Reinforcinator package comes with a function for applying the recounter to each level of a grouping variable (such as participant ID or video ID).
I illustrate group splitting by adding a second person to the reinforcement data.
reinforcement2 <- tidyr::tibble(
  VIDELT = rep(4, 11),
  TAR = rep("JAN", 11),
  BEH = c("o", "x", "x", # 4
          "A",           # 4
          "o", "o", "x", # 3
          "A",           # 1
          "o", "o", "o")
)
reinforcement3 <- rbind(reinforcement, reinforcement2)
head(reinforcement3)
tail(reinforcement3)
The overall statistics (without accounting for the reinforcement process) are as follows:
require(janitor)
reinforcement3 %>% tabyl(TAR, BEH)
To address whether there is a difference before versus after reinforcement, we must recount the data and then compare proportions.
reinforcement_2person <- recounter(reinforcement3, BEH, "o", "A")$recounted_data_frame
tables_recount_table(reinforcement_2person)$output_list
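One simple way to compare the proportions is a proportion test on the counts of the target behavior before and after reinforcement. The sketch below rebuilds those counts from the recounted two-person data frame; the column names (recount_sequence, recount_recode_numeric) are taken from the running-probability code later in this vignette, so treat this as an illustration rather than part of the package API.

# Sketch: compare the proportion of target behaviors before vs. after reinforcement.
prop_counts <- reinforcement_2person %>%
  dplyr::filter(recount_sequence != "R") %>%
  dplyr::group_by(recount_sequence) %>%
  dplyr::summarize(targets = sum(recount_recode_numeric), events = dplyr::n())
prop_counts
prop.test(x = prop_counts$targets, n = prop_counts$events)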
First, we split the data frame.
group_split_reinforcement_2person <- group_splitter(reinforcement3, BEH, "o", "A",
                                                    group = "TAR", actor = "TAR")
group_split_reinforcement_2person
Now we use this to recount the data split by each actor. This produces a single recounted data frame.
recounted_group_split <- group_split_recounter(group_split_reinforcement_2person,
                                               "BEH", "o", "A", actor = "TAR")
recounted_group_split %>% tabyl(recount_recode_stream, GROUP)
Now we can apply the descriptives to the whole data set (which accounts for group separation when sorting sequences).
The difference here is that instead of averaging over 4 reinforcers at once, we are now averaging 2 sets of 2 reinforcers.
recounted_group_split
tables_recount_table(recounted_group_split)$output_list
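To see where that averaging comes from, the group-level before/after proportions can be computed by hand. A short sketch (not a package function), assuming the recount_sequence and recount_recode_numeric columns used in the plotting code below:

# Sketch: target-behavior proportion by group and sequence, then the average of
# those group-level proportions (2 groups with 2 reinforcers each in this example).
group_probs <- recounted_group_split %>%
  dplyr::filter(recount_sequence != "R") %>%
  dplyr::group_by(GROUP, recount_sequence) %>%
  dplyr::summarize(p_target = mean(recount_recode_numeric), .groups = "drop")
group_probs
group_probs %>%
  dplyr::group_by(recount_sequence) %>%
  dplyr::summarize(mean_p_target = mean(p_target))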
tkdata <- tidyr::tibble(
  VIDELT = rep(5, 14),
  TAR = rep("TK", 14),
  BEH = c("o", "x", "x", "x", # 4
          "A",                # 5
          "x", "x", "x", "x", # 3
          "A",                # 1
          "o", "o", "o", "x")
)
tk_data_recounted <- recounter(tkdata, BEH, "o", "A")$recounted_data_frame
recounter(tkdata, BEH, "o", "A")$descriptive_statistics # still off from his old example
tables_recount_table(tk_data_recounted)$output_list
tables_recount_table(tk_data_recounted)$process_list
The computation should be an average of each sub-series probability.
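A sketch of that hand computation on the recounted TK data, assuming the sub_series, recount_sequence, and recount_recode_numeric columns used in the running-probability code below:

# Sketch: compute the target probability within each sub-series and sequence,
# then average those sub-series probabilities into one before and one after value.
tk_sub_series_probs <- tk_data_recounted %>%
  dplyr::filter(recount_sequence != "R") %>%
  dplyr::group_by(sub_series, recount_sequence) %>%
  dplyr::summarize(p_target = mean(recount_recode_numeric), .groups = "drop")
tk_sub_series_probs
tk_sub_series_probs %>%
  dplyr::group_by(recount_sequence) %>%
  dplyr::summarize(mean_p_target = mean(p_target))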
The Reinforcinator comes with deleter and combiner functions, as well as a partner recoder.
combiner(elevator,BEH,"o","x")
deleter(elevator,BEH,"o")
partner_recoder(two_person_picture,BEH,PART,"A","FRIEND")
# want to see the difference between the two
split_df_1 <- group_splitter(two_person_picture, behaviorstream = BEH, behavior = "o",
                             consequence = "A", group = "TAR")
split_df_2 <- group_splitter2(two_person_picture, behaviorstream = BEH, behavior = "o",
                              consequence = "A", group1 = "TAR", group2 = "VIDELT",
                              filt = TRUE)
group_split_recounter(split_df_1, "BEH", "o", "A", "TAR")
## having issues here ...
# Error in if (data[n_obs, behaviorstream] == consequence) { : argument is of length zero
group_split_recounter(split_df_2, "BEH", "o", "A", "TAR") # took out gsr 2
test_df <- recounter(two_person_picture, BEH, "o", "A")$recounted_data_frame
test_df <- test_df %>%
  group_by(sub_series) %>%
  mutate(sub_series_sum = cumsum(recount_recode_numeric),           # uses the total sum ... not running
         sub_series_total = max(recount_stream_index),
         sub_series_cum_run_prob = sub_series_sum / sub_series_total, # total sum as denominator
         sub_series_run_prob = sub_series_sum / recount_stream_index) %>%
  ungroup()

# Plot shows the main probabilities (which don't change)
# but also shows the shifting of sequence (the colors),
# so the average value changes for each sub-series (e.g., Before [red] takes over)
ggplot(test_df, aes(x = recount_stream_index, y = sub_series_run_prob,
                    color = recount_sequence, group = sub_series)) +
  geom_point() +
  facet_grid(~ sub_series) +
  ggtitle("Running Sequence Probabilities By Sub-Series") +
  xlab("Observation Sequence") +
  ylab("Running Probability")
# Now we want the plot to show the average value before and after for each sub-series.
# If 4 sub-series, then 4 before probabilities and 4 after probabilities.
# These will be the average by sequence, for each sub-series.
sum_one <- test_df %>%
  group_by(sub_series, recount_sequence) %>%
  summarize(sub_series_mean = mean(sub_series_run_prob)) %>%
  ungroup()
# merge_plot <- left_join(test_df, sum_one, by = "sub_series")
ggplot(filter(sum_one, recount_sequence != "R"),
       aes(x = sub_series, y = sub_series_mean, color = recount_sequence)) +
  geom_point() +
  geom_line() +
  ggtitle("Average Sub-Series Probabilities By Sequence") +
  xlab("Sub-Series") +
  ylab("Average Sequence Probabilities")
## Now we want the original series plotted (no coloring),
# then we add in the AVERAGE for each sequence (across the sub-series)
overall_average <- sum_one %>%
  group_by(recount_sequence) %>%
  summarize(mean_sub_mean = mean(sub_series_mean)) %>%
  ungroup() %>%
  filter(recount_sequence != "R")
average_df <- left_join(test_df, overall_average, by = "recount_sequence")
ggplot(average_df, aes(x = recount_stream_index, y = sub_series_run_prob,
                       color = recount_sequence)) +
  geom_point() +
  geom_line(aes(y = mean_sub_mean)) +
  ggtitle("Overall Sequence Average") +
  xlab("Observation Sequence") +
  ylab("Running Probabilities of Target")
require(readr)
sub1 <- read_csv("C:/Users/delan/Dropbox/My PC (LAPTOP-AEGRJVG9)/Desktop/Sage Analysis/jamedat3_sub1.csv")
sub2 <- read_csv("C:/Users/delan/Dropbox/My PC (LAPTOP-AEGRJVG9)/Desktop/Sage Analysis/jamedat3_sub2.csv")
sub2_clean <- sub2 %>% filter(!is.na(BEH))
sub3 <- read_csv("C:/Users/delan/Dropbox/My PC (LAPTOP-AEGRJVG9)/Desktop/Sage Analysis/jamedat3_sub3.csv")
dim(sub3)
require(tidyverse)
sub3 <- sub3 %>% filter(!is.na(BEH))
sub3_clean <- na.omit(sub3)
dim(sub3)
write_csv(sub3, "jamesdat3_sub3c.csv")
require(tidyverse)
test_sub2_df <- recounter(sub2_clean, BEH, "OA", "A")$recounted_data_frame
test_sub2_df <- test_sub2_df %>%
  group_by(sub_series) %>%
  mutate(sub_series_sum = cumsum(recount_recode_numeric),           # uses the total sum ... not running
         sub_series_total = max(recount_stream_index),
         sub_series_cum_run_prob = sub_series_sum / sub_series_total, # total sum as denominator
         sub_series_run_prob = sub_series_sum / recount_stream_index) %>%
  ungroup()
# Running sequence probabilities by sub-series (same computation as above, for sub2)
ggplot(test_sub2_df, aes(x = recount_stream_index, y = sub_series_run_prob,
                         color = recount_sequence, group = sub_series)) +
  geom_point() +
  facet_grid(~ sub_series) +
  ggtitle("Running Sequence Probabilities By Sub-Series") +
  xlab("Observation Sequence") +
  ylab("Running Probability")
sum_one_sub2 <- test_sub2_df %>%
  group_by(sub_series, recount_sequence) %>%
  summarize(sub_series_mean = mean(sub_series_run_prob)) %>%
  ungroup()
ggplot(filter(sum_one_sub2, recount_sequence != "R"),
       aes(x = sub_series, y = sub_series_mean, color = recount_sequence)) +
  geom_point() +
  geom_line() +
  ggtitle("Average Sub-Series Probabilities By Sequence") +
  xlab("Sub-Series") +
  ylab("Average Sequence Probabilities")
require(tidyverse)
test_sub3_df <- recounter(sub3_clean, BEH, "OA", "A")$recounted_data_frame
test_sub3_df <- test_sub3_df %>%
  group_by(sub_series) %>%
  mutate(sub_series_sum = cumsum(recount_recode_numeric),           # uses the total sum ... not running
         sub_series_total = max(recount_stream_index),
         sub_series_cum_run_prob = sub_series_sum / sub_series_total, # total sum as denominator
         sub_series_run_prob = sub_series_sum / recount_stream_index) %>%
  ungroup()
# Running sequence probabilities by sub-series (same computation as above, for sub3)
ggplot(test_sub3_df, aes(x = recount_stream_index, y = sub_series_run_prob,
                         color = recount_sequence, group = sub_series)) +
  geom_point() +
  facet_grid(~ sub_series) +
  ggtitle("Running Sequence Probabilities By Sub-Series") +
  xlab("Observation Sequence") +
  ylab("Running Probability")
sum_one_sub3 <- test_sub3_df %>%
  group_by(sub_series, recount_sequence) %>%
  summarize(sub_series_mean = mean(sub_series_run_prob)) %>%
  ungroup()
ggplot(filter(sum_one_sub3, recount_sequence != "R"),
       aes(x = sub_series, y = sub_series_mean, color = recount_sequence)) +
  geom_point() +
  geom_line() +
  ggtitle("Average Sub-Series Probabilities By Sequence") +
  xlab("Sub-Series") +
  ylab("Average Sequence Probabilities")
recounter_desc <- function(list, behaviorstream, behavior, consequence, actor) {
  list %>%
    map(~ Recounter5(.x, behaviorstream, behavior, consequence, actor)$descriptive_statistics)
}
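A usage sketch, assuming Recounter5 is the internal recounting routine this helper wraps and that the list produced by group_splitter is the intended input:

# Sketch: apply recounter_desc to the split list, yielding one set of
# descriptive statistics per group. Recounter5 and the exact argument
# types are assumptions here, not confirmed package exports.
desc_by_group <- recounter_desc(group_split_reinforcement_2person,
                                "BEH", "o", "A", actor = "TAR")
desc_by_group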