In the Logit Employment Binary Allocation Lalonde Training example, we analyzed optimal allocation when all observable attributes of individuals are used for targeting. Here we consider subgroup allocation, targeting based on a single attribute.
# dev.off(dev.list()["RStudioGD"])
rm(list = ls(all.names = TRUE))
options(knitr.duplicate.label = 'allow')
library(dplyr)
library(tidyr)
library(tibble)
library(forcats)
library(stringr)
library(broom)
library(ggplot2)
library(REconTools)
library(PrjOptiAlloc)
library(knitr)
library(kableExtra)

bl_save_rda = FALSE
bl_save_img = FALSE
spt_img_save <- '../_img/'
spt_img_save_draft <- 'C:/Users/fan/Documents/Dropbox (UH-ECON)/repos/HgtOptiAlloDraft/_img/'
The regression is the same as before. Subgroup allocation is based on the idea of targeting only a subset of individuals, even when we know the marginal effects and needs of all individuals.
# Dataset
data(df_opt_lalonde_training)

# Add a binary variable for whether wage is zero in 1975
dft <- df_opt_lalonde_training %>%
  mutate(re75_zero = case_when(re75 == 0 ~ 1, re75 != 0 ~ 0))

# dft stands for dataframe training
dft <- dft %>%
  mutate(id = X) %>%
  select(-X) %>%
  select(id, everything()) %>%
  mutate(emp78 = case_when(re78 <= 0 ~ 0, TRUE ~ 1)) %>%
  mutate(emp75 = case_when(re75 <= 0 ~ 0, TRUE ~ 1))

# Generate combined black + hispanic status
# 0 = white, 1 = black, 2 = hispanic
dft <- dft %>%
  mutate(race = case_when(black == 1 ~ 1, hisp == 1 ~ 2, TRUE ~ 0))

# Generate age subgroups (two and three groups)
dft <- dft %>%
  mutate(age_m2 = case_when(age <= 23 ~ 1, age > 23 ~ 2)) %>%
  mutate(age_m3 = case_when(age <= 20 ~ 1,
                            age > 20 & age <= 26 ~ 2,
                            age > 26 ~ 3))

dft$trt <- factor(dft$trt, levels = c(0, 1), labels = c("ntran", "train"))
summary(dft)

# X-variables to use on RHS
ls_st_xs <- c('age', 'educ', 'black', 'hisp', 'marr', 'nodeg')
svr_binary <- 'trt'
svr_binary_lb0 <- 'ntran'
svr_binary_lb1 <- 'train'
svr_outcome <- 'emp78'
sdt_name <- 'NSW Lalonde Training'
Run a logit regression with continuous variables and a binary treatment variable. Predict the outcome using the observed continuous variables as well as the observed binary treatment indicator.
# Regress without the binary treatment variable
rs_logit <- glm(as.formula(paste(svr_outcome, "~", paste(ls_st_xs, collapse = "+"))),
                data = dft, family = "binomial")
summary(rs_logit)
dft$p_mpg <- predict(rs_logit, newdata = dft, type = "response")

# Regress with the binary treatment variable
# rs_logit_bi <- glm(as.formula(paste(svr_outcome,
#                                     "~ factor(", svr_binary, ") + ",
#                                     paste(ls_st_xs, collapse="+"))),
#                    data = dft, family = "binomial")
rs_logit_bi <- glm(emp78 ~ age + I(age^2) + factor(age_m2)
                   # + educ + I(educ^2) +
                   # + educ + black + hisp + marr + nodeg
                   + factor(trt) + factor(age_m2)*factor(trt)
                   , data = dft, family = "binomial")
summary(rs_logit_bi)

# Predict using regression results
dft$p_mpg_hp <- predict(rs_logit_bi, newdata = dft, type = "response")

# Compare predicted probabilities with and without the binary treatment variable
scatter <- ggplot(dft, aes(x = p_mpg_hp, y = p_mpg)) +
  geom_point(size = 1) +
  # geom_smooth(method=lm) + # Trend line
  geom_abline(intercept = 0, slope = 1) + # 45 degree line
  labs(title = paste0('Predicted Probabilities ', svr_outcome, ' on ', ls_st_xs,
                      ' with or without hp binary'),
       x = paste0('prediction with ', ls_st_xs, ' and binary ', svr_binary,
                  ' indicator, 1 is high'),
       y = paste0('prediction with only ', ls_st_xs),
       caption = paste0(sdt_name, ' simulated prediction')) +
  theme_bw()
print(scatter)
Now generate two sets of predictions: one where the binary input is set to 0 for everyone, and another where it is set to 1 for everyone, ignoring each individual's observed binary status. Use the same regression results derived above.
Note that, given the example here, the predicted employment probability changes substantially when we switch the binary training status from 0 to 1.
# Previous regression results
summary(rs_logit_bi)

# Two different dataframes, mutate the binary regressor
dft_bi0 <- dft %>% mutate(!!sym(svr_binary) := svr_binary_lb0)
dft_bi1 <- dft %>% mutate(!!sym(svr_binary) := svr_binary_lb1)

# Predict using regression results
dft$p_mpg_hp_bi0 <- predict(rs_logit_bi, newdata = dft_bi0, type = "response")
dft$p_mpg_hp_bi1 <- predict(rs_logit_bi, newdata = dft_bi1, type = "response")

# Predicted probabilities and binary input
scatter <- ggplot(dft, aes(x = p_mpg_hp_bi0)) +
  geom_point(aes(y = p_mpg_hp), size = 4, shape = 4, color = "red") +
  geom_point(aes(y = p_mpg_hp_bi1), size = 2, shape = 8) +
  # geom_smooth(method=lm) + # Trend line
  geom_abline(intercept = 0, slope = 1) + # 45 degree line
  labs(title = paste0('Predicted Probabilities and Binary Input',
                      '\ncross(shape=4)/red is predict actual binary data',
                      '\nstar(shape=8)/black is predict set binary = 1 for all'),
       x = paste0('prediction with ', ls_st_xs, ' and binary ', svr_binary, ' = 0 for all'),
       y = paste0('prediction with ', ls_st_xs, ' and binary ', svr_binary, ' = 1'),
       caption = paste0(sdt_name, ' simulated prediction')) +
  theme_bw()
print(scatter)
What is the difference in predicted probability between binary = 0 and binary = 1? How does that difference relate to the probability of the outcome of interest when binary = 0 for all?
In the binary logit case, the relationship between $A_i$ and $\alpha_i$ is hump-shaped by construction. In the exponential wage case, the relationship is convex and upward sloping.
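To see why, note that under the logistic model the treatment effect on the probability scale is $\alpha_i = \Lambda(X_i'\hat{\beta} + \hat{\tau}_{g(i)}) - \Lambda(X_i'\hat{\beta})$, where $A_i = \Lambda(X_i'\hat{\beta})$, $\Lambda$ is the logistic CDF, and $\hat{\tau}_{g(i)}$ is the age-group-specific log-odds effect of training. As $A_i$ approaches 0 or 1, $\alpha_i$ shrinks toward 0, and it peaks at interior baseline probabilities. The following minimal sketch (not part of the original analysis; `tau` is a hypothetical log-odds shift, not an estimate from the regression above) traces this hump shape in base R:

# Sketch: treatment effect on the probability scale as a function of the
# baseline probability A, for a fixed hypothetical log-odds shift tau.
tau <- 0.5
A <- seq(0.001, 0.999, length.out = 200)
alpha <- plogis(qlogis(A) + tau) - A
plot(A, alpha, type = "l",
     xlab = "A_i, baseline probability without training",
     ylab = "alpha_i, effect of training on probability")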
# Generate gap variable
dft <- dft %>%
  mutate(alpha_i = p_mpg_hp_bi1 - p_mpg_hp_bi0) %>%
  mutate(A_i = p_mpg_hp_bi0)

dft_graph <- dft
dft_graph$age_m2 <- factor(dft_graph$age_m2, labels = c('Age <= 23', 'Age > 23'))

# Titling
title_line1 <- sprintf("Each circle (cross) represents an individual <= age 23 (> age 23)")
title_line2 <- sprintf("Heterogeneous expected outcome (employment probability) with and without training")
title_line3 <- sprintf("Heterogeneity from logistic regression nonlinearity and heterogeneous age group effects")
title <- expression('The joint distribution of'~A[i]~'and'~alpha[i]~','~'Logistic Regression, Lalonde (AER, 1986)')
caption <- paste0('Logistic regression predictions of the employment effects of a training RCT. Data from Lalonde (AER, 1986).')

# Labels
st_x_label <- expression(A[i]~', '~Probability~of~Employment~without~Training~','~'P(train=0)')
st_y_label <- expression(alpha[i]~','~Marginal~Effects~of~Training~','~'P(train=1) - P(train=0)')

# Binary marginal effects and prediction without binary
plt_A_alpha <- dft_graph %>%
  ggplot(aes(x = A_i)) +
  geom_point(aes(y = alpha_i, color = factor(age_m2), shape = factor(age_m2)), size = 4) +
  geom_abline(intercept = 0, slope = 1) + # 45 degree line
  scale_colour_manual(values = c("#69b3a2", "#404080")) +
  labs(subtitle = paste0(title_line1, '\n', title_line2, '\n', title_line3),
       x = st_x_label,
       y = st_y_label,
       caption = caption) +
  theme_bw(base_size = 8) +
  scale_shape_manual(values = c(1, 4)) +
  guides(color = FALSE)

# Labeling
plt_A_alpha$labels$shape <- "Age Subgroups"
print(plt_A_alpha)

if (bl_save_img) {
  snm_cnts <- 'Lalonde_employ_A_alpha_age.png'
  png(paste0(spt_img_save, snm_cnts),
      width = 135, height = 86, units = 'mm', res = 300, pointsize = 7)
  print(plt_A_alpha)
  dev.off()
  png(paste0(spt_img_save_draft, snm_cnts),
      width = 135, height = 86, units = 'mm', res = 300, pointsize = 5)
  print(plt_A_alpha)
  dev.off()
}
# Equal planner weights beta_i across individuals
beta_i <- rep(1/dim(dft)[1], times = dim(dft)[1])

# Planner inequality-aversion parameters rho
ar_rho <- c(-100, -0.001, 0.95)
ar_rho <- 1 - (10^(c(seq(-2, 2, length.out = 30))))
ar_rho <- unique(ar_rho)
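For reference, these $\rho$ values index the planner's inequality aversion. A sketch of the CES/Atkinson-type objective that the binary allocation problem is understood to maximize is shown below (see the PrjOptiAlloc documentation for the exact specification used by the package; $W$ is generic notation for the total number of training slots and is not a variable in the code):

$$ \max_{x_1, \dots, x_N \in \{0,1\}, \; \sum_i x_i \le W} \left( \sum_{i=1}^{N} \beta_i \left( A_i + \alpha_i x_i \right)^{\rho} \right)^{\frac{1}{\rho}}, \qquad \rho \le 1, $$

where $\rho = 1$ corresponds to the utilitarian planner (maximize the average) and $\rho \rightarrow -\infty$ approaches the Rawlsian planner (maximize the minimum); the queue-rank figure below labels these values as $\lambda$.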
Invoke the binary optimal allocation function ffp_opt_anlyz_rhgin_bin, which loops over the rho values.
svr_inpalc <- 'rank'
dft <- cbind(dft, beta_i)
svr_rho_val <- 'rho_val'

ls_bin_solu_all_rhos <-
  ffp_opt_anlyz_rhgin_bin(dft,
                          svr_id_i = 'id',
                          svr_A_i = 'A_i', svr_alpha_i = 'alpha_i', svr_beta_i = 'beta_i',
                          ar_rho = ar_rho,
                          svr_rho = 'rho', svr_rho_val = svr_rho_val,
                          svr_inpalc = svr_inpalc,
                          svr_expout = 'opti_exp_outcome',
                          verbose = TRUE)

df_all_rho <- ls_bin_solu_all_rhos$df_all_rho
df_all_rho_long <- ls_bin_solu_all_rhos$df_all_rho_long

# How many people have different ranks across rhos
it_how_many_vary_rank <- sum(df_all_rho$rank_max - df_all_rho$rank_min)
it_how_many_vary_rank
# Get rank when rho = 1 (utilitarian)
df_all_rho_rho_c1 <- df_all_rho %>% select(id, rho_c1_rk)

# Merge
df_all_rho_long <- df_all_rho_long %>%
  mutate(rho = as.numeric(rho)) %>%
  left_join(df_all_rho_rho_c1, by = 'id')

# Select subset to graph
df_rank_graph <- df_all_rho_long %>%
  mutate(id = factor(id)) %>%
  filter((id == 1) |   # utilitarian rank = 1
         (id == 11) |  # utilitarian rank = 101
         (id == 5) |   # utilitarian rank = 201
         (id == 205) | # utilitarian rank = 301
         (id == 42) |  # utilitarian rank = 401
         (id == 8) |   # utilitarian rank = 501
         (id == 31) |  # utilitarian rank = 601
         (id == 134)   # utilitarian rank = 701
         ) %>%
  mutate(one_minus_rho = 1 - !!sym(svr_rho_val)) %>%
  mutate(rho_c1_rk = factor(rho_c1_rk))

df_rank_graph$rho_c1_rk

df_rank_graph$id <- factor(df_rank_graph$rho_c1_rk,
                           labels = c('Rank= 1 ',  #200
                                      'Rank= 124', #110
                                      'Rank= 245', #95
                                      'Rank= 320', #217
                                      'Rank= 411', #274
                                      'Rank= 503', #101
                                      'Rank= 595',
                                      'Rank= 700'))

# x-labels
x.labels <- c('λ=0.99', 'λ=0.90', 'λ=0', 'λ=-10', 'λ=-100')
x.breaks <- c(0.01, 0.10, 1, 10, 100)

# Titling
title_line1 <- sprintf("Optimal allocation queue vary λ, allocate using only AGE")
title_line2 <- sprintf("Colored lines = different individuals from the NSW training dataset")
title_line3 <- sprintf("Track ranking changes for eight individuals ranked 1, 101, ..., 701 at λ=0.99")

# Graph results
line.plot <- df_rank_graph %>%
  ggplot(aes(x = one_minus_rho, y = !!sym(svr_inpalc),
             group = fct_rev(id), colour = fct_rev(id), size = 2)) +
  # geom_line(aes(linetype = fct_rev(id)), size = 0.75) +
  geom_line(size = 0.5) +
  geom_vline(xintercept = c(1), linetype = "dotted") +
  labs(subtitle = paste0(title_line1, '\n', title_line2, '\n', title_line3),
       x = 'log10 Rescale of λ, Log10(λ)\nλ=1 Utilitarian (Maximize Average), λ=-infty Rawlsian (Maximize Minimum)',
       y = 'Optimal Allocation Queue Rank (1=highest)',
       caption = 'Based on logistic regression of the employment effects of a training RCT. Data from Lalonde (AER, 1986).') +
  scale_x_continuous(trans = 'log10', labels = x.labels, breaks = x.breaks) +
  theme_bw(base_size = 8)

# Labeling
line.plot$labels$colour <- "At λ=0.99, i's"

# Print
print(line.plot)

if (bl_save_img) {
  snm_cnts <- 'Lalonde_employ_rank_age.png'
  png(paste0(spt_img_save, snm_cnts),
      width = 135, height = 86, units = 'mm', res = 300, pointsize = 7)
  print(line.plot)
  dev.off()
  png(paste0(spt_img_save_draft, snm_cnts),
      width = 135, height = 86, units = 'mm', res = 300, pointsize = 5)
  print(line.plot)
  dev.off()
}
# Change variable names so that this can be combined with the logit file later
df_all_rho <- df_all_rho %>%
  rename_at(vars(starts_with("rho_")), funs(str_replace(., "rk", "rk_empage")))
df_all_rho <- df_all_rho %>%
  rename(A_i_empage = A_i, alpha_i_empage = alpha_i, beta_i_empage = beta_i,
         rank_min_empage = rank_min, rank_max_empage = rank_max,
         avg_rank_empage = avg_rank)

# Save file
if (bl_save_rda) {
  df_opt_lalonde_training_empage <- df_all_rho
  usethis::use_data(df_opt_lalonde_training_empage, overwrite = TRUE)
}
What is the relationship between ranking, $A_i$, $\alpha_i$, and individual attributes?
# ggplot.A.alpha.x <- function(svr_x, df,
#                              svr_alpha = 'alpha_i', svr_A = "A_i") {
#
#   scatter <- ggplot(df, aes(x = !!sym(svr_x))) +
#     geom_point(aes(y = alpha_i), size = 4, shape = 4, color = "red") +
#     geom_point(aes(y = A_i), size = 2, shape = 8, color = "blue") +
#     geom_abline(intercept = 0, slope = 1) + # 45 degree line
#     labs(title = paste0('A (blue) and alpha (red) vs x variables=', svr_x),
#          x = svr_x,
#          y = 'Probabilities',
#          caption = paste0(sdt_name, ' simulated prediction')) +
#     theme_bw()
#
#   return(scatter)
# }