require(Hmisc)
loocv_pos <-
function(x, y, threshold = 0.01) {
  # Leave-one-out cross-validated prediction of behaviour `y` from edge-wise
  # connectivity `x`, using only edges positively correlated with behaviour
  # in each training fold (connectome-based predictive modelling, positive
  # network only).
  #
  # Args:
  #   x:         numeric matrix or data frame; subjects in rows, edges in
  #              columns.
  #   y:         numeric vector of behavioural scores, one per subject.
  #   threshold: p-value cutoff for edge selection (default 0.01).
  #
  # Returns a list with:
  #   true  - observed y for each left-out subject
  #   pred  - LOOCV-predicted y
  #   cor   - Pearson correlation between true and pred
  #   p     - two-sided p-value of that correlation
  #   significant.positive.edges - union over folds of selected edge indices
  n <- nrow(x)
  p <- ncol(x)
  pred <- numeric(n)
  true <- numeric(n)
  selected <- vector("list", n)
  # Column 1 is y; columns 2..(p+1) are the edges.
  input.data <- data.frame(y = y, x = x)

  for (k in seq_len(n)) {
    train <- input.data[-k, ]
    test <- input.data[k, ]

    ## Feature selection: Pearson correlation of each edge with behaviour in
    ## the training fold. stats::cor.test gives the same two-sided t-test
    ## p-value as Hmisc::rcorr, without the extra dependency.
    r.value <- numeric(p)
    p.value <- numeric(p)
    for (i in seq_len(p)) {
      ct <- cor.test(train$y, train[[i + 1]], method = "pearson")
      r.value[i] <- unname(ct$estimate)
      p.value[i] <- ct$p.value
    }
    # which() drops NAs, so degenerate edges (NA r or p) are never selected.
    selected[[k]] <- which(p.value < threshold & r.value > 0)

    ## Collapse the selected edges into a single strength score per subject.
    ## drop = FALSE keeps the matrix shape even when 0 or 1 edges survive.
    train.strength <-
      rowSums(as.matrix(train[, selected[[k]] + 1, drop = FALSE]))
    test.strength <-
      rowSums(as.matrix(test[, selected[[k]] + 1, drop = FALSE]))

    ## Predict the left-out subject from a simple linear model on the
    ## strength score. With no selected edges the model is singular and the
    ## slope is NA, giving an NA prediction (as in the original).
    fit <- lm(y ~ x, data = data.frame(y = train$y, x = train.strength),
              singular.ok = TRUE)
    b <- coef(fit)
    pred[k] <- b[1] + b[2] * test.strength
    true[k] <- test$y
    print(paste0("pred=", round(pred[k], 2), " true=", true[k]))
  }

  list(
    true = true,
    pred = pred,
    cor = cor(true, pred),
    p = cor.test(true, pred, method = "pearson")$p.value,
    significant.positive.edges = Reduce(union, selected)
  )
}
# Add the following code to your website.
# For more information on customizing the embed code, read Embedding Snippets.