#' Sets up the test data so that it is in the correct format, then computes
#' everything needed to predict on the test data, without running the final
#' prediction itself. To do this:
#' 1. Run pairwise prediction on the training data.
#' 2. Run pairwise prediction on the test data.
#' 3. Compute metafeatures on the test data.
#' 4. Compute metafeature weights for the test data using the learned model.
#' @param inputDataTest An object of the IntLimData class corresponding to the test set.
#' @param model An object of the ModelResults class corresponding to the optimized model.
#' @param k The number of nearest neighbors to consider in localerr. Default = 2.
#' @param eigStep The number of eigenvectors to step by during the evaluation
#' in localerr.
#' Note that this must be less than the number of samples in localerr. Default = 1.
#' @param colIdInd The ID of the column that has the analyte IDs for the
#' independent variable. If blank, then the existing IDs are used.
#' @param colIdOut The ID of the column that has the analyte IDs for the
#' outcome variable. If blank, then the existing IDs are used.
#' @param useCutoff Whether or not to use the cutoff for prediction. Default is FALSE.
#' @param covar A vector of covariate names. Default is an empty vector.
#' @return A list containing the metafeature weights (weights), the pairwise
#' predictions on the test data (individualPredictors), the prediction cutoffs
#' (predictionCutoffs), the test metafeatures (metafeatures), the final covariate
#' names (covar), and the reformatted test data (inputDataTest).
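#' @examples
#' \dontrun{
#' # Minimal sketch (not run). Assumes a test IntLimData object `testData` and a
#' # trained ModelResults object `fit`; the object names are illustrative.
#' setup <- DoTestSetupOnly(inputDataTest = testData, model = fit)
#' names(setup)
#' setup$weights             # metafeature weights for each test sample and pair
#' setup$predictionCutoffs   # min/max prediction cutoffs learned from the model
#' }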
#' @export
DoTestSetupOnly <- function(inputDataTest, model,
k = 2, eigStep = 1,
colIdInd = "databaseId",
colIdOut = "databaseId",
useCutoff = FALSE,
covar = c()){
# Get list of metafeatures.
metaFeatureList <- names(model@model.input@metaFeatures)
pairList <- colnames(model@model.input@metaFeatures[[1]])
# Calculate prediction cutoffs.
predictionCutoffs <- CalculatePredictionCutoffs(modelResults = model)
# Re-set names: columns to the test sample names, rows to the training analyte names.
colnames(inputDataTest@analyteType1) <- rownames(inputDataTest@sampleMetaData)
rownames(inputDataTest@analyteType1) <- rownames(model@model.input@input.data@analyteType1)
colnames(inputDataTest@analyteType2) <- rownames(inputDataTest@sampleMetaData)
rownames(inputDataTest@analyteType2) <- rownames(model@model.input@input.data@analyteType2)
# Perform one-hot-encoding on the covariates.
encoding <- GENN::OneHotEncoding(covar = covar, inputData = inputDataTest)
if(length(encoding$covar) > 0 && encoding$covar[1] != ""){
covar <- encoding$covar
}
# For any covariates missing from the test data, fill them in.
missingCovar <- setdiff(model@model.input@covariates, covar)
# Drop any empty covariate names.
missingCovar <- setdiff(missingCovar, "")
if(length(missingCovar) > 0){
covar <- c(covar, missingCovar)
encoding$sampleMetaData[,missingCovar] <- matrix(rep(0, length(missingCovar) *
nrow(encoding$sampleMetaData)),
ncol = length(missingCovar),
nrow = nrow(encoding$sampleMetaData))
}
inputDataTest@sampleMetaData <- encoding$sampleMetaData
# Run pairwise prediction for the training and test data.
pred.cv <- GENN::RunPairwisePrediction(inputResults = model@model.input@model.properties,
inputData = model@model.input@input.data,
stype = model@model.input@stype,
covar = covar,
independentVarType = model@model.input@independent.var.type,
outcomeType = model@model.input@outcome)
pred.cv.test <- GENN::RunPairwisePrediction(inputResults = model@model.input@model.properties,
inputData = inputDataTest,
stype = model@model.input@stype,
covar = covar,
independentVarType = model@model.input@independent.var.type,
outcomeType = model@model.input@outcome)
# Get metafeatures for testing.
traindat <- as.matrix(pred.cv[,pairList])
testdat <- as.matrix(pred.cv.test[,pairList])
if(nrow(pred.cv) == 1){
traindat <- t(traindat)
}
if(nrow(pred.cv.test) == 1){
testdat <- t(testdat)
}
metafeaturesTest <- GENN::GetTestMetaFeatures(predictionsTrain = traindat,
predictionsTest = testdat,
stype = model@model.input@stype,
inputDataTrain = model@model.input@input.data,
inputDataTest = inputDataTest,
metaFeatureList = metaFeatureList,
k = k, eigStep = eigStep,
modelStats = model@model.input@model.properties,
colIdInd = colIdInd,
colIdOut = colIdOut)
# Compute the metafeature weights used to combine the pairwise predictors.
weights <- ComputeMetaFeatureWeights(modelResults = model,
metaFeatures = metafeaturesTest)
return(list(weights = weights, individualPredictors = pred.cv.test,
predictionCutoffs = predictionCutoffs, metafeatures = metafeaturesTest,
covar = covar, inputDataTest = inputDataTest))
}
#' Sets up the test data so that it is in the correct format, then
#' runs prediction on the test data. To do this:
#' 1. Run pairwise prediction on the training data.
#' 2. Run pairwise prediction on the test data.
#' 3. Compute metafeatures on the test data.
#' 4. Predict the test values using the learned model.
#' @param inputDataTest An object of the IntLimData class corresponding to the test set.
#' @param model An object of the ModelResults class corresponding to the optimized model.
#' @param k The number of nearest neighbors to consider in localerr. Default = 2.
#' @param eigStep The number of eigenvectors to step by during the evaluation
#' in localerr.
#' Note that this must be less than the number of samples in localerr. Default = 1.
#' @param colIdInd The ID of the column that has the analyte IDs for the
#' independent variable. If blank, then the existing IDs are used.
#' @param colIdOut The ID of the column that has the analyte IDs for the
#' outcome variable. If blank, then the existing IDs are used.
#' @param useCutoff Whether or not to use the cutoff for prediction. Default is FALSE.
#' @param covar A vector of covariate names. Default is an empty vector.
#' @param averaging If TRUE, predictors are combined by averaging rather than by
#' retaining the same functional form for both the input and the output. Default is FALSE.
#' @param zeroOut If TRUE, predictors falling outside the allowed range (defined by
#' the prediction cutoffs) are given zero weight. Default is FALSE.
#' @return A vector of final prediction values for the test data.
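#' @examples
#' \dontrun{
#' # Minimal sketch (not run). Assumes a test IntLimData object `testData` and a
#' # trained ModelResults object `fit`; the object names are illustrative.
#' preds <- DoTestSetupAndPrediction(inputDataTest = testData, model = fit,
#'                                   useCutoff = TRUE, zeroOut = TRUE)
#' head(preds)
#' }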
#' @export
DoTestSetupAndPrediction <- function(inputDataTest, model,
k = 2, eigStep = 1,
colIdInd = "databaseId",
colIdOut = "databaseId",
useCutoff = FALSE,
covar = c(), averaging = FALSE,
zeroOut = FALSE){
# Get list of metafeatures.
metaFeatureList <- names(model@model.input@metaFeatures)
pairList <- colnames(model@model.input@metaFeatures[[1]])
# Calculate prediction cutoffs.
predictionCutoffs <- CalculatePredictionCutoffs(modelResults = model)
# Re-set names: columns to the test sample names, rows to the training analyte names.
colnames(inputDataTest@analyteType1) <- rownames(inputDataTest@sampleMetaData)
rownames(inputDataTest@analyteType1) <- rownames(model@model.input@input.data@analyteType1)
colnames(inputDataTest@analyteType2) <- rownames(inputDataTest@sampleMetaData)
rownames(inputDataTest@analyteType2) <- rownames(model@model.input@input.data@analyteType2)
# Perform one-hot-encoding on the covariates.
encoding <- GENN::OneHotEncoding(covar = covar, inputData = inputDataTest)
if(length(encoding$covar) > 0 && encoding$covar[1] != ""){
covar <- encoding$covar
}
# For any covariates missing from the test data, fill them in.
missingCovar <- setdiff(model@model.input@covariates, covar)
# Drop any empty covariate names.
missingCovar <- setdiff(missingCovar, "")
if(length(missingCovar) > 0){
covar <- c(covar, missingCovar)
encoding$sampleMetaData[,missingCovar] <- matrix(rep(0, length(missingCovar) *
nrow(encoding$sampleMetaData)),
ncol = length(missingCovar),
nrow = nrow(encoding$sampleMetaData))
}
inputDataTest@sampleMetaData <- encoding$sampleMetaData
# Run pairwise prediction for the training and test data.
pred.cv <- GENN::RunPairwisePrediction(inputResults = model@model.input@model.properties,
inputData = model@model.input@input.data,
stype = model@model.input@stype,
covar = covar,
independentVarType = model@model.input@independent.var.type,
outcomeType = model@model.input@outcome)
pred.cv.test <- GENN::RunPairwisePrediction(inputResults = model@model.input@model.properties,
inputData = inputDataTest,
stype = model@model.input@stype,
covar = covar,
independentVarType = model@model.input@independent.var.type,
outcomeType = model@model.input@outcome)
# Get metafeatures for testing.
traindat <- as.matrix(pred.cv[,pairList])
testdat <- as.matrix(pred.cv.test[,pairList])
if(nrow(pred.cv) == 1){
traindat <- t(traindat)
}
if(nrow(pred.cv.test) == 1){
testdat <- t(testdat)
}
metafeaturesTest <- GENN::GetTestMetaFeatures(predictionsTrain = traindat,
predictionsTest = testdat,
stype = model@model.input@stype,
inputDataTrain = model@model.input@input.data,
inputDataTest = inputDataTest,
metaFeatureList = metaFeatureList,
k = k, eigStep = eigStep,
modelStats = model@model.input@model.properties,
colIdInd = colIdInd,
colIdOut = colIdOut)
# Compute metafeature weights, then predict the testing values.
weights <- ComputeMetaFeatureWeights(modelResults = model,
metaFeatures = metafeaturesTest)
predictions <- Predict(pairs = model@pairs,
targets = unlist(lapply(model@pairs, function(pair){
return(strsplit(pair, "__")[[1]][2])})),
sources = unlist(lapply(model@pairs, function(pair){
return(strsplit(pair, "__")[[1]][1])})),
inputData = inputDataTest,
weights = weights,
model = model,
useCutoff = useCutoff,
minCutoff = predictionCutoffs$min,
maxCutoff = predictionCutoffs$max,
outcomeType = model@model.input@outcome,
independentVarType = model@model.input@independent.var.type,
averaging = averaging, individualPredictors = pred.cv.test,
zeroOut = zeroOut)
return(predictions)
}
#' Sets up the test data, then predicts the test values using the learned model
#' with all pairwise predictors weighted equally (i.e. no metafeature weighting).
#' @param inputDataTest An object of the IntLimData class corresponding to the test set.
#' @param model An object of the ModelResults class corresponding to the optimized model.
#' @param k The number of nearest neighbors to consider in localerr. Default = 2.
#' @param eigStep The number of eigenvectors to step by during the evaluation
#' in localerr.
#' Note that this must be less than the number of samples in localerr. Default = 1.
#' @param colIdInd The ID of the column that has the analyte IDs for the
#' independent variable. If blank, then the existing IDs are used.
#' @param colIdOut The ID of the column that has the analyte IDs for the
#' outcome variable. If blank, then the existing IDs are used.
#' @param useCutoff Whether or not to use the cutoff for prediction. Default is FALSE.
#' @param covar A vector of covariate names. Default is an empty vector.
#' @param averaging If TRUE, predictors are combined by averaging rather than by
#' retaining the same functional form for both the input and the output. Default is FALSE.
#' @param zeroOut If TRUE, predictors falling outside the allowed range (defined by
#' the prediction cutoffs) are given zero weight. Default is FALSE.
#' @return A vector of final prediction values for the test data.
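#' @examples
#' \dontrun{
#' # Minimal sketch (not run). Assumes a test IntLimData object `testData` and a
#' # trained ModelResults object `fit`; the object names are illustrative.
#' preds <- DoUnweightedTestSetupAndPrediction(inputDataTest = testData,
#'                                             model = fit, useCutoff = TRUE)
#' head(preds)
#' }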
#' @export
DoUnweightedTestSetupAndPrediction <- function(inputDataTest, model,
k = 2, eigStep = 1,
colIdInd = "databaseId",
colIdOut = "databaseId",
useCutoff = FALSE,
covar = c(), averaging = FALSE,
zeroOut = FALSE){
# Calculate prediction cutoffs.
predictionCutoffs <- CalculatePredictionCutoffs(modelResults = model)
# Re-set names: columns to the test sample names, rows to the training analyte names.
colnames(inputDataTest@analyteType1) <- rownames(inputDataTest@sampleMetaData)
rownames(inputDataTest@analyteType1) <- rownames(model@model.input@input.data@analyteType1)
colnames(inputDataTest@analyteType2) <- rownames(inputDataTest@sampleMetaData)
rownames(inputDataTest@analyteType2) <- rownames(model@model.input@input.data@analyteType2)
# Perform one-hot-encoding on the covariates.
encoding <- GENN::OneHotEncoding(covar = covar, inputData = inputDataTest)
if(length(encoding$covar) > 0 && encoding$covar[1] != ""){
covar <- encoding$covar
}
# For any covariates missing from the test data, fill them in.
missingCovar <- setdiff(model@model.input@covariates, covar)
# Drop any empty covariate names.
missingCovar <- setdiff(missingCovar, "")
if(length(missingCovar) > 0){
covar <- c(covar, missingCovar)
encoding$sampleMetaData[,missingCovar] <- matrix(rep(0, length(missingCovar) *
nrow(encoding$sampleMetaData)),
ncol = length(missingCovar),
nrow = nrow(encoding$sampleMetaData))
}
inputDataTest@sampleMetaData <- encoding$sampleMetaData
# Run pairwise prediction on the test data.
pred.cv.test <- GENN::RunPairwisePrediction(inputResults = model@model.input@model.properties,
inputData = inputDataTest,
stype = model@model.input@stype,
covar = covar,
independentVarType = model@model.input@independent.var.type,
outcomeType = model@model.input@outcome)
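# Build a unit-weight matrix so that every pairwise predictor contributes with equal weight.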
weights <- matrix(rep(1, ncol(model@model.input@edge.wise.prediction)
* nrow(inputDataTest@sampleMetaData)),
ncol = ncol(model@model.input@edge.wise.prediction),
nrow = nrow(inputDataTest@sampleMetaData))
colnames(weights) <- colnames(model@model.input@edge.wise.prediction)
rownames(weights) <- rownames(inputDataTest@sampleMetaData)
predictions <- Predict(pairs = model@pairs,
targets = unlist(lapply(model@pairs, function(pair){
return(strsplit(pair, "__")[[1]][2])})),
sources = unlist(lapply(model@pairs, function(pair){
return(strsplit(pair, "__")[[1]][1])})),
inputData = inputDataTest,
weights = weights,
model = model,
useCutoff = useCutoff,
minCutoff = predictionCutoffs$min,
maxCutoff = predictionCutoffs$max,
outcomeType = model@model.input@outcome,
independentVarType = model@model.input@independent.var.type,
averaging = averaging, individualPredictors = pred.cv.test,
zeroOut = zeroOut)
return(predictions)
}
#' Splits input data into training and testing sets given a specified
#' set of testing samples. Alternatively, separate training and testing
#' sets can be read in prior to training. However, this function is a
#' useful helper for cross-validation.
#' @param inputData An object of the IntLimData class.
#' @param testingSamples A vector of sample names to place in the testing set;
#' all remaining samples are assigned to the training set.
#' @return A list of two IntLimData objects: a training set and a testing set.
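#' @examples
#' \dontrun{
#' # Minimal sketch (not run). Assumes an IntLimData object `inputData`; the
#' # sample names are illustrative.
#' split <- SplitTrainingAndTesting(inputData = inputData,
#'                                  testingSamples = c("Sample1", "Sample2"))
#' dim(split$training@sampleMetaData)
#' dim(split$testing@sampleMetaData)
#' }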
#' @export
SplitTrainingAndTesting <- function(inputData, testingSamples){
# Initialize training and testing data to be identical to original data sets.
inputDataTrain <- inputData
inputDataTest <- inputData
# Remove testing sample from training data.
trainingSamples <- setdiff(rownames(inputDataTrain@sampleMetaData), testingSamples)
inputDataTrain@analyteType1 <- as.matrix(inputDataTrain@analyteType1[,trainingSamples])
colnames(inputDataTrain@analyteType1) <- trainingSamples
inputDataTrain@analyteType2 <- as.matrix(inputDataTrain@analyteType2[,trainingSamples])
colnames(inputDataTrain@analyteType2) <- trainingSamples
inputDataTrain@sampleMetaData <- inputDataTrain@sampleMetaData[trainingSamples,]
rownames(inputDataTrain@sampleMetaData) <- trainingSamples
# Retain only testing sample for testing data.
inputDataTest@analyteType1 <- as.matrix(inputDataTest@analyteType1[,testingSamples])
colnames(inputDataTest@analyteType1) <- testingSamples
inputDataTest@analyteType2 <- as.matrix(inputDataTest@analyteType2[,testingSamples])
colnames(inputDataTest@analyteType2) <- testingSamples
inputDataTest@sampleMetaData <- as.data.frame(inputDataTest@sampleMetaData[testingSamples,])
rownames(inputDataTest@sampleMetaData) <- testingSamples
return(list(training = inputDataTrain, testing = inputDataTest))
}
#' Run a prediction on new data using the graph learning model.
#' @param pairs A list of pairs to include in the composite model.
#' @param targets Target (outcome) analytes for all pairs.
#' @param sources Source (independent) analytes for all pairs.
#' @param inputData The input testing data, which should be of an
#' IntLimData class type.
#' @param model The learned model, an object of the modelResults class.
#' @param minCutoff Minimum cutoff for the prediction.
#' @param maxCutoff Maximum cutoff for the prediction.
#' @param useCutoff Whether or not to use the cutoff for prediction. Default is FALSE.
#' @param useActivation Whether or not to use the activation function if the phenotype
#' to predict is a factor. Default is TRUE. We set to FALSE during training.
#' @param independentVarType The independent variable type (1 or 2).
#' @param outcomeType The outcome type (1 or 2).
#' @param averaging If TRUE, predictors are combined by averaging rather than by
#' retaining the same functional form for both the input and the output. Default is FALSE.
#' @param individualPredictors The values of the individual predictors.
#' If set to NULL (default), the edge-wise predictions in the model input
#' are used. This variable only needs to be set when inputting test data.
#' @param zeroOut If TRUE, predictors falling outside the allowed range (between
#' minCutoff and maxCutoff) are given zero weight. Default is FALSE.
#' @param weights The weight of each predictor.
#' @return A vector of predictions
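#' @examples
#' \dontrun{
#' # Minimal sketch (not run). Assumes a trained ModelResults object `fit`, a
#' # test IntLimData object `testData`, a weight matrix `weights` (e.g. from
#' # ComputeMetaFeatureWeights), and pairwise test predictions `testPairwise`
#' # (e.g. from GENN::RunPairwisePrediction); all names are illustrative.
#' cutoffs <- CalculatePredictionCutoffs(modelResults = fit)
#' preds <- Predict(pairs = fit@pairs,
#'                  targets = sapply(strsplit(fit@pairs, "__"), `[`, 2),
#'                  sources = sapply(strsplit(fit@pairs, "__"), `[`, 1),
#'                  inputData = testData, weights = weights, model = fit,
#'                  minCutoff = cutoffs$min, maxCutoff = cutoffs$max,
#'                  outcomeType = fit@model.input@outcome,
#'                  independentVarType = fit@model.input@independent.var.type,
#'                  individualPredictors = testPairwise)
#' }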
#' @export
Predict <- function(pairs, targets, sources, inputData, weights, model, minCutoff, maxCutoff, useCutoff = FALSE,
useActivation = TRUE, independentVarType, outcomeType, averaging = FALSE,
individualPredictors = NULL, zeroOut = FALSE){
Y.pred <- NULL
# If averaging, combine the pairwise predictions by taking the mean of the weighted values.
if(averaging == TRUE){
individualModelPredictions <- individualPredictors
if(is.null(individualPredictors)){
individualModelPredictions <- model@model.input@edge.wise.prediction
}
if(pairs[1] %in% rownames(individualModelPredictions)){
individualModelPredictions <- t(individualModelPredictions)
weights <- t(weights)
}
predictionSubmatrix <- individualModelPredictions[,pairs]
wt <- weights[,pairs]
# Zero out predictors outside the range.
if(zeroOut == TRUE){
outOfRangePredictions <- union(which(predictionSubmatrix < minCutoff),
which(predictionSubmatrix > maxCutoff))
wt[outOfRangePredictions] <- 0
}
Y.pred <- predictionSubmatrix * wt
if(!is.null(nrow(predictionSubmatrix))){
Y.pred <- rowMeans(predictionSubmatrix * wt)
}else if(length(pairs) > 1){
Y.pred <- mean(predictionSubmatrix * wt)
}
# Limit prediction to cutoffs.
if(useCutoff == TRUE){
Y.pred[which(Y.pred < minCutoff)] <- minCutoff
Y.pred[which(Y.pred > maxCutoff)] <- maxCutoff
}
}else{
# Set variables for further analysis.
covar <- model@model.input@covariates
covariates <- model@model.input@model.properties
analyteTgtVals <- inputData@analyteType1
if(outcomeType == 2){
analyteTgtVals <- inputData@analyteType2
}
analyteSrcVals <- inputData@analyteType2
if(independentVarType == 1){
analyteSrcVals <- inputData@analyteType1
}
covariateVals <- inputData@sampleMetaData
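# Extract the weights for the requested pairs, handling both vector and matrix inputs.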
wt <- NULL
if(is.null(ncol(weights)) || ncol(weights) == 1){
wt <- as.matrix(weights[pairs])
n <- 1
}else{
wt <- as.matrix(weights[,pairs])
n <- nrow(weights)
}
covariatePairs <- covariates[pairs,]
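# Extract the individual pairwise predictions, either supplied directly or taken
# from the model's edge-wise predictions.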
individualModelPredictions <- NULL
if(!is.null(individualPredictors)){
if(is.null(ncol(individualPredictors)) || ncol(individualPredictors) == 1){
individualModelPredictions <- as.matrix(individualPredictors[pairs])
}else{
individualModelPredictions <- as.matrix(individualPredictors[,pairs])
}
}else{
if(is.null(ncol(model@model.input@edge.wise.prediction)) || ncol(model@model.input@edge.wise.prediction) == 1){
individualModelPredictions <- as.matrix(model@model.input@edge.wise.prediction[pairs])
}else{
individualModelPredictions <- as.matrix(model@model.input@edge.wise.prediction[,pairs])
}
}
# Zero out predictors outside the range.
if(zeroOut == TRUE){
outOfRangePredictions <- union(which(individualModelPredictions < minCutoff),
which(individualModelPredictions > maxCutoff))
wt[outOfRangePredictions] <- 0
}
# Expand the pairwise model coefficients into sample-by-pair matrices.
if(!is.null(nrow(covariatePairs))){
inter <- t(matrix(rep(covariatePairs[,"(Intercept)"], n), ncol = n))
a <- t(matrix(rep(covariatePairs[,"a"], n), ncol = n))
type <- t(matrix(rep(covariatePairs[,"type"], n), ncol = n))
interactionTerm <- t(matrix(rep(covariatePairs[,"a:type"], n), ncol = n))
if(ncol(wt) == nrow(inter)){
wt <- t(wt)
}
}
tgt <- as.matrix(analyteTgtVals[targets,])
if(!is.null(nrow(covariatePairs)) && ncol(tgt) == nrow(interactionTerm) && nrow(tgt) == ncol(interactionTerm)){
tgt <- t(analyteTgtVals[targets,])
}
src <- as.matrix(analyteSrcVals[sources,])
if(!is.null(nrow(covariatePairs)) && ncol(src) == nrow(interactionTerm) && nrow(src) == ncol(interactionTerm)){
src <- t(analyteSrcVals[sources,])
}
weighted_a1 <- rowSums(wt * tgt)
if(is.null(nrow(wt)) || nrow(wt) == 1){
weighted_a1 <- sum(wt * tgt)
}else if(is.null(ncol(wt)) || ncol(wt) == 1){
weighted_a1 <- wt * tgt
}
# Compute weighted sums of the pairwise model coefficients and covariate terms.
weighted_sum_b0 <- NULL
weighted_sum_b1 <- NULL
weighted_sum_b2 <- NULL
weighted_sum_b3 <- NULL
weighted_sum_covars <- NULL
if(!is.null(nrow(covariatePairs))){
weighted_sum_b0 <- rowSums(wt * inter)
weighted_sum_b2 <- rowSums(wt * type)
# If there is only one sample, take the sum.
# If there is only one predictor, there is nothing to sum over.
if(is.null(nrow(wt)) || nrow(wt) == 1){
weighted_sum_b0 <- sum(wt * inter)
weighted_sum_b2 <- sum(wt * type)
}else if(is.null(ncol(wt)) || ncol(wt) == 1){
weighted_sum_b0 <- wt * inter
weighted_sum_b2 <- wt * type
}
# If there is only one sample, take the sum.
# If there is only one predictor, there is nothing to sum over.
weighted_sum_b3 <- rowSums(wt * interactionTerm * src)
weighted_sum_b1 <- rowSums(wt * a * src)
if(is.null(nrow(wt)) || nrow(wt) == 1){
weighted_sum_b3 <- sum(wt * interactionTerm * src)
weighted_sum_b1 <- sum(wt * a * src)
}else if(is.null(ncol(wt)) || ncol(wt) == 1){
weighted_sum_b3 <- wt * interactionTerm * src
weighted_sum_b1 <- wt * a * src
}
weighted_sum_covars <- rep(0, n)
if(covar[1] != ""){
weighted_sum_each <- lapply(covar, function(c){
covMat <- t(matrix(rep(covariatePairs[,c], n), ncol = n))
covPair <- matrix(rep(covariateVals[,c], length(pairs)), ncol = length(pairs))
if(ncol(wt) == nrow(covMat)){
covMat <- t(covMat)
covPair <- t(covPair)
}
# If there is only one sample, take the sum.
# If there is only one predictor, there is nothing to sum over.
weighted_sum <- rowSums(wt * covMat * covPair)
if(is.null(nrow(wt)) || nrow(wt) == 1){
weighted_sum <- sum(wt * covMat * covPair)
}else if(is.null(ncol(wt)) || ncol(wt) == 1){
weighted_sum <- wt * covMat * covPair
}
return(weighted_sum)
})
weighted_sum_covars <- Reduce('+', weighted_sum_each)
}
}else{
# If there is only one sample, take the sum.
# If there is only one predictor, there is nothing to sum over.
weighted_sum_b0 <- rowSums(wt * t(matrix(rep(covariatePairs["(Intercept)"], n), ncol = n)))
weighted_sum_b1 <- rowSums(wt * t(matrix(rep(covariatePairs["a"], n), ncol = n)))
weighted_sum_b2 <- rowSums(wt * t(matrix(rep(covariatePairs["type"], n), ncol = n)))
if(is.null(nrow(wt)) || nrow(wt) == 1){
weighted_sum_b0 <- sum(wt * t(matrix(rep(covariatePairs["(Intercept)"], n), ncol = n)))
weighted_sum_b1 <- sum(wt * t(matrix(rep(covariatePairs["a"], n), ncol = n)))
weighted_sum_b2 <- sum(wt * t(matrix(rep(covariatePairs["type"], n), ncol = n)))
}else if(is.null(ncol(wt)) || ncol(wt) == 1){
weighted_sum_b0 <- wt * t(matrix(rep(covariatePairs["(Intercept)"], n), ncol = n))
weighted_sum_b1 <- wt * t(matrix(rep(covariatePairs["a"], n), ncol = n))
weighted_sum_b2 <- wt * t(matrix(rep(covariatePairs["type"], n), ncol = n))
}
interactionTerm <- t(matrix(rep(covariatePairs["a:type"], n), ncol = n))
src <- as.matrix(analyteSrcVals[sources,])
if(ncol(src) == nrow(interactionTerm) && nrow(src) == ncol(interactionTerm)){
src <- t(analyteSrcVals[sources,])
}
# If there is only one sample, take the sum.
# If there is only one predictor, there is nothing to sum over.
weighted_sum_b3 <- rowSums(wt * interactionTerm * src)
if(is.null(nrow(wt)) || nrow(wt) == 1){
weighted_sum_b3 <- sum(wt * interactionTerm * src)
}else if(is.null(ncol(wt)) || ncol(wt) == 1){
weighted_sum_b3 <- wt * interactionTerm * src
}
weighted_sum_covars <- rep(0, n)
if(covar[1] != ""){
weighted_sum_each <- lapply(covar, function(c){
# If there is only one sample, take the sum.
# If there is only one predictor, there is nothing to sum over.
weighted_sum <- rowSums(wt * t(matrix(rep(covariatePairs[c], n), ncol = n))
* matrix(rep(covariateVals[c], length(pairs)), ncol = length(pairs)))
if(is.null(nrow(wt)) || nrow(wt) == 1){
weighted_sum <- sum(wt * t(matrix(rep(covariatePairs[c], n), ncol = n))
* matrix(rep(covariateVals[c], length(pairs)), ncol = length(pairs)))
}else if(is.null(ncol(wt)) || ncol(wt) == 1){
weighted_sum <- wt * t(matrix(rep(covariatePairs[c], n), ncol = n)) * matrix(rep(covariateVals[c], length(pairs)), ncol = length(pairs))
}
return(weighted_sum)
})
weighted_sum_covars <- Reduce('+', weighted_sum_each)
}
}
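# Each pairwise model has the form
#   target = b0 + b1*source + b2*phenotype + b3*(source*phenotype) + covariate terms,
# so the weighted sums above are combined by solving for the phenotype:
#   phenotype = (target - b0 - b1*source - covariates) / (b2 + b3*source).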
# Final value.
denom <- weighted_sum_b2 + weighted_sum_b3
denom[which(denom == 0)] <- 0.0001
Y.pred <- (weighted_a1 - weighted_sum_b0 - weighted_sum_b1 - weighted_sum_covars) / denom
# Implement cutoff if desired.
if(useCutoff == TRUE){
Y.pred[which(Y.pred < minCutoff)] <- minCutoff
Y.pred[which(Y.pred > maxCutoff)] <- maxCutoff
}
# Add names.
if(length(Y.pred) == nrow(inputData@sampleMetaData)){
names(Y.pred) <- rownames(inputData@sampleMetaData)
}
}
return(Y.pred)
}