ANN with Automatic Chen's Prototype Generation Approach (with Entropy Function)

Challenge:

Demonstrate the efficiency of Chen's algorithm using a control function for prototype generation, which applies an entropy calculation to the subgroups produced while the data set is being reduced. Here the entropy of a group with class proportions p_k is computed as sum_k p_k * log2(p_k), which is 0 for a completely homogeneous group and increasingly negative for a more mixed one.

Results

Load the libraries needed for the experiment and define the hand-coded MLP function :-)!

library(ggplot2)
library(mlbench)
#install.packages("mlbench")
library(caret )
library(ROCR)
library("plyr")
rm(list = setdiff(ls(), lsf.str()))
# Hand-coded implementation of the MLP
# %*% = matrix (dot) product, * = element-wise product
nnet <- function(X, Y, step_size = 0.5, reg = 0.001, h = 10, niteration){

  N <- nrow(X) # number of observations
  K <- ncol(Y) # number of classes
  D <- ncol(X) # input dimensionality
  iteraerrors <- vector()
  err_iter <- vector()
  # random initialization of the parameters
  W <- 0.01 * matrix(rnorm(D*h), nrow = D)
  b <- matrix(0, nrow = 1, ncol = h)
  W2 <- 0.01 * matrix(rnorm(h*K), nrow = h)
  b2 <- matrix(0, nrow = 1, ncol = K)

  # gradient-descent loop to update the weights and biases
  for (i in 1:niteration){
    # hidden layer with ReLU activation
    hidden_layer <- pmax(0, X%*% W + matrix(rep(b,N), nrow = N, byrow = T))
    hidden_layer <- matrix(hidden_layer, nrow = N)
    # Score das Classes
    scores <- hidden_layer%*%W2 + matrix(rep(b2,N), nrow = N, byrow = T)

    # compute and normalize the class probabilities
    exp_scores <- exp(scores)
    probs <- exp_scores / rowSums(exp_scores)

    # compute the loss: softmax cross-entropy plus L2 regularization
    corect_logprobs <- -log(probs)
    data_loss <- sum(corect_logprobs*Y)/N
    reg_loss <- 0.5*reg*sum(W*W) + 0.5*reg*sum(W2*W2)
    loss <- data_loss + reg_loss
    err_iter[i]<-0.5*(loss^2)
    # report progress:
    if (i%%10000 == 0 | i == niteration){
      print(paste("iteration", i,': loss', loss))}

    # gradient on the scores
    dscores <- probs-Y
    dscores <- dscores/N

    # backpropagate the gradients to update the parameters
    dW2 <- t(hidden_layer)%*%dscores
    db2 <- colSums(dscores)
    # next, the hidden layer for the backprop
    dhidden <- dscores%*%t(W2)
    # backprop through the ReLU
    dhidden[hidden_layer <= 0] <- 0
    # finally, compute dW and db
    dW <- t(X)%*%dhidden
    db <- colSums(dhidden)

    # add the regularization term to the gradients
    dW2 <- dW2 + reg *W2
    dW <- dW + reg *W

    # parameter update
    W <- W-step_size*dW
    b <- b-step_size*db
    W2 <- W2-step_size*dW2
    b2 <- b2-step_size*db2
  }

  return(list(W, b, W2, b2,err_iter))
}
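A quick smoke test on toy data confirms the function runs end to end (a sketch; X_toy, Y_toy and the hyper-parameters here are illustrative, not part of the experiment):

set.seed(1)
X_toy <- matrix(rnorm(40), ncol = 2)                        # 20 points, 2 features
y_toy <- sample(1:2, 20, replace = TRUE)                    # 2 classes
Y_toy <- matrix(0, 20, 2); Y_toy[cbind(1:20, y_toy)] <- 1   # one-hot targets
toy_model <- nnet(X_toy, Y_toy, step_size = 0.1, reg = 1e-3, h = 4, niteration = 100)
length(toy_model)  # 5: W, b, W2, b2 and the per-iteration error curve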

Part One:

1) Training the ANN on the original spiral-shaped data and evaluating performance

#install.packages('caret', dependencies = TRUE)


# 1.5 cycles each, with noise
p<-mlbench.spirals(600,1.5,0.05)
library(clusterSim)
#p<-shapes.two.moon(180)
#plot(stm$data,col=rainbow(2)[stm$clusters])
# }
#stm$clusters
# N <- 150 # number of points per class
# D <- 2 # dimensionality
# K <- 2 # number of classes
# X <- data.frame() # data matrix (each row = single example)
# y <- data.frame() # class labels

D <- length(p$x[1,]) # dimensionality

K <-  length(unique(p$classes))# number of classes
N <- length(p$x[,2])/K # number of points per class
#X <- data.frame() # data matrix (each row = single example)
#y <- data.frame() # class labels



#str(p)
#do.call(cbind, p)


 # for (j in (1:K)){
 #   r <- seq(0.05,1,length.out = N) # radius
 #   t <- seq((j-1)*4.7,j*4.7, length.out = N) + rnorm(N, sd = 0.3) # theta
 #   Xtemp <- data.frame(x =r*sin(t) , y = r*cos(t)) 
 #   ytemp <- data.frame(matrix(j, N, 1))
 #   X <- rbind(X, Xtemp)
 #   y <- rbind(y, ytemp)
 # }

data <- data.frame(do.call(cbind, p))
colnames(data) <- c('x','y','label')
# Step 1
#vector <- as.vector(x)
#rep(as.vector(t(y)),3)
# Training Set
# Rate of variation (stopping threshold)
standdeviation <- 3
# Global entropy 
ge <- -0.94
# Local entropy -> at -0.1, only (near-)completely homogeneous groups are accepted
le <- -0.1

# Signed Shannon entropy of the class labels in a partition:
# 0 for a pure group, increasingly negative for a more mixed one.
homogeneidade <- function (nc,y_part,setsize) {

  g <- rep(nc,length(y_part))
  freqs <- table(y_part)/setsize # class proportions
  hom <- sum(freqs * log2(freqs))
  return(hom)

}
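Two worked values make the local-entropy cutoff concrete (illustrative calls):

homogeneidade(1, c(1, 1, 1, 1), 4)  #  0 -> pure group, passes "> le" with le = -0.1
homogeneidade(1, c(1, 1, 2, 2), 4)  # -1 -> maximally mixed group, fails "> le"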

x1 <- p$x

Y <- data$label

spiral_features <- p$x
dist_eucl <- function(x1,x2) sqrt(sum((x1 - x2 ) ^ 2))
new_sample <- c(0,0)
distances <- apply(spiral_features, 1, function(x) dist_eucl(x, new_sample))
distances_sorted <- sort(distances, index.return = T)


#m <- dist(p$x)

#mat <- as.matrix(m)

elementos <- distances
#elementos =c(1,4,7,13,17,23,27,35,37) 
#classes = c(1,1,1,1,2,1,2,2,2)
classes <- Y[distances_sorted$ix]
#elementos <- as.vector(x1)
#classes <- Y 
# length of elementos = training-set size
Nd <- length(elementos)

D = data.frame(elementos, classes)       # D is a data frame of (distance, class) pairs

Synthetic data in its original form:

data_orig <- data
x_min <- min(data[,1])-0.2; x_max <- max(data[,1])+0.2
y_min <- min(data[,2])-0.2; y_max <- max(data[,2])+0.2

# let's visualize the data:
ggplot(data_orig) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Synthetic Training Data - Spiral Shape') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Data pre-processing for classification using an MLP with 2 inputs, a hidden layer (h = 5 in the training call below) and one output node per class.

Original data for training the ANN

X <- as.matrix(data_orig[,1:2])
X <- as.matrix(X)
Y <- matrix(0, N*K, K)
y <- data_orig[,3]
# one-hot encode the class labels
for (i in 1:(N*K)){
  Y[i, y[i]] <- 1
}
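The same one-hot matrix can also be built without a loop; an equivalent one-liner (a sketch; Y_alt is an illustrative name):

Y_alt <- diag(K)[y, ]        # row i is the indicator vector for class y[i]
stopifnot(all(Y_alt == Y))   # agrees with the loop above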

Prediction function and MLP model

nnetPred <- function(X, para = list()){
  W <- para[[1]]
  b <- para[[2]]
  W2 <- para[[3]]
  b2 <- para[[4]]

  N <- nrow(X)
  hidden_layer <- pmax(0, X%*% W + matrix(rep(b,N), nrow = N, byrow = T)) 
  hidden_layer <- matrix(hidden_layer, nrow = N)
  scores <- hidden_layer%*%W2 + matrix(rep(b2,N), nrow = N, byrow = T) 
  predicted_class <- apply(scores, 1, which.max)

  return(predicted_class)  
}
set.seed(100)
start_time <- Sys.time()
nnet.model <- nnet(X, Y, step_size = 0.09,reg = 0.0000004, h=5, niteration = 80)
end_time <- Sys.time()

print("Dura??o do Treinamento")
print(end_time-start_time)
plot(1:length(nnet.model[[5]][]),nnet.model[[5]][], main="Learning Curve", 
      xlab="Iterations", ylab="Mean squared error",type= "l",cex = .6) 

Training accuracy of the ANN on the original data.

predicted_class_prototipo <- nnetPred(X, nnet.model)
print(paste('training accuracy:',mean(predicted_class_prototipo == (y))))

Decision boundaries after training the ANN on the original data.

# plot the resulting classifier
hs <- 0.01
grid <- as.matrix(expand.grid(seq(x_min, x_max, by = hs), seq(y_min, y_max, by =hs)))
Z <- nnetPred(grid, nnet.model)
data <- data_orig
ggplot()+
  geom_tile(aes(x = grid[,1],y = grid[,2],fill=as.character(Z)), alpha = 0.3, show.legend = F)+ 
  geom_point(data = data, aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  ggtitle('Decision Boundary - MLP') +
  coord_fixed(ratio = 0.8) + 
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')
# 1.5 cycles each, with noise
p<-mlbench.spirals(600,1.5,0.05)

set.seed(200)
new_data <- data.frame(do.call(cbind, p))
colnames(new_data) <- c('x','y','label')
# let's visualize the new data:
ggplot(new_data) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('New Spiral-Shaped Data') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Predicting on the new data using the model produced by the MLP

predicted_class <- nnetPred(p$x, nnet.model)
print(paste('Test accuracy:',mean(predicted_class == (new_data$label))))

Generate the confusion matrix

#library(caret )
# construct the evaluation dataset (illustrative; not used by the confusion matrix below)

set.seed(144)
true_class <- factor(predicted_class)
true_class <- sort(true_class)
class1_probs <- rbeta(sum(true_class == 1), 1, 1.5)
class2_probs <- rbeta(sum(true_class == 2), 1, 1)
test_set <- data.frame(obs = true_class,Class1 = c(class1_probs, class2_probs))
test_set$Class2 <- 1 - test_set$Class1
test_set$pred <- factor(ifelse(test_set$Class1 >= .5, 1, 2))
# calculate the confusion matrix
#cm <- confusionMatrix(data = test_set$pred, reference = test_set$obs)
#confusionMatrix(as.table(mat), positive="B")
#detach("RSNNS")
#detach("package:RSNNS", unload=TRUE)
# note: caret::confusionMatrix expects (data = predictions, reference = truth);
# here the truth (p$classes) is passed first, so the Predicted/Actual axes in the drawing are transposed
cm<-caret::confusionMatrix(factor(p$classes),factor(predicted_class))
draw_confusion_matrix <- function(cm) {

  layout(matrix(c(1,1,2)))
  par(mar=c(2,2,2,2))
  plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
  title('CONFUSION MATRIX', cex.main=2)

  # create the matrix 
  rect(150, 430, 240, 370, col='#3F97D0')
  text(195, 435, 'Class1', cex=1.2)
  rect(250, 430, 340, 370, col='#F7AD50')
  text(295, 435, 'Class2', cex=1.2)
  text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
  text(245, 450, 'Actual', cex=1.3, font=2)
  rect(150, 305, 240, 365, col='#F7AD50')
  rect(250, 305, 340, 365, col='#3F97D0')
  text(140, 400, 'Class1', cex=1.2, srt=90)
  text(140, 335, 'Class2', cex=1.2, srt=90)

  # add in the cm results 
  res <- as.numeric(cm$table)
  text(195, 400, res[1], cex=1.6, font=2, col='white')
  text(195, 335, res[2], cex=1.6, font=2, col='white')
  text(295, 400, res[3], cex=1.6, font=2, col='white')
  text(295, 335, res[4], cex=1.6, font=2, col='white')

  # add in the specifics 
  plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
  text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
  text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
  text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
  text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
  text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
  text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
  text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
  text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
  text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)

  # add in the accuracy information 
  text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
  text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
  text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
  text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}  
draw_confusion_matrix(cm)
# Compute roc
#library(ROCR)
#data(ROCR.simple)
pred <- prediction( predicted_class, new_data$label )
pred2 <- prediction(predicted_class_prototipo, data_orig$label )
perf <- performance( pred, "tpr", "fpr" )
perf2 <- performance(pred2, "tpr", "fpr")
plot( perf, colorize = TRUE)
plot(perf2, add = TRUE)
legend(0.6, 0.6, legend=c("Training Performance", "New Data Performance"),
      col=c("black", "red"), pch=c(95,95), cex=0.6)

2) Training on spiral-shaped data condensed with the Chen algorithm and evaluating performance

rm(list = setdiff(ls(), lsf.str()))
#library(ggplot2)
#library(mlbench)
#install.packages('caret', dependencies = TRUE)


# 1.5 cycles each, with noise
p<-mlbench.spirals(600,1.5,0.05)

# N <- 150 # number of points per class
# D <- 2 # dimensionality
# K <- 2 # number of classes
# X <- data.frame() # data matrix (each row = single example)
# y <- data.frame() # class labels

D <- length(p$x[1,]) # dimensionality

K <-  length(unique(p$classes))# number of classes
N <- length(p$x[,2])/K # number of points per class
#X <- data.frame() # data matrix (each row = single example)
#y <- data.frame() # class labels



#str(p)
#do.call(cbind, p)


 # for (j in (1:K)){
 #   r <- seq(0.05,1,length.out = N) # radius
 #   t <- seq((j-1)*4.7,j*4.7, length.out = N) + rnorm(N, sd = 0.3) # theta
 #   Xtemp <- data.frame(x =r*sin(t) , y = r*cos(t)) 
 #   ytemp <- data.frame(matrix(j, N, 1))
 #   X <- rbind(X, Xtemp)
 #   y <- rbind(y, ytemp)
 # }

data <- data.frame(do.call(cbind, p))
colnames(data) <- c('x','y','label')


# Step 1
#vector <- as.vector(x)
#rep(as.vector(t(y)),3)
# Training Set
# Rate of variation (stopping threshold)
standdeviation <- 3
# Global entropy 
ge <- -0.94
# Local entropy -> at -0.1, only (near-)completely homogeneous groups are accepted
le <- -0.1

homogeneidade <- function (nc,y_part,setsize) {

  g <- rep(nc,length(y_part))
  freqs <- table(y_part)/setsize # class proportions
  hom <- sum(freqs * log2(freqs))
  return(hom)

}

x1 <- p$x

Y <- data$label

spiral_features <- p$x
dist_eucl <- function(x1,x2) sqrt(sum((x1 - x2 ) ^ 2))
new_sample <- c(0,0)
distances <- apply(spiral_features, 1, function(x) dist_eucl(x, new_sample))
distances_sorted <- sort(distances, index.return = T)


#m <- dist(p$x)

#mat <- as.matrix(m)

elementos <- distances
#elementos =c(1,4,7,13,17,23,27,35,37) 
#classes = c(1,1,1,1,2,1,2,2,2)
classes <- Y[distances_sorted$ix]
#elementos <- as.vector(x1)
#classes <- Y 
# length of elementos = training-set size
Nd <- length(elementos)

D = data.frame(elementos, classes)       # D is a data frame of (distance, class) pairs

Run the Chen algorithm to generate the prototypes:

      #Quantitative analysis
      alevel <- vector()
      aleveldiv <- 0
      alevelcount <- 0
      aleveldivcount <- 0
      alevelcountNC <- 0
      alevelNC <- vector()
      timing <- vector()

#condensation_manas_version <- function(Nd,D,npar=TRUE,print=TRUE){
      # Auxiliary
      #I <- vector()
      # Time 
      start_time <- Sys.time()
      finalList <- list()
      Divisions <- vector()
      entropy_global <- matrix()
      di <- 0
      # Step 2
      # The current number of subsets of the training set

      Nc <- 1

      # Step 3
      # Assume D = training set, C(1) = D and i = 1

      i <- 1 
      C <- list()

      C[[i]]  <- D


      # Step 12 
      # Continue while Nc < Nd 
      while (Nc < Nd) {

        # Alevel
            # Step 4 
            # Find two farthest points P1 and P2 in the set D

            P1 <- max(D$elementos, na.rm = TRUE)
            P2 <- min(D$elementos, na.rm = TRUE)
            # Print out the division (P1+P2)/2
            # Step 5
            # Divide the set D into two subsets D1 and D2
            di <- length(Divisions) + 1
            Divisions[di] <- (P1+P2)/2

            D1 <- subset(D, elementos < (P1+P2)/2, select = c(elementos,classes))
            D2 <- subset(D, elementos > (P1+P2)/2, select = c(elementos,classes))

            # Step 6

            Nc <- i + 1
            C[[i]] <- D1
            C[[Nc]] <- D2

            # Step 7 
            # Contains objects from at least two classes
            # aleveldiv computation  
               aleveldivcount <- aleveldivcount + 1


            if( sapply(C[[i]][2], function(x) homogeneidade(i,x,length(x))) > le ) {
             # if( sapply(C[[i]][2], function(x) length(unique(x))) < 2) {
                # timing <- toc()
                 finalList[[length(finalList)+1]]  <- C[[i]]
                 alevelcount <- alevelcount + 1
                 alevel[alevelcount] <- aleveldivcount 
               #  tic("i")

            }

            if( sapply(C[[Nc]][2], function(x) homogeneidade(Nc,x,length(x))) > le) {
            # if( sapply(C[[Nc]][2], function(x) length(unique(x))) < 2) {    

                  finalList[[length(finalList)+1]]  <- C[[Nc]]
                 alevelcount <- alevelcount + 1
                 alevel[alevelcount] <- aleveldivcount 

            }

            I <- lapply(C, function(x) sapply(x, function(x) max(x, na.rm = TRUE) - min(x, na.rm = TRUE)))


            max <- 0
            for (t in 1:length(I)) {

                  if (I[[t]][1] > max && I[[t]][2] > 0){
                        max <- I[[t]][1]
                        list_position <- t
                  }
            }

            if (max == 0) {
                  break
            }
            D <- C[[list_position]]
            C <- C[-list_position]

            # Entropy-based stop: halt once the global entropy has stabilized over recent iterations
             df <- do.call(rbind.data.frame, C)
             entropy_global[i] <- homogeneidade(1,df[,2],length(df))

            #if( homogeneidade(1,df[,2],length(vec)) > -0.97 ) {
             if (Nc > 10) {
             if( sd(entropy_global[(Nc-10):(Nc-2)]) < standdeviation)  {
               break 
              }}

             # break
            #}


            i <- length(C) + 1
      }

# End time
end_time <- Sys.time()

      #condensed_training <- as.data.frame(lapply(finalList, function(x) sapply(x, function(x) median(x))))
      condensed_training <- do.call("rbind",lapply(finalList, function(x) sapply(x, function(x) median(x))))
      element_after_division <- do.call("rbind", finalList)
      rm(i,D,C,max,list_position,I)
#}

#result <- condensation_manas_version(Nd,D)
#View(result)
#c(15,20)
condensed_training <- as.data.frame(condensed_training)      
# draw a random subsample of n prototypes
randomSample = function(condensed_training,n) { 
   return (condensed_training[sample(nrow(condensed_training), n),])
}

#library( dplyr)

#sample_frac(condensed_training, 0.1)

#length(finalList)  
#str(finalList)
#finalList[[1]][2]
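An optional sanity check on the condensed set (a sketch; the plots below report the same figures):

table(condensed_training$classes)   # prototypes per class
cat(sprintf("kept %d of %d points (%.1f%% reduction)\n",
            nrow(condensed_training), Nd,
            100 * (1 - nrow(condensed_training)/Nd)))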

Demonstration of the achieved result:

plot(1:length(entropy_global),log(entropy_global)*-1, main="Auto-Generated Prototype Number Function", 
      xlab="Iteration", ylab="Variability",type= "l",cex = .6) 
legend(length(entropy_global)*.50,-7.4, legend=c("Elapsed Time (Secs)",end_time-start_time),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.50,-7.0, legend=c("Data Points",Nd),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.50,-7.2, legend=c("Reduction in %",(1-(length(condensed_training$elementos)/Nd))*100),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.50,-6.8, legend=c("No. of Prototypes",length(condensed_training$elementos)),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.10,-7.2, legend=c("Standard Deviation (TAV)",standdeviation),
       col=c("red"),cex=1,bty = "n")
plot(1:length(alevel), alevel, main="No. of Divisions per Iteration", 
      xlab="level", ylab="Divisions per level",type= "l",cex = .6) 
library(deldir)
library(ggplot2)
 # note: rnorm(v) draws length(v) random normal values, so these points are random
 # stand-ins with the same counts as the prototypes / original data
 df <- data.frame(x = rnorm(condensed_training[,1]), y = rnorm(condensed_training[,2]))
 df_orig <- data.frame(x = rnorm(data$x), y = rnorm(data$y))
 names(df) <- c("x","y")
 #This creates the voronoi line segments
voronoi <- deldir(df$x, df$y)



# Generate points
#x <- rnorm(500, 0, 1.5)
#y <- rnorm(500, 0, 1)

# Calculate tessellation and triangulation
vtess <- deldir(df$x, df$y)

plot(df$x, df$y, type="n", asp=0.5)
points(df_orig$x, df_orig$y, pch=20, col=c("gray","gray"), cex=0.8)
points(df$x, df$y, pch=20, col=c("red","blue"), cex=1.5)
set.seed(101)
plot(vtess, wlines="tess", wpoints="none", number=TRUE, add=TRUE, lty=3)
legend(0.7, -2, legend=c("Prototype Class 1", "Prototype Class 2","Original Data Points"),
      col=c("red", "blue", "white"), pch=c(18,18,18), cex=0.6)

Generating the dendrogram to evaluate the generated groups and their distribution:

condensed_training_set_for_neural <- data.frame()
x <- distances_sorted$x
ind <- vector()
# map each prototype (a median distance) back to the index of a nearest original point
for (i in 1:length(condensed_training[,1])){

#print(max(na.omit(distances_sorted$ix[which(x == x[abs(x-condensed_training[i,1]) %in% sort(abs(x-condensed_training[i,1]), partial=1)[1]])])))

ind[i] <- max(distances_sorted$ix[which(x == x[abs(x-condensed_training$elementos[i]) %in% sort(abs(x-condensed_training$elementos[i]))[1]])])

}
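The loop above recovers, for each prototype (a median distance), the index of one of the closest original points; a simpler near-equivalent using which.min (a sketch; ties are resolved differently, and ind_alt is an illustrative name):

ind_alt <- sapply(condensed_training$elementos,
                  function(e) distances_sorted$ix[which.min(abs(x - e))])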


# x=c(1:100) 
# your.number=5.43 
# which(abs(x-x-(condensed_training[1,1])==min(abs(x-x-(condensed_training[1,1]))))) 

condensed_training_set_for_neural <- cbind(p$x[ind[!is.infinite(ind)],],p$classes[ind[!is.infinite(ind)]])
colnames(condensed_training_set_for_neural) <- c('x','y','label')
# which(abs(x-condensed_training)==min(abs(x-condensed_training)))



# Load data
#data(USArrests)
# Compute distances and hierarchical clustering
dd <- dist(scale(condensed_training_set_for_neural[,1:2]), method = "euclidean")
hc <- hclust(dd, method = "ward.D2")
hcd <- as.dendrogram(hc)
# Define nodePar
nodePar <- list(lab.cex = 0.6, pch = c(NA, 19), 
                cex = 0.7, col = "blue")
# Customized plot; remove labels
#plot(hcd, ylab = "Height", nodePar = nodePar, leaflab = "none")
plot(hcd,  xlab = "Height", nodePar = nodePar, 
     edgePar = list(col = 2:3, lwd = 2:1))
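Cutting the dendrogram into K groups gives concrete memberships that can be checked against the prototype labels (a sketch):

groups <- cutree(hc, k = K)
table(groups, condensed_training_set_for_neural[,3])  # dendrogram groups vs. class labels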

Original synthetic data in spiral shape:

#p<-mlbench.spirals(300,1.5,0.05)

# N <- 150 # number of points per class
# D <- 2 # dimensionality
# K <- 2 # number of classes
# X <- data.frame() # data matrix (each row = single example)
# y <- data.frame() # class labels

D <- length(condensed_training_set_for_neural[1,-3]) # dimensionality

K <-  length(unique(condensed_training_set_for_neural[,3]))# number of classes
N <- length(condensed_training_set_for_neural[,1])/K # number of points per class
#X <- data.frame() # data matrix (each row = single example)
#y <- data.frame() # class labels



#str(p)
#do.call(cbind, p)


 # for (j in (1:K)){
 #   r <- seq(0.05,1,length.out = N) # radius
 #   t <- seq((j-1)*4.7,j*4.7, length.out = N) + rnorm(N, sd = 0.3) # theta
 #   Xtemp <- data.frame(x =r*sin(t) , y = r*cos(t)) 
 #   ytemp <- data.frame(matrix(j, N, 1))
 #   X <- rbind(X, Xtemp)
 #   y <- rbind(y, ytemp)
 # }

data_orig <- data
data_condensed <- as.data.frame(condensed_training_set_for_neural)
x_min <- min(data[,1])-0.2; x_max <- max(data[,1])+0.2
y_min <- min(data[,2])-0.2; y_max <- max(data[,2])+0.2

# let's visualize the data:
ggplot(data_orig) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Spiral-Shaped Training Data') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Prototypes generated from the original synthetic data:

# let's visualize the condensed data:
ggplot(data_condensed) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Prototypes Generated from the Training Set') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Data pre-processing for classification using an MLP with 2 inputs, 50 nodes in the hidden layer and one output node per class.

Condensed Data

X <- as.matrix(data_condensed[,1:2])
X <- as.matrix(X)
Y <- matrix(0, N*K, K)
y <- data_condensed[,3]
for (i in 1:(N*K)){
  Y[i, y[i]] <- 1
}

Prediction function and MLP model

nnetPred <- function(X, para = list()){
  W <- para[[1]]
  b <- para[[2]]
  W2 <- para[[3]]
  b2 <- para[[4]]

  N <- nrow(X)
  hidden_layer <- pmax(0, X%*% W + matrix(rep(b,N), nrow = N, byrow = T)) 
  hidden_layer <- matrix(hidden_layer, nrow = N)
  scores <- hidden_layer%*%W2 + matrix(rep(b2,N), nrow = N, byrow = T) 
  predicted_class <- apply(scores, 1, which.max)

  return(predicted_class)  
}
set.seed(100)
start_time <- Sys.time()
nnet.model <- nnet(X, Y, step_size = 0.09,reg = 0.000004, h=50, niteration = 80000)
end_time <- Sys.time()

print("Dura??o do Treinamento com Prot?tipos")
print(end_time - start_time)
plot(1:length(nnet.model[[5]][]),nnet.model[[5]][], main="Learning Curve", 
      xlab="Iterations", ylab="Mean squared error",type= "l",cex = .6) 

Training accuracy with the prototypes generated by the Chen algorithm:

predicted_class_prototipo <- nnetPred(X, nnet.model)
print(paste('training accuracy:',mean(predicted_class_prototipo == (y))))

Decision boundaries:

# plot the resulting classifier
hs <- 0.01
grid <- as.matrix(expand.grid(seq(x_min, x_max, by = hs), seq(y_min, y_max, by =hs)))
Z <- nnetPred(grid, nnet.model)
data <- data_condensed
ggplot()+
  geom_tile(aes(x = grid[,1],y = grid[,2],fill=as.character(Z)), alpha = 0.3, show.legend = F)+ 
  geom_point(data = data, aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  ggtitle('Decision Boundary - MLP') +
  coord_fixed(ratio = 0.8) + 
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Generate new data for testing:

# 1.5 cycles each, with noise
p<-mlbench.spirals(600,1.5,0.05)

set.seed(200)
new_data <- data.frame(do.call(cbind, p))
colnames(new_data) <- c('x','y','label')
# let's visualize the new test data:
ggplot(new_data) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('New Spiral-Shaped Data') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Predicting on the new data using the MLP model trained on the prototypes:

predicted_class <- nnetPred(p$x, nnet.model)
print(paste('Test accuracy:',mean(predicted_class == (new_data$label))))

Generate the confusion matrix:

library(caret )
# construct the evaluation dataset (illustrative; not used by the confusion matrix below)

set.seed(144)
true_class <- factor(predicted_class)
true_class <- sort(true_class)
class1_probs <- rbeta(sum(true_class == 1), 1, 1.5)
class2_probs <- rbeta(sum(true_class == 2), 1, 1)
test_set <- data.frame(obs = true_class,Class1 = c(class1_probs, class2_probs))
test_set$Class2 <- 1 - test_set$Class1
test_set$pred <- factor(ifelse(test_set$Class1 >= .5, 1, 2))
# calculate the confusion matrix
#cm <- confusionMatrix(data = test_set$pred, reference = test_set$obs)
#confusionMatrix(as.table(mat), positive="B")
#detach("RSNNS")
#detach("package:RSNNS", unload=TRUE)
cm<-caret::confusionMatrix(factor(p$classes),factor(predicted_class))
draw_confusion_matrix <- function(cm) {

  layout(matrix(c(1,1,2)))
  par(mar=c(2,2,2,2))
  plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
  title('CONFUSION MATRIX', cex.main=2)

  # create the matrix 
  rect(150, 430, 240, 370, col='#3F97D0')
  text(195, 435, 'Class1', cex=1.2)
  rect(250, 430, 340, 370, col='#F7AD50')
  text(295, 435, 'Class2', cex=1.2)
  text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
  text(245, 450, 'Actual', cex=1.3, font=2)
  rect(150, 305, 240, 365, col='#F7AD50')
  rect(250, 305, 340, 365, col='#3F97D0')
  text(140, 400, 'Class1', cex=1.2, srt=90)
  text(140, 335, 'Class2', cex=1.2, srt=90)

  # add in the cm results 
  res <- as.numeric(cm$table)
  text(195, 400, res[1], cex=1.6, font=2, col='white')
  text(195, 335, res[2], cex=1.6, font=2, col='white')
  text(295, 400, res[3], cex=1.6, font=2, col='white')
  text(295, 335, res[4], cex=1.6, font=2, col='white')

  # add in the specifics 
  plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
  text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
  text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
  text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
  text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
  text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
  text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
  text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
  text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
  text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)

  # add in the accuracy information 
  text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
  text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
  text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
  text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}  
draw_confusion_matrix(cm)
# Compute roc
library(ROCR)
#data(ROCR.simple)
pred <- prediction( predicted_class, new_data$label )
pred2 <- prediction(predicted_class_prototipo, data_condensed$label )
perf <- performance( pred, "tpr", "fpr" )
perf2 <- performance(pred2, "tpr", "fpr")
plot( perf, colorize = TRUE)
plot(perf2, add = TRUE)
legend(0.6, 0.6, legend=c("Prototype - Train Performance", "New Data Performance"),
      col=c("black", "red"), pch=c(95,95), cex=0.6)

Part Two:

Training on non-linear data from the Iris dataset

rm(list = setdiff(ls(), lsf.str()))
library("plyr")
data(iris)
#dataset<-iris[which(iris$Species!="versicolor"),][1:2-3]
#head(dataset)
dataset<-iris[which(iris$Species!="setosa"),][1:2-3]
#head(dataset)
irissubdf <- dataset[1:100, c(1, 2, 3)]
names(irissubdf) <- c("sepal", "petal", "species")
irisValues <- irissubdf[,1:2]

classes<-as.numeric(irissubdf$species)

classes_bin<-mapvalues(classes,from=c(2,3),to=c(1,2))
#classes_bin <- decodeClassLabels(classes)
#classes_bin<-mapvalues(classes,from=c(1,3),to=c(0,1))
irisdata <- cbind(irisValues,classes_bin)
#irisTargets <- decodeClassLabels(iris[,5], valTrue=0.9, valFalse=0.1)
library("clusterSim")
x1 <- data.Normalization (irisdata[,1],type="n1",normalization="column") # n1 = standardization: (x - mean)/sd
x1_2nd <- data.Normalization (irisdata[,1],type="n1",normalization="column")
x2 <- data.Normalization (irisdata[,2],type="n1",normalization="column")

x3 <- irisdata[,3]

#x1 <- dataset[,1] - mean[,]

#x2 <- 
library(ggplot2)


data <- cbind(x1,x2,x3)
colnames(data) <- c('x','y','label')
data <- as.data.frame(data)
p <- as.matrix(data)


# Step 1
#vector <- as.vector(x)
#rep(as.vector(t(y)),3)
# Training Set
# Rate of variation (stopping threshold)
standdeviation <- 1
# Global entropy 
ge <- -0.94
# Local entropy -> at -0.1, only (near-)completely homogeneous groups are accepted
le <- -0.1

homogeneidade <- function (nc,y_part,setsize) {

  g <- rep(nc,length(y_part))
  freqs <- table(y_part)/setsize # class proportions
  hom <- sum(freqs * log2(freqs))
  return(hom)

}
data_orig <- data
x1 <- p[,1:2]

Y <- p[,3]

spiral_features <- p
dist_eucl <- function(x1,x2) sqrt(sum((x1 - x2 ) ^ 2))
new_sample <- c(0,0)
distances <- apply(spiral_features, 1, function(x) dist_eucl(x, new_sample))
distances_sorted <- sort(distances, index.return = T)


#m <- dist(p$x)

#mat <- as.matrix(m)

elementos <- distances
#elementos =c(1,4,7,13,17,23,27,35,37) 
#classes = c(1,1,1,1,2,1,2,2,2)
classes <- Y[distances_sorted$ix]
#elementos <- as.vector(x1)
#classes <- Y 
# length of elementos = training-set size
Nd <- length(elementos)

D = data.frame(elementos, classes)       # D is a data frame of (distance, class) pairs



#p<-mlbench.spirals(300,1.5,0.05)

# N <- 150 # number of points per class
# D <- 2 # dimensionality
# K <- 2 # number of classes
# X <- data.frame() # data matrix (each row = single example)
# y <- data.frame() # class labels

D <- length(data_orig[1,-3]) # dimensionality

K <-  length(unique(data_orig[,3]))# number of classes
N <- length(data_orig[,1])/K # number of points per class
#X <- data.frame() # data matrix (each row = single example)
#y <- data.frame() # class labels



#str(p)
#do.call(cbind, p)


 # for (j in (1:K)){
 #   r <- seq(0.05,1,length.out = N) # radius
 #   t <- seq((j-1)*4.7,j*4.7, length.out = N) + rnorm(N, sd = 0.3) # theta
 #   Xtemp <- data.frame(x =r*sin(t) , y = r*cos(t)) 
 #   ytemp <- data.frame(matrix(j, N, 1))
 #   X <- rbind(X, Xtemp)
 #   y <- rbind(y, ytemp)
 # }

data_orig <- data
#data_condensed <- as.data.frame(condensed_training_set_for_neural)
x_min <- min(data[,1])-0.2; x_max <- max(data[,1])+0.2
y_min <- min(data[,2])-0.2; y_max <- max(data[,2])+0.2

# let's visualize the data:
ggplot(data_orig) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Iris Training Data - Versicolor vs Virginica') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Data pre-processing for classification using an MLP with 2 inputs, 10 nodes in the hidden layer (as in the training call below) and one output node per class.

Original Data

X <- as.matrix(data_orig[,1:2])
X <- as.matrix(X)
Y <- matrix(0, N*K, K)
y <- data_orig[,3]
for (i in 1:(N*K)){
  Y[i, y[i]] <- 1
}

Prediction function and model training

nnetPred <- function(X, para = list()){
  W <- para[[1]]
  b <- para[[2]]
  W2 <- para[[3]]
  b2 <- para[[4]]

  N <- nrow(X)
  hidden_layer <- pmax(0, X%*% W + matrix(rep(b,N), nrow = N, byrow = T)) 
  hidden_layer <- matrix(hidden_layer, nrow = N)
  scores <- hidden_layer%*%W2 + matrix(rep(b2,N), nrow = N, byrow = T) 
  predicted_class <- apply(scores, 1, which.max)

  return(predicted_class)  
}
set.seed(100)
nnet.model <- nnet(X, Y, step_size = 0.09,reg = 0.000004, h=10, niteration = 40000)

Decision Boundaries:

# plot the resulting classifier
hs <- 0.01
grid <- as.matrix(expand.grid(seq(x_min, x_max, by = hs), seq(y_min, y_max, by =hs)))
Z <- nnetPred(grid, nnet.model)
data <- data_orig
ggplot()+
  geom_tile(aes(x = grid[,1],y = grid[,2],fill=as.character(Z)), alpha = 0.3, show.legend = F)+ 
  geom_point(data = data, aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  ggtitle('Decision Boundary - MLP') +
  coord_fixed(ratio = 0.8) + 
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')
# 1.5 cycles each, with noise
#p<-mlbench.spirals(600,1.5,0.05)

set.seed(200)
data_new <- cbind(x1_2nd,x2,x3)
colnames(data_new) <- c('x','y','label')
data <- as.data.frame(data_new)
new_data <- as.data.frame(data_new)

colnames(new_data) <- c('x','y','label')
# let's visualize the data:
ggplot(new_data) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Original Iris Data - Versicolor vs Virginica') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Predicting on the data using the trained MLP model

predicted_class <- nnetPred(p[,1:2], nnet.model)
print(paste('Test accuracy:',mean(predicted_class == (new_data$label))))

Generate the confusion matrix:

library(caret )
# construct the evaluation dataset (illustrative; not used by the confusion matrix below)

set.seed(144)
true_class <- factor(predicted_class)
true_class <- sort(true_class)
class1_probs <- rbeta(sum(true_class == 1), 1, 1.5)
class2_probs <- rbeta(sum(true_class == 2), 1, 1)
test_set <- data.frame(obs = true_class,Class1 = c(class1_probs, class2_probs))
test_set$Class2 <- 1 - test_set$Class1
test_set$pred <- factor(ifelse(test_set$Class1 >= .5, 1, 2))
# calculate the confusion matrix
#cm <- confusionMatrix(data = test_set$pred, reference = test_set$obs)
#confusionMatrix(as.table(mat), positive="B")
#detach("RSNNS")
#detach("package:RSNNS", unload=TRUE)
cm<-caret::confusionMatrix(factor(p[,3]),factor(predicted_class))
draw_confusion_matrix <- function(cm) {

  layout(matrix(c(1,1,2)))
  par(mar=c(2,2,2,2))
  plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
  title('CONFUSION MATRIX', cex.main=2)

  # create the matrix 
  rect(150, 430, 240, 370, col='#3F97D0')
  text(195, 435, 'Class1', cex=1.2)
  rect(250, 430, 340, 370, col='#F7AD50')
  text(295, 435, 'Class2', cex=1.2)
  text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
  text(245, 450, 'Actual', cex=1.3, font=2)
  rect(150, 305, 240, 365, col='#F7AD50')
  rect(250, 305, 340, 365, col='#3F97D0')
  text(140, 400, 'Class1', cex=1.2, srt=90)
  text(140, 335, 'Class2', cex=1.2, srt=90)

  # add in the cm results 
  res <- as.numeric(cm$table)
  text(195, 400, res[1], cex=1.6, font=2, col='white')
  text(195, 335, res[2], cex=1.6, font=2, col='white')
  text(295, 400, res[3], cex=1.6, font=2, col='white')
  text(295, 335, res[4], cex=1.6, font=2, col='white')

  # add in the specifics 
  plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
  text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
  text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
  text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
  text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
  text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
  text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
  text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
  text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
  text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)

  # add in the accuracy information 
  text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
  text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
  text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
  text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}  
draw_confusion_matrix(cm)
# Compute roc
library(ROCR)
#data(ROCR.simple)
pred <- prediction( predicted_class, new_data$label )
pred2 <- prediction(predicted_class, data_orig$label )
perf <- performance( pred, "tpr", "fpr" )
perf2 <- performance(pred2, "tpr", "fpr")
plot( perf, colorize = TRUE)
plot(perf2, add = TRUE)
legend(0.6, 0.6, legend=c("Training Performance", "New Data Performance"),
      col=c("black", "red"), pch=c(95,95), cex=0.6)

2) Non-linearly separable Iris data using the Chen data-reduction algorithm:

rm(list = setdiff(ls(), lsf.str()))
library("plyr")
data(iris)
#dataset<-iris[which(iris$Species!="versicolor"),][1:2-3]
#head(dataset)
dataset<-iris[which(iris$Species!="setosa"),][1:2-3]
#head(dataset)
irissubdf <- dataset[1:100, c(1, 2, 3)]
names(irissubdf) <- c("sepal", "petal", "species")
irisValues <- irissubdf[,1:2]

classes<-as.numeric(irissubdf$species)

classes_bin<-mapvalues(classes,from=c(2,3),to=c(1,2))
#classes_bin <- decodeClassLabels(classes)
#classes_bin<-mapvalues(classes,from=c(1,3),to=c(0,1))
irisdata <- cbind(irisValues,classes_bin)
#irisTargets <- decodeClassLabels(iris[,5], valTrue=0.9, valFalse=0.1)
library("clusterSim")
x1 <- data.Normalization (irisdata[,1],type="n1",normalization="column")
x1_2nd <- data.Normalization (irisdata[,1],type="n1",normalization="column")
x2 <- data.Normalization (irisdata[,2],type="n1",normalization="column")

x3 <- irisdata[,3]

#x1 <- dataset[,1] - mean[,]

#x2 <- 
library(ggplot2)


data <- cbind(x1,x2,x3)
colnames(data) <- c('x','y','label')
data <- as.data.frame(data)
p <- as.matrix(data)


# Step 1
#vector <- as.vector(x)
#rep(as.vector(t(y)),3)
# Training Set
# Rate of variation (stopping threshold)
standdeviation <- 0
# Global entropy 
ge <- -0.94
# Local entropy -> at -0.1, only (near-)completely homogeneous groups are accepted
le <- -0.1

homogeneidade <- function (nc,y_part,setsize) {

  g <- rep(nc,length(y_part))
  freqs <- table(y_part)/setsize # class proportions
  hom <- sum(freqs * log2(freqs))
  return(hom)

}

x1 <- p

Y <- data$label

spiral_features <- p
dist_eucl <- function(x1,x2) sqrt(sum((x1 - x2 ) ^ 2))
new_sample <- c(0,0)
distances <- apply(spiral_features, 1, function(x) dist_eucl(x, new_sample))
distances_sorted <- sort(distances, index.return = T)


#m <- dist(p$x)

#mat <- as.matrix(m)

elementos <- distances
#elementos =c(1,4,7,13,17,23,27,35,37) 
#classes = c(1,1,1,1,2,1,2,2,2)
classes <- Y[distances_sorted$ix]
#elementos <- as.vector(x1)
#classes <- Y 
# length of elementos = training-set size
Nd <- length(elementos)

D = data.frame(elementos, classes)       # D is a data frame of (distance, class) pairs

Running the Chen algorithm:

      #Quantitative analysis
      alevel <- vector()
      aleveldiv <- 0
      alevelcount <- 0
      aleveldivcount <- 0
      alevelcountNC <- 0
      alevelNC <- vector()
      timing <- vector()

#condensation_manas_version <- function(Nd,D,npar=TRUE,print=TRUE){
      # Auxiliary
      #I <- vector()
      # Time 
      start_time <- Sys.time()
      finalList <- list()
      Divisions <- vector()
      entropy_global <- matrix()
      di <- 0
      # Step 2
      # The current number of subsets of the training set

      Nc <- 1

      # Step 3
      # Assume D = training set, C(1) = D and i = 1

      i <- 1 
      C <- list()

      C[[i]]  <- D


      # Step 12 
      # Continue while Nc < Nd 
      while (Nc < Nd) {

        # Alevel
            # Step 4 
            # Find two farthest points P1 and P2 in the set D

            P1 <- max(D$elementos, na.rm = TRUE)
            P2 <- min(D$elementos, na.rm = TRUE)
            # Print out the division (P1+P2)/2
            # Step 5
            # Divide the set D into two subsets D1 and D2
            di <- length(Divisions) + 1
            Divisions[di] <- (P1+P2)/2

            D1 <- subset(D, elementos < (P1+P2)/2, select = c(elementos,classes))
            D2 <- subset(D, elementos > (P1+P2)/2, select = c(elementos,classes))

            # Step 6

            Nc <- i + 1
            C[[i]] <- D1
            C[[Nc]] <- D2

            # Step 7 
            # Contains objects from at least two classes
            # aleveldiv computation  
               aleveldivcount <- aleveldivcount + 1


            if( sapply(C[[i]][2], function(x) homogeneidade(i,x,length(x))) > le ) {
             # if( sapply(C[[i]][2], function(x) length(unique(x))) < 2) {
                # timing <- toc()
                 finalList[[length(finalList)+1]]  <- C[[i]]
                 alevelcount <- alevelcount + 1
                 alevel[alevelcount] <- aleveldivcount 
               #  tic("i")

            }

            if( sapply(C[[Nc]][2], function(x) homogeneidade(Nc,x,length(x))) > le) {
            # if( sapply(C[[Nc]][2], function(x) length(unique(x))) < 2) {    

                  finalList[[length(finalList)+1]]  <- C[[Nc]]
                 alevelcount <- alevelcount + 1
                 alevel[alevelcount] <- aleveldivcount 

            }

            I <- lapply(C, function(x) sapply(x, function(x) max(x, na.rm = TRUE) - min(x, na.rm = TRUE)))


            max <- 0
            for (t in 1:length(I)) {

                  if (I[[t]][1] > max && I[[t]][2] > 0){
                        max <- I[[t]][1]
                        list_position <- t
                  }
            }

            if (max == 0) {
                  break
            }
            D <- C[[list_position]]
            C <- C[-list_position]

            # Entropy-based stop: halt once the global entropy has stabilized over recent iterations
             df <- do.call(rbind.data.frame, C)
             entropy_global[i] <- homogeneidade(1,df[,2],length(df))

            #if( homogeneidade(1,df[,2],length(vec)) > -0.97 ) {
             if (Nc > 10) {
             if( sd(entropy_global[(Nc-10):(Nc-2)]) < standdeviation)  {
               break 
              }}

             # break
            #}


            i <- length(C) + 1
      }

# End time
end_time <- Sys.time()

      #condensed_training <- as.data.frame(lapply(finalList, function(x) sapply(x, function(x) median(x))))
      condensed_training <- do.call("rbind",lapply(finalList, function(x) sapply(x, function(x) median(x))))
      element_after_division <- do.call("rbind", finalList)
      rm(i,D,C,max,list_position,I)
#}

#result <- condensation_manas_version(Nd,D)
#View(result)
#c(15,20)
condensed_training <- as.data.frame(condensed_training)      
# draw a random subsample of n prototypes
randomSample = function(condensed_training,n) { 
   return (condensed_training[sample(nrow(condensed_training), n),])
}

#library( dplyr)

#sample_frac(condensed_training, 0.1)

#length(finalList)  
#str(finalList)
#finalList[[1]][2]

Demonstration of the generated prototypes:

plot(1:length(entropy_global),log(entropy_global)*-1, main="Auto-Generated Prototype Number Function", 
      xlab="Iteration", ylab="Variability",type= "l",cex = .6) 
legend(length(entropy_global)*.50,-4.5, legend=c("Elapsed Time (Secs)",end_time-start_time),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.50,-4.1, legend=c("Data Points",Nd),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.50,-4.3, legend=c("Reduction in %",(1-(length(condensed_training$elementos)/Nd))*100),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.50,-4.8, legend=c("No. of Prototypes",length(condensed_training$elementos)),
       col=c("red"),cex=1,bty = "n")
legend(length(entropy_global)*.10,-4.2, legend=c("Standard Deviation (TAV)",standdeviation),
       col=c("red"),cex=1,bty = "n")
plot(1:length(alevel), alevel, main="No. of Divisions per Iteration", 
      xlab="level", ylab="Divisions per level",type= "l",cex = .6) 
library(deldir)
library(ggplot2)
 # note: rnorm(v) draws length(v) random normal values, so these points are random
 # stand-ins with the same counts as the prototypes / original data
 df <- data.frame(x = rnorm(condensed_training[,1]), y = rnorm(condensed_training[,2]))
 df_orig <- data.frame(x = rnorm(data$x), y = rnorm(data$y))
 names(df) <- c("x","y")
 #This creates the voronoi line segments
voronoi <- deldir(df$x, df$y)



# Generate points
#x <- rnorm(500, 0, 1.5)
#y <- rnorm(500, 0, 1)

# Calculate tessellation and triangulation
vtess <- deldir(df$x, df$y)

plot(df$x, df$y, type="n", asp=0.5)
points(df_orig$x, df_orig$y, pch=20, col=c("gray","gray"), cex=0.8)
points(df$x, df$y, pch=20, col=c("red","blue"), cex=1.5)
set.seed(101)
plot(vtess, wlines="tess", wpoints="none", number=TRUE, add=TRUE, lty=3)
legend(0.7, -1, legend=c("Prototype Class 1", "Prototype Class 2","Original Data Points"),
      col=c("red", "blue", "white"), pch=c(18,18,18), cex=0.6)

Generating the dendrogram for group analysis:

condensed_training_set_for_neural <- data.frame()
x <- distances_sorted$x
ind <- vector()
# map each prototype (a median distance) back to the index of a nearest original point
for (i in 1:length(condensed_training[,1])){

#print(max(na.omit(distances_sorted$ix[which(x == x[abs(x-condensed_training[i,1]) %in% sort(abs(x-condensed_training[i,1]), partial=1)[1]])])))

ind[i] <- max(distances_sorted$ix[which(x == x[abs(x-condensed_training$elementos[i]) %in% sort(abs(x-condensed_training$elementos[i]))[1]])])

}


# x=c(1:100) 
# your.number=5.43 
# which(abs(x-x-(condensed_training[1,1])==min(abs(x-x-(condensed_training[1,1]))))) 

condensed_training_set_for_neural <- cbind(p[ind[!is.infinite(ind)],1:2],p[ind[!is.infinite(ind)],3])
colnames(condensed_training_set_for_neural) <- c('x','y','label')
# which(abs(x-condensed_training)==min(abs(x-condensed_training)))



# Load data
#data(USArrests)
# Compute distances and hierarchical clustering
dd <- dist(scale(condensed_training_set_for_neural[,1:2]), method = "euclidean")
hc <- hclust(dd, method = "ward.D2")
hcd <- as.dendrogram(hc)
# Define nodePar
nodePar <- list(lab.cex = 0.6, pch = c(NA, 19), 
                cex = 0.7, col = "blue")
# Customized plot; remove labels
#plot(hcd, ylab = "Height", nodePar = nodePar, leaflab = "none")
plot(hcd,  xlab = "Height", nodePar = nodePar, 
     edgePar = list(col = 2:3, lwd = 2:1))
#p<-mlbench.spirals(300,1.5,0.05)

# N <- 150 # number of points per class
# D <- 2 # dimensionality
# K <- 2 # number of classes
# X <- data.frame() # data matrix (each row = single example)
# y <- data.frame() # class labels

D <- length(condensed_training_set_for_neural[1,-3]) # dimensionality

K <-  length(unique(condensed_training_set_for_neural[,3]))# number of classes
N <- length(condensed_training_set_for_neural[,1])/K # number of points per class
#X <- data.frame() # data matrix (each row = single example)
#y <- data.frame() # class labels



#str(p)
#do.call(cbind, p)


 # for (j in (1:K)){
 #   r <- seq(0.05,1,length.out = N) # radius
 #   t <- seq((j-1)*4.7,j*4.7, length.out = N) + rnorm(N, sd = 0.3) # theta
 #   Xtemp <- data.frame(x =r*sin(t) , y = r*cos(t)) 
 #   ytemp <- data.frame(matrix(j, N, 1))
 #   X <- rbind(X, Xtemp)
 #   y <- rbind(y, ytemp)
 # }

data_orig <- data
data_condensed <- as.data.frame(condensed_training_set_for_neural)
x_min <- min(data[,1])-0.2; x_max <- max(data[,1])+0.2
y_min <- min(data[,2])-0.2; y_max <- max(data[,2])+0.2

# let's visualize the data:
ggplot(data_orig) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Iris Training Data') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')
# let's visualize the condensed data:
ggplot(data_condensed) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Prototypes Generated from the Training Set') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Data pre-processing for classification using an MLP with 2 inputs, 10 nodes in the hidden layer and one output node per class:

Condensed Data

X <- as.matrix(data_condensed[,1:2])
X <- as.matrix(X)
Y <- matrix(0, N*K, K)
y <- data_condensed[,3]
for (i in 1:(N*K)){
  Y[i, y[i]] <- 1
}

Prediction function and MLP model:

nnetPred <- function(X, para = list()){
  W <- para[[1]]
  b <- para[[2]]
  W2 <- para[[3]]
  b2 <- para[[4]]

  N <- nrow(X)
  hidden_layer <- pmax(0, X%*% W + matrix(rep(b,N), nrow = N, byrow = T)) 
  hidden_layer <- matrix(hidden_layer, nrow = N)
  scores <- hidden_layer%*%W2 + matrix(rep(b2,N), nrow = N, byrow = T) 
  predicted_class <- apply(scores, 1, which.max)

  return(predicted_class)  
}
set.seed(100)
nnet.model <- nnet(X, Y, step_size = 0.09,reg = 0.000004, h=10, niteration = 40000)
plot(1:length(nnet.model[[5]][]),nnet.model[[5]][], main="Learning Curve", 
      xlab="Iterations", ylab="Mean squared error",type= "l",cex = .6) 

Training accuracy with the prototypes generated by the Chen algorithm:

predicted_class_prototipo <- nnetPred(X, nnet.model)
print(paste('training accuracy:',mean(predicted_class_prototipo == (y))))

Decision Boundary:

# plot the resulting classifier
hs <- 0.01
grid <- as.matrix(expand.grid(seq(x_min, x_max, by = hs), seq(y_min, y_max, by =hs)))
Z <- nnetPred(grid, nnet.model)
data <- data_condensed
ggplot()+
  geom_tile(aes(x = grid[,1],y = grid[,2],fill=as.character(Z)), alpha = 0.3, show.legend = F)+ 
  geom_point(data = data, aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  ggtitle('Decision Boundary - MLP') +
  coord_fixed(ratio = 0.8) + 
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')
# 1.5 cycles each, with noise
#p<-mlbench.spirals(600,1.5,0.05)

set.seed(200)
data_new <- cbind(x1_2nd,x2,x3)
colnames(data_new) <- c('x','y','label')
data <- as.data.frame(data_new)
new_data <- as.data.frame(data_new)

colnames(new_data) <- c('x','y','label')
# let's visualize the data:
ggplot(new_data) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Original Iris Data - Versicolor vs Virginica') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Predicting on the new data using the MLP model trained on the prototypes:

predicted_class <- nnetPred(p[,1:2], nnet.model)
print(paste('Test accuracy:',mean(predicted_class == (new_data$label))))

Generate the confusion matrix:

library(caret )
# construct the evaluation dataset (illustrative; not used by the confusion matrix below)

set.seed(144)
true_class <- factor(predicted_class)
true_class <- sort(true_class)
class1_probs <- rbeta(sum(true_class == 1), 1, 1.5)
class2_probs <- rbeta(sum(true_class == 2), 1, 1)
test_set <- data.frame(obs = true_class,Class1 = c(class1_probs, class2_probs))
test_set$Class2 <- 1 - test_set$Class1
test_set$pred <- factor(ifelse(test_set$Class1 >= .5, 1, 2))
# calculate the confusion matrix
#cm <- confusionMatrix(data = test_set$pred, reference = test_set$obs)

#confusionMatrix(as.table(mat), positive="B")
#detach("RSNNS")
#detach("package:RSNNS", unload=TRUE)
cm<-caret::confusionMatrix(factor(p[,3]),factor(predicted_class))
draw_confusion_matrix <- function(cm) {

  layout(matrix(c(1,1,2)))
  par(mar=c(2,2,2,2))
  plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
  title('CONFUSION MATRIX', cex.main=2)

  # create the matrix 
  rect(150, 430, 240, 370, col='#3F97D0')
  text(195, 435, 'Class1', cex=1.2)
  rect(250, 430, 340, 370, col='#F7AD50')
  text(295, 435, 'Class2', cex=1.2)
  text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
  text(245, 450, 'Actual', cex=1.3, font=2)
  rect(150, 305, 240, 365, col='#F7AD50')
  rect(250, 305, 340, 365, col='#3F97D0')
  text(140, 400, 'Class1', cex=1.2, srt=90)
  text(140, 335, 'Class2', cex=1.2, srt=90)

  # add in the cm results 
  res <- as.numeric(cm$table)
  text(195, 400, res[1], cex=1.6, font=2, col='white')
  text(195, 335, res[2], cex=1.6, font=2, col='white')
  text(295, 400, res[3], cex=1.6, font=2, col='white')
  text(295, 335, res[4], cex=1.6, font=2, col='white')

  # add in the specifics 
  plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
  text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
  text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
  text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
  text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
  text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
  text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
  text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
  text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
  text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)

  # add in the accuracy information 
  text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
  text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
  text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
  text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}  
draw_confusion_matrix(cm)
# Compute roc
library(ROCR)
#data(ROCR.simple)
pred <- prediction( predicted_class, new_data$label )
pred2 <- prediction(predicted_class_prototipo, data_condensed$label )
perf <- performance( pred, "tpr", "fpr" )
perf2 <- performance(pred2, "tpr", "fpr")
plot( perf, colorize = TRUE)
plot(perf2, add = TRUE)
legend(0.6, 0.6, legend=c("Prototype - Train Performance", "New Data Performance"),
      col=c("black", "red"), pch=c(95,95), cex=0.6)

Part Three:

1) With prototypes of the linearly separable Iris data (Setosa vs Virginica)

rm(list = setdiff(ls(), lsf.str()))
library("plyr")
data(iris)
dataset<-iris[which(iris$Species!="versicolor"),][1:2-3]
#head(dataset)
#dataset<-iris[which(iris$Species!="setosa"),][1:2-3]
#head(dataset)
irissubdf <- dataset[1:100, c(1, 2, 3)]
names(irissubdf) <- c("sepal", "petal", "species")
irisValues <- irissubdf[,1:2]

classes<-as.numeric(irissubdf$species)

#classes_bin<-mapvalues(classes,from=c(2,3),to=c(1,2))
#classes_bin <- decodeClassLabels(classes)
classes_bin<-mapvalues(classes,from=c(1,3),to=c(1,2))
irisdata <- cbind(irisValues,classes_bin)
#irisTargets <- decodeClassLabels(iris[,5], valTrue=0.9, valFalse=0.1)
p <- list()
p[[1]] <- irisValues
p[[2]] <- classes_bin
names(p) <- c("x","classes")
library("clusterSim")
x1 <- data.Normalization (irisdata[,1],type="n1",normalization="column")
x1_2nd <- data.Normalization (irisdata[,1],type="n1",normalization="column")
x2 <- data.Normalization (irisdata[,2],type="n1",normalization="column")

x3 <- irisdata[,3]

#x1 <- dataset[,1] - mean[,]

#x2 <- 
library(ggplot2)


data <- cbind(x1,x2,x3)
colnames(data) <- c('x','y','label')
data <- as.data.frame(data)
p <- as.matrix(data)


# Step 1
#vector <- as.vector(x)
#rep(as.vector(t(y)),3)
# Training Set
# Rate of variation (tolerance for the entropy-based stopping rule)
standdeviation <- 0
# Global entropy
ge <- 0
# Local entropy threshold -> 0 would mean complete homogeneity in the generated groups
le <- -0.1

# homogeneidade: negative Shannon entropy of the class labels in a subset;
# returns 0 for a fully homogeneous group and grows more negative as classes mix
# (nc is unused; kept for call-site compatibility)
homogeneidade <- function (nc,y_part,setsize) {

  freqs <- table(y_part)/setsize # relative class frequencies (setsize = length(y_part))
  hom <- sum(freqs * log2(freqs))
  return(hom)

}
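
As a quick sanity check of the entropy function (a minimal sketch, not part of the original run), a pure group scores 0 and a perfectly mixed two-class group scores -1:

homogeneidade(1, c(1,1,1,1), 4)   # 0: a single class, fully homogeneous
homogeneidade(1, c(1,1,2,2), 4)   # -1: two equally frequent classes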

x1 <- p

Y <- data$label

# project each observation onto its Euclidean distance from the origin
# (features only; the label column is excluded from the distance)
features <- p[, 1:2]
dist_eucl <- function(x1, x2) sqrt(sum((x1 - x2)^2))
new_sample <- c(0, 0)
distances <- apply(features, 1, function(x) dist_eucl(x, new_sample))
distances_sorted <- sort(distances, index.return = T)
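
This projection reduces each 2-D observation to a single scalar, which is what lets the Chen splits below work with simple midpoints. For example (illustrative values only):

dist_eucl(c(3, 4), c(0, 0))   # 5: the classic 3-4-5 right triangle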


#m <- dist(p$x)

#mat <- as.matrix(m)

# pair each 1-D projection with its class label, both in ascending-distance order
elementos <- distances_sorted$x
#elementos = c(1,4,7,13,17,23,27,35,37)
#classes = c(1,1,1,1,2,1,2,2,2)
classes <- Y[distances_sorted$ix]
#elementos <- as.vector(x1)
#classes <- Y
# length of elementos = training-set size
Nd <- length(elementos)

D <- data.frame(elementos, classes)       # working data frame for the algorithm

Run the Chen data-reduction algorithm. In outline: repeatedly take the subset with the largest spread, split it at the midpoint between its two farthest points, emit each resulting subset whose negative entropy exceeds the threshold le as a prototype group, and stop when no divisible subset remains or the global entropy stabilizes:

      # Quantitative analysis (division/level bookkeeping)
      alevel <- vector()
      aleveldiv <- 0
      alevelcount <- 0
      aleveldivcount <- 0
      alevelcountNC <- 0
      alevelNC <- vector()
      timing <- vector()

#condensation_manas_version <- function(Nd,D,npar=TRUE,print=TRUE){
      # Auxiliary
      #I <- vector()
      # Time 
      start_time <- Sys.time()
      finalList <- list()
      Divisions <- vector()
      entropy_global <- matrix()
      di <- 0
      # Step 2
      # The current number of subsets of the training set

      Nc <- 1

      # Step 3
      # Assume D = training set, C(1) = D and i = 1

      i <- 1 
      C <- list()

      C[[i]]  <- D


      # Step 12
      # Continue while Nc < Nd
      while (Nc < Nd) {

        # Alevel
            # Step 4 
            # Find two farthest points P1 and P2 in the set D

            P1 <- max(D$elementos, na.rm = TRUE)
            P2 <- min(D$elementos, na.rm = TRUE)
            # Record the division point (P1+P2)/2
            # Step 5
            # Divide the set D into two subsets D1 and D2
            di <- length(Divisions) + 1
            Divisions[di] <- (P1+P2)/2

            D1 <- subset(D, elementos < (P1+P2)/2, select = c(elementos,classes))
            D2 <- subset(D, elementos > (P1+P2)/2, select = c(elementos,classes))

            # Step 6

            Nc <- i + 1
            C[[i]] <- D1
            C[[Nc]] <- D2

            # Step 7 
            # Contains objects from two classes at least
            # Compute aleveldiv (count of divisions performed)
               aleveldivcount <- aleveldivcount + 1


            if( sapply(C[[i]][2], function(x) homogeneidade(i,x,length(x))) > le ) {
             # if( sapply(C[[i]][2], function(x) length(unique(x))) < 2) {
                # timing <- toc()
                 finalList[[length(finalList)+1]]  <- C[[i]]
                 alevelcount <- alevelcount + 1
                 alevel[alevelcount] <- aleveldivcount 
               #  tic("i")

            }

            if( sapply(C[[Nc]][2], function(x) homogeneidade(Nc,x,length(x))) > le) {
            # if( sapply(C[[Nc]][2], function(x) length(unique(x))) < 2) {    

                  finalList[[length(finalList)+1]]  <- C[[Nc]]
                 alevelcount <- alevelcount + 1
                 alevel[alevelcount] <- aleveldivcount 

            }

            I <- lapply(C, function(x) sapply(x, function(x) max(x, na.rm = TRUE) - min(x, na.rm = TRUE)))


            max <- 0
            for (t in 1:length(I)) {

                  if (I[[t]][1] > max && I[[t]][2] > 0){
                        max <- I[[t]][1]
                        list_position <- t
                  }
            }

            if (max == 0) {
                  break
            }
            D <- C[[list_position]]
            C <- C[-list_position]

            # Entropy-based stopping computation
             df <- do.call(rbind.data.frame, C)
             entropy_global[i] <- homogeneidade(1, df[,2], nrow(df)) # nrow(df), not length(df): set size, not column count

            #if( homogeneidade(1,df[,2],length(vec)) > -0.97 ) {
             if (Nc > 10) {
             if( sd(entropy_global[(Nc-10):(Nc-2)]) < standdeviation)  { # with standdeviation = 0 this never fires; the max == 0 check above ends the loop
               break 
              }}

             # break
            #}


            i <- length(C) + 1
      }

# End time
end_time <- Sys.time()

      #condensed_training <- as.data.frame(lapply(finalList, function(x) sapply(x, function(x) median(x))))
      condensed_training <- do.call("rbind",lapply(finalList, function(x) sapply(x, function(x) median(x))))
      element_after_division <- do.call("rbind", finalList)
      rm(i,D,C,max,list_position,I)
#}

#result <- condensation_manas_version(Nd,D)
#View(result)
#c(15,20)
condensed_training <- as.data.frame(condensed_training)      
randomSample = function(condensed_training, n) {
   return(condensed_training[sample(nrow(condensed_training), n), ])
}
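
For example, assuming at least 10 prototypes were generated, a random subsample could be drawn with:

#sampled <- randomSample(condensed_training, 10)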

#library( dplyr)

#sample_frac(condensed_training, 0.1)

#length(finalList)  
#str(finalList)
#finalList[[1]][2]

Show the results:

# negate before the log: entropy_global holds negative entropies
plot(1:length(entropy_global), log(entropy_global * -1), main="Auto-Generated Prototype Number Function",
      xlab="Iteration", ylab="Variability", type= "l", cex = .6)
legend(length(entropy_global)*.50, -5.4, legend=c("Elapsed Time (Secs)", end_time-start_time),
       col=c("red"), cex=1, bty = "n")
legend(length(entropy_global)*.50, -5.0, legend=c("Data Points", Nd),
       col=c("red"), cex=1, bty = "n")
legend(length(entropy_global)*.50, -5.2, legend=c("Reduction in %", (1-(length(condensed_training$elementos)/Nd))*100),
       col=c("red"), cex=1, bty = "n")
legend(length(entropy_global)*.50, -5.8, legend=c("No. of Prototypes", length(condensed_training$elementos)),
       col=c("red"), cex=1, bty = "n")
legend(length(entropy_global)*.10, -5.2, legend=c("Standard Deviation (TAV)", standdeviation),
       col=c("red"), cex=1, bty = "n")
plot(1:length(alevel), alevel, main="No. of Divisions per Iteration",
      xlab="level", ylab="Divisions per level", type= "l", cex = .6)
library(deldir)
library(ggplot2)
 # note: rnorm(v) draws length(v) standard-normal values, so these are random
 # placeholder coordinates sized like the prototype/original sets (the condensed
 # set itself is 1-D); they only sketch the Voronoi tessellation
 df <- data.frame(x = rnorm(condensed_training[,1]), y = rnorm(condensed_training[,2]))
 df_orig <- data.frame(x = rnorm(data$x), y = rnorm(data$y))
 names(df) <- c("x","y")
 # this creates the Voronoi line segments
voronoi <- deldir(df$x, df$y)



# Generate points
#x <- rnorm(500, 0, 1.5)
#y <- rnorm(500, 0, 1)

# Calculate tessellation and triangulation
vtess <- deldir(df$x, df$y)

plot(df$x, df$y, type="n", asp=0.5)
points(df_orig$x, df_orig$y, pch=20, col=c("gray","gray"), cex=0.8)
points(df$x, df$y, pch=20, col=c("red","blue"), cex=1.5)
set.seed(101)
plot(vtess, wlines="tess", wpoints="none", number=TRUE, add=TRUE, lty=3)
legend(0.7, -2, legend=c("Prototype Class 1", "Prototype Class 2", "Original Data Points"),
      col=c("red", "blue", "white"), pch=c(18,18,18), cex=0.6)
condensed_training_set_for_neural <- data.frame()
x <- distances_sorted$x
ind <- vector()
# for each prototype (a median of 1-D projections), recover the index of the
# original observation whose distance value is closest to it
for (i in 1:length(condensed_training[,1])){

ind[i] <- max(distances_sorted$ix[which(x == x[abs(x-condensed_training$elementos[i]) %in% sort(abs(x-condensed_training$elementos[i]))[1]])])

}
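
A more direct equivalent of the lookup above (a sketch; it breaks ties by the first match, whereas the loop above keeps the largest index):

# for (i in 1:nrow(condensed_training)) {
#   ind[i] <- distances_sorted$ix[which.min(abs(x - condensed_training$elementos[i]))]
# }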



# drop -Inf indices (prototypes with no match) and attach the labels
condensed_training_set_for_neural <- cbind(p[ind[!is.infinite(ind)],1:2], p[ind[!is.infinite(ind)],3])
colnames(condensed_training_set_for_neural) <- c('x','y','label')
# which(abs(x-condensed_training)==min(abs(x-condensed_training)))


# Load data
#data(USArrests)
# Compute distances and hierarchical clustering
dd <- dist(scale(condensed_training_set_for_neural[,1:2]), method = "euclidean")
hc <- hclust(dd, method = "ward.D2")
hcd <- as.dendrogram(hc)
# Define nodePar
nodePar <- list(lab.cex = 0.6, pch = c(NA, 19), 
                cex = 0.7, col = "blue")
# Customized plot; remove labels
#plot(hcd, ylab = "Height", nodePar = nodePar, leaflab = "none")
plot(hcd,  xlab = "Height", nodePar = nodePar, 
     edgePar = list(col = 2:3, lwd = 2:1))
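
As an optional follow-up (not in the original), the hierarchy can be cut into the two expected classes and compared against the prototype labels:

groups <- cutree(hc, k = 2)
table(groups, condensed_training_set_for_neural[,3])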
#p<-mlbench.spirals(300,1.5,0.05)

# N <- 150 # number of points per class
# D <- 2 # dimensionality
# K <- 2 # number of classes
# X <- data.frame() # data matrix (each row = single example)
# y <- data.frame() # class labels

D <- length(condensed_training_set_for_neural[1,-3]) # dimensionality

K <-  length(unique(condensed_training_set_for_neural[,3]))# number of classes
N <- length(condensed_training_set_for_neural[,1])/K # number of points per class
#X <- data.frame() # data matrix (each row = single example)
#y <- data.frame() # class labels



#str(p)
#do.call(cbind, p)


 # for (j in (1:K)){
 #   r <- seq(0.05,1,length.out = N) # radius
 #   t <- seq((j-1)*4.7,j*4.7, length.out = N) + rnorm(N, sd = 0.3) # theta
 #   Xtemp <- data.frame(x =r*sin(t) , y = r*cos(t)) 
 #   ytemp <- data.frame(matrix(j, N, 1))
 #   X <- rbind(X, Xtemp)
 #   y <- rbind(y, ytemp)
 # }

data_orig <- data
data_condensed <- as.data.frame(condensed_training_set_for_neural)
x_min <- min(data[,1])-0.2; x_max <- max(data[,1])+0.2
y_min <- min(data[,2])-0.2; y_max <- max(data[,2])+0.2

# let's visualize the original data:
ggplot(data_orig) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Original Iris data - Setosa vs Virginica') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')
# let's visualize the condensed data:
ggplot(data_condensed) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Prototypes Generated from the Training Set') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Pre-processing the data for classification with the MLP. The architecture used below has 2 inputs, a hidden layer (h = 2 in the call to nnet), and K = 2 output nodes:

Condensed Data

X <- as.matrix(data_condensed[,1:2])
Y <- matrix(0, N*K, K)   # one-hot class matrix: row i gets a 1 in column y[i]
y <- data_condensed[,3]
for (i in 1:(N*K)){
  Y[i, y[i]] <- 1
}
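
A quick sanity check on the one-hot encoding (a sketch, not in the original): every row of Y should sum to exactly 1.

stopifnot(all(rowSums(Y) == 1))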

Prediction function and the MLP model:

nnetPred <- function(X, para = list()){
  W <- para[[1]]
  b <- para[[2]]
  W2 <- para[[3]]
  b2 <- para[[4]]

  N <- nrow(X)
  hidden_layer <- pmax(0, X%*% W + matrix(rep(b,N), nrow = N, byrow = T)) 
  hidden_layer <- matrix(hidden_layer, nrow = N)
  scores <- hidden_layer%*%W2 + matrix(rep(b2,N), nrow = N, byrow = T) 
  predicted_class <- apply(scores, 1, which.max)

  return(predicted_class)  
}
set.seed(100)
nnet.model <- nnet(X, Y, step_size = 0.09,reg = 0.000004, h=2, niteration = 40000)
plot(1:length(nnet.model[[5]]), nnet.model[[5]], main="Learning Curve",
      xlab="No. of Iterations", ylab="Mean squared error", type= "l", cex = .6)

Training accuracy with the prototypes generated by the Chen algorithm:

predicted_class_prototipo <- nnetPred(X, nnet.model)
print(paste('training accuracy:',mean(predicted_class_prototipo == (y))))

Decision Boundary:

# plot the resulting classifier
hs <- 0.01
grid <- as.matrix(expand.grid(seq(x_min, x_max, by = hs), seq(y_min, y_max, by =hs)))
Z <- nnetPred(grid, nnet.model)
data <- data_condensed
ggplot()+
  geom_tile(aes(x = grid[,1],y = grid[,2],fill=as.character(Z)), alpha = 0.3, show.legend = F)+ 
  geom_point(data = data, aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  ggtitle('Decision Boundary - MLP') +
  coord_fixed(ratio = 0.8) + 
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')
# 1.5 cycles each, with noise
#p<-mlbench.spirals(600,1.5,0.05)

set.seed(200)
data_new <- cbind(x1_2nd,x2,x3)
colnames(data_new) <- c('x','y','label')
data <- as.data.frame(data_new)
new_data <- as.data.frame(data_new)

colnames(new_data) <- c('x','y','label')
# let's visualize the new data:
ggplot(new_data) + geom_point(aes(x=x, y=y, color = as.character(label)), size = 2) + theme_bw(base_size = 15) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  ggtitle('Original Iris data - Setosa vs Virginica') +
  coord_fixed(ratio = 0.8) +
  theme(axis.ticks=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
        axis.text=element_blank(), axis.title=element_blank(), legend.position = 'none')

Predict the new data using the MLP model trained on the prototypes:

predicted_class <- nnetPred(p[,1:2], nnet.model)
print(paste('Test accuracy:',mean(predicted_class == (new_data$label))))

Generate the Confusion Matrix:

library(caret)
# construct the evaluation dataset

set.seed(144)
# note: the synthetic test_set below (beta-distributed probabilities) is
# illustrative only; the confusion matrix is computed from the actual predictions
true_class <- factor(predicted_class)
true_class <- sort(true_class)
class1_probs <- rbeta(sum(true_class == 1), 1, 1.5)
class2_probs <- rbeta(sum(true_class == 2), 1, 1)
test_set <- data.frame(obs = true_class,Class1 = c(class1_probs, class2_probs))
test_set$Class2 <- 1 - test_set$Class1
test_set$pred <- factor(ifelse(test_set$Class1 >= .5, 1, 2))
# calculate the confusion matrix
#cm <- confusionMatrix(data = test_set$pred, reference = test_set$obs)
#confusionMatrix(as.table(mat), positive="B")
#detach("RSNNS")
#detach("package:RSNNS", unload=TRUE)
cm<-caret::confusionMatrix(factor(p[,3]),factor(predicted_class))
draw_confusion_matrix <- function(cm) {

  layout(matrix(c(1,1,2)))
  par(mar=c(2,2,2,2))
  plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
  title('CONFUSION MATRIX', cex.main=2)

  # create the matrix 
  rect(150, 430, 240, 370, col='#3F97D0')
  text(195, 435, 'Class1', cex=1.2)
  rect(250, 430, 340, 370, col='#F7AD50')
  text(295, 435, 'Class2', cex=1.2)
  text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
  text(245, 450, 'Actual', cex=1.3, font=2)
  rect(150, 305, 240, 365, col='#F7AD50')
  rect(250, 305, 340, 365, col='#3F97D0')
  text(140, 400, 'Class1', cex=1.2, srt=90)
  text(140, 335, 'Class2', cex=1.2, srt=90)

  # add in the cm results 
  res <- as.numeric(cm$table)
  text(195, 400, res[1], cex=1.6, font=2, col='white')
  text(195, 335, res[2], cex=1.6, font=2, col='white')
  text(295, 400, res[3], cex=1.6, font=2, col='white')
  text(295, 335, res[4], cex=1.6, font=2, col='white')

  # add in the specifics 
  plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
  text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
  text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
  text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
  text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
  text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
  text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
  text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
  text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
  text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)

  # add in the accuracy information 
  text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
  text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
  text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
  text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}  
draw_confusion_matrix(cm)
# Compute roc
library(ROCR)
#data(ROCR.simple)
pred <- prediction( predicted_class, new_data$label )
pred2 <- prediction(predicted_class_prototipo, data_condensed$label )
perf <- performance( pred, "tpr", "fpr" )
perf2 <- performance(pred2, "tpr", "fpr")
plot( perf, colorize = TRUE)
plot(perf2, add = TRUE)
legend(0.6, 0.6, legend=c("Prot?tipo - Train Peformance", "New Data Train Performance"),
      col=c("black", "red"), pch=c(95,95), cex=0.6)

