nonet_ensemble Clustering with nonet_plot

nonet provides ensemble capabilities for clustering problems.

The example below shows, step by step, how to use the nonet_ensemble and nonet_plot functions in a clustering context. We use the Banknote Authentication data set to predict the output class variable with the ClusterR package, because a Gaussian mixture model (GMM) yields the probability that an input point belongs to a specific cluster. The predictions from the first and second GMM models are supplied to nonet_ensemble as a list.

Let’s start:

Load the required libraries

library(caret)
library(ggplot2)
library(ClusterR)
library(nonet)
Setting the seed
set.seed(1001)

Load the banknote_authentication dataset and explore it.

dataframe <- data.frame(banknote_authentication)
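
Exploring the structure of the data frame:

str(dataframe)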

We can see above that the class variable has an integer datatype; we need to convert it to a factor.

Converting the class variable to a factor.

dataframe$class <- as.factor(dataframe$class)

First GMM Model

Splitting the data into training and test sets.

# Splitting the data into two parts based on the outcome: 75% and 25%
index <- createDataPartition(dataframe$class, p=0.75, list=FALSE)
trainSet <- dataframe[ index,]
testSet <- dataframe[-index,]

Exploring the dimensions of trainSet and testSet

dim(trainSet); dim(testSet)
## [1] 1030    5
## [1] 342   5
str(trainSet)
## 'data.frame':    1030 obs. of  5 variables:
##  $ variance: num  4.546 3.457 0.329 4.368 3.591 ...
##  $ skewness: num  8.17 9.52 -4.46 9.67 3.01 ...
##  $ curtosis: num  -2.459 -4.011 4.572 -3.961 0.729 ...
##  $ entropy : num  -1.462 -3.594 -0.989 -3.163 0.564 ...
##  $ class   : Factor w/ 2 levels "0","1": 1 1 1 1 1 1 1 1 1 1 ...
str(testSet)
## 'data.frame':    342 obs. of  5 variables:
##  $ variance: num  3.62 3.87 2.09 3.2 1.54 ...
##  $ skewness: num  8.67 -2.64 -6.81 5.76 9.18 ...
##  $ curtosis: num  -2.807 1.924 8.464 -0.753 -2.272 ...
##  $ entropy : num  -0.447 0.106 -0.602 -0.613 -0.735 ...
##  $ class   : Factor w/ 2 levels "0","1": 1 1 1 1 1 1 1 1 1 1 ...

Feature selection using rfe in caret

control <- rfeControl(functions = rfFuncs,
                      method = "repeatedcv",
                      repeats = 3,
                      verbose = FALSE)
outcomeName <- "class"
predictors <- c("variance", "curtosis", "entropy")
head(trainSet[,predictors])
##    variance curtosis  entropy
## 2   4.54590 -2.45860 -1.46210
## 4   3.45660 -4.01120 -3.59440
## 5   0.32924  4.57180 -0.98880
## 6   4.36840 -3.96060 -3.16250
## 7   3.59120  0.72888  0.56421
## 13  1.89930  0.15394 -3.11080
head(trainSet[,outcomeName])
## [1] 0 0 0 0 0 0
## Levels: 0 1
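
The control object above configures recursive feature elimination, but rfe itself is not run in this vignette; the predictor set is simply fixed by hand. For reference, a minimal sketch of how RFE could be invoked with caret (optional, and potentially slow; rfe_profile is a hypothetical name):

# Optional: rank the four predictors with recursive feature elimination.
# This step is not part of the original workflow.
rfe_profile <- rfe(trainSet[, c("variance", "skewness", "curtosis", "entropy")],
                   trainSet[[outcomeName]],
                   sizes = 1:4,
                   rfeControl = control)
predictors(rfe_profile)  # predictors selected by RFE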

Model Training

set.seed(900)
gmm_first <- GMM(trainSet[,predictors], 2, dist_mode = "maha_dist", seed_mode = "random_subset", km_iter = 10, em_iter = 10, verbose = F)          
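
The fitted object holds the component parameters (centroids, covariance matrices, and mixture weights) that predict_GMM consumes below; a quick structural look:

str(gmm_first, max.level = 1)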

Predictions using the first GMM

predict_clustering_first <- predict_GMM(trainSet[,predictors], gmm_first$centroids, gmm_first$covariance_matrices, gmm_first$weights) 
head(predict_clustering_first$cluster_proba[, 2])
## [1] 0.639909052 0.997604422 0.006756111 0.991002988 0.051609789 0.974703153

Converting probabilities into classes

predict_cluster_first_class <- as.factor(ifelse(predict_clustering_first$cluster_proba[, 2] >= 0.5, "1", "0"))
head(predict_cluster_first_class)
## [1] 1 1 0 1 0 1
## Levels: 0 1
head(predict_clustering_first$cluster_labels)
## [1] 2 2 1 2 1 2
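
Because GMM clusters carry no inherent class labels, mapping cluster 2 to class "1" is a convention, and the clusters may come out flipped relative to the true classes. As a sanity check only (the ensemble below never sees these labels), the thresholded assignment can be compared against the known classes with caret:

# Inspection only: outcome labels are NOT used by nonet_ensemble.
confusionMatrix(predict_cluster_first_class, trainSet[[outcomeName]])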

Second GMM Model

Splitting the data into two parts based on the outcome: 75% and 25%

index <- createDataPartition(dataframe$class, p=0.75, list=FALSE)
trainSet <- dataframe[ index,]
testSet <- dataframe[-index,]

Exploring the dimensions of trainSet and testSet

dim(trainSet); dim(testSet)
## [1] 1030    5
## [1] 342   5
str(trainSet)
## 'data.frame':    1030 obs. of  5 variables:
##  $ variance: num  3.622 4.546 3.457 0.329 4.368 ...
##  $ skewness: num  8.67 8.17 9.52 -4.46 9.67 ...
##  $ curtosis: num  -2.81 -2.46 -4.01 4.57 -3.96 ...
##  $ entropy : num  -0.447 -1.462 -3.594 -0.989 -3.163 ...
##  $ class   : Factor w/ 2 levels "0","1": 1 1 1 1 1 1 1 1 1 1 ...
str(testSet)
## 'data.frame':    342 obs. of  5 variables:
##  $ variance: num  3.866 1.225 3.99 1.448 0.329 ...
##  $ skewness: num  -2.64 8.78 -2.71 -4.88 -4.46 ...
##  $ curtosis: num  1.92 -2.21 2.39 8.34 4.57 ...
##  $ entropy : num  0.106 -0.806 0.863 -2.109 -0.989 ...
##  $ class   : Factor w/ 2 levels "0","1": 1 1 1 1 1 1 1 1 1 1 ...

Feature selection using rfe in caret

control <- rfeControl(functions = rfFuncs,
                      method = "repeatedcv",
                      repeats = 3,
                      verbose = FALSE)
outcomeName <- "class"
predictors <- c("skewness", "curtosis", "entropy")
head(trainSet[,predictors])
##   skewness curtosis  entropy
## 1   8.6661 -2.80730 -0.44699
## 2   8.1674 -2.45860 -1.46210
## 4   9.5228 -4.01120 -3.59440
## 5  -4.4552  4.57180 -0.98880
## 6   9.6718 -3.96060 -3.16250
## 7   3.0129  0.72888  0.56421
head(trainSet[,outcomeName])
## [1] 0 0 0 0 0 0
## Levels: 0 1

Model Training: Second

set.seed(423)
gmm_second <- GMM(trainSet[,predictors], 2, dist_mode = "maha_dist", seed_mode = "random_subset", km_iter = 10, em_iter = 10, verbose = F)          

Predictions using the second GMM

predict_clustering_Second <- predict_GMM(trainSet[,predictors], gmm_second$centroids, gmm_second$covariance_matrices, gmm_second$weights) 
head(predict_clustering_Second$cluster_proba[, 2])
## [1] 9.735769e-01 9.929562e-01 9.999888e-01 1.746731e-05 9.999573e-01
## [6] 1.788551e-01

Converting prediction probabilities into classes

predict_cluster_Second_class <- as.factor(ifelse(predict_clustering_Second$cluster_proba[, 2] >= 0.5, "1", "0"))
head(predict_cluster_Second_class)
## [1] 1 1 1 1 1 0
## Levels: 0 1
head(predict_clustering_Second$cluster_labels)
## [1] 2 2 2 1 2 1

Create the stack of predictions

Stack_object <- list(predict_clustering_first$cluster_proba[, 2], predict_clustering_Second$cluster_proba[, 2])

Naming the elements of Stack_object

names(Stack_object) <- c("Cluster_first", "Cluster_second")
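
A quick check confirms the stack is a named list of two equal-length numeric probability vectors:

str(Stack_object)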

nonet_ensemble

Now we apply the nonet_ensemble method, supplying the list object and the name of the best model as inputs. Note that we have not provided training or test outcome labels to compute the weights for the weighted-average ensemble used inside nonet_ensemble; it therefore uses the best model's predictions to compute the weights.

prediction_nonet <- nonet_ensemble(Stack_object, "Cluster_second")
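
For intuition, a weighted average of the two stacked prediction vectors looks like the sketch below. This is an illustration only: nonet_ensemble derives its weights internally from the best model's predictions, and its exact weighting scheme may differ.

# Illustration only: a generic weighted average of the two stacked
# prediction vectors; the weights here are made up for demonstration.
w <- c(0.4, 0.6)
manual_blend <- w[1] * Stack_object$Cluster_first +
  w[2] * Stack_object$Cluster_second
head(manual_blend)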

Result Plotting: nonet_plot

Results can be plotted using the nonet_plot function. nonet_plot is designed to offer different plot_type options, so users can produce different visualizations based on their needs.

Creating the list of cluster probabilities

Prediction_data <- list(prediction_nonet, predict_clustering_first$cluster_proba[, 2], predict_clustering_Second$cluster_proba[, 2])

Naming the predictions

names(Prediction_data) <- c("pred_nonet", "pred_clust_first", "pred_clust_second")

Converting the list into a data frame

Prediction_dataframe <- data.frame(Prediction_data)
head(Prediction_dataframe)
##   pred_nonet pred_clust_first pred_clust_second
## 1 1.02456315      0.639909052      9.735769e-01
## 2 1.07244269      0.997604422      9.929562e-01
## 3 1.00052713      0.006756111      9.999888e-01
## 4 0.07897798      0.991002988      1.746731e-05
## 5 1.00406940      0.051609789      9.999573e-01
## 6 0.25651684      0.974703153      1.788551e-01
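
Note that the ensemble predictions are not strictly bounded above by 1 here. If hard class assignments are needed, they can be thresholded in the same way as the individual models (pred_nonet_class is a hypothetical name):

# Threshold the ensemble predictions, mirroring the per-model step above.
pred_nonet_class <- as.factor(ifelse(Prediction_dataframe$pred_nonet >= 0.5, "1", "0"))
head(pred_nonet_class)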
nonet_plot of the nonet_ensemble model's predictions as a histogram
plot_first <- nonet_plot(Prediction_dataframe$pred_nonet, Prediction_dataframe$pred_clust_first, Prediction_dataframe, plot_type = "hist")
plot_first

nonet_plot of the first GMM model's predictions as a histogram
plot_second <- nonet_plot(Prediction_dataframe$pred_clust_first,  Prediction_dataframe$pred_clust_second, Prediction_dataframe, plot_type = "hist")
plot_second

nonet_plot of the second GMM model's predictions as a histogram
plot_third <- nonet_plot(Prediction_dataframe$pred_clust_second,  Prediction_dataframe$pred_clust_first, Prediction_dataframe, plot_type = "hist")
plot_third
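
As an alternative combined view (not part of nonet), plain ggplot2, already loaded above, can overlay all three prediction vectors in one panel after reshaping the data frame to long format with base R (long_df is a hypothetical helper name):

# Alternative visualization: overlaid histograms of the three prediction
# vectors in a single panel.
long_df <- data.frame(
  probability = unlist(Prediction_dataframe, use.names = FALSE),
  model = rep(names(Prediction_dataframe), each = nrow(Prediction_dataframe))
)
ggplot(long_df, aes(x = probability, fill = model)) +
  geom_histogram(bins = 30, alpha = 0.6, position = "identity")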

Conclusion

As shown above, nonet_ensemble and nonet_plot make it possible to build a weighted-average ensemble solution without needing outcome labels to compute the ensemble weights.