library(caret) # for the confusionMatrix() function

# Load testing dataset
to_predict <- read.csv('C:\\test.csv', header = TRUE)

# Select the predictor columns and the actual outcome (Creditability)
toPredict <- subset(to_predict, select = c(3:17)) # alternative: subset(to_predict, select = c(c(3:16), c(20:22)))
real_results <- subset(to_predict, select = c(2))

# Predict probabilities (for a logistic model, type = 'response' returns P(Creditability = 1))
results_prob <- predict(model, toPredict, type = 'response')

# If prob > 0.5 then 1, else 0
results <- ifelse(results_prob > 0.5, 1, 0)

# Quick check
head(results)
head(results_prob)
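
# As a small base-R sketch (not part of the original script), the predicted
# classes can be cross-tabulated against the actual labels; test accuracy is
# the sum of the diagonal over the total count:
pred_table <- table(Predicted = results, Actual = real_results$Creditability)
pred_table
sum(diag(pred_table)) / sum(pred_table)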

# Check accuracy on the test set
misClassificationError <- mean(real_results$Creditability != results)
print(paste('Accuracy', 1 - misClassificationError))
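
# The 0.5 cutoff above is just the default choice. As a rough sketch (reusing
# the objects already defined here, not part of the original analysis), other
# cutoffs can be compared on test-set accuracy:
cutoffs <- c(0.3, 0.4, 0.5, 0.6, 0.7)
accuracy_by_cutoff <- sapply(cutoffs, function(cut) {
  mean(ifelse(results_prob > cut, 1, 0) == real_results$Creditability)
})
names(accuracy_by_cutoff) <- cutoffs
accuracy_by_cutoff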

# Confusion matrix
confusionMatrix(data=results, reference=real_results$Creditability)
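# Note (assumption, not from the original run): newer caret releases require
# both arguments to be factors with the same levels; if the call above errors,
# an equivalent form is:
# confusionMatrix(data = factor(results, levels = c(0, 1)),
#                 reference = factor(real_results$Creditability, levels = c(0, 1)))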


# Output
# 
# > head(results)
# 1 2 3 4 5 6 
# 1 1 1 1 1 1 
# 
# > head(results_prob)
#         1         2         3         4         5         6 
# 0.7210193 0.6748606 0.6362183 0.6810534 0.7748649 0.7945139 
# 
# > print(paste('Accuracy', 1 - misClassificationError))
# [1] "Accuracy 0.75"
# 
# > confusionMatrix(data=results, reference=real_results$Creditability)
# Confusion Matrix and Statistics
# 
#           Reference
# Prediction   0   1
#          0  76  44
#          1  81 299
#                                           
#                Accuracy : 0.75            
#                  95% CI : (0.7096, 0.7874)
#     No Information Rate : 0.686           
#     P-Value [Acc > NIR] : 0.0009953       
#                                           
#                   Kappa : 0.3801          
#  Mcnemar's Test P-Value : 0.0012822       
#                                           
#             Sensitivity : 0.4841          
#             Specificity : 0.8717          
#          Pos Pred Value : 0.6333          
#          Neg Pred Value : 0.7868          
#              Prevalence : 0.3140          
#          Detection Rate : 0.1520          
#    Detection Prevalence : 0.2400          
#       Balanced Accuracy : 0.6779          
#                                           
#        'Positive' Class : 0    
#