from pycm import ConfusionMatrix

# Ground-truth labels and the classifier's predictions for a 3-class problem.
y_test = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]

# Build the confusion matrix from the actual vs. predicted vectors.
cm1 = ConfusionMatrix(y_test, y_pred)
cm1  # REPL echo: short repr showing only the detected class labels
pycm.ConfusionMatrix(classes: [0, 1, 2])
# Print the full report: matrix table, overall statistics, per-class statistics.
print(cm1)
Predict   0   1   2
Actual
0         3   0   0
1         0   1   2
2         2   1   3

Overall Statistics :

95% CI                                  (0.30439,0.86228)
Bennett_S                               0.375
Chi-Squared                             6.6
Chi-Squared DF                          4
Conditional Entropy                     0.95915
Cramer_V                                0.5244
Cross Entropy                           1.59352
Gwet_AC1                                0.38931
Hamming Loss                            0.41667
Joint Entropy                           2.45915
KL Divergence                           0.09352
Kappa                                   0.35484
Kappa 95% CI                            (-0.07708,0.78675)
Kappa No Prevalence                     0.16667
Kappa Standard Error                    0.22036
Kappa Unbiased                          0.34426
Lambda A                                0.16667
Lambda B                                0.42857
Mutual Information                      0.52421
NIR                                     0.5
Overall_ACC                             0.58333
Overall_CEN                             0.46381
Overall_J                               (1.225,0.40833)
Overall_MCEN                            0.51894
Overall_RACC                            0.35417
Overall_RACCU                           0.36458
P-Value                                 0.38721
PPV_Macro                               0.56667
PPV_Micro                               0.58333
Phi-Squared                             0.55
Reference Entropy                       1.5
Response Entropy                        1.48336
Scott_PI                                0.34426
Standard Error                          0.14232
Strength_Of_Agreement(Altman)           Fair
Strength_Of_Agreement(Cicchetti)        Poor
Strength_Of_Agreement(Fleiss)           Poor
Strength_Of_Agreement(Landis and Koch)  Fair
TPR_Macro                               0.61111
TPR_Micro                               0.58333
Zero-one Loss                           5

Class Statistics :

Classes                                                          0        1        2
ACC(Accuracy)                                                    0.83333  0.75     0.58333
BM(Informedness or bookmaker informedness)                       0.77778  0.22222  0.16667
CEN(Confusion entropy)                                           0.25     0.49658  0.60442
DOR(Diagnostic odds ratio)                                       None     4.0      2.0
ERR(Error rate)                                                  0.16667  0.25     0.41667
F0.5(F0.5 score)                                                 0.65217  0.45455  0.57692
F1(F1 score - harmonic mean of precision and sensitivity)        0.75     0.4      0.54545
F2(F2 score)                                                     0.88235  0.35714  0.51724
FDR(False discovery rate)                                        0.4      0.5      0.4
FN(False negative/miss/type 2 error)                             0        2        3
FNR(Miss rate or false negative rate)                            0.0      0.66667  0.5
FOR(False omission rate)                                         0.0      0.2      0.42857
FP(False positive/type 1 error/false alarm)                      2        1        2
FPR(Fall-out or false positive rate)                             0.22222  0.11111  0.33333
G(G-measure geometric mean of precision and sensitivity)         0.7746   0.40825  0.54772
IS(Information score)                                            1.26303  1.0      0.26303
J(Jaccard index)                                                 0.6      0.25     0.375
LR+(Positive likelihood ratio)                                   4.5      3.0      1.5
LR-(Negative likelihood ratio)                                   0.0      0.75     0.75
MCC(Matthews correlation coefficient)                            0.68313  0.2582   0.16903
MCEN(Modified confusion entropy)                                 0.26439  0.5      0.6875
MK(Markedness)                                                   0.6      0.3      0.17143
N(Condition negative)                                            9        9        6
NPV(Negative predictive value)                                   1.0      0.8      0.57143
P(Condition positive or support)                                 3        3        6
POP(Population)                                                  12       12       12
PPV(Precision or positive predictive value)                      0.6      0.5      0.6
PRE(Prevalence)                                                  0.25     0.25     0.5
RACC(Random accuracy)                                            0.10417  0.04167  0.20833
RACCU(Random accuracy unbiased)                                  0.11111  0.0434   0.21007
TN(True negative/correct rejection)                              7        8        4
TNR(Specificity or true negative rate)                           0.77778  0.88889  0.66667
TON(Test outcome negative)                                       7        10       7
TOP(Test outcome positive)                                       5        2        5
TP(True positive/hit)                                            3        1        3
TPR(Sensitivity, recall, hit rate, or true positive rate)        1.0      0.33333  0.5
from random import randint

# Draw one random integer weight in [1, 10] per sample.
# NOTE(review): no seed is set, so the weighted report below differs between runs.
weights = [randint(1, 10) for _ in y_test]
# Over-weight the third sample heavily so the effect of weighting is visible.
weights[2] *= 9

# Same labels/predictions as before, but sample i now counts weights[i] times.
cm2 = ConfusionMatrix(y_test, y_pred, sample_weight=weights)
cm2  # REPL echo: short repr showing only the detected class labels
pycm.ConfusionMatrix(classes: [0, 1, 2])
# Print the full weighted report: matrix table, overall and per-class statistics.
print(cm2)
Predict   0    1    2
Actual
0         26   0    0
1         0    8    9
2         17   8    60

Overall Statistics :

95% CI                                  (0.65786,0.81089)
Bennett_S                               0.60156
Chi-Squared                             83.06411
Chi-Squared DF                          4
Conditional Entropy                     0.8895
Cramer_V                                0.56962
Cross Entropy                           1.3101
Gwet_AC1                                0.63391
Hamming Loss                            0.26562
Joint Entropy                           2.13562
KL Divergence                           0.06398
Kappa                                   0.52328
Kappa 95% CI                            (0.38596,0.6606)
Kappa No Prevalence                     0.46875
Kappa Standard Error                    0.07006
Kappa Unbiased                          0.51604
Lambda A                                0.2093
Lambda B                                0.44068
Mutual Information                      0.49474
NIR                                     0.66406
Overall_ACC                             0.73438
Overall_CEN                             0.37625
Overall_J                               (1.56295,0.52098)
Overall_MCEN                            0.46301
Overall_RACC                            0.44281
Overall_RACCU                           0.45114
P-Value                                 0.0538
PPV_Macro                               0.65807
PPV_Micro                               0.73438
Phi-Squared                             0.64894
Reference Entropy                       1.24612
Response Entropy                        1.38424
Scott_PI                                0.51604
Standard Error                          0.03904
Strength_Of_Agreement(Altman)           Moderate
Strength_Of_Agreement(Cicchetti)        Fair
Strength_Of_Agreement(Fleiss)           Intermediate to Good
Strength_Of_Agreement(Landis and Koch)  Moderate
TPR_Macro                               0.72549
TPR_Micro                               0.73438
Zero-one Loss                           34

Class Statistics :

Classes                                                          0        1        2
ACC(Accuracy)                                                    0.86719  0.86719  0.73438
BM(Informedness or bookmaker informedness)                       0.83333  0.39852  0.49658
CEN(Confusion entropy)                                           0.24897  0.50341  0.40602
DOR(Diagnostic odds ratio)                                       None     11.44444 9.06667
ERR(Error rate)                                                  0.13281  0.13281  0.26562
F0.5(F0.5 score)                                                 0.65657  0.49383  0.83102
F1(F1 score - harmonic mean of precision and sensitivity)        0.75362  0.48485  0.77922
F2(F2 score)                                                     0.88435  0.47619  0.7335
FDR(False discovery rate)                                        0.39535  0.5      0.13043
FN(False negative/miss/type 2 error)                             0        9        25
FNR(Miss rate or false negative rate)                            0.0      0.52941  0.29412
FOR(False omission rate)                                         0.0      0.08036  0.42373
FP(False positive/type 1 error/false alarm)                      17       8        9
FPR(Fall-out or false positive rate)                             0.16667  0.07207  0.2093
G(G-measure geometric mean of precision and sensitivity)         0.77759  0.48507  0.78346
IS(Information score)                                            1.57374  1.91254  0.38898
J(Jaccard index)                                                 0.60465  0.32     0.6383
LR+(Positive likelihood ratio)                                   6.0      6.52941  3.37255
LR-(Negative likelihood ratio)                                   0.0      0.57053  0.37197
MCC(Matthews correlation coefficient)                            0.70984  0.40894  0.47052
MCEN(Modified confusion entropy)                                 0.26465  0.52832  0.53638
MK(Markedness)                                                   0.60465  0.41964  0.44584
N(Condition negative)                                            102      111      43
NPV(Negative predictive value)                                   1.0      0.91964  0.57627
P(Condition positive or support)                                 26       17       85
POP(Population)                                                  128      128      128
PPV(Precision or positive predictive value)                      0.60465  0.5      0.86957
PRE(Prevalence)                                                  0.20312  0.13281  0.66406
RACC(Random accuracy)                                            0.06824  0.0166   0.35797
RACCU(Random accuracy unbiased)                                  0.07265  0.01662  0.36188
TN(True negative/correct rejection)                              85       103      34
TNR(Specificity or true negative rate)                           0.83333  0.92793  0.7907
TON(Test outcome negative)                                       85       112      59
TOP(Test outcome positive)                                       43       16       69
TP(True positive/hit)                                            26       8        60
TPR(Sensitivity, recall, hit rate, or true positive rate)        1.0      0.47059  0.70588