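# Assumed imports for this snippet (not shown in the original gist). The
# names df, feature_cols, label_col, test_size, and the LogisticRegression
# keyword arguments (penalty, dual, tol, C, fit_intercept, ...) are expected
# to be defined upstream, e.g. by Streamlit input widgets in the host app.
import pandas as pd
import plotly.express as px
import plotly.figure_factory as ff
import streamlit as st
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split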
def confusion_matrix_plot(data):
    # Reverse the row order so the matrix cells line up with the
    # 'Positive'/'Negative' axis labels below when rendered by Plotly.
    z = data.tolist()[::-1]
    x = ['Negative', 'Positive']
    y = ['Positive', 'Negative']
    z_text = z
    fig = ff.create_annotated_heatmap(z, x=x, y=y, annotation_text=z_text,
                                      text=z, hoverinfo='text',
                                      colorscale='Blackbody')
    fig.update_layout(font_family="IBM Plex Sans")
    st.write(fig)
def roc_plot(data):
    # Plot the ROC curve from a DataFrame holding the false positive and
    # true positive rates; the section subheader supplies the title.
    fig = px.line(data, x="False Positive", y="True Positive")
    fig.update_layout(font_family="IBM Plex Sans")
    st.write(fig)
try:
    # Split the selected feature and label columns into train/test sets.
    X = df[feature_cols]
    y = df[label_col]
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=test_size,
                                                        random_state=0)
    # Fit a logistic regression model with the user-selected hyperparameters.
    logreg = LogisticRegression(penalty=penalty, dual=dual, tol=tol, C=C,
                                fit_intercept=fit_intercept,
                                intercept_scaling=intercept_scaling,
                                class_weight=class_weight,
                                random_state=random_state, solver=solver,
                                max_iter=max_iter, multi_class=multi_class,
                                verbose=verbose, warm_start=warm_start,
                                l1_ratio=l1_ratio)
    logreg.fit(X_train, y_train)
    y_pred = logreg.predict(X_test)
    # Confusion matrix on the held-out test set.
    cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
    st.subheader('Confusion Matrix')
    confusion_matrix_plot(cnf_matrix)
    # Headline classification metrics, shown in four columns.
    accuracy = metrics.accuracy_score(y_test, y_pred)
    precision = metrics.precision_score(y_test, y_pred)
    recall = metrics.recall_score(y_test, y_pred)
    f1 = metrics.f1_score(y_test, y_pred)
    st.subheader('Metrics')
    col2_1, col2_2, col2_3, col2_4 = st.columns(4)
    with col2_1:
        st.info('Accuracy: **%s**' % (round(accuracy, 3)))
    with col2_2:
        st.info('Precision: **%s**' % (round(precision, 3)))
    with col2_3:
        st.info('Recall: **%s**' % (round(recall, 3)))
    with col2_4:
        st.info('F1 Score: **%s**' % (round(f1, 3)))
    # ROC curve and AUC from the predicted positive-class probabilities.
    y_pred_proba = logreg.predict_proba(X_test)[:, 1]
    fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
    roc_data = pd.DataFrame()
    roc_data['True Positive'] = tpr
    roc_data['False Positive'] = fpr
    st.subheader('ROC Curve')
    roc_plot(roc_data)
    auc = metrics.roc_auc_score(y_test, y_pred_proba)
    st.info('Area Under Curve: **%s**' % (round(auc, 3)))
except Exception as e:
    # The original gist's handler is not shown; this minimal fallback that
    # surfaces the error in the app is an assumption, not from the source.
    st.error(e)
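# Usage sketch (assumption): save this alongside the upstream widget code as
# app.py and launch the dashboard with `streamlit run app.py`.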