training.py
# balance data via loss weights for each batch
x_train, y_train, index = batch_data
label = np.argmax(y_train, axis=1)
num_pos = np.count_nonzero(label)
num_neg = len(label) - num_pos
# Weight each class by the opposite class's frequency; the +1 (Laplace smoothing)
# keeps both weights non-zero when a batch is all-positive or all-negative.
pos_weight = np.true_divide(num_neg + 1, len(label) + 1)
neg_weight = np.true_divide(num_pos + 1, len(label) + 1)
class_weight = np.array([[neg_weight, pos_weight]])
_, train_cost = sess.run(
    [train_op, classification_loss_op],
    feed_dict={input_x: x_train,
               input_y_classification: y_train,
               bs_holder: batch_size,
               training_flag: True,
               tf_class_weight: class_weight})
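As a quick sanity check, here is a minimal standalone sketch of the same weighting scheme on a made-up toy batch (the labels and batch size are illustrative, not from the gist):

import numpy as np

# Hypothetical toy batch: 6 one-hot labels, 4 negative and 2 positive.
y_train = np.array([[1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 1]])

label = np.argmax(y_train, axis=1)
num_pos = np.count_nonzero(label)                          # 2
num_neg = len(label) - num_pos                             # 4
pos_weight = np.true_divide(num_neg + 1, len(label) + 1)   # 5/7 ~ 0.714
neg_weight = np.true_divide(num_pos + 1, len(label) + 1)   # 3/7 ~ 0.429

# The minority (positive) class receives the larger weight.
print(pos_weight, neg_weight)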
weighted_loss_function.py
import tensorflow as tf

def classification_loss(self, logit, input_y_classification, class_weight):
    labels = input_y_classification
    # Per-example weight, selected by each example's one-hot label; shape [1, batch_size].
    weight_per_label = tf.transpose(tf.matmul(labels, tf.transpose(class_weight)))
    entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=labels,
                                                      name="xent_raw")
    xent = tf.multiply(weight_per_label, entropy)  # shape [1, batch_size]
    cost = tf.reduce_mean(xent)  # scalar
    self._summaries['classification_loss'] = tf.summary.scalar('classification_loss', cost)
    return cost
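The weighting can be checked outside a TensorFlow session with a small NumPy reimplementation; this sketch (function name and test values are illustrative, not from the gist) confirms that weight_per_label picks the weight matching each example's label:

import numpy as np

def weighted_xent_numpy(logits, labels, class_weight):
    # NumPy sketch of the weighted softmax cross-entropy above.
    shifted = logits - logits.max(axis=1, keepdims=True)          # numerical stability
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    entropy = -(labels * np.log(probs)).sum(axis=1)               # shape [batch_size]
    weight_per_label = labels @ class_weight.T                    # shape [batch_size, 1]
    return np.mean(weight_per_label.ravel() * entropy)

logits = np.array([[2.0, 0.5], [0.1, 1.5]])
labels = np.array([[1.0, 0.0], [0.0, 1.0]])
class_weight = np.array([[0.429, 0.714]])   # [neg_weight, pos_weight]
print(weighted_xent_numpy(logits, labels, class_weight))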
metrics.py
# after the whole epoch, read tp/fp/tn/fn from the accumulated confusion matrix
# (rows = true class, columns = predicted class)
tp = conf_mtx[1][1]
fp = conf_mtx[0][1]
tn = conf_mtx[0][0]
fn = conf_mtx[1][0]
precision = np.true_divide(tp, tp + fp)
recall = np.true_divide(tp, tp + fn)
tn_rate = np.true_divide(tn, tn + fp)  # specificity / true-negative rate
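For a concrete check, the same metrics can be computed from a made-up 2x2 confusion matrix (values are illustrative only):

import numpy as np

# Hypothetical epoch-level confusion matrix: rows = true class, cols = predicted.
conf_mtx = np.array([[50, 10],    # tn, fp
                     [ 5, 35]])   # fn, tp

tp, fp = conf_mtx[1][1], conf_mtx[0][1]
tn, fn = conf_mtx[0][0], conf_mtx[1][0]
print(np.true_divide(tp, tp + fp))  # precision = 35/45 ~ 0.778
print(np.true_divide(tp, tp + fn))  # recall    = 35/40 = 0.875
print(np.true_divide(tn, tn + fp))  # tn_rate   = 50/60 ~ 0.833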
Accuracy and confusion matrix.py
con_mtx = tf.confusion_matrix(labels, prediction, num_classes=self.num_class)
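A self-contained TF1-style sketch of how per-batch confusion matrices can be accumulated so that the epoch-level tp/fp/tn/fn in metrics.py are available at the end (the batch values here are illustrative, not from the gist):

import numpy as np
import tensorflow as tf

labels = tf.placeholder(tf.int64, [None])
prediction = tf.placeholder(tf.int64, [None])
con_mtx_op = tf.confusion_matrix(labels, prediction, num_classes=2)

with tf.Session() as sess:
    # Sum the per-batch matrices over the epoch.
    total = np.zeros((2, 2), dtype=np.int64)
    for y_true, y_pred in [([0, 0, 1, 1], [0, 1, 1, 1]),
                           ([1, 0, 0, 1], [1, 0, 1, 0])]:
        total += sess.run(con_mtx_op, feed_dict={labels: y_true, prediction: y_pred})
    print(total)  # rows = true class, cols = predicted class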