#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 25 13:44:51 2017
@author: dlituiev
"""
import os
import numpy as np
from uuid import uuid1
import keras
from keras import backend as K
from keras.layers import (InputLayer, Conv2D, Dense, Activation,
                          AveragePooling2D, GlobalAveragePooling2D,
                          BatchNormalization, Lambda)
from keras.applications.inception_v3 import InceptionV3
import tensorflow as tf
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import Callback, LearningRateScheduler, ModelCheckpoint
from keras.callbacks import CSVLogger
from keras.backend.tensorflow_backend import _to_tensor
from keras.backend import epsilon
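# NB: this gist targets Keras 2.x on a TensorFlow 1.x backend; `tf.log`,
# `tf.ConfigProto`, `tf.Session`, and the private `_to_tensor` helper were
# removed or relocated in TF 2.x.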
def sparse_categorical_crossentropy(target, output, from_logits=False):
    """Categorical crossentropy with integer targets.

    # Arguments
        target: An integer tensor.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.

    # Returns
        Output tensor.
    """
    # Note: tf.nn.sparse_softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        _epsilon = _to_tensor(epsilon(), output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
        logits = tf.log(output)
    else:
        logits = output
    output_shape = output.get_shape()
    input_shape = target.shape
    target_shape = [x.value for x in input_shape[:-1]]
    # targets = cast(flatten(target), 'int64')
    # logits = tf.reshape(output, [-1, int(output_shape[-1])])
    # drop the trailing singleton channel: (batch, h, w, 1) -> (batch, h, w)
    target = Lambda(lambda x: x[:, :, :, 0], output_shape=target_shape)(target)
    target = tf.cast(target, tf.int64)
    print("logits", logits.shape)
    print("targets", target.shape)
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target,
        logits=logits)
    return res
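# A minimal sanity check for the loss above (a sketch, assuming TF 1.x graph
# mode; shapes and values are hypothetical):
#
#   probs = tf.constant(0.5 * np.ones((1, 2, 2, 2)), dtype=tf.float32)
#   labels = tf.constant(np.zeros((1, 2, 2, 1)), dtype=tf.float32)
#   xent = sparse_categorical_crossentropy(labels, probs)
#   with tf.Session() as s:
#       print(s.run(xent))  # every pixel should be -log(0.5) ~= 0.6931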
def metric_per_channel_tf(label, prediction, nch=3, metric=tf.metrics.accuracy):
    """Apply a streaming tf.metrics metric per channel and sum the results."""
    prec = []
    # label = tf.stack([label])
    print("label.shape", label.shape)
    print("prediction.shape", prediction.shape)
    dummy = [0] * len(prediction.shape[:-1])
    shape = ([(x.value if x.value is not None else -1)
              for x in prediction.shape[:-1]] + [1])
    print("SHAPE:", shape)
    for cc in range(nch):
        print("start", (dummy + [cc]))
        print("end", shape)
        # cut out channel cc of the prediction and drop the channel axis
        pred_channel = tf.slice(prediction, (dummy + [cc]), shape)
        pred_channel = tf.reshape(pred_channel, shape[:-1])
        # label_channel = tf.equal(label, cc)
        label_channel = label[:, :, :, cc]
        _, prec_ = metric(label_channel, pred_channel)
        prec.append(prec_)
    prec = tf.stack(prec)
    return tf.reduce_sum(prec)
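# Note: tf.metrics.accuracy returns (value_op, update_op); keeping the
# update_op, as above, is a common workaround for using streaming metrics as
# Keras metrics, and is the reason the local-variable initializer (init_l)
# is run before training below.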
def weighted_sparse_softmax_cross_entropy_with_logits(y_true, logits, alpha=0.0):
    """Sparse softmax cross-entropy with inverse-frequency class weighting.

    Pixels labelled 5 are treated as background; everything else is
    foreground. `alpha` mixes the background and foreground contributions.
    """
    input_shape = y_true.shape
    output_shape = [x.value for x in input_shape[:-1]]
    y_true = Lambda(lambda x: x[:, :, :, 0], output_shape=output_shape)(y_true)
    y_true = tf.cast(y_true, tf.int32)
    out = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y_true,
        logits=logits)
    print("loss shape", out.shape)
    mask_bg = tf.equal(y_true, 5)
    mask_fg = tf.cast(tf.logical_not(mask_bg), tf.float32)
    mask_bg = tf.cast(mask_bg, tf.float32)
    fg_c = tf.reduce_sum(mask_fg)
    bg_c = tf.reduce_sum(mask_bg)
    tot_c = fg_c + bg_c
    # inverse-frequency weights, smoothed by +1 to avoid division by zero
    fg = mask_fg * (tot_c + 1) / (fg_c + 1)
    bg = mask_bg * (tot_c + 1) / (bg_c + 1)
    # ca = tf.logical_or(tf.equal(y_true, 1), tf.equal(y_true, 2))
    # be = tf.logical_or(tf.equal(y_true, 3), tf.equal(y_true, 4))
    # fgloss = tf.boolean_mask(out, fg)
    loss = alpha * out * bg + (1 - alpha) * out * fg
    # bgloss = tf.boolean_mask(out, bg)
    # loss = alpha*tf.reduce_sum(bgloss) + (1-alpha)*tf.reduce_sum(fgloss)
    return tf.reduce_mean(loss)
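# Worked example of the weighting, assuming the inverse-frequency reading
# above: with 900 background and 100 foreground pixels, tot_c = 1000, so each
# foreground pixel gets weight (1000+1)/(100+1) ~= 9.9 and each background
# pixel (1000+1)/(900+1) ~= 1.1, i.e. the two classes contribute roughly
# equally to the total loss before the alpha mixing.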
bn_scale = False
model = keras.models.Sequential()
model.add(InputLayer((299, 299, 3)))
model.add(Conv2D(8, (3, 3)))
model.add(BatchNormalization(scale=bn_scale))
model.add(AveragePooling2D(3, 3))
model.add(Conv2D(4, (3, 3)))
model.add(BatchNormalization(scale=bn_scale))
model.add(AveragePooling2D(3, 3))
model.add(Conv2D(2, (3, 3)))
model.add(BatchNormalization(scale=bn_scale))
model.add(Activation("softmax"))
print(model.output.shape)
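# Shape check with 'valid' padding throughout: 299 -> 297 (3x3 conv) -> 99
# (3x3 pool, stride 3) -> 97 (conv) -> 32 (pool) -> 30 (conv), so the printed
# output shape should be (?, 30, 30, 2), matching the 30x30 label maps built
# below from every 10th pixel.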
config = tf.ConfigProto(log_device_placement=False)
sess0 = tf.Session(config=config)
x = np.zeros((64, 299, 299, 3))
for ii in range(len(x)):
    # burn a bright random-walk path into an otherwise black image
    x_ = x[ii, :, :, 0].copy()
    start = np.random.randint(100, 200, size=(2, 1))
    path = (start + np.cumsum(np.random.randint(-1, 2, size=(2, 100)), axis=1)) % 299
    path_ = np.ravel_multi_index(path, dims=(299, 299))
    x_.ravel()[path_] = 256
    for cc in range(x.shape[-1]):
        x[ii, :, :, cc] = x_
# subsample every 10th pixel to build the 30x30 segmentation labels
ysparse = x[:, ::10, ::10, 0] > 0
ysparse = np.stack([ysparse], axis=-1).astype(int)
y = np.stack(
    [x[:, ::10, ::10, 0] > 0,
     x[:, ::10, ::10, 0] == 0,
     ], axis=-1)
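# ysparse: (64, 30, 30, 1) integer class ids (0/1) for the sparse loss;
# y: (64, 30, 30, 2) one-hot equivalent, kept for experiments with dense
# categorical losses.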
init_g = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()
epochs = 100
CHECKPOINT_DIR = "checkpoints/segm-test/{}".format(uuid1())
os.makedirs(CHECKPOINT_DIR, exist_ok=True)
CHECKPOINT_PATH = os.path.join(CHECKPOINT_DIR, 'model.{epoch:02d}-{val_loss:2f}.hdf5')
csv_path = os.path.join(CHECKPOINT_DIR, "progresslog.csv")
csv_callback = CSVLogger(csv_path, separator=',', append=False)
checkpoint = ModelCheckpoint(CHECKPOINT_PATH, monitor='val_loss', verbose=1,
                             save_best_only=False, save_weights_only=False,
                             mode='auto', period=1)
callback_list = [checkpoint, csv_callback]
# weightfile = "checkpoints/segm-test/310ef9cc-beaa-11e7-aa30-dca9048b1c31/model.98-570.166443.hdf5"
weightfile = None
if weightfile is not None:
    model.load_weights(weightfile)
with sess0.as_default() as sess:
    def accuracy_per_channel(x, y):
        return metric_per_channel_tf(x, y, nch=2, metric=tf.metrics.accuracy)

    # tf.metrics.* create local variables, so run both initializers
    sess.run(init_g)
    sess.run(init_l)
    model.compile(optimizer='Adam',
                  loss='sparse_categorical_crossentropy',
                  # loss=weighted_sparse_softmax_cross_entropy_with_logits,
                  # metrics=[accuracy_per_channel],
                  )
    model.fit(x, ysparse, batch_size=4, epochs=epochs,
              validation_split=1 / 4,
              callbacks=callback_list)
print("DONE")