-
-
Save xiangze/d42d7c8e5ae967fba388d5c639365f3a to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os, cv2 | |
import os | |
import numpy as np | |
def normalize_x(image):
    """Scale pixel values from [0, 255] into the [-1, 1] range used for inputs."""
    return image / 127.5 - 1
def normalize_y(image):
    """Scale mask pixel values from [0, 255] into [0, 1]."""
    return image / 255
def denormalize_y(image):
    """Scale a normalized mask from [0, 1] back to [0, 255].

    Bug fix: the original computed image*255 but never returned it,
    so every call yielded None.
    """
    return image * 255
def load_X_files(image_files, IMAGE_SIZE):
    """Load RGB images, resize each to (IMAGE_SIZE, IMAGE_SIZE) and normalize to [-1, 1].

    Returns a (N, IMAGE_SIZE, IMAGE_SIZE, 3) float32 array plus the input file list.
    """
    batch = np.zeros((len(image_files), IMAGE_SIZE, IMAGE_SIZE, 3), np.float32)
    for idx, path in enumerate(image_files):
        raw = cv2.imread(path)
        resized = cv2.resize(raw, (IMAGE_SIZE, IMAGE_SIZE))
        batch[idx] = normalize_x(resized)
    return batch, image_files
def load_X(fdir, IMAGE_SIZE):
    """Load every image in *fdir* in sorted filename order.

    Bug fixes: os.listdir(...).sort() returns None (sort is in-place),
    the callee name was misspelled (load_X_flies), and the directory
    was never joined onto the file names before reading.
    """
    image_files = sorted(os.path.join(fdir, f) for f in os.listdir(fdir))
    return load_X_files(image_files, IMAGE_SIZE)
def load_Y_files(image_files, IMAGE_SIZE):
    """Load grayscale masks, resize each to (IMAGE_SIZE, IMAGE_SIZE) and normalize to [0, 1].

    Returns a (N, IMAGE_SIZE, IMAGE_SIZE, 1) float32 array.
    """
    batch = np.zeros((len(image_files), IMAGE_SIZE, IMAGE_SIZE, 1), np.float32)
    for idx, path in enumerate(image_files):
        gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        gray = cv2.resize(gray, (IMAGE_SIZE, IMAGE_SIZE))
        batch[idx] = normalize_y(gray[:, :, np.newaxis])
    return batch
def load_Y(fdir, IMAGE_SIZE):
    """Load every mask in *fdir* in sorted filename order.

    Bug fix: the directory was never joined onto the file names, so
    cv2.imread only worked when *fdir* happened to be the CWD.
    """
    image_files = sorted(os.path.join(fdir, f) for f in os.listdir(fdir))
    return load_Y_files(image_files, IMAGE_SIZE)
def segOnehot(y):
    """Identity placeholder for a future one-hot encoding of segmentation labels."""
    return y
def genVOC2012filelist(ddir, istrain=True, rate=0.9):
    """Build matched (JPEG image, segmentation mask) path lists for VOC2012.

    ddir: VOC2012 root containing 'SegmentationObject' and 'JPEGImages'.
    istrain: True -> first *rate* fraction of files; False -> the remainder.
    rate: training fraction in (0, 1].
    Returns (xfiles, yfiles), parallel path lists.

    Bug fix: the test branch used [int((1-rate)*n):], which overlaps the
    training split (with rate=0.9 the test set contained 90% of the data);
    the test set is now the exact complement of the training set.
    """
    names = sorted(os.listdir(os.path.join(ddir, 'SegmentationObject')))
    xfiles = [os.path.join(ddir, 'JPEGImages',
                           os.path.splitext(os.path.basename(n))[0] + ".jpg")
              for n in names]
    yfiles = [os.path.join(ddir, 'SegmentationObject', n) for n in names]
    split = int(rate * len(yfiles))
    if istrain:
        xfiles, yfiles = xfiles[:split], yfiles[:split]
    else:
        xfiles, yfiles = xfiles[split:], yfiles[split:]
    print("yfiles=%d" % len(yfiles))
    print("xfiles=%d" % len(xfiles))
    return xfiles, yfiles
def gettrain(rate=0.9, imsize=256, ddir="VOC2012"):
    """Load the VOC2012 training split as normalized (X, Y) arrays.

    Bug fix: *ddir* was an undefined global (NameError at call time);
    it is now a keyword parameter defaulting to "VOC2012", which keeps
    the original call signature backward-compatible.
    """
    xfiles, yfiles = genVOC2012filelist(ddir, True, rate)
    X_train, _ = load_X_files(xfiles, imsize)
    Y_train = load_Y_files(yfiles, imsize)
    return X_train, Y_train
def gettest(rate=0.9, imsize=256, ddir="VOC2012"):
    """Load the VOC2012 test split as normalized (X, Y) arrays.

    Bug fix: *ddir* was an undefined global (NameError at call time);
    it is now a keyword parameter defaulting to "VOC2012", which keeps
    the original call signature backward-compatible.
    """
    xfiles, yfiles = genVOC2012filelist(ddir, False, rate)
    X_test, _ = load_X_files(xfiles, imsize)
    Y_test = load_Y_files(yfiles, imsize)
    return X_test, Y_test
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os, cv2 | |
import os | |
import numpy as np | |
def normalize_x(image):
    """Scale pixel values from [0, 255] into the [-1, 1] range used for inputs."""
    return image / 127.5 - 1
def normalize_y(image):
    """Scale mask pixel values from [0, 255] into [0, 1]."""
    return image / 255
def denormalize_y(image):
    """Scale a normalized mask from [0, 1] back to [0, 255].

    Bug fix: the original computed image*255 but never returned it,
    so every call yielded None.
    """
    return image * 255
def load_X_files(image_files, IMAGE_SIZE):
    """Load RGB images, resize each to (IMAGE_SIZE, IMAGE_SIZE) and normalize to [-1, 1].

    Returns a (N, IMAGE_SIZE, IMAGE_SIZE, 3) float32 array plus the input file list.
    """
    batch = np.zeros((len(image_files), IMAGE_SIZE, IMAGE_SIZE, 3), np.float32)
    for idx, path in enumerate(image_files):
        raw = cv2.imread(path)
        resized = cv2.resize(raw, (IMAGE_SIZE, IMAGE_SIZE))
        batch[idx] = normalize_x(resized)
    return batch, image_files
def load_X(fdir, IMAGE_SIZE):
    """Load every image in *fdir* in sorted filename order.

    Bug fixes: os.listdir(...).sort() returns None (sort is in-place),
    the callee name was misspelled (load_X_flies), and the directory
    was never joined onto the file names before reading.
    """
    image_files = sorted(os.path.join(fdir, f) for f in os.listdir(fdir))
    return load_X_files(image_files, IMAGE_SIZE)
def load_Y_files(image_files, IMAGE_SIZE):
    """Load grayscale masks, resize each to (IMAGE_SIZE, IMAGE_SIZE) and normalize to [0, 1].

    Returns a (N, IMAGE_SIZE, IMAGE_SIZE, 1) float32 array.
    """
    batch = np.zeros((len(image_files), IMAGE_SIZE, IMAGE_SIZE, 1), np.float32)
    for idx, path in enumerate(image_files):
        gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        gray = cv2.resize(gray, (IMAGE_SIZE, IMAGE_SIZE))
        batch[idx] = normalize_y(gray[:, :, np.newaxis])
    return batch
def load_Y(fdir, IMAGE_SIZE):
    """Load every mask in *fdir* in sorted filename order.

    Bug fix: the directory was never joined onto the file names, so
    cv2.imread only worked when *fdir* happened to be the CWD.
    """
    image_files = sorted(os.path.join(fdir, f) for f in os.listdir(fdir))
    return load_Y_files(image_files, IMAGE_SIZE)
def segOnehot(y):
    """Identity placeholder for a future one-hot encoding of segmentation labels."""
    return y
def genVOC2012filelist(ddir, istrain=True, rate=0.9):
    """Build matched (JPEG image, segmentation mask) path lists for VOC2012.

    ddir: VOC2012 root containing 'SegmentationObject' and 'JPEGImages'.
    istrain: True -> first *rate* fraction of files; False -> the remainder.
    rate: training fraction in (0, 1].
    Returns (xfiles, yfiles), parallel path lists.

    Bug fix: the test branch used [int((1-rate)*n):], which overlaps the
    training split (with rate=0.9 the test set contained 90% of the data);
    the test set is now the exact complement of the training set.
    """
    names = sorted(os.listdir(os.path.join(ddir, 'SegmentationObject')))
    xfiles = [os.path.join(ddir, 'JPEGImages',
                           os.path.splitext(os.path.basename(n))[0] + ".jpg")
              for n in names]
    yfiles = [os.path.join(ddir, 'SegmentationObject', n) for n in names]
    split = int(rate * len(yfiles))
    if istrain:
        xfiles, yfiles = xfiles[:split], yfiles[:split]
    else:
        xfiles, yfiles = xfiles[split:], yfiles[split:]
    print("yfiles=%d" % len(yfiles))
    print("xfiles=%d" % len(xfiles))
    return xfiles, yfiles
def gettrain(rate=0.9, imsize=256, ddir="VOC2012"):
    """Load the VOC2012 training split as normalized (X, Y) arrays.

    Bug fix: *ddir* was an undefined global (NameError at call time);
    it is now a keyword parameter defaulting to "VOC2012", which keeps
    the original call signature backward-compatible.
    """
    xfiles, yfiles = genVOC2012filelist(ddir, True, rate)
    X_train, _ = load_X_files(xfiles, imsize)
    Y_train = load_Y_files(yfiles, imsize)
    return X_train, Y_train
def gettest(rate=0.9, imsize=256, ddir="VOC2012"):
    """Load the VOC2012 test split as normalized (X, Y) arrays.

    Bug fix: *ddir* was an undefined global (NameError at call time);
    it is now a keyword parameter defaulting to "VOC2012", which keeps
    the original call signature backward-compatible.
    """
    xfiles, yfiles = genVOC2012filelist(ddir, False, rate)
    X_test, _ = load_X_files(xfiles, imsize)
    Y_test = load_Y_files(yfiles, imsize)
    return X_test, Y_test
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os | |
import numpy as np | |
from keras.optimizers import Adam | |
import keras.backend as K | |
from keras.callbacks import ModelCheckpoint, EarlyStopping | |
import os, cv2 | |
import Unet | |
def normalize_x(image):
    """Scale pixel values from [0, 255] into the [-1, 1] range used for inputs."""
    return image / 127.5 - 1
def normalize_y(image):
    """Scale mask pixel values from [0, 255] into [0, 1]."""
    return image / 255
def denormalize_y(image):
    """Scale a normalized mask from [0, 1] back to [0, 255].

    Bug fix: the original computed image*255 but never returned it,
    so every call yielded None.
    """
    return image * 255
def load_X_files(image_files, IMAGE_SIZE, fdir="."):
    """Load RGB images from *fdir*, resize to (IMAGE_SIZE, IMAGE_SIZE), normalize to [-1, 1].

    Bug fix: the original read from an undefined global *fdir* (NameError);
    it is now a keyword parameter defaulting to the current directory, so
    existing two-argument calls keep working for paths relative to CWD.
    Returns a (N, IMAGE_SIZE, IMAGE_SIZE, 3) float32 array plus the file list.
    """
    images = np.zeros((len(image_files), IMAGE_SIZE, IMAGE_SIZE, 3), np.float32)
    for i, image_file in enumerate(image_files):
        image = cv2.imread(os.path.join(fdir, image_file))
        image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
        images[i] = normalize_x(image)
    return images, image_files
def load_X(fdir, IMAGE_SIZE):
    """Load every image in *fdir* in sorted filename order.

    Bug fixes: os.listdir(...).sort() returns None (sort is in-place)
    and the callee name was misspelled (load_X_flies); the directory is
    now forwarded so files are read from *fdir*.
    """
    image_files = sorted(os.listdir(fdir))
    return load_X_files(image_files, IMAGE_SIZE, fdir)
def load_Y_files(image_files, IMAGE_SIZE, fdir="."):
    """Load grayscale masks from *fdir*, resize to (IMAGE_SIZE, IMAGE_SIZE), normalize to [0, 1].

    Bug fix: the original concatenated the builtin *dir* function into the
    path (TypeError at call time); the directory is now a keyword parameter
    defaulting to the current directory.
    Returns a (N, IMAGE_SIZE, IMAGE_SIZE, 1) float32 array.
    """
    images = np.zeros((len(image_files), IMAGE_SIZE, IMAGE_SIZE, 1), np.float32)
    for i, image_file in enumerate(image_files):
        image = cv2.imread(os.path.join(fdir, image_file), cv2.IMREAD_GRAYSCALE)
        image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
        images[i] = normalize_y(image[:, :, np.newaxis])
    return images
def load_Y(fdir, IMAGE_SIZE):
    """Load every mask in *fdir* in sorted filename order.

    Bug fix: the directory is now forwarded to load_Y_files so masks are
    actually read from *fdir* rather than relative to the CWD.
    """
    image_files = sorted(os.listdir(fdir))
    return load_Y_files(image_files, IMAGE_SIZE, fdir)
def train(ddir="VOC2012", rate=0.9):
    """Train the U-Net on VOC2012 or PedCut2013 and save its weights.

    ddir: dataset root, "VOC2012" or "PedCut2013".
    rate: nominally the train fraction (currently unused in this body).
    Side effects: fits the model and writes 'segunet_weights.hdf5' to CWD.

    Bug fixes: 'SegmentationOjbect' directory typo, a syntax error in the
    JPEG list comprehension ('y. for y in ...'), masks/images loaded
    without their directories, missing Input import, and the helper name
    mismatch (Unet defines genUnetmodel, not genUnetModel).
    NOTE(review): dice_coef_loss / dice_coef are not defined in this file;
    they must come from elsewhere in the project — confirm.
    """
    from keras.layers import Input
    imsize = 256
    if ddir == "VOC2012":
        ydir = os.path.join(ddir, 'SegmentationObject')
        names = sorted(os.listdir(ydir))
        # JPEG inputs share the mask's basename with a .jpg extension.
        xfiles = [os.path.join(ddir, 'JPEGImages',
                               os.path.splitext(n)[0] + ".jpg") for n in names]
        yfiles = [os.path.join(ydir, n) for n in names]
        X_train, file_names = load_X_files(xfiles, imsize)
        Y_train = load_Y_files(yfiles, imsize)
    elif ddir == "PedCut2013":
        X_train, file_names = load_X(os.path.join(ddir, 'trainingData', 'left_images'), imsize)
        Y_train = load_Y(os.path.join(ddir, 'trainingData', 'left_groundTruth'), imsize)
    inputs = Input((imsize, imsize, 3))
    model = Unet.genUnetmodel(inputs)
    model.compile(loss=dice_coef_loss, optimizer=Adam(), metrics=[dice_coef])
    BATCH_SIZE = 12
    NUM_EPOCH = 20
    model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCH, verbose=1)
    model.save_weights('segunet_weights.hdf5')
def predict(ddir="VOC2012", rate=0.1):
    """Run the trained U-Net on test images and write predictionN.png files.

    ddir: dataset root, "VOC2012" or "PedCut2013".
    rate: nominally the test fraction (currently unused in this body).
    Side effects: reads 'segunet_weights.hdf5' and writes PNGs to CWD.

    Bug fixes: a stray half-statement (syntax error), X_train/X_test name
    mismatch, missing IMAGE_SIZE arguments to the loaders, the
    'SegmentationOjbect' typo, missing Input import, and the call to
    Unet.genUNet with undefined arguments.
    NOTE(review): VOC masks are .png while file_names are .jpg; the
    reference-image lookup swaps the extension — confirm against data.
    """
    from keras.layers import Input
    imsize = 256
    if ddir == "VOC2012":
        xdir = os.path.join(ddir, 'JPEGImages')
        xfiles = [os.path.join(xdir, f) for f in sorted(os.listdir(xdir))]
        X_test, file_names = load_X_files(xfiles, imsize)
    elif ddir == "PedCut2013":
        X_test, file_names = load_X(os.path.join('testData', 'left_images'), imsize)
    inputs = Input((imsize, imsize, 3))
    model = Unet.genUnetmodel(inputs)
    model.load_weights('segunet_weights.hdf5')
    BATCH_SIZE = 12
    Y_pred = model.predict(X_test, BATCH_SIZE)
    for i, y in enumerate(Y_pred):
        if ddir == "VOC2012":
            base = os.path.splitext(os.path.basename(file_names[i]))[0]
            img = cv2.imread(os.path.join(ddir, 'SegmentationObject', base + '.png'))
        elif ddir == "PedCut2013":
            img = cv2.imread(os.path.join('testData', 'left_images', file_names[i]))
        # Resize the prediction back to the reference image's resolution.
        y = cv2.resize(y, (img.shape[1], img.shape[0]))
        cv2.imwrite('prediction' + str(i) + '.png', denormalize_y(y))
# Script entry point: train on the default dataset ("VOC2012"), saving
# weights to CWD, then immediately run prediction with the saved weights.
if __name__ == '__main__':
    train()
    predict()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from keras.models import Sequential | |
from keras.layers.core import Dense, Activation | |
from keras.layers.convolutional import Conv2DTranspose | |
from keras.layers.convolutional import Convolution2D | |
from keras.layers.convolutional import MaxPooling2D | |
from keras.layers.merge import Concatenate | |
from keras.layers.merge import concatenate | |
from keras.layers.core import Dropout | |
from keras.layers.core import Flatten | |
from keras.layers.core import Lambda | |
from keras.layers.convolutional import Conv2D | |
def Conv2D16(s):
    """Apply a 16-filter 3x3 ELU convolution (he_normal init, 'same' padding) to s."""
    layer = Conv2D(16, (3, 3), activation='elu',
                   kernel_initializer='he_normal', padding='same')
    return layer(s)
def _Conv2D(s, size, dsize=3):
    """Apply a *size*-filter dsize x dsize ELU convolution (he_normal, 'same') to s."""
    conv = Conv2D(size, (dsize, dsize), activation='elu',
                  kernel_initializer='he_normal', padding='same')
    return conv(s)
def CDCP(s, size, dsize=3, droprate=0.1, withpooling=True):
    """Conv-Dropout-Conv(-Pool) encoder stage.

    With pooling, returns (pooled_output, pre_pool_conv) so the conv output
    can serve as a skip connection; otherwise returns the conv output alone.
    """
    conv = _Conv2D(s, size, dsize)
    conv = _Conv2D(Dropout(droprate)(conv), size, dsize)
    if not withpooling:
        return conv
    return MaxPooling2D((2, 2))(conv), conv
def Ulayer(s, t, size, dsize=3, droprate=0.2):
    """Decoder stage: 2x2 transposed-conv upsample of s, concat with skip t,
    then Conv-Dropout-Conv."""
    upsampled = Conv2DTranspose(size, (2, 2), strides=(2, 2), padding='same')(s)
    merged = concatenate([upsampled, t])
    out = _Conv2D(merged, size, dsize)
    out = Dropout(droprate)(out)
    return _Conv2D(out, size, dsize)
# | |
def genUnet(inputs):
    """Build a 4-level U-Net over *inputs* and return it as a keras Model.

    inputs: a keras Input tensor (expects raw [0, 255] pixels; scaled
    internally by the Lambda layer).
    Returns a Model mapping inputs to a 1-channel sigmoid mask.

    Bug fix: Model was referenced but never imported (only Sequential is
    imported at the top of the file); imported locally here.
    """
    from keras.models import Model
    s = Lambda(lambda x: x / 255)(inputs)
    a0, c0 = CDCP(s, 16, 3, 0.1)
    a1, c1 = CDCP(a0, 32, 3, 0.1)
    a2, c2 = CDCP(a1, 64, 3, 0.2)
    a3, c3 = CDCP(a2, 128, 3, 0.2)
    a4 = CDCP(a3, 256, 3, 0.2, False)  # bottleneck, no pooling
    a5 = Ulayer(a4, c3, 128)
    a6 = Ulayer(a5, c2, 64)
    a7 = Ulayer(a6, c1, 32)
    a8 = Ulayer(a7, c0, 16)
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(a8)
    return Model(inputs=[inputs], outputs=[outputs])
def genUnetmodel(inputs):
    """Return the U-Net Model built over *inputs*.

    Bug fixes: the helper name was misspelled (senUnet), and the result of
    genUnet — already a Model — was wrapped in a second Model, which would
    raise; this now simply delegates to genUnet.
    """
    return genUnet(inputs)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment