import numpy as np
from sklearn.model_selection import train_test_split
import os
import glob
from scipy import stats
import itertools
from keras.models import Model
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten, ReLU, AveragePooling2D, Dropout
from keras.layers import Input, concatenate
from keras.models import model_from_json
from keras import optimizers
from sklearn.metrics import accuracy_score
from keras.callbacks import TensorBoard, EarlyStopping, Callback, LearningRateScheduler, ReduceLROnPlateau
import numpy.ma as ma
from sklearn.metrics import classification_report,confusion_matrix
from keras import backend as K
import time
import math
from keras import regularizers
import tensorflow as tf
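# Note: this script targets standalone Keras (2.3.x) running on a TF 2.x backend;
# the keras.backend.tensorflow_backend patch further below exists for exactly that combination.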
################### Load data Start ################
# Mount the Google Drive node
from google.colab import drive
drive.mount('/content/gdrive')
def changePath(path):
    print('changePath()')
    os.chdir(path)  # change the working directory
    print(os.getcwd())  # show the current working directory
# Download the training data from:
# https://drive.google.com/drive/folders/1k6qXd8PtGaoRhYLo0sHMJ2vYohOdXBCz?usp=sharing
# and put it under your own Google Drive space.
TrainDataPath = '/content/gdrive/My Drive/SMXDATA/Train/'
TestDataPath = '/content/gdrive/My Drive/SMXDATA/Test/'
def Load_Data(InputPath):
    Data_Total = np.array([])
    Label_Total = np.array([])
    # Each sample set is stored as a pair of .npy files: a '*_Label*' file
    # plus a feature file that shares the same filename prefix.
    folder_content = glob.glob(InputPath + '*_Label*')
    for File in folder_content:
        Label = np.load(File)
        Label_Total = np.append(Label_Total, Label)
        # Find the matching feature file: same prefix, without 'Label' in the name
        pth = InputPath + File.split('/')[-1].split('Label')[0] + '*'
        gb = glob.glob(pth)
        ld = [F for F in gb if 'Label' not in F][0]
        Feature = np.load(ld)
        Data_Total = np.append(Data_Total, Feature)
    Label_Total = Label_Total.reshape(-1, 1)
    Data_Total = Data_Total.reshape(-1, 1, 3, 29)
    return Data_Total, Label_Total
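# Example of the naming convention Load_Data assumes (illustrative file names only,
# not taken from the actual dataset):
#   Foo_Label.npy -> labels for sample set "Foo"
#   Foo_Data.npy  -> the matching (3 x 29) feature rows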
changePath(TrainDataPath)
Train_Data, Train_Label = Load_Data(TrainDataPath)
changePath(TestDataPath)
Test_Data, Test_Label = Load_Data(TestDataPath)
print(Train_Data.shape, Train_Label.shape)
print(Test_Data.shape, Test_Label.shape)
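# Sanity check (a minimal sketch, not in the original script): Load_Data reshapes
# to (N, 1, 3, 29) features and (N, 1) labels, so a malformed .npy dump would
# surface here rather than at model.fit time.
assert Train_Data.shape[1:] == (1, 3, 29) and Train_Label.shape[1:] == (1,)
assert Test_Data.shape[1:] == (1, 3, 29) and Test_Label.shape[1:] == (1,)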
################### Training data Start ################
import keras.backend.tensorflow_backend as tfback
# Workaround for Keras 2.3.x on a TF 2.x backend: the stock _get_available_gpus()
# in keras.backend.tensorflow_backend relies on TF 1.x session APIs, so rebuild
# the logical-device list via tf.config and monkey-patch it in.
def _get_available_gpus():
    if tfback._LOCAL_DEVICES is None:
        devices = tf.config.list_logical_devices()
        tfback._LOCAL_DEVICES = [x.name for x in devices]
    return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]
tfback._get_available_gpus = _get_available_gpus
def _to_tensor(x, dtype):
    return tf.convert_to_tensor(x, dtype=dtype)

def custom_HardTanh(x):
    # Hard-tanh activation: clip values into [-1, 1]
    n_one = _to_tensor(-1., x.dtype.base_dtype)
    one = _to_tensor(1., x.dtype.base_dtype)
    return tf.clip_by_value(x, n_one, one)
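# Illustrative check of the clipping behaviour (a sketch, not part of the
# original script): custom_HardTanh maps [-2, -0.5, 0.5, 2] to [-1, -0.5, 0.5, 1].
print(custom_HardTanh(tf.constant([-2., -0.5, 0.5, 2.])))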
PalmTh = 0.5  # decision threshold applied to the sigmoid output
Cell_layer1_Filters = 2
PX_layer1_Filters = 2
PY_layer1_Filters = 2
C_layer1_Filters = 2
batch_size = [64]
learning_rate = [0.005]
FullyConnectNode = [12, 15]
Epochs = 500
EarlyStopPatience = 10
ChangeLrPatience = 8
ChangeLrFactor = 0.9
Test_Para = []
InputA = Input(shape=(1, 3, 5), name='Input_Height_Cell')
InputB = Input(shape=(1, 3, 10), name='Input_ProjectionX')
InputC = Input(shape=(1, 3, 10), name='Input_ProjectionY')
InputD = Input(shape=(1, 3, 4), name='Input_Centroid')
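# The fourth axis of the (N, 1, 3, 29) tensors is consumed as four slices matching
# the four inputs above: [0:5] height-cell, [5:15] projection-X, [15:25] projection-Y,
# [25:29] centroid. A helper makes that split explicit (hypothetical name, not part
# of the original script; the fit/evaluate calls below spell the slices out directly):
def split_features(X):
    return [X[:, :, :, 0:5], X[:, :, :, 5:15], X[:, :, :, 15:25], X[:, :, :, 25:]]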
# Grid search over repetitions, batch size, learning rate, and FC width
# (itertools.product flattens the original nested loops without changing behaviour)
for N, b, L, F in itertools.product(range(0, 1), batch_size, learning_rate, FullyConnectNode):
    print('batch size:', b, 'learning rate:', L, 'FullyConnectNode:', F)
    #================ Height_Cell Convolution ================
    A1 = Convolution2D(filters=Cell_layer1_Filters, kernel_size=3, strides=1,
                       padding='valid', data_format='channels_first', use_bias=True,
                       name='Conv1_Height_Cell',
                       activity_regularizer=regularizers.l2(0.00001))(InputA)
    A2 = Activation(custom_HardTanh)(A1)
    A3 = Flatten()(A2)
    #================ ProjectionX Convolution ================
    B1 = Convolution2D(filters=PX_layer1_Filters, kernel_size=3, strides=1,
                       padding='valid', data_format='channels_first', use_bias=True,
                       name='Conv1_ProjectionX',
                       activity_regularizer=regularizers.l2(0.00001))(InputB)
    B2 = Activation(custom_HardTanh)(B1)
    B3 = Flatten()(B2)
    #================ ProjectionY Convolution ================
    C1 = Convolution2D(filters=PY_layer1_Filters, kernel_size=3, strides=1,
                       padding='valid', data_format='channels_first', use_bias=True,
                       name='Conv1_ProjectionY',
                       activity_regularizer=regularizers.l2(0.00001))(InputC)
    C2 = Activation(custom_HardTanh)(C1)
    C3 = Flatten()(C2)
    #================ Centroid Convolution ================
    D1 = Convolution2D(filters=C_layer1_Filters, kernel_size=3, strides=1,
                       padding='valid', data_format='channels_first', use_bias=True,
                       name='Conv1_Centroid',
                       activity_regularizer=regularizers.l2(0.00001))(InputD)
    D2 = Activation(custom_HardTanh)(D1)
    D3 = Flatten()(D2)
    #================ Concatenate Layer ================
    ConcatentaLayer = concatenate([A3, B3, C3, D3])
    #================ FullyConnect Layer ================
    DenseLayer1 = Dense(F, use_bias=True, activation=None,
                        activity_regularizer=regularizers.l2(0.00001))(ConcatentaLayer)
    DenseLayer1 = Activation(custom_HardTanh)(DenseLayer1)
    Output = Dense(1, use_bias=True, activation='sigmoid')(DenseLayer1)
    #================ Model Implementation ================
    model = Model(inputs=[InputA, InputB, InputC, InputD], outputs=[Output])
    adam = optimizers.Adam(lr=L)
    model.compile(optimizer=adam, loss='binary_crossentropy')
    model.summary()
    change_lr = ReduceLROnPlateau(monitor='val_loss', factor=ChangeLrFactor,
                                  patience=ChangeLrPatience, min_lr=0.00005)
    EarlyStop = EarlyStopping(monitor='loss', patience=EarlyStopPatience, verbose=2, mode='min')
    #================ Model Train and Test ================
    Training_Data, Val_Data, Training_Label, Val_Label = train_test_split(Train_Data, Train_Label, test_size=0.1)
    history = model.fit([Training_Data[:, :, :, 0:5], Training_Data[:, :, :, 5:15],
                         Training_Data[:, :, :, 15:25], Training_Data[:, :, :, 25:]],
                        Training_Label, epochs=Epochs, batch_size=b,
                        validation_data=([Val_Data[:, :, :, 0:5], Val_Data[:, :, :, 5:15],
                                          Val_Data[:, :, :, 15:25], Val_Data[:, :, :, 25:]], Val_Label),
                        callbacks=[change_lr, EarlyStop])
    loss = model.evaluate([Test_Data[:, :, :, 0:5], Test_Data[:, :, :, 5:15],
                           Test_Data[:, :, :, 15:25], Test_Data[:, :, :, 25:]],
                          Test_Label)
    print("Loss:", loss)
    Predictions = model.predict([Test_Data[:, :, :, 0:5], Test_Data[:, :, :, 5:15],
                                 Test_Data[:, :, :, 15:25], Test_Data[:, :, :, 25:]]).reshape(-1, 1)
    # Threshold the sigmoid output at PalmTh to get hard 0/1 predictions
    Predictions[np.where(Predictions >= PalmTh)] = 1
    Predictions[np.where(Predictions < PalmTh)] = 0
    accuracy = np.mean(Predictions == Test_Label)
    print("Accuracy:", accuracy)