Skip to content

Instantly share code, notes, and snippets.

@ur0n2
Created February 16, 2018 14:54
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save ur0n2/7531736ecbc1554657135197ff9baf9b to your computer and use it in GitHub Desktop.
#-*-coding:utf-8-*-
from keras.datasets import boston_housing
from keras.models import Sequential
from keras.layers import Activation, Dense
from keras import optimizers
# Load the Boston housing regression dataset: 13 numeric features per sample,
# target is the median house price. Naming kept consistently lowercase.
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()

# Sequential MLP: two hidden layers of 10 sigmoid units each, one linear output.
model = Sequential()
# Input layer: the first layer must declare input_shape explicitly (13 features).
model.add(Dense(10, input_shape=(13,)))
model.add(Activation('sigmoid'))
model.add(Dense(10))  # hidden layer: only the output dimension is needed
model.add(Activation('sigmoid'))
model.add(Dense(10))  # hidden layer: only the output dimension is needed
model.add(Activation('sigmoid'))
model.add(Dense(1))   # output layer: a single unit since this is regression
'''
This is equivalent to the above code block
>> model.add(Dense(10, input_shape = (13,), activation = 'sigmoid'))
>> model.add(Dense(10, activation = 'sigmoid'))
>> model.add(Dense(10, activation = 'sigmoid'))
>> model.add(Dense(1))
'''
# Stochastic gradient descent optimizer. A smaller learning rate takes finer
# steps down the gradient, so convergence is slower but more stable.
sgd = optimizers.SGD(lr=0.01)

# For regression problems, mean squared error (MSE) is the standard loss.
model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['mse'])

# Mini-batch training: 50 samples per gradient update. Each epoch applies
# gradient descent over the whole training set once; more epochs keep lowering
# training error, but too many can overfit (test error starts to rise).
model.fit(x_train, y_train, batch_size=50, epochs=100, verbose=1)

# Evaluate on the held-out test set; results = [loss, mse] per compile() above.
results = model.evaluate(x_test, y_test)
print('loss: ', results[0])
print('mse: ', results[1])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment