Keras
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From c:\\users\\fyj\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\tensorflow\\python\\util\\tf_should_use.py:193: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
"Instructions for updating:\n",
"Use `tf.global_variables_initializer` instead.\n",
"0\n",
"1\n",
"2\n",
"3\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"state = tf.Variable(0, name=\"counter\") #변수를 0으로 초기화\n",
"\n",
"one = tf.constant(1) # state에 1을 더할 오퍼레이션 생성\n",
"new_value = tf.add(state, one)\n",
"update = tf.assign(state, new_value)\n",
"\n",
"init_op = tf.initialize_all_variables() #그래프는 처음에 변수를 초기화해야 합니다.\n",
"with tf.Session() as sess: #그래프를 띄우고 오퍼레이션들을 실행\n",
" sess.run(init_op) #초기화 오퍼레이션 실행\n",
" print(sess.run(state)) # state의 초기 값을 출력\n",
" # state를 갱신하는 오퍼레이션을 실행하고, state룰 츌력\n",
" for _ in range(3):\n",
" sess.run(update)\n",
" print(sess.run(state))"
]
},
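{
"cell_type": "markdown",
"metadata": {},
"source": [
"The counter above can be written more compactly with `tf.assign_add`, which increments a variable in a single op. A minimal sketch, assuming the same TensorFlow 1.x graph/session API as the cell above (the variable name `counter2` is just an example):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"\n",
"counter = tf.Variable(0, name=\"counter2\")\n",
"increment = tf.assign_add(counter, 1)  # add 1 to counter in a single op\n",
"\n",
"with tf.Session() as sess:\n",
"    sess.run(tf.global_variables_initializer())\n",
"    for _ in range(3):\n",
"        print(sess.run(increment))  # assign_add returns the updated value: 1, 2, 3\n"
]
},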
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "unpickle3() missing 1 required positional argument: 'file'",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-6-1b07e8034f92>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mdict\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpickle\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfo\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'bytes'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mdict\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 6\u001b[1;33m \u001b[0munpickle3\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m: unpickle3() missing 1 required positional argument: 'file'"
]
}
],
"source": [
"def unpickle3(file):\n",
" import pickle\n",
" with open(file, 'rb') as fo:\n",
" dict = pickle.load(fo, encoding='bytes')\n",
" return dict\n",
"unpickle3()"
]
},
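{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick usage sketch for `unpickle3`, assuming a local copy of the CIFAR-10 python-version archive; the path below is hypothetical and must point at your own extracted download. Each batch file is a dict with bytes keys such as `b'data'` and `b'labels'`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical path: adjust to wherever cifar-10-python.tar.gz was extracted.\n",
"batch = unpickle3('cifar-10-batches-py/data_batch_1')\n",
"print(list(batch.keys()))     # e.g. [b'batch_label', b'labels', b'data', b'filenames']\n",
"print(batch[b'data'].shape)   # (10000, 3072): 10000 images, 32*32*3 flattened\n",
"print(batch[b'labels'][:10])  # first ten class indices (0-9)\n"
]
},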
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import keras, os\n",
"from keras.datasets import cifar10\n",
"from keras.preprocessing.image import ImageDataGenerator\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout, Activation, Flatten\n",
"from keras.layers import Conv2D, MaxPooling2D\n",
"from keras.optimizers import SGD\n",
"batch_size = 32\n",
"num_classes = 10\n",
"epochs = 100\n",
"data_augmentation = True\n",
"num_predictions = 20\n",
"save_dir = os.path.join(os.getcwd(), 'saved_models')\n",
"model_name = 'keras_cifar10_trained_model.h5'\n",
"(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n",
"print('x_train shape:', x_train.shape)\n",
"print(x_train.shape[0], 'train samples')\n",
"print(x_test.shape[0], 'test samples')\n",
"# Convert class vectors to binary class matrices.\n",
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
"y_test = keras.utils.to_categorical(y_test, num_classes)\n",
"model = Sequential()\n",
"model.add(Conv2D(32, (3, 3), padding='same',\n",
" input_shape=x_train.shape[1:]))\n",
"model.add(Activation('relu'))\n",
"model.add(Conv2D(32, (3, 3)))\n",
"model.add(Activation('relu'))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Conv2D(64, (3, 3), padding='same'))\n",
"model.add(Activation('relu'))\n",
"model.add(Conv2D(64, (3, 3)))\n",
"model.add(Activation('relu'))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Flatten())\n",
"model.add(Dense(512))\n",
"model.add(Activation('relu'))\n",
"model.add(Dropout(0.5))\n",
"model.add(Dense(num_classes))\n",
"model.add(Activation('softmax'))\n",
"sgd = SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True)\n",
"model.compile(optimizer=sgd, loss='mean_squared_error')\n",
"if not data_augmentation:\n",
" print('Not using data augmentation.')\n",
" model.fit(x_train, y_train,\n",
" batch_size=batch_size,\n",
" epochs=epochs,\n",
" validation_data=(x_test, y_test),\n",
" shuffle=True)\n",
"else:\n",
" print('Using real-time data augmentation.')\n",
" # This will do preprocessing and realtime data augmentation:\n",
" datagen = ImageDataGenerator(\n",
" featurewise_center=False, # set input mean to 0 over the dataset\n",
" samplewise_center=False, # set each sample mean to 0\n",
" featurewise_std_normalization=False, # divide inputs by std of the dataset\n",
" samplewise_std_normalization=False, # divide each input by its std\n",
" zca_whitening=False, # apply ZCA whitening\n",
" zca_epsilon=1e-06, # epsilon for ZCA whitening\n",
" rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n",
" # randomly shift images horizontally (fraction of total width)\n",
" width_shift_range=0.1,\n",
" # randomly shift images vertically (fraction of total height)\n",
" # randomly shift images vertically (fraction of total height)\n",
" height_shift_range=0.1,\n",
" shear_range=0., # set range for random shear\n",
" zoom_range=0., # set range for random zoom\n",
" channel_shift_range=0., # set range for random channel shifts\n",
" # set mode for filling points outside the input boundaries\n",
" fill_mode='nearest',\n",
" cval=0., # value used for fill_mode = \"constant\"\n",
" horizontal_flip=True, # randomly flip images\n",
" vertical_flip=False, # randomly flip images\n",
" # set rescaling factor (applied before any other transformation)\n",
" rescale=None,\n",
" # set function that will be applied on each input\n",
" preprocessing_function=None,\n",
" # image data format, either \"channels_first\" or \"channels_last\"\n",
" data_format=None,\n",
" # fraction of images reserved for validation (strictly between 0 and 1)\n",
" validation_split=0.0)\n",
" # Compute quantities required for feature-wise normalization\n",
" # (std, mean, and principal components if ZCA whitening is applied).\n",
" datagen.fit(x_train)\n",
"\n",
" # Fit the model on the batches generated by datagen.flow().\n",
" model.fit_generator(datagen.flow(x_train, y_train,\n",
" batch_size=batch_size),\n",
" epochs=epochs,\n",
" validation_data=(x_test, y_test),\n",
" workers=4)\n",
"\n",
"# Save model and weights\n",
"if not os.path.isdir(save_dir):\n",
" os.makedirs(save_dir)\n",
"model_path = os.path.join(save_dir, model_name)\n",
"model.save(model_path)\n",
"print('Saved trained model at %s ' % model_path)\n",
"\n",
"# Score trained model.\n",
"scores = model.evaluate(x_test, y_test, verbose=1)\n",
"print('Test loss:', scores[0])\n",
"print('Test accuracy:', scores[1])\n"
]
},
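{
"cell_type": "markdown",
"metadata": {},
"source": [
"A short sketch of the save/load round trip, assuming the training cell above has run: reload the model from `model_path` with `keras.models.load_model` and compare its predictions on the first `num_predictions` test images against the true labels."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from keras.models import load_model\n",
"\n",
"# Reload the trained model saved by the previous cell.\n",
"restored = load_model(model_path)\n",
"\n",
"# Predict class probabilities for the first num_predictions test images.\n",
"probs = restored.predict(x_test[:num_predictions])\n",
"pred_classes = np.argmax(probs, axis=1)\n",
"true_classes = np.argmax(y_test[:num_predictions], axis=1)  # undo to_categorical\n",
"print('predicted:', pred_classes)\n",
"print('actual:   ', true_classes)\n"
]
},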
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# !pip install keras==2"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}