# -*- coding: utf-8 -*-
# Download the MNIST data.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Import the TensorFlow library.
import tensorflow as tf
# Set up the variables.
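# A minimal sketch of the variable setup referred to above, assuming the standard
# beginner MNIST softmax-regression model; x, W, b and y are illustrative names,
# not taken from the truncated snippet.
x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 input images
W = tf.Variable(tf.zeros([784, 10]))          # weights: 784 pixels -> 10 classes
b = tf.Variable(tf.zeros([10]))               # per-class bias
y = tf.nn.softmax(tf.matmul(x, W) + b)        # predicted class probabilities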
# -*- coding: utf-8 -*-
# Configure absolute imports
from __future__ import absolute_import
from __future__ import print_function
# Import the required libraries
import collections
import math
import os
# -*- coding: utf-8 -*-
# Image classification using the Inception-v3 model
# Configure absolute imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import the required libraries
import os.path
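# A minimal inference sketch, assuming a frozen Inception-v3 GraphDef (for example
# classify_image_graph_def.pb) is available locally; the helper name classify_jpeg
# and the tensor names 'softmax:0' / 'DecodeJpeg/contents:0' are assumptions tied
# to that particular graph, not part of the original snippet.
import numpy as np
import tensorflow as tf

def classify_jpeg(graph_path, image_path):
    # Load the frozen graph into the default graph.
    with tf.gfile.FastGFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')
    # Feed the raw JPEG bytes straight into the graph's decode op.
    with tf.gfile.FastGFile(image_path, 'rb') as f:
        image_data = f.read()
    with tf.Session() as sess:
        softmax = sess.graph.get_tensor_by_name('softmax:0')
        predictions = sess.run(softmax, {'DecodeJpeg/contents:0': image_data})
    return np.squeeze(predictions)  # probabilities over the ImageNet classes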
"""
TensorFlow translation of the torch example found here (written by SeanNaren).
https://github.com/SeanNaren/TorchQLearningExample
Original keras example found here (written by Eder Santana).
https://gist.github.com/EderSantana/c7222daa328f0e885093#file-qlearn-py-L164
The agent plays a game of catch. Fruits drop from the sky and the agent can choose the actions
left/stay/right to catch the fruit before it reaches the ground.
"""
# This code must be run under IPython; alternatively, open the .ipynb file in notebook mode.
%matplotlib inline
from TrainCatchGame import (CatchEnvironment, X, W1, b1, input_layer,
                            W2, b2, hidden_layer, W3, b3, output_layer,
                            Y, cost, optimizer)
from IPython import display
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pylab as pl
# boilerplate code
from __future__ import print_function
import os
from io import BytesIO
import numpy as np
from functools import partial
import PIL.Image
from IPython.display import clear_output, Image, display, HTML
import tensorflow as tf
model_fn = 'tensorflow_inception_graph.pb'
# creating TensorFlow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(model_fn, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input') # define the input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)  # subtract the ImageNet mean and add a batch dimension
tf.import_graph_def(graph_def, {'input': t_preprocessed})    # creates the 'import/'-prefixed ops used below
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
# Helper functions for TF Graph visualization
def strip_consts(graph_def, max_const_size=32):
    '''Strip large constant values from graph_def.'''
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139 # picking some feature channel to visualize
# start with a gray image with a little noise
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
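# A minimal gradient-ascent sketch for visualizing the chosen channel; T() and
# render_naive are reconstructions for illustration, assuming the graph was
# imported under the 'import/' prefix as above.
def T(layer_name):
    '''Return the output tensor of a layer in the imported Inception graph.'''
    return graph.get_tensor_by_name('import/%s:0' % layer_name)

def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
    t_score = tf.reduce_mean(t_obj)             # mean activation of the chosen channel
    t_grad = tf.gradients(t_score, t_input)[0]  # gradient of the score w.r.t. the input image
    img = img0.copy()
    for _ in range(iter_n):
        g, _ = sess.run([t_grad, t_score], {t_input: img})
        g /= g.std() + 1e-8                     # normalize the gradient for a stable step size
        img += g * step
    showarray(img / 255.0)

# Example: maximize the selected channel of the chosen layer.
# render_naive(T(layer)[:, :, :, channel])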
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 1)*255)
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
def tffunc(*argtypes):
    '''Helper that transforms TF-graph generating function into a regular one.
    See "resize" function below.
    '''
    placeholders = list(map(tf.placeholder, argtypes))
    def wrap(f):
        out = f(*placeholders)
        def wrapper(*args, **kw):
            return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
        return wrapper
    return wrap
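# A usage sketch for tffunc, in the spirit of the "resize" function its docstring
# mentions; the bilinear-resize body below is an assumption for illustration.
def resize(img, size):
    img = tf.expand_dims(img, 0)
    return tf.image.resize_bilinear(img, size)[0, :, :, :]
resize = tffunc(np.float32, np.int32)(resize)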