# List of layers to use for the style loss.
style_layer_names = [
    "block1_conv1",
    "block2_conv1",
    "block3_conv1",
    "block4_conv1",
    "block5_conv1",
]
# The layer to use for the content loss.
content_layer_name = "block5_conv2"
model = vgg16.VGG16(weights="imagenet", include_top=False)
# Get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# Set up a model that returns the activation values for every layer in
# VGG16 (as a dict).
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
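# Sketch (not part of the original gist preview): one forward pass through the
# feature extractor yields a dict of activations keyed by layer name, from
# which the content and style activations are picked out. `dummy_batch` is an
# illustrative placeholder, not an input used by the gist.
dummy_batch = tf.zeros((1, 224, 224, 3))  # any spatial size works; VGG16 is used fully convolutionally here
dummy_features = feature_extractor(dummy_batch)
print(dummy_features[content_layer_name].shape)                 # block5_conv2 activations, e.g. (1, 14, 14, 512)
print([dummy_features[name].shape for name in style_layer_names])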
def gram_matrix(x):
    # Flatten the feature map channels-first, then take the inner product of the
    # flattened features to get the Gram matrix of channel correlations.
    x = tf.transpose(x, (2, 0, 1))
    features = tf.reshape(x, (tf.shape(x)[0], -1))
    gram = tf.matmul(features, tf.transpose(features))
    return gram
def style_loss(style, combination):
    # Squared Gram-matrix distance, normalised by the number of channels and the feature-map size.
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_rows * img_cols
    return tf.reduce_sum(tf.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))
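# The matching content term is not shown in this preview; a minimal sketch,
# following the usual formulation (squared error between the content-layer
# activations of the base image and the generated image):
def content_loss(base, combination):
    return tf.reduce_sum(tf.square(combination - base))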
def preprocess_image(image_path):
    # OpenCV loads BGR; convert to RGB before vgg16.preprocess_input, which
    # expects RGB input and applies the ImageNet mean-centering itself.
    image = cv2.imread(image_path)
    image = cv2.resize(image, (img_cols, img_rows))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = np.expand_dims(image, axis=0)
    image = vgg16.preprocess_input(image)
    return image

def deprocess_image(x):
    # Undo the VGG16 preprocessing: add back the ImageNet mean pixel (the result
    # stays in BGR order, ready for cv2.imwrite) and clip to valid 8-bit values.
    x = x.reshape((img_rows, img_cols, 3))
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = np.clip(x, 0, 255).astype("uint8")
    return x
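# Usage sketch (assumes `combination_image` is the optimised image variable
# produced by a training loop that is not shown in this preview):
# generated = deprocess_image(combination_image.numpy())
# cv2.imwrite(result_prefix + ".png", generated)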
img = cv2.imread('yuvnish_malhotra_photo.jpg')
print(img.shape)
(height, width, channels) = img.shape
img_rows = 400
img_cols = int(width * img_rows / height)
result_prefix = "neural_style_transfer_generated"
# Weights of the different loss components
total_variation_weight = 1e-6
style_weight = 5e-6
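# A minimal sketch (not part of the original preview) of how these weights
# typically enter the total objective. The content_loss helper sketched earlier
# and the content_weight value below are assumptions; the smoothness term uses
# tf.image.total_variation.
content_weight = 2.5e-8  # hypothetical value, not given in this preview

def compute_loss(combination_image, base_image, style_reference_image):
    input_tensor = tf.concat([base_image, style_reference_image, combination_image], axis=0)
    features = feature_extractor(input_tensor)
    loss = tf.zeros(shape=())
    # Content loss on the chosen content layer (index 0 = base, 2 = generated)
    layer_features = features[content_layer_name]
    loss = loss + content_weight * content_loss(layer_features[0], layer_features[2])
    # Style loss averaged over the style layers (index 1 = style reference)
    for layer_name in style_layer_names:
        layer_features = features[layer_name]
        sl = style_loss(layer_features[1], layer_features[2])
        loss = loss + (style_weight / len(style_layer_names)) * sl
    # Total variation term keeps the generated image locally smooth
    loss = loss + total_variation_weight * tf.reduce_sum(tf.image.total_variation(combination_image))
    return loss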
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.applications import vgg16
from tensorflow.keras.layers import Input
from tensorflow.keras.regularizers import Regularizer