Vardan Agarwal (vardanagarwal)
from tensorflow.keras.models import Model
from tensorflow.keras.layers import SeparableConv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import SpatialDropout2D, GaussianDropout, Dropout, ActivityRegularization
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Add, concatenate
from tensorflow.keras.utils import plot_model
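# The gist preview stops at the imports; below is a minimal sketch (an assumption for
# illustration, not the author's architecture) of how these layers might be wired together
# with the Keras functional API. Input shape and layer sizes are hypothetical.
inputs = Input(shape=(64, 64, 3))
x = SeparableConv2D(32, (3, 3), padding='same', activation='relu')(inputs)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2))(x)
x = SpatialDropout2D(0.2)(x) # drops whole feature maps rather than single units
y = SeparableConv2D(32, (3, 3), padding='same', activation='relu')(x)
y = BatchNormalization()(y)
x = Add()([x, y]) # residual-style skip connection
x = Flatten()(x)
x = Dropout(0.5)(x)
outputs = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs)
model.summary() # plot_model(model, 'model.png', show_shapes=True) would draw the graph (needs pydot/graphviz)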
import cv2
img = cv2.imread('Paris.jpg')
dst_gray, dst_color = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.07, shade_factor=0.05) # inbuilt function to generate pencil sketch in both color and grayscale
# sigma_s controls the size of the neighborhood. Range 1 - 200
# sigma_r controls how dissimilar colors within the neighborhood will be averaged. A larger sigma_r results in larger regions of constant color. Range 0 - 1
# shade_factor is a simple scaling of the output image intensity. The higher the value, the brighter is the result. Range 0 - 0.1
cv2.imshow("Image", img)
cv2.imshow("Pencil Sketch (grayscale)", dst_gray)
cv2.imshow("Pencil Sketch (color)", dst_color)
cv2.waitKey(0) # keep the windows open until a key is pressed
cv2.destroyAllWindows()
import cv2
import numpy as np
img = cv2.imread('Paris.jpg')
height, width = img.shape[:2]
y = np.ones((height, width), np.uint8) * 128
output = np.zeros((height, width), np.uint8)
# generating the kernels
kernel1 = np.array([[0, -1, -1], # kernel for embossing bottom left side
                    [1, 0, -1],
                    [1, 1, 0]]) # third row assumed (standard emboss kernel); the gist preview is truncated here
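# The preview cuts off after the first kernel; a hedged sketch (an assumption, not the
# author's exact code) of applying the emboss kernel to the grayscale image and adding
# the 128 offset array y so the result sits around mid-gray:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
output = cv2.add(cv2.filter2D(gray, -1, kernel1.astype(np.float32)), y) # float kernel keeps filter2D happy where the default int is 64-bit
cv2.imshow("Emboss", output)
cv2.waitKey(0)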
import cv2
img = cv2.imread('Paris.jpg')
edges1 = cv2.bitwise_not(cv2.Canny(img, 100, 200)) # for thin edges and inverting the mask obtained
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5) # applying median blur with kernel size of 5
edges2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, 7) # thick edges
dst = cv2.edgePreservingFilter(img, flags=2, sigma_s=64, sigma_r=0.25) # you can also use bilateral filter but that is slow
# flag = 1 for RECURS_FILTER (Recursive Filtering) and 2 for NORMCONV_FILTER (Normalized Convolution). NORMCONV_FILTER produces sharpening of the edges but is slower.
# sigma_s controls the size of the neighborhood. Range 1 - 200
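# The gist is truncated here; one plausible way to finish the effect (an assumption, not
# necessarily the author's code) is to mask the edge-preserving-smoothed image with the
# thick-edge map so the flattened colors get dark cartoon outlines:
cartoon = cv2.bitwise_and(dst, dst, mask=edges2)
cv2.imshow("Cartoon", cartoon)
cv2.waitKey(0)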
import cv2
import numpy as np
img = cv2.imread('Paris.jpg')
original = img.copy()
img = np.array(img, dtype=np.float64) # converting to float to prevent loss
img = cv2.transform(img, np.matrix([[0.272, 0.534, 0.131],
                                    [0.349, 0.686, 0.168],
                                    [0.393, 0.769, 0.189]])) # multiplying image with special sepia matrix
img[np.where(img > 255)] = 255 # clipping values greater than 255 back to 255
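# The preview stops here; converting back to 8-bit is the natural next step before display
# (the display calls below are an assumption):
img = np.array(img, dtype=np.uint8)
cv2.imshow("Original", original)
cv2.imshow("Sepia", img)
cv2.waitKey(0)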
import cv2
import numpy as np
def exponential_function(channel, exp):
    table = np.array([min((i ** exp), 255) for i in np.arange(0, 256)]).astype("uint8") # creating lookup table for the exponent
    channel = cv2.LUT(channel, table)
    return channel
def tone(img, number):
    for i in range(3):
        if i == number:
            img[:, :, i] = exponential_function(img[:, :, i], 1.05) # applying exponential function to brighten the selected channel slice
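# The preview cuts off mid-function; a hypothetical completion (the 0.85 exponent is an
# assumption) darkens the remaining channels and returns the toned image:
        else:
            img[:, :, i] = exponential_function(img[:, :, i], 0.85) # darkening the other channel slices
    return img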
import cv2
import numpy as np
def hsv(img, l, u):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower = np.array([l, 128, 128]) # setting lower HSV value
    upper = np.array([u, 255, 255]) # setting upper HSV value
    mask = cv2.inRange(hsv, lower, upper) # generating mask
    return mask
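# A hedged usage sketch (assumed, not part of the original gist): build a mask for a hue
# range (OpenCV hue runs 0-179) and keep only those pixels in color:
img = cv2.imread('Paris.jpg')
mask = hsv(img, 0, 10) # roughly the red hues
result = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow("Masked", result)
cv2.waitKey(0)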
import cv2
import numpy as np
img = cv2.imread('Paris.jpg')
original = img.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
xp = [0, 64, 112, 128, 144, 192, 255] # setting reference values
fp = [0, 16, 64, 128, 192, 240, 255] # output values mapped to each reference value
x = np.arange(256)
table = np.interp(x, xp, fp).astype('uint8') # creating lookup table
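# A hedged sketch (assumed) of applying the curve: cv2.LUT maps every grayscale intensity
# through the interpolated table, bending the tonal response along the reference points:
gray = cv2.LUT(gray, table)
cv2.imshow("Original", original)
cv2.imshow("Tone curve", gray)
cv2.waitKey(0)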
import cv2
import numpy as np
img = cv2.imread('Paris.jpg')
height, width = img.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = 0.8 # threshold: noise will be added to roughly 80% of the pixels
for i in range(height):
    for j in range(width):
        if np.random.rand() <= thresh:
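# The preview is truncated inside the loop; a hypothetical completion (the offset range is
# an assumption) perturbs each selected pixel with random grain and clips to the valid range:
            gray[i, j] = np.clip(int(gray[i, j]) + np.random.randint(-40, 40), 0, 255)
cv2.imshow("Noise", gray)
cv2.waitKey(0)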
import cv2
import numpy as np
img = cv2.imread('Paris.jpg')
image_HLS = cv2.cvtColor(img, cv2.COLOR_BGR2HLS) # conversion to HLS
image_HLS = np.array(image_HLS, dtype=np.float64)
daylight = 1.15
image_HLS[:, :, 1] = image_HLS[:, :, 1] * daylight # scale pixel values up for channel 1 (Lightness)
image_HLS[:, :, 1][image_HLS[:, :, 1] > 255] = 255 # set all values above 255 to 255
image_HLS = np.array(image_HLS, dtype=np.uint8)
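# The preview stops here; converting back to BGR completes the daylight effect (the display
# call is an assumption):
image_BGR = cv2.cvtColor(image_HLS, cv2.COLOR_HLS2BGR)
cv2.imshow("Daylight", image_BGR)
cv2.waitKey(0)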