Skip to content

Instantly share code, notes, and snippets.

View Dipeshpal's full-sized avatar
🤣
LOL

Dipesh Paul Dipeshpal

🤣
LOL
View GitHub Profile
@Dipeshpal
Dipeshpal / ochuman_import.py
Last active June 5, 2020 11:57
Import OCHuman
# Load the OCHuman (Occluded Human) dataset via the official ochumanApi
# and configure matplotlib for inline display.
from ochumanApi.ochuman import OCHuman
import cv2, os
import matplotlib
import matplotlib.pyplot as plt
# NOTE(review): IPython magic — valid only inside a Jupyter/IPython notebook,
# not in a plain .py module; this line is a SyntaxError under CPython.
%matplotlib inline
# Large default figure size so dataset images render at a readable scale.
plt.rcParams['figure.figsize'] = (15, 15)
import ochumanApi.vis as vistool
from ochumanApi.ochuman import Poly2Mask
# <Filter>:
# None(default): load all. each has a bbox. some instances have keypoint and some have mask annotations.
# images: 5081, instances: 13360
# 'kpt&segm' or 'segm&kpt': only load instances contained both keypoint and mask annotations (and bbox)
# images: 4731, instances: 8110
# 'kpt|segm' or 'segm|kpt': load instances contained either keypoint or mask annotations (and bbox)
# images: 5081, instances: 10375
# 'kpt' or 'segm': load instances contained particular kind of annotations (and bbox)
# images: 5081/4731, instances: 10375/8110
# Keep only instances that carry mask (segmentation) annotations — per the
# table above this yields 4731 images / 8110 instances.
ochuman = OCHuman(AnnoFile='ochuman.json', Filter='segm')
@Dipeshpal
Dipeshpal / get_mask.py
Last active June 5, 2020 11:43
OCHuman get Mask only
# Load the image for one OCHuman record and set up per-instance colors.
# NOTE(review): this snippet is truncated by the gist preview — the rest of
# the function (mask rendering / return value) is not visible here, and the
# original indentation was lost in the paste, so it is not runnable as-is.
# `data` is one record from ochuman.loadImgs(...) providing 'file_name',
# 'height' and 'width'; `ImgDir` is a module-level constant from a sibling
# snippet.
def get_segmentation(data):
img = cv2.imread(os.path.join(ImgDir, data['file_name']))
height, width = data['height'], data['width']
# One color triple per human instance (up to 6 instances).
# NOTE(review): cv2.imread returns BGR, but whether these triples are meant
# as RGB or BGR depends on the unseen drawing code — confirm downstream.
colors = [[255, 0, 0],
[255, 255, 0],
[0, 255, 0],
[0, 255, 255],
[0, 0, 255],
[255, 0, 255]]
@Dipeshpal
Dipeshpal / ochuman_new_mask.py
Created June 5, 2020 11:48
Just pass the original image and the segmented image generated by the OCHuman API. This function will create a black-and-white mask.
# Build a binary (black/white) mask by comparing the original image against
# the segmentation overlay produced by the OCHuman API, pixel by pixel:
# wherever the two differ, the overlay painted a human there.
# NOTE(review): truncated by the gist preview — the else-branch body and the
# return statement are not visible (presumably it appends 0 for background
# and reshapes `new` back to image shape; confirm against the full gist).
# Original indentation was lost in the paste.
def new_mask(real_img, m_img):
# Flatten both images to 1-D pixel sequences so they can be zipped.
real_img = real_img.reshape(1, -1)[0]
m_img = m_img.reshape(1, -1)[0]
new = []
for i, j in zip(real_img, m_img):
if i != j:
new.append(255) # human will white appear because of 255
else:
# Model input resolution (matches the Input shape used in get_model)
# and the directory holding the OCHuman images.
IMG_HEIGHT = 512
IMG_WIDTH = 512
ImgDir = 'images/'
@Dipeshpal
Dipeshpal / ochuman_generator_images.py
Created June 5, 2020 11:59
Pass the batch size (the number of images you want at a time) and the index.
# Generator intended to yield batches of (image, mask) pairs from OCHuman.
# NOTE(review): SyntaxError as written — a parameter without a default
# ('ind') may not follow one with a default ('batch_size=1'); the fix is
# e.g. 'def generator_images(batch_size=1, ind=0):', which keeps the
# positional call generator_images(1, i) used by the driver below working.
# Also truncated by the gist preview: the mask loading and the yield of
# (x_batch, y_batch) are not visible, and original indentation was lost.
def generator_images(batch_size=1, ind):
while True:
x_batch = []
y_batch = []
for i in range(batch_size):
# Look up the record for the ind-th image id (note: the loop variable
# i is not used here — every iteration loads the same record).
data = ochuman.loadImgs(imgIds=[image_ids[ind]])[0]
file_name = data['file_name']
# Driver: walk all 4731 records (the 'segm'-filtered image count) and pull
# exactly one batch per index from the generator, breaking out immediately
# since generator_images loops forever.
for i in tqdm(range(4731)):
for x, y in generator_images(1, i):
break
# Output root for the generated black-background dataset.
base_dir_custom = "custom_dataset_human_black_background/"
try:
os.makedirs(f'{base_dir_custom}')
# NOTE(review): bare except silently swallows every error — intended to
# tolerate an already-existing directory; os.makedirs(..., exist_ok=True)
# would express that without hiding real failures.
except:
pass
# NOTE(review): truncated by the gist preview — the body of this try block
# is not visible here.
try:
@Dipeshpal
Dipeshpal / ochuman_unet.py
Last active December 18, 2020 15:45
OCHuman model
# Build the segmentation network (U-Net style, per the gist title).
# NOTE(review): truncated by the gist preview — only the first two encoder
# stages are visible; the decoder, output layer and return statement are cut
# off, and the original indentation was lost in the paste.
def get_model():
# 512x512 RGB input (IMG_HEIGHT/IMG_WIDTH from the constants snippet).
in1 = Input(shape=(IMG_HEIGHT, IMG_WIDTH, 3 ))
# Encoder stage 1: two 3x3 convs (32 filters) with dropout in between.
conv1 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(in1)
conv1 = Dropout(0.2)(conv1)
conv1 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv1)
# Halve spatial resolution before the next stage.
pool1 = MaxPooling2D((2, 2))(conv1)
# Encoder stage 2: filter count doubles to 64.
conv2 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(pool1)
conv2 = Dropout(0.2)(conv2)
@Dipeshpal
Dipeshpal / ochuman_model_output
Created June 6, 2020 04:55
ochuman_model_output
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 512, 512, 3) 0
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 512, 512, 32) 896 input_1[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, 512, 512, 32) 0 conv2d[0][0]
__________________________________________________________________________________________________
# Training callbacks: checkpoint the best weights (by validation loss) to
# disk, and shrink the learning rate when val_loss plateaus.
model_name = "models/"+"Unet_black_background.h5"
modelcheckpoint = ModelCheckpoint(model_name,
monitor='val_loss',
mode='auto',
verbose=1,
# Overwrite the file only when val_loss improves, so the saved model is
# always the best seen so far.
save_best_only=True)
# Default factor/patience; floor the learning rate at 1e-6.
lr_callback = ReduceLROnPlateau(min_lr=0.000001)