# <Filter>:
# None (default): load everything. Every instance has a bbox; some instances have
#   keypoint annotations and some have mask annotations.
#   images: 5081, instances: 13360
# 'kpt&segm' or 'segm&kpt': only load instances that have both keypoint and mask
#   annotations (plus the bbox).
#   images: 4731, instances: 8110
# 'kpt|segm' or 'segm|kpt': load instances that have either keypoint or mask
#   annotations (plus the bbox).
#   images: 5081, instances: 10375
# 'kpt' or 'segm': only load instances that have that particular kind of
#   annotation (plus the bbox).
#   images: 5081/4731, instances: 10375/8110
ochuman = OCHuman(AnnoFile='ochuman.json', Filter='segm')
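A minimal loading sketch for the filter options above. getImgIds() and loadImgs() are the accessors the OCHuman API exposes (loadImgs() also appears in the generator gist below); the annotation path and image directory are assumptions.

from ochumanApi.ochuman import OCHuman
import cv2, os

ImgDir = 'images/'
ochuman = OCHuman(AnnoFile='ochuman.json', Filter='segm')
image_ids = ochuman.getImgIds()
print('Total images: %d' % len(image_ids))  # 4731 with Filter='segm'

# Load the record for the first image and read the image itself
data = ochuman.loadImgs(imgIds=[image_ids[0]])[0]
img = cv2.imread(os.path.join(ImgDir, data['file_name']))
print(data['file_name'], data['width'], data['height'])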
Dipeshpal / get_mask.py
Last active June 5, 2020 11:43
OCHuman get Mask only
def get_segmentation(data):
    # Read the raw image referenced by this OCHuman record
    img = cv2.imread(os.path.join(ImgDir, data['file_name']))
    height, width = data['height'], data['width']
    # One distinct color per human instance in the image
    colors = [[255, 0, 0],
              [255, 255, 0],
              [0, 255, 0],
              [0, 255, 255],
              [0, 0, 255],
              [255, 0, 255]]
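The gist preview cuts off here. A plausible completion, following the data layout used in the OCHuman demo (the 'annotations'/'segms' field names and Poly2Mask come from ochumanApi, but treat the exact completion as an assumption rather than the author's code):

import numpy as np
from ochumanApi.ochuman import Poly2Mask

def get_segmentation_sketch(data):
    img = cv2.imread(os.path.join(ImgDir, data['file_name']))
    height, width = data['height'], data['width']
    colors = [[255, 0, 0], [255, 255, 0], [0, 255, 0],
              [0, 255, 255], [0, 0, 255], [255, 0, 255]]
    seg = np.zeros((height, width, 3), dtype=np.uint8)
    for i, anno in enumerate(data['annotations']):
        segm = anno['segms']
        if segm is None:
            continue
        mask = Poly2Mask(segm)            # (height, width) binary instance mask
        seg[mask > 0] = colors[i % len(colors)]
    return img, seg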
Dipeshpal / ochuman_new_mask.py
Created June 5, 2020 11:48
Pass the original image and the segmented image generated by the OCHuman API; this function creates a black-and-white mask.
def new_mask(real_img, m_img):
    # Flatten both images to 1-D pixel streams and compare them element-wise
    real_img = real_img.reshape(1, -1)[0]
    m_img = m_img.reshape(1, -1)[0]
    new = []
    for i, j in zip(real_img, m_img):
        if i != j:
            new.append(255)  # humans appear white because of 255
        else:
            new.append(0)    # untouched background stays black
    return new
IMG_HEIGHT = 512
IMG_WIDTH = 512
ImgDir = 'images/'
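A hedged usage sketch for new_mask (the input file names and the reshape back to an image are assumptions; the gist preview above is truncated):

import numpy as np

real = cv2.resize(cv2.imread('real.jpg'), (IMG_WIDTH, IMG_HEIGHT))
segm = cv2.resize(cv2.imread('segmented.jpg'), (IMG_WIDTH, IMG_HEIGHT))  # OCHuman overlay of the same image
flat = new_mask(real, segm)  # flat list, 255 per channel wherever the overlay changed a pixel
bw = np.array(flat, dtype=np.uint8).reshape(IMG_HEIGHT, IMG_WIDTH, 3)
cv2.imwrite('bw_mask.png', bw)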
Dipeshpal / ochuman_import.py
Last active June 5, 2020 11:57
Import OCHuman
from ochumanApi.ochuman import OCHuman
import cv2, os
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (15, 15)
import ochumanApi.vis as vistool
from ochumanApi.ochuman import Poly2Mask
Dipeshpal / ochuman_generator_images.py
Created June 5, 2020 11:59
Pass the batch size (how many images you want at a time) and the starting index.
from tqdm import tqdm

def generator_images(batch_size=1, ind=0):  # ind needs a default to follow batch_size
    while True:
        x_batch = []
        y_batch = []
        for i in range(batch_size):
            data = ochuman.loadImgs(imgIds=[image_ids[ind]])[0]
            file_name = data['file_name']
            # ... (preview truncated here; see the sketch below)

for i in tqdm(range(4731)):
    for x, y in generator_images(1, i):
        break  # pull exactly one batch per index
base_dir_custom = "custom_dataset_human_black_background/"
try:
    os.makedirs(base_dir_custom)
except FileExistsError:  # narrowed from a bare except; only "already exists" is expected
    pass
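The two gist previews above are truncated, so here is a minimal, self-contained sketch of the pipeline they suggest: a generator yielding (image, black-and-white mask) pairs resized to 512x512, and a driver loop writing each pair under base_dir_custom. Everything beyond what the gists show (the yield, the resizing, thresholding the color mask directly instead of diffing with new_mask, the output file layout) is an assumption:

import os
import cv2
import numpy as np
from tqdm import tqdm

def generator_images_sketch(batch_size=1, ind=0):
    # Yield batches of (image, mask) pairs starting at image index `ind`
    while True:
        x_batch, y_batch = [], []
        for i in range(batch_size):
            data = ochuman.loadImgs(imgIds=[image_ids[ind + i]])[0]
            img = cv2.imread(os.path.join(ImgDir, data['file_name']))
            img = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT))
            _, seg = get_segmentation_sketch(data)        # per-instance color mask (sketch above)
            seg = cv2.resize(seg, (IMG_WIDTH, IMG_HEIGHT))
            # Any colored pixel belongs to a human: threshold straight to black/white
            mask = np.where(seg.any(axis=-1), 255, 0).astype(np.uint8)
            x_batch.append(img)
            y_batch.append(mask)
        yield np.array(x_batch), np.array(y_batch)

os.makedirs(base_dir_custom, exist_ok=True)
for i in tqdm(range(4731)):
    x, y = next(generator_images_sketch(1, i))
    cv2.imwrite(os.path.join(base_dir_custom, f'{i}_img.png'), x[0])
    cv2.imwrite(os.path.join(base_dir_custom, f'{i}_mask.png'), y[0])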
Dipeshpal / ochuman_model_output
Created June 6, 2020 04:55
ochuman_model_output
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 512, 512, 3) 0
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 512, 512, 32) 896 input_1[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, 512, 512, 32) 0 conv2d[0][0]
__________________________________________________________________________________________________
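For orientation, a sketch of the Keras layers behind the visible rows of this summary; the parameter count checks out (3*3*3*32 + 32 = 896 for conv2d), but the kernel size, activation, and everything past the dropout layer are assumptions, since the gist truncates the rest of the U-Net:

from tensorflow.keras.layers import Input, Conv2D, Dropout
from tensorflow.keras.models import Model

inputs = Input(shape=(512, 512, 3))                                  # input_1
x = Conv2D(32, (3, 3), padding='same', activation='relu')(inputs)    # conv2d: 896 params
x = Dropout(0.1)(x)                                                  # dropout
# ... remaining encoder/decoder blocks not shown in the gist
model = Model(inputs, x)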
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

model_name = "models/" + "Unet_black_background.h5"
modelcheckpoint = ModelCheckpoint(model_name,
                                  monitor='val_loss',
                                  mode='auto',
                                  verbose=1,
                                  save_best_only=True)
lr_callback = ReduceLROnPlateau(min_lr=0.000001)
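A hedged sketch of the training call implied by the log below: steps_per_epoch=100 and epochs=50 match "100/100" and "Epoch 1/50", and the two callbacks match the val_loss checkpointing and the lr column; the generator arguments, the validation split, and the loss setup are assumptions (masks would also need scaling to 0/1 for a sigmoid output):

history = model.fit(generator_images_sketch(batch_size=1, ind=0),
                    steps_per_epoch=100,
                    epochs=50,
                    validation_data=generator_images_sketch(batch_size=1, ind=4000),
                    validation_steps=10,
                    callbacks=[modelcheckpoint, lr_callback])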
Training: epoch {} begins at 18:08:48.853186
Epoch 1/50
100/100 [==============================] - ETA: 0s - loss: 0.5708 - acc: 0.2714
Epoch 00001: val_loss improved from inf to 0.52510, saving model to models/Unet_black_background.h5
Training: epoch 0 ends at 19:03:21.708891
100/100 [==============================] - 3295s 33s/step - loss: 0.5708 - acc: 0.2714 - val_loss: 0.5251 - val_acc: 0.1151 - lr: 0.0010
Epoch 2/50
100/100 [==============================] - ETA: 0s - loss: 0.5104 - acc: 0.2918
Epoch 00002: val_loss improved from 0.52510 to 0.50866, saving model to models/Unet_black_background.h5
Training: epoch 1 ends at 19:27:18.558237