@IngIeoAndSpare
Created April 19, 2022 02:04
Create a COCO JSON from segmentation images
# Create the COCO image entry (file name, size, id) for one image
def create_images_json(image_source, image_name, image_id):
    im_width, im_height = image_source.size
    return {
        "file_name": image_name,
        "height": im_height,
        "width": im_width,
        "id": image_id
    }
# ref: https://www.immersivelimit.com/create-coco-annotations-from-scratch
def create_sub_mask_annotation(sub_mask, image_id, category_id, annotation_id, is_crowd):
    # Find contours (boundary lines) around each sub-mask.
    # Note: there could be multiple contours if the object
    # is partially occluded (e.g. an elephant behind a tree).
    # The reference code expects an ndarray, so convert the PIL sub-mask
    # before handing it to find_contours.
    sub_mask = np.array(sub_mask)
    contours = measure.find_contours(sub_mask, 0.5, positive_orientation='low')

    segmentations = []
    polygons = []
    for contour in contours:
        # Flip from (row, col) representation to (x, y)
        # and subtract the padding pixel
        for i in range(len(contour)):
            row, col = contour[i]
            contour[i] = (col - 1, row - 1)

        # Make a polygon and simplify it
        poly = Polygon(contour)
        poly = poly.simplify(1.0, preserve_topology=False)
        polygons.append(poly)
        segmentation = np.array(poly.exterior.coords).ravel().tolist()
        segmentations.append(segmentation)

    # Combine the polygons to calculate the bounding box and area
    multi_poly = MultiPolygon(polygons)
    x, y, max_x, max_y = multi_poly.bounds
    width = max_x - x
    height = max_y - y
    bbox = (x, y, width, height)
    area = multi_poly.area

    annotation = {
        'segmentation': segmentations,
        'iscrowd': is_crowd,
        'image_id': image_id,
        'category_id': category_id,
        'id': annotation_id,
        'bbox': bbox,
        'area': area
    }
    return annotation
# original source: https://www.immersivelimit.com/create-coco-annotations-from-scratch
def create_sub_masks(mask_image):
    width, height = mask_image.size

    # Initialize a dictionary of sub-masks indexed by RGB color
    sub_masks = {}
    for x in range(width):
        for y in range(height):
            # Get the RGB values of the pixel
            pixel = mask_image.getpixel((x, y))[:3]

            # If the pixel is not black...
            if pixel != (0, 0, 0):
                # Check to see if we've created a sub-mask...
                pixel_str = str(pixel)
                sub_mask = sub_masks.get(pixel_str)
                if sub_mask is None:
                    # Create a sub-mask (one bit per pixel) and add it to the dictionary.
                    # Note: we add 1 pixel of padding in each direction
                    # because the contours module doesn't handle cases
                    # where pixels bleed to the edge of the image.
                    sub_masks[pixel_str] = Image.new('1', (width + 2, height + 2))
                # Set the pixel value to 1 (default is 0), accounting for padding
                sub_masks[pixel_str].putpixel((x + 1, y + 1), 1)
    return sub_masks
import json
import os
import numpy as np
from skimage import measure
from shapely.geometry import Polygon, MultiPolygon
from PIL import Image
from tqdm import tqdm
# Input: your segmentation image directory
context_path = ""
arr = [f for f in os.listdir(context_path)]

# Input: your segmentation object ids (object1_id, object2_id, ..., objectN_id = [1, 2, ..., n])
window_id, wall_id, door_id = [1, 2, 3]

# Input: your segmentation color-to-id mapping, '(r, g, b)': id
category_ids = {
    '(0, 0, 128)': window_id,
    '(128, 0, 0)': wall_id,
    '(128, 128, 0)': door_id
}

# Starting indices
is_crowd = 0
image_id = 1
annotation_id = 1

# Output arrays
annotations = []
images = []

for image_path in tqdm(arr):
    # Convert the PNG to a 3-channel RGB image
    image_source = Image.open(f"{context_path}/{image_path}").convert('RGB')
    image_info = create_images_json(image_source, image_path, image_id)
    images.append(image_info)

    sub_masks = create_sub_masks(image_source)
    for color, sub_mask in sub_masks.items():
        # Only keep colors that map to a defined category id
        if color in category_ids:
            category_id = category_ids[color]
            annotation = create_sub_mask_annotation(sub_mask, image_id, category_id, annotation_id, is_crowd)
            annotations.append(annotation)
            annotation_id += 1
    image_id += 1

result = {}
result["images"] = images
result["type"] = "instances"
result["annotations"] = annotations

# JSON dump
save_path = "{{YOUR_ANNOTATION JSON SAVE DIR}}"
with open(f'{save_path}/annotation.json', 'w') as f:
    json.dump(result, f, indent=2)
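Note: the dumped file contains only "images", "type", and "annotations". Many COCO consumers (pycocotools included) also expect a "categories" list. Below is a minimal sketch of how it could be appended and the output sanity-checked; the category names "window", "wall", and "door" are assumptions inferred from the id variables above, so adjust them to your own label set.

# Assumption: category names mirror the window/wall/door id variables above.
result["categories"] = [
    {"id": window_id, "name": "window", "supercategory": "none"},
    {"id": wall_id, "name": "wall", "supercategory": "none"},
    {"id": door_id, "name": "door", "supercategory": "none"},
]
with open(f'{save_path}/annotation.json', 'w') as f:
    json.dump(result, f, indent=2)

# Optional sanity check with pycocotools (pip install pycocotools)
from pycocotools.coco import COCO
coco = COCO(f'{save_path}/annotation.json')
print(len(coco.getImgIds()), "images /", len(coco.getAnnIds()), "annotations loaded")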