@jbehley
Created September 2, 2020 08:26
Merge PointPillars predictions with semantic segmentation.
## Merge detections from PointPillars with the predictions of a semantic segmentation network.

import argparse
import math
from tqdm import tqdm
from matplotlib.animation import FuncAnimation
import os
import pickle
import numpy as np
import collections

Point = collections.namedtuple("Point", "x y z")

def plot_bbox(ax, bbox, score):
    """ plot an oriented bounding box given by center (x,y,z), size (w,l,h), and orientation alpha """
    x, y, z, w, l, h, alpha = bbox[:]
    s, c = math.sin(alpha), math.cos(alpha)
    R = np.array([c, -s, s, c]).reshape(2, 2)
    # closed rectangle in the box frame: first row holds the x-coordinates, second row the y-coordinates.
    corners = np.array([-w, -w, +w, +w, -w, -l, +l, +l, -l, -l]).reshape(2, 5)
    corners = R @ corners
    corners[:] = corners + np.array([[x], [y]])
    ax.plot(corners.reshape(-1)[:5], corners.reshape(-1)[5:], c=(score, 0, 0))
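
# Minimal usage sketch for plot_bbox (not part of the original script); it assumes
# `boxes` is an (M, 7) array of [x, y, z, w, l, h, alpha] boxes with matching `scores` in [0, 1]:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   for box, score in zip(boxes, scores):
#       plot_bbox(ax, box, float(score))
#   ax.set_aspect("equal")
#   plt.show()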

def gather_data(t, data_car, data_pedcycle):
    """ collect the boxes and scores of both detector heads for scan t. """
    # we are not really interested in the predicted class labels, only boxes and scores.
    scores_car = data_car[t]["scores"].detach().cpu().numpy()
    boxes_car = data_car[t]["box3d_lidar"].detach().cpu().numpy()
    scores_pedcycle = data_pedcycle[t]["scores"].detach().cpu().numpy()
    boxes_pedcycle = data_pedcycle[t]["box3d_lidar"].detach().cpu().numpy()
    # concatenate car and pedestrian/cyclist results to handle them uniformly.
    boxes_all = np.concatenate((boxes_car, boxes_pedcycle))
    scores_all = np.concatenate((scores_car, scores_pedcycle))
    return boxes_all, scores_all

## example:
## python3 merge_predictions.py --dataset ~/data/kitti-odometry/dataset/sequences/
## --predictions ~/data/semantic-kitti/predictions/kpconv/sequences/
## --pointpillar ~/data/semantic-kitti/point_pillar/pp_aggregated/
## --output ~/data/semantic-kitti/predictions/pp+kpconv/sequences/

if __name__ == "__main__":
    parser = argparse.ArgumentParser("./merge_pp.py")
    parser.add_argument(
        '--dataset', '-d',
        type=str,
        required=True,
        help='Dataset dir of the sequences. No default.',
    )
    parser.add_argument(
        '--predictions', '-p',
        type=str,
        required=True,
        help='Prediction dir with the same organization as the dataset, but with the '
        'predictions in each sequence\'s "predictions" directory. No default.',
    )
    parser.add_argument(
        '--pointpillar', '-pp',
        type=str,
        required=True,
        help='Directory with the pickle files (pp_car.pkl, pp_pedcycle.pkl, pp_info.pkl) '
        'of the PointPillars evaluation script.',
    )
    parser.add_argument(
        '--output', '-o',
        type=str,
        required=True,
        help='Directory to write the merged label files to.',
    )

    ARGS, _ = parser.parse_known_args()

    root_dir = ARGS.dataset
    pred_dir = ARGS.predictions

    if not os.path.exists(ARGS.output):
        os.makedirs(ARGS.output)
    # PointPillars files (currently hardcoded):
    # - pp_car.pkl contains the pickled results of the evaluation script for cars.
    # - pp_pedcycle.pkl contains the pickled results of the evaluation script for pedestrians and cyclists.
    # - pp_info.pkl contains the pickled info files of the PointPillars pipeline (one entry per scan).
    print("Reading point pillar dumps...", end="", flush=True)

    with open(os.path.join(ARGS.pointpillar, "pp_car.pkl"), "rb") as f:
        data_car = pickle.load(f)
    with open(os.path.join(ARGS.pointpillar, "pp_pedcycle.pkl"), "rb") as f:
        data_pedcycle = pickle.load(f)
    with open(os.path.join(ARGS.pointpillar, "pp_info.pkl"), "rb") as f:
        info = pickle.load(f)

    T = len(info)
    print("finished.")

    threshold = 0.3

    for t in tqdm(range(T)):
        # load the point cloud of scan t.
        points = np.fromfile(os.path.join(root_dir, info[t]["point_cloud"]["velodyne_path"]), dtype=np.float32).reshape(-1, 4)

        # derive the path of the corresponding semantic predictions: <sequence>/predictions/<scan>.label
        parts = info[t]["point_cloud"]["velodyne_path"].split("/")
        pred_path = os.path.join(parts[0], "predictions", *parts[2:])
        pred_path = pred_path.replace(".bin", ".label")
        labels = np.fromfile(os.path.join(pred_dir, pred_path), dtype=np.uint32)

        boxes_all, scores_all = gather_data(t, data_car, data_pedcycle)

        # sort bounding boxes by descending scores.
        sorted_score_idxes = np.flip(np.argsort(scores_all))
        boxes_all = boxes_all[sorted_score_idxes]
        scores_all = scores_all[sorted_score_idxes]

        # keep only boxes above the score threshold.
        boxes_all = boxes_all[scores_all > threshold]

        N = points.shape[0]

        # semantic classes that can carry instances (SemanticKITTI ids): 10: car, 11: bicycle, 13: bus,
        # 15: motorcycle, 18: truck, 20: other-vehicle, 30: person, 31: bicyclist, 32: motorcyclist.
        instanceable_labels = [10, 11, 13, 15, 18, 20, 30, 31, 32]

        # first column: assigned instance id (0 = none), second column: semantic label.
        points_assigned = np.zeros((N, 2), dtype=np.uint16)
        points_assigned[:, 1] = labels
        point_indexes = np.arange(N)

        for j in range(boxes_all.shape[0]):
            x, y, z, w, l, h, alpha = boxes_all[j]

            # coarse search region around the box center to limit the points that are tested.
            radius = w * w + h * h
            ROI = np.isin(points_assigned[:, 1], instanceable_labels) & (abs(points[:, 0] - x) < radius) & (abs(points[:, 1] - y) < radius)
            subindex = point_indexes[ROI]

            # rotate the candidate points into the box coordinate frame and test the extents.
            pp = points[ROI, :3] - boxes_all[j, :3]
            s, c = math.sin(alpha), math.cos(alpha)
            R = np.array([c, -s, 0, s, c, 0, 0, 0, 1]).reshape(3, 3)
            rotated = R @ pp.T
            inside = (abs(rotated[0, :]) < w) & (abs(rotated[1, :]) < l) & (abs(rotated[2, :]) < h)

            # points inside the box get the (1-based) index of the box as instance id.
            points_assigned[subindex[inside], 0] = j + 1

        # write output in the SemanticKITTI label format:
        # upper 16 bits hold the instance id, lower 16 bits the semantic label.
        out_dir = os.path.dirname(os.path.join(ARGS.output, pred_path))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        # cast to uint32 before shifting so the instance id does not overflow the uint16 columns.
        out_labels = (points_assigned[:, 0].astype(np.uint32) << 16) | points_assigned[:, 1].astype(np.uint32)
        out_labels.tofile(os.path.join(ARGS.output, pred_path))
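
# Sketch for reading a merged label file back (not part of the original script);
# the file name "000000.label" is a placeholder:
#
#   merged = np.fromfile("000000.label", dtype=np.uint32)
#   semantic = merged & 0xFFFF   # lower 16 bits: semantic class id
#   instance = merged >> 16      # upper 16 bits: 1-based box index, 0 = not inside any box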