(Uncleaned) file to generate tuple files with keyframes for the TANDEM SLAM pipeline.
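Each output line lists the window size, the keyframe timestamps in that window (taken from the first column of poses_dso.txt), and the Sim(3) scale recovered by aligning the estimated trajectory against ground truth. With illustrative placeholder timestamps t0..t7, a line of tuples_dso_optimization_windows.txt looks like

    8 t0 t1 t2 t3 t4 t5 t6 t7 <scale>

and the matching line of tuples_dso_optimization_windows_last3.txt keeps only the last three keyframes of the window:

    3 t5 t6 t7 <scale>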
import os
import sys

import numpy as np
from scipy.spatial.transform import Rotation as R
def read_file_list(filename):
    """
    Reads a trajectory from a text file.

    File format:
    The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
    and "d1 d2 d3.." is arbitrary data (e.g., a 3D position and 3D orientation) associated to this timestamp.

    Input:
    filename -- File name

    Output:
    dict -- dictionary of (stamp,data) tuples
    """
    with open(filename) as file:
        data = file.read()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    parsed = [[v.strip() for v in line.split(" ") if v.strip() != ""]
              for line in lines if len(line) > 0 and line[0] != "#"]
    parsed = [(float(l[0]), l[1:]) for l in parsed if len(l) > 1]
    return dict(parsed)
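
# Minimal usage sketch for read_file_list (hypothetical path), assuming a TUM-style
# trajectory file whose non-comment lines look like "stamp tx ty tz qx qy qz qw":
#
#   traj = read_file_list("groundtruth_tum.txt")   # {stamp: [tx, ty, tz, qx, qy, qz, qw], ...}
#   stamps = sorted(traj.keys())                   # timestamps in ascending order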

def associate(first_list, second_list, offset, max_difference):
    """
    Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
    to find the closest match for every input tuple.

    Input:
    first_list -- first dictionary of (stamp,data) tuples
    second_list -- second dictionary of (stamp,data) tuples
    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
    max_difference -- search radius for candidate generation

    Output:
    matches -- list of matched stamp pairs (stamp1, stamp2)
    """
    # Copy the keys into mutable lists so matched stamps can be removed
    # (the dict views returned by .keys() in Python 3 do not support remove()).
    first_keys = list(first_list.keys())
    second_keys = list(second_list.keys())
    potential_matches = [(abs(a - (b + offset)), a, b)
                         for a in first_keys
                         for b in second_keys
                         if abs(a - (b + offset)) < max_difference]
    potential_matches.sort()
    matches = []
    for diff, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches
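
# Usage sketch (hypothetical variables), matching ground-truth stamps to estimated
# stamps within a 20 ms window, as done in the __main__ block below:
#
#   gt = read_file_list("groundtruth_tum.txt")
#   est = read_file_list("result.txt")
#   matches = associate(gt, est, offset=0.0, max_difference=0.02)
#   # matches is a list of (gt_stamp, est_stamp) pairs, sorted by gt_stamp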

def align_sim3(gt_pos, est_pos, align_scale=True, eval_rmse=False):
    """
    Align two trajectories with a Sim(3) transform (closed-form, SVD-based).
    :param gt_pos: ground truth positions, shape [N, 3]
    :param est_pos: estimated positions, shape [N, 3]
    :param align_scale: whether to calculate the scale (Sim(3)). Set to 1.0 if False
    :param eval_rmse: whether to evaluate the RMSE. Set to -1 if False
    :return: R, t, scale, rmse
    """
    # print(np.shape(gt_pos))
    centroid_est = est_pos.mean(0)
    centroid_gt = gt_pos.mean(0)
    eval_zerocentered = est_pos - centroid_est
    gt_zerocentered = gt_pos - centroid_gt
    # Cross-covariance between the zero-centered point sets.
    H = np.dot(np.transpose(eval_zerocentered),
               gt_zerocentered) / gt_pos.shape[0]
    U, D, Vh = np.linalg.svd(H)
    S = np.array(np.identity(3))
    if (np.linalg.det(U) * np.linalg.det(Vh) < 0):
        # Handle the reflection case so the result is a proper rotation.
        S[2, 2] = -1
    R_inv = np.dot(U, np.dot(S, Vh))
    R = np.transpose(R_inv)
    rot_centroid_est = np.dot(R, np.transpose(centroid_est))
    rot_zerocentered_est = np.dot(est_pos, R_inv) - rot_centroid_est
    scale = (np.trace(np.dot(np.diag(D), S)) /
             np.mean(np.sum(eval_zerocentered ** 2, 1))) if align_scale else 1.
    t = np.transpose(centroid_gt) - scale * rot_centroid_est
    rmse = -1.
    if eval_rmse:
        diff = (scale * rot_zerocentered_est - gt_zerocentered)
        size = np.shape(diff)
        rmse = np.sqrt(np.sum(np.multiply(diff, diff)) / size[0])
    return R, t, scale, rmse
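
# Usage sketch (hypothetical arrays): given N matched 3D positions, recover the
# Sim(3) that maps the estimate onto the ground truth, i.e. each ground-truth
# point is approximated by scale * R.dot(est_point) + t:
#
#   R_align, t_align, s, rmse = align_sim3(gt_xyz, est_xyz, align_scale=True, eval_rmse=True)
#   est_aligned = s * est_xyz.dot(R_align.T) + t_align   # shape [N, 3], close to gt_xyz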

def generate_tuples(pose_dso_file, alternate_file, scale=1):
    # Accept either a direct path to poses_dso.txt or its containing directory.
    if not os.path.isfile(pose_dso_file):
        pose_dso_file = os.path.join(pose_dso_file, 'poses_dso.txt')
    with open(pose_dso_file, 'r') as pose_dso:
        pose_lines = pose_dso.readlines()
    if len(pose_lines) <= 8 and alternate_file is not None:
        # Too few keyframes: retry with the alternate file (skipped when None is passed)
        if not os.path.isfile(alternate_file):
            alternate_file = os.path.join(alternate_file, 'poses_dso.txt')
        with open(alternate_file, 'r') as pose_dso:
            pose_lines = pose_dso.readlines()
    dir_name = os.path.dirname(pose_dso_file)
    w8 = open(os.path.join(dir_name, 'tuples_dso_optimization_windows.txt'), 'w')  # windows of 8 keyframes
    last3 = open(os.path.join(dir_name, 'tuples_dso_optimization_windows_last3.txt'), 'w')  # last 3 keyframes of each window
    for i in range(len(pose_lines) - 8):
        w8.write('8')
        w8.write(' ')
        last3.write('3')
        last3.write(' ')
        for j in range(8):
            line = pose_lines[i + j]
            if j > 4:
                last3.write(line.split()[0])
                last3.write(' ')
            w8.write(line.split()[0])
            w8.write(' ')
        w8.write(str(scale))
        w8.write('\n')
        last3.write(str(scale))
        last3.write('\n')
    w8.close()
    last3.close()
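
# Usage sketch (hypothetical path): given a DSO keyframe pose file, write the two
# tuple files next to it, appending the given scale to every tuple line; passing
# None for alternate_file skips the fallback:
#
#   generate_tuples("/path/to/sequence/poses_dso.txt", None, scale=1.0)
#   # -> writes tuples_dso_optimization_windows.txt and
#   #    tuples_dso_optimization_windows_last3.txt in /path/to/sequence/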

if __name__ == '__main__':
    upper_level_folder = "/playpen-nas-ssd/akshay/3D_medical_vision/datasets/C3VD_registered_videos_undistorted_V2"
    # Iterate through sub-folders in the upper-level folder
    for sequence_folder in os.listdir(upper_level_folder):
        # Check if the item in the upper-level folder is a directory
        sequence_folder_path = os.path.join(upper_level_folder, sequence_folder)
        # print(sequence_folder_path)
        if os.path.isdir(sequence_folder_path):
            # Construct the file paths for groundtruth_tum.txt, result.txt, and poses_dso.txt
            poses_tum_path = os.path.join(sequence_folder_path, "groundtruth_tum.txt")
            poses_result_path = os.path.join(sequence_folder_path, "result.txt")
            poses_dso_path = os.path.join(sequence_folder_path, "poses_dso.txt")
            # Check if both groundtruth_tum.txt and result.txt exist in the current sequence folder
            if os.path.exists(poses_tum_path) and os.path.exists(poses_result_path):
                # Read the ground-truth and estimated trajectories
                first_list = read_file_list(poses_tum_path)
                second_list = read_file_list(poses_result_path)
                # Associate timestamps, then align the estimate to ground truth
                matches = associate(first_list, second_list, 0.0, 0.02)
                if len(matches) < 2:
                    print("Skipped {}".format(sequence_folder_path))
                    continue
                    # sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
                first_xyz = np.array([[float(value) for value in first_list[a][0:3]] for a, b in matches])
                second_xyz = np.array([[float(value) for value in second_list[b][0:3]] for a, b in matches])
                R, t, aligned_scale, rmse = align_sim3(first_xyz, second_xyz, align_scale=True, eval_rmse=True)
                # Generate tuples with the current poses_dso.txt path and the aligned scale
                generate_tuples(poses_dso_path, None, scale=aligned_scale)
                # generate_tuples(poses_result_path, poses_dso_path, scale=aligned_scale)
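
# Assumed on-disk layout (the dataset path above is specific to the author's setup;
# adjust upper_level_folder for your own data):
#
#   <upper_level_folder>/
#       <sequence_folder>/
#           groundtruth_tum.txt   # ground-truth trajectory in TUM format
#           result.txt            # estimated trajectory from the SLAM run
#           poses_dso.txt         # DSO keyframe poses; tuple files are written alongside it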