Image coregister for timelapse
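Registers each hand-held shot onto a fixed reference frame: LoFTR finds dense correspondences, MAGSAC++ fits a homography, each shot is warped into the reference, and the aligned frames are cross-faded into an H.264 timelapse with ffmpeg.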
#!/usr/bin/python3
"""
**********************************************************
* BSD License *
* Copyright (c) 2022 *
* Balint Cristian <cristian dot balint at gmail dot com> *
**********************************************************
# Small script that homography-matches images into an animation
"""
import os
import cv2
import numpy as np
import torch
import kornia as K
import kornia.feature as KF
import kornia.geometry as KG
debug = False
# limit memory pad
pad_h = int(768 * 1.5)
pad_w = int(1024 * 1.5)
pad_s = .3 # scale
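
# read an image with OpenCV (BGR) and return a 1x3xHxW RGB float
# tensor in [0, 1], the layout kornia's LoFTR matcher expects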
def load_torch_image(fname):
    img = K.image_to_tensor(cv2.imread(fname), False).float() / 255.
    img = K.color.bgr_to_rgb(img)
    return img

os.makedirs("video", exist_ok=True)
os.makedirs("register", exist_ok=True)
# sources of "image" folder.
# 0484 : https://photos.app.goo.gl/LbvCsZYugw3TmG4J8
# 1831 : https://photos.app.goo.gl/DBZTi76HbiQYg4HKA
# 1390 : https://photos.app.goo.gl/9pQa41pWkyr1LhRs5
# 1484 : https://photos.app.goo.gl/GF539e8JxDeYBkv56
# 2193 : https://photos.app.goo.gl/BFButKMhS7rkWgzz6
# 1501 : https://photos.app.goo.gl/ENirPy2yVkq19cdk9
# 1321 : https://photos.app.goo.gl/qXwdyUKyn6H3WXow8
# 2183 : https://photos.app.goo.gl/ekqh7BW5eBLtkq4H7
# 2212 : https://photos.app.goo.gl/4uTkjoheieoXssvb6
# 2283 : https://photos.app.goo.gl/TpvfJj19qCydQRhW7
# 2357 : https://photos.app.goo.gl/PQ6So1Bp5mkoStf98
# image sequence
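# (the sequence begins and ends with '0484' so the animation can loop)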
img_seq = ['0484', '1831', '1390', '1484', '2193', '1501', '1321', '2183', '2212', '2283', '2357', '0484']
ref_img = '1501' # reference image
print("Load reference [DSC_%s.JPG]" % ref_img)
fname2 = 'images/DSC_%s.JPG' % ref_img
img2 = K.geometry.resize(load_torch_image(fname2), (pad_h, pad_w))
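# every image in the sequence gets registered onto this fixed reference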

# LoFTR matcher: https://arxiv.org/abs/2104.00680
# load the pretrained outdoor weights once, outside the loop
matcher = KF.LoFTR(pretrained='outdoor')

for iname in img_seq:

    print("Load match image [DSC_%s.JPG]" % iname)
    fname1 = 'images/DSC_%s.JPG' % iname
    img1 = K.geometry.resize(load_torch_image(fname1), (pad_h, pad_w))

    input_dict = {"image0": K.color.rgb_to_grayscale(img1),
                  "image1": K.color.rgb_to_grayscale(img2)}

    print("Do Matching ...")
    with torch.no_grad():
        correspondences = matcher(input_dict)
    mkpts0 = correspondences['keypoints0'].cpu().numpy()
    mkpts1 = correspondences['keypoints1'].cpu().numpy()
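
    # mkpts0 / mkpts1 are matched pixel coordinates (Nx2) in img1 / img2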

    # fundamental matrix (alternative, unused)
    #F, inliers = cv2.findFundamentalMat(mkpts0, mkpts1, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
    #inliers = inliers > 0
    #print("F inliers: ", inliers.sum())
    #print("F: ", F)

    # homography matrix, robustly estimated with MAGSAC++
    H, inliers = cv2.findHomography(mkpts0, mkpts1,
                                    method = cv2.USAC_MAGSAC, ransacReprojThreshold = 3,
                                    maxIters = 5000000, confidence = 0.995)
    inliers = inliers > 0
    print("H inliers: ", inliers.sum())
    print("H: ", H)

    if debug:
        import matplotlib.pyplot as plt
        from kornia_moons.feature import *
        # display matches
        draw_LAF_matches(
            KF.laf_from_center_scale_ori(torch.from_numpy(mkpts0).view(1, -1, 2),
                                         torch.ones(mkpts0.shape[0]).view(1, -1, 1, 1),
                                         torch.ones(mkpts0.shape[0]).view(1, -1, 1)),
            KF.laf_from_center_scale_ori(torch.from_numpy(mkpts1).view(1, -1, 2),
                                         torch.ones(mkpts1.shape[0]).view(1, -1, 1, 1),
                                         torch.ones(mkpts1.shape[0]).view(1, -1, 1)),
            torch.arange(mkpts0.shape[0]).view(-1, 1).repeat(1, 2),
            K.tensor_to_image(img1),
            K.tensor_to_image(img2),
            inliers,
            draw_dict = {'inlier_color': (0.2, 1, 0.2),
                         'tentative_color': None,
                         'feature_color': (0.2, 0.5, 1), 'vertical': False})
        plt.show()
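
    # re-read the match image at padded size; H maps img1 pixel
    # coordinates into the reference (img2) frame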
    i1 = cv2.resize(cv2.imread(fname1), (pad_w, pad_h))
    ih, iw, _ = i1.shape

    print("Warp using H perspective")
    out = cv2.warpPerspective(i1, H, (iw, ih))

    print("Write [register/output-%s.png]" % iname)
    cv2.imwrite('register/output-%s.png' % iname, out)

    if debug:
        # draw the homography box on the reference image
        i2 = cv2.resize(cv2.imread(fname2), (pad_w, pad_h))
        pts = np.float32([[0, 0], [0, ih-1], [iw-1, ih-1], [iw-1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, H)
        cv2.polylines(i2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        # visualise result against reference, scaled down to fit on screen
        cv2.imshow('img2', cv2.resize(i2, (0, 0), fx=pad_s, fy=pad_s))
        cv2.imshow('img1_to_img2', cv2.resize(out, (0, 0), fx=pad_s, fy=pad_s))
        cv2.waitKey(0)

    print()

frame = 0
old = None

# crop margins
x = 300; y = 320
w = 1000; h = 700
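# note (added): the central crop hides the empty borders the perspective
# warp can leave around each registered frame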

for idx in img_seq:

    # cross-fade from the previous image into the current one
    if old:
        oimg = cv2.imread("register/output-%s.png" % old)[y:y+h, x:x+w]
        nimg = cv2.imread("register/output-%s.png" % idx)[y:y+h, x:x+w]
        for fade in np.arange(0, 1.1, 0.1):
            dst = cv2.addWeighted(oimg, 1.0 - fade, nimg, fade, 0.0)
            if debug:
                cv2.imshow('img', dst)
                cv2.waitKey(100)
            print("Write blend frame [frame-%03i.png]" % frame)
            cv2.imwrite('video/frame-%03i.png' % frame, dst)
            frame += 1

    # hold the current image for 15 static frames
    for _ in range(15):
        img = cv2.imread("register/output-%s.png" % idx)[y:y+h, x:x+w]
        if debug:
            cv2.imshow('img', img)
            cv2.waitKey(100)
        print("Write static frame [frame-%03i.png]" % frame)
        cv2.imwrite('video/frame-%03i.png' % frame, img)
        frame += 1

    old = idx

# final video
cmd = "ffmpeg -y -framerate 15 -pattern_type glob -i 'video/frame-*.png' \
-c:v libx264 -pix_fmt yuv420p video.mp4"
os.system(cmd)
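
# note (added): -pix_fmt yuv420p needs even frame dimensions, which the
# 1000x700 crop above satisfies; glob ordering matches the zero-padded
# frame-%03i numbering, so frames are encoded in write order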