Projective Perspective
from fullcontrol.visualize.tube_mesh import FlowTubeMesh, go, np
# Display parameters
COLOURS = '#666' # 'black' / '#C0FFEE' / None
SHOW = True # if False, saves animation frames instead
# Geometry parameters
np.random.seed(sum(b'gcode'))
SEGMENTS_PER_CURVE = 31
BASE_THICKNESS = 0.6 # 0.2
RANDOM_THICKNESS_VARIATION = None # 0.4
CHARACTER_WIDTH = 2.0
CHARACTER_SPACING = 1.0
# Animation parameters
TOTAL_DURATION_S = 2. # Desired animation length
DESIRED_FPS = 30 # Desired animation framerate
SLOWDOWN_FACTOR = 0. # Text slowdowns (at the expense of faster transitions) -> (-1, inf)
# Internal parameters - DO NOT CHANGE
_BASE_CHAR_WIDTH = 2.0
_BASE_CHAR_SPACING = 1.0
n_chars = 7
n_spaces = n_chars - 1
# Ratio of the configured text width to the base text width (used to scale the render width)
_HORIZONTAL_STRETCH = (
    (n_chars*CHARACTER_WIDTH + n_spaces*CHARACTER_SPACING)
    / (n_chars*_BASE_CHAR_WIDTH + n_spaces*_BASE_CHAR_SPACING)
)
def adjust(letter, i):
    # Rescale the character's x coordinates from the base width/spacing to the configured ones
    letter[:,0] = (
        (letter[:,0] - i*(_BASE_CHAR_WIDTH + _BASE_CHAR_SPACING))
        * CHARACTER_WIDTH / _BASE_CHAR_WIDTH
        + i*(CHARACTER_WIDTH + CHARACTER_SPACING)
    )
F_left__C_top = np.float64([
    [0,0,0],
    [0,4,0],
    *([(1+np.sqrt(2))/2+(1+np.sqrt(2))/2*np.cos(theta), 4, 2*np.sin(theta)]
      for theta in np.linspace(np.pi, np.pi/4, SEGMENTS_PER_CURVE))
])
F_middle__C_bottom = np.float64([
    *([(1+np.sqrt(2))/2+(1+np.sqrt(2))/2*np.cos(theta), 2, 2*np.sin(theta)]
      for theta in np.linspace(np.pi, 7*np.pi/4, SEGMENTS_PER_CURVE))
])
gap__O1 = np.float64([
    *([4+np.cos(theta), 0, 2*np.sin(theta)]
      for theta in np.linspace(0, 2*np.pi, SEGMENTS_PER_CURVE))
])
U__N = np.float64([
    [6,4,-2],
    [6,1,2],
    *([7+np.cos(theta), 1+np.sin(theta), -2*np.cos(theta)]
      for theta in np.linspace(np.pi, 2*np.pi, SEGMENTS_PER_CURVE)),
    [8,4,2]
])
gap__T_vert = np.float64([
    [10,0,-2],
    [10,0,2]
])
gap__T_top = np.float64([
    [9,0,2],
    [11,0,2]
])
L1__R = np.float64([
    [12,4,-2],
    [12,0,2],
    [12.7,0,2], # Decent corner without sharp transition to curve
    *([13+np.cos(theta), 0, 1+np.sin(theta)]
      for theta in np.linspace(np.pi/2, -np.pi/2, SEGMENTS_PER_CURVE)),
    [12,0,0],
    [14,0,-2]
])
gap__O2 = np.float64([
    *([16+np.cos(theta), 0, 2*np.sin(theta)]
      for theta in np.linspace(0, 2*np.pi, SEGMENTS_PER_CURVE))
])
L2__L = np.float64([
    [18,4,2],
    [18,0,-2],
    [20,0,-2]
])
meshes = []
for index, character in enumerate((
    (F_left__C_top, F_middle__C_bottom,),
    (gap__O1,),
    (U__N,),
    (gap__T_vert, gap__T_top,),
    (L1__R,),
    (gap__O2,),
    (L2__L,),
)):
    for line in character:
        adjust(line, index)
        if RANDOM_THICKNESS_VARIATION:
            widths = np.random.random(len(line)) * RANDOM_THICKNESS_VARIATION + BASE_THICKNESS
            if len(widths) > 2:
                widths[-1] = widths[0] # match start and end for smooth looped meshes
        else:
            widths = BASE_THICKNESS
        meshes.append(
            FlowTubeMesh(
                line, deviation_threshold_degrees=80, widths=widths, sides=8,
                rounding_strength=1, flat_sides=False, capped=False
            ).to_Mesh3d(colors=COLOURS)
        )
fig = go.Figure(meshes)
fig.update_scenes(
    aspectmode='data', camera_projection_type='orthographic', dragmode='orbit',
    xaxis_visible=False, yaxis_visible=False, zaxis_visible=False,
)
if SHOW:
    fig.show()
    exit()
# Determine animation parameters
centiseconds_per_frame = round(100 / DESIRED_FPS)
print(f'{centiseconds_per_frame = }')
# Enforce an even number of steps, as close as possible to the specified FPS & duration
output_fps = 100 / centiseconds_per_frame
animation_steps = round(TOTAL_DURATION_S * output_fps / 2) * 2
output_duration = animation_steps / output_fps
print(f'{output_fps = }\n{output_duration = :.2f}s')
# Easing to slow only at F_U_L_L and CONTROL
x = np.linspace(0, np.pi, animation_steps//2, endpoint=False)
slow_fast_slow = 1 - np.sqrt((1 + SLOWDOWN_FACTOR) / (1 + SLOWDOWN_FACTOR * np.cos(x)**2)) * np.cos(x)
# 50% for 1/4 turn from F_U_L_L -> CONTROL, then 50% for 3/4 turn from CONTROL -> F_U_L_L
thetas = np.hstack([np.pi/4 * slow_fast_slow, (2*np.pi-np.pi/2)/2 * slow_fast_slow + np.pi/2])
#import cv2
#t = np.empty((400,1120,4), dtype=np.uint8)
for index, theta in enumerate(thetas):
    print(f'\b\r{index}/{len(thetas)}', end='', flush=True)
    fig.update_scenes(
        camera_up_x=0, camera_up_z=np.sin(theta), camera_up_y=np.cos(theta),
        camera_eye_x=0.001*np.sin(theta), camera_eye_z=np.cos(theta), camera_eye_y=-np.sin(theta),
    )
    filename = f'images/black/{10*np.rad2deg(theta):04.0f}.png'
    fig.write_image(filename, width=round(1500 * _HORIZONTAL_STRETCH), height=1000, scale=1)
    #image = cv2.imread(filename)
    #cropped = image[300:700, 190:1310]
    #cv2.imwrite(filename, cropped)
    #t[:,:,:3] = cropped
    #t[:,:,3] = 255
    #t[np.all(t[:,:,:3] == 255, axis=2)] = 0
    #cv2.imwrite(filename[:-4] + '_transparent.png', t)
@ES-Alexander (Author)

A set of tubes designed such that, due to projective geometry, two different perspectives show two different words. Orthographic projection is chosen for plotting so that occlusions are complete regardless of zoom level, avoiding the perspective warping that would be present in reality.

Example output:

fc.mov
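
For reference, the two readings correspond to looking straight down the z axis (camera up = +y) for the word drawn in the x-y plane, and straight along -y (camera up = +z) for the word drawn in the x-z plane. A minimal sketch of snapping the orthographic camera to either view, reusing the camera maths from the animation loop in the script above (it assumes the fig built by that script; the helper name is just for illustration):

def snap_camera(fig, theta):
    # theta = 0 -> x-y plane reading (F_U_L_L); theta = pi/2 -> x-z plane reading (CONTROL)
    fig.update_scenes(
        camera_up_x=0, camera_up_z=np.sin(theta), camera_up_y=np.cos(theta),
        camera_eye_x=0.001*np.sin(theta), camera_eye_z=np.cos(theta), camera_eye_y=-np.sin(theta),
    )

snap_camera(fig, 0)         # read the first word
snap_camera(fig, np.pi/2)   # read the second word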

@ES-Alexander (Author) commented Feb 28, 2024

Initial Animation

Example output (made using APNG maker*):
(animations: fullcontrol-white-cropped, fullcontrol-transparent-cropped)

Used OpenCV for some cropping and for making the background transparent, although some white anti-aliasing is rasterised into the letter edges, so the transparency isn't pristine there.

from pathlib import Path
import cv2
import numpy as np

files = Path('images').glob('*.png')
for file in files:
    filename = str(file) # required by opencv - it doesn't like Path objects
    image = cv2.imread(filename)
    cropped = image[300:700, 190:1310]
    cv2.imwrite(filename[:-4] + '_cropped.png', cropped)
    r, c, d = cropped.shape
    transparent_bg = np.empty((r, c, 4), dtype=np.uint8)
    transparent_bg[:,:,:3] = cropped
    transparent_bg[:,:,3] = 255 # set pixels to opaque by default
    transparent_bg[np.all(transparent_bg[:,:,:3] == 255, axis=2)] = 0 # set white pixels transparent
    cv2.imwrite(filename[:-4] + '_transparent.png', transparent_bg)

*Note: I originally made AVIF files, which are much smaller, but GitHub doesn't support them (even though most browsers do), so I had to go with APNG instead.


Local Alternative

It's also possible to generate the output locally using ImageMagick (an open-source program with a command-line interface):

convert images/????.png -shave 190x300 -delay 3 -loop 0 -quality 99 -write APNG:fullcontrol-white-cropped.png \
    -fuzz 10% -transparent White APNG:fullcontrol-transparent-cropped.png

This is faster, but seems to generate larger files than the online approach. -quality 99 produced the smallest files I could get, but they were still ~3x the size of the online-generated ones (which I suppose must use a different compression algorithm, and possibly some form of lossy compression).

@ES-Alexander (Author)

Added some animation parameters, and a seed for the randomness so it's possible to reproduce results (e.g. to generate a different animation of the same text).
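
As a minimal illustration of that reproducibility (the specific values here are just for demonstration), re-seeding before generating the per-point widths yields identical geometry on every run:

import numpy as np

np.random.seed(sum(b'gcode'))
widths_a = np.random.random(5) * 0.4 + 0.2  # as for RANDOM_THICKNESS_VARIATION=0.4, BASE_THICKNESS=0.2

np.random.seed(sum(b'gcode'))
widths_b = np.random.random(5) * 0.4 + 0.2

assert np.array_equal(widths_a, widths_b)   # identical widths -> identical meshes and frames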

@ES-Alexander (Author)

  • Added some parameters for colour, thickness, variation, and horizontal stretch control (example settings after this list)
    • COLOURS = None --> the default "new colour per tube" behaviour
  • Reliant on a FullControl version with this PR merged
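
For instance, a hypothetical configuration using only the parameters defined at the top of the script:

COLOURS = None                    # default behaviour: a new colour per tube
BASE_THICKNESS = 0.2
RANDOM_THICKNESS_VARIATION = 0.4  # per-point widths vary within [0.2, 0.6)
CHARACTER_WIDTH = 2.5             # wider letters
CHARACTER_SPACING = 0.5           # tighter spacing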

@ES-Alexander (Author)

  • Changed horizontal stretch control into independent character width and spacing controls (worked example below)
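
As a rough worked example of how those two controls determine the internal _HORIZONTAL_STRETCH factor (which scales the rendered image width in the script above):

n_chars, n_spaces = 7, 6
base_width = n_chars*2.0 + n_spaces*1.0           # _BASE_CHAR_WIDTH=2.0, _BASE_CHAR_SPACING=1.0

print((n_chars*2.0 + n_spaces*1.0) / base_width)  # default width/spacing -> 1.0 (no stretch)
print((n_chars*3.0 + n_spaces*1.0) / base_width)  # wider characters, same spacing -> 1.35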
