Created
April 20, 2017 15:57
-
-
Save JoshuaSullivan/cfa417dfa6e0262e8406a918a98823a1 to your computer and use it in GitHub Desktop.
A sketch implementing a simple slit-scan camera from the user's webcam or other attached video source.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Slit-scan Camera | |
// ©2016 Joshua Sullivan | |
// | |
// This sketch implements a simple time-slit camera. Each row of the video is offset | |
// in time by one frame more than the previous row. So, the top row is real-time and | |
// the bottom row is 240 frames ago. | |
// | |
// We're using an optimized storage mechanism where we are storing N + 1 samples for each row | |
// of the video frame, where N is the index of the row, from 0 to FRAME_HEIGHT - 1. This means | |
// index[0] holds 1 row of data, index[1] holds 2, and so on until index[239] holds 240. | |
// | |
// This works because the top row is not delayed, so it can be shown as-is. The second row is | |
// delayed by one frame, so we need to store a row from the current image and a row from the | |
// previous image (which is the one that is displayed). This continues increasing as we progress
// down the list. Storing only the rows that are needed uses less than half (49.5%) of the memory | |
// used by storing all the frames completely, and then extracting one row from each for every | |
// rendering pass. | |
// | |
// I'm pre-allocating all of the images to store the rows and the buffer image that is presented | |
// on-screen every update. Trying to allocate them at run-time produces a huge amount of churn as | |
// the system allocates and then garbage collects thousands of images. On a 2016 MacBook Pro, | |
// allocating images at run-time resulted in a 1.6GB memory footprint and 140% CPU utilization. | |
// When we moved to pre-allocated images that live for the duration of the sketch, memory usage | |
// dropped to 289MB and CPU utilization dropped to 35%. | |
// | |
// If you have a beefy computer with a lot of RAM, you can try using a higher-resolution | |
// capture source. | |
import processing.video.*; | |
// The dimensions of the capture frames (small on purpose: memory use scales with height^2).
final int FRAME_WIDTH = 320;
final int FRAME_HEIGHT= 240;
// The capture device. Remains null if no camera is found in setup().
Capture cam;
// Keeps track of how many frames have been captured so that modulo arithmetic can be used
// to calculate which frame each row should be displaying.
int count = 0;
// An array of arrays containing PImage objects that are all FRAME_WIDTH x 1 in size. It is
// dramatically more efficient to store only the individual rows needed to render the effect
// rather than retaining 240 full frames of video and using one row from each every update.
// Images are re-used in a cyclical fashion; once the maximum number of frames has been captured
// for each row, the index loops around and uses the first image for the next frame.
// Row i holds i + 1 samples, since it must display a frame that is i captures old.
PImage[][] imageArray = new PImage[FRAME_HEIGHT][];
// Pre-allocated image for composing the current frame to display to the viewer. It is reused
// every update to avoid per-frame allocation/GC churn.
PImage currentFrame = createImage(FRAME_WIDTH, FRAME_HEIGHT, RGB);
void setup() { | |
// Twice the capture resolution to make it easier to see. | |
size(640, 480, P2D); | |
// Check that we have a capture source available. | |
String[] cameras = Capture.list(); | |
if (cameras.length == 0) { | |
// No camera = GAME OVER | |
println("There are no cameras available for capture."); | |
exit(); | |
} else { | |
// We have a camera, let's try to get a small-sized stream from it. | |
cam = new Capture(this, FRAME_WIDTH, FRAME_HEIGHT, 30); | |
cam.start(); | |
} | |
// I prefer the pixellated look. | |
noSmooth(); | |
// No reason to run the sketch faster than the frame-rate of the camera. | |
frameRate(30); | |
// Allocate all the images for row data. | |
for (int i = 0; i < FRAME_HEIGHT; i++) { | |
PImage[] arr = new PImage[i + 1]; | |
for (int j = 0; j < (i + 1); j++) { | |
arr[j] = createImage(FRAME_WIDTH, 1, RGB); | |
} | |
imageArray[i] = arr; | |
} | |
} | |
void draw() { | |
// Only update the stage if the camera has a new frame available. | |
if (cam.available()) { | |
// Read the new frame in. | |
cam.read(); | |
// Slice up and store the new frame in the imageArray. | |
for (int i = 0; i < FRAME_HEIGHT; i++) { | |
// Doing modulan math lets us re-use the oldest image in each row array. | |
int index = count % (i + 1); | |
// Copy the pixels from the camera to the storage for this row. | |
imageArray[i][index].copy(cam, 0, i, FRAME_WIDTH, 1, 0, 0, FRAME_WIDTH, 1); | |
} | |
// Clear the screen. | |
background(0); | |
// Draw the current frame. | |
for (int i = 0; i < FRAME_HEIGHT; i++) { | |
// This calculation gives us successively older frames for every row we progress down the image. | |
// However, at startup, we don't have enough frames stored for the the lower rows, so we keep | |
// them at frame 0 until we have enough frames to start updating them. | |
int index = max(0, count - i); | |
// Each row cycles in the range 0 --> i. This allows the row to be i frames behind the most | |
// recently captured frame. | |
index %= i + 1; | |
// Copy the current row into the display frame. | |
currentFrame.copy(imageArray[i][index], 0, 0, FRAME_WIDTH, 1, 0, i, FRAME_WIDTH, 1); | |
} | |
// Double the size and draw the display frame. | |
scale(2.0); | |
image(currentFrame, 0, 0); | |
// Every time we capture a frame, the count incriments. This drives all of our modulan arithmetic. | |
count++; | |
} | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment