Created
March 9, 2018 13:48
-
-
Save tobspr/e22f093a6689a2e93fda30d231b12b4c to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from __future__ import print_function
import math
from panda3d.core import *
from direct.interval.IntervalGlobal import Sequence
from RenderTarget import RenderTarget
# --- Engine configuration; must run before DirectStart opens the window ---
# Use GL's native coordinate-system convention for this pipeline.
loadPrcFileData("", "gl-coordinate-system default")
# Fixed FullHD window: the combine shader further down assumes 1920x1080.
loadPrcFileData("", "win-size 1920 1080")
# loadPrcFileData("", "win-size 1600 900")
loadPrcFileData("", "win-fixed-size 1")
# loadPrcFileData("", "show-buffers #t")
# Disable vsync and show the FPS meter (benchmarking aid).
loadPrcFileData("", "sync-video #f")
loadPrcFileData("", "show-frame-rate-meter #t")
# Allow non-power-of-two textures: the tile buffer below is 1920x5124.
loadPrcFileData("", "textures-power-2 none")
# Importing DirectStart opens the window and creates the implicit globals
# (base, render, loader, run) used throughout the rest of this script.
import direct.directbase.DirectStart
# Load the stock "panda" test model and place it slightly below the origin
# so the orbiting cameras (built below) look down onto it.
scene = loader.loadModel("panda")
scene.reparentTo(render)
scene.setPos(0, 0, -3)
scene.setH(90)
# Remove default camera: disable the default mouse-driven camera control and
# drop the default camera node -- all rendering goes through the 24
# offscreen-buffer cameras created below instead.
base.disableMouse()
base.camera.removeNode()
# Size of a single sub-frame (portrait 480x854).  The offscreen buffer packs
# a 4x6 grid of these tiles, one per camera (24 total).
targetW, targetH = 480, 854
# Create the buffer
buffW, buffH = targetW * 4, targetH * 6
print("Creating offscreen buffer of size", buffW, buffH)
# NOTE(review): RenderTarget is a project-local helper (imported at the top
# of the file), presumably wrapping a Panda3D GraphicsBuffer -- confirm its
# exact semantics against RenderTarget.py.
target = RenderTarget("scene")
target.setSize(buffW, buffH)
target.addColorTexture()
target.addDepthTexture()
# prepareSceneRender() appears to hook this target into the main scene
# render pass -- TODO confirm against RenderTarget.py.
target.prepareSceneRender()
# All 24 cameras hang off a single rig node; animating the rig (see the
# Sequence at the bottom of the file) moves every camera in lockstep,
# instead of moving base.cam.
cameraRig = render.attachNewNode("Camera Rig")
cameras = []
regions = []

# Build one camera and one display region per tile of the 4x6 grid.
for idx in range(24):
    col, row = idx % 4, idx // 4

    # Distribute the cameras over a 180 degree arc around the origin.
    angleDeg = idx / 24.0 * 180.0
    angle = angleDeg / 180.0 * math.pi

    camNode = Camera("Camera-" + str(idx))
    camNode.setLens(base.camLens)
    camPath = cameraRig.attachNewNode(camNode)
    camPath.setPos(math.sin(angle) * 30.0, math.cos(angle) * 30.0, 20.0)
    camPath.lookAt(0, 0, 0)
    cameras.append(camPath)

    # Each region covers one 1/4-wide by 1/6-tall tile of the buffer.
    region = target.getInternalBuffer().makeDisplayRegion()
    region.setSort(1000)
    region.setDimensions(col / 4.0, col / 4.0 + 0.25,
                         row / 6.0, row / 6.0 + 1.0 / 6.0)
    region.setClearDepthActive(True)
    region.setClearDepth(1.0)
    region.setCamera(camPath)
    regions.append(region)

# Order the passes: tile buffer renders last (highest sort), its internal
# region in between.
target.getInternalBuffer().setSort(10000)
target.getInternalRegion().setSort(100)
# Create the shader which merges the 24 frames | |
# Fullscreen vertex shader shared by the combine and flip passes below.
# NOTE(review): p3d_ModelViewProjectionMatrix and texcoord are unused by the
# fragment shaders (they sample via gl_FragCoord/texelFetch), and
# sign(v * 0.5 + 0.5) collapses texcoord to {0, 1} rather than a smooth
# 0..1 gradient -- harmless here, but confirm before reusing elsewhere.
combineVertex = """
#version 400
uniform mat4 p3d_ModelViewProjectionMatrix;
in vec4 p3d_Vertex;
out vec2 texcoord;
void main() {
    gl_Position = vec4(p3d_Vertex.x, p3d_Vertex.z, 0, 1);
    texcoord = sign(p3d_Vertex.xz * 0.5 + 0.5);
}
"""
# Fragment shader for the combine pass: reduces the 24 camera tiles to one
# bit each (ordered dithering) and packs the 24 bits into the RGB bytes of
# a single FullHD output pixel (8 tiles per channel).
#
# Fixes vs. the previous revision:
#  * removed a debug leftover ("dithered = fractX == 0 ? true : false;")
#    that dead-stored the just-computed ordered-dither result, reducing the
#    output to vertical stripes;
#  * pack with / 255.0 instead of / 256.0: an 8-bit channel stores
#    round(v * 255), so / 255.0 round-trips every byte value exactly while
#    / 256.0 corrupts high bit patterns (e.g. 255 decodes as 254).
combineFragment = """
#version 400
in vec2 texcoord;
out vec4 result;
uniform sampler2D colorTex;    // packed 4x6 tile buffer
uniform sampler2D ditherTex;   // ordered-dither threshold matrix
uniform ivec2 imageDimensions; // size of one tile (480 x 854)

// Ordered dithering: compare the luminance c0 against the threshold stored
// at (x, y) in the dither texture.  Returns true when the pixel is "on".
bool find_closest(int x, int y, float c0)
{
    float limit = 0.0;
    if(x < 8)
    {
        limit = texelFetch(ditherTex, ivec2(x, y), 0).x;
    }
    if(c0 < limit) {
        return false;
    }
    return true;
}

void main() {
    result = vec4(0);
    int flags = 0;

    // Which output color channel this pixel feeds (3 columns of one tile
    // width are used; everything else is padding).
    int colorChannel = int(int(gl_FragCoord.x) / imageDimensions.x);

    // "Rest of the area of the FullHD frame is unused"
    if (colorChannel >= 3 || gl_FragCoord.y > imageDimensions.y) {
        result = vec4(1, 1, 0, 1);
        return;
    }

    vec3 color_mask = vec3(
        colorChannel == 0 ? 1 : 0,
        colorChannel == 1 ? 1 : 0,
        colorChannel == 2 ? 1 : 0
    );

    // Pixel position within a single tile.
    ivec2 subtex = ivec2(gl_FragCoord.xy) % imageDimensions;
    for (int i = 0; i < 24; i++) {
        int x = i % 4;
        int y = i / 4;
        ivec2 transformedCoord = subtex + ivec2(x, y) * imageDimensions;
        int fractX = int(mod(int(gl_FragCoord.x), 4));
        int fractY = int(mod(int(gl_FragCoord.y), 4));
        vec4 colorSample = texelFetch(colorTex, transformedCoord, 0);
        float luminance = dot(colorSample.xyz, color_mask);
        bool dithered = find_closest(fractX, fractY, luminance);
        if (dithered) {
            flags |= 1 << i;
        }
    }

    // Convert flags to color: pack the 24 flag bits into the RGB bytes.
    vec4 color = vec4( vec3( (flags >> 16) & 0xFF, (flags >> 8) & 0xFF, flags & 0xFF) / 255.0 , 1);
    result = color;
}
"""
# Fragment shader that mirrors the combined image vertically for final
# presentation.
#
# Fix: for a 1080-row image the valid rows are 0..1079, so the mirrored row
# is (1079 - y).  The previous (1080 - y) read one row out of bounds at
# y == 0 -- texelFetch with an out-of-range coordinate is undefined.
# NOTE(review): the 1080/1079 height is hard-coded to match the fixed
# "win-size 1920 1080" prc setting above.
flipFragment = """
#version 400
in vec2 texcoord;
out vec4 result;
uniform sampler2D sourceTex;
void main() {
    ivec2 coord = ivec2(gl_FragCoord.xy);
    result = texelFetch(sourceTex, ivec2(coord.x, 1079 - coord.y), 0);
    result.w = 1.0;
}
"""
# Compile the two post-process shaders; both reuse the fullscreen vertex
# shader defined above.
combineShader = Shader.make(Shader.SLGLSL, combineVertex, combineFragment)
flipShader = Shader.make(Shader.SLGLSL, combineVertex, flipFragment)
# Process: window-sized offscreen pass that runs the combine/dither shader
# over the packed tile buffer.
targetDither = RenderTarget("process")
targetDither.setSize(base.win.getXSize(), base.win.getYSize())
targetDither.addColorTexture()
targetDither.prepareOffscreenBuffer()
targetDither.setShader(combineShader)
targetDither.setShaderInput("colorTex", target.getColorTexture())
# NOTE(review): "dither.png" is loaded from the working directory; the
# shader samples it at x in 0..3 but guards x < 8 -- confirm the texture is
# at least 8x4 texels and single-channel thresholds.
targetDither.setShaderInput("ditherTex", loader.loadTexture("dither.png"))
targetDither.setShaderInput("imageDimensions", LVecBase2i(targetW, targetH))
# Flip vertically: the scene target presents the dithered result mirrored
# for final display.
target.setShader(flipShader)
target.setShaderInput("sourceTex", targetDither.getColorTexture())
# Animate camera rig | |
lerpTop = cameraRig.posInterval(1.2, Vec3(0, 0, 7), startPos=Vec3(0,0,2)) | |
lerpBot = cameraRig.posInterval(1.2, Vec3(0, 0, 2), startPos=Vec3(0,0,7)) | |
sequence = Sequence(lerpTop, lerpBot) | |
sequence.loop() | |
run() |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment