A Pen by Jon Brennecke.
<!--
sorry for all the ugly html but shaders are impossible in Jade without the include function,
which obviously doesn't work in CodePen.
check out github.com/jonbrennecke/portland-demo for a better way to do this with Jade's include statements!
-->
<div class="page-wrap"><div class="page"><div id="canvas-wrap"></div><div class="textbox-wrap"><div class="blurred"></div><div class="textbox"><h1 class="fa fa-map-marker"></h1><h1>Hello, Portland.</h1><p>I'm moving to Portland soon, and I'd like to connect with other developers and designers.
&nbsp;Do you represent a business in Portland? &nbsp;Are you looking for a talented front end engineer?
&nbsp;I'd love to get in touch. </p><p>Shoot me an email for a copy of my resume. &nbsp;I look forward to hearing from you.</p><div class="social-links"><a href="http://github.com/jonbrennecke" target="_blank" class="fa fa-github-alt"></a><a href="http://codepen.io/jonbrennecke" target="_blank" class="fa fa-codepen"></a><a href="mailto:jpbrennecke@gmail.com" target="_blank" class="fa fa-envelope-o"></a><a href="http://twitter.com/jonbrennecke" target="_blank" class="fa fa-twitter"></a></div></div></div></div></div>
<div class="shaders">
<!-- GLSL SHADERS ************** HERE THERE BE MONSTERS! -->
<script id="basic" type="x-shader/x-vertex">
/* basic vertex shader */
varying vec2 vUv;
void main()
{
vUv = vec2( uv.x, uv.y );
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
</script>
<script id="depth" type="x-shader/x-fragment">
/*
Basic Depth of Field shader
based off some much better DoF shaders:
https://github.com/mrdoob/three.js/issues/3182
http://blenderartists.org/forum/showthread.php?237488-GLSL-depth-of-field-with-bokeh-v2-4-(update)
http://jabtunes.com/labs/3d/dof/webgl_postprocessing_dof2.html
*/
#define PI 3.1415926
uniform sampler2D tDepth; // depth buffer
uniform sampler2D tRender; // render buffer
uniform float znear; // camera clipping near plane
uniform float zfar; // camera clipping far plane
uniform vec2 iResolution; // screen resolution
uniform float focalLength; // camera focal length
uniform float focalDepth; // camera focal depth
uniform float fstop; // camera fstop
uniform float dithering; // amount of dithering
uniform float maxblur; // maximum amount of blur
uniform float threshold; // highlight threshold
uniform float gain; // highlight gain
uniform float bias; // bokeh edge bias
uniform float fringe; // bokeh chromatic aberration / fringing,
varying vec2 vUv; // uv coords
// constants TODO should be const-qualified
vec2 texel = vec2(1.0/iResolution.x,1.0/iResolution.y);
float dbsize = 1.25; // depth blur size
const float CoC = 0.03; //circle of confusion size in mm (35mm film = 0.03mm)
const int rings = 3;
const int samples = 4;
const int maxringsamples = rings * samples;
// generating noise / pattern texture for dithering
vec2 rand(vec2 coord) {
float noiseX = ((fract(1.0-coord.s*(iResolution.x/2.0))*0.25)+(fract(coord.t*(iResolution.y/2.0))*0.75))*2.0-1.0;
float noiseY = ((fract(1.0-coord.s*(iResolution.x/2.0))*0.75)+(fract(coord.t*(iResolution.y/2.0))*0.25))*2.0-1.0;
// if (noise) {
// noiseX = clamp(fract(sin(dot(coord ,vec2(12.9898,78.233))) * 43758.5453),0.0,1.0)*2.0-1.0;
// noiseY = clamp(fract(sin(dot(coord ,vec2(12.9898,78.233)*2.0)) * 43758.5453),0.0,1.0)*2.0-1.0;
// }
return vec2(noiseX,noiseY);
}
// Depth buffer blur
// calculate the depth from a given set of coordinates
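// (a 3x3 Gaussian kernel, sampled at offsets of `dbsize` texels, smooths out
// hard edges in the depth buffer before it is used to drive the blur)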
float bdepth(vec2 coords) {
float d = 0.0, kernel[9];
vec2 offset[9], wh = vec2(texel.x, texel.y) * dbsize;
offset[0] = vec2(-wh.x,-wh.y);
offset[1] = vec2( 0.0, -wh.y);
offset[2] = vec2( wh.x, -wh.y);
offset[3] = vec2(-wh.x, 0.0);
offset[4] = vec2( 0.0, 0.0);
offset[5] = vec2( wh.x, 0.0);
offset[6] = vec2(-wh.x, wh.y);
offset[7] = vec2( 0.0, wh.y);
offset[8] = vec2( wh.x, wh.y);
kernel[0] = 1.0/16.0; kernel[1] = 2.0/16.0; kernel[2] = 1.0/16.0;
kernel[3] = 2.0/16.0; kernel[4] = 4.0/16.0; kernel[5] = 2.0/16.0;
kernel[6] = 1.0/16.0; kernel[7] = 2.0/16.0; kernel[8] = 1.0/16.0;
for( int i=0; i<9; i++ ) {
float tmp = texture2D(tDepth, coords + offset[i]).r;
d += tmp * kernel[i];
}
return d;
}
// processing the sample
vec3 color(vec2 coords,float blur) {
vec3 col = vec3(0.0);
// read from the render buffer at an offset
col.r = texture2D(tRender,coords + vec2(0.0,1.0)*texel*fringe*blur).r;
col.g = texture2D(tRender,coords + vec2(-0.866,-0.5)*texel*fringe*blur).g;
col.b = texture2D(tRender,coords + vec2(0.866,-0.5)*texel*fringe*blur).b;
vec3 lumcoeff = vec3(0.299,0.587,0.114); // Rec. 601 luma coefficients
float lum = dot(col.rgb, lumcoeff);
float thresh = max((lum-threshold)*gain, 0.0);
return col+mix(vec3(0.0),col,thresh*blur);
}
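/*
Gather one bokeh sample: the (i,j) pair indexes a point on the i-th concentric ring
(ringsamples points per ring). The sample colour is weighted towards the outer rings
by `bias`, and the same weight is returned so main() can normalise the running sum.
*/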
float gather(float i, float j, int ringsamples, inout vec3 col, float w, float h, float blur) {
float rings2 = float(rings);
float step = PI*2.0 / float(ringsamples);
float pw = cos(j*step)*i;
float ph = sin(j*step)*i;
float p = 1.0;
col += color(vUv.xy + vec2(pw*w,ph*h), blur) * mix(1.0, i/rings2, bias) * p;
return 1.0 * mix(1.0, i /rings2, bias) * p;
}
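// convert a value sampled from the (non-linear) depth buffer back to linear
// eye-space depth, using the camera's near/far clipping planes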
float linearize(float depth) {
return -zfar * znear / (depth * (zfar - znear) - zfar);
}
void main(void)
{
float depth = linearize(bdepth(vUv.xy));
float f = focalLength; // focal length in mm,
float d = focalDepth*1000.0; // focal plane in mm,
float o = depth*1000.0; // depth in mm,
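// thin-lens style blur factor (as in the reference shaders linked above):
// a and b are roughly the lens-to-image distances for the fragment depth and the
// focal plane, c scales their difference by the aperture (fstop) and the circle
// of confusion, and the result is clamped to [0,1]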
float a = (o*f)/(o-f);
float b = (d*f)/(d-f);
float c = (d-f)/(d*fstop*CoC);
float blur = clamp(abs(a-b)*c,0.0,1.0);
// calculation of pattern for dithering
vec2 noise = rand(vUv.xy)*dithering*blur;
// getting blur x and y step factor
float w = (1.0/iResolution.x)*blur*maxblur+noise.x;
float h = (1.0/iResolution.y)*blur*maxblur+noise.y;
// calculation of final color,
vec3 col = texture2D(tRender, vUv.xy).rgb;
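// if the fragment is out of focus, accumulate bokeh samples over concentric
// rings (i * samples points on the i-th ring) and normalise by the summed weights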
if ( blur >= 0.05 ) {
float s = 1.0;
int ringsamples;
for (int i = 1; i <= rings; i++) {
ringsamples = i * samples;
for (int j = 0 ; j < maxringsamples ; j++) {
if (j >= ringsamples) break;
s += gather(float(i), float(j), ringsamples, col, w, h, blur);
}
}
col /= s; //divide by sample count
}
gl_FragColor = vec4(col,1.0);
}
</script>
<script id="mapfs" type="x-shader/x-fragment">
uniform sampler2D tElevation, tSatellite;
uniform vec3 pointLightColor[MAX_POINT_LIGHTS];
uniform vec3 pointLightPosition[MAX_POINT_LIGHTS];
uniform float pointLightIntensity[MAX_POINT_LIGHTS];
varying vec2 vUv;
varying vec3 vertPos, vNormal, vTangent, vBinormal, vElev;
const float specIntensity = 1.0;
void main() {
vec4 color = texture2D(tSatellite,vUv);
// convert the normal maps (in tangent-space) to eye-space
vec3 tanNormal = normalize( (vTangent * vElev.x) + (vBinormal * vElev.y) + (vNormal * vElev.z) );
vec4 sumLights = vec4(0.0,0.0,0.0,1.0);
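// per-fragment lighting: for each point light, a Lambert diffuse term plus a
// Phong-style specular term (reflected about the eye-space bump normal),
// accumulated into sumLights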
for( int i = 0; i < MAX_POINT_LIGHTS; ++i)
{
vec3 lightDir = normalize(pointLightPosition[i]-vertPos);
vec3 reflectDir = reflect( -lightDir, tanNormal);
vec3 viewDir = normalize( - vertPos );
float lambertian = max( dot( lightDir, vNormal ), 0.1 );
float specular = 0.0;
if( lambertian > 0.0 ) {
float specAngle = max( dot( reflectDir, viewDir ), 0.01 );
specular = pow(specAngle, 4.0);
}
sumLights.rgb += clamp(lambertian * color.xyz + specular * pointLightColor[i] * pointLightIntensity[i],0.0,1.0);
}
gl_FragColor = sumLights;
}
</script>
<script id="mapvs" type="x-shader/x-vertex">
uniform sampler2D tElevation, tSatellite;
uniform vec2 iTexResolution;
uniform float heightIntensity;
varying vec2 vUv;
varying vec3 vertPos, vNormal, vTangent, vBinormal, vElev;
void main() {
vUv = uv;
// the elevation is already a normal map, yay!
vec3 norm = normalize(texture2D(tElevation, vUv).rgb);
vNormal = normalize( normalMatrix * norm );
vTangent = normalize( normalMatrix * position );
vBinormal = normalize( cross( norm, vTangent ) );
// displace each vertex along its normal, scaled by the sampled elevation map
gl_Position = projectionMatrix * modelViewMatrix * vec4( norm * normal + position, 1.0 );
vertPos = (modelViewMatrix * vec4( norm * normal + position, 1.0)).xyz;
}
</script>
</div>

Portland, OR

I'm moving to Portland! In honor of this awesome event, here are some 3D topologized satellite maps of Portland with a custom DOF shader!

A Pen by Jon Brennecke on CodePen.

License.

$(document).ready( function () {
$("#hide-text").click(function(){
$(".textbox-wrap").animate({right:"-60%"},500)
});
// allow us to load cross-origin image textures from imgur
THREE.ImageUtils.crossOrigin = "anonymous";
/*
There are a lot of globals going on here... usually that's not a good thing; however, it
really helps to be able to access all the scene elements.
*/
var parent = $('#canvas-wrap'),
height = parent.height(),
width = parent.width(),
// blurred scene objects
blurred_elem = $(".blurred"),
blurred = {
elem : blurred_elem,
offset : blurred_elem.offset(),
width : blurred_elem.width()|0,
height : blurred_elem.height()|0,
},
// THREE.js scene objects
scene = new THREE.Scene(),
camera = new THREE.PerspectiveCamera( 50, width / height, 1, 10 ),
renderer = new THREE.WebGLRenderer( { antialias : true, alpha : true, preserveDrawingBuffer : true } ),
clock = new THREE.Clock(true),
// for the blurred textbox background setup function
gl = renderer.getContext(),
buffer = new Uint8Array(blurred.width*blurred.height*4),
// depth scene and camera
depth = {
material : new THREE.MeshDepthMaterial(),
renderTarget : undefined,
},
// trackball controls
controls,
// effects composers
effectComposer,
composer,
// image links
images = {
elev : "http://i.imgur.com/2y2xvBq.jpg",
sat : "http://i.imgur.com/rex5rzk.jpg",
},
/*
fragment and vertex shaders are loaded from the html file where Jade pulls them from
their own *.fs and *.vs files.
*/
shaders = {
// map material shader
map : {
uniforms : {
heightIntensity : { type : "f", value : 0.5 },
pointLightColor : { type : "v3v", value: [] },
pointLightPosition : { type : "v3v", value: [] },
pointLightIntensity : { type : "fv1", value: [] },
tSatellite: { type: "t", texture: null, value : THREE.ImageUtils.loadTexture(images.sat,THREE.UVMapping, function (tex){
// this is bad programming and might create a race condition.
// TODO clean this up
// TODO go to sleep
shaders.map.uniforms.iTexResolution.value.set(tex.image.width,tex.image.height);
})},
tElevation: { type: "t", texture: null, value : THREE.ImageUtils.loadTexture(images.elev) },
iTexResolution : { type : "v2", value : new THREE.Vector2(0.0,0.0) }
},
vertexShader : $("#mapvs")[0].innerText,
fragmentShader : $("#mapfs")[0].innerText,
shading : THREE.SmoothShading,
},
// generates depth field as texture
depth : {
uniforms : {
tDepth : { type: "t", texture: null },
tRender : { type: "t", texture: null },
znear : { type: "f", value : camera.near },
zfar : { type: "f", value : camera.far },
iResolution : { type: "v2", value : new THREE.Vector2(width,height) },
focalDepth : { type: "f", value: 10.5 },
focalLength : { type: "f", value: 2.5 },
fstop: { type: "f", value: 0.5 },
dithering : { type: "f", value: 0.0001 },
maxblur : { type: "f", value: 4.0 },
threshold : { type: "f", value: 4 },
gain : { type: "f", value: 1.0 },
bias : { type: "f", value: 1.0 },
fringe : { type: "f", value: 7 },
},
vertexShader : $("#basic")[0].innerText,
fragmentShader : $("#depth")[0].innerText
},
},
blendPass, // needs to be global so we can modify uniforms in the render loop
tack; // tack model imported from blender
function initShaders () {
/*
load the shaders and set up some render targets and composers
*/
// render target parameters
var renderTargetParameters = {
minFilter: THREE.LinearFilter,
magFilter: THREE.LinearFilter,
format: THREE.RGBAFormat,
stencilBuffer: false
},
renderTargetBloom = new THREE.WebGLRenderTarget( width, height, renderTargetParameters ),
renderEffectsPass = new THREE.RenderPass( scene, camera);
effectComposer = new THREE.EffectComposer( renderer, renderTargetBloom );
effectComposer.addPass( renderEffectsPass );
var renderTarget = new THREE.WebGLRenderTarget( width, height, renderTargetParameters );
composer = new THREE.EffectComposer( renderer, renderTarget );
var renderPass = new THREE.RenderPass( scene, camera );
composer.addPass( renderPass );
// render target to generate a depth buffer
// could make this another pass, instead of another renderTarget
depth.renderTarget = new THREE.WebGLRenderTarget( width, height, renderTargetParameters );
shaders.depth.uniforms.tRender.value = effectComposer.renderTarget2;
shaders.depth.uniforms.tDepth.value = depth.renderTarget;
var depthPass = new THREE.ShaderPass( shaders.depth );
composer.addPass( depthPass );
depthPass.renderToScreen = true;
}
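/*
Note on the pass chain set up above: effectComposer renders the scene into its second
render target, which the depth-of-field shader reads as tRender; depth.renderTarget is
filled every frame in render() (by re-rendering the scene with MeshDepthMaterial) and is
read as tDepth. The final ShaderPass on `composer` combines the two and draws to screen.
*/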
function setupBlurredBackground () {
/*
"In which a text box background is blurred through very complicated means"
First the 3D webgl canvas is saved to a framebuffer, then the pixel framebuffer
data is read into a 2D canvas, which is blurred by CSS.
It's worth noting that something like:
blurred.elem.css({ "background-image" : "url(" + renderer.domElement.toDataURL() + ")" });
is a much easier way to do this, but it's really slow for larger screens.
*/
var fb = gl.createFramebuffer(),
texture = gl.createTexture();
// set up a framebuffer and texture to render the scene into
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
fb.height = height;
fb.width = width;
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, width, height, 0, gl.RGB, gl.UNSIGNED_BYTE, null);
// attach the texture to the framebuffer.
gl.framebufferTexture2D(
gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0,
gl.TEXTURE_2D, texture, 0);
// check if the texture is readable
if (gl.checkFramebufferStatus(gl.FRAMEBUFFER) == gl.FRAMEBUFFER_COMPLETE) {
renderer.render( scene, camera )
// read pixels from the main scene into an array buffer
gl.readPixels(blurred.offset.left|0,blurred.offset.top|0,blurred.width,blurred.height,gl.RGBA,gl.UNSIGNED_BYTE,buffer);
// place the raw pixels into the blurred canvas
// (in WebGL/OpenGL coords, the origin is in bottom-left not the top left)
var i = blurred.height, k = 0;
while( --i )
{
for (var j = 0; j < blurred.width*4; j++, k++) {
blurred.imageData.data[k] = buffer[i*blurred.width*4+j];
}
}
blurred.ctx.putImageData(blurred.imageData,0,0,0,0,blurred.width,blurred.height);
}
gl.bindTexture(gl.TEXTURE_2D, null);
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
}
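/*
The simpler alternative mentioned above, kept here only as a sketch: it re-encodes the
whole canvas as a data URL on every call, which is why readPixels is used instead.
The function name is illustrative and it is never called in this pen.
*/
function setupBlurredBackgroundSimple () {
blurred.elem.css({ "background-image" : "url(" + renderer.domElement.toDataURL() + ")" });
}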
// init scene
function init () {
/*
initiate all the scene elements and GL objects
*/
// setup the canvas for the blurred textbox background
blurred.canvas = $("<canvas/>").appendTo(blurred.elem)[0];
// blurred.canvas.width = blurred.width;
blurred.canvas.height = height;
blurred.canvas.style.width = blurred.width + "px";
blurred.canvas.style.height = blurred.height + "px";
blurred.ctx = blurred.canvas.getContext("2d");
blurred.imageData = blurred.ctx.getImageData(0,0,blurred.width,blurred.height);
// setup the renderer for the main scene
renderer.setSize( width, height );
renderer.shadowMapEnabled = true;
renderer.setClearColor( 0x000000, 0 );
$( renderer.domElement ).appendTo( parent);
camera.position.set(0,-3,2.5);
camera.lookAt(new THREE.Vector3(0,0,0));
// trackball controls
controls = new THREE.TrackballControls( camera );
controls.rotateSpeed = 1.0;
controls.zoomSpeed = 1.2;
controls.panSpeed = 0.8;
controls.noZoom = false;
controls.noPan = false;
controls.staticMoving = true;
controls.dynamicDampingFactor = 0.3;
controls.keys = [ 65, 83, 68 ];
// controls.addEventListener('change', setupBlurredBackground);
// load the tack mark
var loader = new THREE.JSONLoader();
loader.load( "http://codepen.io/jonbrennecke/pen/xaFsm.js",
function (geometry) {
var material = new THREE.MeshLambertMaterial({
color : 0xb1926c,
shading: THREE.SmoothShading,
side : THREE.DoubleSide,
shininess : 10
});
tack = new THREE.Mesh(geometry, material);
tack.position.set(2,0,0.5);
tack.rotation.set(20,0,0);
tack.scale.set(0.15,0.15,0.15);
// scene.add(tack);
});
var plane = new THREE.Mesh(
new THREE.PlaneGeometry( 8, 5, 100, 100 ), // the generated jpgs are both 4.8x3
new THREE.ShaderMaterial( shaders.map ));
plane.position.set(0,0,0);
plane.rotation.set(0,0,3);
// return random numbers between max and min
function rand( max, min ) { return Math.random() * ( max - min ) + min; }
// particle system geomety
var geometry = new THREE.Geometry();
for (var i = 0; i < 10000; i++) {
geometry.vertices.push( new THREE.Vector3(
rand(5,-5), rand(5,-5), rand(5,-5)
));
geometry.colors[i] = new THREE.Color(0xC2B49A);
}
// particle system
var psys = new THREE.PointCloud(geometry, new THREE.PointCloudMaterial({
size : 0.025,
vertexColors : true
}));
psys.sortParticles = true;
// lights
var l1 = new THREE.PointLight( 0xffa878, 1.0 ),
l2 = new THREE.PointLight( 0xb454ab, 0.5 ),
l3 = new THREE.PointLight( 0x516fbd, 0.5 );
l1.position.set(0,0,15);
l2.position.set(15,0,0);
l3.position.set(0,15,0);
shaders.map.uniforms.pointLightColor.value = [
new THREE.Vector3(l1.color.r,l1.color.g,l1.color.b),
new THREE.Vector3(l2.color.r,l2.color.g,l2.color.b),
new THREE.Vector3(l3.color.r,l3.color.g,l3.color.b)];
shaders.map.uniforms.pointLightPosition.value = [ l1.position, l2.position, l3.position ];
shaders.map.uniforms.pointLightIntensity.value = [ l1.intensity, l2.intensity, l3.intensity ];
scene.add(plane);
scene.add(psys);
scene.add(l1);
scene.add(l2);
scene.add(l3);
}
window.addEventListener( 'resize', onWindowResize, false );
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
controls.handleResize();
// blurred.offset = blurred.elem.offset();
// blurred.width = blurred.elem.width()|0;
// blurred.height = blurred.elem.height()|0;
// blurred.canvas.width = blurred.width;
// blurred.canvas.height = blurred.height;
// blurred.ctx.resize(blurred.width,blurred.height);
}
function render () {
// render depth data to a buffer
scene.overrideMaterial = depth.material;
renderer.render( scene, camera, depth.renderTarget, true );
// blendPass.material.uniforms.iGlobalTime.value = clock.getElapsedTime();
effectComposer.render();
composer.render();
setupBlurredBackground();
}
function animate () {
requestAnimationFrame(animate);
controls.update();
render();
}
/*
Alright! We're almost done here...
all that's left is to run the setup functions and start the animation!
*/
init();
initShaders();
animate();
});
@import "compass/css3"
@import "compass/reset"
@import "compass/css3/transition"
@import url("http://fonts.googleapis.com/css?family=Montserrat")
@import url("http://maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css")
$egg: #C2B49A
$text-color: #fff
$highlight: #e06b8c
html, body
position: absolute
width: 100%
height: 100%
#canvas-wrap
position: absolute
width: 100%
height: 100%
.blurred
-webkit-filter: blur(0px)
width: 100%
height: 100%
display: none
/* canvas */
/* margin-top: -3em */
/*canvas
position: absolute
top: 0
left: 0
width: 100%
height: 100%*/
.textbox-wrap
position: absolute
top: 0
width: 35%
height: 100%
right: 0
box-sizing: border-box
text-align: center
overflow: hidden
background: #fa565a
z-index: 2
.textbox
height: 100%
background: rgba($egg,0.25)
padding: 4em 4em
font-family: "Montserrat", sans-serif
h1
color: #fff
font-size: 24pt
&.fa.fa-map-marker
font-size: 48pt
padding: 0.25em
&.fa.fa-times
position: absolute
top: 1em
left: 1em
cursor: pointer
p
font-size: 12pt
line-height: 2em
padding: 1.5em
color: $text-color
.social-links
a
color: #fff
font-size: 24pt
padding: 0.5em
text-decoration: none