Commit 402804ac authored by Garrett Johnson

jsm updates

Parent 900da918
@@ -8,19 +8,19 @@ import {
 var DepthLimitedBlurShader = {
 defines: {
-'KERNEL_RADIUS': 4,
+"KERNEL_RADIUS": 4,
-'DEPTH_PACKING': 1,
+"DEPTH_PACKING": 1,
-'PERSPECTIVE_CAMERA': 1
+"PERSPECTIVE_CAMERA": 1
 },
 uniforms: {
-'tDiffuse': { value: null },
+"tDiffuse": { value: null },
-'size': { value: new Vector2( 512, 512 ) },
+"size": { value: new Vector2( 512, 512 ) },
-'sampleUvOffsets': { value: [ new Vector2( 0, 0 ) ] },
+"sampleUvOffsets": { value: [ new Vector2( 0, 0 ) ] },
-'sampleWeights': { value: [ 1.0 ] },
+"sampleWeights": { value: [ 1.0 ] },
-'tDepth': { value: null },
+"tDepth": { value: null },
-'cameraNear': { value: 10 },
+"cameraNear": { value: 10 },
-'cameraFar': { value: 1000 },
+"cameraFar": { value: 1000 },
-'depthCutoff': { value: 10 },
+"depthCutoff": { value: 10 },
 },
 vertexShader: [
 "#include <common>",
@@ -153,9 +153,9 @@ var BlurShaderUtils = {
 configure: function ( material, kernelRadius, stdDev, uvIncrement ) {
-material.defines[ 'KERNEL_RADIUS' ] = kernelRadius;
+material.defines[ "KERNEL_RADIUS" ] = kernelRadius;
-material.uniforms[ 'sampleUvOffsets' ].value = BlurShaderUtils.createSampleOffsets( kernelRadius, uvIncrement );
+material.uniforms[ "sampleUvOffsets" ].value = BlurShaderUtils.createSampleOffsets( kernelRadius, uvIncrement );
-material.uniforms[ 'sampleWeights' ].value = BlurShaderUtils.createSampleWeights( kernelRadius, stdDev );
+material.uniforms[ "sampleWeights" ].value = BlurShaderUtils.createSampleWeights( kernelRadius, stdDev );
 material.needsUpdate = true;
 }
...
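For reference, DepthLimitedBlurShader and BlurShaderUtils are normally consumed together when a depth-limited separable blur pass is built (SAOPass does this). The snippet below is a minimal sketch, not code from this commit; the import paths, kernel radius, and standard deviation are illustrative placeholders.

import { ShaderMaterial, UniformsUtils, Vector2 } from "three";
import { BlurShaderUtils, DepthLimitedBlurShader } from "./DepthLimitedBlurShader.js";

// Material for a vertical blur pass; clone the uniforms so several passes can coexist.
const vBlurMaterial = new ShaderMaterial( {
	uniforms: UniformsUtils.clone( DepthLimitedBlurShader.uniforms ),
	defines: Object.assign( {}, DepthLimitedBlurShader.defines ),
	vertexShader: DepthLimitedBlurShader.vertexShader,
	fragmentShader: DepthLimitedBlurShader.fragmentShader
} );

// Recompute KERNEL_RADIUS, sampleUvOffsets and sampleWeights whenever the blur settings change.
BlurShaderUtils.configure( vBlurMaterial, 8, 4, new Vector2( 0, 1 ) );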
@@ -39,7 +39,7 @@ var DigitalGlitch = {
 ].join( "\n" ),
 fragmentShader: [
-"uniform int byp;",//should we apply the glitch ?
+"uniform int byp;", //should we apply the glitch ?
 "uniform sampler2D tDiffuse;",
 "uniform sampler2D tDisp;",
...
@@ -1114,7 +1114,7 @@ var FXAAShader = {
 " // TODO avoid querying texture twice for same texel",
 " gl_FragColor.a = texture2D(tDiffuse, vUv).a;",
 "}"
-].join("\n")
+].join( "\n" )
 };
...
@@ -33,8 +33,8 @@ var HalftoneShader = {
 "void main() {",
-"vUV = uv;",
+" vUV = uv;",
-"gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);",
+" gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);",
 "}"
...
@@ -35,7 +35,7 @@ var LuminosityHighPassShader = {
 "}"
-].join("\n"),
+].join( "\n" ),
 fragmentShader: [
@@ -63,7 +63,7 @@ var LuminosityHighPassShader = {
 "}"
-].join("\n")
+].join( "\n" )
 };
...
@@ -6,11 +6,11 @@
 var ParallaxShader = {
 // Ordered from fastest to best quality.
 modes: {
-none: 'NO_PARALLAX',
+none: "NO_PARALLAX",
-basic: 'USE_BASIC_PARALLAX',
+basic: "USE_BASIC_PARALLAX",
-steep: 'USE_STEEP_PARALLAX',
+steep: "USE_STEEP_PARALLAX",
-occlusion: 'USE_OCLUSION_PARALLAX', // a.k.a. POM
+occlusion: "USE_OCLUSION_PARALLAX", // a.k.a. POM
-relief: 'USE_RELIEF_PARALLAX'
+relief: "USE_RELIEF_PARALLAX"
 },
 uniforms: {
...
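The modes map above is meant to be turned into a single preprocessor define on the material, so that the matching #ifdef branch of the fragment shader is compiled in. A rough sketch, assuming the usual ShaderMaterial setup (the import path and the chosen mode are placeholders):

import { ShaderMaterial, UniformsUtils } from "three";
import { ParallaxShader } from "./ParallaxShader.js";

const material = new ShaderMaterial( {
	uniforms: UniformsUtils.clone( ParallaxShader.uniforms ),
	vertexShader: ParallaxShader.vertexShader,
	fragmentShader: ParallaxShader.fragmentShader
} );

// An empty-string value produces a bare "#define USE_RELIEF_PARALLAX" in the compiled shader.
material.defines[ ParallaxShader.modes.relief ] = "";
material.needsUpdate = true;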
@@ -9,32 +9,32 @@ import {
 var SAOShader = {
 defines: {
-'NUM_SAMPLES': 7,
+"NUM_SAMPLES": 7,
-'NUM_RINGS': 4,
+"NUM_RINGS": 4,
-'NORMAL_TEXTURE': 0,
+"NORMAL_TEXTURE": 0,
-'DIFFUSE_TEXTURE': 0,
+"DIFFUSE_TEXTURE": 0,
-'DEPTH_PACKING': 1,
+"DEPTH_PACKING": 1,
-'PERSPECTIVE_CAMERA': 1
+"PERSPECTIVE_CAMERA": 1
 },
 uniforms: {
-'tDepth': { value: null },
+"tDepth": { value: null },
-'tDiffuse': { value: null },
+"tDiffuse": { value: null },
-'tNormal': { value: null },
+"tNormal": { value: null },
-'size': { value: new Vector2( 512, 512 ) },
+"size": { value: new Vector2( 512, 512 ) },
-'cameraNear': { value: 1 },
+"cameraNear": { value: 1 },
-'cameraFar': { value: 100 },
+"cameraFar": { value: 100 },
-'cameraProjectionMatrix': { value: new Matrix4() },
+"cameraProjectionMatrix": { value: new Matrix4() },
-'cameraInverseProjectionMatrix': { value: new Matrix4() },
+"cameraInverseProjectionMatrix": { value: new Matrix4() },
-'scale': { value: 1.0 },
+"scale": { value: 1.0 },
-'intensity': { value: 0.1 },
+"intensity": { value: 0.1 },
-'bias': { value: 0.5 },
+"bias": { value: 0.5 },
-'minResolution': { value: 0.0 },
+"minResolution": { value: 0.0 },
-'kernelRadius': { value: 100.0 },
+"kernelRadius": { value: 100.0 },
-'randomSeed': { value: 0.0 }
+"randomSeed": { value: 0.0 }
 },
 vertexShader: [
 "varying vec2 vUv;",
...
This diff is collapsed.
@@ -21,311 +21,311 @@ var VolumeRenderShader1 = {
 "u_cmdata": { value: null }
 },
 vertexShader: [
-'varying vec4 v_nearpos;',
+" varying vec4 v_nearpos;",
-'varying vec4 v_farpos;',
+" varying vec4 v_farpos;",
-'varying vec3 v_position;',
+" varying vec3 v_position;",
-'mat4 inversemat(mat4 m) {',
+" mat4 inversemat(mat4 m) {",
 // Taken from https://github.com/stackgl/glsl-inverse/blob/master/index.glsl
 // This function is licenced by the MIT license to Mikola Lysenko
-'float',
+" float",
-'a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3],',
+" a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3],",
-'a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3],',
+" a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3],",
-'a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3],',
+" a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3],",
-'a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3],',
+" a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3],",
-'b00 = a00 * a11 - a01 * a10,',
+" b00 = a00 * a11 - a01 * a10,",
-'b01 = a00 * a12 - a02 * a10,',
+" b01 = a00 * a12 - a02 * a10,",
-'b02 = a00 * a13 - a03 * a10,',
+" b02 = a00 * a13 - a03 * a10,",
-'b03 = a01 * a12 - a02 * a11,',
+" b03 = a01 * a12 - a02 * a11,",
-'b04 = a01 * a13 - a03 * a11,',
+" b04 = a01 * a13 - a03 * a11,",
-'b05 = a02 * a13 - a03 * a12,',
+" b05 = a02 * a13 - a03 * a12,",
-'b06 = a20 * a31 - a21 * a30,',
+" b06 = a20 * a31 - a21 * a30,",
-'b07 = a20 * a32 - a22 * a30,',
+" b07 = a20 * a32 - a22 * a30,",
-'b08 = a20 * a33 - a23 * a30,',
+" b08 = a20 * a33 - a23 * a30,",
-'b09 = a21 * a32 - a22 * a31,',
+" b09 = a21 * a32 - a22 * a31,",
-'b10 = a21 * a33 - a23 * a31,',
+" b10 = a21 * a33 - a23 * a31,",
-'b11 = a22 * a33 - a23 * a32,',
+" b11 = a22 * a33 - a23 * a32,",
-'det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;',
+" det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;",
-'return mat4(',
+" return mat4(",
-'a11 * b11 - a12 * b10 + a13 * b09,',
+" a11 * b11 - a12 * b10 + a13 * b09,",
-'a02 * b10 - a01 * b11 - a03 * b09,',
+" a02 * b10 - a01 * b11 - a03 * b09,",
-'a31 * b05 - a32 * b04 + a33 * b03,',
+" a31 * b05 - a32 * b04 + a33 * b03,",
-'a22 * b04 - a21 * b05 - a23 * b03,',
+" a22 * b04 - a21 * b05 - a23 * b03,",
-'a12 * b08 - a10 * b11 - a13 * b07,',
+" a12 * b08 - a10 * b11 - a13 * b07,",
-'a00 * b11 - a02 * b08 + a03 * b07,',
+" a00 * b11 - a02 * b08 + a03 * b07,",
-'a32 * b02 - a30 * b05 - a33 * b01,',
+" a32 * b02 - a30 * b05 - a33 * b01,",
-'a20 * b05 - a22 * b02 + a23 * b01,',
+" a20 * b05 - a22 * b02 + a23 * b01,",
-'a10 * b10 - a11 * b08 + a13 * b06,',
+" a10 * b10 - a11 * b08 + a13 * b06,",
-'a01 * b08 - a00 * b10 - a03 * b06,',
+" a01 * b08 - a00 * b10 - a03 * b06,",
-'a30 * b04 - a31 * b02 + a33 * b00,',
+" a30 * b04 - a31 * b02 + a33 * b00,",
-'a21 * b02 - a20 * b04 - a23 * b00,',
+" a21 * b02 - a20 * b04 - a23 * b00,",
-'a11 * b07 - a10 * b09 - a12 * b06,',
+" a11 * b07 - a10 * b09 - a12 * b06,",
-'a00 * b09 - a01 * b07 + a02 * b06,',
+" a00 * b09 - a01 * b07 + a02 * b06,",
-'a31 * b01 - a30 * b03 - a32 * b00,',
+" a31 * b01 - a30 * b03 - a32 * b00,",
-'a20 * b03 - a21 * b01 + a22 * b00) / det;',
+" a20 * b03 - a21 * b01 + a22 * b00) / det;",
-'}',
+" }",
-'void main() {',
+" void main() {",
 // Prepare transforms to map to "camera view". See also:
 // https://threejs.org/docs/#api/renderers/webgl/WebGLProgram
-'mat4 viewtransformf = viewMatrix;',
+" mat4 viewtransformf = viewMatrix;",
-'mat4 viewtransformi = inversemat(viewMatrix);',
+" mat4 viewtransformi = inversemat(viewMatrix);",
 // Project local vertex coordinate to camera position. Then do a step
 // backward (in cam coords) to the near clipping plane, and project back. Do
 // the same for the far clipping plane. This gives us all the information we
 // need to calculate the ray and truncate it to the viewing cone.
-'vec4 position4 = vec4(position, 1.0);',
+" vec4 position4 = vec4(position, 1.0);",
-'vec4 pos_in_cam = viewtransformf * position4;',
+" vec4 pos_in_cam = viewtransformf * position4;",
 // Intersection of ray and near clipping plane (z = -1 in clip coords)
-'pos_in_cam.z = -pos_in_cam.w;',
+" pos_in_cam.z = -pos_in_cam.w;",
-'v_nearpos = viewtransformi * pos_in_cam;',
+" v_nearpos = viewtransformi * pos_in_cam;",
 // Intersection of ray and far clipping plane (z = +1 in clip coords)
-'pos_in_cam.z = pos_in_cam.w;',
+" pos_in_cam.z = pos_in_cam.w;",
-'v_farpos = viewtransformi * pos_in_cam;',
+" v_farpos = viewtransformi * pos_in_cam;",
 // Set varyings and output pos
-'v_position = position;',
+" v_position = position;",
-'gl_Position = projectionMatrix * viewMatrix * modelMatrix * position4;',
+" gl_Position = projectionMatrix * viewMatrix * modelMatrix * position4;",
-'}',
+" }",
-].join( '\n' ),
+].join( "\n" ),
 fragmentShader: [
-'precision highp float;',
+" precision highp float;",
-'precision mediump sampler3D;',
+" precision mediump sampler3D;",
-'uniform vec3 u_size;',
+" uniform vec3 u_size;",
-'uniform int u_renderstyle;',
+" uniform int u_renderstyle;",
-'uniform float u_renderthreshold;',
+" uniform float u_renderthreshold;",
-'uniform vec2 u_clim;',
+" uniform vec2 u_clim;",
-'uniform sampler3D u_data;',
+" uniform sampler3D u_data;",
-'uniform sampler2D u_cmdata;',
+" uniform sampler2D u_cmdata;",
-'varying vec3 v_position;',
+" varying vec3 v_position;",
-'varying vec4 v_nearpos;',
+" varying vec4 v_nearpos;",
-'varying vec4 v_farpos;',
+" varying vec4 v_farpos;",
 // The maximum distance through our rendering volume is sqrt(3).
-'const int MAX_STEPS = 887; // 887 for 512^3, 1774 for 1024^3',
+" const int MAX_STEPS = 887; // 887 for 512^3, 1774 for 1024^3",
-'const int REFINEMENT_STEPS = 4;',
+" const int REFINEMENT_STEPS = 4;",
-'const float relative_step_size = 1.0;',
+" const float relative_step_size = 1.0;",
-'const vec4 ambient_color = vec4(0.2, 0.4, 0.2, 1.0);',
+" const vec4 ambient_color = vec4(0.2, 0.4, 0.2, 1.0);",
-'const vec4 diffuse_color = vec4(0.8, 0.2, 0.2, 1.0);',
+" const vec4 diffuse_color = vec4(0.8, 0.2, 0.2, 1.0);",
-'const vec4 specular_color = vec4(1.0, 1.0, 1.0, 1.0);',
+" const vec4 specular_color = vec4(1.0, 1.0, 1.0, 1.0);",
-'const float shininess = 40.0;',
+" const float shininess = 40.0;",
-'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',
+" void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);",
-'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',
+" void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);",
-'float sample1(vec3 texcoords);',
+" float sample1(vec3 texcoords);",
-'vec4 apply_colormap(float val);',
+" vec4 apply_colormap(float val);",
-'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray);',
+" vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray);",
-'void main() {',
+" void main() {",
 // Normalize clipping plane info
-'vec3 farpos = v_farpos.xyz / v_farpos.w;',
+" vec3 farpos = v_farpos.xyz / v_farpos.w;",
-'vec3 nearpos = v_nearpos.xyz / v_nearpos.w;',
+" vec3 nearpos = v_nearpos.xyz / v_nearpos.w;",
 // Calculate unit vector pointing in the view direction through this fragment.
-'vec3 view_ray = normalize(nearpos.xyz - farpos.xyz);',
+" vec3 view_ray = normalize(nearpos.xyz - farpos.xyz);",
 // Compute the (negative) distance to the front surface or near clipping plane.
 // v_position is the back face of the cuboid, so the initial distance calculated in the dot
 // product below is the distance from near clip plane to the back of the cuboid
-'float distance = dot(nearpos - v_position, view_ray);',
+" float distance = dot(nearpos - v_position, view_ray);",
-'distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,',
+" distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,",
-'(u_size.x - 0.5 - v_position.x) / view_ray.x));',
+" (u_size.x - 0.5 - v_position.x) / view_ray.x));",
-'distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,',
+" distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,",
-'(u_size.y - 0.5 - v_position.y) / view_ray.y));',
+" (u_size.y - 0.5 - v_position.y) / view_ray.y));",
-'distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,',
+" distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,",
-'(u_size.z - 0.5 - v_position.z) / view_ray.z));',
+" (u_size.z - 0.5 - v_position.z) / view_ray.z));",
 // Now we have the starting position on the front surface
-'vec3 front = v_position + view_ray * distance;',
+" vec3 front = v_position + view_ray * distance;",
 // Decide how many steps to take
-'int nsteps = int(-distance / relative_step_size + 0.5);',
+" int nsteps = int(-distance / relative_step_size + 0.5);",
-'if ( nsteps < 1 )',
+" if ( nsteps < 1 )",
-'discard;',
+" discard;",
 // Get starting location and step vector in texture coordinates
-'vec3 step = ((v_position - front) / u_size) / float(nsteps);',
+" vec3 step = ((v_position - front) / u_size) / float(nsteps);",
-'vec3 start_loc = front / u_size;',
+" vec3 start_loc = front / u_size;",
 // For testing: show the number of steps. This helps to establish
 // whether the rays are correctly oriented
 //'gl_FragColor = vec4(0.0, float(nsteps) / 1.0 / u_size.x, 1.0, 1.0);',
 //'return;',
-'if (u_renderstyle == 0)',
+" if (u_renderstyle == 0)",
-'cast_mip(start_loc, step, nsteps, view_ray);',
+" cast_mip(start_loc, step, nsteps, view_ray);",
-'else if (u_renderstyle == 1)',
+" else if (u_renderstyle == 1)",
-'cast_iso(start_loc, step, nsteps, view_ray);',
+" cast_iso(start_loc, step, nsteps, view_ray);",
-'if (gl_FragColor.a < 0.05)',
+" if (gl_FragColor.a < 0.05)",
-'discard;',
+" discard;",
-'}',
+" }",
-'float sample1(vec3 texcoords) {',
+" float sample1(vec3 texcoords) {",
-'/* Sample float value from a 3D texture. Assumes intensity data. */',
+" /* Sample float value from a 3D texture. Assumes intensity data. */",
-'return texture(u_data, texcoords.xyz).r;',
+" return texture(u_data, texcoords.xyz).r;",
-'}',
+" }",
-'vec4 apply_colormap(float val) {',
+" vec4 apply_colormap(float val) {",
-'val = (val - u_clim[0]) / (u_clim[1] - u_clim[0]);',
+" val = (val - u_clim[0]) / (u_clim[1] - u_clim[0]);",
-'return texture2D(u_cmdata, vec2(val, 0.5));',
+" return texture2D(u_cmdata, vec2(val, 0.5));",
-'}',
+" }",
-'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',
+" void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {",
-'float max_val = -1e6;',
+" float max_val = -1e6;",
-'int max_i = 100;',
+" int max_i = 100;",
-'vec3 loc = start_loc;',
+" vec3 loc = start_loc;",
 // Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
 // non-constant expression. So we use a hard-coded max, and an additional condition
 // inside the loop.
-'for (int iter=0; iter<MAX_STEPS; iter++) {',
+" for (int iter=0; iter<MAX_STEPS; iter++) {",
-'if (iter >= nsteps)',
+" if (iter >= nsteps)",
-'break;',
+" break;",
 // Sample from the 3D texture
-'float val = sample1(loc);',
+" float val = sample1(loc);",
 // Apply MIP operation
-'if (val > max_val) {',
+" if (val > max_val) {",
-'max_val = val;',
+" max_val = val;",
-'max_i = iter;',
+" max_i = iter;",
-'}',
+" }",
 // Advance location deeper into the volume
-'loc += step;',
+" loc += step;",
-'}',
+" }",
 // Refine location, gives crispier images
-'vec3 iloc = start_loc + step * (float(max_i) - 0.5);',
+" vec3 iloc = start_loc + step * (float(max_i) - 0.5);",
-'vec3 istep = step / float(REFINEMENT_STEPS);',
+" vec3 istep = step / float(REFINEMENT_STEPS);",
-'for (int i=0; i<REFINEMENT_STEPS; i++) {',
+" for (int i=0; i<REFINEMENT_STEPS; i++) {",
-'max_val = max(max_val, sample1(iloc));',
+" max_val = max(max_val, sample1(iloc));",
-'iloc += istep;',
+" iloc += istep;",
-'}',
+" }",
 // Resolve final color
-'gl_FragColor = apply_colormap(max_val);',
+" gl_FragColor = apply_colormap(max_val);",
-'}',
+" }",
-'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',
+" void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {",
-'gl_FragColor = vec4(0.0); // init transparent',
+" gl_FragColor = vec4(0.0); // init transparent",
-'vec4 color3 = vec4(0.0); // final color',
+" vec4 color3 = vec4(0.0); // final color",
-'vec3 dstep = 1.5 / u_size; // step to sample derivative',
+" vec3 dstep = 1.5 / u_size; // step to sample derivative",
-'vec3 loc = start_loc;',
+" vec3 loc = start_loc;",
-'float low_threshold = u_renderthreshold - 0.02 * (u_clim[1] - u_clim[0]);',
+" float low_threshold = u_renderthreshold - 0.02 * (u_clim[1] - u_clim[0]);",
 // Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
 // non-constant expression. So we use a hard-coded max, and an additional condition
 // inside the loop.
-'for (int iter=0; iter<MAX_STEPS; iter++) {',
+" for (int iter=0; iter<MAX_STEPS; iter++) {",
-'if (iter >= nsteps)',
+" if (iter >= nsteps)",
-'break;',
+" break;",
 // Sample from the 3D texture
-'float val = sample1(loc);',
+" float val = sample1(loc);",
-'if (val > low_threshold) {',
+" if (val > low_threshold) {",
 // Take the last interval in smaller steps
-'vec3 iloc = loc - 0.5 * step;',
+" vec3 iloc = loc - 0.5 * step;",
-'vec3 istep = step / float(REFINEMENT_STEPS);',
+" vec3 istep = step / float(REFINEMENT_STEPS);",
-'for (int i=0; i<REFINEMENT_STEPS; i++) {',
+" for (int i=0; i<REFINEMENT_STEPS; i++) {",
-'val = sample1(iloc);',
+" val = sample1(iloc);",
-'if (val > u_renderthreshold) {',
+" if (val > u_renderthreshold) {",
-'gl_FragColor = add_lighting(val, iloc, dstep, view_ray);',
+" gl_FragColor = add_lighting(val, iloc, dstep, view_ray);",
-'return;',
+" return;",
-'}',
+" }",
-'iloc += istep;',
+" iloc += istep;",
-'}',
+" }",
-'}',
+" }",
 // Advance location deeper into the volume
-'loc += step;',
+" loc += step;",
-'}',
+" }",
-'}',
+" }",
-'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray)',
+" vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray)",
-'{',
+" {",
 // Calculate color by incorporating lighting
 // View direction
-'vec3 V = normalize(view_ray);',
+" vec3 V = normalize(view_ray);",
 // calculate normal vector from gradient
-'vec3 N;',
+" vec3 N;",
-'float val1, val2;',
+" float val1, val2;",
-'val1 = sample1(loc + vec3(-step[0], 0.0, 0.0));',
+" val1 = sample1(loc + vec3(-step[0], 0.0, 0.0));",
-'val2 = sample1(loc + vec3(+step[0], 0.0, 0.0));',
+" val2 = sample1(loc + vec3(+step[0], 0.0, 0.0));",
-'N[0] = val1 - val2;',
+" N[0] = val1 - val2;",
-'val = max(max(val1, val2), val);',
+" val = max(max(val1, val2), val);",
-'val1 = sample1(loc + vec3(0.0, -step[1], 0.0));',
+" val1 = sample1(loc + vec3(0.0, -step[1], 0.0));",
-'val2 = sample1(loc + vec3(0.0, +step[1], 0.0));',
+" val2 = sample1(loc + vec3(0.0, +step[1], 0.0));",
-'N[1] = val1 - val2;',
+" N[1] = val1 - val2;",
-'val = max(max(val1, val2), val);',
+" val = max(max(val1, val2), val);",
-'val1 = sample1(loc + vec3(0.0, 0.0, -step[2]));',
+" val1 = sample1(loc + vec3(0.0, 0.0, -step[2]));",
-'val2 = sample1(loc + vec3(0.0, 0.0, +step[2]));',
+" val2 = sample1(loc + vec3(0.0, 0.0, +step[2]));",
-'N[2] = val1 - val2;',
+" N[2] = val1 - val2;",
-'val = max(max(val1, val2), val);',
+" val = max(max(val1, val2), val);",
-'float gm = length(N); // gradient magnitude',
+" float gm = length(N); // gradient magnitude",
-'N = normalize(N);',
+" N = normalize(N);",
 // Flip normal so it points towards viewer
-'float Nselect = float(dot(N, V) > 0.0);',
+" float Nselect = float(dot(N, V) > 0.0);",
-'N = (2.0 * Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;',
+" N = (2.0 * Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;",
 // Init colors
-'vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);',
+" vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);",
-'vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);',
+" vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);",
-'vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);',
+" vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);",
 // note: could allow multiple lights
-'for (int i=0; i<1; i++)',
+" for (int i=0; i<1; i++)",
-'{',
+" {",
 // Get light direction (make sure to prevent zero devision)
-'vec3 L = normalize(view_ray); //lightDirs[i];',
+" vec3 L = normalize(view_ray); //lightDirs[i];",
-'float lightEnabled = float( length(L) > 0.0 );',
+" float lightEnabled = float( length(L) > 0.0 );",
-'L = normalize(L + (1.0 - lightEnabled));',
+" L = normalize(L + (1.0 - lightEnabled));",
 // Calculate lighting properties
-'float lambertTerm = clamp(dot(N, L), 0.0, 1.0);',
+" float lambertTerm = clamp(dot(N, L), 0.0, 1.0);",
-'vec3 H = normalize(L+V); // Halfway vector',
+" vec3 H = normalize(L+V); // Halfway vector",
-'float specularTerm = pow(max(dot(H, N), 0.0), shininess);',
+" float specularTerm = pow(max(dot(H, N), 0.0), shininess);",
 // Calculate mask
-'float mask1 = lightEnabled;',
+" float mask1 = lightEnabled;",
 // Calculate colors
-'ambient_color += mask1 * ambient_color; // * gl_LightSource[i].ambient;',
+" ambient_color += mask1 * ambient_color; // * gl_LightSource[i].ambient;",
-'diffuse_color += mask1 * lambertTerm;',
+" diffuse_color += mask1 * lambertTerm;",
-'specular_color += mask1 * specularTerm * specular_color;',
+" specular_color += mask1 * specularTerm * specular_color;",
-'}',
+" }",
 // Calculate final color by componing different components
-'vec4 final_color;',
+" vec4 final_color;",
-'vec4 color = apply_colormap(val);',
+" vec4 color = apply_colormap(val);",
-'final_color = color * (ambient_color + diffuse_color) + specular_color;',
+" final_color = color * (ambient_color + diffuse_color) + specular_color;",
-'final_color.a = color.a;',
+" final_color.a = color.a;",
-'return final_color;',
+" return final_color;",
-'}',
+" }",
-].join( '\n' )
+].join( "\n" )
 };
 export { VolumeRenderShader1 };
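As a point of reference, VolumeRenderShader1 is normally wired up as in the three.js volume rendering example: the intensity data goes into a 3D texture bound to u_data, and the box is rendered with its back faces so the ray setup in the vertex shader works. The sketch below is illustrative only; it assumes volumeTexture and colormapTexture already exist and that the collapsed uniforms block declares u_size and u_clim as vector values, and all concrete numbers are placeholders.

import { BackSide, ShaderMaterial, UniformsUtils } from "three";
import { VolumeRenderShader1 } from "./VolumeShader.js";

const uniforms = UniformsUtils.clone( VolumeRenderShader1.uniforms );

uniforms[ "u_data" ].value = volumeTexture; // sampler3D holding the intensity volume (assumed to exist)
uniforms[ "u_size" ].value.set( 128, 128, 128 ); // volume dimensions in voxels
uniforms[ "u_clim" ].value.set( 0.0, 1.0 ); // contrast limits used by apply_colormap()
uniforms[ "u_renderstyle" ].value = 0; // 0 = cast_mip(), 1 = cast_iso()
uniforms[ "u_renderthreshold" ].value = 0.15; // only used by the isosurface branch
uniforms[ "u_cmdata" ].value = colormapTexture; // colormap texture sampled by apply_colormap() (assumed to exist)

const material = new ShaderMaterial( {
	uniforms: uniforms,
	vertexShader: VolumeRenderShader1.vertexShader,
	fragmentShader: VolumeRenderShader1.fragmentShader,
	side: BackSide // v_position is taken from the back face of the volume box
} );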
@@ -9,23 +9,23 @@ var WaterRefractionShader = {
 uniforms: {
-'color': {
+"color": {
 value: null
 },
-'time': {
+"time": {
 value: 0
 },
-'tDiffuse': {
+"tDiffuse": {
 value: null
 },
-'tDudv': {
+"tDudv": {
 value: null
 },
-'textureMatrix': {
+"textureMatrix": {
 value: null
 }
@@ -33,68 +33,68 @@ var WaterRefractionShader = {
 vertexShader: [
-'uniform mat4 textureMatrix;',
+"uniform mat4 textureMatrix;",
-'varying vec2 vUv;',
+"varying vec2 vUv;",
-'varying vec4 vUvRefraction;',
+"varying vec4 vUvRefraction;",
-'void main() {',
+"void main() {",
-' vUv = uv;',
+" vUv = uv;",
-' vUvRefraction = textureMatrix * vec4( position, 1.0 );',
+" vUvRefraction = textureMatrix * vec4( position, 1.0 );",
-' gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );',
+" gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );",
-'}'
+"}"
-].join( '\n' ),
+].join( "\n" ),
 fragmentShader: [
-'uniform vec3 color;',
+"uniform vec3 color;",
-'uniform float time;',
+"uniform float time;",
-'uniform sampler2D tDiffuse;',
+"uniform sampler2D tDiffuse;",
-'uniform sampler2D tDudv;',
+"uniform sampler2D tDudv;",
-'varying vec2 vUv;',
+"varying vec2 vUv;",
-'varying vec4 vUvRefraction;',
+"varying vec4 vUvRefraction;",
-'float blendOverlay( float base, float blend ) {',
+"float blendOverlay( float base, float blend ) {",
-' return( base < 0.5 ? ( 2.0 * base * blend ) : ( 1.0 - 2.0 * ( 1.0 - base ) * ( 1.0 - blend ) ) );',
+" return( base < 0.5 ? ( 2.0 * base * blend ) : ( 1.0 - 2.0 * ( 1.0 - base ) * ( 1.0 - blend ) ) );",
-'}',
+"}",
-'vec3 blendOverlay( vec3 base, vec3 blend ) {',
+"vec3 blendOverlay( vec3 base, vec3 blend ) {",
-' return vec3( blendOverlay( base.r, blend.r ), blendOverlay( base.g, blend.g ),blendOverlay( base.b, blend.b ) );',
+" return vec3( blendOverlay( base.r, blend.r ), blendOverlay( base.g, blend.g ),blendOverlay( base.b, blend.b ) );",
-'}',
+"}",
-'void main() {',
+"void main() {",
-' float waveStrength = 0.1;',
+" float waveStrength = 0.1;",
-' float waveSpeed = 0.03;',
+" float waveSpeed = 0.03;",
 // simple distortion (ripple) via dudv map (see https://www.youtube.com/watch?v=6B7IF6GOu7s)
-' vec2 distortedUv = texture2D( tDudv, vec2( vUv.x + time * waveSpeed, vUv.y ) ).rg * waveStrength;',
+" vec2 distortedUv = texture2D( tDudv, vec2( vUv.x + time * waveSpeed, vUv.y ) ).rg * waveStrength;",
-' distortedUv = vUv.xy + vec2( distortedUv.x, distortedUv.y + time * waveSpeed );',
+" distortedUv = vUv.xy + vec2( distortedUv.x, distortedUv.y + time * waveSpeed );",
-' vec2 distortion = ( texture2D( tDudv, distortedUv ).rg * 2.0 - 1.0 ) * waveStrength;',
+" vec2 distortion = ( texture2D( tDudv, distortedUv ).rg * 2.0 - 1.0 ) * waveStrength;",
 // new uv coords
-' vec4 uv = vec4( vUvRefraction );',
+" vec4 uv = vec4( vUvRefraction );",
-' uv.xy += distortion;',
+" uv.xy += distortion;",
-' vec4 base = texture2DProj( tDiffuse, uv );',
+" vec4 base = texture2DProj( tDiffuse, uv );",
-' gl_FragColor = vec4( blendOverlay( base.rgb, color ), 1.0 );',
+" gl_FragColor = vec4( blendOverlay( base.rgb, color ), 1.0 );",
-'}'
+"}"
-].join( '\n' )
+].join( "\n" )
 };
 export { WaterRefractionShader };
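WaterRefractionShader is typically not used on its own but passed to the Refractor helper, which renders the refracted scene into tDiffuse and keeps textureMatrix up to date, leaving tDudv and time for the application to drive. A minimal sketch with placeholder sizes, paths, and import locations:

import { PlaneBufferGeometry, RepeatWrapping, TextureLoader } from "three";
import { Refractor } from "./Refractor.js";
import { WaterRefractionShader } from "./WaterRefractionShader.js";

const refractor = new Refractor( new PlaneBufferGeometry( 10, 10 ), {
	textureWidth: 1024,
	textureHeight: 1024,
	color: 0x999999,
	shader: WaterRefractionShader
} );

// The dudv map drives the ripple distortion in the fragment shader.
const dudvMap = new TextureLoader().load( "textures/waterdudv.jpg" ); // placeholder path
dudvMap.wrapS = dudvMap.wrapT = RepeatWrapping;
refractor.material.uniforms[ "tDudv" ].value = dudvMap;

// Advance the ripple animation once per frame.
function update( delta ) {

	refractor.material.uniforms[ "time" ].value += delta;

}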