Commit 402804ac authored by Garrett Johnson

jsm updates

Parent 900da918
......@@ -8,19 +8,19 @@ import {
var DepthLimitedBlurShader = {
defines: {
'KERNEL_RADIUS': 4,
'DEPTH_PACKING': 1,
'PERSPECTIVE_CAMERA': 1
"KERNEL_RADIUS": 4,
"DEPTH_PACKING": 1,
"PERSPECTIVE_CAMERA": 1
},
uniforms: {
'tDiffuse': { value: null },
'size': { value: new Vector2( 512, 512 ) },
'sampleUvOffsets': { value: [ new Vector2( 0, 0 ) ] },
'sampleWeights': { value: [ 1.0 ] },
'tDepth': { value: null },
'cameraNear': { value: 10 },
'cameraFar': { value: 1000 },
'depthCutoff': { value: 10 },
"tDiffuse": { value: null },
"size": { value: new Vector2( 512, 512 ) },
"sampleUvOffsets": { value: [ new Vector2( 0, 0 ) ] },
"sampleWeights": { value: [ 1.0 ] },
"tDepth": { value: null },
"cameraNear": { value: 10 },
"cameraFar": { value: 1000 },
"depthCutoff": { value: 10 },
},
vertexShader: [
"#include <common>",
......@@ -153,9 +153,9 @@ var BlurShaderUtils = {
configure: function ( material, kernelRadius, stdDev, uvIncrement ) {
material.defines[ 'KERNEL_RADIUS' ] = kernelRadius;
material.uniforms[ 'sampleUvOffsets' ].value = BlurShaderUtils.createSampleOffsets( kernelRadius, uvIncrement );
material.uniforms[ 'sampleWeights' ].value = BlurShaderUtils.createSampleWeights( kernelRadius, stdDev );
material.defines[ "KERNEL_RADIUS" ] = kernelRadius;
material.uniforms[ "sampleUvOffsets" ].value = BlurShaderUtils.createSampleOffsets( kernelRadius, uvIncrement );
material.uniforms[ "sampleWeights" ].value = BlurShaderUtils.createSampleWeights( kernelRadius, stdDev );
material.needsUpdate = true;
}
......
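For context, a minimal sketch of how the DepthLimitedBlurShader / BlurShaderUtils pair shown above is typically wired into a ShaderMaterial. The import path, viewport size, and kernel settings are assumptions for illustration; the render targets feeding tDiffuse and tDepth are not shown here.

import { ShaderMaterial, UniformsUtils, Vector2 } from "three";
import { DepthLimitedBlurShader, BlurShaderUtils } from "three/examples/jsm/shaders/DepthLimitedBlurShader.js"; // assumed path

const width = 1024, height = 768; // assumed viewport size

const blurMaterial = new ShaderMaterial( {
	defines: Object.assign( {}, DepthLimitedBlurShader.defines ),
	uniforms: UniformsUtils.clone( DepthLimitedBlurShader.uniforms ),
	vertexShader: DepthLimitedBlurShader.vertexShader,
	fragmentShader: DepthLimitedBlurShader.fragmentShader
} );

blurMaterial.uniforms[ "size" ].value.set( width, height );

// Horizontal pass: kernel radius 8, std-dev 4, stepping one texel along x.
// configure() rebuilds the offset/weight arrays and flags the material for recompilation.
BlurShaderUtils.configure( blurMaterial, 8, 4.0, new Vector2( 1 / width, 0 ) );

A second material configured with new Vector2( 0, 1 / height ) would handle the vertical pass.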
......@@ -39,7 +39,7 @@ var DigitalGlitch = {
].join( "\n" ),
fragmentShader: [
"uniform int byp;",//should we apply the glitch ?
"uniform int byp;", //should we apply the glitch ?
"uniform sampler2D tDiffuse;",
"uniform sampler2D tDisp;",
......
......@@ -1114,7 +1114,7 @@ var FXAAShader = {
" // TODO avoid querying texture twice for same texel",
" gl_FragColor.a = texture2D(tDiffuse, vUv).a;",
"}"
].join("\n")
].join( "\n" )
};
......
......@@ -33,8 +33,8 @@ var HalftoneShader = {
"void main() {",
"vUV = uv;",
"gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);",
" vUV = uv;",
" gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);",
"}"
......
......@@ -35,7 +35,7 @@ var LuminosityHighPassShader = {
"}"
].join("\n"),
].join( "\n" ),
fragmentShader: [
......@@ -63,7 +63,7 @@ var LuminosityHighPassShader = {
"}"
].join("\n")
].join( "\n" )
};
......
......@@ -6,11 +6,11 @@
var ParallaxShader = {
// Ordered from fastest to best quality.
modes: {
none: 'NO_PARALLAX',
basic: 'USE_BASIC_PARALLAX',
steep: 'USE_STEEP_PARALLAX',
occlusion: 'USE_OCLUSION_PARALLAX', // a.k.a. POM
relief: 'USE_RELIEF_PARALLAX'
none: "NO_PARALLAX",
basic: "USE_BASIC_PARALLAX",
steep: "USE_STEEP_PARALLAX",
occlusion: "USE_OCLUSION_PARALLAX", // a.k.a. POM
relief: "USE_RELIEF_PARALLAX"
},
uniforms: {
......
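For the ParallaxShader modes table above, the usual convention (as in the three.js parallax-map example) is to enable the selected mode name as an empty preprocessor define so the matching #ifdef branch in the fragment shader is compiled in. A sketch, with the import path assumed and the map/bump uniforms (elided in this diff) left to the cloned defaults:

import { ShaderMaterial, UniformsUtils } from "three";
import { ParallaxShader } from "three/examples/jsm/shaders/ParallaxShader.js"; // assumed path

const material = new ShaderMaterial( {
	uniforms: UniformsUtils.clone( ParallaxShader.uniforms ),
	vertexShader: ParallaxShader.vertexShader,
	fragmentShader: ParallaxShader.fragmentShader
} );

// Switch quality at runtime: clear old defines, enable the new one, recompile.
function setParallaxMode( name ) { // "none" | "basic" | "steep" | "occlusion" | "relief"

	material.defines = {};
	material.defines[ ParallaxShader.modes[ name ] ] = "";
	material.needsUpdate = true;

}

setParallaxMode( "relief" );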
......@@ -9,32 +9,32 @@ import {
var SAOShader = {
defines: {
'NUM_SAMPLES': 7,
'NUM_RINGS': 4,
'NORMAL_TEXTURE': 0,
'DIFFUSE_TEXTURE': 0,
'DEPTH_PACKING': 1,
'PERSPECTIVE_CAMERA': 1
"NUM_SAMPLES": 7,
"NUM_RINGS": 4,
"NORMAL_TEXTURE": 0,
"DIFFUSE_TEXTURE": 0,
"DEPTH_PACKING": 1,
"PERSPECTIVE_CAMERA": 1
},
uniforms: {
'tDepth': { value: null },
'tDiffuse': { value: null },
'tNormal': { value: null },
'size': { value: new Vector2( 512, 512 ) },
"tDepth": { value: null },
"tDiffuse": { value: null },
"tNormal": { value: null },
"size": { value: new Vector2( 512, 512 ) },
'cameraNear': { value: 1 },
'cameraFar': { value: 100 },
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
"cameraNear": { value: 1 },
"cameraFar": { value: 100 },
"cameraProjectionMatrix": { value: new Matrix4() },
"cameraInverseProjectionMatrix": { value: new Matrix4() },
'scale': { value: 1.0 },
'intensity': { value: 0.1 },
'bias': { value: 0.5 },
"scale": { value: 1.0 },
"intensity": { value: 0.1 },
"bias": { value: 0.5 },
'minResolution': { value: 0.0 },
'kernelRadius': { value: 100.0 },
'randomSeed': { value: 0.0 }
"minResolution": { value: 0.0 },
"kernelRadius": { value: 100.0 },
"randomSeed": { value: 0.0 }
},
vertexShader: [
"varying vec2 vUv;",
......
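The SAOShader uniforms above are normally filled in from the scene camera and from depth/normal pre-passes, which is roughly what SAOPass does internally. A sketch with assumed import path and resolution:

import { PerspectiveCamera, ShaderMaterial, UniformsUtils } from "three";
import { SAOShader } from "three/examples/jsm/shaders/SAOShader.js"; // assumed path

const width = 1024, height = 768; // assumed resolution
const camera = new PerspectiveCamera( 65, width / height, 1, 100 );

const saoMaterial = new ShaderMaterial( {
	defines: Object.assign( {}, SAOShader.defines ),
	uniforms: UniformsUtils.clone( SAOShader.uniforms ),
	vertexShader: SAOShader.vertexShader,
	fragmentShader: SAOShader.fragmentShader
} );

saoMaterial.uniforms[ "size" ].value.set( width, height );
saoMaterial.uniforms[ "cameraNear" ].value = camera.near;
saoMaterial.uniforms[ "cameraFar" ].value = camera.far;
saoMaterial.uniforms[ "cameraProjectionMatrix" ].value.copy( camera.projectionMatrix );
saoMaterial.uniforms[ "cameraInverseProjectionMatrix" ].value.copy( camera.projectionMatrixInverse );

// tDepth and tNormal come from depth / normal render targets (not shown here).
saoMaterial.uniforms[ "intensity" ].value = 0.05;
saoMaterial.uniforms[ "kernelRadius" ].value = 50;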
This diff is collapsed.
......@@ -21,311 +21,311 @@ var VolumeRenderShader1 = {
"u_cmdata": { value: null }
},
vertexShader: [
'varying vec4 v_nearpos;',
'varying vec4 v_farpos;',
'varying vec3 v_position;',
" varying vec4 v_nearpos;",
" varying vec4 v_farpos;",
" varying vec3 v_position;",
'mat4 inversemat(mat4 m) {',
" mat4 inversemat(mat4 m) {",
// Taken from https://github.com/stackgl/glsl-inverse/blob/master/index.glsl
// This function is licenced by the MIT license to Mikola Lysenko
'float',
'a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3],',
'a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3],',
'a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3],',
'a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3],',
'b00 = a00 * a11 - a01 * a10,',
'b01 = a00 * a12 - a02 * a10,',
'b02 = a00 * a13 - a03 * a10,',
'b03 = a01 * a12 - a02 * a11,',
'b04 = a01 * a13 - a03 * a11,',
'b05 = a02 * a13 - a03 * a12,',
'b06 = a20 * a31 - a21 * a30,',
'b07 = a20 * a32 - a22 * a30,',
'b08 = a20 * a33 - a23 * a30,',
'b09 = a21 * a32 - a22 * a31,',
'b10 = a21 * a33 - a23 * a31,',
'b11 = a22 * a33 - a23 * a32,',
'det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;',
'return mat4(',
'a11 * b11 - a12 * b10 + a13 * b09,',
'a02 * b10 - a01 * b11 - a03 * b09,',
'a31 * b05 - a32 * b04 + a33 * b03,',
'a22 * b04 - a21 * b05 - a23 * b03,',
'a12 * b08 - a10 * b11 - a13 * b07,',
'a00 * b11 - a02 * b08 + a03 * b07,',
'a32 * b02 - a30 * b05 - a33 * b01,',
'a20 * b05 - a22 * b02 + a23 * b01,',
'a10 * b10 - a11 * b08 + a13 * b06,',
'a01 * b08 - a00 * b10 - a03 * b06,',
'a30 * b04 - a31 * b02 + a33 * b00,',
'a21 * b02 - a20 * b04 - a23 * b00,',
'a11 * b07 - a10 * b09 - a12 * b06,',
'a00 * b09 - a01 * b07 + a02 * b06,',
'a31 * b01 - a30 * b03 - a32 * b00,',
'a20 * b03 - a21 * b01 + a22 * b00) / det;',
'}',
'void main() {',
" float",
" a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3],",
" a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3],",
" a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3],",
" a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3],",
" b00 = a00 * a11 - a01 * a10,",
" b01 = a00 * a12 - a02 * a10,",
" b02 = a00 * a13 - a03 * a10,",
" b03 = a01 * a12 - a02 * a11,",
" b04 = a01 * a13 - a03 * a11,",
" b05 = a02 * a13 - a03 * a12,",
" b06 = a20 * a31 - a21 * a30,",
" b07 = a20 * a32 - a22 * a30,",
" b08 = a20 * a33 - a23 * a30,",
" b09 = a21 * a32 - a22 * a31,",
" b10 = a21 * a33 - a23 * a31,",
" b11 = a22 * a33 - a23 * a32,",
" det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;",
" return mat4(",
" a11 * b11 - a12 * b10 + a13 * b09,",
" a02 * b10 - a01 * b11 - a03 * b09,",
" a31 * b05 - a32 * b04 + a33 * b03,",
" a22 * b04 - a21 * b05 - a23 * b03,",
" a12 * b08 - a10 * b11 - a13 * b07,",
" a00 * b11 - a02 * b08 + a03 * b07,",
" a32 * b02 - a30 * b05 - a33 * b01,",
" a20 * b05 - a22 * b02 + a23 * b01,",
" a10 * b10 - a11 * b08 + a13 * b06,",
" a01 * b08 - a00 * b10 - a03 * b06,",
" a30 * b04 - a31 * b02 + a33 * b00,",
" a21 * b02 - a20 * b04 - a23 * b00,",
" a11 * b07 - a10 * b09 - a12 * b06,",
" a00 * b09 - a01 * b07 + a02 * b06,",
" a31 * b01 - a30 * b03 - a32 * b00,",
" a20 * b03 - a21 * b01 + a22 * b00) / det;",
" }",
" void main() {",
// Prepare transforms to map to "camera view". See also:
// https://threejs.org/docs/#api/renderers/webgl/WebGLProgram
'mat4 viewtransformf = viewMatrix;',
'mat4 viewtransformi = inversemat(viewMatrix);',
" mat4 viewtransformf = viewMatrix;",
" mat4 viewtransformi = inversemat(viewMatrix);",
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) to the near clipping plane, and project back. Do
// the same for the far clipping plane. This gives us all the information we
// need to calculate the ray and truncate it to the viewing cone.
'vec4 position4 = vec4(position, 1.0);',
'vec4 pos_in_cam = viewtransformf * position4;',
" vec4 position4 = vec4(position, 1.0);",
" vec4 pos_in_cam = viewtransformf * position4;",
// Intersection of ray and near clipping plane (z = -1 in clip coords)
'pos_in_cam.z = -pos_in_cam.w;',
'v_nearpos = viewtransformi * pos_in_cam;',
" pos_in_cam.z = -pos_in_cam.w;",
" v_nearpos = viewtransformi * pos_in_cam;",
// Intersection of ray and far clipping plane (z = +1 in clip coords)
'pos_in_cam.z = pos_in_cam.w;',
'v_farpos = viewtransformi * pos_in_cam;',
" pos_in_cam.z = pos_in_cam.w;",
" v_farpos = viewtransformi * pos_in_cam;",
// Set varyings and output pos
'v_position = position;',
'gl_Position = projectionMatrix * viewMatrix * modelMatrix * position4;',
'}',
].join( '\n' ),
" v_position = position;",
" gl_Position = projectionMatrix * viewMatrix * modelMatrix * position4;",
" }",
].join( "\n" ),
fragmentShader: [
'precision highp float;',
'precision mediump sampler3D;',
" precision highp float;",
" precision mediump sampler3D;",
'uniform vec3 u_size;',
'uniform int u_renderstyle;',
'uniform float u_renderthreshold;',
'uniform vec2 u_clim;',
" uniform vec3 u_size;",
" uniform int u_renderstyle;",
" uniform float u_renderthreshold;",
" uniform vec2 u_clim;",
'uniform sampler3D u_data;',
'uniform sampler2D u_cmdata;',
" uniform sampler3D u_data;",
" uniform sampler2D u_cmdata;",
'varying vec3 v_position;',
'varying vec4 v_nearpos;',
'varying vec4 v_farpos;',
" varying vec3 v_position;",
" varying vec4 v_nearpos;",
" varying vec4 v_farpos;",
// The maximum distance through our rendering volume is sqrt(3).
'const int MAX_STEPS = 887; // 887 for 512^3, 1774 for 1024^3',
'const int REFINEMENT_STEPS = 4;',
'const float relative_step_size = 1.0;',
'const vec4 ambient_color = vec4(0.2, 0.4, 0.2, 1.0);',
'const vec4 diffuse_color = vec4(0.8, 0.2, 0.2, 1.0);',
'const vec4 specular_color = vec4(1.0, 1.0, 1.0, 1.0);',
'const float shininess = 40.0;',
" const int MAX_STEPS = 887; // 887 for 512^3, 1774 for 1024^3",
" const int REFINEMENT_STEPS = 4;",
" const float relative_step_size = 1.0;",
" const vec4 ambient_color = vec4(0.2, 0.4, 0.2, 1.0);",
" const vec4 diffuse_color = vec4(0.8, 0.2, 0.2, 1.0);",
" const vec4 specular_color = vec4(1.0, 1.0, 1.0, 1.0);",
" const float shininess = 40.0;",
'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',
'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',
" void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);",
" void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);",
'float sample1(vec3 texcoords);',
'vec4 apply_colormap(float val);',
'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray);',
" float sample1(vec3 texcoords);",
" vec4 apply_colormap(float val);",
" vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray);",
'void main() {',
" void main() {",
// Normalize clipping plane info
'vec3 farpos = v_farpos.xyz / v_farpos.w;',
'vec3 nearpos = v_nearpos.xyz / v_nearpos.w;',
" vec3 farpos = v_farpos.xyz / v_farpos.w;",
" vec3 nearpos = v_nearpos.xyz / v_nearpos.w;",
// Calculate unit vector pointing in the view direction through this fragment.
'vec3 view_ray = normalize(nearpos.xyz - farpos.xyz);',
" vec3 view_ray = normalize(nearpos.xyz - farpos.xyz);",
// Compute the (negative) distance to the front surface or near clipping plane.
// v_position is the back face of the cuboid, so the initial distance calculated in the dot
// product below is the distance from near clip plane to the back of the cuboid
'float distance = dot(nearpos - v_position, view_ray);',
'distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,',
'(u_size.x - 0.5 - v_position.x) / view_ray.x));',
'distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,',
'(u_size.y - 0.5 - v_position.y) / view_ray.y));',
'distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,',
'(u_size.z - 0.5 - v_position.z) / view_ray.z));',
" float distance = dot(nearpos - v_position, view_ray);",
" distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,",
" (u_size.x - 0.5 - v_position.x) / view_ray.x));",
" distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,",
" (u_size.y - 0.5 - v_position.y) / view_ray.y));",
" distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,",
" (u_size.z - 0.5 - v_position.z) / view_ray.z));",
// Now we have the starting position on the front surface
'vec3 front = v_position + view_ray * distance;',
" vec3 front = v_position + view_ray * distance;",
// Decide how many steps to take
'int nsteps = int(-distance / relative_step_size + 0.5);',
'if ( nsteps < 1 )',
'discard;',
" int nsteps = int(-distance / relative_step_size + 0.5);",
" if ( nsteps < 1 )",
" discard;",
// Get starting location and step vector in texture coordinates
'vec3 step = ((v_position - front) / u_size) / float(nsteps);',
'vec3 start_loc = front / u_size;',
" vec3 step = ((v_position - front) / u_size) / float(nsteps);",
" vec3 start_loc = front / u_size;",
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//'gl_FragColor = vec4(0.0, float(nsteps) / 1.0 / u_size.x, 1.0, 1.0);',
//'return;',
'if (u_renderstyle == 0)',
'cast_mip(start_loc, step, nsteps, view_ray);',
'else if (u_renderstyle == 1)',
'cast_iso(start_loc, step, nsteps, view_ray);',
" if (u_renderstyle == 0)",
" cast_mip(start_loc, step, nsteps, view_ray);",
" else if (u_renderstyle == 1)",
" cast_iso(start_loc, step, nsteps, view_ray);",
'if (gl_FragColor.a < 0.05)',
'discard;',
'}',
" if (gl_FragColor.a < 0.05)",
" discard;",
" }",
'float sample1(vec3 texcoords) {',
'/* Sample float value from a 3D texture. Assumes intensity data. */',
'return texture(u_data, texcoords.xyz).r;',
'}',
" float sample1(vec3 texcoords) {",
" /* Sample float value from a 3D texture. Assumes intensity data. */",
" return texture(u_data, texcoords.xyz).r;",
" }",
'vec4 apply_colormap(float val) {',
'val = (val - u_clim[0]) / (u_clim[1] - u_clim[0]);',
'return texture2D(u_cmdata, vec2(val, 0.5));',
'}',
" vec4 apply_colormap(float val) {",
" val = (val - u_clim[0]) / (u_clim[1] - u_clim[0]);",
" return texture2D(u_cmdata, vec2(val, 0.5));",
" }",
'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',
" void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {",
'float max_val = -1e6;',
'int max_i = 100;',
'vec3 loc = start_loc;',
" float max_val = -1e6;",
" int max_i = 100;",
" vec3 loc = start_loc;",
// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
// non-constant expression. So we use a hard-coded max, and an additional condition
// inside the loop.
'for (int iter=0; iter<MAX_STEPS; iter++) {',
'if (iter >= nsteps)',
'break;',
" for (int iter=0; iter<MAX_STEPS; iter++) {",
" if (iter >= nsteps)",
" break;",
// Sample from the 3D texture
'float val = sample1(loc);',
" float val = sample1(loc);",
// Apply MIP operation
'if (val > max_val) {',
'max_val = val;',
'max_i = iter;',
'}',
" if (val > max_val) {",
" max_val = val;",
" max_i = iter;",
" }",
// Advance location deeper into the volume
'loc += step;',
'}',
" loc += step;",
" }",
// Refine location, gives crispier images
'vec3 iloc = start_loc + step * (float(max_i) - 0.5);',
'vec3 istep = step / float(REFINEMENT_STEPS);',
'for (int i=0; i<REFINEMENT_STEPS; i++) {',
'max_val = max(max_val, sample1(iloc));',
'iloc += istep;',
'}',
" vec3 iloc = start_loc + step * (float(max_i) - 0.5);",
" vec3 istep = step / float(REFINEMENT_STEPS);",
" for (int i=0; i<REFINEMENT_STEPS; i++) {",
" max_val = max(max_val, sample1(iloc));",
" iloc += istep;",
" }",
// Resolve final color
'gl_FragColor = apply_colormap(max_val);',
'}',
" gl_FragColor = apply_colormap(max_val);",
" }",
'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',
" void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {",
'gl_FragColor = vec4(0.0); // init transparent',
'vec4 color3 = vec4(0.0); // final color',
'vec3 dstep = 1.5 / u_size; // step to sample derivative',
'vec3 loc = start_loc;',
" gl_FragColor = vec4(0.0); // init transparent",
" vec4 color3 = vec4(0.0); // final color",
" vec3 dstep = 1.5 / u_size; // step to sample derivative",
" vec3 loc = start_loc;",
'float low_threshold = u_renderthreshold - 0.02 * (u_clim[1] - u_clim[0]);',
" float low_threshold = u_renderthreshold - 0.02 * (u_clim[1] - u_clim[0]);",
// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
// non-constant expression. So we use a hard-coded max, and an additional condition
// inside the loop.
'for (int iter=0; iter<MAX_STEPS; iter++) {',
'if (iter >= nsteps)',
'break;',
" for (int iter=0; iter<MAX_STEPS; iter++) {",
" if (iter >= nsteps)",
" break;",
// Sample from the 3D texture
'float val = sample1(loc);',
" float val = sample1(loc);",
'if (val > low_threshold) {',
" if (val > low_threshold) {",
// Take the last interval in smaller steps
'vec3 iloc = loc - 0.5 * step;',
'vec3 istep = step / float(REFINEMENT_STEPS);',
'for (int i=0; i<REFINEMENT_STEPS; i++) {',
'val = sample1(iloc);',
'if (val > u_renderthreshold) {',
'gl_FragColor = add_lighting(val, iloc, dstep, view_ray);',
'return;',
'}',
'iloc += istep;',
'}',
'}',
" vec3 iloc = loc - 0.5 * step;",
" vec3 istep = step / float(REFINEMENT_STEPS);",
" for (int i=0; i<REFINEMENT_STEPS; i++) {",
" val = sample1(iloc);",
" if (val > u_renderthreshold) {",
" gl_FragColor = add_lighting(val, iloc, dstep, view_ray);",
" return;",
" }",
" iloc += istep;",
" }",
" }",
// Advance location deeper into the volume
'loc += step;',
'}',
'}',
" loc += step;",
" }",
" }",
'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray)',
'{',
" vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray)",
" {",
// Calculate color by incorporating lighting
// View direction
'vec3 V = normalize(view_ray);',
" vec3 V = normalize(view_ray);",
// calculate normal vector from gradient
'vec3 N;',
'float val1, val2;',
'val1 = sample1(loc + vec3(-step[0], 0.0, 0.0));',
'val2 = sample1(loc + vec3(+step[0], 0.0, 0.0));',
'N[0] = val1 - val2;',
'val = max(max(val1, val2), val);',
'val1 = sample1(loc + vec3(0.0, -step[1], 0.0));',
'val2 = sample1(loc + vec3(0.0, +step[1], 0.0));',
'N[1] = val1 - val2;',
'val = max(max(val1, val2), val);',
'val1 = sample1(loc + vec3(0.0, 0.0, -step[2]));',
'val2 = sample1(loc + vec3(0.0, 0.0, +step[2]));',
'N[2] = val1 - val2;',
'val = max(max(val1, val2), val);',
'float gm = length(N); // gradient magnitude',
'N = normalize(N);',
" vec3 N;",
" float val1, val2;",
" val1 = sample1(loc + vec3(-step[0], 0.0, 0.0));",
" val2 = sample1(loc + vec3(+step[0], 0.0, 0.0));",
" N[0] = val1 - val2;",
" val = max(max(val1, val2), val);",
" val1 = sample1(loc + vec3(0.0, -step[1], 0.0));",
" val2 = sample1(loc + vec3(0.0, +step[1], 0.0));",
" N[1] = val1 - val2;",
" val = max(max(val1, val2), val);",
" val1 = sample1(loc + vec3(0.0, 0.0, -step[2]));",
" val2 = sample1(loc + vec3(0.0, 0.0, +step[2]));",
" N[2] = val1 - val2;",
" val = max(max(val1, val2), val);",
" float gm = length(N); // gradient magnitude",
" N = normalize(N);",
// Flip normal so it points towards viewer
'float Nselect = float(dot(N, V) > 0.0);',
'N = (2.0 * Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;',
" float Nselect = float(dot(N, V) > 0.0);",
" N = (2.0 * Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;",
// Init colors
'vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);',
'vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);',
'vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);',
" vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);",
" vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);",
" vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);",
// note: could allow multiple lights
'for (int i=0; i<1; i++)',
'{',
" for (int i=0; i<1; i++)",
" {",
// Get light direction (make sure to prevent zero devision)
'vec3 L = normalize(view_ray); //lightDirs[i];',
'float lightEnabled = float( length(L) > 0.0 );',
'L = normalize(L + (1.0 - lightEnabled));',
" vec3 L = normalize(view_ray); //lightDirs[i];",
" float lightEnabled = float( length(L) > 0.0 );",
" L = normalize(L + (1.0 - lightEnabled));",
// Calculate lighting properties
'float lambertTerm = clamp(dot(N, L), 0.0, 1.0);',
'vec3 H = normalize(L+V); // Halfway vector',
'float specularTerm = pow(max(dot(H, N), 0.0), shininess);',
" float lambertTerm = clamp(dot(N, L), 0.0, 1.0);",
" vec3 H = normalize(L+V); // Halfway vector",
" float specularTerm = pow(max(dot(H, N), 0.0), shininess);",
// Calculate mask
'float mask1 = lightEnabled;',
" float mask1 = lightEnabled;",
// Calculate colors
'ambient_color += mask1 * ambient_color; // * gl_LightSource[i].ambient;',
'diffuse_color += mask1 * lambertTerm;',
'specular_color += mask1 * specularTerm * specular_color;',
'}',
" ambient_color += mask1 * ambient_color; // * gl_LightSource[i].ambient;",
" diffuse_color += mask1 * lambertTerm;",
" specular_color += mask1 * specularTerm * specular_color;",
" }",
// Calculate final color by componing different components
'vec4 final_color;',
'vec4 color = apply_colormap(val);',
'final_color = color * (ambient_color + diffuse_color) + specular_color;',
'final_color.a = color.a;',
'return final_color;',
'}',
].join( '\n' )
" vec4 final_color;",
" vec4 color = apply_colormap(val);",
" final_color = color * (ambient_color + diffuse_color) + specular_color;",
" final_color.a = color.a;",
" return final_color;",
" }",
].join( "\n" )
};
export { VolumeRenderShader1 };
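VolumeRenderShader1 expects vertex positions in voxel coordinates and samples u_data as a 3D texture, so it is typically rendered on a back-facing box spanning the volume. A self-contained sketch using a tiny procedural volume; the 3D texture class name (DataTexture3D here, renamed Data3DTexture in later revisions), the import path, and all sizes/thresholds are assumptions for illustration:

import {
	BackSide, DataTexture, DataTexture3D, FloatType, LinearFilter,
	RGBAFormat, RedFormat, ShaderMaterial, UniformsUtils, Vector2, Vector3
} from "three";
import { VolumeRenderShader1 } from "three/examples/jsm/shaders/VolumeRenderShader1.js"; // assumed path

// Tiny procedural 32^3 intensity volume so the snippet stands alone.
const size = 32;
const volumeData = new Float32Array( size * size * size );
for ( let i = 0; i < volumeData.length; i ++ ) volumeData[ i ] = Math.random();

const volumeTexture = new DataTexture3D( volumeData, size, size, size );
volumeTexture.format = RedFormat;
volumeTexture.type = FloatType;
volumeTexture.minFilter = volumeTexture.magFilter = LinearFilter;
volumeTexture.unpackAlignment = 1;
volumeTexture.needsUpdate = true;

// 256x1 grayscale colormap stored as an ordinary 2D texture for u_cmdata.
const cmData = new Uint8Array( 256 * 4 );
for ( let i = 0; i < 256; i ++ ) cmData.set( [ i, i, i, 255 ], i * 4 );
const cmTexture = new DataTexture( cmData, 256, 1, RGBAFormat );
cmTexture.needsUpdate = true;

const uniforms = UniformsUtils.clone( VolumeRenderShader1.uniforms );
uniforms[ "u_data" ].value = volumeTexture;
uniforms[ "u_cmdata" ].value = cmTexture;
uniforms[ "u_size" ].value = new Vector3( size, size, size ); // vec3, per the fragment shader
uniforms[ "u_clim" ].value = new Vector2( 0.0, 1.0 ); // contrast limits (vec2)
uniforms[ "u_renderstyle" ].value = 0; // 0 = cast_mip, 1 = cast_iso
uniforms[ "u_renderthreshold" ].value = 0.5; // only used by cast_iso

const material = new ShaderMaterial( {
	uniforms: uniforms,
	vertexShader: VolumeRenderShader1.vertexShader,
	fragmentShader: VolumeRenderShader1.fragmentShader,
	side: BackSide // the shader starts its rays from the box's back face (v_position)
} );

// Apply `material` to a box geometry translated so its vertices run from 0 to `size`
// on each axis, and render with a WebGL2 renderer (the shader relies on sampler3D).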
......@@ -9,23 +9,23 @@ var WaterRefractionShader = {
uniforms: {
'color': {
"color": {
value: null
},
'time': {
"time": {
value: 0
},
'tDiffuse': {
"tDiffuse": {
value: null
},
'tDudv': {
"tDudv": {
value: null
},
'textureMatrix': {
"textureMatrix": {
value: null
}
......@@ -33,68 +33,68 @@ var WaterRefractionShader = {
vertexShader: [
'uniform mat4 textureMatrix;',
"uniform mat4 textureMatrix;",
'varying vec2 vUv;',
'varying vec4 vUvRefraction;',
"varying vec2 vUv;",
"varying vec4 vUvRefraction;",
'void main() {',
"void main() {",
' vUv = uv;',
" vUv = uv;",
' vUvRefraction = textureMatrix * vec4( position, 1.0 );',
" vUvRefraction = textureMatrix * vec4( position, 1.0 );",
' gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );',
" gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );",
'}'
"}"
].join( '\n' ),
].join( "\n" ),
fragmentShader: [
'uniform vec3 color;',
'uniform float time;',
'uniform sampler2D tDiffuse;',
'uniform sampler2D tDudv;',
"uniform vec3 color;",
"uniform float time;",
"uniform sampler2D tDiffuse;",
"uniform sampler2D tDudv;",
'varying vec2 vUv;',
'varying vec4 vUvRefraction;',
"varying vec2 vUv;",
"varying vec4 vUvRefraction;",
'float blendOverlay( float base, float blend ) {',
"float blendOverlay( float base, float blend ) {",
' return( base < 0.5 ? ( 2.0 * base * blend ) : ( 1.0 - 2.0 * ( 1.0 - base ) * ( 1.0 - blend ) ) );',
" return( base < 0.5 ? ( 2.0 * base * blend ) : ( 1.0 - 2.0 * ( 1.0 - base ) * ( 1.0 - blend ) ) );",
'}',
"}",
'vec3 blendOverlay( vec3 base, vec3 blend ) {',
"vec3 blendOverlay( vec3 base, vec3 blend ) {",
' return vec3( blendOverlay( base.r, blend.r ), blendOverlay( base.g, blend.g ),blendOverlay( base.b, blend.b ) );',
" return vec3( blendOverlay( base.r, blend.r ), blendOverlay( base.g, blend.g ),blendOverlay( base.b, blend.b ) );",
'}',
"}",
'void main() {',
"void main() {",
' float waveStrength = 0.1;',
' float waveSpeed = 0.03;',
" float waveStrength = 0.1;",
" float waveSpeed = 0.03;",
// simple distortion (ripple) via dudv map (see https://www.youtube.com/watch?v=6B7IF6GOu7s)
' vec2 distortedUv = texture2D( tDudv, vec2( vUv.x + time * waveSpeed, vUv.y ) ).rg * waveStrength;',
' distortedUv = vUv.xy + vec2( distortedUv.x, distortedUv.y + time * waveSpeed );',
' vec2 distortion = ( texture2D( tDudv, distortedUv ).rg * 2.0 - 1.0 ) * waveStrength;',
" vec2 distortedUv = texture2D( tDudv, vec2( vUv.x + time * waveSpeed, vUv.y ) ).rg * waveStrength;",
" distortedUv = vUv.xy + vec2( distortedUv.x, distortedUv.y + time * waveSpeed );",
" vec2 distortion = ( texture2D( tDudv, distortedUv ).rg * 2.0 - 1.0 ) * waveStrength;",
// new uv coords
' vec4 uv = vec4( vUvRefraction );',
' uv.xy += distortion;',
" vec4 uv = vec4( vUvRefraction );",
" uv.xy += distortion;",
' vec4 base = texture2DProj( tDiffuse, uv );',
" vec4 base = texture2DProj( tDiffuse, uv );",
' gl_FragColor = vec4( blendOverlay( base.rgb, color ), 1.0 );',
" gl_FragColor = vec4( blendOverlay( base.rgb, color ), 1.0 );",
'}'
"}"
].join( '\n' )
].join( "\n" )
};
export { WaterRefractionShader };
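WaterRefractionShader is normally consumed through the Refractor helper object, which builds the ShaderMaterial and fills tDiffuse and textureMatrix itself; user code mainly supplies the dudv map and advances the time uniform. A sketch roughly following the three.js refraction example; the geometry class name, import paths, and texture path are assumptions:

import { Clock, PlaneBufferGeometry, RepeatWrapping, TextureLoader } from "three";
import { Refractor } from "three/examples/jsm/objects/Refractor.js"; // assumed path
import { WaterRefractionShader } from "three/examples/jsm/shaders/WaterRefractionShader.js"; // assumed path

const refractor = new Refractor( new PlaneBufferGeometry( 10, 10 ), {
	color: 0x999999,
	textureWidth: 1024,
	textureHeight: 1024,
	shader: WaterRefractionShader
} );

// The dudv map drives the ripple distortion in the fragment shader above.
const dudvMap = new TextureLoader().load( "textures/water/waterdudv.jpg" ); // placeholder path
dudvMap.wrapS = dudvMap.wrapT = RepeatWrapping;
refractor.material.uniforms[ "tDudv" ].value = dudvMap;

// Advance the time uniform each frame so the waves animate.
const clock = new Clock();
function animateWater() {

	refractor.material.uniforms[ "time" ].value += clock.getDelta();

}

// Add `refractor` to the scene and call animateWater() once per frame before rendering.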