提交 eaa0c6d4 编写于 作者: A Almar Klein

Add volume rendering example

上级 7cc24e3b
// Author: Almar Klein
// Description: Shaders to render 3D volumes using raycasting.
// The applied techniques are based on similar implementations in the Visvis and Vispy projects.
// This is not the only approach, therefore it's marked 1.
// Shader pair for single-pass volume raycasting of a 3D scalar texture.
// Two render styles are supported: MIP (maximum intensity projection) and
// isosurface with simple Blinn-Phong style lighting.
// NOTE: the fragment shader declares a sampler3D, so this requires WebGL 2.
// The u_data (3D texture) and u_cmdata (colormap) uniforms are not declared
// here; callers are expected to assign them on the material at runtime.
THREE.ShaderLib[ 'volumerender1' ] = {
uniforms: {
// Volume dimensions in voxels (x, y, z).
"u_size": { value: [1, 1, 1] },
// 0 = MIP, 1 = isosurface.
"u_renderstyle": { value: 0 },
// Iso value used when u_renderstyle == 1.
"u_renderthreshold": { value: 0.5 },
// Contrast limits (low, high) mapped onto the colormap.
"u_clim": { value: [0.0, 1.0] }
},
vertexShader: [
'varying vec4 v_nearpos;',
'varying vec4 v_farpos;',
'varying vec3 v_position;',
// Full 4x4 matrix inverse, written out by cofactor expansion.
'mat4 inversemat(mat4 m) {',
// Taken from https://github.com/stackgl/glsl-inverse/blob/master/index.glsl
// This function is licenced by the MIT license to Mikola Lysenko
'float',
'a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3],',
'a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3],',
'a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3],',
'a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3],',
'b00 = a00 * a11 - a01 * a10,',
'b01 = a00 * a12 - a02 * a10,',
'b02 = a00 * a13 - a03 * a10,',
'b03 = a01 * a12 - a02 * a11,',
'b04 = a01 * a13 - a03 * a11,',
'b05 = a02 * a13 - a03 * a12,',
'b06 = a20 * a31 - a21 * a30,',
'b07 = a20 * a32 - a22 * a30,',
'b08 = a20 * a33 - a23 * a30,',
'b09 = a21 * a32 - a22 * a31,',
'b10 = a21 * a33 - a23 * a31,',
'b11 = a22 * a33 - a23 * a32,',
'det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;',
'return mat4(',
'a11 * b11 - a12 * b10 + a13 * b09,',
'a02 * b10 - a01 * b11 - a03 * b09,',
'a31 * b05 - a32 * b04 + a33 * b03,',
'a22 * b04 - a21 * b05 - a23 * b03,',
'a12 * b08 - a10 * b11 - a13 * b07,',
'a00 * b11 - a02 * b08 + a03 * b07,',
'a32 * b02 - a30 * b05 - a33 * b01,',
'a20 * b05 - a22 * b02 + a23 * b01,',
'a10 * b10 - a11 * b08 + a13 * b06,',
'a01 * b08 - a00 * b10 - a03 * b06,',
'a30 * b04 - a31 * b02 + a33 * b00,',
'a21 * b02 - a20 * b04 - a23 * b00,',
'a11 * b07 - a10 * b09 - a12 * b06,',
'a00 * b09 - a01 * b07 + a02 * b06,',
'a31 * b01 - a30 * b03 - a32 * b00,',
'a20 * b03 - a21 * b01 + a22 * b00) / det;',
'}',
'void main() {',
// Prepare transforms to map to "camera view". See also:
// https://threejs.org/docs/#api/renderers/webgl/WebGLProgram
'mat4 viewtransformf = viewMatrix;',
'mat4 viewtransformi = inversemat(viewMatrix);',
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) to the near clipping plane, and project back. Do
// the same for the far clipping plane. This gives us all the information we
// need to calculate the ray and truncate it to the viewing cone.
'vec4 position4 = vec4(position, 1.0);',
'vec4 pos_in_cam = viewtransformf * position4;',
// Intersection of ray and near clipping plane (z = -1 in clip coords)
'pos_in_cam.z = -pos_in_cam.w;',
'v_nearpos = viewtransformi * pos_in_cam;',
// Intersection of ray and far clipping plane (z = +1 in clip coords)
'pos_in_cam.z = pos_in_cam.w;',
'v_farpos = viewtransformi * pos_in_cam;',
// Set varyings and output pos
'v_position = position;',
'gl_Position = projectionMatrix * viewMatrix * modelMatrix * position4;',
'}',
].join( '\n' ),
fragmentShader: [
'precision highp float;',
'precision mediump sampler3D;',
'uniform vec3 u_size;',
'uniform int u_renderstyle;',
'uniform float u_renderthreshold;',
'uniform vec2 u_clim;',
'uniform sampler3D u_data;',
'uniform sampler2D u_cmdata;',
'varying vec3 v_position;',
'varying vec4 v_nearpos;',
'varying vec4 v_farpos;',
// The maximum distance through our rendering volume is sqrt(3).
'const int MAX_STEPS = 887; // 887 for 512^3, 1774 for 1024^3',
'const int REFINEMENT_STEPS = 4;',
'const float relative_step_size = 1.0;',
// NOTE(review): these lighting constants are shadowed by locals of the same
// names inside add_lighting(), so their values are never used there — confirm
// whether they are intentional documentation or leftovers.
'const vec4 ambient_color = vec4(0.2, 0.4, 0.2, 1.0);',
'const vec4 diffuse_color = vec4(0.8, 0.2, 0.2, 1.0);',
'const vec4 specular_color = vec4(1.0, 1.0, 1.0, 1.0);',
'const float shininess = 40.0;',
// Forward declarations for the helper functions defined below main().
'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',
'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);',
'float sample1(vec3 texcoords);',
'vec4 apply_colormap(float val);',
'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray);',
'void main() {',
// Normalize clipping plane info
'vec3 farpos = v_farpos.xyz / v_farpos.w;',
'vec3 nearpos = v_nearpos.xyz / v_nearpos.w;',
// Calculate unit vector pointing in the view direction through this fragment.
'vec3 view_ray = normalize(nearpos.xyz - farpos.xyz);',
// Compute the (negative) distance to the front surface or near clipping plane.
// v_position is the back face of the cuboid, so the initial distance calculated in the dot
// product below is the distance from near clip plane to the back of the cuboid
'float distance = dot(nearpos - v_position, view_ray);',
// Clip the ray against each pair of volume faces (slab method); the volume
// spans [-0.5, u_size - 0.5] on each axis.
'distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,',
'(u_size.x - 0.5 - v_position.x) / view_ray.x));',
'distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,',
'(u_size.y - 0.5 - v_position.y) / view_ray.y));',
'distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,',
'(u_size.z - 0.5 - v_position.z) / view_ray.z));',
// Now we have the starting position on the front surface
'vec3 front = v_position + view_ray * distance;',
// Decide how many steps to take
'int nsteps = int(-distance / relative_step_size + 0.5);',
'if ( nsteps < 1 )',
'discard;',
// Get starting location and step vector in texture coordinates
'vec3 step = ((v_position - front) / u_size) / float(nsteps);',
'vec3 start_loc = front / u_size;',
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//'gl_FragColor = vec4(0.0, float(nsteps) / 1.0 / u_size.x, 1.0, 1.0);',
//'return;',
// Dispatch to the selected render style.
'if (u_renderstyle == 0)',
'cast_mip(start_loc, step, nsteps, view_ray);',
'else if (u_renderstyle == 1)',
'cast_iso(start_loc, step, nsteps, view_ray);',
// Discard nearly-transparent fragments so the background shows through.
'if (gl_FragColor.a < 0.05)',
'discard;',
'}',
'float sample1(vec3 texcoords) {',
'/* Sample float value from a 3D texture. Assumes intensity data. */',
'return texture(u_data, texcoords.xyz).r;',
'}',
// Map a raw intensity through u_clim onto the 1D colormap texture.
'vec4 apply_colormap(float val) {',
'val = (val - u_clim[0]) / (u_clim[1] - u_clim[0]);',
'return texture2D(u_cmdata, vec2(val, 0.5));',
'}',
// Maximum intensity projection: walk the ray, keep the largest sample,
// then refine around it with sub-steps for a crisper result.
'void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',
'float max_val = -1e6;',
// max_i is overwritten on the first iteration (any sample beats -1e6), so
// the 100 initializer is effectively unreachable.
'int max_i = 100;',
'vec3 loc = start_loc;',
// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
// non-constant expression. So we use a hard-coded max, and an additional condition
// inside the loop.
'for (int iter=0; iter<MAX_STEPS; iter++) {',
'if (iter >= nsteps)',
'break;',
// Sample from the 3D texture
'float val = sample1(loc);',
// Apply MIP operation
'if (val > max_val) {',
'max_val = val;',
'max_i = iter;',
'}',
// Advance location deeper into the volume
'loc += step;',
'}',
// Refine location, gives crispier images
'vec3 iloc = start_loc + step * (float(max_i) - 0.5);',
'vec3 istep = step / float(REFINEMENT_STEPS);',
'for (int i=0; i<REFINEMENT_STEPS; i++) {',
'max_val = max(max_val, sample1(iloc));',
'iloc += istep;',
'}',
// Resolve final color
'gl_FragColor = apply_colormap(max_val);',
'}',
// Isosurface rendering: walk the ray until a sample crosses the threshold,
// refine the hit location with sub-steps, then shade it.
'void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {',
'gl_FragColor = vec4(0.0); // init transparent',
// NOTE(review): color3 appears unused in this function — likely a leftover.
'vec4 color3 = vec4(0.0); // final color',
'vec3 dstep = 1.5 / u_size; // step to sample derivative',
'vec3 loc = start_loc;',
// Slightly lower trigger threshold so the refinement loop below can find the
// exact crossing without overshooting it.
'float low_threshold = u_renderthreshold - 0.02 * (u_clim[1] - u_clim[0]);',
// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
// non-constant expression. So we use a hard-coded max, and an additional condition
// inside the loop.
'for (int iter=0; iter<MAX_STEPS; iter++) {',
'if (iter >= nsteps)',
'break;',
// Sample from the 3D texture
'float val = sample1(loc);',
'if (val > low_threshold) {',
// Take the last interval in smaller steps
'vec3 iloc = loc - 0.5 * step;',
'vec3 istep = step / float(REFINEMENT_STEPS);',
'for (int i=0; i<REFINEMENT_STEPS; i++) {',
'val = sample1(iloc);',
'if (val > u_renderthreshold) {',
'gl_FragColor = add_lighting(val, iloc, dstep, view_ray);',
'return;',
'}',
'iloc += istep;',
'}',
'}',
// Advance location deeper into the volume
'loc += step;',
'}',
'}',
// Shade an isosurface hit: estimate the normal from central differences of
// the scalar field and apply a single headlight-style light.
'vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray)',
'{',
// Calculate color by incorporating lighting
// View direction
'vec3 V = normalize(view_ray);',
// calculate normal vector from gradient
'vec3 N;',
'float val1, val2;',
'val1 = sample1(loc + vec3(-step[0], 0.0, 0.0));',
'val2 = sample1(loc + vec3(+step[0], 0.0, 0.0));',
'N[0] = val1 - val2;',
'val = max(max(val1, val2), val);',
'val1 = sample1(loc + vec3(0.0, -step[1], 0.0));',
'val2 = sample1(loc + vec3(0.0, +step[1], 0.0));',
'N[1] = val1 - val2;',
'val = max(max(val1, val2), val);',
'val1 = sample1(loc + vec3(0.0, 0.0, -step[2]));',
'val2 = sample1(loc + vec3(0.0, 0.0, +step[2]));',
'N[2] = val1 - val2;',
'val = max(max(val1, val2), val);',
'float gm = length(N); // gradient magnitude',
'N = normalize(N);',
// Flip normal so it points towards viewer
'float Nselect = float(dot(N, V) > 0.0);',
'N = (2.0 * Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;',
// Init colors (these locals shadow the file-level constants above)
'vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);',
'vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);',
'vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);',
// note: could allow multiple lights
'for (int i=0; i<1; i++)',
'{',
// Get light direction (make sure to prevent zero devision)
'vec3 L = normalize(view_ray); //lightDirs[i];',
'float lightEnabled = float( length(L) > 0.0 );',
'L = normalize(L + (1.0 - lightEnabled));',
// Calculate lighting properties
'float lambertTerm = clamp(dot(N, L), 0.0, 1.0);',
'vec3 H = normalize(L+V); // Halfway vector',
'float specularTerm = pow(max(dot(H, N), 0.0), shininess);',
// Calculate mask
'float mask1 = lightEnabled;',
// Calculate colors
// NOTE(review): ambient_color starts at zero and is only ever added to
// itself, so the ambient term is always zero; the same holds for
// specular_color's color factor. Looks like a copy-paste slip inherited
// from the original example — confirm against upstream before changing.
'ambient_color += mask1 * ambient_color; // * gl_LightSource[i].ambient;',
'diffuse_color += mask1 * lambertTerm;',
'specular_color += mask1 * specularTerm * specular_color;',
'}',
// Calculate final color by componing different components
'vec4 final_color;',
'vec4 color = apply_colormap(val);',
'final_color = color * (ambient_color + diffuse_color) + specular_color;',
'final_color.a = color.a;',
'return final_color;',
'}',
].join( '\n' )
};
<!DOCTYPE html>
<html lang="en">
<head>
<title>three.js webgl - volume rendering (NRRD loader)</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<style>
body {
font-family: Monospace;
background-color: #000;
color: #fff;
margin: 0px;
overflow: hidden;
}
#info {
color: #fff;
position: absolute;
top: 10px;
width: 100%;
text-align: center;
z-index: 5;
display:block;
}
.dg {
z-index: 10 !important;
}
#info a, .button { color: #f00; font-weight: bold; text-decoration: underline; cursor: pointer }
#inset {
width: 150px;
height: 150px;
background-color: transparent; /* shows through only if the renderer is created with alpha: true */
border: none;
margin: 0;
padding: 0px;
position: absolute;
left: 20px;
bottom: 20px;
z-index: 100;
}
</style>
</head>
<body>
<div id="info">
<a href="http://threejs.org" target="_blank" rel="noopener">three.js</a> -
Float volume render test (mip / isosurface)
</div>
<div id="inset"></div>
<script src="../build/three.js"></script>
<script src="js/controls/TrackballControls.js"></script>
<script src="js/Volume.js"></script>
<script src="js/VolumeSlice.js"></script>
<script src="js/loaders/NRRDLoader.js"></script>
<script src="js/shaders/VolumeShader.js"></script>
<script src="js/Detector.js"></script>
<script src="js/libs/stats.min.js"></script>
<script src="js/libs/gunzip.min.js"></script>
<script src="js/libs/dat.gui.min.js"></script>
<script>
// Bail out with a friendly message when WebGL is unavailable.
if ( ! Detector.webgl ) Detector.addGetWebGLMessage();

// Main view state.
var container;
var stats;
var camera;
var controls;
var scene;
var renderer;
var gui;

// Inset (orientation axes) view state.
var container2;
var renderer2;
var camera2;
var axes2;
var scene2;

// Function declarations are hoisted, so these calls are safe here.
init();
animate();
// Set up camera, scene, renderer, controls, GUI and the inset view, and kick
// off the asynchronous NRRD volume load.
function init() {

	// The volume renderer does not work very well with perspective yet
	//camera = new THREE.PerspectiveCamera( 60, window.innerWidth / window.innerHeight, 0.01, 1e10 );
	camera = new THREE.OrthographicCamera();
	camera.position.z = 1000;

	// Size the orthographic frustum from the window (5 px per world unit).
	// The original addressed these as `this.camera.*`, which only worked
	// because `this` was the global object in sloppy mode; use the global.
	camera.right = window.innerWidth / 5.0;
	camera.left = - camera.right;
	camera.top = window.innerHeight / 5.0;
	camera.bottom = - camera.top;
	camera.near = 0.1;
	camera.far = 5000;
	camera.updateProjectionMatrix();

	scene = new THREE.Scene();
	scene.add( camera );

	// light — parented to the camera so it follows the view
	var dirLight = new THREE.DirectionalLight( 0xffffff );
	dirLight.position.set( 200, 200, 1000 ).normalize();
	camera.add( dirLight );
	camera.add( dirLight.target );

	var loader = new THREE.NRRDLoader();
	loader.load( "models/nrrd/stent.nrrd", function ( volume ) {

		window.volume = volume; // exposed for console debugging

		if ( false ) {

			// Box helper to see the extent of the volume (disabled)
			var geometry = new THREE.BoxBufferGeometry( volume.xLength, volume.yLength, volume.zLength );
			geometry = geometry.translate( + volume.xLength / 2, - volume.yLength / 2, - volume.zLength / 2 );
			var material = new THREE.MeshBasicMaterial( { color: 0x00ff00 } );
			var cube = new THREE.Mesh( geometry, material );
			cube.visible = false;
			var box = new THREE.BoxHelper( cube );
			scene.add( box );
			box.applyMatrix( volume.matrix );
			scene.add( cube );

		}

		// Texture to hold the volume. We have scalars, so we put our data in the red channel.
		// THREEJS will select R32F (33326) based on the RedFormat and FloatType.
		// Also see https://www.khronos.org/registry/webgl/specs/latest/2.0/#TEXTURE_TYPES_FORMATS_FROM_DOM_ELEMENTS_TABLE
		// TODO: look the dtype up in the volume metadata
		var texture = new THREE.Texture3D( volume.data, volume.xLength, volume.yLength, volume.zLength, THREE.RedFormat, THREE.FloatType );
		texture.minFilter = texture.magFilter = THREE.LinearFilter;
		texture.unpackAlignment = 1;
		texture.needsUpdate = true;

		// Colormap texture
		var cmtexture = new THREE.TextureLoader().load( 'textures/cm_viridis.png' ); // e.g. cm_gray or cm_viridis

		// Material (shaders) to render the volume using raycasting
		var volmaterial = new THREE.ShaderMaterial( THREE.ShaderLib[ 'volumerender1' ] );
		volmaterial.side = THREE.BackSide; // The volume shader uses the backface as its "reference point"

		// Apply volume material uniform info
		volmaterial.uniforms.u_data = { type: 't', value: texture };
		volmaterial.uniforms.u_cmdata = { type: 't', value: cmtexture };
		volmaterial.uniforms.u_size = { type: 'v3', value: [ volume.xLength, volume.yLength, volume.zLength ] };
		volmaterial.uniforms.u_renderstyle = { type: 'int', value: 0 }; // 0: MIP, 1: ISO
		volmaterial.uniforms.u_renderthreshold = { type: 'f', value: 0.3 }; // For ISO renderstyle
		volmaterial.uniforms.u_clim = { type: 'v2', value: [ 0, 1 ] };
		volmaterial.needsUpdate = true;

		// Geometry for the volume
		var volgeometry = new THREE.BoxGeometry( volume.xLength, volume.yLength, volume.zLength );
		volgeometry = volgeometry.translate( volume.xLength / 2 - 0.5, volume.yLength / 2 - 0.5, volume.zLength / 2 - 0.5 );
		var volcube = new THREE.Mesh( volgeometry, volmaterial );
		scene.add( volcube );

		// TODO: the texture coordinates currently map directly to world coordinates. That's why we translate
		// the geometry above. But the extractSlice() below assume that the volume is centered at the origin.
		// We need to add a material attribute with texture coordinates to fix this.
		return;

		// Slice views — intentionally unreachable until the TODO above is fixed.
		//z plane
		var indexZ = 0;
		var sliceZ = volume.extractSlice( 'z', Math.floor( volume.RASDimensions[ 2 ] / 4 ) );
		scene.add( sliceZ.mesh );
		//y plane
		var indexY = 0;
		var sliceY = volume.extractSlice( 'y', Math.floor( volume.RASDimensions[ 1 ] / 2 ) );
		scene.add( sliceY.mesh );
		//x plane
		var indexX = 0;
		var sliceX = volume.extractSlice( 'x', Math.floor( volume.RASDimensions[ 0 ] / 2 ) );
		scene.add( sliceX.mesh );

		gui.add( sliceX, "index", 0, volume.RASDimensions[ 0 ], 1 ).name( "indexX" ).onChange( function () { sliceX.repaint.call( sliceX ); } );
		gui.add( sliceY, "index", 0, volume.RASDimensions[ 1 ], 1 ).name( "indexY" ).onChange( function () { sliceY.repaint.call( sliceY ); } );
		gui.add( sliceZ, "index", 0, volume.RASDimensions[ 2 ], 1 ).name( "indexZ" ).onChange( function () { sliceZ.repaint.call( sliceZ ); } );
		gui.add( volume, "lowerThreshold", volume.min, volume.max, 1 ).name( "Lower Threshold" ).onChange( function () {
			volume.repaintAllSlices();
		} );
		gui.add( volume, "upperThreshold", volume.min, volume.max, 1 ).name( "Upper Threshold" ).onChange( function () {
			volume.repaintAllSlices();
		} );
		gui.add( volume, "windowLow", volume.min, volume.max, 1 ).name( "Window Low" ).onChange( function () {
			volume.repaintAllSlices();
		} );
		gui.add( volume, "windowHigh", volume.min, volume.max, 1 ).name( "Window High" ).onChange( function () {
			volume.repaintAllSlices();
		} );

	} );

	// renderer — request a WebGL 2 context explicitly, because the volume
	// shader uses sampler3D (a WebGL 2 feature).
	var canvas = document.createElement( 'canvas' );
	var context = canvas.getContext( 'webgl2' );
	renderer = new THREE.WebGLRenderer( { canvas: canvas, context: context } );
	renderer.setPixelRatio( window.devicePixelRatio );
	renderer.setSize( window.innerWidth, window.innerHeight );

	container = document.createElement( 'div' );
	document.body.appendChild( container );
	container.appendChild( renderer.domElement );

	controls = new THREE.TrackballControls( camera, renderer.domElement );
	controls.rotateSpeed = 5.0;
	controls.zoomSpeed = 5;
	controls.panSpeed = 2;
	controls.noZoom = false;
	controls.noPan = false;
	controls.staticMoving = true;
	controls.dynamicDampingFactor = 0.3;

	stats = new Stats();
	container.appendChild( stats.dom );

	// Assign the file-level `gui` (the original declared a shadowing local
	// `var gui`, which left the global declared at the top forever undefined).
	gui = new dat.GUI();

	setupInset();

	window.addEventListener( 'resize', onWindowResize, false );

}
// Keep the view matched to the window size on resize.
// The main camera is an OrthographicCamera (see init), which has no `aspect`
// property — the original `camera.aspect = ...` assignment had no effect, so
// the frustum stayed at its initial size. Recompute the extents with the same
// pixels-to-world mapping that init uses instead.
function onWindowResize() {

	camera.right = window.innerWidth / 5.0;
	camera.left = - camera.right;
	camera.top = window.innerHeight / 5.0;
	camera.bottom = - camera.top;
	camera.updateProjectionMatrix();

	renderer.setSize( window.innerWidth, window.innerHeight );
	controls.handleResize();

}
// Per-frame loop: update controls, mirror the main camera's orientation onto
// the inset axes camera, then render both views.
function animate() {

	requestAnimationFrame( animate );

	controls.update();

	// Place the inset camera along the same direction the main camera looks
	// from (relative to the controls target), at a fixed distance, aimed at
	// the inset scene's origin. Vector3 methods return `this`, so chain them.
	camera2.position
		.copy( camera.position )
		.sub( controls.target )
		.setLength( 300 );
	camera2.lookAt( scene2.position );

	renderer.render( scene, camera );
	renderer2.render( scene2, camera2 );

	stats.update();

}
// Rotate `object` by `radians` about a world-space `axis` (the axis vector is
// normalized in place) by applying a world rotation matrix to the object.
function rotateAroundWorldAxis( object, axis, radians ) {

	var worldRotation = new THREE.Matrix4().makeRotationAxis( axis.normalize(), radians );
	object.applyMatrix( worldRotation );

}
// Build the small orientation-axes inset in the lower-left corner: its own
// transparent renderer, scene, perspective camera and an AxesHelper.
function setupInset() {

	// The inset is a fixed 150x150 square.
	var insetSize = 150;

	container2 = document.getElementById( 'inset' );
	container2.width = insetSize;
	container2.height = insetSize;

	// renderer — alpha so the page background shows through
	renderer2 = new THREE.WebGLRenderer( { alpha: true } );
	renderer2.setClearColor( 0x000000, 0 );
	renderer2.setSize( insetSize, insetSize );
	container2.appendChild( renderer2.domElement );

	// scene
	scene2 = new THREE.Scene();

	// camera
	camera2 = new THREE.PerspectiveCamera( 50, insetSize / insetSize, 1, 1000 );
	camera2.up = camera.up; // important!

	// axes
	axes2 = new THREE.AxesHelper( 100 );
	scene2.add( axes2 );

}
</script>
</body>
</html>
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册