Commit b0cfbad8 authored by alteredq

Updated CTM pipeline to use single concatenated file for multi-material models.

Added a Python script for file concatenation that keeps track of the start offset of each file.

This single-file approach is a bit slower, but should be easier on the server / workers (previously every part spawned its own worker; now all parts of a single model are handled by one worker).
Parent eab35933
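To make the new flow concrete, here is a minimal usage sketch (editorial, not part of the commit); the descriptor path, the flag values and the scene object are assumptions for illustration:

// hypothetical usage of the updated loader: one request for the JSON descriptor
// (like the one shown below), then a single worker decodes every part from the
// concatenated CTM file
var loader = new THREE.CTMLoader();

loader.loadParts( "models/camaro/camaro.js", function ( geometries, materials ) {

	// the callback fires once all parts listed in "offsets" have been decoded
	for ( var i = 0; i < geometries.length; i ++ ) {

		scene.add( new THREE.Mesh( geometries[ i ], materials[ i ] ) );

	}

}, true, true ); // useWorker, useBuffers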
{
"geometries" :
[
"camaro_000.ctm",
"camaro_001.ctm",
"camaro_002.ctm",
"camaro_003.ctm",
"camaro_004.ctm",
"camaro_005.ctm",
"camaro_006.ctm",
"camaro_007.ctm"
],
"data" : "camaro.ctm",
"offsets": [ 0, 39262, 79223, 83542, 94677, 95890, 144902, 470461 ],
"materials" :
[
......
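A note on the "offsets" array above (editorial, not part of the commit): each value is the byte position inside camaro.ctm where the corresponding part begins, i.e. the running total of the preceding parts' file sizes, so camaro_001.ctm starts at byte 39262 and camaro_002.ctm at byte 79223. A small sketch under that reading, with descriptor standing for the parsed JSON and totalBytes for the length of camaro.ctm (both hypothetical names):

// illustrative only: compute the byte range each part occupies in the joined file
function partRanges( descriptor, totalBytes ) {

	var ranges = [];

	for ( var i = 0; i < descriptor.offsets.length; i ++ ) {

		var begin = descriptor.offsets[ i ];
		var end = ( i + 1 < descriptor.offsets.length ) ? descriptor.offsets[ i + 1 ] : totalBytes;

		ranges.push( { name: descriptor.geometries[ i ], begin: begin, end: end } );

	}

	return ranges;

}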
@@ -30,8 +30,6 @@ THREE.CTMLoader.prototype.loadParts = function( url, callback, useWorker, useBuf
 	basePath = basePath ? basePath : this.extractUrlbase( url );
-	console.log( basePath );
 	xhr.onreadystatechange = function() {
 		if ( xhr.readyState == 4 ) {
@@ -40,27 +38,23 @@ THREE.CTMLoader.prototype.loadParts = function( url, callback, useWorker, useBuf
 				var jsonObject = JSON.parse( xhr.responseText );
-				var geometries = [], materials = [];
-				var partCounter = 0;
-				function generateCallback( index ) {
+				var materials = [], geometries = [], counter = 0;
-					return function ( geometry ) {
+				function callbackFinal( geometry ) {
-						geometries[ index ] = geometry;
+					counter += 1;
-						partCounter += 1;
+					geometries.push( geometry );
-						if ( partCounter === jsonObject.geometries.length ) {
+					if ( counter === jsonObject.offsets.length ) {
-							callback( geometries, materials );
-						}
+						callback( geometries, materials );
+					}
 				}
 				// init materials
 				for ( var i = 0; i < jsonObject.materials.length; i ++ ) {
@@ -69,14 +63,10 @@ THREE.CTMLoader.prototype.loadParts = function( url, callback, useWorker, useBuf
 				}
-				// load individual CTM files
-				for ( var i = 0; i < jsonObject.geometries.length; i ++ ) {
+				// load joined CTM file
-					var partUrl = basePath + jsonObject.geometries[ i ];
-					scope.load( partUrl, generateCallback( i ), useWorker, useBuffers );
-				}
+				var partUrl = basePath + jsonObject.data;
+				scope.load( partUrl, callbackFinal, useWorker, useBuffers, jsonObject.offsets );
 			}
@@ -96,10 +86,12 @@ THREE.CTMLoader.prototype.loadParts = function( url, callback, useWorker, useBuf
 // - url (required)
 // - callback (required)
-THREE.CTMLoader.prototype.load = function( url, callback, useWorker, useBuffers ) {
+THREE.CTMLoader.prototype.load = function( url, callback, useWorker, useBuffers, offsets ) {
 	var scope = this;
+	offsets = offsets !== undefined ? offsets : [ 0 ];
 	var xhr = new XMLHttpRequest(),
 		callbackProgress = null;
@@ -121,15 +113,21 @@ THREE.CTMLoader.prototype.load = function( url, callback, useWorker, useBuffers
 					worker.onmessage = function( event ) {
-						var ctmFile = event.data;
+						var files = event.data;
-						if ( useBuffers ) {
+						for ( var i = 0; i < files.length; i ++ ) {
-							scope.createModelBuffers( ctmFile, callback );
+							var ctmFile = files[ i ];
-						} else {
+							if ( useBuffers ) {
-							scope.createModelClassic( ctmFile, callback );
+								scope.createModelBuffers( ctmFile, callback );
+							} else {
+								scope.createModelClassic( ctmFile, callback );
 							}
+						}
@@ -138,24 +136,32 @@ THREE.CTMLoader.prototype.load = function( url, callback, useWorker, useBuffers
 					};
-					worker.postMessage( binaryData );
+					worker.postMessage( { "data": binaryData, "offsets": offsets } );
 				} else {
-					var ctmFile = new CTM.File( new CTM.Stream( binaryData ) );
-					if ( useBuffers ) {
+					for ( var i = 0; i < offsets.length; i ++ ) {
+						var stream = new CTM.Stream( binaryData );
+						stream.offset = offsets[ i ];
+						var ctmFile = new CTM.File( stream );
+						if ( useBuffers ) {
-						scope.createModelBuffers( ctmFile, callback );
+							scope.createModelBuffers( ctmFile, callback );
-					} else {
+						} else {
-						scope.createModelClassic( ctmFile, callback );
+							scope.createModelClassic( ctmFile, callback );
 						}
+					}
-					var e = Date.now();
-					console.log( "CTM data parse time [inline]: " + (e-s) + " ms" );
+					//var e = Date.now();
+					//console.log( "CTM data parse time [inline]: " + (e-s) + " ms" );
 				}
......
@@ -2,7 +2,18 @@ importScripts( "lzma.js", "ctm.js" );
 self.onmessage = function( event ) {
-	self.postMessage( new CTM.File( new CTM.Stream( event.data ) ) );
-	self.close();
+	var files = [];
+	for ( var i = 0; i < event.data.offsets.length; i ++ ) {
+		var stream = new CTM.Stream( event.data.data );
+		stream.offset = event.data.offsets[ i ];
+		files[ i ] = new CTM.File( stream );
+	}
+	self.postMessage( files );
+	self.close();
 }
"""Join multiple binary files into single file and generate JSON snippet with offsets
-------------------------------------
How to use
-------------------------------------
python join_ctm.py -i "part_*.ctm" -o joined.ctm [-j offsets.js]
Will read multiple files following wildcard pattern (ordered lexicographically):
part_000.ctm
part_001.ctm
part_002.ctm
...
part_XXX.ctm
And generate a single concatenated file (plus an optional offsets snippet):
joined.ctm
offsets.js (optional, offsets are also dumped to standard output)
"""
import getopt
import glob
import sys
import os
# #####################################################
# Templates
# #####################################################
TEMPLATE_JSON = u"""\
"offsets": [ %(offsets)s ],
"""
# #############################################################################
# Helpers
# #############################################################################
def usage():
    print 'Usage: %s -i "filename_*.ctm" -o filename.ctm [-j offsets.js]' % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":

    # get parameters from the command line
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:o:j:", ["help", "input=", "output=", "json="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    inpattern = ""
    outname = ""
    jsonname = ""

    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-i", "--input"):
            inpattern = a
        elif o in ("-o", "--output"):
            outname = a
        elif o in ("-j", "--json"):
            jsonname = a

    # quit if required parameters are missing
    if inpattern == "" or outname == "":
        usage()
        sys.exit(2)

    outfile = open(outname, "wb")

    matches = glob.glob(inpattern)
    matches.sort()

    total = 0
    offsets = []

    for filename in matches:
        filesize = os.path.getsize(filename)

        offsets.append(total)
        total += filesize

        print filename, filesize

        infile = open(filename, "rb")
        buffer = infile.read()
        outfile.write(buffer)
        infile.close()

    outfile.close()

    json_str = TEMPLATE_JSON % {
        "offsets" : ", ".join(["%d" % o for o in offsets])
    }

    print json_str

    if jsonname:
        jsonfile = open(jsonname, "w")
        jsonfile.write(json_str)
        jsonfile.close()
\ No newline at end of file
"""Split single OBJ model into mutliple OBJ files by materials
-------------------------
How to use this converter
-------------------------
-------------------------------------
How to use
-------------------------------------
python split_obj.py -i infile.obj -o outfile
......