提交 f30f0853 编写于 作者: J Jerome Etienne

more on benching wasm+worker

上级 e0310040
# WebAssembly
- thresholding is working in webassembly
- now lets make it fast
- O(n) incremental blur In webassembly
- Incremental blur: 3 stages. First, middle, end. Do slow loop for those. Only aim for O(n).
- do a linear time with the incremental technique
- do a pure average first
- you know how to code it
- later: do a gaussian approximation - the boxblur/stackblur trick
- so you get a good idea of the speed
- so you get webassembly version from optimised c - this is the fastest it can be on the web at the moment
- good to bench webassembly
- test multiple browser - multiple resolutions
- see how hard it would be to incorporate it in threex-aruco.js
- source ~/webwork/emsdk/emsdk_env.sh
Found issues
......@@ -24,3 +7,12 @@ Found issues
- jsaruco - adaptiveThreshold is doing it on ALL bytes - so all channel ???
- it use blackwhite image - it only needs 1 channel - 8 bits is already a lot to store blackwhite
- this mean 4 times more work than needed
- NOTES: unclear this is true - grayscale is packing it all in 1 channel. check it out ?
# API issue
- missing videoElement.getImageData(). It is only available in canvas context
- cause a canvasContext.drawImage(videoElement) + canvasContext.getImageData()
- missing getImageData() .data directly as SharedArrayBuffer
- new API ? imageData = contextCanvas.getImageData(x, y, width, height, dstImageData)
- inability to pass a buffer i want to WebAssembly without copying
- Think about steps to experiment with webworkers. How to split the tasks between workers
- Workload sharing: split image in 4 - simpler to code
- artefacts at the borders of the splits, may be fixed later if needed
- So we split the load in 4 and run that on all workers
- https://github.com/tc39/ecmascript_sharedmem/blob/master/TUTORIAL.md
- https://hacks.mozilla.org/2016/05/a-taste-of-javascripts-new-parallel-primitives/
- https://gist.github.com/dherman/5463054
- how thread notify each other ?
- currently this is message ?
- could it be better with atomics ?
- Current communication
- main-thread - asking workers to process part of the image
- worker - telling main-thread the process is completed
- issue with the setup
- i copy the whole thing SO SO much
// Worker-side setup: pull in the js-aruco computer-vision helpers.
self.importScripts('../../js-aruco/vendor/js-aruco/src/cv.js')
// Scratch CV.Image instances — currently unused in this worker; presumably
// kept for the planned thresholding step. NOTE(review): verify before removing.
var greyCVImage = new CV.Image()
var thresCVImage = new CV.Image()
/**
 * Convert an RGBA image to grayscale in place.
 * Each pixel's red/green/blue bytes are replaced by the BT.601 luma of the
 * pixel ((r*0.299 + g*0.587 + b*0.114), rounded via +0.5 and truncated with
 * `& 0xff`), and alpha is forced to 255.
 * @param {ArrayBuffer|SharedArrayBuffer} imageBuffer - RGBA bytes, 4 per pixel
 * @param {number} imageW - image width in pixels (unused here; kept for API symmetry)
 * @param {number} imageH - image height in pixels (unused here; kept for API symmetry)
 */
function convert2Grayscale(imageBuffer, imageW, imageH){
  var pixels = new Uint8Array(imageBuffer)
  var nBytes = pixels.length
  for(var offset = 0; offset < nBytes; offset += 4){
    var luma = (pixels[offset] * 0.299 + pixels[offset + 1] * 0.587 + pixels[offset + 2] * 0.114 + 0.5) & 0xff
    pixels[offset] = luma
    pixels[offset + 1] = luma
    pixels[offset + 2] = luma
    pixels[offset + 3] = 255
  }
}
/**
 * Convert a rectangular sub-region of an RGBA image to grayscale in place.
 * Same per-pixel math as convert2Grayscale (BT.601 luma, `& 0xff`, alpha 255)
 * but restricted to the rectangle [originX, originX+width) x [originY, originY+height).
 * @param {ArrayBuffer|SharedArrayBuffer} imageBuffer - RGBA bytes, 4 per pixel
 * @param {number} imageW - full image width in pixels (row stride)
 * @param {number} imageH - full image height in pixels (unused; kept for API symmetry)
 * @param {number} originX - left edge of the zone
 * @param {number} originY - top edge of the zone
 * @param {number} width - zone width in pixels
 * @param {number} height - zone height in pixels
 */
function convert2GrayscaleZone(imageBuffer, imageW, imageH, originX, originY, width, height){
  var pixels = new Uint8Array(imageBuffer)
  var lastY = originY + height
  var lastX = originX + width
  for(var py = originY; py < lastY; py++){
    for(var px = originX; px < lastX; px++){
      var offset = (py * imageW + px) * 4
      var luma = (pixels[offset] * 0.299 + pixels[offset + 1] * 0.587 + pixels[offset + 2] * 0.114 + 0.5) & 0xff
      pixels[offset] = luma
      pixels[offset + 1] = luma
      pixels[offset + 2] = luma
      pixels[offset + 3] = 255
    }
  }
}
// Worker entry point: the main thread posts {imageBuffer, imageW, imageH,
// originX, originY, width, height}. The zone is grayscaled in place — the
// buffer is a SharedArrayBuffer, so the main thread sees the result without
// any copy — then we ack with a 'completed' message.
self.addEventListener('message', function(event){
// console.log('inworker: processing started!')
var data = event.data;
convert2GrayscaleZone(data.imageBuffer, data.imageW, data.imageH, data.originX, data.originY, data.width, data.height)
// console.log('inworker: processing done!')
self.postMessage('completed')
})
<body><script>
// Main-thread setup for the multicore grayscale benchmark: a 640x480 display
// canvas plus one SharedArrayBuffer holding the RGBA frame that the workers
// will process in place.
// create srcCanvas and srcImageData
var srcCanvas = document.createElement('canvas')
srcCanvas.width = 640
srcCanvas.height = 480
var srcContext = srcCanvas.getContext('2d')
document.body.appendChild(srcCanvas)
var srcImageData = srcContext.getImageData(0, 0, srcCanvas.width, srcCanvas.height)
// SharedArrayBuffer is gated behind flags/origin policies in some browsers
if( typeof(SharedArrayBuffer) === 'undefined' ){
alert('SharedArrayBuffer - try with firefox or canary')
}
// create imageBuffer and imageArray
// imageBuffer is shared with the workers; imageArray is the main thread's
// byte view used to copy frames in and read results out.
var imageBuffer = new SharedArrayBuffer(srcImageData.data.byteLength);
var imageArray = new Uint8Array(imageBuffer);
imageArray.set(srcImageData.data)
// Webcam capture feeding the benchmark: an off-DOM <video> element that
// autoplays the getUserMedia stream at the canvas resolution.
var videoElement = document.createElement('video')
// document.body.appendChild(videoElement)
videoElement.autoplay = true
// https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
navigator.mediaDevices.getUserMedia({
video : {
width: srcCanvas.width,
height:srcCanvas.height
}
}).then(function(stream){
// FIX: URL.createObjectURL(MediaStream) is deprecated and removed from
// modern browsers — assign the stream to srcObject instead, keeping the
// old path only as a fallback for legacy engines.
if( 'srcObject' in videoElement ){
videoElement.srcObject = stream
}else{
videoElement.src = window.URL.createObjectURL(stream)
}
}).catch(function(error){
console.assert(false, 'getUserMedia failed due to')
console.dir(error)
})
// create the worker
// Pool of 4 workers, one per image quadrant (see processVideo below).
var workerURL='multicore-purejs-worker.js'
var workers = []
workers.push( new Worker(workerURL) )
workers.push( new Worker(workerURL) )
workers.push( new Worker(workerURL) )
workers.push( new Worker(workerURL) )
var worker = workers[0]
// Hidden scratch canvas used only to extract ImageData from the video frame
// (there is no videoElement.getImageData() — see the API-issue notes above).
var tmpCanvas = document.createElement('canvas')
tmpCanvas.width = srcCanvas.width
tmpCanvas.height = srcCanvas.height
var tmpContext = tmpCanvas.getContext('2d')
// Main loop: process one frame per animation frame, rescheduling only after
// all workers report completion.
requestAnimationFrame(function callback(){
processVideo(videoElement, function(){
requestAnimationFrame(callback)
})
})
// Alternative benchmark driver (disabled): runs 400 frames back-to-back and
// prints the average fps instead of pacing with requestAnimationFrame.
// setTimeout(function(){
// console.log('starting to bench')
// var startedAt = Date.now()
// var nProcessVideo = 0
// var nFrame2Process = 400
//
// ;(function callback(){
// processVideo(videoElement, function(){
// nProcessVideo ++
// if( nProcessVideo > nFrame2Process ){
// var iterationDelay = (Date.now() - startedAt)/1000 /nFrame2Process
// console.log(Math.round(1/iterationDelay), 'fps' )
// return
// }
//
// // requestAnimationFrame(callback)
// callback()
// })
// })()
// }, 1000)
// Grab the current video frame into the SharedArrayBuffer, fan the grayscale
// work out to the 4 workers (one quadrant each), and blit the processed bytes
// back into srcCanvas once every quadrant has reported 'completed'.
// onComplete (optional) fires after the blit — the caller uses it to schedule
// the next frame.
function processVideo(videoElement, onComplete){
// console.log('processVideo')
// update imageBuffer with videoElement
// video -> tmpCanvas -> ImageData -> shared imageArray (2 copies per frame;
// see the "i copy the whole thing SO SO much" note above)
tmpContext.drawImage(videoElement, 0, 0)
var imageData = tmpContext.getImageData(0, 0, srcCanvas.width, srcCanvas.height)
imageArray.set(imageData.data)
// Work split: four equal quadrants of the frame, one per worker.
var callsFourThreads = [
{
originX: 0,
originY: 0,
width: srcCanvas.width/2,
height: srcCanvas.height/2,
},
{
originX: srcCanvas.width/2,
originY: 0,
width: srcCanvas.width/2,
height: srcCanvas.height/2,
},
{
originX: 0,
originY: srcCanvas.height/2,
width: srcCanvas.width/2,
height: srcCanvas.height/2,
},
{
originX: srcCanvas.width/2,
originY: srcCanvas.height/2,
width: srcCanvas.width/2,
height: srcCanvas.height/2,
},
]
// Alternative split: whole frame on a single worker, for comparison.
// NOTE(review): currently unused — only callsFourThreads is dispatched below.
var callsOneThread = [
{
originX: 0,
originY: 0,
width: srcCanvas.width,
height: srcCanvas.height,
}
]
// Completion counter: the frame is done when every dispatched zone has acked.
var nProcessed = 0
console.assert(callsOneThread.length <= workers.length)
console.assert(callsFourThreads.length <= workers.length)
callsFourThreads.forEach(function(call, workerIndex){
// debugger
var worker = workers[workerIndex]
// send it the imageBuffer to process
worker.postMessage({
imageBuffer: imageBuffer,
imageW : srcCanvas.width,
imageH : srcCanvas.height,
originX: call.originX,
originY: call.originY,
width: call.width,
height: call.height,
})
// wait for the answer
// One-shot listener per dispatch: removed on first message so the next
// frame's listener does not stack on top of this one.
worker.addEventListener('message', function onMessage(message){
worker.removeEventListener('message', onMessage)
nProcessed ++;
// console.log('main-thread: nProcessed', nProcessed, calls.length)
if( nProcessed === callsFourThreads.length ){
// copy the processed image back to the srcCanvas
srcImageData.data.set(imageArray)
srcContext.putImageData(srcImageData, 0,0)
onComplete && onComplete()
}
})
})
}
</script></body>
<body><script>
// Minimal SharedArrayBuffer visibility demo: the worker busy-waits until the
// main thread's write to the shared memory becomes visible to it.
// NOTE(review): a spin-loop burns a core — Atomics.wait/notify would be the
// proper primitive; this is throwaway experiment code.
// (The worker source below is a runtime string, so it carries no comments.)
var blob = new Blob([`
self.addEventListener('message', function(imageBuffer){
var imageArray = new Uint8Array(imageBuffer.data.imageBuffer);
var oldValue = imageArray[0];
console.log('inworker: waiting for value to differ from ' + oldValue)
while(imageArray[0] === oldValue) {}
console.log('inworker: value updated!')
})
`], {type: 'text/javascript'});
var workerURL = URL.createObjectURL(blob)
// Shared state: byte 0 is the flag the worker spins on.
var imageBuffer = new SharedArrayBuffer(1024);
var imageArray = new Uint8Array(imageBuffer);
imageArray[0] = 123
var worker = new Worker(workerURL);
worker.postMessage({
imageBuffer: imageBuffer
})
// After 500ms, mutate the shared byte — the worker's spin loop should exit.
setTimeout(function(){
console.log('main-thread: updating value')
imageArray[0]++
}, 500)
<body><script>
// SharedArrayBuffer round-trip demo: a worker inverts the red channel of a
// shared RGBA buffer and forces alpha to 255, then the main thread paints the
// result back onto the canvas.
// (The worker source below is a runtime string, so it carries no comments.)
var blob = new Blob([`
self.addEventListener('message', function(imageBuffer){
var imageArray = new Uint8Array(imageBuffer.data.imageBuffer);
var oldValue = imageArray[0];
console.log('inworker: waiting for value to differ from ' + oldValue)
// while(imageArray[0] === oldValue) {}
for(var i = 0; i < imageArray.byteLength; i+=4){
imageArray[i] = 255 - imageArray[i];
imageArray[i+3] = 255;
}
console.log('inworker: value updated!')
self.postMessage('completed')
})
`], {type: 'text/javascript'});
var workerURL = URL.createObjectURL(blob)
// create srcCanvas
// Test pattern: a yellow square on a transparent-black canvas.
var srcCanvas = document.createElement('canvas')
srcCanvas.width = 320
srcCanvas.height = 240
var srcContext = srcCanvas.getContext('2d')
srcContext.fillStyle = 'rgba(255,255,0,255)';
// srcContext.fillRect(0,0,srcCanvas.width, srcCanvas.height)
srcContext.fillRect(10,10,50,50)
document.body.appendChild(srcCanvas)
var srcImageData = srcContext.getImageData(0, 0, srcCanvas.width, srcCanvas.height)
if( typeof(SharedArrayBuffer) === 'undefined' ){
alert('SharedArrayBuffer - try with firefox or canary')
}
// Copy the canvas pixels into the shared buffer the worker will mutate.
var imageBuffer = new SharedArrayBuffer(srcImageData.data.byteLength);
var imageArray = new Uint8Array(imageBuffer);
imageArray.set(srcImageData.data)
imageArray[0] = 123
var worker = new Worker(workerURL);
worker.postMessage({
imageBuffer: imageBuffer
})
// When the worker acks, read the shared buffer back and repaint the canvas.
worker.addEventListener('message', function(message){
console.log('main-thread: first value', imageArray[0])
// debugger
// var tmpImageData = srcContext.createImageData(srcCanvas.width, srcCanvas.height)
// tmpImageData.data.set(imageArray)
// srcContext.putImageData(tmpImageData, 0, 0)
srcImageData.data.set(imageArray)
srcContext.putImageData(srcImageData, 0, 0)
})
// Worker file: receives a SharedArrayBuffer from the main thread, (would)
// invert the image in place, then acks 'completed' after a 1s delay.
// NOTE(review): event.data IS the SharedArrayBuffer, so `sharedArrayBuffer.data`
// below is likely undefined and imageArray is empty — harmless only because
// the processing loop is commented out. Confirm before re-enabling the loop.
self.addEventListener('message', function(event) {
console.log('inWorker: received event', event)
var sharedArrayBuffer = event.data
var imageArray = new Uint8Array(sharedArrayBuffer.data)
console.log('starting working', sharedArrayBuffer.byteLength)
// debugger
// for(var i = 0; i < imageArray.byteLength; i+=4){
// imageArray[i] = 255 - imageArray[i]
// console.log('value', imageArray[i])
// imageArray[i+1] = 255 - imageArray[i+1]
// imageArray[i+2] = 255 - imageArray[i+2]
// imageArray[i+3] = 255
// }
console.log('stop working', imageArray)
// Artificial 1s delay before acking, to make the async flow observable.
setTimeout(function(){
self.postMessage('completed')
}, 1000)
}, false);
// Handshake: tell the main thread this worker is up.
self.postMessage('started')
......@@ -9,27 +9,38 @@
document.body.appendChild(srcCanvas)
// create srcImageData
var srcImageData = srcContext.getImageData(0, 0, srcCanvas.width, srcCanvas.height)
var sharedArrayBuffer = new SharedArrayBuffer(32768);
var workerURL = 'worker-pool-worker.js'
// srcContext.fillStyle = 'black';
// srcContext.fillRect(0, 0, srcCanvas.width, srcCanvas.height)
//
// for(var i = 0; i < srcImageData.data.byteLength; i+=4){
// srcImageData.data[i] = 255 - srcImageData.data[i]
// srcImageData.data[i+1] = 255 - srcImageData.data[i+1]
// srcImageData.data[i+2] = 255 - srcImageData.data[i+2]
// srcImageData.data[i+3] = 127
// }
// srcContext.putImageData(srcImageData, 0, 0)
var workerURL = 'old-worker-pool-worker.js'
// create all workers
var workers = []
workers.push( new Worker(workerURL) )
workers.push( new Worker(workerURL) )
workers.push( new Worker(workerURL) )
workers.push( new Worker(workerURL) )
var nWorkers = workers.length
// workers.push( new Worker(workerURL) )
// workers.push( new Worker(workerURL) )
// workers.push( new Worker(workerURL) )
var worker = workers[1]
worker.postMessage('init')
worker.onmessage = function(event) {
// console.log('in mainthread - received event', event)
if( event.data === 'started' ){
console.log('in mainthread - worker started')
workers.forEach(function(worker){
worker.onmessage = function(event) {
// console.log('in mainthread - received event', event)
if( event.data === 'started' ){
console.log('in mainthread - worker started')
var sharedArrayBuffer = new SharedArrayBuffer(srcImageData.byteLength);
worker.postMessage(sharedArrayBuffer)
}else if( event.data === 'completed' ){
console.log('in mainthread - worker completed')
srcContext.putImageData(srcImageData, 0, 0)
}
}
}
})
</script></body>
// Worker file: the parameter name is misleading — `imageBuffer` is actually
// the message event; the SharedArrayBuffer is at event.data.imageBuffer.
// Inverts the red channel of every RGBA pixel in place and forces alpha to
// 255, then acks 'completed'.
self.addEventListener('message', function(imageBuffer){
var imageArray = new Uint8Array(imageBuffer.data.imageBuffer);
var oldValue = imageArray[0];
console.log('inworker: waiting for value to differ from ' + oldValue)
// while(imageArray[0] === oldValue) {}
for(var i = 0; i < imageArray.byteLength; i+=4){
imageArray[i] = 255 - imageArray[i];
imageArray[i+3] = 255;
}
console.log('inworker: value updated!')
self.postMessage('completed')
})
<body><script>
// Worker-pool experiment: every worker in the pool receives the SAME full
// SharedArrayBuffer (no zone split yet), and the canvas is repainted on each
// worker's ack.
// NOTE(review): if the worker mutates the buffer (e.g. inverts it), two
// workers processing the whole image means each pixel is processed twice —
// confirm against worker-pool-worker.js.
// create srcCanvas and srcImageData
var srcCanvas = document.createElement('canvas')
srcCanvas.width = 320
srcCanvas.height = 240
var srcContext = srcCanvas.getContext('2d')
srcContext.fillStyle = 'rgba(255,255,0,255)';
srcContext.fillRect(10,10,50,50)
document.body.appendChild(srcCanvas)
var srcImageData = srcContext.getImageData(0, 0, srcCanvas.width, srcCanvas.height)
if( typeof(SharedArrayBuffer) === 'undefined' ){
alert('SharedArrayBuffer - try with firefox or canary')
}
// create imageBuffer and imageArray
var imageBuffer = new SharedArrayBuffer(srcImageData.data.byteLength);
var imageArray = new Uint8Array(imageBuffer);
imageArray.set(srcImageData.data)
// create the worker
var workerURL='worker-pool-worker.js'
var workers = []
workers.push( new Worker(workerURL) )
workers.push( new Worker(workerURL) )
// workers.push( new Worker(workerURL) )
// workers.push( new Worker(workerURL) )
workers.forEach(function(worker){
// send it the imageBuffer to process
worker.postMessage({
imageBuffer: imageBuffer
})
// wait for the answer
worker.addEventListener('message', function(message){
console.log('main-thread: imageBuffer processed')
// copy the processed image back to the srcCanvas
srcImageData.data.set(imageArray)
srcContext.putImageData(srcImageData, 0, 0)
})
})
// Worker file handling only an 'init' handshake: the main thread posts
// 'init', the worker does its (currently empty) setup and replies 'started'.
// Any other message is silently ignored.
self.addEventListener('message', function(event) {
// console.log('in worker - received event', event)
if( event.data === 'init' ){
console.log('in worker - initialization', event)
// ... init stuff here
//
self.postMessage('started')
}
}, false);
......@@ -33,7 +33,6 @@
var greyCVImage = new CV.Image();
var thresCVImage = new CV.Image();
requestAnimationFrame(function callback(){
context.drawImage(videoElement, 0, 0)
var imageData = context.getImageData(0, 0, canvas.width, canvas.height)
......
# Steps
- thresholding is working in webassembly
- now lets make it fast
- O(n) incremental blur In webassembly
- Incremental blur: 3 stages. First, middle, end. Do slow loop for those. Only aim for O(n).
- do a linear time with the incremental technique
- do a pure average first
- you know how to code it
- later: do a gaussian approximation - the boxblur/stackblur trick
- so you get a good idea of the speed
- so you get webassembly version from optimised c - this is the fastest it can be on the web at the moment
- good to bench webassembly
- test multiple browser - multiple resolutions
- see how hard it would be to incorporate it in threex-aruco.js
- source ~/webwork/emsdk/emsdk_env.sh
#include <emscripten/bind.h>
#include <emscripten.h>
#include <stdint.h>
#include <cassert>
extern "C" {
#define FILL_ALL_CHANNELS 0
EMSCRIPTEN_KEEPALIVE
void convertToGray(uint8_t *buffer, int imageW, int imageH){
......@@ -10,8 +12,10 @@ void convertToGray(uint8_t *buffer, int imageW, int imageH){
for(int i = 0; i < length; i+=4){
buffer[i] = (uint8_t)(buffer[i] * 0.299 + buffer[i + 1] * 0.587 + buffer[i + 2] * 0.114 + 0.5);
// buffer[i + 1] = buffer[i];
// buffer[i + 2] = buffer[i];
#if FILL_ALL_CHANNELS
buffer[i + 1] = buffer[i];
buffer[i + 2] = buffer[i];
#endif
}
}
......@@ -27,9 +31,11 @@ void meanBlurHorizontal(uint8_t *srcBuffer, uint8_t *dstBuffer, int imageW, int
dstBuffer[(y * imageW + x) * 4] = average / (windowW*2+1);
// dstBuffer[(y * imageW + x) * 4+1] = dstBuffer[(y * imageW + x) * 4];
// dstBuffer[(y * imageW + x) * 4+2] = dstBuffer[(y * imageW + x) * 4];
// dstBuffer[(y * imageW + x) * 4+3] = 255;
#if FILL_ALL_CHANNELS
dstBuffer[(y * imageW + x) * 4+1] = dstBuffer[(y * imageW + x) * 4];
dstBuffer[(y * imageW + x) * 4+2] = dstBuffer[(y * imageW + x) * 4];
dstBuffer[(y * imageW + x) * 4+3] = 255;
#endif
}
}
}
......@@ -46,9 +52,11 @@ void meanBlurVertical(uint8_t *srcBuffer, uint8_t *dstBuffer, int imageW, int im
dstBuffer[(y * imageW + x) * 4] = average / (windowH*2+1);
// dstBuffer[(y * imageW + x) * 4+1] = dstBuffer[(y * imageW + x) * 4];
// dstBuffer[(y * imageW + x) * 4+2] = dstBuffer[(y * imageW + x) * 4];
// dstBuffer[(y * imageW + x) * 4+3] = 255;
#if FILL_ALL_CHANNELS
dstBuffer[(y * imageW + x) * 4+1] = dstBuffer[(y * imageW + x) * 4];
dstBuffer[(y * imageW + x) * 4+2] = dstBuffer[(y * imageW + x) * 4];
dstBuffer[(y * imageW + x) * 4+3] = 255;
#endif
}
}
}
......@@ -64,10 +72,107 @@ void adaptativeThreshold(uint8_t *srcBuffer, uint8_t *bluredBuffer, uint8_t *dst
for(int i = 0; i < length; i+=4){
dstBuffer[i] = tab[srcBuffer[i] - bluredBuffer[i] + 255];
// dstBuffer[i + 1] = dstBuffer[i];
// dstBuffer[i + 2] = dstBuffer[i];
// dstBuffer[i + 3] = 255;
#if FILL_ALL_CHANNELS
dstBuffer[i + 1] = dstBuffer[i];
dstBuffer[i + 2] = dstBuffer[i];
dstBuffer[i + 3] = 255;
#endif
}
}
////////////////////////////////////////////////////////////////////////////////
//
////////////////////////////////////////////////////////////////////////////////
EMSCRIPTEN_KEEPALIVE
// Horizontal box blur of the red channel, exact at the image borders.
// Brute-force O(imageW * imageH * windowW) reference implementation: each
// pixel averages the red bytes of the window [x-windowW, x+windowW] clamped
// to the row, dividing by the number of samples actually taken so border
// pixels keep the correct brightness.
// srcBuffer/dstBuffer are RGBA (4 bytes per pixel); only channel 0 is written
// unless FILL_ALL_CHANNELS is set. Assumes imageW > 2*windowW so the three
// x-ranges below do not overlap — TODO confirm callers guarantee this.
void meanBlurHorizontalExactSlow(uint8_t *srcBuffer, uint8_t *dstBuffer, int imageW, int imageH, int windowW){
	// EM_ASM(console.log("DDD"));
	for(int y = 0; y < imageH; y++){
		// left border: window truncated to [0, x+windowW] -> windowW+x+1 samples
		for(int x = 0; x < windowW; x++){
			uint32_t average = 0;
			for(int d = -x; d <= windowW; d++){
				average += srcBuffer[(y * imageW + x + d) * 4];
			}
			dstBuffer[(y * imageW + x) * 4] = average / (windowW+x+1);
		}
		// middle: full window -> windowW*2+1 samples
		for(int x = windowW; x < imageW-windowW; x++){
			uint32_t average = 0;
			for(int d = -windowW; d <= windowW; d++){
				average += srcBuffer[(y * imageW + x + d) * 4];
			}
			dstBuffer[(y * imageW + x) * 4] = average / (windowW*2+1);
		}
		// right border: window truncated to [x-windowW, imageW-1]
		// -> windowW + (imageW-x) samples
		for(int x = imageW-windowW; x < imageW; x++){
			uint32_t average = 0;
			for(int d = -windowW; d <= windowW && x+d < imageW; d++){
				average += srcBuffer[(y * imageW + x + d) * 4];
			}
			// BUGFIX: was divided by (windowW+imageW-x+1), one more than the
			// actual sample count, which darkened the right edge.
			dstBuffer[(y * imageW + x) * 4] = average / (windowW+imageW-x);
		}
#if FILL_ALL_CHANNELS
		// Mirror the result into G/B and opaque alpha so the image is viewable.
		for(int x = 0; x < imageW; x++){
			dstBuffer[(y * imageW + x) * 4+1] = dstBuffer[(y * imageW + x) * 4];
			dstBuffer[(y * imageW + x) * 4+2] = dstBuffer[(y * imageW + x) * 4];
			dstBuffer[(y * imageW + x) * 4+3] = 255;
		}
#endif
	}
}
////////////////////////////////////////////////////////////////////////////////
//
////////////////////////////////////////////////////////////////////////////////
EMSCRIPTEN_KEEPALIVE
// Horizontal box blur — intended to become the O(imageW*imageH) incremental
// ("sliding window") version where the running sum is updated by adding the
// entering sample and subtracting the leaving one.
// NOTE(review): the incremental part is not implemented yet — the body below
// is still the brute-force exact version; window[]/windowLength are
// scaffolding for the planned ring buffer.
void meanBlurHorizontalSlidingWindow(uint8_t *srcBuffer, uint8_t *dstBuffer, int imageW, int imageH, int windowW){
	EM_ASM(console.log("meanBlurHorizontalSlidingWindow"));
	uint8_t window[256];		// future ring buffer of samples (unused for now)
	uint32_t windowLength = windowW*2+1;
	assert( windowLength < 256 );	// the planned ring buffer holds at most 255 samples
	(void)window;			// silence unused-variable warning until implemented
	for(int y = 0; y < imageH; y++){
		// left border: window truncated to [0, x+windowW] -> windowW+x+1 samples
		for(int x = 0; x < windowW; x++){
			uint32_t average = 0;
			for(int d = -x; d <= windowW; d++){
				average += srcBuffer[(y * imageW + x + d) * 4];
			}
			dstBuffer[(y * imageW + x) * 4] = average / (windowW+x+1);
		}
		// middle: full window -> windowW*2+1 samples
		for(int x = windowW; x < imageW-windowW; x++){
			uint32_t average = 0;
			for(int d = -windowW; d <= windowW; d++){
				average += srcBuffer[(y * imageW + x + d) * 4];
			}
			dstBuffer[(y * imageW + x) * 4] = average / (windowW*2+1);
		}
		// right border: window truncated to [x-windowW, imageW-1]
		// -> windowW + (imageW-x) samples
		for(int x = imageW-windowW; x < imageW; x++){
			uint32_t average = 0;
			for(int d = -windowW; d <= windowW && x+d < imageW; d++){
				average += srcBuffer[(y * imageW + x + d) * 4];
			}
			// BUGFIX: was divided by (windowW+imageW-x+1), one more than the
			// actual sample count, which darkened the right edge.
			dstBuffer[(y * imageW + x) * 4] = average / (windowW+imageW-x);
		}
#if FILL_ALL_CHANNELS
		// Mirror the result into G/B and opaque alpha so the image is viewable.
		for(int x = 0; x < imageW; x++){
			dstBuffer[(y * imageW + x) * 4+1] = dstBuffer[(y * imageW + x) * 4];
			dstBuffer[(y * imageW + x) * 4+2] = dstBuffer[(y * imageW + x) * 4];
			dstBuffer[(y * imageW + x) * 4+3] = 255;
		}
#endif
	}
}
}
......@@ -50,16 +50,28 @@
// process wasmBuffer1Array
Module.asm._convertToGray(wasmBuffer1Array.byteOffset, canvas.width, canvas.height)
var windowW = 5
var windowH = 5
var windowW = 30
var windowH = 30
Module.asm._meanBlurHorizontal (wasmBuffer1Array.byteOffset, wasmBuffer2Array.byteOffset, canvas.width, canvas.height, windowW)
Module.asm._meanBlurVertical (wasmBuffer2Array.byteOffset, wasmBuffer3Array.byteOffset, canvas.width, canvas.height, windowH)
Module.asm._adaptativeThreshold(wasmBuffer1Array.byteOffset, wasmBuffer3Array.byteOffset, wasmBuffer2Array.byteOffset, canvas.width, canvas.height, 7)
// copy processed image in context
tmpImageData.data.set(wasmBuffer2Array);
context.putImageData(tmpImageData, 0, 0)
if( true ){
Module.asm._meanBlurHorizontalSlidingWindow(wasmBuffer1Array.byteOffset, wasmBuffer2Array.byteOffset, canvas.width, canvas.height, windowW)
// Module.asm._meanBlurHorizontalExactSlow (wasmBuffer1Array.byteOffset, wasmBuffer2Array.byteOffset, canvas.width, canvas.height, windowW)
// Module.asm._meanBlurHorizontal (wasmBuffer1Array.byteOffset, wasmBuffer2Array.byteOffset, canvas.width, canvas.height, windowW)
// copy processed image in context
tmpImageData.data.set(wasmBuffer2Array);
context.putImageData(tmpImageData, 0, 0)
}else{
Module.asm._meanBlurHorizontal (wasmBuffer1Array.byteOffset, wasmBuffer2Array.byteOffset, canvas.width, canvas.height, windowW)
Module.asm._meanBlurVertical (wasmBuffer2Array.byteOffset, wasmBuffer3Array.byteOffset, canvas.width, canvas.height, windowH)
Module.asm._adaptativeThreshold(wasmBuffer1Array.byteOffset, wasmBuffer3Array.byteOffset, wasmBuffer2Array.byteOffset, canvas.width, canvas.height, 7)
// copy processed image in context
tmpImageData.data.set(wasmBuffer2Array);
context.putImageData(tmpImageData, 0, 0)
}
requestAnimationFrame(callback)
})
......
# Area Learning with AR.js
## More stable more robust AR with multi-markers
# intro - what is multimarkers
- before we had only one marker at a time e.g. give examples
- this is nice but we can go further.
- with multi-markers, we have multiple markers acting as one
larger marker. They have several key advantages which allow
us to have more stable, larger AR with more robust detection.
Additionally, we designed a user experience with a nice workflow
to learn new areas of markers.
# Advantages of multimarkers
Recently added multimarkers. It provides very nice features in AR
- *More Stable* :
- *More Robust* :
So it ends with much larger augmented reality
(here video of large glass donut).
# Nice workflow for the user
- short description of the steps
- a video showing the steps - with clear step 1/2/3 as captions
The workflow is split into 3 steps:
1. Learn the new area with multiple markers
2. Once learned, generate a file describing the area
3. Adding augmented reality on top of the area
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册