提交 cb1d2a8a 编写于 作者: J Jerome Etienne

more experimentation on jsaruco + wasm

上级 3808b99f
......@@ -335,6 +335,8 @@ Demo tested on the following browser setups:
Credits: @HelloDeadline, @sorianog
# Ideas
- play with camera parameters to optimize it for AR - available in mediastreamtrack
- https://www.oberhofer.co/mediastreamtrack-and-its-capabilities/
- AR Gaming - https://www.youtube.com/watch?v=EmGGGzibGok
- AR Business Card - https://vimeo.com/4979525
- ar cube visu - real nice for 360 AR https://twitter.com/jerome_etienne/status/869765239356747776
......
- start with a convert-to-gray conversion
- plus thresholding
- then bench webassembly vs js
# GOAL
- filtering : convert2Grey + adaptiveThresholding
- code this in multiple versions : webworkers - webassembly - gpu
- mix them together - which combination
# Step1 Basic Demo
- read the webcam
- display the original image
- filter the image - use jsaruco function
- display the filtered image
# Step2 Implement webworkers + jsaruco
- aka all normal javascript - no webassembly so more stable
# Step3 Implement webassembly
- code in C the convert2Grey yourself first
- see about getting a horizontal/vertical blur in C
- then do an adaptive thresholding
- result must be the same as the jsaruco version
# Step4 Implement a gpu version
- convert2Grey may be done in shader
- horizontal/vertical blur may be done in shader
- which resolution for the texture ?
- how many passes ? 4 different shaders or larger ones ?
# Step5 Mix them together
- what is possible ? what is buggy ?
- if all is running as expected, any combination would work.
- it is a matter of picking the fastest
- so try and measure :)
<script src='../../js-aruco/vendor/js-aruco/src/svd.js'></script>
<script src='../../js-aruco/vendor/js-aruco/src/posit1.js'></script>
<script src='../../js-aruco/vendor/js-aruco/src/cv.js'></script>
<script src='../../js-aruco/vendor/js-aruco/src/aruco.js'></script>
<body><script>
// Create a <video> element fed by the webcam stream (kept off-DOM; it is
// only used as a drawImage() source for the canvas below).
var videoElement = document.createElement('video')
// document.body.appendChild(videoElement)
videoElement.autoplay = true
navigator.mediaDevices.getUserMedia({
	video: true
}).then(function(stream){
	// FIX: URL.createObjectURL(MediaStream) is deprecated and removed in
	// modern browsers - assign the stream directly via srcObject, keeping
	// the old path only as a fallback for legacy browsers.
	if( 'srcObject' in videoElement ){
		videoElement.srcObject = stream
	}else{
		videoElement.src = window.URL.createObjectURL(stream);
	}
}).catch(function(error){
	// FIX: report the actual failure instead of tripping a bare
	// console.assert(false) - the error object carries the reason
	// (permission denied, no camera, ...).
	console.error('getUserMedia failed:')
	console.dir(error)
})
// Canvas used both to sample the current webcam frame and to display the
// filtered result.
var canvas = document.createElement('canvas')
canvas.width = 640
canvas.height = 480
document.body.appendChild(canvas)
var context = canvas.getContext('2d')
// Reusable jsaruco buffers - allocated once, refilled every frame.
var greyCVImage = new CV.Image();
var thresCVImage = new CV.Image();
// Filter loop: webcam frame -> grayscale -> adaptive threshold -> canvas.
requestAnimationFrame(function callback(){
	context.drawImage(videoElement, 0, 0)
	var srcImageData = context.getImageData(0, 0, canvas.width, canvas.height)
	CV.grayscale(srcImageData, greyCVImage)
	// NOTE(review): kernel/threshold constants (2, 7) match the jsaruco
	// defaults - confirm against CV.adaptiveThreshold if tuning.
	CV.adaptiveThreshold(greyCVImage, thresCVImage, 2, 7);
	// FIX: the original declared `var imageData` twice in the same scope
	// (the second `var` silently re-bound the first); use distinct names
	// for the source and destination buffers.
	var dstImageData = context.createImageData(canvas.width, canvas.height);
	copyCVImage2ImageData(thresCVImage, dstImageData)
	context.putImageData( dstImageData, 0, 0);
	requestAnimationFrame(callback)
})
//////////////////////////////////////////////////////////////////////////////
// Code Separator
//////////////////////////////////////////////////////////////////////////////
// Expand a single-channel CV.Image into an RGBA ImageData: each grey value
// is replicated into the R, G and B channels, and alpha is forced to fully
// opaque (255). Returns the imageData it filled, for chaining.
function copyCVImage2ImageData(cvImage, imageData){
	var pixelCount = cvImage.data.length;
	for(var srcIndex = 0; srcIndex < pixelCount; srcIndex++){
		var dstIndex = srcIndex * 4;
		var grey = cvImage.data[srcIndex];
		imageData.data[dstIndex + 0] = grey;
		imageData.data[dstIndex + 1] = grey;
		imageData.data[dstIndex + 2] = grey;
		imageData.data[dstIndex + 3] = 255;
	}
	return imageData;
};
</script></body>
......@@ -4,7 +4,22 @@
- how to use webworker ?
- split the processing of the original image
- so gray scale + adaptive threshold - split in 4 zones for 4 cpus
- https://github.com/andrei-markeev/ts2c
---
# webworker usage
- in 640x480, it takes 18.3ms for context.detect
- 13.84ms for greyscale + adaptive threshold - 75% of the whole!
- it is doable on GPU too
- greyscale is per pixel
- adaptive threshold needs a local average blur and then it is per pixel
- there is a pool of workers
- and add task to it
- doing the greyscale
- doing the horizontal blur
- doing the vertical blur
- demo where you read the webcam and filter it with this
---
- see how to include it in ar.js
......
......@@ -105,9 +105,9 @@
// if( videoElement.readyState >= videoElement.HAVE_CURRENT_DATA ){
// detect markers in imageData
console.time('detect');
// console.time('detect');
var detectedMarkers = arucoContext.detect(videoElement)
console.timeEnd('detect');
// console.timeEnd('detect');
if( detectedMarkers.length > 0 ){
var detectedMarker = detectedMarkers[0]
......
......@@ -87,23 +87,22 @@ THREEx.ArucoDebug.prototype.drawCVImage = function(cvImage){
var context = canvas.getContext('2d');
var imageData = context.createImageData(canvas.width, canvas.height);
copyImage(cvImage, imageData)
this.copyCVImage2ImageData(cvImage, imageData)
context.putImageData( imageData, 0, 0);
return
function copyImage(src, dst){
var i = src.data.length, j = (i * 4) + 3;
while(i --){
dst.data[j -= 4] = 255;
dst.data[j - 1] = dst.data[j - 2] = dst.data[j - 3] = src.data[i];
}
return dst;
};
}
// Expand a single-channel CV.Image into an RGBA ImageData: each grey value
// is replicated into the R, G and B channels, with alpha forced to fully
// opaque (255). Returns the imageData it filled.
THREEx.ArucoDebug.prototype.copyCVImage2ImageData = function(cvImage, imageData){
	var pixelCount = cvImage.data.length;
	for(var srcIndex = 0; srcIndex < pixelCount; srcIndex++){
		var dstIndex = srcIndex * 4;
		var grey = cvImage.data[srcIndex];
		imageData.data[dstIndex + 0] = grey;
		imageData.data[dstIndex + 1] = grey;
		imageData.data[dstIndex + 2] = grey;
		imageData.data[dstIndex + 3] = 255;
	}
	return imageData;
};
//////////////////////////////////////////////////////////////////////////////
// Code Separator
//////////////////////////////////////////////////////////////////////////////
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册