提交 7110db47 编写于 作者: W wangqun

[change]update paddleJS construct

[change]update paddleJS construct
上级 cbeff2b5
# PaddleJS Examples
百度PaddleJS使用现成的 JavaScript 模型或转换 Paddle 模型以在浏览器中运行。
## 演示
目前Web项目运行TinyYolo模型可以达到30ms以内,对于一般的实时场景已经足够应对。
### 模块化
## 浏览器覆盖面
* PC: Chrome
* Mac: Chrome
* Android: Baidu App and QQ Browser
## 构建部署
```bash
cd web # 进入根目录
npm i # 安装依赖
mkdir dist # 创建资源目录
cd dist # 进入资源目录
git clone https://github.com/DerekYangMing/Paddle-Web-Models.git # 获取模型
mv Paddle-Web-Models/separablemodel . # 移动模型到指定地点
cd .. # 返回根目录
npm run tinyYolo # 启动 tinyYolo 在线推理服务
```
## 如何预览 demo
1. 在浏览器中打开url: https://localhost:8123/
2. 点击【开始检测】按钮。
3. 将人脸对准摄像头,没有问题的话,可以正常检测到人脸。
## 效果
![image](./tinyYolo/demoshow.png)
# PaddleJS Examples
百度PaddleJS使用现成的 JavaScript 模型或转换 Paddle 模型以在浏览器中运行。
## 演示
目前Web项目运行TinyYolo模型可以达到30ms以内,对于一般的实时场景已经足够应对。
### 模块化
## 浏览器覆盖面
* PC: Chrome
* Mac: Chrome
* Android: Baidu App and QQ Browser
## 构建部署
```bash
cd web # 进入根目录
npm i # 安装依赖
mkdir dist # 创建资源目录
cd dist # 进入资源目录
git clone https://github.com/DerekYangMing/Paddle-Web-Models.git # 获取模型
mv Paddle-Web-Models/separablemodel . # 移动模型到指定地点
cd .. # 返回根目录
npm run tinyYolo # 启动 tinyYolo 在线推理服务
```
## 如何预览 demo
1. 在浏览器中打开url: https://localhost:8123/
2. 点击【开始检测】按钮。
3. 将人脸对准摄像头,没有问题的话,可以正常检测到人脸。
## 效果
![image](./tinyYolo/demoshow.png)
import 'babel-polyfill';
import Paddle from '../../src/paddle/paddle';
import IO from '../../src/feed/imageFeed';
import Utils from '../../src/utils/utils';
// 获取map表
import Map from '../../test/data/map';
/**
* @file model demo 入口文件
* @author wangqun@baidu.com
*
*/
// 模型feed数据
const feedShape = {
'608': {
fw: 608,
fh: 608
},
'320': {
fw: 320,
fh: 320
},
'320fused': {
fw: 320,
fh: 320
},
'separate': {
fw: 320,
fh: 320
}
};
const modelType = 'separate';
const {fw, fh} = feedShape[modelType];
// 统计参数
let loaded = false;
let model = {};
window.statistic = [];
/**
 * Run one classification pass over `input` (an <img> element), lazily
 * loading the model on first call.
 *
 * Relies on module-level state: `loaded` (load-once flag), `model`
 * (shared Paddle model), `fh`/`fw` (feed shape), `Map` (class-index ->
 * label table — presumably; verify against ../../test/data/map), and
 * `Utils.getMaxItem` (argmax over the score vector — TODO confirm).
 *
 * @param {HTMLImageElement} input - image to classify
 */
async function run(input) {
// const input = document.getElementById('mobilenet');
const io = new IO();
// Preprocess the image into the model's feed tensor.
let feed = io.process({
input: input,
params: {
targetShape: [1, 3, fh, fw], // target shape (renamed to stay compatible with earlier logic)
scale: 256, // resize scale
width: 224, height: 224, // downscaled width/height
shape: [3, 224, 224], // preset tensor shape
mean: [0.485, 0.456, 0.406], // normalization mean
std: [0.229, 0.224, 0.225] // normalization std
}});
console.dir(['feed', feed]);
const path = 'model/huangfan';
// Load the model only once; later calls reuse the shared `model`.
if (!loaded) {
const MODEL_CONFIG = {
dir: `/${path}/`, // folder that holds the model files
main: 'model.json', // entry file
};
loaded = true;
const paddle = new Paddle({
urlConf: MODEL_CONFIG,
options: {
multipart: false,
dataType: 'json'
}
});
model = await paddle.load();
}
let inst = model.execute({
input: feed
});
// NOTE(review): there should arguably be an explicit fetch call here for the outputs.
let result = await inst.read();
console.dir(['result', result]);
// Argmax over the scores, then map the index to a human-readable label.
let maxItem = Utils.getMaxItem(result);
document.getElementById('txt').innerHTML = Map['' + maxItem.index];
console.log('识别出的结果是' + Map['' + maxItem.index]);
// console.dir(['每个op耗时', window.statistic]);
// let total = statistic.reduce((all, cur) => {
// return all + cur.runTime;
// }, 0);
// console.log('op total = ' + total);
}
// Last selected image as a data-URL (written below; not read in this view).
var image = '';
/**
 * Handle an <input type="file"> change: read the chosen file as a
 * data-URL, show it in the #image element, and run inference once the
 * image has actually loaded.
 * @param {HTMLInputElement} file - the file input element
 */
function selectImage(file) {
if (!file.files || !file.files[0]) {
return;
}
let reader = new FileReader();
reader.onload = function (evt) {
let img = document.getElementById('image');
img.src = evt.target.result;
// Wait for the <img> to decode before feeding it to the model.
img.onload = function () {
run(img);
};
image = evt.target.result;
};
reader.readAsDataURL(file.files[0]);
}
// selectImage
document.getElementById('uploadImg').onchange = function () {
selectImage(this);
};
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>paddle web demo</title>
<meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no">
</head>
<body>
<img id="image" src="https://m.baidu.com/se/static/img/iphone/logo.png" style="max-width: 100%;">
<input type="file" id="uploadImg">
<div id="txt"></div>
<script src="index.es6"></script>
</body>
</html>
\ No newline at end of file
import 'babel-polyfill';
import Paddle from '../../src/paddle/paddle';
import IO from '../../src/feed/imageFeed';
/**
* @file model demo mnist 入口文件
* @author wangqun@baidu.com
*
*/
// Source image for inference (expected to exist in the page as #pic).
const pic = document.getElementById('pic');
const io = new IO();
// Filled with the loaded Paddle model inside run().
let model = {};
/**
 * Load the mnist model and run one inference pass over #pic,
 * logging the raw result to the console.
 */
async function run() {
// Preprocess the image into the model's feed tensor.
let feed = io.process({
input: pic,
params: {
targetShape: [1, 3, 320, 320], // target shape (renamed to stay compatible with earlier logic)
scale: 256, // resize scale
width: 224, height: 224, // downscaled width/height
shape: [3, 224, 224], // preset tensor shape
mean: [0.485, 0.456, 0.406], // normalization mean
std: [0.229, 0.224, 0.225] // normalization std
}});
console.dir(['feed', feed]);
const path = 'model/mnist';
const MODEL_CONFIG = {
dir: `/${path}/`, // folder that holds the model files
main: 'model.json', // entry file
};
const paddle = new Paddle({
urlConf: MODEL_CONFIG,
options: {
multipart: false,
dataType: 'json'
}
});
model = await paddle.load();
let inst = model.execute({
input: feed
});
// NOTE(review): there should arguably be an explicit fetch call here for the outputs.
let result = await inst.read();
// let inst = model.execute({input: cat});
// let res = inst.read();
console.dir(['result', result]);
// var fileDownload = require('js-file-download');
// fileDownload(res, 'result.csv');
}
run();
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>paddle web demo</title>
<meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no">
</head>
<body>
<div>
<img id="pic" src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/wAALCAAcABwBAREA/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/9oACAEBAAA/APn+vTPDHwP8TeJ9DtdXiuLCzt7kbo0uWcOU7NgKRgjkc81i+O/hvrPgW8xco1zp7ELHfIm1HYqCRjJIPUc9cHFcbSgEnABJ9BXaafH8Rrrw3NpdjBrkmjohLQLE/l7c5OOPUHgV6Fcw3um/sxXNt4hZo7qW5X7FDdLtlRfOU7QG5zgSH/dPpXhFel/Bzxj4a8H6vfzeILZy86ILe6WLzPI27i3HUZ+XkA9PQ16Pc/Hfw7pM91LaXusa20wDRxSQRQww9eAdob35DfWuNg+Ny67Dfab430SDUNLuQxjW2UK8BwcAZPPOPmyCOvPSvH6KKKK//9k=" >
</div>
<script src="index.es6"></script>
</body>
</html>
import 'babel-polyfill';
import Paddle from '../../src/paddle/paddle';
import IO from '../../src/feed/imageFeed';
import Utils from '../../src/utils/utils';
// 获取map表
import Map from '../../test/data/map';
/**
* @file model demo 入口文件
* @author wangqun@baidu.com
*
*/
// 模型feed数据
const feedShape = {
'608': {
fw: 608,
fh: 608
},
'320': {
fw: 320,
fh: 320
},
'320fused': {
fw: 320,
fh: 320
},
'separate': {
fw: 244,
fh: 244
}
};
const modelType = 'separate';
const {fw, fh} = feedShape[modelType];
// 统计参数
let loaded = false;
let model = {};
window.statistic = [];
async function run(input) {
// const input = document.getElementById('mobilenet');
const io = new IO();
let feed = io.process({
input: input,
params: {
targetShape: [1, 3, fh, fw], // 目标形状 为了兼容之前的逻辑所以改个名
scale: 256, // 缩放尺寸
width: 224, height: 224, // 压缩宽高
shape: [3, 224, 224], // 预设tensor形状
mean: [0.485, 0.456, 0.406], // 预设期望
std: [0.229, 0.224, 0.225] // 预设方差
}});
console.log('feed', feed);
const path = 'model/mobileNet';
if (!loaded) {
const MODEL_CONFIG = {
dir: `/${path}/`, // 存放模型的文件夹
main: 'model.json', // 主文件
};
loaded = true;
const paddle = new Paddle({
urlConf: MODEL_CONFIG,
options: {
multipart: true,
dataType: 'json'
}
});
model = await paddle.load();
}
let inst = model.execute({
input: feed
});
// 其实这里应该有个fetch的执行调用或者fetch的输出
let result = await inst.read();
console.dir(['result', result]);
// let maxItem = Utils.getMaxItem(result);
// document.getElementById('txt').innerHTML = Map['' + maxItem.index];
// console.log('识别出的结果是' + Map['' + maxItem.index]);
// console.dir(['每个op耗时', window.statistic]);
// let total = statistic.reduce((all, cur) => {
// return all + cur.runTime;
// }, 0);
// console.log('op total = ' + total);
};
var image = '';
function selectImage(file) {
if (!file.files || !file.files[0]) {
return;
}
let reader = new FileReader();
reader.onload = function (evt) {
let img = document.getElementById('image');
img.src = evt.target.result;
img.onload = function() {
run(img);
};
image = evt.target.result;
}
reader.readAsDataURL(file.files[0]);
}
// selectImage
document.getElementById("uploadImg").onchange = function () {
selectImage(this);
};
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>paddle web demo</title>
<meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no">
</head>
<body>
<img id="image" src="https://m.baidu.com/se/static/img/iphone/logo.png" style="max-width: 100%;">
<input type="file" id="uploadImg">
<div id="txt"></div>
<script src="index.es6"></script>
</body>
</html>
\ No newline at end of file
此差异已折叠。
/**
 * Thin wrapper around the browser camera APIs that streams video into a
 * <video> element.
 *
 * @param {Object} option
 * @param {HTMLVideoElement} option.videoDom - element the stream is rendered into
 * @param {MediaStreamConstraints} [option.videoOption] - constraints for getUserMedia
 */
export default class Camera {
    constructor(option) {
        this.video = option.videoDom;
        this.videoOption = option.videoOption;
    }
    // Cross-browser getUserMedia shim (访问用户媒体设备的兼容方法).
    getUserMedia(constraints, success, error) {
        // Guard navigator.mediaDevices itself: it is undefined on older
        // browsers, and the original unguarded access threw a TypeError
        // before the legacy fallbacks could ever run.
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            // Current standard API.
            navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error);
        }
        else if (navigator.webkitGetUserMedia) {
            // WebKit-based browsers.
            navigator.webkitGetUserMedia(constraints, success, error);
        }
        else if (navigator.mozGetUserMedia) {
            // Firefox.
            navigator.mozGetUserMedia(constraints, success, error);
        }
        else if (navigator.getUserMedia) {
            // Legacy API.
            navigator.getUserMedia(constraints, success, error);
        }
    }
    /**
     * List available video input devices; resolves to [] when device
     * enumeration is unsupported. Added so callers (e.g. the video demo,
     * which calls camera.getDevices()) can let the user pick a camera.
     * @returns {Promise<MediaDeviceInfo[]>}
     */
    getDevices() {
        if (navigator.mediaDevices && navigator.mediaDevices.enumerateDevices) {
            return navigator.mediaDevices.enumerateDevices()
                .then(devices => devices.filter(item => item.kind === 'videoinput'));
        }
        return Promise.resolve([]);
    }
    success(stream) {
        // Attach the stream to the video element (srcObject supersedes
        // the old createObjectURL(stream) approach).
        this.video.srcObject = stream;
        this.video.play();
    }
    error(error) {
        console.log(`访问用户媒体设备失败${error.name}, ${error.message}`);
    }
    /**
     * Start streaming from the camera.
     * @param {string} [deviceId] - optional camera to pin; when omitted the
     *     constructor's videoOption constraints are used (original behavior).
     */
    run(deviceId) {
        const supported = navigator.getUserMedia
            || navigator.webkitGetUserMedia
            || navigator.mozGetUserMedia
            || (navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
        if (supported) {
            const constraints = deviceId
                ? {video: {deviceId: {exact: deviceId}}}
                : this.videoOption;
            // Open the user's camera.
            this.getUserMedia(constraints, this.success.bind(this), this.error);
        }
        else {
            alert('不支持访问用户媒体');
        }
    }
    get curVideo() {
        return this.video;
    }
}
import 'babel-polyfill';
import Graph from '../../src/executor/loader';
import IO from '../../src/feed/imageFeed';
import Logger from '../../tools/logger';
window.log = new Logger();
// 统计参数
window.badCases = [];
// import Utils from '../src/utils/utils';
// 获取map表
// import Map from '../test/data/map';
// import demoPic from './bbt1.jpg';
// import demoPic2 from './bbt2.jpg';
// import demoPic3 from './bbt3.jpg';
// import demoPic4 from './bbt4.jpg';
// import demoPic5 from './bbt5.jpg';
// 后处理测试用例
// let tempPic = [demoPic, demoPic2, demoPic3, demoPic4, demoPic5];
/**
* @file model demo 入口文件
* @author wangqun@baidu.com
*
*/
// 模型输出shape
const outputShapes = {
'608': {
from: [19, 19, 25, 1],
to: [19, 19, 5, 5]
},
'320': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
},
'320fused': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
}
};
// 模型feed数据
const feedShape = {
'608': {
fw: 608,
fh: 608
},
'320': {
fw: 320,
fh: 320
},
'320fused': {
fw: 320,
fh: 320
}
};
// 模型路径
const modelPath = {
'608': 'faceModel',
'320': 'facemodel320',
'320fused': 'facemodelfused'
};
const modelType = '320fused';
const path = modelPath[modelType];
// 统计参数
let loaded = false;
let model = {};
window.statistic = [];
const {fw, fh} = feedShape[modelType];
// 第一遍执行比较慢 所以预热一下
async function preheat() {
const io = new IO();
let feed = io.process({
input: video,
params: {
gapFillWith: '#000', // 缩放后用什么填充不足方形部分
targetSize: {
height: fw,
width: fh
},
targetShape: [1, 3, fh, fw], // 目标形状 为了兼容之前的逻辑所以改个名
// shape: [3, 608, 608], // 预设tensor形状
mean: [117.001, 114.697, 97.404], // 预设期望
// std: [0.229, 0.224, 0.225] // 预设方差
}
});
const MODEL_URL = `/${path}/model.json`;
const MODEL_CONFIG = {
dir: `/${path}/`, // 存放模型的文件夹
main: 'model.json', // 主文件
};
loaded = true;
const graphModel = new Graph();
log.start('加载模型');
model = await graphModel.loadGraphModel(MODEL_CONFIG, {
multipart: true,
dataType: 'binary',
binaryOption: {
fileCount: 1, // 切成了多少文件
getFileName(i) { // 获取第i个文件的名称
return 'chunk_0.dat';
}
},
feed
});
log.end('加载模型');
let inst = model.execute({
input: feed
});
};
async function run(input) {
// const input = document.getElementById('mobilenet');
log.start('总耗时');
const io = new IO();
log.start('预处理');
let feed = io.process({
input: input,
params: {
gapFillWith: '#000', // 缩放后用什么填充不足方形部分
targetSize: {
height: fw,
width: fh
},
targetShape: [1, 3, fh, fw], // 目标形状 为了兼容之前的逻辑所以改个名
// shape: [3, 608, 608], // 预设tensor形状
mean: [117.001, 114.697, 97.404], // 预设期望
// std: [0.229, 0.224, 0.225] // 预设方差
}
});
log.end('预处理');
if (!loaded) {
const MODEL_URL = `/${path}/model.json`;
const MODEL_CONFIG = {
dir: `/${path}/`, // 存放模型的文件夹
main: 'model.json', // 主文件
};
loaded = true;
const graphModel = new Graph();
log.start('加载模型');
model = await graphModel.loadGraphModel(MODEL_CONFIG, {
multipart: true,
dataType: 'binary',
binaryOption: {
fileCount: 1, // 切成了多少文件
getFileName(i) { // 获取第i个文件的名称
return 'chunk_0.dat';
}
},
feed
});
log.end('加载模型');
}
log.start('运行耗时');
let inst = model.execute({
input: feed
});
// 其实这里应该有个fetch的执行调用或者fetch的输出
let result = await inst.read();
log.end('后处理-读取数据');
// console.dir(['result', result]);
log.start('后处理-形状调整');
const newData = [];
let newIndex = -1;
const [w, h, c, b] = outputShapes[modelType].from;
// c channel
for (let i = 0; i < c; i++) {
// height channel
for (let j = 0; j < h; j++) {
// width channel
for (let k = 0; k < w; k++) {
// position: (0, 0, 0, 0)
const index = j * (c * h) + k * c + i;
// const index = j * (i * k) + k * i + i;
newData[++newIndex] = result[index];
}
}
}
log.end('后处理-形状调整');
log.start('后处理-画框');
testRun(newData, input);
log.end('后处理-画框');
log.end('后处理');
log.end('总耗时');
};
var image = '';
function selectImage(file) {
if (!file.files || !file.files[0]) {
return;
}
let reader = new FileReader();
reader.onload = function (evt) {
let img = document.getElementById('image');
img.src = evt.target.result;
img.onload = function() {
log.during('每次执行的时间间隔');
run(img);
};
image = evt.target.result;
}
reader.readAsDataURL(file.files[0]);
};
// selectImage
document.getElementById("uploadImg").onchange = function () {
selectImage(this);
};
/* 后处理图片 by zhangmiao06 */
let preTestRun = (index) => {
let img = document.getElementById('image');
img.src = tempPic[index];
img.onload = function() {
testRun(testOutput.data[index], img);
};
};
/**
 * Post-process the raw model output into detection boxes and position the
 * overlay. Pipeline: flat -> nested (reversed `from` shape) -> transpose ->
 * regroup to `to` shape -> decode boxes -> render via handleDiv.
 * @param {number[]} data - flat output tensor
 * @param {HTMLImageElement} img - source image, used to scale boxes
 */
let testRun = (data, img) => {
// console.log('ori', data);
const {from, to} = outputShapes[modelType];
// let shape = [1, 25, 19, 19];
let shape = [].concat(from).reverse();
// 1. flat array -> nested 1*25*19*19
let formatData = reshapeMany({
data: data,
reshapeShape: shape
});
// console.log('一维到多维', formatData);
// 2. 1*25*19*19 -> 19*19*25*1
let formatData2 = transpose({
data: formatData,
shape: shape,
transposeShape: [2, 3, 1, 0]
});
// console.log('transpose', formatData2);
// 3. 19*19*25*1 -> 19*19*5*5
let formatData3 = reshape({
data: formatData2,
shape: from,
reshapeShape: to
});
// console.log('reshape', formatData3);
// 4. decode grid predictions into [x, y, w, h, prob] boxes
let finalData = handleFinal(formatData3, shape, img);
// console.log('final', finalData);
// 5. render the result
// handleCanvas(finalData, img);
handleDiv(finalData, img);
};
// sigmoid
// Logistic function; inputs below -100 are clamped straight to 0 so that
// Math.exp(-x) cannot overflow to Infinity.
let sigmoid = (x) => (x < -100 ? 0.0 : 1 / (1 + Math.exp(-x)));
// transpose
// Permute the axes of a 4-D nested array.
// `shape` is the SOURCE shape; `transposeShape` is the axis permutation,
// e.g. [2, 3, 1, 0] reads a [n][c][h][w] array out as [h][w][c][n].
// The four loops walk the OUTPUT index space (bounds come from the
// permuted shape); the switch maps each output coordinate back to its
// source coordinate before reading.
let transpose = (data) => {
let shape = data.shape;
let transposeShape = data.transposeShape;
let formatData = data.data;
let formatData2 = [];
for(let n = 0; n < shape[transposeShape[0]]; n++) {
let nData = [];
for(let c = 0; c < shape[transposeShape[1]]; c++) {
let cData = [];
for(let row = 0; row < shape[transposeShape[2]]; row++) {
let rowData = [];
for(let col = 0; col < shape[transposeShape[3]]; col++) {
// (n, c, row, col) is the output coordinate; recover the source one.
let tempArr = [n, c, row, col];
let newN = n;
let newC = c;
let newW = row;
let newH = col;
transposeShape.forEach((item, index)=> {
switch(item) {
case 0:
newN = tempArr[index];
break;
case 1:
newC = tempArr[index];
break;
case 2:
newW = tempArr[index];
break;
case 3:
newH = tempArr[index];
}
});
rowData.push(formatData[newN][newC][newW][newH]);
}
cData.push(rowData);
}
nData.push(cData);
}
formatData2.push(nData);
}
return formatData2;
};
// reshape
// Change a 4-D nested array from `shape` to `reshapeShape` by flattening
// it (row-major, bounds from the source shape) and re-inflating it with
// the target shape.
let reshape = (data) => {
    const {data: source, shape: srcShape, reshapeShape: dstShape} = data;
    const flat = reshapeOne({
        data: source,
        shape: srcShape
    });
    return reshapeMany({
        data: flat,
        reshapeShape: dstShape
    });
};
// Flatten a 4-D nested array into one dimension in [n][c][row][col]
// order. Loop bounds come from `shape`, not from the array itself.
let reshapeOne = ({data: src, shape}) => {
    const flat = [];
    const [dn, dc, dh, dw] = shape;
    for (let a = 0; a < dn; a++) {
        for (let b = 0; b < dc; b++) {
            for (let y = 0; y < dh; y++) {
                for (let x = 0; x < dw; x++) {
                    flat.push(src[a][b][y][x]);
                }
            }
        }
    }
    return flat;
};
// Inflate a flat list into a 4-D nested array of shape `reshapeShape`,
// reading the source in row-major (C) order via precomputed strides.
let reshapeMany = ({data: flat, reshapeShape: dims}) => {
    const [dn, dc, dh, dw] = dims;
    const strideN = dc * dh * dw;
    const strideC = dh * dw;
    return Array.from({length: dn}, (_, a) =>
        Array.from({length: dc}, (_, b) =>
            Array.from({length: dh}, (_, y) =>
                Array.from({length: dw}, (_, x) =>
                    flat[a * strideN + b * strideC + y * dw + x]
                )
            )
        )
    );
};
// Scale image dimensions so the longer side equals the model feed size.
// Uses module-level `fw` (feed width from feedShape[modelType]).
// Returns [originalWidth, originalHeight, scaledWidth, scaledHeight].
let calSize = (img) => {
let w1 = img.width;
let h1 = img.height;
let wh1 = Math.max(w1, h1);
// let factor = 608.0 / wh1;
let factor = fw / wh1;
let width = Math.round(w1 * factor);
let height = Math.round(h1 * factor);
return [w1, h1, width, height];
};
// Decode raw grid predictions into boxes.
// formatData3 is indexed [row][col][anchor] with (tx, ty, tw, th, score);
// returns boxes as [x, y, w, h, prob] scaled by the original image size.
// NOTE(review): `maxProb` is initialized but never reassigned, so the
// `prob > maxProb` clause is equivalent to `prob > 0` and the effective
// filter is just `prob >= 0.5` — confirm whether a running max was intended.
let handleFinal = (formatData3, shape, img) => {
let finalData = [];
let c = shape[2];
let [w1, h1, width, height] = calSize(img);
let factorX = Math.max(width, height) / width;
let factorY = Math.max(width, height) / height;
let maxProb = 0.0;
// Anchor box priors (width, height) in grid-cell units.
let anchors = [[1.603231, 2.094468], [6.041143, 7.080126], [2.882459, 3.518061], [4.266906, 5.178857], [9.041765, 10.66308]];
for(let i = 0; i < shape[2]; i++) {
for(let j = 0; j < shape[3]; j++) {
for(let k = 0; k < anchors.length; k++) {
let [a1, a2, a3, a4, prob] = formatData3[i][j][k];
prob = sigmoid(prob);
if (prob > maxProb && prob >= 0.5) {
// Box center from the cell offset; size from the anchor prior.
let ctx = (j + sigmoid(a1)) / c * factorX;
let cty = (i + sigmoid(a2)) / c * factorY;
let col = Math.exp(a3) * anchors[k][0] / c * factorX;
let row = Math.exp(a4) * anchors[k][1] / c * factorY;
let x = (ctx - (col / 2));
let y = (cty - (row / 2));
finalData.push([x * w1, y * h1, col * w1, row * h1, prob]);
}
}
}
}
return finalData;
};
// Draw the image plus one red outline per detection box on #myCanvas.
// finalData entries are [left, top, width, height, prob] in image pixels.
let handleCanvas = (finalData, img) => {
let myCanvas = document.getElementById('myCanvas');
let [w1, h1, width, height] = calSize(img);
myCanvas.width = w1;
myCanvas.height = h1;
let ctx = myCanvas.getContext('2d');
ctx.drawImage(img, 0, 0, w1, h1);
finalData.forEach((demoArr,index) => {
let [demoLeft, demoTop, demoWidth, demoHeight, prob] = demoArr;
ctx.beginPath();
ctx.strokeStyle = 'red';
ctx.moveTo(demoLeft, demoTop);
ctx.lineTo(demoLeft + demoWidth, demoTop);
ctx.lineTo(demoLeft + demoWidth, demoTop + demoHeight);
ctx.lineTo(demoLeft, demoTop + demoHeight);
ctx.closePath();
ctx.stroke();
});
};
/**
 * Position the #myDiv overlay over the highest-probability detection box.
 * @param {Array<Array<number>>} finalData - boxes as [left, top, width, height, prob]
 * @param {HTMLImageElement} img - unused; kept for interface compatibility
 * @returns {boolean|undefined} false when there is nothing to draw
 */
let handleDiv = (finalData, img) => {
    if (finalData.length < 1) {
        return false;
    }
    let myCanvas = document.getElementById('myDiv');
    let maxIndex = 0;
    // Boxes are plain arrays, so the probability lives at index 4.
    // (The original compared `finalData[i].prob`, which is always
    // undefined on an array, so maxIndex never advanced past 0.)
    for (let i = 1; i < finalData.length; i++) {
        if (finalData[i][4] > finalData[maxIndex][4]) {
            maxIndex = i;
        }
    }
    let [demoLeft, demoTop, demoWidth, demoHeight, prob] = finalData[maxIndex];
    // CSS length properties need explicit units; assigning a bare number
    // string like "42" is ignored by browsers.
    myCanvas.style.width = demoWidth + 'px';
    myCanvas.style.height = demoHeight + 'px';
    myCanvas.style.left = demoLeft + 'px';
    myCanvas.style.top = demoTop + 'px';
};
// preTestRun(0);
// run(document.getElementById('pic'));
此差异已折叠。
// import VConsole from 'vconsole';
import 'babel-polyfill';
import Paddle from '../../src/paddle/paddle';
import IO from '../../src/feed/imageFeed';
// import Logger from '../../tools/logger';
// window.log = new Logger();
// // 统计参数
// window.badCases = [];
// 后处理测试用例
// let tempPic = [demoPic, demoPic2, demoPic3, demoPic4, demoPic5];
/**
* @file model demo 入口文件
* @author wangqun@baidu.com
*
*/
// 模型输出shape
const outputShapes = {
'608': {
from: [19, 19, 25, 1],
to: [19, 19, 5, 5]
},
'320': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
},
'320fused': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
},
'tinyYolo': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
}
};
// 模型feed数据
const feedShape = {
'608': {
fw: 608,
fh: 608
},
'320': {
fw: 320,
fh: 320
},
'320fused': {
fw: 320,
fh: 320
},
'tinyYolo': {
fw: 320,
fh: 320
}
};
// 模型路径
const modelPath = {
'tinyYolo': 'model/tinyYolo'
};
const modelType = 'tinyYolo';
const path = modelPath[modelType];
// 统计参数
let loaded = false;
let model = {};
window.statistic = [];
const {fw, fh} = feedShape[modelType];
// 第一遍执行比较慢 所以预热一下
async function run(input) {
// const input = document.getElementById('mobilenet');
//log.start('总耗时');
const io = new IO();
// log.start('预处理');
let feed = io.process({
input: input,
params: {
gapFillWith: '#000', // 缩放后用什么填充不足方形部分
targetSize: {
height: fw,
width: fh
},
targetShape: [1, 3, fh, fw], // 目标形状 为了兼容之前的逻辑所以改个名
// shape: [3, 608, 608], // 预设tensor形状
mean: [117.001, 114.697, 97.404], // 预设期望
// std: [0.229, 0.224, 0.225] // 预设方差
}
});
// log.end('预处理');
if (!loaded) {
const MODEL_CONFIG = {
dir: `/${path}/`, // 存放模型的文件夹
main: 'model.json', // 主文件
};
loaded = true;
const paddle = new Paddle({
urlConf: MODEL_CONFIG,
options: {
multipart: true,
dataType: 'binary',
options: {
fileCount: 1, // 切成了多少文件
getFileName(i) { // 获取第i个文件的名称
return 'chunk_0.dat';
}
}
}
});
model = await paddle.load();
}
let inst = model.execute({
input: feed
});
// 其实这里应该有个fetch的执行调用或者fetch的输出
let result = await inst.read();
// log.end('运行耗时');
// log.end('后处理-读取数据');
console.dir(['result', result]);
//log.start('后处理-形状调整');
const newData = [];
let newIndex = -1;
const [w, h, c, b] = outputShapes[modelType].from;
// c channel
for (let i = 0; i < c; i++) {
// height channel
for (let j = 0; j < h; j++) {
// width channel
for (let k = 0; k < w; k++) {
// position: (0, 0, 0, 0)
const index = j * (c * h) + k * c + i;
// const index = j * (i * k) + k * i + i;
newData[++newIndex] = result[index];
}
}
}
// log.end('后处理-形状调整');
// log.start('后处理-画框');
testRun(newData, input);
// log.end('后处理-画框');
// log.end('后处理');
// log.end('总耗时');
}
var image = '';
function selectImage(file) {
if (!file.files || !file.files[0]) {
return;
}
let reader = new FileReader();
reader.onload = function (evt) {
let img = document.getElementById('image');
img.src = evt.target.result;
img.onload = function() {
//log.during('每次执行的时间间隔');
run(img);
};
image = evt.target.result;
}
reader.readAsDataURL(file.files[0]);
}
// selectImage
document.getElementById("uploadImg").onchange = function () {
selectImage(this);
};
/* 后处理图片 by zhangmiao06 */
let preTestRun = (index) => {
let img = document.getElementById('image');
img.src = tempPic[index];
img.onload = function() {
testRun(testOutput.data[index], img);
};
};
let testRun = (data, img) => {
// console.log('ori', data);
const {from, to} = outputShapes[modelType];
// let shape = [1, 25, 19, 19];
let shape = [].concat(from).reverse();
// 1.从一维数组到1*25*19*19
let formatData = reshapeMany({
data: data,
reshapeShape: shape
});
// console.log('一维到多维', formatData);
// 2.从1*25*19*19 到 19*19*25*1
let formatData2 = transpose({
data: formatData,
shape: shape,
transposeShape: [2, 3, 1, 0]
});
// console.log('transpose', formatData2);
// 3.从19*19*25*1到19*19*5*5
let formatData3 = reshape({
data: formatData2,
shape: from,
reshapeShape: to
});
// console.log('reshape', formatData3);
// 4.运算
let finalData = handleFinal(formatData3, shape, img);
// console.log('final', finalData);
// 5.处理画布
// handleCanvas(finalData, img);
handleDiv(finalData, img);
};
// sigmoid
// Logistic function with a cutoff: any input below -100 maps to exactly
// 0, which avoids Math.exp overflow for large negative arguments.
let sigmoid = (v) => {
    return v < -100 ? 0.0 : 1 / (1 + Math.exp(-v));
};
// transpose
let transpose = (data) => {
let shape = data.shape;
let transposeShape = data.transposeShape;
let formatData = data.data;
let formatData2 = [];
for(let n = 0; n < shape[transposeShape[0]]; n++) {
let nData = [];
for(let c = 0; c < shape[transposeShape[1]]; c++) {
let cData = [];
for(let row = 0; row < shape[transposeShape[2]]; row++) {
let rowData = [];
for(let col = 0; col < shape[transposeShape[3]]; col++) {
let tempArr = [n, c, row, col];
let newN = n;
let newC = c;
let newW = row;
let newH = col;
transposeShape.forEach((item, index)=> {
switch(item) {
case 0:
newN = tempArr[index];
break;
case 1:
newC = tempArr[index];
break;
case 2:
newW = tempArr[index];
break;
case 3:
newH = tempArr[index];
}
});
rowData.push(formatData[newN][newC][newW][newH]);
}
cData.push(rowData);
}
nData.push(cData);
}
formatData2.push(nData);
}
return formatData2;
};
// reshape
let reshape = (data) =>{
let formatData2 = data.data;
let shape = data.shape;
let reshapeShape = data.reshapeShape;
// 1.变成一维
let tempData = reshapeOne({
data: formatData2,
shape: shape
});
// 2.变成多维
let formatData3 = reshapeMany({
data: tempData,
reshapeShape: reshapeShape
});
return formatData3;
};
// 变成一维
let reshapeOne = (data) => {
let formatData2 = data.data;
let shape = data.shape;
let tempData = [];
for(let n = 0; n < shape[0]; n++) {
for(let c = 0; c < shape[1]; c++) {
for(let row = 0; row < shape[2]; row++) {
for(let col = 0; col < shape[3]; col++) {
tempData.push(formatData2[n][c][row][col]);
}
}
}
}
return tempData;
};
// 变成多维
let reshapeMany = (data) => {
let tempData = data.data;
let reshapeShape = data.reshapeShape;
let formatData3 = [];
for(let n = 0; n < reshapeShape[0]; n++) {
let nData = [];
for(let c = 0; c < reshapeShape[1]; c++) {
let cData = [];
for(let row = 0; row < reshapeShape[2]; row++) {
let rowData = [];
for(let col = 0; col < reshapeShape[3]; col++) {
let tempN = n * reshapeShape[1] * reshapeShape[2] * reshapeShape[3];
let tempC = c * reshapeShape[2] * reshapeShape[3];
let tempRow = row * reshapeShape[3];
rowData.push(tempData[tempN + tempC + tempRow + col]);
}
cData.push(rowData);
}
nData.push(cData);
}
formatData3.push(nData);
}
return formatData3;
};
let calSize = (img) => {
let w1 = img.width;
let h1 = img.height;
let wh1 = Math.max(w1, h1);
// let factor = 608.0 / wh1;
let factor = fw / wh1;
let width = Math.round(w1 * factor);
let height = Math.round(h1 * factor);
return [w1, h1, width, height];
};
// 处理运算
let handleFinal = (formatData3, shape, img) => {
let finalData = [];
let c = shape[2];
let [w1, h1, width, height] = calSize(img);
let factorX = Math.max(width, height) / width;
let factorY = Math.max(width, height) / height;
let maxProb = 0.0;
let anchors = [[1.603231, 2.094468], [6.041143, 7.080126], [2.882459, 3.518061], [4.266906, 5.178857], [9.041765, 10.66308]];
for(let i = 0; i < shape[2]; i++) {
for(let j = 0; j < shape[3]; j++) {
for(let k = 0; k < anchors.length; k++) {
let [a1, a2, a3, a4, prob] = formatData3[i][j][k];
prob = sigmoid(prob);
if (prob > maxProb && prob >= 0.5) {
let ctx = (j + sigmoid(a1)) / c * factorX;
let cty = (i + sigmoid(a2)) / c * factorY;
let col = Math.exp(a3) * anchors[k][0] / c * factorX;
let row = Math.exp(a4) * anchors[k][1] / c * factorY;
let x = (ctx - (col / 2));
let y = (cty - (row / 2));
finalData.push([x * w1, y * h1, col * w1, row * h1, prob]);
}
}
}
}
return finalData;
};
// 处理画布
let handleCanvas = (finalData, img) => {
let myCanvas = document.getElementById('myCanvas');
let [w1, h1, width, height] = calSize(img);
myCanvas.width = w1;
myCanvas.height = h1;
let ctx = myCanvas.getContext("2d");
ctx.drawImage(img, 0, 0, w1, h1);
finalData.forEach((demoArr,index) => {
let [demoLeft, demoTop, demoWidth, demoHeight, prob] = demoArr;
ctx.beginPath();
ctx.strokeStyle="red";
ctx.moveTo(demoLeft, demoTop);
ctx.lineTo(demoLeft + demoWidth, demoTop);
ctx.lineTo(demoLeft + demoWidth, demoTop + demoHeight);
ctx.lineTo(demoLeft, demoTop + demoHeight);
ctx.closePath();
ctx.stroke();
});
};
/**
 * Position the #myDiv overlay over the highest-probability detection box.
 * @param {Array<Array<number>>} finalData - boxes as [left, top, width, height, prob]
 * @param {HTMLImageElement} img - unused; kept for interface compatibility
 * @returns {boolean|undefined} false when there is nothing to draw
 */
let handleDiv = (finalData, img) => {
    if (finalData.length < 1) {
        return false;
    }
    let myCanvas = document.getElementById('myDiv');
    let maxIndex = 0;
    // Boxes are plain arrays, so the probability lives at index 4.
    // (The original compared `finalData[i].prob`, which is always
    // undefined on an array, so maxIndex never advanced past 0.)
    for (let i = 1; i < finalData.length; i++) {
        if (finalData[i][4] > finalData[maxIndex][4]) {
            maxIndex = i;
        }
    }
    let [demoLeft, demoTop, demoWidth, demoHeight, prob] = finalData[maxIndex];
    // CSS length properties need explicit units; assigning a bare number
    // string like "42" is ignored by browsers.
    myCanvas.style.width = demoWidth + 'px';
    myCanvas.style.height = demoHeight + 'px';
    myCanvas.style.left = demoLeft + 'px';
    myCanvas.style.top = demoTop + 'px';
};
// preTestRun(0);
// run(document.getElementById('pic'));
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>paddle web demo</title>
<meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no">
<style>
.image-wrap {
position: relative;
}
#myDiv {
position: absolute;
border: 1px solid red;
box-sizing: border-box;
}
</style>
</head>
<body>
<div class="image-wrap">
<img id="mobilenet" />
</div>
<p>原图片</p>
<div class="image-wrap">
<img id="image" src=""/>
<div id="myDiv"></div>
</div>
<p>画布</p>
<canvas id="myCanvas"></canvas>
<br/>
<input type="file" id="uploadImg"/>
<div id="txt"></div>
</body>
<script src="index.es6"></script>
</html>
import 'babel-polyfill';
import Runner from '../src/executor/runner';
import Camera from '../src/executor/camera';
// 调试工具
// import vConsole from 'vconsole';
// const theConsole = new vConsole();
let startBtn = document.getElementById('start');
let stopBtn = document.getElementById('stop')
const runner = new Runner({
// 用哪个模型
modelName: 'separate' // '608' | '320' | '320fused' | 'separate'
});
startBtn.disabled = true;
runner.preheat()
.then(() =>{
startBtn.disabled = false
});
const domElement = document.getElementById('video');
const myCanvas = document.getElementById('myDiv');
const videoSelect = document.getElementById('videoSelect');
let camera = new Camera({
// 用来显示摄像头图像的dom
videoDom: domElement
});
camera.getDevices().then(devices => {
if (devices.length) {
camera.run(devices[0].deviceId);
devices.forEach((element, index) => {
let option = document.createElement('option');
option.value = element.deviceId;
option.text = (index + 1);
videoSelect.appendChild(option);
});
videoSelect.onchange = () => {
camera.run(videoSelect.value);
};
}
else {
camera.run();
}
});
// Position the overlay div over the detected box.
// `data` is laid out as [width, height, left, top] (left = data[2],
// top = data[3] below); the original read data[0] for BOTH width and
// height — a copy-paste slip — so height now comes from data[1].
const handleDiv = function (data) {
    myCanvas.style.width = (data ? data[0] : 0) + 'px';
    myCanvas.style.height = (data ? data[1] : 0) + 'px';
    myCanvas.style.left = (data ? data[2] : 0) + 'px';
    myCanvas.style.top = (data ? data[3] : 0) + 'px';
};
startBtn.addEventListener('click', function () {
startBtn.disabled = true;
runner.startStream(() => camera.curVideo, handleDiv);
});
stopBtn.addEventListener('click', function () {
startBtn.disabled = false;
runner.stopStream();
});
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>识别摄像头里的脸</title>
<style>
body {
margin: 0;
padding: 0;
}
#myDiv {
position: fixed;
border: 1px solid red;
box-sizing: border-box;
}
#video {
background: red;
}
</style>
</head>
<body>
<video id="video">
</video>
<p>
<button id="start">开始识别</button>
<button id="stop">结束</button>
</p>
<select id="videoSelect"></select>
<p id="tips">tips</p>
<div id="myDiv"></div>
<script src="./videoDemo.es6"></script>
</body>
</html>
\ No newline at end of file
此差异已折叠。
/* eslint-disable */
import 'babel-polyfill';
import Paddle from '../../src/paddle/paddle';
import IO from '../../src/feed/imageFeed';
// import Logger from '../../tools/logger';
// window.log = new Logger();
// // 统计参数
// window.badCases = [];
// 后处理测试用例
// let tempPic = [demoPic, demoPic2, demoPic3, demoPic4, demoPic5];
/**
* @file model demo 入口文件
* @author wangqun@baidu.com
*
*/
// 模型输出shape
const outputShapes = {
'608': {
from: [19, 19, 25, 1],
to: [19, 19, 5, 5]
},
'320': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
},
'320fused': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
},
'separate': {
from: [10, 10, 25, 1],
to: [10, 10, 5, 5]
}
};
// 模型feed数据
const feedShape = {
'608': {
fw: 608,
fh: 608
},
'320': {
fw: 320,
fh: 320
},
'320fused': {
fw: 320,
fh: 320
},
'separate': {
fw: 320,
fh: 320
}
};
// 模型路径
const modelPath = {
'separate': 'model/tinyYolo'
};
const modelType = 'separate';
const path = modelPath[modelType];
// 统计参数
let loaded = false;
let model = {};
window.statistic = [];
const {fw, fh} = feedShape[modelType];
// 第一遍执行比较慢 所以预热一下
// Run one inference pass: preprocess `input` (an image/video element) into a
// model feed, lazily load the Paddle model on first call, execute the graph
// and hand the re-laid-out raw output to testRun() for box decoding.
async function run(input) {
    // const input = document.getElementById('mobilenet');
    //log.start('total');
    const io = new IO();
    // log.start('preprocess');
    let feed = io.process({
        input: input,
        params: {
            gapFillWith: '#000', // letterbox fill color for the non-square remainder
            targetSize: {
                height: fw,
                width: fh
            },
            targetShape: [1, 3, fh, fw], // target NCHW shape (renamed to keep older logic compatible)
            // shape: [3, 608, 608], // preset tensor shape
            mean: [117.001, 114.697, 97.404], // per-channel mean subtracted during preprocessing
            // std: [0.229, 0.224, 0.225] // per-channel std
        }
    });
    // log.end('preprocess');
    // Load the model only on the first call; later runs reuse `model`.
    if (!loaded) {
        const MODEL_CONFIG = {
            dir: `/${path}/`, // folder containing the model files
            main: 'model.json', // entry file
        };
        loaded = true;
        const paddle = new Paddle({
            urlConf: MODEL_CONFIG,
            options: {
                multipart: true,
                dataType: 'binary',
                options: {
                    fileCount: 1, // number of weight chunks
                    getFileName(i) { // name of the i-th chunk
                        return 'chunk_0.dat';
                    }
                },
                feed
            }
        });
        model = await paddle.load();
    }
    let inst = model.execute({
        input: feed
    });
    // There should really be a fetch call / fetch output step here.
    let result = await inst.read();
    // log.end('run');
    // log.end('postprocess-read');
    console.dir(['result', result]);
    //log.start('postprocess-reshape');
    // Re-layout the flat result into channel-major order for testRun.
    const newData = [];
    let newIndex = -1;
    const [w, h, c, b] = outputShapes[modelType].from;
    // c channel
    for (let i = 0; i < c; i++) {
        // height channel
        for (let j = 0; j < h; j++) {
            // width channel
            for (let k = 0; k < w; k++) {
                // position: (0, 0, 0, 0)
                // NOTE(review): the row stride is (c * h); this is only correct
                // because w == h for every configured model — confirm before
                // using non-square output shapes.
                const index = j * (c * h) + k * c + i;
                // const index = j * (i * k) + k * i + i;
                newData[++newIndex] = result[index];
            }
        }
    }
    // log.end('postprocess-reshape');
    // log.start('postprocess-draw');
    testRun(newData, input);
    // log.end('postprocess-draw');
    // log.end('postprocess');
    // log.end('total');
}
// Last selected image as a data URL (kept at module scope for reuse/debugging).
var image = '';
// Read the chosen file as a data URL, show it in the #image element and run
// inference once the image has finished decoding.
function selectImage(file) {
    if (!file.files || !file.files[0]) {
        return;
    }
    let reader = new FileReader();
    reader.onload = function (evt) {
        let img = document.getElementById('image');
        img.src = evt.target.result;
        // Wait for decode so width/height are valid before preprocessing.
        img.onload = function() {
            //log.during('interval between runs');
            run(img);
        };
        image = evt.target.result;
    }
    reader.readAsDataURL(file.files[0]);
}
// selectImage
// Wire the file input so choosing a picture triggers an inference run.
document.getElementById("uploadImg").onchange = function () {
    selectImage(this);
};
/* Post-process test harness (by zhangmiao06) */
// Dev-only helper: loads a canned test picture and feeds a pre-recorded
// output tensor straight into testRun, bypassing the model.
// NOTE(review): depends on `tempPic` and `testOutput`, whose imports are
// commented out at the top of this file — restore them before calling this.
let preTestRun = (index) => {
    let img = document.getElementById('image');
    img.src = tempPic[index];
    img.onload = function() {
        testRun(testOutput.data[index], img);
    };
};
// Decode one flat raw output tensor into a face box and render it.
// data: flat model output; img: the source image element (for scaling).
let testRun = (data, img) => {
    // console.log('ori', data);
    const {from, to} = outputShapes[modelType];
    // let shape = [1, 25, 19, 19];
    // Reversing `from` yields the NCHW-style shape of the flat output.
    let shape = [].concat(from).reverse();
    // 1. flat array -> 1*25*19*19 nested array
    let formatData = reshapeMany({
        data: data,
        reshapeShape: shape
    });
    // console.log('flat to nested', formatData);
    // 2. 1*25*19*19 -> 19*19*25*1 (move channels last)
    let formatData2 = transpose({
        data: formatData,
        shape: shape,
        transposeShape: [2, 3, 1, 0]
    });
    // console.log('transpose', formatData2);
    // 3. 19*19*25*1 -> 19*19*5*5 (5 anchors x 5 values per grid cell)
    let formatData3 = reshape({
        data: formatData2,
        shape: from,
        reshapeShape: to
    });
    // console.log('reshape', formatData3);
    // 4. decode anchors into pixel-space boxes
    let finalData = handleFinal(formatData3, shape, img);
    // console.log('final', finalData);
    // 5. render the result
    // handleCanvas(finalData, img);
    handleDiv(finalData, img);
};
// Numerically-guarded logistic function: 1 / (1 + e^-x).
// Inputs below -100 short-circuit to 0 to sidestep Math.exp blow-up.
let sigmoid = (x) => x < -100 ? 0.0 : 1 / (1 + Math.exp(-x));
// Permute the axes of a 4-D nested array.
// data.shape is the source shape; data.transposeShape maps each output axis
// position to a source axis index. The result has shape
// [shape[t[0]], shape[t[1]], shape[t[2]], shape[t[3]]].
let transpose = (data) => {
    const {shape, transposeShape: perm, data: src} = data;
    const out = [];
    for (let a = 0; a < shape[perm[0]]; a++) {
        const dimA = [];
        for (let b = 0; b < shape[perm[1]]; b++) {
            const dimB = [];
            for (let c = 0; c < shape[perm[2]]; c++) {
                const dimC = [];
                for (let d = 0; d < shape[perm[3]]; d++) {
                    // Scatter the output coordinate back onto source axes:
                    // source index along axis perm[pos] is the output index
                    // at position pos.
                    const outIdx = [a, b, c, d];
                    const srcIdx = [a, b, c, d];
                    perm.forEach((axis, pos) => {
                        srcIdx[axis] = outIdx[pos];
                    });
                    dimC.push(src[srcIdx[0]][srcIdx[1]][srcIdx[2]][srcIdx[3]]);
                }
                dimB.push(dimC);
            }
            dimA.push(dimB);
        }
        out.push(dimA);
    }
    return out;
};
// Reinterpret a 4-D nested array under a new 4-D shape by flattening it to
// a 1-D row-major list and re-nesting it with the target shape.
let reshape = (data) => {
    // Step 1: collapse to one dimension.
    const flat = reshapeOne({
        data: data.data,
        shape: data.shape
    });
    // Step 2: rebuild under the requested shape.
    return reshapeMany({
        data: flat,
        reshapeShape: data.reshapeShape
    });
};
// Flatten a 4-D nested array of the given shape into a flat list in
// row-major (last-axis-fastest) order.
let reshapeOne = (data) => {
    const src = data.data;
    const [dimN, dimC, dimH, dimW] = data.shape;
    const flat = [];
    for (let n = 0; n < dimN; n++) {
        for (let c = 0; c < dimC; c++) {
            for (let h = 0; h < dimH; h++) {
                for (let w = 0; w < dimW; w++) {
                    flat.push(src[n][c][h][w]);
                }
            }
        }
    }
    return flat;
};
// Expand a flat row-major array into a 4-D nested array of the given shape.
let reshapeMany = (data) => {
    const flat = data.data;
    const [dimN, dimC, dimH, dimW] = data.reshapeShape;
    const out = [];
    for (let n = 0; n < dimN; n++) {
        const nArr = [];
        for (let c = 0; c < dimC; c++) {
            const cArr = [];
            for (let h = 0; h < dimH; h++) {
                const hArr = [];
                for (let w = 0; w < dimW; w++) {
                    // Row-major offset of coordinate (n, c, h, w).
                    hArr.push(flat[((n * dimC + c) * dimH + h) * dimW + w]);
                }
                cArr.push(hArr);
            }
            nArr.push(cArr);
        }
        out.push(nArr);
    }
    return out;
};
// Compute the letterbox scaling of an image into the model input square.
// Returns [originalW, originalH, scaledW, scaledH]; `fw` is the module-level
// feed width — the longer edge is scaled to exactly fw.
let calSize = (img) => {
    const srcW = img.width;
    const srcH = img.height;
    // let factor = 608.0 / wh1;
    const scale = fw / Math.max(srcW, srcH);
    return [srcW, srcH, Math.round(srcW * scale), Math.round(srcH * scale)];
};
// Decode the grid of anchor predictions into pixel-space boxes.
// formatData3: nested [grid][grid][anchor] -> [tx, ty, tw, th, score];
// shape: reversed output shape (shape[2] == grid size); img: source image
// used to scale normalized coordinates back to original pixels.
// Returns rows of [x, y, w, h, prob] for every anchor with prob >= 0.5.
let handleFinal = (formatData3, shape, img) => {
    let finalData = [];
    // Number of grid cells per axis.
    let c = shape[2];
    let [w1, h1, width, height] = calSize(img);
    // Undo the letterbox: stretch normalized coords back per axis.
    let factorX = Math.max(width, height) / width;
    let factorY = Math.max(width, height) / height;
    // NOTE(review): maxProb is never reassigned, so `prob > maxProb` below is
    // redundant with `prob >= 0.5` — confirm whether a running max was intended.
    let maxProb = 0.0;
    // Anchor box priors (w, h) in grid-cell units.
    let anchors = [[1.603231, 2.094468], [6.041143, 7.080126], [2.882459, 3.518061], [4.266906, 5.178857], [9.041765, 10.66308]];
    for(let i = 0; i < shape[2]; i++) {
        for(let j = 0; j < shape[3]; j++) {
            for(let k = 0; k < anchors.length; k++) {
                let [a1, a2, a3, a4, prob] = formatData3[i][j][k];
                prob = sigmoid(prob);
                if (prob > maxProb && prob >= 0.5) {
                    // Box center in normalized image coordinates.
                    let ctx = (j + sigmoid(a1)) / c * factorX;
                    let cty = (i + sigmoid(a2)) / c * factorY;
                    // Box size: anchor prior scaled by the predicted log-scale.
                    let col = Math.exp(a3) * anchors[k][0] / c * factorX;
                    let row = Math.exp(a4) * anchors[k][1] / c * factorY;
                    // Top-left corner, then scale to original pixels.
                    let x = (ctx - (col / 2));
                    let y = (cty - (row / 2));
                    finalData.push([x * w1, y * h1, col * w1, row * h1, prob]);
                }
            }
        }
    }
    return finalData;
};
// Draw the source image on #myCanvas at its original size and outline every
// detected box in red. finalData rows are [x, y, w, h, prob] in pixels.
let handleCanvas = (finalData, img) => {
    const canvas = document.getElementById('myCanvas');
    const [srcW, srcH] = calSize(img);
    canvas.width = srcW;
    canvas.height = srcH;
    const ctx = canvas.getContext('2d');
    ctx.drawImage(img, 0, 0, srcW, srcH);
    finalData.forEach((box) => {
        const [left, top, boxW, boxH] = box;
        // Trace the rectangle edge by edge.
        ctx.beginPath();
        ctx.strokeStyle = 'red';
        ctx.moveTo(left, top);
        ctx.lineTo(left + boxW, top);
        ctx.lineTo(left + boxW, top + boxH);
        ctx.lineTo(left, top + boxH);
        ctx.closePath();
        ctx.stroke();
    });
};
// Position the #myDiv overlay over the single highest-confidence detection.
// finalData rows are [left, top, width, height, prob] in pixels; returns
// false (and leaves the overlay untouched) when there is no detection.
let handleDiv = (finalData, img) => {
    if (finalData.length < 1) {
        return false;
    }
    let myCanvas = document.getElementById('myDiv');
    // Pick the row with the highest probability.
    let maxIndex = 0;
    for (let i = 1; i < finalData.length; i++) {
        // BUG FIX: rows are plain arrays, so `.prob` was always undefined and
        // the comparison never fired; probability lives at element 4.
        if (finalData[i][4] > finalData[maxIndex][4]) {
            maxIndex = i;
        }
    }
    let [demoLeft, demoTop, demoWidth, demoHeight, prob] = finalData[maxIndex];
    // BUG FIX: CSS length properties require explicit units; assigning bare
    // numbers is ignored by the style engine.
    myCanvas.style.width = demoWidth + 'px';
    myCanvas.style.height = demoHeight + 'px';
    myCanvas.style.left = demoLeft + 'px';
    myCanvas.style.top = demoTop + 'px';
};
// preTestRun(0);
// run(document.getElementById('pic'));
/* eslint-enable */
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>paddle web demo</title>
<meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no">
<style>
.image-wrap {
position: relative;
}
#myDiv {
position: absolute;
border: 1px solid #f71111;
box-sizing: border-box;
}
</style>
</head>
<body>
<div class="image-wrap">
<img id="mobilenet">
</div>
<p>原图片</p>
<div class="image-wrap">
<img id="image" src="pic.png">
<div id="myDiv"></div>
</div>
<p>画布</p>
<canvas id="myCanvas"></canvas>
<input type="file" id="uploadImg">
<div id="txt"></div>
<script src="index.es6"></script>
</body>
</html>
...@@ -4,14 +4,21 @@ ...@@ -4,14 +4,21 @@
"description": "paddle", "description": "paddle",
"main": "index.js", "main": "index.js",
"scripts": { "scripts": {
"server": "parcel ./src/index.html", "mnistdemo": "parcel ./examples/mnist/index.html",
"testDemo": "parcel ./demo/index.html", "mobilenet": "parcel ./examples/mobileNet/index.html",
"testSDemo": "parcel ./demo/index.html --port 8125 --https", "tinyYolo": "parcel ./examples/tinyYolo/index.html",
"testVideoDemo": "parcel ./demo/videoDemo.html --port 8123 --https", "huangfan": "parcel ./examples/huangfan/index.html",
"yolo": "parcel ./examples/yolo/index.html",
"videoDemo": "parcel ./examples/videoDemo.html --port 8123 --https",
"unitTest": "parcel ./test/unitTest.html",
"test": "echo \"Error: no test specified\" && exit 1" "test": "echo \"Error: no test specified\" && exit 1"
}, },
"devDependencies": { "devDependencies": {
"@babel/core": "^7.7.2",
"@babel/preset-env": "^7.7.1",
"axios": "^0.17.1",
"babel-core": "^6.26.3", "babel-core": "^6.26.3",
"babel-loader": "^8.0.6",
"babel-plugin-transform-class-properties": "^6.24.1", "babel-plugin-transform-class-properties": "^6.24.1",
"babel-plugin-transform-decorators-legacy": "^1.3.5", "babel-plugin-transform-decorators-legacy": "^1.3.5",
"babel-plugin-transform-runtime": "^6.23.0", "babel-plugin-transform-runtime": "^6.23.0",
...@@ -20,7 +27,8 @@ ...@@ -20,7 +27,8 @@
"babel-preset-react": "^6.24.1", "babel-preset-react": "^6.24.1",
"babel-preset-stage-0": "^6.24.1", "babel-preset-stage-0": "^6.24.1",
"babel-runtime": "^6.26.0", "babel-runtime": "^6.26.0",
"parcel-bundler": "^1.10.3" "parcel-bundler": "^1.10.3",
"webpack-cli": "^3.3.6"
}, },
"keywords": [], "keywords": [],
"author": "", "author": "",
......
...@@ -26,7 +26,7 @@ export default class Camera { ...@@ -26,7 +26,7 @@ export default class Camera {
} }
// 访问用户媒体设备的兼容方法 // 访问用户媒体设备的兼容方法
run(deviceId) { run(deviceId, callback) {
if (window.stream) { if (window.stream) {
window.stream.getTracks().forEach(function (track) { window.stream.getTracks().forEach(function (track) {
track.stop(); track.stop();
...@@ -36,7 +36,9 @@ export default class Camera { ...@@ -36,7 +36,9 @@ export default class Camera {
let constraints = { let constraints = {
video: {} video: {}
}; };
const success = this.success.bind(this); const success = stream => {
this.success(stream, callback);
};
const error = this.error.bind(this); const error = this.error.bind(this);
if (this.deviceInfos.length) { if (this.deviceInfos.length) {
constraints.video.deviceId= {exact: deviceId || this.deviceInfos[0]}; constraints.video.deviceId= {exact: deviceId || this.deviceInfos[0]};
...@@ -63,7 +65,7 @@ export default class Camera { ...@@ -63,7 +65,7 @@ export default class Camera {
} }
} }
success(stream) { success(stream, callback) {
const domElement = this.video; const domElement = this.video;
// make stream available to console // make stream available to console
window.stream = stream; window.stream = stream;
...@@ -88,6 +90,7 @@ export default class Camera { ...@@ -88,6 +90,7 @@ export default class Camera {
domElement.height = $(domElement).height(); domElement.height = $(domElement).height();
} }
domElement.play(); domElement.play();
callback && callback();
}, false); }, false);
} }
......
...@@ -82,9 +82,10 @@ export default class GraphExecutor { ...@@ -82,9 +82,10 @@ export default class GraphExecutor {
// console.log(inputs, outputs); // console.log(inputs, outputs);
if (this.type !== 'feed') { if (this.type !== 'feed') {
// let time = +Date.now(); // let time = +Date.now();
log.start(this.opData.iLayer + '-' + this.type); // log.start(this.opData.iLayer + '-' + this.type);
console.log(this.type, this.opData);
runtime.run(this.type, this.opData, isRendered); runtime.run(this.type, this.opData, isRendered);
log.end(this.opData.iLayer + '-' + this.type); // log.end(this.opData.iLayer + '-' + this.type);
// if (runtime.gpu.frameBufferIsComplete().isComplete) { // if (runtime.gpu.frameBufferIsComplete().isComplete) {
// var result = runtime.read(); // var result = runtime.read();
// let res = Array.prototype.slice.call(result); // let res = Array.prototype.slice.call(result);
......
...@@ -137,7 +137,7 @@ export default class PostProcess { ...@@ -137,7 +137,7 @@ export default class PostProcess {
this.lastRect = [0, 0, 0, 0] this.lastRect = [0, 0, 0, 0]
} }
run(data, img, callback) { run(data, img, callback, canavs) {
let {from, to} = this.modelConfig.outputShapes; let {from, to} = this.modelConfig.outputShapes;
let shape = [].concat(from).reverse(); let shape = [].concat(from).reverse();
// 1.从一维数组到1*25*19*19 // 1.从一维数组到1*25*19*19
...@@ -167,7 +167,7 @@ export default class PostProcess { ...@@ -167,7 +167,7 @@ export default class PostProcess {
// console.log('final', finalData); // console.log('final', finalData);
// 5.处理画布 // 5.处理画布
// finalData.length && handleCanvas(finalData, img); // finalData.length && handleCanvas(finalData, img);
this.handleDiv(finalData, img, callback); this.handleDiv(finalData, img, callback, canavs);
} }
calSize(img) { calSize(img) {
...@@ -213,7 +213,7 @@ export default class PostProcess { ...@@ -213,7 +213,7 @@ export default class PostProcess {
return finalData; return finalData;
} }
handleDiv(finalData, img, callback) { handleDiv(finalData, img, callback, canavs) {
if (finalData.length < 1) { if (finalData.length < 1) {
callback(); callback();
return false; return false;
...@@ -230,7 +230,7 @@ export default class PostProcess { ...@@ -230,7 +230,7 @@ export default class PostProcess {
let [demoLeft, demoTop, demoWidth, demoHeight] = finalData[maxIndex]; let [demoLeft, demoTop, demoWidth, demoHeight] = finalData[maxIndex];
if (!isSimilar(this.lastRect, [demoLeft, demoTop, demoWidth, demoHeight])) { if (!isSimilar(this.lastRect, [demoLeft, demoTop, demoWidth, demoHeight])) {
callback([demoWidth, demoHeight,demoLeft, demoTop]); callback([demoWidth, demoHeight,demoLeft, demoTop], canavs);
}; };
this.lastRect = [demoLeft, demoTop, demoWidth, demoHeight]; this.lastRect = [demoLeft, demoTop, demoWidth, demoHeight];
} }
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
* r.run(document.getElementById('test')); * r.run(document.getElementById('test'));
* }); * });
*/ */
import IO from '../feed/ImageFeed'; import IO from '../feed/ImageFeed';
import DataFeed from '../feed/dataFeed'; import DataFeed from '../feed/dataFeed';
import Graph from './loader'; import Graph from './loader';
...@@ -43,10 +42,12 @@ export default class Runner { ...@@ -43,10 +42,12 @@ export default class Runner {
shape: [1, 3, fh, fw] shape: [1, 3, fh, fw]
}]; }];
const MODEL_URL = `/${path}/model.json`; const MODEL_URL = `/${path}/model.json`;
let dir = `https://mms-graph.cdn.bcebos.com/activity/facegame/paddle/${path}/`;
if (location.href.indexOf('test=1') > -1) {
dir = `/src/view/common/lib/paddle/${path}/`;
}
const MODEL_CONFIG = { const MODEL_CONFIG = {
dir: `/${path}/`, // 存放模型的文件夹 dir: dir,
// dir: `https://graph.baidu.com/mms/graph/static/asset/dll/${path}/`, // rd测试地址
// dir: `/src/view/common/lib/paddle/dist/${path}/`, // 本地测试地址
main: 'model.json' // 主文件 main: 'model.json' // 主文件
}; };
const graphModel = new Graph(); const graphModel = new Graph();
...@@ -77,8 +78,8 @@ export default class Runner { ...@@ -77,8 +78,8 @@ export default class Runner {
console.warn('It\'s better to preheat the model before running.'); console.warn('It\'s better to preheat the model before running.');
await this.preheat(); await this.preheat();
} }
log.start('总耗时'); // eslint-disable-line // log.start('总耗时'); // eslint-disable-line
log.start('预处理'); // eslint-disable-line // log.start('预处理'); // eslint-disable-line
let feed; let feed;
if (typeof input === 'string') { if (typeof input === 'string') {
const dfIO = new DataFeed(); const dfIO = new DataFeed();
...@@ -103,13 +104,13 @@ export default class Runner { ...@@ -103,13 +104,13 @@ export default class Runner {
} }
}); });
} }
log.end('预处理'); // eslint-disable-line // log.end('预处理'); // eslint-disable-line
log.start('运行耗时'); // eslint-disable-line // log.start('运行耗时'); // eslint-disable-line
let inst = this.model.execute({ let inst = this.model.execute({
input: feed input: feed
}); });
let result = await inst.read(); let result = await inst.read();
log.end('后处理-读取数据'); // eslint-disable-line // log.end('后处理-读取数据'); // eslint-disable-line
const newData = []; const newData = [];
let newIndex = -1; let newIndex = -1;
const [w, h, c, b] = this.modelConfig.outputShapes.from; const [w, h, c, b] = this.modelConfig.outputShapes.from;
...@@ -126,15 +127,15 @@ export default class Runner { ...@@ -126,15 +127,15 @@ export default class Runner {
} }
} }
} }
this.postProcess.run(newData, input, callback); this.postProcess.run(newData, input, callback, feed[0].canvas);
log.end('后处理'); // eslint-disable-line // log.end('后处理'); // eslint-disable-line
this.flags.isRunning = false; this.flags.isRunning = false;
log.end('总耗时'); // eslint-disable-line // log.end('总耗时'); // eslint-disable-line
} }
// 传入获取图片的function // 传入获取图片的function
async runStream(getMedia, callback) { async runStream(getMedia, callback) {
await this.run(getMedia(), callback); await this.run(getMedia, callback);
if (!this.flags.runVideoPaused) { if (!this.flags.runVideoPaused) {
setTimeout(async () => { setTimeout(async () => {
await this.runStream(getMedia, callback); await this.runStream(getMedia, callback);
......
import ops from './ops'; import ops from './ops';
/** /**
* @file 工厂类,生成fragment shader * @file 工厂类,生成fragment shader
* @author yangmingming * @author wangqun
*/ */
export default class Factory { export default class Factory {
constructor(opts) { constructor(opts) {
......
...@@ -130,6 +130,11 @@ export default { ...@@ -130,6 +130,11 @@ export default {
func: dynamic_func, func: dynamic_func,
confs: dynamic_conf confs: dynamic_conf
}, },
relu6: {
params: dynamic_params,
func: dynamic_func,
confs: dynamic_conf
},
scale: { scale: {
params: dynamic_params, params: dynamic_params,
func: dynamic_func, func: dynamic_func,
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
export default class imageFeed { export default class imageFeed {
constructor() { constructor() {
this.fromPixels2DContext = document.createElement('canvas').getContext('2d'); this.fromPixels2DContext = document.createElement('canvas').getContext('2d');
this.fromPixels2DContext2 = document.createElement('canvas').getContext('2d');
this.defaultWidth = 224; this.defaultWidth = 224;
this.defaultHeight = 224; this.defaultHeight = 224;
this.minPixels = 225; this.minPixels = 225;
...@@ -32,7 +33,8 @@ export default class imageFeed { ...@@ -32,7 +33,8 @@ export default class imageFeed {
let output = []; let output = [];
if (!this.result) { if (!this.result) {
const [b, c, h, w] = params.targetShape; const [b, c, h, w] = params.targetShape;
this.result = new Float32Array(h * w * 3); // 计算确定targetShape所需Float32Array占用空间
this.result = new Float32Array(h * w * c);
} }
output = this.fromPixels(input, params); output = this.fromPixels(input, params);
return output; return output;
...@@ -49,14 +51,17 @@ export default class imageFeed { ...@@ -49,14 +51,17 @@ export default class imageFeed {
const vPadding = Math.ceil((sh - height) / 2); const vPadding = Math.ceil((sh - height) / 2);
let data = imageData.data; let data = imageData.data;
// channel RGB
let red = []; let red = [];
let green = []; let green = [];
let blue = []; let blue = [];
// 平均数
let mean = opt.mean; let mean = opt.mean;
// 标准差
let std = opt.std; let std = opt.std;
// 考虑channel因素获取数据
for (let i = 0; i < data.length; i += 4) { for (let i = 0; i < data.length; i += 4) {
// img_mean 0.485, 0.456, 0.406
//img_std 0.229, 0.224, 0.225
let index = i / 4; let index = i / 4;
let vIndex = Math.floor(index / sw); let vIndex = Math.floor(index / sw);
let hIndex = index - (vIndex * sw) - 1; let hIndex = index - (vIndex * sw) - 1;
...@@ -67,6 +72,7 @@ export default class imageFeed { ...@@ -67,6 +72,7 @@ export default class imageFeed {
blue.push(((data[i + 2] / 255) - mean[2]) / std[2]); // blue blue.push(((data[i + 2] / 255) - mean[2]) / std[2]); // blue
} }
} }
// 转成 GPU 加速 NCHW 格式
let tmp = green.concat(blue); let tmp = green.concat(blue);
return red.concat(tmp); return red.concat(tmp);
}; };
...@@ -78,7 +84,7 @@ export default class imageFeed { ...@@ -78,7 +84,7 @@ export default class imageFeed {
allReshapeToRGB(imageData, opt, scaleSize) { allReshapeToRGB(imageData, opt, scaleSize) {
const {sw, sh} = scaleSize; const {sw, sh} = scaleSize;
const [b, c, h, w] = opt.targetShape; const [b, c, h, w] = opt.targetShape;
let data = imageData.data; let data = imageData.data || imageData;
let mean = opt.mean; let mean = opt.mean;
let dataLength = data.length; let dataLength = data.length;
// let result = new Float32Array(dataLength * 3); // let result = new Float32Array(dataLength * 3);
...@@ -127,6 +133,7 @@ export default class imageFeed { ...@@ -127,6 +133,7 @@ export default class imageFeed {
this.fromPixels2DContext.canvas.height = sh; this.fromPixels2DContext.canvas.height = sh;
this.fromPixels2DContext.drawImage( this.fromPixels2DContext.drawImage(
image, 0, 0, sw, sh); image, 0, 0, sw, sh);
this.setInputCanvas(image);
return {sw, sh}; return {sw, sh};
}; };
...@@ -167,11 +174,26 @@ export default class imageFeed { ...@@ -167,11 +174,26 @@ export default class imageFeed {
image, 0, 0, sw, sh); image, 0, 0, sw, sh);
// currentPic = this.fromPixels2DContext.canvas.toDataURL(); // currentPic = this.fromPixels2DContext.canvas.toDataURL();
} }
this.setInputCanvas(image);
// window.currentPic = this.fromPixels2DContext.canvas;// test only, demele me // window.currentPic = this.fromPixels2DContext.canvas;// test only, demele me
// document.getElementById('p-c').appendChild(this.fromPixels2DContext.canvas);// test only, demele me // document.getElementById('p-c').appendChild(this.fromPixels2DContext.canvas);// test only, demele me
return {sw: targetWidth, sh: targetHeight}; return {sw: targetWidth, sh: targetHeight};
} }
/**
* 设置原始video画布
* @param image 原始video
*/
setInputCanvas(image) {
// 原始图片宽高
const width = this.pixelWidth;
const height = this.pixelHeight;
// 画布设置
this.fromPixels2DContext2.canvas.width = width;
this.fromPixels2DContext2.canvas.height = height;
this.fromPixels2DContext2.drawImage(image, 0, 0, width, height);
}
/** /**
* 获取图像内容 * 获取图像内容
* @param pixels * @param pixels
...@@ -179,11 +201,12 @@ export default class imageFeed { ...@@ -179,11 +201,12 @@ export default class imageFeed {
*/ */
getImageData(pixels, scaleSize) { getImageData(pixels, scaleSize) {
const {sw, sh} = scaleSize; const {sw, sh} = scaleSize;
// 复制画布上指定矩形的像素数据
let vals = this.fromPixels2DContext let vals = this.fromPixels2DContext
.getImageData(0, 0, sw, sh); .getImageData(0, 0, sw, sh);
// crop图像 // crop图像
const width = pixels.width; // const width = pixels.width;
const height = pixels.height; // const height = pixels.height;
return vals; return vals;
}; };
...@@ -196,6 +219,7 @@ export default class imageFeed { ...@@ -196,6 +219,7 @@ export default class imageFeed {
let data = imageData.data; let data = imageData.data;
for (let i = 0; i < data.length; i += 4) { for (let i = 0; i < data.length; i += 4) {
// 3 channel 灰度处理无空间压缩
let avg = (data[i] + data[i + 1] + data[i + 2]) / 3; let avg = (data[i] + data[i + 1] + data[i + 2]) / 3;
data[i] = avg; // red data[i] = avg; // red
data[i + 1] = avg; // green data[i + 1] = avg; // green
...@@ -206,6 +230,8 @@ export default class imageFeed { ...@@ -206,6 +230,8 @@ export default class imageFeed {
fromPixels(pixels, opt) { fromPixels(pixels, opt) {
let data; let data;
// 原始video画布数据
let data2;
let scaleSize; let scaleSize;
if (pixels instanceof HTMLImageElement || pixels instanceof HTMLVideoElement) { if (pixels instanceof HTMLImageElement || pixels instanceof HTMLVideoElement) {
this.pixelWidth = pixels.naturalWidth || pixels.width; this.pixelWidth = pixels.naturalWidth || pixels.width;
...@@ -213,10 +239,12 @@ export default class imageFeed { ...@@ -213,10 +239,12 @@ export default class imageFeed {
if (opt.scale) { // 兼容以前的,如果有scale就是短边缩放到scale模式 if (opt.scale) { // 兼容以前的,如果有scale就是短边缩放到scale模式
scaleSize = this.reSize(pixels, opt); scaleSize = this.reSize(pixels, opt);
data = this.getImageData(opt, scaleSize); data = this.getImageData(opt, scaleSize);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
} }
else if (opt.targetSize) { // 如果有targetSize,就是装在目标宽高里的模式 else if (opt.targetSize) { // 如果有targetSize,就是装在目标宽高里的模式
scaleSize = this.fitToTargetSize(pixels, opt); scaleSize = this.fitToTargetSize(pixels, opt);
data = this.getImageData(opt, scaleSize); data = this.getImageData(opt, scaleSize);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
} }
} }
...@@ -224,14 +252,15 @@ export default class imageFeed { ...@@ -224,14 +252,15 @@ export default class imageFeed {
data = grayscale(data); data = grayscale(data);
} }
if (opt.shape) { if (opt.reShape) {
data = this.reshape(data, opt, scaleSize); data = this.reshape(data, opt, scaleSize);
} }
if (opt.targetShape) { if (opt.targetShape) {
data = this.allReshapeToRGB(data, opt, scaleSize); data = this.allReshapeToRGB(data, opt, scaleSize);
} }
return [{data: data, shape: opt.shape || opt.targetShape, name: 'image'}];
return [{data: data, shape: opt.shape || opt.targetShape, name: 'image', canvas: data2}];
} }
} }
/* eslint-enable */ /* eslint-enable */
...@@ -3,7 +3,7 @@ import VSHADER from '../shader/v_shader'; ...@@ -3,7 +3,7 @@ import VSHADER from '../shader/v_shader';
import VSHADER2 from '../shader/v_shader2'; import VSHADER2 from '../shader/v_shader2';
/** /**
* @file gpu运算 * @file gpu运算
* @author yangmingming * @author wangqun@baidu.com, yangmingming@baidu.com
*/ */
const CONF = { const CONF = {
alpha: false, alpha: false,
...@@ -268,6 +268,7 @@ export default class gpu { ...@@ -268,6 +268,7 @@ export default class gpu {
// this.currentTexture = this.textureBuffer[this.textureBufferIndex % 2]; // this.currentTexture = this.textureBuffer[this.textureBufferIndex % 2];
// this.textureBufferIndex = (this.textureBufferIndex + 1) >= 2 ? 0 : 1; // this.textureBufferIndex = (this.textureBufferIndex + 1) >= 2 ? 0 : 1;
this.currentTexture = this.outTextures[iLayer]; this.currentTexture = this.outTextures[iLayer];
console.log('this.currentTexture', this.currentTexture);
const gl = this.gl; const gl = this.gl;
gl.framebufferTexture2D(gl.FRAMEBUFFER, // The target is always a FRAMEBUFFER. gl.framebufferTexture2D(gl.FRAMEBUFFER, // The target is always a FRAMEBUFFER.
gl.COLOR_ATTACHMENT0, // We are providing the color buffer. gl.COLOR_ATTACHMENT0, // We are providing the color buffer.
...@@ -360,9 +361,16 @@ export default class gpu { ...@@ -360,9 +361,16 @@ export default class gpu {
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texImage2D(gl.TEXTURE_2D, 0, this.internalFormat, item.width_texture, gl.texImage2D(gl.TEXTURE_2D,
item.height_texture, 0, 0,
this.textureFormat, gl.FLOAT, item.data, 0); this.internalFormat,
item.width_texture,
item.height_texture,
0,
this.textureFormat,
gl.FLOAT,
item.data,
0);
} }
} }
...@@ -404,14 +412,15 @@ export default class gpu { ...@@ -404,14 +412,15 @@ export default class gpu {
render(data = [], iLayer = 0, isRendered = false) { render(data = [], iLayer = 0, isRendered = false) {
const gl = this.gl; const gl = this.gl;
let that = this;
let textureIndex = 0; let textureIndex = 0;
data.forEach(item => { data.forEach(item => {
if (item.type === 'texture') { if (item.type === 'texture') {
this.initTexture(textureIndex, item, iLayer, isRendered); that.initTexture(textureIndex, item, iLayer, isRendered);
gl.uniform1i(this.getUniformLoc(item.variable + '_' + item.tensor, iLayer, isRendered), textureIndex++); gl.uniform1i(that.getUniformLoc(item.variable + '_' + item.tensor, iLayer, isRendered), textureIndex++);
} }
else if (item.type === 'uniform') { else if (item.type === 'uniform') {
gl[item.setter](this.getUniformLoc(item.variable + '_' + item.tensor, iLayer, isRendered), item.data); gl[item.setter](that.getUniformLoc(item.variable + '_' + item.tensor, iLayer, isRendered), item.data);
} }
}); });
// gl.clearColor(.0, .0, .0, 1); // gl.clearColor(.0, .0, .0, 1);
...@@ -437,7 +446,7 @@ export default class gpu { ...@@ -437,7 +446,7 @@ export default class gpu {
gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer); gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer);
gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, pixels); gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, pixels);
gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null); gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null);
log.start('后处理-readloop'); // log.start('后处理-readloop');
// let result = []; // let result = [];
// let offset = 0; // let offset = 0;
// for (let h = 0; h < this.height_texture_out; h++) { // for (let h = 0; h < this.height_texture_out; h++) {
...@@ -460,7 +469,7 @@ export default class gpu { ...@@ -460,7 +469,7 @@ export default class gpu {
} }
// const result = Array.prototype.slice.call(pixels); // const result = Array.prototype.slice.call(pixels);
// console.dir(['result', result]); // console.dir(['result', result]);
log.end('后处理-readloop'); // log.end('后处理-readloop');
return result; return result;
} }
...@@ -517,20 +526,20 @@ export default class gpu { ...@@ -517,20 +526,20 @@ export default class gpu {
compute() { compute() {
let gl = this.gl; let gl = this.gl;
log.start('后处理-readinside'); // log.start('后处理-readinside');
const tt = +Date.now(); const tt = +Date.now();
let pixels = new Float32Array(this.width_texture_out * this.height_texture_out * 4); let pixels = new Float32Array(this.width_texture_out * this.height_texture_out * 4);
// gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1); // gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
const tt2 = +Date.now(); const tt2 = +Date.now();
gl.readPixels(0, 0, this.width_texture_out, this.height_texture_out, gl.RGBA, gl.FLOAT, pixels, 0); gl.readPixels(0, 0, this.width_texture_out, this.height_texture_out, gl.RGBA, gl.FLOAT, pixels, 0);
// console.log('本次读取数据时间是' + (+Date.now() - tt2)+ ',' + (tt2 - tt)); // console.log('本次读取数据时间是' + (+Date.now() - tt2)+ ',' + (tt2 - tt));
log.end('后处理-readinside'); // log.end('后处理-readinside');
log.start('后处理-readloop'); // log.start('后处理-readloop');
let result = []; let result = [];
for (let i = 0; i < this.width_texture_out * this.height_texture_out; i++) { for (let i = 0; i < this.width_texture_out * this.height_texture_out; i++) {
result.push(pixels[4 * i]); result.push(pixels[4 * i]);
} }
log.end('后处理-readloop'); // log.end('后处理-readloop');
return result; return result;
} }
......
/* eslint-disable */ /* eslint-disable */
import GraphExecutor from './executor'; import GraphExecutor from '../executor/executor';
import IO from '../feed/imageFeed'; import IO from '../feed/imageFeed';
import Runtime from '../../src/runtime/runtime'; import Runtime from '../runtime/runtime';
import OpData from '../utils/opData'; import OpData from '../utils/opData';
import Factory from '../factory/fshader/factory'; import Factory from '../factory/fshader/factory';
import Utils from '../utils/utils'; import Utils from '../utils/utils';
/** /**
* @file GraphModel,绘制生成model网络 * @file Graph,绘制生成model网络
* @author wangqun@baidu.com * @author wangqun@baidu.com
*/ */
let start = 0;
// 生成factory实例 // 生成factory实例
const factory = new Factory({}); const factory = new Factory({});
// 获取op的输入配置 // 获取op的输入配置
const opConfs = factory.getOpConfs(); const opConfs = factory.getOpConfs();
export default class GraphModel {
constructor(modelGonfig, loadOptions) { export default class Graph {
constructor(options) {
this.version = '0.0.1'; this.version = '0.0.1';
this.handler = 'io.IOHandler'; this.handler = 'io.IOHandler';
this.modelGonfig = modelGonfig; this.weightMap = '';
this.loadOptions = loadOptions; this.options = options || {};
this.multipart = false;
// feed数据 // feed数据
this.feed = null; this.feed = null;
this.index = 0; this.index = 0;
this.feedOp = null; this.feedOp = null;
this.feedItem = null; this.feedItem = null;
this.test = false;
this.isExecuted = false; this.isExecuted = false;
// 网络层数 // 网络层数
this.iLayer = 0; this.iLayer = 0;
// fetch xhr jsonp
this.params = {type: 'fetch'}; if (this.options && this.options.options && this.options.options.test === true) {
// 设置分片加载model this.test = true;
if (this.loadOptions) {
this.multipart = this.loadOptions.multipart;
this.feed = {input: this.loadOptions.feed};
if (loadOptions.dataType === 'binary') {
this.binaryOption = loadOptions.binaryOption;
}
} }
if (!this.loadOptions) { if (!this.inst) {
this.loadOptions = {};
} else {
// op runner // op runner
this.inst = Runtime.init(); this.inst = Runtime.init();
factory.setWebglVersion(this.inst.getWebglVersion()); factory.setWebglVersion(this.inst.getWebglVersion());
// this.fetchJson(this.modelGonfig.dir + 'x.json').then(data => {
// const [b, c, h, w] = [1, 3, 320, 320];
// const size = data.length;
// const total = 3 * 320 * 320;
// this.testData = new Float32Array(total);
// for (let i = 0; i < size; i++) {
// let j = i / (c * w) | 0;
// let k = i % (c * w);
// let b1 = j / h | 0;
// let h1 = j % h;
// let c1 = k % c;
// let w1 = k / c | 0;
// let l = b1 * (c * h * w) + c1 * (h * w) + h1 * (w) + w1;
// this.testData[i] = data[l];
// }
// });
}
}
fetchOneChunk(path) {
return this.fetch(path).then(request => {
return request.arrayBuffer();
})
}
fetchJson(path) {
return this.fetch(path).then(request => {
return request.json();
})
}
fetchAllData() {
// todo 兼容一下json的模式
let counts = this.binaryOption.fileCount;
let chunkArray = [];
for (let i = 1; i <= counts; i++) {
chunkArray.push(
this.fetchOneChunk(this.modelGonfig.dir + this.binaryOption.getFileName(i))
);
}
console.time('加载时间');
return Promise.all(chunkArray).then(chunks => {
console.timeEnd('加载时间');
let chunksLength = 0;
let f32Array = [];
let float32Chunk;
chunks.forEach(i => {
float32Chunk = new Float32Array(i);
f32Array.push(float32Chunk);
chunksLength += float32Chunk.length;
});
this.allData = new Float32Array(chunksLength);
let offset = 0;
f32Array.forEach(i => {
i.forEach(num => {
this.allData[offset] = num;
offset += 1;
})
});
});
}
traverse (arr) {
const TMP_SCHEME_REGEX = /\.tmp/;
const TMP_REGEX = /\-/;
let marker = 0; // 读到哪个位置了
let len; // 当前op长度
arr.filter(item => {
return item.name
&& item.name.match(TMP_SCHEME_REGEX) === null
&& item.name.match(TMP_REGEX) === null;
})
// .sort((a, b) => {
// if (a.name > b.name) {
// return 1;
// }
// if (a.name < b.name) {
// return -1;
// }
// return 0;
// }) // 按字母顺序排列 在model.json里
.forEach(item => {
len = item.shape.reduce((a, b) => a * b); // 长度为shape的乘积
item.data = this.allData.slice(marker, marker + len);
marker += len;
});
}
fetch(path, params) {
params = params || this.params;
let method = params.method || 'get';
let mode = params.mode || 'cors';
let myHeaders = new Headers();
return fetch(path, {
method: method,
mode: mode,
credentials: 'include',
headers: myHeaders
});
}
fetchModel(params) {
params = params || this.params;
const path = this.modelGonfig.dir + this.modelGonfig.main;
let load = null;
// jsonp请求方式
if (params && params.type === 'jsonp') {
let json;
let s = document.createElement('script');
s.src = path + '&jsonpCallback=fn';
window.fn = function(data) {
json = data;
// console.log(json);
};
//当script被插入文档中时,src中的资源就会开始加载
document.body.appendChild(s);
load = new Promise((resolve, reject) => {
s.onload = function(e) {
resolve(json);
}
s.onerror = function() {
reject(json);
}
});
this.handler = load;
}
// 原生fetch
else if (params.type === 'fetch') {
load = new Promise((resolve, reject) => {
this.fetch(path, params)
.then(response => response.json())
.then(responseData => resolve(responseData))
.then(err => reject(err))
});
this.handler = load;
} }
// ajax
else if (params.type === 'xhr') {
this.handler = load;
}
return load;
}
async load() {
let that = this;
const artifacts = this.handler = await this.fetchModel();
if (this.multipart === true) {
await this.fetchAllData()
.then(() => this.traverse(artifacts.vars));
}
const opsMap = this.createOpsMap(artifacts.ops, artifacts.vars);
this.weightMap = this.constructOpsMap(opsMap);
// 生成op数据
this.weightMap.forEach(op => {
const type = op.type;
if (type !== 'feed' && type !== 'fetch') {
that.buildOpData(op);
}
});
return true;
} }
buildOpData(op) { buildOpData(op) {
const tensor = this.constructTensor(op); const executor = this.constructExecutor(op);
const opData = new OpData(op.type, tensor.inputs, tensor.outputs, tensor.attrs); const opData = new OpData(op.type, executor.inputs, executor.outputs, executor.attrs);
const name = opData.name; const name = opData.name;
const fsCode = factory.buildShader(name, opData.data); const fsCode = factory.buildShader(name, opData.data);
opData.fsCode = fsCode; opData.fsCode = fsCode;
opData.program = this.inst.createProgram(fsCode, opData.tensor['out']); opData.program = this.inst.createProgram(fsCode, opData.tensor['out']);
opData.renderData = opConfs[name].map(elem => { opData.renderData = opConfs[name].map(elem => {
...@@ -214,6 +57,7 @@ export default class GraphModel { ...@@ -214,6 +57,7 @@ export default class GraphModel {
const tensorData = opData.tensor[item.tensor]; const tensorData = opData.tensor[item.tensor];
if (item.type === 'texture') { if (item.type === 'texture') {
item.data = tensorData.data; item.data = tensorData.data;
if (this.feedOp.id === op.id && item.tensor === 'origin') { if (this.feedOp.id === op.id && item.tensor === 'origin') {
item.shape = tensorData.shape; item.shape = tensorData.shape;
this.feedItem = item; this.feedItem = item;
...@@ -226,6 +70,7 @@ export default class GraphModel { ...@@ -226,6 +70,7 @@ export default class GraphModel {
} }
return item; return item;
}); });
// console.timeEnd('opData.renderData'); // console.timeEnd('opData.renderData');
opData.iLayer = this.iLayer++; opData.iLayer = this.iLayer++;
op.opData = opData; op.opData = opData;
...@@ -238,10 +83,11 @@ export default class GraphModel { ...@@ -238,10 +83,11 @@ export default class GraphModel {
return; return;
} }
executor.execute(this.inst, this.isExecuted); executor.execute(this.inst, this.isExecuted);
// if (executor.next && start++ < 2) {
if (executor.next) { if (executor.next) {
const id = executor.next; const id = executor.next;
const next = this.getTensor(id); const next = this.getTensor(id);
this.execute_(next[0]) this.execute_(next[0]);
} }
} }
/** /**
...@@ -262,7 +108,6 @@ export default class GraphModel { ...@@ -262,7 +108,6 @@ export default class GraphModel {
if (this.isExecuted) { if (this.isExecuted) {
this.updateFeed(); this.updateFeed();
} }
let start = +Date.now();
this.execute_(executor[0]); this.execute_(executor[0]);
this.isExecuted = true; this.isExecuted = true;
return this.inst; return this.inst;
...@@ -280,13 +125,13 @@ export default class GraphModel { ...@@ -280,13 +125,13 @@ export default class GraphModel {
return this.execute_(inputs, true, this.outputNodes); return this.execute_(inputs, true, this.outputNodes);
} }
getTensorAttr(name) { getTensorAttr(name) {
return this.handler.vars.filter((item, i) => { return this.data.vars.filter((item, i) => {
if (name === item.name) if (name === item.name)
return item; return item;
}); });
} }
constructTensor(executor) { constructExecutor(executor) {
const that = this; let that = this;
const inputName = executor.inputsName[0]; const inputName = executor.inputsName[0];
const input = executor.inputs; const input = executor.inputs;
const output = executor.outputs; const output = executor.outputs;
...@@ -294,14 +139,22 @@ export default class GraphModel { ...@@ -294,14 +139,22 @@ export default class GraphModel {
output[key] = that.getTensorAttr(output[key][0]); output[key] = that.getTensorAttr(output[key][0]);
}); });
Object.keys(input).forEach(function(key){ Object.keys(input).forEach(function(key){
if ((key === 'Input') && (inputName === 'pixel')) { if (that.test && ((key === 'Input') || (key === 'X'))) {
const pixel = that.getTensorAttr(inputName); input[key] = that.getTensorAttr(input[key][0]);
const io = new IO(); that.feedOp = executor;
input[key] = io.fromPixels(data, pixel); }
else if ((key === 'Input') && (inputName === 'pixel')) {
// const pixel = that.getTensorAttr(inputName);
// const io = new IO();
// input[key] = io.fromPixels(that.feed, pixel);
input[key] = that.feed.input;
that.feedOp = executor;
} }
else if ((key === 'Input') && (inputName === 'image' || inputName === 'x')) { else if ((key === 'Input') && (inputName === 'image' || inputName === 'x')) {
// that.feed.input[0].data = that.testData; // that.feed.input[0].data = that.testData;
input[key] = that.feed.input; input[key] = that.feed.input;
that.feedOp = executor; that.feedOp = executor;
} }
else { else {
...@@ -309,14 +162,13 @@ export default class GraphModel { ...@@ -309,14 +162,13 @@ export default class GraphModel {
} }
}); });
// console.log(input); // console.log(input);
const tensor = { return {
inputs: input, inputs: input,
outputs: output, outputs: output,
attrs: executor.attrs, attrs: executor.attrs,
type: executor.type, type: executor.type,
next: executor.next next: executor.next
}; };
return tensor;
} }
/** /**
* Construct Ops Relationship * Construct Ops Relationship
...@@ -393,26 +245,7 @@ export default class GraphModel { ...@@ -393,26 +245,7 @@ export default class GraphModel {
} }
}); });
} }
/**
* Load a graph model given a URL to the model definition.
* @param modelGonfig
* @param options
* @returns {Promise<void>}
*/
async loadGraphModel(modelGonfig, options) {
if (modelGonfig === null) {
// todo saniac 报错提示修改
throw new Error(
'modelGonfig in loadGraphModel() cannot be null. Please provide a url ' +
'or an IOHandler that loads the model');
}
if (options === null) {
options = {};
}
const model = new GraphModel(modelGonfig, options);
await model.load();
return model;
}
/** /**
* dispose * dispose
*/ */
......
import 'babel-polyfill';
import Graph from './executor/loader';
import IO from './executor/io';
/**
 * @file model demo entry file
 * @author yangmingming@baidu.com
 *
 */
// 'http://mms-xr.cdn.bcebos.com/paddle/mnist/model.json'
const MODEL_URL = '../demo/model/model.json';
const fileDownload = require('js-file-download');

(async () => {
    const graphModel = new Graph();
    // bugfix: loadGraphModel is async and returns a Promise — it must be
    // awaited before execute() can be called on the resolved model.
    const model = await graphModel.loadGraphModel(MODEL_URL);
    const cat = document.getElementById('pic');
    const io = new IO();
    const inst = model.execute({input: cat});
    // read() performs the GPU readback and may be async — await its result
    // (presumably a Float32Array; TODO confirm against the runtime).
    const res = await inst.read();
    console.dir(['result', res]);
    fileDownload(res, 'result.csv');
})();
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>paddle web demo</title>
<meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no">
</head>
<body>
<div><img id="pic" src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/wAALCAAcABwBAREA/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/9oACAEBAAA/APn+vTPDHwP8TeJ9DtdXiuLCzt7kbo0uWcOU7NgKRgjkc81i+O/hvrPgW8xco1zp7ELHfIm1HYqCRjJIPUc9cHFcbSgEnABJ9BXaafH8Rrrw3NpdjBrkmjohLQLE/l7c5OOPUHgV6Fcw3um/sxXNt4hZo7qW5X7FDdLtlRfOU7QG5zgSH/dPpXhFel/Bzxj4a8H6vfzeILZy86ILe6WLzPI27i3HUZ+XkA9PQ16Pc/Hfw7pM91LaXusa20wDRxSQRQww9eAdob35DfWuNg+Ny67Dfab430SDUNLuQxjW2UK8BwcAZPPOPmyCOvPSvH6KKKK//9k=" ></div>
</body>
<script src="index.es6"></script>
</html>
/* eslint-disable */
/**
* @file loader,model加载器
* @author wangqun@baidu.com
*/
export default class Loader {
    /**
     * Model loader: downloads the model definition (json via fetch/jsonp/xhr)
     * and, for multipart models, the weight data — either as binary chunks
     * or as per-variable json files.
     * @param {Object} modelGonfig - model location, {dir, main}
     * @param {Object} options - load options: {multipart, dataType, options, test}
     */
    constructor(modelGonfig, options) {
        this.version = '0.0.1';
        this.data = {};
        this.modelGonfig = modelGonfig;
        this.options = options;
        this.multipart = false;
        this.test = false;
        // transport type: fetch | xhr | jsonp
        this.params = {type: 'fetch'};
        // configure sharded (multipart) model loading
        if (this.options) {
            this.multipart = this.options.multipart;
            if (options.dataType === 'binary') {
                // binaryOption describes the weight shards: {fileCount, getFileName}
                this.binaryOption = options.options;
                this.dataType = options.dataType;
            }
            if (options.test) {
                this.test = true;
            }
        }
        // kept for backward compatibility; nothing in this class reads it
        if (!this.loadOptions) {
            this.loadOptions = {};
        }
    }
    // fetch one binary weight shard as an ArrayBuffer
    fetchOneChunk(path) {
        return this.fetch(path).then(request => {
            return request.arrayBuffer();
        });
    }
    // fetch a json resource and parse it
    fetchJson(path) {
        return this.fetch(path).then(request => {
            return request.json();
        });
    }
    /**
     * Download every binary weight shard and concatenate them into
     * this.allData as one Float32Array.
     * @returns {Promise<void>}
     */
    fetchChunks() {
        let counts = this.binaryOption.fileCount;
        let chunkArray = [];
        for (let i = 1; i <= counts; i++) {
            chunkArray.push(
                this.fetchOneChunk(this.modelGonfig.dir + this.binaryOption.getFileName(i))
            );
        }
        return Promise.all(chunkArray).then(chunks => {
            let chunksLength = 0;
            let f32Array = [];
            chunks.forEach(chunk => {
                const float32Chunk = new Float32Array(chunk);
                f32Array.push(float32Chunk);
                chunksLength += float32Chunk.length;
            });
            this.allData = new Float32Array(chunksLength);
            let offset = 0;
            f32Array.forEach(float32Chunk => {
                // bulk copy instead of per-element assignment
                this.allData.set(float32Chunk, offset);
                offset += float32Chunk.length;
            });
        });
    }
    /**
     * Fetch the json data file of a single variable.
     * @param {string} name - variable name; resolved to <dir>/<name>.json
     * @returns {Promise<Object>} parsed json; rejects on network/parse errors
     */
    fetchData(name) {
        const path = this.modelGonfig.dir + name + '.json';
        let load = new Promise((resolve, reject) => {
            fetch(path, {
                method: 'get', mode: 'cors', credentials: "include",
                headers: { 'Content-Type': 'application/json;charset=utf-8'}})
                .then(response => response.json())
                .then(responseData => resolve(responseData))
                // bugfix: errors were chained with .then so rejections were
                // never propagated and the promise hung forever on failure
                .catch(err => reject(err));
        });
        return load;
    }
    // fetch the data of every (non-temporary) variable in parallel
    async fetchAllDate (arr) {
        const TMP_SCHEME_REGEX = /\.tmp/;
        const TMP_REGEX = /\-/;
        let requesterArr = arr.map(item => {
            if (item.name
                && item.name.match(TMP_SCHEME_REGEX) === null
                && item.name.match(TMP_REGEX) === null) {
                return this.fetchData(item.name).then(data => item.data = data);
            }
            return Promise.resolve();
        });
        return Promise.all(requesterArr);
    }
    /**
     * Slice this.allData into the vars: each non-temporary item receives
     * a chunk of length prod(item.shape), consumed in array order.
     */
    traverse (arr) {
        const TMP_SCHEME_REGEX = /\.tmp/;
        const TMP_REGEX = /\-/;
        let marker = 0; // current read position inside allData
        let len; // length of the current op's data
        arr.filter(item => {
            return item.name
                && item.name.match(TMP_SCHEME_REGEX) === null
                && item.name.match(TMP_REGEX) === null;
        })
        .forEach(item => {
            len = item.shape.reduce((a, b) => a * b); // element count is the product of the shape
            item.data = this.allData.slice(marker, marker + len);
            marker += len;
        });
    }
    // low-level fetch wrapper (method configurable via params)
    fetch(path, params) {
        params = params || this.params;
        let method = params.method || 'get';
        let myHeaders = new Headers();
        return fetch(path, {
            method: method,
            // mode: params.mode || 'no-cors',
            // credentials: 'include',
            headers: myHeaders
        });
    }
    /**
     * Fetch the main model definition using the transport selected by
     * params.type (jsonp | fetch | xhr).
     * @returns {Promise<Object>|null} promise of the parsed model json
     */
    fetchModel(params) {
        params = params || this.params;
        const path = this.modelGonfig.dir + this.modelGonfig.main;
        let load = null;
        // jsonp transport
        if (params && params.type === 'jsonp') {
            let json;
            let s = document.createElement('script');
            s.src = path + '&jsonpCallback=fn';
            window.fn = function(data) {
                json = data;
            };
            // the script starts loading as soon as it is inserted into the document
            document.body.appendChild(s);
            load = new Promise((resolve, reject) => {
                s.onload = function(e) {
                    resolve(json);
                }
                s.onerror = function() {
                    reject(json);
                }
            });
            this.data = load;
        }
        // native fetch transport
        else if (params.type === 'fetch') {
            load = new Promise((resolve, reject) => {
                this.fetch(path, params)
                    .then(response => response.json())
                    .then(responseData => resolve(responseData))
                    // bugfix: errors were chained with .then so rejections
                    // were never propagated to the caller
                    .catch(err => reject(err));
            });
            this.data = load;
        }
        // ajax transport
        else if (params.type === 'xhr') {
            // TODO: xhr transport is not implemented yet; load stays null
            this.data = load;
        }
        return load;
    }
    /**
     * Load the model definition and, for multipart models, all weight data.
     * @returns {Promise<Object>} the model artifacts ({ops, vars, ...})
     */
    async load() {
        const artifacts = this.data = await this.fetchModel();
        if (this.multipart === true) {
            if (this.dataType === 'binary') {
                await this.fetchChunks()
                    .then(() => this.traverse(artifacts.vars));
            }
            else {
                await this.fetchAllDate(artifacts.vars);
            }
        }
        return artifacts;
    }
}
/* eslint-enable */
/* eslint-disable */
import 'babel-polyfill';
import Loader from '../loader/loader';
import Graph from '../graph/graph';
/**
* @file paddle对象,负责加载模型和执行在线推理
* @author wangqun@baidu.com
*/
export default class Paddle {
    /**
     * Facade that loads a model (via Loader) and runs online inference
     * on it (via Graph).
     * @param {Object} options - {urlConf, options}: model location and load options
     */
    constructor(options) {
        this.version = '0.0.1';
        this.loader = '';
        this.options = options;
        this.graph = '';
        this.multipart = false;
        // feed data
        this.feed = null;
        this.index = 0;
        this.feedOp = null;
        this.feedItem = null;
        this.test = false;
        this.isExecuted = false;
        // number of network layers built so far
        this.iLayer = 0;
        // transport type: fetch | xhr | jsonp
        this.params = {type: 'fetch'};
    }
    /**
     * Load the model described by this.options and build the op graph.
     * @returns {Promise<Paddle>} this instance, ready for execute()
     * @throws {Error} when no options were provided
     */
    async load() {
        // == null also rejects undefined, which previously crashed with a
        // TypeError further down instead of this explicit error
        if (this.options == null) {
            throw new Error(
                'options in Paddle.load() cannot be null. Please provide a urlConf ' +
                'or an IOHandler that loads the model');
        }
        const model = new Loader(this.options.urlConf, this.options.options);
        await model.load();
        this.preGraph(model);
        return this;
    }
    // build the op graph from the loaded model artifacts
    preGraph (artifacts) {
        const graph = new Graph(this.options);
        this.graph = graph;
        graph.data = artifacts.data;
        const opsMap = graph.createOpsMap(graph.data.ops, graph.data.vars);
        graph.weightMap = graph.constructOpsMap(opsMap);
    }
    /**
     * Executes inference for the model for given input tensors.
     * @param inputs feed data, e.g. {input: [...]}
     * @returns {*} the runtime instance holding the result
     */
    execute(inputs) {
        // bugfix: removed leftover `debugger;` statement and console.log
        // debug output that shipped in the hot path
        let that = this;
        this.feed = this.graph.feed = inputs;
        // build per-op data only on the first run; later runs reuse it
        if (!this.graph.isExecuted) {
            this.graph.weightMap.forEach(op => {
                const type = op.type;
                if (type !== 'feed' && type !== 'fetch') {
                    that.graph.buildOpData(op);
                }
            });
        }
        this.graph.execute(inputs);
        return this.graph.inst;
    }
    // push fresh feed data into the already-built graph
    updateFeed() {
        this.graph.feedItem.data = this.graph.feed.input[0].data;
        // Utils.img2texture(this.graph.feedItem);
    }
    /**
     * dispose
     */
    dispose() {
        this.graph.dispose();
    }
}
/* eslint-enable */
...@@ -3,7 +3,7 @@ import Gpu from '../gpu/gpu'; ...@@ -3,7 +3,7 @@ import Gpu from '../gpu/gpu';
import getMaxUniforms from '../test/getMaxUniforms'; import getMaxUniforms from '../test/getMaxUniforms';
/** /**
* @file gpu运行时 * @file gpu运行时
* @author yangmingming * @author wangqun@baidu.com, yangmingming@baidu.com
* *
*/ */
export default { export default {
...@@ -73,10 +73,10 @@ export default { ...@@ -73,10 +73,10 @@ export default {
async read() { async read() {
const pbo = this.gpu.createPBO(); const pbo = this.gpu.createPBO();
await this.gpu.createAndWaitForFence(); await this.gpu.createAndWaitForFence();
log.end('运行耗时'); // log.end('运行耗时');
log.start('后处理'); // log.start('后处理');
// 其实这里应该有个fetch的执行调用或者fetch的输出 // 其实这里应该有个fetch的执行调用或者fetch的输出
log.start('后处理-读取数据'); // log.start('后处理-读取数据');
// 开始读数据 // 开始读数据
return this.gpu.downloadFoat32TensorFromBuffer(pbo); return this.gpu.downloadFoat32TensorFromBuffer(pbo);
}, },
......
...@@ -10,9 +10,14 @@ float prelu(float x, float p, float b) { ...@@ -10,9 +10,14 @@ float prelu(float x, float p, float b) {
if (x < 0.0) { if (x < 0.0) {
result = x * p; result = x * p;
} }
return result;
}
float relu6(float x, float threshold, float b) {
float result = max(0.0,x);
result = min(result,threshold);
return result; return result;
} }
float leakyRelu(float x, float p, float b) { float leakyRelu(float x, float p, float b) {
float result = max(x, x * p); float result = max(x, x * p);
return result; return result;
...@@ -32,4 +37,6 @@ float softmax(float x, float p, float b) { ...@@ -32,4 +37,6 @@ float softmax(float x, float p, float b) {
float result = exp(x) / (10.0 * exp(x)); float result = exp(x) / (10.0 * exp(x));
return result; return result;
} }
`; `;
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/** /**
* @file 公共方法 * @file 公共方法
* @author yangmingming * @author yangmingming
*
*/ */
export default ` export default `
......
...@@ -2,11 +2,13 @@ ...@@ -2,11 +2,13 @@
/** /**
* @file 公共方法 * @file 公共方法
* @author yangmingming * @author yangmingming
* @desc 获取输出tensor的坐标
*/ */
export default ` export default `
ivec4 getOutputTensorPos() { ivec4 getOutputTensorPos() {
// 获取原始长度 // 获取原始长度
vec2 outCoord = moveTexture2PosToReal_texture_out(vCoord.xy); vec2 outCoord = moveTexture2PosToReal_texture_out(vCoord.xy);
// 材质体系转tensor体系坐标位置
int x = int(outCoord.x / float(channel_out)); int x = int(outCoord.x / float(channel_out));
int c = int(mod(outCoord.x, float(channel_out))); int c = int(mod(outCoord.x, float(channel_out)));
int y = int(mod(outCoord.y, float(height_shape_out))); int y = int(mod(outCoord.y, float(height_shape_out)));
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/** /**
* @file 公共方法 * @file 公共方法
* @author yangmingming * @author yangmingming
* desc 根据当前材质坐标位置获取值
*/ */
// 获取材质中的像素 // 获取材质中的像素
export default ` export default `
......
...@@ -2,8 +2,10 @@ ...@@ -2,8 +2,10 @@
/** /**
* @file 公共方法 * @file 公共方法
* @author yangmingming * @author yangmingming
* desc 根据tensor坐标获取这个tensor位置的值
*/ */
export default ` export default `
// 根据tensor坐标获取这个tensor位置的值
float getValueFromTensorPos_TENSOR_NAME(int r, int g, int b, int a) { float getValueFromTensorPos_TENSOR_NAME(int r, int g, int b, int a) {
vec4 pixels = TEXTURE2D(texture_TENSOR_NAME, vec4 pixels = TEXTURE2D(texture_TENSOR_NAME,
vec2( vec2(
...@@ -11,9 +13,10 @@ float getValueFromTensorPos_TENSOR_NAME(int r, int g, int b, int a) { ...@@ -11,9 +13,10 @@ float getValueFromTensorPos_TENSOR_NAME(int r, int g, int b, int a) {
(float(r * height_shape_TENSOR_NAME + b) + 0.5) / float(height_texture_TENSOR_NAME) (float(r * height_shape_TENSOR_NAME + b) + 0.5) / float(height_texture_TENSOR_NAME)
) )
); );
// 只用了r通道
return pixels.r; return pixels.r;
} }
// 紧凑型布局根据tensor坐标获取这个tensor位置的值
float getValueFromTensorPosLimit_TENSOR_NAME(int r, int g, int b, int a) { float getValueFromTensorPosLimit_TENSOR_NAME(int r, int g, int b, int a) {
float halfW = ceil(float(width_shape_TENSOR_NAME) / 2.0); float halfW = ceil(float(width_shape_TENSOR_NAME) / 2.0);
int x = int(mod(float(a), halfW)); int x = int(mod(float(a), halfW));
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/** /**
* @file 公共方法 * @file 公共方法
* @author yangmingming * @author yangmingming
* desc packed布局 根据tensor坐标获取这个tensor位置的值
*/ */
export default ` export default `
float getValueFromTensorPosPacked_TENSOR_NAME(int r, int g, int b, int a) { float getValueFromTensorPosPacked_TENSOR_NAME(int r, int g, int b, int a) {
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/** /**
* @file 公共方法 * @file 公共方法
* @author yangmingming * @author yangmingming
* desc 根据材质坐标获取这个材质位置的值
*/ */
// TEXTURE_NAME, tensor name // TEXTURE_NAME, tensor name
// 获取材质中的数据 // 获取材质中的数据
......
...@@ -2,18 +2,24 @@ ...@@ -2,18 +2,24 @@
/** /**
* @file 公共方法 * @file 公共方法
* @author yangmingming * @author yangmingming
* desc 坐标转化
*/ */
// TEXTURE_NAME, 材质name // TEXTURE_NAME, 材质name
// 材质坐标转化成真实尺寸坐标 // 材质坐标转化成真实尺寸坐标
export default ` export default `
vec2 _2d_shape_TEXTURE_NAME = vec2(float(width_TEXTURE_NAME), float(height_TEXTURE_NAME)); // vec2 moveTexture2PosToReal_TEXTURE_NAME(vec2 v) {
// return v * _2d_shape_TEXTURE_NAME;
// // vec2 v2;
// // v2.x = v.x * float(width_TEXTURE_NAME);
// // v2.y = v.y * float(height_TEXTURE_NAME);
// // return v2;
// }
vec2 moveTexture2PosToReal_TEXTURE_NAME(vec2 v) { vec2 moveTexture2PosToReal_TEXTURE_NAME(vec2 v) {
return v * _2d_shape_TEXTURE_NAME; vec2 v2;
// vec2 v2; v2.x = v.x * float(width_TEXTURE_NAME);
// v2.x = v.x * float(width_TEXTURE_NAME); v2.y = v.y * float(height_TEXTURE_NAME);
// v2.y = v.y * float(height_TEXTURE_NAME); return v2;
// return v2;
} }
`; `;
...@@ -11,7 +11,7 @@ export default ` ...@@ -11,7 +11,7 @@ export default `
precision mediump float; precision mediump float;
precision mediump int; precision mediump int;
#endif #endif
varying vec2 vCoord;
void setOutput(float result) { void setOutput(float result) {
gl_FragColor.r = result; gl_FragColor.r = result;
} }
......
/* eslint-disable */ /* eslint-disable */
/** /**
* @file 激活函数 * @file 激活函数
* @author yangmingming * @author wangqun@baidu.com
*/ */
export default ` export default `
float scale(float x, float p, float b) { float scale(float x, float p, float b) {
......
/* eslint-disable */ /* eslint-disable */
/** /**
* @file softmax激活函数 * @file softmax激活函数
* @author yangmingming * @author wangqun
*/ */
export default ` export default `
float softmax(float x, float p, float b) { float softmax(float x, float p, float b) {
......
...@@ -26,14 +26,12 @@ export default { ...@@ -26,14 +26,12 @@ export default {
'HEIGHT_TEXTURE_ORIGIN', 'HEIGHT_TEXTURE_ORIGIN',
'CHANNEL_ORIGIN', 'CHANNEL_ORIGIN',
'TOTAL_SHAPE_ORIGIN', 'TOTAL_SHAPE_ORIGIN',
'WIDTH_SHAPE_OUT', 'WIDTH_SHAPE_OUT',
'HEIGHT_SHAPE_OUT', 'HEIGHT_SHAPE_OUT',
'WIDTH_TEXTURE_OUT', 'WIDTH_TEXTURE_OUT',
'HEIGHT_TEXTURE_OUT', 'HEIGHT_TEXTURE_OUT',
'CHANNEL_OUT', 'CHANNEL_OUT',
'OFFSET_Y_OUT', 'OFFSET_Y_OUT',
'EPSILON', 'EPSILON',
'WIDTH_TEXTURE_SCALE', 'WIDTH_TEXTURE_SCALE',
'HEIGHT_TEXTURE_SCALE', 'HEIGHT_TEXTURE_SCALE',
......
/* eslint-disable */ /* eslint-disable */
/** /**
* @file softmax主函数 * @file batchnorm主函数
* @author yangmingming * @author wangqun
*/ */
export default ` export default `
// start函数 // start函数
void main(void) { void main(void) {
// 输出数据 // 输出数据
ivec4 oPos = getOutputTensorPos(); ivec4 oPos = getOutputTensorPos();
float o = getValueFromTensorPos_origin(oPos); float o = getValueFromTensorPos_origin(oPos.r, oPos.g, oPos.b, oPos.a);
// 归一化数据 // 归一化数据
vec4 scale = getPixelsFromTexturePos_texture_scale(vec2((float(int(oPos.g)) + 0.5) / float(width_texture_scale), 0.0)); vec4 scale = getPixelsFromTexturePos_texture_scale(vec2((float(int(oPos.g)) + 0.5) / float(width_texture_scale), 0.0));
float x = (o - scale[3]) / sqrt(scale[2] + epsilon); float x = (o - scale[3]) / sqrt(scale[2] + epsilon);
float res = scale[0] * x + scale[1]; float res = scale[0] * x + scale[1];
setOutput(res); setOutput(res);
} }
`; `;
\ No newline at end of file
...@@ -12,12 +12,10 @@ const int width_texture_origin = WIDTH_TEXTURE_ORIGIN; ...@@ -12,12 +12,10 @@ const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN; const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN; const int channel_origin = CHANNEL_ORIGIN;
const int total_shape_origin = TOTAL_SHAPE_ORIGIN; const int total_shape_origin = TOTAL_SHAPE_ORIGIN;
// 计算数据 // 计算数据
const float epsilon = float(EPSILON); const float epsilon = float(EPSILON);
const int width_texture_scale = WIDTH_TEXTURE_SCALE; const int width_texture_scale = WIDTH_TEXTURE_SCALE;
const int height_texture_scale = HEIGHT_TEXTURE_SCALE; const int height_texture_scale = HEIGHT_TEXTURE_SCALE;
// 输入数据 // 输入数据
uniform sampler2D texture_origin; uniform sampler2D texture_origin;
uniform sampler2D texture_scale; uniform sampler2D texture_scale;
......
/* eslint-disable */ graph.es6/* eslint-disable */
/** /**
* @file mul的配置文件 * @file mul的配置文件
* @author yangmingming zhangmiao06 * @author yangmingming zhangmiao06
......
...@@ -6,12 +6,14 @@ ...@@ -6,12 +6,14 @@
export default ` export default `
// start函数 // start函数
void main(void) { void main(void) {
float res = 0.0;
vec4 v4 = getPixelsFromTexturePos_texture_origin(vCoord); vec4 v4 = getPixelsFromTexturePos_texture_origin(vCoord);
vec2 onePixel = vec2(1.0 / float(width_texture_origin), 1.0 / float(height_texture_origin)); vec2 onePixel = vec2(1.0 / float(width_texture_origin), 1.0 / float(height_texture_origin));
float total = 0.0; float total = 0.0;
float maxValue = getPixelsFromTexturePos_texture_origin(onePixel).r; float maxValue = getPixelsFromTexturePos_texture_origin(onePixel).r;
int number = 0; int number = 0;
vec4 pixels; vec4 pixels;
vec4 result;
// 求最大 // 求最大
for (int i = 0; i < height_texture_origin; i++) { for (int i = 0; i < height_texture_origin; i++) {
for (int j = 0; j < width_texture_origin; j++) { for (int j = 0; j < width_texture_origin; j++) {
...@@ -50,6 +52,9 @@ void main(void) { ...@@ -50,6 +52,9 @@ void main(void) {
} }
} }
} }
gl_FragColor = exp(v4 - vec4(maxValue, maxValue, maxValue, maxValue)) / vec4(total, total, total, total) ; outColor = exp(v4 - vec4(maxValue, maxValue, maxValue, maxValue)) / vec4(total, total, total, total);
// res = result.a;
// setOutput(res);
} }
`; `;
/* eslint-disable */ /* eslint-disable */
/** /**
* @file 顶点文件 * @file 顶点文件
* @author yangmingming * @author wangqun
* @desc  顶点坐标系转换,适配webgl1
*/ */
export default ` export default `
attribute vec4 position; attribute vec4 position;
varying vec2 vCoord; varying vec2 vCoord;
void main() { void main() {
vCoord.x = (position.x + 1.0) / 2.0; vCoord.x = (position.x + 1.0) / 2.0;
vCoord.y = (position.y + 1.0) / 2.0; vCoord.y = (position.y + 1.0) / 2.0;
......
/* eslint-disable */ /* eslint-disable */
/** /**
* @file 顶点文件,webgl 2.0 * @file 顶点文件,webgl 2.0
* @author yangmingming * @author wangqun
* @desc  顶点坐标系转换,适配webgl2
*/ */
export default `#version 300 es export default `#version 300 es
in vec4 position; in vec4 position;
out vec2 vCoord; out vec2 vCoord;
void main() { void main() {
vCoord.x = (position.x + 1.0) / 2.0; vCoord.x = (position.x + 1.0) / 2.0;
vCoord.y = (position.y + 1.0) / 2.0; vCoord.y = (position.y + 1.0) / 2.0;
......
...@@ -3,7 +3,7 @@ import Utils from './utils'; ...@@ -3,7 +3,7 @@ import Utils from './utils';
import Tensor from './tensor'; import Tensor from './tensor';
/** /**
* @file op的数据对象 * @file op的数据对象
* @author yangmingming * @author wangqun, yangmingming
* *
*/ */
const keys = [ const keys = [
...@@ -38,7 +38,7 @@ const shaderAttrs = { ...@@ -38,7 +38,7 @@ const shaderAttrs = {
'pooling_type': 'type_pool' 'pooling_type': 'type_pool'
} }
}; };
// model的名字和paddle web的tensor名字mapping // model的名字和paddleJS的tensor名字mapping
const tensorName = { const tensorName = {
'input': 'origin', 'input': 'origin',
'x': 'origin', 'x': 'origin',
...@@ -80,6 +80,10 @@ const opBehavior = { ...@@ -80,6 +80,10 @@ const opBehavior = {
'transToPrelu', 'transToPrelu',
'needBatch' 'needBatch'
], ],
relu6: [
'transToRelu6',
'needBatch'
],
leaky_relu: [ leaky_relu: [
'transToLeakyrelu', 'transToLeakyrelu',
'needBatch' 'needBatch'
...@@ -87,17 +91,25 @@ const opBehavior = { ...@@ -87,17 +91,25 @@ const opBehavior = {
mul: [ mul: [
'reshape', 'reshape',
'needBatch' 'needBatch'
],
softmax: [
] ]
}; };
const mergeType = 'conv2d-elementwise_add'; const mergeType = 'conv2d-elementwise_add';
export default class OpData { export default class OpData {
constructor(name, input = {}, output = {}, attrs = {}) { constructor(name, input = {}, output = {}, attrs = {}) {
console.log('now in constructor');
console.dir(name);
console.dir(input);
console.dir(output);
this.realName = name; this.realName = name;
this.name = name; this.name = name;
this.attrs = attrs; this.attrs = attrs;
// 检查是否是融合op // 检查是否是融合op
this.checkIsMerge(); this.checkIsMerge();
// 是否忽略当前当前op, 使用dropout // 是否忽略当前当前op, 使用dropout
// dropout是指在深度学习网络的训练过程中,对于神经网络单元,按照一定的概率将其暂时从网络中丢弃。
this.isPass = this.checkIsPass(); this.isPass = this.checkIsPass();
if (this.isPass) { if (this.isPass) {
this.input = input; this.input = input;
...@@ -172,6 +184,9 @@ export default class OpData { ...@@ -172,6 +184,9 @@ export default class OpData {
} }
}); });
// console.dir(['tensors', this.tensor]); // console.dir(['tensors', this.tensor]);
// console.log('now in buildTensor show this and tensorData');
// console.log(this);
// console.log(tensorData);
} }
buildAttrs() { buildAttrs() {
...@@ -283,10 +298,14 @@ export default class OpData { ...@@ -283,10 +298,14 @@ export default class OpData {
item.notTensor = true; item.notTensor = true;
} }
}); });
return; return;
// mobilenet model // mobilenet model
// todo: 默认y的shape length是1, 以后需要实现通用版本 // todo: 默认y的shape length是1, 以后需要实现通用版本
console.log('2. x and y is ');
console.log(x);
console.log(y);
let shape = Utils.getBroadcastShapeInPaddle(x.shape, y.shape, this.attrs['axis']); let shape = Utils.getBroadcastShapeInPaddle(x.shape, y.shape, this.attrs['axis']);
// 填充shape数据 // 填充shape数据
if (small.shape.length === 1) { if (small.shape.length === 1) {
...@@ -316,6 +335,11 @@ export default class OpData { ...@@ -316,6 +335,11 @@ export default class OpData {
this.data['active_function'] = 'prelu'; this.data['active_function'] = 'prelu';
} }
transToRelu6(tensorData = []) {
this.data['multi_value'] = this.attrs['threshold'];
this.data['active_function'] = 'relu6';
}
transToLeakyrelu(tensorData = []) { transToLeakyrelu(tensorData = []) {
this.data['multi_value'] = this.attrs.alpha; this.data['multi_value'] = this.attrs.alpha;
this.data['active_function'] = 'leakyRelu'; this.data['active_function'] = 'leakyRelu';
...@@ -347,6 +371,7 @@ export default class OpData { ...@@ -347,6 +371,7 @@ export default class OpData {
mergeTensor(tensorData = []) { mergeTensor(tensorData = []) {
// 融合scale、bias、variance、mean // 融合scale、bias、variance、mean
let constants = ['scale', 'bias', 'variance', 'mean']; let constants = ['scale', 'bias', 'variance', 'mean'];
let result = {}; let result = {};
let data = []; let data = [];
...@@ -354,13 +379,18 @@ export default class OpData { ...@@ -354,13 +379,18 @@ export default class OpData {
result[tensor.tensorName] = tensor; result[tensor.tensorName] = tensor;
result[tensor.tensorName + 'Index'] = index; result[tensor.tensorName + 'Index'] = index;
}); });
for (let i = 0; i < result[constants[0]].shape[0]; i++) { for (let i = 0; i < result[constants[0]].shape[0]; i++) {
data.push(result[constants[0]].data[i]); data.push(result[constants[0]].data[i]);
data.push(result[constants[1]].data[i]); data.push(result[constants[1]].data[i]);
data.push(result[constants[2]].data[i]); data.push(result[constants[2]].data[i]);
data.push(result[constants[3]].data[i]); data.push(result[constants[3]].data[i]);
} }
tensorData[result[constants[0] + 'Index']].data = data; tensorData[result[constants[0] + 'Index']].data = data;
for (let i = 0; i < constants.length; i++){
tensorData[result[constants[i] + 'Index']].data = result[constants[i]].data;
}
// 充分利用shader空间 // 充分利用shader空间
tensorData[result[constants[0] + 'Index']].notCompressed = true; tensorData[result[constants[0] + 'Index']].notCompressed = true;
tensorData[result[constants[0] + 'Index']].shape[0] *= 4; tensorData[result[constants[0] + 'Index']].shape[0] *= 4;
......
/* eslint-disable */
import Utils from './utils'; import Utils from './utils';
/** /**
* @file Tensor类 * @file Tensor类
* @author yangmingming * @author wangqun, yangmingming
*/ */
export default class Tensor { export default class Tensor {
constructor(opts = {}) { constructor(opts = {}) {
...@@ -33,6 +32,7 @@ export default class Tensor { ...@@ -33,6 +32,7 @@ export default class Tensor {
// tensor数据 // tensor数据
let data; let data;
if (opts.type === 'image' || opts.type === 'x') { if (opts.type === 'image' || opts.type === 'x') {
console.log('image', this.data);
this.data = opts.data; this.data = opts.data;
} }
else if (opts.data && opts.data.length) { else if (opts.data && opts.data.length) {
...@@ -42,7 +42,7 @@ export default class Tensor { ...@@ -42,7 +42,7 @@ export default class Tensor {
let c = shape[1]; let c = shape[1];
let h = shape[2]; let h = shape[2];
let w = shape[3]; let w = shape[3];
if (w) {
for (let i = 0; i < opts.data.length; i++) { for (let i = 0; i < opts.data.length; i++) {
let j = i / (c * w) | 0; let j = i / (c * w) | 0;
let k = i % (c * w); let k = i % (c * w);
...@@ -54,6 +54,15 @@ export default class Tensor { ...@@ -54,6 +54,15 @@ export default class Tensor {
data[i] = opts.data[l]; data[i] = opts.data[l];
} }
this.data = data; this.data = data;
}
else {
if (opts.data.length > this.total) {
opts.data = opts.data.slice(0, this.total);
}
this.data = new Float32Array(opts.data);
debugger;
}
} else { } else {
// batchnorm的scale // batchnorm的scale
this.shape_texture = [4, 1, this.total / 4]; this.shape_texture = [4, 1, this.total / 4];
...@@ -158,4 +167,3 @@ export default class Tensor { ...@@ -158,4 +167,3 @@ export default class Tensor {
} }
} }
} }
/* eslint-enable */
/** /**
* @file 工具类 * @file 工具类
* @author yangmingming * @author wangqun, yangmingming
*/ */
/* eslint-disable */
export default { export default {
// todo: 适用2维矩阵乘法,以后实现通用版本 // todo: 适用2维矩阵乘法,以后实现通用版本
getReshapeInPaddle(inputShape = [], counterShape = [], outShape = []) { getReshapeInPaddle(inputShape = [], counterShape = [], outShape = []) {
...@@ -109,10 +108,10 @@ export default { ...@@ -109,10 +108,10 @@ export default {
* @return {{shape: *[], zeroNumber: number}} {Object} texture信息 * @return {{shape: *[], zeroNumber: number}} {Object} texture信息
*/ */
getTextureInfoFromTensorShape(shape = [], isPacked = false) { getTextureInfoFromTensorShape(shape = [], isPacked = false) {
let b = shape[0]; let b = shape[0] || 1;
let c = shape[1]; let c = shape[1] || 1;
let h = shape[2]; let h = shape[2] || 1;
let w = shape[3]; let w = shape[3] || 1;
let height = b * h; let height = b * h;
let width = c * w; let width = c * w;
let offsetX = 0; let offsetX = 0;
...@@ -179,12 +178,7 @@ export default { ...@@ -179,12 +178,7 @@ export default {
let l = b1 * (c * h * w) + c1 * (h * w) + h1 * (w) + w1; let l = b1 * (c * h * w) + c1 * (h * w) + h1 * (w) + w1;
data[offset] = renderData.data[l]; data[offset] = renderData.data[l];
offset += 4; offset += 4;
// data.push(renderData.data[l]);
// data.push(0);
// data.push(0);
// data.push(0);
} }
renderData.data = data; renderData.data = data;
} }
}; };
/* eslint-enable */
# PaddleJS Examples
百度PaddleJS使用现成的 JavaScript 模型或转换 Paddle 模型以在浏览器中运行。
## 演示
目前Web项目运行TinyYolo模型可以达到30ms以内,对于一般的实时场景已经足够应对。
### 模块化
## 浏览器覆盖面
* PC: Chrome
* Mac: Chrome
* Android: Baidu App and QQ Browser
## 构建部署
```bash
cd web # 进入根目录
npm i # 安装依赖
mkdir dist # 创建资源目录
cd dist # 进入资源目录
git clone https://github.com/DerekYangMing/Paddle-Web-Models.git # 获取模型
mv Paddle-Web-Models/separablemodel . # 移动模型到指定地点
cd .. # 返回根目录
npm run tinyYolo # 启动 tinyYolo 在线推理服务
```
## 如何预览 demo
1. 在浏览器中打开url: https://localhost:8123/
2. 点击【开始检测】按钮。
3. 将人脸对准摄像头,没有问题的话,可以正常检测到人脸。
## 效果
![image](./tinyYolo/demoshow.png)
此差异已折叠。
此差异已折叠。
{
"ops": [
{
"attrs": {
"Scale_in": 1.0,
"Scale_in_eltwise": 1.0,
"Scale_out": 1.0,
"Scale_weights": [
1.0
],
"data_format": "AnyLayout",
"dilations": [
1,
1
],
"exhaustive_search": false,
"force_fp32_output": false,
"fuse_relu": false,
"fuse_relu_before_depthwise_conv": false,
"fuse_residual_connection": false,
"groups": 2,
"is_test": 1,
"paddings": [
0,
0
],
"strides": [
1,
1
],
"use_cudnn": true,
"use_mkldnn": false,
"workspace_size_MB": 4096
},
"inputs": {
"Filter": [
"conv2d_0.w_0"
],
"Input": [
"pixel"
]
},
"outputs": {
"Output": [
"conv2d_0.tmp_0"
]
},
"type": "conv2d"
}
],
"vars": [
{
"data": [
0, 1, 0, 1, 0, 1, 0, 1, 0,
0, 1, 0, 1, 0, 1, 0, 1, 0,
0, 1, 0, 1, 0, 1, 0, 1, 0,
0, 1, 0, 1, 0, 1, 0, 1, 0,
0, 2, 0, 2, 0, 2, 0, 2, 0,
0, 2, 0, 2, 0, 2, 0, 2, 0,
0, 2, 0, 2, 0, 2, 0, 2, 0,
0, 2, 0, 2, 0, 2, 0, 2, 0
],
"name": "pixel",
"persistable": 0,
"shape": [
1,
8,
3,
3
]
},
{
"data": [
0, 1, 0, 1,
0, 1, 0, 1,
0, 1, 0, 1,
0, 1, 0, 1,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2,
0, 2, 0, 2
],
"name": "conv2d_0.w_0",
"persistable": 1,
"shape": [
4,
4,
2,
2
]
},
{
"data": [
],
"name": "conv2d_1.b_0",
"persistable": 1,
"shape": [
50
]
},
{
"data": [
4, 4, 4, 4,
8, 8, 8, 8,
16, 16, 16, 16,
16, 16, 16, 16
],
"name": "conv2d_0.tmp_0",
"persistable": 0,
"shape": [
1,
4,
2,
2
]
}
]
}
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
{
"ops": [
{
"attrs": {
"bias": 0.0,
"bias_after_scale": true,
"scale": 1.0
},
"inputs": {
"X": [
"fc_0.tmp_2"
]
},
"outputs": {
"Out": [
"scale_0.tmp_0"
]
},
"type": "scale"
}
],
"vars": [
{
"data": [
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.0,
1.6504194778766687e-16
],
"name": "fc_0.tmp_2",
"persistable": 0,
"shape": [
10
]
},
{
"data": [
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.6038109389511792e-28,
1.0,
1.6504194778766687e-16
],
"name": "scale_0.tmp_0",
"persistable": 0,
"shape": [
10
]
}
]
}
{
"ops": [
{
"attrs": {
"data_format": "AnyLayout",
"is_test": 1,
"use_cudnn": false,
"use_mkldnn": false
},
"inputs": {
"X": [
"softmax_0.tmp_0"
]
},
"outputs": {
"Out": [
"softmax_0.tmp_2"
]
},
"type": "softmax"
}
],
"vars": [
{
"data":
[0.0320586 , 0.08714432, 0.23688282, 0.64391426]
,
"name": "softmax_0.tmp_2",
"persistable": 0,
"shape": [2, 3, 4]
},
{
"data": [2.0, 3.0, 4.0, 5.0]
,
"name": "softmax_0.tmp_0",
"persistable": 0,
"shape": [2, 3, 4]
}
]
}
此差异已折叠。
import 'babel-polyfill';
import Paddle from '../../src/paddle/paddle';
// Map from the op type under test to its unit-test model description file.
const unitPath = {
    'conv2d': 'model.test.conv2d.json',
    'batchnorm': 'model.test.batchnorm.json',
    'mul': 'model.test.mul.json',
    'pool2d': 'model.test.pool2d.json',
    'relu': 'model.test.relu.json',
    'scale': 'model.test.scale.json',
    'softmax': 'model.test.softmax.json',
    'relu6': 'model.test.relu6.json'
};
// Op selected for this run.
const modelType = 'softmax';
const unitData = unitPath[modelType];
let Diff = require('./diff');
let datas;       // raw model data as loaded from the unit-test json
let otherResult; // expected output data captured while resolving tensors
let output;      // deep copy of `datas` kept for later comparison
/**
 * Loads the selected unit-test model, builds op data for every real op
 * (skipping the feed/fetch pseudo ops), executes the first executor and
 * reads the result back twice for comparison logging.
 */
async function run() {
    const basePath = 'test/unitData';
    const modelConfig = {
        dir: `/${basePath}/`, // 存放模型的文件夹
        main: unitData // 主文件
    };
    const paddle = new Paddle({
        urlConf: modelConfig,
        options: {
            test: true
        }
    });
    const model = await paddle.load();
    datas = model.graph.data;
    output = deepCopy(datas);
    // build every op except the feed/fetch pseudo ops
    model.graph.weightMap.forEach(op => {
        if (op.type !== 'feed' && op.type !== 'fetch') {
            console.log(op.type);
            model.graph.buildOpData(op);
        }
    });
    const executor = model.graph.weightMap;
    const inst = model.graph.execute_(executor[0]);
    const result = model.graph.inst.read();
    console.dir(['result', result]);
    const one = model.graph.inst.read();
    // var other = getResult('conv2d');
    console.log('one');
    console.log(one);
    console.log('other');
}
run();
// Deep-copies plain JSON-serializable data via a serialize/parse round trip
// (note: drops functions/undefined, mangles Dates — fine for model json).
function deepCopy (data) {
    const serialized = JSON.stringify(data);
    return JSON.parse(serialized);
}
// let output = deepCopy(datas);
// Finds the `times`-th op of the given type in the loaded model data and
// resolves its input/output tensor names to the actual var records.
let getTensor = function(id, times = 1) {
    let matched = 0;
    const hits = datas.ops.filter(item => {
        if (item.type !== id) {
            return false;
        }
        matched++;
        return matched === times;
    });
    return getInputs(hits[0]);
};
// Replaces each input/output tensor name on the op with the matching var
// records from `datas`; also stashes the (last) output's data in `otherResult`.
let getInputs = function(data) {
    for (const key of Object.keys(data.inputs)) {
        data.inputs[key] = getValue(data.inputs[key][0], datas);
    }
    for (const key of Object.keys(data.outputs)) {
        const resolved = getValue(data.outputs[key][0], datas);
        data.outputs[key] = resolved;
        otherResult = resolved[0].data;
    }
    return data;
};
// Looks up the first op of the given type in the pristine copy (`output`)
// and returns its expected output data.
let getResult = function(id) {
    const ops = output.ops.filter(item => item.type === id);
    return getoutputs(ops[0]);
};
// Resolves the op's output tensor names against the pristine copy and
// returns the data of the last resolved output.
let getoutputs = function(data) {
    let resolvedData;
    for (const key of Object.keys(data.outputs)) {
        const out = getValue(data.outputs[key][0], output);
        resolvedData = out[0].data;
    }
    return resolvedData;
};
/**
 * Looks up all vars with the given name in a model data object.
 * @param {string} name - tensor/var name to match.
 * @param {Object} datas - model data holding a `vars` array.
 * @return {Array} all matching var entries (usually 0 or 1).
 */
let getValue = function(name, datas) {
    // `filter` expects a boolean predicate; the previous version returned the
    // item itself and only worked by truthiness.
    return datas.vars.filter(item => item.name === name);
};
// // 测试单元
// let item = getTensor('conv2d');
/**
 * Placeholder for the diff-based result comparison view. The original body
 * was entirely commented-out scaffolding (Diff.diffChars rendering into a
 * `#display` element) and has been removed as dead code.
 * @param {Object} model - loaded paddle model (currently unused).
 */
let func = function (model) {
    // intentionally empty: comparison/diff rendering is disabled
};
import 'babel-polyfill';
import units from './units/units';
let qs = require('qs');
/**
* @file 入口文件
* @author wangqun@baidu.com
*
*/
// 引入 op
// Fragment shader source for the fused elementwise conv2d op.
const FSHADER_CON2D = require('../src/shader/f_elementwise_conv2d3_shader.c');
const shapeA = [1, 3, 256, 256]; // input tensor shape (NCHW)
const shapeB = [3];              // per-channel bias/scale shape
const imgUrl = require('./data/banana.jpeg');
let shapeAData;
let shapeBData;
let inst; // executor instance, assigned once units.init resolves
const matrix = units.mockOrigin(); // mock input tensor (has .sx and .data)
const filter = units.mockFilter(); // mock convolution kernel
// Original tensor: 1-unit padding on every side, stride 1.
// NOTE(review): origin_size_height also uses matrix.sx — presumably the mock
// input is square; confirm against units.mockOrigin.
let conf = {
    'filter_size_width': 3,
    'filter_size_height': 3,
    'origin_size_width': matrix.sx,
    'origin_size_height': matrix.sx,
    'out_size_width': 3,
    'out_size_height': 3,
    'stride_horizontal': 1,
    'stride_vertical': 1,
    'pad_left': 1,
    'pad_top': 1,
    'dilation_horizontal': 2,
    'dilation_vertical': 2
}
// Initialize the shader unit, run conv2d on the mock data, then read the
// result back and hand the full input/result pair to getResult for checking.
(async () => {
    try {
        const instance = await units.init(conf, FSHADER_CON2D);
        // a string return value carries the failure reason
        if (!instance || typeof instance === 'string') {
            throw new Error(instance || '不支持float texture');
        }
        inst = instance;
        console.dir(['卷积核', filter]);
        console.dir(['origin data', matrix.data]);
        // run conv2d
        inst.compute(filter, matrix.data, 'conv2d');
        // read the computed output back
        const result = inst.read();
        console.dir(['conv2d的执行结果', result]);
        const input = {
            filter: filter,
            origin: matrix.data,
        };
        Object.assign(input, conf);
        console.dir(['完整input', input]);
        // console.dir(['完整输入和输出', params]);
        inst.getResult('pool2d', input, result);
    }
    catch (err) {
        console.log('-----------error---------' + err);
    }
})();
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
/**
* @file 打包到rd机器的配置
* @author yangmingming zhangmiao06
*/
const path = require('path');
const ExtractTextPlugin = require('extract-text-webpack-plugin');
// Collects all compiled .less output into one css file per entry bundle.
const extractLess = new ExtractTextPlugin({
    filename: '[name].css'
});
module.exports = {
    mode: 'development',
    // 'none' disables source-map generation for the deployed bundle
    devtool: 'none',
    optimization: {
        // ship unminified code so it stays debuggable on the target machine
        minimize: false
    },
    entry: {
        camera: './src/executor/camera',
        index: './src/executor/runner'
    },
    output: {
        // emitted directly into the graphfe view tree consumed by the RD host
        filename: '../graphfe/src/view/common/lib/paddle/[name].js',
        path: path.resolve(__dirname, './'),
        library: 'panorama',
        libraryTarget: 'umd',
        libraryExport: 'default'
    },
    module: {
        rules: [{
            // inline small assets (<30kb) as data URLs, copy the rest
            test: /\.(eot|woff|woff2|ttf|svg|png|jpg)$/,
            loader: 'url-loader?limit=30000&name=[name].[ext]'
        }, {
            test: /\.less$/,
            exclude: /node_modules/,
            loader: ExtractTextPlugin.extract([
                {loader: 'css-loader', options: {minimize: true}},
                {loader: 'less-loader'}
            ])
        }]
    },
    plugins: [extractLess],
    resolve: {
        extensions: ['.es6', '.js', '.json']
    }
};
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册