提交 4efa4ca9 编写于 作者: W wangqun

[D][change]update NHWC and NCHW format

update NHWC and NCHW format
上级 94b22c3c
[中文版](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/web/README_cn.md)
# Web
Web project is an open source deep learning framework designed to run in web browsers. It could run on nearly every browser with WebGL support.
## Key Features
### Modular
Web project is built on Atom system which is a versatile framework to support GPGPU operation on WebGL. It is quite modular and could be used to make computation tasks faster by utilizing WebGL.
### High Performance
Web project could run TinyYolo model in less than 30ms on chrome. This is fast enough to run deep learning models in many realtime scenarios.
### Browser Coverage
* PC: Chrome
* Mac: Chrome
* Android: Baidu App and QQ Browser
## How To Build & Deploy Demo
```bash
cd web # enter root directory for web project
npm i # install dependencies for npm
mkdir dist # create deployment directory
cd dist # enter deployment directory
git clone https://github.com/DerekYangMing/Paddle-Web-Models.git # get models
mv Paddle-Web-Models/separablemodel . # move models to specific directory
cd .. # return to root directory for web project
npm run testVideoDemo # start demo
```
## How To Preview Demo
1. Open chrome with url: https://localhost:8123/
2. Start demo by pressing the 【start detection】 button.
3. Ensure at least one face is recorded by the camera. The face detection rectangle should be displayed if everything goes fine.
## Feedback and Community Support
- Questions, reports, and suggestions are welcome through Github Issues!
- Forum: Opinions and questions are welcome at our [PaddlePaddle Forum](https://ai.baidu.com/forum/topic/list/168)
- QQ group chat: 696965088
# PaddleJS Examples
百度PaddleJS使用现成的或者通过转换工具转换的 JavaScript 友好的paddle模型以在浏览器中运行,在浏览器中实现在线推理能力。
## 演示
目前Web项目运行TinyYolo模型可以达到30ms以内,对于一般的实时场景已经足够应对。
### 模块化
## 浏览器覆盖面
* PC: Chrome
* Mac: Chrome
* Android: Baidu App and QQ Browser
## 构建部署
```bash
cd web # 进入根目录
npm i # 安装依赖
mkdir dist # 创建资源目录
cd dist # 进入资源目录
git clone https://github.com/DerekYangMing/Paddle-Web-Models.git # 获取模型
mv Paddle-Web-Models/separablemodel . # 移动模型到指定地点
cd .. # 返回根目录
npm run tinyYolo # 启动 tinyYolo 在线推理服务
```
## 如何预览 demo
1. 在浏览器中打开url: https://localhost:8123/
2. 点击【开始检测】按钮。
3. 将人脸对准摄像头,没有问题的话,可以正常检测到人脸。
## 效果
![image](./tinyYolo/demoshow.png)
{
"name": "paddle-web-demo",
"name": "paddel-web",
"version": "1.0.0",
"description": "paddle",
"main": "index.js",
......
......@@ -32,8 +32,11 @@ export default class GraphExecutor {
else if (this.type === 'depthwise_conv2d') {
return this.inputs.Input;
}
else if (this.type === 'conv2d_transpose') {
return this.inputs.Input;
}
else if (this.type === 'elementwise_add') {
return this.inputs.X;
return this.inputs.X.concat(this.inputs.Y);
}
else if (this.type === 'relu' || this.type === 'leaky_relu') {
return this.inputs.X;
......@@ -65,8 +68,13 @@ export default class GraphExecutor {
}
else if (this.type === 'batchnorm' || this.type === 'batch_norm') {
this.outputs.out = this.outputs.Y;
return this.outputs.Y;
delete this.outputs.Y;
return this.outputs.out;
}
else if (this.outputs.Y) {
this.outpus.out = this.outputs.Y;
return this.outputs.out;
}
else {
return this.outputs.Out || this.outputs.Output;
}
......@@ -83,7 +91,6 @@ export default class GraphExecutor {
if (this.type !== 'feed') {
// let time = +Date.now();
// log.start(this.opData.iLayer + '-' + this.type);
console.log(this.type, this.opData);
runtime.run(this.type, this.opData, isRendered);
// log.end(this.opData.iLayer + '-' + this.type);
// if (runtime.gpu.frameBufferIsComplete().isComplete) {
......
/* eslint-disable */
/* 后处理图片 by zhangmiao06 */
// let preTestRun = index => {
// let img = document.getElementById('image');
// img.src = tempPic[index];
// img.onload = function () {
// testRun(testOutput.data[index], img);
// };
// };
/** 后处理图片 by zhangmiao06, wangqun
* let preTestRun = index => {
* let img = document.getElementById('image');
* img.src = tempPic[index];
* img.onload = function () {
* testRun(testOutput.data[index], img);
* };
* };
**/
import models from '../utils/models';
const isSimilar = (r1, r2, threshold = 5) => {
......@@ -132,11 +132,11 @@ const reshapeMany = data => {
export default class PostProcess {
constructor(options) {
this.modelConfig = models[options.modelName];
this.modelConfig = options;
this.count = 0;
this.lastRect = [0, 0, 0, 0]
}
run(data, img, callback, canavs) {
let {from, to} = this.modelConfig.outputShapes;
let shape = [].concat(from).reverse();
......
/**
* @file Runner 整个流程封装一下
* @author hantian(hantianjiao@baidu.com)
* @author hantian(hantianjiao@baidu.com), wangqungit push origin master:refs/for/master
* 使用方法:
* const runner = new Runner({
* modelName: 'separate' // '608' | '320' | '320fused' | 'separate'
......@@ -11,16 +11,15 @@
*/
import IO from '../feed/ImageFeed';
import DataFeed from '../feed/dataFeed';
import Graph from './loader';
import PostProcess from './postProcess';
import models from '../utils/models';
import Logger from '../../tools/logger';
import Paddle from '../paddle/paddle';
window.log = new Logger();
export default class Runner {
// 加载模型&预热
constructor(options) {
this.modelConfig = models[options.modelName];
this.modelConfig = options; // models[options.modelName];
this.flags = {
isRunning: false,
isPreheating: false,
......@@ -41,28 +40,42 @@ export default class Runner {
name: 'image',
shape: [1, 3, fh, fw]
}];
const MODEL_URL = `/${path}/model.json`;
let dir = `https://mms-graph.cdn.bcebos.com/activity/facegame/paddle/${path}/`;
if (location.href.indexOf('test=1') > -1) {
dir = `/src/view/common/lib/paddle/${path}/`;
}
const MODEL_CONFIG = {
dir: dir,
main: 'model.json' // 主文件
dir: `/${path}/`, // 存放模型的文件夹
main: 'model.json', // 主文件
};
const graphModel = new Graph();
this.model = await graphModel.loadGraphModel(MODEL_CONFIG, {
multipart: true,
dataType: 'binary',
binaryOption: {
fileCount: 1, // 切成了多少文件
getFileName(i) { // 获取第i个文件的名称
return 'chunk_0.dat';
// const graphModel = new Graph();
// this.model = await graphModel.loadGraphModel(MODEL_CONFIG, {
// multipart: true,
// dataType: 'binary',
// binaryOption: {
// fileCount: 1, // 切成了多少文件
// getFileName(i) { // 获取第i个文件的名称
// return 'chunk_0.dat';
// }
// },
// feed
// });
const paddle = new Paddle({
urlConf: MODEL_CONFIG,
options: {
multipart: true,
dataType: 'binary',
options: {
fileCount: 1, // 切成了多少文件
getFileName(i) { // 获取第i个文件的名称
return 'chunk_0.dat';
}
}
},
feed
}
});
this.model.execute({
this.model = await paddle.load();
let inst = this.model.execute({
input: feed
});
this.flags.isPreheating = false;
......@@ -149,6 +162,11 @@ export default class Runner {
startStream(getMedia, callback) {
this.flags.runVideoPaused = false;
this.runStream(getMedia, callback);
if (typeof getMedia === 'function') {
this.runStream(getMedia(), callback);
} else {
this.runStream(getMedia, callback);
}
}
}
import ops from './ops';
/**
* @file 工厂类,生成fragment shader
* @author wangqun
* @author yangmingming
*/
export default class Factory {
constructor(opts) {
......@@ -17,10 +17,11 @@ export default class Factory {
}
}
buildShader(opName, data) {
buildShader(opName, data, runtime = undefined) {
let result = '';
result = this.buildPrefix(opName);
result += this.buildCommon(opName);
result += runtime !== undefined ? this.buildRuntime(runtime) : '';
result += this.buildOp(opName);
data.texture2d = this.texture2d;
result = this.populateData(result, data);
......@@ -38,6 +39,12 @@ export default class Factory {
return ops.common.params + ops.common.func;
}
buildRuntime(runtime) {
return `
int layer_run_time = ${runtime};
`;
}
buildOp(opName) {
let code = ops.ops[opName].params;
// 依赖的方法
......@@ -50,7 +57,7 @@ export default class Factory {
let snippet = atoms[func];
code += this.populateData(snippet, data);
});
// suffix
// 引入 suffix 方法
code += this.buildSuffix(opName);
// main方法
code += ops.ops[opName].func;
......
......@@ -12,6 +12,9 @@ import conv2d_conf from '../../shader/conv2d/conf';
import conv2d_depthwise_params from '../../shader/conv2d_depthwise/params';
import conv2d_depthwise_func from '../../shader/conv2d_depthwise/main';
import conv2d_depthwise_conf from '../../shader/conv2d_depthwise/conf';
import conv2d_transpose_params from '../../shader/conv2d_transpose/params';
import conv2d_transpose_func from '../../shader/conv2d_transpose/main';
import conv2d_transpose_conf from '../../shader/conv2d_transpose/conf';
import dynamic_params from '../../shader/dynamic/params';
import dynamic_func from '../../shader/dynamic/main';
import dynamic_conf from '../../shader/dynamic/conf';
......@@ -36,6 +39,12 @@ import softmax_conf from '../../shader/softmax/conf';
import batchnorm_params from '../../shader/batchnorm/params';
import batchnorm_func from '../../shader/batchnorm/main';
import batchnorm_conf from '../../shader/batchnorm/conf';
import reshape_params from '../../shader/reshape/params';
import reshape_func from '../../shader/reshape/main';
import reshape_conf from '../../shader/reshape/conf';
import transpose_params from '../../shader/transpose/params';
import transpose_func from '../../shader/transpose/main';
import transpose_conf from '../../shader/transpose/conf';
import conv2d_elementwise_add_params from '../../shader/conv2d_elementwise_add/params';
import conv2d_elementwise_add_func from '../../shader/conv2d_elementwise_add/main';
......@@ -45,6 +54,14 @@ import conv2d_elementwise_add_winograd_params from '../../shader/conv2d_elementw
import conv2d_elementwise_add_winograd_func from '../../shader/conv2d_elementwise_add_winograd/main';
import conv2d_elementwise_add_winograd_conf from '../../shader/conv2d_elementwise_add_winograd/conf';
import concat_params from '../../shader/concat/params';
import concat_func from '../../shader/concat/main';
import concat_conf from '../../shader/concat/conf';
import split_params from '../../shader/split/params';
import split_func from '../../shader/split/main';
import split_conf from '../../shader/split/conf';
import getArrayIndexFromTensorPos from '../../shader/atom/getArrayIndexFromTensorPos';
import getArrayIndexFromTexturePos from '../../shader/atom/getArrayIndexFromTexturePos';
import getTensorPosFromArrayIndex from '../../shader/atom/getTensorPosFromArrayIndex';
......@@ -56,6 +73,7 @@ import moveTexture2PosToReal from '../../shader/atom/moveTexture2PosToReal';
import getPixelsFromTexturePos from '../../shader/atom/getPixelsFromTexturePos';
import getRangePowSumFromArrayIndex from '../../shader/atom/getRangePowSumFromArrayIndex';
import getRangeSumFromArrayIndex from '../../shader/atom/getRangeSumFromArrayIndex';
import transferFromNHWCtoNCHW from '../../shader/atom/transferFromNHWCtoNCHW';
import sigmoid from '../../shader/atom/sigmoid';
import prelu from '../../shader/atom/prelu';
import scale from '../../shader/atom/scale';
......@@ -75,6 +93,11 @@ export default {
ivec56
},
ops: {
conv2d_transpose:{
params: conv2d_transpose_params,
func: conv2d_transpose_func,
confs: conv2d_transpose_conf
},
conv2d: {
params: conv2d_params,
func: conv2d_func,
......@@ -125,6 +148,16 @@ export default {
func: mul_func,
confs: mul_conf
},
concat: {
params: concat_params,
func: concat_func,
confs: concat_conf
},
split: {
params: split_params,
func: split_func,
confs: split_conf
},
relu: {
params: dynamic_params,
func: dynamic_func,
......@@ -149,7 +182,17 @@ export default {
params: batchnorm_params,
func: batchnorm_func,
confs: batchnorm_conf
}
},
reshape: {
params: reshape_params,
func: reshape_func,
confs: reshape_conf
},
transpose: {
params: transpose_params,
func: transpose_func,
confs: transpose_conf
}
},
atoms: {
getArrayIndexFromTensorPos,
......@@ -166,6 +209,7 @@ export default {
sigmoid,
prelu,
scale,
softmax
softmax,
transferFromNHWCtoNCHW
}
};
......@@ -13,6 +13,7 @@ export default class imageFeed {
this.pixels = '';
this.defaultParams = {
gapFillWith: '#000',
mean: [0, 0, 0],
std: [1, 1, 1]
};
};
......@@ -61,7 +62,8 @@ export default class imageFeed {
let std = opt.std;
// 考虑channel因素获取数据
for (let i = 0; i < data.length; i += 4) {
// img_mean 0.485, 0.456, 0.406
//img_std 0.229, 0.224, 0.225
let index = i / 4;
let vIndex = Math.floor(index / sw);
let hIndex = index - (vIndex * sw) - 1;
......@@ -82,10 +84,12 @@ export default class imageFeed {
* @param shape
*/
allReshapeToRGB(imageData, opt, scaleSize) {
const {sw, sh} = scaleSize;
//const {sw, sh} = scaleSize;
const [b, c, h, w] = opt.targetShape;
let data = imageData.data || imageData;
// mean和std是介于0-1之间的
let mean = opt.mean;
let std = opt.std;
let dataLength = data.length;
// let result = new Float32Array(dataLength * 3);
let result = this.result;
......@@ -101,10 +105,14 @@ export default class imageFeed {
let iwj = iw + j;
for (let k = 0; k < c; ++k) {
let a = iwj * 4 + k;
result[offset++] = (data[a] - mean[k]) / 256;
result[offset] = data[a] / 255;
result[offset] -= mean[k];
result[offset] /= std[k];
offset++;
}
}
}
return result;
};
......@@ -115,6 +123,7 @@ export default class imageFeed {
* @return {Object} 缩放后的尺寸
*/
reSize(image, params) {
console.log('execute resize!!');
// 原始图片宽高
const width = this.pixelWidth;
const height = this.pixelHeight;
......@@ -136,7 +145,40 @@ export default class imageFeed {
this.setInputCanvas(image);
return {sw, sh};
};
/**
* 根据scale缩放图像并且缩放成目标尺寸并居中
*/
resizeAndFitTargetSize(image, params){
console.log('execute resizeAndFitTargetSize!!');
// 原始图片宽高
const width = this.pixelWidth;
const height = this.pixelHeight;
// 缩放后的宽高
let sw = width;
let sh = height;
// 最小边缩放到scale
if (width < height) {
sw = params.scale;
sh = Math.round(sw * height / width);
} else {
sh = params.scale;
sw = Math.round(sh * width / height);
}
this.fromPixels2DContext.canvas.width = sw;
this.fromPixels2DContext.canvas.height = sh;
const targetWidth = params.targetSize.width;
const targetHeight = params.targetSize.height;
this.fromPixels2DContext.drawImage(
image, 0, 0, sw, sh);
let x = (sw - targetWidth)/2;
let y = (sh - targetHeight)/2;
sw = targetWidth;
sh = targetHeight;
let data = this.getImageData(params, x, y, {sw, sh});
this.setInputCanvas(image);
return data;
}
/**
* 缩放成目标尺寸并居中
......@@ -199,14 +241,16 @@ export default class imageFeed {
* @param pixels
* @returns {Uint8ClampedArray}
*/
getImageData(pixels, scaleSize) {
getImageData(pixels, x, y, scaleSize) {
const {sw, sh} = scaleSize;
// 复制画布上指定矩形的像素数据
let vals = this.fromPixels2DContext
.getImageData(0, 0, sw, sh);
.getImageData(x, y, sw, sh);
// crop图像
// const width = pixels.width;
// const height = pixels.height;
return vals;
};
......@@ -236,18 +280,27 @@ export default class imageFeed {
if (pixels instanceof HTMLImageElement || pixels instanceof HTMLVideoElement) {
this.pixelWidth = pixels.naturalWidth || pixels.width;
this.pixelHeight = pixels.naturalHeight || pixels.height;
if (opt.scale) { // 兼容以前的,如果有scale就是短边缩放到scale模式
if (opt.scale && opt.targetSize){ // Moblienet的情况
data = this.resizeAndFitTargetSize(pixels, opt);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
}
else if (opt.scale) { // 兼容以前的,如果有scale就是短边缩放到scale模式
scaleSize = this.reSize(pixels, opt);
data = this.getImageData(opt, scaleSize);
console.dir(scaleSize);
console.dir(pixels);
data = this.getImageData(opt, 0, 0, scaleSize);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
}
else if (opt.targetSize) { // 如果有targetSize,就是装在目标宽高里的模式
else if (opt.targetSize) { // 如果有targetSize,就是装在目标宽高里的模式 TinyYolo的情况
scaleSize = this.fitToTargetSize(pixels, opt);
data = this.getImageData(opt, scaleSize);
data = this.getImageData(opt, 0, 0, scaleSize);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
}
}
if (opt.gray) {
data = grayscale(data);
}
......@@ -259,7 +312,6 @@ export default class imageFeed {
if (opt.targetShape) {
data = this.allReshapeToRGB(data, opt, scaleSize);
}
return [{data: data, shape: opt.shape || opt.targetShape, name: 'image', canvas: data2}];
}
}
......
......@@ -23,6 +23,7 @@ export default class gpu {
opts.width_raw_canvas = Number(opts.width_raw_canvas) || 512;
opts.height_raw_canvas = Number(opts.height_raw_canvas) || 512;
const canvas = opts.el ? opts.el : document.createElement('canvas');
canvas.addEventListener('webglcontextlost', evt => {
evt.preventDefault();
console.log('webgl context is lost~');
......@@ -103,7 +104,7 @@ export default class gpu {
this.cacheTextures = {};
this.uniformLocations = {};
// texture buffer
this.outTextures = [];
this.texturesMap = {};
// pbo
this.pbo = gl.createBuffer();
}
......@@ -155,7 +156,7 @@ export default class gpu {
gl.FLOAT, // Data type for each chanel.
null);
gl.bindTexture(gl.TEXTURE_2D, null);
this.outTextures.push(texture);
this.texturesMap[out.tensorId] = texture;
return program;
}
......@@ -263,19 +264,19 @@ export default class gpu {
* @param {WebGLTexture} texture 材质
* @returns {WebGLFramebuffer} The framebuffer
*/
attachFrameBuffer(iLayer) {
attachFrameBuffer(iLayer, tensorId) {
this.prevTexture = this.currentTexture;
// this.currentTexture = this.textureBuffer[this.textureBufferIndex % 2];
// this.textureBufferIndex = (this.textureBufferIndex + 1) >= 2 ? 0 : 1;
this.currentTexture = this.outTextures[iLayer];
console.log('this.currentTexture', this.currentTexture);
this.currentTexture = this.texturesMap[tensorId];
const gl = this.gl;
gl.framebufferTexture2D(gl.FRAMEBUFFER, // The target is always a FRAMEBUFFER.
gl.COLOR_ATTACHMENT0, // We are providing the color buffer.
gl.COLOR_ATTACHMENT0, // We are providing the color buffer.表示texture是颜色关联对象
gl.TEXTURE_2D, // This is a 2D image texture.
this.currentTexture, // The texture.
0 // 0, we aren't using MIPMAPs
);
gl.viewport(
0,
0,
......@@ -340,7 +341,8 @@ export default class gpu {
const gl = this.gl;
let texture;
if (!item.data) {
texture = this.prevTexture;
// texture = this.prevTexture;
texture = this.texturesMap[item.tensorId];
} else {
// texture = gl.createTexture();
if (isRendered && (iLayer > 0 || (iLayer === 0 && item.tensor !== 'origin'))) {
......@@ -348,9 +350,7 @@ export default class gpu {
texture = tData[item.variable + '_' + item.tensor];
} else {
texture = gl.createTexture();
if (index === 0) {
this.cacheTextures['' + iLayer] = this.cacheTextures['' + iLayer] || {};
}
this.cacheTextures['' + iLayer] = this.cacheTextures['' + iLayer] || {};
this.cacheTextures['' + iLayer][item.variable + '_' + item.tensor] = texture;
}
}
......
......@@ -10,7 +10,6 @@ import Utils from '../utils/utils';
* @file Graph,绘制生成model网络
* @author wangqun@baidu.com
*/
let start = 0;
// 生成factory实例
const factory = new Factory({});
// 获取op的输入配置
......@@ -48,16 +47,19 @@ export default class Graph {
const executor = this.constructExecutor(op);
const opData = new OpData(op.type, executor.inputs, executor.outputs, executor.attrs);
const name = opData.name;
const fsCode = factory.buildShader(name, opData.data);
opData.fsCode = fsCode;
opData.program = this.inst.createProgram(fsCode, opData.tensor['out']);
opData.program = [];
opData.program = opData.outputTensors.map((outTensor, index) => {
const fsCode = factory.buildShader(name, opData.fShaderParams[index], index);
return this.inst.createProgram(fsCode, outTensor);
});
opData.renderData = opConfs[name].map(elem => {
let item = Object.assign({}, elem);
const tensorData = opData.tensor[item.tensor];
const tensorData = opData.inputTensors.find(tensor => tensor.name === item.tensor);
if (item.type === 'texture') {
item.tensorId = tensorData.opts.type;
item.data = tensorData.data;
if (this.feedOp.id === op.id && item.tensor === 'origin') {
item.shape = tensorData.shape;
this.feedItem = item;
......@@ -70,7 +72,6 @@ export default class Graph {
}
return item;
});
// console.timeEnd('opData.renderData');
opData.iLayer = this.iLayer++;
op.opData = opData;
......@@ -83,7 +84,6 @@ export default class Graph {
return;
}
executor.execute(this.inst, this.isExecuted);
// if (executor.next && start++ < 2) {
if (executor.next) {
const id = executor.next;
const next = this.getTensor(id);
......@@ -136,7 +136,9 @@ export default class Graph {
const input = executor.inputs;
const output = executor.outputs;
Object.keys(output).forEach(function(key){
output[key] = that.getTensorAttr(output[key][0]);
output[key].forEach((item, index) => {
output[key][index] = that.getTensorAttr(item)[0];
});
});
Object.keys(input).forEach(function(key){
if (that.test && ((key === 'Input') || (key === 'X'))) {
......@@ -240,8 +242,10 @@ export default class Graph {
*/
getNextExecutor(ops, id) {
return ops.filter((item, key) => {
if (id === item.inputsName[0]) {
return true;
for (let i = 0; i < item.inputsName.length; i++) {
if (id === item.inputsName[i]) {
return true;
}
}
});
}
......
......@@ -29,6 +29,24 @@ export default class Loader {
if (!this.loadOptions) {
this.loadOptions = {};
}
else {
// this.fetchJson(this.modelGonfig.dir + 'x.json').then(data => {
// const [b, c, h, w] = [1, 3, 320, 320];
// const size = data.length;
// const total = 3 * 320 * 320;
// this.testData = new Float32Array(total);
// for (let i = 0; i < size; i++) {
// let j = i / (c * w) | 0;
// let k = i % (c * w);
// let b1 = j / h | 0;
// let h1 = j % h;
// let c1 = k % c;
// let w1 = k / c | 0;
// let l = b1 * (c * h * w) + c1 * (h * w) + h1 * (w) + w1;
// this.testData[i] = data[l];
// }
// });
}
}
fetchOneChunk(path) {
......@@ -110,10 +128,22 @@ export default class Loader {
&& item.name.match(TMP_SCHEME_REGEX) === null
&& item.name.match(TMP_REGEX) === null;
})
// .sort((a, b) => {
// if (a.name > b.name) {
// return 1;
// }
// if (a.name < b.name) {
// return -1;
// }
// return 0;
// }) // 按字母顺序排列 在model.json里
.forEach(item => {
len = item.shape.reduce((a, b) => a * b); // 长度为shape的乘积
item.data = this.allData.slice(marker, marker + len);
marker += len;
// 为了减少模型体积,模型转换工具不会导出非persistable的数据,这里只需要读取persistable的数据
if (item.persistable) {
item.data = this.allData.slice(marker, marker + len);
marker += len;
}
});
}
......
......@@ -3,7 +3,7 @@ import 'babel-polyfill';
import Loader from '../loader/loader';
import Graph from '../graph/graph';
/**
* @file paddle对象,负责加载模型和执行在线推理
* @file GraphModel,绘制生成model网络
* @author wangqun@baidu.com
*/
......@@ -28,6 +28,7 @@ export default class Paddle {
}
async load() {
if (this.options === null) {
// todo saniac 报错提示修改
throw new Error(
......@@ -56,7 +57,6 @@ export default class Paddle {
* @returns {*}
*/
execute(inputs) {
debugger;
let that = this;
this.feed = this.graph.feed = inputs;
// 生成op数据
......
/* eslint-disable */
import Gpu from '../gpu/gpu';
import getMaxUniforms from '../test/getMaxUniforms';
import Factory from '../factory/fshader/factory';
import {getTextureShapeInfo} from '../utils/opData';
// 生成factory实例
const factory = new Factory({});
/**
* @file gpu运行时
* @author wangqun@baidu.com, yangmingming@baidu.com
......@@ -36,26 +40,30 @@ export default {
}
// 设置gpu参数
const gpu = this.gpu;
gpu.setOutProps(opData.tensor['out']);
// 生成帧缓存材质
gpu.attachFrameBuffer(opData.iLayer);
// let end = +Date.now();
let bufferStatus = gpu.frameBufferIsComplete();
if (bufferStatus.isComplete) {
// start = +Date.now();
// timeObj['buferstatus-time'] = start - end;
// gpu.attachShader(opData.fshader);
gpu.setProgram(opData.program, isRendered);
// end = +Date.now();
// timeObj['createshader-time'] = end - start;
// timeObj['jsTime'] = end - time;
// statistic.push(timeObj);
// 开始计算
this.gpu.render(opData.renderData, opData.iLayer, isRendered);
return this;
} else {
return bufferStatus.message;
}
opData.program.forEach((program, index) => {
const outTensor = opData.outputTensors[index];
const outTensorId = outTensor.tensorId;
gpu.setOutProps(outTensor);
// 生成帧缓存材质
gpu.attachFrameBuffer(opData.iLayer, outTensorId);
// let end = +Date.now();
let bufferStatus = gpu.frameBufferIsComplete();
if (bufferStatus.isComplete) {
// start = +Date.now();
// timeObj['buferstatus-time'] = start - end;
// gpu.attachShader(opData.fshader);
gpu.setProgram(program, isRendered);
// end = +Date.now();
// timeObj['createshader-time'] = end - start;
// timeObj['jsTime'] = end - time;
// statistic.push(timeObj);
// 开始计算,执行 gl.drawArrays
this.gpu.render(opData.renderData, opData.iLayer, isRendered);
}
});
},
/**
......
......@@ -2,7 +2,6 @@
/**
* @file 公共方法
* @author yangmingming
*
*/
export default `
......
......@@ -11,7 +11,7 @@ export default `
precision mediump float;
precision mediump int;
#endif
varying vec2 vCoord;
void setOutput(float result) {
gl_FragColor.r = result;
}
......
/* eslint-disable */
/**
 * @file Shared shader atom: decodes a flat element offset into NCHW tensor
 *       coordinates. The caller computes `sumVal` as an NHWC-order flat index
 *       (see concat main shader); decoding it here with width fastest, then
 *       height, then channel, then batch yields the (n, c, h, w) position the
 *       same offset would have under NCHW layout — i.e. the layout transfer.
 * @author chenhaoze
 */
// TEXTURE_NAME, tensor name
// Reads data out of the texture
// uniform sampler2D TEXTURE_NAME;
export default `
ivec4 transferFromNHWCtoNCHW( int sumVal, const int channel, const int width_shape, const int height_shape, const int total_shape) {
int n_origin = int(total_shape/(channel * width_shape * height_shape));
int new_a = sumVal % width_shape;
sumVal = int((sumVal - new_a) / width_shape);
int new_b = sumVal % height_shape;
sumVal = int((sumVal - new_b) / height_shape);
int new_g = sumVal % channel;
sumVal = int((sumVal - new_g) / channel);
int new_r = sumVal % n_origin;
return ivec4(new_r,new_g,new_b,new_a);
}
`;
/* eslint-disable */
/**
* @file batchnorm的配置文件
* @author yangmingming
* @author wangqun
*/
export default {
dep: [
......@@ -16,6 +16,30 @@ export default {
conf: {
TEXTURE_NAME: 'texture_scale'
}
},
{
func: 'getPixelsFromTexturePos',
conf: {
TEXTURE_NAME: 'texture_bias'
}
},
{
func: 'getPixelsFromTexturePos',
conf: {
TEXTURE_NAME: 'texture_variance'
}
},
{
func: 'getPixelsFromTexturePos',
conf: {
TEXTURE_NAME: 'texture_mean'
}
},
{
func: 'getPixelsFromTexturePos',
conf: {
TEXTURE_NAME: 'texture_origin'
}
}
],
conf: [
......@@ -26,15 +50,23 @@ export default {
'HEIGHT_TEXTURE_ORIGIN',
'CHANNEL_ORIGIN',
'TOTAL_SHAPE_ORIGIN',
'WIDTH_SHAPE_OUT',
'HEIGHT_SHAPE_OUT',
'WIDTH_TEXTURE_OUT',
'HEIGHT_TEXTURE_OUT',
'CHANNEL_OUT',
'OFFSET_Y_OUT',
'EPSILON',
'WIDTH_TEXTURE_SCALE',
'HEIGHT_TEXTURE_SCALE',
'WIDTH_TEXTURE_BIAS',
'HEIGHT_TEXTURE_BIAS',
'WIDTH_TEXTURE_MEAN',
'HEIGHT_TEXTURE_MEAN',
'WIDTH_TEXTURE_VARIANCE',
'HEIGHT_TEXTURE_VARIANCE',
'MULTI_VALUE',
'BIAS_VALUE',
'ACTIVE_FUNCTION'
......@@ -46,6 +78,24 @@ export default {
setter: 'initTexture',
type: 'texture'
},
{
tensor: 'bias',
variable: 'texture',
setter: 'initTexture',
type: 'texture'
},
{
tensor: 'mean',
variable: 'texture',
setter: 'initTexture',
type: 'texture'
},
{
tensor: 'variance',
variable: 'texture',
setter: 'initTexture',
type: 'texture'
},
{
tensor: 'origin',
variable: 'texture',
......@@ -53,4 +103,4 @@ export default {
type: 'texture'
}
]
};
\ No newline at end of file
};
......@@ -9,11 +9,15 @@ void main(void) {
// 输出数据
ivec4 oPos = getOutputTensorPos();
float o = getValueFromTensorPos_origin(oPos.r, oPos.g, oPos.b, oPos.a);
// 归一化数据
vec4 scale = getPixelsFromTexturePos_texture_scale(vec2((float(int(oPos.g)) + 0.5) / float(width_texture_scale), 0.0));
float x = (o - scale[3]) / sqrt(scale[2] + epsilon);
float res = scale[0] * x + scale[1];
vec4 scale = getPixelsFromTexturePos_texture_scale(vec2( float(oPos.g) / float(width_texture_scale), 0.0));
vec4 bias = getPixelsFromTexturePos_texture_bias(vec2((float(oPos.g)) / float(width_texture_bias), 0.0));
vec4 mean = getPixelsFromTexturePos_texture_mean(vec2((float(oPos.g)) / float(width_texture_mean), 0.0));
vec4 variance = getPixelsFromTexturePos_texture_variance(vec2((float(oPos.g)) / float(width_texture_variance), 0.0));
float x = (o - mean[0]) / sqrt(variance[0] + epsilon);
float res = scale[0] * x + bias[0];
setOutput(res);
}
`;
\ No newline at end of file
`;
/* eslint-disable */
/**
* @file batchnorm参数文件
* @author yangmingming
* @author wangqun
*/
export default `
// 输入数据
......@@ -14,9 +14,18 @@ const int channel_origin = CHANNEL_ORIGIN;
const int total_shape_origin = TOTAL_SHAPE_ORIGIN;
// 计算数据
const float epsilon = float(EPSILON);
const int width_texture_bias = WIDTH_TEXTURE_BIAS;
const int height_texture_bias = HEIGHT_TEXTURE_BIAS;
const int width_texture_variance = WIDTH_TEXTURE_VARIANCE;
const int height_texture_variance = HEIGHT_TEXTURE_VARIANCE;
const int width_texture_mean = WIDTH_TEXTURE_MEAN;
const int height_texture_mean = HEIGHT_TEXTURE_MEAN;
const int width_texture_scale = WIDTH_TEXTURE_SCALE;
const int height_texture_scale = HEIGHT_TEXTURE_SCALE;
// 输入数据
uniform sampler2D texture_origin;
uniform sampler2D texture_scale;
`;
\ No newline at end of file
uniform sampler2D texture_bias;
uniform sampler2D texture_variance;
uniform sampler2D texture_mean;
`;
/* eslint-disable */
/**
 * @file Shader config for the concat op: the dependency atoms to inline into
 *       the shader source, the uppercase macro names the factory substitutes
 *       with concrete values, and the input texture bindings.
 * @author zhangjingyuan02
 */
const concatConf = {
    dep: [
        {
            func: 'getValueFromTensorPos',
            conf: {
                TENSOR_NAME: 'origin'
            }
        },
        {
            func: 'getValueFromTensorPos',
            conf: {
                TENSOR_NAME: 'counter'
            }
        },
        {
            func: 'transferFromNHWCtoNCHW'
        }
    ],
    conf: [
        'LENGTH_SHAPE_COUNTER',
        'WIDTH_SHAPE_COUNTER',
        'HEIGHT_SHAPE_COUNTER',
        'WIDTH_TEXTURE_COUNTER',
        'HEIGHT_TEXTURE_COUNTER',
        'CHANNEL_COUNTER',
        'WIDTH_SHAPE_ORIGIN',
        'HEIGHT_SHAPE_ORIGIN',
        'LENGTH_SHAPE_ORIGIN',
        'WIDTH_TEXTURE_ORIGIN',
        'HEIGHT_TEXTURE_ORIGIN',
        'CHANNEL_ORIGIN',
        'WIDTH_SHAPE_OUT',
        'HEIGHT_SHAPE_OUT',
        'WIDTH_TEXTURE_OUT',
        'HEIGHT_TEXTURE_OUT',
        'CHANNEL_OUT',
        'OFFSET_Y_OUT',
        // The concat params shader source also declares these macros
        // (TOTAL_SHAPE_*, AXIS, DIM, INPUTS_DIM); they must be listed here so
        // the factory substitutes them like the entries above — otherwise the
        // placeholders survive into the GLSL and the shader fails to compile.
        'TOTAL_SHAPE_COUNTER',
        'TOTAL_SHAPE_OUT',
        'AXIS',
        'DIM',
        'INPUTS_DIM'
    ],
    input: [
        {
            tensor: 'origin',
            variable: 'texture',
            setter: 'initTexture',
            type: 'texture'
        },
        {
            tensor: 'counter',
            variable: 'texture',
            setter: 'initTexture',
            type: 'texture'
        }
    ]
};
export default concatConf;
/* eslint-disable */
/**
 * @file concat op main shader function. For each output position it converts
 *       the NHWC-order flat offset to NCHW coordinates, then reads from the
 *       `origin` input, or — once past the first input's extent along the
 *       concat dimension `dim` — from the `counter` input.
 * @author zhangjingyuan02
 */
export default `
// start函数
void main(void) {
ivec4 oPos = getOutputTensorPos();
// 输出坐标转换为输入坐标
int sumVal = oPos.g + oPos.a * channel_out + oPos.b * channel_out * width_shape_out + oPos.r * channel_out * width_shape_out * height_shape_out;
ivec4 new_oPos = transferFromNHWCtoNCHW(sumVal, channel_out, width_shape_out, height_shape_out, total_shape_out);
float o = 0.0;
if (new_oPos[dim] > inputs_dim[0] - 1) {
new_oPos[dim] = new_oPos[dim] - inputs_dim[0];
o = getValueFromTensorPos_counter(new_oPos.r, new_oPos.g, new_oPos.b, new_oPos.a);
}
else {
o = getValueFromTensorPos_origin(new_oPos.r, new_oPos.g, new_oPos.b, new_oPos.a);
}
setOutput(float(o));
}
`;
/* eslint-disable */
/**
 * @file concat op parameter declarations: GLSL constants (shape/texture sizes
 *       of the `counter` and `origin` inputs, concat axis/dim) and the two
 *       input samplers. The uppercase tokens are placeholders substituted by
 *       the fshader factory at build time — NOTE(review): each must also be
 *       listed in this op's conf file to be substituted; verify.
 * @author zhangjingyuan02
 */
export default `
// mul的input数据
const int axis = AXIS;
// 常量
// 输入数据
const int length_shape_counter = LENGTH_SHAPE_COUNTER;
const int width_shape_counter = WIDTH_SHAPE_COUNTER;
const int height_shape_counter = HEIGHT_SHAPE_COUNTER;
const int width_texture_counter = WIDTH_TEXTURE_COUNTER;
const int height_texture_counter = HEIGHT_TEXTURE_COUNTER;
const int channel_counter = CHANNEL_COUNTER;
const int total_shape_counter = TOTAL_SHAPE_COUNTER;
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int length_shape_origin = LENGTH_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
const int total_shape_origin = TOTAL_SHAPE_ORIGIN;
const int total_shape_out = TOTAL_SHAPE_OUT;
const int dim = DIM;
const int inputs_dim[1] = int[](INPUTS_DIM);
// uniform变量
// 输入数据
uniform sampler2D texture_counter;
uniform sampler2D texture_origin;
`;
......@@ -16,7 +16,13 @@ export default {
conf: {
TENSOR_NAME: 'filter'
}
}
},
{
func: 'transferFromNHWCtoNCHW',
conf:{
}
}
],
conf: [
'LENGTH_SHAPE_FILTER',
......
......@@ -22,7 +22,7 @@ export default `
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
// 计算相关
// 拆分步长
const int stride_h = STRIDES_X;
......
......@@ -16,7 +16,13 @@ export default {
conf: {
TENSOR_NAME: 'filter'
}
}
},
{
func: 'transferFromNHWCtoNCHW',
conf:{
}
}
],
conf: [
'LENGTH_SHAPE_FILTER',
......
......@@ -12,7 +12,6 @@ export default `
int y = oPos.b;
int b = oPos.r;
float res = 0.0;
int top = y * stride_v - padTop;
int left = x * stride_h - padLeft;
for (int fy = 0; fy < height_shape_filter; fy++) {
......
......@@ -16,7 +16,13 @@ export default {
conf: {
TENSOR_NAME: 'filter'
}
}
},
{
func: 'transferFromNHWCtoNCHW',
conf:{
}
}
],
conf: [
'LENGTH_SHAPE_FILTER',
......
......@@ -7,6 +7,8 @@ export default `
// start函数
void main(void) {
ivec4 oPos = getOutputTensorPosLIMIT_OUT();
int sumVal = oPos.g + oPos.a * channel_out + oPos.b * channel_out * width_shape_out;
ivec4 new_oPos = transferFromNHWCtoNCHW(sumVal, channel_out, width_shape_out, height_shape_out, total_shape_out);
int x = oPos.a;
int c = oPos.g;
int y = oPos.b;
......
......@@ -32,7 +32,8 @@ export default `
const int dilation_v = DILATIONS_Y;
// groups
const int groups = GROUPS;
const int total_shape_out = TOTAL_SHAPE_OUT;
// 加法
const int axis = AXIS;
......
/* eslint-disable */
/**
 * @file Build configuration for the conv2d shader: the GLSL helper
 *       snippets it depends on, the macro names substituted into the
 *       shader source, and the textures bound when the op runs.
 * @author chenhaoze
 */
export default {
    // One tensor-read helper per sampled tensor.
    dep: [
        {func: 'getValueFromTensorPos', conf: {TENSOR_NAME: 'origin'}},
        {func: 'getValueFromTensorPos', conf: {TENSOR_NAME: 'filter'}}
    ],
    // Macro names replaced with concrete values at shader build time.
    conf: [
        'LENGTH_SHAPE_FILTER',
        'WIDTH_SHAPE_FILTER',
        'HEIGHT_SHAPE_FILTER',
        'WIDTH_TEXTURE_FILTER',
        'HEIGHT_TEXTURE_FILTER',
        'CHANNEL_FILTER',
        'WIDTH_SHAPE_ORIGIN',
        'HEIGHT_SHAPE_ORIGIN',
        'LENGTH_SHAPE_ORIGIN',
        'WIDTH_TEXTURE_ORIGIN',
        'HEIGHT_TEXTURE_ORIGIN',
        'CHANNEL_ORIGIN',
        'WIDTH_SHAPE_OUT',
        'HEIGHT_SHAPE_OUT',
        'WIDTH_TEXTURE_OUT',
        'HEIGHT_TEXTURE_OUT',
        'CHANNEL_OUT',
        'OFFSET_Y_OUT',
        'STRIDE_HORIZONTAL',
        'STRIDE_VERTICAL',
        'PAD_LEFT',
        'PAD_TOP',
        'DILATION_HORIZONTAL',
        'DILATION_VERTICAL',
        'GROUPS',
        'MULTI_VALUE',
        'BIAS_VALUE',
        'ACTIVE_FUNCTION'
    ],
    // Run-time bindings: both the filter and the input are uploaded as textures.
    input: [
        {tensor: 'filter', variable: 'texture', setter: 'initTexture', type: 'texture'},
        {tensor: 'origin', variable: 'texture', setter: 'initTexture', type: 'texture'}
    ]
};
/* eslint-disable */
/**
 * @file Main GLSL function for a convolution-style op. For every output
 *       position it walks the filter window over the origin tensor and
 *       accumulates filter * input products per channel group.
 *       NOTE(review): only origin positions aligned with the stride
 *       contribute (ox % stride_h == 0 && oy % stride_v == 0), which —
 *       together with the padLeft/padTop = filter − pad − 1 transform in
 *       the companion params file — is characteristic of a transposed
 *       convolution (conv2d_transpose); confirm against the op registry.
 * @author chenhaoze
 */
export default `
// start函数
void main(void) {
ivec4 oPos = getOutputTensorPosLIMIT_OUT();
int x = oPos.a;
int c = oPos.g;
int y = oPos.b;
int b = oPos.r;
float res = 0.0;
// 重排遍历顺序
//int sumVal = oPos.g + oPos.a * channel_out + oPos.b * channel_out * width_shape_out;
//int new_a = sumVal % width_shape_out;
//int new_b = int((sumVal - new_a) / width_shape_out) % height_shape_out;
//int new_g = int((((sumVal - new_a) / width_shape_out) - new_b) / height_shape_out);
//int x = new_a;
//int c = new_g;
//int y = new_b;
// 获取output的坐标
int oTensorChannel = (c / (channel_out / groups)) * channel_filter;
int oy = y * 1 - padTop;
for (int fy = 0; fy < height_shape_filter; fy++) {
if (oy >= height_shape_origin) {
break;
}
if (oy < 0) {
oy += dilation_v;
continue;
}
int ox = x * 1 - padLeft;
for (int fx = 0; fx < width_shape_filter; fx++) {
if (ox >= width_shape_origin) {
break;
}
if (ox < 0) {
ox += dilation_h;
continue;
}
// channel计算
for (int j = 0; j < channel_filter; j++) {
float o = 0.0;
if (ox % stride_h == 0 && oy % stride_v == 0) {
int temp_x = int(ox / stride_h);
int temp_y = int(oy / stride_v);
o = getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + j, temp_y, temp_x);
}
float f = getValueFromTensorPosLIMIT_FILTER_filter(c, j, fy, fx);
res += f * o;
}
ox += dilation_h;
}
oy += dilation_v;
}
setOutput(res);
}
`;
/* eslint-disable */
/**
 * @file Parameter declarations (GLSL) for the companion conv shader:
 *       filter and origin shape/texture dimensions, strides, paddings,
 *       dilations, groups, and the two sampler uniforms.
 *       NOTE(review): padLeft/padTop are computed as
 *       filter_size - PADDINGS - 1, the padding transform used by
 *       transposed convolution — confirm this module belongs to
 *       conv2d_transpose rather than plain conv2d.
 * @author chenhaoze
 */
export default `
// conv2d的input数据
// 常量
// 卷积核
const int length_shape_filter = LENGTH_SHAPE_FILTER;
const int width_shape_filter = WIDTH_SHAPE_FILTER;
const int height_shape_filter = HEIGHT_SHAPE_FILTER;
const int width_texture_filter = WIDTH_TEXTURE_FILTER;
const int height_texture_filter = HEIGHT_TEXTURE_FILTER;
const int channel_filter = CHANNEL_FILTER;
// 输入数据
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int length_shape_origin = LENGTH_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
// 计算相关
// 拆分步长
const int stride_h = int(STRIDES_X);
const int stride_v = int(STRIDES_Y);
// padding的数目
const int padLeft = width_shape_filter - PADDINGS_X - 1;
const int padTop = height_shape_filter - PADDINGS_Y - 1;
// dilation膨胀系数
const int dilation_h = DILATION_H;
const int dilation_v = DILATION_V;
// groups
const int groups = GROUPS;
// uniform变量
// 卷积核
uniform sampler2D texture_filter;
// 输入数据
uniform sampler2D texture_origin;
`;
......@@ -5,16 +5,22 @@
*/
export default {
dep: [
{
func: 'getPixelsFromTexturePos',
{
func: 'getValueFromTensorPos',
conf: {
TEXTURE_NAME: 'texture_origin'
TENSOR_NAME: 'origin'
}
},
{
func: 'getPixelsFromTexturePos',
func: 'transferFromNHWCtoNCHW',
conf:{
}
},
{
func: 'getValueFromTensorPos',
conf: {
TEXTURE_NAME: 'texture_counter'
TENSOR_NAME: 'counter'
}
}
],
......@@ -26,7 +32,12 @@ export default {
'HEIGHT_TEXTURE_ORIGIN',
'CHANNEL_ORIGIN',
'TOTAL_SHAPE_COUNTER',
'WIDTH_SHAPE_COUNTER',
'HEIGHT_SHAPE_COUNTER',
'LENGTH_SHAPE_COUNTER',
'WIDTH_TEXTURE_COUNTER',
'HEIGHT_TEXTURE_COUNTER',
'CHANNEL_COUNTER',
'WIDTH_SHAPE_OUT',
'HEIGHT_SHAPE_OUT',
......@@ -35,10 +46,7 @@ export default {
'CHANNEL_OUT',
'OFFSET_Y_OUT',
'AXIS',
'MULTI_VALUE',
'BIAS_VALUE',
'ACTIVE_FUNCTION'
'AXIS'
],
input: [
{
......@@ -47,11 +55,11 @@ export default {
setter: 'initTexture',
type: 'texture'
},
{
{
tensor: 'counter',
variable: 'data',
setter: 'uniform1fv',
type: 'uniform'
variable: 'texture',
setter: 'initTexture',
type: 'texture'
}
]
};
......@@ -7,11 +7,23 @@ export default `
// start函数
void main(void) {
// 输出数据
ivec4 oPos = getOutputTensorPosLIMIT_OUT();
int index = oPos[axis];
float o = getPixelsFromTexturePos_texture_origin(vCoord).r;
float c = getValueFromCounter(index);
float res = ACTIVE_FUNCTION(o + c, multi_value, bias_value);
setOutput(res);
ivec4 oPos = getOutputTensorPos();
int sumVal = oPos.g + oPos.a * channel_origin + oPos.b * channel_origin * width_shape_origin + oPos.r * channel_origin * width_shape_origin * height_shape_origin;
float o = getValueFromTensorPos_origin(oPos.r, oPos.g, oPos.b, oPos.a);
ivec4 pos_counter;
pos_counter.r = channel_out;
pos_counter.g = height_shape_origin;
pos_counter.b = width_shape_origin;
pos_counter.a = 1;
int index = 0;
for (int i = 4 - length_shape_origin + axis; i < 4 - length_shape_origin + axis + length_shape_counter; i++ ){
if (index > 0) {
index = index * pos_counter[i];
}
index += oPos[i];
}
float c = getValueFromTensorPos_counter(oPos.r, oPos.g, oPos.b, oPos.a);
float res = c + o;
setOutput(res);
}
`;
......@@ -6,15 +6,21 @@
export default `
// 输入数据
const int axis = AXIS;
// const int total_shape_counter = TOTAL_SHAPE_COUNTER;
uniform float data_counter[TOTAL_SHAPE_COUNTER];
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int length_shape_origin = LENGTH_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
const int height_shape_counter = HEIGHT_SHAPE_COUNTER;
const int width_shape_counter = WIDTH_SHAPE_COUNTER;
const int length_shape_counter = LENGTH_SHAPE_COUNTER;
const int width_texture_counter = WIDTH_TEXTURE_COUNTER;
const int height_texture_counter = HEIGHT_TEXTURE_COUNTER;
const int channel_counter = CHANNEL_COUNTER;
uniform sampler2D texture_origin;
float getValueFromCounter(int index) {
for (int i = 0; i < TOTAL_SHAPE_COUNTER; i++) {
if (i == index) {
return data_counter[i];
}
}
return 0.0;
}
uniform sampler2D texture_counter;
`;
graph.es6/* eslint-disable */
/* eslint-disable */
/**
* @file mul的配置文件
* @author yangmingming zhangmiao06
......
......@@ -5,7 +5,7 @@
export default `
// start函数
void main(void) {
float res = (-1.0 / exp(-20.0));
float res = 0.0;
// 获取output的坐标
ivec4 out_pos = getOutputTensorPosLIMIT_OUT();
// X、Y方向的移动步长
......
/* eslint-disable */
/**
 * @file Build configuration for a shader that reads the "origin" tensor
 *       and remaps an NHWC-linearized index back to NCHW coordinates.
 *       NOTE(review): the original header said "batchnorm的配置文件";
 *       the dependency list matches the adjacent reshape modules —
 *       likely a copy-paste header, confirm.
 * @author chenhaoze
 */
export default {
    // GLSL helpers this shader depends on.
    dep: [
        {func: 'getValueFromTensorPos', conf: {TENSOR_NAME: 'origin'}},
        {func: 'transferFromNHWCtoNCHW', conf: {}}
    ],
    // Macro names substituted into the shader source at build time.
    conf: [
        'WIDTH_SHAPE_ORIGIN',
        'HEIGHT_SHAPE_ORIGIN',
        'LENGTH_SHAPE_ORIGIN',
        'WIDTH_TEXTURE_ORIGIN',
        'HEIGHT_TEXTURE_ORIGIN',
        'CHANNEL_ORIGIN',
        'WIDTH_SHAPE_OUT',
        'HEIGHT_SHAPE_OUT',
        'WIDTH_TEXTURE_OUT',
        'HEIGHT_TEXTURE_OUT',
        'CHANNEL_OUT',
        'OFFSET_Y_OUT',
        'MULTI_VALUE',
        'BIAS_VALUE',
        'ACTIVE_FUNCTION'
    ],
    // The single run-time binding: the input tensor as a texture.
    input: [
        {tensor: 'origin', variable: 'texture', setter: 'initTexture', type: 'texture'}
    ]
};
/* eslint-disable */
/**
 * @file Main GLSL function for reshape: linearizes the output position
 *       into a flat NHWC offset (sumVal), maps that offset back into the
 *       origin tensor's NCHW coordinates via transferFromNHWCtoNCHW, and
 *       copies the value through unchanged.
 * @author chenhaoze
 */
export default `
// start函数
void main(void) {
// 输出数据
ivec4 oPos = getOutputTensorPos();
// 输出坐标转换为输入坐标
int sumVal = oPos.g + oPos.a * channel_out + oPos.b * channel_out * width_shape_out + oPos.r * channel_out * width_shape_out * height_shape_out;
ivec4 new_oPos = transferFromNHWCtoNCHW(sumVal, channel_origin, width_shape_origin, height_shape_origin, total_shape_origin);
float o = getValueFromTensorPos_origin(new_oPos.r, new_oPos.g, new_oPos.b, new_oPos.a);
setOutput(float(o));
}
`;
/* eslint-disable */
/**
 * @file Parameter declarations (GLSL): shape/texture dimensions and
 *       sampler for the single "origin" input.
 *       NOTE(review): the original header said "batchnorm参数文件"; the
 *       declared constants match the adjacent reshape main shader —
 *       likely a copy-paste header, confirm.
 * @author chenhaoze
 */
export default `
// 输入数据
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int length_shape_origin = LENGTH_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
const int total_shape_origin = TOTAL_SHAPE_ORIGIN;
// 输入数据
uniform sampler2D texture_origin;
`;
......@@ -6,17 +6,19 @@
export default {
dep: [
{
func: 'getPixelsFromTexturePos',
func: 'getValueFromTensorPos',
conf: {
TEXTURE_NAME: 'texture_origin'
TENSOR_NAME: 'origin'
}
}
],
conf: [
'WIDTH_SHAPE_ORIGIN',
'HEIGHT_SHAPE_ORIGIN',
'LENGTH_SHAPE_ORIGIN',
'WIDTH_TEXTURE_ORIGIN',
'HEIGHT_TEXTURE_ORIGIN',
'TOTAL_SHAPE_ORIGIN',
'OFFSET_Y_OUT'
'CHANNEL_ORIGIN',
],
input: [
{
......
......@@ -6,55 +6,40 @@
export default `
// start函数
void main(void) {
float res = 0.0;
vec4 v4 = getPixelsFromTexturePos_texture_origin(vCoord);
vec2 onePixel = vec2(1.0 / float(width_texture_origin), 1.0 / float(height_texture_origin));
ivec4 oPos = getOutputTensorPos();
const int n = int(total_shape_origin/channel_origin/height_shape_origin/width_shape_origin);
float o = getValueFromTensorPos_origin(oPos[0], oPos[1], oPos[2], oPos[3]);
// 输出坐标转换为输入坐标
float total = 0.0;
float maxValue = getPixelsFromTexturePos_texture_origin(onePixel).r;
int number = 0;
vec4 pixels;
vec4 result;
// 求最大
for (int i = 0; i < height_texture_origin; i++) {
for (int j = 0; j < width_texture_origin; j++) {
pixels = getPixelsFromTexturePos_texture_origin(onePixel * vec2(float(j), float(i)));
number = i * width_texture_origin + j;
if ((number * 4 + 1) < total_shape_origin) {
maxValue = max(pixels.r, maxValue);
}
if ((number * 4 + 2) < total_shape_origin) {
maxValue = max(pixels.g, maxValue);
}
if ((number * 4 + 3) < total_shape_origin) {
maxValue = max(pixels.b, maxValue);
}
if ((number * 4 + 4) < total_shape_origin) {
maxValue = max(pixels.a, maxValue);
}
float res = 0.0;
if (axis == 0) {
for (int i = 0; i < n; i++){
float temp = getValueFromTensorPos_origin(i, oPos[1], oPos[2], oPos[3]);
total += exp(temp);
}
res = exp(o) / total;
}
else if (axis == 1) {
for (int i = 0; i < channel_origin; i++){
float temp = getValueFromTensorPos_origin(oPos[0], i, oPos[2], oPos[3]);
total += exp(temp);
}
res = exp(o) / total;
}
else if (axis == 2) {
for (int i = 0; i < height_shape_origin; i++){
float temp = getValueFromTensorPos_origin(oPos[0], oPos[1], i, oPos[3]);
total += exp(temp);
}
res = exp(o) / total;
}
// 求和
for (int i = 0; i < height_texture_origin; i++) {
for (int j = 0; j < width_texture_origin; j++) {
pixels = getPixelsFromTexturePos_texture_origin(onePixel * vec2(float(j), float(i)));
number = i * width_texture_origin + j;
if ((number * 4 + 1) < total_shape_origin) {
total += exp(pixels.r - maxValue);
}
if ((number * 4 + 2) < total_shape_origin) {
total += exp(pixels.g - maxValue);
}
if ((number * 4 + 3) < total_shape_origin) {
total += exp(pixels.b - maxValue);
}
if ((number * 4 + 4) < total_shape_origin) {
total += exp(pixels.a - maxValue);
}
else {
for (int i = 0; i < width_shape_origin; i++){
float temp = getValueFromTensorPos_origin(oPos[0], oPos[1], oPos[2], i);
total += exp(temp);
}
res = exp(o) / total;
}
outColor = exp(v4 - vec4(maxValue, maxValue, maxValue, maxValue)) / vec4(total, total, total, total);
// res = result.a;
// setOutput(res);
setOutput(res);
}
`;
......@@ -5,10 +5,13 @@
*/
export default `
// 输入数据
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int total_shape_origin = TOTAL_SHAPE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
const int axis = AXIS;
// uniform变量
// 输入数据
uniform sampler2D texture_origin;
......
/* eslint-disable */
/**
 * @file Build configuration for the split op shader: GLSL helper
 *       dependencies, macro names to substitute, and run-time inputs.
 * @author zhangjingyuan02
 */
export default {
    // GLSL helpers this shader depends on.
    dep: [
        {func: 'getValueFromTensorPos', conf: {TENSOR_NAME: 'origin'}},
        {func: 'transferFromNHWCtoNCHW'}
    ],
    // Macro names substituted into the shader source at build time.
    conf: [
        'WIDTH_SHAPE_ORIGIN',
        'HEIGHT_SHAPE_ORIGIN',
        'LENGTH_SHAPE_ORIGIN',
        'WIDTH_TEXTURE_ORIGIN',
        'HEIGHT_TEXTURE_ORIGIN',
        'CHANNEL_ORIGIN',
        'WIDTH_SHAPE_OUT',
        'HEIGHT_SHAPE_OUT',
        'WIDTH_TEXTURE_OUT',
        'HEIGHT_TEXTURE_OUT',
        'CHANNEL_OUT',
        'OFFSET_Y_OUT'
    ],
    // The single run-time binding: the input tensor as a texture.
    input: [
        {tensor: 'origin', variable: 'texture', setter: 'initTexture', type: 'texture'}
    ]
};
/* eslint-disable */
/**
 * @file Main GLSL function for split: computes the slice length
 *       (target_value.length() / num), maps the output position back to
 *       origin NCHW coordinates, then offsets the split dimension by
 *       layer_run_time * length to select the slice being produced.
 *       NOTE(review): layer_run_time is not declared in the companion
 *       params file — presumably injected by the shader builder; confirm.
 * @author zhangjingyuan02
 */
export default `
// start函数
void main(void) {
int length = int(target_value.length() / num);
ivec4 oPos = getOutputTensorPos();
// 输出坐标转换为输入坐标
int sumVal = oPos.g + oPos.a * channel_out + oPos.b * channel_out * width_shape_out + oPos.r * channel_out * width_shape_out * height_shape_out;
ivec4 new_oPos = transferFromNHWCtoNCHW(sumVal, channel_out, width_shape_out, height_shape_out, total_shape_out);
new_oPos[dim] = new_oPos[dim] + layer_run_time * length;
float o = getValueFromTensorPos_origin(new_oPos.r, new_oPos.g, new_oPos.b, new_oPos.a);
setOutput(float(o));
}
`;
/* eslint-disable */
/**
 * @file Parameter declarations (GLSL) for the split op shader: origin
 *       shape/texture dimensions, the split dimension and count, the
 *       per-slice index table (target_value), and the input sampler.
 * @author zhangjingyuan02
 */
export default `
// 常量
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int length_shape_origin = LENGTH_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
const int total_shape_origin = TOTAL_SHAPE_ORIGIN;
const int total_shape_out = TOTAL_SHAPE_OUT;
const int dim = DIM;
const int num = NUM;
const int target_value[TARGET_LENGTH] = int[](TARGET_VALUE);
// 输入数据
uniform sampler2D texture_origin;
`;
/* eslint-disable */
/**
 * @file Build configuration for a shader reading the "origin" tensor with
 *       an NHWC-to-NCHW index remap.
 *       NOTE(review): the original header said "batchnorm的配置文件";
 *       the adjacent main/params modules implement transpose (perm_*) —
 *       likely a copy-paste header, confirm.
 * @author chenhaoze
 */
export default {
    // GLSL helpers this shader depends on.
    dep: [
        {func: 'getValueFromTensorPos', conf: {TENSOR_NAME: 'origin'}},
        {func: 'transferFromNHWCtoNCHW', conf: {}}
    ],
    // Macro names substituted into the shader source at build time.
    conf: [
        'WIDTH_SHAPE_ORIGIN',
        'HEIGHT_SHAPE_ORIGIN',
        'LENGTH_SHAPE_ORIGIN',
        'WIDTH_TEXTURE_ORIGIN',
        'HEIGHT_TEXTURE_ORIGIN',
        'CHANNEL_ORIGIN',
        'WIDTH_SHAPE_OUT',
        'HEIGHT_SHAPE_OUT',
        'WIDTH_TEXTURE_OUT',
        'HEIGHT_TEXTURE_OUT',
        'CHANNEL_OUT',
        'OFFSET_Y_OUT',
        'MULTI_VALUE',
        'BIAS_VALUE',
        'ACTIVE_FUNCTION'
    ],
    // The single run-time binding: the input tensor as a texture.
    input: [
        {tensor: 'origin', variable: 'texture', setter: 'initTexture', type: 'texture'}
    ]
};
/* eslint-disable */
/**
 * @file Main GLSL function applying a permutation (perm_0..perm_3) to the
 *       origin coordinates — i.e. the transpose op. Each perm_size branch
 *       permutes only the trailing dims of rank perm_size.
 *       NOTE(review): (1) the original comment labeled this "reshape主函数"
 *       but the perm_* indexing is transpose; (2) transferFromNHWCtoNCHW
 *       is called here with total_shape_origin while sibling modules pass
 *       total_shape_out — confirm which is intended.
 * @author chenhaoze
 */
export default `
// start函数
void main(void) {
// 输出数据
ivec4 oPos = getOutputTensorPos();
// 重排遍历顺序
int sumVal = oPos.g + oPos.a * channel_out + oPos.b * channel_out * width_shape_out + oPos.r * channel_out * width_shape_out * height_shape_out;
ivec4 new_oPos = transferFromNHWCtoNCHW(sumVal, channel_out, width_shape_out, height_shape_out, total_shape_origin);
// 转置 坐标变换
oPos = new_oPos;
float o = 0.0;
if (perm_size == 1) {
o = getValueFromTensorPos_origin(oPos[0], oPos[1], oPos[2], oPos[3]);
}
else if (perm_size == 2) {
o = getValueFromTensorPos_origin(oPos[0], oPos[1], oPos[min(2 + perm_0, 3)], oPos[min(2 + perm_1, 3)]);
}
else if (perm_size == 3) {
o = getValueFromTensorPos_origin(oPos[0], oPos[min(1 + perm_0, 3)], oPos[min(1 + perm_1, 3)], oPos[min(1 + perm_2, 3)]);
}
else if (perm_size == 4) {
o = getValueFromTensorPos_origin(oPos[perm_0], oPos[perm_1], oPos[perm_2], oPos[perm_3]);
}
setOutput(float(o));
}
`;
/* eslint-disable */
/**
 * @file Parameter declarations (GLSL) for the transpose op shader: origin
 *       shape/texture dimensions, the permutation components
 *       perm_0..perm_3 with perm_size, and the input sampler.
 *       NOTE(review): the original header said "batchnorm参数文件"; the
 *       perm_* constants match the adjacent transpose main shader —
 *       likely a copy-paste header, confirm.
 * @author chenhaoze
 */
export default `
// 输入数据
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int length_shape_origin = LENGTH_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
const int total_shape_origin = TOTAL_SHAPE_ORIGIN;
const int perm_size = PERM_SIZE;
const int perm_0 = PERM_0;
const int perm_1 = PERM_1;
const int perm_2 = PERM_2;
const int perm_3 = PERM_3;
// 输入数据
uniform sampler2D texture_origin;
`;
/* eslint-disable */
/**
* @file 顶点文件
* @author wangqun
* @desc  顶点坐标系转换,适配webgl1
* @author yangmingming
*/
export default `
attribute vec4 position;
varying vec2 vCoord;
void main() {
vCoord.x = (position.x + 1.0) / 2.0;
vCoord.y = (position.y + 1.0) / 2.0;
......
/* eslint-disable */
/**
* @file 顶点文件,webgl 2.0
* @author wangqun
* @desc  顶点坐标系转换,适配webgl2
* @author yangmingming
*/
export default `#version 300 es
in vec4 position;
out vec2 vCoord;
void main() {
vCoord.x = (position.x + 1.0) / 2.0;
vCoord.y = (position.y + 1.0) / 2.0;
......
......@@ -3,7 +3,7 @@ import Utils from './utils';
import Tensor from './tensor';
/**
* @file op的数据对象
* @author wangqun, yangmingming
* @author yangmingming
*
*/
const keys = [
......@@ -57,13 +57,16 @@ const opBehavior = {
'needBatch',
'isApplySeparableConv'
],
conv2d_transpose: [
'needBatch'
],
batchnorm: [
'needBatch',
'mergeTensor'
],
elementwise_add: [
'broadcast',
'needBatch'
'needBatch',
'processAxis'
],
conv2d_elementwise_add: [
'mergeAttrs',
......@@ -91,18 +94,34 @@ const opBehavior = {
mul: [
'reshape',
'needBatch'
],
reshape: [
'needBatch',
'inferShape'
],
transpose: [
'needBatch',
'setPerm'
],
concat: [
'normalizeDim',
'needBatch'
],
split: [
'normalizeDim',
'needBatch'
],
softmax: [
]
'needBatch'
],
scale: [
'needBatch'
],
};
const mergeType = 'conv2d-elementwise_add';
export default class OpData {
constructor(name, input = {}, output = {}, attrs = {}) {
console.log('now in constructor');
console.dir(name);
console.dir(input);
console.dir(output);
this.realName = name;
this.name = name;
this.attrs = attrs;
......@@ -120,14 +139,46 @@ export default class OpData {
'multi_value': '1.0',
'bias_value': '0.0'
};
// tensor数据
this.tensor = {};
this.inputTensors = [];
this.outputTensors = [];
this.fShaderParams = [];
this.buildTensor();
this.buildAttrs();
this.buildShaderParams();
}
}
inferShape(){
if (this.name == 'reshape'){
let inputShape = this.input.X[0].shape;
let targetShape = this.attrs.new_shape;
for (let i = 0; i < targetShape.length; i++){
if (targetShape[i] == 0) {
targetShape[i] = inputShape[i];
}
}
let total_length = 1;
for (let j = 0;j < inputShape.length; j++){
total_length *= inputShape[j];
}
let minusPos = -1;
for (let i = 0; i < targetShape.length; i++){
if (targetShape[i] == -1) {
minusPos = i;
continue;
}
total_length /= targetShape[i];
}
if (minusPos != -1) {
targetShape[minusPos] = total_length;
}
this.output.Out[0].shape = targetShape;
}
}
buildTensor() {
// todo: 是否需要形状对齐
// todo: 是否需要广播tensor
const tensorData = [];
......@@ -150,8 +201,10 @@ export default class OpData {
// 默认取第一个数据
const data = this.output[key] || [{}];
if (tensorName[key.toLowerCase()]) {
data[0].tensorName = tensorName[key.toLowerCase()];
tensorData.push(data[0]);
data.forEach(item => {
item.tensorName = tensorName[key.toLowerCase()];
tensorData.push(item);
});
}
}
}
......@@ -160,20 +213,22 @@ export default class OpData {
behavior.forEach(behavior => {
this[behavior](tensorData);
});
// 生成tensor对象
tensorData.forEach(data => {
// console.log(data);
if (data) {
let tensor = null;
const tensorName = data.tensorName;
if (data.notTensor) {
this.tensor[data.tensorName] = {
name: data.tensorName,
tensor = {
name: tensorName,
data: new Float32Array(data.data),
total_shape: data.data.length
};
} else {
this.tensor[data.tensorName] = new Tensor({
tensor = new Tensor({
type: data.name,
name: data.tensorName,
name: tensorName,
shape: data.shape,
data: data.data,
needBatch: data.needBatch || false,
......@@ -181,24 +236,25 @@ export default class OpData {
isPacked: data.isPacked || false
});
}
if (tensorName === 'out') {
this.outputTensors.push(tensor);
}
else {
this.inputTensors.push(tensor);
}
}
});
// console.dir(['tensors', this.tensor]);
// console.log('now in buildTensor show this and tensorData');
// console.log(this);
// console.log(tensorData);
}
buildAttrs() {
buildShaderParams() {
// 计算属性
for (let key in this.attrs) {
if (this.attrs.hasOwnProperty(key)) {
const item = this.attrs[key];
if (Object.prototype.toString.call(item) === '[object Array]') {
if (keys.indexOf(key) > -1) {
this.data[key + '_x'] = item[0];
this.data[key + '_y'] = item[1];
}
if (Object.prototype.toString.call(item) === '[object Array]' && keys.indexOf(key) > -1) {
this.data[key + '_x'] = item[0];
this.data[key + '_y'] = item[1];
} else {
this.data[key] = item;
// 获取shader所需的数据
......@@ -209,19 +265,66 @@ export default class OpData {
}
}
}
// 获取tensor的数据
for (let key in this.tensor) {
const tensor = this.tensor[key];
// 遍历 获取input tensor的数据
this.inputTensors.forEach(inputTensor => {
tensorAttrs.forEach(attr => {
this.data[attr+ '_' + tensor.name] = tensor[attr];
this.data[attr+ '_' + inputTensor.name] = inputTensor[attr];
});
}
});
// 根据out tensor 个数 生成对应的 fShader 个数
this.outputTensors.forEach(outTensor => {
const params = JSON.parse(JSON.stringify(this.data));
// 获取output tensor的数据
tensorAttrs.forEach(attr => {
params[attr+ '_' + outTensor.name] = outTensor[attr];
});
this.fShaderParams.push(params);
});
}
needBatch(tensorData = []) {
tensorData.forEach(data => (data.needBatch = true));
}
setPerm(tensorData = []){
let arrayPerm = this.attrs['perm'];
let l = arrayPerm.length;
if (l == 3) {
if (arrayPerm == [2,0,1]) {
arrayPerm = [1,2,0];
}
else if (arrayPerm == [1,2,0]){
arrayPerm = [2,0,1];
}
}
else if (l == 4){
let temp = [0,0,0,0];
for (let i = 0; i < 4; i++){
temp[[arrayPerm[i]]] = i;
}
arrayPerm = temp;
}
this.data['perm_0'] = 0;
this.data['perm_1'] = 0;
this.data['perm_2'] = 0;
this.data['perm_3'] = 0;
if (l >= 1) {
this.data['perm_0'] = arrayPerm[0];
}
if (l >= 2) {
this.data['perm_1'] = arrayPerm[1];
}
if (l >= 3) {
this.data['perm_2'] = arrayPerm[2];
}
if (l >= 4) {
this.data['perm_3'] = arrayPerm[3];
}
this.data['perm_size'] = l;
}
isGlobalPooling(tensorData = []) {
let counter = tensorData.filter(tensor => (tensor.tensorName === 'origin'))[0] || {};
let length = counter.shape && counter.shape.length || 0;
......@@ -236,6 +339,7 @@ export default class OpData {
}, {});
}
isApplyWinoGrad(tensorData = []) {
const filter = tensorData.filter(item => {
const [b, c, h, w] = item.shape;
......@@ -292,36 +396,6 @@ export default class OpData {
});
}
broadcast(tensorData = []) {
tensorData.forEach(item => {
if (item.tensorName === 'counter') {
item.notTensor = true;
}
});
return;
// mobilenet model
// todo: 默认y的shape length是1, 以后需要实现通用版本
console.log('2. x and y is ');
console.log(x);
console.log(y);
let shape = Utils.getBroadcastShapeInPaddle(x.shape, y.shape, this.attrs['axis']);
// 填充shape数据
if (small.shape.length === 1) {
const result = [];
small.shape = shape;
let total = shape.reduce((all, num) => all * num);
for (let i = 0; i < small.shape[0]; i++) {
let item = small.data[i];
for (let j = 0; j < total / shape[0]; j++) {
result.push(item);
}
}
small.data = result;
}
}
isMax(tensorData = []) {
const type = this.attrs['pooling_type'] === 'max' ? 1 : 0;
this.attrs['pooling_type'] = type;
......@@ -355,6 +429,37 @@ console.log(y);
}
}
normalizeDim() {
const origin_shape = this.input.X[0].shape;
const axis = this.attrs.axis > -1 ? this.attrs.axis : origin_shape.length + this.attrs.axis;
const dim_value = [];
for (let index = 0; index < origin_shape[axis]; index++) {
dim_value[index] = index;
}
this.attrs.target_length = dim_value.length;
this.attrs.target_value = dim_value;
// 保存 输入 tensor 对应dim 的长度
this.attrs.inputs_dim = [origin_shape[axis]];
this.attrs.dim = 4 - origin_shape.length + axis;
}
    /**
     * Resolve the broadcast axis for elementwise_add: compute y's
     * effective rank (dims of size 1 discounted), turn axis == -1 into an
     * explicit trailing alignment against x, and record both ranks as
     * shader params.
     */
    processAxis() {
        let shape_x = this.input.X[0].shape;
        let shape_y = this.input.Y[0].shape;
        let y_length = shape_y.length;
        // NOTE(review): this loop discounts EVERY size-1 dim, not only
        // trailing ones — e.g. shape_y = [1, 3, 1] yields 1 rather than 2.
        // Confirm this matches the intended broadcast rule before relying
        // on it for non-trailing singleton dims.
        for (let i = shape_y.length - 1; i >=0 ;i--){
            if (shape_y[i] == 1) {
                y_length -= 1;
            }
        }
        let axis_temp = this.attrs['axis'];
        // axis == -1 means "align y with the trailing dims of x".
        if (axis_temp == -1) {
            this.attrs['axis'] = shape_x.length - y_length;
        }
        this.attrs['shape_length_origin'] = shape_x.length;
        this.attrs['shape_length_counter'] = y_length;
    }
reshape(tensorData = []) {
let input = tensorData[0];
let counter = tensorData[1];
......@@ -380,23 +485,23 @@ console.log(y);
result[tensor.tensorName + 'Index'] = index;
});
for (let i = 0; i < result[constants[0]].shape[0]; i++) {
data.push(result[constants[0]].data[i]);
data.push(result[constants[1]].data[i]);
data.push(result[constants[2]].data[i]);
data.push(result[constants[3]].data[i]);
}
// for (let i = 0; i < result[constants[0]].shape[0]; i++) {
// data.push(result[constants[0]].data[i]);
// data.push(result[constants[1]].data[i]);
// data.push(result[constants[2]].data[i]);
// data.push(result[constants[3]].data[i]);
// }
tensorData[result[constants[0] + 'Index']].data = data;
// tensorData[result[constants[0] + 'Index']].data = data;
for (let i = 0; i < constants.length; i++){
tensorData[result[constants[i] + 'Index']].data = result[constants[i]].data;
}
// 充分利用shader空间
tensorData[result[constants[0] + 'Index']].notCompressed = true;
tensorData[result[constants[0] + 'Index']].shape[0] *= 4;
tensorData.splice(result[constants[1] + 'Index'], 1, 0);
tensorData.splice(result[constants[2] + 'Index'], 1, 0);
tensorData.splice(result[constants[3] + 'Index'], 1, 0);
//tensorData[result[constants[0] + 'Index']].notCompressed = true;
// tensorData[result[constants[0] + 'Index']].shape[0] *= 4;
//tensorData.splice(result[constants[1] + 'Index'], 1, 0);
//tensorData.splice(result[constants[2] + 'Index'], 1, 0);
//tensorData.splice(result[constants[3] + 'Index'], 1, 0);
}
checkIsMerge() {
......
/* eslint-disable */
import Utils from './utils';
/**
* @file Tensor类
* @author wangqun, yangmingming
* @author yangmingming
*/
export default class Tensor {
constructor(opts = {}) {
......@@ -10,6 +11,8 @@ export default class Tensor {
this.isPacked = this.isPacked || false;
// 设置tensor名字
this.name = opts.name;
// 设置 tensorId
this.tensorId = opts.type;
// tensor的形状
let shape = this.shape = opts.shape;
// 原始数据个数
......@@ -32,7 +35,6 @@ export default class Tensor {
// tensor数据
let data;
if (opts.type === 'image' || opts.type === 'x') {
console.log('image', this.data);
this.data = opts.data;
}
else if (opts.data && opts.data.length) {
......@@ -167,3 +169,4 @@ export default class Tensor {
}
}
}
/* eslint-enable */
/**
* @file 工具类
* @author wangqun, yangmingming
* @author yangmingming
*/
/* eslint-disable */
export default {
// todo: 适用2维矩阵乘法,以后实现通用版本
getReshapeInPaddle(inputShape = [], counterShape = [], outShape = []) {
......@@ -108,21 +109,22 @@ export default {
* @return {{shape: *[], zeroNumber: number}} {Object} texture信息
*/
getTextureInfoFromTensorShape(shape = [], isPacked = false) {
let b = shape[0] || 1;
let c = shape[1] || 1;
let h = shape[2] || 1;
let w = shape[3] || 1;
let b = shape[0];
let c = shape[1];
let h = shape[2];
let w = shape[3];
let height = b * h;
let width = c * w;
let offsetX = 0;
let offsetY = 0;
// 安卓和ios的max texture size是4096, 改造存储空间(2bh, cw / 2)
let exceedMax = false;
if (height > 4096 || width > 4096) {
height *= 2;
width = c * (Math.ceil(w / 2));
exceedMax = true;
}
// FIXME:为了让mobilenet能正常执行,这里先注释掉,待群哥修复
// if (height > MAX_TEXTURE_SIZE || width > MAX_TEXTURE_SIZE) {
// height *= 2;
// width = c * (Math.ceil(w / 2));
// exceedMax = true;
// }
if (isPacked) {
// 紧凑布局
height = b * c * Math.ceil(h / 2);
......@@ -178,7 +180,109 @@ export default {
let l = b1 * (c * h * w) + c1 * (h * w) + h1 * (w) + w1;
data[offset] = renderData.data[l];
offset += 4;
// data.push(renderData.data[l]);
// data.push(0);
// data.push(0);
// data.push(0);
}
renderData.data = data;
},
/*
* 将shape扩充到4维,在shape前补1
*/
padToFourDimShape(shape) {
let fourDimShape = [];
if (shape.length == 4) {
fourDimShape = shape;
} else if (shape.length < 4) {
for (let i = 0; i < 4 - shape.length; i++) {
fourDimShape.push(1);
}
fourDimShape = fourDimShape.concat(shape);
}
return fourDimShape;
},
/*
* 将nhwc排布数据转为nchw排布数据
*/
nhwc2nchw(data, shape) {
let N = shape[0];
let H = shape[1];
let W = shape[2];
let C = shape[3];
let WXC = W * C;
let HXWXC = H * W * C;
let nchwData = [];
for (let n = 0; n < N; n++) {
for (let c = 0; c < C; c++) {
for (let h = 0; h < H; h++) {
for (let w = 0; w < W; w++) {
nchwData.push(data[n * HXWXC + h * WXC + w * C + c]);
}
}
}
}
return nchwData;
},
/*
* 将nchw排布数据转为nhwc排布数据
*/
nchw2nhwc(data, shape) {
let N = shape[0];
let C = shape[1];
let H = shape[2];
let W = shape[3];
let HXW = H * W;
let CXHXW = C * H * W;
let nhwcData = [];
for (let n = 0; n < N; n++) {
for (let h = 0; h < H; h++) {
for (let w = 0; w < W; w++) {
for (let c = 0; c < C; c++) {
nhwcData.push(data[n * CXHXW + c * HXW + h * W + w]);
}
}
}
}
return nhwcData;
},
/*
* 等距间隔打印数据
*/
stridePrint(data, count = 20) {
let realPrintCount = count;
if (data.length <= realPrintCount) {
this.continuousPrint(data, realPrintCount);
return;
}
let numbers = [];
let stride = Math.floor(data.length / realPrintCount);
if (stride == 0) {
stride = 1;
}
realPrintCount = Math.floor(data.length / stride)
for (let i = 0; i < realPrintCount; i++) {
numbers.push(i * stride + ": " + data[i * stride]);
}
console.log(numbers)
},
/*
* 连续打印数据
*/
continuousPrint(data, count = 100) {
let numbers = [];
let realPrintCount = count;
if (data.length <= realPrintCount) {
realPrintCount = data.length;
}
for (let i = 0; i < realPrintCount; i++) {
numbers.push(i + ": " + data[i]);
}
console.log(numbers)
}
};
/* eslint-enable */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册