Commit 9c8c0eb1 authored by: W wangqun

[change] Support auto-detecting the chunk count when loading models & fc operator & convolution performance optimization & converter upgrade

Support auto-detecting the chunk count when loading models & fc operator & convolution performance optimization & converter upgrade & documentation fixes
Parent 469f97bf
......@@ -147,7 +147,7 @@ export default Vue.extend({
{name: 'preheat time', t: this.preheatT},
{name: 'subsequent average time', t: this.remainOthersT},
{name: 'best time', t: this.bestT},
- {name: 'op count', t: this.opCount},
+ {name: 'op count', t: this.opCount}
]
}
},
......@@ -239,7 +239,7 @@ export default Vue.extend({
totaltimeList.push(t);
- ops.push(this.getOpPerf(quertyResults, this.aggregate, {}));
+ ops.push(this.getOpPerf(quertyResults, this.aggregate, {}, true));
curTimes++;
}
......@@ -286,7 +286,7 @@ export default Vue.extend({
item.count++;
return now;
},
- getOpPerf(queryList, reduceF, acc) {
+ getOpPerf(queryList, reduceF, acc, needNoMean) {
let timeRes = acc ? queryList.reduce(reduceF, acc) : queryList.reduce(reduceF);
for(let key of Object.keys(timeRes)) {
const item = timeRes[key];
......@@ -294,7 +294,7 @@ export default Vue.extend({
if (name === 'feed') {
return item;
}
- item.time = +(time / count).toFixed(4);
+ item.time = needNoMean ? time : +(time / count).toFixed(4);
item.count = 1;
}
return timeRes;
......
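The new `needNoMean` flag switches `getOpPerf` from reporting a per-op mean to reporting the raw accumulated time. A minimal sketch of the difference, using a hypothetical reducer and query list in the spirit of `this.aggregate` (names and data are illustrative, not the component's actual internals):

```js
// Hypothetical reducer: accumulates time and count per op name.
const aggregate = (acc, cur) => {
    const item = acc[cur.name] || (acc[cur.name] = {name: cur.name, time: 0, count: 0});
    item.time += cur.time;
    item.count += 1;
    return acc;
};
const queryList = [
    {name: 'conv2d', time: 2},
    {name: 'conv2d', time: 4}
];
const totals = queryList.reduce(aggregate, {});
console.log(totals.conv2d.time, totals.conv2d.count); // 6 2
// Default mode reports the mean 6 / 2 = 3; with needNoMean it reports the raw 6.
```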
......@@ -47,14 +47,11 @@ export default class Camera {
if (this.deviceInfos.length) {
constraints.video.deviceId = {exact: deviceId || this.deviceInfos[0].deviceId};
}
- if (!constraints.video.deviceId) {
+ if (!(constraints.video.deviceId && constraints.video.deviceId.exact)) {
constraints = {
video: true
};
}
- else if (this.constraints) {
- constraints = this.constraints;
- }
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
// the latest standard API
......
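For reference, `constraints` is the standard `MediaStreamConstraints` object handed to `getUserMedia`; the fix above falls back to `video: true` whenever no concrete `deviceId.exact` could be pinned. A small sketch of that flow (the device ID is illustrative):

```js
// Pin a specific camera if its ID is known; otherwise let the browser choose.
let constraints = {video: {deviceId: {exact: 'some-device-id'}}};
if (!(constraints.video.deviceId && constraints.video.deviceId.exact)) {
    constraints = {video: true};
}
navigator.mediaDevices.getUserMedia(constraints)
    .then(stream => console.log('camera stream ready', stream.id))
    .catch(err => console.error('getUserMedia failed:', err.name));
```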
......@@ -34,6 +34,9 @@ import elementwise_add_conf from '../../shader/elementwise_add/conf';
import mul_params from '../../shader/mul/params';
import mul_func from '../../shader/mul/main';
import mul_conf from '../../shader/mul/conf';
import fc_params from '../../shader/fc/params';
import fc_func from '../../shader/fc/main';
import fc_conf from '../../shader/fc/conf';
import softmax_params from '../../shader/softmax/params';
import softmax_func from '../../shader/softmax/main';
import softmax_conf from '../../shader/softmax/conf';
......@@ -157,6 +160,11 @@ export default {
func: mul_func,
confs: mul_conf
},
fc: {
params: fc_params,
func: fc_func,
confs: fc_conf
},
concat: {
params: concat_params,
func: concat_func,
......
......@@ -175,14 +175,17 @@ export default class imageFeed {
let sh = height;
// scale the shorter side to params.scale
if (width < height) {
- sw = params.scale;
+ sw = params.scale || width;
sh = Math.round(sw * height / width);
- } else {
- sh = params.scale;
+ }
+ else if (width > height){
+ sh = params.scale || height;
sw = Math.round(sh * width / height);
}
- sw = params.scale;
- sh = params.scale;
+ else {
+ sw = sh = params.scale || width;
}
this.fromPixels2DContext.canvas.width = sw;
this.fromPixels2DContext.canvas.height = sh;
this.fromPixels2DContext.drawImage(
......@@ -327,13 +330,13 @@ export default class imageFeed {
data = this.resizeAndFitTargetSize(pixels, opt);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
}
- else if (opt.scale) { // resize directly to the target shape (the HumanSeg case)
-     scaleSize = this.reSize(pixels, opt);
+ else if (opt.targetSize) { // with targetSize, fit the image into the target width/height (the TinyYolo case)
+     scaleSize = this.fitToTargetSize(pixels, opt);
data = this.getImageData(opt, 0, 0, scaleSize);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
}
- else if (opt.targetSize) { // with targetSize, fit the image into the target width/height (the TinyYolo case)
-     scaleSize = this.fitToTargetSize(pixels, opt);
+ else {
+     scaleSize = this.reSize(pixels, opt);
data = this.getImageData(opt, 0, 0, scaleSize);
data2 = this.fromPixels2DContext2.getImageData(0, 0, this.pixelWidth, this.pixelHeight);
}
......
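To make the rewritten `reSize` branches concrete: the shorter side is scaled to `params.scale` (falling back to the original size when `scale` is absent) and the longer side keeps the aspect ratio. A worked example with illustrative numbers:

```js
// A 640x480 source with scale = 368: width > height,
// so sh = 368 and sw = Math.round(368 * 640 / 480) = 491.
const width = 640;
const height = 480;
const scale = 368;
const sh = scale;
const sw = Math.round(sh * width / height);
console.log(sw, sh); // 491 368
```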
# PaddleJS Model Loader
Baidu PaddleJS uses this loader to fetch models into the browser. The loader can load browser-friendly JSON files as well as binary files, and it supports both single-file and chunked loading, making heavy use of the browser's parallel requests to load inference models.
......
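As a rough usage sketch of the loader described above (the constructor arguments and option names here are assumptions for illustration, not a documented API):

```js
import Loader from './loader';

// Hypothetical: fetch model.json and its binary chunks from one directory.
const loader = new Loader('https://example.com/model/', {multipart: true});
loader.load().then(() => {
    // After load(), loader.data holds the manifest (including chunkNum when present).
    console.log('ops in model:', loader.data.ops.length);
});
```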
......@@ -12,6 +12,7 @@ export default class Loader {
this.options = options;
this.multipart = false;
this.test = false;
this.chunkNum = 0;
// fetch xhr jsonp
this.params = {type: 'fetch'};
// configure chunked model loading
......@@ -62,7 +63,7 @@ export default class Loader {
}
fetchChunks() {
- let counts = this.binaryOption.fileCount;
+ let counts = this.chunkNum || this.binaryOption.fileCount;
let chunkArray = [];
for (let i = 1; i <= counts; i++) {
chunkArray.push(
......@@ -206,6 +207,7 @@ export default class Loader {
async load() {
let that = this;
const artifacts = this.data = await this.fetchModel();
this.chunkNum = artifacts.chunkNum;
if (this.multipart === true) {
if (this.dataType === 'binary') {
await this.fetchChunks()
......
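With this change the loader no longer depends on a hand-configured `binaryOption.fileCount`: when the converter writes a `chunkNum` field into model.json, `load()` picks it up and `fetchChunks()` derives the chunk count from it. A sketch of the manifest shape this relies on (values are illustrative):

```js
// model.json as written by the converter (abridged).
const artifacts = {
    ops: [/* operator descriptions */],
    vars: [/* variable descriptions */],
    chunkNum: 3 // number of chunk_*.dat files to request
};
// load():        this.chunkNum = artifacts.chunkNum;
// fetchChunks(): let counts = this.chunkNum || this.binaryOption.fileCount;
```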
......@@ -69,7 +69,10 @@ export default {
'MULTI_VALUE',
'BIAS_VALUE',
'FUSE_RELU',
- 'ACTIVE_FUNCTION'
+ 'ACTIVE_FUNCTION',
+ 'FILTER_REMAINDER_VEC4',
+ 'FILTER_NEAREST_VEC4'
],
input: [
// {
......
......@@ -34,10 +34,50 @@ export default `
continue;
}
// per-channel accumulation
- for (int j = 0; j < channel_filter; j++) {
- float f = getValueFromTensorPosLIMIT_FILTER_filter(c, j, fy, fx);
- float o = getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + j, oy, ox);
- res += f * o;
+ for (int j = 0; j < filter_nearest_vec4; j += 4) {
+ vec4 fValues = vec4(
+ getValueFromTensorPosLIMIT_FILTER_filter(c, j, fy, fx),
+ getValueFromTensorPosLIMIT_FILTER_filter(c, j + 1, fy, fx),
+ getValueFromTensorPosLIMIT_FILTER_filter(c, j + 2, fy, fx),
+ getValueFromTensorPosLIMIT_FILTER_filter(c, j + 3, fy, fx)
+ );
+ vec4 oValues = vec4(
+ getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + j, oy, ox),
+ getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + j + 1, oy, ox),
+ getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + j + 2, oy, ox),
+ getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + j + 3, oy, ox)
+ );
+ res += dot(fValues, oValues);
}
if (filter_remainder_vec4 == 1) {
res += dot(
getValueFromTensorPosLIMIT_FILTER_filter(c, filter_nearest_vec4, fy, fx),
getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + filter_nearest_vec4, oy, ox));
} else if (filter_remainder_vec4 == 2) {
vec2 fValues = vec2(
getValueFromTensorPosLIMIT_FILTER_filter(c, filter_nearest_vec4, fy, fx),
getValueFromTensorPosLIMIT_FILTER_filter(c, filter_nearest_vec4 + 1, fy, fx)
);
vec2 oValues = vec2(
getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + filter_nearest_vec4, oy, ox),
getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + filter_nearest_vec4 + 1, oy, ox)
);
res += dot(fValues, oValues);
} else if (filter_remainder_vec4 == 3) {
vec3 fValues = vec3(
getValueFromTensorPosLIMIT_FILTER_filter(c, filter_nearest_vec4, fy, fx),
getValueFromTensorPosLIMIT_FILTER_filter(c, filter_nearest_vec4 + 1, fy, fx),
getValueFromTensorPosLIMIT_FILTER_filter(c, filter_nearest_vec4 + 2, fy, fx)
);
vec3 oValues = vec3(
getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + filter_nearest_vec4, oy, ox),
getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + filter_nearest_vec4 + 1, oy, ox),
getValueFromTensorPosLIMIT_ORIGIN_origin(b, oTensorChannel + filter_nearest_vec4 + 2, oy, ox)
);
res += dot(fValues, oValues);
}
ox += dilation_h;
}
......
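The rewritten channel loop consumes four input channels per iteration with a `vec4` dot product, then the `filter_remainder_vec4` branches handle the one to three leftover channels. The split is precomputed on the JS side by `batchComputeConv2d` (added later in this commit); a worked example:

```js
// For 7 input channels: the vec4 loop covers channels 0..3,
// and the vec3 remainder branch covers channels 4..6.
const inChannels = 7;
const filterNearestVec4 = Math.floor(inChannels / 4) * 4; // 4
const filterRemainderVec4 = inChannels % 4;               // 3
console.log(filterNearestVec4, filterRemainderVec4);      // 4 3
```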
......@@ -54,4 +54,8 @@ export default `
// bias
uniform sampler2D texture_bias;
// merged channel computation
const int filter_nearest_vec4 = FILTER_NEAREST_VEC4;
const int filter_remainder_vec4 = FILTER_REMAINDER_VEC4;
`;
......@@ -10,23 +10,22 @@ export default `
int x = oPos.a;
int c = oPos.g;
int y = oPos.b;
int b = oPos.r;
float res = 0.0;
int temp_x = 0;
int temp_y = 0;
float o = 0.0;
float f = 0.0;
if (int(mod(float(x), 2.0)) == 1) x = x - 2;
if (int(mod(float(y), 2.0)) == 1) y = y - 2;
// get the output coordinates
int oTensorChannel = int(c * groups / channel_out) * channel_origin;
- int oy = y;
+ int oy = y - padTop;
for (int fy = 0; fy < height_shape_filter; fy++) {
if (oy < 0) {
oy += dilation_v;
continue;
}
- int ox = x;
+ int ox = x - padLeft;
for (int fx = 0; fx < width_shape_filter; fx++) {
if (ox < 0) {
......@@ -40,7 +39,7 @@ export default `
temp_y = int(floor(float(oy) / float(stride_v)));
if (temp_x < width_shape_origin && temp_y < height_shape_origin){
o = getValueFromTensorPosLIMIT_ORIGIN_origin(b, j, temp_y, temp_x);
- f = getValueFromTensorPosLIMIT_FILTER_filter(j, c, fy, fx);
+ f = getValueFromTensorPosLIMIT_FILTER_filter(j, c, height_shape_filter-1-fy, width_shape_filter-1-fx);
res += f * o;
}
}
......
......@@ -28,10 +28,9 @@ export default `
const int stride_h = int(STRIDES_X);
const int stride_v = int(STRIDES_Y);
// padding amounts
- //const int padLeft = width_shape_filter - PADDINGS_X - 1;
- //const int padTop = height_shape_filter - PADDINGS_Y - 1;
- const int padLeft = PADDINGS_X;
- const int padTop = PADDINGS_Y;
+ const int padLeft = WIDTH_SHAPE_FILTER - PADDINGS_X - 1;
+ const int padTop = HEIGHT_SHAPE_FILTER - PADDINGS_Y - 1;
// dilation factors
const int dilation_h = DILATIONS_X;
const int dilation_v = DILATIONS_Y;
......
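The padding change follows the usual correspondence between a transposed convolution and its equivalent direct convolution: sliding the flipped filter requires an effective padding of `kernel - 1 - pad` per axis, which also explains the flipped filter indices (`height_shape_filter-1-fy`, `width_shape_filter-1-fx`) in the main shader above. A quick check of the constant:

```js
// A 4-wide filter with padding 1 yields an effective left padding of 2.
const widthShapeFilter = 4;
const paddingsX = 1;
const padLeft = widthShapeFilter - paddingsX - 1;
console.log(padLeft); // 2
```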
/* eslint-disable */
/**
* @file fc config file
* @author zhangjingyuan02
*/
export default {
dep: [
{
func: 'getValueFromTensorPos',
conf: {
TENSOR_NAME: 'weight'
}
},
{
func: 'getValueFromTensorPos',
conf: {
TENSOR_NAME: 'origin'
}
},
{
func: 'getValueFromTensorPos',
conf: {
TENSOR_NAME: 'bias'
}
}
],
conf: [],
input: [
{
tensor: 'weight',
variable: 'texture',
setter: 'initTexture',
type: 'texture'
},
{
tensor: 'origin',
variable: 'texture',
setter: 'initTexture',
type: 'texture'
},
{
tensor: 'bias',
variable: 'texture',
setter: 'initTexture',
type: 'texture'
}
]
};
/* eslint-disable */
/**
* @file main function
* @author zhangjingyuan02
*/
export default `
// start function
void main(void) {
float res = 0.0;
ivec4 out_pos = getOutputTensorPosLIMIT_OUT();
float bias = getValueFromTensorPosLIMIT_BIAS_bias(out_pos.r, out_pos.g, out_pos.b, out_pos.a);
for (int j = 0; j < width_shape_origin; j++) {
float w = getValueFromTensorPosLIMIT_WEIGHT_weight(out_pos[0], out_pos[1], j, out_pos[3]);
float o = getValueFromTensorPosLIMIT_ORIGIN_origin(out_pos[0], out_pos[1], out_pos[2], j);
res += w * o;
}
res = res + bias;
setOutput(res);
}
`;
/* eslint-disable */
/**
* @file fc参数文件
*/
export default `
// fc input data
// constants
// input data
// weight
const int length_shape_weight = LENGTH_SHAPE_WEIGHT;
const int width_shape_weight = WIDTH_SHAPE_WEIGHT;
const int height_shape_weight = HEIGHT_SHAPE_WEIGHT;
const int width_texture_weight = WIDTH_TEXTURE_WEIGHT;
const int height_texture_weight = HEIGHT_TEXTURE_WEIGHT;
const int channel_weight = CHANNEL_WEIGHT;
// input
const int width_shape_origin = WIDTH_SHAPE_ORIGIN;
const int height_shape_origin = HEIGHT_SHAPE_ORIGIN;
const int length_shape_origin = LENGTH_SHAPE_ORIGIN;
const int width_texture_origin = WIDTH_TEXTURE_ORIGIN;
const int height_texture_origin = HEIGHT_TEXTURE_ORIGIN;
const int channel_origin = CHANNEL_ORIGIN;
// bias
const int width_shape_bias = WIDTH_SHAPE_BIAS;
const int height_shape_bias = HEIGHT_SHAPE_BIAS;
const int length_shape_bias = LENGTH_SHAPE_BIAS;
const int width_texture_bias = WIDTH_TEXTURE_BIAS;
const int height_texture_bias = HEIGHT_TEXTURE_BIAS;
const int channel_bias = CHANNEL_BIAS;
// uniform variables
// input data
uniform sampler2D texture_weight;
uniform sampler2D texture_origin;
uniform sampler2D texture_bias;
`;
......@@ -51,14 +51,16 @@ const tensorName = {
'scale': 'scale',
'bias': 'bias',
'mean': 'mean',
- 'variance': 'variance'
+ 'variance': 'variance',
+ 'w': 'weight'
};
// unique behavior
const opBehavior = {
conv2d: [
'needBatch',
'adaptPaddings',
- 'isApplySeparableConv'
+ 'isApplySeparableConv',
+ 'batchComputeConv2d'
],
conv2d_transpose: [
'needBatch',
......@@ -130,6 +132,10 @@ const opBehavior = {
scale: [
'needBatch'
],
fc: [
'flattenShape',
'needBatch'
]
};
const mergeType = 'conv2d-elementwise_add';
......@@ -419,6 +425,13 @@ export default class OpData {
});
}
batchComputeConv2d() {
let origin_shape_temp = this.input.Filter[0].shape;
let inChannels = origin_shape_temp[1];
this.attrs.filter_nearest_vec4 = Math.floor(inChannels / 4) * 4;
this.attrs.filter_remainder_vec4 = inChannels % 4;
}
setPacked(tensorData = []) {
const isPacked = this.attrs.ispacked;
tensorData.forEach(item => {
......@@ -539,15 +552,26 @@ export default class OpData {
}
}
flattenShape(tensorData = []) {
const target = tensorData.find(item => item.shape.length > 2);
if (target) {
const padShape = Utils.padToFourDimShape(target.shape);
target.shape = [padShape[0] * padShape[2], padShape[1] * padShape[3]];
}
}
reshape(tensorData = []) {
- let input = tensorData[0];
- let counter = tensorData[1];
+ let input = tensorData.find(item => item.tensorName === 'origin');
+ let counter = tensorData.find(item => item.tensorName === 'counter');
+ const out = tensorData.find(item => item.tensorName === 'out' || item.tensorName === 'output');
if (counter.shape.length > input.shape.length) {
- input = tensorData[1];
- counter = tensorData[0];
+ // swap so that input always refers to the higher-rank tensor
+ [input, counter] = [counter, input];
}
if (input.shape.length > 2 && counter.shape.length === 2) {
- let shape = Utils.getReshapeInPaddle(input.shape, counter.shape, tensorData[2].shape);
+ let shape = Utils.getReshapeInPaddle(input.shape, counter.shape, out.shape);
input.shape = shape;
}
......
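The new `flattenShape` behavior collapses any tensor of rank greater than 2 into a 2-D shape before the fc shader runs. Assuming `Utils.padToFourDimShape` left-pads a shape with 1s up to four dimensions (an inference from its name; the helper is not shown in this diff), the fc test input flattens as follows:

```js
// [4, 1, 1] -> padded to [1, 4, 1, 1] (assumed) ->
// [padShape[0] * padShape[2], padShape[1] * padShape[3]] = [1, 4]
const padShape = [1, 4, 1, 1]; // assumed output of Utils.padToFourDimShape([4, 1, 1])
const flat = [padShape[0] * padShape[2], padShape[1] * padShape[3]];
console.log(flat); // [1, 4]
```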
......@@ -153,6 +153,13 @@ export default {
height *= 4;
width = c * (Math.ceil(w / 4));
exceedMax = true;
if (height > GPU_TEXTURE_MAX_SIZE || width > GPU_TEXTURE_MAX_SIZE) {
const requested = `[${width}x${height}]`;
const max = `[${GPU_TEXTURE_MAX_SIZE}x${GPU_TEXTURE_MAX_SIZE}]`;
throw new Error(
'Requested texture size ' + requested +
' greater than WebGL maximum on this browser / GPU ' + max + '.');
}
}
if (isPacked) {
// packed layout
......
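The new guard fails fast instead of letting WebGL clamp or reject an oversized texture later. A sketch of when it would fire, assuming a typical 4096 limit (the real `GPU_TEXTURE_MAX_SIZE` depends on the browser and GPU):

```js
// e.g. c = 2048 channels and w = 10: width = 2048 * Math.ceil(10 / 4) = 6144 > 4096.
const GPU_TEXTURE_MAX_SIZE = 4096; // illustrative limit
const c = 2048;
const w = 10;
const width = c * Math.ceil(w / 4);
if (width > GPU_TEXTURE_MAX_SIZE) {
    console.log(`requested width ${width} exceeds the ${GPU_TEXTURE_MAX_SIZE} limit`);
}
```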
{
"ops": [
{
"attrs": {
"__@kernel_type_attr@__": "fc/def/4/1/1",
"force_fp32_output": false,
"in_num_col_dims": 1,
"op_device": "",
"scale_out": 1.0,
"scale_x": 1.0,
"scale_y": [
1.0
],
"use_mkldnn": false,
"x_num_col_dims": 1,
"y_num_col_dims": 1
},
"inputs": {
"Bias": [
"fc10_offset"
],
"Input": [
"pool2d_0.tmp_0"
],
"W": [
"fc10_weights"
]
},
"outputs": {
"Out": [
"fc_0.tmp_1"
]
},
"type": "fc"
}
],
"vars": [
{
"data": [
1, 2, 3,
4, 5, 6,
7, 8, 9,
10, 11, 12
],
"name": "fc10_weights",
"persistable": 0,
"shape": [4, 3]
},
{
"data": [2, 3, 4, 5],
"name": "pool2d_0.tmp_0",
"persistable": 0,
"shape": [4, 1, 1]
},
{
"data": [1, 3, -1],
"name": "fc10_offset",
"persistable": 0,
"shape": [1, 3]
},
{
"data": [93, 109, 119],
"name": "fc_0.tmp_1",
"shape": [1, 3]
}
]
}
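The expected values in this fixture can be verified by hand, since fc computes `out = x · W + bias`. A reference sketch using the fixture's own numbers:

```js
// W is 4x3 (row-major from the fixture), x has 4 elements, bias has 3.
const W = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]];
const x = [2, 3, 4, 5];
const bias = [1, 3, -1];
const out = bias.map((b, col) =>
    x.reduce((acc, xi, row) => acc + xi * W[row][col], b));
console.log(out); // [93, 109, 119], matching fc_0.tmp_1
```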
import Graph from '../../src/graph/graph';
import GraphExecutor from '../../src/executor/executor';
import opInfo from '../../test/data/model.test.fc.json';
import Utils from '../../src/utils/utils';
import {webgl} from './common';
import {nchwShape2nhwcShape, getOutputShape, deepCopy} from './common/utils';
const modelType = 'fc';
const output = deepCopy(opInfo);
const expected = output.vars.find(item => item.name === 'fc_0.tmp_1').data;
const op = opInfo.ops[0];
const graphExecutor = new GraphExecutor(op);
const graph = new Graph({
options: {
test: true,
gl: webgl
}
});
graph.data = opInfo;
graph.buildOpData(graphExecutor);
async function run() {
graph.execute_(graphExecutor);
let result = await graph.inst.read();
// convert the NHWC output back to NCHW
const outputNCHWShape = getOutputShape(output, modelType);
const outputNHWCShape = nchwShape2nhwcShape(outputNCHWShape);
let nchwResult = Utils.nhwc2nchw(result, outputNHWCShape);
const formatData = Utils.formatReadData(nchwResult, outputNCHWShape);
console.log(formatData);
expect(JSON.stringify(formatData)).toBe(JSON.stringify(expected));
}
test('test op fc ==============>', async () => {
await run();
});
......@@ -22,7 +22,7 @@ const unitPath = {
'split': 'model.test.split.json'
};
// specify the op to run
- const modelType = 'conv2d';
+ const modelType = 'conv2d_transpose';
// specify the op to run
const unitData = unitPath[modelType];
......@@ -51,6 +51,8 @@ async function run() {
const type = op.type;
if (type !== 'feed' && type !== 'fetch') {
console.log(op.type);
console.log("this is standard output:");
console.log(op.outputs.Output);
model.graph.buildOpData(op);
}
});
......
# PaddleJS Model Converter
The PaddleJS model converter is a model transformation tool for PaddleJS. It converts a PaddlePaddle (fluid) model into a PaddleJS model, a browser-friendly format that PaddleJS loads for inference in the browser. The converter also provides powerful model-optimization capabilities to help developers optimize the model structure and improve runtime performance.
## 1. Tutorial
### 1.1. Environment Construction
#### Python Version
Confirm that the Python environment and version on the target platform meet the requirements. If you use Python 3, you may need to change `python` to `python3` in the subsequent commands:
- Python3: 3.5.1+ / 3.6 / 3.7
- Python2: 2.7.15+
#### Install Virtual Environment
*A development environment may have multiple versions of Python installed, each with different versions of the dependency packages. To avoid conflicts, it is strongly recommended to run the converter's commands inside a Python virtual environment. If you do not want to use a virtual environment, or already have one set up, you can skip this step.*
Take Anaconda as an example:
Go to the [Anaconda](https://www.anaconda.com/) main page, choose the Anaconda build matching your platform and Python version, and install it following the official instructions;
After installation, execute the following command on the command line to create a python virtual environment:
``` bash
conda create --name <your_env_name>
```
Execute the following command to switch to the virtual environment:
``` bash
# Linux or macOS
source activate <your_env_name>
# Windows
activate <your_env_name>
```
#### Install Dependencies
- If you do `not` need the model-optimization capability, run:
``` bash
python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
```
- If you `do` need the model-optimization capability, run:
``` bash
python -m pip install paddlepaddle paddlelite==2.6.0 -i https://mirror.baidu.com/pypi/simple
```
### 1.2. Getting Started
- If the fluid model to be converted uses a `merged parameter file` (one model file paired with one parameter file):
``` bash
python convertToPaddleJSModel.py --modelPath=<fluid_model_file_path> --paramPath=<fluid_param_file_path> --outputDir=<paddlejs_model_directory>
```
- If the fluid model to be converted uses `sliced parameter files` (one model file paired with multiple parameter files):
``` bash
# Note: when invoking the converter this way, the model file inside inputDir must be named '__model__'
python convertToPaddleJSModel.py --inputDir=<fluid_model_directory> --outputDir=<paddlejs_model_directory>
```
The model converter generates the following two types of files for PaddleJS:
- model.json (the model structure and parameter manifest)
- chunk_\*.dat (the set of binary parameter files)
## 2. Detailed Documentation
Parameter | Description
:-: | :-:
--inputDir | Directory of the fluid model; use it if and only if the parameters are sliced files. `modelPath` and `paramPath` are then ignored, and the model file must be named `__model__`
--modelPath | Path to the fluid model file; use it with a merged parameter file
--paramPath | Path to the fluid parameter file; use it with a merged parameter file
--outputDir | `Required`, output directory for the PaddleJS model
--optimize | Whether to optimize the model: `0` off, `1` on (requires PaddleLite); off by default
--logModelInfo | Whether to print the model structure info: `0` no, `1` yes; off by default
--sliceDataSize | Size of each sliced PaddleJS parameter file, in KB; default 4096
## 3. Other information
If the model to be converted is in `TensorFlow/Caffe/ONNX` format, use the `X2Paddle` tool from the PaddlePaddle project to convert it into a fluid model first, then use this tool to convert it into a PaddleJS model.
For details, see the [X2Paddle project](https://github.com/PaddlePaddle/X2Paddle).
......@@ -30,7 +30,7 @@ sliceDataSize = 4 * 1024
# PaddlePaddle runtime program instance
program = None
# holds the model structure
- modelInfo = {"vars": [], "ops": []}
+ modelInfo = {"vars": [], "ops": [], "chunkNum": 0}
# holds the parameter values (unsorted)
paramValuesDict = {}
......@@ -210,6 +210,15 @@ def organizeModelOpInfo():
index += 1
print("Organizing model operators info successfully.")
def addChunkNumToJson(paramValueList):
totalParamValuesCount = len(paramValueList)
countPerSlice = int(sliceDataSize * 1024 / 4)
count = totalParamValuesCount / countPerSlice
modelInfo["chunkNum"] = math.ceil(count)
print("Model chunkNum set successfully.")
def convertToPaddleJSModel():
""" 转换fluid modle为paddleJS model """
# 初始化fluid运行环境和配置
......@@ -224,12 +233,14 @@ def convertToPaddleJSModel():
# Collect all vars in the program, add them to model info in alphabetical order, and read the parameter values at the same time
organizeModelVariableInfo()
+ # Sort the parameter-value dict by key (parameter name) alphabetically and concatenate the values
+ paramValues = reorderParamsValue()
+ # record the chunk settings in model.json
+ addChunkNumToJson(paramValues)
# dump the model structure to a JSON file
dumpModelToJsonFile()
- # Sort the parameter-value dict by key (parameter name) alphabetically and concatenate the values
- paramValues = reorderParamsValue()
# dump the sliced parameter files
sliceDataToBinaryFile(paramValues)
......
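`addChunkNumToJson` above assumes float32 parameters, i.e. 4 bytes per value. With the default `sliceDataSize` of 4096 KB, each chunk holds 4096 * 1024 / 4 = 1,048,576 values, so for example 3,000,000 parameter values produce 3 chunks. The same arithmetic as a quick sketch (mirroring the Python in JavaScript for illustration):

```js
// 4096 KB per chunk / 4 bytes per float32 value = 1,048,576 values per chunk.
const sliceDataSizeKB = 4096;
const countPerSlice = sliceDataSizeKB * 1024 / 4;
const totalParamValues = 3000000; // illustrative
const chunkNum = Math.ceil(totalParamValues / countPerSlice);
console.log(countPerSlice, chunkNum); // 1048576 3
```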
......@@ -82,7 +82,7 @@ if __name__ == "__main__":
print("enableLogModelInfo: " + str(enableLogModelInfo))
print("sliceDataSize:" + str(sliceDataSize))
pythonCmd = "python"
pythonCmd = "python3"
print("Starting...")
if enableOptimization:
......