From 788a53efbce52b107b98d096fbef656c1c39ccc1 Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Mon, 15 Jul 2019 17:35:21 +0800 Subject: [PATCH] remove old codes --- AUTHORS.md | 7 - README.md | 24 - caffe2fluid/.gitignore | 1 - caffe2fluid/README.md | 103 - caffe2fluid/README_en.md | 118 - caffe2fluid/convert.py | 85 - caffe2fluid/doc/Accuracy.md | 39 - caffe2fluid/doc/ArgMax.md | 31 - caffe2fluid/doc/BatchNorm.md | 44 - caffe2fluid/doc/Convolution.md | 76 - caffe2fluid/doc/Crop.md | 65 - caffe2fluid/doc/Deconvolution.md | 76 - caffe2fluid/doc/Dropout.md | 41 - caffe2fluid/doc/Eltwise.md | 66 - caffe2fluid/doc/EuclideanLoss.md | 47 - caffe2fluid/doc/Exp.md | 40 - caffe2fluid/doc/Flatten.md | 39 - caffe2fluid/doc/InnerProduct.md | 71 - caffe2fluid/doc/Input.md | 72 - caffe2fluid/doc/LRN.md | 44 - caffe2fluid/doc/Log.md | 40 - caffe2fluid/doc/Pooling.md | 89 - caffe2fluid/doc/Power.md | 34 - caffe2fluid/doc/ReadMe.md | 39 - caffe2fluid/doc/Reduction.md | 72 - caffe2fluid/doc/Reshape.md | 93 - caffe2fluid/doc/SigmoidCrossEntropyLoss.md | 37 - caffe2fluid/doc/Slice.md | 67 - caffe2fluid/doc/Sofmax.md | 30 - caffe2fluid/doc/SofmaxWithLoss.md | 89 - caffe2fluid/doc/Tile.md | 31 - caffe2fluid/examples/imagenet/README.md | 41 - caffe2fluid/examples/imagenet/compare.py | 103 - caffe2fluid/examples/imagenet/data/65.jpeg | Bin 109527 -> 0 bytes caffe2fluid/examples/imagenet/infer.py | 328 - caffe2fluid/examples/imagenet/tools/cmp.sh | 24 - .../examples/imagenet/tools/cmp_layers.sh | 48 - caffe2fluid/examples/imagenet/tools/diff.sh | 86 - caffe2fluid/examples/imagenet/tools/run.sh | 86 - caffe2fluid/examples/imagenet/tools/test.sh | 12 - caffe2fluid/examples/mnist/README.md | 10 - caffe2fluid/examples/mnist/evaluate.py | 83 - caffe2fluid/examples/mnist/run.sh | 75 - caffe2fluid/kaffe/__init__.py | 5 - caffe2fluid/kaffe/caffe/__init__.py | 1 - caffe2fluid/kaffe/caffe/resolver.py | 71 - caffe2fluid/kaffe/custom_layers/__init__.py | 115 - caffe2fluid/kaffe/custom_layers/argmax.py | 73 - caffe2fluid/kaffe/custom_layers/axpy.py | 51 - caffe2fluid/kaffe/custom_layers/crop.py | 77 - .../kaffe/custom_layers/detection_out.py | 79 - caffe2fluid/kaffe/custom_layers/flatten.py | 67 - caffe2fluid/kaffe/custom_layers/normalize.py | 56 - caffe2fluid/kaffe/custom_layers/permute.py | 40 - caffe2fluid/kaffe/custom_layers/power.py | 40 - caffe2fluid/kaffe/custom_layers/priorbox.py | 103 - caffe2fluid/kaffe/custom_layers/reduction.py | 67 - caffe2fluid/kaffe/custom_layers/register.py | 37 - caffe2fluid/kaffe/custom_layers/reshape.py | 94 - caffe2fluid/kaffe/custom_layers/roipooling.py | 53 - caffe2fluid/kaffe/custom_layers/select.py | 67 - caffe2fluid/kaffe/errors.py | 34 - caffe2fluid/kaffe/graph.py | 371 -- caffe2fluid/kaffe/layers.py | 250 - caffe2fluid/kaffe/net_template.py | 160 - caffe2fluid/kaffe/paddle/__init__.py | 2 - caffe2fluid/kaffe/paddle/network.py | 587 -- caffe2fluid/kaffe/paddle/transformer.py | 398 -- caffe2fluid/kaffe/protobuf_to_dict.py | 185 - caffe2fluid/kaffe/shapes.py | 163 - caffe2fluid/kaffe/transformers.py | 414 -- caffe2fluid/prepare.md | 43 - caffe2fluid/prepare_en.md | 35 - caffe2fluid/proto/caffe.proto | 1411 ---- caffe2fluid/proto/caffe_pb2.py | 5883 ----------------- caffe2fluid/proto/compile.sh | 24 - onnx2fluid/.gitignore | 60 - onnx2fluid/README.md | 87 - onnx2fluid/README_en.md | 85 - onnx2fluid/examples/convert_data_npz.py | 48 - onnx2fluid/examples/convert_data_pb.py | 64 - onnx2fluid/examples/gen_some_samples.py | 297 - onnx2fluid/examples/gen_unet.py | 137 - 
onnx2fluid/examples/gen_yolov2.py | 270 - onnx2fluid/examples/onnx_model_zoo.sh | 634 -- onnx2fluid/onnx2fluid/__init__.py | 0 onnx2fluid/onnx2fluid/__main__.py | 114 - onnx2fluid/onnx2fluid/cmdline.py | 136 - onnx2fluid/onnx2fluid/conversion.py | 347 - onnx2fluid/onnx2fluid/framework_pb2.py | 1726 ----- onnx2fluid/onnx2fluid/onnx_utils.py | 712 -- onnx2fluid/onnx2fluid/symbolic.py | 2615 -------- onnx2fluid/onnx2fluid/torch_export_helper.py | 185 - onnx2fluid/onnx2fluid/validation.py | 278 - onnx2fluid/onnx2fluid/writer.py | 503 -- onnx2fluid/requirements.txt | 3 - onnx2fluid/setup.cfg | 75 - onnx2fluid/setup.py | 17 - tensorflow2fluid/README.md | 136 - tensorflow2fluid/doc/ReadMe.md | 148 - tensorflow2fluid/doc/compare_op.md | 11 - tensorflow2fluid/doc/tf.case.md | 55 - .../doc/tf.clip_by_global_norm.md | 49 - tensorflow2fluid/doc/tf.clip_by_norm.md | 27 - .../doc/tf.contrib.layers.flatten.md | 37 - tensorflow2fluid/doc/tf.expand_dims.md | 41 - .../doc/tf.image.non_max_suppression.md | 56 - .../doc/tf.image.resize_images.md | 40 - tensorflow2fluid/doc/tf.layers.conv2d.md | 87 - tensorflow2fluid/doc/tf.layers.dense.md | 65 - .../doc/tf.losses.mean_and_squared_error.md | 28 - .../doc/tf.losses.sigmoid_cross_entropy.md | 56 - tensorflow2fluid/doc/tf.math.is_finite.md | 33 - tensorflow2fluid/doc/tf.math.rsqrt.md | 26 - tensorflow2fluid/doc/tf.matmul.md | 62 - tensorflow2fluid/doc/tf.nn.avg_pool.md | 59 - .../doc/tf.nn.bidirectional_dynamic_rnn.md | 74 - tensorflow2fluid/doc/tf.nn.conv2d.md | 51 - .../doc/tf.nn.conv2d_transpose.md | 95 - .../doc/tf.nn.conv3d_transpose.md | 95 - .../doc/tf.nn.depthwise_conv2d.md | 87 - tensorflow2fluid/doc/tf.nn.dropout.md | 49 - tensorflow2fluid/doc/tf.nn.dynamic_rnn.md | 78 - tensorflow2fluid/doc/tf.nn.l2_normalize.md | 40 - tensorflow2fluid/doc/tf.nn.lrn.md | 40 - tensorflow2fluid/doc/tf.nn.max_pool.md | 54 - .../doc/tf.nn.reduce_logsumexp.md | 29 - tensorflow2fluid/doc/tf.nn.rnn.GRUCell.md | 83 - .../doc/tf.nn.rnn_cell.LSTMCell.md | 88 - .../doc/tf.nn.rnn_cell.MultiRNNCell.md | 59 - .../doc/tf.nn.separable_conv2d.md | 33 - ...tf.nn.softmax_cross_entropy_with_logits.md | 49 - tensorflow2fluid/doc/tf.nn.top_k.md | 34 - tensorflow2fluid/doc/tf.one_hot.md | 40 - tensorflow2fluid/doc/tf.pad.md | 36 - tensorflow2fluid/doc/tf.placeholder.md | 38 - tensorflow2fluid/doc/tf.pow.md | 36 - tensorflow2fluid/doc/tf.print.md | 50 - tensorflow2fluid/doc/tf.reshape.md | 39 - tensorflow2fluid/doc/tf.reverse_sequence.md | 47 - tensorflow2fluid/doc/tf.scatter_update.md | 47 - tensorflow2fluid/doc/tf.slice.md | 42 - tensorflow2fluid/doc/tf.split.md | 42 - tensorflow2fluid/doc/tf.squared_difference.md | 27 - tensorflow2fluid/doc/tf.stop_gradient.md | 17 - tensorflow2fluid/doc/tf.while_loop.md | 56 - tensorflow2fluid/tf2fluid/__init__.py | 0 tensorflow2fluid/tf2fluid/convert.py | 148 - tensorflow2fluid/tf2fluid/framework_pb2.py | 1165 ---- tensorflow2fluid/tf2fluid/graph.py | 117 - tensorflow2fluid/tf2fluid/model_loader.py | 52 - tensorflow2fluid/tf2fluid/paddle_emitter.py | 1080 --- tensorflow2fluid/tf2fluid/tensorflow_graph.py | 162 - .../tf2fluid/tensorflow_parser.py | 274 - tensorflow2fluid/tf2fluid/utils.py | 115 - tensorflow2fluid/vgg_translate_tutorial.ipynb | 396 -- 156 files changed, 28135 deletions(-) delete mode 100644 AUTHORS.md delete mode 100644 caffe2fluid/.gitignore delete mode 100644 caffe2fluid/README.md delete mode 100644 caffe2fluid/README_en.md delete mode 100755 caffe2fluid/convert.py delete mode 100644 caffe2fluid/doc/Accuracy.md delete mode 100644 
caffe2fluid/doc/ArgMax.md delete mode 100644 caffe2fluid/doc/BatchNorm.md delete mode 100644 caffe2fluid/doc/Convolution.md delete mode 100644 caffe2fluid/doc/Crop.md delete mode 100644 caffe2fluid/doc/Deconvolution.md delete mode 100644 caffe2fluid/doc/Dropout.md delete mode 100644 caffe2fluid/doc/Eltwise.md delete mode 100644 caffe2fluid/doc/EuclideanLoss.md delete mode 100644 caffe2fluid/doc/Exp.md delete mode 100644 caffe2fluid/doc/Flatten.md delete mode 100644 caffe2fluid/doc/InnerProduct.md delete mode 100644 caffe2fluid/doc/Input.md delete mode 100644 caffe2fluid/doc/LRN.md delete mode 100644 caffe2fluid/doc/Log.md delete mode 100644 caffe2fluid/doc/Pooling.md delete mode 100644 caffe2fluid/doc/Power.md delete mode 100644 caffe2fluid/doc/ReadMe.md delete mode 100644 caffe2fluid/doc/Reduction.md delete mode 100644 caffe2fluid/doc/Reshape.md delete mode 100644 caffe2fluid/doc/SigmoidCrossEntropyLoss.md delete mode 100644 caffe2fluid/doc/Slice.md delete mode 100644 caffe2fluid/doc/Sofmax.md delete mode 100644 caffe2fluid/doc/SofmaxWithLoss.md delete mode 100644 caffe2fluid/doc/Tile.md delete mode 100644 caffe2fluid/examples/imagenet/README.md delete mode 100644 caffe2fluid/examples/imagenet/compare.py delete mode 100644 caffe2fluid/examples/imagenet/data/65.jpeg delete mode 100644 caffe2fluid/examples/imagenet/infer.py delete mode 100755 caffe2fluid/examples/imagenet/tools/cmp.sh delete mode 100755 caffe2fluid/examples/imagenet/tools/cmp_layers.sh delete mode 100755 caffe2fluid/examples/imagenet/tools/diff.sh delete mode 100755 caffe2fluid/examples/imagenet/tools/run.sh delete mode 100755 caffe2fluid/examples/imagenet/tools/test.sh delete mode 100644 caffe2fluid/examples/mnist/README.md delete mode 100644 caffe2fluid/examples/mnist/evaluate.py delete mode 100755 caffe2fluid/examples/mnist/run.sh delete mode 100644 caffe2fluid/kaffe/__init__.py delete mode 100644 caffe2fluid/kaffe/caffe/__init__.py delete mode 100644 caffe2fluid/kaffe/caffe/resolver.py delete mode 100644 caffe2fluid/kaffe/custom_layers/__init__.py delete mode 100644 caffe2fluid/kaffe/custom_layers/argmax.py delete mode 100644 caffe2fluid/kaffe/custom_layers/axpy.py delete mode 100644 caffe2fluid/kaffe/custom_layers/crop.py delete mode 100644 caffe2fluid/kaffe/custom_layers/detection_out.py delete mode 100644 caffe2fluid/kaffe/custom_layers/flatten.py delete mode 100644 caffe2fluid/kaffe/custom_layers/normalize.py delete mode 100644 caffe2fluid/kaffe/custom_layers/permute.py delete mode 100644 caffe2fluid/kaffe/custom_layers/power.py delete mode 100644 caffe2fluid/kaffe/custom_layers/priorbox.py delete mode 100644 caffe2fluid/kaffe/custom_layers/reduction.py delete mode 100644 caffe2fluid/kaffe/custom_layers/register.py delete mode 100644 caffe2fluid/kaffe/custom_layers/reshape.py delete mode 100644 caffe2fluid/kaffe/custom_layers/roipooling.py delete mode 100644 caffe2fluid/kaffe/custom_layers/select.py delete mode 100644 caffe2fluid/kaffe/errors.py delete mode 100644 caffe2fluid/kaffe/graph.py delete mode 100644 caffe2fluid/kaffe/layers.py delete mode 100644 caffe2fluid/kaffe/net_template.py delete mode 100644 caffe2fluid/kaffe/paddle/__init__.py delete mode 100644 caffe2fluid/kaffe/paddle/network.py delete mode 100644 caffe2fluid/kaffe/paddle/transformer.py delete mode 100644 caffe2fluid/kaffe/protobuf_to_dict.py delete mode 100644 caffe2fluid/kaffe/shapes.py delete mode 100644 caffe2fluid/kaffe/transformers.py delete mode 100644 caffe2fluid/prepare.md delete mode 100644 caffe2fluid/prepare_en.md delete mode 100644 
caffe2fluid/proto/caffe.proto delete mode 100644 caffe2fluid/proto/caffe_pb2.py delete mode 100755 caffe2fluid/proto/compile.sh delete mode 100644 onnx2fluid/.gitignore delete mode 100644 onnx2fluid/README.md delete mode 100644 onnx2fluid/README_en.md delete mode 100644 onnx2fluid/examples/convert_data_npz.py delete mode 100644 onnx2fluid/examples/convert_data_pb.py delete mode 100644 onnx2fluid/examples/gen_some_samples.py delete mode 100644 onnx2fluid/examples/gen_unet.py delete mode 100644 onnx2fluid/examples/gen_yolov2.py delete mode 100755 onnx2fluid/examples/onnx_model_zoo.sh delete mode 100644 onnx2fluid/onnx2fluid/__init__.py delete mode 100644 onnx2fluid/onnx2fluid/__main__.py delete mode 100644 onnx2fluid/onnx2fluid/cmdline.py delete mode 100644 onnx2fluid/onnx2fluid/conversion.py delete mode 100644 onnx2fluid/onnx2fluid/framework_pb2.py delete mode 100644 onnx2fluid/onnx2fluid/onnx_utils.py delete mode 100644 onnx2fluid/onnx2fluid/symbolic.py delete mode 100644 onnx2fluid/onnx2fluid/torch_export_helper.py delete mode 100644 onnx2fluid/onnx2fluid/validation.py delete mode 100644 onnx2fluid/onnx2fluid/writer.py delete mode 100644 onnx2fluid/requirements.txt delete mode 100644 onnx2fluid/setup.cfg delete mode 100755 onnx2fluid/setup.py delete mode 100644 tensorflow2fluid/README.md delete mode 100644 tensorflow2fluid/doc/ReadMe.md delete mode 100644 tensorflow2fluid/doc/compare_op.md delete mode 100644 tensorflow2fluid/doc/tf.case.md delete mode 100644 tensorflow2fluid/doc/tf.clip_by_global_norm.md delete mode 100644 tensorflow2fluid/doc/tf.clip_by_norm.md delete mode 100644 tensorflow2fluid/doc/tf.contrib.layers.flatten.md delete mode 100644 tensorflow2fluid/doc/tf.expand_dims.md delete mode 100644 tensorflow2fluid/doc/tf.image.non_max_suppression.md delete mode 100644 tensorflow2fluid/doc/tf.image.resize_images.md delete mode 100644 tensorflow2fluid/doc/tf.layers.conv2d.md delete mode 100644 tensorflow2fluid/doc/tf.layers.dense.md delete mode 100644 tensorflow2fluid/doc/tf.losses.mean_and_squared_error.md delete mode 100644 tensorflow2fluid/doc/tf.losses.sigmoid_cross_entropy.md delete mode 100644 tensorflow2fluid/doc/tf.math.is_finite.md delete mode 100644 tensorflow2fluid/doc/tf.math.rsqrt.md delete mode 100644 tensorflow2fluid/doc/tf.matmul.md delete mode 100644 tensorflow2fluid/doc/tf.nn.avg_pool.md delete mode 100644 tensorflow2fluid/doc/tf.nn.bidirectional_dynamic_rnn.md delete mode 100644 tensorflow2fluid/doc/tf.nn.conv2d.md delete mode 100644 tensorflow2fluid/doc/tf.nn.conv2d_transpose.md delete mode 100644 tensorflow2fluid/doc/tf.nn.conv3d_transpose.md delete mode 100644 tensorflow2fluid/doc/tf.nn.depthwise_conv2d.md delete mode 100644 tensorflow2fluid/doc/tf.nn.dropout.md delete mode 100644 tensorflow2fluid/doc/tf.nn.dynamic_rnn.md delete mode 100644 tensorflow2fluid/doc/tf.nn.l2_normalize.md delete mode 100644 tensorflow2fluid/doc/tf.nn.lrn.md delete mode 100644 tensorflow2fluid/doc/tf.nn.max_pool.md delete mode 100644 tensorflow2fluid/doc/tf.nn.reduce_logsumexp.md delete mode 100644 tensorflow2fluid/doc/tf.nn.rnn.GRUCell.md delete mode 100644 tensorflow2fluid/doc/tf.nn.rnn_cell.LSTMCell.md delete mode 100644 tensorflow2fluid/doc/tf.nn.rnn_cell.MultiRNNCell.md delete mode 100644 tensorflow2fluid/doc/tf.nn.separable_conv2d.md delete mode 100644 tensorflow2fluid/doc/tf.nn.softmax_cross_entropy_with_logits.md delete mode 100644 tensorflow2fluid/doc/tf.nn.top_k.md delete mode 100644 tensorflow2fluid/doc/tf.one_hot.md delete mode 100644 tensorflow2fluid/doc/tf.pad.md delete 
mode 100644 tensorflow2fluid/doc/tf.placeholder.md delete mode 100644 tensorflow2fluid/doc/tf.pow.md delete mode 100644 tensorflow2fluid/doc/tf.print.md delete mode 100644 tensorflow2fluid/doc/tf.reshape.md delete mode 100644 tensorflow2fluid/doc/tf.reverse_sequence.md delete mode 100644 tensorflow2fluid/doc/tf.scatter_update.md delete mode 100644 tensorflow2fluid/doc/tf.slice.md delete mode 100644 tensorflow2fluid/doc/tf.split.md delete mode 100644 tensorflow2fluid/doc/tf.squared_difference.md delete mode 100644 tensorflow2fluid/doc/tf.stop_gradient.md delete mode 100644 tensorflow2fluid/doc/tf.while_loop.md delete mode 100644 tensorflow2fluid/tf2fluid/__init__.py delete mode 100644 tensorflow2fluid/tf2fluid/convert.py delete mode 100644 tensorflow2fluid/tf2fluid/framework_pb2.py delete mode 100644 tensorflow2fluid/tf2fluid/graph.py delete mode 100644 tensorflow2fluid/tf2fluid/model_loader.py delete mode 100644 tensorflow2fluid/tf2fluid/paddle_emitter.py delete mode 100644 tensorflow2fluid/tf2fluid/tensorflow_graph.py delete mode 100644 tensorflow2fluid/tf2fluid/tensorflow_parser.py delete mode 100644 tensorflow2fluid/tf2fluid/utils.py delete mode 100644 tensorflow2fluid/vgg_translate_tutorial.ipynb diff --git a/AUTHORS.md b/AUTHORS.md deleted file mode 100644 index 4fd9d2b..0000000 --- a/AUTHORS.md +++ /dev/null @@ -1,7 +0,0 @@ -| Github account | name | -|---|---| -| jiangjiajun | Jia-Jun Jiang | -| walloollaw | Long Wang | -| Renwb1991 | Wen-Bin Ren | -| sunyanfang | Yan-Fang Sun | -| Macrobull | Nai-Rui Luo | diff --git a/README.md b/README.md index fadfbdd..e69de29 100644 --- a/README.md +++ b/README.md @@ -1,24 +0,0 @@ -# X2Paddle -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) -[![Version](https://img.shields.io/github/release/PaddlePaddle/X2Paddle.svg)](https://github.com/PaddlePaddle/X2Paddle/releases) - -# 简介 - -X2Paddle支持将Caffe和TensorFlow模型转至PaddlePaddle模型,同时我们目前维护了TensorFlow/Caffe与PaddlePaddle接口对比分析文档。 - -任何使用问题均可通过[ISSUE](https://github.com/PaddlePaddle/X2Paddle/issues)的方式及时反馈,或者也可直接通过pull request的方式一起更新代码和文档。 - -## [caffe2fluid](caffe2fluid) -1. 支持将Caffe模型转至PaddlePaddle fluid可加载预测模型 -2. 提供Caffe-PaddlePaddle常用API的对比文档[[doc](caffe2fluid/doc)] - -## [tensorflow2fluid](tensorflow2fluid) -1. 支持将TensorFlow模型转至PaddlePaddle fluid可加载预测模型 -2. 提供TensorFlow-PaddlePaddle常用API的对比文档[[doc](tensorflow2fluid/doc)] - -## [onnx2fluid](onnx2fluid) -1. 支持将ONNX模型转至PaddlePaddle fluid可加载预测模型 -2. PyTorch支持导出为ONNX模型,因此也可通过onnx2fluid支持PyTorch模型的转换 - -# 贡献代码 -clone代码至本地后,先运行`X2Paddle/commit-prepare.sh`配置代码提交环境 diff --git a/caffe2fluid/.gitignore b/caffe2fluid/.gitignore deleted file mode 100644 index 8064fed..0000000 --- a/caffe2fluid/.gitignore +++ /dev/null @@ -1 +0,0 @@ -proto/caffepb.py diff --git a/caffe2fluid/README.md b/caffe2fluid/README.md deleted file mode 100644 index c680269..0000000 --- a/caffe2fluid/README.md +++ /dev/null @@ -1,103 +0,0 @@ -# caffe2fluid -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) - -caffe2fluid用于将Caffe模型转换为PaddlePaddle模型,此外在[[doc](doc/ReadMe.md)]目录中整理了Caffe-PaddlePaddle的常用API对比分析。 - -## 环境依赖 - -> python >= 2.7 -> numpy -> protobuf >= 3.6.0 -> future - -**caffe2fluid的运行仅依赖上述条件** -但建议在环境中安装好Caffe和PaddlePaddle,便于转换模型后测试。环境安装可参考[安装文档](prepare.md)。 - -## 使用方法 - -### 模型转换 -1. 
Caffe模型转换为PaddlePaddle模型代码和参数文件(参数以numpy形式保存) - -``` -# --def_path : Caffe配置文件的保存路径 -# --caffemodel : Caffe模型的保存路径 -# --data-output-path : 转换后模型参数保存路径 -# --code-output-path : 转换后模型代码保存路径 -python convert.py --def_path alexnet.prototxt \ - --caffemodel alexnet.caffemodel \ - --data-output-path alexnet.npy \ - --code-output-path alexnet.py -``` - -2. 可通过如下方式,将模型网络结构和参数均序列化保存为PaddlePaddle框架支持加载的模型格式 -``` -# --model-param-path : 指定序列化后的模型保存路径 -python alexnet.py --npy_path alexnet.npy --model-param-path ./fluid_model -``` -或者也可在保存时,指定保存模型的输出 -``` -# 模型的输出为fc8和prob层 -python alexnet.py --npy_path alexnet.npy --model-param-path ./fluid --need-layers-name fc8,prob -``` -模型的加载及预测可参考PaddlePaddle官方文档[加载预测模型](http://www.paddlepaddle.org/documentation/docs/zh/1.3/api_guides/low_level/inference.html#id4) - -### 模型转换前后差异对比 -模型转换后,可通过如下方式,逐层对比转换后的模型与原模型的计算结果差异(**运行环境依赖Caffe和paddlepaddle**) -``` -# alexnet : Caffe配置文件(.prototxt)中“name”的值 -# ../../alexnet.prototxt : Caffe配置文件路径 -# ../../alexnet.caffemodel : Caffe模型文件路径 -# ../../alexnet.py : 转换后模型代码保存路径 -# ../../alexnet.npy : 转换后模型参数保存路径 -# ./data/65.jpeg : 需要测试的图像数据 -cd examples/imagenet -bash tools/diff.sh alexnet ../../alexnet.prototxt \ - ../../alexnet.caffemodel \ - ../../alexnet.py \ - ../../alexnet.npy \ - ./data/65.jpeg -``` - -## 自定义层转换 -在模型转换中遇到未支持的自定义层,用户可根据自己需要,添加代码实现自定义层,从而支持模型的完整转换,实现方式如下流程, -1. 在`kaffe/custom_layers`下实现自定义层,例如mylayer.py -> - 实现`shape_func(input_shape, [other_caffe_params])`,计算输出的大小 -> - 实现`layer_func(input_shape, [other_caffe_params])`,构造一个PaddlePaddle Fluid层 -> - 注册这两个函数 `register(kind=`MyType`, shape=shape_func, layer=layer_func)` -也可参考`kaffe/cusom_layers`下的其它自定义层实现 - -2. 添加`import mylayer`至`kaffe/custom_layers/__init__.py` - -3. 准备你的pycaffe作为你的定制版本(与以前的env准备相同) -> 选择一: -1. 编译你自己的caffe.proto来代替proto/caffe.proto -2. 修改./kaffe/caffe/resolver.py -```python -try: - # Try to import PyCaffe first - import caffe - self.caffe = caffe -except ImportError: - # Fall back to the protobuf implementation - self.caffepb = import_caffepb() - show_fallback_warning() -# 将上述代码替换为下列代码: -self.caffepb = import_caffepb() -show_fallback_warning() -``` - -> 选择二:更换你的pycaffe到特定的版本 - -4. 按照之前步骤,将Caffe模型转换为PaddlePaddle模型 - -5. 配置环境变量 -``` -export CAFFE2FLUID_CUSTOM_LAYERS=/path/to/caffe2fluid/kaffe -``` -## 模型测试 -caffe2fluid在如下模型上通过测试 -- [Lenet](https://github.com/ethereon/caffe-tensorflow/blob/master/examples/mnist) -- [ResNet(ResNet-50,ResNet-101,ResNet-152)](https://onedrive.live.com/?authkey=%21AAFW2-FVoxeVRck&id=4006CBB8476FF777%2117887&cid=4006CBB8476FF777) -- [GoogleNet](https://gist.github.com/jimmie33/7ea9f8ac0da259866b854460f4526034) -- [VGG](https://gist.github.com/ksimonyan/211839e770f7b538e2d8) -- [AlexNet](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) diff --git a/caffe2fluid/README_en.md b/caffe2fluid/README_en.md deleted file mode 100644 index 4c6c4a9..0000000 --- a/caffe2fluid/README_en.md +++ /dev/null @@ -1,118 +0,0 @@ -# caffe2fluid -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) - -This tool is used to convert a Caffe model to a Fluid model. In the [[doc](doc/ReadMe.md)] directory, the common APIs of Caffe-PaddlePaddle are compared and analyzed. - -## Prerequisites - -> python >= 2.7 -> numpy -> protobuf >= 3.6.0 -> future - -**The running process of caffe2fluid only relies on above conditions.** -It is recommended to install the Caffe and PaddlePaddle in the environment for testing after converting the model. 
For environmental installation, please refer to [Installation Documentation](prepare_en.md) - -## HowTo - -### Model Conversion -1. Convert the Caffe's model to the PaddlePaddle's model code and parameter file (The parameters are saved as the form of numpy). - -``` -# --def_path : The path of Caffe's configuration file -# --caffemodel : The save path of Caffe's model file -# --data-output-path : The save path of the model after converting -# --code-output-path : The save path of the model code after converting -python convert.py --def_path alexnet.prototxt \ - --caffemodel alexnet.caffemodel \ - --data-output-path alexnet.npy \ - --code-output-path alexnet.py -``` - -2. The model network structure and parameters can be serialized as the model format supported by the PaddlePaddle framework. -``` -# --model-param-path : The save path of PaddlePaddle's serialized model -python alexnet.py --npy_path alexnet.npy --model-param-path ./fluid_model -``` -Or you can specify the output of the saved model when saving. -``` -# The output of model is the fc8 layer and prob layer. -python alexnet.py --npy_path alexnet.npy --model-param-path ./fluid --need-layers-name fc8,prob -``` -Model loading and prediction can refer to the [official PaddlePaddle document](http://www.paddlepaddle.org/documentation/docs/en/1.3/api_guides/low_level/inference_en.html). - -### Comparison of differences before and after model conversion -After the model is converted, the difference between the converted model and the original model can be compared layer by layer (**the running environment depends on caffe and paddlepaddle**) -``` -# alexnet : The value of "name" in the Caffe's configuration file (.prototxt) -# ../../alexnet.prototxt : The path of Caffe's configuration file -# ../../alexnet.caffemodel : The save path of Caffe's model file -# ../../alexnet.py : The save path of the model after converting -# ../../alexnet.npy : The save path of the model code after converting -# ./data/65.jpeg : The path of image which is need to reference -cd examples/imagenet -bash tools/diff.sh alexnet ../../alexnet.prototxt \ - ../../alexnet.caffemodel \ - ../../alexnet.py \ - ../../alexnet.npy \ - ./data/65.jpeg -``` - - - -## How to convert custom layer -In the model conversion, when encounter an unsupported custom layer, users can add code to achieve a custom layer according to their needs. thus supporting the complete conversion of the model. The implementation is the following process. - -1. Implement your custom layer in a file under `kaffe/custom_layers`, eg: mylayer.py - - Implement ```shape_func(input_shape, [other_caffe_params])``` to calculate the output shape - - Implement ```layer_func(inputs, name, [other_caffe_params])``` to construct a fluid layer - - Register these two functions ```register(kind='MyType', shape=shape_func, layer=layer_func)``` - - Notes: more examples can be found in `kaffe/custom_layers` - -2. Add ```import mylayer``` to `kaffe/custom_layers/__init__.py` - -3. Prepare your pycaffe as your customized version(same as previous env prepare) - - (option1) - 1. replace `proto/caffe.proto` with your own caffe.proto and compile it - 2. 
modify the ./kaffe/caffe/resolver.py -```python -try: - # Try to import PyCaffe first - import caffe - self.caffe = caffe -except ImportError: - # Fall back to the protobuf implementation - self.caffepb = import_caffepb() - show_fallback_warning() -# replace the above code with: -self.caffepb = import_caffepb() -show_fallback_warning() -``` - - (option2) change your `pycaffe` to the customized version - -4. Convert the Caffe model to Fluid model - -5. Set env $CAFFE2FLUID_CUSTOM_LAYERS to the parent directory of 'custom_layers' - ``` - export CAFFE2FLUID_CUSTOM_LAYERS=/path/to/caffe2fluid/kaffe - ``` - -### Tested models -The caffe2fluid passed the test on the following model: -- Lenet: -[model addr](https://github.com/ethereon/caffe-tensorflow/blob/master/examples/mnist) - -- ResNets:(ResNet-50, ResNet-101, ResNet-152) -[model addr](https://onedrive.live.com/?authkey=%21AAFW2-FVoxeVRck&id=4006CBB8476FF777%2117887&cid=4006CBB8476FF777) - -- GoogleNet: -[model addr](https://gist.github.com/jimmie33/7ea9f8ac0da259866b854460f4526034) - -- VGG: -[model addr](https://gist.github.com/ksimonyan/211839e770f7b538e2d8) - -- AlexNet: -[model addr](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) - -### Notes -Some of this code come from here: [caffe-tensorflow](https://github.com/ethereon/caffe-tensorflow) diff --git a/caffe2fluid/convert.py b/caffe2fluid/convert.py deleted file mode 100755 index 145d365..0000000 --- a/caffe2fluid/convert.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -import numpy as np -import argparse - -from kaffe import KaffeError, print_stderr -from kaffe.paddle import Transformer - - -def fatal_error(msg): - """ fatal error encounted - """ - print_stderr(msg) - exit(-1) - - -def validate_arguments(args): - """ validate args - """ - if (args.data_output_path is not None) and (args.caffemodel is None): - fatal_error('No input data path provided.') - if (args.caffemodel is not None) and (args.data_output_path is None): - fatal_error('No output data path provided.') - if (args.code_output_path is None) and (args.data_output_path is None): - fatal_error('No output path specified.') - - -def convert(def_path, caffemodel_path, data_output_path, code_output_path, - phase): - """ convert caffe model to tf/paddle models - """ - try: - transformer = Transformer(def_path, caffemodel_path, phase=phase) - print_stderr('Converting data...') - if caffemodel_path is not None: - data = transformer.transform_data() - print_stderr('Saving data...') - with open(data_output_path, 'wb') as data_out: - np.save(data_out, data) - if code_output_path: - print_stderr('Saving source...') - s = sys.version - with open(code_output_path, 'wb') as src_out: - if s.startswith('2'): - src_out.write(transformer.transform_source()) - else: - src_out.write(str.encode(transformer.transform_source())) - print_stderr('set env variable before using converted model '\ - 'if used custom_layers:') - custom_pk_path = os.path.dirname(os.path.abspath(__file__)) - custom_pk_path = os.path.join(custom_pk_path, 'kaffe') - print_stderr('export CAFFE2FLUID_CUSTOM_LAYERS=%s' % (custom_pk_path)) - print_stderr('Done.') - return 0 - except KaffeError as err: - fatal_error('Error encountered: {}'.format(err)) - - return 1 - - -def main(): - """ main - """ - parser = argparse.ArgumentParser() - parser.add_argument('--def_path', help='Model definition (.prototxt) path') - parser.add_argument('--caffemodel', help='Model data (.caffemodel) path') - parser.add_argument('--data-output-path', 
help='Converted data output path') - parser.add_argument( - '--code-output-path', help='Save generated source to this path') - parser.add_argument( - '-p', - '--phase', - default='test', - help='The phase to convert: test (default) or train') - args = parser.parse_args() - validate_arguments(args) - return convert(args.def_path, args.caffemodel, args.data_output_path, - args.code_output_path, args.phase) - - -if __name__ == '__main__': - ret = main() - sys.exit(ret) diff --git a/caffe2fluid/doc/Accuracy.md b/caffe2fluid/doc/Accuracy.md deleted file mode 100644 index 8574fcd..0000000 --- a/caffe2fluid/doc/Accuracy.md +++ /dev/null @@ -1,39 +0,0 @@ -## Accuracy - - -### [Accuracy](http://caffe.berkeleyvision.org/tutorial/layers/accuracy.html) -``` -layer { - name: "accuracy" - type: "Accuracy" - bottom: "input" - bottom: "label" - top: "accuracy" - include { - phase: TEST - } -} -``` - - -### [paddle.fluid.layers.accuracy](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#accuracy) -```python -paddle.fluid.layers.accuracy( - input, - label, - k=1, - correct=None, - total=None -) -``` - -### 功能差异 -#### 计算机制 -Caffe:只能计算每个类别中top1中正确预测的个数; -PaddlePaddle:可以通过设置`k`来计算每个类别中top k 中正确预测的个数。 - - - - - - diff --git a/caffe2fluid/doc/ArgMax.md b/caffe2fluid/doc/ArgMax.md deleted file mode 100644 index f6718e0..0000000 --- a/caffe2fluid/doc/ArgMax.md +++ /dev/null @@ -1,31 +0,0 @@ -## ArgMax - - -### [ArgMax](http://caffe.berkeleyvision.org/tutorial/layers/argmax.html) -``` -layer { - name: "argmax" - type: "ArgMax" - bottom: "data" - top: "argmax" - argmax_param { - out_max_val: false - top_k: 1 - axis: 0 - } -} -``` - - -### [paddle.fluid.layers.argmax](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-214-argmax) -```python -paddle.fluid.layers.argmax( - x, - axis=0 -) -``` - -### 功能差异 -#### 计算机制 -Caffe:可通过`top_k`和`out_max_val`参数设置得到前`k`的索引或数值; -PaddlePaddle:只能输出最大值的索引; diff --git a/caffe2fluid/doc/BatchNorm.md b/caffe2fluid/doc/BatchNorm.md deleted file mode 100644 index 705b987..0000000 --- a/caffe2fluid/doc/BatchNorm.md +++ /dev/null @@ -1,44 +0,0 @@ -## BatchNorm - - -### [BatchNorm](http://caffe.berkeleyvision.org/tutorial/layers/batchnorm.html) -``` -layer { - name: "bn" - type: "BatchNorm" - bottom: "data" - top: "bn" - batch_norm_param { - use_global_stats: true - moving_average_fraction: 0.999 - eps: 0.00001 - } -} -``` - - -### [paddle.fluid.layers.batch_norm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-36-batch_norm) -```python -paddle.fluid.layers.batch_norm( - input, - act=None, - is_test=False, - momentum=0.9, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - data_layout='NCHW', - in_place=False, - name=None, - moving_mean_name=None, - moving_variance_name=None, - do_model_average_for_mean_and_var=False, - fuse_with_relu=False, - use_global_stats=False -) -``` - -### 功能差异 -#### 计算机制 -Caffe:`BatchNorm`仅做了归一化计算,需结合`Scale`层进行缩放变换; -PaddlePaddle:包括归一化计算和缩放变换,`param_attr`和`bias_attr`即为缩放变换的设置参数。 diff --git a/caffe2fluid/doc/Convolution.md b/caffe2fluid/doc/Convolution.md deleted file mode 100644 index 03cccbe..0000000 --- a/caffe2fluid/doc/Convolution.md +++ /dev/null @@ -1,76 +0,0 @@ -## Convolution - - -### [Convolution](http://caffe.berkeleyvision.org/tutorial/layers/convolution.html) -``` -layer { - name: "conv" - type: "Convolution" - bottom: "data" - top: "conv" - # 卷积核的局部学习率和权值衰减因子 - param { - lr_mult: 1 - decay_mult: 1 - } - # 偏置项的局部学习率和权值衰减因子 - param { - lr_mult: 2 - decay_mult: 0 - } - 
convolution_param { - num_output: 20 # 必填项 - kernel_size: 5 # 必填项 - stride: 1 - pad: 0 - group: 1 - bias_term: True - weight_filler { - type: "gaussian" - value: 0.01 - } - bias_filler { - type: "constant" - value: 0 - } - } -} -``` - - -### [paddle.fluid.layers.conv2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-45-conv2d) -```python -paddle.fluid.layers.conv2d( - input, - num_filters, - output_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None -) -``` - -### 功能差异 -#### 参数初始化 -Caffe:Layer定义中共有两个结构体`param`用于设置局部学习率和权值衰减因子,其中第一个用于设置卷积核,第二个则用于设置偏值项;卷积核和偏置项的初始化参数在`convolution_param`中进行设置;是否使用偏置项可以使用`bias_term`进行设置; -PaddlePaddle:卷积核和偏置项的参数分别使用`param_attr`和`bias_attr`进行配置,配置参数如下所示,此外将`bias_attr`直接设为`False`表示不使用偏置项。 -```python -paddle.fluid.ParamAttr( - name=None, - initializer=None, - learning_rate=1.0, - regularizer=None, - trainable=True, - gradient_clip=None, - do_model_average=False -) -``` -#### 空洞卷积 -Caffe:无法使用空洞卷积; -PaddlePaddle:使用`dilation`参数来设置空洞卷积。 diff --git a/caffe2fluid/doc/Crop.md b/caffe2fluid/doc/Crop.md deleted file mode 100644 index 8fc0c8a..0000000 --- a/caffe2fluid/doc/Crop.md +++ /dev/null @@ -1,65 +0,0 @@ -## Crop - - -### [Crop](http://caffe.berkeleyvision.org/tutorial/layers/crop.html) -``` -layer { - name: "crop" - type: "Crop" - bottom: "data1" - bottom: "data2" - top: “crop" - crop_param { - axis: 1 - offset: 0 - offset: 2 - } -} -``` - - -### [paddle.fluid.layers.crop](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-51-crop) -```python -paddle.fluid.layers.crop( - x, - shape=None, - offsets=None, - name=None -) -``` - -### 功能差异 -#### 输出大小 -Caffe:输入为`data1`,裁剪的输出大小与`data2`(Variable类型)一致; -PaddlePaddle:`shape`参数支持python list的方式传入输出大小,同时也支持`Variable`类型的输入。当`shape`为`Variable`类型时,用法与Caffe类似,裁剪输出大小与`shape`参数的大小一致。 - -#### 裁剪偏移量 -Caffe:只需要设置需要裁剪的维度的偏移量。 -PaddlePaddle:每一个维度需要设置偏移量。 -### 代码示例 -``` -# Caffe示例: -# data1 shape:(20,3,128,128) -# data2 shape:(20,2,64,64) -layer { - name: "crop" - type: "Crop" - bottom: "data1" - bottom: "data2" - top: ”crop" - crop_param { - axis: 1 - offset: 0 - offset: 25 - offset: 25 - } -} -# 输出shape:(20,2,64,64) -``` -```python -# PaddlePaddle示例: -# inputs1输入shape:(20,3,128,128) -output1 = fluid.layers.crop(x=inputs1, shape=inputs2, offsets=[0,0,25,25]) -# 输出shape:(20,2,64,64) -output = fluid.layers.crop(x=inputs1, shape=[20,2,64,64], offsets=[0,0,25,25]) -``` diff --git a/caffe2fluid/doc/Deconvolution.md b/caffe2fluid/doc/Deconvolution.md deleted file mode 100644 index 9a56561..0000000 --- a/caffe2fluid/doc/Deconvolution.md +++ /dev/null @@ -1,76 +0,0 @@ -## Deconvolution - - -### [Deconvolution](http://caffe.berkeleyvision.org/tutorial/layers/deconvolution.html) -``` -layer { - name: "deconv" - type: "Deconvolution" - bottom: "data" - top: "deconv" - # 卷积核的局部学习率和权值衰减因子 - param { - lr_mult: 1 - decay_mult: 1 - } - # 偏置项的局部学习率和权值衰减因子 - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 20 # 必填项 - kernel_size: 3 # 必填项 - stride: 1 - pad: 0 - group: 1 - bias_term: True - weight_filler { - type: "gaussian" - value: 0.01 - } - bias_filler { - type: "constant" - value: 0 - } - } -} -``` - - -### [paddle.fluid.layers.conv2d_transpose](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-46-conv2d_transpose) -```python -paddle.fluid.layers.conv2d_transpose( - input, - num_filters, - output_size, - stride=1, - padding=0, - dilation=1, - 
groups=None,
-    param_attr=None,
-    bias_attr=None,
-    use_cudnn=True,
-    act=None,
-    name=None
-)
-```
-
-### 功能差异
-#### 参数初始化
-Caffe:Layer定义中共有两个结构体`param`用于设置局部学习率和权值衰减因子,其中第一个用于设置卷积核,第二个则用于设置偏置项;卷积核和偏置项的初始化参数在`convolution_param`中进行设置;是否使用偏置项可以使用`bias_term`进行设置;
-PaddlePaddle:卷积核和偏置项的参数分别使用`param_attr`和`bias_attr`进行配置,配置参数如下所示,此外将`bias_attr`直接设为`False`表示不使用偏置项。
-```python
-paddle.fluid.ParamAttr(
-    name=None,
-    initializer=None,
-    learning_rate=1.0,
-    regularizer=None,
-    trainable=True,
-    gradient_clip=None,
-    do_model_average=False
-)
-```
-#### 空洞卷积
-Caffe:无法使用空洞卷积;
-PaddlePaddle:使用`dilation`参数来设置空洞卷积。
diff --git a/caffe2fluid/doc/Dropout.md b/caffe2fluid/doc/Dropout.md
deleted file mode 100644
index e10ac91..0000000
--- a/caffe2fluid/doc/Dropout.md
+++ /dev/null
@@ -1,41 +0,0 @@
-## Dropout
-
-
-### [Dropout](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html)
-```
-layer {
- name: "dropout"
- type: "Dropout"
- bottom: "data"
- top: "dropout"
- dropout_param {
- dropout_ratio: 0.5
- }
-}
-```
-
-
-### [paddle.fluid.layers.dropout](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-56-dropout)
-```python
-paddle.fluid.layers.dropout(
-    x,
-    dropout_prob,
-    is_test=False,
-    seed=None,
-    name=None,
-    dropout_implementation="downgrade_in_infer"
-)
-```
-
-### 功能差异
-#### 实现方式
-Caffe:采用`upscale_in_train`方式实现;
-PaddlePaddle:实现方式支持`downgrade_in_infer`和`upscale_in_train`两种方式。
-```
-1. downgrade_in_infer实现方式
-   训练时: out = input * mask
-   预测时: out = input * (1.0 - dropout_prob)
-2. upscale_in_train实现方式
-   训练时: out = input * mask / (1.0 - dropout_prob)
-   预测时: out = input
-```
diff --git a/caffe2fluid/doc/Eltwise.md b/caffe2fluid/doc/Eltwise.md
deleted file mode 100644
index 935dcd5..0000000
--- a/caffe2fluid/doc/Eltwise.md
+++ /dev/null
@@ -1,66 +0,0 @@
-## Eltwise
-
-
-### [Eltwise](http://caffe.berkeleyvision.org/tutorial/layers/eltwise.html)
-```
-layer {
- name: "eltwise"
- type: "Eltwise"
- bottom: "data1"
- bottom: "data2"
- top: "prod"
- eltwise_param {
- operation: PROD # 还有MAX,SUM
- stable_prod_grad: false
- # coeff: 1
- # coeff: -1
- }
-}
-```
-
-
-### [paddle.fluid.layers.elementwise_add](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-61-elementwise_add)
-### [paddle.fluid.layers.elementwise_max](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-63-elementwise_max)
-### [paddle.fluid.layers.elementwise_mul](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-65-elementwise_mul)
-```python
-paddle.fluid.layers.elementwise_add(
-    x,
-    y,
-    axis=-1,
-    act=None,
-    name=None
-)
-和
-paddle.fluid.layers.elementwise_max(
-    x,
-    y,
-    axis=-1,
-    act=None,
-    name=None
-)
-和
-paddle.fluid.layers.elementwise_mul(
-    x,
-    y,
-    axis=-1,
-    act=None,
-    name=None
-)
-```
-
-### 功能差异
-#### 输入数据
-Caffe:`data1`和`data2`的`shape`必须相同;
-PaddlePaddle:`Y`的`shape`可以是`X`的`shape`的一个连续子序列,并通过设置`axis`表示从哪一个维度开始对应。
-
-#### 加法操作的差异
-Caffe:可以通过设置`coeff`参数为加法的每个输入添加一个权重;
-PaddlePaddle:无权重设置功能。
-
-#### 乘法操作
-Caffe:可以通过设置`stable_prod_grad`参数来选择是否使用渐进较慢的梯度计算方法;
-PaddlePaddle:无设置`stable_prod_grad`参数的功能。
-
-#### 其他
-Caffe:激活函数需要由另外一层完成;
-PaddlePaddle:可以通过设置`act`对逐元素操作后的tensor变量执行非线性激活。
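对于Caffe中带`coeff`系数的`SUM`操作,可参考下面的组合实现思路(仅为示意写法,其中`eltwise_sum_with_coeff`为自拟的函数名,并非PaddlePaddle官方接口;假设两个输入`shape`相同):
```python
import paddle.fluid as fluid

def eltwise_sum_with_coeff(x, y, coeff_x=1.0, coeff_y=-1.0):
    # 先按系数缩放各输入,再逐元素相加,近似Caffe中带coeff的SUM操作
    scaled_x = fluid.layers.scale(x, scale=coeff_x)
    scaled_y = fluid.layers.scale(y, scale=coeff_y)
    return fluid.layers.elementwise_add(scaled_x, scaled_y)

# data1、data2的shape相同时,输出shape与输入一致
data1 = fluid.layers.data(name='data1', shape=[3, 64, 64], dtype='float32')
data2 = fluid.layers.data(name='data2', shape=[3, 64, 64], dtype='float32')
out = eltwise_sum_with_coeff(data1, data2, coeff_x=1.0, coeff_y=-1.0)
```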
diff --git a/caffe2fluid/doc/EuclideanLoss.md b/caffe2fluid/doc/EuclideanLoss.md
deleted file mode 100644
index 0b15be8..0000000
--- a/caffe2fluid/doc/EuclideanLoss.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## EuclideanLoss
-
-
-### [EuclideanLoss](http://caffe.berkeleyvision.org/tutorial/layers/euclideanloss.html)
-```
-layer {
- name: "loss"
- type: "EuclideanLoss"
- bottom: "input"
- bottom: "label"
- top: "loss"
-}
-```
-
-
-### [paddle.fluid.layers.square_error_cost](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-173-square_error_cost)
-```python
-paddle.fluid.layers.square_error_cost(
-    input,
-    label
-)
-```
-
-### 功能差异
-#### 实现方式
-Caffe:对整个输入的欧氏距离进行取和后除以两倍的样本个数,最终获得一个标量数值。
-
-PaddlePaddle:使用elementwise方式,计算`input`和`label`对应元素的欧氏距离,最终获得一个array(输入和输出`shape`一致)。
-
-### 代码示例
-```python
-# 利用PaddlePaddle实现Caffe的EuclideanLoss
-def EuclideanLoss(inputs, label):
-    elw_eud = fluid.layers.square_error_cost(inputs, label)
-    eud = fluid.layers.reduce_mean(elw_eud)
-    eud = fluid.layers.scale(eud, scale=0.5)
-    return eud
-
-# 调用函数计算欧氏距离
-# inputs: [1, 2, 4, 5, 6]
-# labels: [6, 5, 4, 3, 2]
-# eud: 5.4
-inputs = fluid.layers.data(dtype='float32', shape=[5], name='data')
-labels = fluid.layers.data(dtype='float32', shape=[5], name='label')
-eud = EuclideanLoss(inputs, labels)
-```
-
diff --git a/caffe2fluid/doc/Exp.md b/caffe2fluid/doc/Exp.md
deleted file mode 100644
index a2d4911..0000000
--- a/caffe2fluid/doc/Exp.md
+++ /dev/null
@@ -1,40 +0,0 @@
-## Exp
-
-
-### [Exp](http://caffe.berkeleyvision.org/tutorial/layers/exp.html)
-```
-layer {
- name: "exp"
- type: "Exp"
- bottom: "data"
- top: "exp"
- exp_param {
- base: -1
- scale: 1
- shift: 0
- }
-}
-```
-
-
-### [paddle.fluid.layers.exp](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-196-exp)
-```python
-paddle.fluid.layers.exp(
-    x,
-    name=None
-)
-```
-
-### 功能差异
-#### 计算机制
-Caffe:有三个关于计算的参数,其计算公式为:
-$$
-y=\begin{cases}
-e^{(shift+scale \times x)},\quad base\leq 0 \\\\
-base^{(shift+scale \times x)},\quad base>0
-\end{cases}
-$$
-
-
-PaddlePaddle:计算公式为:$$y=e^x$$
-
diff --git a/caffe2fluid/doc/Flatten.md b/caffe2fluid/doc/Flatten.md
deleted file mode 100644
index 4fa8ee2..0000000
--- a/caffe2fluid/doc/Flatten.md
+++ /dev/null
@@ -1,39 +0,0 @@
-## Flatten
-
-
-### [Flatten](http://caffe.berkeleyvision.org/tutorial/layers/flatten.html)
-```
-layer {
- name: "flatten"
- type: "Flatten"
- bottom: "data"
- top: "flatten"
- flatten_param {
- axis: 1
- end_axis: -1
- }
-}
-```
-
-
-### [paddle.fluid.layers.reshape](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-134-reshape)
-```python
-paddle.fluid.layers.reshape(
-    x,
-    shape,
-    actual_shape=None,
-    act=None,
-    inplace=False,
-    name=None
-)
-```
-
-### 功能差异
-#### 输入参数
-Caffe:分别使用参数`axis`和`end_axis`表示起始轴和结束轴,[axis, end_axis]轴上的数据将被压缩至一维,
-但如若`axis-end_axis==1`时,则会在`axis`轴之后插入一维;
-> 输入数据shape[2, 3, 4, 5]
-> axis=1, end_axis=3:输出shape[2, 60]
-> axis=3, end_axis=2:输出shape[2, 3, 4, 1, 5]
-
-PaddlePaddle:通过在`shape`参数设置具体的输出shape。
diff --git a/caffe2fluid/doc/InnerProduct.md b/caffe2fluid/doc/InnerProduct.md
deleted file mode 100644
index 4c494d1..0000000
--- a/caffe2fluid/doc/InnerProduct.md
+++ /dev/null
@@ -1,71 +0,0 @@
-## InnerProduct
-### [InnerProduct](http://caffe.berkeleyvision.org/tutorial/layers/innerproduct.html)
-```
-layer {
- name: "fc"
- type: "InnerProduct"
- bottom: "data"
- top: "fc"
- # 权重的局部学习率和权值衰减因子
- param {
- lr_mult: 1
- decay_mult: 1
- }
- # 偏置项的局部学习率和权值衰减因子
- param {
- lr_mult: 2
- decay_mult: 0
- }
- inner_product_param {
- num_output: 20 # 必填项
- bias_term: True
- weight_filler {
- type: "gaussian"
- value: 0.01
- }
- bias_filler {
- type: "constant"
- value: 0
- }
- }
-}
-```
-
-
-### 
[paddle.fluid.layers.fc](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-71-fc) -```python -paddle.fluid.layers.fc( - input, - size, - num_flatten_dims=1, - param_attr=None, - bias_attr=None, - act=None, - is_test=False, - name=None -) -``` - -### 功能差异 -#### 参数初始化 -Caffe:Layer定义中共有两个结构体`param`用于设置局部学习率和权值衰减因子,其中第一个用于设置权重,第二个则用于设置偏值项;权重和偏置项的初始化参数在`InnerProduct`中进行设置;是否使用偏置项可以使用`bias_term`进行设置; -PaddlePaddle:权重和偏置项的参数分别使用`param_attr`和`bias_attr`进行配置,配置参数如下所示,此外将`bias_attr`直接设为`False`表示不使用偏置项。 -```python -paddle.fluid.ParamAttr( - name=None, - initializer=None, - learning_rate=1.0, - regularizer=None, - trainable=True, - gradient_clip=None, - do_model_average=False -) -``` - -#### 多维输入 -Caffe:将输入数据的第一维默认为batch size,其余维度压缩至一维后,得到新的二维输入进行全连接计算; -PaddlePaddle:`[0, num_flatten_dims)`和`[num_flattens_dim, )`维上的数据分别被压缩至一维,得到新的二维输入进行全连接计算。 - -#### 其他 -Caffe:需要在另一个层中定义激活函数。 -PaddlePaddle:可以通过设置`act`这一参数来确定输出的激活函数。 diff --git a/caffe2fluid/doc/Input.md b/caffe2fluid/doc/Input.md deleted file mode 100644 index 8e1ae0f..0000000 --- a/caffe2fluid/doc/Input.md +++ /dev/null @@ -1,72 +0,0 @@ -## Input -### [Input](http://caffe.berkeleyvision.org/tutorial/layers/input.html) -``` -layer { - name: "input" - type: "Input" - top: "input" - input_param { - shape { - dim: 10 - dim: 3 - dim: 227 - dim: 227 - } - } -} -``` - - -### [paddle.fluid.layers.data](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-20-data) -```python -paddle.fluid.layers.data( - name, - shape, - append_batch_size=True, - dtype='float32', - lod_level=0, - type=VarType.LOD_TENSOR, - stop_gradient=True -) -``` - -### 功能差异 -#### 输入shape的差异 -Caffe:输入的shape中每一个维度的大小都需要详细定义。 -PaddlePaddle:可以根据设置设置`append_batch_size`来确定是否将数据第一个维度的大小加入到shape中,若该参数为True,输入数据第一个维度的大小则由传入数据决定,若该参数为False,则shape的第一个维度为输入数据第一个维度的大小。 - - - -#### 其他差异 -Caffe:不需要强制定义输入数据的类型。 -PaddlePaddle:需要强制定义输入数据的类型,同时可以通过设置`lod_level`表示输入的数据是不是一个序列,设置`stop_gradient`表示是否应该停止计算梯度。 - - -### 代码示例 -``` -# Caffe示例: -layer { - name: "input" - type: "Input" - top: "input" - input_param { - shape { - dim: 10 - dim: 3 - dim: 227 - dim: 227 - } - } -} -# 数据shape为[10,3,227,227] -``` - -``` python -# PaddlePaddle示例: -# 数据shape为[10,3,227,227] -inputs1 = paddle.fluid.layers.data(name='data1', shape=[10,3,227,227], - dtype='float32', append_batch_size=False) - -# 数据shape为[-1,3,227,227] -inputs2 = paddle.fluid.layers.data(name='data2', shape=[3,227,227], dtype='float32') -``` diff --git a/caffe2fluid/doc/LRN.md b/caffe2fluid/doc/LRN.md deleted file mode 100644 index cc2c7e5..0000000 --- a/caffe2fluid/doc/LRN.md +++ /dev/null @@ -1,44 +0,0 @@ -## LRN - - -### [LRN](http://caffe.berkeleyvision.org/tutorial/layers/lrn.html) -``` -layer { - name: "lrn" - type: "LRN" - bottom: "data" - top: "lrn" - lrn_param { - local_size: 5 - alpha: 1 - beta: 5 - norm_region: "ACROSS_CHANNELS" - } -} -``` - - -### [paddle.fluid.layers.lrn](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-101-lrn) -```python -paddle.fluid.layers.lrn( - input, - n=5, - k=1.0, - alpha=0.0001, - beta=0.75, - name=None -) -``` - -### 功能差异 -#### 参数差异 -Caffe:参数`norm_region`支持`ACROSS_CHANNELS`和`WITHIN_CHANNEL`两种模式; -PaddlePaddle:默认且仅支持`ACROSS_CHANNELS`模式。 - -#### 计算机制 -Caffe:在`ACROSS_CHANNELS`模式下,计算公式如下,公式中的$n$即为参数`local_size` -$$output(i,x,y)=input(i,x,y)/(1+\frac{\alpha}{n}\sum_{j=max(0,i-\frac{n}{2})}^{min(C,i+\frac{n}{2})}{input(j,x,y)^2})^\beta$$ - -PaddlePaddle:计算公式如下, 
-$$output(i,x,y)=input(i,x,y)/(k+\alpha\sum_{j=max(0,i-\frac{n}{2})}^{min(C,i+\frac{n}{2})}{input(j,x,y)^2})^\beta$$ - diff --git a/caffe2fluid/doc/Log.md b/caffe2fluid/doc/Log.md deleted file mode 100644 index b94983b..0000000 --- a/caffe2fluid/doc/Log.md +++ /dev/null @@ -1,40 +0,0 @@ -## Log - - -### [Log](http://caffe.berkeleyvision.org/tutorial/layers/log.html) -``` -layer { - name: "log" - type: "Log" - bottom: "data" - top: "log" - log_param { - base: -1 - scale: 1 - shift: 0 - } -} -``` - - -### [paddle.fluid.layers.log](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-95-log) -```python -paddle.fluid.layers.log( - x, - name=None -) -``` - -### 功能差异 -#### 计算机制 - -Caffe:计算公式如下, -$$ -y=\begin{cases} -ln(shift+scale \times x),\quad base\leq 0 \\\\ -log_{base}(shift+scale \times x),\quad base>0 -\end{cases} -$$ - -PaddlePaddle:计算公式如下, -$$y=ln(x)$$ diff --git a/caffe2fluid/doc/Pooling.md b/caffe2fluid/doc/Pooling.md deleted file mode 100644 index 82c19dc..0000000 --- a/caffe2fluid/doc/Pooling.md +++ /dev/null @@ -1,89 +0,0 @@ -## Pooling - -### [Pooling](http://caffe.berkeleyvision.org/tutorial/layers/pooling.html) -``` -layer{ - name: "pool" - type: "Pooling" - bottom: "data" - top: "pool" - pooling_param { - pool: MAX - kernel_size: 3 # 必填项 - stride: 1 - pad: 0 - } -} -``` -### [paddle.fluid.layers.pool2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-119-pool2d) -```python -paddle.fluid.layers.pool2d( - input, - pool_size, - pool_type='max', - pool_stride=1, - pool_padding=0, - global_pooling=False, - use_cudnn=True, - ceil_mode=False, - name=None, - exclusive=True -) -``` - -### 功能差异 -#### 输出大小 -Caffe:输出大小计算方式如下所示, -``` -H_out = (H_in-ksize[0]+2*padding[0])/strides[0]+1 -W_out = (W_in-ksize[1]+2*padding[1])/strides[1]+1 -``` - -PaddlePaddle:`ceil_mode`为`Ture`时,输出大小计算方式与Caffe一致;当`ceil_mode`为`False`时,输出大小计算方式如下所示, -``` -# ceil_model为False时,计算公式 -H_out = (H_in-ksize[0]+2*padding[0]+strides[0]-1)/strides[0]+1 -W_out = (W_in-ksize[1]+2*padding[1]+strides[1]-1)/strides[1]+1 -``` - -#### 池化方式 -Caffe:通过`pool`参数设置,支持`MAX`, `AVE`和`STOCHASTIC`三种池化方式; -PaddlePaddle:通过`pool_type`参数设置,支持`max`和`avg`两种池化方式。 - -#### 其他 -Caffe:无`exclusive`参数; -PaddlePaddle:`exclusive`参数为`True`的情况下,`avg`平均池化过程中会忽略填充值。 - - -### 代码示例 - -``` -# Caffe示例: -# 输入shape:(1,3,228,228) -# 输出shape:(1,3,114,114) -layer{ - name: "pool" - type: "Pooling" - bottom: "data" - top: "pool" - pooling_param { - pool: MAX - kernel_size: 3 - stride: 2 - } -} -``` -``` python -# PaddlePaddle示例: -# 输入shape:(1,3,228,228) -# 输出shape:(1,3,113,113) -pool1 = paddle.fluid.layers.pool2d(input = inputs , pool_size = 3, - pool_type = 'max', pool_stride = 2, - ceil_mode=False) -``` - - - - - - diff --git a/caffe2fluid/doc/Power.md b/caffe2fluid/doc/Power.md deleted file mode 100644 index cde0cf0..0000000 --- a/caffe2fluid/doc/Power.md +++ /dev/null @@ -1,34 +0,0 @@ -## Power - - -### [Power](http://caffe.berkeleyvision.org/tutorial/layers/power.html) -``` -layer { - name: "power" - type: "Power" - bottom: "data" - top: "power" - power_param { - power: 1 - scale: 1 - shift: 0 - } -} -``` - - -### [paddle.fluid.layers.pow](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-121-pow) -```python -paddle.fluid.layers.pow( - x, - factor=1.0, - name=None -) -``` - -### 功能差异 -#### 计算机制 -Caffe:计算公式如下所示, -$$y=(shift+scale \times x)^2$$ -PaddlePaddle:计算公式如下所示, -$$y=x^{factor}$$ diff --git a/caffe2fluid/doc/ReadMe.md b/caffe2fluid/doc/ReadMe.md deleted file mode 
100644 index 58c22da..0000000 --- a/caffe2fluid/doc/ReadMe.md +++ /dev/null @@ -1,39 +0,0 @@ -# Caffe-Fluid常用层对应表 - -本文档梳理了Caffe常用Layer与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有Caffe使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用 。 - - -| 序号 | Caffe层 | PaddlePaddle接口 | 备注 | -| ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| 1 | [AbsVal](http://caffe.berkeleyvision.org/tutorial/layers/absval.html) | [fluid.layers.abs](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-189-abs) | 功能一致 | -| 2 | [Accuracy](http://caffe.berkeleyvision.org/tutorial/layers/accuracy.html) | [fluid.layers.accuracy](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-269-accuracy) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Accuracy.md) | -| 3 | [ArgMax](http://caffe.berkeleyvision.org/tutorial/layers/argmax.html) | [fluid.layers.argmax](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-214-argmax) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/ArgMax.md) | -| 4 | [BatchNorm](http://caffe.berkeleyvision.org/tutorial/layers/batchnorm.html) | [fluid.layers.batch_norm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-36-batch_norm) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/BatchNorm.md) | -| 5 | [BNLL](http://caffe.berkeleyvision.org/tutorial/layers/bnll.html) | [fluid.layers.softplus](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-204-softplus) | 功能一致 | -| 6 | [Concat](http://caffe.berkeleyvision.org/tutorial/layers/concat.html) | [fluid.layers.concat](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-219-concat) | 功能一致 | -| 7 | [Convolution](http://caffe.berkeleyvision.org/tutorial/layers/convolution.html) | [fluid.layers.conv2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-45-conv2d) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Convolution.md) | -| 8 | [Crop](http://caffe.berkeleyvision.org/tutorial/layers/crop.html) | [fluid.layers.crop](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-51-crop) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Crop.md) | -| 9 | [Deconvolution](http://caffe.berkeleyvision.org/tutorial/layers/deconvolution.html) | [fluid.layers.conv2d_transpose](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-46-conv2d_transpose) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Deconvolution.md) | -| 10 | [Dropout](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) | [fluid.layers.dropout](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-56-dropout) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Dropout.md) | -| 11 | [Eltwise](http://caffe.berkeleyvision.org/tutorial/layers/eltwise.html) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Eltwise.md) | -| 12 | [ELU](http://caffe.berkeleyvision.org/tutorial/layers/elu.html) | [fluid.layers.elu](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-68-elu) | 功能一致 | -| 13 | 
[EuclideanLoss](http://caffe.berkeleyvision.org/tutorial/layers/euclideanloss.html) | [fluid.layers.square_error_cost](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-173-square_error_cost) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/EuclideanLoss.md) | -| 14 | [Exp](http://caffe.berkeleyvision.org/tutorial/layers/exp.html) | [fluid.layers.exp](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-196-exp) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Exp.md) | -| 15 | [Flatten](http://caffe.berkeleyvision.org/tutorial/layers/flatten.html) | [fluid.layers.reshape](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-134-reshape) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Flatten.md) | -| 16 | [InnerProduct](http://caffe.berkeleyvision.org/tutorial/layers/innerproduct.html) | [fluid.layers.fc](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-71-fc) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/InnerProduct.md) | -| 17 | [Input](http://caffe.berkeleyvision.org/tutorial/layers/input.html) | [fluid.layers.data](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-20-data) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Input.md) | -| 18 | [Log](http://caffe.berkeleyvision.org/tutorial/layers/log.html) | [fluid.layers.log](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-95-log) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Log.md) | -| 19 | [LRN](http://caffe.berkeleyvision.org/tutorial/layers/lrn.html) | [fluid.layers.lrn](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-101-lrn) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/LRN.md) | -| 20 | [Pooling](http://caffe.berkeleyvision.org/tutorial/layers/pooling.html) | [fluid.layers.pool2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-119-pool2d) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Pooling.md) | -| 21 | [Power](http://caffe.berkeleyvision.org/tutorial/layers/power.html) | [fluid.layers.pow](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-121-pow) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Power.md) | -| 22 | [PReLU](http://caffe.berkeleyvision.org/tutorial/layers/prelu.html) | [fluid.layers.prelu](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-122-prelu) | 功能一致 | -| 23 | [Reduction](http://caffe.berkeleyvision.org/tutorial/layers/reduction.html) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Reduction.md) | -| 24 | [ReLU](http://caffe.berkeleyvision.org/tutorial/layers/relu.html) | [fluid.layers.leaky_relu](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-132-relu) | 功能一致 | -| 25 | [Reshape](http://caffe.berkeleyvision.org/tutorial/layers/reshape.html) | [fluid.layers.reshape](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-134-reshape) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Reshape.md) | -| 26 | 
[SigmoidCrossEntropyLoss](http://caffe.berkeleyvision.org/tutorial/layers/sigmoidcrossentropyloss.html) | [fluid.layers.sigmoid_cross_entropy_with_logits](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-163-sigmoid_cross_entropy_with_logits) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/SigmoidCrossEntropyLoss.md) | -| 27 | [Sigmoid](http://caffe.berkeleyvision.org/tutorial/layers/sigmoid.html) | [fluid.layers.sigmoid](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-202-sigmoid) | 功能一致 | -| 28 | [Slice](http://caffe.berkeleyvision.org/tutorial/layers/slice.html) | [fluid.layers.slice](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-165-slice) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Slice.md) | -| 29 | [SoftmaxWithLoss](http://caffe.berkeleyvision.org/tutorial/layers/softmaxwithloss.html) | [fluid.layers.softmax_with_cross_entropy](http://paddlepaddle.org/documentation/docs/zh/1.3/api_cn/layers_cn.html#permalink-164-softmax_with_cross_entropy) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/SofmaxWithLoss.md) | -| 30 | [Softmax](http://caffe.berkeleyvision.org/tutorial/layers/softmax.html) | [fluid.layers.softmax](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-169-softmax_with_cross_entropy) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Sofmax.md) | -| 31 | [TanH](http://caffe.berkeleyvision.org/tutorial/layers/tanh.html) | [fluid.layers.tanh](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-209-tanh) | 功能一致 | -| 32 | [Tile](http://caffe.berkeleyvision.org/tutorial/layers/tile.html) | [fluid.layers.expand](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-70-expand) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/caffe2fluid/doc/Tile.md) | diff --git a/caffe2fluid/doc/Reduction.md b/caffe2fluid/doc/Reduction.md deleted file mode 100644 index 14a478c..0000000 --- a/caffe2fluid/doc/Reduction.md +++ /dev/null @@ -1,72 +0,0 @@ -## Reduction - - -### [Reduction](http://caffe.berkeleyvision.org/tutorial/layers/reshape.html) -``` -layer { - name: "reduce" - type: "Reduction" - bottom: "reduce" - top: “reduce" - reduction_param { - operation: SUM - axis: 1 - coeff: 2 - } -} -``` - - -### [paddle.fluid.layers.reduce_sum](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-131-reduce_sum) -### [paddle.fluid.layers.reduce_mean](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-128-reduce_mean) -```python -paddle.fluid.layers.reduce_sum( - input, - dim=None, - keep_dim=False, - name=None -) -``` -```python -paddle.fluid.layers.reduce_mean( - input, - dim=None, - keep_dim=False, - name=None -) -``` - -### 功能差异 -#### 操作类型 -Caffe:通过`operation`参数支持`SUM`、`ASUM`、`SUMSQ`、`MEAN`四种操作; -PaddlePaddle:`reduce_sum`和`reduce_mean`分别对应Caffe的`SUM`和`MEAN`操作,另外两种无对应。 - -#### 计算方式 -Caffe:`axis`为`int`型参数,该维及其后维度,均会被降维,且不保留对应部分的维度,如shape为`(30, 3, 6, 8)`, `axis`为2的情况下,得到的输出shape为`(30, 3)`; -PaddlePaddle:`dim`参数为`list`型参数,其指定的维度才会被降维,且当`keep_dim`为`True`时,降维的维度仍会以`1`的形式保留下来,如shape为`(30, 3, 6, 8)`, `dim`为`[2, 3]`,`keep_dim`为`True`的情况下,得到的输出shape为`(30, 3, 1, 1)`。 - -### 代码示例 -``` -# Caffe示例: -# 输入shape:(30,3,6,8) -layer { - name: "reduce" - type: "Reduction" - bottom: "reduce" - top: “reduce" - reduction_param { - operation: 
SUM - axis: 2 - coeff: 2 - } -} -# 输出shape:(30,3,) -``` -```python -# PaddlePaddle示例: -# 输入shape:(30,3,6,8) -output1 = fluid.layers.reduce_mean(input = inputs, dim=[1]) -# 输出shape:(30,6,8) -output2 = fluid.layers.reduce_mean(input = inputs, dim=[1], keep_dim=True, name=None) -# 输出shape:(30,1,6,8) -``` diff --git a/caffe2fluid/doc/Reshape.md b/caffe2fluid/doc/Reshape.md deleted file mode 100644 index ded74be..0000000 --- a/caffe2fluid/doc/Reshape.md +++ /dev/null @@ -1,93 +0,0 @@ -## Reshape - - -### [Reshape](http://caffe.berkeleyvision.org/tutorial/layers/reshape.html) -``` -layer { - name: "reshape" - type: "Reshape" - bottom: "data" - top: "reshape" - reshape_param { - shape{ - dim: 1 - ... - } - axis: 0 - num_axes: -1 - } -} -``` - - -### [paddle.fluid.layers.reshape](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-134-reshape) -```python -paddle.fluid.layers.reshape( - x, - shape, - actual_shape=None, - act=None, - inplace=False, - name=None -) -``` - -### 功能差异 -#### reshape机制的差异 -Caffe:使用0和-1分别代表复制的维度数和推断的维度数,但使用了`axis`和`num_axes`定义了其他的使用方法。当单独使用`axis`时,表示输出数据的前`axis`个维度由原始输入数据的前`axis`个维度复制而来,而`shape`里的维度信息则添加在这几个维度之后;当同时使用`axis`和`num_axes`两个参数时,表示`shape`中的第`1`个维度至第`1+num_axes`维度定义为输出中的第`axis+1`和`axis+num_axes+1`个维度,其余维度的维度数由原始输入数据的维度数代替,直至输出数据和输入数据摊平成一维时大小相同。 -PaddlePaddle:使用0和1分别代表复制的维度数和推断的维度数。 - - -#### 输出的差异 -Caffe:Reshape层在不改变数据的情况下改变输入blob的维度,处理过程只在输入blob上进行,没有进行数据的拷贝。 -PaddlePaddle:可以通过设置`inplace`表示是否对数据进行拷贝。 -#### 其他差异 -Caffe:激活函数需要由另外一层完成。 -PaddlePaddle:可以通过设置`act`对reshpe后的tensor变量执行非线性激活。 - - - -### 代码示例 -``` -# Caffe示例: -# 输入shape:(2,4,6) -layer { - name: "reshape" - type: "Reshape" - bottom: "data" - top: "reshape" - reshape_param { - shape { - dim: 3 - dim: 2 - } - axis: 2 - num_axes: 1 - } -} -# 输出shape:(2,4,3,2) -layer { - name: "reshape" - type: "Reshape" - bottom: "data" - top: "reshape" - reshape_param { - shape { - dim: 3 - dim: 2 - dim: 4 - } - axis: 1 - } -} -# 输出shape:(2,3,2,4) - -``` -```python -# PaddlePaddle示例: -# 输入shape:(2,4,6) -output1 = paddle.fluid.layers.reshape(x = inputs , shape = [2,4,-1,3]) -# 输出shape:(2,4,2,3) -output2 = paddle.fluid.layers.reshape(x = inputs , axis = [0,2,2,6]) -# 输出shape:(2,2,2,6) -``` diff --git a/caffe2fluid/doc/SigmoidCrossEntropyLoss.md b/caffe2fluid/doc/SigmoidCrossEntropyLoss.md deleted file mode 100644 index a30393a..0000000 --- a/caffe2fluid/doc/SigmoidCrossEntropyLoss.md +++ /dev/null @@ -1,37 +0,0 @@ -## SigmoidCrossEntropyLoss - - -### [SigmoidCrossEntropyLoss](http://caffe.berkeleyvision.org/tutorial/layers/sigmoidcrossentropyloss.html) -``` -layer { - name: "loss" - type: "SigmoidCrossEntropyLoss" - bottom: "x" - bottom: "label" - top: "loss" -} -``` - - -### [paddle.fluid.layers.sigmoid_cross_entropy_with_logits](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-163-sigmoid_cross_entropy_with_logits) -```python -paddle.fluid.layers.sigmoid_cross_entropy_with_logits( - x, - label, - ignore_index=-100, - name=None, - normalize=False -) -``` - -### 功能差异 -#### 输入数据 -Caffe:输入数据(`x`)的维度最大是4维(`N*C*H*W`); -PaddlePaddle:输入数据(`x`和`label`)的维度只能是2维(`N*K`)。 -#### 输出结果 -Caffe:输出的数据大小是`1*1*1*1`,即将所有位置上的loss取均值; -PaddlePaddle:输出和输入大小一致,即`N*H`。 -#### 其他差异 -Caffe:无`ignore_index`和`normalize`参数; -PaddlePaddle:可以通过设定`ignore_index`来确定忽略的目标值,同时它有一个`normalize`参数进行归一化。 - diff --git a/caffe2fluid/doc/Slice.md b/caffe2fluid/doc/Slice.md deleted file mode 100644 index e0625a2..0000000 --- a/caffe2fluid/doc/Slice.md +++ /dev/null @@ -1,67 +0,0 @@ -## Slice - - -### 
[Slice](http://caffe.berkeleyvision.org/tutorial/layers/slice.html) -``` -layer { - name: "slice" - type: "Slice" - bottom: "data" - top: "out1" - top: "out2" - top: "out3" - slice_param { - axis: 1 - alice_point: 1 - alice_point: 2 - } -} -``` - - -### [paddle.fluid.layers.slice](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-165-slice) -```python -paddle.fluid.layers.slice( - input, - axes, - starts, - ends -) -``` - -### 功能差异 -#### 输入参数 -Caffe:输入的`axis`和`alice_point`等参数都是数值。 -PaddlePaddle:输入的`axes`、`starts`和`ends`等输入参数都是list类型。 -#### slice机制 -Caffe:只能在一个维度上截取,但可以截取多个切片。 -PaddlePaddle:可以在多个维度上截取,但只能截取到一个切片。 -#### 其他差异 -PaddlePaddle:如果传递给`starts`或`end`的值大于n(此维度中的元素数目),则表示n。 -### 代码示例 -``` -# Caffe示例: -# 输入shape:(2,6) -layer { - name: "slice" - type: "Slice" - bottom: "data" - top: "out1" - top: "out2" - top: "out3" - slice_param { - axis: 1 # 使用-1效果相同 - slice_point: 1 - slice_point: 2 - } -} -# 输出3个数组,第一个shape:(2,1),第二个shape:(2,1),第三个shape:(2,4) -``` -```python -# PaddlePaddle示例: -# 输入shape:(2,6) -output1 = paddle.fluid.layers.slice(input=inputs, axes=[1], starts=[1], ends=[3]) -# 输出shape:(2,2) -output2 = paddle.fluid.layers.slice(input=inputs, axes=[0,1], starts=[0,1], ends=[1,3]) -# 输出shape:(1,2) -``` diff --git a/caffe2fluid/doc/Sofmax.md b/caffe2fluid/doc/Sofmax.md deleted file mode 100644 index 3b06e6d..0000000 --- a/caffe2fluid/doc/Sofmax.md +++ /dev/null @@ -1,30 +0,0 @@ -## Sofmax - - -### [Softmax](http://caffe.berkeleyvision.org/tutorial/layers/softmax.html) -``` -layer { - name: "softmax" - type: "Softmax" - bottom: "fc" - top: "softmax" -} -``` - - -### [paddle.fluid.layers.softmax](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-168-softmax) -```python -paddle.fluid.layers.softmax( - input, - use_cudnn=False, - name=None, - axis=-1 -) -``` - -### 功能差异 -#### 计算机制 -Caffe:计算softmax之前,对每个样本中的每个值减去该样本中的最大值; -PaddlePaddle:省略了这一操作直接计算softmax。 -#### 使用机制 -PaddlePaddle:通过设置`axis`来确定执行softmax的维度索引。 diff --git a/caffe2fluid/doc/SofmaxWithLoss.md b/caffe2fluid/doc/SofmaxWithLoss.md deleted file mode 100644 index 582b058..0000000 --- a/caffe2fluid/doc/SofmaxWithLoss.md +++ /dev/null @@ -1,89 +0,0 @@ -## SofmaxWithLoss - - -### [SofmaxWithLoss](http://caffe.berkeleyvision.org/tutorial/layers/softmaxwithloss.html) -``` -layer { - name: "loss" - type: "SoftmaxWithLoss" - bottom: "logits" - bottom: "label" - top: "loss" - softmax_param { - axis: 1 - } - loss_param { - ignore_label: -1 - normalize: 0 - normalization: FULL - } -} -``` - - -### [paddle.fluid.layers.softmax_with_cross_entropy](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-169-softmax_with_cross_entropy) -```python -paddle.fluid.layers.softmax_with_cross_entropy( - logits, - label, - soft_label=False, - ignore_index=-100, - numeric_stable_mode=True, - return_softmax=False -) -``` - -### 功能差异 -#### 输入数据 -Caffe:输入数据(`x`)的维度最大是4维(`N*C*H*W`); -PaddlePaddle:输入数据(`x`和`label`)的维度只能是2维(`N*K`)。 -#### 输入格式 -Caffe: 采用硬标签方式输入,同时进行预处理操作(为了避免上溢出和下溢出,对输入的每个值减去batch中该位置上的最大值); -PaddlePaddle:通过参数`soft_label`的设定,支持硬标签和软标签两种输入。 -> 计算softmax的loss时,根据每个样本是否被分配至多个类别中可以分为两类——硬标签和软标签 -> **硬标签:** 即one-hot label,每个样本仅分到一个类别中。在硬标签中,根据是否对未初始化的log概率进行预处理,又可以分为两类,预处理主要是完成对每个样本中的每个log概率减去该样本中的最大的log概率 -> **软标签:** 每个样本至少被分配到一个类别中 - -#### 输出结果 -Caffe:输出是对所有样本的loss进行归一化后的结果,归一化的方式由`normalization`和`normalize`参数决定; -``` -归一化形式: -1. 当`normalization`是FULL或0时,整个loss取和后除以batch的大小. -2. 当`normalization`是VALID或1时,整个loss取和后除以除`ignore_label`以外的样本数。 -3. 
当`normalization`是NONE时,则loss取和.
-4. 当`normalization`未设置时,采用`normalize`的值进行判断,若`normalize==1`则归一化方式是VALID,若`normalize==0`则归一化方式是FULL。
-```
-PaddlePaddle:输出是每个样本的loss所组成的一个向量,同时如果将参数`return_softmax`设为True,则输出的是loss向量和softmax值组成的一个元组。
-
-### 代码示例
-```
-# Caffe示例:
-# logits输入shape:(100,10)
-# label输入shape:(100,1)
-# 输出shape:()
-layer {
-    name: "loss"
-    type: "SoftmaxWithLoss"
-    bottom: "logits"
-    bottom: "label"
-    top: "loss"
-    loss_param {
-        ignore_label: -1
-        normalize: 0
-        normalization: FULL
-
-    }
-}
-```
-
-
-```python
-# PaddlePaddle示例:
-# logits输入shape:(100,10)
-# label输入shape:(100,1)
-# 输出shape:(100,1)
-softmaxwithloss = fluid.layers.softmax_with_cross_entropy(logits=logs, label=labels,
-                      soft_label=False, ignore_index=-100,
-                      numeric_stable_mode=True,
-                      return_softmax=False)
-```
diff --git a/caffe2fluid/doc/Tile.md b/caffe2fluid/doc/Tile.md
deleted file mode 100644
index d4d6d1b..0000000
--- a/caffe2fluid/doc/Tile.md
+++ /dev/null
@@ -1,31 +0,0 @@
-## Tile
-
-
-### [Tile](http://caffe.berkeleyvision.org/tutorial/layers/tile.html)
-```
-layer {
-    name: "tile"
-    type: "Tile"
-    bottom: "data"
-    top: "concat"
-    tile_param {
-        axis: 1
-        tiles: 2
-    }
-}
-```
-
-
-### [paddle.fluid.layers.expand](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#permalink-70-expand)
-```python
-paddle.fluid.layers.expand(
-    x,
-    expand_times,
-    name=None
-)
-```
-
-### 功能差异
-#### 输入参数
-Caffe:只能在一个维度上进行复制。
-PaddlePaddle:`expand_times`为一个list或tuple,它存放的是每个维度复制的倍数。
diff --git a/caffe2fluid/examples/imagenet/README.md b/caffe2fluid/examples/imagenet/README.md
deleted file mode 100644
index 28dbe90..0000000
--- a/caffe2fluid/examples/imagenet/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-A demo to show converting caffe models trained on 'imagenet' using caffe2fluid
-
----
-
-# How to use
-
-1. Prepare python environment
-
-2. Download caffe model to "../../models/xxx" which contains "xxx.caffemodel" and "xxx.prototxt"
-
-3. Convert the Caffe model to Fluid model
-   - generate fluid code and weight file
-   ```
-   python convert.py alexnet.prototxt \
-          --caffemodel alexnet.caffemodel \
-          --data-output-path alexnet.npy \
-          --code-output-path alexnet.py
-   ```
-
-   - save weights as fluid model file
-   ```
-   python alexnet.py alexnet.npy ./fluid
-   ```
-
-4. Do inference
-   ```
-   python infer.py infer ./fluid data/65.jpeg
-   ```
-
-5. convert model and do inference together
-   ```
-   bash ./tools/run.sh alexnet ../../models ../../models
-   ```
-   * Assume the Caffe model is stored in '../../models/alexnet.prototxt|caffemodel*'
-   * converted model will be stored as '../../models/alexnet.py|npy*'
-
-6. test the difference with caffe's results (need pycaffe installed)
-   ```
-   bash ./tools/diff.sh alexnet ../../models/ ../../models
-   ```
-   * Make sure your caffemodel is stored in '../../models/alexnet.prototxt|caffemodel*'
-   * The results will be stored in '*./results/alexnet.paddle|caffe*'
diff --git a/caffe2fluid/examples/imagenet/compare.py b/caffe2fluid/examples/imagenet/compare.py
deleted file mode 100644
index 6d708e8..0000000
--- a/caffe2fluid/examples/imagenet/compare.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/python
-
-#
-#a tool to compare tensors in two files or two directories
-#
-
-import sys
-import os
-import functools
-
-
-def walk_dir(rootdir):
-    for subdir, dirs, files in os.walk(rootdir):
-        for file in files:
-            yield file
-
-
-def calc_diff(f1, f2):
-    import numpy as np
-
-    d1 = np.load(f1)
-    d2 = np.load(f2)
-
-    #print d1.shape
-    #print d2.shape
-    #print d1[0, 0, 0:10, 0:10]
-    #print d2[0, 0, 0:10, 0:10]
-
-    d1 = d1.flatten()
-    d2 = d2.flatten()
-
-    d1_num = functools.reduce(lambda x, y: x * y, d1.shape)
-    d2_num = functools.reduce(lambda x, y: x * y, d2.shape)
-    if d1_num != d2_num:
-        print(d1.shape)
-        print(d2.shape)
-        assert (d1_num == d2_num), "their shape is not consistent"
-
-    try:
-        mask = np.abs(d1) >= np.abs(d2)
-        mask = mask.astype('int32')
-
-        df = np.abs(d1 - d2)
-        df = df / (1.0e-10 + np.abs(d1) * mask + np.abs(d2) * (1 - mask))
-        max_df = np.max(df)
-        sq_df = np.mean(df * df)
-        return max_df, sq_df
-    except Exception as e:
-        return 1.0, 1.0
-
-
-def compare(path1, path2, no_exception):
-    def diff(f1, f2):
-        max_df, sq_df = calc_diff(f1, f2)
-        print('[max_df:%.4e, sq_df:%.4e] when compare %s <=> %s' %
-              (max_df, sq_df, os.path.basename(f1), os.path.basename(f2)))
-        if no_exception is False:
-            assert (max_df < 1e-5), \
-                'max_df is too large with value[%.6e]' % (max_df)
-            assert (sq_df < 1e-10), \
-                'sq_df is too large with value[%.6e]' % (sq_df)
-
-    if os.path.exists(path1) is False:
-        print('not found %s' % (path1))
-        return 1
-    elif os.path.exists(path2) is False:
-        print('not found %s' % (path2))
-        return 1
-
-    if path1.find('.npy') > 0 and path2.find('.npy') > 0:
-        diff(path1, path2)
-        return
-
-    for f in walk_dir(path2):
-        if f.find('.npy') < 0:
-            continue
-
-        f1 = os.path.join(path1, f)
-        f2 = os.path.join(path2, f)
-        diff(f1, f2)
-
-    print('all checking succeed to pass')
-    return 0
-
-
-if __name__ == "__main__":
-    if len(sys.argv) == 1:
-        path1 = 'lenet.tf/results'
-        path2 = 'lenet.paddle/results'
-    elif len(sys.argv) >= 3:
-        path1 = sys.argv[1]
-        path2 = sys.argv[2]
-        if len(sys.argv) == 4:
-            no_exception = True
-        else:
-            no_exception = False
-    else:
-        print('usage:')
-        print(' %s [path1] [path2]' % (sys.argv[0]))
-        exit(1)
-
-    #print('compare inner result in %s %s' % (path1, path2))
-    exit(compare(path1, path2, no_exception))
diff --git a/caffe2fluid/examples/imagenet/data/65.jpeg b/caffe2fluid/examples/imagenet/data/65.jpeg
deleted file mode 100644
index fd3a93f59385d6ff632483646e6caee300b56d09..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 109527
zfFX8f8TS$HAdL0PhT6>L$y$SH_p~hsTpg<(kKS5!mx5LM*|gWNmLiEkm8P;?-B<&Eg)Q(ci zcidNJPVFAXt*IKFnGAKNj46F9Ns33aw1yx0VYk;%ef)ufc_i{>krj7RxE#KLmRee* z_H^hp{VmPw(B&mPVw zjeD~pw?3mHvsg*mLtT9*WIm!26vgVR?HKp$U=9v?k2K2NZZHgk?<|_Oi#DLO=<1SH z)nSL<1iEWW4a&~`)+kVhQsjmq!vYGBtVTtKA_oFsIigP4?W%fP*wg7%?wW==$igel zD%3zrpSVX{$n#^)WEeR1k8TDe3#hM{pWV54xV5y6J?vh()#x$=wPHF4bmuA&5m65y zum_JDfgVRyYEgFrwVH~m>a29i)>2JJy0rBoDWr~?>CIR->UTh%MsgQ!Km!L4(k^=( zmNTHbJ;9^b_5D(mmo%=Bm=5NX+s?P7MC3EEFBXDJ*#ee1Z>1zDqFbP1P6j zU{ZHia?+Sm*LzUum6B0jJ8FGcOktFrny&u#Ox`@G83YY%ene`gF zM!O_Q_g5l&F&BZVB1e)(^yI{W2zD4?0VH5A0O-a`3o;HzN?{t5o{bAq){{=JrWGwK zq;i(48iL|PBy-1vC>Ug89SoK>v$<-Z?;eq;H{XBeDg`A0(7ov)1#!mI-nbv-=RE}S z5Qx7my@Nc9r2QAE+1gc&DgL1SOru8Bf;OjAJ^7KJD}gEv#X0ve7)3Y(9eF?Wz8jmz zocTV3U(C+Wqv#r?JwEMP`ZsXsQ2ziUD`pkVVM(|0^qSL;AE5es$^004Zs!Kx`=IP z9oMNv6_H!DJ6}Q|SZLzYA-VUtW)f|7Bs6W7AxPbpKJ4QTJO(F`I*PtRCV;s9m)=tp z^xY@7`)Z{HT(}Q4Nn>EmxWNhsLaMt?3CJe{IXz!0A5bf)*^jH~Ei`6-w7gM&tzxW& z7(jXDT!A4ZV}NpZW9Orb#1vYj-zd8EE3ne^H~i2R-8qXqy1 z7&3v3{;isOl-WyX~1_68$IVbQ*_!&KS$|Iu!k_^+{ZT)#LZP>scK4hCyLpjRckqOVx)7Uy34~7%v9lk$ijoqo^!=WraEeuJc{Hb8BC&DZy$C`e$;0^x@wsU0I^6*MBW zjc@7JY}7$4m!d{y2xXO}ie1|S0H9_(tKgjDX*kH;(8k{4u_n~U7c6L6)Ry#`aa7bM zuOL;86tci}t135wMlg1%Bgj4q^>W^z7^|qpoLX#9C0M_gzoZ*x0>;F67*!eCGRK}U ze>mulBl*d*8>uJq`EuK3NMl8l3eftBq_ARk4{|W)k&J@F$AJh|3#cUbw5T9~qIoMv zV=FL$GnQ8FmzPs2vBR1d~*O-_p~XHMWYO?3Yy$J_#{~BXD7m zatF^D3VKuI?u@_5u|&qvn|9G-r(*q(CL(>&l3l<+*j%V1!RP+~amGpN%aGJoQSKi* z$6Pm`r zE4HfL(V}+=zM?XxfFgq#IKkY1Qx+qhFc39B;`b-A^Fdh8W&X-J8+pxy4C-(T$c()5 z%Z%e03~`Kf+=Xf&6#|TyXRlf+*qi0zOt#H6<4wNJm?q-2G`2hK7I@G_jh8<4z~ zYH1NiEqxi$V@aTn5@ucJAR)*M=g*Px{u}|*@zsczf3{6dN0!X7MFm|w^M+)M<~J&N zEI>keCph?C4?QikAc%F6$yy55o*AsnvWlkBQe^@$gD8xG`S%t8lZF6}KnM=6++d%1 z582wb(9Lqh>0qpKqyiv45>%%2LGE4)soF+Y001$E;iy%G(4R*{!Ae@5w3nhX6j>d2 z#-WG}{ZWTtPae;HdFROBN{z`nnX=o{>DP)ju_dWt1i+;FVEe%!9(Dn{h`Ur-s?$Hq z=UcUyGdoQ2go)Va4er{&uY^^`LHvQ&J08|LfOg$V)g3OqIp)=MF5ao8-(`vuY{3LV z+vTtx2!W6lz*Q%HR396N=`aiWk8r&a#2rDdQ>{F;Y-*Z&KBF3c>7t`6Mq_f!<)RJ_ z=`uk4F~9%;)tKCyEeaC7tM=BhaIM6(eA$Y>qRTK36hcA$NYTz%WlN!v4)7Vd00Gn= z75?7kQ!NT@&LQ7lLHm=xX;RVtpm*N2V@9p1J-9_Vf0tRR4(-e(g4Iedo6^)M3C+;?OeZ5Zjn+m~ddk21}uTb_xA!l?!cLLg*ZN$=h^ zvbn)rZWYOs(T#`!+~+z6)Gw|2-+1c!1f7dNWoS@SYf$q{ z#_0JZHwP2>4o?s@_kpqF(Nam5{TI0OJCJELZ)m-{4UIy%cb27?sTt;sGdyrrk;80F z#jwqjyyqV5*>kHX=T{phHU!sFy?ecNyEi+tO1)3Hdn$&PBnrBfc_-a@eWaS@Q~HGw z(6VK+g;#0LLci|fx)V+@Ly$ky?Jt!(i%4m$_U4hO>eoAtDsj(ZH>o9CN+iOhg(VGz zWqj?&IKaRJ4U3%t;&x}6_!{k|H62&E^!l2D=z4!@PpHB$={$v**Jvbfav~1ia@fle zkUaHPW~+|4@qxQ0bL;)x^<3SrEl;O9HF{!qj%1N%63E*@2Dh<;79POOv=T^N!;_yT zcd7ziQy3Kp)3raWnv?2uwY>wg>gfUh0QBu&QFWO%knQTi9|WQzWU)hmDi!vtb(_miaSu#dk0u-qx%C4hTCPEzx zo9Wup(y};sc?r<~z0Jp47nU7J`6u!*eW}`ukiFIRuX#dI^=7 zZWhF+>W!qa1h*a=k7mn78I@soDF|L2aJed;Mt*)d9tS_RFFS}D{4m7d?T0)!blebJKH;4k~Y2r zU=VS^7?-<+AMPZqbri0V8qKyeSsqxMY*3Oj>WuC?NY4k4cluWwU$~8{u`#M!(!@y8 zYZ2U%8zKmq?+F#BI13XpXKbYIACdjWI!w->>Lw68-iMlhdo}NgIFy znAdA&Om6rCkVY_1P;7&{ngdi{9F%R^Sn26D?91xMU>oPv66Ya=n}IQuLJm^|w;3aG z>LJbON)2q7Gf=aBG_$979;HeH7xg2i2(K%yaI!w;a1<$DYag5rMm07I5k_j+m7NE& z^=kC4X}X7ecTv)9s|joAk+m(X$8p&#vP&+0cLrQ}z%9g6z#hzT<`y~|m#Vr(k)-K~ zO>JwHszBnzvx{pJOYR#)(u}r911C8lhI5hDq(wnOXUNR$xp0Qyc^N+5dU7l6^^Kri zQy$~VFXNs;W6zI0XnHbc)IcRoJJPWB5ulld)|IPzo8lzx%XtKnM&FE_`5hwwXD&h} zuuBzROpPWwbEpOt^g}Qo<5emF+;B<8Gspv;xkmTY=4*K0A}`sl5?L2 ztDdzc2<~gAdeu$!&ZHfv!xHas#;>PIZ%dO-OF3yRQHhHTa;4+iRZBZ# z$UAY4n;2z3S5a~h)j|rkne17fdo@=9B7tLxP(;C5OEa;!W z;?wVXVvLohF@XCPDMJ<#Y$1tK3WneL{U-+i9-Z@Ep-e<~45uZVS0!luRFxlNOsCap zFrkT!2emwabK_~wNykM^h?x}LwULwX6Cyqc}mVIsH?u$p7W@<8xC+@E;be|$}J zcP<-JGh;U=4RLDNUW)1=0xER#U%L%3F+U3gJAK)q 
z%N5CF)oI?SuzHI;)1ici!4U}%@OLoC&p6HxNsG+H!%Pi8*tbMQ8n@K0$kLCTEX=o$}e(bDXDd=CZ9FiQtVS=HAVDC3fn;1 zcW|VDK|BNFJt$f+l}}d#lHJBKrKtpxmv!|4-yqyJmDuf(`mzf)++zc!%C|W(QL`3D zYQza06oyJ{SmDg%vJ8f2m0NPg8)g&&<;x$=;Ux#7EGw{Jlk1v|s+5NGA2Ui%#749H$++iI#>QiP!xTw=4 z(!7Q_2*@F0M%-MmBY;K}F#{REJTIvr;AqSw5Xm~rJTWGsq2)3|5LAmS0QAwfZWva} zWUt0@I&|2mMH8s8v>7uoU%uTM2;vVC1&J_0+mnTDp^pQ@9i#)Ei(x&+1QSpg^*gsB z6W67wY0S1+eKQgjmT=0(Pw_hf7a73-?`A#w=g6Tk?Il~fI8+%A55Ay_Zv{Bs4aDaSG7K? zQbu%-Pn`>_a?Hy9q?3A%JvJT#aKx#??d4Y+AE|_P0F6WBicwOQTGVoA85)Mmjt})2m@(u0V~fR8$ivG!Wt%3SCFwQy~o;}%iVfx6%)8M zZ3|yY<waXn8s+WI;5mq67e)bvT?)_a>O zkzLdEIO9Pk)9OyqO2`|_s;C%k%aPAmU&6{iGT*9C;+1U|>p$@w+MUe$chnBdp2mRo zql`zbRGPh_NeZb1ks`;Fza!L;j(o4_>lpHUoNZ1uGzGztb)!9zqJ2`+JD!j8T^c(} zQKV%lEDWxVocpq*9D=M0k_cdPl6v-2;sUk|@-ty&Y77js*R|Z*h1YLWQxP(}Mtc*b zca=dq!a%#W1lj;6hvc7vQy6RCae>A@ksC`jYSnc6*L58yLaY!UR@Kx{q$>d4tK&G_ zSOo*{PbaMkN3#dI5Q*;T)ggH_iQ?8`iN+#^c-}ci%!Otw3KBra>B-J`>N!vk+bfNj zEj)q^UsAWJeb|u228VLzN3H+ zk5GCYy>dUe)~xp*bW37L8hBu}VWmKJY}@wu}sWpih-#PuA+H7I1O z8dy5B!2bZ$k~<3iNai;0yfE9LsN9Hdoy_b7 zbP{cl6B};GR{2sm&U|8Tg|AR30NF9dpA^>UnI_lTEiMIP91US*+(A=?k+Q(97?KWo z@-vRAF_jk?&4{Pfp5Tj8g|&I(i+o0Oj#4WxIT6Brr3+&$0m)!M==kc_WW{<>UZs|J z3dshW1oWYUK8c>mAV(l(Cujr=61nFf4CEYRtq#WGA`fu;dbX2Vv&|flTM@yqOFDXw zDCD?Efeae};c>_V9Qajg+?{%QczM(8|M9C{7SSoSP z>LlRDgN{!p@zIQ!^>8>V+i6Xy-o2P2K8jZns1Dnd?=mH)e3mn6!0{2br~ zJT5u-9R_en5UqNU&m;nX)OCq1X_|V$>R1xnXpDYJmd_}ooE0aZ^ysq%lWGRNz$)6U zdMj3RHzNRL*soF|)e%Dpf9ywGwvm`9-um*D)E(v3m8DavAwjPOhFoJ1a57maK(<%g;EC!NY9Fgv1Ta5K#T>XH!f+R);Wi#z2iHIC+o?QCibbp z{+>qv0CqM_5P%!GCGBnsQ&6#~!)jS9%wq~nlD(NxSgOW^a0wXNe4aSZQFg7g zbzAo;l6Ji&RvmehS*^H_7HlJSn z`i6&JM$$D5fR=03jvEQJ0&htpRoY6A)G^1$9N+XR7IS$FPUfTOuWU^m`c3^NhNGrS zQ|+gzaGAe=naT@N#a5ObsI)f1Qx%P# z7-u75`P_NP$B*BN0b~$06yvi%$ev^qN^DM`naaZmJ3wNY_J#!K$GePUka3dO^hq*o zq%{a6aK|LeVkw{-*t5*7fB|Ej{yY{L$QVCAg=@R2m6gW7Dg~)VzKL5-khqd*CoJVy zWU43|Gs-bMkH;apA25%n)T%O$;dRS<1{Jv-z^{4{jK(&%c;Ba|&XFnfU~4!+RSN|;PFOKw033sy`8_`|6D9Rf zdzQ;lj_0hViNn0nyNUf-957Q9W8WSKau;?>hGBuv46(*L-$ox5n|^{?)Be*={+O0s zv565GeYim;(BKC78RUWRI%LVnYC*>nMpj_dEK2c*wQ7xqmDamTgkhD`g2QuQ0?eQ& z+mJFz+tFv#e1wjp3);XKr5%ed_Ma~l6Q{95;O+&0kBpq)oD7_RGdImgaH=2O2C|H} zqo>xMv&s=gbh3vmToCHNoFfy!IgI!?$#c7fSCFGBz0cUs-INh@jbpjAr-|9t#2`E| z%FM&uqXTkpJAeB}EKUr3tY0x}+`b&&Ea+-B(`jlNEme#8hu=;lSYmWm`EekMM| z$jQ5eL03>Hg1w8cAN1vx9GHR-d=7jOpZAFvBY-*$l*Cl_+L?#%r$Xxbg2TClY;6<{ zxjcZcOuc_$Tjf8lvHlt<$1(c}UKJ4u|RZ@A2$-o{)3F{s#s!gbt zZy%4>GPmB!iRbOhG!`WFE54voH;DoF0SP1`fr3sz+l~R_q0jDW99`>kSntOo2y2No z1u{JAZ(nr9q5( zjB?o_fdzphBxA=@%s^f-(TO%7x_#ZqQmq)x^8TB{JdsR-KX4m@ju#8SUQRL3fh=l3 zp&3p0BaFzmW~&4>VM`H(k*I176kw^&5-6wFF;W1?Vxz(MQl#<+RygQbfif}C8idt9 zaW#=Cu?1#DO88zOROF#zfN&H5G5F-;Jt=DuNF>yW=+=7Ch!EIV<0S7@Za>1?xHtni zAden7JtTGnPo_&l`m<`fm#opGo`3+x>bcyE8yggsQ;g*2qS!d>PDkbYgxY;JR))M* zzS?5Zsf^BKkA68a7E`sk$irtp=fNsEA^_HP3{lYG6g3N&w`)= z7?5~41ohYVjz}j_tTmqWt09UCk18XH)Q!)9!DF0dc^seZ$mzfbLPH^=xW?OCkyKRF zH3wIHt2I)HirY>Dqps!{4%pj_0x~@G0Igi8PDRf}{nqZeKalEegvH@DrMPB_Pb`}_vPMB$JPMbg=YSW~O8 zvY)t+6*9>rK=Mx2$hjoJb3pJl?6z_ z1xPQDar2n7+0cxvLsLveq+fX*VwAwr&mO@%rc_n^Ml`?}PzwN<;eq`?^~;L3JA~yI zP>N7M(Ifq=x?HLVj7DQ+vWMX9_LK}%f<8w+4o)PXOhes9RqMwTFv)0Bhf$VQK`fvd zB>w<)ImjH5kX6@wjFHHUsATrzWU^-rHx&#)=hFar!S9d&i~5stk32ok29%m@ zDwg!J>e2ekVI_*5N%vQchF(Cq{d-PEdGK*@aCJ6(!B+#)w4H9eF{Dy=Bt;-a-)~T2 zZ&;IYc732=VSh^P!=I7K=futQz;Z8e+TVXkqj@Lw5+Wj%rLyw8d|{R5e1^d%01RP& zJatt@K~NSAxJ~}$kGJEFe&Dxmsd&;BMztJqPZ$6wc9g0xW^LH>?)+n|36t58ZycVd zJk&plucX>N$$@zi+-|_6&8Z{ZsxSR^M-5?VTe=?yDj)*EIc3gI5=(-+++K5#0x*Hn|PEe&;w+I*wl` z1_zJ%l*G$lRHgT)(?8;;>OJ{;hJUYJ@$}ad69$Xg)(kZ0xNzno3H2;V`bjf07UO76 
zSEBj+lH9P6qwU}MGNZ}Hrj=|-dpETI0F4gO)F^0={88wd){6(CVY8}eF={p(XKaSa zc_Ygzs-3%tEac&K5JnF(AZ{%pDmR5*QYyGRNG`2I)qj_N3Kku+nLhXz?a+`&Fjd$c@8# zL=xN(9R8E=KpjDY2}R@VO(C;+%X-LTiZ_z9@z^7_L2Y?vbRL$cCfI>Ade7 z1$8XNh8+9O8B{ZEKLh+dD(7Kxjf@>ht7^59KS3t9QV67s$m-!FBQAX;t`vpgsqd`h7Hr=hSZdp#rcq~ATeqN`soHW>XcUOnNqt}%dkz#DLP5y8h= zKTbfI!_kU<&eV-8??Xt5j$ufm4iSoOBQndl<#$GL$OH8Ldd|K%7cMg>BE(U4IV#4s zr?F#Ex?))ci;^G(#OkF>GbZ(u0Y=l2oN0{|88qdVU0+nLi34wz&0s7E3*pY(mq|ys z=PE#E8O|`ls8Ngofla1}uWE8quU`J641!wd%CNnEW0EAYtalBFpf9F&wm}V+V!??Z zNxFh1nvoZ%tdy=pc4yrmuOr6tP8S3j0wEiRxcqJ;a57I?>E)%6s6dWG8BbnNE}(=!=Y;qt0SBl~#pI?QuF+H~a9_1#jW_N&+W zciwPC8wIN^B&!6}Jw^eFEN|`vhLODSka_DkJbDt?-AdKVdo32JWi^@TS%j|NeO6H3 z-OL_7PT~N{fA%qnQs}_Y6FGX<-FuD1&KWIpFL>=dx%t# zR~Lt4e!lcuWHZ#l~tJ+B0Do&vBV1+jyZft(yF&HbeG<+tFdQ z>FMj++K#8E#_@n*vm8hY`?HWiE1kYF7-t_Rrpb&vlD83NPa4Q)hJ?0hKq98LBuFci zc`?BBn7@I6ka7Bs2|Yo;=uH>y6X+Rr3k@7kEUuvIZ(fC)I)0<9tP?p7Bas*Y zvN#K|`M@oO+qjT&dee&{(GgA$wEEOcv9|SiqJ~A8L_wLd0nCZF!Q^?!IOOrwS*SpY zFHmo`NHJqiNC9^eF4y%71K^iF;KXe zRweTi#`XYkb%6j;h45^aBvC7MJldK>bWkW(92dS5n__~24q-S zqj;eNWc!Fig#}Jd2nWD9=zQCisxnWwqgvcl7FxFCn{0*(@+p!q>Jdp$PRRU{-yn{B zbtQ04rAP*AV6+#K3es5hL{8z>hpzHB3+y~+g&YES!9N`|lgEs_ae`(cG)|E}yO6tSV{RHd?oljAUPvH_3PEg@Lw4P zDhMdC-0AucV0I>*Ex9z9w2BEM7+%5z24@(^3rSePB45X37D+!8>@9y+1tdbjS41Pwl1e3w8`iYg5r$7?aXM=ERZJP}a6a zvN|Xzt(JBnR|h*V0I(L)*q7?3>UmpH)W3>`(lm=&U)H~)P+f^b-5Q@|`iSmiJH!*A ze6&~%hXA+$WPgXx@w}YB#0CEVSxmx>fjhs^JsZC&rn9d1zjjpb{aaW2XfJ)fjTJie z-acZSs2ORDZmPp4XQY_UIQ4vd*$#~l{rt=sM(j^Ttdc6B3ArAp_0?l$c(PCU--4q& zrUy9NoVQmueZZ}9Ry3s4UO^H=cLXyuj-)V6%3_dX+*yGvxl#GZ>$v+egJZaLN;6iO zQ6sXl%@!Cec~IWb?JN%FA%957@CosT3w;?fN3PsgaitEod~PqjOX<(^izA0c8W z-_uMTkLoT)0|s1x!7F=>%jx8OO)O}&8vL@8Pf@WDMzYF|6DmoLH`};v9!K~?5^xVp zFplmiZCDc(saLD^bmqT4tacspNLh?0d6Nv}D{WE^7_q|>n9F z{WUy+&NGbU?im)YCl(@vihWt-CMr?eMuxbUmrb$$l0gKF?b>$}z-~GG7Rjm-Y)84R zY2>!__pHXWjM09e*}&eRm~uxven=ekA%5X!07&fJ)MAPWVEcHk258lVK{GH?%oUZC zgOC8p@sdUdo-bO29npxaTcIsk{{Ww9a-~oh+Vy5i@uY?K7~K)snOVp=*^X2KI6Yv< zf9{%-7PTR*uCQObtYFpQWs#X7vWn5i>{TUX3f^6T-O7@=$!?S9eS~(zJ;A5e=|iW< zYSxh`D+!TlC6#4}gYGi3Hx1!~C?!3kX*eAP%X_FB7$S*>%xz32qpIq9O^IB*Z%(hN zvJ8)N6k^BD7%?Lrp*wgTRTjpi+)&?ASSdTuMAW6LT^cNsBoQj@c5T6zJbr=Wf}8=6 z0VqVD(nU>Z+Sc_6D9H`2OH-jUdtocIq)Q+8Ko|!g`?<&nIL6V!?y4b00-!gkP?FS= zPowGfCO9l?$gq_-@T$r(2j`KycPAY`AT<-^G1xM@6&KK8sY^}OMHv$@?AoMFk+de( z8%Y~UB$X!zIR~tbjjq9##^iqKYp)cxwCfe9$xK>C^?f^PF#hZp%w+R{?ZNi(`09sZ z1;=8HFbLtTr+PJ{k{!gD+D~X;GT!C^n11-rkMGkx)hw#lvl3PA?HkpWDeg;7!DF%~ zt0Ko1PBXmZo!-zmz`?-j6Dj?*xTgrdQN%FJ>plHKX@%{fzoEA)8xM~MIRF;m@^R;` z2uY(I&c2x9mX!=KLq?Nnx>86mM)f&nIRtqG@^F9a&sqjCAkN)Im#e%4RE@PQPKXFC zr0xI_o(n7Gix4t)h2#U1o}^CYF*QM@r8MHklSCferS9~^J46wTdnEM66v}cvrG^R5 z&Zklb}#d7HbCd@E3fio8=h zK_r&tFS>8*u|$yk<*+l!;PHY_Rr+MWC!q?cn`_yQph~J^+VVW48$RrQqImE|I3xVW zS{SOV=2j305}m6lBUft@%%%@kV9X;N^SQCSAJgsqiR&D_k=$LWO$8YeIH75o$PTR| zewH|6ji7GH03RN5MmoyjROm}CGGGzc)F!uD)H*yg0!9(!`<6CH4Urg;K-hqeS0^Cx z(6Q9F7W>OJDK!GsWVJnXrWj80DU>$<04k`w06FDGM}U0wMrzTS_}u4R$@OpPZ*0`n zF<^U@3)jR?OvL84_lx&FWXKJ42sO3tg^ z38~X?mrkFeTC~v5Bzxo9RuUCjJi6^5Fu*50CQRs`xYTe>>N4#Qt(t$;Di`AI%H7eU zt#V_UeR8vDKB3yK3PCG+=i0#ffmIuD19nL048 zV-L9S&Z~lQIVXSz$0MmL8FCV6= z-*SON7G+bou`ACW;s!=K0Z0ZsekHJiDN@P1ET}?nO`%ID1?CI2ecZPPjEr>2eVB`_ zO=qrPo@s8X#|-MgGRpfGYX#+#1wl9=Kp4&m=(-cJ)Lvvlp@PcTw;XQ_jq0qM3I-xD zgV=!kj&L_*i~y5BY`+jO!RR8G%aC5=a4d1jf9y2PLrZB}-zA9M5kGK2V}cJOoMYpT zxdrtLw(c&S>uL30eJw)M5i?0*oIBMNZ3hLHAT~fM0BjBhdXu*frHH+VinBODVFPggM|c?_%+1;Hgi0OaR!;NTuH)p6%0DAD$R1kU23!{yiFyIqkp3#D+2Pm4WMi)<90TM^TE$EGaB_DhOQ@? 
zwxg_RvejxjcBxJP(LrLfH1f+30IMSAJh23V1~8-N$%b2B+dB*K?@_Yp;{AAURIy^M zxBjH?RT5KnnU%KgAmwt-K_na>k~+|ETI|kEHswa8eJv3H&3e3cy zjAd7JjLjMk-GJ&CafNF$owZjlHMCQ9X`aN=SVrF>sgyLO25I&*rN?r!9D@hmohUBxaK#1Zxr!=k+UKbMt@~J!nOtVoks+MD7`0 z`o^~F?6DTB9KW>i%;BXW#w7zF14XP_M3;~i3b5oLP%TxaVS*<(~8PI9c2nv<=n3ypA zW7)FVHFsmj8lyis+DzN#zPN`jBq*%pJ~rV zNo7||iJ{VIRc9sTF|3=T4ypkz=Xqn63xXK%amfRxC_Tw5u_Tu}!HRVX zw6T9X=Z)M6TQUyvDNQ!<&0 z?&VcD+~hdUPI>&ECi>ZuRn?1``&-Q#q!Vfs>npT_(z-{d$=aUYSb|&nXCRZeox6n! zgD9kdVi`S1Xj5mu91yH3(nP}!jH-8>0g^s(^SEPi!0L&EQz>S>%}sz>y?H9yMH!FN zYMYZJ1|HJbBR<^x;BNT>%B#lAP^FB(p}4C}Qc0_p^?$0t1F4Zo8^{~-G2~$2AI4M= zVv$;s$yiM0TM@||r~D?th^QO@Ljp;_9PyvP@z+wr#s2b(KXFG!(#d9}n5nF(?1?>d zVKL>3{(w&Wm%$rH7(ICln1eK`3fK9KD{He}7ADf7wd#qTEc$4N2O~IMc-lxDf0xf% zk%2W8n})79u8$flwhoz1fe}V_mn)8UF!P^wGw?7vvaLyvsiBi8RC%X@1%UfZ>9q?5 z2i<@UF_VmAJF*8un2vD8mNil0e^KKN$WmuB(JG# zI6mf071435Sem&=$EQDB+QS}?@eR9RAH5&Ey z!xebYJh#kjiwNMT%Hwf4Jaf(oA3b(F&3l*ND_zZX8LdcOSZ_^cbz@#i@H)vb+=*+MMM#k8l`Ms6 zZrjuKJCejc;h{$g{^9{dLR4d(a!yCidbk7)n~h+4)RyJi5ZA1ey=^wC&WMN*jq))8f-U`R+ zGAxng5>dwk7&zz2&&E2=;N8@!xFfhpI?BgtDSp-sUc~J2vES*|IM|zuk_k|Ag#Z>A z<2^Y9fDFh_)d`DB)+;Nb+OrvI`7bl?Ns|~Z#l3->gO40|^V0+aVrUE?&!JOk{WYOk zYRzMQC2v<`r0*q(2Ml)ex7)`9f!1T=sc7V$Gv?BSH|)h8mrmd)vb@ZII-FruN&O(; zU=j1vew+=6hkBb?U1jy{P{HL^s|mBSR-6+%}AEBRS3wwZ9qY6yzhLk8xd?r-kKPMuf2}<)H~8 z##Sa$jQ*pYs-ywQz!>@X%!mP_J0T?MF)tC)<}}J20!L-PG2Qjx~pf&ROLgG9EB60X%p= z9P!hw17;U<)Rt?(YfMtKw2PoB8H^(X;5Z(_NzQ&hzfSHCQ8*BLi$pq{DPn6@gmA{b ztH!`hmn3^bAj+c#-JIuw4;?_U9nRt|;1XBWZl$D_cCjf|XxI-|+!&nmln5|D05=5u z^ntAEQHrl}8`?Wrpet&cciuenJ0Du8ZOX@$XUG9qX9REnT=F_|j>Q;-m^UHRn#PZ0 zh~a_?l)y4Cr&P0IJ=<9BaqiwQN{#_jgY(o1SE$$alZ_Hd;Soz_wR;CSS=MD&x^=w^>2GS-?!Ty=*V_6AZP#a#OL~p9s~jLZNOr2=%_bKi zJ;&Nk&~kd79~&!acQ#@U=ZcNbH}^O3Q>9(etRJFXrQRP<>#|8|maF`p)qNr$$dUvK zLP*mQEG@TkhTNxc1ogA@*fITCjs65Hfl!Hk0pPaI*&40QDwMT~!CEzn1EJ>=MI&9rXz)YKX;yY(4;C8n(!{{T&tu?r>`24LGs9CcO4tC;KpLEOcu zb$CAABfoa63W27B(MtljVlyHmDaj{yxDYTwIpFoo2r_^GErgve)M)0gA!((7WA!3f z%u%QgRh(e{q3+5IXN(RwsRFiUZQR&*B{*!PRBKtjL31w30!T{+Im6^Az$HfQ+=0$U zM2HnGg2hNI_Xle`Nnp- zUpo)jw=NJUvlCVC-r}w!y;tc(?#TN{eO?%aWM|q7$|5*G*e#S(i42~nBLQ7jtD^t)zr`c%{6R zy(d26s-bYoGCkiXKaQbte^jUGIs}TIh5a(!G@SaLodUa}kNw@mWVhs#=Rcma97TOf zWy`8rZ_?w=tjBd~1kMlhBs{(!`ISSd0L{-QBidjSw%#8wo zxHvxm9D(usar<;b->Gy{>Q6?rHl~tTks$YOA8LdtWB&4ye$L#P?j{W;RS2|ksj z74%#zNe)9OY-5rKo^j4kO~r|!9?ctsuAQih@v_Mlp$suOf-@EXUB2Cp+(tQWK6CpH zm2fhfGGK=4Zz9+A;jwNgXN_c8og_Jq5;2xc_+ALd&nKR_p1n$fAu+YNlGT{>?K|&b z4d^j)?I7KZjhNbTkPb=Y^~R3ZxK_QupXyUTjpVZ%kL~+K^ljkR% zetN$>cP6WE3L1@Cm0*I!LsGnLE5QT^sv?XI0wCaydHj5QbdQt;h0w(-s@67a(Y2<< z1G5XO45)FmXZoC`6}%r$fNfOhH1)3Dk}{TUNi0#g6`bQ_a)4AfurHi| z3BkxcNatcY>S}BtEtp+RZ&lP@;i|GpU`DXRFkGU7H|ACd00WHkljlAK0qr0o1MN6C1O4K8>UkIqm{<8gY}eOyny^(@nrPBv>I7tgr;rb7G|UMgdve&v z#xgqVXR7J~Li&xBH2Y2IT%v5)cVA9FU-~v!9ou3gZa8Faa0eOvx*7vh(^cCah&xP-%;3or24gaWv!@5P)Di;RdZc{oXmMD2*BF8 zA&)*h0gUx5n0V|)86diSRMYW>tx3#4tX2?un?j!~ zPB;M%$C9MyqD_m46b(Q$%~d3f!#$?0Mpam1aW^uYZ^?2=3J&ZQILIH{spHLNr!rpS z2Wy_3&l6G;NU}UJwY)AvUL zw{bZGBWU9YNycyyTkj~<>Mw^^^@x>aqxK6MLSs_tEUZVgEJTIm9(mvso(brH+`keo z;EFQCX5>F_0Ft!sKg4r}jFMGA#!drg0~j7pk=4kqVf)VdrqS70*_x~(A6T|aNH=2*xsKz;c8+pR4?JL%J?cr@cQz>q zR!Z?f8H}bPgs_rjCm8??5yNAT$3H!H%O`WWYT&b3C7IG{$s#iqQ@C{}1=ft92=MB9uA_))+EAa3Wu!OOBGeqd8wM0&02b#Eox z%}s7NE@5!HTmXHWK*&TGkmH3J^NzFpv3D#>Fd|@S?WZ`oU3Hr}d)tn*Hz$RjM4i>0 zodjWut~Q0+?HL30ZX`0+#C5)!4QE=8oUp@6xAUzg270b#wIV5Atp&0twSAT zr&CQfwN$ArGss>kW>WG&<+6a~auq<3ew^Sa2a2h^84wLrnr7CbmY$-P>*?{t!KGmG zZHf@sauF1>zw{)VP@h<5mfq3Z@jH%R@CiSmdwf{j^)5=q_GujMi?%_ zbCy&%B}V1IIgvm0w(BGWaQA%=fSSlXi~fMDh03<+?=F$2efGs2p- 
zP+O>h2^%uFG|fC&ip}VIisT~9m!Ym&NQ{ysYZ#QHw$l4@eLchg0}crSM{QtEEI^=P zI=lA%pCtEZV>IuIoY=a2SUV`;FSY7IqHh-?nojLsXb~C#?;wny{b(TMI1_0LmpJ0 zXq9%h5;i#vxSlfIPa_KmdgZjG6SG0dp)cQ`F9Hk?xfhp9Su#!JcQpgu>-c#MX zIOD>uJe#B385Ak{VPjsUQc1&UcV#Zy$&Q=Ipx^>GBy2pX@^ks=n>89DAjUrO#ZFrF z&={oj&g^U$!~EnNXXDRax&=MYJga}{l1*;a6d*L7zE@~=AJhZD{{W72)bf%}^(>>0 z?U|C;#W5=EQz}OCMnPlyeEgsOI-LbIQNo$^MYk-*D0MRxZWj{Q1)Y?i@q>??6OMO& zdYHsdMmQh{$k`Cd6>=e0_o~S$PyyS@;QZskKc1zNC3P>45MgA*?Qt~3w;a$|cs3TZ z0DQXuMs{b(nUXd$(0B|qtsV*rMSOk6MrsZt7if=ZONPrmB>?^t_LIIsOA8R zS!VwHnp#&EQlhaudjiiKQCSK>a}qC*0LjnK87IV9c$oZ$Ug2=r^yf_K5sVp$pwdlUPPNWgVw8mc1kRldl0g8dV zlY^h0e*>);@qxHYlc6Woph;D(NbuHyLWnFp-q%y%z|QT*4T459{rad%Gmf}V_) zr?ICg5xf-PSI$rPvUe+-k(_~!dKtfNr2^4|#{q_xp&o>ESZ!FDjFugm!r0FN0-+h& zbD#cto-_3IFF+m547S$2nQTu_PsC+jc;Jx1mpfJkP{3|Jt{j~P^(9G7hKb{+ZL?cvLe0R4GF5_ zYsCy`pl)~TgTUS*p({P zqN%E~-kG&+c*@Y(Mva74j>Z0=+@Nj%1y&~*By*0Vg;X}?kCi#g;uXoW|h_E zor5HeTw^9Mcqqh$RgqaehcnH2igh+Y@BCEC+OKJG%>j1|Dh3+kQ@3C;$5 zpT>HmS%j6Rh8sm8Iz%K#3^GscQO@UP+(9RIEIfb*OrKN$R4?r}QL|%Dt6n&r8fEmt z#KB4Zc>pm|Kse9%Pd!s7D%3(2=*F8gSe7N@zP>q zG-Wt|C_=1NhQ)X!jjC6xD>Jz={{T^&1CMtBN`_<4Ho{i*E!w%L z)I^Qz!xTXg5k@`O9_7gg9z1^IsWR76jxVVpjjifBJTS>Z3UEihRfAxbCg3l)A7CIT zDo##7Jt`v}<52J1-dOc1Ua@DRZk-ugc&ouM-7<1;Ndp5Ua(Kdnj;a^bR$*2o8g!ni zmfVq@GR$&3Fjr(oKA2Sj+^NE!_i=!E<0GX+sW)NRF|E0w`(UyCtY{%K85r$WlYhZE z+{YyC@>_w`S*|WSlU9*0SCOOB<)>OV0XE1O*c^hyV3ff=bCHwJK=-JnN}Dl!As~gE zN@bMJs0>_hC5RaS@G+c{Fn`ylMI*SZqQo&xnGJjPWs^C8Hwv{K ziEUWFD?%u|Fp_4F7}psM$M`_O$^4Kw9R^}e$W3enp3+^3BCjMzW3qv|76WlAcH|J7 zhX;`U4tmPUtooY6P^h;vPAy4WSJbRQ3U5f(xv2V|XUjCe|t$u{Bi+rK85qNKi@uGtcQG&yKlJo}`rADtlT*d83JS2y0s_Au*Tt zqoDy*WR=>WkU%4my9c7jRA&QYQEWP>qY>0EY4g*C>Xh{?qi57r8Qh7IPS8iNoPm>{ zgc-Q?9k7Bb5ghfcTUqE)Q)a3=G=nR>*WKwbT;l|RjxabG=c=xDJCMzQ4KHvwcj?zc zvQMg2eV&@ifm9=({)|8dAhBReag&^d2MhC98oEI2EUvImA$UGJ#kYwz2asdvJUc7gl z>5%EnlFFsv8!#L(_b3dKoVT|da6$5{2M`Z%I{idnP&uI0r-`k{>rASC@P!VpFd2r4#?ermX{_X#Cnb#q}}5j=W~_UD;p z1=S{f;urlTRU6u>!JCW>`Oi2S2aJ1@Y^~5SgyyB4M42m~mN!`Ab#UM=a@okhP!2{h z&mMZI!Z}k!sum)}O*+0NuS7=}9+nesHeqIYIQTk3h1ZQ7WXr+=_>b9l{3-fu&YQo)W% zEUZT(&PF=p;OZO-LUlaKwslkwJP&cmsv1{{CI z+^<1$YtjnuemIGhCVJA=F7B*HD!|~E#~v~G9Yhk_jM!+{k?MEbPm(IhVf4w1NgaA6 zOQ9|f8JMY3LuBOqK2k@^Tc zjoyuMj2V9qW8+_7ttLo9Xl>$1n_W77$OBV&L;c2Hj#&Jd6f87Hldy$KsVh_azH zlC67|C5AEk|}5Hla-{nA38saERIR zGLj5Qy9zQgaHFcbd-Ve7+|3)p0Q9K)uTT`d z$4tucMr)}x)G3l_31wmd#uEyw&IWOw4;=MZ>f)}A2-Z0&$5m=aBD$z5rDFkQ&j(=n z@_8qYr$eDAR-_hw?kzec$6@y!J}tAw&AkTa^7 z-Ip-{7z6F{4n}-q#(L*!?kFVn0fOxIEka?Oy4IT}!a!14i-WsPb;_$>gs)JjN%E zQ!$W{?jcnBiN^%~bJB5AY8Ah6MxjQHd4!W`_oR3gW;Mfont9rDJ1}PiQ{qQ*MT;7N#v+2$vRsBz z%@l@f4-<@SY_GJOxjEW4j{_$i19vPN?+697!EQZ2M72*{u>c7vRwHIYu|P>GbCHa3 zk&Nf6h5}8Tw&Tg7n#~)@C0OB?(^&ztosy_b;g=haDoGac#IR5~a zNj|5h5cp)Qt06P(U@~LIazRs&eY?77SXkU&EpPp6+P7|fDuhh?mF%ygk(C~vNF$Qp z;l}0p^PY_uqa_NAVy04et0c8-`l!WcuAuB0J9nam0G+Fm{^?(F$>^vj`AIfXBnH)( zXGDa|7R8=aiA~5zVZr~BPVDt0T z`7;x7fD2U^X}WZjDpqq* ztw`R}8Bn9sIohNTfE&+~&sqSpBKZfXL|aFr4jb6m3Ue0E!C*g9;9&d@ZwIU4Rpm`2 z&QI=;NhKQ5F&>=zV=P=bk~IyM03LTA^3Q;Jncy1kb~GecQIyVByywyqTxLmK&d9*$ z8=t_y;14GQ_XDQ-B^MJ8jE!TCTbCc~k3QxvB&m*q+a$hxo`@-4?%LwDMW@75`vm}1-iWmd{ zdB===^Uu#g0Q6;4l^YNnx@^MK73u`314yotsdWVd46a5$sB%W^mB!>Fo{-|RLJ^X` zbQyH5GZ*BVCbMDV1C|jg#tV1@!R5DO^$!5&g4jUO)KilFs8o{uzRffUl>)2~s02I6 zHxQ&T*hvInZ8-`vo;smhPymar;J?fM&RHheYQ5=9Mn_g8?+ucJ2RH-91_m*Y9ZhiV zRAtmPuf97lHo( zQhE#p+;B;n#Wji$iJ`Ab)#=JdT1Zc(KrR(ow(oTtOo!SJgPa0SNX{!!!k^-wdYWa5 zms$(IWXmC%dC~ntD{oLs92O0bP=S&#Gs);6j4t|sqzx^4NlmE1VnO#Fk`0nHZ6?oJ zAi(fRQ-{Knk>GGiu{&PosXwm|Al9uVhMq}ZF{A$ghhcGyoGy4g0L(^DOs;yIR1Ju( zS+6_Vf(X9mNWqISuK3)pOKfex90fQy3Oo#)Vy&I3q)s1WsT=BTBtLTu@Nbes24s)Z 
z+aWmE1OBF9gV~P&}h3RfVMF^HnF%Ca#+pVbT)q5wF+ zCw4r6)Zd7jQtxvzT(52BwGG)Mp*U#K<6Wy7s*u1C$`1tPfd#Rgj~zppKel#7JDaU_ zv0j^fwvSa-Ad45HC@}y|0UqEF&IUaA2R%14Gkl{W)U_QllvU>R?MAUl5L&?psQl+{ zGs1v#{f2%z%;a`NzBVVg4b4Gp*tHg)BcuAt&bGny#ts7lIXFL_GvlZ;pHjmLgK{aP znY8%oikEc7VIs_9lr*fl;fNrj{5C%asJfqWxS3DwF>A{;C?dGj?9Zges|@U`Bgr}( z0Cu4LIRlP7ZAKMt+X^$Y1N^IbPy+~-t@r7kbX+%1b|0O*XmBe z1Q@?ktu2}`*Jsmp^Mpuh!lWynH|}*j;O7}Q{GO)3gLfzit?Cz9<%Vlk?^0MRD`GV= zGoqFN6g-@^SM?8R+mGL;#z8jFH{wliQN3>HG-R<=VTXK$pfVpBMM8v}A9hde;NbMR zZo~saacxSMHH3S8&#CG2Jb3kBuPPWPat7F@@?3%yxIW|OjyiP4N7P2)a!G{Jw_)R+ zD|&qzH8w}~u&77Q2?h3oKv98!etJ5EaZYQ!o~s57#!oz$3c)2aTd&J3sb1p7R0rh{D8X8CRn=+954HaIVV3k&ssnO zQ5jgx=C>S8TG!uL)Cosb4H(-S3}?h1y8@Jxsl7YTzYxE8n#mY7^=@#MTj&D%TQ6D6}_YEu0ORB##Hc_&rF1 zO^8XaQH+opx^Z1UPKp)&pxGG_s@` zr*dytg?pDEW-{2bIS>6z?h&G3V*q(4{JJ9%W4X-hESgLdYs8Z@^2K^U3b`euRn9;J zf=D9(f`1>5wJspLkY>0*tEWp9MyXi(XxrmSQ!b6j&frc5C;YxTqo4xd2BW`T3F3y@ zr>hI2A52CS9Fhie=N~`o$6CcushcJ}#mx~!u=7A=WRu^)Z%mVc6b2TZu=ZcUw-LayoQEtLC+ zCP@}8zM0&oZv?xJa&U3}dFzM-Mo@aFqMolzol1>;LYy{esx0M06TR}gah^_2GtO~= zo_d%cXzI5#v2JM6w%Sois5D5dXN1KKcgazPAQL7Fwj09ZC!niRj?^x0)M?OJS5%*S z!w^2auFBZJ%P9RLI0S*o+n*gfAnd@I2^41b6YhkHvBB%uK=R75?)!+giM zZEj{6+IrHM;ij?2hGsB^RT#)*P<^>1d$N9b$5q0&^no*zLPvIZEU1QhX<}I$J6V|v zZObZ#7$kW=9d|1{TT#^3#NNENC7tbCaVaw+ZfTp^Mslj07EzF-=jZX@Z~|yxyeLVh zaRM5Z!^<8$Xu>!i2^&c{1QLAy#N!VTC7fz+O*1F!BhL#f%LZU8!@pY}P@gSVra@lZFhdoIA*Y z!7Y_6pk$~A&f(T(%I(ykRK0gIr(TfP((TvQ9qTpELcx`qF(Lp=nMVZjF|_VreY}jG ziZcPY!9duXS(Yfsk4u{4T2)m_k=TtIsBS=FK?8%}o-%Rq)QP^M1_QXlqDYNaI`G`C zVpyYjnn8)O#O6lHW;`}Z;2eXGJv9(2Pa=~mvf8k3>Q?l_aw>9cLF=@w6dbT%s7S*c z@;}|iP6tS@eag~+0iQt8*;wS$^eul(v=anP7>U(*1ZNG)7Req!P)-5nSIa_CKmy6R zv}8|e)|+0_%G)Lr%-gG&=4K$W9iU3%{o;250Rxal5ie4CJdy`eb6Sj6Y&^5QxC18i zEhMVzfC$^l;CTe#;2h@|>chn&axI_l08Kt!Qd=@unk!eLnn@5m;6)ro$=Z?<7xaKI zqbs=ckU&lhbrj?zYGtO^q_a4qQsi1hb4m9G!n%tYQUHv{o&NwNjHx*U=Zr1Zrh-%s z_cG0HvaP7&lUAAn(zuaBp^h?C1!f?LNZ=OW24UkJIKJdcf_8Nyfa+SDacXeTtgz$P zRNf9@dv&!wxpGY>@~?W5@}ezR*+gS1dn8dMkYuG&1JU3V$A=Z}Xqk&-r7g`Nnt2$?m0U zeNGtCS@jEb>E4#W5mlN^uVCeHhZw-lIOlNswy!nSU%0Uxj33alLop4T@@F@XciHbC;MmB<@Dtz*J&yO8fBN6n7!jGtnw{%;YbTnp`g_%qexhXxB zmB|DIXKxw6{Xd^4J$5@$5uUr7%c(6XT3gUYZJ=hXjXO&rEXO<4U`9p($mDzw4^du3 zni1BFoZT`d==+mOiFDahJ4A+4Dh;?Kp)5hhKyN=d&sIkQ#5q2pOB&W)LCkjR>JdSv zu4QQ~K)$Taw45w_kOn*f;~flvk5fK3y97lO*wr-cKD>JDdOGQ{GM2Ftye~Oyl2SF` zHgSxuGsitgks31Wiq$l>DAu(eq_IT$bg_C;#bQ$Pvw`{yn~n(1as~zleB+=Njmg<2 zs$$39*0EN^x+OQ$WrKP9i%Pa+BY@jL$pn1(!Nxiv*67Np1QQ&{X~|*=^d`LoE3{^@r$^CHNs2f3GSM>S8`RAtOWfvKT>9=gwu5awCb``*Z-HCZf z&RKv5jky3E6UXFrPoCbS@+d#L6AM#WYF^Z(vbXG`LgpymP^2-~6^7u4-ZH#nJm;rU zr)Efk4T)o0?7Drcaa2fEw^giXm@}qW4+A?$B$J*`@1CDBfH?@b@-Z}b7bdcGD;6|& zlTGdoYN~=_hA;#T#y1u@-~z;)a(MICE>1$~QGin?xA!w%sZzy_EJGTZq*!V^alC^V z%!GrI#c{#O1M`lP5jCmJexpaM)?;fvnp3+Kklc@QkTHNy8TSBjpOMf}yAhNd+$I>S z#N}P&mDds@`lN{gU&k08PCsw*>p>D(Tk5Iww=OLvlT6YQ)KX_}pqN_c!C($SB$9c? 
zPf^JHcCwhj9muIqJJ+{fv<|kTMZ?J_`$q%@CN;p_$-wc*>BdVOglvorMm7vMjhit| z=}QZb%T}O>4NpT50C3Mxs+#SJTQc4YfhfAmcoL&#gH!im9Ao zYqJNfsMC&lCS;PlOc0}4LAp;WFb4ph1_vH9^Vb$y63vX(q_V=$qz(5eR(CBJ0}e?8 zBaT4xoPI|=aLdQ3FP7z~$YtA5dQU9%(;lhamSe{(0Lyd0+CM!@=CmXLI)Yle_on4Y zYTBtFh0r^N+|B@O2-||VAa3B~e%b0I-sMUFH3hE(HR@cRJv|==IG1dJ+~i}yUTj$eZkf_=k~ecWJ?)Cgm#qQ~`F ztw2cveaPy`_t3jQ>upk2Xx*eFi3jRnN{~=zgU=bq!Rea&oXiQSED`B#H{469G$~+i zh!Q;~WeOMv8DiP+elwi&$73`s#UuDc9#68Gb}L`)&H@=+;7fo=UOa)n`ijz+hAh%Ye_G#If3#pbM z?*xR96z2nxxBv!lGoGDW?+{llPpdp|$!a6E%Dh`u!N72%4URY&K2Mx+k%Q1VYq&mB z&1{H$TC9^BE3uzxZ%fs1Ku|+qfy*4=a6WN@A7huO_onpp?4S~$E=gAXBQ20Y{^h_R(3#wdweASDR<*NC?!6f;Jd0sBDG~rkWp(zC zX~18`2T$_F?khPXeb1nV+qIoaYt^1lOVAU?Ut$1Uur7(Xj41#q$R&O}4j6v%)s1yN zX(pC$UCiHZ#hf#hK_26i&Oij5V<4V*>wzPw%UEG%5)v7r{& z)T4${)!>5TsC8LPhF6UGtOnDbKnQdGA+(NqA%TUlSJX_+nV!6B6G07_4lyGFHYq>j z1Prhtu)Jp{o;lD|P`N*JC&_9xihES-%LIz@zpNb?pL>=iK*8Eb8&4VF=N#E}`(gz= zw*^~kIvR5J(@f zC4@q3gye<*u-%Qp{PWZ?sjj443)wQcS4^i;S+xrFUhMHkLP>3bU`GcHjE2q!2f;pi zapBRpe^3v5khN`bFPpU{l8{oY+v7rHJZ=b70J+EmK5?F=JW@|1H4KV6jUkHs#kBj7 zLrS!c6+;(iAN|aEc5IxVIXrYifGMMg9-_@!^iX>Abo9K|Dy|$tzOE0>)!Z`VXOc!d z{BxRfXA1uJA;2-bt)s>A! zBX(pOwbCMr_KgKSUr}dOLivS}0_3Le0DwMDInPL>5{IUX2BkevCOL}UD-fE1$jHp z-`k+K$^~zhqMq_vxjebCK4Cr8n+H#hPu&4oPda zFBh*2rLzM1?yLYU>c@<3Rw1yXAY-0SQZ9B6Qq{()HWO;%ymB=wcB2dJAyU5f$MqS; zGC|;fbB~Uf8KJ~ZY-&srotwz@FKWxIXfw#n(Z_j~Ng@Og0aeIck`6)fj~xVVEwdfm z72>maE=x30y$cdFQB5RPUEI8K4cvv!PBIP;S{#pF&^>T1knt%#9OnK(W5w zK6exT6a9_{N{L#4=-7t+k+pDFj(5y!{s!CXHaXg($pmnJf8VOIeWx~GVgY7&<2K=f zTUV?qY~6wjIbf?EWHm>!_N0Zg3cfcK zBm!GLaBv2H4O06YBhG0D%4m5oMKF-*c~rm+>c z5?E|t7LgD1!vIJjut7U{P;dg|`RPJ~vnb8?liRl%&1OcbmMcbp0+0uoDNuckq$;ue zlgR0bsU|nnlTCtnl}#%%t%t9%r4nTNVX|7L@ zcCS7G!({QtS3tl~>T-7%$r^pcG6%gqzeLs&Z~Y1w9hkujoPr4Q03@EWKXSt$5(@^U zOFw-)H~kn{i~AXvE;4cF7}^HW`RAZhh=`;y+*36~YpG_1xS*G6xt1#kCuo?v6bM%< zhWC59&p$m&lQ->}6mi=|w(3QP8g#ZGwpHgvY1kI_k1`T@pnf!9IUIRh*bO)}=aL+8XAe7NM)a^wuz0B-7`zmMZ@Mw8}Yb zAPu+>LT)U>k+gLogLWq-V^$2a%F@PE$5L75dt%Co1cA_a+JC|bP)-jGk-#IT;6y6G z?8a6#8x^I#H2P9sXrYam&bwAw04?dpk^8U|5OTwIFfqyKyO1tR8iG$F7fi33dTg^x zcThWxsqELw`sbX5BVYlxsK!d*fyg}}Jci|eZpTo|7p13iO8SMlTI`$P3C=2W;m<7d!u-yDy9HR?`M2-j4}Amc8;t_?n!ag z!Ynn3B&!v9Wz=lENRJp+9?l8co(U?hK;sz6Bgaht08J4SN)5m@JFt4!9<-FLY89bb zMR{Rq=9K03d}ZRI3n&kXU>kS#o-v@{hR))Pq=;?x2AqNX(ElvHD~Wd#NLK zJ;B$H0A!GQh{;VT`0Q#yEZTmkFWst^;E|PPd7**F^g$&YZ2tfND#rkTbK{<@a|*kW zTx4j*8h6`Ttt8c2eLvoI!m^1ZjPiFCe>o%M(P3$!Rf?a0uXhgV0QrbrFRC zdlMC2!pCk|C8=t85(Fvs=DI0hNjp8`1LO}RpBX(9kXaCsbSkE`wG9%}T$$(7u1o7M za{J;cc);0?1~5nD5Po_`BIk0gK_k}VMw9L#tzNPdzCw144tD~6Jo)+S(a)SHqde?5 ziT$t%yS_Wp){S&%?&uZ|q+O&6TjOzDfq{|7lb*a^)HUC!>Yo%;O{@0-x@>6>v?k1! zP^1+Jkgx+GODW@kSp0v-q<6&DIh9&2_)ut6NWtDxI~oPtX8 zpP$cMF^25Qe8Qj@+fMr`x@0yiYdyNPH#Ae&mO&!R8XRtof!Z596Y>4WM<@%_RVAB3 zF|DYr%{s&vE3TVWmSF^uSl-fr*xV*3jOTy`agaK&A5EBFMuY~uFjbBjEL1ivn1fcW z6^GgYAE}wYzv}VSvSA1b1{GpN2w$FER%;r*rKnWZ86z-LiL)Ac0D~Ai@JkMUIXzDr zwepl^2qPO7tvQBIwuYI1>7&WJbRoD5Nl>64A19*5`tAi=JKWN38N{-Z!h7o&j>n;6 z)R->ZlvtDixC9W#_UiO1%!X5{20o>xEn8M>fm#h6-52h2wtFY!f^tiEQ}Ofts<^@2 zq%wqjD_GzD8}>NeXQxv7Z^h!T$YaTz7H9kS3h72s#i2R$zv)CxVxQsoNTP5F+iLcXHPs-~bb2=Z_uC9}McGwnF|<8Mv! 
zLD`g}T8W^mp-rf1x3&Eu-1>sZ)#@4QZ*@|m1mhbQAd(oL&m48Ya65#l^kwef-`de5 zj=rX}YEDu!EDE5HfOE-q@y2`u)f)Xmpac1ptCh6fCd_qJsl_yIGkY^#- z{PT{aaFM%=GZ7@yAg3Oy47Fg?t=I)sXSKK>EA7IDVlvq|W%JL^Ll`7k?iO-(VKCUC zAFZLE|Qv{8-aczCfmF@#!Y)#w+=#lEZ8g-Xq zkh@H*-xfqXOmP&@DFYI!k)9QL6+}z*aT^Q3DLLX(XJvY@d!f z>KK%(QIuy5xofItu93$Oj#Y(3CTBYoNHL5832bLM2OsCww0eOZ70aa55M-0qK+s=K!e{g!BT8IhvC0!;$ z+hm2>NZ)jFyqi`MRAGwaa|6#OBy+&ePJ+eE`CNvmTh)b`V`_GzhtZZX>N5o=k9G$b z@wEB#(R39V9FZ5zigl}r1ac%w-mJMNBmscGI5{Wx=p*rmCNOq;l2|^qR(QyPq-4x{ zIRp=%7yy5tJ#0CcixVXQxh)wU!?R9-0DdwL`E-m3HTrU^v=!HniYd2zi9!A z(OCpU4{V!`7>pr7;NTDV@y9`EpQ;ip>P0k#=aQt#vV|yiB5}B$Fn&Dz@$>Q2@;~3G zcsHn~#+Xt%(?wvVS)F8^1ERc~VM`LFh!|cJgXcYC2tm}=txHA&sZXg{rCOrt^SsLn zmQT2jGb#o=w+A4p4ecHWJf5598mKYiA9zpP#a212SBgn>DJ0k$w%KG-6;!G=NX`aH z{{RRl9y(kY+O7vPDKm*6*&$fVss!`_A<&#QK+ieJ&z$wPVj!Ieb6PB{*DG19VwD|A zI4vnr!5BEt&%oekreXl&9HE-Ei!()9@}kENEc^Eo9|3^}1CzLaY#;5?5`93WM{rF= z{X}T)S>0U9k<2VQY-brOw;+|zA0wa7j)Sd;>&KV#mFcTb9m!WtjZK+j+aoP-BMfrb zQ~>QGEX3gBKN-l(*vQzG4to1ZC)C?V)1tGfNoIXP$!MTSOMTR!vowL2`$y`|Fc&9| zMqiKu+BY(H>MoLPM_pQXblVlH#$$@>!*eYml;q?RHtlGb`Cc+|I2}tPaul(w)x%6W zgca>PTCB0$fG?|2I1!e96t3nuD}@>5usn0qfIyK_3D}AIf^9oW)wFoKlTn_=qk2Y( zC$$)?9zEbHB18dqR*3RPf}C;(Qfnrq@x8AhsipmPzOi#qsNKcfdL&ew&m`*`p|87m z{-k>t%0S$}a56`pq|Zj7OgvB0He?m)S+bGTpzEksv0@PNo20JAeC|OQM&)C{1-Q>3 zh1zR@kMoI^#?Nu&b#5|TT|Ui~wJe^j^ClK)w(iE?D!gqOz|PMYJ!l{>h*+=YSFIyg zsd~Ll2UwCxY^2RAGC03vf&FVGeESr$F5I^Rj-bJSqcmexJGljQoiv%wrEF^2jIa=p zM2#BG6~jri9004iS%71L3W4#|Q8@xy2HD~|o5>}cvdKo)n$~7;WmqD_W)P|w8*-JA zTP27gj?zX32TUDE<54S)$(!lgtC*Ib-)Ut-%o08B(wXu4c*lS_zy}7{&;bwETRNLZrdV-W3U)N~ zTFU&Pt{7mI{aDXbD{zlwZ}%jMhXBvPTVkF+@9X zBN$!841NLUnQ(7XPCvA?F?njrLKd$eL0rb`h(i|Kz$y1_Eyn;H9~@^^G#-S{l$)a~ zmuo{5<@C)q%=M%ms9_fzp99$3008oQbx+cm6F9ESqeH5F{{W}=20O5%N|Eg$(tNMD zvM~0Jf92L(iW{k@5W4p<7Mj(Z*riYIbdwO@G!CU>V0j`l=R5!hu<1y+T();fv;ezLECKR-^!W2`%q*aa zmH{Pq(cEg{52i~9 zMV8Yjo~VC@1Z@fiNFR@mvb~nMrLzt^Pjb*T2h!oEU$!)ga<`~qm9v3kAd*$b>TKb^ z9y+jd)XRT)w9!+LMX1-)wGCESWfMa3SXU7?gt?_ae0xTA+~hXrK&U0`=t_8O6+5Z7evn_ z9tPw1z~J>4uw&z&2sA-fw0ACFyO|i~ySnX@$PP#Z`*1io;~excfmUTA{Z>6r$E{yc zzK^DfmdJ@<+F^xqMhiS}Uny9W6{l(OU{LyF0y4-<)I* z$?HbUMmxExy**i~&qmG3;XtBhHJUAoa!Es+jN>@+2lwd|65E83JC+$XK${jMB5)@6 zuOp5Fesi92`*pA&B4uWyW-A(8c5_uWB;^&PF+>mOTmggdayb70UZJ1I1Zq)Uy~W@8 ze^t3N#WXa8OCd!mn1`pddq@ECM$ky{p0Z=Y+_y9~Vd`1rpp~f6l7v>~d69i@R!#B8 zjPf(j#@rw8*0)L)A_ll7*EcRnr$uK@Skg~NYO57Y0AD8~;fKyTnkx}za68FV{m|WwtBiY4=O4Kr zx9!r939}y>kZx$Yg?d`Pr&CL>R#+iJP?vvrW>8be#z7tiGmP{pLDaLI*^adN?gV=7 z^HH(N74*!C30?^}0I|k-{r>>JRZv^00>bW3_Q-;?vMOGjsg7w213cgRS+?y|BTYi}9 zDTY5taWrI;un+M3^Yg*##|t*3%LVr*62%)rW{DVvD-M0aGy9(mK^}U!LZXa_qqtpx zc4+?Ac`r@1e&uD~?kE5lqbYQn+tp&REsLf{drY*t0`f9{`gJfX1 z_yfnDruGGiTyMymjVZx1yBp>xwKdqo=LGSEIl%-Ddag3n5Rq3?aUPn}R#RZShCq%Y zkSGiedB;Db9Dkop$a|3pNxxBjn@?FP(2H)^(yHH=OIuGpnH5Q=Bw)1c zK_C_bIKty3`09C1wq-HnH*Er`S*liL`X64`8{76k8u>yxg4p;l6dQS8-Xt1yPc$dk>mV1 z@Oo|r2tx|hB(Efbq;DdBPl7Tscp1(Ma52&XM^liCvm!G*5KF#KxU8y{V-YB3ng0N0 zQ~v<8fDV2+>y=D##F~lbxVltv-@iQicr!X}Swv!K00n^~1<3t}&&V0-Ze;-UBH6TW zw6Ri?NvKCs7XF|v!0vX=WB>&#$jCSY1LXAwTaJf_Fw44?JvpY%q8e!dQx#1q zin$eflh>{Mf_Sx9rqp#ASJ8wqCd9Cg3V>J_@(4J|YywXh=wrfG7swn<>uI#EKCyFA zyd;uD`*~vwP*;gxbcnkS;@D+7zCisz^4p zo_H7mv+#evRv1-!nZ9cfMDpj=&*H!7m(u?LsK@DS{k!#N>UVs>rdHOqx_zPB>#3a* zoUa}vnlHGC?95Wdd5kct6iYRC6?-+|lBdVLiEJpuTNvZ&57y-D*uIZm&_0#@PWsUq^36b6KU^>veUwNNrm5b2U0Jv~25Q#H(7?&$abpuqoJhrIBKh#J@=Q zXJR;JJ?b&+zpbB5JI6=w-sJi_-QAP*Yq#{S-mK>zQIvfNMsm?ERgWdi({()AHU(__Z-RIotp?y5zQFOyC`p z{8ard@B6*EH`Co)>O38-tky`h&i$9RgnBIZKl0l5Dbgf~C4EV5N2AJZJaN-#jIha8 zDWsi!Hmdj}Zucq~ga!Wqj1Q)8{Xgvv{eI8-rFTTtG%ZWKH9JGJqN{VVyLz6ux;?#X 
zU8Oy^q=FHD)nB)bY+dxyc?=3FOa;aUsxl!5BHrh&H8_o4;lH9wS*N#JPa@dTqd8Tl>A^T&>|k;u@Nbz$6eB}TH*M=jcdIZO)^ zuGl1Cz7+=~f9~TxI?J4r^m>k|OJlhfnD(bh>)2Rj+bnp5`$FMJU5ow^!8rZ;&QOM_ zQIK^S3o=Ehid6Iq8r?b7rCaG5G!YT-0VMDFg{ za-Gqct)uF#UaevF7u(Yd9M)uTVqK?&#us-xiSfr6=?E{gC|xWqMs!Gf9uwg4;>njX9u`$n@|aB)w`+Jk|>B3j=+Hz9@EEwd||wi(&7b$ z_ZT1@>JaoMh1z&iPIyBBA#zexecYEm06g+Kb{sb$jg1v1FS9K1QkKfvI%b?FZ&114 z>iG9Fk}z|R+XJp!BIUUns*Nkudhy8(eMt<0LdVq}){v+hMn@wg5J&wwfeeF;WdLkQ zX02{LF2W=hE?AZ0+R>;2Cdv0N_88Cq0KckZQpRz~_l+yW))6AHO0}6`*s?OQ46^dX z&Q0f+FX_n+@#8Q`UR44-&Ad%0WGCcJ%LhQ#OOIV9%vFjKa ztx08ZxDrPnoIWsdt)~Ok2X@hug4rkI!8rNpm20>l^3a*;86%Dfgc!BLy&_6V(`PVyUMmeQ?%py134fMpE*4bl}B+IiG~eDH^*i-m7P@JJirg7P$88Xn5HOcB!u*xF^l&o@roE!k7Xg`8U{j=2@LZJi`aeQ@>KebzT zj8+yiB*kFnW4CI6!{{V~J zagP8FxiOQYBr-99O{YazPX?cAl?7G+HrD-F1d<2pBxD2mB$LvzaT1G?#Y3%sErLe2 z*EJa`uG=#Dppw!t_JR&bQJjE0bPhWg03;Ix7W;ZA9@5#XsCx-ml(R^(md0}2V4iSK z7~`qBiWMo3At!PQu}fZ@*Qrv7@GWZ1AWhPjEr`Lw?+1c%KO?N*(fVZ( zL5$7Fs>f69ZZuF}$uvHhc64rit&RyGpV)Ep(}f=7+Q#G(SkxZf2_i~$;Fib$I=85x z1MOTKvi|^sqT4$I_P88ulz{&kzzm7VxxjjJG>~V$DYVRy@#XqE7H@X%70Af$H?tXGe z{{XH$o`rgVDvrz!CZ$a+g|QWO5(^R-_M)+4fJQI@B!B?%j;TNji<2E5OVmV>*EXH3 z-PNYFgkf1dNEOJ*0PVt`{loHl&yj3K$CLB8p2clO{5pP;i5!$Hv6fU>8aYH;i)|r+ z0Zstq?m70J4_TWX=t~9zW*1R3swn!VoW|~=(E!T7)MQn`+<}j@U?-FQBgZ{LC=O-KMaxaLEpfz=EPV{pM+B_Gab34OkNsB`B4Z9vd z0Xql);X%gHfO+emBFZBEWY;gW6KXKWCZJ5SA4x-rhp1OMRb>Hka^Hi;j~!KVYG%lk zderh))gR$&wSJ)eUe$iA=>Gsx{;z!=`hjyvtK0h9c2=VmQ>sM+*JzBgI=9=*sZxx2 zKD00PwPZEDSVXTu<$?6gC`cQfis#ZE#hx#sbu{LZu8-I~!=mfmwWrjS`f4<)uLV6e zMvyzTK@|9Q&#YZpy->yT6w1K;%U39hxX6Woj0)T zcF%bC6(sH7r`@;o>rJuhY+|Hh!b%vCXT-^52oxnN2TIV2WhYt4E{u>cL%h zn3qGK6|lWcywhDE0hiKLn|3bDkLK2D)_YuCBYJG=Nr z`g7b}rKI-tU#dS;^e)qG-qyQMxoc^sPIm8J-PJpR_35;`a?4zr*|{yKp|@9MfnL>w z<=rKWr;~6tQV!>;dy`kxF4~V)o>_&Pa>-&~v3ep&-HRSLWZFw(pK6?Zj-c0bIRM)M z5kvg8)RyKE>edgcL36c~oDb03xnKEmK00m(?+|`W;JvqEYA`LR1Um#}H>)}@n7%@Q z4mJ~mj{~Jd)ZE~<{Z55TS)H|wG-}nOio@2Bp|0dOIKjd25AWx!Uo5w9W6D2B43)ljfzw)DLkBmC1<)HNA30OB|yS8)=@jspO2!;b`feDxQ>R~H|Y z=GW9iLQC{?tCl98OiJ#pGP4OJ$tU#=0bVdM@PBTmufR%>7;ZPxAhoZUCBBuO#5s8* zQS6TcAoKdkcali~45Ev9V(9fqH#WrC6hoSSQj~9!em_2`YL1-BCKJNct!=>T*d=wTY}u5vR+ul~!VM z0Brkz1LGZN`9KgRT#Vkjg}k+45+swTf#YU~dH|%P0l2vHpMZWkJOoheHw@DBy*dpc z?P>2MRgf{o++l$oomDde|1_p8f;A0?u{PaTJp#XIo z+I5ZT(0XdMBavcnOpMM5&Tx1c=Yz*n%$kjj&vQX`?xK)t4Pq@TPkqPIbu!Dl0Du^9 zFb9tx9YcX8<&K2<3NUIgQngNNT5UNM$ekM`DCcnpBr*MmT6mB(GcG_*`--dEs&!kk zwx@Pzh9a*Urc@E&kMGGj1LLUjH#9Qpz0F}+q>@|quS+Saf;kxhUs0G5!si(Fx1N(G z_F!Fp>6+H-$zGj^^hA;*jH!&s6iMR_JmI?TD#Q6gZuRb1C~2oq5Tx?K3yEE3u8{ko z!utZrpZ9UdJyRwgN-7{~G-NetNvu6{_p*tWLQIT+m2CML&wqk6m53BwohCBg)F@iFGZmhVkxt~5zqTz~>#U=C!6!9rQ7&|LQSbXu%`E=}rcLL-F zX)ux*g(^}i4@-DMoCA^p{{V+S@akhN{{Tr)E*P4PT}ok2Hv6W9!m|YQ#AE_6zd1iA z$48haQJ6A|feEYW(_Mn@nM`~3L=ckfbMoH<+&?`Sp@R*>;j?8Ug|4g+7TqGOD}_vF zIX+JS{{Vikfwx9WE-(d%zDpHr%+td(cPr$UV634R13uH|rX<}NFbOe~cB;c&XGKct zv33&CiS9Nw<{Q!?a3J+5O&~X@=3_)!8bdJ9Y$J*aXnUeBcfQO zZ)olyIvy~f1GJuelk?JK$wyL{(2;ZMSC;h{?uChUG8b`naw~@Je;A~VmH@P0h?LO~i403POa+L0BVqNM~gvWy(zGmH`x zeEW}{IXz3|o49$IKc!-}ptK>DtH@zVE1-lTqyyo+?rf?2XFnZHlkF~Ifl?KtLX7qhbueHaRDZgPeb_ zKo%`CvWnzn)}mT7QjS^dS5k4&FkNr#ZHm)NAK0K;fl_0aS_r= zm7@j6(wxSl{{T-CDNto%PV5u8xCD%FKRr+6{Zh@I)X1iOwam*(GWYdGz(HesLM{gL09yuvhF*=_)-A}kV9wS^#}3hoLNcK zh0O74)M-IxS{76&-ce7i>=~1OGD?I00JvmhBjc%@exg2J2IBhEUcR#=w!~L88E22w znkWJ@C=7+9CH>5Iu1MqBxKV@DT}Uy=W*6#d>djFtD1sJgnb6EOr>sa)8OpBWzr;8s0D@;;Q2mV8#Tr(GHi7<)GI|sp0#5_w-gpPN@bQ>#gK;f@T}~7swnU= z@ID#Tu!;%;xnPDFs$H*sTT<1dG9OmC_bHY3v1k2QQM46Q-~)_#$iy10rgqC_gb9wJ zsnNS}>(#kUCrIJ-+i+mJhV8iau{*K&$T{Px1AoePa#yKE?w;|{J1UJ>t|hfZuQj%k 
zsNB+fOPp@n0T?I8oxhHkD{<&e7`#K3NhYFeTI~~iPVS0lT4l0gSFFCC_IVUClFmpR z6( 0: - net.load(data_path=net_weight, exe=exe, place=place) - else: - raise ValueError('not found weight file') - - #3, test this model - test_program = fluid.default_main_program().clone() - - fetch_list_var = [] - fetch_list_name = [] - if debug is False: - fetch_list_var.append(prediction) - else: - for k, v in net.layers.items(): - fetch_list_var.append(v) - fetch_list_name.append(k) - - return { - 'program': test_program, - 'feed_names': feed_names, - 'fetch_vars': fetch_list_var, - 'fetch_names': fetch_list_name, - 'feed_shapes': feed_shapes, - 'net': net - } - - -def get_shape(fluid, program, name): - for var in program.list_vars(): - if var.type == 'Input': - return list(var.shape[1:]) - - raise ValueError('not found shape for input layer[%s], ' - 'you can specify by yourself' % (name)) - - -def load_inference_model(dirname, exe): - """ load fluid's inference model - """ - fluid = import_fluid() - model_fn = 'model' - params_fn = 'params' - if os.path.exists(os.path.join(dirname, model_fn)) \ - and os.path.exists(os.path.join(dirname, params_fn)): - program, feed_names, fetch_targets = fluid.io.load_inference_model(\ - dirname, exe, model_fn, params_fn) - else: - raise ValueError('not found model files in direcotry[%s]' % (dirname)) - - #print fluid.global_scope().find_var(feed_names[0]) - input_shape = get_shape(fluid, program, feed_names[0]) - feed_shapes = [input_shape] - - return program, feed_names, fetch_targets, feed_shapes - - -def infer(model_path, imgfile, net_file=None, net_name=None, debug=True): - """ do inference using a model which consist 'xxx.py' and 'xxx.npy' - """ - fluid = import_fluid() - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - try: - ret = load_inference_model(model_path, exe) - program, feed_names, fetch_targets, feed_shapes = ret - debug = False - print('found a inference model for fluid') - except ValueError as e: - print('try to load model using net file and weight file') - net_weight = model_path - ret = load_model(exe, place, net_file, net_name, net_weight, debug) - program = ret['program'] - feed_names = ret['feed_names'] - fetch_targets = ret['fetch_vars'] - fetch_list_name = ret['fetch_names'] - feed_shapes = ret['feed_shapes'] - net = ret['net'] - - input_name = list(feed_names)[0] - input_shape = list(feed_shapes)[0] - - np_images = load_data(imgfile, input_shape) - results = exe.run(program=program, - feed={input_name: np_images}, - fetch_list=fetch_targets) - - if debug is True: - dump_path = 'results.paddle' - dump_names = rename_layer_name(fetch_list_name, net) - dump_results(results, dump_names, dump_path) - print('all result of layers dumped to [%s]' % (dump_path)) - else: - result = results[0] - print('succeed infer with results[class:%d]' % (np.argmax(result))) - - return 0 - - -def caffe_infer(prototxt, caffemodel, datafile): - """ do inference using pycaffe for debug, - all intermediate results will be dumpped to 'results.caffe' - """ - import caffe - - net = caffe.Net(prototxt, caffemodel, caffe.TEST) - input_layer = list(net.blobs.keys())[0] - print('got name of input layer is:%s' % (input_layer)) - input_shape = list(net.blobs[input_layer].data.shape[1:]) - - if '.npy' in datafile: - np_images = np.load(datafile) - else: - np_images = load_data(datafile, input_shape) - - inputs = {input_layer: np_images} - net.forward_all(**inputs) - - results = [] - names = [] - for k, v in net.blobs.items(): - k = k.replace('/', '_') - names.append(k) - 
results.append(v.data[0].copy()) - - dump_path = 'results.caffe' - dump_results(results, names, dump_path) - print('all result of layers dumped to [%s]' % (dump_path)) - return 0 - - -if __name__ == "__main__": - """ maybe more convenient to use 'run.sh' to call this tool - """ - net_file = 'models/resnet50/resnet50.py' - weight_file = 'models/resnet50/resnet50.npy' - datafile = 'data/65.jpeg' - net_name = 'ResNet50' - model_file = 'models/resnet50/fluid' - - ret = None - if len(sys.argv) <= 2: - pass - elif sys.argv[1] == 'caffe': - if len(sys.argv) != 5: - print('usage:') - print('\tpython %s caffe [prototxt] [caffemodel] [datafile]' % - (sys.argv[0])) - sys.exit(1) - prototxt = sys.argv[2] - caffemodel = sys.argv[3] - datafile = sys.argv[4] - ret = caffe_infer(prototxt, caffemodel, datafile) - elif sys.argv[1] == 'infer': - if len(sys.argv) != 4: - print('usage:') - print('\tpython %s infer [fluid_model] [datafile]' % (sys.argv[0])) - sys.exit(1) - model_path = sys.argv[2] - datafile = sys.argv[3] - ret = infer(model_path, datafile) - elif sys.argv[1] == 'dump': - if len(sys.argv) != 6: - print('usage:') - print('\tpython %s dump [net_file] [weight_file] [datafile] [net_name]' \ - % (sys.argv[0])) - print('\teg:python %s dump %s %s %s %s' % (sys.argv[0],\ - net_file, weight_file, datafile, net_name)) - sys.exit(1) - - net_file = sys.argv[2] - weight_file = sys.argv[3] - datafile = sys.argv[4] - net_name = sys.argv[5] - ret = infer(weight_file, datafile, net_file, net_name) - - if ret is None: - print('usage:') - print(' python %s [infer] [fluid_model] [imgfile]' % (sys.argv[0])) - print(' eg:python %s infer %s %s' % (sys.argv[0], model_file, datafile)) - sys.exit(1) - - sys.exit(ret) diff --git a/caffe2fluid/examples/imagenet/tools/cmp.sh b/caffe2fluid/examples/imagenet/tools/cmp.sh deleted file mode 100755 index 54c7b48..0000000 --- a/caffe2fluid/examples/imagenet/tools/cmp.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# -#function: -# a tool used to compare the results produced by paddle and caffe -# - -if [[ $# -lt 2 ]];then - echo "usage:" - echo " bash $0 [model_name] [param_name] [caffe_name]" - exit 1 -fi - -model_name=$1 -param_name=$2 -paddle_file="./results/${model_name}.paddle/${param_name}.npy" -if [[ $# -eq 3 ]];then - caffe_file="./results/${model_name}.caffe/${3}.npy" -else - caffe_file="./results/${model_name}.caffe/${2}.npy" -fi -cmd="python ./compare.py $paddle_file $caffe_file" -echo $cmd -eval $cmd diff --git a/caffe2fluid/examples/imagenet/tools/cmp_layers.sh b/caffe2fluid/examples/imagenet/tools/cmp_layers.sh deleted file mode 100755 index 37a106e..0000000 --- a/caffe2fluid/examples/imagenet/tools/cmp_layers.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -#function: -# a tool used to compare all layers' results -# -#set -x -if [[ $# -ne 1 ]];then - echo "usage:" - echo " bash $0 [model_name]" - echo " eg:bash $0 alexnet" - exit 1 -fi - -model_name=$1 -prototxt="models.caffe/$model_name/${model_name}.prototxt" -cat $prototxt | grep name | perl -ne 'if(/^\s*name\s*:\s+\"([^\"]+)/){ print $1."\n";}' >.layer_names - -final_layer=$(cat $prototxt | perl -ne 'if(/^\s*top\s*:\s+\"([^\"]+)/){ print $1."\n";}' | tail -n1) -ret=$(grep "^$final_layer$" .layer_names | wc -l) -if [[ $ret -eq 0 ]];then - echo $final_layer >>.layer_names -fi - -for i in $(cat .layer_names);do - i=${i//\//_} - cf_npy="results/${model_name}.caffe/${i}.npy" - #pd_npy="results/${model_name}.paddle/${i}.npy" - #pd_npy=$(find results/${model_name}.paddle -iname "${i}*.npy" | head -n1) - 
pd_npy=$(find results/${model_name}.paddle -iname "${i}.*npy" | grep deleted -v | head -n1) - - if [[ ! -e $cf_npy ]];then - echo "caffe's result not exist[$cf_npy]" - continue - fi - - if [[ ! -e $pd_npy ]];then - echo "paddle's result not exist[$pd_npy]" - continue - fi - - python compare.py $cf_npy $pd_npy no_exception - if [[ $? -eq 0 ]];then - echo "succeed to compare layer[$i]" - else - echo "failed to compare layer[$i]" - fi - -done diff --git a/caffe2fluid/examples/imagenet/tools/diff.sh b/caffe2fluid/examples/imagenet/tools/diff.sh deleted file mode 100755 index 8a561a8..0000000 --- a/caffe2fluid/examples/imagenet/tools/diff.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -#function: -# a tool used to: -# 1, convert a caffe model -# 2, do inference(only in fluid) using this model -# -#usage: -# cd caffe2fluid/examples/imagenet && bash run.sh alexnet ./models/alexnet.prototxt ./models/alexnet.caffemodel ./models/alexnet.py ./models/alexnet.npy -# - -#set -x - - -if [[ $# -lt 5 ]];then - echo "usage:" - echo " bash $0 [model_name] [cf_prototxt_path] [cf_model_path] [pd_py_path] [pd_npy_path] [imagfile] [only_convert]" - echo " eg: bash $0 alexnet ./models/alexnet.prototxt ./models/alexnet.caffemodel ./models/alexnet.py ./models/alexnet.npy" - exit 1 -else - model_name=$1 - cf_prototxt_path=$2 - cf_model_path=$3 - pd_py_path=$4 - pd_npy_path=$5 - only_convert=$7 -fi -proto_file=$cf_prototxt_path -caffemodel_file=$cf_model_path -weight_file=$pd_npy_path -net_file=$pd_py_path - -if [[ ! -e $proto_file ]];then - echo "not found prototxt[$proto_file]" - exit 1 -fi - -if [[ ! -e $caffemodel_file ]];then - echo "not found caffemodel[$caffemodel_file]" - exit 1 -fi - -if [[ ! -e $pd_model_path ]];then - mkdir $pd_model_path -fi - -PYTHON=`which python` -if [[ -z $PYTHON ]];then - PYTHON=`which python` -fi -$PYTHON ../../convert.py \ - --npy_path $proto_file \ - --caffemodel $caffemodel_file \ - --data-output-path $weight_file\ - --code-output-path $net_file - -ret=$? -if [[ $ret -ne 0 ]];then - echo "failed to convert caffe model[$cf_model_path]" - exit $ret -else - echo "succeed to convert caffe model[$cf_model_path] to fluid model[$pd_model_path]" -fi - -if [[ -z $only_convert ]];then - PYTHON=`which python` - if [[ -z $PYTHON ]];then - PYTHON=`which python` - fi - if [[ -n $6 ]];then - imgfile=$6 - else - imgfile="data/65.jpeg" - fi - #FIX ME: - # only look the first line in prototxt file for the name of this network, maybe not correct - net_name=`grep "name" $proto_file | head -n1 | perl -ne 'if(/^name\s*:\s*\"([^\"]+)\"/){ print $1."\n";}'` - if [[ -z $net_name ]];then - net_name="MyNet" - fi - cmd="$PYTHON ./infer.py dump $net_file $weight_file $imgfile $net_name" - echo $cmd - eval $cmd - ret=$? 
-fi -exit $ret diff --git a/caffe2fluid/examples/imagenet/tools/run.sh b/caffe2fluid/examples/imagenet/tools/run.sh deleted file mode 100755 index c1756e8..0000000 --- a/caffe2fluid/examples/imagenet/tools/run.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -#function: -# a tool used to: -# 1, convert a caffe model -# 2, do inference(only in fluid) using this model -# -#usage: -# cd caffe2fluid/examples/imagenet && bash run.sh alexnet ./models/alexnet.prototxt ./models/alexnet.caffemodel ./models/alexnet.py ./models/alexnet.npy -# - -#set -x - - -if [[ $# -lt 5 ]];then - echo "usage:" - echo " bash $0 [model_name] [cf_prototxt_path] [cf_model_path] [pd_py_path] [pd_npy_path] [imagfile] [only_convert]" - echo " eg: bash $0 alexnet ./models/alexnet.prototxt ./models/alexnet.caffemodel ./models/alexnet.py ./models/alexnet.npy" - exit 1 -else - model_name=$1 - cf_prototxt_path=$2 - cf_model_path=$3 - pd_py_path=$4 - pd_npy_path=$5 - only_convert=$7 -fi -proto_file=$cf_prototxt_path -caffemodel_file=$cf_model_path -weight_file=$pd_npy_path -net_file=$pd_py_path - -if [[ ! -e $proto_file ]];then - echo "not found prototxt[$proto_file]" - exit 1 -fi - -if [[ ! -e $caffemodel_file ]];then - echo "not found caffemodel[$caffemodel_file]" - exit 1 -fi - -if [[ ! -e $pd_model_path ]];then - mkdir $pd_model_path -fi - -PYTHON=`which python` -if [[ -z $PYTHON ]];then - PYTHON=`which python` -fi -$PYTHON ../../convert.py \ - $proto_file \ - --caffemodel $caffemodel_file \ - --data-output-path $weight_file\ - --code-output-path $net_file - -ret=$? -if [[ $ret -ne 0 ]];then - echo "failed to convert caffe model[$cf_model_path]" - exit $ret -else - echo "succeed to convert caffe model[$cf_model_path] to fluid model[$pd_model_path]" -fi - -if [[ -z $only_convert ]];then - PYTHON=`which python` - if [[ -z $PYTHON ]];then - PYTHON=`which python` - fi - if [[ -n $6 ]];then - imgfile=$6 - else - imgfile="data/65.jpeg" - fi - #FIX ME: - # only look the first line in prototxt file for the name of this network, maybe not correct - net_name=`grep "name" $proto_file | head -n1 | perl -ne 'if(/^name\s*:\s*\"([^\"]+)\"/){ print $1."\n";}'` - if [[ -z $net_name ]];then - net_name="MyNet" - fi - cmd="$PYTHON ./infer.py dump $net_file $weight_file $imgfile $net_name" - echo $cmd - eval $cmd - ret=$? -fi -exit $ret diff --git a/caffe2fluid/examples/imagenet/tools/test.sh b/caffe2fluid/examples/imagenet/tools/test.sh deleted file mode 100755 index 13e5db6..0000000 --- a/caffe2fluid/examples/imagenet/tools/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# -#script to test all models -# - -models="alexnet vgg16 googlenet resnet152 resnet101 resnet50" -for i in $models;do - echo "begin to process $i" - bash ./tools/diff.sh $i 2>&1 - echo "finished to process $i with ret[$?]" -done diff --git a/caffe2fluid/examples/mnist/README.md b/caffe2fluid/examples/mnist/README.md deleted file mode 100644 index cd427d6..0000000 --- a/caffe2fluid/examples/mnist/README.md +++ /dev/null @@ -1,10 +0,0 @@ -a demo to show converting caffe model on 'mnist' using caffe2fluid - ---- - -# How to use - -1. prepare python environment -2. download caffe model to "models.caffe/lenet" which contains "lenet.caffemodel" and "lenet.prototxt" -3. 
run the tool - eg: bash ./run.sh lenet ./models.caffe/lenet ./models/lenet diff --git a/caffe2fluid/examples/mnist/evaluate.py b/caffe2fluid/examples/mnist/evaluate.py deleted file mode 100644 index 55b053e..0000000 --- a/caffe2fluid/examples/mnist/evaluate.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/env python - -#function: -# demo to show how to use converted model using caffe2fluid -# - -import sys -import os -import numpy as np -import paddle.fluid as fluid -import paddle - - -def test_model(exe, test_program, fetch_list, test_reader, feeder): - acc_set = [] - - for data in test_reader(): - acc_np, pred = exe.run(program=test_program, - feed=feeder.feed(data), - fetch_list=fetch_list) - acc_set.append(float(acc_np)) - - acc_val = np.array(acc_set).mean() - return float(acc_val) - - -def evaluate(net_file, model_file): - """ main - """ - #1, build model - net_path = os.path.dirname(net_file) - if net_path not in sys.path: - sys.path.insert(0, net_path) - - from lenet import LeNet as MyNet - - #1, define network topology - images = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - - net = MyNet({'data': images}) - prediction = net.layers['prob'] - acc = fluid.layers.accuracy(input=prediction, label=label) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - #2, load weights - if model_file.find('.npy') > 0: - net.load(data_path=model_file, exe=exe, place=place) - else: - net.load(data_path=model_file, exe=exe) - - #3, test this model - test_program = fluid.default_main_program().clone() - test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128) - - feeder = fluid.DataFeeder(feed_list=[images, label], place=place) - fetch_list = [acc, prediction] - - print('go to test model using test set') - acc_val = test_model(exe, test_program, \ - fetch_list, test_reader, feeder) - - print('test accuracy is [%.4f], expected value[0.919]' % (acc_val)) - - -if __name__ == "__main__": - net_file = 'models/lenet/lenet.py' - weight_file = 'models/lenet/lenet.npy' - - argc = len(sys.argv) - if argc == 3: - net_file = sys.argv[1] - weight_file = sys.argv[2] - elif argc > 1: - print('usage:') - print('\tpython %s [net_file] [weight_file]' % (sys.argv[0])) - print('\teg:python %s %s %s %s' % (sys.argv[0], net_file, weight_file)) - sys.exit(1) - - evaluate(net_file, weight_file) diff --git a/caffe2fluid/examples/mnist/run.sh b/caffe2fluid/examples/mnist/run.sh deleted file mode 100755 index eee83ef..0000000 --- a/caffe2fluid/examples/mnist/run.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash - -#function: -# a tool used to: -# 1, convert a caffe model -# 2, do inference using this model -# -#usage: -# bash run.sh lenet ./models.caffe/lenet ./models/lenet -# - -#set -x -if [[ $# -lt 3 ]];then - echo "usage:" - echo " bash $0 [model_name] [cf_model_path] [pd_model_path] [only_convert]" - echo " eg: bash $0 lenet ./models.caffe/lenet ./models/lenet" - exit 1 -else - model_name=$1 - cf_model_path=$2 - pd_model_path=$3 - no_eval=$4 -fi - -proto_file=$cf_model_path/${model_name}.prototxt -caffemodel_file=$cf_model_path/${model_name}.caffemodel -weight_file=$pd_model_path/${model_name}.npy -net_file=$pd_model_path/${model_name}.py - -if [[ ! -e $proto_file ]];then - echo "not found prototxt[$proto_file]" - exit 1 -fi - -if [[ ! -e $caffemodel_file ]];then - echo "not found caffemodel[$caffemodel_file]" - exit 1 -fi - -if [[ ! 
-e $pd_model_path ]];then - mkdir $pd_model_path -fi - -PYTHON=`which cfpython` -if [[ -z $PYTHON ]];then - PYTHON=`which python` -fi -$PYTHON ../../convert.py \ - $proto_file \ - --caffemodel $caffemodel_file \ - --data-output-path $weight_file\ - --code-output-path $net_file - -ret=$? -if [[ $ret -ne 0 ]];then - echo "failed to convert caffe model[$cf_model_path]" - exit $ret -else - echo "succeed to convert caffe model[$cf_model_path] to fluid model[$pd_model_path]" -fi - -if [[ -z $only_convert ]];then - PYTHON=`which pdpython` - if [[ -z $PYTHON ]];then - PYTHON=`which python` - fi - net_name=`grep "name" $proto_file | head -n1 | perl -ne 'if(/\"([^\"]+)\"/){ print $1."\n";}'` - if [[ $net_name != "LeNet" ]];then - echo "only support LeNet" - exit 1 - fi - $PYTHON ./evaluate.py $net_file $weight_file - ret=$? -fi -exit $ret diff --git a/caffe2fluid/kaffe/__init__.py b/caffe2fluid/kaffe/__init__.py deleted file mode 100644 index c11ce45..0000000 --- a/caffe2fluid/kaffe/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .graph import GraphBuilder, NodeMapper -from .errors import KaffeError, print_stderr - -import os -from . import paddle diff --git a/caffe2fluid/kaffe/caffe/__init__.py b/caffe2fluid/kaffe/caffe/__init__.py deleted file mode 100644 index 8d53dee..0000000 --- a/caffe2fluid/kaffe/caffe/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .resolver import get_caffe_resolver, has_pycaffe diff --git a/caffe2fluid/kaffe/caffe/resolver.py b/caffe2fluid/kaffe/caffe/resolver.py deleted file mode 100644 index b237f23..0000000 --- a/caffe2fluid/kaffe/caffe/resolver.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -import sys -import subprocess - -SHARED_CAFFE_RESOLVER = None - - -def import_caffepb(): - p = os.path.realpath(__file__) - p = os.path.dirname(p) - p = os.path.join(p, '../../proto') - sys.path.insert(0, p) - s = sys.version - if s.startswith('2'): - import commands - pb_version = commands.getstatusoutput('protoc --version')[1] - else: - import subprocess - pb_version = subprocess.getstatusoutput('protoc --version')[1] - ver_str = pb_version.split(' ')[-1].replace('.', '') - ver_int = int(ver_str) - assert ver_int >= 360, 'The version of protobuf must be larger than 3.6.0!' - import caffe_pb2 - return caffe_pb2 - - -class CaffeResolver(object): - def __init__(self): - self.import_caffe() - - def import_caffe(self): - self.caffe = None - try: - # Try to import PyCaffe first - import caffe - self.caffe = caffe - except ImportError: - # Fall back to the protobuf implementation - self.caffepb = import_caffepb() - show_fallback_warning() - if self.caffe: - # Use the protobuf code from the imported distribution. - # This way, Caffe variants with custom layers will work. - self.caffepb = self.caffe.proto.caffe_pb2 - self.NetParameter = self.caffepb.NetParameter - - def has_pycaffe(self): - return self.caffe is not None - - -def get_caffe_resolver(): - global SHARED_CAFFE_RESOLVER - if SHARED_CAFFE_RESOLVER is None: - SHARED_CAFFE_RESOLVER = CaffeResolver() - return SHARED_CAFFE_RESOLVER - - -def has_pycaffe(): - return get_caffe_resolver().has_pycaffe() - - -def show_fallback_warning(): - msg = ''' ------------------------------------------------------------- - WARNING: PyCaffe not found! - Falling back to a pure protocol buffer implementation. - * Conversions will be drastically slower. 
------------------------------------------------------------- - -''' - sys.stderr.write(msg) diff --git a/caffe2fluid/kaffe/custom_layers/__init__.py b/caffe2fluid/kaffe/custom_layers/__init__.py deleted file mode 100644 index 3e292e6..0000000 --- a/caffe2fluid/kaffe/custom_layers/__init__.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -""" - -from .register import get_registered_layers -#custom layer import begins - -from . import axpy -from . import flatten -from . import argmax -from . import argmax -from . import reshape -from . import roipooling -from . import priorbox -from . import permute -from . import detection_out -from . import normalize -from . import select -from . import crop -from . import power -from . import reduction - -#custom layer import ends - -custom_layers = get_registered_layers() - - -def set_args(f, params, node=None): - """ set args for function 'f' using the parameters in node.layer.parameters - - Args: - f (function): a python function object - params (object): a object contains attributes needed by f's arguments - - Returns: - arg_names (list): a list of argument names - kwargs (dict): a dict contains needed arguments - """ - from ..protobuf_to_dict import protobuf_to_dict - - argc = f.__code__.co_argcount - arg_list = f.__code__.co_varnames[0:argc] - - kwargs = {} - for arg_name in arg_list: - if arg_name in params: - kwargs[arg_name] = params[arg_name] - - if node is not None and len(node.metadata): - kwargs.update(node.metadata) - - return arg_list, kwargs - - -def has_layer(kind): - """ test whether this layer exists in custom layer - """ - return kind in custom_layers - - -def compute_output_shape(kind, node): - assert kind in custom_layers, "layer[%s] not exist in custom layers" % ( - kind) - shape_func = custom_layers[kind]['shape'] - - parents = node.parents - inputs = [list(p.output_shape) for p in parents] - arg_names, kwargs = set_args(shape_func, node.params) - - if len(inputs) == 1: - inputs = inputs[0] - - return shape_func(inputs, **kwargs) - - -def make_node(template, kind, node): - """ make a PaddleNode for custom layer which means construct - a piece of code to define a layer implemented in 'custom_layers' - - Args: - @template (PaddleNode): a factory to new a instance of PaddleNode - @kind (str): type of custom layer - @node (graph.Node): a layer in the net - - Returns: - instance of PaddleNode - """ - assert kind in custom_layers, "layer[%s] not exist in custom layers" % ( - kind) - - layer_func = custom_layers[kind]['layer'] - - #construct arguments needed by custom layer function from node's parameters - arg_names, kwargs = set_args(layer_func, node.params, node) - - return template('custom_layer', kind, **kwargs) - - -def make_custom_layer(kind, inputs, name, *args, **kwargs): - """ execute a custom layer which is implemented by users - - Args: - @kind (str): type name of this layer - @inputs (vars): variable list created by fluid - @namme (str): name for this layer - @args (tuple): other positional arguments - @kwargs (dict): other kv arguments - - Returns: - output (var): output variable for this layer - """ - assert kind in custom_layers, "layer[%s] not exist in custom layers" % ( - kind) - - layer_func = custom_layers[kind]['layer'] - return layer_func(inputs, name, *args, **kwargs) diff --git a/caffe2fluid/kaffe/custom_layers/argmax.py b/caffe2fluid/kaffe/custom_layers/argmax.py deleted file mode 100644 index d419832..0000000 --- a/caffe2fluid/kaffe/custom_layers/argmax.py +++ /dev/null @@ -1,73 +0,0 @@ -""" a custom layer for 'argmax', 
maybe we should implement this in standard way. - more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/argmax.html -""" -from .register import register - - -def import_fluid(): - import paddle.fluid as fluid - return fluid - - -def argmax_shape(input_shape, out_max_val=False, top_k=1, axis=-1): - """ calculate the output shape of this layer using input shape - - Args: - @input_shape (list of num): a list of number which represents the input shape - @out_max_val (bool): parameter from caffe's ArgMax layer - @top_k (int): parameter from caffe's ArgMax layer - @axis (int): parameter from caffe's ArgMax layer - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - input_shape = list(input_shape) - - if axis < 0: - axis += len(input_shape) - - assert (axis + 1 == len(input_shape) - ), 'only can be applied on the last dimension[axis:%d, %s] now,'\ - 'make sure you have set axis param in xxx.prototxt file' \ - % (axis, str(input_shape)) - - output_shape = input_shape - output_shape[-1] = top_k - if out_max_val is True: - output_shape[-1] *= 2 - - return output_shape - - -def argmax_layer(input, name, out_max_val=False, top_k=1, axis=-1): - """ build a layer of type 'ArgMax' using fluid - - Args: - @input (variable): input fluid variable for this layer - @name (str): name for this layer - @out_max_val (bool): parameter from caffe's ArgMax layer - @top_k (int): parameter from caffe's ArgMax layer - @axis (int): parameter from caffe's ArgMax layer - - Returns: - output (variable): output variable for this layer - """ - - fluid = import_fluid() - - if axis < 0: - axis += len(input.shape) - - if out_max_val is True: - topk_var, index_var = fluid.layers.topk(input=input, k=top_k) - index_var = fluid.layers.cast(index_var, dtype=topk_var.dtype) - output = fluid.layers.concat( - [index_var, topk_var], axis=axis, name=name) - else: - topk_var, index_var = fluid.layers.topk(input=input, k=top_k, name=name) - output = index_var - - return output - - -register(kind='ArgMax', shape=argmax_shape, layer=argmax_layer) diff --git a/caffe2fluid/kaffe/custom_layers/axpy.py b/caffe2fluid/kaffe/custom_layers/axpy.py deleted file mode 100644 index b81d4f2..0000000 --- a/caffe2fluid/kaffe/custom_layers/axpy.py +++ /dev/null @@ -1,51 +0,0 @@ -""" A custom layer for 'axpy' which receives 3 tensors and output 1 tensor. 
- the function performed is:(the mupltiplication and add are elementewise) - output = inputs[0] * inputs[1] + inputs[2] -""" - -from .register import register - - -def axpy_shape(input_shapes): - """ calculate the output shape of this layer using input shapes - - Args: - @input_shapes (list of tuples): a list of input shapes - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - assert len(input_shapes) == 3, "not valid input shape for axpy layer" - assert len(input_shapes[0]) == len(input_shapes[1]), 'should have same dims' - - output_shape = input_shapes[1] - assert (input_shapes[2] == output_shape),\ - "shape not consistent for axpy[%s <--> %s]" \ - % (str(output_shape), str(input_shapes[2])) - - return output_shape - - -def axpy_layer(inputs, name): - """ build a layer of type 'Axpy' using fluid - - Args: - @inputs (list of variables): input fluid variables for this layer - @name (str): name for this layer - - Returns: - output (variable): output variable for this layer - """ - import paddle.fluid as fluid - - assert len(inputs) == 3, "invalid inputs for axpy[%s]" % (name) - alpha = inputs[0] - x = inputs[1] - y = inputs[2] - output = fluid.layers.elementwise_mul(x, alpha, axis=0) - output = fluid.layers.elementwise_add(output, y, name=name) - - return output - - -register(kind='Axpy', shape=axpy_shape, layer=axpy_layer) diff --git a/caffe2fluid/kaffe/custom_layers/crop.py b/caffe2fluid/kaffe/custom_layers/crop.py deleted file mode 100644 index d146be9..0000000 --- a/caffe2fluid/kaffe/custom_layers/crop.py +++ /dev/null @@ -1,77 +0,0 @@ -""" a custom layer for 'crop', maybe we should implement this in standard way. - more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/crop.html -""" -from .register import register - - -def crop_shape(input_shape, shape=None): - """ calculate the output shape of this layer using input shape - - Args: - @input_shape (num | list of num): a list of number or num which represents the input shape - @shape (list of integer): the shape of output - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - if isinstance(input_shape, list): - assert len(input_shape) == 2, "the number of crop's inputs must be 2" - return input_shape[1] - elif not shape is None: - assert len(shape) == len( - input_shape.shape), "input_shape is diff with output_shape" - return shape - else: - raise Exception("crop_shape input error") - return None - - -def crop_layer(input, name, shape=None, axis=2, offset=None): - """ build a layer of type 'Crop' using fluid - - Args: - @input (variables | list of variables): input fluid variable for this layer - @shape (list of integer): the shape of output - @name (str): name for this layer - @axis (integer): parameter from caffe's Crop layer - @offset (Variable|list/tuple of integer|None): parameter from caffe's Crop layer - - Returns: - output (variable): output variable for this layer - """ - input_shape = None - output_shape = None - input_tensor = None - if isinstance(input, list): - assert len(input) == 2, "the number of crop's inputs must be 2" - input_shape = input[0].shape - output_shape = input[1].shape - input_tensor = input[0] - elif not shape is None: - assert len(shape) == len( - input.shape), "input_shape is diff with output_shape" - input_shape = input.shape - output_shape = shape - input_tensor = input - else: - raise Exception("crop_layer input error") - - assert len(output_shape) == len( - input_shape), "input_shape is diff 
with output_shape" - - if axis < 0: - axis += len(input_shape) - - if offset is not None: - assert (len(input_shape) - axis - ) == len(offset), "invalid offset[%s] in crop layer" % ( - str(offset)) - offset = [0] * axis + offset - import paddle.fluid as fluid - output = fluid.layers.crop( - input_tensor, shape=output_shape, offsets=offset, name=name) - - return output - - -register(kind='Crop', shape=crop_shape, layer=crop_layer) diff --git a/caffe2fluid/kaffe/custom_layers/detection_out.py b/caffe2fluid/kaffe/custom_layers/detection_out.py deleted file mode 100644 index 54e0f68..0000000 --- a/caffe2fluid/kaffe/custom_layers/detection_out.py +++ /dev/null @@ -1,79 +0,0 @@ -""" A custom layer for 'detectionout' used in 'SSD' model to produce outputs - Note: Since Paddle's implementation of 'detectionout' applied 'flatten' and 'softmax' ops on the input of 'conf', - while Caffe's implementation do not. -""" - -from .register import register - - -def detectionoutput_shape(input_shape): - """ the output shape of this layer is dynamic and not determined by 'input_shape' - - Args: - @input_shape (list of int): input shape - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - output_shape = [-1, 6] - return output_shape - - -def detectionoutput_layer(inputs, - name, - background_label=0, - share_location=True, - nms_param=None, - keep_top_k=100, - confidence_threshold=0.1): - """ build a layer of type 'detectionout' using fluid - - Args: - @inputs (list of variables): input fluid variables for this layer - @name (str): name for this layer - - Returns: - output (variable): output variable for this layer - """ - import paddle.fluid as fluid - - if nms_param is None: - nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} - - mbox_conf_flatten = inputs[1] - mbox_priorbox = inputs[2] - mbox_priorbox_list = fluid.layers.split(mbox_priorbox, 2, dim=1) - pb = mbox_priorbox_list[0] - pbv = mbox_priorbox_list[1] - pb = fluid.layers.reshape(x=pb, shape=[-1, 4]) - pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4]) - mbox_loc = inputs[0] - mbox_loc = fluid.layers.reshape( - x=mbox_loc, shape=[0, mbox_conf_flatten.shape[1], 4]) - - default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} - fields = ['eta', 'top_k', 'nms_threshold'] - - for f in default.keys(): - if f not in nms_param: - nms_param[f] = default[f] - - nmsed_outs = fluid.layers.detection_output( - scores=mbox_conf_flatten, - loc=mbox_loc, - prior_box=pb, - prior_box_var=pbv, - background_label=background_label, - nms_threshold=nms_param["nms_threshold"], - nms_top_k=nms_param["top_k"], - keep_top_k=keep_top_k, - score_threshold=confidence_threshold, - nms_eta=nms_param["eta"]) - - return nmsed_outs - - -register( - kind='DetectionOutput', - shape=detectionoutput_shape, - layer=detectionoutput_layer) diff --git a/caffe2fluid/kaffe/custom_layers/flatten.py b/caffe2fluid/kaffe/custom_layers/flatten.py deleted file mode 100644 index 271cc99..0000000 --- a/caffe2fluid/kaffe/custom_layers/flatten.py +++ /dev/null @@ -1,67 +0,0 @@ -""" a custom layer for 'flatten', maybe we should implement this in standard way. 
- more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/flatten.html -""" -from .register import register -from functools import reduce - -def flatten_shape(input_shape, axis=1, end_axis=-1): - """ calculate the output shape of this layer using input shape - - Args: - @input_shape (list of num): a list of number which represents the input shape - @axis (int): parameter from caffe's Flatten layer - @end_axis (int): parameter from caffe's Flatten layer - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - - start_axis = axis - end_axis = end_axis - input_shape = list(input_shape) - if start_axis < 0: - start_axis += len(input_shape) - - if end_axis < 0: - end_axis += len(input_shape) + 1 - - assert start_axis <= end_axis, 'invalid axis[%d] or end_axis[%d] params'\ - % (start_axis, end_axis) - output_shape = input_shape[0:start_axis] - flat_sz = reduce(lambda a, b: a * b, input_shape[start_axis:end_axis]) - if flat_sz < 0: - flat_sz = -1 - output_shape += [flat_sz] - output_shape += input_shape[end_axis:-1] - - return output_shape - - -def flatten_layer(input, name, axis=1, end_axis=-1): - """ build a layer of type 'Flatten' using fluid - - Args: - @input (variable): input fluid variable for this layer - @name (str): name for this layer - @axis (int): parameter from caffe's Flatten layer - @end_axis (int): parameter from caffe's Flatten layer - - Returns: - output (variable): output variable for this layer - """ - import paddle.fluid as fluid - - input_shape = list(input.shape) - - if input_shape[0] == -1: - input_shape[0] = 0 - output_shape = flatten_shape(input_shape, axis=axis, end_axis=end_axis) - else: - output_shape = flatten_shape(input_shape, axis=axis, end_axis=end_axis) - - output = fluid.layers.reshape(input, shape=output_shape, name=name) - - return output - - -register(kind='Flatten', shape=flatten_shape, layer=flatten_layer) diff --git a/caffe2fluid/kaffe/custom_layers/normalize.py b/caffe2fluid/kaffe/custom_layers/normalize.py deleted file mode 100644 index f6e8c00..0000000 --- a/caffe2fluid/kaffe/custom_layers/normalize.py +++ /dev/null @@ -1,56 +0,0 @@ -""" A custom layer for 'normalize' op -""" - -from .register import register - - -def normalize_shape(input_shape, - across_spatial=True, - scale_filler=True, - eps=1e-10): - """ calculate the output shape of this layer using input shapes - - Args: - @input_shape (list of tuples): input shape - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - output_shape = input_shape - return output_shape - - -def normalize_layer(input, - name, - across_spatial=True, - scale_filler=True, - channel_shared=False, - eps=1e-10): - """ build a layer of type 'normalize' using fluid - - Args: - @inputs (list of variables): input fluid variables for this layer - @name (str): name for this layer - - Returns: - output (variable): output variable for this layer - """ - import paddle.fluid as fluid - - param_prefix = name.split('.')[0] - - assert across_spatial == False, "Only support across_spatial == False for Normalize[%s]" % ( - name) - l2_norm = fluid.layers.l2_normalize(input, axis=1) # l2 norm along channel - - shape = [1] if channel_shared else [input.shape[1]] - scale_attr = fluid.ParamAttr(name=param_prefix + '_scale') - scale_param = fluid.layers.create_parameter( - shape=shape, dtype=input.dtype, name=name, attr=scale_attr) - - out = fluid.layers.elementwise_mul( - x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1) - 
return out - - -register(kind='Normalize', shape=normalize_shape, layer=normalize_layer) diff --git a/caffe2fluid/kaffe/custom_layers/permute.py b/caffe2fluid/kaffe/custom_layers/permute.py deleted file mode 100644 index f0633fd..0000000 --- a/caffe2fluid/kaffe/custom_layers/permute.py +++ /dev/null @@ -1,40 +0,0 @@ -""" A custom layer for 'Permute' which is equivalent to transpose in paddle -""" - -from .register import register - - -def permute_shape(input_shape, order): - """ calculate the output shape of this layer using input shapes - - Args: - @input_shape (list of numbers): input shape - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - output_shape = [] - for ii in order: - assert ii < len(input_shape), "invalid order for permute[%s]" % (name) - output_shape.append(input_shape[ii]) - return output_shape - - -def permute_layer(input, name, order): - """ build a layer of type 'permute' using fluid - - Args: - @input (input variable): input fluid variables for this layer - @name (str): name for this layer - @order (list of int): order to permute the dims - - Returns: - output (variable): output variable for this layer - """ - import paddle.fluid as fluid - output = fluid.layers.transpose(input, order, name=name) - - return output - - -register(kind='Permute', shape=permute_shape, layer=permute_layer) diff --git a/caffe2fluid/kaffe/custom_layers/power.py b/caffe2fluid/kaffe/custom_layers/power.py deleted file mode 100644 index a8b91f4..0000000 --- a/caffe2fluid/kaffe/custom_layers/power.py +++ /dev/null @@ -1,40 +0,0 @@ -""" a custom layer for 'power', maybe we should implement this in standard way. - more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/power.html -""" -from .register import register - - -def power_shape(input_shape, shape=None): - """ calculate the output shape of this layer using input shape - - Args: - @input_shape (list of num): a list of number which represents the input shape - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - return input_shape - - -def power_layer(input, name, power=1.0, scale=1.0, shift=0.0): - """ build a layer of type 'Power' using fluid - - Args: - @input (variables): input fluid variable for this layer - @name (str): name for this layer - @power (float): parameter from caffe's Power layer - @scale (float): parameter from caffe's Power layer - @shift (float): parameter from caffe's Power layer - - Returns: - output (variable): output variable for this layer - """ - import paddle.fluid as fluid - scale_out = fluid.layers.scale( - input, scale=scale, bias=shift, bias_after_scale=True) - output = fluid.layers.pow(scale_out, factor=power) - - return output - - -register(kind='Power', shape=power_shape, layer=power_layer) diff --git a/caffe2fluid/kaffe/custom_layers/priorbox.py b/caffe2fluid/kaffe/custom_layers/priorbox.py deleted file mode 100644 index e3eb640..0000000 --- a/caffe2fluid/kaffe/custom_layers/priorbox.py +++ /dev/null @@ -1,103 +0,0 @@ -""" A custom layer for 'priorbox' which is used in ssd to generate prior box info - Since the order of prior box is different between caffe and paddle, - we use 'slice' and 'concate' ops to align them. 
-""" - -from .register import register - - -def priorbox_shape(input_shapes, min_size, max_size=None, aspect_ratio=None): - """ calculate the output shape of this layer using input shapes - - Args: - @input_shapes (list of tuples): a list of input shapes - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - assert len(input_shapes) == 2, "invalid inputs for Priorbox[%s]" % (name) - fc_shape = input_shapes[0] - N = 1 - if not max_size == None: - N += 1 - if not aspect_ratio == None: - N += 2 * len(aspect_ratio) - - N_bbx = fc_shape[2] * fc_shape[3] * N - output_shape = [1, 2, 4 * N_bbx] - return output_shape - - -def priorbox_layer(inputs, - name, - min_size, - max_size=None, - aspect_ratio=None, - variance=[0.1, 0.1, 0.2, 0.2], - flip=False, - clip=False, - step=0.0, - offset=0.5): - """ build a layer of type 'Priorbox' using fluid - - Args: - @inputs (list of variables): input fluid variables for this layer - @name (str): name for this layer - - Returns: - output (variable): output variable for this layer - """ - import paddle.fluid as fluid - - assert len(inputs) == 2, "invalid inputs for Priorbox[%s]" % (name) - input = inputs[0] - image = inputs[1] - steps = tuple(step) if type(step) is list or type(step) is tuple else (step, - step) - box, variance_ = fluid.layers.prior_box( - input, - image, - min_size, - max_size, - aspect_ratio, - variance, - flip, - clip, - steps, - offset, - min_max_aspect_ratios_order=True) - """ - #adjust layout when the output is not consistent with caffe's - - feat_shape = list(input.shape) - H = feat_shape[2] - W = feat_shape[3] - box_tmp = fluid.layers.reshape(box, [H, W, -1, 4]) - nb_prior_bbx = int(box_tmp.shape[2]) - tensor_list = fluid.layers.split(box_tmp, nb_prior_bbx, 2) - - #TODO: - # current implementation for this layer is not efficient - # and we should fix this bug in future when Paddle support the same prior-box layout with Caffe - index_list = [0] - index_list = index_list * nb_prior_bbx - index_offset = 0 - if max_size is not None: - index_list[1] = -1 - index_offset = 1 - for ii in xrange(2 * len(aspect_ratio)): - index_list[ii + 1 + index_offset] = ii + 1 - - tensor_list_gathered = [tensor_list[ii] for ii in index_list] - caffe_prior_bbx = fluid.layers.concat(tensor_list_gathered, axis=2) - box = fluid.layers.reshape(caffe_prior_bbx, [1, 1, -1]) - """ - - box = fluid.layers.reshape(box, [1, 1, -1]) - variance_ = fluid.layers.reshape(variance_, [1, 1, -1]) - output = fluid.layers.concat([box, variance_], axis=1) - - return output - - -register(kind='PriorBox', shape=priorbox_shape, layer=priorbox_layer) diff --git a/caffe2fluid/kaffe/custom_layers/reduction.py b/caffe2fluid/kaffe/custom_layers/reduction.py deleted file mode 100644 index ced60d3..0000000 --- a/caffe2fluid/kaffe/custom_layers/reduction.py +++ /dev/null @@ -1,67 +0,0 @@ -""" a custom layer for 'crop', maybe we should implement this in standard way. 
-    more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/reduction.html
-"""
-from .register import register
-
-
-def reduction_shape(input_shape, axis=0):
-    """ calculate the output shape of this layer using input shape
-
-    Args:
-        @input_shape (list of num): a list of numbers which represents the input shape
-        @axis (int): parameter from caffe's reduction layer
-
-    Returns:
-        @output_shape (list of num): a list of numbers representing the output shape
-    """
-    if axis < 0:
-        axis += len(input_shape) + 1
-
-    assert axis <= len(input_shape), 'invalid axis[%d] error' % (axis)
-
-    return input_shape[0:axis]
-
-
-def reduction_layer(input, name, axis=0, operation=1, coeff=1.0):
-    """ build a layer of type 'Reduction' using fluid
-
-    Args:
-        @input (variable): input fluid variable for this layer
-        @name (str): name for this layer
-        @axis (int): parameter from caffe's reduction layer
-        @operation (int): parameter from caffe's reduction layer
-        @coeff (float): parameter from caffe's reduction layer
-
-    Returns:
-        output (variable): output variable for this layer
-    """
-    assert operation >= 1 and operation <= 4, "invalid reduction operation[%s]" % (
-        operation)
-
-    input_len = len(input.shape)
-    if axis < 0:
-        axis += input_len + 1
-
-    dim = list(range(input_len))
-
-    import paddle.fluid as fluid
-    if operation == 1:  ## operation = SUM
-        output = fluid.layers.reduce_sum(
-            input, dim=dim[axis:], keep_dim=False, name=name)
-    elif operation == 2:  ## operation = ASUM
-        absout = fluid.layers.abs(input)
-        output = fluid.layers.reduce_sum(
-            absout, dim=dim[axis:], keep_dim=False, name=name)
-    elif operation == 3:  ## operation = SUMSQ
-        powout = fluid.layers.pow(x=input, factor=2.0)
-        output = fluid.layers.reduce_sum(
-            powout, dim=dim[axis:], keep_dim=False, name=name)
-    else:  ## operation = MEAN
-        output = fluid.layers.reduce_mean(
-            input, dim=dim[axis:], keep_dim=False, name=name)
-
-    mulout = fluid.layers.scale(x=output, scale=coeff)
-    return mulout
-
-
-register(kind='Reduction', shape=reduction_shape, layer=reduction_layer)
diff --git a/caffe2fluid/kaffe/custom_layers/register.py b/caffe2fluid/kaffe/custom_layers/register.py
deleted file mode 100644
index ae806cd..0000000
--- a/caffe2fluid/kaffe/custom_layers/register.py
+++ /dev/null
@@ -1,37 +0,0 @@
-""" this module provides 'register' for registering customized layers
-"""
-
-g_custom_layers = {}
-
-
-def register(kind, shape, layer):
-    """ register a custom layer or a list of custom layers
-
-    Args:
-        @kind (str or list): type name of the layer
-        @shape (function): a function to generate the shape of the layer's output
-        @layer (function): a function to build the layer using fluid
-
-    Returns:
-        None
-    """
-    assert type(shape).__name__ == 'function', 'shape should be a function'
-    assert type(layer).__name__ == 'function', 'layer should be a function'
-
-    if type(kind) is str:
-        kind = [kind]
-    else:
-        assert type(
-            kind) is list, 'invalid param "kind" for register, not a list or str'
-
-    for k in kind:
-        assert type(
-            k) is str, 'invalid param "kind" for register, not a list of str'
-        assert k not in g_custom_layers, 'this type[%s] has already been registered' % (
-            k)
-        print('register layer[%s]' % (k))
-        g_custom_layers[k] = {'shape': shape, 'layer': layer}
-
-
-def get_registered_layers():
-    return g_custom_layers
diff --git a/caffe2fluid/kaffe/custom_layers/reshape.py b/caffe2fluid/kaffe/custom_layers/reshape.py
deleted file mode 100644
index fc2da27..0000000
--- a/caffe2fluid/kaffe/custom_layers/reshape.py
+++ /dev/null
@@ 
-1,94 +0,0 @@ -""" a custom layer for 'reshape', maybe we should implement this in standard way. - more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/reshape.html -""" -from .register import register -from functools import reduce - - -def import_fluid(): - import paddle.fluid as fluid - return fluid - - -def reshape_shape(input_sp, shape, axis=0, num_axes=-1): - """ calculate the output shape of this layer using input shape - - Args: - @input_shape (list of num): a list of number which represents the input shape - @shape (object): parameter from caffe's Reshape layer - @axis (int): parameter from caffe's Reshape layer - @num_axes(int): parameter from caffe's Reshape layer - - Returns: - @output_shape (list of num): a list of numbers represent the output shape - """ - - def count(num_list): - return reduce(lambda a, b: a * b, num_list) - - input_shape = list(input_sp) - input_count = count(input_shape) - - input_num_axes = len(input_shape) - - input_start_axis = axis - start_axis = input_start_axis if input_start_axis >= 0 \ - else input_num_axes + input_start_axis + 1 - - assert start_axis >= 0, "[Reshape]axis %d out of range" % (input_start_axis) - assert start_axis <= input_num_axes, "[Reshape]axis %d out of range for %d-D input data"\ - % (input_start_axis, input_num_axes) - - assert num_axes >= -1, "[Reshape]num_axes must be >= 0, or -1 for all" - - end_axis = input_num_axes if num_axes == -1 else start_axis + num_axes - assert end_axis <= input_num_axes, "end_axis[%d] = axis[%d] + num_axes[%d] is out of range"\ - % (end_axis, start_axis, num_axes) - - num_axes_replaced = end_axis - start_axis - num_axes_retained = input_num_axes - num_axes_replaced - num_new_axes = len(shape['dim']) - output_shape = [] - - for i in range(start_axis): - output_shape.append(input_shape[i]) - - for i in range(num_new_axes): - output_shape.append(shape['dim'][i]) - - for i in range(end_axis, input_num_axes): - output_shape.append(input_shape[i]) - - assert len(output_shape) == num_axes_retained + num_new_axes,\ - "[Reshape]invalid dims of output shape[%s]" % (str(output_shape)) - - return output_shape - - -def reshape_layer(input, name, shape, axis=0, num_axes=-1): - """ build a layer of type 'Flatten' using fluid - - Args: - @input (variable): input fluid variable for this layer - @name (str): name for this layer - @shape (object): parameter from caffe's Reshape layer - @axis (int): parameter from caffe's Reshape layer - @num_axes(int): parameter from caffe's Reshape layer - - Returns: - output (variable): output variable for this layer - """ - fluid = import_fluid() - input_shape = list(input.shape) - if input_shape[0] == -1: - input_shape[0] = 0 - output_shape = reshape_shape(input_shape, shape, axis, num_axes) - else: - output_shape = reshape_shape(input_shape, shape, axis, num_axes) - output = fluid.layers.reshape(input, shape=output_shape, name=name) - - return output - - -register(kind='Reshape', shape=reshape_shape, layer=reshape_layer) - diff --git a/caffe2fluid/kaffe/custom_layers/roipooling.py b/caffe2fluid/kaffe/custom_layers/roipooling.py deleted file mode 100644 index ccbf24a..0000000 --- a/caffe2fluid/kaffe/custom_layers/roipooling.py +++ /dev/null @@ -1,53 +0,0 @@ -""" a custom layer for 'ROIPooling', maybe we should implement this in standard way. 
-    more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/ROIPooling.html
-"""
-from .register import register
-
-
-def roipooling_shape(input_shapes, pooled_h, pooled_w, spatial_scale):
-    """ calculate the output shape of this layer using input shapes
-
-    Args:
-        @input_shapes (list of shapes): shapes of the input feature map and the rois
-        @pooled_h (int): parameter from caffe's ROIPooling layer
-        @pooled_w (int): parameter from caffe's ROIPooling layer
-        @spatial_scale (float): parameter from caffe's ROIPooling layer
-
-    Returns:
-        @output_shape (list of num): a list of numbers representing the output shape
-    """
-    assert len(input_shapes) == 2, "invalid input shapes for roipooling layer"
-    base_fea_shape = input_shapes[0]
-    rois_shape = input_shapes[1]
-    output_shape = list(base_fea_shape)
-    output_shape[0] = rois_shape[0]
-    output_shape[2] = pooled_h
-    output_shape[3] = pooled_w
-    return output_shape
-
-
-def roipooling_layer(inputs, name, pooled_h, pooled_w, spatial_scale):
-    """ build a layer of type 'ROIPooling' using fluid
-
-    Args:
-        @inputs (list of variables): input fluid variables for this layer
-        @name (str): name for this layer
-        @pooled_h (int): parameter from caffe's ROIPooling layer
-        @pooled_w (int): parameter from caffe's ROIPooling layer
-        @spatial_scale (float): parameter from caffe's ROIPooling layer
-
-    Returns:
-        output (variable): output variable for this layer
-    """
-
-    import paddle.fluid as fluid
-    assert len(inputs) == 2, "invalid inputs for roipooling layer"
-    base_fea = inputs[0]
-    rois = inputs[1][:, 1:5]
-    rois_fea = fluid.layers.roi_pool(base_fea, rois, pooled_h, pooled_w,
-                                     spatial_scale)
-
-    return rois_fea
-
-
-register(kind='ROIPooling', shape=roipooling_shape, layer=roipooling_layer)
diff --git a/caffe2fluid/kaffe/custom_layers/select.py b/caffe2fluid/kaffe/custom_layers/select.py
deleted file mode 100644
index 708ac64..0000000
--- a/caffe2fluid/kaffe/custom_layers/select.py
+++ /dev/null
@@ -1,67 +0,0 @@
-""" a custom layer for 'select' which is used to replace standard 'Slice' layer
-    for converting layers with multiple different output tensors
-"""
-from .register import register
-
-
-def select_shape(input_shape, slice_point, axis=1):
-    """ calculate the output shape of this layer using input shape
-
-    Args:
-        @input_shape (list of num): a list of numbers which represents the input shape
-        @slice_point (list): parameter from caffe's Slice layer
-        @axis (int): parameter from caffe's Slice layer
-
-    Returns:
-        @output_shape (list of num): a list of numbers representing the output shape
-    """
-
-    input_shape = list(input_shape)
-    start = slice_point[0]
-    if len(slice_point) == 2:
-        end = slice_point[1]
-    else:
-        end = input_shape[axis]
-
-    assert end > start, "invalid slice_point with [start:%d, end:%d]"\
-        % (start, end)
-    output_shape = input_shape
-    output_shape[axis] = end - start
-    return output_shape
-
-
-def select_layer(input, name, slice_point, axis=1):
-    """ build a layer of type 'Slice' using fluid
-
-    Args:
-        @input (variable): input fluid variable for this layer
-        @name (str): name for this layer
-        @slice_point (list): parameter from caffe's Slice layer
-        @axis (int): parameter from caffe's Slice layer
-
-    Returns:
-        output (variable): output variable for this layer
-    """
-    import paddle.fluid as fluid
-    input_shape = list(input.shape)
-
-    start = slice_point[0]
-    if len(slice_point) == 2:
-        end = slice_point[1]
-    else:
-        end = input_shape[axis]
-
-    sections = []
-    if start > 0:
-        sections.append(start)
-
-    pos = len(sections)
-    sections.append(end - 
start) - if end != input_shape[axis]: - sections.append(input_shape[axis] - end) - - outputs = fluid.layers.split(input, sections, dim=axis, name=name) - return outputs[pos] - - -register(kind='Select', shape=select_shape, layer=select_layer) diff --git a/caffe2fluid/kaffe/errors.py b/caffe2fluid/kaffe/errors.py deleted file mode 100644 index 75eced5..0000000 --- a/caffe2fluid/kaffe/errors.py +++ /dev/null @@ -1,34 +0,0 @@ -import sys - -#debug level, can be 'warn', 'verbose' -log_level = 'warn' - - -class KaffeError(Exception): - pass - - -def print_stderr(msg): - sys.stderr.write('%s\n' % msg) - - -def debug(msg): - if log_level == 'verbose': - print_stderr('[DEBUG]' + msg) - - -def notice(msg): - print_stderr('[NOTICE]' + msg) - - -def warn(msg): - print_stderr('[WARNING]' + msg) - - -def set_loglevel(level): - global log_level - - if 'warn' != level and 'verbose' != level: - raise Exception('not supported log level[%s]' % (level)) - - log_level = level diff --git a/caffe2fluid/kaffe/graph.py b/caffe2fluid/kaffe/graph.py deleted file mode 100644 index b325b93..0000000 --- a/caffe2fluid/kaffe/graph.py +++ /dev/null @@ -1,371 +0,0 @@ -from google.protobuf import text_format - -from .caffe import get_caffe_resolver -from .errors import KaffeError, print_stderr -from .layers import LayerAdapter, LayerType, NodeKind, NodeDispatch -from .shapes import make_tensor - - -class Node(object): - def __init__(self, name, kind, layer=None): - self.name = name - self.kind = kind - self.layer = LayerAdapter(layer, kind) if layer else None - self.parents = [] - self.children = [] - self.data = None #parameters of this node - self.output_shape = None #output shape of this node - self.metadata = {} - - def add_parent(self, parent_node): - assert parent_node not in self.parents - self.parents.append(parent_node) - if self not in parent_node.children: - parent_node.children.append(self) - - def add_child(self, child_node): - assert child_node not in self.children - self.children.append(child_node) - if self not in child_node.parents: - child_node.parents.append(self) - - def get_only_parent(self): - if len(self.parents) != 1: - raise KaffeError('Node (%s) expected to have 1 parent. Found %s.' 
% - (self, len(self.parents))) - return self.parents[0] - - @property - def parameters(self): - """ get parameters stored in a protobuf object - """ - if self.layer is not None: - return self.layer.parameters - return None - - @property - def params(self): - """ get parameters stored in a dict - """ - from .protobuf_to_dict import protobuf_to_dict - - p = self.parameters - if p is not None: - return protobuf_to_dict(p) - else: - return None - - def __str__(self): - return '[%s] %s' % (self.kind, self.name) - - def __repr__(self): - return '%s (0x%x)' % (self.name, id(self)) - - -class Graph(object): - def __init__(self, nodes=None, name=None, trace={}): - self.nodes = nodes or [] - self.node_lut = {node.name: node for node in self.nodes} - self.output_trace = trace - if name is None or name == '': - self.name = 'MyNet' - else: - self.name = name - - def add_node(self, node): - self.nodes.append(node) - self.node_lut[node.name] = node - - def get_node(self, name): - try: - return self.node_lut[name] - except KeyError: - raise KaffeError('Layer not found: %s' % name) - - def add_name_trace(self, trace, which='caffe'): - self.output_trace[which] = trace - - def get_name_trace(self, which=None): - if which is not None: - return self.output_trace[which] - else: - return self.output_trace - - def get_input_nodes(self): - return [node for node in self.nodes if len(node.parents) == 0] - - def get_output_nodes(self): - return [node for node in self.nodes if len(node.children) == 0] - - def topologically_sorted(self): - sorted_nodes = [] - unsorted_nodes = list(self.nodes) - temp_marked = set() - perm_marked = set() - - def visit(node): - if node in temp_marked: - raise KaffeError('Graph is not a DAG.') - if node in perm_marked: - return - temp_marked.add(node) - for child in node.children: - visit(child) - perm_marked.add(node) - temp_marked.remove(node) - sorted_nodes.insert(0, node) - - while len(unsorted_nodes): - visit(unsorted_nodes.pop()) - return sorted_nodes - - def compute_output_shapes(self): - sorted_nodes = self.topologically_sorted() - for node in sorted_nodes: - node.output_shape = make_tensor( - *NodeKind.compute_output_shape(node)) - - def replaced(self, new_nodes): - return Graph(nodes=new_nodes, name=self.name, trace=self.output_trace) - - def transformed(self, transformers): - graph = self - for transformer in transformers: - graph = transformer(graph) - if graph is None: - raise KaffeError('Transformer failed: {}'.format(transformer)) - assert isinstance(graph, Graph) - - return graph - - def __contains__(self, key): - return key in self.node_lut - - def __str__(self): - hdr = '{:<20} {:<30} {:>20} {:>20}'.format('Type', 'Name', 'Param', - 'Output') - s = [hdr, '-' * 94] - for node in self.topologically_sorted(): - # If the node has learned parameters, display the first one's shape. - # In case of convolutions, this corresponds to the weights. 
- if node.data is None: - data_shape = '--' - out_shape = node.output_shape or '--' - s.append('{:<20} {:<30} {:>20} {:>20}'.format( - node.kind, node.name, data_shape, str(tuple(out_shape)))) - else: - for d in node.data: - #data_shape = node.data[0].shape if node.data else '--' - data_shape = d.shape - out_shape = node.output_shape or '--' - s.append('{:<20} {:<30} {:>20} {:>20}'.format( - node.kind, node.name, str(data_shape), str(tuple(out_shape)))) - return '\n'.join(s) - - -class GraphBuilder(object): - '''Constructs a model graph from a Caffe protocol buffer definition.''' - - def __init__(self, def_path, phase='test'): - ''' - def_path: Path to the model definition (.prototxt) - data_path: Path to the model data (.caffemodel) - phase: Either 'test' or 'train'. Used for filtering phase-specific nodes. - ''' - self.def_path = def_path - self.phase = phase - self.load() - - def load(self): - '''Load the layer definitions from the prototxt.''' - self.params = get_caffe_resolver().NetParameter() - with open(self.def_path, 'rb') as def_file: - text_format.Merge(def_file.read(), self.params) - - def filter_layers(self, layers): - '''Filter out layers based on the current phase.''' - phase_map = {0: 'train', 1: 'test'} - filtered_layer_names = set() - filtered_layers = [] - for layer in layers: - phase = self.phase - if len(layer.include): - phase = phase_map[layer.include[0].phase] - if len(layer.exclude): - phase = phase_map[1 - layer.include[0].phase] - exclude = (phase != self.phase) - # Dropout layers appear in a fair number of Caffe - # test-time networks. These are just ignored. We'll - # filter them out here. - if (not exclude) and (phase == 'test'): - exclude = (layer.type == LayerType.Dropout) - if not exclude: - filtered_layers.append(layer) - # Guard against dupes. - assert layer.name not in filtered_layer_names - filtered_layer_names.add(layer.name) - return filtered_layers - - def make_node(self, layer): - '''Create a graph node for the given layer.''' - kind = NodeKind.map_raw_kind(layer.type) - if kind is None: - raise KaffeError('Unknown layer type encountered: %s' % layer.type) - - # We want to use the layer's top names (the "output" names), rather than the - # name attribute, which is more of readability thing than a functional one. - # Other layers will refer to a node by its "top name". - return Node(layer.name, kind, layer=layer) - - def make_input_nodes(self): - ''' - Create data input nodes. - - This method is for old-style inputs, where the input specification - was not treated as a first-class layer in the prototext. - Newer models use the "Input layer" type. - ''' - nodes = [Node(name, NodeKind.Data) for name in self.params.input] - inputs_num = len(nodes) - if inputs_num > 0: - input_dims_num = len(self.params.input_dim) - if input_dims_num > 0 and input_dims_num != inputs_num * 4: - raise KaffeError('invalid input_dim[%d] param in prototxt' % - (input_dims_num)) - - input_dims = [[]] * inputs_num - for i in range(input_dims_num): - dim = self.params.input_dim[i] - which = int(i / 4) - input_dims[which].append(int(dim)) - - for i in range(inputs_num): - if len(self.params.input_shape) == inputs_num: - input_dim = map(int, self.params.input_shape[i].dim) - input_dims[i] = input_dim - - nodes[i].output_shape = tuple(input_dims[i]) - return nodes - - def build(self): - ''' - Builds the graph from the Caffe layer definitions. 
- ''' - # Get the layers - layers = self.params.layers or self.params.layer - # Filter out phase-excluded layers - layers = self.filter_layers(layers) - # Get any separately-specified input layers - nodes = self.make_input_nodes() - nodes += [self.make_node(layer) for layer in layers] - # Initialize the graph - graph = Graph(nodes=nodes, name=self.params.name) - # Connect the nodes - # - # A note on layers and outputs: - # In Caffe, each layer can produce multiple outputs ("tops") from a set of inputs - # ("bottoms"). The bottoms refer to other layers' tops. The top can rewrite a bottom - # (in case of in-place operations). Note that the layer's name is not used for establishing - # any connectivity. It's only used for data association. By convention, a layer with a - # single top will often use the same name (although this is not required). - # - # The current implementation only supports single-output nodes (note that a node can still - # have multiple children, since multiple child nodes can refer to the single top's name). - node_outputs = {} - output_trace = {} - for layer in layers: - node = graph.get_node(layer.name) - for input_name in layer.bottom: - assert input_name != layer.name - parent_node = node_outputs.get(input_name) - if (parent_node is None) or (parent_node == node): - parent_node = graph.get_node(input_name) - node.add_parent(parent_node) - - if len(layer.top) > 1: - raise KaffeError('Multiple top nodes are not supported.') - - for output_name in layer.top: - if output_name == layer.name: - # Output is named the same as the node. No further action required. - continue - # There are two possibilities here: - # - # Case 1: output_name refers to another node in the graph. - # This is an "in-place operation" that overwrites an existing node. - # This would create a cycle in the graph. We'll undo the in-placing - # by substituting this node wherever the overwritten node is referenced. - # - # Case 2: output_name violates the convention layer.name == output_name. - # Since we are working in the single-output regime, we will can rename it to - # match the layer name. - # - # For both cases, future references to this top re-routes to this node. - node_outputs[output_name] = node - if output_name in output_trace: - output_trace[output_name].append(node.name) - else: - output_trace[output_name] = [output_name, node.name] - - #build a mapping from real-name to changed-name(for caffe's INPLACE inference) - real2chg = {} - deleted = {} - for k, v in output_trace.items(): - real2chg[v[-1]] = k - for n in v: - if n in real2chg: - continue - if n not in deleted: - deleted[n] = '%s.%s' % (k, v[-1]) - - graph.add_name_trace({ - 'real2chg': real2chg, - 'deleted': deleted - }, 'caffe') - graph.compute_output_shapes() - return graph - - -class NodeMapper(NodeDispatch): - def __init__(self, graph): - self.graph = graph - - def map(self): - nodes = self.graph.topologically_sorted() - # Remove input nodes - we'll handle them separately. - input_nodes = self.graph.get_input_nodes() - nodes = [t for t in nodes if t not in input_nodes] - # Decompose DAG into chains. - chains = [] - for node in nodes: - attach_to_chain = None - if len(node.parents) == 1: - parent = node.get_only_parent() - for chain in chains: - if chain[-1] == parent: - # Node is part of an existing chain. - attach_to_chain = chain - break - if attach_to_chain is None: - # Start a new chain for this node. - attach_to_chain = [] - chains.append(attach_to_chain) - attach_to_chain.append(node) - # Map each chain. 
- mapped_chains = [] - for chain in chains: - mapped_chains.append(self.map_chain(chain)) - return self.commit(mapped_chains) - - def map_chain(self, chain): - return [self.map_node(node) for node in chain] - - def map_node(self, node): - map_func = self.get_handler(node.kind, 'map') - mapped_node = map_func(node) - assert mapped_node is not None - mapped_node.node = node - return mapped_node - - def commit(self, mapped_chains): - raise NotImplementedError('Must be implemented by subclass.') \ No newline at end of file diff --git a/caffe2fluid/kaffe/layers.py b/caffe2fluid/kaffe/layers.py deleted file mode 100644 index 2e32761..0000000 --- a/caffe2fluid/kaffe/layers.py +++ /dev/null @@ -1,250 +0,0 @@ -import re -import numbers -from collections import namedtuple -import sys -from . import custom_layers -from .shapes import * - -LAYER_DESCRIPTORS = { - - # Caffe Types - 'AbsVal': shape_identity, - 'Accuracy': shape_scalar, - 'ArgMax': shape_not_implemented, - 'BatchNorm': shape_identity, - 'BNLL': shape_not_implemented, - 'Concat': shape_concat, - 'ContrastiveLoss': shape_scalar, - 'Convolution': shape_convolution, - 'Deconvolution': shape_deconvolution, - 'Data': shape_data, - 'Dropout': shape_identity, - 'DummyData': shape_data, - 'Crop': shape_crop, - 'EuclideanLoss': shape_scalar, - 'Eltwise': shape_identity, - 'Exp': shape_identity, - 'Flatten': shape_not_implemented, - 'HDF5Data': shape_data, - 'HDF5Output': shape_identity, - 'HingeLoss': shape_scalar, - 'Im2col': shape_not_implemented, - 'ImageData': shape_data, - 'InfogainLoss': shape_scalar, - 'InnerProduct': shape_inner_product, - 'Input': shape_data, - 'LRN': shape_identity, - 'MemoryData': shape_mem_data, - 'MultinomialLogisticLoss': shape_scalar, - 'MVN': shape_not_implemented, - 'Pooling': shape_pool, - 'Power': shape_power, - 'ReLU': shape_identity, - 'PReLU': shape_identity, - 'Scale': shape_identity, - 'Sigmoid': shape_identity, - 'SigmoidCrossEntropyLoss': shape_scalar, - 'Silence': shape_not_implemented, - 'Softmax': shape_identity, - 'SoftmaxWithLoss': shape_scalar, - 'Split': shape_not_implemented, - 'Slice': shape_not_implemented, - 'TanH': shape_identity, - 'WindowData': shape_not_implemented, - 'Threshold': shape_identity, -} - -# layer types in 'V1LayerParameter' -# (v1layertype name, enum value, mapped to layer type) -v1_layertypes = [ - ('ABSVAL', 35), - ('ACCURACY', 1), - ('ARGMAX', 30), - ('BNLL', 2), - ('CONCAT', 3), - ('CONVOLUTION', 4), - ('DATA', 5), - ('DECONVOLUTION', 39), - ('DROPOUT', 6), - ('ELTWISE', 25), - ('EXP', 38), - ('FLATTEN', 8), - ('IM2COL', 11), - ('INNERPRODUCT', 14), - ('LRN', 15), - ('MEMORYDATA', 29), - ('MULTINOMIALLOGISTICLOSS', 16), - ('MVN', 34), - ('POOLING', 17), - ('POWER', 26), - ('RELU', 18), - ('SIGMOID', 19), - ('SIGMOIDCROSSENTROPYLOSS', 27), - ('SILENCE', 36), - ('SOFTMAX', 20), - ('SPLIT', 22), - ('SLICE', 33), - ('TANH', 23), - ('WINDOWDATA', 24), - ('THRESHOLD', 31), -] - -LAYER_TYPES = LAYER_DESCRIPTORS.keys() -LayerType = type('LayerType', (), {t: t for t in LAYER_TYPES}) - -#map the layer name in V1 to standard name -V1_LAYER_MAP = {'_not_init_': True} - - -def get_v1_layer_map(): - global V1_LAYER_MAP - if '_not_init_' not in V1_LAYER_MAP: - return V1_LAYER_MAP - else: - del V1_LAYER_MAP['_not_init_'] - - name2layer = {} - for n in LAYER_TYPES: - name2layer[n.upper()] = n - - for l in v1_layertypes: - n, v = l - if n in name2layer and v not in V1_LAYER_MAP: - V1_LAYER_MAP[v] = name2layer[n] - else: - raise KaffeError('not found v1 layer type %s' % n) - return 
V1_LAYER_MAP - - -class NodeKind(LayerType): - @staticmethod - def map_raw_kind(kind): - if custom_layers.has_layer(kind): - return kind - - if kind in LAYER_TYPES: - return kind - - v1_layers = get_v1_layer_map() - if kind in v1_layers: - return v1_layers[kind] - else: - return None - - @staticmethod - def compute_output_shape(node): - if custom_layers.has_layer(node.kind): - return custom_layers.compute_output_shape(node.kind, node) - - try: - val = LAYER_DESCRIPTORS[node.kind](node) - return val - except NotImplementedError: - raise KaffeError( - 'Output shape computation not implemented for type: %s' % - node.kind) - - -class NodeDispatchError(KaffeError): - pass - - -class NodeDispatch(object): - @staticmethod - def get_handler_name(node_kind): - if len(node_kind) <= 6: - # A catch-all for things like ReLU and tanh - return node_kind.lower() - # Convert from CamelCase to under_scored - name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', node_kind) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() - - def get_handler(self, node_kind, prefix): - if custom_layers.has_layer(node_kind): - return getattr(self, 'map_custom') - - name = self.get_handler_name(node_kind) - name = '_'.join((prefix, name)) - try: - return getattr(self, name) - except AttributeError: - raise NodeDispatchError( - 'No handler found for node kind: %s (expected: %s)' % - (node_kind, name)) - - -class LayerAdapter(object): - def __init__(self, layer, kind): - self.layer = layer - self.kind = kind - - @property - def parameters(self): - name = NodeDispatch.get_handler_name(self.kind) - if self.kind.lower() == "normalize": - name = "norm" - elif self.kind.lower() == "deconvolution": - name = "convolution" - - name = '_'.join((name, 'param')) - try: - return getattr(self.layer, name) - except AttributeError: - print(dir(self.layer)) - raise NodeDispatchError( - 'Caffe parameters not found attr[%s] for layer kind[%s]' % - (name, self.kind)) - - @staticmethod - def get_kernel_value(scalar, repeated, idx, default=None): - if scalar: - return scalar - if repeated: - if isinstance(repeated, numbers.Number): - return repeated - if len(repeated) == 1: - # Same value applies to all spatial dimensions - return int(repeated[0]) - assert idx < len(repeated) - # Extract the value for the given spatial dimension - return repeated[idx] - if default is None: - raise ValueError('Unable to determine kernel parameter!') - return default - - @property - def kernel_parameters(self): - assert self.kind in (NodeKind.Convolution, NodeKind.Pooling,\ - NodeKind.Deconvolution) - - params = self.parameters - k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0) - k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1) - s_h = self.get_kernel_value( - params.stride_h, params.stride, 0, default=1) - s_w = self.get_kernel_value( - params.stride_w, params.stride, 1, default=1) - p_h = self.get_kernel_value(params.pad_h, params.pad, 0, default=0) - p_w = self.get_kernel_value(params.pad_w, params.pad, 1, default=0) - - dila_h = dila_w = 1 - if self.kind in (NodeKind.Convolution, NodeKind.Deconvolution): - dila_len = len(params.dilation) - if dila_len == 2: - dila_h = params.dilation[0] - dila_w = params.dilation[1] - elif dila_len == 1: - dila_h = dila_w = params.dilation[0] - else: - assert dila_len == 0, "invalid length[%s] of dilation in convolution" % ( - dila_len) - - return KernelParameters(k_h, k_w, s_h, s_w, p_h, p_w, dila_h, dila_w) - - -KernelParameters = namedtuple( - 'KernelParameters', - [ - 'kernel_h', 
'kernel_w', 'stride_h', 'stride_w', 'pad_h', 'pad_w', - 'dila_h', 'dila_w' - ], ) diff --git a/caffe2fluid/kaffe/net_template.py b/caffe2fluid/kaffe/net_template.py deleted file mode 100644 index f9387c9..0000000 --- a/caffe2fluid/kaffe/net_template.py +++ /dev/null @@ -1,160 +0,0 @@ -""" this module is used as a template for generating sub class of Network -""" - - -class MyNet(object): - ### automatically generated by caffe2fluid ### - inputs_info = "INPUTS_INFO" - custom_layers_path = "_CAFFE2FLUID_CUSTOM_LAYERS_" - - def custom_layer_factory(self): - import os - - pk_paths = [] - default = os.path.dirname(os.path.abspath(__file__)) - location = os.environ.get('CAFFE2FLUID_CUSTOM_LAYERS', default) - pk_name = 'custom_layers' - pk_dir = os.path.join(location, pk_name) - pk_paths.append((location, pk_dir)) - - location = MyNet.custom_layers_path - pk_dir = os.path.join(MyNet.custom_layers_path, pk_name) - pk_paths.append((location, pk_dir)) - - for loc, pk_dir in pk_paths: - if os.path.exists(pk_dir): - if loc not in sys.path: - sys.path.insert(0, loc) - break - - try: - from custom_layers import make_custom_layer - return make_custom_layer - except Exception as e: - print('maybe you should set $CAFFE2FLUID_CUSTOM_LAYERS first') - raise e - - @classmethod - def input_shapes(cls): - return cls.inputs_info - - @classmethod - def convert(cls, npy_model, fluid_path, outputs=None): - fluid = import_fluid() - shapes = cls.input_shapes() - input_name = list(shapes.keys())[0] - feed_data = {} - for name, shape in shapes.items(): - data_layer = fluid.layers.data( - name=name, shape=shape, dtype="float32") - feed_data[name] = data_layer - - net = cls(feed_data) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - net.load(data_path=npy_model, exe=exe, place=place) - output_vars = [] - - model_filename = 'model' - params_filename = 'params' - if outputs is None: - output_vars.append(net.get_output()) - else: - if outputs[0] == 'dump_all': - model_filename = None - params_filename = None - output_vars.append(net.get_output()) - else: - if type(outputs) is list: - for n in outputs: - assert n in net.layers, 'not found layer with this name[%s]' % ( - n) - output_vars.append(net.layers[n]) - - fluid.io.save_inference_model( - fluid_path, [input_name], - output_vars, - exe, - main_program=None, - model_filename=model_filename, - params_filename=params_filename) - return 0 - - -def main(): - """ a tool used to convert caffe model to fluid - """ - - import sys - import os - import argparse - filename = os.path.splitext(os.path.basename(sys.argv[0]))[0] - parser = argparse.ArgumentParser() - parser.add_argument('--npy_path', help='Model\'s parameters (.npy) path') - parser.add_argument('--model-param-path', help='The path of model and param which are convertd by .npy', - default='./fluid') - parser.add_argument( - '--need-layers-name', help='The layers need to save (split by ,)') - args = parser.parse_args() - npy_weight = args.npy_path - fluid_model = args.model_param_path - outputs = None - if len(sys.argv) >= 6: - outputs = args.need_layers_name.split(',') - - ret = MyNet.convert(npy_weight, fluid_model, outputs) - if ret == 0: - outputs = 'last output layer' if outputs is None else outputs - print('succeed to convert to fluid format with output layers[%s]' - ' in directory[%s]' % (outputs, fluid_model)) - else: - print('failed to convert model to fluid format') - - return ret - - -def generate_net_code(net_name, inputs_info): - """ generate framework of a 
custom net code which represent a subclass of Network - - Args: - @net_name (str): class name for this net - @inputs_info (str): a str which represents a dict, eg: '{"data": [3, 32, 32]}' - Returns: - net_codes (str): codes for this subclass - """ - import os - import inspect - - net_codes = str(inspect.getsource(MyNet)) - net_codes = net_codes.replace('MyNet(object)', '%s(Network)' % net_name) - net_codes = net_codes.replace('MyNet', net_name) - net_codes = net_codes.replace('"INPUTS_INFO"', inputs_info) - - custom_layer_dir = os.path.dirname(os.path.abspath(__file__)) - net_codes = net_codes.replace('_CAFFE2FLUID_CUSTOM_LAYERS_', - custom_layer_dir) - return net_codes - - -def generate_main_code(net_name): - """ generate a piece of code for 'main' function - - Args: - @net_name (str): class name for this net - - Returns: - main_codes (str): codes for this main function - """ - import inspect - - main_codes = str(inspect.getsource(main)) - main_codes = main_codes.replace('MyNet', net_name) - return main_codes - - -if __name__ == "__main__": - """ just for testing - """ - print(generate_net_code('Attribute', "{'data': [3, 277, 277]}")) - print(generate_main_code('Attribute')) diff --git a/caffe2fluid/kaffe/paddle/__init__.py b/caffe2fluid/kaffe/paddle/__init__.py deleted file mode 100644 index 685b653..0000000 --- a/caffe2fluid/kaffe/paddle/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .transformer import Transformer -from .network import Network diff --git a/caffe2fluid/kaffe/paddle/network.py b/caffe2fluid/kaffe/paddle/network.py deleted file mode 100644 index 88eba56..0000000 --- a/caffe2fluid/kaffe/paddle/network.py +++ /dev/null @@ -1,587 +0,0 @@ -import sys -import os -import math -import numpy as np -from past.builtins import basestring - - - -def import_fluid(): - import paddle.fluid as fluid - return fluid - - -def layer(op): - '''Decorator for composable network layers.''' - - def layer_decorated(self, *args, **kwargs): - # Automatically set a name if not provided. - name = kwargs.setdefault('name', self.get_unique_name(op.__name__)) - # Figure out the layer inputs. - if len(self.terminals) == 0: - raise RuntimeError('No input variables found for layer %s.' % name) - elif len(self.terminals) == 1: - layer_input = self.terminals[0] - else: - layer_input = list(self.terminals) - - self.layer_reverse_trace[name] = layer_input - # Perform the operation and get the output. - layer_output = op(self, layer_input, *args, **kwargs) - # Add to layer LUT. - self.layers[name] = layer_output - self.var2name[layer_output.name] = (name, layer_output) - - # This output is now the input for the next layer. - self.feed(layer_output) - # Return self for chained calls. - return self - - return layer_decorated - - -class Network(object): - def __init__(self, inputs, trainable=True): - # The input nodes for this network - self.inputs = inputs - # The current list of terminal nodes - self.terminals = [] - # Mapping from layer names to layers - self.layers = dict(inputs) - # If true, the resulting variables are set as trainable - self.trainable = trainable - # Switch variable for dropout - self.paddle_env = None - self.output_names = [] - self.name_trace = None - - self.layer_reverse_trace = {} - self.var2name = {} - self.setup() - - def setup(self): - '''Construct the network. 
''' - raise NotImplementedError('Must be implemented by the subclass.') - - def locate_ancestor(self, v, which=[0], ancestor_level=1): - """ find a ancestor for a node 'v' which is a fluid variable - """ - ancestor = None - which = which * ancestor_level - name = self.var2name[v.name][0] - - for i in range(ancestor_level): - v = self.layer_reverse_trace[name] - if type(v) is list: - ancestor = self.var2name[v[which[i]].name] - else: - ancestor = self.var2name[v.name] - name = ancestor[0] - return ancestor - - def load(self, data_path, exe=None, place=None, ignore_missing=False): - '''Load network weights. - data_path: The path to the numpy-serialized network weights - ignore_missing: If true, serialized weights for missing layers are ignored. - ''' - fluid = import_fluid() - #load fluid mode directly - if os.path.isdir(data_path): - assert (exe is not None), \ - 'must provide a executor to load fluid model' - fluid.io.load_persistables(executor=exe, dirname=data_path) - return True - - #load model from a npy file - if exe is None or place is None: - if self.paddle_env is None: - place = fluid.CPUPlace() - exe = fluid.Executor(place) - self.paddle_env = {'place': place, 'exe': exe} - exe = exe.run(fluid.default_startup_program()) - else: - place = self.paddle_env['place'] - exe = self.paddle_env['exe'] - - data_dict = np.load(data_path, allow_pickle=True).item() - for op_name in data_dict: - if op_name == 'caffe2fluid_name_trace': - self.name_trace = data_dict[op_name] - continue - - layer = self.layers[op_name] - for param_name, data in data_dict[op_name].items(): - try: - name = '%s_%s' % (op_name, param_name) - v = fluid.global_scope().find_var(name) - w = v.get_tensor() - w.set(data.reshape(w.shape()), place) - except ValueError: - if not ignore_missing: - raise - return True - - def feed(self, *args): - '''Set the input(s) for the next operation by replacing the terminal nodes. - The arguments can be either layer names or the actual layers. - ''' - assert len(args) != 0 - self.terminals = [] - for fed_layer in args: - if isinstance(fed_layer, basestring): - try: - fed_layer = self.layers[fed_layer] - except KeyError: - raise KeyError('Unknown layer name fed: %s' % fed_layer) - self.terminals.append(fed_layer) - return self - - def get_output(self): - '''Returns the current network output.''' - return self.terminals[-1] - - def get_unique_name(self, prefix): - '''Returns an index-suffixed unique name for the given prefix. - This is used for auto-generating layer names based on the type-prefix. - ''' - ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1 - return '%s_%d' % (prefix, ident) - - def get_unique_output_name(self, prefix, layertype): - '''Returns an index-suffixed unique name for the given prefix. - This is used for auto-generating layer names based on the type-prefix. 
- ''' - ident = sum(t.startswith(prefix) for t in self.output_names) + 1 - unique_name = '%s.%s.output.%d' % (prefix, layertype, ident) - self.output_names.append(unique_name) - return unique_name - - @layer - def conv(self, - input, - k_h, - k_w, - c_o, - s_h, - s_w, - name, - relu=True, - relu_negative_slope=0.0, - padding=None, - dilation=1, - group=1, - biased=True): - if padding is None: - padding = [0, 0] - - # Get the number of channels in the input - c_i, h_i, w_i = input.shape[1:] - - # Verify that the grouping parameter is valid - assert c_i % group == 0 - assert c_o % group == 0 - - fluid = import_fluid() - prefix = name + '_' - leaky_relu = False - act = 'relu' - if relu is False: - act = None - elif relu_negative_slope != 0.0: - leaky_relu = True - act = None - - output = fluid.layers.conv2d( - name=self.get_unique_output_name(name, 'conv2d'), - input=input, - filter_size=[k_h, k_w], - num_filters=c_o, - stride=[s_h, s_w], - padding=padding, - dilation=dilation, - groups=group, - param_attr=fluid.ParamAttr(name=prefix + "weights"), - bias_attr=fluid.ParamAttr(name=prefix + "biases"), - act=act) - - if leaky_relu: - output = fluid.layers.leaky_relu(output, alpha=relu_negative_slope) - - return output - - @layer - def deconv(self, - input, - k_h, - k_w, - c_o, - s_h, - s_w, - name, - relu=True, - relu_negative_slope=0.0, - padding=None, - dilation=1, - biased=True): - if padding is None: - padding = [0, 0] - - # Get the number of channels in the input - c_i, h_i, w_i = input.shape[1:] - - fluid = import_fluid() - prefix = name + '_' - leaky_relu = False - act = 'relu' - if relu is False: - act = None - elif relu_negative_slope != 0.0: - leaky_relu = True - act = None - - p_h = padding[0] - p_w = padding[1] - h_o = (h_i - 1) * s_h - 2 * p_h + dilation * (k_h - 1) + 1 - w_o = (w_i - 1) * s_w - 2 * p_w + dilation * (k_w - 1) + 1 - output = fluid.layers.conv2d_transpose( - name=self.get_unique_output_name(name, 'conv2d_transpose'), - input=input, - num_filters=c_o, - output_size=[h_o, w_o], - filter_size=[k_h, k_w], - padding=padding, - stride=[s_h, s_w], - dilation=dilation, - param_attr=fluid.ParamAttr(name=prefix + "weights"), - bias_attr=fluid.ParamAttr(name=prefix + "biases"), - act=act) - - if leaky_relu: - output = fluid.layers.leaky_relu(output, alpha=relu_negative_slope) - - return output - - @layer - def relu(self, input, name): - fluid = import_fluid() - output = fluid.layers.relu(input) - return output - - @layer - def prelu(self, input, channel_shared, name): - fluid = import_fluid() - if channel_shared: - mode = 'all' - else: - mode = 'channel' - - prefix = name + '_' - output = fluid.layers.prelu( - input, - mode=mode, - param_attr=fluid.ParamAttr(name=prefix + 'negslope')) - return output - - def pool(self, - pool_type, - input, - k_h, - k_w, - s_h, - s_w, - ceil_mode, - padding, - name, - exclusive=True): - # Get the number of channels in the input - in_hw = input.shape[2:] - k_hw = [k_h, k_w] - s_hw = [s_h, s_w] - - fluid = import_fluid() - output = fluid.layers.pool2d( - name=name, - input=input, - pool_size=k_hw, - pool_stride=s_hw, - pool_padding=padding, - ceil_mode=ceil_mode, - pool_type=pool_type, - exclusive=exclusive) - return output - - @layer - def max_pool(self, - input, - k_h, - k_w, - s_h, - s_w, - ceil_mode, - padding=[0, 0], - name=None): - return self.pool( - 'max', - input, - k_h, - k_w, - s_h, - s_w, - ceil_mode, - padding, - name=self.get_unique_output_name(name, 'max_pool')) - - @layer - def avg_pool(self, - input, - k_h, - k_w, - s_h, - s_w, - 
ceil_mode, - padding=[0, 0], - name=None): - return self.pool( - 'avg', - input, - k_h, - k_w, - s_h, - s_w, - ceil_mode, - padding, - name=self.get_unique_output_name(name, 'avg_pool'), - exclusive=False) - - @layer - def sigmoid(self, input, name): - fluid = import_fluid() - return fluid.layers.sigmoid( - input, name=self.get_unique_output_name(name, 'sigmoid')) - - @layer - def tanh(self, input, name): - fluid = import_fluid() - return fluid.layers.tanh( - input, name=self.get_unique_output_name(name, 'tanh')) - - @layer - def lrn(self, input, radius, alpha, beta, name, bias=1.0): - fluid = import_fluid() - output = fluid.layers.lrn(input=input, - n=radius, - k=bias, - alpha=alpha, - beta=beta, - name=self.get_unique_output_name(name, 'lrn')) - return output - - @layer - def concat(self, inputs, axis, name): - fluid = import_fluid() - output = fluid.layers.concat( - input=inputs, - axis=axis, - name=self.get_unique_output_name(name, 'concat')) - return output - - @layer - def add(self, inputs, name): - fluid = import_fluid() - output = inputs[0] - for i in inputs[1:]: - output = fluid.layers.elementwise_add( - x=output, y=i, name=self.get_unique_output_name(name, 'add')) - return output - - @layer - def max(self, inputs, name): - fluid = import_fluid() - output = inputs[0] - for i in inputs[1:]: - output = fluid.layers.elementwise_max( - x=output, y=i, name=self.get_unique_output_name(name, 'max')) - return output - - @layer - def multiply(self, inputs, name): - fluid = import_fluid() - output = inputs[0] - for i in inputs[1:]: - output = fluid.layers.elementwise_mul( - x=output, y=i, name=self.get_unique_output_name(name, 'mul')) - return output - - @layer - def fc(self, input, num_out, name, relu=True, act=None): - fluid = import_fluid() - - if act is None: - act = 'relu' if relu is True else None - - prefix = name + '_' - output = fluid.layers.fc( - name=self.get_unique_output_name(name, 'fc'), - input=input, - size=num_out, - act=act, - param_attr=fluid.ParamAttr(name=prefix + 'weights'), - bias_attr=fluid.ParamAttr(name=prefix + 'biases')) - return output - - @layer - def softmax(self, input, axis=2, name=None): - fluid = import_fluid() - shape = input.shape - dims = len(shape) - axis = axis + dims if axis < 0 else axis - - need_transpose = False - if axis + 1 != dims: - need_transpose = True - - if need_transpose: - in_order = list(range(dims)) - in_order.remove(axis) - in_order.append(axis) - input = fluid.layers.transpose( - input, - perm=in_order, - name=self.get_unique_output_name(name, 'transpose')) - - output = fluid.layers.softmax( - input, name=self.get_unique_output_name(name, 'softmax')) - - if need_transpose: - out_order = [0, ] * dims - for id, v in enumerate(in_order): - out_order[v] = id - output = fluid.layers.transpose( - output, - perm=out_order, - name=self.get_unique_output_name(name, 'transpose')) - return output - - @layer - def batch_normalization(self, - input, - name, - scale_offset=True, - eps=1e-5, - relu=False, - relu_negative_slope=0.0): - # NOTE: Currently, only inference is supported - fluid = import_fluid() - prefix = name + '_' - param_attr = None if scale_offset is False else fluid.ParamAttr( - name=prefix + 'scale') - bias_attr = None if scale_offset is False else fluid.ParamAttr( - name=prefix + 'offset') - mean_name = prefix + 'mean' - variance_name = prefix + 'variance' - - leaky_relu = False - act = 'relu' - if relu is False: - act = None - elif relu_negative_slope != 0.0: - leaky_relu = True - act = None - - output = fluid.layers.batch_norm( 
- name=self.get_unique_output_name(name, 'batch_norm'), - input=input, - is_test=True, - param_attr=param_attr, - bias_attr=bias_attr, - moving_mean_name=mean_name, - moving_variance_name=variance_name, - epsilon=eps, - act=act) - - if leaky_relu: - output = fluid.layers.leaky_relu(output, alpha=relu_negative_slope) - - return output - - @layer - def dropout(self, input, drop_prob, name, is_test=True): - fluid = import_fluid() - if is_test: - output = input - else: - output = fluid.layers.dropout( - input, - dropout_prob=drop_prob, - is_test=is_test, - name=self.get_unique_output_name(name, 'dropout')) - return output - - @layer - def scale(self, input, axis=1, num_axes=1, name=None): - fluid = import_fluid() - - assert num_axes == 1, "layer scale not support this num_axes[%d] now" % ( - num_axes) - - prefix = name + '_' - if isinstance(input, list) and len(input) == 2: - # for two tensor, here resets axis to 1. Maybe there is a bug for unkown case. - axis = 1 - bias_shape = input[0].shape[axis:axis + num_axes] - scale_param = input[1] - input = input[0] - else: - bias_shape = input.shape[axis:axis + num_axes] - param_attr = fluid.ParamAttr(name=prefix + 'scale') - scale_param = fluid.layers.create_parameter( - shape=bias_shape, - dtype=input.dtype, - name=name, - attr=param_attr, - is_bias=True, - default_initializer=fluid.initializer.Constant(value=1.0)) - - output = fluid.layers.elementwise_mul( - input, - scale_param, - axis=axis, - name=self.get_unique_output_name(name, 'scale_mul')) - - scale_shape = bias_shape - offset_attr = fluid.ParamAttr(name=prefix + 'offset') - offset_param = fluid.layers.create_parameter( - shape=scale_shape, - dtype=input.dtype, - name=name, - attr=offset_attr, - is_bias=True, - default_initializer=fluid.initializer.Constant(value=0.0)) - - output = fluid.layers.elementwise_add( - output, - offset_param, - axis=axis, - name=self.get_unique_output_name(name, 'scale_add')) - return output - - def custom_layer_factory(self): - """ get a custom layer maker provided by subclass - """ - raise NotImplementedError( - '[custom_layer_factory] must be implemented by the subclass.') - - @layer - def custom_layer(self, inputs, kind, name, *args, **kwargs): - """ make custom layer - """ - #FIX ME: - # there is a trick for different API between caffe and paddle - if kind == "DetectionOutput": - conf_var = inputs[1] - real_conf_var = self.locate_ancestor(conf_var, ancestor_level=2) - inputs[1] = real_conf_var[1] - - name = self.get_unique_output_name(name, kind) - layer_factory = self.custom_layer_factory() - return layer_factory(kind, inputs, name, *args, **kwargs) diff --git a/caffe2fluid/kaffe/paddle/transformer.py b/caffe2fluid/kaffe/paddle/transformer.py deleted file mode 100644 index 77ef7aa..0000000 --- a/caffe2fluid/kaffe/paddle/transformer.py +++ /dev/null @@ -1,398 +0,0 @@ -import numpy as np -from past.builtins import basestring -from ..errors import KaffeError, print_stderr -from ..graph import GraphBuilder, NodeMapper -from ..layers import NodeKind -from ..transformers import (DataInjector, DataReshaper, NodeRenamer, - SubNodeFuser, ReLUFuser, BatchNormScaleBiasFuser, - BatchNormPreprocessor, ParameterNamer, CropFuser) -from . 
import network - - -class PaddleNode(object): - '''An intermediate representation for Paddle operations.''' - - def __init__(self, op, *args, **kwargs): - # A string corresponding to the Paddle operation - self.op = op - # Positional arguments for the operation - self.args = args - # Keyword arguments for the operation - self.kwargs = list(kwargs.items()) - # The source Caffe node - self.node = None - - def format(self, arg): - '''Returns a string representation for the given value.''' - return "'%s'" % arg if isinstance(arg, basestring) else str(arg) - - def pair(self, key, value): - '''Returns key=formatted(value).''' - return '%s=%s' % (key, self.format(value)) - - def emit(self): - '''Emits the Python source for this node.''' - # Format positional arguments - args = map(self.format, self.args) - args = list(args) - - # Format any keyword arguments - if self.kwargs: - args += [self.pair(k, v) for k, v in self.kwargs] - # Set the node name - args.append(self.pair('name', self.node.name)) - args = ', '.join(args) - return '%s(%s)' % (self.op, args) - - -class MaybeActivated(object): - def __init__(self, node, default=True): - self.inject_kwargs = {} - if node.metadata.get('relu', False) != default: - self.inject_kwargs['relu'] = not default - - default_slope = 0.0 - slope = node.metadata.get('relu_negative_slope', default_slope) - if slope != default_slope: - self.inject_kwargs['relu_negative_slope'] = slope - - def __call__(self, *args, **kwargs): - kwargs.update(self.inject_kwargs) - return PaddleNode(*args, **kwargs) - - -class PaddleMapper(NodeMapper): - def get_kernel_params(self, node): - kernel_params = node.layer.kernel_parameters - input_shape = node.get_only_parent().output_shape - padding = [kernel_params.pad_h, kernel_params.pad_w] - if padding[0] == 0 and padding[1] == 0: - padding = {} - else: - padding = {'padding': padding} - return (kernel_params, padding) - - def map_convolution(self, node): - (kernel_params, kwargs) = self.get_kernel_params(node) - h = kernel_params.kernel_h - w = kernel_params.kernel_w - c_o = node.output_shape[1] - c_i = node.parents[0].output_shape[1] - group = node.parameters.group - if group != 1: - kwargs['group'] = group - if not node.parameters.bias_term: - kwargs['biased'] = False - - if kernel_params.dila_h != 1 or kernel_params.dila_w != 1: - kwargs['dilation'] = (kernel_params.dila_h, kernel_params.dila_w) - - assert kernel_params.kernel_h == h - assert kernel_params.kernel_w == w - return MaybeActivated(node)( - 'conv', kernel_params.kernel_h, kernel_params.kernel_w, c_o, - kernel_params.stride_h, kernel_params.stride_w, **kwargs) - - def map_deconvolution(self, node): - (kernel_params, kwargs) = self.get_kernel_params(node) - h = kernel_params.kernel_h - w = kernel_params.kernel_w - c_o = node.output_shape[1] - c_i = node.parents[0].output_shape[1] - if not node.parameters.bias_term: - kwargs['biased'] = False - - if kernel_params.dila_h != 1 or kernel_params.dila_w != 1: - kwargs['dilation'] = (kernel_params.dila_h, kernel_params.dila_w) - - assert kernel_params.kernel_h == h - assert kernel_params.kernel_w == w - return MaybeActivated(node)( - 'deconv', kernel_params.kernel_h, kernel_params.kernel_w, c_o, - kernel_params.stride_h, kernel_params.stride_w, **kwargs) - - def map_relu(self, node): - return PaddleNode('relu') - - def map_prelu(self, node): - channel_shared = getattr(node.parameters, 'channel_shared', False) - return PaddleNode('prelu', channel_shared) - - def map_tanh(self, node): - return PaddleNode('tanh') - - def 
map_pooling(self, node): - pool_type = node.parameters.pool - if pool_type == 0: - pool_op = 'max_pool' - elif pool_type == 1: - pool_op = 'avg_pool' - else: - # Stochastic pooling, for instance. - raise KaffeError('Unsupported pooling type.') - - ceil_mode = getattr(node.layer.parameters, 'ceil_mode', True) - global_pool = getattr(node.layer.parameters, 'global_pooling', False) - if global_pool: - input_shape = node.get_only_parent().output_shape - return PaddleNode(pool_op, input_shape.height, input_shape.width, 1, - 1, ceil_mode) - else: - (kernel_params, padding) = self.get_kernel_params(node) - return PaddleNode(pool_op, kernel_params.kernel_h, - kernel_params.kernel_w, kernel_params.stride_h, - kernel_params.stride_w, ceil_mode, **padding) - - def map_sigmoid(self, node): - return PaddleNode('sigmoid') - - def map_custom(self, node): - from .. import custom_layers - return custom_layers.make_node(PaddleNode, node.kind, node) - - def map_inner_product(self, node): - #TODO: Axis - assert node.parameters.axis == 1 - #TODO: Unbiased - assert node.parameters.bias_term == True - return MaybeActivated(node)('fc', node.parameters.num_output) - - def map_softmax(self, node): - return PaddleNode('softmax', node.parameters.axis) - - def map_lrn(self, node): - params = node.parameters - # The window size must be an odd value. For a window - # size of (2*n+1), Paddle defines depth_radius = n. - assert params.local_size % 2 == 1 - # Caffe scales by (alpha/(2*n+1)), whereas Paddle - # just scales by alpha (as does Krizhevsky's paper). - # We'll account for that here. - alpha = params.alpha / float(params.local_size) - return PaddleNode('lrn', params.local_size, alpha, params.beta) - - def map_concat(self, node): - return PaddleNode('concat', node.parameters.axis) - - def map_dropout(self, node): - return PaddleNode('dropout', node.parameters.dropout_ratio) - - def map_batch_norm(self, node): - scale_offset = len(node.data) == 4 - - #this default value comes from caffe's param in batch_norm - default_eps = 1e-5 - kwargs = {'scale_offset': scale_offset} - if node.parameters.eps != default_eps: - kwargs['eps'] = node.parameters.eps - - return MaybeActivated( - node, default=False)('batch_normalization', **kwargs) - - def map_eltwise(self, node): - operations = {0: 'multiply', 1: 'add', 2: 'max'} - op_code = node.parameters.operation - try: - return PaddleNode(operations[op_code]) - except KeyError: - raise KaffeError('Unknown elementwise operation: {}'.format( - op_code)) - - def map_scale(self, node): - params = node.parameters - return PaddleNode('scale', axis=params.axis, num_axes=params.num_axes) - - def commit(self, chains): - return chains - - -class PaddleEmitter(object): - def __init__(self, tab=None): - self.tab = tab or ' ' * 4 - self.prefix = '' - self.net_name = '' - - def indent(self): - self.prefix += self.tab - - def outdent(self): - self.prefix = self.prefix[:-len(self.tab)] - - def statement(self, s): - return self.prefix + s + '\n' - - def emit_imports(self): - import inspect - codes = [] - codes.append( - '### generated by caffe2fluid, your net is in class "%s" ###\n' % - (self.net_name)) - network_source = inspect.getsource(network) - codes.append(network_source + '\n') - return self.statement('\n'.join(codes)) - - def emit_setup_def(self): - return self.statement('def setup(self):') - - def get_inputs_info(self, input_nodes): - input_shapes = {} - for n in input_nodes: - name = n.name - output_shape = n.output_shape - shape = [str(s) for s in output_shape[1:]] - input_shapes[name] 
= ', '.join(shape) - input_shapes = ['"%s": [%s]' % (n, l) for n, l in input_shapes.items()] - shape_str = ','.join(input_shapes) - return '{%s}' % (shape_str) - - def emit_main_def(self, name): - if name is None: - return '' - - self.prefix = '' - main_def = self.statement('if __name__ == "__main__":') - self.indent() - main_def += self.statement('exit(main())') - return '\n\n' + main_def - - def emit_parents(self, chain): - assert len(chain) - s = 'self.feed(' - sep = ', \n' + self.prefix + (' ' * len(s)) - s += sep.join( - ["'%s'" % parent.name for parent in chain[0].node.parents]) - return self.statement(s + ')') - - def emit_node(self, node): - return self.statement('self.' + node.emit()) - - def emit(self, name, chains, input_nodes=None): - from ..net_template import generate_net_code - from ..net_template import generate_main_code - - self.net_name = name - inputs_info = self.get_inputs_info(input_nodes) - - s = self.emit_imports() - s += generate_net_code(name, inputs_info) + '\n' - self.indent() - - # define the net using api - s += self.emit_setup_def() - self.indent() - blocks = [] - for chain in chains: - b = '' - b += self.emit_parents(chain) - for node in chain: - b += self.emit_node(node) - blocks.append(b[:-1]) - s = s + '\n\n'.join(blocks) - - # define the main function - s += '\n\n\n' + generate_main_code(name) - s += self.emit_main_def(name) - return s - - -class Transformer(object): - def __init__(self, def_path, data_path, verbose=True, phase='test'): - self.verbose = verbose - self.phase = phase - self.load(def_path, data_path, phase) - self.params = None - self.source = None - - def load(self, def_path, data_path, phase): - # Build the graph - graph = GraphBuilder(def_path, phase).build() - - if data_path is not None: - # Load and associate learned parameters - graph = DataInjector(def_path, data_path)(graph) - - # Transform the graph - transformers = [ - # Fuse split batch normalization layers - BatchNormScaleBiasFuser(), - - # Fuse ReLUs - # TODO: Move non-linearity application to layer wrapper, allowing - # any arbitrary operation to be optionally activated. - ReLUFuser(allowed_parent_types=[ - NodeKind.Convolution, NodeKind.InnerProduct, NodeKind.BatchNorm - ]), - - # Rename nodes - # Slashes are used for scoping in Paddle. Replace slashes - # in node names with underscores. - # (Caffe's GoogLeNet implementation uses slashes) - NodeRenamer(lambda node: node.name.replace('/', '_')), - - # Fuse Crop - # Crop is to return a scalar output Blob for an input Blob of arbitrary size. - # When one of the input Blob is "input" or "DummyData", we can remove this input Blob - # and put the shape into the reduction layer. 
- CropFuser() - ] - - self.graph = graph.transformed(transformers) - - - - #for the purpose of recording name mapping because of fused nodes - trace = SubNodeFuser.traced_names() - chg2real = {} - deleted = {} - for k, v in trace.items(): - chg2real[k] = v[-1] #mapping from changed-name to real-name - for n in v: - if n in chg2real: - continue - if n not in deleted: - deleted[n] = '%s.%s' % (k, v[-1]) - - self.graph.add_name_trace({ - 'chg2real': chg2real, - 'deleted': deleted - }, 'paddle') - - # Display the graph - if self.verbose: - print_stderr(self.graph) - - def transform_data(self): - if self.params is None: - transformers = [ - # Reshape the parameters to Paddle's ordering - DataReshaper({ - # (c_o, c_i) -> (c_i, c_o) - NodeKind.InnerProduct: (1, 0) - }), - - # Pre-process batch normalization data - BatchNormPreprocessor(), - - # Convert parameters to dictionaries - ParameterNamer(), - ] - - self.graph = self.graph.transformed(transformers) - - self.params = { - node.name: node.data - for node in self.graph.nodes if node.data - } - - self.params['caffe2fluid_name_trace'] = self.graph.get_name_trace() - - return self.params - - def transform_source(self): - if self.source is None: - mapper = PaddleMapper(self.graph) - chains = mapper.map() - emitter = PaddleEmitter() - input_nodes = self.graph.get_input_nodes() - self.source = emitter.emit(self.graph.name, chains, input_nodes) - return self.source diff --git a/caffe2fluid/kaffe/protobuf_to_dict.py b/caffe2fluid/kaffe/protobuf_to_dict.py deleted file mode 100644 index 5b65973..0000000 --- a/caffe2fluid/kaffe/protobuf_to_dict.py +++ /dev/null @@ -1,185 +0,0 @@ -"""a util for convert protobuf to dict -""" - -from google.protobuf.message import Message -from google.protobuf.descriptor import FieldDescriptor - -__all__ = [ - "protobuf_to_dict", "TYPE_CALLABLE_MAP", "dict_to_protobuf", - "REVERSE_TYPE_CALLABLE_MAP" -] - -EXTENSION_CONTAINER = '___X' - -TYPE_CALLABLE_MAP = { - FieldDescriptor.TYPE_DOUBLE: float, - FieldDescriptor.TYPE_FLOAT: float, - FieldDescriptor.TYPE_INT32: int, - FieldDescriptor.TYPE_INT64: int, - FieldDescriptor.TYPE_UINT32: int, - FieldDescriptor.TYPE_UINT64: int, - FieldDescriptor.TYPE_SINT32: int, - FieldDescriptor.TYPE_SINT64: int, - FieldDescriptor.TYPE_FIXED32: int, - FieldDescriptor.TYPE_FIXED64: int, - FieldDescriptor.TYPE_SFIXED32: int, - FieldDescriptor.TYPE_SFIXED64: int, - FieldDescriptor.TYPE_BOOL: bool, - FieldDescriptor.TYPE_STRING: str, - FieldDescriptor.TYPE_BYTES: lambda b: b.encode("base64"), - FieldDescriptor.TYPE_ENUM: int, -} - - -def repeated(type_callable): - return lambda value_list: [type_callable(value) for value in value_list] - - -def enum_label_name(field, value): - return field.enum_type.values_by_number[int(value)].name - - -def protobuf_to_dict(pb, - type_callable_map=TYPE_CALLABLE_MAP, - use_enum_labels=False): - result_dict = {} - extensions = {} - for field, value in pb.ListFields(): - type_callable = _get_field_value_adaptor(pb, field, type_callable_map, - use_enum_labels) - if field.label == FieldDescriptor.LABEL_REPEATED: - type_callable = repeated(type_callable) - - if field.is_extension: - extensions[str(field.number)] = type_callable(value) - continue - - result_dict[field.name] = type_callable(value) - - if extensions: - result_dict[EXTENSION_CONTAINER] = extensions - return result_dict - - -def _get_field_value_adaptor(pb, - field, - type_callable_map=TYPE_CALLABLE_MAP, - use_enum_labels=False): - if field.type == FieldDescriptor.TYPE_MESSAGE: - # recursively encode 
protobuf sub-message - return lambda pb: protobuf_to_dict(pb, - type_callable_map=type_callable_map, - use_enum_labels=use_enum_labels) - - if use_enum_labels and field.type == FieldDescriptor.TYPE_ENUM: - return lambda value: enum_label_name(field, value) - - if field.type in type_callable_map: - return type_callable_map[field.type] - - raise TypeError("Field %s.%s has unrecognised type id %d" % - (pb.__class__.__name__, field.name, field.type)) - - -def get_bytes(value): - return value.decode('base64') - - -REVERSE_TYPE_CALLABLE_MAP = {FieldDescriptor.TYPE_BYTES: get_bytes, } - - -def dict_to_protobuf(pb_klass_or_instance, - values, - type_callable_map=REVERSE_TYPE_CALLABLE_MAP, - strict=True): - """Populates a protobuf model from a dictionary. - - :param pb_klass_or_instance: a protobuf message class, or an protobuf instance - :type pb_klass_or_instance: a type or instance of a subclass of google.protobuf.message.Message - :param dict values: a dictionary of values. Repeated and nested values are - fully supported. - :param dict type_callable_map: a mapping of protobuf types to callables for setting - values on the target instance. - :param bool strict: complain if keys in the map are not fields on the message. - """ - if isinstance(pb_klass_or_instance, Message): - instance = pb_klass_or_instance - else: - instance = pb_klass_or_instance() - return _dict_to_protobuf(instance, values, type_callable_map, strict) - - -def _get_field_mapping(pb, dict_value, strict): - field_mapping = [] - for key, value in dict_value.items(): - if key == EXTENSION_CONTAINER: - continue - if key not in pb.DESCRIPTOR.fields_by_name: - if strict: - raise KeyError("%s does not have a field called %s" % (pb, key)) - continue - field_mapping.append( - (pb.DESCRIPTOR.fields_by_name[key], value, getattr(pb, key, None))) - - for ext_num, ext_val in dict_value.get(EXTENSION_CONTAINER, {}).items(): - try: - ext_num = int(ext_num) - except ValueError: - raise ValueError("Extension keys must be integers.") - if ext_num not in pb._extensions_by_number: - if strict: - raise KeyError( - "%s does not have a extension with number %s. Perhaps you forgot to import it?" 
- % (pb, key)) - continue - ext_field = pb._extensions_by_number[ext_num] - pb_val = None - pb_val = pb.Extensions[ext_field] - field_mapping.append((ext_field, ext_val, pb_val)) - - return field_mapping - - -def _dict_to_protobuf(pb, value, type_callable_map, strict): - fields = _get_field_mapping(pb, value, strict) - - for field, input_value, pb_value in fields: - if field.label == FieldDescriptor.LABEL_REPEATED: - for item in input_value: - if field.type == FieldDescriptor.TYPE_MESSAGE: - m = pb_value.add() - _dict_to_protobuf(m, item, type_callable_map, strict) - elif field.type == FieldDescriptor.TYPE_ENUM and isinstance( - item, basestring): - pb_value.append(_string_to_enum(field, item)) - else: - pb_value.append(item) - continue - if field.type == FieldDescriptor.TYPE_MESSAGE: - _dict_to_protobuf(pb_value, input_value, type_callable_map, strict) - continue - - if field.type in type_callable_map: - input_value = type_callable_map[field.type](input_value) - - if field.is_extension: - pb.Extensions[field] = input_value - continue - - if field.type == FieldDescriptor.TYPE_ENUM and isinstance(input_value, - basestring): - input_value = _string_to_enum(field, input_value) - - setattr(pb, field.name, input_value) - - return pb - - -def _string_to_enum(field, input_value): - enum_dict = field.enum_type.values_by_name - try: - input_value = enum_dict[input_value].number - except KeyError: - raise KeyError("`%s` is not a valid value for field `%s`" % - (input_value, field.name)) - return input_value diff --git a/caffe2fluid/kaffe/shapes.py b/caffe2fluid/kaffe/shapes.py deleted file mode 100644 index e3e4e71..0000000 --- a/caffe2fluid/kaffe/shapes.py +++ /dev/null @@ -1,163 +0,0 @@ -import math -from collections import namedtuple - -from .errors import KaffeError - -Tensor5DShape = namedtuple('Tensor5DShape', - ['batch_size', 'data1', 'daat2', 'data3', 'data4']) - -Tensor4DShape = namedtuple('Tensor4DShape', - ['batch_size', 'channels', 'height', 'width']) - -Tensor3DShape = namedtuple('Tensor3DShape', ['batch_size', 'data1', 'data2']) - -Tensor2DShape = namedtuple('Tensor2DShape', ['batch_size', 'data']) - -ScalarShape = namedtuple('ScalarShape', ['batch_size']) - - -def make_tensor(batch_size, d1=None, d2=None, d3=None): - if d3 is not None: - return Tensor4DShape(batch_size, d1, d2, d3) - elif d1 is not None and d2 is not None: - return Tensor3DShape(batch_size, d1, d2) - elif d1 is not None and d2 is None: - return Tensor2DShape(batch_size, d1) - elif d1 is None and d2 is None and d3 is None: - return ScalarShape(batch_size) - else: - raise NotImplementedError('invalid params for make_tensor %s' \ - % (str((batch_size, d1, d2, d3)))) - - -def get_filter_output_shape(i_h, i_w, params, round_func): - dila_h = getattr(params, 'dila_h', 1) - dila_w = getattr(params, 'dila_w', 1) - - o_h = (i_h + 2 * params.pad_h - - (dila_h * (params.kernel_h - 1) + 1)) / float(params.stride_h) + 1 - o_w = (i_w + 2 * params.pad_w - - (dila_w * (params.kernel_w - 1) + 1)) / float(params.stride_w) + 1 - - return (int(round_func(o_h)), int(round_func(o_w))) - - -def get_strided_kernel_output_shape(node, round_func): - assert node.layer is not None - input_shape = node.get_only_parent().output_shape - o_h, o_w = get_filter_output_shape(input_shape.height, input_shape.width, - node.layer.kernel_parameters, round_func) - params = node.layer.parameters - has_c_o = hasattr(params, 'num_output') - c = params.num_output if has_c_o else input_shape.channels - return make_tensor(input_shape.batch_size, c, o_h, o_w) - - 
-def shape_not_implemented(node): - raise NotImplementedError - - -def shape_identity(node): - assert len(node.parents) > 0 - return node.parents[0].output_shape - - -def shape_scalar(node): - return make_tensor(1, 1, 1, 1) - - -def shape_crop(node): - raise KaffeError('crop function had been defined in customer_layers') - - -def shape_power(node): - raise KaffeError('power function had been defined in customer_layers') - - -def shape_data(node): - if node.output_shape: - # Old-style input specification - shape = node.output_shape - else: - try: - # New-style input specification - shape = map(int, node.parameters.shape[0].dim) - except: - # We most likely have a data layer on our hands. The problem is, - # Caffe infers the dimensions of the data from the source (eg: LMDB). - # We want to avoid reading datasets here. Fail for now. - # This can be temporarily fixed by transforming the data layer to - # Caffe's "input" layer (as is usually used in the "deploy" version). - # TODO: Find a better solution for this. - raise KaffeError( - 'Cannot determine dimensions of data layer.\n' - 'See comments in function shape_data for more info.') - return shape - - -def shape_mem_data(node): - params = node.parameters - return make_tensor(params.batch_size, params.channels, params.height, - params.width) - - -def shape_concat(node): - axis = node.layer.parameters.axis - output_shape = None - for parent in node.parents: - if output_shape is None: - output_shape = list(parent.output_shape) - else: - output_shape[axis] += parent.output_shape[axis] - return tuple(output_shape) - - -def shape_convolution(node): - return get_strided_kernel_output_shape(node, math.floor) - - -def shape_deconvolution(node): - assert node.layer is not None - input_shape = node.get_only_parent().output_shape - h_i = input_shape.height - w_i = input_shape.width - - params = node.layer.kernel_parameters - p_h = params.pad_h - p_w = params.pad_w - - dila_h = params.dila_h - dila_w = params.dila_w - - k_h = params.kernel_h - k_w = params.kernel_w - - s_h = params.stride_h - s_w = params.stride_w - - h_o = (h_i - 1) * s_h - 2 * p_h + dila_h * (k_h - 1) + 1 - w_o = (w_i - 1) * s_w - 2 * p_w + dila_w * (k_w - 1) + 1 - - params = node.layer.parameters - has_c_o = hasattr(params, 'num_output') - c = params.num_output if has_c_o else input_shape.channels - return make_tensor(input_shape.batch_size, c, h_o, w_o) - - -def shape_pool(node): - global_pool = getattr(node.layer.parameters, 'global_pooling', False) - if global_pool: - input_shape = node.get_only_parent().output_shape - return make_tensor(input_shape.batch_size, input_shape.channels, 1, 1) - - ceil_mode = getattr(node.layer.parameters, 'ceil_mode', True) - if ceil_mode is True: - method = math.ceil - else: - method = math.floor - return get_strided_kernel_output_shape(node, method) - - -def shape_inner_product(node): - input_shape = node.get_only_parent().output_shape - return make_tensor(input_shape.batch_size, node.layer.parameters.num_output) diff --git a/caffe2fluid/kaffe/transformers.py b/caffe2fluid/kaffe/transformers.py deleted file mode 100644 index a0903d1..0000000 --- a/caffe2fluid/kaffe/transformers.py +++ /dev/null @@ -1,414 +0,0 @@ -''' -A collection of graph transforms. - -A transformer is a callable that accepts a graph and returns a transformed version. 
-''' -import os -import numpy as np - -from .caffe import get_caffe_resolver, has_pycaffe -from .errors import KaffeError, debug, notice, warn -from .layers import NodeKind - - -class DataInjector(object): - ''' - Associates parameters loaded from a .caffemodel file with their corresponding nodes. - ''' - - def __init__(self, def_path, data_path): - # The .prototxt file defining the graph - self.def_path = def_path - # The .caffemodel file containing the learned parameters - self.data_path = data_path - # Set to true if the fallback protocol-buffer based backend was used - self.did_use_pb = False - # A list containing (layer name, parameters) tuples - self.params = None - # Load the parameters - self.load() - - def load(self): - if has_pycaffe(): - self.load_using_caffe() - else: - self.load_using_pb() - - def load_using_caffe(self): - caffe = get_caffe_resolver().caffe - net = caffe.Net(self.def_path, self.data_path, caffe.TEST) - data = lambda blob: blob.data - self.params = [(k, list(map(data, v))) for k, v in net.params.items()] - - def load_using_pb(self): - data = get_caffe_resolver().NetParameter() - data.MergeFromString(open(self.data_path, 'rb').read()) - pair = lambda layer: (layer.name, self.normalize_pb_data(layer)) - layers = data.layers or data.layer - self.params = [pair(layer) for layer in layers if layer.blobs] - self.did_use_pb = True - - def normalize_pb_data(self, layer): - transformed = [] - for blob in layer.blobs: - if len(blob.shape.dim): - dims = blob.shape.dim - c_o, c_i, h, w = map(int, [1] * (4 - len(dims)) + list(dims)) - else: - c_o = blob.num - c_i = blob.channels - h = blob.height - w = blob.width - data = np.array(blob.data, dtype=np.float32).reshape(c_o, c_i, h, w) - transformed.append(data) - return transformed - - def adjust_parameters(self, node, data): - if not self.did_use_pb: - return data - - # When using the protobuf-backend, each parameter initially has four dimensions. - # In certain cases (like FC layers), we want to eliminate the singleton dimensions. - # This implementation takes care of the common cases. However, it does leave the - # potential for future issues. - # The Caffe-backend does not suffer from this problem. - data = list(data) - - squeeze_indices = [1] # Squeeze biases. - if node.kind == NodeKind.InnerProduct: - squeeze_indices.append(0) # Squeeze FC. - - for idx in squeeze_indices: - if idx >= len(data): - continue - - d = data[idx] - assert len( - d.shape - ) == 4, 'invalid shape[%s] from caffe when adjust_parameters' % ( - str(d.shape)) - - shape_old = d.shape - sq_axis = None - if idx == 0: - sq_axis = (0, 1) - elif idx == 1: - sq_axis = (0, 1, 2) - else: - continue - - data[idx] = np.squeeze(d, axis=sq_axis) - shape_new = data[idx].shape - if len(shape_old) != shape_new: - debug('squeeze idx:%d, with kind:%s,name:%s' % \ - (idx, node.kind, node.name)) - return data - - def __call__(self, graph): - for layer_name, data in self.params: - if layer_name in graph: - node = graph.get_node(layer_name) - node.data = self.adjust_parameters(node, data) - else: - notice('Ignoring parameters for non-existent layer: %s' % \ - layer_name) - return graph - - -class DataReshaper(object): - def __init__(self, mapping, replace=True): - # A dictionary mapping NodeKind to the transposed order. - self.mapping = mapping - # The node kinds eligible for reshaping - self.reshaped_node_types = self.mapping.keys() - # If true, the reshaped data will replace the old one. - # Otherwise, it's set to the reshaped_data attribute. 
- self.replace = replace - - def has_spatial_parent(self, node): - try: - parent = node.get_only_parent() - s = parent.output_shape - if len(s) == 4: - return s.height > 1 or s.width > 1 - else: - return False - except KaffeError: - return False - - def map(self, node_kind): - try: - return self.mapping[node_kind] - except KeyError: - raise KaffeError('Ordering not found for node kind: {}'.format( - node_kind)) - - def __call__(self, graph): - for node in graph.nodes: - if node.data is None: - continue - - if node.kind not in self.reshaped_node_types: - # Check for 2+ dimensional data - #if any(len(tensor.shape) > 1 for tensor in node.data): - # notice('parmaters not reshaped for node: {}'.format(node)) - continue - - transpose_order = self.map(node.kind) - weights = node.data[0] - if node.kind == NodeKind.InnerProduct: - # The FC layer connected to the spatial layer needs to be - # re-wired to match the new spatial ordering. - #in_shape = node.get_only_parent().output_shape - fc_shape = weights.shape - output_channels = fc_shape[0] - weights = weights.reshape((output_channels, -1)) - weights = weights.transpose(transpose_order) - node.reshaped_data = weights - else: - node.reshaped_data = weights.transpose(transpose_order) - - if self.replace: - for node in graph.nodes: - if hasattr(node, 'reshaped_data'): - # Set the weights - node.data[0] = node.reshaped_data - del node.reshaped_data - return graph - - -class CropFuser(object): - ''' - Crop is to return a scalar output Blob for an input Blob of arbitrary size. - When one of the input Blob is "input" or "DummyData", we can remove the input Blob - and put the shape into the reduction layer. - ''' - _traced_names = {} - - @classmethod - def traced_names(cls): - return cls._traced_names - - @classmethod - def trace(cls, fname, tname): - """ recording the names mapping, - the value of 'fname' will be replaced by value of 'tname' - """ - if fname not in cls._traced_names: - cls._traced_names[fname] = [] - cls._traced_names[fname].append(tname) - - def __init__(self, - allowed_parent_types=[NodeKind.Input, NodeKind.DummyData]): - self.allowed_parent_types = allowed_parent_types - - def __call__(self, graph): - nodes = graph.nodes - fused_nodes = [] - for node in nodes: - if len(node.parents) != 2: - # reduction layer must has two parent layers. - continue - parent = node.parents[1] - if not self.is_eligible_pair(parent, node): - continue - # Change the graph structure. - parent.children.remove(node) - node.parents.remove(parent) - # Let the sub-class merge the fused node in any arbitrary way. - if not len(parent.children): - fused_nodes.append(parent) - #fused_nodes.append(parent) - self.merge(parent, node) - # rebuild the graph - transformed_nodes = [node for node in nodes if node not in fused_nodes] - return graph.replaced(transformed_nodes) - - def is_eligible_pair(self, parent, child): - '''Returns true if this parent/child pair is eligible for fusion.''' - return child.kind == NodeKind.Crop - #return (self.allowed_parent_types is not None and \ - # len(parent.children) == 1 and \ - # parent.kind in self.allowed_parent_types and \ - # child.kind == NodeKind.Crop) - - def merge(self, parent, child): - '''Merge the parent node into the child.''' - child.metadata['shape'] = [ - parent.output_shape.batch_size, parent.output_shape.channels, - parent.output_shape.height, parent.output_shape.width - ] - - -class SubNodeFuser(object): - ''' - An abstract helper for merging a single-child with its single-parent. 
- ''' - _traced_names = {} - - @classmethod - def traced_names(cls): - return cls._traced_names - - @classmethod - def trace(cls, fname, tname): - """ recording the names mapping, - the value of 'fname' will be replaced by value of 'tname' - """ - if fname not in cls._traced_names: - cls._traced_names[fname] = [] - cls._traced_names[fname].append(tname) - - def __call__(self, graph): - nodes = graph.nodes - fused_nodes = [] - for node in nodes: - if len(node.parents) != 1: - # We're only fusing nodes with single parents - continue - parent = node.get_only_parent() - if len(parent.children) != 1: - # We can only fuse a node if its parent's - # value isn't used by any other node. - continue - if not self.is_eligible_pair(parent, node): - continue - # Rewrite the fused node's children to its parent. - for child in node.children: - pos = child.parents.index(node) - child.parents[pos] = parent - parent.add_child(child) - # Disconnect the fused node from the graph. - parent.children.remove(node) - fused_nodes.append(node) - # Let the sub-class merge the fused node in any arbitrary way. - self.merge(parent, node) - transformed_nodes = [node for node in nodes if node not in fused_nodes] - return graph.replaced(transformed_nodes) - - def is_eligible_pair(self, parent, child): - '''Returns true if this parent/child pair is eligible for fusion.''' - raise NotImplementedError('Must be implemented by subclass.') - - def merge(self, parent, child): - '''Merge the child node into the parent.''' - raise NotImplementedError('Must be implemented by subclass') - - -class ReLUFuser(SubNodeFuser): - ''' - Fuses rectified linear units with their parent nodes. - ''' - - def __init__(self, allowed_parent_types=None): - # Fuse ReLUs when the parent node is one of the given types. - # If None, all node types are eligible. - self.allowed_parent_types = allowed_parent_types - - def is_eligible_pair(self, parent, child): - return ((self.allowed_parent_types is None or \ - parent.kind in self.allowed_parent_types) and \ - child.kind == NodeKind.ReLU) - - def merge(self, parent, child): - SubNodeFuser.trace(parent.name, child.name) - parent.metadata['relu'] = True - parent.metadata['relu_negative_slope'] = child.parameters.negative_slope - - -class BatchNormScaleBiasFuser(SubNodeFuser): - ''' - The original batch normalization paper includes two learned - parameters: a scaling factor \gamma and a bias \beta. - Caffe's implementation does not include these two. However, it is commonly - replicated by adding a scaling+bias layer immidiately after the batch norm. - - This fuser merges the scaling+bias layer with the batch norm. - ''' - - def is_eligible_pair(self, parent, child): - return (parent.kind == NodeKind.BatchNorm and \ - child.kind == NodeKind.Scale and \ - child.parameters.axis == 1 and \ - child.parameters.bias_term == True) - - def merge(self, parent, child): - SubNodeFuser.trace(parent.name, child.name) - parent.scale_bias_node = child - - -class BatchNormPreprocessor(object): - ''' - Prescale batch normalization parameters. - Concatenate gamma (scale) and beta (bias) terms if set. 
- ''' - - def __call__(self, graph): - for node in graph.nodes: - if node.kind != NodeKind.BatchNorm: - continue - assert node.data is not None - assert len(node.data) == 3 - node.data = [np.squeeze(i) for i in node.data] - mean, variance, scale = node.data - # Prescale the stats - scaling_factor = 1.0 / scale if scale != 0 else 0 - mean *= scaling_factor - variance *= scaling_factor - # Replace with the updated values - node.data = [mean, variance] - if hasattr(node, 'scale_bias_node'): - # Include the scale and bias terms - gamma, beta = node.scale_bias_node.data - node.data += [np.squeeze(i) for i in [gamma, beta]] - return graph - - -class NodeRenamer(object): - ''' - Renames nodes in the graph using a given unary function that - accepts a node and returns its new name. - ''' - - def __init__(self, renamer): - self.renamer = renamer - - def __call__(self, graph): - for node in graph.nodes: - node.name = self.renamer(node) - return graph - - -class ParameterNamer(object): - ''' - Convert layer data arrays to a dictionary mapping parameter names to their values. - ''' - - def __call__(self, graph): - for node in graph.nodes: - if node.data is None: - continue - if node.kind in (NodeKind.Convolution, NodeKind.InnerProduct,\ - NodeKind.Deconvolution): - names = ('weights', ) - if node.parameters.bias_term: - names += ('biases', ) - elif node.kind == NodeKind.BatchNorm: - names = ('mean', 'variance') - if len(node.data) == 4: - names += ('scale', 'offset') - elif node.kind == NodeKind.Scale: - names = ('scale', ) - if getattr(node.parameters, 'bias_term', False): - names = ('scale', 'offset') - elif node.kind == NodeKind.PReLU: - names = ('negslope', ) - elif node.kind == "Normalize": - names = ('scale', ) - else: - warn('Unhandled parameters when naming this it[%s]' % - (node.kind)) - continue - assert len(names) == len(node.data) - node.data = dict(zip(names, node.data)) - return graph diff --git a/caffe2fluid/prepare.md b/caffe2fluid/prepare.md deleted file mode 100644 index 11f3d90..0000000 --- a/caffe2fluid/prepare.md +++ /dev/null @@ -1,43 +0,0 @@ -# 环境安装 -caffe2fluid在如下环境配置中进行测试,用户可按如下流程配置自己的环境,也可根据自己需求配置,满足caffe2fluid运行对环境的依赖即可。 - -## 1. 安装Anaconda -可直接参考官网安装文档 -[Linux下安装](https://docs.anaconda.com/anaconda/install/linux/) -[Mac下安装](https://docs.anaconda.com/anaconda/install/mac-os/) - -## 2.创建python环境 -通过使用anaconda,创建python环境,在创建的python环境中安装Caffe和PaddlePaddle,创建的环境可以独立于系统环境,对创建环境的修改,也不会影响其它环境或系统的依赖。 -```shell -# 创建名为caffe_paddle的环境,python版本指定为3.5 -conda create -n caffe-paddle python=3.5 - -# 激活环境 -source activate caffe-paddle - -# 安装PaddlePaddle和Caffe -# 安装后,可在python中执行"import caffe"和 -# "import paddle.fluid",判断是否已经安装成功 -pip install paddlepaddle-gpu -conda install caffe-gpu - -# 安装python的future模块 -pip install future - - -# 注意:由于protobuf版本问题,安装框架过程应先安装PaddlePaddle,再安装Caffe。 -# 如若先安装了Caffe,则可以在安装PaddlePaddle后执行下述命令解决 -pip uninstall protobuf -pip install protobuf==3.6.0 - -source deactivate -``` - -## 3. 在创建的python环境中使用caffe2fluid -在第2步安装中,需要注意到这两行命令 -```shell -source activate caffe-paddle -source deactivate -``` -**1. 第一行表示激活创建的环境,在使用caffe2fluid时需执行该行命令进入环境** -**2. 第二行表示退出环境** diff --git a/caffe2fluid/prepare_en.md b/caffe2fluid/prepare_en.md deleted file mode 100644 index aea3d4a..0000000 --- a/caffe2fluid/prepare_en.md +++ /dev/null @@ -1,35 +0,0 @@ -# Environment Installation -The caffe2fluid is tested in the following environment configuration. 
In order to meet the environment dependence of the caffe2fluid, users can configure their own environment according to the following process, or configure according to their own needs. - -## 1. Anaconda Installation -Directly refer to the official website installation documentation. -[Install in Linux](https://docs.anaconda.com/anaconda/install/linux/) -[Install in Mac](https://docs.anaconda.com/anaconda/install/mac-os/) - -## 2.Create Python Environment -Create a python environment by using anaconda. Then install Caffe and PaddlePaddle in the created python environment. The created environment can be independent of the system environment, so the modifications to the creation environment will not affect the dependencies of other environments or systems. -```shell -# Create the environment which is named as caffe_paddle, -# and the version of python is 3.5. -conda create -n caffe-paddle python=3.5 - -# Activate the environment. -source activate caffe-paddle - -# Install the PaddlePaddle and Caffe. -# After installion,run "import caffe" and "import paddle.fluid" -# to determine if it has been installed successfully. -pip install paddlepaddle-gpu -conda install caffe-gpu - -# Install the future module of python。 -pip install future - - -# Note: Due to the protobuf version, the installation framework should first install PaddlePaddle and then install Caffe. -# If you installed Caffe first, after installing PaddlePaddle you can solve by the following steps. -pip uninstall protobuf -pip install protobuf==3.6.0 - -source deactivate -``` diff --git a/caffe2fluid/proto/caffe.proto b/caffe2fluid/proto/caffe.proto deleted file mode 100644 index 18eb5ca..0000000 --- a/caffe2fluid/proto/caffe.proto +++ /dev/null @@ -1,1411 +0,0 @@ -syntax = "proto2"; - -package caffe; - -// Specifies the shape (dimensions) of a Blob. -message BlobShape { repeated int64 dim = 1 [ packed = true ]; } - -message BlobProto { - optional BlobShape shape = 7; - repeated float data = 5 [ packed = true ]; - repeated float diff = 6 [ packed = true ]; - repeated double double_data = 8 [ packed = true ]; - repeated double double_diff = 9 [ packed = true ]; - - // 4D dimensions -- deprecated. Use "shape" instead. - optional int32 num = 1 [ default = 0 ]; - optional int32 channels = 2 [ default = 0 ]; - optional int32 height = 3 [ default = 0 ]; - optional int32 width = 4 [ default = 0 ]; -} - -// The BlobProtoVector is simply a way to pass multiple blobproto instances -// around. -message BlobProtoVector { repeated BlobProto blobs = 1; } - -message Datum { - optional int32 channels = 1; - optional int32 height = 2; - optional int32 width = 3; - // the actual image data, in bytes - optional bytes data = 4; - optional int32 label = 5; - // Optionally, the datum could also hold float data. - repeated float float_data = 6; - // If true data contains an encoded image that need to be decoded - optional bool encoded = 7 [ default = false ]; -} - -message FillerParameter { - // The filler type. 
- optional string type = 1 [ default = 'constant' ]; - optional float value = 2 [ default = 0 ]; // the value in constant filler - optional float min = 3 [ default = 0 ]; // the min value in uniform filler - optional float max = 4 [ default = 1 ]; // the max value in uniform filler - optional float mean = 5 [ default = 0 ]; // the mean value in Gaussian filler - optional float std = 6 [ default = 1 ]; // the std value in Gaussian filler - // The expected number of non-zero output weights for a given input in - // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [ default = -1 ]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [ default = FAN_IN ]; -} - -message NetParameter { - optional string name = 1; // consider giving the network a name - // DEPRECATED. See InputParameter. The input blobs to the network. - repeated string input = 3; - // DEPRECATED. See InputParameter. The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "input_shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; - - // Whether the network will force every layer to carry out backward operation. - // If set False, then whether to carry out backward is determined - // automatically according to the net structure and learning rates. - optional bool force_backward = 5 [ default = false ]; - // The current "state" of the network, including the phase, level, and stage. - // Some layers may be included/excluded depending on this state and the states - // specified in the layers' include and exclude fields. - optional NetState state = 6; - - // Print debugging information about results while running Net::Forward, - // Net::Backward, and Net::Update. - optional bool debug_info = 7 [ default = false ]; - - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; -} - -// NOTE -// Update the next available ID when you add a new SolverParameter field. -// -// SolverParameter next available ID: 42 (last added: layer_wise_reduce) -message SolverParameter { - ////////////////////////////////////////////////////////////////////////////// - // Specifying the train and test networks - // - // Exactly one train net must be specified using one of the following fields: - // train_net_param, train_net, net_param, net - // One or more test nets may be specified using any of the following fields: - // test_net_param, test_net, net_param, net - // If more than one test net field is specified (e.g., both net and - // test_net are specified), they will be evaluated in the field order given - // above: (1) test_net_param, (2) test_net, (3) net_param/net. - // A test_iter must be specified for each test_net. - // A test_level and/or a test_stage may also be specified for each test_net. 
- ////////////////////////////////////////////////////////////////////////////// - - // Proto filename for the train net, possibly combined with one or more - // test nets. - optional string net = 24; - // Inline train net param, possibly combined with one or more test nets. - optional NetParameter net_param = 25; - - optional string train_net = 1; // Proto filename for the train net. - repeated string test_net = 2; // Proto filenames for the test nets. - optional NetParameter train_net_param = 21; // Inline train net params. - repeated NetParameter test_net_param = 22; // Inline test net params. - - // The states for the train/test nets. Must be unspecified or - // specified once per net. - // - // By default, train_state will have phase = TRAIN, - // and all test_state's will have phase = TEST. - // Other defaults are set according to the NetState defaults. - optional NetState train_state = 26; - repeated NetState test_state = 27; - - // The number of iterations for each test net. - repeated int32 test_iter = 3; - - // The number of iterations between two testing phases. - optional int32 test_interval = 4 [ default = 0 ]; - optional bool test_compute_loss = 19 [ default = false ]; - // If true, run an initial test pass before the first iteration, - // ensuring memory availability and printing the starting value of the loss. - optional bool test_initialization = 32 [ default = true ]; - optional float base_lr = 5; // The base learning rate - // the number of iterations between displaying info. If display = 0, no info - // will be displayed. - optional int32 display = 6; - // Display the loss averaged over the last average_loss iterations - optional int32 average_loss = 33 [ default = 1 ]; - optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [ default = 1 ]; - - // The learning rate decay policy. The currently implemented learning rate - // policies are as follows: - // - fixed: always return base_lr. - // - step: return base_lr * gamma ^ (floor(iter / step)) - // - exp: return base_lr * gamma ^ iter - // - inv: return base_lr * (1 + gamma * iter) ^ (- power) - // - multistep: similar to step but it allows non uniform steps defined by - // stepvalue - // - poly: the effective learning rate follows a polynomial decay, to be - // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) - // - sigmoid: the effective learning rate follows a sigmod decay - // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) - // - // where base_lr, max_iter, gamma, step, stepvalue and power are defined - // in the solver parameter protocol buffer, and iter is the current iteration. - optional string lr_policy = 8; - optional float gamma = 9; // The parameter to compute the learning rate. - optional float power = 10; // The parameter to compute the learning rate. - optional float momentum = 11; // The momentum value. - optional float weight_decay = 12; // The weight decay. - // regularization types supported: L1 and L2 - // controlled by weight_decay - optional string regularization_type = 29 [ default = "L2" ]; - // the stepsize for learning rate policy "step" - optional int32 stepsize = 13; - // the stepsize for learning rate policy "multistep" - repeated int32 stepvalue = 34; - - // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, - // whenever their actual L2 norm is larger. 
- optional float clip_gradients = 35 [ default = -1 ]; - - optional int32 snapshot = 14 [ default = 0 ]; // The snapshot interval - optional string snapshot_prefix = 15; // The prefix for the snapshot. - // whether to snapshot diff in the results or not. Snapshotting diff will help - // debugging but the final protocol buffer size will be much larger. - optional bool snapshot_diff = 16 [ default = false ]; - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [ default = BINARYPROTO ]; - // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. - enum SolverMode { - CPU = 0; - GPU = 1; - } - optional SolverMode solver_mode = 17 [ default = GPU ]; - // the device_id will that be used in GPU mode. Use device_id = 0 in default. - optional int32 device_id = 18 [ default = 0 ]; - // If non-negative, the seed with which the Solver will initialize the Caffe - // random number generator -- useful for reproducible results. Otherwise, - // (and by default) initialize using a seed derived from the system clock. - optional int64 random_seed = 20 [ default = -1 ]; - - // type of the solver - optional string type = 40 [ default = "SGD" ]; - - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [ default = 1e-8 ]; - // parameters for the Adam solver - optional float momentum2 = 39 [ default = 0.999 ]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38 [ default = 0.99 ]; - - // If true, print information about the state of the net that may help with - // debugging learning problems. - optional bool debug_info = 23 [ default = false ]; - - // If false, don't save a snapshot after training finishes. - optional bool snapshot_after_train = 28 [ default = true ]; - - // DEPRECATED: old solver enum types, use string instead - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - // DEPRECATED: use type instead of solver_type - optional SolverType solver_type = 30 [ default = SGD ]; - - // Overlap compute and communication for data parallel training - optional bool layer_wise_reduce = 41 [ default = true ]; -} - -// A message that stores the solver snapshots -message SolverState { - optional int32 iter = 1; // The current iteration - optional string learned_net = 2; // The file that stores the learned net. - repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 - [ default = 0 ]; // The current step for learning rate -} - -enum Phase { - TRAIN = 0; - TEST = 1; -} - -message NetState { - optional Phase phase = 1 [ default = TEST ]; - optional int32 level = 2 [ default = 0 ]; - repeated string stage = 3; -} - -message NetStateRule { - // Set phase to require the NetState have a particular phase (TRAIN or TEST) - // to meet this rule. - optional Phase phase = 1; - - // Set the minimum and/or maximum levels in which the layer should be used. - // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; - - // Customizable sets of stages to include or exclude. - // The net must have ALL of the specified stages and NONE of the specified - // "not_stage"s to meet the rule. - // (Use multiple NetStateRules to specify conjunctions of stages.) 
- repeated string stage = 4; - repeated string not_stage = 5; -} - -// Specifies training parameters (multipliers on global learning constants, -// and the name and other settings used for weight sharing). -message ParamSpec { - // The names of the parameter blobs -- useful for sharing parameters among - // layers, but never required otherwise. To share a parameter between two - // layers, give it a (non-empty) name. - optional string name = 1; - - // Whether to require shared weights to have the same shape, or just the same - // count -- defaults to STRICT if unspecified. - optional DimCheckMode share_mode = 2; - enum DimCheckMode { - // STRICT (default) requires that num, channels, height, width each match. - STRICT = 0; - // PERMISSIVE requires only the count (num*channels*height*width) to match. - PERMISSIVE = 1; - } - - // The multiplier on the global learning rate for this parameter. - optional float lr_mult = 3 [ default = 1.0 ]; - - // The multiplier on the global weight decay for this parameter. - optional float decay_mult = 4 [ default = 1.0 ]; -} - -// NOTE -// Update the next available ID when you add a new LayerParameter field. -// -// LayerParameter next available layer-specific ID: 147 (last added: -// recurrent_param) -message LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the layer type - repeated string bottom = 3; // the name of each bottom blob - repeated string top = 4; // the name of each top blob - - // The train / test phase for computation. - optional Phase phase = 10; - - // The amount of weight to assign each top blob in the objective. - // Each layer assigns a default value, usually of either 0 or 1, - // to each top blob. - repeated float loss_weight = 5; - - // Specifies training parameters (multipliers on global learning constants, - // and the name and other settings used for weight sharing). - repeated ParamSpec param = 6; - - // The blobs containing the numeric parameters of the layer. - repeated BlobProto blobs = 7; - - // Specifies whether to backpropagate to each bottom. If unspecified, - // Caffe will automatically infer whether each input needs backpropagation - // to compute parameter gradients. If set to true for some inputs, - // backpropagation to those inputs is forced; if set false for some inputs, - // backpropagation to those inputs is skipped. - // - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; - - // Rules controlling whether and when a layer is included in the network, - // based on the current NetState. You may specify a non-zero number of rules - // to include OR exclude, but not both. If no include or exclude rules are - // specified, the layer is always included. If the current NetState meets - // ANY (i.e., one or more) of the specified rules, the layer is - // included/excluded. - repeated NetStateRule include = 8; - repeated NetStateRule exclude = 9; - - // Parameters for data pre-processing. - optional TransformationParameter transform_param = 100; - - // Parameters shared by loss layers. - optional LossParameter loss_param = 101; - - // Layer type-specific parameters. - // - // Note: certain layers may have more than one computational engine - // for their implementation. These layers include an Engine type and - // engine parameter for selecting the implementation. - // The default for the engine is set by the ENGINE switch at compile-time. 
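As a minimal sketch (layer name and sizes are hypothetical), the common LayerParameter fields above combine with ParamSpec multipliers and a type-specific parameter message like this in a prototxt:

    layer {
      name: "fc8"
      type: "InnerProduct"
      bottom: "fc7"
      top: "fc8"
      param { lr_mult: 1 decay_mult: 1 }   # ParamSpec for the weight blob
      param { lr_mult: 2 decay_mult: 0 }   # ParamSpec for the bias blob
      inner_product_param {
        num_output: 1000
        weight_filler { type: "gaussian" std: 0.01 }
        bias_filler { type: "constant" value: 0 }
      }
    }

The two param entries follow the layer's blob order (weights first, then bias), which is how per-parameter learning rates are usually expressed.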
- optional AccuracyParameter accuracy_param = 102; - optional ArgMaxParameter argmax_param = 103; - optional BatchNormParameter batch_norm_param = 139; - optional BiasParameter bias_param = 141; - optional ConcatParameter concat_param = 104; - optional ContrastiveLossParameter contrastive_loss_param = 105; - optional ConvolutionParameter convolution_param = 106; - optional CropParameter crop_param = 144; - optional DataParameter data_param = 107; - optional DropoutParameter dropout_param = 108; - optional DummyDataParameter dummy_data_param = 109; - optional EltwiseParameter eltwise_param = 110; - optional ELUParameter elu_param = 140; - optional EmbedParameter embed_param = 137; - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; - optional HDF5DataParameter hdf5_data_param = 112; - optional HDF5OutputParameter hdf5_output_param = 113; - optional HingeLossParameter hinge_loss_param = 114; - optional ImageDataParameter image_data_param = 115; - optional InfogainLossParameter infogain_loss_param = 116; - optional InnerProductParameter inner_product_param = 117; - optional InputParameter input_param = 143; - optional LogParameter log_param = 134; - optional LRNParameter lrn_param = 118; - optional MemoryDataParameter memory_data_param = 119; - optional MVNParameter mvn_param = 120; - optional ParameterParameter parameter_param = 145; - optional PoolingParameter pooling_param = 121; - optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; - optional PythonParameter python_param = 130; - optional RecurrentParameter recurrent_param = 146; - optional ReductionParameter reduction_param = 136; - optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; - optional ScaleParameter scale_param = 142; - optional SigmoidParameter sigmoid_param = 124; - optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; - optional SliceParameter slice_param = 126; - optional TanHParameter tanh_param = 127; - optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; - optional WindowDataParameter window_data_param = 129; -} - -// Message that stores parameters used to apply transformation -// to the data layer's data -message TransformationParameter { - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 1 [ default = 1 ]; - // Specify if we want to randomly mirror data. - optional bool mirror = 2 [ default = false ]; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [ default = 0 ]; - // mean_file and mean_value cannot be specified at the same time - optional string mean_file = 4; - // if specified can be repeated once (would subtract it from all the channels) - // or can be repeated the same number of times as channels - // (would subtract them from the corresponding channel) - repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [ default = false ]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [ default = false ]; -} - -// Message that stores parameters shared by loss layers -message LossParameter { - // If specified, ignore instances with the given label. 
- optional int32 ignore_label = 1; - // How to normalize the loss for loss layers that aggregate across batches, - // spatial dimensions, or other dimensions. Currently only implemented in - // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. - enum NormalizationMode { - // Divide by the number of examples in the batch times spatial dimensions. - // Outputs that receive the ignore label will NOT be ignored in computing - // the normalization factor. - FULL = 0; - // Divide by the total number of output locations that do not take the - // ignore_label. If ignore_label is not set, this behaves like FULL. - VALID = 1; - // Divide by the batch size. - BATCH_SIZE = 2; - // Do not normalize the loss. - NONE = 3; - } - // For historical reasons, the default normalization for - // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. - optional NormalizationMode normalization = 3 [ default = VALID ]; - // Deprecated. Ignored if normalization is specified. If normalization - // is not specified, then setting this to false will be equivalent to - // normalization = BATCH_SIZE to be consistent with previous behavior. - optional bool normalize = 2; -} - -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -message AccuracyParameter { - // When computing accuracy, count as correct by comparing the true label to - // the top k scoring classes. By default, only compare to the top scoring - // class (i.e. argmax). - optional uint32 top_k = 1 [ default = 1 ]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. - optional int32 axis = 2 [ default = 1 ]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; -} - -message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [ default = false ]; - optional uint32 top_k = 2 [ default = 1 ]; - // The axis along which to maximise -- may be negative to index from the - // end (e.g., -1 for the last axis). - // By default ArgMaxLayer maximizes over the flattened trailing dimensions - // for each index of the first / num dimension. - optional int32 axis = 3; -} - -message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [ default = 1 ]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [ default = 1 ]; -} - -message BatchNormParameter { - // If false, normalization is performed over the current mini-batch - // and global statistics are accumulated (but not yet used) by a moving - // average. - // If true, those accumulated mean and variance values are used for the - // normalization. - // By default, it is set to false when the network is in the training - // phase and true when the network is in the testing phase. - optional bool use_global_stats = 1; - // What fraction of the moving average remains each iteration? - // Smaller values make the moving average decay faster, giving more - // weight to the recent values. 
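For illustration (blob names hypothetical), LossParameter and AccuracyParameter typically appear on a SoftmaxWithLoss layer and a companion Accuracy layer restricted to the TEST phase:

    layer {
      name: "loss"
      type: "SoftmaxWithLoss"
      bottom: "fc8"
      bottom: "label"
      top: "loss"
      loss_param { ignore_label: 255 normalization: VALID }
    }
    layer {
      name: "accuracy_top5"
      type: "Accuracy"
      bottom: "fc8"
      bottom: "label"
      top: "accuracy_top5"
      accuracy_param { top_k: 5 }   # correct if the true label is among the top 5 scores
      include { phase: TEST }       # NetStateRule: evaluate only in TEST nets
    }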
- // Each iteration updates the moving average @f$S_{t-1}@f$ with the - // current mean @f$ Y_t @f$ by - // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$ - // is the moving_average_fraction parameter. - optional float moving_average_fraction = 2 [ default = .999 ]; - // Small value to add to the variance estimate so that we don't divide by - // zero. - optional float eps = 3 [ default = 1e-5 ]; -} - -message BiasParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar bias. - optional int32 axis = 1 [ default = 1 ]; - - // (num_axes is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the bias - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to add a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [ default = 1 ]; - - // (filler is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer.) - // The initialization for the learned bias parameter. - // Default is the zero (0) initialization, resulting in the BiasLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; -} - -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [ default = 1.0 ]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [ default = false ]; -} - -message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [ default = true ]; // whether to have bias terms - - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in all spatial dimensions, or once per spatial dimension. - repeated uint32 pad = 3; // The padding size; defaults to 0 - repeated uint32 kernel_size = 4; // The kernel size - repeated uint32 stride = 6; // The stride; defaults to 1 - // Factor used to dilate the kernel, (implicitly) zero-filling the resulting - // holes. (Kernel dilation is sometimes referred to by its use in the - // algorithme à trous from Holschneider et al. 1987.) - repeated uint32 dilation = 18; // The dilation; defaults to 1 - - // For 2D convolution only, the *_h and *_w versions may also be used to - // specify both spatial dimensions. 
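A hypothetical Convolution layer using the convolution_param fields above; a single pad/kernel_size/stride value applies to every spatial dimension, while the *_h/*_w variants that follow in the message set height and width separately:

    layer {
      name: "conv1"
      type: "Convolution"
      bottom: "data"
      top: "conv1"
      convolution_param {
        num_output: 64                       # output channels
        kernel_size: 3                       # 3x3 kernel
        pad: 1
        stride: 1
        weight_filler { type: "xavier" }     # variance scaled by fan_in (see FillerParameter)
        bias_filler { type: "constant" value: 0 }
      }
    }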
- optional uint32 pad_h = 9 [ default = 0 ]; // The padding height (2D only) - optional uint32 pad_w = 10 [ default = 0 ]; // The padding width (2D only) - optional uint32 kernel_h = 11; // The kernel height (2D only) - optional uint32 kernel_w = 12; // The kernel width (2D only) - optional uint32 stride_h = 13; // The stride height (2D only) - optional uint32 stride_w = 14; // The stride width (2D only) - - optional uint32 group = 5 [ default = 1 ]; // The group size for group conv - - optional FillerParameter weight_filler = 7; // The filler for the weight - optional FillerParameter bias_filler = 8; // The filler for the bias - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [ default = DEFAULT ]; - - // The axis to interpret as "channels" when performing convolution. - // Preceding dimensions are treated as independent inputs; - // succeeding dimensions are treated as "spatial". - // With (N, C, H, W) inputs, and axis == 1 (the default), we perform - // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for - // groups g>1) filters across the spatial axes (H, W) of the input. - // With (N, C, D, H, W) inputs, and axis == 1, we perform - // N independent 3D convolutions, sliding (C/g)-channels - // filters across the spatial axes (D, H, W) of the input. - optional int32 axis = 16 [ default = 1 ]; - - // Whether to force use of the general ND convolution, even if a specific - // implementation for blobs of the appropriate number of spatial dimensions - // is available. (Currently, there is only a 2D-specific convolution - // implementation; for input blobs with num_axes != 2, this option is - // ignored and the ND implementation will be used.) - optional bool force_nd_im2col = 17 [ default = false ]; -} - -message CropParameter { - // To crop, elements of the first bottom are selected to fit the dimensions - // of the second, reference bottom. The crop is configured by - // - the crop `axis` to pick the dimensions for cropping - // - the crop `offset` to set the shift for all/each dimension - // to align the cropped bottom with the reference bottom. - // All dimensions up to but excluding `axis` are preserved, while - // the dimensions including and trailing `axis` are cropped. - // If only one `offset` is set, then all dimensions are offset by this amount. - // Otherwise, the number of offsets must equal the number of cropped axes to - // shift the crop in each dimension accordingly. - // Note: standard dimensions are N,C,H,W so the default is a spatial crop, - // and `axis` may be negative to index from the end (e.g., -1 for the last - // axis). - optional int32 axis = 1 [ default = 2 ]; - repeated uint32 offset = 2; -} - -message DataParameter { - enum DB { - LEVELDB = 0; - LMDB = 1; - } - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - // DEPRECATED. Each solver accesses a different subset of the database. - optional uint32 rand_skip = 7 [ default = 0 ]; - optional DB backend = 8 [ default = LEVELDB ]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. 
Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [ default = 1 ]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to - // randomly - // crop an image. - optional uint32 crop_size = 5 [ default = 0 ]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly - // mirror - // data. - optional bool mirror = 6 [ default = false ]; - // Force the encoded image to have 3 color channels - optional bool force_encoded_color = 9 [ default = false ]; - // Prefetch queue (Increase if data feeding bandwidth varies, within the - // limit of device memory for GPU training) - optional uint32 prefetch = 10 [ default = 4 ]; -} - -message DropoutParameter { - optional float dropout_ratio = 1 [ default = 0.5 ]; // dropout ratio -} - -// DummyDataLayer fills any number of arbitrarily shaped blobs with random -// (or constant) data generated by "Fillers" (see "message FillerParameter"). -message DummyDataParameter { - // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or - // N - // shape fields, and 0, 1 or N data_fillers. - // - // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. - // If 1 data_filler is specified, it is applied to all top blobs. If N are - // specified, the ith is applied to the ith top blob. - repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. - repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; -} - -message EltwiseParameter { - enum EltwiseOp { - PROD = 0; - SUM = 1; - MAX = 2; - } - optional EltwiseOp operation = 1 [ default = SUM ]; // element-wise operation - repeated float coeff = 2; // blob-wise coefficient for SUM operation - - // Whether to use an asymptotically slower (for >2 inputs) but stabler method - // of computing the gradient for the PROD operation. (No effect for SUM op.) - optional bool stable_prod_grad = 3 [ default = true ]; -} - -// Message that stores parameters used by ELULayer -message ELUParameter { - // Described in: - // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate - // Deep Network Learning by Exponential Linear Units (ELUs). arXiv - optional float alpha = 1 [ default = 1 ]; -} - -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. Hence num_input should be - // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; - - optional bool bias_term = 3 [ default = true ]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias -} - -// Message that stores parameters used by ExpLayer -message ExpParameter { - // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = exp(shift + scale * x). 
- optional float base = 1 [ default = -1.0 ]; - optional float scale = 2 [ default = 1.0 ]; - optional float shift = 3 [ default = 0.0 ]; -} - -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [ default = 1 ]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [ default = -1 ]; -} - -// Message that stores parameters used by HDF5DataLayer -message HDF5DataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. - optional bool shuffle = 3 [ default = false ]; -} - -message HDF5OutputParameter { optional string file_name = 1; } - -message HingeLossParameter { - enum Norm { - L1 = 1; - L2 = 2; - } - // Specify the Norm to use L1 or L2 - optional Norm norm = 1 [ default = L1 ]; -} - -message ImageDataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [ default = 1 ]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [ default = 0 ]; - // Whether or not ImageLayer should shuffle the list of files at every epoch. - optional bool shuffle = 8 [ default = false ]; - // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [ default = 0 ]; - optional uint32 new_width = 10 [ default = 0 ]; - // Specify if the images are color or gray - optional bool is_color = 11 [ default = true ]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [ default = 1 ]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to - // randomly - // crop an image. - optional uint32 crop_size = 5 [ default = 0 ]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly - // mirror - // data. - optional bool mirror = 6 [ default = false ]; - optional string root_folder = 12 [ default = "" ]; -} - -message InfogainLossParameter { - // Specify the infogain matrix source. 
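For example, ImageDataParameter is usually combined with TransformationParameter in a training data layer; the list file and mean values below are placeholders:

    layer {
      name: "data"
      type: "ImageData"
      top: "data"
      top: "label"
      image_data_param {
        source: "train_list.txt"   # hypothetical file of "image_path label" lines
        batch_size: 32
        shuffle: true
        new_height: 256            # resize before cropping
        new_width: 256
        is_color: true
      }
      transform_param {
        crop_size: 224
        mirror: true
        mean_value: 104            # per-channel means, BGR order
        mean_value: 117
        mean_value: 123
      }
      include { phase: TRAIN }
    }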
- optional string source = 1; - optional int32 axis = 2 [ default = 1 ]; // axis of prob -} - -message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [ default = true ]; // whether to have bias terms - optional FillerParameter weight_filler = 3; // The filler for the weight - optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [ default = 1 ]; - // Specify whether to transpose the weight matrix or not. - // If transpose == true, any operations will be performed on the transpose - // of the weight matrix. The weight matrix itself is not going to be - // transposed - // but rather the transfer flag of operations will be toggled accordingly. - optional bool transpose = 6 [ default = false ]; -} - -message InputParameter { - // This layer produces N >= 1 top blob(s) to be assigned manually. - // Define N shapes to set a shape for each top. - // Define 1 shape to set the same shape for every top. - // Define no shape to defer to reshaping manually. - repeated BlobShape shape = 1; -} - -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [ default = -1.0 ]; - optional float scale = 2 [ default = 1.0 ]; - optional float shift = 3 [ default = 0.0 ]; -} - -// Message that stores parameters used by LRNLayer -message LRNParameter { - optional uint32 local_size = 1 [ default = 5 ]; - optional float alpha = 2 [ default = 1. ]; - optional float beta = 3 [ default = 0.75 ]; - enum NormRegion { - ACROSS_CHANNELS = 0; - WITHIN_CHANNEL = 1; - } - optional NormRegion norm_region = 4 [ default = ACROSS_CHANNELS ]; - optional float k = 5 [ default = 1. ]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [ default = DEFAULT ]; -} - -message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; -} - -message MVNParameter { - // This parameter can be set to false to normalize mean only - optional bool normalize_variance = 1 [ default = true ]; - - // This parameter can be set to true to perform DNN-like MVN - optional bool across_channels = 2 [ default = false ]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [ default = 1e-9 ]; -} - -message ParameterParameter { optional BlobShape shape = 1; } - -message PoolingParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 1 [ default = MAX ]; // The pooling method - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. 
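InputParameter above is how deploy-style prototxts declare their inputs; a minimal hypothetical example for one 3-channel 224x224 image:

    layer {
      name: "data"
      type: "Input"
      top: "data"
      input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } }
    }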
- optional uint32 pad = 4 [ default = 0 ]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [ default = 0 ]; // The padding height - optional uint32 pad_w = 10 [ default = 0 ]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [ default = 1 ]; // The stride (equal in Y, X) - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 11 [ default = DEFAULT ]; - // If global_pooling then it will pool over the size of the bottom by doing - // kernel_h = bottom->height and kernel_w = bottom->width - optional bool global_pooling = 12 [ default = false ]; -} - -message PowerParameter { - // PowerLayer computes outputs y = (shift + scale * x) ^ power. - optional float power = 1 [ default = 1.0 ]; - optional float scale = 2 [ default = 1.0 ]; - optional float shift = 3 [ default = 0.0 ]; -} - -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [ default = '']; - // DEPRECATED - optional bool share_in_parallel = 4 [ default = false ]; -} - -// Message that stores parameters used by RecurrentLayer -message RecurrentParameter { - // The dimension of the output (and usually hidden state) representation -- - // must be explicitly set to non-zero. - optional uint32 num_output = 1 [ default = 0 ]; - - optional FillerParameter weight_filler = 2; // The filler for the weight - optional FillerParameter bias_filler = 3; // The filler for the bias - - // Whether to enable displaying debug_info in the unrolled recurrent net. - optional bool debug_info = 4 [ default = false ]; - - // Whether to add as additional inputs (bottoms) the initial hidden state - // blobs, and add as additional outputs (tops) the final timestep hidden state - // blobs. The number of additional bottom/top blobs required depends on the - // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. - optional bool expose_hidden = 5 [ default = false ]; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [ default = SUM ]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. 
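Two hypothetical pooling layers using the PoolingParameter fields above, one with an explicit kernel and stride and one with global_pooling, which sets the kernel to the full height and width of the bottom blob:

    layer {
      name: "pool1"
      type: "Pooling"
      bottom: "conv1"
      top: "pool1"
      pooling_param { pool: MAX kernel_size: 2 stride: 2 }
    }
    layer {
      name: "global_pool"
      type: "Pooling"
      bottom: "conv5"
      top: "global_pool"
      pooling_param { pool: AVE global_pooling: true }
    }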
- // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [ default = 0 ]; - - optional float coeff = 3 [ default = 1.0 ]; // coefficient for output -} - -// Message that stores parameters used by ReLULayer -message ReLUParameter { - // Allow non-zero slope for negative inputs to speed up optimization - // Described in: - // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities - // improve neural network acoustic models. In ICML Workshop on Deep Learning - // for Audio, Speech, and Language Processing. - optional float negative_slope = 1 [ default = 0 ]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 2 [ default = DEFAULT ]; -} - -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. 
- // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [ default = 0 ]; - optional int32 num_axes = 3 [ default = -1 ]; -} - -message ScaleParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar multiplier. - optional int32 axis = 1 [ default = 1 ]; - - // (num_axes is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the scale - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [ default = 1 ]; - - // (filler is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer.) - // The initialization for the learned scale parameter. - // Default is the unit (1) initialization, resulting in the ScaleLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; - - // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but - // may be more efficient). Initialized with bias_filler (defaults to 0). - optional bool bias_term = 4 [ default = false ]; - optional FillerParameter bias_filler = 5; -} - -message SigmoidParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [ default = DEFAULT ]; -} - -message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [ default = 1 ]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 slice_dim = 1 [ default = 1 ]; -} - -// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer -message SoftmaxParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [ default = DEFAULT ]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). - // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [ default = 1 ]; -} - -message TanHParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [ default = DEFAULT ]; -} - -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. 
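ScaleParameter above, with bias_term enabled, is what Caffe models conventionally pair with a BatchNorm layer, since BatchNormLayer only normalizes and learns no affine transform; a hypothetical inference-time pair:

    layer {
      name: "conv1_bn"
      type: "BatchNorm"
      bottom: "conv1"
      top: "conv1"
      batch_norm_param { use_global_stats: true }   # use accumulated mean/variance
    }
    layer {
      name: "conv1_scale"
      type: "Scale"
      bottom: "conv1"
      top: "conv1"
      scale_param { bias_term: true }               # learned per-channel scale and shift
    }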
- optional int32 axis = 1 [ default = 1 ]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - -// Message that stores parameters used by ThresholdLayer -message ThresholdParameter { - optional float threshold = 1 [ default = 0 ]; // Strictly positive values -} - -message WindowDataParameter { - // Specify the data source. - optional string source = 1; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 2 [ default = 1 ]; - optional string mean_file = 3; - // Specify the batch size. - optional uint32 batch_size = 4; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [ default = 0 ]; - // Specify if we want to randomly mirror data. - optional bool mirror = 6 [ default = false ]; - // Foreground (object) overlap threshold - optional float fg_threshold = 7 [ default = 0.5 ]; - // Background (non-object) overlap threshold - optional float bg_threshold = 8 [ default = 0.5 ]; - // Fraction of batch that should be foreground objects - optional float fg_fraction = 9 [ default = 0.25 ]; - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 context_pad = 10 [ default = 0 ]; - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string crop_mode = 11 [ default = "warp" ]; - // cache_images: will load all images in memory for faster access - optional bool cache_images = 12 [ default = false ]; - // append root_folder to locate images - optional string root_folder = 13 [ default = "" ]; -} - -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [ default = MAX ]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [ default = DEFAULT ]; -} - -// DEPRECATED: use LayerParameter. 
-message V1LayerParameter { - repeated string bottom = 2; - repeated string top = 3; - optional string name = 4; - repeated NetStateRule include = 32; - repeated NetStateRule exclude = 33; - enum LayerType { - NONE = 0; - ABSVAL = 35; - ACCURACY = 1; - ARGMAX = 30; - BNLL = 2; - CONCAT = 3; - CONTRASTIVE_LOSS = 37; - CONVOLUTION = 4; - DATA = 5; - DECONVOLUTION = 39; - DROPOUT = 6; - DUMMY_DATA = 32; - EUCLIDEAN_LOSS = 7; - ELTWISE = 25; - EXP = 38; - FLATTEN = 8; - HDF5_DATA = 9; - HDF5_OUTPUT = 10; - HINGE_LOSS = 28; - IM2COL = 11; - IMAGE_DATA = 12; - INFOGAIN_LOSS = 13; - INNER_PRODUCT = 14; - LRN = 15; - MEMORY_DATA = 29; - MULTINOMIAL_LOGISTIC_LOSS = 16; - MVN = 34; - POOLING = 17; - POWER = 26; - RELU = 18; - SIGMOID = 19; - SIGMOID_CROSS_ENTROPY_LOSS = 27; - SILENCE = 36; - SOFTMAX = 20; - SOFTMAX_LOSS = 21; - SPLIT = 22; - SLICE = 33; - TANH = 23; - WINDOW_DATA = 24; - THRESHOLD = 31; - } - optional LayerType type = 5; - repeated BlobProto blobs = 6; - repeated string param = 1001; - repeated DimCheckMode blob_share_mode = 1002; - enum DimCheckMode { - STRICT = 0; - PERMISSIVE = 1; - } - repeated float blobs_lr = 7; - repeated float weight_decay = 8; - repeated float loss_weight = 35; - optional AccuracyParameter accuracy_param = 27; - optional ArgMaxParameter argmax_param = 23; - optional ConcatParameter concat_param = 9; - optional ContrastiveLossParameter contrastive_loss_param = 40; - optional ConvolutionParameter convolution_param = 10; - optional DataParameter data_param = 11; - optional DropoutParameter dropout_param = 12; - optional DummyDataParameter dummy_data_param = 26; - optional EltwiseParameter eltwise_param = 24; - optional ExpParameter exp_param = 41; - optional HDF5DataParameter hdf5_data_param = 13; - optional HDF5OutputParameter hdf5_output_param = 14; - optional HingeLossParameter hinge_loss_param = 29; - optional ImageDataParameter image_data_param = 15; - optional InfogainLossParameter infogain_loss_param = 16; - optional InnerProductParameter inner_product_param = 17; - optional LRNParameter lrn_param = 18; - optional MemoryDataParameter memory_data_param = 22; - optional MVNParameter mvn_param = 34; - optional PoolingParameter pooling_param = 19; - optional PowerParameter power_param = 21; - optional ReLUParameter relu_param = 30; - optional SigmoidParameter sigmoid_param = 38; - optional SoftmaxParameter softmax_param = 39; - optional SliceParameter slice_param = 31; - optional TanHParameter tanh_param = 37; - optional ThresholdParameter threshold_param = 25; - optional WindowDataParameter window_data_param = 20; - optional TransformationParameter transform_param = 36; - optional LossParameter loss_param = 42; - optional V0LayerParameter layer = 1; -} - -// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters -// in Caffe. We keep this message type around for legacy support. -message V0LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the string to specify the layer type - - // Parameters to specify layers with inner products. 
- optional uint32 num_output = 3; // The number of outputs for the layer - optional bool biasterm = 4 [ default = true ]; // whether to have bias terms - optional FillerParameter weight_filler = 5; // The filler for the weight - optional FillerParameter bias_filler = 6; // The filler for the bias - - optional uint32 pad = 7 [ default = 0 ]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [ default = 1 ]; // The group size for group conv - optional uint32 stride = 10 [ default = 1 ]; // The stride - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 11 [ default = MAX ]; // The pooling method - optional float dropout_ratio = 12 [ default = 0.5 ]; // dropout ratio - - optional uint32 local_size = 13 [ default = 5 ]; // for local response norm - optional float alpha = 14 [ default = 1. ]; // for local response norm - optional float beta = 15 [ default = 0.75 ]; // for local response norm - optional float k = 22 [ default = 1. ]; - - // For data layers, specify the data source - optional string source = 16; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 17 [ default = 1 ]; - optional string meanfile = 18; - // For data layers, specify the batch size. - optional uint32 batchsize = 19; - // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [ default = 0 ]; - // For data layers, specify if we want to randomly mirror data. - optional bool mirror = 21 [ default = false ]; - - // The blobs containing the numeric parameters of the layer - repeated BlobProto blobs = 50; - // The ratio that is multiplied on the global learning rate. If you want to - // set the learning ratio for one blob, you need to set it for all blobs. - repeated float blobs_lr = 51; - // The weight decay that is multiplied on the global weight decay. - repeated float weight_decay = 52; - - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 53 [ default = 0 ]; - - // Fields related to detection (det_*) - // foreground (object) overlap threshold - optional float det_fg_threshold = 54 [ default = 0.5 ]; - // background (non-object) overlap threshold - optional float det_bg_threshold = 55 [ default = 0.5 ]; - // Fraction of batch that should be foreground objects - optional float det_fg_fraction = 56 [ default = 0.25 ]; - - // optional bool OBSOLETE_can_clobber = 57 [default = true]; - - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [ default = 0 ]; - - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string det_crop_mode = 59 [ default = "warp" ]; - - // For ReshapeLayer, one needs to specify the new dimensions. 
- optional int32 new_num = 60 [ default = 0 ]; - optional int32 new_channels = 61 [ default = 0 ]; - optional int32 new_height = 62 [ default = 0 ]; - optional int32 new_width = 63 [ default = 0 ]; - - // Whether or not ImageLayer should shuffle the list of files at every epoch. - // It will also resize images if new_height or new_width are not zero. - optional bool shuffle_images = 64 [ default = false ]; - - // For ConcatLayer, one needs to specify the dimension for concatenation, and - // the other dimensions must be the same for all the bottom blobs. - // By default it will concatenate blobs along the channels dimension. - optional uint32 concat_dim = 65 [ default = 1 ]; - - optional HDF5OutputParameter hdf5_output_param = 1001; -} - -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope parameters are shared across channels. - optional bool channel_shared = 2 [ default = false ]; -} diff --git a/caffe2fluid/proto/caffe_pb2.py b/caffe2fluid/proto/caffe_pb2.py deleted file mode 100644 index 15f2c64..0000000 --- a/caffe2fluid/proto/caffe_pb2.py +++ /dev/null @@ -1,5883 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: caffe.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='caffe.proto', - package='caffe', - syntax='proto2', - serialized_options=None, - serialized_pb=_b('\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 
\x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\xc3\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! \x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\x12\x1f\n\x11layer_wise_reduce\x18) \x01(\x08:\x04true\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 
\x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x82\x14\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12\x33\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x19.caffe.ParameterParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z 
\x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0frecurrent_param\x18\x92\x01 \x01(\x0b\x32\x19.caffe.RecurrentParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xc2\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\"B\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 
\x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"8\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0f\n\x04\x61xis\x18\x02 
\x01(\x05:\x01\x31\"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xc0\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 \x01(\r:\x01\x30\x12-\n\rweight_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 
\x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? 
\x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01') -) - -_PHASE = _descriptor.EnumDescriptor( - name='Phase', - full_name='caffe.Phase', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='TRAIN', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TEST', index=1, number=1, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=15403, - serialized_end=15431, -) -_sym_db.RegisterEnumDescriptor(_PHASE) - -Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE) -TRAIN = 0 -TEST = 1 - - -_FILLERPARAMETER_VARIANCENORM = _descriptor.EnumDescriptor( - name='VarianceNorm', - full_name='caffe.FillerParameter.VarianceNorm', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='FAN_IN', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FAN_OUT', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVERAGE', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=658, - serialized_end=710, -) -_sym_db.RegisterEnumDescriptor(_FILLERPARAMETER_VARIANCENORM) - -_SOLVERPARAMETER_SNAPSHOTFORMAT = _descriptor.EnumDescriptor( - name='SnapshotFormat', - full_name='caffe.SolverParameter.SnapshotFormat', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='HDF5', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BINARYPROTO', index=1, number=1, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=2171, - serialized_end=2214, -) -_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SNAPSHOTFORMAT) - -_SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor( - name='SolverMode', - full_name='caffe.SolverParameter.SolverMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='CPU', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GPU', index=1, number=1, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=2216, - serialized_end=2246, -) -_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERMODE) - -_SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor( - name='SolverType', - full_name='caffe.SolverParameter.SolverType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='SGD', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NESTEROV', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ADAGRAD', index=2, number=2, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RMSPROP', index=3, number=3, - 
serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ADADELTA', index=4, number=4, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ADAM', index=5, number=5, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=2248, - serialized_end=2333, -) -_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERTYPE) - -_PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor( - name='DimCheckMode', - full_name='caffe.ParamSpec.DimCheckMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STRICT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PERMISSIVE', index=1, number=1, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=2764, - serialized_end=2806, -) -_sym_db.RegisterEnumDescriptor(_PARAMSPEC_DIMCHECKMODE) - -_LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor( - name='NormalizationMode', - full_name='caffe.LossParameter.NormalizationMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='FULL', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='VALID', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BATCH_SIZE', index=2, number=2, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NONE', index=3, number=3, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=5687, - serialized_end=5753, -) -_sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE) - -_CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.ConvolutionParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE) - -_DATAPARAMETER_DB = _descriptor.EnumDescriptor( - name='DB', - full_name='caffe.DataParameter.DB', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='LEVELDB', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LMDB', index=1, number=1, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=7079, - serialized_end=7106, -) -_sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB) - -_ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor( - name='EltwiseOp', - full_name='caffe.EltwiseParameter.EltwiseOp', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='PROD', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SUM', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MAX', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - 
serialized_options=None, - serialized_start=7446, - serialized_end=7485, -) -_sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP) - -_HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor( - name='Norm', - full_name='caffe.HingeLossParameter.Norm', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='L1', index=0, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='L2', index=1, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=8020, - serialized_end=8042, -) -_sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM) - -_LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor( - name='NormRegion', - full_name='caffe.LRNParameter.NormRegion', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='ACROSS_CHANNELS', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='WITHIN_CHANNEL', index=1, number=1, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=8926, - serialized_end=8979, -) -_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION) - -_LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.LRNParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE) - -_POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( - name='PoolMethod', - full_name='caffe.PoolingParameter.PoolMethod', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MAX', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STOCHASTIC', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=9603, - serialized_end=9649, -) -_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD) - -_POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.PoolingParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE) - -_REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor( - name='ReductionOp', - full_name='caffe.ReductionParameter.ReductionOp', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='SUM', index=0, number=1, - serialized_options=None, - 
type=None), - _descriptor.EnumValueDescriptor( - name='ASUM', index=1, number=2, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SUMSQ', index=2, number=3, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MEAN', index=3, number=4, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=10189, - serialized_end=10242, -) -_sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP) - -_RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.ReLUParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE) - -_SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.SigmoidParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE) - -_SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.SoftmaxParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE) - -_TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.TanHParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE) - -_SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( - name='PoolMethod', - full_name='caffe.SPPParameter.PoolMethod', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MAX', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVE', index=1, number=1, - serialized_options=None, - 
type=None), - _descriptor.EnumValueDescriptor( - name='STOCHASTIC', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=9603, - serialized_end=9649, -) -_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD) - -_SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='caffe.SPPParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=6718, - serialized_end=6761, -) -_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE) - -_V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor( - name='LayerType', - full_name='caffe.V1LayerParameter.LayerType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='NONE', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ABSVAL', index=1, number=35, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ACCURACY', index=2, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ARGMAX', index=3, number=30, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BNLL', index=4, number=2, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONCAT', index=5, number=3, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONTRASTIVE_LOSS', index=6, number=37, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONVOLUTION', index=7, number=4, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DATA', index=8, number=5, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DECONVOLUTION', index=9, number=39, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DROPOUT', index=10, number=6, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DUMMY_DATA', index=11, number=32, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='EUCLIDEAN_LOSS', index=12, number=7, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ELTWISE', index=13, number=25, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='EXP', index=14, number=38, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FLATTEN', index=15, number=8, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HDF5_DATA', index=16, number=9, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HDF5_OUTPUT', index=17, number=10, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HINGE_LOSS', index=18, number=28, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='IM2COL', index=19, number=11, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='IMAGE_DATA', index=20, number=12, - serialized_options=None, - type=None), - 
_descriptor.EnumValueDescriptor( - name='INFOGAIN_LOSS', index=21, number=13, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INNER_PRODUCT', index=22, number=14, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LRN', index=23, number=15, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MEMORY_DATA', index=24, number=29, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MULTINOMIAL_LOGISTIC_LOSS', index=25, number=16, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MVN', index=26, number=34, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='POOLING', index=27, number=17, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='POWER', index=28, number=26, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RELU', index=29, number=18, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SIGMOID', index=30, number=19, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SIGMOID_CROSS_ENTROPY_LOSS', index=31, number=27, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SILENCE', index=32, number=36, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SOFTMAX', index=33, number=20, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SOFTMAX_LOSS', index=34, number=21, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SPLIT', index=35, number=22, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SLICE', index=36, number=33, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TANH', index=37, number=23, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='WINDOW_DATA', index=38, number=24, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='THRESHOLD', index=39, number=31, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=13644, - serialized_end=14244, -) -_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE) - -_V1LAYERPARAMETER_DIMCHECKMODE = _descriptor.EnumDescriptor( - name='DimCheckMode', - full_name='caffe.V1LayerParameter.DimCheckMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STRICT', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PERMISSIVE', index=1, number=1, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - serialized_start=2764, - serialized_end=2806, -) -_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_DIMCHECKMODE) - -_V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( - name='PoolMethod', - full_name='caffe.V0LayerParameter.PoolMethod', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MAX', index=0, number=0, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVE', index=1, number=1, - serialized_options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STOCHASTIC', index=2, number=2, - serialized_options=None, - type=None), - ], - containing_type=None, - serialized_options=None, - 
serialized_start=9603, - serialized_end=9649, -) -_sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD) - - -_BLOBSHAPE = _descriptor.Descriptor( - name='BlobShape', - full_name='caffe.BlobShape', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='dim', full_name='caffe.BlobShape.dim', index=0, - number=1, type=3, cpp_type=2, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=_b('\020\001'), file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=22, - serialized_end=50, -) - - -_BLOBPROTO = _descriptor.Descriptor( - name='BlobProto', - full_name='caffe.BlobProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='caffe.BlobProto.shape', index=0, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='data', full_name='caffe.BlobProto.data', index=1, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=_b('\020\001'), file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='diff', full_name='caffe.BlobProto.diff', index=2, - number=6, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=_b('\020\001'), file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='double_data', full_name='caffe.BlobProto.double_data', index=3, - number=8, type=1, cpp_type=5, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=_b('\020\001'), file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='double_diff', full_name='caffe.BlobProto.double_diff', index=4, - number=9, type=1, cpp_type=5, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=_b('\020\001'), file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='num', full_name='caffe.BlobProto.num', index=5, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='channels', full_name='caffe.BlobProto.channels', index=6, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='height', full_name='caffe.BlobProto.height', index=7, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='width', full_name='caffe.BlobProto.width', index=8, - number=4, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=53, - serialized_end=257, -) - - -_BLOBPROTOVECTOR = _descriptor.Descriptor( - name='BlobProtoVector', - full_name='caffe.BlobProtoVector', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='blobs', full_name='caffe.BlobProtoVector.blobs', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=259, - serialized_end=309, -) - - -_DATUM = _descriptor.Descriptor( - name='Datum', - full_name='caffe.Datum', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channels', full_name='caffe.Datum.channels', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='height', full_name='caffe.Datum.height', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='width', full_name='caffe.Datum.width', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='data', full_name='caffe.Datum.data', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='label', full_name='caffe.Datum.label', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='float_data', full_name='caffe.Datum.float_data', index=5, - number=6, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='encoded', 
full_name='caffe.Datum.encoded', index=6, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=312, - serialized_end=441, -) - - -_FILLERPARAMETER = _descriptor.Descriptor( - name='FillerParameter', - full_name='caffe.FillerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='caffe.FillerParameter.type', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("constant").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='caffe.FillerParameter.value', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='min', full_name='caffe.FillerParameter.min', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='max', full_name='caffe.FillerParameter.max', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mean', full_name='caffe.FillerParameter.mean', index=4, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='std', full_name='caffe.FillerParameter.std', index=5, - number=6, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='sparse', full_name='caffe.FillerParameter.sparse', index=6, - number=7, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='variance_norm', full_name='caffe.FillerParameter.variance_norm', index=7, - number=8, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _FILLERPARAMETER_VARIANCENORM, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - 
extension_ranges=[], - oneofs=[ - ], - serialized_start=444, - serialized_end=710, -) - - -_NETPARAMETER = _descriptor.Descriptor( - name='NetParameter', - full_name='caffe.NetParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='caffe.NetParameter.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='input', full_name='caffe.NetParameter.input', index=1, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='input_shape', full_name='caffe.NetParameter.input_shape', index=2, - number=8, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='input_dim', full_name='caffe.NetParameter.input_dim', index=3, - number=4, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='force_backward', full_name='caffe.NetParameter.force_backward', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='state', full_name='caffe.NetParameter.state', index=5, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='debug_info', full_name='caffe.NetParameter.debug_info', index=6, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='layer', full_name='caffe.NetParameter.layer', index=7, - number=100, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='layers', full_name='caffe.NetParameter.layers', index=8, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=713, - serialized_end=983, -) - - -_SOLVERPARAMETER = 
_descriptor.Descriptor( - name='SolverParameter', - full_name='caffe.SolverParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='net', full_name='caffe.SolverParameter.net', index=0, - number=24, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='net_param', full_name='caffe.SolverParameter.net_param', index=1, - number=25, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='train_net', full_name='caffe.SolverParameter.train_net', index=2, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='test_net', full_name='caffe.SolverParameter.test_net', index=3, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='train_net_param', full_name='caffe.SolverParameter.train_net_param', index=4, - number=21, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='test_net_param', full_name='caffe.SolverParameter.test_net_param', index=5, - number=22, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='train_state', full_name='caffe.SolverParameter.train_state', index=6, - number=26, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='test_state', full_name='caffe.SolverParameter.test_state', index=7, - number=27, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='test_iter', full_name='caffe.SolverParameter.test_iter', index=8, - number=3, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='test_interval', full_name='caffe.SolverParameter.test_interval', index=9, - number=4, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='test_compute_loss', full_name='caffe.SolverParameter.test_compute_loss', index=10, - number=19, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='test_initialization', full_name='caffe.SolverParameter.test_initialization', index=11, - number=32, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='base_lr', full_name='caffe.SolverParameter.base_lr', index=12, - number=5, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='display', full_name='caffe.SolverParameter.display', index=13, - number=6, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='average_loss', full_name='caffe.SolverParameter.average_loss', index=14, - number=33, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='max_iter', full_name='caffe.SolverParameter.max_iter', index=15, - number=7, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='iter_size', full_name='caffe.SolverParameter.iter_size', index=16, - number=36, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='lr_policy', full_name='caffe.SolverParameter.lr_policy', index=17, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='gamma', full_name='caffe.SolverParameter.gamma', index=18, - number=9, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='power', full_name='caffe.SolverParameter.power', index=19, - number=10, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - 
_descriptor.FieldDescriptor( - name='momentum', full_name='caffe.SolverParameter.momentum', index=20, - number=11, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_decay', full_name='caffe.SolverParameter.weight_decay', index=21, - number=12, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='regularization_type', full_name='caffe.SolverParameter.regularization_type', index=22, - number=29, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("L2").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stepsize', full_name='caffe.SolverParameter.stepsize', index=23, - number=13, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stepvalue', full_name='caffe.SolverParameter.stepvalue', index=24, - number=34, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='clip_gradients', full_name='caffe.SolverParameter.clip_gradients', index=25, - number=35, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(-1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='snapshot', full_name='caffe.SolverParameter.snapshot', index=26, - number=14, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='snapshot_prefix', full_name='caffe.SolverParameter.snapshot_prefix', index=27, - number=15, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='snapshot_diff', full_name='caffe.SolverParameter.snapshot_diff', index=28, - number=16, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='snapshot_format', full_name='caffe.SolverParameter.snapshot_format', index=29, - number=37, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - 
_descriptor.FieldDescriptor( - name='solver_mode', full_name='caffe.SolverParameter.solver_mode', index=30, - number=17, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='device_id', full_name='caffe.SolverParameter.device_id', index=31, - number=18, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='random_seed', full_name='caffe.SolverParameter.random_seed', index=32, - number=20, type=3, cpp_type=2, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='type', full_name='caffe.SolverParameter.type', index=33, - number=40, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("SGD").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='delta', full_name='caffe.SolverParameter.delta', index=34, - number=31, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1e-08), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='momentum2', full_name='caffe.SolverParameter.momentum2', index=35, - number=39, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.999), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rms_decay', full_name='caffe.SolverParameter.rms_decay', index=36, - number=38, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.99), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='debug_info', full_name='caffe.SolverParameter.debug_info', index=37, - number=23, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='snapshot_after_train', full_name='caffe.SolverParameter.snapshot_after_train', index=38, - number=28, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='solver_type', full_name='caffe.SolverParameter.solver_type', index=39, - number=30, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='layer_wise_reduce', 
full_name='caffe.SolverParameter.layer_wise_reduce', index=40, - number=41, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SOLVERPARAMETER_SNAPSHOTFORMAT, - _SOLVERPARAMETER_SOLVERMODE, - _SOLVERPARAMETER_SOLVERTYPE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=986, - serialized_end=2333, -) - - -_SOLVERSTATE = _descriptor.Descriptor( - name='SolverState', - full_name='caffe.SolverState', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='iter', full_name='caffe.SolverState.iter', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='learned_net', full_name='caffe.SolverState.learned_net', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='history', full_name='caffe.SolverState.history', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='current_step', full_name='caffe.SolverState.current_step', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2335, - serialized_end=2443, -) - - -_NETSTATE = _descriptor.Descriptor( - name='NetState', - full_name='caffe.NetState', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='phase', full_name='caffe.NetState.phase', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='level', full_name='caffe.NetState.level', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stage', full_name='caffe.NetState.stage', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ 
- ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2445, - serialized_end=2523, -) - - -_NETSTATERULE = _descriptor.Descriptor( - name='NetStateRule', - full_name='caffe.NetStateRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='phase', full_name='caffe.NetStateRule.phase', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='min_level', full_name='caffe.NetStateRule.min_level', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='max_level', full_name='caffe.NetStateRule.max_level', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stage', full_name='caffe.NetStateRule.stage', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='not_stage', full_name='caffe.NetStateRule.not_stage', index=4, - number=5, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2525, - serialized_end=2640, -) - - -_PARAMSPEC = _descriptor.Descriptor( - name='ParamSpec', - full_name='caffe.ParamSpec', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='caffe.ParamSpec.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='share_mode', full_name='caffe.ParamSpec.share_mode', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='lr_mult', full_name='caffe.ParamSpec.lr_mult', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='decay_mult', full_name='caffe.ParamSpec.decay_mult', index=3, - number=4, type=2, cpp_type=6, label=1, - 
has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _PARAMSPEC_DIMCHECKMODE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2643, - serialized_end=2806, -) - - -_LAYERPARAMETER = _descriptor.Descriptor( - name='LayerParameter', - full_name='caffe.LayerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='caffe.LayerParameter.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='type', full_name='caffe.LayerParameter.type', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bottom', full_name='caffe.LayerParameter.bottom', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='top', full_name='caffe.LayerParameter.top', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='phase', full_name='caffe.LayerParameter.phase', index=4, - number=10, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='loss_weight', full_name='caffe.LayerParameter.loss_weight', index=5, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='param', full_name='caffe.LayerParameter.param', index=6, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='blobs', full_name='caffe.LayerParameter.blobs', index=7, - number=7, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='propagate_down', full_name='caffe.LayerParameter.propagate_down', index=8, - number=11, type=8, cpp_type=7, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='include', full_name='caffe.LayerParameter.include', index=9, - number=8, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='exclude', full_name='caffe.LayerParameter.exclude', index=10, - number=9, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='transform_param', full_name='caffe.LayerParameter.transform_param', index=11, - number=100, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='loss_param', full_name='caffe.LayerParameter.loss_param', index=12, - number=101, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='accuracy_param', full_name='caffe.LayerParameter.accuracy_param', index=13, - number=102, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='argmax_param', full_name='caffe.LayerParameter.argmax_param', index=14, - number=103, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='batch_norm_param', full_name='caffe.LayerParameter.batch_norm_param', index=15, - number=139, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_param', full_name='caffe.LayerParameter.bias_param', index=16, - number=141, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='concat_param', full_name='caffe.LayerParameter.concat_param', index=17, - number=104, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='contrastive_loss_param', full_name='caffe.LayerParameter.contrastive_loss_param', index=18, - number=105, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='convolution_param', full_name='caffe.LayerParameter.convolution_param', index=19, - number=106, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='crop_param', full_name='caffe.LayerParameter.crop_param', index=20, - number=144, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='data_param', full_name='caffe.LayerParameter.data_param', index=21, - number=107, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='dropout_param', full_name='caffe.LayerParameter.dropout_param', index=22, - number=108, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='dummy_data_param', full_name='caffe.LayerParameter.dummy_data_param', index=23, - number=109, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='eltwise_param', full_name='caffe.LayerParameter.eltwise_param', index=24, - number=110, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='elu_param', full_name='caffe.LayerParameter.elu_param', index=25, - number=140, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='embed_param', full_name='caffe.LayerParameter.embed_param', index=26, - number=137, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='exp_param', full_name='caffe.LayerParameter.exp_param', index=27, - number=111, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='flatten_param', full_name='caffe.LayerParameter.flatten_param', index=28, - number=135, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='hdf5_data_param', full_name='caffe.LayerParameter.hdf5_data_param', index=29, - number=112, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='hdf5_output_param', full_name='caffe.LayerParameter.hdf5_output_param', index=30, - number=113, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='hinge_loss_param', full_name='caffe.LayerParameter.hinge_loss_param', index=31, - number=114, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='image_data_param', full_name='caffe.LayerParameter.image_data_param', index=32, - number=115, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='infogain_loss_param', full_name='caffe.LayerParameter.infogain_loss_param', index=33, - number=116, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='inner_product_param', full_name='caffe.LayerParameter.inner_product_param', index=34, - number=117, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='input_param', full_name='caffe.LayerParameter.input_param', index=35, - number=143, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='log_param', full_name='caffe.LayerParameter.log_param', index=36, - number=134, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='lrn_param', full_name='caffe.LayerParameter.lrn_param', index=37, - number=118, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='memory_data_param', full_name='caffe.LayerParameter.memory_data_param', index=38, - number=119, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mvn_param', full_name='caffe.LayerParameter.mvn_param', index=39, - number=120, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='parameter_param', full_name='caffe.LayerParameter.parameter_param', index=40, - number=145, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pooling_param', full_name='caffe.LayerParameter.pooling_param', index=41, - number=121, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='power_param', full_name='caffe.LayerParameter.power_param', index=42, - number=122, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='prelu_param', full_name='caffe.LayerParameter.prelu_param', index=43, - number=131, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='python_param', full_name='caffe.LayerParameter.python_param', index=44, - number=130, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='recurrent_param', full_name='caffe.LayerParameter.recurrent_param', index=45, - number=146, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='reduction_param', full_name='caffe.LayerParameter.reduction_param', index=46, - number=136, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='relu_param', full_name='caffe.LayerParameter.relu_param', index=47, - number=123, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='reshape_param', full_name='caffe.LayerParameter.reshape_param', index=48, - number=133, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale_param', full_name='caffe.LayerParameter.scale_param', index=49, - number=142, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='sigmoid_param', full_name='caffe.LayerParameter.sigmoid_param', index=50, - number=124, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='softmax_param', full_name='caffe.LayerParameter.softmax_param', index=51, - number=125, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='spp_param', full_name='caffe.LayerParameter.spp_param', index=52, - number=132, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='slice_param', full_name='caffe.LayerParameter.slice_param', index=53, - number=126, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='tanh_param', full_name='caffe.LayerParameter.tanh_param', index=54, - number=127, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='threshold_param', full_name='caffe.LayerParameter.threshold_param', index=55, - number=128, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='tile_param', full_name='caffe.LayerParameter.tile_param', index=56, - number=138, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='window_data_param', full_name='caffe.LayerParameter.window_data_param', index=57, - number=129, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2809, - serialized_end=5371, -) - - -_TRANSFORMATIONPARAMETER = _descriptor.Descriptor( - name='TransformationParameter', - 
full_name='caffe.TransformationParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.TransformationParameter.scale', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mirror', full_name='caffe.TransformationParameter.mirror', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='crop_size', full_name='caffe.TransformationParameter.crop_size', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mean_file', full_name='caffe.TransformationParameter.mean_file', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mean_value', full_name='caffe.TransformationParameter.mean_value', index=4, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='force_color', full_name='caffe.TransformationParameter.force_color', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='force_gray', full_name='caffe.TransformationParameter.force_gray', index=6, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=5374, - serialized_end=5556, -) - - -_LOSSPARAMETER = _descriptor.Descriptor( - name='LossParameter', - full_name='caffe.LossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='ignore_label', full_name='caffe.LossParameter.ignore_label', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='normalization', full_name='caffe.LossParameter.normalization', index=1, - number=3, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='normalize', full_name='caffe.LossParameter.normalize', index=2, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _LOSSPARAMETER_NORMALIZATIONMODE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=5559, - serialized_end=5753, -) - - -_ACCURACYPARAMETER = _descriptor.Descriptor( - name='AccuracyParameter', - full_name='caffe.AccuracyParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='top_k', full_name='caffe.AccuracyParameter.top_k', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.AccuracyParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='ignore_label', full_name='caffe.AccuracyParameter.ignore_label', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=5755, - serialized_end=5831, -) - - -_ARGMAXPARAMETER = _descriptor.Descriptor( - name='ArgMaxParameter', - full_name='caffe.ArgMaxParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='out_max_val', full_name='caffe.ArgMaxParameter.out_max_val', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='top_k', full_name='caffe.ArgMaxParameter.top_k', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.ArgMaxParameter.axis', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=5833, - serialized_end=5910, -) - - 
-_CONCATPARAMETER = _descriptor.Descriptor( - name='ConcatParameter', - full_name='caffe.ConcatParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.ConcatParameter.axis', index=0, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='concat_dim', full_name='caffe.ConcatParameter.concat_dim', index=1, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=5912, - serialized_end=5969, -) - - -_BATCHNORMPARAMETER = _descriptor.Descriptor( - name='BatchNormParameter', - full_name='caffe.BatchNormParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='use_global_stats', full_name='caffe.BatchNormParameter.use_global_stats', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='moving_average_fraction', full_name='caffe.BatchNormParameter.moving_average_fraction', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.999), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='eps', full_name='caffe.BatchNormParameter.eps', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1e-05), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=5971, - serialized_end=6077, -) - - -_BIASPARAMETER = _descriptor.Descriptor( - name='BiasParameter', - full_name='caffe.BiasParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.BiasParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='num_axes', full_name='caffe.BiasParameter.num_axes', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='filler', full_name='caffe.BiasParameter.filler', index=2, - number=3, type=11, cpp_type=10, label=1, - 
has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=6079, - serialized_end=6172, -) - - -_CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor( - name='ContrastiveLossParameter', - full_name='caffe.ContrastiveLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='margin', full_name='caffe.ContrastiveLossParameter.margin', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='legacy_version', full_name='caffe.ContrastiveLossParameter.legacy_version', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=6174, - serialized_end=6250, -) - - -_CONVOLUTIONPARAMETER = _descriptor.Descriptor( - name='ConvolutionParameter', - full_name='caffe.ConvolutionParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='caffe.ConvolutionParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_term', full_name='caffe.ConvolutionParameter.bias_term', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pad', full_name='caffe.ConvolutionParameter.pad', index=2, - number=3, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='kernel_size', full_name='caffe.ConvolutionParameter.kernel_size', index=3, - number=4, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stride', full_name='caffe.ConvolutionParameter.stride', index=4, - number=6, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='dilation', 
full_name='caffe.ConvolutionParameter.dilation', index=5, - number=18, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pad_h', full_name='caffe.ConvolutionParameter.pad_h', index=6, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pad_w', full_name='caffe.ConvolutionParameter.pad_w', index=7, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='kernel_h', full_name='caffe.ConvolutionParameter.kernel_h', index=8, - number=11, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='kernel_w', full_name='caffe.ConvolutionParameter.kernel_w', index=9, - number=12, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stride_h', full_name='caffe.ConvolutionParameter.stride_h', index=10, - number=13, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stride_w', full_name='caffe.ConvolutionParameter.stride_w', index=11, - number=14, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='group', full_name='caffe.ConvolutionParameter.group', index=12, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='caffe.ConvolutionParameter.weight_filler', index=13, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='caffe.ConvolutionParameter.bias_filler', index=14, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.ConvolutionParameter.engine', index=15, - number=15, type=14, cpp_type=8, label=1, - 
has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.ConvolutionParameter.axis', index=16, - number=16, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='force_nd_im2col', full_name='caffe.ConvolutionParameter.force_nd_im2col', index=17, - number=17, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _CONVOLUTIONPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=6253, - serialized_end=6761, -) - - -_CROPPARAMETER = _descriptor.Descriptor( - name='CropParameter', - full_name='caffe.CropParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.CropParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=2, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='offset', full_name='caffe.CropParameter.offset', index=1, - number=2, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=6763, - serialized_end=6811, -) - - -_DATAPARAMETER = _descriptor.Descriptor( - name='DataParameter', - full_name='caffe.DataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='caffe.DataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='batch_size', full_name='caffe.DataParameter.batch_size', index=1, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rand_skip', full_name='caffe.DataParameter.rand_skip', index=2, - number=7, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='backend', full_name='caffe.DataParameter.backend', index=3, - number=8, type=14, cpp_type=8, label=1, - 
has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.DataParameter.scale', index=4, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mean_file', full_name='caffe.DataParameter.mean_file', index=5, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='crop_size', full_name='caffe.DataParameter.crop_size', index=6, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mirror', full_name='caffe.DataParameter.mirror', index=7, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='force_encoded_color', full_name='caffe.DataParameter.force_encoded_color', index=8, - number=9, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='prefetch', full_name='caffe.DataParameter.prefetch', index=9, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=4, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _DATAPARAMETER_DB, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=6814, - serialized_end=7106, -) - - -_DROPOUTPARAMETER = _descriptor.Descriptor( - name='DropoutParameter', - full_name='caffe.DropoutParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='dropout_ratio', full_name='caffe.DropoutParameter.dropout_ratio', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.5), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7108, - serialized_end=7154, -) - - -_DUMMYDATAPARAMETER = _descriptor.Descriptor( - name='DummyDataParameter', - full_name='caffe.DummyDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='data_filler', full_name='caffe.DummyDataParameter.data_filler', index=0, - 
number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='shape', full_name='caffe.DummyDataParameter.shape', index=1, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='num', full_name='caffe.DummyDataParameter.num', index=2, - number=2, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='channels', full_name='caffe.DummyDataParameter.channels', index=3, - number=3, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='height', full_name='caffe.DummyDataParameter.height', index=4, - number=4, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='width', full_name='caffe.DummyDataParameter.width', index=5, - number=5, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7157, - serialized_end=7317, -) - - -_ELTWISEPARAMETER = _descriptor.Descriptor( - name='EltwiseParameter', - full_name='caffe.EltwiseParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='operation', full_name='caffe.EltwiseParameter.operation', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='coeff', full_name='caffe.EltwiseParameter.coeff', index=1, - number=2, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stable_prod_grad', full_name='caffe.EltwiseParameter.stable_prod_grad', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _ELTWISEPARAMETER_ELTWISEOP, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=7320, - serialized_end=7485, -) - - -_ELUPARAMETER = _descriptor.Descriptor( - name='ELUParameter', - full_name='caffe.ELUParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='alpha', full_name='caffe.ELUParameter.alpha', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7487, - serialized_end=7519, -) - - -_EMBEDPARAMETER = _descriptor.Descriptor( - name='EmbedParameter', - full_name='caffe.EmbedParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='caffe.EmbedParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='input_dim', full_name='caffe.EmbedParameter.input_dim', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_term', full_name='caffe.EmbedParameter.bias_term', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='caffe.EmbedParameter.weight_filler', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='caffe.EmbedParameter.bias_filler', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7522, - serialized_end=7694, -) - - -_EXPPARAMETER = _descriptor.Descriptor( - name='ExpParameter', - full_name='caffe.ExpParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='base', full_name='caffe.ExpParameter.base', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(-1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.ExpParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - 
has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='shift', full_name='caffe.ExpParameter.shift', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7696, - serialized_end=7764, -) - - -_FLATTENPARAMETER = _descriptor.Descriptor( - name='FlattenParameter', - full_name='caffe.FlattenParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.FlattenParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_axis', full_name='caffe.FlattenParameter.end_axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7766, - serialized_end=7823, -) - - -_HDF5DATAPARAMETER = _descriptor.Descriptor( - name='HDF5DataParameter', - full_name='caffe.HDF5DataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='caffe.HDF5DataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='batch_size', full_name='caffe.HDF5DataParameter.batch_size', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='shuffle', full_name='caffe.HDF5DataParameter.shuffle', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7825, - serialized_end=7904, -) - - -_HDF5OUTPUTPARAMETER = _descriptor.Descriptor( - name='HDF5OutputParameter', - full_name='caffe.HDF5OutputParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='file_name', 
full_name='caffe.HDF5OutputParameter.file_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7906, - serialized_end=7946, -) - - -_HINGELOSSPARAMETER = _descriptor.Descriptor( - name='HingeLossParameter', - full_name='caffe.HingeLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='norm', full_name='caffe.HingeLossParameter.norm', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _HINGELOSSPARAMETER_NORM, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=7948, - serialized_end=8042, -) - - -_IMAGEDATAPARAMETER = _descriptor.Descriptor( - name='ImageDataParameter', - full_name='caffe.ImageDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='caffe.ImageDataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='batch_size', full_name='caffe.ImageDataParameter.batch_size', index=1, - number=4, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rand_skip', full_name='caffe.ImageDataParameter.rand_skip', index=2, - number=7, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='shuffle', full_name='caffe.ImageDataParameter.shuffle', index=3, - number=8, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='new_height', full_name='caffe.ImageDataParameter.new_height', index=4, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='new_width', full_name='caffe.ImageDataParameter.new_width', index=5, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, 
file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='is_color', full_name='caffe.ImageDataParameter.is_color', index=6, - number=11, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.ImageDataParameter.scale', index=7, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mean_file', full_name='caffe.ImageDataParameter.mean_file', index=8, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='crop_size', full_name='caffe.ImageDataParameter.crop_size', index=9, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mirror', full_name='caffe.ImageDataParameter.mirror', index=10, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='root_folder', full_name='caffe.ImageDataParameter.root_folder', index=11, - number=12, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=8045, - serialized_end=8324, -) - - -_INFOGAINLOSSPARAMETER = _descriptor.Descriptor( - name='InfogainLossParameter', - full_name='caffe.InfogainLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='caffe.InfogainLossParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.InfogainLossParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=8326, - serialized_end=8382, -) - - -_INNERPRODUCTPARAMETER = _descriptor.Descriptor( - name='InnerProductParameter', - 
full_name='caffe.InnerProductParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='caffe.InnerProductParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_term', full_name='caffe.InnerProductParameter.bias_term', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='caffe.InnerProductParameter.weight_filler', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='caffe.InnerProductParameter.bias_filler', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.InnerProductParameter.axis', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='transpose', full_name='caffe.InnerProductParameter.transpose', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=8385, - serialized_end=8588, -) - - -_INPUTPARAMETER = _descriptor.Descriptor( - name='InputParameter', - full_name='caffe.InputParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='caffe.InputParameter.shape', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=8590, - serialized_end=8639, -) - - -_LOGPARAMETER = _descriptor.Descriptor( - name='LogParameter', - full_name='caffe.LogParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='base', full_name='caffe.LogParameter.base', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(-1), - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.LogParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='shift', full_name='caffe.LogParameter.shift', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=8641, - serialized_end=8709, -) - - -_LRNPARAMETER = _descriptor.Descriptor( - name='LRNParameter', - full_name='caffe.LRNParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='local_size', full_name='caffe.LRNParameter.local_size', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='alpha', full_name='caffe.LRNParameter.alpha', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='beta', full_name='caffe.LRNParameter.beta', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.75), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='norm_region', full_name='caffe.LRNParameter.norm_region', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='k', full_name='caffe.LRNParameter.k', index=4, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.LRNParameter.engine', index=5, - number=6, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _LRNPARAMETER_NORMREGION, - _LRNPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=8712, - serialized_end=9024, -) - - -_MEMORYDATAPARAMETER = _descriptor.Descriptor( - name='MemoryDataParameter', - 
full_name='caffe.MemoryDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='batch_size', full_name='caffe.MemoryDataParameter.batch_size', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='channels', full_name='caffe.MemoryDataParameter.channels', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='height', full_name='caffe.MemoryDataParameter.height', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='width', full_name='caffe.MemoryDataParameter.width', index=3, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=9026, - serialized_end=9116, -) - - -_MVNPARAMETER = _descriptor.Descriptor( - name='MVNParameter', - full_name='caffe.MVNParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='normalize_variance', full_name='caffe.MVNParameter.normalize_variance', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='across_channels', full_name='caffe.MVNParameter.across_channels', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='eps', full_name='caffe.MVNParameter.eps', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1e-09), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=9118, - serialized_end=9218, -) - - -_PARAMETERPARAMETER = _descriptor.Descriptor( - name='ParameterParameter', - full_name='caffe.ParameterParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='caffe.ParameterParameter.shape', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=9220, - serialized_end=9273, -) - - -_POOLINGPARAMETER = _descriptor.Descriptor( - name='PoolingParameter', - full_name='caffe.PoolingParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pool', full_name='caffe.PoolingParameter.pool', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pad', full_name='caffe.PoolingParameter.pad', index=1, - number=4, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pad_h', full_name='caffe.PoolingParameter.pad_h', index=2, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pad_w', full_name='caffe.PoolingParameter.pad_w', index=3, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='kernel_size', full_name='caffe.PoolingParameter.kernel_size', index=4, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='kernel_h', full_name='caffe.PoolingParameter.kernel_h', index=5, - number=5, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='kernel_w', full_name='caffe.PoolingParameter.kernel_w', index=6, - number=6, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stride', full_name='caffe.PoolingParameter.stride', index=7, - number=3, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stride_h', full_name='caffe.PoolingParameter.stride_h', index=8, - number=7, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - 
_descriptor.FieldDescriptor( - name='stride_w', full_name='caffe.PoolingParameter.stride_w', index=9, - number=8, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.PoolingParameter.engine', index=10, - number=11, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='global_pooling', full_name='caffe.PoolingParameter.global_pooling', index=11, - number=12, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _POOLINGPARAMETER_POOLMETHOD, - _POOLINGPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=9276, - serialized_end=9694, -) - - -_POWERPARAMETER = _descriptor.Descriptor( - name='PowerParameter', - full_name='caffe.PowerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='power', full_name='caffe.PowerParameter.power', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.PowerParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='shift', full_name='caffe.PowerParameter.shift', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=9696, - serialized_end=9766, -) - - -_PYTHONPARAMETER = _descriptor.Descriptor( - name='PythonParameter', - full_name='caffe.PythonParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='module', full_name='caffe.PythonParameter.module', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='layer', full_name='caffe.PythonParameter.layer', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='param_str', full_name='caffe.PythonParameter.param_str', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='share_in_parallel', full_name='caffe.PythonParameter.share_in_parallel', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=9768, - serialized_end=9871, -) - - -_RECURRENTPARAMETER = _descriptor.Descriptor( - name='RecurrentParameter', - full_name='caffe.RecurrentParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='caffe.RecurrentParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='caffe.RecurrentParameter.weight_filler', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='caffe.RecurrentParameter.bias_filler', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='debug_info', full_name='caffe.RecurrentParameter.debug_info', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='expose_hidden', full_name='caffe.RecurrentParameter.expose_hidden', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=9874, - serialized_end=10066, -) - - -_REDUCTIONPARAMETER = _descriptor.Descriptor( - name='ReductionParameter', - full_name='caffe.ReductionParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='operation', full_name='caffe.ReductionParameter.operation', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - 
message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.ReductionParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='coeff', full_name='caffe.ReductionParameter.coeff', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _REDUCTIONPARAMETER_REDUCTIONOP, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=10069, - serialized_end=10242, -) - - -_RELUPARAMETER = _descriptor.Descriptor( - name='ReLUParameter', - full_name='caffe.ReLUParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='negative_slope', full_name='caffe.ReLUParameter.negative_slope', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.ReLUParameter.engine', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _RELUPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=10245, - serialized_end=10386, -) - - -_RESHAPEPARAMETER = _descriptor.Descriptor( - name='ReshapeParameter', - full_name='caffe.ReshapeParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='caffe.ReshapeParameter.shape', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.ReshapeParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='num_axes', full_name='caffe.ReshapeParameter.num_axes', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=10388, - serialized_end=10478, -) - - -_SCALEPARAMETER = _descriptor.Descriptor( - name='ScaleParameter', - full_name='caffe.ScaleParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.ScaleParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='num_axes', full_name='caffe.ScaleParameter.num_axes', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='filler', full_name='caffe.ScaleParameter.filler', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_term', full_name='caffe.ScaleParameter.bias_term', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='caffe.ScaleParameter.bias_filler', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=10481, - serialized_end=10646, -) - - -_SIGMOIDPARAMETER = _descriptor.Descriptor( - name='SigmoidParameter', - full_name='caffe.SigmoidParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.SigmoidParameter.engine', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SIGMOIDPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=10648, - serialized_end=10768, -) - - -_SLICEPARAMETER = _descriptor.Descriptor( - name='SliceParameter', - full_name='caffe.SliceParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.SliceParameter.axis', index=0, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='slice_point', full_name='caffe.SliceParameter.slice_point', index=1, - number=2, type=13, cpp_type=3, 
label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='slice_dim', full_name='caffe.SliceParameter.slice_dim', index=2, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=10770, - serialized_end=10846, -) - - -_SOFTMAXPARAMETER = _descriptor.Descriptor( - name='SoftmaxParameter', - full_name='caffe.SoftmaxParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.SoftmaxParameter.engine', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.SoftmaxParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SOFTMAXPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=10849, - serialized_end=10986, -) - - -_TANHPARAMETER = _descriptor.Descriptor( - name='TanHParameter', - full_name='caffe.TanHParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.TanHParameter.engine', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _TANHPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=10988, - serialized_end=11102, -) - - -_TILEPARAMETER = _descriptor.Descriptor( - name='TileParameter', - full_name='caffe.TileParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='caffe.TileParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='tiles', full_name='caffe.TileParameter.tiles', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - 
is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=11104, - serialized_end=11151, -) - - -_THRESHOLDPARAMETER = _descriptor.Descriptor( - name='ThresholdParameter', - full_name='caffe.ThresholdParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='threshold', full_name='caffe.ThresholdParameter.threshold', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=11153, - serialized_end=11195, -) - - -_WINDOWDATAPARAMETER = _descriptor.Descriptor( - name='WindowDataParameter', - full_name='caffe.WindowDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='caffe.WindowDataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.WindowDataParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mean_file', full_name='caffe.WindowDataParameter.mean_file', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='batch_size', full_name='caffe.WindowDataParameter.batch_size', index=3, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='crop_size', full_name='caffe.WindowDataParameter.crop_size', index=4, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mirror', full_name='caffe.WindowDataParameter.mirror', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='fg_threshold', full_name='caffe.WindowDataParameter.fg_threshold', index=6, - number=7, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.5), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - 
_descriptor.FieldDescriptor( - name='bg_threshold', full_name='caffe.WindowDataParameter.bg_threshold', index=7, - number=8, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.5), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='fg_fraction', full_name='caffe.WindowDataParameter.fg_fraction', index=8, - number=9, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.25), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='context_pad', full_name='caffe.WindowDataParameter.context_pad', index=9, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='crop_mode', full_name='caffe.WindowDataParameter.crop_mode', index=10, - number=11, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("warp").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cache_images', full_name='caffe.WindowDataParameter.cache_images', index=11, - number=12, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='root_folder', full_name='caffe.WindowDataParameter.root_folder', index=12, - number=13, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=11198, - serialized_end=11519, -) - - -_SPPPARAMETER = _descriptor.Descriptor( - name='SPPParameter', - full_name='caffe.SPPParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pyramid_height', full_name='caffe.SPPParameter.pyramid_height', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pool', full_name='caffe.SPPParameter.pool', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='engine', full_name='caffe.SPPParameter.engine', index=2, - number=6, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - 
extensions=[ - ], - nested_types=[], - enum_types=[ - _SPPPARAMETER_POOLMETHOD, - _SPPPARAMETER_ENGINE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=11522, - serialized_end=11757, -) - - -_V1LAYERPARAMETER = _descriptor.Descriptor( - name='V1LayerParameter', - full_name='caffe.V1LayerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='bottom', full_name='caffe.V1LayerParameter.bottom', index=0, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='top', full_name='caffe.V1LayerParameter.top', index=1, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='name', full_name='caffe.V1LayerParameter.name', index=2, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='include', full_name='caffe.V1LayerParameter.include', index=3, - number=32, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='exclude', full_name='caffe.V1LayerParameter.exclude', index=4, - number=33, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='type', full_name='caffe.V1LayerParameter.type', index=5, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='blobs', full_name='caffe.V1LayerParameter.blobs', index=6, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='param', full_name='caffe.V1LayerParameter.param', index=7, - number=1001, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='blob_share_mode', full_name='caffe.V1LayerParameter.blob_share_mode', index=8, - number=1002, type=14, cpp_type=8, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='blobs_lr', 
full_name='caffe.V1LayerParameter.blobs_lr', index=9, - number=7, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_decay', full_name='caffe.V1LayerParameter.weight_decay', index=10, - number=8, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='loss_weight', full_name='caffe.V1LayerParameter.loss_weight', index=11, - number=35, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='accuracy_param', full_name='caffe.V1LayerParameter.accuracy_param', index=12, - number=27, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='argmax_param', full_name='caffe.V1LayerParameter.argmax_param', index=13, - number=23, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='concat_param', full_name='caffe.V1LayerParameter.concat_param', index=14, - number=9, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='contrastive_loss_param', full_name='caffe.V1LayerParameter.contrastive_loss_param', index=15, - number=40, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='convolution_param', full_name='caffe.V1LayerParameter.convolution_param', index=16, - number=10, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='data_param', full_name='caffe.V1LayerParameter.data_param', index=17, - number=11, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='dropout_param', full_name='caffe.V1LayerParameter.dropout_param', index=18, - number=12, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='dummy_data_param', 
full_name='caffe.V1LayerParameter.dummy_data_param', index=19, - number=26, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='eltwise_param', full_name='caffe.V1LayerParameter.eltwise_param', index=20, - number=24, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='exp_param', full_name='caffe.V1LayerParameter.exp_param', index=21, - number=41, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='hdf5_data_param', full_name='caffe.V1LayerParameter.hdf5_data_param', index=22, - number=13, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='hdf5_output_param', full_name='caffe.V1LayerParameter.hdf5_output_param', index=23, - number=14, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='hinge_loss_param', full_name='caffe.V1LayerParameter.hinge_loss_param', index=24, - number=29, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='image_data_param', full_name='caffe.V1LayerParameter.image_data_param', index=25, - number=15, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='infogain_loss_param', full_name='caffe.V1LayerParameter.infogain_loss_param', index=26, - number=16, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='inner_product_param', full_name='caffe.V1LayerParameter.inner_product_param', index=27, - number=17, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='lrn_param', full_name='caffe.V1LayerParameter.lrn_param', index=28, - number=18, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - 
name='memory_data_param', full_name='caffe.V1LayerParameter.memory_data_param', index=29, - number=22, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mvn_param', full_name='caffe.V1LayerParameter.mvn_param', index=30, - number=34, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pooling_param', full_name='caffe.V1LayerParameter.pooling_param', index=31, - number=19, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='power_param', full_name='caffe.V1LayerParameter.power_param', index=32, - number=21, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='relu_param', full_name='caffe.V1LayerParameter.relu_param', index=33, - number=30, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='sigmoid_param', full_name='caffe.V1LayerParameter.sigmoid_param', index=34, - number=38, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='softmax_param', full_name='caffe.V1LayerParameter.softmax_param', index=35, - number=39, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='slice_param', full_name='caffe.V1LayerParameter.slice_param', index=36, - number=31, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='tanh_param', full_name='caffe.V1LayerParameter.tanh_param', index=37, - number=37, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='threshold_param', full_name='caffe.V1LayerParameter.threshold_param', index=38, - number=25, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='window_data_param', 
full_name='caffe.V1LayerParameter.window_data_param', index=39, - number=20, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='transform_param', full_name='caffe.V1LayerParameter.transform_param', index=40, - number=36, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='loss_param', full_name='caffe.V1LayerParameter.loss_param', index=41, - number=42, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='layer', full_name='caffe.V1LayerParameter.layer', index=42, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _V1LAYERPARAMETER_LAYERTYPE, - _V1LAYERPARAMETER_DIMCHECKMODE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=11760, - serialized_end=14288, -) - - -_V0LAYERPARAMETER = _descriptor.Descriptor( - name='V0LayerParameter', - full_name='caffe.V0LayerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='caffe.V0LayerParameter.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='type', full_name='caffe.V0LayerParameter.type', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='num_output', full_name='caffe.V0LayerParameter.num_output', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='biasterm', full_name='caffe.V0LayerParameter.biasterm', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='caffe.V0LayerParameter.weight_filler', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), 
- _descriptor.FieldDescriptor( - name='bias_filler', full_name='caffe.V0LayerParameter.bias_filler', index=5, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pad', full_name='caffe.V0LayerParameter.pad', index=6, - number=7, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='kernelsize', full_name='caffe.V0LayerParameter.kernelsize', index=7, - number=8, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='group', full_name='caffe.V0LayerParameter.group', index=8, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='stride', full_name='caffe.V0LayerParameter.stride', index=9, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pool', full_name='caffe.V0LayerParameter.pool', index=10, - number=11, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='dropout_ratio', full_name='caffe.V0LayerParameter.dropout_ratio', index=11, - number=12, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.5), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='local_size', full_name='caffe.V0LayerParameter.local_size', index=12, - number=13, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='alpha', full_name='caffe.V0LayerParameter.alpha', index=13, - number=14, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='beta', full_name='caffe.V0LayerParameter.beta', index=14, - number=15, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.75), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='k', full_name='caffe.V0LayerParameter.k', index=15, - number=22, type=2, cpp_type=6, label=1, - has_default_value=True, 
default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='source', full_name='caffe.V0LayerParameter.source', index=16, - number=16, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='scale', full_name='caffe.V0LayerParameter.scale', index=17, - number=17, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(1), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='meanfile', full_name='caffe.V0LayerParameter.meanfile', index=18, - number=18, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='batchsize', full_name='caffe.V0LayerParameter.batchsize', index=19, - number=19, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cropsize', full_name='caffe.V0LayerParameter.cropsize', index=20, - number=20, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mirror', full_name='caffe.V0LayerParameter.mirror', index=21, - number=21, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='blobs', full_name='caffe.V0LayerParameter.blobs', index=22, - number=50, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='blobs_lr', full_name='caffe.V0LayerParameter.blobs_lr', index=23, - number=51, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='weight_decay', full_name='caffe.V0LayerParameter.weight_decay', index=24, - number=52, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rand_skip', full_name='caffe.V0LayerParameter.rand_skip', index=25, - number=53, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='det_fg_threshold', full_name='caffe.V0LayerParameter.det_fg_threshold', index=26, - number=54, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.5), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='det_bg_threshold', full_name='caffe.V0LayerParameter.det_bg_threshold', index=27, - number=55, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.5), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='det_fg_fraction', full_name='caffe.V0LayerParameter.det_fg_fraction', index=28, - number=56, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=float(0.25), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='det_context_pad', full_name='caffe.V0LayerParameter.det_context_pad', index=29, - number=58, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='det_crop_mode', full_name='caffe.V0LayerParameter.det_crop_mode', index=30, - number=59, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("warp").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='new_num', full_name='caffe.V0LayerParameter.new_num', index=31, - number=60, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='new_channels', full_name='caffe.V0LayerParameter.new_channels', index=32, - number=61, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='new_height', full_name='caffe.V0LayerParameter.new_height', index=33, - number=62, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='new_width', full_name='caffe.V0LayerParameter.new_width', index=34, - number=63, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='shuffle_images', full_name='caffe.V0LayerParameter.shuffle_images', index=35, - number=64, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, 
- serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='concat_dim', full_name='caffe.V0LayerParameter.concat_dim', index=36, - number=65, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='hdf5_output_param', full_name='caffe.V0LayerParameter.hdf5_output_param', index=37, - number=1001, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _V0LAYERPARAMETER_POOLMETHOD, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=14291, - serialized_end=15312, -) - - -_PRELUPARAMETER = _descriptor.Descriptor( - name='PReLUParameter', - full_name='caffe.PReLUParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filler', full_name='caffe.PReLUParameter.filler', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='channel_shared', full_name='caffe.PReLUParameter.channel_shared', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=15314, - serialized_end=15401, -) - -_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE -_BLOBPROTOVECTOR.fields_by_name['blobs'].message_type = _BLOBPROTO -_FILLERPARAMETER.fields_by_name['variance_norm'].enum_type = _FILLERPARAMETER_VARIANCENORM -_FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER -_NETPARAMETER.fields_by_name['input_shape'].message_type = _BLOBSHAPE -_NETPARAMETER.fields_by_name['state'].message_type = _NETSTATE -_NETPARAMETER.fields_by_name['layer'].message_type = _LAYERPARAMETER -_NETPARAMETER.fields_by_name['layers'].message_type = _V1LAYERPARAMETER -_SOLVERPARAMETER.fields_by_name['net_param'].message_type = _NETPARAMETER -_SOLVERPARAMETER.fields_by_name['train_net_param'].message_type = _NETPARAMETER -_SOLVERPARAMETER.fields_by_name['test_net_param'].message_type = _NETPARAMETER -_SOLVERPARAMETER.fields_by_name['train_state'].message_type = _NETSTATE -_SOLVERPARAMETER.fields_by_name['test_state'].message_type = _NETSTATE -_SOLVERPARAMETER.fields_by_name['snapshot_format'].enum_type = _SOLVERPARAMETER_SNAPSHOTFORMAT -_SOLVERPARAMETER.fields_by_name['solver_mode'].enum_type = _SOLVERPARAMETER_SOLVERMODE -_SOLVERPARAMETER.fields_by_name['solver_type'].enum_type = _SOLVERPARAMETER_SOLVERTYPE -_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER -_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER -_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER 
-_SOLVERSTATE.fields_by_name['history'].message_type = _BLOBPROTO -_NETSTATE.fields_by_name['phase'].enum_type = _PHASE -_NETSTATERULE.fields_by_name['phase'].enum_type = _PHASE -_PARAMSPEC.fields_by_name['share_mode'].enum_type = _PARAMSPEC_DIMCHECKMODE -_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC -_LAYERPARAMETER.fields_by_name['phase'].enum_type = _PHASE -_LAYERPARAMETER.fields_by_name['param'].message_type = _PARAMSPEC -_LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO -_LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE -_LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE -_LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER -_LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER -_LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER -_LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER -_LAYERPARAMETER.fields_by_name['batch_norm_param'].message_type = _BATCHNORMPARAMETER -_LAYERPARAMETER.fields_by_name['bias_param'].message_type = _BIASPARAMETER -_LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER -_LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER -_LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER -_LAYERPARAMETER.fields_by_name['crop_param'].message_type = _CROPPARAMETER -_LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER -_LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER -_LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER -_LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER -_LAYERPARAMETER.fields_by_name['elu_param'].message_type = _ELUPARAMETER -_LAYERPARAMETER.fields_by_name['embed_param'].message_type = _EMBEDPARAMETER -_LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER -_LAYERPARAMETER.fields_by_name['flatten_param'].message_type = _FLATTENPARAMETER -_LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER -_LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER -_LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER -_LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER -_LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER -_LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER -_LAYERPARAMETER.fields_by_name['input_param'].message_type = _INPUTPARAMETER -_LAYERPARAMETER.fields_by_name['log_param'].message_type = _LOGPARAMETER -_LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER -_LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER -_LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER -_LAYERPARAMETER.fields_by_name['parameter_param'].message_type = _PARAMETERPARAMETER -_LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER -_LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER -_LAYERPARAMETER.fields_by_name['prelu_param'].message_type = _PRELUPARAMETER -_LAYERPARAMETER.fields_by_name['python_param'].message_type = _PYTHONPARAMETER -_LAYERPARAMETER.fields_by_name['recurrent_param'].message_type = _RECURRENTPARAMETER 
-_LAYERPARAMETER.fields_by_name['reduction_param'].message_type = _REDUCTIONPARAMETER -_LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER -_LAYERPARAMETER.fields_by_name['reshape_param'].message_type = _RESHAPEPARAMETER -_LAYERPARAMETER.fields_by_name['scale_param'].message_type = _SCALEPARAMETER -_LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER -_LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER -_LAYERPARAMETER.fields_by_name['spp_param'].message_type = _SPPPARAMETER -_LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER -_LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER -_LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER -_LAYERPARAMETER.fields_by_name['tile_param'].message_type = _TILEPARAMETER -_LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER -_LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE -_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER -_BIASPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER -_CONVOLUTIONPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_CONVOLUTIONPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_CONVOLUTIONPARAMETER.fields_by_name['engine'].enum_type = _CONVOLUTIONPARAMETER_ENGINE -_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER -_DATAPARAMETER.fields_by_name['backend'].enum_type = _DATAPARAMETER_DB -_DATAPARAMETER_DB.containing_type = _DATAPARAMETER -_DUMMYDATAPARAMETER.fields_by_name['data_filler'].message_type = _FILLERPARAMETER -_DUMMYDATAPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_ELTWISEPARAMETER.fields_by_name['operation'].enum_type = _ELTWISEPARAMETER_ELTWISEOP -_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER -_EMBEDPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_EMBEDPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_HINGELOSSPARAMETER.fields_by_name['norm'].enum_type = _HINGELOSSPARAMETER_NORM -_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER -_INNERPRODUCTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_INNERPRODUCTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_INPUTPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_LRNPARAMETER.fields_by_name['norm_region'].enum_type = _LRNPARAMETER_NORMREGION -_LRNPARAMETER.fields_by_name['engine'].enum_type = _LRNPARAMETER_ENGINE -_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER -_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER -_PARAMETERPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_POOLINGPARAMETER.fields_by_name['pool'].enum_type = _POOLINGPARAMETER_POOLMETHOD -_POOLINGPARAMETER.fields_by_name['engine'].enum_type = _POOLINGPARAMETER_ENGINE -_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER -_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER -_RECURRENTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_RECURRENTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_REDUCTIONPARAMETER.fields_by_name['operation'].enum_type = _REDUCTIONPARAMETER_REDUCTIONOP -_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER -_RELUPARAMETER.fields_by_name['engine'].enum_type = _RELUPARAMETER_ENGINE 
-_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER -_RESHAPEPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_SCALEPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER -_SCALEPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_SIGMOIDPARAMETER.fields_by_name['engine'].enum_type = _SIGMOIDPARAMETER_ENGINE -_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER -_SOFTMAXPARAMETER.fields_by_name['engine'].enum_type = _SOFTMAXPARAMETER_ENGINE -_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER -_TANHPARAMETER.fields_by_name['engine'].enum_type = _TANHPARAMETER_ENGINE -_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER -_SPPPARAMETER.fields_by_name['pool'].enum_type = _SPPPARAMETER_POOLMETHOD -_SPPPARAMETER.fields_by_name['engine'].enum_type = _SPPPARAMETER_ENGINE -_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER -_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER -_V1LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE -_V1LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE -_V1LAYERPARAMETER.fields_by_name['type'].enum_type = _V1LAYERPARAMETER_LAYERTYPE -_V1LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO -_V1LAYERPARAMETER.fields_by_name['blob_share_mode'].enum_type = _V1LAYERPARAMETER_DIMCHECKMODE -_V1LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER -_V1LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER -_V1LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER -_V1LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER -_V1LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER -_V1LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER -_V1LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER -_V1LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER -_V1LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER -_V1LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER -_V1LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER -_V1LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER -_V1LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER -_V1LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER -_V1LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER -_V1LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER -_V1LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER -_V1LAYERPARAMETER.fields_by_name['tanh_param'].message_type = 
_TANHPARAMETER -_V1LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER -_V1LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER -_V1LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['layer'].message_type = _V0LAYERPARAMETER -_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER -_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER -_V0LAYERPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_V0LAYERPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_V0LAYERPARAMETER.fields_by_name['pool'].enum_type = _V0LAYERPARAMETER_POOLMETHOD -_V0LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO -_V0LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER -_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER -_PRELUPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER -DESCRIPTOR.message_types_by_name['BlobShape'] = _BLOBSHAPE -DESCRIPTOR.message_types_by_name['BlobProto'] = _BLOBPROTO -DESCRIPTOR.message_types_by_name['BlobProtoVector'] = _BLOBPROTOVECTOR -DESCRIPTOR.message_types_by_name['Datum'] = _DATUM -DESCRIPTOR.message_types_by_name['FillerParameter'] = _FILLERPARAMETER -DESCRIPTOR.message_types_by_name['NetParameter'] = _NETPARAMETER -DESCRIPTOR.message_types_by_name['SolverParameter'] = _SOLVERPARAMETER -DESCRIPTOR.message_types_by_name['SolverState'] = _SOLVERSTATE -DESCRIPTOR.message_types_by_name['NetState'] = _NETSTATE -DESCRIPTOR.message_types_by_name['NetStateRule'] = _NETSTATERULE -DESCRIPTOR.message_types_by_name['ParamSpec'] = _PARAMSPEC -DESCRIPTOR.message_types_by_name['LayerParameter'] = _LAYERPARAMETER -DESCRIPTOR.message_types_by_name['TransformationParameter'] = _TRANSFORMATIONPARAMETER -DESCRIPTOR.message_types_by_name['LossParameter'] = _LOSSPARAMETER -DESCRIPTOR.message_types_by_name['AccuracyParameter'] = _ACCURACYPARAMETER -DESCRIPTOR.message_types_by_name['ArgMaxParameter'] = _ARGMAXPARAMETER -DESCRIPTOR.message_types_by_name['ConcatParameter'] = _CONCATPARAMETER -DESCRIPTOR.message_types_by_name['BatchNormParameter'] = _BATCHNORMPARAMETER -DESCRIPTOR.message_types_by_name['BiasParameter'] = _BIASPARAMETER -DESCRIPTOR.message_types_by_name['ContrastiveLossParameter'] = _CONTRASTIVELOSSPARAMETER -DESCRIPTOR.message_types_by_name['ConvolutionParameter'] = _CONVOLUTIONPARAMETER -DESCRIPTOR.message_types_by_name['CropParameter'] = _CROPPARAMETER -DESCRIPTOR.message_types_by_name['DataParameter'] = _DATAPARAMETER -DESCRIPTOR.message_types_by_name['DropoutParameter'] = _DROPOUTPARAMETER -DESCRIPTOR.message_types_by_name['DummyDataParameter'] = _DUMMYDATAPARAMETER -DESCRIPTOR.message_types_by_name['EltwiseParameter'] = _ELTWISEPARAMETER -DESCRIPTOR.message_types_by_name['ELUParameter'] = _ELUPARAMETER -DESCRIPTOR.message_types_by_name['EmbedParameter'] = _EMBEDPARAMETER -DESCRIPTOR.message_types_by_name['ExpParameter'] = _EXPPARAMETER -DESCRIPTOR.message_types_by_name['FlattenParameter'] = _FLATTENPARAMETER -DESCRIPTOR.message_types_by_name['HDF5DataParameter'] = _HDF5DATAPARAMETER -DESCRIPTOR.message_types_by_name['HDF5OutputParameter'] = _HDF5OUTPUTPARAMETER -DESCRIPTOR.message_types_by_name['HingeLossParameter'] = _HINGELOSSPARAMETER -DESCRIPTOR.message_types_by_name['ImageDataParameter'] = _IMAGEDATAPARAMETER 
-DESCRIPTOR.message_types_by_name['InfogainLossParameter'] = _INFOGAINLOSSPARAMETER -DESCRIPTOR.message_types_by_name['InnerProductParameter'] = _INNERPRODUCTPARAMETER -DESCRIPTOR.message_types_by_name['InputParameter'] = _INPUTPARAMETER -DESCRIPTOR.message_types_by_name['LogParameter'] = _LOGPARAMETER -DESCRIPTOR.message_types_by_name['LRNParameter'] = _LRNPARAMETER -DESCRIPTOR.message_types_by_name['MemoryDataParameter'] = _MEMORYDATAPARAMETER -DESCRIPTOR.message_types_by_name['MVNParameter'] = _MVNPARAMETER -DESCRIPTOR.message_types_by_name['ParameterParameter'] = _PARAMETERPARAMETER -DESCRIPTOR.message_types_by_name['PoolingParameter'] = _POOLINGPARAMETER -DESCRIPTOR.message_types_by_name['PowerParameter'] = _POWERPARAMETER -DESCRIPTOR.message_types_by_name['PythonParameter'] = _PYTHONPARAMETER -DESCRIPTOR.message_types_by_name['RecurrentParameter'] = _RECURRENTPARAMETER -DESCRIPTOR.message_types_by_name['ReductionParameter'] = _REDUCTIONPARAMETER -DESCRIPTOR.message_types_by_name['ReLUParameter'] = _RELUPARAMETER -DESCRIPTOR.message_types_by_name['ReshapeParameter'] = _RESHAPEPARAMETER -DESCRIPTOR.message_types_by_name['ScaleParameter'] = _SCALEPARAMETER -DESCRIPTOR.message_types_by_name['SigmoidParameter'] = _SIGMOIDPARAMETER -DESCRIPTOR.message_types_by_name['SliceParameter'] = _SLICEPARAMETER -DESCRIPTOR.message_types_by_name['SoftmaxParameter'] = _SOFTMAXPARAMETER -DESCRIPTOR.message_types_by_name['TanHParameter'] = _TANHPARAMETER -DESCRIPTOR.message_types_by_name['TileParameter'] = _TILEPARAMETER -DESCRIPTOR.message_types_by_name['ThresholdParameter'] = _THRESHOLDPARAMETER -DESCRIPTOR.message_types_by_name['WindowDataParameter'] = _WINDOWDATAPARAMETER -DESCRIPTOR.message_types_by_name['SPPParameter'] = _SPPPARAMETER -DESCRIPTOR.message_types_by_name['V1LayerParameter'] = _V1LAYERPARAMETER -DESCRIPTOR.message_types_by_name['V0LayerParameter'] = _V0LAYERPARAMETER -DESCRIPTOR.message_types_by_name['PReLUParameter'] = _PRELUPARAMETER -DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict( - DESCRIPTOR = _BLOBSHAPE, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.BlobShape) - )) -_sym_db.RegisterMessage(BlobShape) - -BlobProto = _reflection.GeneratedProtocolMessageType('BlobProto', (_message.Message,), dict( - DESCRIPTOR = _BLOBPROTO, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.BlobProto) - )) -_sym_db.RegisterMessage(BlobProto) - -BlobProtoVector = _reflection.GeneratedProtocolMessageType('BlobProtoVector', (_message.Message,), dict( - DESCRIPTOR = _BLOBPROTOVECTOR, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.BlobProtoVector) - )) -_sym_db.RegisterMessage(BlobProtoVector) - -Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict( - DESCRIPTOR = _DATUM, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.Datum) - )) -_sym_db.RegisterMessage(Datum) - -FillerParameter = _reflection.GeneratedProtocolMessageType('FillerParameter', (_message.Message,), dict( - DESCRIPTOR = _FILLERPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.FillerParameter) - )) -_sym_db.RegisterMessage(FillerParameter) - -NetParameter = _reflection.GeneratedProtocolMessageType('NetParameter', (_message.Message,), dict( - DESCRIPTOR = _NETPARAMETER, - __module__ = 'caffe_pb2' - # 
@@protoc_insertion_point(class_scope:caffe.NetParameter) - )) -_sym_db.RegisterMessage(NetParameter) - -SolverParameter = _reflection.GeneratedProtocolMessageType('SolverParameter', (_message.Message,), dict( - DESCRIPTOR = _SOLVERPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.SolverParameter) - )) -_sym_db.RegisterMessage(SolverParameter) - -SolverState = _reflection.GeneratedProtocolMessageType('SolverState', (_message.Message,), dict( - DESCRIPTOR = _SOLVERSTATE, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.SolverState) - )) -_sym_db.RegisterMessage(SolverState) - -NetState = _reflection.GeneratedProtocolMessageType('NetState', (_message.Message,), dict( - DESCRIPTOR = _NETSTATE, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.NetState) - )) -_sym_db.RegisterMessage(NetState) - -NetStateRule = _reflection.GeneratedProtocolMessageType('NetStateRule', (_message.Message,), dict( - DESCRIPTOR = _NETSTATERULE, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.NetStateRule) - )) -_sym_db.RegisterMessage(NetStateRule) - -ParamSpec = _reflection.GeneratedProtocolMessageType('ParamSpec', (_message.Message,), dict( - DESCRIPTOR = _PARAMSPEC, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ParamSpec) - )) -_sym_db.RegisterMessage(ParamSpec) - -LayerParameter = _reflection.GeneratedProtocolMessageType('LayerParameter', (_message.Message,), dict( - DESCRIPTOR = _LAYERPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.LayerParameter) - )) -_sym_db.RegisterMessage(LayerParameter) - -TransformationParameter = _reflection.GeneratedProtocolMessageType('TransformationParameter', (_message.Message,), dict( - DESCRIPTOR = _TRANSFORMATIONPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.TransformationParameter) - )) -_sym_db.RegisterMessage(TransformationParameter) - -LossParameter = _reflection.GeneratedProtocolMessageType('LossParameter', (_message.Message,), dict( - DESCRIPTOR = _LOSSPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.LossParameter) - )) -_sym_db.RegisterMessage(LossParameter) - -AccuracyParameter = _reflection.GeneratedProtocolMessageType('AccuracyParameter', (_message.Message,), dict( - DESCRIPTOR = _ACCURACYPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.AccuracyParameter) - )) -_sym_db.RegisterMessage(AccuracyParameter) - -ArgMaxParameter = _reflection.GeneratedProtocolMessageType('ArgMaxParameter', (_message.Message,), dict( - DESCRIPTOR = _ARGMAXPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ArgMaxParameter) - )) -_sym_db.RegisterMessage(ArgMaxParameter) - -ConcatParameter = _reflection.GeneratedProtocolMessageType('ConcatParameter', (_message.Message,), dict( - DESCRIPTOR = _CONCATPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ConcatParameter) - )) -_sym_db.RegisterMessage(ConcatParameter) - -BatchNormParameter = _reflection.GeneratedProtocolMessageType('BatchNormParameter', (_message.Message,), dict( - DESCRIPTOR = _BATCHNORMPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.BatchNormParameter) - )) -_sym_db.RegisterMessage(BatchNormParameter) - -BiasParameter = _reflection.GeneratedProtocolMessageType('BiasParameter', (_message.Message,), dict( - DESCRIPTOR = _BIASPARAMETER, - __module__ = 
'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.BiasParameter) - )) -_sym_db.RegisterMessage(BiasParameter) - -ContrastiveLossParameter = _reflection.GeneratedProtocolMessageType('ContrastiveLossParameter', (_message.Message,), dict( - DESCRIPTOR = _CONTRASTIVELOSSPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ContrastiveLossParameter) - )) -_sym_db.RegisterMessage(ContrastiveLossParameter) - -ConvolutionParameter = _reflection.GeneratedProtocolMessageType('ConvolutionParameter', (_message.Message,), dict( - DESCRIPTOR = _CONVOLUTIONPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ConvolutionParameter) - )) -_sym_db.RegisterMessage(ConvolutionParameter) - -CropParameter = _reflection.GeneratedProtocolMessageType('CropParameter', (_message.Message,), dict( - DESCRIPTOR = _CROPPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.CropParameter) - )) -_sym_db.RegisterMessage(CropParameter) - -DataParameter = _reflection.GeneratedProtocolMessageType('DataParameter', (_message.Message,), dict( - DESCRIPTOR = _DATAPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.DataParameter) - )) -_sym_db.RegisterMessage(DataParameter) - -DropoutParameter = _reflection.GeneratedProtocolMessageType('DropoutParameter', (_message.Message,), dict( - DESCRIPTOR = _DROPOUTPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.DropoutParameter) - )) -_sym_db.RegisterMessage(DropoutParameter) - -DummyDataParameter = _reflection.GeneratedProtocolMessageType('DummyDataParameter', (_message.Message,), dict( - DESCRIPTOR = _DUMMYDATAPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.DummyDataParameter) - )) -_sym_db.RegisterMessage(DummyDataParameter) - -EltwiseParameter = _reflection.GeneratedProtocolMessageType('EltwiseParameter', (_message.Message,), dict( - DESCRIPTOR = _ELTWISEPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.EltwiseParameter) - )) -_sym_db.RegisterMessage(EltwiseParameter) - -ELUParameter = _reflection.GeneratedProtocolMessageType('ELUParameter', (_message.Message,), dict( - DESCRIPTOR = _ELUPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ELUParameter) - )) -_sym_db.RegisterMessage(ELUParameter) - -EmbedParameter = _reflection.GeneratedProtocolMessageType('EmbedParameter', (_message.Message,), dict( - DESCRIPTOR = _EMBEDPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.EmbedParameter) - )) -_sym_db.RegisterMessage(EmbedParameter) - -ExpParameter = _reflection.GeneratedProtocolMessageType('ExpParameter', (_message.Message,), dict( - DESCRIPTOR = _EXPPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ExpParameter) - )) -_sym_db.RegisterMessage(ExpParameter) - -FlattenParameter = _reflection.GeneratedProtocolMessageType('FlattenParameter', (_message.Message,), dict( - DESCRIPTOR = _FLATTENPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.FlattenParameter) - )) -_sym_db.RegisterMessage(FlattenParameter) - -HDF5DataParameter = _reflection.GeneratedProtocolMessageType('HDF5DataParameter', (_message.Message,), dict( - DESCRIPTOR = _HDF5DATAPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.HDF5DataParameter) - )) -_sym_db.RegisterMessage(HDF5DataParameter) - -HDF5OutputParameter = 
_reflection.GeneratedProtocolMessageType('HDF5OutputParameter', (_message.Message,), dict( - DESCRIPTOR = _HDF5OUTPUTPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.HDF5OutputParameter) - )) -_sym_db.RegisterMessage(HDF5OutputParameter) - -HingeLossParameter = _reflection.GeneratedProtocolMessageType('HingeLossParameter', (_message.Message,), dict( - DESCRIPTOR = _HINGELOSSPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.HingeLossParameter) - )) -_sym_db.RegisterMessage(HingeLossParameter) - -ImageDataParameter = _reflection.GeneratedProtocolMessageType('ImageDataParameter', (_message.Message,), dict( - DESCRIPTOR = _IMAGEDATAPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ImageDataParameter) - )) -_sym_db.RegisterMessage(ImageDataParameter) - -InfogainLossParameter = _reflection.GeneratedProtocolMessageType('InfogainLossParameter', (_message.Message,), dict( - DESCRIPTOR = _INFOGAINLOSSPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.InfogainLossParameter) - )) -_sym_db.RegisterMessage(InfogainLossParameter) - -InnerProductParameter = _reflection.GeneratedProtocolMessageType('InnerProductParameter', (_message.Message,), dict( - DESCRIPTOR = _INNERPRODUCTPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.InnerProductParameter) - )) -_sym_db.RegisterMessage(InnerProductParameter) - -InputParameter = _reflection.GeneratedProtocolMessageType('InputParameter', (_message.Message,), dict( - DESCRIPTOR = _INPUTPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.InputParameter) - )) -_sym_db.RegisterMessage(InputParameter) - -LogParameter = _reflection.GeneratedProtocolMessageType('LogParameter', (_message.Message,), dict( - DESCRIPTOR = _LOGPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.LogParameter) - )) -_sym_db.RegisterMessage(LogParameter) - -LRNParameter = _reflection.GeneratedProtocolMessageType('LRNParameter', (_message.Message,), dict( - DESCRIPTOR = _LRNPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.LRNParameter) - )) -_sym_db.RegisterMessage(LRNParameter) - -MemoryDataParameter = _reflection.GeneratedProtocolMessageType('MemoryDataParameter', (_message.Message,), dict( - DESCRIPTOR = _MEMORYDATAPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.MemoryDataParameter) - )) -_sym_db.RegisterMessage(MemoryDataParameter) - -MVNParameter = _reflection.GeneratedProtocolMessageType('MVNParameter', (_message.Message,), dict( - DESCRIPTOR = _MVNPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.MVNParameter) - )) -_sym_db.RegisterMessage(MVNParameter) - -ParameterParameter = _reflection.GeneratedProtocolMessageType('ParameterParameter', (_message.Message,), dict( - DESCRIPTOR = _PARAMETERPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ParameterParameter) - )) -_sym_db.RegisterMessage(ParameterParameter) - -PoolingParameter = _reflection.GeneratedProtocolMessageType('PoolingParameter', (_message.Message,), dict( - DESCRIPTOR = _POOLINGPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.PoolingParameter) - )) -_sym_db.RegisterMessage(PoolingParameter) - -PowerParameter = _reflection.GeneratedProtocolMessageType('PowerParameter', (_message.Message,), dict( - DESCRIPTOR = 
_POWERPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.PowerParameter) - )) -_sym_db.RegisterMessage(PowerParameter) - -PythonParameter = _reflection.GeneratedProtocolMessageType('PythonParameter', (_message.Message,), dict( - DESCRIPTOR = _PYTHONPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.PythonParameter) - )) -_sym_db.RegisterMessage(PythonParameter) - -RecurrentParameter = _reflection.GeneratedProtocolMessageType('RecurrentParameter', (_message.Message,), dict( - DESCRIPTOR = _RECURRENTPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.RecurrentParameter) - )) -_sym_db.RegisterMessage(RecurrentParameter) - -ReductionParameter = _reflection.GeneratedProtocolMessageType('ReductionParameter', (_message.Message,), dict( - DESCRIPTOR = _REDUCTIONPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ReductionParameter) - )) -_sym_db.RegisterMessage(ReductionParameter) - -ReLUParameter = _reflection.GeneratedProtocolMessageType('ReLUParameter', (_message.Message,), dict( - DESCRIPTOR = _RELUPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ReLUParameter) - )) -_sym_db.RegisterMessage(ReLUParameter) - -ReshapeParameter = _reflection.GeneratedProtocolMessageType('ReshapeParameter', (_message.Message,), dict( - DESCRIPTOR = _RESHAPEPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ReshapeParameter) - )) -_sym_db.RegisterMessage(ReshapeParameter) - -ScaleParameter = _reflection.GeneratedProtocolMessageType('ScaleParameter', (_message.Message,), dict( - DESCRIPTOR = _SCALEPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ScaleParameter) - )) -_sym_db.RegisterMessage(ScaleParameter) - -SigmoidParameter = _reflection.GeneratedProtocolMessageType('SigmoidParameter', (_message.Message,), dict( - DESCRIPTOR = _SIGMOIDPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.SigmoidParameter) - )) -_sym_db.RegisterMessage(SigmoidParameter) - -SliceParameter = _reflection.GeneratedProtocolMessageType('SliceParameter', (_message.Message,), dict( - DESCRIPTOR = _SLICEPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.SliceParameter) - )) -_sym_db.RegisterMessage(SliceParameter) - -SoftmaxParameter = _reflection.GeneratedProtocolMessageType('SoftmaxParameter', (_message.Message,), dict( - DESCRIPTOR = _SOFTMAXPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.SoftmaxParameter) - )) -_sym_db.RegisterMessage(SoftmaxParameter) - -TanHParameter = _reflection.GeneratedProtocolMessageType('TanHParameter', (_message.Message,), dict( - DESCRIPTOR = _TANHPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.TanHParameter) - )) -_sym_db.RegisterMessage(TanHParameter) - -TileParameter = _reflection.GeneratedProtocolMessageType('TileParameter', (_message.Message,), dict( - DESCRIPTOR = _TILEPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.TileParameter) - )) -_sym_db.RegisterMessage(TileParameter) - -ThresholdParameter = _reflection.GeneratedProtocolMessageType('ThresholdParameter', (_message.Message,), dict( - DESCRIPTOR = _THRESHOLDPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.ThresholdParameter) - )) -_sym_db.RegisterMessage(ThresholdParameter) - -WindowDataParameter = 
_reflection.GeneratedProtocolMessageType('WindowDataParameter', (_message.Message,), dict( - DESCRIPTOR = _WINDOWDATAPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.WindowDataParameter) - )) -_sym_db.RegisterMessage(WindowDataParameter) - -SPPParameter = _reflection.GeneratedProtocolMessageType('SPPParameter', (_message.Message,), dict( - DESCRIPTOR = _SPPPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.SPPParameter) - )) -_sym_db.RegisterMessage(SPPParameter) - -V1LayerParameter = _reflection.GeneratedProtocolMessageType('V1LayerParameter', (_message.Message,), dict( - DESCRIPTOR = _V1LAYERPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.V1LayerParameter) - )) -_sym_db.RegisterMessage(V1LayerParameter) - -V0LayerParameter = _reflection.GeneratedProtocolMessageType('V0LayerParameter', (_message.Message,), dict( - DESCRIPTOR = _V0LAYERPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.V0LayerParameter) - )) -_sym_db.RegisterMessage(V0LayerParameter) - -PReLUParameter = _reflection.GeneratedProtocolMessageType('PReLUParameter', (_message.Message,), dict( - DESCRIPTOR = _PRELUPARAMETER, - __module__ = 'caffe_pb2' - # @@protoc_insertion_point(class_scope:caffe.PReLUParameter) - )) -_sym_db.RegisterMessage(PReLUParameter) - - -_BLOBSHAPE.fields_by_name['dim']._options = None -_BLOBPROTO.fields_by_name['data']._options = None -_BLOBPROTO.fields_by_name['diff']._options = None -_BLOBPROTO.fields_by_name['double_data']._options = None -_BLOBPROTO.fields_by_name['double_diff']._options = None -# @@protoc_insertion_point(module_scope) diff --git a/caffe2fluid/proto/compile.sh b/caffe2fluid/proto/compile.sh deleted file mode 100755 index 5743d9c..0000000 --- a/caffe2fluid/proto/compile.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -#function: -# script used to generate caffepb.py from caffe.proto using protoc -# - -PROTOC=`which protoc` -if [[ -z $PROTOC ]];then - echo "not found protoc, you should first install it following this[https://github.com/google/protobuf/releases]" - exit 1 -fi - -WORK_ROOT=$(dirname `readlink -f "$BASH_SOURCE[0]"`) -PY_NAME="$WORK_ROOT/caffe_pb2.py" -$PROTOC --proto_path=$WORK_ROOT --python_out=$WORK_ROOT $WORK_ROOT/caffe.proto -ret=$? - -if [ -e "$PY_NAME" ];then - echo "succeed to generate [$PY_NAME]" - exit 0 -else - echo "failed to generate [$PY_NAME]" -fi -exit $ret diff --git a/onnx2fluid/.gitignore b/onnx2fluid/.gitignore deleted file mode 100644 index 2454dac..0000000 --- a/onnx2fluid/.gitignore +++ /dev/null @@ -1,60 +0,0 @@ -# Virtualenv -/.venv/ -/venv/ - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] - -# C extensions -*.so - -# Distribution / packaging -/bin/ -/build/ -/develop-eggs/ -/dist/ -/eggs/ -/lib/ -/lib64/ -/output/ -/parts/ -/sdist/ -/var/ -/*.egg-info/ -/.installed.cfg -/*.egg -/.eggs - -# AUTHORS and ChangeLog will be generated while packaging -/AUTHORS -/ChangeLog - -# BCloud / BuildSubmitter -/build_submitter.* -/logger_client_log - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -.tox/ -.coverage -.cache -.pytest_cache -nosetests.xml -coverage.xml - -# Translations -*.mo - -# Sphinx documentation -/docs/_build/ - -/examples/*/ -/examples/*.gz -/examples/*.aria2 -/examples/*.onnx -/examples/*.np? 
-**/.* diff --git a/onnx2fluid/README.md b/onnx2fluid/README.md deleted file mode 100644 index 9468824..0000000 --- a/onnx2fluid/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# onnx2fluid - -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) - -onnx2fluid converts ONNX models into PaddlePaddle models for inference; a PyTorch model can also be converted by first exporting it to ONNX and then running onnx2fluid on the exported model. - -## Features - -* exports both Python code and a fluid ProgramDesc model -* weights can be embedded into supported operators -* conversion, validation and archiving in one tool -* the conversion itself does not depend on PaddlePaddle -* operators can be freely extended - -## Environment - -Tested successfully with the following configuration: - -* python 3.5+ -* onnx == 1.4.1 -* paddlepaddle == 1.5.0 (optional, only used for validation) - -With [Anaconda](https://docs.anaconda.com/anaconda/install): -``` shell -conda install -c conda-forge onnx -pip install paddlepaddle==1.5.0 -``` - -## Get started - -Test the official ONNX pretrained models, including alexnet, googlenet, caffenet, rcnn, -inception_v1, inception_v2, resnet50, shufflenet, squeezenet, -vgg19, zfnet512 and more: - -``` shell -python setup.py install -cd examples -sh onnx_model_zoo.sh -``` - -Build a model with PyTorch, export it to ONNX, then convert and validate: - -``` shell -python setup.py install -cd examples -python gen_some_samples.py -onnx2fluid sample_1.onnx -t sample_1.npz -``` - -## Usage - -A subset of **ONNX opset 9+** operators is currently supported, corresponding to PyTorch **1.0/1.1 (stable opset)**; for more compatibility information see the [ONNX documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md) - -onnx2fluid: - -```shell -onnx2fluid [-dexy] [-o /path/to/export_dir/] [-z archive.zip] [-t test_data.npz] [-i [input_name1,input_name2]] /path/to/onnx/model.onnx - -optional arguments: - --debug, -d enable debugging - --embed_params, -e try to embed weights - --no-pedantic, -x convert non-standard ONNX operators - --skip-version-conversion, -y - skip ONNX operator version conversion - --output_dir, -o output directory - --archive [ARCHIVE], -z [ARCHIVE] - archive to the given ZIP file if validation passes - --infer_inputs, -i [input_name1,input_name2] - invoke PaddlePaddle fluid type-shape inference to refine the model -``` - -The conversion tool onnx2fluid.conversion: - -```shell -onnx2fluid.conversion [-dexy] [-o /path/to/export_dir/] /path/to/onnx/model.onnx -``` - -The validation tool onnx2fluid.validate: - -```shell -onnx2fluid.validate [-d] [-t test_data.npz] [-i [input_name1,input_name2]] [-p 1e-3] /path/to/onnx/model.onnx -``` - -## Reference - -* PaddlePaddle [operators](http://www.paddlepaddle.org/documentation/docs/zh/1.5/api_cn/layers_cn.html) -* PaddlePaddle [loading an inference model](http://www.paddlepaddle.org/documentation/docs/zh/1.5/api_guides/low_level/inference.html#id4) diff --git a/onnx2fluid/README_en.md b/onnx2fluid/README_en.md deleted file mode 100644 index 2a87822..0000000 --- a/onnx2fluid/README_en.md +++ /dev/null @@ -1,85 +0,0 @@ -# onnx2fluid - -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) - -onnx2fluid supports converting ONNX models to PaddlePaddle fluid models for prediction. - -PyTorch to PaddlePaddle model conversion can be easily achieved with the PyTorch ONNX export functions.
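As a minimal, hypothetical sketch of that workflow, a toy module can be exported to ONNX with the standard `torch.onnx.export` API and the resulting file handed to the `onnx2fluid` command line shown in the usage section; the module, tensor shapes and file name below are illustrative only.

```python
# Hypothetical sketch: export a toy PyTorch module to ONNX, then convert it
# with the onnx2fluid CLI documented in this README. Names and shapes are made up.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
model.eval()

dummy_input = torch.rand((1, 4))  # tracing input for the ONNX exporter
torch.onnx.export(model, dummy_input, 'sample_linear.onnx',
                  input_names=['x'], output_names=['y'], verbose=True)

# then, from the shell (see the usage section below):
#   onnx2fluid -e -o /tmp/export/ sample_linear.onnx
```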
- -## Features - -* Python code + ProgramDesc proto generation, flexible and compatible -* fluid layer weight embedding support -* conversion, validation, archiving all in one -* convert without PaddlePaddle dependency -* export and validation helper functions for PyTorch to PaddlePaddle conversion -* extra ONNX operator optimization for inference -* easily extensible for user-defined operators - -## Environment and dependencies - -* python 3.5+ (python 2 not fully supported yet) -* onnx >= 1.4 -* paddlepaddle >= 1.3.0 (optional for validation) - -## Get started - -Test with pretrained models from ONNX repositories: - -``` shell -python setup.py install -cd examples -sh onnx_model_zoo.sh -``` - -Try exporting and validating from PyTorch to PaddlePaddle fluid: - -``` shell -python setup.py install -cd examples - -python gen_some_samples.py -onnx2fluid sample_1.onnx -t sample_1.npz - -python gen_unet.py -onnx2fluid sample_unet.onnx -t sample_unet.npz -``` - -## Usage - -**ONNX opset 9+** is mainly supported, corresponding to PyTorch **1.0/1.1 (stable opset)**; for more information see the [ONNX doc](https://github.com/onnx/onnx/blob/master/docs/Operators.md) - -onnx2fluid (all in one): - -```shell -onnx2fluid [-dexy] [-o /path/to/export_dir/] [-z archive.zip] [-t test_data.npz] [-i [input_name1,input_name2]] /path/to/onnx/model.onnx - -optional arguments: - --debug, -d enable debug logging and checking - --embed_params, -e try to embed parameters for trainable PaddlePaddle fluid layers - --no-pedantic, -x process non-standard ONNX ops - --skip-version-conversion, -y - skip ONNX op version conversion, workaround for RuntimeErrors - --output_dir, -o output directory - --archive [ARCHIVE], -z [ARCHIVE] - compress outputs to a ZIP file if the conversion succeeds - --infer_inputs, -i [input_name1,input_name2] - invoke PaddlePaddle fluid type-shape inference -``` - -onnx2fluid.conversion: - -```shell -onnx2fluid.conversion [-dexy] [-o /path/to/export_dir/] /path/to/onnx/model.onnx -``` - -onnx2fluid.validate: - -```shell -onnx2fluid.validate [-d] [-t test_data.npz] [-i [input_name1,input_name2]] [-p 1e-3] /path/to/onnx/model.onnx -``` - -## Reference - -* [PaddlePaddle fluid operators](http://www.paddlepaddle.org/documentation/docs/en/1.5/api/layers.html) -* load converted models via [load_inference_model](http://www.paddlepaddle.org/documentation/docs/en/1.5/api/io.html#permalink-1-load_inference_model) diff --git a/onnx2fluid/examples/convert_data_npz.py b/onnx2fluid/examples/convert_data_npz.py deleted file mode 100644 index 0bf613d..0000000 --- a/onnx2fluid/examples/convert_data_npz.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 27 11:50:03 2019 - -@author: Macrobull -""" - -import sys -import numpy as np - -from collections import OrderedDict as Dict - - -def make_var_name(name): - """ - make a valid variable name in Python code - """ - - assert name - - if name[0].isdigit(): - return 'var_' + name - for s in ' \\|/:-': # - name = name.replace(s, '_') - if name.startswith('_'): - name = 'var' + name - return name - - -fn = sys.argv[1] -input_names = sys.argv[2].split(',') -output_names = sys.argv[3].split(',') -squeeze_data = len(sys.argv) > 4 - -data = np.load(fn, encoding='bytes') -input_data = data['inputs'] -output_data = data['outputs'] - -while squeeze_data and input_data.ndim > 4 and input_data.shape[0] == 1: - input_data = input_data.squeeze(0) -while squeeze_data and output_data.ndim > 2 and output_data.shape[0] == 1: - output_data =
output_data.squeeze(0) - -inputs = Dict(zip(map(make_var_name, input_names), [input_data])) -outputs = Dict(zip(map(make_var_name, output_names), [output_data])) - -np.savez(fn, inputs=inputs, outputs=outputs) # overwrite diff --git a/onnx2fluid/examples/convert_data_pb.py b/onnx2fluid/examples/convert_data_pb.py deleted file mode 100644 index f48f72e..0000000 --- a/onnx2fluid/examples/convert_data_pb.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 27 11:50:03 2019 - -@author: Macrobull -""" - -import os, sys -import numpy as np -import onnx -import onnx.numpy_helper as numpy_helper - -from collections import OrderedDict as Dict -from glob import glob - - -def make_var_name(name): - """ - make a valid variable name in Python code - """ - - assert name - - if name[0].isdigit(): - return 'var_' + name - for s in ' \\|/:-': # - name = name.replace(s, '_') - if name.startswith('_'): - name = 'var' + name - return name - - -data_dir = os.path.dirname(sys.argv[1]) -input_names = sys.argv[2].split(',') -output_names = sys.argv[3].split(',') -squeeze_data = len(sys.argv) > 4 - -# Load inputs -inputs = [] -for fn in glob(os.path.join(data_dir, 'input_*.pb')): - tensor = onnx.TensorProto() - with open(fn, 'rb') as f: - tensor.ParseFromString(f.read()) - tensor = numpy_helper.to_array(tensor) - while squeeze_data and tensor.ndim > 4 and tensor.shape[0] == 1: - tensor = tensor.squeeze(0) - inputs.append(tensor) - -# Load outputs -outputs = [] -for fn in glob(os.path.join(data_dir, 'output_*.pb')): - tensor = onnx.TensorProto() - with open(fn, 'rb') as f: - tensor.ParseFromString(f.read()) - tensor = numpy_helper.to_array(tensor) - while squeeze_data and tensor.ndim > 2 and tensor.shape[0] == 1: - tensor = tensor.squeeze(0) - outputs.append(tensor) - -inputs = Dict(zip(map(make_var_name, input_names), inputs)) -outputs = Dict(zip(map(make_var_name, output_names), outputs)) - -np.savez(data_dir, inputs=inputs, outputs=outputs) diff --git a/onnx2fluid/examples/gen_some_samples.py b/onnx2fluid/examples/gen_some_samples.py deleted file mode 100644 index 01ec25f..0000000 --- a/onnx2fluid/examples/gen_some_samples.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 22 11:19:45 2019 - -@author: Macrobull - -Not all ops in this file are supported by both PyTorch and ONNX -This only demostrates the conversion/validation workflow from PyTorch to ONNX to Paddle fluid -""" - -from __future__ import print_function - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from onnx2fluid.torch_export_helper import export_onnx_with_validation - -prefix = 'sample_' -idx = 0 - -######## example: RNN cell ######## - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - self.gru = nn.GRUCell(6, 5) - self.lstm = nn.LSTMCell(5, 4) - - def forward(self, x, h1, h2, c2): - h = self.gru(x, h1) - h, c = self.lstm(h, (h2, c2)) - return h, c - - -model = Model() -model.eval() -xb = torch.rand((7, 6)) -h1 = torch.zeros((7, 5)) -h2 = torch.zeros((7, 4)) -c2 = torch.zeros((7, 4)) -yp = model(xb, h1, h2, c2) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb, h1, h2, c2], - prefix + str(idx), ['x', 'h1', 'h2', 'c2'], - ['h', 'c'], - verbose=True, - training=False) - -######## example: RNN ######## - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - self.gru = nn.GRU(6, 5, 3) - self.lstm = nn.LSTM(5, 4, 2) - - def 
forward(self, x, h1, h2, c2): - y, h1 = self.gru(x, h1) - y, (h2, c2) = self.lstm(y, (h2, c2)) - return y - - -model = Model() -model.eval() -xb = torch.rand((8, 1, 6)) -h1 = torch.zeros((3, 1, 5)) -h2 = torch.zeros((2, 1, 4)) -c2 = torch.zeros((2, 1, 4)) -yp = model(xb, h1, h2, c2) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb, h1, h2, c2], - prefix + str(idx), ['x', 'h1', 'h2', 'c2'], ['y'], - verbose=True, - training=False) - -######## example: random ######## -""" - symbolic registration: - - def rand(g, *shapes): - shapes_list = list(shapes) - shape = _maybe_get_const(shapes_list[0], "is") - return g.op('RandomUniform', shape_i=shape) -""" - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - - def forward(self, x): - y = torch.rand((2, 3)) # + torch.rand_like(x) - y = y + torch.randn((2, 3)) # + torch.randn_like(x) - y = y + x - return y - - -model = Model() -model.eval() -xb = torch.rand((2, 3)) -yp = model(xb) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb], - prefix + str(idx), ['x'], ['y'], - verbose=True, - training=False) - -######## example: fc ######## - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - self.fc = nn.Linear(3, 8) - - def forward(self, x): - y = x - y = self.fc(y) - return y - - -model = Model() -model.eval() -xb = torch.rand((2, 3)) -yp = model(xb) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb], - prefix + str(idx), ['x'], ['y'], - verbose=True, - training=False) - -######## example: compare ######## - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - - def forward(self, x0, x1): - x0 = x0.clamp(-1, 1) - a = torch.max(x0, x1) == x1 - b = x0 < x1 - c = x0 > x1 - return a, b, c - - -model = Model() -model.eval() -xb0 = torch.rand((2, 3)) -xb1 = torch.rand((2, 3)) -ya, yb, yc = model(xb0, xb1) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb0, xb1], - prefix + str(idx), ['x0', 'x1'], ['ya', 'yb', 'yc'], - verbose=True, - training=False) - -######## example: affine_grid ######## -""" - symbolic registration: - - @parse_args('v', 'is') - def affine_grid_generator(g, theta, size): - return g.op('AffineGrid', theta, size_i=size) -""" - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - - def forward(self, theta): - grid = F.affine_grid(theta, (2, 2, 8, 8)) - return grid - - -model = Model() -model.eval() -theta = torch.rand((2, 2, 3)) -grid = model(theta) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, (theta, ), - prefix + str(idx), ['theta'], ['grid'], - verbose=True, - training=False) - -######## example: conv2d_transpose ######## - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - self.conv = nn.ConvTranspose2d(3, 8, 3) - self.dropout = nn.Dropout2d() - - def forward(self, x): - y = x - y = self.conv(y) - y = self.dropout(y) - return y - - -model = Model() -model.eval() -xb = torch.rand((2, 3, 4, 5)) -yp = model(xb) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb], - prefix + str(idx), ['x'], ['y'], - verbose=True, - training=False) - -######## example: conv2d ######## - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - self.conv = nn.Conv2d(3, 8, 3) - self.batch_norm = nn.BatchNorm2d(8) - self.pool = nn.AdaptiveAvgPool2d(1) - - def forward(self, x): - y = x - y = self.conv(y) - y = self.batch_norm(y) - y = self.pool(y) - 
return y - - -model = Model() -model.eval() -xb = torch.rand((2, 3, 4, 5)) -yp = model(xb) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb], - prefix + str(idx), ['x'], ['y'], - verbose=True, - training=False) - -######### example: conv1d ######## -# -#class Model(nn.Module): -# def __init__(self): -# super(Model, self).__init__() -# self.batch_norm = nn.BatchNorm2d(3) -# -# def forward(self, x): -# y = x -# y = self.batch_norm(y) -# return y -# -# -#model = Model() -#model.eval() -#xb = torch.rand((2, 3, 4, 5)) -#yp = model(xb) -#idx += 1 -#print('index: ', idx) -#export_onnx_with_validation( -# model, [xb], prefix + str(idx), -# ['x'], ['y'], -# verbose=True, training=False) - -######## example: empty ######## - - -class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - - def forward(self, x): - return x - - -model = Model() -model.eval() -xb = torch.rand((2, 3)) -yp = model(xb) -idx += 1 -print('index: ', idx) -export_onnx_with_validation(model, [xb], - prefix + str(idx), ['y'], ['y'], - verbose=True, - training=False) diff --git a/onnx2fluid/examples/gen_unet.py b/onnx2fluid/examples/gen_unet.py deleted file mode 100644 index 501a6d5..0000000 --- a/onnx2fluid/examples/gen_unet.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 22 11:19:45 2019 - -@author: Macrobull -""" - -from __future__ import print_function - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from onnx2fluid.torch_export_helper import export_onnx_with_validation - - -# from https://github.com/milesial/Pytorch-UNet -class double_conv(nn.Module): - '''(conv => BN => ReLU) * 2''' - - def __init__(self, in_ch, out_ch): - super(double_conv, self).__init__() - self.conv = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, padding=1), - nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True), - nn.Conv2d(out_ch, out_ch, 3, padding=1), - nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True)) - - def forward(self, x): - x = self.conv(x) - return x - - -class inconv(nn.Module): - def __init__(self, in_ch, out_ch): - super(inconv, self).__init__() - self.conv = double_conv(in_ch, out_ch) - - def forward(self, x): - x = self.conv(x) - return x - - -class down(nn.Module): - def __init__(self, in_ch, out_ch): - super(down, self).__init__() - self.mpconv = nn.Sequential(nn.MaxPool2d(2), double_conv(in_ch, out_ch)) - - def forward(self, x): - x = self.mpconv(x) - return x - - -class up(nn.Module): - def __init__(self, in_ch, out_ch, bilinear=True): - super(up, self).__init__() - - # would be a nice idea if the upsampling could be learned too, - # but my machine do not have enough memory to handle all those weights - if bilinear: - self.up = nn.Upsample(scale_factor=2, - mode='bilinear') #, align_corners=True) - else: - self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2) - - self.conv = double_conv(in_ch, out_ch) - - def forward(self, x1, x2): - x1 = self.up(x1) - - # input is CHW - if hasattr(self, 'diffY'): - diffY = self.diffY - diffX = self.diffX - else: - diffY = self.diffY = x2.size()[2] - x1.size()[2] - diffX = self.diffX = x2.size()[3] - x1.size()[3] - - x1 = F.pad( - x1, - (diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2)) - - # for padding issues, see - # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a - # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd - - x = torch.cat([x2, x1], 
dim=1) - x = self.conv(x) - return x - - -class outconv(nn.Module): - def __init__(self, in_ch, out_ch): - super(outconv, self).__init__() - self.conv = nn.Conv2d(in_ch, out_ch, 1) - - def forward(self, x): - x = self.conv(x) - return x - - -class UNet(nn.Module): - def __init__(self, n_channels, n_classes): - super(UNet, self).__init__() - self.inc = inconv(n_channels, 64) - self.down1 = down(64, 128) - self.down2 = down(128, 256) - self.down3 = down(256, 512) - self.down4 = down(512, 512) - self.up1 = up(1024, 256) - self.up2 = up(512, 128) - self.up3 = up(256, 64) - self.up4 = up(128, 64) - self.outc = outconv(64, n_classes) - - def forward(self, x): - x1 = self.inc(x) - x2 = self.down1(x1) - x3 = self.down2(x2) - x4 = self.down3(x3) - x5 = self.down4(x4) - x = self.up1(x5, x4) - x = self.up2(x, x3) - x = self.up3(x, x2) - x = self.up4(x, x1) - x = self.outc(x) - return F.sigmoid(x) - - -model = UNet(3, 80) -model.eval() -xb = torch.rand((1, 3, 512, 512)) -yp = model(xb) -export_onnx_with_validation(model, [xb], - 'sample_unet', ['image'], ['pred'], - verbose=True, - training=False) diff --git a/onnx2fluid/examples/gen_yolov2.py b/onnx2fluid/examples/gen_yolov2.py deleted file mode 100644 index 076dfce..0000000 --- a/onnx2fluid/examples/gen_yolov2.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 22 11:19:45 2019 - -@author: Macrobull -""" - -from __future__ import print_function - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from onnx2fluid.torch_export_helper import export_onnx_with_validation - - -# from https://github.com/santoshgsk/yolov2-pytorch/blob/master/yolotorch.ipynb -class Yolov2(nn.Module): - def __init__(self): - super(Yolov2, self).__init__() - - self.conv1 = nn.Conv2d(in_channels=3, - out_channels=32, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm1 = nn.BatchNorm2d(32) - - self.conv2 = nn.Conv2d(in_channels=32, - out_channels=64, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm2 = nn.BatchNorm2d(64) - - self.conv3 = nn.Conv2d(in_channels=64, - out_channels=128, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm3 = nn.BatchNorm2d(128) - self.conv4 = nn.Conv2d(in_channels=128, - out_channels=64, - kernel_size=1, - stride=1, - padding=0, - bias=False) - self.batchnorm4 = nn.BatchNorm2d(64) - self.conv5 = nn.Conv2d(in_channels=64, - out_channels=128, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm5 = nn.BatchNorm2d(128) - - self.conv6 = nn.Conv2d(in_channels=128, - out_channels=256, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm6 = nn.BatchNorm2d(256) - self.conv7 = nn.Conv2d(in_channels=256, - out_channels=128, - kernel_size=1, - stride=1, - padding=0, - bias=False) - self.batchnorm7 = nn.BatchNorm2d(128) - self.conv8 = nn.Conv2d(in_channels=128, - out_channels=256, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm8 = nn.BatchNorm2d(256) - - self.conv9 = nn.Conv2d(in_channels=256, - out_channels=512, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm9 = nn.BatchNorm2d(512) - self.conv10 = nn.Conv2d(in_channels=512, - out_channels=256, - kernel_size=1, - stride=1, - padding=0, - bias=False) - self.batchnorm10 = nn.BatchNorm2d(256) - self.conv11 = nn.Conv2d(in_channels=256, - out_channels=512, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm11 = nn.BatchNorm2d(512) - self.conv12 = 
nn.Conv2d(in_channels=512, - out_channels=256, - kernel_size=1, - stride=1, - padding=0, - bias=False) - self.batchnorm12 = nn.BatchNorm2d(256) - self.conv13 = nn.Conv2d(in_channels=256, - out_channels=512, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm13 = nn.BatchNorm2d(512) - - self.conv14 = nn.Conv2d(in_channels=512, - out_channels=1024, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm14 = nn.BatchNorm2d(1024) - self.conv15 = nn.Conv2d(in_channels=1024, - out_channels=512, - kernel_size=1, - stride=1, - padding=0, - bias=False) - self.batchnorm15 = nn.BatchNorm2d(512) - self.conv16 = nn.Conv2d(in_channels=512, - out_channels=1024, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm16 = nn.BatchNorm2d(1024) - self.conv17 = nn.Conv2d(in_channels=1024, - out_channels=512, - kernel_size=1, - stride=1, - padding=0, - bias=False) - self.batchnorm17 = nn.BatchNorm2d(512) - self.conv18 = nn.Conv2d(in_channels=512, - out_channels=1024, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm18 = nn.BatchNorm2d(1024) - - self.conv19 = nn.Conv2d(in_channels=1024, - out_channels=1024, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm19 = nn.BatchNorm2d(1024) - self.conv20 = nn.Conv2d(in_channels=1024, - out_channels=1024, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm20 = nn.BatchNorm2d(1024) - - self.conv21 = nn.Conv2d(in_channels=3072, - out_channels=1024, - kernel_size=3, - stride=1, - padding=1, - bias=False) - self.batchnorm21 = nn.BatchNorm2d(1024) - - self.conv22 = nn.Conv2d(in_channels=1024, - out_channels=125, - kernel_size=1, - stride=1, - padding=0) - - def reorg_layer(self, x): - stride = 2 - if hasattr(self, 'batch_size'): - batch_size, channels, height, width = self.batch_size, self.channels, self.height, self.width - new_ht = self.new_ht - new_wd = self.new_wd - new_channels = self.new_channels - else: - batch_size, channels, height, width = self.batch_size, self.channels, self.height, self.width = x.size( - ) - new_ht = self.new_ht = height // stride - new_wd = self.new_wd = width // stride - new_channels = self.new_channels = channels * stride * stride - - passthrough = x.permute(0, 2, 3, 1) - passthrough = passthrough.contiguous().view(-1, new_ht, stride, new_wd, - stride, channels) - passthrough = passthrough.permute(0, 1, 3, 2, 4, 5) - passthrough = passthrough.contiguous().view(-1, new_ht, new_wd, - new_channels) - passthrough = passthrough.permute(0, 3, 1, 2) - return passthrough - - def forward(self, x): - out = F.max_pool2d(F.leaky_relu(self.batchnorm1(self.conv1(x)), - negative_slope=0.1), - 2, - stride=2) - out = F.max_pool2d(F.leaky_relu(self.batchnorm2(self.conv2(out)), - negative_slope=0.1), - 2, - stride=2) - - out = F.leaky_relu(self.batchnorm3(self.conv3(out)), negative_slope=0.1) - out = F.leaky_relu(self.batchnorm4(self.conv4(out)), negative_slope=0.1) - out = F.leaky_relu(self.batchnorm5(self.conv5(out)), negative_slope=0.1) - out = F.max_pool2d(out, 2, stride=2) - - out = F.leaky_relu(self.batchnorm6(self.conv6(out)), negative_slope=0.1) - out = F.leaky_relu(self.batchnorm7(self.conv7(out)), negative_slope=0.1) - out = F.leaky_relu(self.batchnorm8(self.conv8(out)), negative_slope=0.1) - out = F.max_pool2d(out, 2, stride=2) - - out = F.leaky_relu(self.batchnorm9(self.conv9(out)), negative_slope=0.1) - out = F.leaky_relu(self.batchnorm10(self.conv10(out)), - negative_slope=0.1) - out = 
F.leaky_relu(self.batchnorm11(self.conv11(out)), - negative_slope=0.1) - out = F.leaky_relu(self.batchnorm12(self.conv12(out)), - negative_slope=0.1) - out = F.leaky_relu(self.batchnorm13(self.conv13(out)), - negative_slope=0.1) - passthrough = self.reorg_layer(out) - out = F.max_pool2d(out, 2, stride=2) - - out = F.leaky_relu(self.batchnorm14(self.conv14(out)), - negative_slope=0.1) - out = F.leaky_relu(self.batchnorm15(self.conv15(out)), - negative_slope=0.1) - out = F.leaky_relu(self.batchnorm16(self.conv16(out)), - negative_slope=0.1) - out = F.leaky_relu(self.batchnorm17(self.conv17(out)), - negative_slope=0.1) - out = F.leaky_relu(self.batchnorm18(self.conv18(out)), - negative_slope=0.1) - - out = F.leaky_relu(self.batchnorm19(self.conv19(out)), - negative_slope=0.1) - out = F.leaky_relu(self.batchnorm20(self.conv20(out)), - negative_slope=0.1) - - out = torch.cat([passthrough, out], 1) - out = F.leaky_relu(self.batchnorm21(self.conv21(out)), - negative_slope=0.1) - out = self.conv22(out) - - return out - - -model = Yolov2() -model.eval() -xb = torch.rand((1, 3, 224, 224)) -yp = model(xb) -export_onnx_with_validation(model, [xb], - 'sample_yolov2', ['image'], ['pred'], - verbose=True, - training=False) diff --git a/onnx2fluid/examples/onnx_model_zoo.sh b/onnx2fluid/examples/onnx_model_zoo.sh deleted file mode 100755 index 7f62585..0000000 --- a/onnx2fluid/examples/onnx_model_zoo.sh +++ /dev/null @@ -1,634 +0,0 @@ -#! /usr/bin/env sh - -# setopt SH_WORD_SPLIT # if zsh - -# alias python="python3" # if ... -# alias http_get="wget -c" # if no aria2 -alias http_get="aria2c -c -s8 -x8" - -base_url="https://s3.amazonaws.com/download.onnx/models/opset_9/" -convert_cmd="python -m onnx2fluid" -validate_cmd="$convert_cmd.validation" -convert_flags="-e -o /tmp/export/" -validate_flags1="/tmp/export/model.py" -validate_flags2="/tmp/export/__model__" -validate_flags3="/tmp/export/__model__ -i" - - -bvlc_alexnet() -{ - bn_tar="bvlc_alexnet" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for npz in "$bn_tar/"*.npz - do - echo "converting $npz ..." - python convert_data_npz.py "$npz" data_0 prob_1 -s - $validate_cmd $validate_flags1 -t "$npz" - $validate_cmd $validate_flags2 -t "$npz" - done - $validate_cmd $validate_flags3 -t "$npz" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data_0 prob_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -bvlc_googlenet() -{ - bn_tar="bvlc_googlenet" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" data_0 prob_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -bvlc_reference_caffenet() -{ - bn_tar="bvlc_reference_caffenet" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." 
- tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" data_0 prob_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -bvlc_reference_rcnn_ilsvrc13() -{ - bn_tar="bvlc_reference_rcnn_ilsvrc13" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" data_0 fc-rcnn_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -densenet121() -{ - bn_tar="densenet121" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for npz in "$bn_tar/"*.npz - do - echo "converting $npz ..." - python convert_data_npz.py "$npz" data_0 fc6_1 -s - $validate_cmd $validate_flags1 -t "$npz" - $validate_cmd $validate_flags2 -t "$npz" - done - $validate_cmd $validate_flags3 -t "$npz" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" data_0 fc6_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -emotion_ferplus() -{ - bn_tar="emotion_ferplus" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "https://onnxzoo.blob.core.windows.net/models/opset_8/emotion_ferplus/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" Input3 Plus692_Output_0 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -inception_v1() -{ - bn_tar="inception_v1" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for npz in "$bn_tar/"*.npz - do - echo "converting $npz ..." - python convert_data_npz.py "$npz" data_0 prob_1 -s - $validate_cmd $validate_flags1 -t "$npz" - $validate_cmd $validate_flags2 -t "$npz" - done - $validate_cmd $validate_flags3 -t "$npz" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data_0 prob_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -inception_v2() -{ - bn_tar="inception_v2" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." 
- tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for npz in "$bn_tar/"*.npz - do - echo "converting $npz ..." - python convert_data_npz.py "$npz" data_0 prob_1 -s - $validate_cmd $validate_flags1 -t "$npz" - $validate_cmd $validate_flags2 -t "$npz" - done - $validate_cmd $validate_flags3 -t "$npz" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data_0 prob_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -mobilenet() -{ - bn_tar="mobilenetv2-1.0" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/$bn_tar.onnx" - - http_get "https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data mobilenetv20_output_flatten0_reshape0 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -resnet18() -{ - bn_tar="resnet18v1" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/$bn_tar.onnx" - - http_get "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v1/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data resnetv15_dense0_fwd - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -resnet50() -{ - bn_tar="resnet50" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for npz in "$bn_tar/"*.npz - do - echo "converting $npz ..." - python convert_data_npz.py "$npz" gpu_0/data_0 gpu_0/softmaxout_1 -s - $validate_cmd $validate_flags1 -t "$npz" - $validate_cmd $validate_flags2 -t "$npz" - done - $validate_cmd $validate_flags3 -t "$npz" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" gpu_0/data_0 gpu_0/softmaxout_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -resnet100_arcface() -{ - bn_tar="resnet100" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/$bn_tar.onnx" - - http_get "https://s3.amazonaws.com/onnx-model-zoo/arcface/resnet100/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." 
- python convert_data_pb.py "$pb_dir" data fc1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -resnet101_duc() -{ - bn_tar="ResNet101_DUC_HDC" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/$bn_tar.onnx" - - http_get "https://s3.amazonaws.com/onnx-model-zoo/duc/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data seg_loss - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -resnet152() -{ - bn_tar="resnet152v2" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/$bn_tar.onnx" - - http_get "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet152v2/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data resnetv27_dense0_fwd - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -shufflenet() -{ - bn_tar="shufflenet" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" gpu_0/data_0 gpu_0/softmax_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -squeezenet() -{ - bn_tar="squeezenet" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" data_0 softmaxout_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -squeezenet1v1() -{ - bn_tar="squeezenet1.1" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/$bn_tar.onnx" - - http_get "https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." 
- python convert_data_pb.py "$pb_dir" data squeezenet0_flatten0_reshape0 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -ssd() -{ - bn_tar="ssd" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "https://onnxzoo.blob.core.windows.net/models/opset_10/ssd/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - mkdir "$bn_tar" - tar xf "$fn_tar" -C "$bn_tar/" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" image bboxes,labels,scores - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -tiny_yolov2() -{ - bn_tar="tiny_yolov2" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "https://onnxzoo.blob.core.windows.net/models/opset_8/tiny_yolov2/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" image grid - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -vgg16bn() -{ - bn_tar="vgg16-bn" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/$bn_tar.onnx" - - http_get "https://s3.amazonaws.com/onnx-model-zoo/vgg/vgg16-bn/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -y - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." - python convert_data_pb.py "$pb_dir" data vgg0_dense2_fwd - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -vgg19() -{ - bn_tar="vgg19" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" data_0 prob_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -yolov3() -{ - bn_tar="yolov3" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/yolov3.onnx" - - http_get "https://onnxzoo.blob.core.windows.net/models/opset_10/yolov3/$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" -x # - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir ..." 
- python convert_data_pb.py "$pb_dir" input_1:01,image_shape:01 yolonms_layer_1/ExpandDims_1:0,yolonms_layer_1/ExpandDims_3:0,yolonms_layer_1/concat_2:0 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - -zfnet512() -{ - bn_tar="zfnet512" - fn_tar="$bn_tar.tar.gz" - fn_model="$bn_tar/model.onnx" - - http_get "$base_url$fn_tar" - rm -rf "$bn_tar/" - echo "extracting ..." - tar xf "$fn_tar" - - $convert_cmd $convert_flags "$fn_model" - for pb_dir in "$bn_tar/"*/ - do - echo "converting $pb_dir" - python convert_data_pb.py "$pb_dir" gpu_0/data_0 gpu_0/softmax_1 - $validate_cmd $validate_flags1 -t $(dirname "$pb_dir/x").npz - $validate_cmd $validate_flags2 -t $(dirname "$pb_dir/x").npz - done - $validate_cmd $validate_flags3 -t $(dirname "$pb_dir/x").npz - - rm -rf "$bn_tar/" -} - - -bvlc_alexnet -bvlc_googlenet -bvlc_reference_caffenet -bvlc_reference_rcnn_ilsvrc13 -densenet121 -emotion_ferplus # not supported -inception_v1 -inception_v2 -mobilenet -resnet18 -resnet50 -resnet100_arcface -resnet101_duc -resnet152 -shufflenet -squeezenet # softmax bug -squeezenet1v1 -ssd # version not supported -tiny_yolov2 # not supported -vgg16bn -vgg19 -yolov3 # malformed model ? -zfnet512 diff --git a/onnx2fluid/onnx2fluid/__init__.py b/onnx2fluid/onnx2fluid/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/onnx2fluid/onnx2fluid/__main__.py b/onnx2fluid/onnx2fluid/__main__.py deleted file mode 100644 index f09f63e..0000000 --- a/onnx2fluid/onnx2fluid/__main__.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: UTF-8 -*- -################################################################################ -# -# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved -# -################################################################################ -""" -本文件允许模块包以python -m onnx2fluid方式直接执行。 - -Authors: Macrobull -Date: 2019/02/22 10:25:46 -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import argparse, logging, sys - -parser = argparse.ArgumentParser( - description='onnx2fluid', - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - 'model', - nargs=1, - help='path to model.onnx', -) -parser.add_argument( - '--debug', - '-d', - action='store_true', - help='enable debug logging and checking', -) -parser.add_argument( - '--output_dir', - '-o', - type=str, - default='', - help='output directory', -) -parser.add_argument( - '--test_data', - '-t', - type=str, - default='', - help='I/O golden data for validation, e.g. 
test.npy, test.npz', -) -parser.add_argument( - '--embed_params', - '-e', - action='store_true', - help='try to embed parameters for trainable Paddle fluid layers', -) -parser.add_argument( - '--pedantic', - action='store_true', - default=True, - help='accept and convert only standard ONNX opset', -) -parser.add_argument( - '--no-pedantic', - '-x', - action='store_false', - dest='pedantic', - help='process non-standard ONNX ops, this may lead to failures', -) -parser.add_argument( - '--skip-version-conversion', - '-y', - action='store_true', - default=False, - help='skip ONNX op version conversion, workaround for RuntimeErrors', -) -parser.add_argument( - '--archive', - '-z', - nargs='?', - type=str, - default=None, - const='', - help='compress outputs to a ZIP file if the conversion succeeds', -) -parser.add_argument( - '--atol', - '-p', - type=float, - default=1e-3, - help='assertion absolute tolerance for validation', -) -parser.add_argument( - '--rtol', - type=float, - default=1e-2, - help='assertion relative tolerance for validation', -) -parser.add_argument( - '--infer_inputs', - '-i', - nargs='?', - default=None, - const='', - help='perform type-shape inference with the given input names and re-save the model', -) -args = parser.parse_args() - -logging_format = '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s' -logging_level = logging.DEBUG if args.debug else logging.INFO -logging.basicConfig(format=logging_format, level=logging_level) - -from .cmdline import main - -sys.exit(main(**args.__dict__)) diff --git a/onnx2fluid/onnx2fluid/cmdline.py b/onnx2fluid/onnx2fluid/cmdline.py deleted file mode 100644 index 4f6a674..0000000 --- a/onnx2fluid/onnx2fluid/cmdline.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: UTF-8 -*- -################################################################################ -# -# Copyright (c) 2019 Baidu.com, Inc.
All Rights Reserved -# -################################################################################ -""" -本文件提供了命令行工具的入口逻辑。 - -Authors: Macrobull -Date: 2019/02/22 10:25:46 -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import logging, shutil, zipfile - -__all__ = [ - 'main', -] - -DEFAULT_MODEL_MODULE = 'model' -DEFAULT_MODEL_FUNC = 'inference' - - -def main(**kwargs): - """主程序入口""" - - from .conversion import DEFAULT_ONNX_OPSET_VERSION - from .conversion import convert - - logger = logging.getLogger('onnx2fluid') - # debug = kwargs.get('debug', False) - - # prepare arguments - filename = kwargs.pop('model')[0] - basepath, _ = shutil.os.path.splitext(filename) - save_dir = kwargs.pop('output_dir', '') - # model.onnx -> model/ - save_dir = (save_dir.rstrip(shutil.os.sep) - if save_dir else basepath) + shutil.os.sep - model_basename = DEFAULT_MODEL_MODULE + '.py' - model_func_name = DEFAULT_MODEL_FUNC - onnx_opset_pedantic = kwargs.pop('pedantic', True) - skip_version_conversion = kwargs.pop('skip_version_conversion', False) - onnx_opset_version = None if skip_version_conversion else DEFAULT_ONNX_OPSET_VERSION - - # convert - convert(filename, - save_dir, - model_basename=model_basename, - model_func_name=model_func_name, - onnx_opset_version=onnx_opset_version, - onnx_opset_pedantic=onnx_opset_pedantic, - **kwargs) - - # validate - passed = True - golden_data_filename = kwargs.pop('test_data', '') - infer_inputs = kwargs.pop('infer_inputs', None) - save_inference_model = infer_inputs is not None - if golden_data_filename or save_inference_model: - from .validation import validate - - if save_inference_model: - inference_input_names = infer_inputs.split(',') - else: - inference_input_names = None - - logger.info('starting validation on desc ...') - passed &= validate(shutil.os.path.join(save_dir, '__model__'), - golden_data_filename=golden_data_filename, - save_inference_model=save_inference_model, - inference_input_names=inference_input_names, - **kwargs) - - logger.info('starting validation on code ...') - # this re-generate desc proto with Python code when debug on - passed &= validate(shutil.os.path.join(save_dir, model_basename), - golden_data_filename=golden_data_filename, - model_func_name=model_func_name, - save_inference_model=save_inference_model, - inference_input_names=inference_input_names, - **kwargs) - - if not passed: - logger.fatal('validation failed, exit') - return - - # create zip file - archive = kwargs.pop('archive', None) - if archive is not None: - if archive == '': - archive = save_dir.rstrip(shutil.os.sep) + '.zip' - logger.info('compressing file to %s ...', archive) - shutil.sys.stderr.write('\n') - shutil.sys.stderr.flush() - file_list = shutil.os.listdir(save_dir) - fz = zipfile.ZipFile(archive, 'w', compression=zipfile.ZIP_LZMA) - for idx, fn in enumerate(file_list): - shutil.sys.stderr.write('\033[F\033[2K') - logger.info('file {}/{}: {}'.format(idx + 1, len(file_list), fn)) - shutil.sys.stderr.flush() - fz.write(shutil.os.path.join(save_dir, fn), arcname=fn) - fz.close() - logger.info('compressing done') - - -if __name__ == '__main__': - logging.basicConfig( - format= - '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s', - level=logging.DEBUG, - ) - - del main - - from onnx2fluid.cmdline import main - - main(model=['../examples/t1.onnx'], - output_dir='/tmp/export/', - embed_params=False, - pedantic=False, - 
test_data='../examples/t1.npz', - debug=True) - - main(model=['../examples/inception_v2/model.onnx'], - output_dir='/tmp/export/', - embed_params=True, - pedantic=False, - skip_version_conversion=False, - test_data='../examples/inception_v2/test_data_set_2.npz', - debug=True) diff --git a/onnx2fluid/onnx2fluid/conversion.py b/onnx2fluid/onnx2fluid/conversion.py deleted file mode 100644 index 4113b06..0000000 --- a/onnx2fluid/onnx2fluid/conversion.py +++ /dev/null @@ -1,347 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Mon Feb 25 09:50:35 2019 - -@author: Macrobull -""" - -from __future__ import division - -import logging, shutil - -__all__ = [ - 'convert', -] - -DEFAULT_ONNX_OPSET_VERSION = 9 - - -def make_var_name(name): - """ - make a valid variable name in Python code and filename in filesystem - """ - - if name == '': - return '' - if name[0].isdigit(): - return 'var_' + name - for s in ' \\|/:.-': - name = name.replace(s, '_') - if name.startswith('_'): - name = 'var' + name - return name - - -def convert(onnx_model_filename, - save_dir, - model_basename='model.py', - model_func_name='inference', - embed_params=False, - onnx_opset_version=None, - onnx_opset_pedantic=True, - debug=False, - **kwargs): - """ - convert an ONNX model to Paddle fluid Python code and desc pb - """ - - assert isinstance(onnx_model_filename, str) - assert isinstance(save_dir, str) - assert isinstance(model_basename, str) - assert isinstance(model_func_name, str) - assert onnx_opset_version is None or isinstance(onnx_opset_version, int) - - import onnx - - from onnx.checker import ValidationError - from onnx.checker import check_model - from onnx.version_converter import convert_version - - from .onnx_utils import DEFAULT_OP_DOMAIN - from .onnx_utils import graph_ops, graph_weights - from .onnx_utils import inferred_model_value_info - from .onnx_utils import polish_model - from .writer import Program, Writer - - logger = logging.getLogger('convert') - - # prepare onnx model - logger.info('loading model: %s ...', onnx_model_filename) - onnx_model = onnx.load(onnx_model_filename) - - try: - logger.info('checking model ...') - check_model(onnx_model) - if onnx_opset_version is None: # WORKAROUND: RuntimeError: No Adapter For OP - logger.warning( - 'opset conversion skipped for onnx_opset_pedantic is OFF') - logger.info('assumed opset version: %d', DEFAULT_ONNX_OPSET_VERSION) - else: - logger.info('using opset version: %d', onnx_opset_version) - onnx_model = convert_version(onnx_model, onnx_opset_version) - except ValidationError as e: - if onnx_opset_pedantic: - raise e - else: - logger.warning('due to onnx_opset_pedantic is OFF') - logger.warning('the ONNX model sanity checking error is suppressed') - logger.warning('value_info inferring may be uncompleted') - - # onnx model optimization - logger.info('model has %d ops', len(onnx_model.graph.node)) - logger.info('optimizing model ...') - onnx_model = polish_model(onnx_model, checking=onnx_opset_pedantic) - - # prepare filesystem - shutil.rmtree(save_dir, ignore_errors=True) - shutil.os.makedirs(save_dir, exist_ok=True) - logger.info('folder %s cleared', save_dir) - - # DEBUG: - if debug: - debug_model_filename, _ = shutil.os.path.splitext(onnx_model_filename) - onnx.save(onnx_model, debug_model_filename + '.polished.onnx') - - # I/O instances - onnx_graph = onnx_model.graph - fluid_program = Program() - fluid_writer = Writer() - - # model components - inp_vars = [make_var_name(value.name) for value in onnx_graph.input] - out_vars = 
[make_var_name(value.name) for value in onnx_graph.output] - par_vars = [] - value_infos = inferred_model_value_info(onnx_model) - value_infos = { - make_var_name(key): value - for key, value in value_infos.items() - } - - # prepare additional value_info - # for weights - for name, weight in graph_weights(onnx_graph): - var_name = make_var_name(name) - value_info = value_infos[var_name] - value_info['lod'] = [0] - value_info['embedded_as'] = [] - value_info['get_weight'] = (lambda w: lambda: w.tolist())( - weight) # lazy getter - - logger.info('conversion started') - # op set conversion - # topo = 'backward' if embed_params else 'forward' - topo = 'forward' - for name, domain, op_type, inputs, outputs, attrs in graph_ops(onnx_graph, - topo=topo): - op_name = make_var_name(name) - inputs = list(map(make_var_name, inputs)) - outputs = list(map(make_var_name, outputs)) - logger.debug('translating op %s(%s) %s::%s ...', name, op_name, domain, - op_type) - if domain == DEFAULT_OP_DOMAIN: - domain = '' - try: - fluid_writer.emit_op( - fluid_program, - op_name, - domain, - op_type, - inputs, - outputs, - attrs, - value_infos, - embed_params=embed_params, - ) - except BaseException as e: - logger.fatal('conversion failed for:\n\t%s -> %s::%s -> %s', inputs, - domain, op_type, outputs) - raise e - op_codes = fluid_program.codes - fluid_program.codes = [] - logger.info('%d ops in, %d ops out', len(onnx_graph.node), - len(fluid_program.op_descs)) - - # type-shape info copy - for var_name, value_info in value_infos.items(): - fluid_program.VarTypeShapeInfo(var_name, value_info, - remove_batch=False) # - bad_vars = [] - for var_name, var_desc in fluid_program.var_descs.items(): - if not var_desc.type.lod_tensor.HasField('tensor'): - bad_vars.append(var_name) - if bad_vars: - logger.warning('type-shape not infered for var %s ...', - ', '.join(bad_vars[:5])) - logger.warning('this causes little problem for PaddlePaddle, ' - 'but Paddle Mobile may not infer correctly') - logger.warning('please consider running validation with -i ' - 'to invoke type-shape inference in PaddlePaddle') - - # weight writer - for name, weight in graph_weights(onnx_graph): - var_name = make_var_name(name) - par_vars.append(var_name) - value_info = value_infos[var_name] - embedded_names = value_info.get('embedded_as', []) - if embedded_names: - if len(embedded_names) > 1: - logger.info( - 'weight %s is shared between ops, more disk space will be consumed', - name) - logger.debug('saving weight %s(%s[%d], %dB) as %s ...', name, - weight.dtype, weight.size, weight.nbytes, - embedded_names) - for embedded_name in embedded_names: # multiple references - fluid_writer.write_weight(weight, - shutil.os.path.join( - save_dir, embedded_name), - lod=value_info['lod']) - else: - logger.debug('saving weight %s(%s[%d], %dB) to %s ...', name, - weight.dtype, weight.size, weight.nbytes, var_name) - fluid_writer.write_weight(weight, - shutil.os.path.join(save_dir, var_name), - lod=value_info['lod']) - fluid_writer.emit_param(fluid_program, var_name, value_info) - param_codes = fluid_program.codes - fluid_program.codes = [] - logger.info('%d weights converted', len(par_vars)) - - # input writer - external_inputs = [] - for var_name in inp_vars: - if var_name not in par_vars: - value_info = value_infos[var_name] - assert value_info['external'] - external_inputs.append(var_name) - fluid_writer.emit_inputs(fluid_program, - external_inputs, - value_infos, - remove_batch=False) # TODO: - input_codes = fluid_program.codes - fluid_program.codes = [] - 
logger.info('%d inputs converted', len(external_inputs)) - - # output writer - external_outputs = [] - for var_name in out_vars: - if var_name not in par_vars: - value_info = value_infos[var_name] - assert value_info['external'] - external_outputs.append(var_name) - fluid_writer.emit_outputs(fluid_program, external_outputs) - output_codes = [''] + fluid_program.codes # add an empty line - fluid_program.codes = [] - logger.info('%d outputs converted', len(external_outputs)) - - # code generation - header_codes = fluid_writer.header_code( - model_func_name, - 'From: {}'.format(onnx_model_filename), - ) - code_filename = shutil.os.path.join(save_dir, model_basename) - fluid_writer.write_code_file( - code_filename, - header_codes, - input_codes, - param_codes, - op_codes, - output_codes, - ) - logger.info('code saved to %s, factory function: %s', code_filename, - model_func_name) - - # desc generation - desc_filename = shutil.os.path.join(save_dir, '__model__') - fluid_writer.write_desc_file( - desc_filename, - op_descs=fluid_program.op_descs, - var_descs=list(fluid_program.var_descs.values()), - ) - logger.info('program saved to %s', desc_filename) - - logger.info('conversion finished') - - -def main(): - import argparse - - parser = argparse.ArgumentParser( - description='onnx2fluid.convert', - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - 'model', - nargs=1, - help='path to model.onnx', - ) - parser.add_argument( - '--debug', - '-d', - action='store_true', - help='enable debug logging and checking', - ) - parser.add_argument( - '--output_dir', - '-o', - type=str, - default='', - help='output directory', - ) - parser.add_argument( - '--embed_params', - '-e', - action='store_true', - help='try to embed parameters for trainable Paddle fluid layers', - ) - parser.add_argument( - '--pedantic', - action='store_true', - default=True, - help='accept and convert only standard ONNX opset', - ) - parser.add_argument( - '--no-pedantic', - '-x', - action='store_false', - dest='pedantic', - help='process non-standard ONNX ops, this may lead to fails', - ) - parser.add_argument( - '--skip-version-conversion', - '-y', - action='store_true', - default=False, - help='skip ONNX op version conversion, workaround for RumtimeErrors', - ) - args = parser.parse_args() - - logging_format = '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s' - logging_level = logging.DEBUG if args.debug else logging.INFO - logging.basicConfig(format=logging_format, level=logging_level) - - debug = args.debug - model_filename = args.model[0] - basepath, _ = shutil.os.path.splitext(model_filename) - save_dir = args.output_dir - save_dir = (save_dir.rstrip(shutil.os.sep) - if save_dir else basepath) + shutil.os.sep - embed_params = args.embed_params - pedantic = args.pedantic - skip_version_conversion = args.skip_version_conversion - - convert(model_filename, - save_dir, - embed_params=embed_params, - onnx_opset_pedantic=pedantic, - onnx_skip_version_conversion=skip_version_conversion, - debug=debug) - - -if __name__ == '__main__': - del convert - - from onnx2fluid.conversion import convert - - main() diff --git a/onnx2fluid/onnx2fluid/framework_pb2.py b/onnx2fluid/onnx2fluid/framework_pb2.py deleted file mode 100644 index 1ec59dc..0000000 --- a/onnx2fluid/onnx2fluid/framework_pb2.py +++ /dev/null @@ -1,1726 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: framework.proto - -import sys -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - -DESCRIPTOR = _descriptor.FileDescriptor( - name='framework.proto', - package='paddle.framework.proto', - syntax='proto2', - serialized_pb=_b( - '\n\x0f\x66ramework.proto\x12\x16paddle.framework.proto\"\x1d\n\x07Version\x12\x12\n\x07version\x18\x01 \x01(\x03:\x01\x30\"\xec\x03\n\x06OpDesc\x12\x0c\n\x04type\x18\x03 \x02(\t\x12\x32\n\x06inputs\x18\x01 \x03(\x0b\x32\".paddle.framework.proto.OpDesc.Var\x12\x33\n\x07outputs\x18\x02 \x03(\x0b\x32\".paddle.framework.proto.OpDesc.Var\x12\x32\n\x05\x61ttrs\x18\x04 \x03(\x0b\x32#.paddle.framework.proto.OpDesc.Attr\x12\x18\n\tis_target\x18\x05 \x01(\x08:\x05\x66\x61lse\x1a\xef\x01\n\x04\x41ttr\x12\x0c\n\x04name\x18\x01 \x02(\t\x12.\n\x04type\x18\x02 \x02(\x0e\x32 .paddle.framework.proto.AttrType\x12\t\n\x01i\x18\x03 \x01(\x05\x12\t\n\x01\x66\x18\x04 \x01(\x02\x12\t\n\x01s\x18\x05 \x01(\t\x12\x0c\n\x04ints\x18\x06 \x03(\x05\x12\x0e\n\x06\x66loats\x18\x07 \x03(\x02\x12\x0f\n\x07strings\x18\x08 \x03(\t\x12\t\n\x01\x62\x18\n \x01(\x08\x12\r\n\x05\x62ools\x18\x0b \x03(\x08\x12\x11\n\tblock_idx\x18\x0c \x01(\x05\x12\t\n\x01l\x18\r \x01(\x03\x12\x12\n\nblocks_idx\x18\x0e \x03(\x05\x12\r\n\x05longs\x18\x0f \x03(\x03\x1a+\n\x03Var\x12\x11\n\tparameter\x18\x01 \x02(\t\x12\x11\n\targuments\x18\x02 \x03(\t\"\xb3\x03\n\x07OpProto\x12\x0c\n\x04type\x18\x01 \x02(\t\x12\x33\n\x06inputs\x18\x02 \x03(\x0b\x32#.paddle.framework.proto.OpProto.Var\x12\x34\n\x07outputs\x18\x03 \x03(\x0b\x32#.paddle.framework.proto.OpProto.Var\x12\x33\n\x05\x61ttrs\x18\x04 \x03(\x0b\x32$.paddle.framework.proto.OpProto.Attr\x12\x0f\n\x07\x63omment\x18\x05 \x02(\t\x1ax\n\x03Var\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0f\n\x07\x63omment\x18\x02 \x02(\t\x12\x19\n\nduplicable\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cintermediate\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0b\x64ispensable\x18\x05 \x01(\x08:\x05\x66\x61lse\x1ao\n\x04\x41ttr\x12\x0c\n\x04name\x18\x01 \x02(\t\x12.\n\x04type\x18\x02 \x02(\x0e\x32 .paddle.framework.proto.AttrType\x12\x0f\n\x07\x63omment\x18\x03 \x02(\t\x12\x18\n\tgenerated\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xda\x08\n\x07VarType\x12\x32\n\x04type\x18\x01 \x02(\x0e\x32$.paddle.framework.proto.VarType.Type\x12\x41\n\rselected_rows\x18\x02 \x01(\x0b\x32*.paddle.framework.proto.VarType.TensorDesc\x12\x41\n\nlod_tensor\x18\x03 \x01(\x0b\x32-.paddle.framework.proto.VarType.LoDTensorDesc\x12H\n\x0ctensor_array\x18\x04 \x01(\x0b\x32\x32.paddle.framework.proto.VarType.LoDTensorArrayDesc\x12:\n\x06reader\x18\x05 \x01(\x0b\x32*.paddle.framework.proto.VarType.ReaderDesc\x12\x34\n\x05tuple\x18\x07 \x01(\x0b\x32%.paddle.framework.proto.VarType.Tuple\x1aS\n\nTensorDesc\x12\x37\n\tdata_type\x18\x01 \x02(\x0e\x32$.paddle.framework.proto.VarType.Type\x12\x0c\n\x04\x64ims\x18\x02 \x03(\x03\x1a\x61\n\rLoDTensorDesc\x12:\n\x06tensor\x18\x01 \x02(\x0b\x32*.paddle.framework.proto.VarType.TensorDesc\x12\x14\n\tlod_level\x18\x02 \x01(\x05:\x01\x30\x1a\x66\n\x12LoDTensorArrayDesc\x12:\n\x06tensor\x18\x01 
\x02(\x0b\x32*.paddle.framework.proto.VarType.TensorDesc\x12\x14\n\tlod_level\x18\x02 \x01(\x05:\x01\x30\x1aO\n\nReaderDesc\x12\x41\n\nlod_tensor\x18\x01 \x03(\x0b\x32-.paddle.framework.proto.VarType.LoDTensorDesc\x1a\x43\n\x05Tuple\x12:\n\x0c\x65lement_type\x18\x01 \x03(\x0e\x32$.paddle.framework.proto.VarType.Type\"\xa2\x02\n\x04Type\x12\x08\n\x04\x42OOL\x10\x00\x12\t\n\x05INT16\x10\x01\x12\t\n\x05INT32\x10\x02\x12\t\n\x05INT64\x10\x03\x12\x08\n\x04\x46P16\x10\x04\x12\x08\n\x04\x46P32\x10\x05\x12\x08\n\x04\x46P64\x10\x06\x12\n\n\x06SIZE_T\x10\x13\x12\t\n\x05UINT8\x10\x14\x12\x08\n\x04INT8\x10\x15\x12\x0e\n\nLOD_TENSOR\x10\x07\x12\x11\n\rSELECTED_ROWS\x10\x08\x12\x12\n\x0e\x46\x45\x45\x44_MINIBATCH\x10\t\x12\x0e\n\nFETCH_LIST\x10\n\x12\x0f\n\x0bSTEP_SCOPES\x10\x0b\x12\x12\n\x0eLOD_RANK_TABLE\x10\x0c\x12\x14\n\x10LOD_TENSOR_ARRAY\x10\r\x12\x0e\n\nPLACE_LIST\x10\x0e\x12\n\n\x06READER\x10\x0f\x12\x07\n\x03RAW\x10\x11\x12\t\n\x05TUPLE\x10\x12\"b\n\x07VarDesc\x12\x0c\n\x04name\x18\x01 \x02(\t\x12-\n\x04type\x18\x02 \x02(\x0b\x32\x1f.paddle.framework.proto.VarType\x12\x1a\n\x0bpersistable\x18\x03 \x01(\x08:\x05\x66\x61lse\"\xa7\x01\n\tBlockDesc\x12\x0b\n\x03idx\x18\x01 \x02(\x05\x12\x12\n\nparent_idx\x18\x02 \x02(\x05\x12-\n\x04vars\x18\x03 \x03(\x0b\x32\x1f.paddle.framework.proto.VarDesc\x12+\n\x03ops\x18\x04 \x03(\x0b\x32\x1e.paddle.framework.proto.OpDesc\x12\x1d\n\x11\x66orward_block_idx\x18\x05 \x01(\x05:\x02-1\"r\n\x0bProgramDesc\x12\x31\n\x06\x62locks\x18\x01 \x03(\x0b\x32!.paddle.framework.proto.BlockDesc\x12\x30\n\x07version\x18\x02 \x01(\x0b\x32\x1f.paddle.framework.proto.Version*\x94\x01\n\x08\x41ttrType\x12\x07\n\x03INT\x10\x00\x12\t\n\x05\x46LOAT\x10\x01\x12\n\n\x06STRING\x10\x02\x12\x08\n\x04INTS\x10\x03\x12\n\n\x06\x46LOATS\x10\x04\x12\x0b\n\x07STRINGS\x10\x05\x12\x0b\n\x07\x42OOLEAN\x10\x06\x12\x0c\n\x08\x42OOLEANS\x10\x07\x12\t\n\x05\x42LOCK\x10\x08\x12\x08\n\x04LONG\x10\t\x12\n\n\x06\x42LOCKS\x10\n\x12\t\n\x05LONGS\x10\x0b\x42\x02H\x03' - )) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_ATTRTYPE = _descriptor.EnumDescriptor( - name='AttrType', - full_name='paddle.framework.proto.AttrType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor(name='INT', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='FLOAT', - index=1, - number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='STRING', - index=2, - number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='INTS', - index=3, - number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='FLOATS', - index=4, - number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='STRINGS', - index=5, - number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='BOOLEAN', - index=6, - number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='BOOLEANS', - index=7, - number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='BLOCK', - index=8, - number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='LONG', - index=9, - number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='BLOCKS', - index=10, - number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='LONGS', - index=11, - number=11, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=2511, - serialized_end=2659, -) -_sym_db.RegisterEnumDescriptor(_ATTRTYPE) - 
-AttrType = enum_type_wrapper.EnumTypeWrapper(_ATTRTYPE) -INT = 0 -FLOAT = 1 -STRING = 2 -INTS = 3 -FLOATS = 4 -STRINGS = 5 -BOOLEAN = 6 -BOOLEANS = 7 -BLOCK = 8 -LONG = 9 -BLOCKS = 10 -LONGS = 11 - -_VARTYPE_TYPE = _descriptor.EnumDescriptor( - name='Type', - full_name='paddle.framework.proto.VarType.Type', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor(name='BOOL', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='INT16', - index=1, - number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='INT32', - index=2, - number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='INT64', - index=3, - number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='FP16', - index=4, - number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='FP32', - index=5, - number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='FP64', - index=6, - number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='SIZE_T', - index=7, - number=19, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='UINT8', - index=8, - number=20, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='INT8', - index=9, - number=21, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='LOD_TENSOR', - index=10, - number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='SELECTED_ROWS', - index=11, - number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='FEED_MINIBATCH', - index=12, - number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='FETCH_LIST', - index=13, - number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='STEP_SCOPES', - index=14, - number=11, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='LOD_RANK_TABLE', - index=15, - number=12, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='LOD_TENSOR_ARRAY', - index=16, - number=13, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PLACE_LIST', - index=17, - number=14, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='READER', - index=18, - number=15, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='RAW', - index=19, - number=17, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='TUPLE', - index=20, - number=18, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1832, - serialized_end=2122, -) -_sym_db.RegisterEnumDescriptor(_VARTYPE_TYPE) - -_VERSION = _descriptor.Descriptor( - name='Version', - full_name='paddle.framework.proto.Version', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='version', - full_name='paddle.framework.proto.Version.version', - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=True, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=43, - serialized_end=72, -) - -_OPDESC_ATTR = _descriptor.Descriptor( - name='Attr', - full_name='paddle.framework.proto.OpDesc.Attr', - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', - full_name='paddle.framework.proto.OpDesc.Attr.name', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', - full_name='paddle.framework.proto.OpDesc.Attr.type', - index=1, - number=2, - type=14, - cpp_type=8, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='i', - full_name='paddle.framework.proto.OpDesc.Attr.i', - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='f', - full_name='paddle.framework.proto.OpDesc.Attr.f', - index=3, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='s', - full_name='paddle.framework.proto.OpDesc.Attr.s', - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ints', - full_name='paddle.framework.proto.OpDesc.Attr.ints', - index=5, - number=6, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='floats', - full_name='paddle.framework.proto.OpDesc.Attr.floats', - index=6, - number=7, - type=2, - cpp_type=6, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='strings', - full_name='paddle.framework.proto.OpDesc.Attr.strings', - index=7, - number=8, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='b', - full_name='paddle.framework.proto.OpDesc.Attr.b', - index=8, - number=10, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bools', - full_name='paddle.framework.proto.OpDesc.Attr.bools', - index=9, - number=11, - type=8, - cpp_type=7, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='block_idx', - full_name='paddle.framework.proto.OpDesc.Attr.block_idx', - index=10, - number=12, - type=5, - cpp_type=1, - 
label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='l', - full_name='paddle.framework.proto.OpDesc.Attr.l', - index=11, - number=13, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blocks_idx', - full_name='paddle.framework.proto.OpDesc.Attr.blocks_idx', - index=12, - number=14, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='longs', - full_name='paddle.framework.proto.OpDesc.Attr.longs', - index=13, - number=15, - type=3, - cpp_type=2, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=283, - serialized_end=522, -) - -_OPDESC_VAR = _descriptor.Descriptor( - name='Var', - full_name='paddle.framework.proto.OpDesc.Var', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parameter', - full_name='paddle.framework.proto.OpDesc.Var.parameter', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='arguments', - full_name='paddle.framework.proto.OpDesc.Var.arguments', - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=524, - serialized_end=567, -) - -_OPDESC = _descriptor.Descriptor( - name='OpDesc', - full_name='paddle.framework.proto.OpDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', - full_name='paddle.framework.proto.OpDesc.type', - index=0, - number=3, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inputs', - full_name='paddle.framework.proto.OpDesc.inputs', - index=1, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='outputs', - full_name='paddle.framework.proto.OpDesc.outputs', - index=2, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='attrs', - full_name='paddle.framework.proto.OpDesc.attrs', - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='is_target', - full_name='paddle.framework.proto.OpDesc.is_target', - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=True, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[ - _OPDESC_ATTR, - _OPDESC_VAR, - ], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=75, - serialized_end=567, -) - -_OPPROTO_VAR = _descriptor.Descriptor( - name='Var', - full_name='paddle.framework.proto.OpProto.Var', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', - full_name='paddle.framework.proto.OpProto.Var.name', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='comment', - full_name='paddle.framework.proto.OpProto.Var.comment', - index=1, - number=2, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='duplicable', - full_name='paddle.framework.proto.OpProto.Var.duplicable', - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=True, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='intermediate', - full_name='paddle.framework.proto.OpProto.Var.intermediate', - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=True, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dispensable', - full_name='paddle.framework.proto.OpProto.Var.dispensable', - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=True, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=772, - serialized_end=892, -) - -_OPPROTO_ATTR = _descriptor.Descriptor( - name='Attr', - full_name='paddle.framework.proto.OpProto.Attr', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', - full_name='paddle.framework.proto.OpProto.Attr.name', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', - full_name='paddle.framework.proto.OpProto.Attr.type', - index=1, - number=2, - type=14, - cpp_type=8, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='comment', - full_name='paddle.framework.proto.OpProto.Attr.comment', - index=2, - number=3, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='generated', - full_name='paddle.framework.proto.OpProto.Attr.generated', - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=True, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=894, - serialized_end=1005, -) - -_OPPROTO = _descriptor.Descriptor( - name='OpProto', - full_name='paddle.framework.proto.OpProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', - full_name='paddle.framework.proto.OpProto.type', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inputs', - full_name='paddle.framework.proto.OpProto.inputs', - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='outputs', - full_name='paddle.framework.proto.OpProto.outputs', - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='attrs', - full_name='paddle.framework.proto.OpProto.attrs', - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='comment', - full_name='paddle.framework.proto.OpProto.comment', - index=4, - number=5, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[ - _OPPROTO_VAR, - _OPPROTO_ATTR, - ], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=570, - serialized_end=1005, -) - -_VARTYPE_TENSORDESC = _descriptor.Descriptor( - name='TensorDesc', - 
full_name='paddle.framework.proto.VarType.TensorDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='data_type', - full_name='paddle.framework.proto.VarType.TensorDesc.data_type', - index=0, - number=1, - type=14, - cpp_type=8, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dims', - full_name='paddle.framework.proto.VarType.TensorDesc.dims', - index=1, - number=2, - type=3, - cpp_type=2, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=1393, - serialized_end=1476, -) - -_VARTYPE_LODTENSORDESC = _descriptor.Descriptor( - name='LoDTensorDesc', - full_name='paddle.framework.proto.VarType.LoDTensorDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tensor', - full_name='paddle.framework.proto.VarType.LoDTensorDesc.tensor', - index=0, - number=1, - type=11, - cpp_type=10, - label=2, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lod_level', - full_name='paddle.framework.proto.VarType.LoDTensorDesc.lod_level', - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=True, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=1478, - serialized_end=1575, -) - -_VARTYPE_LODTENSORARRAYDESC = _descriptor.Descriptor( - name='LoDTensorArrayDesc', - full_name='paddle.framework.proto.VarType.LoDTensorArrayDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tensor', - full_name='paddle.framework.proto.VarType.LoDTensorArrayDesc.tensor', - index=0, - number=1, - type=11, - cpp_type=10, - label=2, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lod_level', - full_name= - 'paddle.framework.proto.VarType.LoDTensorArrayDesc.lod_level', - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=True, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=1577, - serialized_end=1679, -) - -_VARTYPE_READERDESC = _descriptor.Descriptor( - name='ReaderDesc', - full_name='paddle.framework.proto.VarType.ReaderDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='lod_tensor', - 
full_name='paddle.framework.proto.VarType.ReaderDesc.lod_tensor', - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=1681, - serialized_end=1760, -) - -_VARTYPE_TUPLE = _descriptor.Descriptor( - name='Tuple', - full_name='paddle.framework.proto.VarType.Tuple', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='element_type', - full_name='paddle.framework.proto.VarType.Tuple.element_type', - index=0, - number=1, - type=14, - cpp_type=8, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=1762, - serialized_end=1829, -) - -_VARTYPE = _descriptor.Descriptor( - name='VarType', - full_name='paddle.framework.proto.VarType', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', - full_name='paddle.framework.proto.VarType.type', - index=0, - number=1, - type=14, - cpp_type=8, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='selected_rows', - full_name='paddle.framework.proto.VarType.selected_rows', - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lod_tensor', - full_name='paddle.framework.proto.VarType.lod_tensor', - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tensor_array', - full_name='paddle.framework.proto.VarType.tensor_array', - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reader', - full_name='paddle.framework.proto.VarType.reader', - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tuple', - full_name='paddle.framework.proto.VarType.tuple', - index=5, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[ - _VARTYPE_TENSORDESC, - _VARTYPE_LODTENSORDESC, - 
_VARTYPE_LODTENSORARRAYDESC, - _VARTYPE_READERDESC, - _VARTYPE_TUPLE, - ], - enum_types=[ - _VARTYPE_TYPE, - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=1008, - serialized_end=2122, -) - -_VARDESC = _descriptor.Descriptor( - name='VarDesc', - full_name='paddle.framework.proto.VarDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', - full_name='paddle.framework.proto.VarDesc.name', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', - full_name='paddle.framework.proto.VarDesc.type', - index=1, - number=2, - type=11, - cpp_type=10, - label=2, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='persistable', - full_name='paddle.framework.proto.VarDesc.persistable', - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=True, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=2124, - serialized_end=2222, -) - -_BLOCKDESC = _descriptor.Descriptor( - name='BlockDesc', - full_name='paddle.framework.proto.BlockDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='idx', - full_name='paddle.framework.proto.BlockDesc.idx', - index=0, - number=1, - type=5, - cpp_type=1, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='parent_idx', - full_name='paddle.framework.proto.BlockDesc.parent_idx', - index=1, - number=2, - type=5, - cpp_type=1, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='vars', - full_name='paddle.framework.proto.BlockDesc.vars', - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ops', - full_name='paddle.framework.proto.BlockDesc.ops', - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='forward_block_idx', - full_name='paddle.framework.proto.BlockDesc.forward_block_idx', - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=True, - default_value=-1, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - 
nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=2225, - serialized_end=2392, -) - -_PROGRAMDESC = _descriptor.Descriptor( - name='ProgramDesc', - full_name='paddle.framework.proto.ProgramDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='blocks', - full_name='paddle.framework.proto.ProgramDesc.blocks', - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='version', - full_name='paddle.framework.proto.ProgramDesc.version', - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=2394, - serialized_end=2508, -) - -_OPDESC_ATTR.fields_by_name['type'].enum_type = _ATTRTYPE -_OPDESC_ATTR.containing_type = _OPDESC -_OPDESC_VAR.containing_type = _OPDESC -_OPDESC.fields_by_name['inputs'].message_type = _OPDESC_VAR -_OPDESC.fields_by_name['outputs'].message_type = _OPDESC_VAR -_OPDESC.fields_by_name['attrs'].message_type = _OPDESC_ATTR -_OPPROTO_VAR.containing_type = _OPPROTO -_OPPROTO_ATTR.fields_by_name['type'].enum_type = _ATTRTYPE -_OPPROTO_ATTR.containing_type = _OPPROTO -_OPPROTO.fields_by_name['inputs'].message_type = _OPPROTO_VAR -_OPPROTO.fields_by_name['outputs'].message_type = _OPPROTO_VAR -_OPPROTO.fields_by_name['attrs'].message_type = _OPPROTO_ATTR -_VARTYPE_TENSORDESC.fields_by_name['data_type'].enum_type = _VARTYPE_TYPE -_VARTYPE_TENSORDESC.containing_type = _VARTYPE -_VARTYPE_LODTENSORDESC.fields_by_name[ - 'tensor'].message_type = _VARTYPE_TENSORDESC -_VARTYPE_LODTENSORDESC.containing_type = _VARTYPE -_VARTYPE_LODTENSORARRAYDESC.fields_by_name[ - 'tensor'].message_type = _VARTYPE_TENSORDESC -_VARTYPE_LODTENSORARRAYDESC.containing_type = _VARTYPE -_VARTYPE_READERDESC.fields_by_name[ - 'lod_tensor'].message_type = _VARTYPE_LODTENSORDESC -_VARTYPE_READERDESC.containing_type = _VARTYPE -_VARTYPE_TUPLE.fields_by_name['element_type'].enum_type = _VARTYPE_TYPE -_VARTYPE_TUPLE.containing_type = _VARTYPE -_VARTYPE.fields_by_name['type'].enum_type = _VARTYPE_TYPE -_VARTYPE.fields_by_name['selected_rows'].message_type = _VARTYPE_TENSORDESC -_VARTYPE.fields_by_name['lod_tensor'].message_type = _VARTYPE_LODTENSORDESC -_VARTYPE.fields_by_name[ - 'tensor_array'].message_type = _VARTYPE_LODTENSORARRAYDESC -_VARTYPE.fields_by_name['reader'].message_type = _VARTYPE_READERDESC -_VARTYPE.fields_by_name['tuple'].message_type = _VARTYPE_TUPLE -_VARTYPE_TYPE.containing_type = _VARTYPE -_VARDESC.fields_by_name['type'].message_type = _VARTYPE -_BLOCKDESC.fields_by_name['vars'].message_type = _VARDESC -_BLOCKDESC.fields_by_name['ops'].message_type = _OPDESC -_PROGRAMDESC.fields_by_name['blocks'].message_type = _BLOCKDESC -_PROGRAMDESC.fields_by_name['version'].message_type = _VERSION -DESCRIPTOR.message_types_by_name['Version'] = _VERSION -DESCRIPTOR.message_types_by_name['OpDesc'] = _OPDESC -DESCRIPTOR.message_types_by_name['OpProto'] = _OPPROTO 
-DESCRIPTOR.message_types_by_name['VarType'] = _VARTYPE -DESCRIPTOR.message_types_by_name['VarDesc'] = _VARDESC -DESCRIPTOR.message_types_by_name['BlockDesc'] = _BLOCKDESC -DESCRIPTOR.message_types_by_name['ProgramDesc'] = _PROGRAMDESC -DESCRIPTOR.enum_types_by_name['AttrType'] = _ATTRTYPE - -Version = _reflection.GeneratedProtocolMessageType( - 'Version', - (_message.Message, ), - dict(DESCRIPTOR=_VERSION, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.Version) - )) -_sym_db.RegisterMessage(Version) - -OpDesc = _reflection.GeneratedProtocolMessageType( - 'OpDesc', - (_message.Message, ), - dict( - Attr=_reflection.GeneratedProtocolMessageType( - 'Attr', - (_message.Message, ), - dict( - DESCRIPTOR=_OPDESC_ATTR, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc.Attr) - )), - Var=_reflection.GeneratedProtocolMessageType( - 'Var', - (_message.Message, ), - dict( - DESCRIPTOR=_OPDESC_VAR, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc.Var) - )), - DESCRIPTOR=_OPDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc) - )) -_sym_db.RegisterMessage(OpDesc) -_sym_db.RegisterMessage(OpDesc.Attr) -_sym_db.RegisterMessage(OpDesc.Var) - -OpProto = _reflection.GeneratedProtocolMessageType( - 'OpProto', - (_message.Message, ), - dict( - Var=_reflection.GeneratedProtocolMessageType( - 'Var', - (_message.Message, ), - dict( - DESCRIPTOR=_OPPROTO_VAR, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto.Var) - )), - Attr=_reflection.GeneratedProtocolMessageType( - 'Attr', - (_message.Message, ), - dict( - DESCRIPTOR=_OPPROTO_ATTR, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto.Attr) - )), - DESCRIPTOR=_OPPROTO, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto) - )) -_sym_db.RegisterMessage(OpProto) -_sym_db.RegisterMessage(OpProto.Var) -_sym_db.RegisterMessage(OpProto.Attr) - -VarType = _reflection.GeneratedProtocolMessageType( - 'VarType', - (_message.Message, ), - dict( - TensorDesc=_reflection.GeneratedProtocolMessageType( - 'TensorDesc', - (_message.Message, ), - dict( - DESCRIPTOR=_VARTYPE_TENSORDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.TensorDesc) - )), - LoDTensorDesc=_reflection.GeneratedProtocolMessageType( - 'LoDTensorDesc', - (_message.Message, ), - dict( - DESCRIPTOR=_VARTYPE_LODTENSORDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.LoDTensorDesc) - )), - LoDTensorArrayDesc=_reflection.GeneratedProtocolMessageType( - 'LoDTensorArrayDesc', - (_message.Message, ), - dict( - DESCRIPTOR=_VARTYPE_LODTENSORARRAYDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.LoDTensorArrayDesc) - )), - ReaderDesc=_reflection.GeneratedProtocolMessageType( - 'ReaderDesc', - (_message.Message, ), - dict( - DESCRIPTOR=_VARTYPE_READERDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.ReaderDesc) - )), - Tuple=_reflection.GeneratedProtocolMessageType( - 'Tuple', - (_message.Message, ), - dict( - DESCRIPTOR=_VARTYPE_TUPLE, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.Tuple) - )), - 
DESCRIPTOR=_VARTYPE, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType) - )) -_sym_db.RegisterMessage(VarType) -_sym_db.RegisterMessage(VarType.TensorDesc) -_sym_db.RegisterMessage(VarType.LoDTensorDesc) -_sym_db.RegisterMessage(VarType.LoDTensorArrayDesc) -_sym_db.RegisterMessage(VarType.ReaderDesc) -_sym_db.RegisterMessage(VarType.Tuple) - -VarDesc = _reflection.GeneratedProtocolMessageType( - 'VarDesc', - (_message.Message, ), - dict(DESCRIPTOR=_VARDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarDesc) - )) -_sym_db.RegisterMessage(VarDesc) - -BlockDesc = _reflection.GeneratedProtocolMessageType( - 'BlockDesc', - (_message.Message, ), - dict( - DESCRIPTOR=_BLOCKDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.BlockDesc) - )) -_sym_db.RegisterMessage(BlockDesc) - -ProgramDesc = _reflection.GeneratedProtocolMessageType( - 'ProgramDesc', - (_message.Message, ), - dict( - DESCRIPTOR=_PROGRAMDESC, - __module__='framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.ProgramDesc) - )) -_sym_db.RegisterMessage(ProgramDesc) - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), - _b('H\003')) -# @@protoc_insertion_point(module_scope) diff --git a/onnx2fluid/onnx2fluid/onnx_utils.py b/onnx2fluid/onnx2fluid/onnx_utils.py deleted file mode 100644 index c0c3c66..0000000 --- a/onnx2fluid/onnx2fluid/onnx_utils.py +++ /dev/null @@ -1,712 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sun Feb 24 17:23:09 2019 - -@author: Macrobull -""" - -from __future__ import division - -import logging -import numpy as np -import onnx -import onnx.optimizer as optimizer - -from collections import OrderedDict as Dict # as default dict -from onnx.checker import check_model -from onnx.helper import get_attribute_value, make_attribute, strip_doc_string -from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE -from onnx.numpy_helper import to_array -from onnx.shape_inference import infer_shapes - -logger = logging.getLogger(__name__) - -__all__ = [ - 'print_pb_structure', - 'build_value_refs', - 'tensor_dtype', - 'tensor_shape', - 'node_attrs', - 'node_topo', - 'node_iter', - 'graph_ops', - 'graph_weights', - 'inferred_model_value_info', - 'polish_model', - 'polish_and_save', - 'optimize_model_skip_op_for_inference', - 'optimize_model_strip_initializer', - 'optimize_model_cast', - 'optimize_model_slice', -] - -ONNX_INT_MAX = 2**63 - 1 - -DEFAULT_OP_DOMAIN = 'ai.onnx' - - -def print_pb_structure(message, loop_iterative=False, depth=0): - """ - print pb fields in its structure - """ - - if hasattr(message, 'DESCRIPTOR') and hasattr(message.DESCRIPTOR, 'fields'): - for field in message.DESCRIPTOR.fields: - print('\t' * depth + '-', field.name) - print_pb_structure(getattr(message, field.name), - loop_iterative=loop_iterative, - depth=(depth + 1)) - - if loop_iterative and hasattr(message, 'MergeFrom') and hasattr( - message, '__len__'): - for idx, item in enumerate(message): - print('\t' * depth + '-', idx) - print_pb_structure(item, - loop_iterative=loop_iterative, - depth=(depth + 1)) - - -def build_value_refs(nodes): - """ - build op reference of inputs and outputs - """ - - input_refs = Dict() - output_refs = Dict() - for idx, node in enumerate(nodes): - for val_name in node.input: - input_refs.setdefault(val_name, set()).add(idx) - for val_name in node.output: - 
output_refs.setdefault(val_name, set()).add(idx) - return input_refs, output_refs - - -def get_attribute_value2(attr): - """ - get_attribute_value enhanced - """ - - assert isinstance( - attr, onnx.AttributeProto), 'attr is not a AttributeProto instance' - - if attr.type == onnx.AttributeProto.TENSOR: - dtype = np.dtype(TENSOR_TYPE_TO_NP_TYPE[attr.t.data_type]) - data = attr.t.raw_data - value = np.frombuffer(data, - dtype=dtype, - count=(len(data) // dtype.itemsize)) - elif attr.type == onnx.AttributeProto.STRING: - value = attr.s - value = value.decode() if isinstance(value, bytes) else value - elif attr.type == onnx.AttributeProto.STRINGS: - value = attr.strings - value = [s.decode() if isinstance(s, bytes) else s for s in value] - else: - value = get_attribute_value(attr) - return value - - -def tensor_dtype(tensor): - """ - get ONNX tensor in np.dtype - """ - - assert isinstance( - tensor, onnx.ValueInfoProto), 'tensor is not a ValueInfoProto instance' - - return TENSOR_TYPE_TO_NP_TYPE[tensor.type.tensor_type.elem_type] - - -def tensor_shape(tensor): - """ - get ONNX tensor shape - """ - - assert isinstance( - tensor, onnx.ValueInfoProto), 'tensor is not a ValueInfoProto instance' - - return tuple([dim.dim_value for dim in tensor.type.tensor_type.shape.dim]) - - -def node_attrs(node): - """ - convert ONNX node attributes to dict - """ - - assert isinstance(node, onnx.NodeProto), 'node is not a NodeProto instance' - - return {attr.name: get_attribute_value2(attr) - for attr in node.attribute} # dict - - -def node_topo(nodes, topo='default'): - """ - build indices with given topology to an ONNX node graph - """ - - if topo == 'default': - return list(range(len(nodes))) - - node_topo = [] - node_in_degrees = [len(set(node.input)) - for node in nodes] # merge multiple references - node_out_degrees = [len(set(node.output)) - for node in nodes] # merge multiple references - input_refs, output_refs = build_value_refs(nodes) - - if topo == 'forward': - for val_name in input_refs: - if val_name not in output_refs: - for node_idx in input_refs[val_name]: - node_in_degrees[node_idx] -= 1 - queue = [] - for node_idx, degree in enumerate(node_in_degrees): - if degree == 0: - queue.append(node_idx) - while queue: - node_idx = queue.pop(0) - node_topo.append(node_idx) - for val_name in nodes[node_idx].output: - output_refs[val_name].remove(node_idx) - if output_refs[val_name]: - continue - output_refs.pop(val_name) - if val_name not in input_refs: - continue - for next_idx in input_refs[val_name]: - node_in_degrees[next_idx] -= 1 - if node_in_degrees[next_idx] == 0: - queue.insert(0, next_idx) # make it lazy - return node_topo - - if topo == 'backward': - for val_name in output_refs: - if val_name not in input_refs: - for node_idx in output_refs[val_name]: - node_out_degrees[node_idx] -= 1 - queue = [] - for node_idx, degree in enumerate(node_out_degrees): - if degree == 0: - queue.append(node_idx) - while queue: - node_idx = queue.pop(0) - node_topo.append(node_idx) - for val_name in nodes[node_idx].input: - input_refs[val_name].remove(node_idx) - if input_refs[val_name]: - continue - input_refs.pop(val_name) - if val_name not in output_refs: - continue - for next_idx in output_refs[val_name]: - node_out_degrees[next_idx] -= 1 - if node_out_degrees[next_idx] == 0: - queue.insert(0, next_idx) # make it lazy - return node_topo - - raise ValueError('unkown given topo: {}'.format(topo)) - - -def node_iter(nodes, indices=None): - """ - generator for ONNX node graph with given indices - """ - - if 
indices is None: - indices = range(len(nodes)) - - for index in indices: - node = nodes[index] - name = node.name - domain = node.domain - op_type = node.op_type - inputs = list(node.input) - outputs = list(node.output) - attrs = node_attrs(node) - - if name == '': - name = 'op_' + str(index) - - -# else: # make_op_name -# for s in ' \\|/:-': # -# name = name.replace(s, '_') - if domain == '': - domain = DEFAULT_OP_DOMAIN - - yield name, domain, op_type, inputs, outputs, attrs - - -def graph_ops(graph, topo='default'): - """ - generator for ONNX node graph with given topology - """ - - assert isinstance(graph, - onnx.GraphProto), 'graph is not a GraphProto instance' - - return node_iter(graph.node, node_topo(graph.node, topo)) - - -def graph_weights(graph): - """ - generator for weights of an ONNX model - """ - - assert isinstance(graph, - onnx.GraphProto), 'graph is not a GraphProto instance' - - for initializer in graph.initializer: - name = initializer.name - weight = to_array(initializer) - yield name, weight - - -def inferred_model_value_info(model): - """ - collect value/type info for an ONNX model - """ - - assert isinstance(model, - onnx.ModelProto), 'model is not a ModelProto instance' - - model = infer_shapes(model) - graph = model.graph - value_info = Dict() - for item in graph.value_info: - value_info[item.name] = { - 'dtype': tensor_dtype(item), - 'shape': tensor_shape(item), - 'external': False, - } - for item in graph.input: - assert item.name not in value_info - value_info[item.name] = { - 'dtype': tensor_dtype(item), - 'shape': tensor_shape(item), - 'external': True, - } - for item in graph.output: - # assert item.name not in value_info, 'bypass-model not supported' - value_info[item.name] = { - 'dtype': tensor_dtype(item), - 'shape': tensor_shape(item), - 'external': True, - } - return value_info - - -def skip_node_forward(nodes, src_output_name, dst_input_name, input_refs): - """ - skip nodes between src_output_name -> dst_input_name and connect this pair - """ - - processed = 0 - for next_idx in input_refs[src_output_name]: - next_node = nodes[next_idx] - for val_idx, next_input_name in enumerate(next_node.input): - if next_input_name == src_output_name: - next_node.input[val_idx] = dst_input_name - processed += 1 - return processed - - -def skip_node_backward(nodes, src_input_name, dst_output_name, output_refs): - """ - skip nodes between dst_output_name -> src_input_name and connect this pair - """ - - processed = 0 - for prev_idx in output_refs[src_input_name]: - prev_node = nodes[prev_idx] - for val_idx, prev_output_name in enumerate(prev_node.output): - if prev_output_name == src_input_name: - prev_node.output[val_idx] = dst_output_name - processed += 1 - return processed - - -def polish_model(model, internals=True, extras=True, checking=True): - """ - polish_model enhanced for inference - """ - - if checking: - check_model(model) - strip_doc_string(model) - if internals: - passes = optimizer.get_available_passes() - passes = list(filter(lambda name: not name.startswith('split_'), - passes)) # - logger.debug('builtin optimizations to perform in ONNX:\n\t%s', passes) - model = optimizer.optimize(model, passes=passes) - if extras: - for optimize in ( - optimize_model_skip_op_for_inference, - optimize_model_strip_initializer, - optimize_model_cast, - optimize_model_slice, - ): - model = optimize(model) - model = infer_shapes(model) - if checking: - check_model(model) - return model - - -def polish_and_save(model_filename, - suffix='.polished', - save_filename=None, - 
*args, - **kwargs): - """ - run polish_model and save - """ - - if save_filename is None: - save_filename = model_filename.replace('.onnx', suffix + '.onnx') - - model = onnx.load(model_filename) - model = polish_model(model, *args, **kwargs) - onnx.save(model, save_filename) - logger.info('polished model saved to: %s', save_filename) - return save_filename - - -def optimize_model_skip_op_for_inference(model, op_list=None): - """ - skip ops can be bypassed for inference - """ - - assert isinstance(model, - onnx.ModelProto), 'model is not a ModelProto instance' - - if op_list is None: - op_list = ('Dropout', 'Identity') - - nodes = model.graph.node - input_refs, output_refs = build_value_refs(nodes) - - ret = type(model)() - ret.CopyFrom(model) - ret.graph.ClearField( - 'value_info') # WORKAROUND: onnx do not drop old value_info - ret_nodes = ret.graph.node - nodes_to_remove = [] - for node_idx, node in enumerate(nodes): - if not (node.domain == DEFAULT_OP_DOMAIN or node.domain == ''): - continue - op_type = node.op_type - if op_type not in op_list: - continue - - if op_type in ('Dropout', ): - input_name = node.input[0] - output_name = node.output[0] - elif not (len(node.input) == 1 and len(node.output) == 1): - logger.warning( - 'currently only 1-input-1-output op supported, skip required %d: %s', - node_idx, node.op_type) - continue - else: - input_name = node.input[0] - output_name = node.output[0] - - if output_name in input_refs: - processed = skip_node_forward(ret_nodes, output_name, input_name, - input_refs) - elif input_name in output_refs: - processed = skip_node_backward(ret_nodes, input_name, output_name, - output_refs) - else: - processed = -1 - - if processed > 0: - nodes_to_remove.append(node_idx) - logger.debug('skip op %d: %s -> %s -> %s', node_idx, input_name, - node.op_type, output_name) - elif processed == 0: - logger.warning('weird, no node processed') - else: - logger.warning('standalone op %d: %s -> %s -> %s not skipped', - node_idx, input_name, node.op_type, output_name) - - nodes_to_remove.sort(reverse=True) - for node_idx in nodes_to_remove: - ret_nodes.pop(node_idx) - - return ret - - -def optimize_model_strip_initializer(model, keep_input_only=True): - """ - strip weights for inference - """ - - assert isinstance(model, - onnx.ModelProto), 'model is not a ModelProto instance' - - nodes = model.graph.node - input_refs, output_refs = build_value_refs(nodes) - out_names = [val.name for val in model.graph.output] - - ret = type(model)() - ret.CopyFrom(model) - ret.graph.ClearField( - 'value_info') # WORKAROUND: onnx do not drop old value_info - - # strip initializers - ret.graph.ClearField('initializer') - ret_initializers = ret.graph.initializer - for initializer in model.graph.initializer: - name = initializer.name - if name in input_refs: - ret_initializers.add().CopyFrom(initializer) - elif not keep_input_only and name in output_refs: - ret_initializers.add().CopyFrom(initializer) - else: - dtype = TENSOR_TYPE_TO_NP_TYPE[initializer.data_type] - logger.debug('initializer %s(%s[%d]) stripped', name, dtype, - len(initializer.raw_data) // dtype.itemsize) - - # strip inputs - ret.graph.ClearField('input') - ret_inputs = ret.graph.input - for item in model.graph.input: - name = item.name - if name in input_refs or name in out_names: - ret_inputs.add().CopyFrom(item) - else: - logger.debug('input %s(%s%s) stripped', name, tensor_dtype(item), - tuple(tensor_shape(item))) - return ret - - -def optimize_model_cast(model): - """ - strip cascade and unecessary onnx::Cast-9: 
- """ - - assert isinstance(model, - onnx.ModelProto), 'model is not a ModelProto instance' - - nodes = model.graph.node - input_refs, output_refs = build_value_refs(nodes) - value_info = inferred_model_value_info(model) - - ret = type(model)() - ret.CopyFrom(model) - ret.graph.ClearField( - 'value_info') # WORKAROUND: onnx do not drop old value_info - ret_nodes = ret.graph.node - nodes_to_remove = [] - for node_idx, node in enumerate(nodes): - if not (node.domain == DEFAULT_OP_DOMAIN or node.domain == ''): - continue - if node.op_type != 'Cast': - continue - attrs = node_attrs(node) - output_dtype = TENSOR_TYPE_TO_NP_TYPE[attrs['to']] - input_name = node.input[0] - info = value_info.get(input_name, None) # relax for un-inferrable - if info is None: - continue - input_dtype = info.get('dtype', None) - if input_dtype is None or input_dtype != output_dtype: - continue - - output_name = node.output[0] - if output_name in input_refs: - processed = skip_node_forward(ret_nodes, output_name, input_name, - input_refs) - elif input_name in output_refs: - processed = skip_node_backward(ret_nodes, input_name, output_name, - output_refs) - else: - processed = -1 - - if processed > 0: - nodes_to_remove.append(node_idx) - logger.debug('skip %s: %s -> %s Cast op', node.name, input_dtype, - output_dtype) - elif processed == 0: - logger.warning('weird, no node processed') - else: - logger.debug('keep standalone %s: %s -> %s Cast op', node.name, - input_dtype, output_dtype) - - nodes_to_remove.sort(reverse=True) - for node_idx in nodes_to_remove: - ret_nodes.pop(node_idx) - - return ret - - -def optimize_model_slice(model): - """ - strip cascade and unecessary onnx::Slice-1:9 - """ - - assert isinstance(model, - onnx.ModelProto), 'model is not a ModelProto instance' - - nodes = model.graph.node - input_refs, output_refs = build_value_refs(nodes) - - def build_slice_node_chain(node_idx): - chain = [] - while True: - node = nodes[node_idx] - if not (node.domain == DEFAULT_OP_DOMAIN or node.domain == ''): - return chain - if node.op_type != 'Slice': - return chain - chain.append(node_idx) - output_name = node.output[0] - if output_name not in input_refs or len( - input_refs[output_name]) != 1: - return chain - node_idx = list(input_refs[output_name])[0] - - # axis: (start, end) - def merge_slice(slice_chain): - merged_slice = dict() - for slice_node_idx in slice_chain: - node = nodes[slice_node_idx] - attrs = node_attrs(node) - for axis, start, end in zip(attrs['axes'], attrs['starts'], - attrs['ends']): - if start == 0 and end == ONNX_INT_MAX: - continue - if axis in merged_slice: - prev_start, prev_end = merged_slice[axis] - start += prev_start if start >= 0 else 0 if prev_end == ONNX_INT_MAX else prev_end - end += prev_start if end >= 0 else 0 if prev_end == ONNX_INT_MAX else prev_end - merged_slice[axis] = (start, end) - return merged_slice - - ret = type(model)() - ret.CopyFrom(model) - ret.graph.ClearField( - 'value_info') # WORKAROUND: onnx do not drop old value_info - ret_nodes = ret.graph.node - nodes_to_remove = [] - for node_idx in range(len(nodes)): - slice_chain = build_slice_node_chain(node_idx) - if not slice_chain: - continue - merged_slice = merge_slice(slice_chain) - if merged_slice and len(slice_chain) == 1: # no need to merge - continue - - attrs = {'axes': [], 'starts': [], 'ends': []} - for axis, (start, end) in merged_slice.items(): - attrs['axes'].append(axis) - attrs['starts'].append(start) - attrs['ends'].append(end) - first_node = nodes[slice_chain[0]] - last_node = 
nodes[slice_chain[-1]] - input_name = first_node.input[0] - output_name = last_node.output[0] - processed = -1 - if output_name in input_refs: # 0, [1...] - new_input_name = first_node.output[0] if merged_slice else input_name - processed = skip_node_forward(ret_nodes, output_name, - new_input_name, input_refs) - if processed > 0: - if merged_slice: - remain_idx = slice_chain[0] - remove_chain = slice_chain[1:] - slice_node = ret_nodes[remain_idx] - for attr in slice_node.attribute: - attr.CopyFrom( - make_attribute(attr.name, attrs[attr.name])) - logger.debug('merged slice chain %s -> %s%s -> %s', - input_name, remain_idx, remove_chain, - output_name) - else: - remove_chain = slice_chain - - if processed < 0 and input_name in output_refs: - new_output_name = last_node.input[0] if merged_slice else output_name - processed = skip_node_backward(ret_nodes, input_name, - new_output_name, output_refs) - if processed > 0: - if merged_slice: - remain_idx = slice_chain[-1] - remove_chain = slice_chain[:-1] - slice_node = ret_nodes[remain_idx] - for attr in slice_node.attribute: - attr.CopyFrom( - make_attribute(attr.name, attrs[attr.name])) - logger.debug('merged slice chain %s -> %s%s -> %s', - input_name, remove_chain, remain_idx, - output_name) - else: - remove_chain = slice_chain - - if processed > 0: - nodes_to_remove.extend(remove_chain) - if not merged_slice: - logger.debug('skip slice chain %s -> %s -> %s', input_name, - slice_chain, output_name) - elif processed < 0: # NEVERFIX: not merge standalone slice chain - logger.debug('keep standalone slice chain %s -> %s -> %s', - input_name, slice_chain, output_name) - - nodes_to_remove.sort(reverse=True) - for node_idx in nodes_to_remove: - ret_nodes.pop(node_idx) - - return ret - - -if __name__ == '__main__': - logging.basicConfig( - format= - '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s', - level=logging.DEBUG, - ) - - from onnx.version_converter import convert_version - - model = onnx.load('/tmp/export.onnx') - print_pb_structure(model, loop_iterative=False) - - check_model(model) - model = convert_version(model, 9) - model = polish_model(model) - - onnx.save(model, '/tmp/export.polished.onnx') - - graph = model.graph - value_info = inferred_model_value_info(model) - - name = graph.name - inputs = [value.name for value in graph.input] - outputs = [value.name for value in graph.output] - weights = [] - - logger.info('ops:') - for name, domain, op_type, _, _, attrs in graph_ops(graph, topo='forward'): - logger.info('- \t%s %s::%s: %s', name, domain, op_type, attrs) - - logger.info('weights:') - for name, array in graph_weights(graph): - weights.append(name) - logger.info('- \t%s: %s', name, array.shape) - - logger.info('inputs:') - external_inputs = [] - for name in inputs: - if name not in weights: - external_inputs.append(name) - logger.info('- \t%s: %s', name, value_info[name]['shape']) - - logger.info('outputs:') - external_outputs = [] - for name in outputs: - if name not in weights: - external_outputs.append(name) - logger.info('- \t%s: %s', name, value_info[name]['shape']) diff --git a/onnx2fluid/onnx2fluid/symbolic.py b/onnx2fluid/onnx2fluid/symbolic.py deleted file mode 100644 index e781d7d..0000000 --- a/onnx2fluid/onnx2fluid/symbolic.py +++ /dev/null @@ -1,2615 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -ONNX to Paddle fluid symbolic translation - -TODO: move non-ONNX ops out to symbolic_aten.py, symbolic_caffe2.py ... 
- -Created on Mon Feb 25 09:33:43 2019 - -@author: Macrobull -""" - -from __future__ import division - -import logging as _logging -import numpy as _np - -from collections import OrderedDict as _dict -from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE - -_logger = _logging.getLogger(__name__) - -ONNX_INT_MAX = 2**63 - 1 -FLUID_INT_MAX = 2**31 - 1 # - -DEFAULT_ONNX_OP_DOMAIN = '' -DEFAULT_FLUID_OP_NAMESCOPE = '/' - -DEFAULT_OP_MAPPING_FIELD_VALUES = _dict() -DEFAULT_OP_MAPPING_FIELD_VALUES['FLUID_OP'] = '' -DEFAULT_OP_MAPPING_FIELD_VALUES['FLUID_INPUT_ARGS'] = None -DEFAULT_OP_MAPPING_FIELD_VALUES['FLUID_OUTPUT_ARGS'] = None -DEFAULT_OP_MAPPING_FIELD_VALUES['ATTR_MAPPING'] = dict( -) # dict(onnx_attr_from=fluid_attr_to) -DEFAULT_OP_MAPPING_FIELD_VALUES['DEFAULTS'] = dict() # dict(fluid_attr=default) -DEFAULT_OP_MAPPING_FIELD_VALUES[ - 'INPUT_PERM'] = None # sampler: [idx_onnx_arg...] -DEFAULT_OP_MAPPING_FIELD_VALUES[ - 'OUTPUT_PERM'] = None # sampler: [idx_onnx_arg...] -DEFAULT_OP_MAPPING_FIELD_VALUES['FILL_NAME_FIELD'] = True -DEFAULT_OP_MAPPING_VALUES = list(DEFAULT_OP_MAPPING_FIELD_VALUES.values()) - -DEFAULT_OP_MAPPING = { - ## nil ops ## - 'RandomUniform': - ['uniform_random', [], ['Out'], dict(high='max', low='min'), - dict(max=1., min=0., seed=0), None, None, False], # TODO: add dtype support - 'RandomNormal': - ['gaussian_random', [], ['Out'], dict(scale='std'), - dict(mean=0., std=1., seed=0), None, None, False], # TODO: add dtype support - ## unary ops ## - 'Abs': ['abs', ['X'], ['Out']], - 'Acos': ['acos', ['X'], ['Out']], - 'Asin': ['asin', ['X'], ['Out']], - 'Atan': ['atan', ['X'], ['Out']], - 'ArgMax': ['argmax', ['X'], ['Out'], dict(keepdims=''), dict(axis=0)], - 'ArgMin': ['argmin', ['X'], ['Out'], dict(keepdims=''), dict(axis=0)], - 'Ceil': ['ceil', ['X'], ['Out']], - 'Clip': - ['clip', ['X'], ['Out'], dict(), dict( - min=(_np.array([255, 255, 127, 255], dtype=_np.uint8).view(_np.float32)), - max=(_np.array([255, 255, 127, 127], dtype=_np.uint8).view(_np.float32)), - )], - 'Cos': ['cos', ['X'], ['Out']], - 'Elu': ['elu', ['X'], ['Out'], dict(), dict(alpha=1.)], - 'Exp': ['exp', ['X'], ['Out']], - 'Flatten': ['flatten', ['X'], ['Out'], dict(), dict(axis=1)], # FIXME: emit flatten2 - 'Floor': ['floor', ['X'], ['Out']], - 'Gather': ['gather', ['X', "Index"], ['Out'], dict(axis='')], - 'HardSigmoid': - ['hard_sigmoid', ['X'], ['Out'], dict(alpha='slope', beta='offset'), - dict(slope=.2, offset=.5)], - 'Identity': ['assign', ['X'], ['Out']], - 'LeakyRelu': ['leaky_relu', ['X'], ['Out'], dict(), dict(alpha=.01)], - 'Log': ['log', ['X'], ['Out']], - 'LRN': - ['lrn', ['X'], ['Out', 'MidOut'], dict(size='n', bias='k'), - dict(n=5, k=1., alpha=1e-4, beta=.75)], # - 'Reciprocal': ['reciprocal', ['X'], ['Out']], - 'Relu': ['relu', ['X'], ['Out']], - 'Round': ['round', ['X'], ['Out']], - 'Selu': - ['selu', ['X'], ['Out'], dict(gamma='scale'), dict( - scale=1.0507009873554804934193349852946, - alpha=1.6732632423543772848170429916717, - )], - 'Shrink': ['softshrink', ['X'], ['Out'], dict(bias='', labmd='')], - 'Sigmoid': ['sigmoid', ['X'], ['Out']], - 'Sign': ['sign', ['X'], ['Out']], - 'Sin': ['sin', ['X'], ['Out']], - 'Squeeze': ['squeeze', ['X'], ['Out']], # FIXME: emit squeeze2 - # FIXME: default axis = -1, reshape required before and after - 'Softmax': ['softmax', ['X'], ['Out'], dict(axis=''), dict(axis=-1)], - 'Softplus': ['softplus', ['X'], ['Out']], - 'Softsign': ['softsign', ['X'], ['Out']], - 'SpaceToDepth': ['space_to_depth', ['X'], ['Out']], - 'Sqrt': ['sqrt', ['X'], ['Out']], - 
'Tanh': ['tanh', ['X'], ['Out']], - 'ThresholdedRelu': - ['thresholded_relu', ['X'], ['Out'], dict(alpha='threshold'), dict(alpha=1.)], - #'Transpose': ['transpose', ['X'], ['Out']], - 'Unsqueeze': ['unsqueeze', ['X'], ['Out']], # FIXME: emit unsqueeze2 - ## binary ops ## - 'Add': ['elementwise_add', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], - #'AffineGrid': ['affine_grid', ['Theta'], ['Output'], dict(size='out_shape')], - 'And': ['logical_and', ['X', 'Y'], ['Out']], - 'Div': ['elementwise_div', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], - 'Equal': ['equal', ['X', 'Y'], ['Out'], dict(), dict(), None, None, False], - 'Greater': ['less_than', ['X', 'Y'], ['Out'], dict(), dict(), [1, 0], None, False], - 'Less': ['less_than', ['X', 'Y'], ['Out'], dict(), dict(), None, None, False], - 'MatMul': ['matmul', ['X', 'Y'], ['Out']], # defaults excluded for transpose_x vs transpose_X - 'Max': ['elementwise_max', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], - 'Min': ['elementwise_min', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], - 'Mod': ['elementwise_mod', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], - 'Mul': ['elementwise_mul', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], - 'Not': ['logical_not', ['X', 'Y'], ['Out']], - 'OneHot': # assuming values=[0, 1], axis=-1 and drop them - ['one_hot', ['Input', 'depth_tensor'], ['Out'], dict(axis=''), dict(), - [0, 1], None, False], - 'Or': ['logical_or', ['X', 'Y'], ['Out']], - 'Pow': ['elementwise_pow', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], # TODO: pow for scalar exponent - 'Sub': ['elementwise_sub', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], - 'Xor': ['logical_xor', ['X', 'Y'], ['Out']], - # reduce ops - # TODO: fix reduce_all ? - 'ReduceMax': - ['reduce_max', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim'), - dict(keep_dim=1)], - 'ReduceMean': - ['reduce_mean', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim'), - dict(keep_dim=1)], - 'ReduceMin': - ['reduce_min', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim'), - dict(keep_dim=1)], - 'ReduceProd': - ['reduce_prod', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim'), - dict(keep_dim=1)], - 'ReduceSum': - ['reduce_sum', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim'), - dict(keep_dim=1)], - # other ops - 'Scatter': ['scatter', ['X', 'Ids', 'Updates'], ['Out'], dict(), dict(overwrite=True)], - 'TopK': ['topk', ['X', 'K'], ['Out', 'Indices']], -} - -DEFAULT_IOA_CONSTRAINTS = { - 'ArgMax': [ - (lambda i, o, a: a.get('keepdims', 1) == 1, - 'only keepdims = 0 supported'), - ], - 'ArgMin': [ - (lambda i, o, a: a.get('keepdims', 1) == 1, - 'only keepdims = 0 supported'), - ], - 'Gather': [ - (lambda i, o, a: a.get('axis', 0) == 0, 'only axis = 0 supported'), - ], - 'Shrink': [ - (lambda i, o, a: a.get('bias', 0) == a.get('lambd', .5), - 'only SoftShrink with bias = lambd supported'), - ], - # 'Softmax': - # [(lambda i, o, a: a.get('axis', 1) == -2, 'Paddle fluid Softmax works on dim -2 only'), - # ], - 'OneHot': [ - (lambda i, o, a: a.get('axis', -1) == -1, 'only axis = -1 supported'), - ], - 'Scatter': [ - (lambda i, o, a: a.get('axis', 0) == 0, 'only axis = 0 supported'), - ], - 'TopK': [ - (lambda i, o, a: a.get('axis', -1) == -1, 'only axis = -1 supported'), - ], -} - - -def _dtype(value_infos, name): - return _np.dtype(value_infos[name]['dtype']) - - -def _dtype_or_none(value_infos, name): - if name not in value_infos: - return None - value_info = value_infos[name] - if 'dtype' not in value_info: - return None - return _np.dtype(value_info['dtype']) - - -def 
_shape(value_infos, name): - return list(value_infos[name]['shape']) - - -def _shape_or_none(value_infos, name): - if name not in value_infos: - return None - value_info = value_infos[name] - if 'shape' not in value_info: - return None - return list(value_info['shape']) - - -def _const_weight_or_none(value_infos, name): - if name not in value_infos: - return None - value_info = value_infos[name] - const_value = value_info.get('const_value', None) - if const_value is not None: - return const_value - get_weight_func = value_info.get('get_weight', None) - if get_weight_func is not None: - return get_weight_func() - return None - - -def _check_embeddable(value_infos, *names): - keyword = 'get_weight' - for name in names: - if keyword not in value_infos[name]: - _logger.warning('parameter %s not embeddable', name) - return False - return True - - -def _default(prog, op_type, inputs, outputs, attrs, *args, name='', **kwargs): - info = DEFAULT_OP_MAPPING[op_type] - info.extend(DEFAULT_OP_MAPPING_VALUES[len(info):]) - - ( - fluid_op, - fluid_input_args, - fluid_output_args, - attr_mapping, - default_attrs, - input_perm, - output_perm, - fill_name_field, - ) = info - - if fluid_op in DEFAULT_IOA_CONSTRAINTS: - for predicate, message in DEFAULT_IOA_CONSTRAINTS[fluid_op]: - assert predicate(inputs, outputs, attrs), message - - # bypass if key absent, drop if mapped key is '' or '_' - mapped_attrs = { - attr_mapping.get(key, key): value - for key, value in attrs.items() - } - if '' in mapped_attrs: - mapped_attrs.pop('') - if '_' in mapped_attrs: - mapped_attrs.pop('_') - fluid_attrs = default_attrs.copy() - fluid_attrs.update(mapped_attrs) # as new attrs - - var_inps = list(map(inputs.__getitem__, - input_perm)) if input_perm is not None else inputs - var_outs = list(map(outputs.__getitem__, - output_perm)) if output_perm is not None else outputs - for var_name in var_inps + var_outs: - assert var_name - - arg_name = ', name={}'.format( - repr(name)) if fill_name_field and name else '' - arg_attrs = [ - ', {}={}'.format(key, value) for key, value in fluid_attrs.items() - ] - - prog.Code('{} = layers.{}({}{}{})'.format( - ', '.join(var_outs), - fluid_op, - ', '.join(var_inps), - ''.join(arg_attrs)[(0 if var_inps else 2):], - arg_name, - )) - - # dummy var_out - num_vars = len(var_outs) - num_args = len(fluid_output_args) - if num_vars < num_args: - assert fill_name_field and name, 'name required to name dummy output variables' - for idx_out in range(num_vars, num_args): - var_out = name + '.' 
+ fluid_output_args[idx_out] # dummy output - var_outs.append(var_out) - - for var_out in var_outs: - prog.VarDesc(var_out) - prog.OpDesc(fluid_op, (fluid_input_args, var_inps), - (fluid_output_args, var_outs), fluid_attrs) - - -def _assign(prog, mapping): - fluid_op = 'assign' - - for var_dst, var_src in mapping.items(): - assert var_dst and var_src - prog.Code('{} = {} # assign'.format(var_dst, var_src)) - # prog.Code('{} = layers.{}({})' - # .format(var_dst, - # fluid_op, - # var_src, - # )) - prog.VarDesc(var_dst) - prog.OpDesc( - fluid_op, - (['X'], [var_src]), - (['Out'], [var_dst]), - dict(), - ) - - -def _zeros_like(prog, var_ref, var_out): - prog.Op( - '', - 'Sub', - [var_ref, var_ref], - [var_out], - {'axis': 0}, - ) - - -def _pad_if_asymmetric(prog, pads, var_input, value_infos, scope): # pads: SSEE - assert len(pads) & 1 == 0 - ndims = len(pads) // 2 - symmetric = True - for idx_dim in range(ndims): - if pads[idx_dim] != pads[ndims + idx_dim]: - symmetric = False - break - if symmetric: - return pads[:ndims], var_input - - assert scope - var_padded = scope + '_pad' # explicit variable - prog.Op( - '', - 'Pad', - [var_input], - [var_padded], - { - 'mode': 'constant', - 'value': 0., - 'pads': pads, - }, - value_infos=value_infos, - name=(scope + '/pad'), - ) - return [0] * ndims, var_padded - - -def _adaptive_pool(prog, pool_type, inputs, outputs, attrs, name=''): - # I/O - var_x, = inputs - var_y, var_indices, = (outputs + [''] * 1)[:2] - assert var_x and var_y - - # interpretation - pool_size = attrs['output_size'] # required - poolnd = len(pool_size) - assert 2 <= poolnd <= 3, 'only pool2d and pool3d supported' - - fluid_op = 'adaptive_pool{}d'.format(poolnd) - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{}{} = layers.{}({}' - ', require_index={}' - ', pool_size={}' - ', pool_type={}' - '{})'.format( - var_y, - ', {}'.format(var_indices) if var_indices else '', - fluid_op, - var_x, - # attrs - bool(var_indices), - pool_size, - repr(pool_type), - name_attr, - )) - fluid_op = 'pool{}d'.format(poolnd) - prog.VarDesc(var_y) - if var_indices: - prog.VarDesc(var_indices) - prog.OpDesc( - fluid_op, - (['X'], [var_x]), - (['Out', 'Indices'], [var_y] + ([var_indices] if var_indices else [])), - { - 'adaptive': True, - 'pooling_type': pool_type, - 'ksize': pool_size, - # unused - # 'exclusive': True, - # 'global_pooling': False, - }, - ) - - -def _global_pool(prog, pool_type, inputs, outputs, value_infos, name=''): - # I/O - var_x, = inputs - var_y, = outputs - assert var_x and var_y - - # interpretation - input_shape = _shape_or_none(value_infos, var_x) - output_shape = _shape_or_none(value_infos, var_y) - assert input_shape is not None or output_shape is not None, 'poolnd not inferred' # NC... - if input_shape is not None: - poolnd = len(input_shape) - 2 # NC... - elif output_shape is not None: - poolnd = len(output_shape) - 2 # NC... 
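# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original onnx2fluid sources):
# `_pad_if_asymmetric` above reads ONNX `pads` in SSEE order -- first the start
# pad of every spatial dim, then the end pad of every spatial dim -- and keeps
# the one-sided form only when both sides of each dim agree. A minimal
# standalone version of that symmetry check, assuming the same SSEE layout:
def _is_symmetric_pads_sketch(pads):
    assert len(pads) % 2 == 0, 'pads must hold a start and an end per dim'
    ndims = len(pads) // 2
    return all(pads[idx] == pads[ndims + idx] for idx in range(ndims))

# e.g. [1, 1, 1, 1] pads H and W by 1 on both sides    -> symmetric
#      [0, 0, 1, 1] pads only the bottom/right side    -> asymmetric
assert _is_symmetric_pads_sketch([1, 1, 1, 1]) is True
assert _is_symmetric_pads_sketch([0, 0, 1, 1]) is False
# ---------------------------------------------------------------------------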
- assert 2 <= poolnd <= 3, 'only pool2d and pool3d supported' - - fluid_op = 'pool{}d'.format(poolnd) - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{} = layers.{}({}, global_pooling=True' - ', pool_type={}' - '{})'.format( - var_y, - fluid_op, - var_x, - # attrs - repr(pool_type), - name_attr, - )) - prog.VarDesc(var_y) - prog.OpDesc( - fluid_op, - (['X'], [var_x]), - (['Out'], [var_y]), - { - 'global_pooling': True, - 'pooling_type': pool_type, - # unused - 'adaptive': False, - 'ksize': [-1, -1], - 'strides': [-1, -1], - 'paddings': [0, 0], - 'ceil_mode': False, - }, - ) - - -def _pool(prog, pool_type, inputs, outputs, attrs, value_infos, name): - # I/O - var_x, = inputs - var_y, var_indices, = (outputs + [''] * 1)[:2] - assert name and var_x and var_y - - # interpretation - assert attrs.get( - 'auto_pad', - 'NOTSET') == 'NOTSET', 'only auto_pad = NOTSET supported' # optional - assert attrs.get('count_include_pad', - 0) == 0, 'only count_include_pad = 0 supported' # optional - pool_size = attrs['kernel_shape'] # required - poolnd = len(pool_size) - assert 2 <= poolnd <= 3, 'only pool2d and pool3d supported' - - fluid_op = 'pool{}d'.format(poolnd) - strides = attrs.get('strides', [1] * poolnd) # optional - ceil_mode = bool(attrs.get('ceil_mode', 0)) # optional - pads = attrs.get('pads', [0] * (poolnd * 2)) # optional - paddings, var_x = _pad_if_asymmetric(prog, pads, var_x, value_infos, name) - name_attr = ', name={}'.format(repr(name)) - - # generation - prog.Code('{} = layers.{}({}, exclusive=True' - ', pool_size={}' - ', pool_type={}' - ', pool_stride={}' - ', pool_padding={}' - ', ceil_mode={}' - '{})'.format( - var_y, - fluid_op, - var_x, - # attrs - pool_size, - repr(pool_type), - strides, - paddings, - ceil_mode, - name_attr, - )) - prog.VarDesc(var_y) - if var_indices: - prog.VarDesc(var_indices) - prog.OpDesc( - fluid_op, - (['X'], [var_x]), - (['Out', 'Indices'], [var_y] + ([var_indices] if var_indices else [])), - { - 'global_pooling': False, - 'pooling_type': pool_type, - 'ksize': pool_size, - 'strides': strides, - 'paddings': paddings, - 'ceil_mode': ceil_mode, - # unused - 'adaptive': False, - # 'exclusive': True, - }, - ) - - -def _roi_pool(prog, fluid_op, inputs, outputs, attrs, name): - # I/O - var_x, var_rois, = inputs - var_y, = outputs - assert name and var_x and var_rois and var_y - - # interpretation - spatial_scale = attrs['spatial_scale'] # required - pooled_height, pooled_width = attrs['pooled_shape'] # required - od_attrs = { - 'pooled_height': pooled_height, - 'pooled_width': pooled_width, - 'spatial_scale': spatial_scale, - } - feature_attr = '' - is_max_pool = fluid_op == 'roi_pool' - if 'sampling_ratio' in attrs: # - sampling_ratio = attrs['sampling_ratio'] - od_attrs['sampling_ratio'] = sampling_ratio - feature_attr += ', sampling_ratio={}'.format(sampling_ratio) - if 'output_channels' in attrs: # - output_channels = attrs['output_channels'] - od_attrs['output_channels'] = output_channels - feature_attr += ', output_channels={}'.format(output_channels) - - # generation - prog.Code('{} = layers.{}({} {}' - ', spatial_scale={}' - ', pooled_height={}' - ', pooled_width={}' - '{})'.format( - var_y, - fluid_op, - var_x, - var_rois, - # attrs - spatial_scale, - pooled_height, - pooled_width, - feature_attr, - )) - prog.VarDesc(var_y) - if is_max_pool: - var_argmax = name + '.argmax' # hidden variable - prog.VarDesc(var_argmax) - prog.OpDesc( - fluid_op, - (['X', 'ROIs'], [var_x, var_rois]), - (['Out', 'Argmax'], [var_y] 
+ ([var_argmax] if is_max_pool else [])), - od_attrs, - ) - - -def _interpolate(prog, inputs, outputs, attrs, value_infos, name=''): - # I/O - var_x, var_scales, = inputs - var_y, = outputs - assert var_x and var_scales and var_y - - # interpretation - # output shape - out_shape_ = _shape_or_none(value_infos, var_y) - if out_shape_ is not None: - assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported' - out_shape_ = out_shape_[2:] - # try scales - scales = _const_weight_or_none(value_infos, var_scales) - if scales is not None: - assert len(scales) == 4, 'only 4-D Tensor as X and Y supported' - assert scales[0] == 1 and scales[ - 1] == 1, 'only scale on (NC)HW supported' - assert scales[2] == scales[ - 3], 'only aspect-ratio-invariant scale supported' - scale = scales[2] - # try input shape - if scale is None: - assert out_shape_, 'neither scales nor output shape available' - out_shape = out_shape_ - else: - out_shape = None - if out_shape_ is None: - in_shape = _shape_or_none(value_infos, var_x) - assert in_shape is not None, 'out_shape required but not inferrable' - assert len(in_shape) == 4, 'only 4-D Tensor as X and Y supported' - out_shape_ = [in_shape[2] * scale, in_shape[3] * scale] - mode = attrs.get('mode', 'nearest') - fluid_op = 'resize_{}'.format(mode) # not sure bilinear will be linear? - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{} = layers.{}({}' - ', scale={}' - ', out_shape={}' - '{})'.format( - var_y, - fluid_op, - var_x, - # attrs - scale, - out_shape, - name_attr, - )) - fluid_op = '{}_interp'.format(mode) - prog.VarDesc(var_y) - prog.OpDesc( - fluid_op, - (['X'], [var_x]), - (['Out'], [var_y]), - { - 'interp_method': mode, - 'out_h ': out_shape_[0], - 'out_w ': out_shape_[1], - }, - ) - - -def AdaptiveAveragePool(prog, inputs, outputs, attrs, *args, name='', **kwargs): - """ - aten::adaptive_avg_poolnd - """ - - return _adaptive_pool(prog, 'avg', inputs, outputs, attrs, name=name) - - -def AdaptiveMaxPool(prog, inputs, outputs, attrs, *args, name='', **kwargs): - """ - aten::adaptive_max_poolnd - """ - - return _adaptive_pool(prog, 'max', inputs, outputs, attrs, name=name) - - -def AffineGrid(prog, inputs, outputs, attrs, *args, name='', **kwargs): - """ - aten::affine_grid - """ - - # I/O - var_theta, = inputs - var_grid, = outputs - assert var_theta and var_grid - - # interpretation - fluid_op = 'affine_grid' - size = attrs['size'] # required - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{} = layers.{}({}' - ', out_shape={}' - '{})'.format( - var_grid, - fluid_op, - var_theta, - # attrs - size, - name_attr, - )) - prog.VarDesc(var_grid) - prog.OpDesc( - fluid_op, - (['Theta'], [var_theta]), - (['Output'], [var_grid]), - {'output_shape': size}, # f**k you API - ) - - -def AveragePool(prog, inputs, outputs, attrs, value_infos, name, *args, - **kwargs): - """ - onnx::AveragePool-10: - """ - - return _pool(prog, 'avg', inputs, outputs, attrs, value_infos, name) - - -def BatchNormalization(prog, - inputs, - outputs, - attrs, - value_infos, - name='', - embed_params=False, - *args, - **kwargs): - """ - onnx::BatchNormalization-9: - """ - - # I/O - var_x, var_scale, var_b, var_mean, var_var, = inputs - var_y, var_mean_, var_var_, var_saved_mean, var_saved_variance, = ( - outputs + [''] * 4)[:5] - assert var_x and var_scale and var_b and var_mean and var_var and var_y - assert var_saved_mean or name - assert var_saved_variance or name - var_saved_mean = var_saved_mean or 
(name + '.saved_mean') # dummy output - var_saved_variance = var_saved_variance or (name + '.saved_variance' - ) # dummy output - - # interpretation - fluid_op = 'batch_norm' - momentum = attrs.get('momentum', .9) # optional - epsilon = attrs.get('epsilon', 1e-5) # optional - name_attr = ', name={}'.format(repr(name)) if name else '' - embeddable = _check_embeddable(value_infos, var_scale, var_b, var_mean, - var_var) - if not embeddable: - _logger.warning('for op %s(%s -> BatchNormalization -> %s)', name, - inputs, outputs) - _logger.warning('one of the parameters is intermediate value') - _logger.warning('broken Python code will be generated') - embed_params &= embeddable - if embed_params: - assert name - embedded_scale = name + '.w_0' - embedded_b = name + '.b_0' - embedded_mean = name + '.w_1' - embedded_var = name + '.w_2' - value_infos[var_scale]['embedded_as'].append(embedded_scale) - value_infos[var_b]['embedded_as'].append(embedded_b) - value_infos[var_mean]['embedded_as'].append(embedded_mean) - value_infos[var_var]['embedded_as'].append(embedded_var) - var_scale = embedded_scale - var_b = embedded_b - var_mean = embedded_mean - var_var = embedded_var - param_attr = '' - else: - param_attr = (', param_attr={}, bias_attr={}' - ', moving_mean_name={}, moving_variance_name={}').format( - repr(var_scale), repr(var_b), repr(var_mean), - repr(var_var)) - - # generation - prog.Code('{} = layers.{}({}, is_test=True' - ', momentum={}' - ', epsilon={}' - '{}{})'.format( - var_y, - fluid_op, - var_x, - # attrs - momentum, - epsilon, - param_attr, - name_attr, - )) - prog.VarDesc(var_y) - prog.VarDesc(var_saved_mean) - prog.VarDesc(var_saved_variance) - prog.OpDesc( - fluid_op, - (['X', 'Scale', 'Bias', 'Mean', 'Variance' - ], [var_x, var_scale, var_b, var_mean, var_var]), - (['Y', 'MeanOut', 'SavedMean', 'SavedVariance', 'VarianceOut' - ], [var_y, var_mean, var_saved_mean, var_saved_variance, var_var]), - { - 'momentum': momentum, - 'epsilon': epsilon, - 'is_test': 1, - # unused - 'data_layout': 'NCHW', - }, - ) - - -def Cast(prog, inputs, outputs, attrs, value_infos, *args, **kwargs): - """ - onnx::Cast-9: - """ - - # I/O - var_input, = inputs - var_output, = outputs - assert var_input and var_output - - # interpretation - dtype = attrs['to'] # required - if not isinstance(dtype, _np.dtype): # additional: possible np.dtype - dtype = TENSOR_TYPE_TO_NP_TYPE[dtype] - - -# output_dtype = _dtype_or_none(value_infos, var_output) -# if output_dtype is not None: -# assert dtype == output_dtype, 'dtype of to unmatches output' - - fluid_op = 'cast' - - # generation - prog.Code('{} = layers.{}({}' - ', dtype={}' - ')'.format( - var_output, - fluid_op, - var_input, - # attrs - repr(dtype.name), - )) - prog.VarDesc(var_output) - prog.OpDesc( - fluid_op, - (['X'], [var_input]), - (['Out'], [var_output]), - { - 'in_dtype': prog.Dtype(_dtype(value_infos, - var_input)), # holy, required - 'out_dtype': prog.Dtype(dtype), - }, - ) - - -def Concat(prog, inputs, outputs, attrs, *args, name='', **kwargs): - """ - onnx::Concat-4: - """ - - # I/O - var_ret, = outputs - assert var_ret - - # interpretation - fluid_op = 'concat' - axis = attrs['axis'] # required - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{} = layers.{}({}' - ', axis={}' - '{})'.format( - var_ret, - fluid_op, - '[' + ', '.join(inputs) + ']', - # attrs - axis, - name_attr, - )) - prog.VarDesc(var_ret) - prog.OpDesc( - fluid_op, - (['X'] * len(inputs), inputs), - (['Out'], [var_ret]), - {'axis': axis}, - 
) - - -def Constant(prog, inputs, outputs, attrs, value_infos, *args, **kwargs): - """ - onnx::Constant-9: - """ - - # I/O - assert len(inputs) == 0, 'constant op accept no inputs' - var_output, = outputs - assert var_output - - # interpretation - value = attrs['value'] # required - dtype = _np.dtype(value.dtype) - # output_dtype = _dtype_or_none(value_infos, var_output) - # if output_dtype is not None: - # assert dtype == output_dtype, 'tensor dtype unmatches storage dtype' - # dtype = _np.dtype('float32') # HINT: force to float32 - shape = attrs.get('shape', None) # additional - if shape is None: - shape = _shape_or_none(value_infos, var_output) - if shape is None: - shape = list(value.shape) - _logger.warning( - 'in op (Constant -> %s): ' - 'attribute "shape" of %s not inferred, ' - 'using value as 1-D tensor may lead to fails', outputs, var_output) - - # generation - if not shape or value.size == 1: # scalar or 1-size - shape = [1] # WORKAROUND: bad scalar support - value = value.tolist()[0] - fluid_op = 'fill_constant' - prog.Code('{} = layers.{}(shape={}, dtype={}, value={})'.format( - var_output, - fluid_op, - # attrs - shape, - repr(dtype.name), - value, - )) - prog.VarDesc(var_output) - prog.OpDesc( - fluid_op, - ([], []), - (['Out'], [var_output]), - { - 'shape': shape, - 'dtype': prog.Dtype(dtype), - 'value': value, - }, - ) - else: # list parameter -> const_value - prog.Code('# {} = {} # passed directly as literal'.format( - var_output, value.tolist())) - - value_infos[var_output]['const_value'] = value - - -def ConstantOfShape(prog, inputs, outputs, attrs, value_infos, *args, **kwargs): - """ - onnx::ConstantOfShape-9: - """ - - # I/O - var_shape, = inputs - var_output, = outputs - assert var_shape and var_output - - shape = _const_weight_or_none(value_infos, var_shape) - if shape is None: - shape = _shape_or_none(value_infos, var_output) - assert shape is not None, ( - 'given shape is neither const value nor deductible from output, ' - 'this is not supported') - attrs = attrs.copy() - attrs.setdefault('value', _np.array(0, dtype=_np.float32)) - attrs.update({'shape': shape}) # pass const - - prog.Code('# shape: {} = {} # const as literal'.format(var_shape, shape)) - prog.Op( - '', - 'Constant', - [], - outputs, - attrs, - value_infos=value_infos, - ) - - -def Conv(prog, - inputs, - outputs, - attrs, - value_infos, - name, - embed_params=False, - *args, - **kwargs): - """ - onnx::Conv-1: - """ - - # I/O - var_x, var_w, var_b, = (inputs + [''] * 1)[:3] - var_y, = outputs - assert name and var_x and var_w and var_y - - # interpretation - assert attrs.get( - 'auto_pad', - 'NOTSET') == 'NOTSET', 'only auto_pad = NOTSET supported' # optional - kernel_shape = attrs.get('kernel_shape', - _shape(value_infos, var_w)[2:]) # optional, HW - assert kernel_shape, 'kernel_shape not inferred' - convnd = len(kernel_shape) - assert 2 <= convnd <= 3, 'only conv2d and conv3d supported' - num_out_channels = _shape(value_infos, var_w)[0] # OI... 
- - fluid_op = 'conv{}d'.format(convnd) - num_groups = attrs.get('group', 1) # optional - strides = attrs.get('strides', [1] * convnd) # optional - dilations = attrs.get('dilations', [1] * convnd) # optional - pads = attrs.get('pads', [0] * (convnd * 2)) # optional - paddings, var_x = _pad_if_asymmetric(prog, pads, var_x, value_infos, name) - name_attr = ', name={}'.format(repr(name)) - embeddable = _check_embeddable(value_infos, - *([var_w] + ([var_b] if var_b else []))) - if not embeddable: - _logger.warning('for op %s(%s -> Conv -> %s)', name, inputs, outputs) - _logger.warning('one of the parameters is intermediate value') - _logger.warning('broken Python code will be generated') - embed_params &= embeddable - if embed_params: - embedded_w = name + '.w_0' - value_infos[var_w]['embedded_as'].append(embedded_w) - var_w = embedded_w - if var_b: - embedded_b = name + '.b_0' - value_infos[var_b]['embedded_as'].append(embedded_b) - var_b = embedded_b - param_attr = '' - else: - param_attr = ', bias_attr=False' - else: - param_attr = ', param_attr={}, bias_attr={}'.format( - repr(var_w), - repr(var_b) if var_b else False) - - # generation - prog.Code('{} = layers.{}({}' - ', num_filters={}' - ', filter_size={}' - ', stride={}' - ', padding={}' - ', dilation={}' - ', groups={}' - '{}{})'.format( - var_y, - fluid_op, - var_x, - # attrs - num_out_channels, - kernel_shape, - strides, - paddings, - dilations, - num_groups, - param_attr, - name_attr, - )) - var_conv = (name + '.conv') if var_b else var_y # hidden variable - prog.OpDesc( - fluid_op, - (['Input', 'Filter'], [var_x, var_w]), - (['Output'], [var_conv]), - { - 'strides': strides, - 'paddings': paddings, - 'dilations': dilations, - 'groups': num_groups, - }, - ) - if var_b: - prog.VarDesc(var_conv) - prog.IntermediateOp( - '', - 'Add', - [var_conv, var_b], # - [var_y], - {'axis': 1}, - name=(name + '/bias'), - ) - else: - prog.VarDesc(var_y) - - -def ConvTranspose(prog, - inputs, - outputs, - attrs, - value_infos, - name, - embed_params=False, - *args, - **kwargs): - """ - onnx::ConvTranspose-1: - """ - - # I/O - var_x, var_w, var_b, = (inputs + [''] * 1)[:3] - var_y, = outputs - assert name and var_x and var_w and var_y - - # interpretation - assert attrs.get( - 'auto_pad', - 'NOTSET') == 'NOTSET', 'only auto_pad = NOTSET supported' # optional - assert sum( - attrs.get('output_padding', - [])) == 0, 'only zero output_padding supported' # optional ? - kernel_shape = attrs.get('kernel_shape', - _shape(value_infos, var_w)[2:]) # optional, HW - assert kernel_shape, 'kernel_shape not inferred' - convnd = len(kernel_shape) - assert 2 <= convnd <= 3, 'only conv2d_transpose and conv3d_transpose supported' - num_out_channels = _shape(value_infos, var_w)[1] # IO... 
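# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original onnx2fluid sources):
# ONNX stores Conv weights as (out_channels, in_channels/groups, *kernel) and
# ConvTranspose weights as (in_channels, out_channels/groups, *kernel), which
# is why `Conv` above takes num_filters from shape[0] ("OI...") while
# `ConvTranspose` takes it from shape[1] ("IO..."); both fall back to
# shape[2:] for the kernel size when the optional `kernel_shape` attribute is
# absent. With made-up weight shapes:
_conv_w_shape_sketch = [64, 3, 3, 3]         # hypothetical OIHW Conv weight
_convt_w_shape_sketch = [64, 32, 2, 2]       # hypothetical IOHW ConvTranspose weight
assert _conv_w_shape_sketch[0] == 64         # Conv: filters from dim 0
assert _convt_w_shape_sketch[1] == 32        # ConvTranspose: filters from dim 1
assert _conv_w_shape_sketch[2:] == [3, 3]    # kernel-size fallback
# ---------------------------------------------------------------------------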
- - fluid_op = 'conv{}d_transpose'.format(convnd) - num_groups = attrs.get('group', 1) # optional - strides = attrs.get('strides', [1] * convnd) # optional - dilations = attrs.get('dilations', [1] * convnd) # optional - output_size = attrs.get('output_shape', []) # optional - pads = attrs.get('pads', [0] * (convnd * 2)) # optional - paddings, var_x = _pad_if_asymmetric(prog, pads, var_x, value_infos, name) - name_attr = ', name={}'.format(repr(name)) - embeddable = _check_embeddable(value_infos, - *([var_w] + ([var_b] if var_b else []))) - if not embeddable: - _logger.warning('for op %s(%s -> ConvTranspose -> %s)', name, inputs, - outputs) - _logger.warning('one of the parameters is intermediate value') - _logger.warning('broken Python code will be generated') - embed_params &= embeddable - if embed_params: - embedded_w = name + '.w_0' - value_infos[var_w]['embedded_as'].append(embedded_w) - var_w = embedded_w - if var_b: - embedded_b = name + '.b_0' - value_infos[var_b]['embedded_as'].append(embedded_b) - var_b = embedded_b - param_attr = '' - else: - param_attr = ', bias_attr=False' - else: - param_attr = ', param_attr={}, bias_attr={}'.format( - repr(var_w), - repr(var_b) if var_b else False) - - # generation - prog.Code('{} = layers.{}({}' - ', num_filters={}' - ', output_size={}' - ', filter_size={}' - ', padding={}' - ', stride={}' - ', dilation={}' - ', groups={}' - '{}{})'.format( - var_y, - fluid_op, - var_x, - # attrs - num_out_channels, - output_size or None, - kernel_shape, - paddings, - strides, - dilations, - num_groups, - param_attr, - name_attr, - )) - var_conv = (name + '.conv') if var_b else var_y # hidden variable - prog.OpDesc( - fluid_op, - (['Input', 'Filter'], [var_x, var_w]), - (['Output'], [var_conv]), - { - 'strides': strides, - 'paddings': paddings, - 'dilations': dilations, - 'groups': num_groups, - # unused - 'output_size': output_size, - }, - ) - if var_b: - prog.VarDesc(var_conv) - prog.IntermediateOp( - '', - 'Add', - [var_conv, var_b], # - [var_y], - {'axis': 1}, - name=(name + '/bias'), - ) - else: - prog.VarDesc(var_y) - - -def Gemm(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs): - """ - onnx::Gemm-9: - """ - - # due to fluid fc don't support transposed weight, we use matmul + ew_add - var_a, var_b, var_c, = inputs - var_y, = outputs - assert name and var_a and var_b and var_c and var_y - - alpha = attrs.get('alpha', 1.) # optional - beta = attrs.get('beta', 1.) 
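# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original onnx2fluid sources):
# as the comment above notes, Gemm (Y = alpha * A @ B + beta * C) is lowered
# to a matmul followed by an elementwise add, with C scaled by beta first when
# beta != 1. The same arithmetic with plain numpy and made-up operands (a
# sketch of the intent, not the exact fluid call sequence):
import numpy as _np_sketch

_a = _np_sketch.ones((2, 3), dtype=_np_sketch.float32)
_b = _np_sketch.ones((3, 4), dtype=_np_sketch.float32)
_c = _np_sketch.ones((4, ), dtype=_np_sketch.float32)
_alpha, _beta = 1.0, 2.0
_y_gemm = _alpha * _a.dot(_b) + _beta * _c        # reference Gemm result
_y_lowered = _a.dot(_b) * _alpha + (_c * _beta)   # matmul + scaled bias add
assert _np_sketch.allclose(_y_gemm, _y_lowered)
# ---------------------------------------------------------------------------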
# optional - trans_a = bool(attrs.get('transA', 0)) # optional - trans_b = bool(attrs.get('transB', 0)) # optional - - var_mm = var_y if beta == 0 else (name + '_mm') # explicit variable - prog.Op( - '', - 'MatMul', - [var_a, var_b], - [var_mm], - { - 'transpose_x': trans_a, - 'transpose_y': trans_b, - 'alpha': alpha, - }, - name=(name + '/mm'), - ) - prog.op_descs[-1].attrs.extend( - prog.OpDescAttrs({ - 'transpose_X': trans_a, - 'transpose_Y': trans_b, - })) # f**k you API - if beta != 0: - if beta == 1.: # exactly - prog.Op( - '', - 'Add', - [var_mm, var_c], - [var_y], - {'axis': 1}, - name=(name + '/bias'), - ) - else: - var_beta = name + '_beta' # explicit variable - var_vm = name + '_vm' # explicit variable - if beta.is_integer(): - vm_dtype = _dtype_or_none(value_infos, var_c) - if vm_dtype is None: - vm_dtype = _np.dtype('float32') - _logger.warning( - 'in op %s(%s -> Gemm -> %s): ' - 'attribute "beta" seems to be an interger, ' - 'however dtype can not be inferred, ' - 'still use float32', name, inputs, outputs) - beta = _np.dtype(vm_dtype).type(beta) - prog.Op( - '', - 'Constant', - [], - [var_beta], - {'value': beta}, - ) - prog.Op( - '', - 'Mul', - [var_c, var_beta], - [var_vm], - dict(), - name=(name + '.beta/scale'), - ) - prog.Op( - '', - 'Add', - [var_mm, var_vm], - [var_y], - {'axis': 1}, # - name=(name + '/bias'), - ) - - -def GlobalAveragePool(prog, - inputs, - outputs, - attrs_, - value_infos, - name='', - *args, - **kwargs): - """ - onnx::GlobalAveragePool-1: - """ - - return _global_pool(prog, 'avg', inputs, outputs, value_infos, name=name) - - -def GlobalMaxPool(prog, - inputs, - outputs, - attrs_, - value_infos, - name='', - *args, - **kwargs): - """ - onnx::GlobalMaxPool-1: - """ - - return _global_pool(prog, 'max', inputs, outputs, value_infos, name=name) - - -def GRU(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs): - """ - onnx::GRU-7: - """ - - var_x, var_w, var_r, var_b, var_len, var_xh, = (inputs + [''] * 3)[:6] - var_y, var_yh, = (outputs + [''] * 2)[:2] - assert name and var_x and var_w and var_r # and (var_y or var_yh) - var_gate = name + '.gate' # dummy output - var_reset = name + '.reset' # dummy output - var_hidden = name + '.hidden' # dummy output, # var_yh - - # interpretation - x_shape = _shape_or_none(value_infos, var_x) - assert x_shape is not None, 'shape of X required to be known' - assert x_shape[1] == 1, 'only X with batch_size = 1 supported' - assert 'clip' not in attrs, 'clipping not supported' - hidden_size = attrs.get('hidden_size', None) # optional - if hidden_size is None: - r_shape = _shape_or_none(value_infos, var_r) - if r_shape: - hidden_size = r_shape[-1] - if hidden_size is None: - w_shape = _shape_or_none(value_infos, var_w) - if w_shape: - hidden_size = w_shape[-2] // 3 - if hidden_size is None and var_b: - b_shape = _shape_or_none(value_infos, var_b) - if b_shape: - hidden_size = b_shape[-1] // 6 - if hidden_size is None and var_xh: - xh_shape = _shape_or_none(value_infos, var_xh) - if xh_shape: - hidden_size = xh_shape[-1] - assert hidden_size, 'hidden_size not inferred' - assert attrs.get( - 'linear_before_reset', - 0) == 0, 'only linear_before_reset = 0 supported' # optional - direction = attrs.get('direction', 'forward') # optional - assert direction != 'bidirectional', 'direction = bidirectional not supported' - activations = attrs.get('activations', ['Sigmoid', 'Tanh']) # optional - assert len(activations) == 2, 'bidirectional operation not supported' - activations = [s.lower() for s in activations] # TODO: 
check support - gate_activation, candidate_activation = activations - is_reverse = direction == 'reverse' - - fluid_op = 'dynamic_gru' - _logger.warning('for op (%s -> GRU -> %s)', inputs, outputs) - _logger.warning('one of the parameters is intermediate value') - _logger.warning('broken Python code will be generated') - - # generation - var_x0 = name + '_x0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_x], - [var_x0], - {'axes': [1]}, # index on n - name=(name + '.x/index'), - ) - var_w0 = name + '_w0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_w], - [var_w0], - {'axes': [0]}, # index on d - name=(name + '.w/index'), - ) - var_fc = name + '_fc' - var_mm = (name + '_mm') if var_b else var_fc - prog.Op( - '', - 'MatMul', - [var_x0, var_w0], - [var_mm], - { - 'transpose_x': 0, - 'transpose_y': 1, - }, - name=(name + '/mm'), - ) - prog.op_descs[-1].attrs.extend( - prog.OpDescAttrs({ - 'transpose_X': 0, - 'transpose_Y': 1, - })) # f**k you API - var_r0 = name + '_r0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_r], - [var_r0], - {'axes': [0]}, # index on d - name=(name + '.r/index'), - ) - var_r0t = name + '_r0t' # explicit variable - prog.Op( - '', - 'Transpose', - [var_r0], - [var_r0t], - {'perm': [1, 0]}, # transpose OI->IO - name=(name + '.r0/transpose'), - ) - if var_b: - var_bi = name + '_bi' # explicit variable - var_bh = name + '_bh' # explicit variable - prog.Op( - '', - 'Split', - [var_b], - [var_bi, var_bh], - { - 'axis': 1, # split on x - 'split': [hidden_size * 3, hidden_size * 3], - }, - name=(name + '.b/split'), - ) - # squeeze bi so Gemm Add can be performed on axis=1 exaclty - var_bi0 = name + '_bi0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_bi], - [var_bi0], - {'axes': [0]}, # slice on d - name=(name + '.bi/index'), - ) - prog.Op( - '', - 'Add', - [var_mm, var_bi0], - [var_fc], - {'axis': 1}, # - name=(name + '.i/bias'), - ) - if var_xh: - var_xh0 = name + '_xh0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_xh], - [var_xh0], - {'axes': [1]}, # index on n - name=(name + '.xh/index'), - ) - var_y00 = name + '_y00' # explicit variable # - prog.Code('{} = layers.{}({}, {}, origin_mode=True' - ', h_0={}' - ', is_reverse={}' - ', gate_activation={}' - ', candidate_activation={}' - ', param_attr={}, bias_attr={})'.format( - var_y00, - fluid_op, - var_fc, - hidden_size, - var_xh0 if var_xh else None, - is_reverse, - repr(gate_activation), - repr(candidate_activation), - repr(var_r0t), - repr(var_bh) if var_b else False, - )) - - fluid_op = 'gru' - prog.VarDesc(var_y00) - prog.VarDesc(var_gate) - prog.VarDesc(var_reset) - prog.VarDesc(var_hidden) - prog.OpDesc( - fluid_op, - (['Input', 'Weight', 'Bias', 'H0'], [var_fc, var_r0t] + - ([var_bh] if var_b else []) + ([var_xh0] if var_xh else [])), - (['Hidden', 'BatchGate', 'BatchResetHiddenPrev', 'BatchHidden' - ], [var_y00, var_gate, var_reset, var_hidden]), - { - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'activation': candidate_activation, - 'origin_mode': True, - }, - ) - if var_y: - prog.Op( - '', - 'Unsqueeze', - [var_y00], - [var_y], - {'axes': [1, 1]}, # extrude on dn - name=(name + '.y/reshape'), - ) - if var_yh: - prog.Op( - '', - 'Unsqueeze', - [var_y00], # - [var_yh], # - {'axes': [1, 1]}, # extrude on dn - name=(name + '.yh/reshape'), - ) - - -def LSTM(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs): - """ - onnx::LSTM-7: - """ - - var_x, var_w, var_r, var_b, var_len, var_xh, var_xc, var_p, = (inputs + - [''] * 5)[:8] - 
var_y, var_yh, var_yc, = (outputs + [''] * 3)[:3] - assert name and var_x and var_w and var_r # and (var_y or var_yh or var_yc) - var_gate = name + '.gate' - var_pre = name + '.pre' - - # interpretation - x_shape = _shape_or_none(value_infos, var_x) - assert x_shape is not None, 'shape of X required to be known' - assert x_shape[1] == 1, 'only X with batch_size = 1 supported' - assert 'clip' not in attrs, 'clipping not supported' - hidden_size = attrs.get('hidden_size', None) # optional - if hidden_size is None: - r_shape = _shape_or_none(value_infos, var_r) - if r_shape: - hidden_size = r_shape[-1] - if hidden_size is None: - w_shape = _shape_or_none(value_infos, var_w) - if w_shape: - hidden_size = w_shape[-2] // 4 - if hidden_size is None and var_b: - b_shape = _shape_or_none(value_infos, var_b) - if b_shape: - hidden_size = b_shape[-1] // 8 - if hidden_size is None and var_xh: - xh_shape = _shape_or_none(value_infos, var_xh) - if xh_shape: - hidden_size = xh_shape[-1] - if hidden_size is None and var_xc: - xc_shape = _shape_or_none(value_infos, var_xc) - if xc_shape: - hidden_size = xc_shape[-1] - if hidden_size is None and var_p: - p_shape = _shape_or_none(value_infos, var_p) - if p_shape: - hidden_size = p_shape[-1] // 3 - assert hidden_size, 'hidden_size not inferred' - assert attrs.get( - 'linear_before_reset', - 0) == 0, 'only linear_before_reset = 0 supported' # optional - assert attrs.get('input_forget', - 0) == 0, 'only input_forget = 0 supported' # optional - direction = attrs.get('direction', 'forward') # optional - assert direction != 'bidirectional', 'direction = bidirectional not supported' - activations = attrs.get('activations', - ['Sigmoid', 'Tanh', 'Tanh']) # optional - assert len(activations) == 3, 'bidirectional operation not supported' - activations = [s.lower() for s in activations] # TODO: check support - gate_activation, cell_activation, candidate_activation = activations - is_reverse = direction == 'reverse' - - fluid_op = 'dynamic_lstm' - name_attr = ', name={}'.format(repr(name)) - _logger.warning('for op %s(%s -> LSTM -> %s)', name, inputs, outputs) - _logger.warning('one of the parameters is intermediate value') - _logger.warning('broken Python code will be generated') - - # generation - var_x0 = name + '_x0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_x], - [var_x0], - {'axes': [1]}, # index on n - name=(name + '.x/index'), - ) - var_w0 = name + '_w0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_w], - [var_w0], - {'axes': [0]}, # index on d - name=(name + '.w/index'), - ) - var_fc = name + '_fc' - var_mm = (name + '_mm') if var_b else var_fc - prog.Op( - '', - 'MatMul', - [var_x0, var_w0], - [var_mm], - { - 'transpose_x': 0, - 'transpose_y': 1, - }, - name=(name + '/mm'), - ) - prog.op_descs[-1].attrs.extend( - prog.OpDescAttrs({ - 'transpose_X': 0, - 'transpose_Y': 1, - })) # f**k you API - var_r0 = name + '_r0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_r], - [var_r0], - {'axes': [0]}, # index on d - name=(name + '.r/index'), - ) - var_r0t = name + '_r0t' # explicit variable - prog.Op( - '', - 'Transpose', - [var_r0], - [var_r0t], - {'perm': [1, 0]}, # transpose OI->IO - name=(name + '.r0/transpose'), - ) - if var_b: - var_bi = name + '_bi' # explicit variable - var_bh = name + '_bh' # explicit variable - prog.Op( - '', - 'Split', - [var_b], - [var_bi, var_bh], - { - 'axis': 1, # split on x - 'split': [hidden_size * 4, hidden_size * 4], - }, - name=(name + '.b/split'), - ) - # squeeze bi so Gemm Add can be performed 
on axis=1 exaclty - var_bi0 = name + '_bi0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_bi], - [var_bi0], - {'axes': [0]}, # slice on d - name=(name + '.bi/index'), - ) - prog.Op( - '', - 'Add', - [var_mm, var_bi0], - [var_fc], - {'axis': 1}, # - name=(name + '.i/bias'), - ) - if var_xh: - var_xh0 = name + '_xh0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_xh], - [var_xh0], - {'axes': [1]}, # index on n - name=(name + '.xh/index'), - ) - if var_xc: - var_xc0 = name + '_xc0' # explicit variable - prog.Op( - '', - 'Squeeze', - [var_xc], - [var_xc0], - {'axes': [1]}, # index on n - name=(name + '.xc/index'), - ) - var_bhp = var_p - if var_b: - if var_p: - var_bhp = name + '_bhp' # explicit variable - prog.Op( - '', - 'Concat', - [var_bh, var_p], - [var_bhp], - {'axis': [1]}, # cat on x - name=(name + '/concat'), - ) - else: - var_bhp = var_bh - var_yh0 = name + '_yh0' # explicit variable - var_yc0 = name + '_yc0' # explicit variable - prog.Code('{}, {} = layers.{}({}, {}' - ', h_0={}' - ', c_0={}' - ', use_peepholes={}' - ', is_reverse={}' - ', gate_activation={}' - ', cell_activation={}' - ', candidate_activation={}' - ', param_attr={}, bias_attr={}' - '{})'.format( - var_yh0, - var_yc0, - fluid_op, - var_fc, - hidden_size * 4, - var_xh0 if var_xh else None, - var_xc0 if var_xc else None, - bool(var_p), - is_reverse, - repr(gate_activation), - repr(cell_activation), - repr(candidate_activation), - repr(var_r0t), - repr(var_bhp) if var_bhp else False, - name_attr, - )) - - fluid_op = 'lstm' - prog.VarDesc(var_yh0) - prog.VarDesc(var_yc0) - prog.VarDesc(var_gate) - prog.VarDesc(var_pre) - prog.OpDesc( - fluid_op, - (['Input', 'Weight', 'Bias', 'H0', 'C0'], [var_fc, var_r0t] + - ([var_bhp] if var_bhp else []) + ([var_xh0] if var_xh else []) + - ([var_xc0] if var_xc else [])), - (['Hidden', 'Cell', 'BatchGate', 'BatchCellPreAct' - ], [var_yh0, var_yc0, var_gate, var_pre]), - { - 'use_peepholes': bool(var_p), - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'cell_activation': cell_activation, - 'candidate_activation': candidate_activation, - }, - ) - if var_y: - prog.Op( - '', - 'Unsqueeze', - [var_yh0], # - [var_y], # var_y - {'axes': [1, 1]}, # extrude on dn - name=(name + '.y/reshape'), - ) - if var_yh: - prog.Op( - '', - 'Unsqueeze', - [var_yh0], - [var_yh], # var_yh - {'axes': [1, 1]}, # extrude on dn - name=(name + '.yh/reshape'), - ) - if var_yc: - prog.Op( - '', - 'Unsqueeze', - [var_yc0], - [var_yc], - {'axes': [1, 1]}, # extrude on dn - name=(name + '.yc/reshape'), - ) - - -def MaxPool(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs): - """ - onnx::MaxPool-10: - """ - - return _pool(prog, 'max', inputs, outputs, attrs, value_infos, name) - - -def MaxRoiPool(prog, inputs, outputs, attrs, name, *args, **kwargs): - """ - onnx::MaxRoiPool-1: - """ - - _roi_pool(prog, 'roi_pool', inputs, outputs, attrs, name) - - -def Pad(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs): - """ - onnx::Pad-2: - """ - - # I/O - var_data, = inputs - var_output, = outputs - assert var_data and var_output - - # interpretation - pads = attrs['pads'] # required - mode = attrs.get('mode', 'constant') # optional - value = attrs.get('value', 0.) 
# optional - data_shape = _shape_or_none(value_infos, var_data) - output_shape = _shape_or_none(value_infos, var_output) - assume_pad2d = False - if len(pads) == 4: - assume_pad2d |= mode != 'constant' - if data_shape is not None: - assume_pad2d |= data_shape and len(data_shape) == 4 # NCHW - if output_shape is not None: - assume_pad2d |= output_shape and len(output_shape) == 4 # NCHW - od_attrs = {'pad_value': value} - if assume_pad2d: - fluid_op = 'pad2d' - pad2d_attr = ', mode={}, data_format="NCHW"'.format(repr(mode)) - od_attrs['mode'] = mode - od_attrs['data_format'] = "NCHW" - else: - assert mode == 'constant', 'mode {} supported only in pad2d'.format( - mode) - fluid_op = 'pad' - pad2d_attr = '' - paddings = _np.array(pads).reshape( - (-1, 2)).transpose().flatten().tolist() # SSEE -> SESE - od_attrs['paddings'] = paddings - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{} = layers.{}({}' - ', paddings={}' - ', pad_value={}' - '{}{})'.format( - var_output, - fluid_op, - var_data, - # attrs - paddings, - value, - pad2d_attr, - name_attr, - )) - prog.VarDesc(var_output) - prog.OpDesc( - fluid_op, - (['X', 'Paddings'], [var_data]), # - (['Out'], [var_output]), - od_attrs, - ) - - -def PRelu(prog, - inputs, - outputs, - attrs_, - value_infos, - name='', - embed_params=False, - *args, - **kwargs): - """ - onnx::PRelu-9: - """ - - # I/O - var_x, var_slope, = inputs - var_y, = outputs - assert name and var_x and var_slope and var_y - - # interpretation - mode = 'channel' - slope_shape = _shape_or_none(value_infos, var_slope) - if slope_shape is not None: - if not slope_shape: - mode = 'all' - elif len(slope_shape) >= 2: - if slope_shape[1] != _np.product( - slope_shape): # not channel broadcasting - mode = 'element' - fluid_op = 'prelu' - name_attr = ', name={}'.format(repr(name)) if name else '' - embeddable = _check_embeddable(value_infos, var_slope) - if not embeddable: - _logger.warning('for op %s(%s -> PRelu -> %s)', name, inputs, outputs) - _logger.warning('one of the parameters is intermediate value') - _logger.warning('broken Python code will be generated') - embed_params &= embeddable - if embed_params: - assert name - embedded_slope = name + '.w_0' - value_infos[var_slope]['embedded_as'].append(embedded_slope) - var_slope = embedded_slope - param_attr = '' - else: - param_attr = ', param_attr={}'.format(repr(var_slope)) - - # generation - prog.Code('{} = layers.{}({}' - ', mode={}' - '{}{})'.format( - var_y, - fluid_op, - var_x, - # attrs - repr(mode), - param_attr, - name_attr, - )) - prog.VarDesc(var_y) - prog.OpDesc( - fluid_op, - (['X', 'Alpha'], [var_x, var_slope]), - (['Out'], [var_y]), - {'mode': mode}, - ) - - -def PsRoiPool(prog, inputs, outputs, attrs, name, *args, **kwargs): - """ - caffe2::PsRoiPool - """ - - _roi_pool(prog, 'psroi_pool', inputs, outputs, attrs, name) - - -def Reshape(prog, inputs, outputs, attrs_, value_infos, name, *args, **kwargs): - """ - onnx::Reshape-5: - """ - - # I/O - var_data, var_shape, = inputs - var_reshaped, = outputs - assert name and var_data and var_shape and var_reshaped - - # interpretation - shape = _const_weight_or_none(value_infos, var_shape) - is_const_shape = shape is not None and 'const_value' in value_infos[ - var_shape] - if shape is None: - shape = _shape_or_none(value_infos, var_reshaped) - - -# assert shape is not None, ('given shape is neither const value nor deductible from output, ' -# 'this is not supported') - if shape is None: - shape = [1, -1] # who knows - 
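# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original onnx2fluid sources):
# the `Pad` mapping above reorders ONNX pads from SSEE layout
# ([start_0, start_1, ..., end_0, end_1, ...]) into the interleaved SESE
# layout ([start_0, end_0, start_1, end_1, ...]) expected by the fluid pad op
# via reshape((-1, 2)).transpose().flatten(). The same reordering in plain
# Python:
def _ssee_to_sese_sketch(pads):
    ndims = len(pads) // 2
    interleaved = []
    for start, end in zip(pads[:ndims], pads[ndims:]):
        interleaved.extend([start, end])
    return interleaved

assert _ssee_to_sese_sketch([0, 1, 2, 3]) == [0, 2, 1, 3]
# ---------------------------------------------------------------------------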
_logger.warning( - 'in op %s(%s -> Reshape -> %s): ' - 'input "shape" not inferred, use [1, -1] as dummy value, ' - 'the behavior of Paddle fluid maybe undefined', name, inputs, - outputs) - shape_dtype = _dtype_or_none(value_infos, var_shape) - if shape_dtype is None: - _logger.warning( - 'in op %s(%s -> Reshape -> %s): ' - 'dtype of input "shape" not inferred, int32 assumed', name, inputs, - outputs) - shape_dtype = _np.dtype('int32') - fluid_op = 'reshape' - name_attr = ', name={}'.format(repr(name)) - - # generation - var_shape_i32 = ( - name + '_shape_i32' - ) if shape_dtype != _np.int32 else var_shape # explicit variable - prog.Code('# shape: {} = {} # const as literal'.format(var_shape, shape)) - if is_const_shape: - prog.Code('{} = layers.{}({}' - ', shape={}' - '{})'.format( - var_reshaped, - fluid_op, - var_data, - # attrs - shape, - name_attr, - )) - else: - if shape_dtype != _np.int32: - prog.Op( - '', - 'Cast', - [var_shape], - [var_shape_i32], - {'to': _np.dtype('int32')}, # use np.dtype - value_infos={ - var_shape: { - 'dtype': shape_dtype - }, - var_shape_i32: { - 'dtype': _np.dtype('int32') - }, - }, - name=(name + '/cast'), - ) - prog.Code('{} = layers.{}({}' - ', shape={}' - ', actual_shape={}' - '{})'.format( - var_reshaped, - fluid_op, - var_data, - # attrs - shape, - var_shape_i32, - name_attr, - )) - fluid_op = 'reshape2' - var_xshape = name + '.xshape' # dummy output - prog.VarDesc(var_reshaped) - prog.VarDesc(var_xshape) - prog.OpDesc( - fluid_op, - (['X', 'Shape', 'ShapeTensor'], [var_data, var_shape_i32]), # - (['Out', 'XShape'], [var_reshaped, var_xshape]), - {'shape': shape}, - ) - - -def Resize(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs): - """ - onnx::Resize-10: - """ - - return _interpolate(prog, inputs, outputs, attrs, value_infos, name=name) - - -def RoiAlign(prog, inputs, outputs, attrs, name, *args, **kwargs): - """ - caffe2::RoiAlign - """ - - _roi_pool(prog, 'roi_align', inputs, outputs, attrs, name) - - -def Shape(prog, inputs, outputs, attrs_, name, **kwargs): - """ - onnx::Shape-1: - """ - - # I/O - var_data, = inputs - var_shape, = outputs - assert name and var_data and var_shape - - # interpretation - fluid_op = 'shape' - var_shape_i64 = name + '_shape_i64' - - # generation - prog.Code('{} = layers.{}({})'.format( - var_shape_i64, - fluid_op, - var_data, - # attrs - )) - prog.VarDesc(var_shape_i64) - prog.OpDesc( - fluid_op, - (['Input'], [var_data]), - (['Out'], [var_shape_i64]), - ) - prog.Op( - '', - 'Cast', - [var_shape_i64], - [var_shape], - {'to': _np.dtype('int32')}, # use np.dtype - value_infos={ - var_shape: { - 'dtype': _np.dtype('int32') - }, - var_shape_i64: { - 'dtype': _np.dtype('int64') - }, - }, - name=(name + '/cast'), - ) - - -def Slice(prog, inputs, outputs, attrs, value_infos, *args, **kwargs): - """ - onnx::Slice-1:9 - """ - - # I/O - var_data, = inputs - var_output, = outputs - assert var_data and var_output - - # interpretation - fluid_op = 'slice' - axes = attrs['axes'] # required - starts = attrs['starts'] # required - ends = attrs['ends'] # required - shape = _shape_or_none(value_infos, var_data) - if shape is not None: - # ndims = len(shape) - # for idx, value in enumerate(axes): - # if value > ONNX_INT_MAX // 2: - # axes[idx] = ndims + value - ONNX_INT_MAX - # FIXME: Paddle 1.3 Doc: '对于未知大小维度的末尾进行切片,则建议传入 INT_MAX' not works ? 
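The FIXME above concerns how exporters encode "slice to the end" as a huge sentinel index. Below is a standalone sketch of the normalization the following lines perform, assuming `ONNX_INT_MAX` is ONNX's int64 sentinel as defined earlier in this file; unlike the deleted code, this sketch also resolves plain negative indices and clamps to the dimension size, so treat it as an illustration of intent rather than a drop-in copy:

```python
ONNX_INT_MAX = 2 ** 63 - 1  # assumed sentinel value ("slice to the end")

def normalize_slice(shape, axes, starts, ends):
    starts, ends = list(starts), list(ends)
    for i, axis in enumerate(axes):
        dim = shape[axis]
        for vals in (starts, ends):
            if vals[i] > ONNX_INT_MAX // 2:   # sentinel: offset from the end
                vals[i] = vals[i] - ONNX_INT_MAX + dim
            elif vals[i] < 0:                 # python-style negative index
                vals[i] += dim
            vals[i] = min(vals[i], dim)       # clamp to the known extent
    return starts, ends

print(normalize_slice((2, 3, 224, 224), axes=[2, 3],
                      starts=[0, 112], ends=[ONNX_INT_MAX, ONNX_INT_MAX - 10]))
# ([0, 112], [224, 214])
```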
- for idx, value in enumerate(starts): - if value > ONNX_INT_MAX // 2: - value = value - ONNX_INT_MAX - starts[idx] = shape[axes[idx]] + value - for idx, value in enumerate(ends): - if value > ONNX_INT_MAX // 2: - value = value - ONNX_INT_MAX - ends[idx] = shape[axes[idx]] + value - - # generation - prog.Code('{} = layers.{}({}' - ', axes={}' - ', starts={}' - ', ends={}' - ')'.format( - var_output, - fluid_op, - var_data, - # attrs - axes, - starts, - ends, - )) - prog.VarDesc(var_output) - prog.OpDesc( - fluid_op, - (['Input'], [var_data]), - (['Out'], [var_output]), - { - 'axes': axes, - 'starts': starts, - 'ends': ends, - }, - ) - - -def Split(prog, inputs, outputs, attrs, *args, name='', **kwargs): - """ - onnx::Split-2: - """ - - # I/O - var_input, = inputs - assert var_input - - # interpretation - fluid_op = 'split' - split = attrs['split'] # required - axis = attrs.get('axis', 0) # optional - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{} = layers.{}({}, {}' - ', dim={}' - '{})'.format( - ', '.join(outputs), - fluid_op, - var_input, - split, - # attrs - axis, - name_attr, - )) - for var_out in outputs: - prog.VarDesc(var_out) - prog.OpDesc( - fluid_op, - (['X'], [var_input]), - (['Out'] * len(outputs), outputs), - { - 'axis': axis, - 'sections': split, - # unused - 'num': 0, - }, - ) - - -def Sum(prog, inputs, outputs, attrs_, *args, **kwargs): - """ - onnx::Sum-8: - """ - - # I/O - var_sum, = outputs - assert var_sum - - # interpretation - fluid_op = 'sums' - - # generation - prog.Code('{} = layers.{}({})'.format( - var_sum, - fluid_op, - '[' + ', '.join(inputs) + ']', - # attrs - )) - fluid_op = 'sum' - prog.VarDesc(var_sum) - prog.OpDesc( - fluid_op, - (['X'] * len(inputs), inputs), - (['Out'], [var_sum]), - dict(), - ) - - -def Tile(prog, inputs, outputs, attrs_, value_infos, name='', *args, **kwargs): - """ - onnx::Tile-1: - """ - - # I/O - var_input, var_repeats, = inputs - var_output, = outputs - assert var_input and var_repeats and var_output - - # interpretation - repeats = _const_weight_or_none(value_infos, var_repeats) - assert repeats is not None, 'only const repeats supported' # if contain_tensor(expand_times) - fluid_op = 'expand' - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('# repeats: {} = {} # const as literal'.format( - var_repeats, repeats)) - prog.Code('{} = layers.{}({}' - ', expand_times={}' - '{})'.format( - var_output, - fluid_op, - var_input, - # attrs - repeats, - name_attr, - )) - prog.VarDesc(var_output) - prog.OpDesc( - fluid_op, - (['X', 'expand_times_tensor'], [var_input]), # TODO - (['Out'], [var_output]), - {'expand_times': repeats}, - ) - - -def Transpose(prog, inputs, outputs, attrs, name, *args, **kwargs): - """ - onnx::Transpose-1: - """ - - # I/O - var_data, = inputs - var_transposed, = outputs - assert name and var_data and var_transposed - - # interpretation - fluid_op = 'transpose' - perm = attrs['perm'] # required - name_attr = ', name={}'.format(repr(name)) if name else '' - - # generation - prog.Code('{} = layers.{}({}' - ', perm={}' - '{})'.format( - var_transposed, - fluid_op, - var_data, - # attrs - perm, - name_attr, - )) - fluid_op = 'transpose2' - var_xshape = name + '.xshape' # dummy output - prog.VarDesc(var_xshape) - prog.VarDesc(var_transposed) - prog.OpDesc( - fluid_op, - (['X'], [var_data]), - (['Out', 'XShape'], [var_transposed, var_xshape]), - {'axis': perm}, # f**k you API - ) - - -def Upsample(prog, - inputs, - outputs, - attrs, - 
value_infos, - name='', - *args, - **kwargs): - """ - onnx::Upsample-9:9 - """ - - return _interpolate(prog, inputs, outputs, attrs, value_infos, name=name) - - -if __name__ == '__main__': - _logging.basicConfig( - format= - '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s', - level=_logging.DEBUG, - ) - logger = _logging.getLogger('symbolic_test') - - import numpy as np - - from onnx2fluid.writer import Program - - prog = Program() - AdaptiveAveragePool( - prog, - ['X'], - ['Y'], - dict(output_size=[3, 3]), - dict(Y=dict(shape=(2, 3, 3, 3), dtype=np.float32)), - name='AdaptiveAveragePool2d', - ) - logger.info('AdaptiveAveragePool2d program:\n%s', prog) - - prog = Program() - AdaptiveAveragePool( - prog, - ['X'], - ['Y'], - dict(output_size=[3, 3, 3]), - dict(Y=dict(shape=(2, 3, 3, 3, 3), dtype=np.float32)), - name='AdaptiveAveragePool3d', - ) - logger.info('AdaptiveAveragePool3d program:\n%s', prog) - - prog = Program() - AffineGrid( - prog, - ['Theta'], - ['Grid'], - dict(size=[2, 2, 8, 8]), - dict(Grid=dict(shape=(2, 8, 8, 2), dtype=np.float32)), - ) - logger.info('AffineGrid program:\n%s', prog) - - prog = Program() - BatchNormalization( - prog, - ['X', 'scale', 'B', 'mean', 'var'], - ['Y'], - dict( - epsilon=1e-5, - momentum=.9, - ), - dict( - scale=dict(shape=(3, ), dtype=np.float32), - B=dict(shape=(3, ), dtype=np.float32), - mean=dict(shape=(3, ), dtype=np.float32), - var=dict(shape=(3, ), dtype=np.float32), - Y=dict(shape=(2, 3), dtype=np.float32), - ), - name='BatchNormalization', - embed_params=True, - ) - logger.info('BatchNormalization program:\n%s', prog) - - prog = Program() - Cast( - prog, - ['input'], - ['output'], - dict(to=2), # TensorProto.UINT8 - dict(input=dict(shape=(2, 3), dtype=np.float32), - output=dict(shape=(2, 3), dtype=np.uint8)), - ) - logger.info('Cast program:\n%s', prog) - - prog = Program() - _default( - prog, - 'Clip', - ['input'], - ['output'], - dict(min=-1., max=1.), - dict(output=dict(shape=(2, 3), dtype=np.float32)), - ) - logger.info('Clip program:\n%s', prog) - - prog = Program() - Conv( - prog, - ['X', 'W'], - ['Y'], - dict( - auto_pad='NOTSET', - dilations=[1, 1], - group=1, - kernel_shape=[3, 3], - pads=[1, 1, 1, 1], - strides=[1, 1], - ), - dict( - W=dict(shape=(2, 3, 3, 3), dtype=np.float32), - Y=dict(shape=(2, 2, 4, 6), dtype=np.float32), - ), - name='ConvNoBias2d', - embed_params=True, - ) - logger.info('ConvNoBias2d program:\n%s', prog) - - prog = Program() - Conv( - prog, - ['X', 'W', 'B'], - ['Y'], - dict( - auto_pad='NOTSET', - dilations=[1, 1], - group=1, - kernel_shape=[3, 3], - pads=[1, 1, 1, 1], - strides=[1, 1], - ), - dict( - W=dict(shape=(2, 3, 3, 3), dtype=np.float32), - B=dict(shape=(2), dtype=np.float32), - Y=dict(shape=(2, 2, 4, 6), dtype=np.float32), - ), - name='Conv2d', - embed_params=True, - ) - logger.info('Conv2d program:\n%s', prog) - - prog = Program() - ConvTranspose( - prog, - ['X', 'W', 'B'], - ['Y'], - dict( - auto_pad='NOTSET', - dilations=[1, 1], - group=1, - kernel_shape=[3, 3], - # output_padding=[1, 1, 1, 1], - # output_shape=[6, 8], - pads=[1, 1, 1, 1], - strides=[1, 1], - ), - dict( - W=dict(shape=(2, 3, 3, 3), dtype=np.float32), - B=dict(shape=(2), dtype=np.float32), - Y=dict(shape=(2, 2, 6, 8), dtype=np.float32), - ), - name='ConvTransposed2d', - embed_params=True, - ) - logger.info('ConvTransposed2d program:\n%s', prog) - - prog = Program() - Conv( - prog, - ['X', 'W'], - ['Y'], - dict( - auto_pad='NOTSET', - dilations=[1, 1, 1], - group=1, - kernel_shape=[3, 3, 3], - pads=[1, 1, 1, 1, 
1, 1], - strides=[1, 1, 1], - ), - dict( - W=dict(shape=(2, 3, 3, 3, 3), dtype=np.float32), - Y=dict(shape=(2, 2, 4, 6, 8), dtype=np.float32), - ), - name='ConvNoBias3d', - embed_params=True, - ) - logger.info('ConvNoBias3d program:\n%s', prog) - - prog = Program() - Conv( - prog, - ['X', 'W', 'B'], - ['Y'], - dict( - auto_pad='NOTSET', - dilations=[1, 1, 1], - group=1, - kernel_shape=[3, 3, 3], - pads=[1, 1, 1, 1, 1, 1], - strides=[1, 1, 1], - ), - dict( - W=dict(shape=(2, 3, 3, 3, 3), dtype=np.float32), - B=dict(shape=(2), dtype=np.float32), - Y=dict(shape=(2, 2, 4, 6, 8), dtype=np.float32), - ), - name='Conv3d', - embed_params=True, - ) - logger.info('Conv3d program:\n%s', prog) - - prog = Program() - ConvTranspose( - prog, - ['X', 'W', 'B'], - ['Y'], - dict( - auto_pad='NOTSET', - dilations=[1, 1, 1], - group=1, - kernel_shape=[3, 3, 3], - # output_padding=[1, 1, 1, 1], - # output_shape=[6, 8], - pads=[1, 1, 1, 1, 1, 1], - strides=[1, 1, 1], - ), - dict( - W=dict(shape=(2, 3, 3, 3, 3), dtype=np.float32), - B=dict(shape=(2), dtype=np.float32), - Y=dict(shape=(2, 2, 6, 8, 9), dtype=np.float32), - ), - name='ConvTransposed3d', - embed_params=True, - ) - logger.info('ConvTransposed3d program:\n%s', prog) - - prog = Program() - _default( - prog, - 'Equal', - ['A', 'B'], - ['C'], - dict(), - dict(C=dict(shape=(2, 3), dtype=np.bool)), - ) - logger.info('Equal program:\n%s', prog) - - prog = Program() - Gemm( - prog, - ['A', 'B', 'C'], - ['Y'], - dict( - alpha=1., - beta=1., - transA=0, - transB=1, - ), - dict( - B=dict(shape=(8, 3), dtype=np.float32), - Y=dict(shape=(2, 8), dtype=np.float32), - ), - name='Gemm', - ) - logger.info('Gemm program:\n%s', prog) - - prog = Program() - _default( - prog, - 'Less', - ['A', 'B'], - ['C'], - dict(), - dict(C=dict(shape=(2, 3), dtype=np.bool)), - ) - logger.info('Less program:\n%s', prog) - - prog = Program() - _default(prog, - 'MatMul', ['A', 'B'], ['Y'], - dict(), - dict(Y=dict(shape=(2, 8), dtype=np.float32)), - name='MatMul') - logger.info('MatMul program:\n%s', prog) - - prog = Program() - _default( - prog, - 'OneHot', - ['indices', 'depth', 'values'], - ['output'], - dict(axis=-1), - dict(output=dict(shape=(2, 8), dtype=np.float32)), - ) - logger.info('OneHot program:\n%s', prog) - - prog = Program() - Pad( - prog, - ['data'], - ['output'], - dict( - mode='constant', - pads=[0, 1], - value=0., - ), - dict( - data=dict(shape=(2, 7), dtype=np.float32), - output=dict(shape=(2, 8), dtype=np.float32), - ), - name='Pad', - ) - logger.info('Pad program:\n%s', prog) - - prog = Program() - Pad( - prog, - ['data'], - ['output'], - dict( - mode='reflect', - pads=[0, 1, 2, 3], - value=0., - ), - dict( - data=dict(shape=(2, 3, 3, 3), dtype=np.float32), - output=dict(shape=(2, 3, 5, 7), dtype=np.float32), - ), - name='Pad2d', - ) - logger.info('Pad2d program:\n%s', prog) - - prog = Program() - PRelu( - prog, - ['X', 'slope'], - ['Y'], - dict(), - dict(Y=dict(shape=(2, 3), dtype=np.float32)), - name='PRelu', - ) - logger.info('PRelu program:\n%s', prog) - - prog = Program() - Tile(prog, ['input', 'repeats'], ['output'], - dict(), - dict(repeats=dict(const_value=[1, 2]), - output=dict(shape=(2, 2, 4), dtype=np.float32)), - name='Tile') - logger.info('Tile program:\n%s', prog) diff --git a/onnx2fluid/onnx2fluid/torch_export_helper.py b/onnx2fluid/onnx2fluid/torch_export_helper.py deleted file mode 100644 index ef13cea..0000000 --- a/onnx2fluid/onnx2fluid/torch_export_helper.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" 
-Created on Fri Mar 22 11:22:46 2019 - -@author: Macrobull -""" - -from __future__ import division - -import logging -import numpy as np -import torch - -from collections import OrderedDict -from typing import ( - TypeVar, - Any, - Generic, - Iterable, - List, - Mapping, - Optional, - Sequence, - Text, - Tuple, - Union, -) - -logger = logging.getLogger(__name__) - -__all__ = [ - 'export_data', - 'export_onnx_with_validation', -] - -my_dict = OrderedDict - -KT = TypeVar('KT') -VT = TypeVar('VT') - - -class MyDict(my_dict, Generic[KT, VT]): - pass - - -def ensure_list(obj: Union[object, Sequence[object]]) -> List[object]: - if isinstance(obj, (list, tuple, set)): - return list(obj) - return [obj] - - -def ensure_tuple(obj: Union[object, Sequence[object]]) -> Tuple[object, ...]: - if isinstance(obj, (tuple, list, set)): - return tuple(obj) - return (obj, ) - - -def flatten_list(obj: List[Union[object, List[object]]], - out: Optional[List[object]] = None) -> List[object]: - assert isinstance(obj, list), 'list type required' - - if out is None: - out = type(obj)() - for item in obj: - if isinstance(item, list): - flatten_list(item, out) - else: - out.append(item) - return out - - -def export_data(state_dict: Mapping[Text, Any], prefix: Text = '') -> None: - """ - export binary data with meta text for raw C++ inference engines - """ - - def str_(obj: object) -> Text: - if isinstance(obj, (tuple, list, set)): - return str(obj)[1:-1].replace(' ', '') - return str(obj) - - prefix_ = prefix + ('_' if prefix else '') - fp = open('{}.txt'.format(prefix or 'meta'), mode='w') - for key, value in state_dict.items(): - data = None - if torch.is_tensor(value): - data = value.data.cpu().numpy() - elif isinstance(value, np.ndarray): - data = value - if data is not None: - data.tofile('{}{}.bin'.format(prefix_, key)) - fp.write('{}.dtype={}\n'.format(key, str_(data.dtype.name))) - fp.write('{}.shape={}\n'.format(key, str_(data.shape))) - else: - fp.write('{}={}\n'.format(key, str_(value))) - fp.close() - - -def export_onnx_with_validation( - model: torch.nn.Module, # or JITScriptModule - inputs: Sequence[Union[torch.Tensor, Sequence[object]]], - export_basepath: Text, - input_names: Optional[List[Text]] = None, - output_names: Optional[List[Text]] = None, - use_npz: bool = True, - *args, - **kwargs) -> Sequence[Union[torch.Tensor, Sequence[object]]]: - """ - export PyTorch model to ONNX model and export sample inputs and outputs in a Numpy file - """ - - is_tuple_or_list = lambda x: isinstance(x, (tuple, list)) - - def tensors_to_arrays(tensors: Union[torch.Tensor, Iterable[ - Union[torch.Tensor, Iterable[Any]]]], ) -> List[np.ndarray]: - if torch.is_tensor(tensors): - return tensors.data.cpu().numpy() - return list(map(tensors_to_arrays, tensors)) - - def zip_dict( - keys: Optional[Iterable[Any]], - values: Sequence[Union[Any, Sequence[Any]]], - ) -> MyDict[Text, Union[object, MyDict[Text, object]]]: - keys = keys or range(len(values)) - ret = my_dict() - for idx, (key, value) in enumerate(zip(keys, values)): - is_key_list = is_tuple_or_list(key) - is_value_list = is_tuple_or_list(value) - assert is_key_list == is_value_list, 'keys and values mismatch' - if is_value_list: - ret[str(idx)] = zip_dict(key, value) - else: - ret[key] = value - return ret - - torch_inputs = ensure_tuple(inputs) # WORKAROUND: for torch.onnx - outputs = torch.onnx.export(model, - torch_inputs, - export_basepath + '.onnx', - input_names=(None if input_names is None else - flatten_list(input_names)), - output_names=(None if 
output_names is None else - flatten_list(output_names)), - *args, - **kwargs) - if outputs is None: # WORKAROUND: for torch.onnx - training = kwargs.get('training', False) - with torch.onnx.set_training(model, training): - outputs = model(*inputs) - torch_outputs = ensure_tuple(outputs) - - inputs = zip_dict(input_names, tensors_to_arrays(torch_inputs)) - outputs = zip_dict(output_names, tensors_to_arrays(torch_outputs)) - if use_npz: - np.savez( - export_basepath + '.npz', - inputs=inputs, - outputs=outputs, - ) - else: - np.save(export_basepath + '.npy', - np.asarray(my_dict(inputs=inputs, outputs=outputs)), - allow_pickle=True) - - return torch_outputs - - -if __name__ == '__main__': - from torchvision.models import resnet18 as net - - model = net() - xb = torch.rand((1, 3, 224, 224)) - export_onnx_with_validation( - model, - (xb, ), - '/tmp/export', - input_names=[ - 'image', - ], - output_names=[ - 'prob', - ], - use_npz=True, - ) diff --git a/onnx2fluid/onnx2fluid/validation.py b/onnx2fluid/onnx2fluid/validation.py deleted file mode 100644 index efd5609..0000000 --- a/onnx2fluid/onnx2fluid/validation.py +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 22 12:17:19 2019 - -@author: Macrobull -""" - -import importlib, logging, os, sys - -logger = logging.getLogger(__name__) - -__all__ = [ - 'fluid_prog_shape_infer', - 'validate', -] - - -def flatten_dict(obj, out=None): - assert isinstance(obj, dict), 'dict type required' - - if out is None: - out = type(obj)() - for key, value in obj.items(): - if isinstance(value, dict): - flatten_dict(value, out) - else: - assert key not in out, 'key conflicted' - out[key] = value - return out - - -def ensure_list(obj): - if isinstance(obj, (list, tuple, set)): - return list(obj) - return [obj] - - -def fluid_prog_shape_infer(prog): - """ - additional type-shape inference for fluid program - """ - - import paddle.fluid as fluid - - assert isinstance(prog, - fluid.framework.Program), 'prog is not a Program instance' - - logger.info('performing type-shape inference ...') - for block in prog.blocks: - block_desc = block.desc - - for idx_op in range(block_desc.op_size()): - op_desc = block_desc.op(idx_op) - if op_desc.type() in ('feed', 'fetch'): - continue - - op_desc.infer_var_type(block_desc) - op_desc.infer_shape(block_desc) - - for var_name, var in block.vars.items(): - var_desc = var.desc - if var_desc.type() != fluid.core.VarDesc.VarType.LOD_TENSOR: - continue - - # WORKAROUND: dirty way to give dtype to partial-infered vars - # which could not be cleared! 
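`export_onnx_with_validation` above and `validate()` further below agree on a small golden-data contract: one `.npz` file holding two pickled name-to-array dicts, `inputs` and `outputs`. A standalone snippet that writes and reads a file of that shape (tensor names follow the resnet18 example above; the path is a placeholder):

```python
import numpy as np

inputs = {'image': np.random.rand(1, 3, 224, 224).astype(np.float32)}
outputs = {'prob': np.random.rand(1, 1000).astype(np.float32)}
np.savez('/tmp/export.npz', inputs=inputs, outputs=outputs)  # what the exporter writes

data = np.load('/tmp/export.npz', encoding='bytes', allow_pickle=True)
input_data = data['inputs'].tolist()    # back to a plain dict, as validate() does
output_data = data['outputs'].tolist()
print(list(input_data.keys()), output_data['prob'].shape)
```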
- try: - var.to_string(True) - except ValueError: - var_desc.set_dtype(fluid.core.VarDesc.VarType.FP32) - logger.debug('dtype of var %s not inferred, float32 assumed', - var_name) - - -def validate(fluid_model_filename, - golden_data_filename='', - atol=1e-3, - rtol=1e-3, - model_func_name='inference', - save_inference_model=False, - inference_input_names=None, - **kwargs): - """ - inference the converted Paddle fluid model, validate with given golden data - """ - - assert isinstance(fluid_model_filename, str) - - import numpy as np - import paddle.fluid as fluid - - logger = logging.getLogger('validate') - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - # load model - fluid_model_dir, basename = os.path.split(fluid_model_filename) - if basename == '__model__': # is desc program - logger.info('using desc file %s', basename) - prog, _, var_outs = fluid.io.load_inference_model(fluid_model_dir, exe) - out_names = var_outs # HINT: pass var if fetch ops already created - logger.info('model load passed') - elif basename.endswith('.py'): # is Python code - logger.info('using code file %s', basename) - module_name, _ = os.path.splitext(basename) - sys_path = sys.path.copy() - sys.path.append(fluid_model_dir) - try: - module = importlib.import_module(module_name) - func = getattr(module, model_func_name) - except AttributeError: - module_name = module_name + '.' + module_name - module = importlib.import_module(module_name) - func = getattr(module, model_func_name) - sys.path = sys_path - logger.debug('from %s imported %s: %s', module_name, model_func_name, - func) - - var_outs = func() - var_outs = ensure_list(var_outs) - out_names = [var.name for var in var_outs - ] # HINT: pass string to create fetch ops - logger.info('import passed') - - prog = fluid.default_main_program() - fluid.io.load_persistables(executor=exe, - dirname=fluid_model_dir, - main_program=prog) - logger.info('weight load passed') - else: - raise ValueError('unsupported Paddle fluid model filename') - - # load data - if golden_data_filename: - logger.info('using golden data %s', golden_data_filename) - if golden_data_filename.endswith('.npz'): - test_data = np.load( - golden_data_filename, - encoding='bytes', - allow_pickle=True, - ) - input_data = test_data['inputs'].tolist() - output_data = test_data['outputs'].tolist() - else: - test_data = np.load( - golden_data_filename, - encoding='bytes', - allow_pickle=True, - ).tolist() - input_data = test_data['inputs'] - output_data = test_data['outputs'] - - input_data = flatten_dict(input_data) - output_data = flatten_dict(output_data) - input_names = input_data.keys() - # output_names = output_data.keys() - logger.info('with %d inputs and %d outputs', len(input_data), - len(output_data)) - elif save_inference_model: - assert inference_input_names is not None, ( - 'input names required for type-shape inference') - - input_names = inference_input_names - logger.info('using input names: %s', ', '.join(input_names)) - - # type-shape inference and re-save - if save_inference_model: - fluid_prog_shape_infer(prog) - fluid.io.save_inference_model(fluid_model_dir, - input_names, - var_outs, - exe, - main_program=prog, - export_for_deployment=True) - logger.info('model re-save passed') - fluid.io.load_inference_model(fluid_model_dir, exe) - logger.info('model re-load passed') - - if golden_data_filename == '': - return True - - # execute - outputs = exe.run(prog, feed=input_data, - fetch_list=out_names) # out_names can be vars - 
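Calling `validate()` outside the CLI is a one-liner; the import path reflects the (now removed) onnx2fluid package layout and both file paths are placeholders:

```python
from onnx2fluid.validation import validate

# '__model__' selects the desc-program branch above; a generated .py model
# would go through the import-and-load_persistables branch instead.
passed = validate('translated_model/__model__',
                  golden_data_filename='/tmp/export.npz',
                  atol=1e-3, rtol=1e-2,
                  save_inference_model=False)
print('accuracy', 'passed' if passed else 'not passed')
```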
logger.info('execution passed') - - # validate - passed = True - for (name, truth), output in zip(output_data.items(), outputs): - logger.info('testing on output {} ...'.format(name)) - try: - np.testing.assert_allclose(output, - truth, - rtol=rtol, - atol=atol, - equal_nan=False, - verbose=True) - except AssertionError as e: - passed = False - logger.error('failed: %s\n', e) - logger.info('accuracy %spassed', '' if passed else 'not ') - return passed - - -def main(): - import argparse - - parser = argparse.ArgumentParser( - description='onnx2fluid.validate', - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - 'model', - nargs=1, - help='path to model.py or __model__', - ) - parser.add_argument( - '--debug', - '-d', - action='store_true', - help='enable debug logging and checking', - ) - parser.add_argument( - '--test_data', - '-t', - type=str, - default='', - help='I/O golden data for validation, e.g. test.npy, test.npz', - ) - parser.add_argument( - '--atol', - '-p', - type=float, - default=1e-3, - help='assertion absolute tolerance for validation', - ) - parser.add_argument( - '--rtol', - type=float, - default=1e-2, - help='assertion relative tolerance for validation', - ) - parser.add_argument( - '--infer_inputs', - '-i', - nargs='?', - default=None, - const='', - help= - 'perform type-shape inference with given input names and re-save model', - ) - args = parser.parse_args() - - logging_format = '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s' - logging_level = logging.DEBUG if args.debug else logging.INFO - logging.basicConfig(format=logging_format, level=logging_level) - - # debug = args.debug - fluid_model_filename = args.model[0] - golden_data_filename = args.test_data - atol, rtol = args.atol, args.rtol - save_inference_model = args.infer_inputs is not None - inference_input_names = args.infer_inputs.split( - ',') if args.infer_inputs else None - - validate(fluid_model_filename, - golden_data_filename=golden_data_filename, - atol=atol, - rtol=rtol, - save_inference_model=save_inference_model, - inference_input_names=inference_input_names) - - -if __name__ == '__main__': - main() diff --git a/onnx2fluid/onnx2fluid/writer.py b/onnx2fluid/onnx2fluid/writer.py deleted file mode 100644 index 32b91b5..0000000 --- a/onnx2fluid/onnx2fluid/writer.py +++ /dev/null @@ -1,503 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sun Feb 24 20:44:43 2019 - -@author: Macrobull -""" - -from __future__ import division - -import logging, os -import numpy as np - -from collections import OrderedDict as Dict - -logger = logging.getLogger(__name__) - -from . import symbolic - -try: - import paddle.fluid.proto.framework_pb2 as framework_pb2 -except ImportError: - from . import framework_pb2 - - logger.warning('importing paddle.fluid.proto.framework_pb2d failed,' - 'using fallback framework_pb2') - -__all__ = [ - 'Program', - 'Writer', -] - - -def irepr(obj, to='_'): - """inline repr""" - - s = repr(obj) - for c in '\r\n': - s = s.replace(c, to) - if len(s) > 78: - s = s[:75] + '...' 
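The per-output comparison above, with the CLI defaults `--atol 1e-3` and `--rtol 1e-2`, follows numpy's combined rule: an element passes when |output - truth| <= atol + rtol * |truth|. A small self-check of those defaults:

```python
import numpy as np

truth = np.array([1e-6, 1.0, 100.0], dtype=np.float32)
output = truth + np.array([5e-4, 5e-4, 5e-2], dtype=np.float32)

# bounds: atol + rtol * |truth| = [~1e-3, 1.1e-2, 1.001] -- every deviation fits
np.testing.assert_allclose(output, truth, rtol=1e-2, atol=1e-3, equal_nan=False)
print('within tolerance')
```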
- return s - - -def flatten_list(obj, out=None): - assert isinstance(obj, list), 'list type required' - - if out is None: - out = type(obj)() - for item in obj: - if isinstance(item, list): - flatten_list(item, out) - else: - out.append(item) - return out - - -def make_attr_name(name): - """ - make a valid code name for ParamAttr - """ - - assert name != '', 'name should not be empty' - - for s in ' \\|/:.-': # - name = name.replace(s, '_') - if not name.startswith('_'): - name = '_' + name - return 'attr' + name - - -class Program(object): - """ - fluid Python code and ProgramDesc wrapper - """ - - DTYPE_TO_FRAMEWORK_DTYPE = { - 'bool': framework_pb2.VarType.BOOL, - 'int8': framework_pb2.VarType.INT8, - 'uint8': framework_pb2.VarType.UINT8, - 'int16': framework_pb2.VarType.INT16, - 'int32': framework_pb2.VarType.INT32, - 'int64': framework_pb2.VarType.INT64, - 'float16': framework_pb2.VarType.FP16, - 'float32': framework_pb2.VarType.FP32, - 'float64': framework_pb2.VarType.FP64 - } - - @staticmethod - def Dtype(dtype): - """ - convert dtype to fulid framework dtype - """ - - dtype = np.dtype(dtype).name - return Program.DTYPE_TO_FRAMEWORK_DTYPE[dtype] - - @staticmethod - def OpDescVars(keys, vals): - """ - make (OpDesc.Var)s - """ - - od_vars = [] - for idx, key in enumerate(keys): - od_var = framework_pb2.OpDesc.Var() - od_var.parameter = key - if idx < len(vals): - od_var.arguments.append(vals[idx]) # - od_vars.append(od_var) - return od_vars - - @staticmethod - def OpDescAttrs(attrs): - """ - make (OpDesc.Attr)s - """ - - od_attrs = [] - for key, value in attrs.items(): - od_attr = framework_pb2.OpDesc.Attr() - od_attr.name = key - if isinstance(value, bool): # bool.mro() = [bool, int, object] - od_attr.type = framework_pb2.BOOLEAN - od_attr.b = value - elif isinstance(value, int): # only cast to int32 - od_attr.type = framework_pb2.INT - od_attr.i = value - elif isinstance(value, float): - od_attr.type = framework_pb2.FLOAT - od_attr.f = value - elif isinstance(value, str): - od_attr.type = framework_pb2.STRING - od_attr.s = value - elif isinstance(value, list): - if value: # TODO: test all items - if isinstance(value[0], - bool): # bool.mro() = [bool, int, object] - od_attr.type = framework_pb2.BOOLEANS - od_attr.bools.extend(value) - elif isinstance(value[0], int): # only cast to int32 list - od_attr.type = framework_pb2.INTS - od_attr.ints.extend(value) - elif isinstance(value[0], float): - od_attr.type = framework_pb2.FLOATS - od_attr.floats.extend(value) - elif isinstance(value[0], str): - od_attr.type = framework_pb2.STRINGS - od_attr.strings.extend(value) - else: - raise ValueError('unsupported attribute {} = {}'.format( - key, value)) - else: # WORKAROUND: [] not inferred - # raise ValueError('unsupported attribute {} = {}'.format(key, value)) - od_attr.type = framework_pb2.INTS - logger.warning('using attribute %s = %s as INTS', key, - value) - else: - raise ValueError('unsupported attribute {} = {}'.format( - key, value)) - od_attrs.append(od_attr) - return od_attrs - - def __init__(self): - self.code_mutable = True - self.codes = [] - self.op_descs = [] - self.var_descs = Dict() - - def __repr__(self): - return ('Program(code mutable: {}) with:\n' - 'codes: {}\n' - 'op_descs: {}\n' - 'var_descs: {}\n').format(self.code_mutable, self.codes, - self.op_descs, - list(self.var_descs.values())) - - def Code(self, code): - """ - add Python code - """ - - if self.code_mutable: - self.codes.append(code) - - def OpDesc(self, op_type, input_key_vals, output_key_vals, attrs): - """ - 
add OpDesc - """ - - desc = framework_pb2.OpDesc() - desc.type = op_type - desc.inputs.extend(self.OpDescVars(*input_key_vals)) - desc.outputs.extend(self.OpDescVars(*output_key_vals)) - desc.attrs.extend(self.OpDescAttrs(attrs)) - self.op_descs.append(desc) - return desc - - def VarDesc(self, - name, - persistable=False, - value_info=None, - remove_batch=None): - """ - add VarDesc, - """ - - assert name not in self.var_descs, 'var name {} conflicts'.format(name) - - var_desc = framework_pb2.VarDesc() - var_desc.name = name - var_desc.persistable = persistable - var_desc.type.type = framework_pb2.VarType.LOD_TENSOR - self.var_descs[name] = var_desc - - if value_info is not None: - self.VarTypeShapeInfo(name, value_info, remove_batch=remove_batch) - - def Op(self, domain, op_type, inputs, outputs, attrs, *args, **kwargs): - """ - convert an ONNX op and add it to program - """ - - if domain != '': # TODO: symbolic file routing by domain - raise ValueError('only default domain supported') - - if op_type in symbolic.DEFAULT_OP_MAPPING: - symbolic._default(self, op_type, inputs, outputs, attrs, *args, - **kwargs) - elif hasattr(symbolic, op_type): - fn = getattr(symbolic, op_type) - fn(self, inputs, outputs, attrs, *args, **kwargs) - else: - raise ValueError('conversion for {}::{} not supported'.format( - domain, op_type)) - - def IntermediateOp(self, domain, op_type, inputs, outputs, attrs, *args, - **kwargs): - """ - convert an intermediate ONNX op declaring in desc program only - """ - - code_mutable = self.code_mutable - self.code_mutable = False - try: - self.Op(domain, op_type, inputs, outputs, attrs, *args, **kwargs) - except BaseException as e: - self.code_mutable = code_mutable - raise e - else: - self.code_mutable = code_mutable - - def VarTypeShapeInfo(self, name, value_info, remove_batch=None): - """ - set value_info for var - """ - - if name not in self.var_descs: - return - - dtype = value_info.get('dtype', None) - if dtype is None: - return - - var_desc = self.var_descs[name] - tensor_desc = var_desc.type.lod_tensor.tensor - tensor_desc.data_type = self.Dtype(dtype) # required - - shape = value_info.get('shape', None) - if not shape: # None or scalars - return - - tensor_desc.dims.extend(shape) - if remove_batch is None: - remove_batch = value_info.get('remove_batch', - False) #not persistable) - if remove_batch: - tensor_desc.dims[0] = -1 - - -class Writer(object): - """ - fluid code and desc writter - """ - - CODE_INDENT = ' ' * 4 # '\t' - - @staticmethod - def header_code(func_name, info=''): - """ - Python header codes - """ - - codes = [] - codes.append('"""') - codes.append('This code is generated by onnx2fluid.') - codes.append('{}'.format(info)) - codes.append('"""') - codes.append('') - codes.append('from __future__ import division') - codes.append('') - codes.append('from paddle.fluid import ParamAttr') - codes.append('from paddle.fluid import initializer, layers') - codes.append('') - codes.append('') - - codes.append('def {}():'.format(func_name)) - return codes - - @staticmethod - def emit_op(prog, name, domain, op_type, inputs, outputs, attrs, - value_infos, *args, **kwargs): - """ - emit an ONNX op into program - """ - - prog.Code('# {}, {}::{}: {} -> {}, {}'.format(name, domain, op_type, - inputs, outputs, - irepr(attrs, to=', '))) - prog.Op(domain, - op_type, - inputs, - outputs, - attrs, - value_infos=value_infos, - name=name, - *args, - **kwargs) - - @staticmethod - def emit_param(prog, name, value_info): - """ - emit an ONNX weight into program - """ - - 
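`Program.OpDescAttrs` above is the single place where plain Python attribute values are coerced into `framework_pb2` attribute kinds (bool to BOOLEAN, int to INT, list of ints to INTS, str to STRING, and so on). A small probe of that mapping; it needs the removed package, or at least its bundled `framework_pb2` fallback, to run:

```python
from onnx2fluid.writer import Program

attrs = Program.OpDescAttrs({
    'use_peepholes': True,       # -> BOOLEAN
    'axis': 1,                   # -> INT
    'expand_times': [1, 2, 2],   # -> INTS
    'mode': 'constant',          # -> STRING
})
for attr in attrs:
    print(attr.name, attr.type)
```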
embedded_names = value_info.get('embedded_as', []) - if embedded_names: - prog.Code('# parameter {} embedded as {}'.format( - name, embedded_names)) - for embedded_name in embedded_names: - prog.VarDesc(embedded_name, - persistable=True, - value_info=value_info) - else: - attr_name = make_attr_name(name) - prog.Code('# parameter {}'.format(name)) - prog.Code('{} = ParamAttr(name={})' # , trainable=True - .format(attr_name, repr(name))) - prog.Code( - '{} = layers.create_parameter(shape={}, dtype={}, name={}, attr={}' - ', default_initializer=initializer.Constant(0))' #, is_bias={} - .format(name, value_info['shape'], - repr(value_info['dtype'].name), repr(name), - attr_name)) #, value_info.get('is_bias', False))) - prog.VarDesc(name, persistable=True, value_info=value_info) - - @staticmethod - def emit_inputs(prog, names, value_infos, remove_batch=None): - """ - emit ONNX inputs into program - """ - - for idx, name in enumerate(names): - value_info = value_infos[name] - shape = value_info['shape'] - if remove_batch is None: - remove_batch = value_info.get('remove_batch', - True) # HINT: True by default ? - if remove_batch: - shape = shape[1:] - - prog.Code('# input {}'.format(name)) - prog.Code(( - '{} = layers.data(name={}, shape={}, dtype={}, ' - 'append_batch_size={})' # , stop_gradient=True - ).format( - name, - repr(name), - shape, - repr(value_info['dtype'].name), - remove_batch, - )) - prog.OpDesc( - 'feed', - (['X'], ['feed']), - (['Out'], [name]), - {'col': idx}, - ) - prog.VarDesc(name, value_info=value_info, remove_batch=remove_batch) - - @staticmethod - def emit_outputs(prog, names): #, value_infos - """ - emit ONNX outputs into program - """ - - code = 'return ' - for idx, name in enumerate(names): - code += name + ', ' - - prog.OpDesc( - 'fetch', - (['X'], [name]), - (['Out'], ['fetch']), - {'col': idx}, - ) - # var is emitted over ops - prog.Code(code) - - @staticmethod - def add_codes(codes, others, indent): - """ - flatten codes in program - """ - - for code in flatten_list(others): - codes.append(Writer.CODE_INDENT * indent + code) - return codes - - @staticmethod - def write_weight(weight, filename, lod=None): - """ - write single weight in fluid desc - """ - - assert isinstance(weight, np.ndarray), 'weight is not an ndarray' - assert lod is None or isinstance(lod, - list), 'lod should be None or list' - - if lod is None: - lod = [0] - - tensor_desc = framework_pb2.VarType.TensorDesc() - tensor_desc.data_type = Program.Dtype(weight.dtype) - tensor_desc.dims.extend(weight.shape) - - fp = open(filename, 'wb') - np.array([0], dtype=np.int32).tofile(fp) # version - np.array(lod, dtype=np.int64).tofile(fp) # LOD level - np.array([0], dtype=np.int32).tofile(fp) # tensor version - np.array([tensor_desc.ByteSize()], dtype=np.int32).tofile(fp) - fp.write(tensor_desc.SerializeToString()) - weight.tofile(fp) - fp.close() - - @staticmethod - def write_weights(weights, save_dir): - """ - write multiple weights in each fluid desc - """ - - for name, weight in weights.items(): - assert isinstance(weights, dict), 'dict type weights required' - - filename = os.path.join(save_dir, name) - Writer.write_weight(weight, filename) - logger.debug('saved weight %s to %s', name, filename) - - @staticmethod - def write_code_file(filename, header_code, *body_codes): - """ - write Python code to file - """ - - codes = [] - Writer.add_codes(codes, header_code, 0) - for body_code in body_codes: - Writer.add_codes(codes, body_code, 1) - - fp = open(filename, 'w') - for code in flatten_list(codes): - 
fp.write(code) - fp.write('\n') - fp.close() - logger.debug('saved codes to %s', filename) - - @staticmethod - def write_desc_file(filename, op_descs, var_descs): - """ - write desc program to file - """ - - prog_desc = framework_pb2.ProgramDesc() - block_desc = prog_desc.blocks.add() - block_desc.idx = 0 - block_desc.parent_idx = -1 - block_desc.ops.extend(op_descs) - block_desc.vars.extend(var_descs) - - # add feed-fetch on vars - feed_var_desc = block_desc.vars.add() - feed_var_desc.name = 'feed' - feed_var_desc.type.type = framework_pb2.VarType.FEED_MINIBATCH - feed_var_desc.persistable = True - fetch_var_desc = block_desc.vars.add() - fetch_var_desc.name = 'fetch' - fetch_var_desc.type.type = framework_pb2.VarType.FETCH_LIST - fetch_var_desc.persistable = True - - fp = open(filename, 'wb') - fp.write(prog_desc.SerializeToString()) - fp.close() - logger.debug('saved descs to %s', filename) diff --git a/onnx2fluid/requirements.txt b/onnx2fluid/requirements.txt deleted file mode 100644 index 9a67fa8..0000000 --- a/onnx2fluid/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ --e . -onnx>=1.4 -paddlepaddle>=1.5 diff --git a/onnx2fluid/setup.cfg b/onnx2fluid/setup.cfg deleted file mode 100644 index bf59c1f..0000000 --- a/onnx2fluid/setup.cfg +++ /dev/null @@ -1,75 +0,0 @@ -# setup.cfg相关文档可参考如下链接 -# https://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files -[metadata] -# 项目名称,发布、安装时以此作为包名 -name = onnx2fluid -# 作者姓名和邮箱地址 -author = Macrobull -# author_email = .Github@github.com -# 项目版本号,1.0以上版本才视为正式版 -version = 0.1.1 -# 项目概要描述信息,一句话让用户明白项目概要,不支持中文 -description = Inference model conversion from ONNX/PyTorch to Paddle fluid -# 项目的详细描述内容和格式,包括readme和changelog等,通常使用md或rst等格式 -long_description = file: README.md, CHANGELOG.md -long_description_content_type = text/markdown -# 开源授权协议,非对外开源的项目无需关注 -license = MIT -# 项目类别,非对外开源的项目无需关注 -# 从PyPI官方给出的列表中选择符合的内容进行填写 -# https://pypi.org/pypi?%3Aaction=list_classifiers -classifier = - Private :: Do Not Upload - Programming Language :: Python - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 -# 关键字,用于检索,方便用户搜索到你的项目 -keywords = - onnx paddlepaddle - -[options] -# 包名称,find:表示自动寻找,可在options.packages.find中进行详细配置 -packages = find: -# 依赖管理,包含项目运行时所需要的所有依赖库 -# 每行一个依赖库,只写直接依赖,通常无需考虑间接依赖 -# 在这里指定的版本限制应当尽量抽象,通常只要指定最低版本和大版本号即可 -install_requires = - onnx >= 1.4 - -# 测试依赖,包含项目测试时所需要的额外的依赖库,格式与install_requires一致 -# 可以使用内置的unittest,也可以使用更简单的pytest或nose等单测框架 -# python3自带mock库,而python2没有,如果需要使用则必须写入测试依赖中 -#tests_require = -# pytest -# mock - -# 单测代码目录 -#test_suite = onnx2fluid.tests -# 自动添加被版本控制的数据文件 -include_package_data = True -# 项目是纯py项目,可以直接执行zip源码包 -zip_safe = True - -# 可以通过以下配置将指定的函数变成命令行工具,允许用户直接执行 -[options.entry_points] -console_scripts = - onnx2fluid = onnx2fluid.__main__ - onnx2fluid_convert = onnx2fluid.conversion:main - onnx2fluid_validate = onnx2fluid.validation:main - -# 可以通过以下配置向包中添加conf或data等非py文件,安装时会一同安装到site-packages目录下 -# 仅支持文件,不支持目录,但可以使用通配 -#[options.package_data] -#onnx2fluid = -# conf/* -# data/* - -[sdist] -dist_dir = output/dist - -[bdist_wheel] -# 如果项目可以一份代码同时运行在python2和python3上,则设置universal为1 -#universal=1 -dist_dir = output/dist - -[easy_install] diff --git a/onnx2fluid/setup.py b/onnx2fluid/setup.py deleted file mode 100755 index 48d3925..0000000 --- a/onnx2fluid/setup.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -################################################################################ -# -# Copyright (c) 2019 Baidu.com, Inc. 
All Rights Reserved -# -################################################################################ -""" -Setup script. - -Authors: Macrobull -Date: 2019/02/22 10:25:46 -""" - -import setuptools - -setuptools.setup() diff --git a/tensorflow2fluid/README.md b/tensorflow2fluid/README.md deleted file mode 100644 index 66fb4bd..0000000 --- a/tensorflow2fluid/README.md +++ /dev/null @@ -1,136 +0,0 @@ -# tensorflow2fluid -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) - - -tensorflow2fluid支持将训练好的TensorFlow模型转换为PaddlePaddle模型,包括基于PaddlePaddle实现的模型前向计算网络python代码,以及PaddlePaddle可加载的模型参数文件。 -此外在[[doc](doc)]目录中整理了TensorFlow-PaddlePaddle的常用API对比分析。 -[环境安装](#环境安装)  [使用方法](#使用方法)  [验证模型](#验证模型)  [常见问题](#常见问题) - -## 环境安装 - -工具开发过程中,我们在如下环境配置中测试模型转换,建议使用[anaconda](https://docs.anaconda.com/anaconda/install) - -> python == 2.7 or 3.6 - -> tensorflow == 1.12.0 - -> paddlepaddle == 1.3.0 - -``` shell -# pip install tensorflow-gpu -conda install tensorflow-gpu -pip install paddlepaddle-gpu - -# 上述安装过程可能会提示protobuf版本问题 -# 升级protobuf解决 -pip install protobuf --upgrade -``` - -## 使用方法 -本目录下提供了demo示例,展示如何将VGG_16模型转换为PaddlePaddle模型,详见[vgg_translate_tutorial](vgg_translate_tutorial.ipynb) -### 转换模型 -``` -python tf2fluid/convert.py --pb_file tf_model.pb \ - --in_nodes inputs \ - --output_nodes outputs \ - --input_shape None,224,224,3 \ - --input_format NHWC \ - --use_cuda True \ - --save_dir translated_paddle_model -``` -### 加载模型并预测 -本目录下提供了[model_loader.py](tf2fluid/model_loader.py),可以辅助用户简单的加载模型和预测,和dump模型,用户可直接参考其实现 - -``` python -# coding:utf-8 -# 代码运行目录 X2Paddle/tensorflow2fluid -import sys -import tf2fluid.model_loader as ml - -# 加载模型 -model = ml.ModelLoader("translated_paddle_model", use_cuda=True) - -# 随机生成数据用于模型预测 -# 注意Paddle CV模型输入格式为NCHW !!! 
-data = numpy.random.rand(5, 3, 224, 224).astype('float32') -results = model.inference(feed_dict={model.inputs[0]:data}) - -# 返回的results为list,元素为np.array -for res in results: - print(res.shape) -``` - -使用转换后的模型主要注意,**模型转换后,计算结果与原模型存在一定精度的diff,因此务必检查模型转换前后,在输入同样的数据前提下,diff是否符合预期** - -### 序列化模型结构 -tensorflow2fluid转换后的模型结构以python代码定义形式供用户直观阅读或修改,如若需要将模型结构和参数均序列化存储,可以上面的示例代码中,调用如下代码即可,序列化的模型结构和参数如何加载可见PaddlePaddle使用文档中的[加载预测模型](http://www.paddlepaddle.org/documentation/docs/zh/1.3/api_guides/low_level/inference.html#id4) -``` python -model.save_inference_model("new_model_dir") -``` - -### 参数说明 -|tf2fluid参数|说明| -|-----------|-----------------------------------------------| -|meta_file|TensorFlow模型序列化后保存的meta文件| -|ckpt_dir|TensorFlow模型保存checkpoint目录| -|pb_file|Tensorflow保存的pb格式模型| -|in_nodes|输入tensor名,多个输入时以空格分隔| -|input_shape|输入tensor的shape(batch维度以None表示),shape之间以空格分隔,shape内各维度以逗号分隔| -|input_format|输入数据格式,NHWC/NCHW/OTHER| -|output_nodes|输出tensor名,多个输出时以空格分隔| -|use_cuda|转换过程中是否使用GPU,默认True| -|save_dir|转换后的模型保存路径| - -目前支持tensorflow保存的checkpoint模型和将参数及模型结构序列化存储的pb模型,前者须指定meta_file和ckpt_dir,后者则指定pb_file -**FAQ:输入tensor名和输出tensor名是指什么?** -TensorFlow模型在infer时,一般调用代码形如`sess.run([output], {input:data})`,其中output即为输出tensor,input则为输入tensor,在进行模型转换时,需提供这input和output对应的`tensor name`,如在[vgg_translate_tutorial](vgg_translate_tutorial.ipynb)中转换VGG_16模型,输入的tensor名为 "inputs", 输出的tensor名为 "vgg_16/fc8/squeezed" - -### 转换后模型文件说明 -文件|作用 -:------------------:|:-----------------------------------------------: -mymodel.py|基于PaddlePaddle实现的模型网络结构python代码 -ref_name.info|my_model.py中各tensor与原TensorFlow模型中的tensor对应关系 -const_\*/params_\*|转换后的模型参数文件 -save_var.list|模型载入过程中的变量list - -## 验证模型 -tensorflow2fluid在如下tensorflow模型上测试了模型转换前后的diff - -| 模型类别 | 模型 | Code | 最大diff | -| -------- | ------------- | ------ | -------- | -| 图像分类 | VGG_16 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py) | 1.04E-05 | -| | VGG_19 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py) | 9.07E-06 | -| | ResNet V1 50 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py) | 1.31E-06 | -| | ResNet V1 101 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py) | 4.74E-07 | -| | Inception V3 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v3.py) | 1.55E-04 | -| | NASNet_Large | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py) | - | -| | PNASNet_Large | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/pnasnet.py) | - | -| 目标检测 | YOLO-Small | [code](https://github.com/gliese581gg/YOLO_tensorflow) | 1.40E-06 | -| | YOLO-V3 | [code](https://github.com/mystic123/tensorflow-yolo-v3) | 6.20E-04 | -| 语义分割 | Unet | [code](https://github.com/jakeret/tf_unet) | 4.17E-07 | - -## 常见问题 -1. 转换参数`input_format`的设定? -> TensorFlow中的CV模型,大多采用`NHWC`的输入格式,但同时也可以支持`NCHW`的格式输入;而在PaddlePaddle中,支持的是`NCHW`的格式。因此需要在转换模型时,指定TensorFlow模型的输入格式,转换过程中会根据输入格式,对输入数据,参数进行变换。 - -2. 转换参数`input_shape`的设定? - -> 在模型转换时,需设定输入数据的具体`shape`。因为转换过程中,涉及到较多参数的转换,因此模型转换完成应用到预测时,输入数据的`shape`也须与之前指定的一致,否则可能会出错。 - -3. 转换参数`use_cuda`的设定? - -> 受限于PaddlePaddle与TensorFlow部分OP上的实现差异,部分tensor参数(在TensorFlow中,这部分参数类型是tensor类型,但值保持不变)需要通过infer得到。因此模型转换过程中,同时也会加载tensorflow模型进行预测,消耗计算资源。在有GPU资源的的前提下,将`use_cuda`设为`True`有助于提升转换速度。 - -4. 模型转换前后diff对比? 
- -> tensorflow2fluid仍在不断完善和测试中,用户转换完模型后,注意对比模型在转换前后的输出diff是否在可接受范围内。此外转换后的模型结构`mymodel.py`如存在构建失败的问题,可能是由于部分参数在特殊情况下未被考虑到导致,用户可以直接通过修改`mymodel.py`来解决。 - -5. 模型转换失败,提示"Unsupported OP: XXX"? - -> 目前tf2fluid支持50个左右常见OP的转换,仍然在不断补充中,当出现如上提示时,即表示模型存在暂未支持的OP,用户可以直接在[tf2fluid/paddle_emitter.py](tf2fluid/paddle_emitter.py)中仿照`emit_xxx`函数添加转换代码支持,或者也欢迎通过提ISSUE的方式让我们知道你的需求! - -## Link - -本目录下部分代码参考了MMdnn-Tensorflow,对此表示感谢! - -[MMdnn-Tensorflow](https://github.com/Microsoft/MMdnn/tree/master/mmdnn/conversion/tensorflow) diff --git a/tensorflow2fluid/doc/ReadMe.md b/tensorflow2fluid/doc/ReadMe.md deleted file mode 100644 index 613e160..0000000 --- a/tensorflow2fluid/doc/ReadMe.md +++ /dev/null @@ -1,148 +0,0 @@ -# TensorFlow-Fluid接口对应表 - -本文档基于TensorFlow v1.13梳理了常用API与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有TensorFlow使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用 。 - -| 序号 | TensorFlow接口 | PaddlePaddle接口 | 备注 | -| ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| 1 | [tf.abs](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/abs) | [fluid.layers.abs](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#abs) | 功能一致 | -| 2 | [tf.add](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/add) | [fluid.layers.elementwise_add](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#elementwise_add) | 功能一致 | -| 3 | [tf.argmax](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/argmax) | [fluid.layers.argmax](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#argmax) | 功能一致 | -| 4 | [tf.argmin](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/argmin) | [fluid.layers.argmin](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#argmin) | 功能一致 | -| 5 | [tf.assign](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/assign) | [fluid.layers.assign](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#assign) | 功能一致 | -| 6 | [tf.assign_add](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/assign_add) | [fluid.layers.increment](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#increment) | 功能一致 | -| 7 | [tf.case](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/case) | [fluid.layers.Switch](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#Switch) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.case.md) | -| 8 | [tf.cast](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/dtypes/cast) | [fluid.layers.cast](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#cast) | 功能一致 | -| 9 | [tf.clip_by_global_norm](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/clip_by_global_norm) | [fluid.clip.GradientClipByGlobalNorm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/clip_cn.html#gradientclipbyglobalnorm) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.clip_by_global_norm.md) | -| 10 | [tf.clip_by_norm](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/clip_by_norm) | [fluid.layers.clip_by_norm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.clip_by_norm) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.clip_by_norm.md) | -| 11 | 
[tf.clip_by_value](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/clip_by_value) | [fluid.layers.clip](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#clip) | 功能一致 | -| 12 | [tf.concat](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/concat) | [fluid.layers.concat](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.concat) | 功能一致 | -| 13 | [tf.cond](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/cond) | [fluid.layers.ifElse](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#ifElse) | 功能一致 | -| 14 | [tf.constant](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/constant) | [fluid.layers.fill_constant](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#fill_constant) | 功能一致 | -| 15 | [tf.contrib.layers.batch_norm](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/contrib/layers/batch_norm) | [fluid.layers.batch_norm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#batch_norm) | 功能一致 | -| 16 | [tf.contrib.layers.flatten](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/contrib/layers/flatten) | [fluid.layers.flatten](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#flatten) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.contrib.layers.flatten.md) | -| 17 | [tf.contrib.layers.fully_connected](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/contrib/layers/fully_connected) | [fluid.layers.fc](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#fc) | 功能一致 | -| 18 | [tf.contrib.layers.one_hot_encoding](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/contrib/layers/one_hot_encoding) | [fluid.layers.one_hot](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#one_hot) | 功能一致 | -| 19 | [tf.contrib.layers.softmax](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/contrib/layers/softmax) | [fluid.layers.softmax](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#softmax) | 功能一致 | -| 20 | [tf.contrib.layers.xavier_initializer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/contrib/layers/xavier_initializer) | [fluid.initializer.Xavier](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/initializer_cn.html#xavier) | 功能一致 | -| 21 | [tf.nn.rnn.GRUCell](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/rnn_cell/GRUCell) | [fluid.layers.gru_unit](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#gru_unit) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.contrib.rnn.GRUCell.md) | -| 22 | [tf.nn.rnn.MultiRNNCell](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/rnn_cell/MultiRNNCell) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.rnn_cell.MultiRNNCell.md) | -| 23 | [tf.nn.rnn.static_rnn](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/static_rnn) | [fluid.layers.DynamicRNN](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#dynamicrnn) | 功能一致 | -| 24 | [tf.convert_to_tensor](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/convert_to_tensor) | [fluid.layers.assign](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#assign) | 功能一致 | -| 25 | [tf.cos](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/cos) | 
[fluid.layers.cos](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#cos) | 功能一致 | -| 26 | [tf.div](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/div) | [fluid.layers.elementwise_div](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.elementwise_div) | 功能一致 | -| 27 | [tf.divide](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/divide) | [fluid.layers.elementwise_div](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#elementwise_div) | 功能一致 | -| 28 | [tf.dropout](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/dropout) | [fluid.layers.dropout](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.dropout) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.dropout.md) | -| 29 | [tf.equal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/equal) | [运算符==](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/compare_op.md) | 功能一致 | -| 30 | [tf.exp](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/exp) | [fluid.layers.exp](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#exp) | 功能一致 | -| 31 | [tf.expand_dims](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/expand_dims) | [fluid.layers.unsqueeze](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#unsqueeze) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.expand_dims.md) | -| 32 | [tf.fill](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/fill) | [fluid.layers.fill_constant](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.fill_constant) | 功能一致 | -| 33 | [tf.floor](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/floor) | [fluid.layers.floor](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#floor) | 功能一致 | -| 34 | [tf.gather](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/gather) | [fluid.layers.gather](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.gather) | 功能一致 | -| 35 | [tf.greater](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/greater) | [运算符>](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/compare_op.md) | 功能一致 | -| 36 | [tf.greater_equal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/greater_equal) | [运算符>=](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/compare_op.md) | 功能一致 | -| 37 | [tf.image.non_max_suppression](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/image/non_max_suppression) | [fluid.layers.multiclass_nms](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.multiclass_nms) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.image.non_max_suppression.md) | -| 38 | [tf.image.resize_bilinear](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/image/resize_bilinear) | [fluid.layers.resize_bilinear](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.resize_bilinear) | 功能一致 | -| 39 | [tf.image.resize_images](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/image/resize_images) | 
[fluid.layers.image_resize](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.image_resize) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.image.resize_images.md) | -| 40 | [tf.image.resize_nearest_neighbor](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/image/resize_nearest_neighbor) | [fluid.layers.resize_nearest](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.resize_nearest) | 功能一致 | -| 41 | [tf.is_finite](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/is_finite) | [fluid.layers.isfinite](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#isfinite) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.math.is_finite.md) | -| 42 | [tf.layers.batch_normalization](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/batch_normalization) | [fluid.layers.batch_norm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.batch_norm) | 功能一致 | -| 43 | [tf.layers.conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/conv2d) | [fluid.layers.conv2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.layers.conv2d.md) | -| 44 | [tf.layers.dense](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/dense) | [fluid.layers.fc](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#fc) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.layers.dense.md) | -| 45 | [tf.layers.dropout](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/dropout) | [fluid.layers.dropout](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#dropout) | 功能一致 | -| 46 | [tf.layers.Dropout](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/Dropout) | [fluid.layers.dropout](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#dropout) | 功能一致 | -| 47 | [tf.layers.flatten](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/flatten) | [fluid.layers.flatten](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.flatten) | 功能一致 | -| 48 | [tf.less](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/less) | [运算符<](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/compare_op.md) | 功能一致 | -| 49 | [tf.less_equal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/less_equal) | [运算符<=](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/compare_op.md) | 功能一致 | -| 50 | [tf.log](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/log) | [fluid.layers.log](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.log) | 功能一致 | -| 51 | [tf.logical_and](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/logical_and) | [fluid.layers.logical_and](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#logical_and) | 功能一致 | -| 52 | [tf.logical_not](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/logical_not) | [fluid.layers.logical_not](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#logical_not) | 功能一致 | -| 53 | 
[tf.logical_or](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/logical_or) | [fluid.layers.logical_or](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#logical_or) | 功能一致 | -| 54 | [tf.losses.mean_squared_error](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/losses/mean_squared_error) | [fluid.layers.square_error_cost](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#square_error_cost) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.losses.mean_and_squared_error.md) | -| 55 | [tf.losses.sigmoid_cross_entropy](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/losses/sigmoid_cross_entropy) | [fluid.layers.sigmoid_cross_entropy_with_logits](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sigmoid_cross_entropy_with_logits) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.losses.sigmoid_cross_entropy.md) | -| 56 | [tf.losses.softmax_cross_entropy](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/losses/softmax_cross_entropy) | [fluid.layers.softmax_with_cross_entropy](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.softmax_with_cross_entropy) | 功能一致 | -| 57 | [tf.matmul](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/linalg/matmul) | [fluid.layers.matmul](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#matmul) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.matmul.md) | -| 58 | [tf.maximum](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/maximum) | [fluid.layers.elementwise_max](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.elementwise_max) | 功能一致 | -| 59 | [tf.metrics.accuracy](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/metrics/accuracy) | [fluid.layers.accuracy](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.accuracy) | 功能一致 | -| 60 | [tf.metrics.mean](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/metrics/mean) | [fluid.layers.mean](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#mean) | 功能一致 | -| 61 | [tf.minimum](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/minimum) | [fluid.layers.elementwise_min](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#elementwise_min) | 功能一致 | -| 62 | [tf.multiply](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/multiply) | [fluid.layers.elementwise_mul](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#elementwise_mul) | 功能一致 | -| 63 | [tf.nn.avg_pool](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/avg_pool) | [fluid.layers.pool2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.pool2d) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.avg_pool.md) | -| 64 | [tf.nn.batch_normalization](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/batch_normalization) | [fluid.layers.batch_norm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.batch_norm) | 功能一致 | -| 65 | [tf.nn.bidirectional_dynamic_rnn](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/bidirectional_dynamic_rnn) | 无相应接口 | 
[Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.bidirectional_dynamic_rnn.md) | -| 66 | [tf.nn.conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/conv2d) | [fluid.layers.conv2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.conv2d.md) | -| 67 | [tf.nn.conv2d_transpose](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/conv2d_transpose) | [fluid.layers.conv2d_transpose](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d_transpose) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.conv2d_transpose.md) | -| 68 | [tf.nn.conv3d_transpose](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/conv3d_transpose) | [fluid.layers.conv3d_transpose](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d_transpose) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.conv3d_transpose.md) | -| 69 | [tf.nn.depthwise_conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/depthwise_conv2d) | [fluid.layers.conv2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.depthwise_conv2d.md) | -| 70 | [tf.nn.dynamic_rnn](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/dynamic_rnn) | [fluid.layers.DynamicRNN](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#DynamicRNN) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.dynamic_rnn.md) | -| 71 | [tf.nn.l2_normalize](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/l2_normalize) | [fluid.layers.l2_normalize](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#l2_normalize) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.l2_normalize.md) | -| 72 | [tf.nn.leaky_relu](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/leaky_relu) | [fluid.layers.leaky_relu](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.leaky_relu) | 功能一致 | -| 73 | [tf.nn.lrn](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/local_response_normalization) | [fluid.layers.lrn](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.lrn) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.lrn.md) | -| 74 | [tf.nn.max_pool](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/max_pool) | [fluid.layers.pool2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.pool2d) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.max_pool.md) | -| 75 | [tf.nn.relu](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/relu) | [fluid.layers.relu](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#relu) | 功能一致 | -| 76 | [tf.nn.relu6](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/relu6) | [fluid.layers.relu6](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.relu6) | 功能一致 | -| 77 | 
[tf.nn.rnn_cell.LSTMCell](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/rnn_cell/LSTMCell) | [fluid.layers.lstm_unit](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#lstm_unit) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.rnn_cell.LSTMCell.md) | -| 78 | [tf.nn.separable_conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/separable_conv2d) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.separable_conv2d.md) | -| 79 | [tf.nn.sigmoid](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/sigmoid) | [fluid.layers.sigmoid](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sigmoid) | 功能一致 | -| 80 | [tf.nn.sigmoid_cross_entropy_with_logits](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/sigmoid_cross_entropy_with_logits) | [fluid.layers.sigmoid_cross_entropy_with_logits](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sigmoid_cross_entropy_with_logits) | 功能一致 | -| 81 | [tf.nn.softmax](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/softmax) | [fluid.layers.softmax](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#softmax) | 功能一致 | -| 82 | [tf.nn.softmax_cross_entropy_with_logits](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/softmax_cross_entropy_with_logits) | [fluid.layers.softmax_with_cross_entropy](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#softmax_with_cross_entropy) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.softmax_cross_entropy_with_logits.md) | -| 83 | [tf.nn.softplus](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/softplus) | [fluid.layers.softplus](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#softplus) | 功能一致 | -| 84 | [tf.nn.softsign](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/softsign) | [fluid.layers.softsign](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.softsign) | 功能一致 | -| 85 | [tf.nn.tanh](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/tanh) | [fluid.layers.tanh](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#tanh) | 功能一致 | -| 86 | [tf.one_hot](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/one_hot) | [fluid.layers.one_hot](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.one_hot) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.one_hot.md) | -| 87 | [tf.ones](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/ones) | [fluid.layers.ones](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#ones) | 功能一致 | -| 88 | [tf.intializers.ones](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/initializers/ones) | [fluid.initializer.Constant](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/initializer_cn.html#constant) | 功能一致 | -| 89 | [tf.pad](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/pad) | [fluid.layers.pad](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#pad) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.pad.md) | -| 90 | [tf.placeholder](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/placeholder) | 
[fluid.layers.data](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.data) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.placeholder.md) | -| 91 | [tf.pow](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/pow) | [fluid.layers.pow](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#pow) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.pow.md) | -| 92 | [tf.print](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/print) | [fluid.layers.print](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#print) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.print.md) | -| 93 | [tf.py_func](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/py_func) | [fluid.layers.py_func](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.py_func) | 功能一致 | -| 94 | [tf.random_normal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/random/normal) | [fluid.layers.gaussian_random](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.gaussian_random) | 功能一致 | -| 95 | [tf.random_normal_initializer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/initializers/random_normal) | [fluid.initializer.Normal](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/initializer_cn.html#normal) | 功能一致 | -| 96 | [tf.random_uniform](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/random/uniform) | [fluid.layers.uniform_random](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.uniform_random) | 功能一致 | -| 97 | [tf.random_uniform_initializer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/initializers/random_uniform) | [fluid.initializer.UniformInitializer](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/initializer_cn.html#uniforminitializer) | 功能一致 | -| 98 | [tf.reduce_logsumexp](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/reduce_logsumexp) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.reduce_logsumexp.md) | -| 99 | [tf.reduce_max](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/reduce_max) | [fluid.layers.reduce_max](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#reduce_max) | 功能一致 | -| 100 | [tf.reduce_mean](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/reduce_mean) | [fluid.layers.reduce_mean](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#reduce_mean) | 功能一致 | -| 101 | [tf.reduce_min](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/reduce_min) | [fluid.layers.reduce_min](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#reduce_min) | 功能一致 | -| 102 | [tf.reduce_sum](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/reduce_sum) | [fluid.layers.reduce_sum](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#reduce_sum) | 功能一致 | -| 103 | [tf.reshape](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/reshape) | [fluid.layers.reshape](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.reshape) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.reshape.md) | -| 104 | 
[tf.reverse](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/reverse) | [fluid.layers.reverse](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#reverse) | 功能一致 | -| 105 | [tf.reverse_sequence](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/reverse_sequence) | [fluid.layers.sequence_reverse](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sequence_reverse) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.reverse_sequence.md) | -| 106 | [tf.reverse_v2](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/reverse) | [fluid.layers.reverse](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.reverse) | 功能一致 | -| 107 | [tf.round](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/round) | [fluid.layers.round](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.round) | 功能一致 | -| 108 | [tf.rsqrt](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/rsqrt) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.math.rsqrt.md) | -| 109 | [tf.scalar_mul](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/scalar_mul) | [fluid.layers.scale](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#scale) | 功能一致 | -| 110 | [tf.scatter_update](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/scatter_update) | [fluid.layers.scatter](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#scatter) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.scatter_update.md) | -| 111 | [tf.sequence_mask](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/sequence_mask) | [fluid.layers.sequence_mask](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sequence_mask) | 功能一致 | -| 112 | [tf.shape](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/shape) | [fluid.layers.shape](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#shape) | 功能一致 | -| 113 | [tf.sigmoid](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/sigmoid) | [fluid.layers.sigmoid](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sigmoid) | 功能一致 | -| 114 | [tf.sin](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/sin) | [fluid.layers.sin](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.sin) | 功能一致 | -| 115 | [tf.slice](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/slice) | [fluid.layers.slice](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#slice) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.slice.md) | -| 116 | [tf.split](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/split) | [fluid.layers.split](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#split) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.split.md) | -| 117 | [tf.sqrt](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/sqrt) | [fluid.layers.sqrt](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sqrt) | 功能一致 | -| 118 | [tf.square](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/square) | 
[fluid.layers.square](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.square) | 功能一致 | -| 119 | [tf.squared_difference](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/squared_difference) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.squared_difference.md) | -| 120 | [tf.squeeze](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/squeeze) | [fluid.layers.squeeze](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#squeeze) | 功能一致 | -| 121 | [tf.stack](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/stack) | [fluid.layers.stack](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#stack) | 功能一致 | -| 122 | [tf.stop_gradient](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/stop_gradient) | 无相应接口 | [Paddle实现方法](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.stop_gradient.md) | -| 123 | [tf.subtract](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/subtract) | [fluid.layers.elementwise_sub](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.elementwise_sub) | 功能一致 | -| 124 | [tf.tanh](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/tanh) | [fluid.layers.tanh](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#tanh) | 功能一致 | -| 125 | [tf.tile](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/tile) | [fluid.layers.expand](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.expand) | 功能一致 | -| 126 | [tf.top_k](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/top_k) | [fluid.layers.topk](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.topk) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.top_k.md) | -| 127 | [tf.train.AdagradOptimizer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/train/AdagradOptimizer) | [fluid.optimizer.AdagradOptimizer](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.optimizer.AdagradOptimizer) | 功能一致 | -| 128 | [tf.train.AdamOptimizer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/train/AdamOptimizer) | [fluid.optimizer.Adam](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.optimizer.Adam) | 功能一致 | -| 129 | [tf.train.exponential_decay](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/train/exponential_decay) | [fluid.layers.exponential_decay](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.exponential_decay) | 功能一致 | -| 130 | [tf.train.GradientDescentOptimizer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/train/GradientDescentOptimizer) | [fluid.optimizer.SGDOptimizer](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/optimizer_cn.html#sgdoptimizer) | 功能一致 | -| 131 | [tf.train.MomentumOptimizer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/train/MomentumOptimizer) | [fluid.optimizer.MomentumOptimizer](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/optimizer_cn.html#momentumoptimizer) | 功能一致 | -| 132 | [tf.train.polynomial_decay](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/train/polynomial_decay) | 
[fluid.layers.polynomial_decay](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.polynomial_decay) | 功能一致 | -| 133 | [tf.train.RMSPropOptimizer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/train/RMSPropOptimizer) | [fluid.optimizer.RMSPropOptimizer](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.optimizer.RMSPropOptimizer) | 功能一致 | -| 134 | [tf.transpose](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/transpose) | [fluid.layers.transpose](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.transpose) | 功能一致 | -| 135 | [tf.truediv](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/truediv) | [fluid.layers.elementwise_div](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.elementwise_div) | 功能一致 | -| 136 | [tf.truncated_normal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/random/truncated_normal) | [fluid.initializer.TruncatedNormal](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/initializer_cn.html#truncatednormal) | 功能一致 | -| 137 | [tf.truncated_normal_initializer](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/initializers/truncated_normal) | [fluid.initializer.TruncatedNormal](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.initializer.TruncatedNormal) | 功能一致 | -| 138 | [tf.unstack](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/unstack) | [fluid.layers.unstack](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.unstack) | 功能一致 | -| 139 | [tf.Variable](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/Variable) | [fluid.layers.create_parameter](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#create_parameter) | 功能一致 | -| 140 | [tf.while_loop](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/while_loop) | [fluid.layers.While](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#While) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.while_loop.md) | -| 141 | [tf.zeros](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/zeros) | [fluid.layers.zeros](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#zeros) | 功能一致 | -| 142 | [tf.zeros_initializer](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/zeros_initializer) | [fluid.initializer.Constant](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/initializer_cn.html#constant) | 功能一致 | diff --git a/tensorflow2fluid/doc/compare_op.md b/tensorflow2fluid/doc/compare_op.md deleted file mode 100644 index 57e9d9b..0000000 --- a/tensorflow2fluid/doc/compare_op.md +++ /dev/null @@ -1,11 +0,0 @@ -## 比较函数 - -在PaddlePaddle中使用运算符来对tensor之间进行`element-wise`方式的对比。其与TensorFlow相应接口关系如下表所示, - -| TensorFlow接口 | PaddlePaddle接口 | -|--------------------------|-------------------------------------------------| -|[tf.math.less_equal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/less_equal)|运算符`<=`| -|[tf.math.greater](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/greater)|运算符`>`| -|[tf.math.greater_equal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/greater_equal)|运算符`>=`| -|[tf.math.equal](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/equal)|运算符`==` 或 
[paddle.fluid.layers.equal](http://paddlepaddle.org/documentation/docs/zh/1.3/api_cn/layers_cn.html#permalink-7-equal) | -|[tf.math.less](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/less)|运算符`<` 或 [paddle.fluid.layers.less_than](http://paddlepaddle.org/documentation/docs/zh/1.3/api_cn/layers_cn.html#permalink-11-less_than) | \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.case.md b/tensorflow2fluid/doc/tf.case.md deleted file mode 100644 index 92016b9..0000000 --- a/tensorflow2fluid/doc/tf.case.md +++ /dev/null @@ -1,55 +0,0 @@ -## tf.case - -### [tf.case](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/case) - -```python -tf.case( - pred_fn_pairs, - default=None, - exclusive=False, - strict=False, - name='case' -) -``` - -### [paddle.fluid.layers.While](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#while) -```python -class paddle.fluid.layers.Switch( - name=None -) -``` - -### 功能差异 - -#### 使用方式 -TensorFlow:用户采用定义`条件-函数对`的方式,创建一个`case`操作; - -PaddlePaddle:用户通过在`switch`代码块中,定义`case`分支方式,实现`switch`操作。与TensorFlow对比,在使用形式上更类似于传统的c/c++代码。 - - -### 代码示例 -``` -# 如下代码展示进行学习率的调度,当global_step超过某个数值时,学习率减小 - -# 定义学习率tensor -lr = fluid.layers.tensor.create_global_var( - shape=[1], - value=0.0, - dtype='float32', - persistable=True, - name="learning_rate") - -# 定义学习率常量 -lr_0 = tensor.fill_constant( - shape=[1], dtype='float32', value=1.0) -lr_1 = tensor.fill_constant( - shape=[1], dtype='float32', value=0.1) - -# 当global_step超过10000时,采用lr_1,否则采用lr_0 -with fluid.layers.control_flow.Switch() as switch: - with switch.case(global_step > 10000): - fluid.layers.tensor.assign(input=lr_1, output=lr) - with switch.default(): - fluid.layers.tensor.assign(input=lr_0, output=lr) - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.clip_by_global_norm.md b/tensorflow2fluid/doc/tf.clip_by_global_norm.md deleted file mode 100644 index 172c604..0000000 --- a/tensorflow2fluid/doc/tf.clip_by_global_norm.md +++ /dev/null @@ -1,49 +0,0 @@ -## tf.clip_by_global_norm - -### [tf.clip_by_global_norm](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/clip_by_global_norm) - -```python -tf.clip_by_global_norm( - t_list, - clip_norm, - use_norm=None, - name=None -) -``` - -### [paddle.fluid.clip.GradientClipByGlobalNorm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/clip_cn.html#gradientclipbyglobalnorm) - -```python -paddle.fluid.clip.GradientClipByGlobalNorm( - clip_norm, - group_name='default_group' -) -``` - -### 功能差异 - -#### 使用方式 - -TensorFlow:采用函数调用形式,输入需要执行global_norm裁剪的tensor,返回裁剪后的结果; - -PaddlePaddle:采用类对象定义形式,使用`set_gradient_clip`函数设置`GradientClipByGlobalNorm`对象为裁剪方式。 - -#### 其他 -TensorFlow:使用`use_norm`支持外部设置global_norm,若没有设置则从`t_list`计算得到; - -PaddlePaddle:不支持外部设置。 - -### 代码示例 -``` -# 获取待裁剪的tensor列表 -p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip) - -with fluid.program_guard(main_program=prog_clip): - # 设置裁剪方式 - fluid.clip.set_gradient_clip( - fluid.clip.GradientClipByGlobalNorm(clip_norm=2.0)) - - # 执行裁剪并获取结果 - p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip) - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.clip_by_norm.md b/tensorflow2fluid/doc/tf.clip_by_norm.md deleted file mode 100644 index b7f2754..0000000 --- a/tensorflow2fluid/doc/tf.clip_by_norm.md +++ /dev/null @@ -1,27 +0,0 @@ -## tf.clip_by_norm - -### [tf.clip_by_norm](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/clip_by_norm) - -``` python -tf.clip_by_norm( - t, - clip_norm, - 
axes=None, - name=None -) -``` - - -### [paddle.fluid.layers.clip_by_norm](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.clip_by_norm) -``` python -paddle.fluid.layers.clip_by_norm( - x, - max_norm, - name=None -) -``` -### 功能差异 - -#### 计算方式 -TensorFlow: 使用参数`axis`指定的轴计算L2范数`l2-norm`,如若`axis`为None,则表示使用整个输入数据的L2范数; -PaddlePaddle:使用整个输入数据的L2范数。 \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.contrib.layers.flatten.md b/tensorflow2fluid/doc/tf.contrib.layers.flatten.md deleted file mode 100644 index 8327a67..0000000 --- a/tensorflow2fluid/doc/tf.contrib.layers.flatten.md +++ /dev/null @@ -1,37 +0,0 @@ -## tf.contrib.layers.flatten - -### [tf.contrib.layers.flatten](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/contrib/layers/flatten) - -```python -tf.contrib.layers.flatten( - inputs, - outputs_collections=None, - scope=None -) -``` - -### [paddle.fluid.layers.flatten](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#flatten) - -```python -paddle.fluid.layers.flatten( - x, - axis=1, - name=None -) -``` - -### 功能差异 - -#### 计算方式 - -TensorFlow:固定第0维,将其他维合并; - -PaddlePaddle:使用`axis`指定两次合并的维度边界,参考下面示例。 - -### 代码示例 -``` -# 张量x的shape为 [2, 3, 4, 5] -out = fluid.layers.flatten(x, axis=2) -out.shape # [2*3, 4*5] - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.expand_dims.md b/tensorflow2fluid/doc/tf.expand_dims.md deleted file mode 100644 index 1b284d3..0000000 --- a/tensorflow2fluid/doc/tf.expand_dims.md +++ /dev/null @@ -1,41 +0,0 @@ -## tf.expand_dims - -### [tf.expand_dims](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/expand_dims) -``` python -tf.expand_dims( - input, - axis=None, - name=None, - dim=None -) -``` - -### [paddle.fluid.layers.unsqueeze](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#unsqueeze) -``` python -paddle.fluid.layers.unsqueeze( - input, - axes, - name=None) -``` - -### 功能差异 - -#### 参数类型 -TensorFlow:`axis`为`int`类型或`0-D`tensor, 使用`axis`指定要增加维度的位置,支持负数进行索引; - -PaddlePaddle:`axes`为`list`类型,表示要增加维度的位置列表,支持在多个位置同时增加维度,也支持负数进行索引。 - - -### 代码示例 -```python -# 输入 tensor t 的 shape 为[3, 4] - -# 输出 tensor out 的 shape 为[1, 3, 4] -out = fluid.layers.unsqueeze(t, [0]) - -# 输出 tensor out 的 shape 为[3, 4, 1] -out = fluid.layers.unsqueeze(t, [-1]) - -# 输出 tensor out 的 shape 为[1, 1,3, 4] -out = fluid.layers.unsqueeze(t, [0, 1]) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.image.non_max_suppression.md b/tensorflow2fluid/doc/tf.image.non_max_suppression.md deleted file mode 100644 index ec3b328..0000000 --- a/tensorflow2fluid/doc/tf.image.non_max_suppression.md +++ /dev/null @@ -1,56 +0,0 @@ -## tf.image.non_max_suppression - -### [tf.image.non_max_suppression](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/image/non_max_suppression) -``` python -tf.image.non_max_suppression( - boxes, - scores, - max_output_size, - iou_threshold=0.5, - score_threshold=float('-inf'), - name=None -) -``` - -### [paddle.fluid.layers.multiclass_nms](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.multiclass_nms) -``` python -paddle.fluid.layers.multiclass_nms( - bboxes, - scores, - score_threshold, - nms_top_k, - keep_top_k, - nms_threshold=0.3, - normalized=True, - nms_eta=1.0, - background_label=0, - name=None) -``` - -### 功能差异 -#### 输入格式 -TensorFlow:`boxes`的shape为`[num_boxes, 4]`, `scores`的shape为`[num_boxes]`; 
-PaddlePaddle:相对比Tensorflow,还支持batch和多类别,`bboxes`的shape为`[batch, num_boxes, 4]`, `scores`的shape为`[batch, num_classes, num_boxes]`。 - -#### 输出格式 -TensorFlow: 返回shape为`[N]`的tensor,表示为`boxes`中选取的index集合,长度为`N`; -PaddlePaddle: 返回`[N, 6]`的[LodTensor](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/fluid_cn.html#lodtensor),其中每行内容为`[label, confidence, xmin, ymin, xmax, ymax]`。 - -#### 参数差异 -TensorFlow: 在所有boxes中,根据其它参数条件,最终选出的boxes数量不超过`max_output_size`; -PaddlePaddle: 在`nms_top_k`个boxes中,根据其它参数条件,最终选出的boxes数量不超过`keep_top_k`。 - -### 代码示例 -```python -clip_boxes = fluid.layers.data(dtype='float32', shape=[5000, 4], name='boxes') -scores = fluid.layers.data(dtype='float32', shape=[1, 5000], name='scores') - -# nms_top_k=-1,表示在输入的所有boxes中选取 -selected_boxes = fluid.layers.multiclass_nms( - clip_boxes, - scores, - score_threshold=0.5, - nms_top_k=-1, - keep_top_k=300, - nms_threshold=0.7) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.image.resize_images.md b/tensorflow2fluid/doc/tf.image.resize_images.md deleted file mode 100644 index 8e95655..0000000 --- a/tensorflow2fluid/doc/tf.image.resize_images.md +++ /dev/null @@ -1,40 +0,0 @@ -## tf.image.resize_images - -### [tf.image.resize_images](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/image/resize_images) -``` python -tf.image.resize_images( - images, - size, - method=ResizeMethod.BILINEAR, - align_corners=False, - preserve_aspect_ratio=False -) -``` - -### [paddle.fluid.layers.image_resize](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.image_resize) -``` python -paddle.fluid.layers.image_resize( - input, - out_shape=None, - scale=None, - name=None, - resample='BILINEAR', - actual_shape=None, - align_corners=True, - align_mode=1 -) -``` - -### 功能差异 -#### 参数种类 -TensorFlow:支持`BILINEAR`,`NEAREST`,`BICUBIC`, `AREA`四种方式; -PaddlePaddle:支持`BILINEAR`和`NEAREST`两种方式, `align_mode`是`BILINEAR`的可选项,当为1的时候,与TensorFlow功能一致。 - -### 代码示例 -```python -# 输入图像数据shape为[None, 3, 300, 300] -inputs = fluid.layers.data(dtype='float32', shape=[3, 300, 300], name='inputs') - -# 输出shape为[3, 400, 500] -outputs = fluid.layers.image_resize(inputs, [400, 500]) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.layers.conv2d.md b/tensorflow2fluid/doc/tf.layers.conv2d.md deleted file mode 100644 index e46edb9..0000000 --- a/tensorflow2fluid/doc/tf.layers.conv2d.md +++ /dev/null @@ -1,87 +0,0 @@ -## tf.layers.conv2d - -### [tf.layers.conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/conv2d) -``` python -tf.layers.conv2d( - inputs, - filters, - kernel_size, - strides=(1, 1), - padding='valid', - data_format='channels_last', - dilation_rate=(1, 1), - activation=None, - use_bias=True, - kernel_initializer=None, - bias_initializer=tf.zeros_initializer(), - kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - trainable=True, - name=None, - reuse=None -) -``` - -### [paddle.fluid.layers.conv2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d) -``` python -paddle.fluid.layers.conv2d( - input, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None) -``` - -### 功能差异 - -#### 数据格式 - -TensorFlow: 默认输入数据格式为`NHWC`,表示`(batch,height, width, in_channels)`, 同时也可将`data_format`参数设为`channels_first`,支持`NCHW`格式的数据输入。其中输入、输出、卷积核对应关系如下表所示, 
-| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NHWC | (kernel_h, kernel_w, in_channels, filters_num)| (batch, out_h, out_w, filters_num)| -|NCHW | (kernel_h, kernel_w, in_channels, filters_num) | (batch, filters_num, out_h, out_w)| - -PaddlePaddle:只支持输入数据格式为`NCHW`,且**卷积核格式**与TensorFlow不同,其中输入、输出、卷积核对应关系如下表所示, - -| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NCHW | (filters_num, in_channels, kernel_h, kernel_w) | (batch, filters_num, out_h, out_w)| - -#### Padding机制 -TensorFlow: `SAME`和`VALID`两种选项。当为`SAME`时,padding的计算方式如下所示, -```python -# 计算在width上的padding size -# height上的padding计算方式同理 -ceil_size = ceil(input_width / stride_width) -pad_size = (ceil_size - 1) * stride_width + filter_width - input_width -pad_left = ceil(pad_size / 2) -pad_right = pad_size - pad_left -``` -PaddlePaddle:`padding`参数表示在输入图像四周padding的size大小。 - -#### 参数差异 -TensorFlow:深度可分离卷积使用[tf.layers.separable_conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/separable_conv2d)接口; -PaddlePaddle: 使用`paddle.fluid.layers.conv2d`,可参考 -[PaddlePaddle对卷积的说明文档](http://paddlepaddle.org/documentation/docs/zh/1.4/api_guides/low_level/layers/conv.html), 同时也可参考[tf.nn.separable_conv2d](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.separable_conv2d.md)中的代码示例。 - -### 代码示例 -```python -# 结合pad2d,实现SAME方式的padding -# 输入Shape:(None, 3, 200, 200) -# 输出Shape:(None, 5, 200, 200) -# 卷积核Shape: (5, 3, 4, 4) -inputs = paddle.fluid.layers.data(dtype='float32', shape=[3, 200, 200], name='inputs') -pad_inputs = paddle.fluid.layers.pad2d(inputs, paddings=[1, 2, 1, 2]) -outputs = paddle.fluid.layers.conv2d(pad_inputs, 5, [4, 4], (1, 1)) \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.layers.dense.md b/tensorflow2fluid/doc/tf.layers.dense.md deleted file mode 100644 index 02d0c49..0000000 --- a/tensorflow2fluid/doc/tf.layers.dense.md +++ /dev/null @@ -1,65 +0,0 @@ -## tf.layers.dense - -### [tf.layers.dense](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/layers/dense) -``` python -tf.layers.dense( - inputs, - units, - activation=None, - use_bias=True, - kernel_initializer=None, - bias_initializer=tf.zeros_initializer(), - kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - trainable=True, - name=None, - reuse=None -) -``` - -### [paddle.fluid.layers.fc](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#fc) -``` python -paddle.fluid.layers.fc( - input, - size, - num_flatten_dims=1, - param_attr=None, - bias_attr=None, - act=None, - is_test=False, - name=None -) - -``` - -### 功能差异 -#### 输入类型 -TensorFlow:`inputs`为一个tensor; -PaddlePaddle:允许`input`是一个tensor或者是一个tensor 列表,如果是tensor列表的情况,该layer会声明多个kernel,个数与列表长度相同,在将列表中各个tensor与对应kernel做矩阵乘法之后,将各个结果相加。 - -#### kernel、bias初始化 -TensorFlow:通过`kernel_initializer`与`bias_initializer`对`kernel`、`bias`进行初始化; -PaddlePaddle:通过设置`param_attr`,`bias_attr`为某种Attribute的方式,进行`kernel`、`bias`初始化。 - -#### 高维tensor处理 -TensorFlow:对于rank大于2的输入tensor,将其看做是最内两个维度所组成矩阵的堆叠,dense操作将改变最后一个维度; -PaddlePaddle:对于rank大于2的输入tensor,可以从第`num_flatten_dims`维开始(维度下标从0开始,`num_flatten_dims`最小为1),将各维度拍平,例如`shape`为(2,3,4,5),当`num_flatten_dims`为2时,输入tensor将被reshape成(2,3,20)的tensor,输出tensor的shape为(2,3,size)。 - -### 代码示例 -```python -# 输入 tensor t 的shape为[2, 3, 4, 5] - -# size=6, 输出tensor 的shape为[2,6] -out = fluid.layers.fc(t, size=6) - -# size=6, 设置kernel为均匀分布 -out = fluid.layers.fc(t, size=6, \ - 
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(low=-0.5, high=0.5))) - -# size=6, num_flatten_dims=2,输出tensor的shape为[2, 3, 6] -out = fluid.layers.fc(t, size=6, num_flatten_dims=2) - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.losses.mean_and_squared_error.md b/tensorflow2fluid/doc/tf.losses.mean_and_squared_error.md deleted file mode 100644 index 56b9701..0000000 --- a/tensorflow2fluid/doc/tf.losses.mean_and_squared_error.md +++ /dev/null @@ -1,28 +0,0 @@ -## tf.losses.mean_and_squared_error - -### [tf.losses.mean_and_squared_error](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/losses/mean_squared_error) - -``` python -tf.losses.mean_squared_error( - labels, - predictions, - weights=1.0, - scope=None, - loss_collection=tf.GraphKeys.LOSSES, - reduction=Reduction.SUM_BY_NONZERO_WEIGHTS -) -``` - - -### [paddle.fluid.layers.square_error_cost](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.square_error_cost) -``` python -paddle.fluid.layers.square_error_cost( - input, - label -) -``` -### 功能差异 - -#### 计算方式 -TensorFlow: 提供`weights`参数,通过传入`weights`参数的shape,可实现不同的加权方式; -PaddlePaddle:不支持加权。 \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.losses.sigmoid_cross_entropy.md b/tensorflow2fluid/doc/tf.losses.sigmoid_cross_entropy.md deleted file mode 100644 index b6a9bb8..0000000 --- a/tensorflow2fluid/doc/tf.losses.sigmoid_cross_entropy.md +++ /dev/null @@ -1,56 +0,0 @@ -## tf.losses.sigmoid_cross_entropy - -### [tf.losses.sigmoid_cross_entropy](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/losses/sigmoid_cross_entropy) - -```python -tf.losses.sigmoid_cross_entropy( - multi_class_labels, - logits, - weights=1.0, - label_smoothing=0, - scope=None, - loss_collection=tf.GraphKeys.LOSSES, - reduction=Reduction.SUM_BY_NONZERO_WEIGHTS -) -``` - -### [paddle.fluid.layers.sigmoid_cross_entropy_with_logit](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#sigmoid_cross_entropy_with_logits) - -```python -paddle.fluid.layers.sigmoid_cross_entropy_with_logits( - x, - label, - ignore_index=-100, - name=None, - normalize=False) -``` - -### 功能差异 - -#### 返回值类型 - -Tensorflow:通过控制`reduction`参数,返回结果可以是rank为0的tensor,也可以是shape与`logits`相同的tensor; -PaddlePaddle:固定返回shape与`x`相同的tensor,表示每个样本在每个标签上的损失。 - -#### 调权与平滑 - -Tensorflow:通过`weights`,可以设置不同样本、不同label的权重;通过`label_smoothing`,可以控制对label进行平滑; -PaddlePaddle:不支持调权与平滑功能。 - -#### 忽略标签 -Tensorflow:不支持; -PaddlePaddle:通过设置`ignore_index`可以指定被忽略的标签,不影响梯度。 - -#### 归一化 -Tensorflow:不支持; -PaddlePaddle:通过设置`normalize`,各样本损失函数会除以除去`ignore_index`外的样本数。 - -### 代码示例 -``` -# x与label均是shape为[3,5]的tensor,表示三个样本,每个样本有5个类别 - -# out是shape为[3,5]的tensor,表示每个样本在每个类别上的loss -out = fluid.layers.sigmoid_cross_entropy_with_logits(x, label) - - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.math.is_finite.md b/tensorflow2fluid/doc/tf.math.is_finite.md deleted file mode 100644 index e299bed..0000000 --- a/tensorflow2fluid/doc/tf.math.is_finite.md +++ /dev/null @@ -1,33 +0,0 @@ -## tf.math.is_finite - -### [tf.math.is_finite](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/is_finite) -``` python -tf.math.is_finite( - x, - name=None -) -``` - -### [paddle.fluid.layers.isfinite](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.isfinite) -``` python -paddle.fluid.layers.isfinite(x) -``` - -### 功能差异 - -#### 输出格式 -TensorFlow: 返回elementwise检查的结果,即输出与输入shape一致; -PaddlePaddle: 
返回结果仅包含一个boolean值,若输入数据中均为`infinite`,则返回True,否则返回False。 - -### 代码示例 -```python -# TensorFlow示例 -# 输入[2.1, 3.2, 4.5] -# 输出[True, True, True] -result = tf.is_finite(inputs) - -# PaddlePaddle示例 -# 输入[2.1, 3.2, 4.5] -# 输出True -result = fluid.layers.isfinite(inputs) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.math.rsqrt.md b/tensorflow2fluid/doc/tf.math.rsqrt.md deleted file mode 100644 index 193cb07..0000000 --- a/tensorflow2fluid/doc/tf.math.rsqrt.md +++ /dev/null @@ -1,26 +0,0 @@ -## tf.math.rsqrt - -### [tf.math.rsqrt](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/rsqrt) -``` python -tf.math.rsqrt( - x, - name=None -) -``` - -### PaddlePaddle实现 -PaddlePaddle中目前无对应接口,可使用如下代码实现 -``` python -def rsqrt(x): - net_0 = fluid.layers.sqrt(x) - net_1 = fluid.layers.pow(net_0, factor=-1.0) - return net_1 -``` - -### 代码示例 -``` python -inputs = fluid.layers.data(dtype='float32', shape=[1000], name='inputs') - -# 调用上述自定义函数 -result = rsqrt(inputs) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.matmul.md b/tensorflow2fluid/doc/tf.matmul.md deleted file mode 100644 index 3e5247d..0000000 --- a/tensorflow2fluid/doc/tf.matmul.md +++ /dev/null @@ -1,62 +0,0 @@ -## tf.matmul - -### [tf.matmul](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/linalg/matmul) -``` python -tf.matmul( - a, - b, - transpose_a=False, - transpose_b=False, - adjoint_a=False, - adjoint_b=False, - a_is_sparse=False, - b_is_sparse=False, - name=None -) -``` - -### [paddle.fluid.layers.matmul](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#matmul) -``` python -paddle.fluid.layers.matmul( - x, - y, - transpose_x=False, - transpose_y=False, - alpha=1.0, - name=None -) -``` - -### 功能差异 -#### 输入格式 -TensorFlow:要求op的两个操作数具有相同的rank; -PaddlePaddle:允许两者具有不同的rank,具体说就是当任一操作数的rank大于2时,将其看做最里面两维度矩阵的堆叠,paddlepaddle将进行broadcast操作。 - -#### 其他 -TensorFlow:使用`adjoint`参数可以实现快速的共轭操作;paddlepaddle中并不支持; -PaddlePaddle:额外支持对输出进行数乘操作。 - - -### 代码示例 -```python -# x: [M, K], y: [K, N] -fluid.layers.matmul(x, y) # out: [M, N] - -# x: [B, ..., M, K], y: [B, ..., K, N] -fluid.layers.matmul(x, y) # out: [B, ..., M, N] - -# x: [B, M, K], y: [B, K, N] -fluid.layers.matmul(x, y) # out: [B, M, N] - -# x: [B, M, K], y: [K, N] -fluid.layers.matmul(x, y) # out: [B, M, N] - -# x: [B, M, K], y: [K] -fluid.layers.matmul(x, y) # out: [B, M] - -# x: [K], y: [K] -fluid.layers.matmul(x, y) # out: [1] - -# x: [M], y: [N] -fluid.layers.matmul(x, y, True, True) # out: [M, N] -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.avg_pool.md b/tensorflow2fluid/doc/tf.nn.avg_pool.md deleted file mode 100644 index b92056c..0000000 --- a/tensorflow2fluid/doc/tf.nn.avg_pool.md +++ /dev/null @@ -1,59 +0,0 @@ -## tf.nn.avg_pool - -### [tf.nn.avg_pool](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/avg_pool) - -``` python -tf.nn.avg_pool( - value, - ksize, - strides, - padding, - data_format='NHWC', - name=None -) -``` - - -### [paddle.fluid.layers.pool2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.pool2d) -``` python -paddle.fluid.layers.pool2d( - input, - pool_size=-1, - pool_type='max', - pool_stride=1, - pool_padding=0, - global_pooling=False, - use_cudnn=True, - ceil_mode=False, - name=None, - exclusive=True) -``` -### 功能差异 - -#### 输入格式 -TensorFlow: 默认为`NHWC`的数据输入格式,同时也可通过修改`data_format`参数,支持`NCHW`的输入; -PaddlePaddle:只支持`NCHW`的数据输入格式。 - -#### Padding机制 - -Tensorflow: 
存在`SAME`和`VALID`两种padding方式。当为`SAME`时,padding的size计算方式如下伪代码所示,需要注意的是,当计算得到的`pad_size`为奇 -数时,右侧与下方相对比左侧和上方会多1个size; -``` python -# 计算在width上的padding size -# height上的padding计算方式同理 -ceil_size = ceil(input_width / stride_width) -pad_size = (ceil_size - 1) * stride_width + filter_width - input_width -pad_left = ceil(pad_size / 2) -pad_right = pad_size - pad_left -``` -PaddlePaddle:在输入的上、下、左、右分别padding,size大小为`pool_padding`。 - -### 代码示例 -``` -inputs = fluid.layers.data(dtype='float32', shape=[3, 300, 300], name='inputs') - -# 计算得到输入的长、宽对应padding size为1 -# 当Tensorflow中padding为SAME时,可能会两侧padding的size不同,可调用pad2d对齐 -pad_res = fluid.layers.pad2d(inputs, paddings=[0, 1, 0, 1]) -conv_res = fluid.layers.pool2d(pad_res, pool_size=3, pool_type='avg', padding=[1, 1], pool_stride=2) -``` diff --git a/tensorflow2fluid/doc/tf.nn.bidirectional_dynamic_rnn.md b/tensorflow2fluid/doc/tf.nn.bidirectional_dynamic_rnn.md deleted file mode 100644 index 8100a4a..0000000 --- a/tensorflow2fluid/doc/tf.nn.bidirectional_dynamic_rnn.md +++ /dev/null @@ -1,74 +0,0 @@ -## tf.nn.bidirectional_dynamic_rnn - - -### [tf.nn.bidirectional_dynamic_rnn](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/bidirectional_dynamic_rnn) - -```python -tf.nn.bidirectional_dynamic_rnn( - cell_fw, - cell_bw, - inputs, - sequence_length=None, - initial_state_fw=None, - initial_state_bw=None, - dtype=None, - parallel_iterations=None, - swap_memory=False, - time_major=False, - scope=None -) -``` - -### 功能差异 - -#### 使用方式 -TensorFlow:用户通过定义正向与反向`cell`,可以实现一个双向RNN网络的功能; - -PaddlePaddle:并没有提供一个对应的接口,用户可以使用`DynamicRNN`组合实现得到,详见如下代码示例。 - -### 代码示例 -``` -# 如下代码片段实现双向lstm网络,lstm单元数为16 - -num_unit_0 = 16 - -# 定义LoD输入 -data = fluid.layers.data(name='input', shape=[1], dtype='int64', lod_level=1) - -# 获得正向与反向embedding -embedding = fluid.layers.embedding(input=data, size=[emb_vocab, emb_size], - is_sparse=False) -rev_embedding = fluid.layers.sequence_reverse(embedding) - -# 定义lstm网络 -def rnn(in_tensor): - drnn = fluid.layers.DynamicRNN() - with drnn.block(): - word = drnn.step_input(in_tensor) - - prev_hid0 = drnn.memory(shape=[num_unit_0]) - prev_cell0 = drnn.memory(shape=[num_unit_0]) - - cur_hid0, cur_cell0 = layers.lstm_unit(word, prev_hid0, prev_cell0) - - drnn.update_memory(prev_hid0, cur_hid0) - drnn.update_memory(prev_cell0, cur_cell0) - - drnn.output(cur_hid0) - - out = drnn() - return out - -# 计算正向lstm网络的输出 -out = rnn(embedding) - -# 计算反向lstm网络的输出 -rev_out = rnn(rev_embedding) - -# 再次反转使得rev_out每个时刻所处理的数据与out对应 -rev_rev_out = fluid.layers.sequence_reverse(rev_out) - -# 合并得到最后的输出,其shape为(-1, 32) -concat_out = layers.concat([out, rev_rev_out], axis=1) - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.conv2d.md b/tensorflow2fluid/doc/tf.nn.conv2d.md deleted file mode 100644 index e01a61f..0000000 --- a/tensorflow2fluid/doc/tf.nn.conv2d.md +++ /dev/null @@ -1,51 +0,0 @@ -## tf.nn.conv2d - -### [tf.nn.conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/conv2d) - -```python -tf.nn.conv2d( - input, - filter, - strides, - padding, - use_cudnn_on_gpu=True, - data_format='NHWC', - dilations=[1, 1, 1, 1], - name=None -) -``` - -### [paddle.fluid.layers.conv2d](http://www.paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d) - -```python -paddle.fluid.layers.conv2d( - input, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None -) -``` - -### 功能差异 - 
-`tf.nn.conv2d`中的参数`filter`为具体的tensor,而`paddle.fluid.layers.conv2d`参数中则声明卷积核的`size`,函数内部创建卷积核tensor。也可通过如下代码示例,自行创建并复用卷积核。 -需要注意的是PaddlePaddle中的输入、输出以及卷积核的格式与tensorflow存在部分差异,可参考[tf.layers.conv2d](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.layers.conv2d.md)。 - -### 代码示例 -```python -# 输入为NCHW格式 -inputs = fluid.layers.data(dtype='float32', shape=[-1, 3, 300, 300], name='inputs') -create_kernel = fluid.layers.create_parameter(shape=[5, 3, 2, 2], dtype='float32', name='kernel') - -# PaddlePaddle中可通过相同的参数命名引用同一个参数变量 -# 通过指定卷积核参数名(param_attr)为'kernel',引用了create_kernel -result = fluid.layers.conv2d(inputs, 5, [2, 2], param_attr='kernel') -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.conv2d_transpose.md b/tensorflow2fluid/doc/tf.nn.conv2d_transpose.md deleted file mode 100644 index f47931c..0000000 --- a/tensorflow2fluid/doc/tf.nn.conv2d_transpose.md +++ /dev/null @@ -1,95 +0,0 @@ -## tf.nn.conv2d_transpose - -### [tf.nn.conv2d_transpose](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/conv2d_transpose) -``` python -tf.nn.conv2d_transpose( - value, - filter, - output_shape, - strides, - padding='SAME', - data_format='NHWC', - name=None -) -``` - -### [paddle.fluid.layers.conv2d_transpose](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d_transpose) -``` python -paddle.fluid.layers.conv2d_transpose( - input, - num_filters, - output_size=None, - filter_size=None, - padding=0, - stride=1, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None -) -``` - -### 功能差异 - -#### 数据格式 - -TensorFlow: 默认输入数据格式为`NHWC`,表示`(batch,height, width, in_channels)`, 同时也可将`data_format`参数设为`channels_first`,支持`NCHW`格式的数据输入。其中输入、输出、卷积核对应关系如下表所示, - -| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NHWC | (kernel_h, kernel_w, filters_num, in_channels)| (batch, out_h, out_w, filters_num)| -|NCHW | (kernel_h, kernel_w, filters_num, in_channels) | (batch, filters_num, out_h, out_w)| - -PaddlePaddle:只支持输入数据格式为`NCHW`,且**卷积核格式**与TensorFlow不同,其中输入、输出、卷积核对应关系如下表所示, - -| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NCHW | (in_channels, filters_num, kernel_h, kernel_w) | (batch, filters_num, out_h, out_w)| - -#### Padding机制 -TensorFlow: `SAME`和`VALID`两种选项。当为`SAME`时,padding的计算方式如下所示 -```python -# 计算在width上的padding size -# height上的padding计算方式同理 -ceil_size = ceil(input_width / stride_width) -pad_size = (ceil_size - 1) * stride_width + filter_width - input_width -pad_left = ceil(pad_size / 2) -pad_right = pad_size - pad_left -``` -PaddlePaddle:`padding`参数表示在输入图像四周padding的size大小。 - -#### 输出大小 -TensorFlow:当padding为`SAME`和`VALID`两种情况下,输出大小计算方式如下所示 -```python -if padding == 'SAME': - output_size = input_size * stride -elif padding == 'VALID': - output_size = input_size * stride + max(kernel_size - stride, 0) -``` -PaddlePaddle: 输出大小计算公式如下,差异主要由于TensorFlow在`conv2d_transpose`的最后还存在**裁剪**步骤,因此可参考示例代码,调用`crop`解决 -```python -output_size = (input_size - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 -``` - -### 代码示例 -```python -# TensorFlow使用conv2d_transpose -# 输入shape: [-1, 20, 20, 3] -inputs = tf.placeholder(dtype=tf.float32, shape=[None, 20, 20, 3]) -filter = tf.random_uniform([5, 5, 3, 3], 0.0, 1.0) -batch = tf.shape(inputs)[0] -# conv2d_transpose输出shape: [-1, 40, 40, 3] -result = tf.nn.conv2d_transpose(inputs, filter, output_shape=[batch, 40, 40, 3], - strides=[1, 2, 2, 1], padding='SAME') 
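# 按上文Paddle输出公式核算(dilation=1):(20 - 1) * 2 - 2 * 1 + (5 - 1) + 1 = 41
# 即下方conv2d_transpose先得到41 x 41的输出,再crop回40 x 40,与TensorFlow的SAME输出(20 * 2 = 40)对齐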
- -#PaddlePaddle中使用conv2d_transpose -# 输入Shape:(None, 3, 20, 20) -inputs = fluid.layers.data(dtype='float32', shape=[3, 20, 20], name='inputs') -# conv2d_transpose输出shape:[-1, 3, 41, 41] -outputs = fluid.layers.conv2d_transpose(inputs, 3, filter_size=[5, 5], - padding=[1, 1], stride=[2, 2], bias_attr=False) -# 裁剪后结果即为与TensorFlow一致 -outputs = fluid.layers.crop(outputs, shape=[-1, 3, 40, 40]) \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.conv3d_transpose.md b/tensorflow2fluid/doc/tf.nn.conv3d_transpose.md deleted file mode 100644 index 4470806..0000000 --- a/tensorflow2fluid/doc/tf.nn.conv3d_transpose.md +++ /dev/null @@ -1,95 +0,0 @@ -## tf.nn.conv3d_transpose - -### [tf.nn.conv3d_transpose](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/conv3d_transpose) -``` python -tf.nn.conv3d_transpose( - value, - filter, - output_shape, - strides, - padding='SAME', - data_format='NDHWC', - name=None -) -``` - -### [paddle.fluid.layers.conv3d_transpose](http://www.paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv3d_transpose) -``` python -paddle.fluid.layers.conv3d_transpose( - input, - num_filters, - output_size=None, - filter_size=None, - padding=0, - stride=1, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None -) -``` - -### 功能差异 - -#### 数据格式 - -TensorFlow: 默认输入数据格式为`NDHWC`,表示`(batch,depth, height, width, in_channels)`, 同时也可将`data_format`参数设为`channels_first`,支持`NCDHW`格式的数据输入。其中输入、输出、卷积核对应关系如下表所示, - -| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NDHWC | (kernel_d, kernel_h, kernel_w, filters_num, in_channels)| (batch, out_d, out_h, out_w, filters_num)| -|NCDHW | (kernel_d, kernel_h, kernel_w, filters_num, in_channels) | (batch, filters_num, out_d, out_h, out_w)| - -PaddlePaddle: 只支持输入数据格式为`NCDHW`,且**卷积核格式**与TensorFlow不同,其中输入、输出、卷积核对应关系如下表所示, - -| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NCDHW | (in_channels, filters_num, kernel_d, kernel_h, kernel_w) | (batch, filters_num, out_d, out_h, out_w)| - -#### Padding机制 -TensorFlow: `SAME`和`VALID`两种选项。当为`SAME`时,padding的计算方式如下所示 -```python -# 计算在width上的padding size -# height上的padding计算方式同理 -ceil_size = ceil(input_width / stride_width) -pad_size = (ceil_size - 1) * stride_width + filter_width - input_width -pad_left = ceil(pad_size / 2) -pad_right = pad_size - pad_left -``` -PaddlePaddle:`padding`参数表示在输入图像四周padding的size大小。 - -#### 输出大小 -TensorFlow:当padding为`SAME`和`VALID`两种情况下,输出大小计算方式如下所示 -```python -if padding == 'SAME': - output_size = input_size * stride -elif padding == 'VALID': - output_size = input_size * stride + max(kernel_size - stride, 0) -``` -PaddlePaddle: 输出大小计算公式如下,差异主要由于TensorFlow在`conv3d_transpose`的最后还存在**裁剪**步骤,因此可参考示例代码,调用`crop`解决 -```python -output_size = (input_size - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 -``` - -### 代码示例 -```python -# TensorFlow使用conv3d_transpose -# 输入shape: [-1, 5, 20, 40, 3] -inputs = tf.placeholder(dtype=tf.float32, shape=[None, 5, 20, 40, 3]) -filter = tf.random_uniform([2, 4, 5, 7, 3], 0.0, 1.0) -batch = tf.shape(inputs)[0] -# conv3d_transpose输出shape: [-1, 5, 40, 80, 7] -result = tf.nn.conv3d_transpose(inputs, filter, output_shape=[batch, 5, 40, 80, 7], - strides=[1, 1, 2, 2, 1], padding='SAME') - -#PaddlePaddle中使用conv3d_transpose -# 输入Shape:(None, 3, 5, 20, 40) -inputs = fluid.layers.data(dtype='float32', shape=[3, 5, 20, 40], name='inputs') -# conv3d_transpose输出shape:[-1, 7, 6, 40, 81] 
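# 按上文输出公式核算(dilation=1):
# D: (5 - 1) * 1 - 2 * 0 + (2 - 1) + 1 = 6
# H: (20 - 1) * 2 - 2 * 1 + (4 - 1) + 1 = 40
# W: (40 - 1) * 2 - 2 * 1 + (5 - 1) + 1 = 81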
-outputs = fluid.layers.conv3d_transpose(inputs, 7, filter_size=(2, 4, 5), stride=(1, 2, 2), - padding=(0, 1, 1), bias_attr=False) -# 裁剪后结果即为与TensorFlow一致 -outputs = fluid.layers.crop(outputs, shape=[-1, 7, 5, 40, 80]) \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.depthwise_conv2d.md b/tensorflow2fluid/doc/tf.nn.depthwise_conv2d.md deleted file mode 100644 index e6c1c1f..0000000 --- a/tensorflow2fluid/doc/tf.nn.depthwise_conv2d.md +++ /dev/null @@ -1,87 +0,0 @@ -## tf.nn.depthwise_conv2d - -### [tf.nn.depthwise_conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/depthwise_conv2d) - -```python -tf.nn.depthwise_conv2d( - input, - filter, - strides, - padding, - rate=None, - name=None, - data_format=None -) -``` - -### [paddle.fluid.layers.conv2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.conv2d) - -```python -paddle.fluid.layers.conv2d( - input, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None -) -``` - - -### 功能差异 - -#### 数据格式 - -TensorFlow:默认输入数据格式为`NHWC`,表示`(batch, height, width, in_channels)`, 同时也可将`data_format`参数设为`channels_first`,支持`NCHW`格式的数据输入。其中输入、输出、卷积核对应关系如下表所示, - -| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NHWC | (kernel_h, kernel_w, in_channels, channel_multiplier)| (batch, out_h, out_w, in_channel*channel_multiplier)| -|NCHW | (kernel_h, kernel_w, in_channels, channel_multiplier) | (batch, in_channel*channel_multiplier, out_h, out_w)| - -PaddlePaddle: 只支持输入数据格式为`NCHW`,且**卷积核格式**与TensorFlow不同,其中输入、输出、卷积核对应关系如下表所示,可以看到,需要设置`num_filters`参数与`in_channels`一致。 - -| 输入 | 卷积核 | 输出 | -|--------------------|-------------------|------------------| -|NCHW | (num_filters, in_channels/groups, kernel_h, kernel_w) | (batch, num_filters, out_h, out_w)| - -#### Padding机制 -TensorFlow: `SAME`和`VALID`两种选项。当为`SAME`时,padding的计算方式如下所示 -```python -# 计算在width上的padding size -# height上的padding计算方式同理 -ceil_size = ceil(input_width / stride_width) -pad_size = (ceil_size - 1) * stride_width + filter_width - input_width -pad_left = ceil(pad_size / 2) -pad_right = pad_size - pad_left -``` -PaddlePaddle:`padding`参数表示在输入图像四周padding的size大小。 - -#### 参数差异 -Tensorflow:普通2维卷积使用`tf.layers.conv2d`; -PaddlePaddle:仍使用本接口,可参考文档[tf.layers.conv2d](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.layers.conv2d.md)。 - -### 代码示例 - -```python -# TensorFlow中使用depthwise_conv2d -# 输入shape: [-1, 20, 20, 3] -inputs = tf.placeholder(dtype=tf.float32, shape=[None, 20, 20, 3]) -filter = tf.random_uniform([4, 4, 3, 1], 0.0, 1.0) -# 输出shape: [-1, 20, 20, 3] -result = tf.nn.depthwise_conv2d(inputs, filter, strides=[1, 1, 1, 1], padding='SAME') - -# PaddlePaddle中使用conv2d实现depthwise_conv2d -# 输入shape: [-1, 3, 20, 20] -inputs = fluid.layers.data(dtype='float32', shape=[3, 20, 20], name='inputs') -# 使用pad2d对齐TensorFlow的padding参数:SAME -inputs = fluid.layers.pad2d(inputs, paddings=[1, 2, 1, 2]) -#输出shape:[-1, 3, 20, 20] -result = fluid.layers.conv2d(inputs, 3, filter_size=[4, 4], groups=3, bias_attr=False) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.dropout.md b/tensorflow2fluid/doc/tf.nn.dropout.md deleted file mode 100644 index 9d43c2f..0000000 --- a/tensorflow2fluid/doc/tf.nn.dropout.md +++ /dev/null @@ -1,49 +0,0 @@ -## tf.nn.dropout - -### [tf.nn.dropout](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/dropout) -``` python -tf.nn.dropout( 
- x, - keep_prob=None, - noise_shape=None, - seed=None, - name=None, - rate=None -) -``` - -### [paddle.fluid.layers.dropout](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#cn-api-fluid-layers-dropout) -``` python -paddle.fluid.layers.dropout( - x, - dropout_prob, - is_test=False, - seed=None, - name=None, - dropout_implementation='downgrade_in_infer' -) -``` - -### 功能差异 -#### 丢弃概率 -TensorFlow:使用`keep_prob`表示保留单元输出的概率,等价于`1-rate`; -PaddlePaddle:使用`dropout_prob`表示将单元输出设置为0的概率,即其丢弃概率; - -#### dropout独立性 -TensorFlow:通过设置一个可以广播到x的`noise_shape`,可以控制dropout的独立性; -PaddlePaddle:暂无此设置。 - -#### 实现方式 -TensorFlow:在训练时,被保留的单元输出要乘上`1/keep_prob`的系数,而在测试时,直接关闭dropout。 -PaddlePaddle:通过设置`dropout_implementation`有不同的实现。当设置为`downgrade_in_infer`时,在训练时,保留单元直接被输出,而测试时所有单元乘以`1-dropout_prob`的系数;当设置为`upscale_in_train`时,则与tensorflow的实现一致。 - -### 代码示例 -```python -# 输入 tensor t 为[[1,2],[3,4]] - -# 第0维前面padding长度为0,后面padding长度为1;第1维前面padding长度为1,后面padding长度为2 -out = fluid.layers.dropout(t, dropout_prob=0.2, dropout_implementation="upscale_in_train") - -# inference 时关闭dropout -inference_program = fluid.default_main_program().clone(for_test=True) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.dynamic_rnn.md b/tensorflow2fluid/doc/tf.nn.dynamic_rnn.md deleted file mode 100644 index d47bfbb..0000000 --- a/tensorflow2fluid/doc/tf.nn.dynamic_rnn.md +++ /dev/null @@ -1,78 +0,0 @@ -## tf.nn.dynamic_rnn - -### [tf.nn.dynamic_rnn](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/dynamic_rnn) -``` python -tf.nn.dynamic_rnn( - cell, - inputs, - sequence_length=None, - initial_state=None, - dtype=None, - parallel_iterations=None, - swap_memory=False, - time_major=False, - scope=None -) -``` - -### [paddle.fluid.layers.DynamicRNN](http://www.paddlepaddle.org/documentation/docs/zh/1.4/api_cn/api_guides/low_level/layers/control_flow.html#dynamicrnn) -``` python -paddle.fluid.layers.DynamicRNN(name=None) -``` - -### 功能差异 -#### 调用机制 -Tensorflow: `tf.nn.dynamic_rnn`通常与`tf.nn.rnn_cell.LSTMCell`、`tf.nn.rnn_cell.GRUCell`等Cell结合使用 -PaddlePaddle: 使用`paddle.fluid.layers.DynamicRNN`类实现类似功能 ,通过DynamicRNN提供的类方法,用户可以在`with block`中方便地自定义每个时间步的处理过程。 - -#### 输入格式 -TensorFlow: `tf.nn.dynamic_rnn`输入为序列数据,批输入中的每个序列需要填充到相同的长度 -PaddlePaddle: 使用 -[LoDTensor](http://www.paddlepaddle.org/documentation/docs/zh/1.4/user_guides/howto/basic_concept/lod_tensor.html)表示一个批输入,用户在使用时不需要进行填充操作。 - -### 代码示例 - -``` -# TensorFlow代码示例 -# 创建 BasicRNNCell -rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size) -# 定义初始隐状态 -initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32) -# 输出shape为(batch_size, max_time, cell_state_size) -# 最后时刻隐状态shape为(batch_size, cell_state_size) -outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data, - initial_state=initial_state, - dtype=tf.float32) - -# PaddlePaddle代码示例 -# 创建一个DynamicRNN对象 -drnn = fluid.layers.DynamicRNN() -# 定义一个类似BasicRNNCell的处理过程 -with drnn.block(): - # 设置drnn的序列输入,并取得当前步的输入 - cur_input = drnn.step_input(input_data) - - # 设置memory变量,并取得上一时刻(或初始)隐状态 - last_hidden_state = drnn.memory(shape=[hidden_size], value=0.0) - - # 计算当前时刻隐状态 - cur_hidden_state = fluid.layers.fc(input=[cur_input, last_hidden_state], size=hidden_size, act='relu') - - # 更新隐状态 - drnn.update_memory(last_hidden_state, cur_hidden_state) - - # 记录本时刻的输出(BasicRNNCell中当前时刻的输出与当前时刻隐状态一致) - drnn.output(hidden) - -# 获取输出LoDTensor,其shape为(-1, hidden_size) -outputs = drnn() - -# 获取各序列最后时刻的隐状态,其shape为(batch_size, hidden_size) -state = fluid.layers.sequence_last_step(outputs) -``` - -### 其他 - 
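The two higher-level ops listed below can replace the hand-written cell in the example above. As an editorial illustration only (a minimal sketch assuming the `input_data` LoDTensor and `hidden_size` from the example above; not code from the original page), `dynamic_lstm` would be used roughly as follows:

```python
# Editor's sketch: dynamic_lstm folds the per-step cell logic into a single call.
# Its input's last dimension (and the `size` argument) must equal 4 * hidden_size.
fc = fluid.layers.fc(input=input_data, size=hidden_size * 4)
lstm_hidden, lstm_cell = fluid.layers.dynamic_lstm(input=fc, size=hidden_size * 4)
# Last-step hidden state, analogous to `state` in the example above
last = fluid.layers.sequence_last_step(lstm_hidden)
```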
-为了简化用户定义动态RNN的过程,paddle有如下op可供选择: -- [paddle.fluid.layers.dynamic_lstm](http://www.paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#dynamic-lstm):相当于 `tf.nn.dynamic_rnn`结合`tf.nn.rnn_cell.LSTMCell` -- [paddle.fluid.layers.dynamic_gru](http://www.paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#dynamic-gru):相当于`tf.nn.dynamic_rnn`结合`tf.nn.rnn_cell.GRUCell` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.l2_normalize.md b/tensorflow2fluid/doc/tf.nn.l2_normalize.md deleted file mode 100644 index 387dd8b..0000000 --- a/tensorflow2fluid/doc/tf.nn.l2_normalize.md +++ /dev/null @@ -1,40 +0,0 @@ -## tf.nn.l2_normalize - -### [tf.nn.l2_normalize](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/l2_normalize) - -```python -tf.math.l2_normalize( - x, - axis=None, - epsilon=1e-12, - name=None, - dim=None -) -``` - -### [paddle.fluid.layers.l2_normalize](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#l2-normalize) - -```python -paddle.fluid.layers.l2_normalize( - x, - axis, - epsilon=1e-12, - name=None -) -``` - -### 功能差异 - -#### 计算方式 - -TensorFlow:计算方式为`output = x / sqrt(max(sum(x^2), epsilon))`; -PaddlePaddle:计算方式为`output = x / sqrt(sum(x^2) + epsilon))`。 - - -### 代码示例 -``` -# x是shape为[3,2]的张量 - -# out同样是shape[3,2]的张量,axis设置为1,表示将x中每个行向量做归一化 -out = fluid.layers.l2_normalize(x, axis=1) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.lrn.md b/tensorflow2fluid/doc/tf.nn.lrn.md deleted file mode 100644 index 06522e9..0000000 --- a/tensorflow2fluid/doc/tf.nn.lrn.md +++ /dev/null @@ -1,40 +0,0 @@ -## tf.nn.lrn - -### [tf.nn.lrn](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/local_response_normalization) - -```python -tf.nn.local_response_normalization( - input, - depth_radius=5, - bias=1, - alpha=1, - beta=0.5, - name=None -) -``` - -### [paddle.fluid.layers.lrn](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.lrn) - -```python -paddle.fluid.layers.lrn( - input, - n=5, - k=1.0, - alpha=0.0001, - beta=0.75, - name=None -) -``` - -### 功能差异 - -#### 计算方式 - -TensorFlow:计算公式如下所示,公式中的$n$即为参数`depth_radius` -$$output(i,x,y)=input(i,x,y)/(k+\alpha\sum_{j=max(0,i-n)}^{min(C,i+n+1)}{input(j,x,y)^2})^\beta$$ -PaddlePaddle:计算公式如下所示, -$$output(i,x,y)=input(i,x,y)/(k+\alpha\sum_{j=max(0,i-\frac{n}{2})}^{min(C,i+\frac{n}{2})}{input(j,x,y)^2})^\beta$$ - -#### 输入格式 -TensorFlow: 默认输入`NHWC`格式数据; -PaddlePaddle: 默认输入`NCHW`格式数据, \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.max_pool.md b/tensorflow2fluid/doc/tf.nn.max_pool.md deleted file mode 100644 index 629a7a7..0000000 --- a/tensorflow2fluid/doc/tf.nn.max_pool.md +++ /dev/null @@ -1,54 +0,0 @@ -## tf.nn.max_pool - -### [tf.nn.max_pool](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/max_pool) - -``` python -tf.nn.max_pool( - value, - ksize, - strides, - padding, - data_format='NHWC', - name=None -) -``` - - -### [paddle.fluid.layers.pool2d](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.pool2d) -``` python -paddle.fluid.layers.pool2d( - input, - pool_size=-1, - pool_type='max', - pool_stride=1, - pool_padding=0, - global_pooling=False, - use_cudnn=True, - ceil_mode=False, - name=None, - exclusive=True) -``` -### 功能差异 - -#### 输入格式 -TensorFlow: 默认为`NHWC`的数据输入格式,同时也可通过修改`data_format`参数,支持`NCHW`的输入; -PaddlePaddle:只支持`NCHW`的数据输入格式。 - -#### Padding机制 - -Tensorflow: 
存在`SAME`和`VALID`两种padding方式。当为`SAME`时,padding的size计算方式如下,仅在最右和最下进行padding; -``` -ceil_size = ceil(input_size / stride) -pad_size = (ceil_size - 1) * stride + filter_size - input_size -``` -PaddlePaddle:在输入的上、下、左、右分别padding,size大小为`pool_padding`,通过示例代码,可实现与Tensorflow中`max_pool`的`SAME`方式。 - -### 代码示例 -``` -inputs = fluid.layers.data(dtype='float32', shape=[3, 300, 300], name='inputs') - -# 计算得到输入的长、宽对应padding size为1 -# 在最右、最下进行padding -pad_res = fluid.layers.pad2d(inputs, padding=[0, 1, 0, 1]) -conv_res = fluid.layers.pool2d(pad_res, pool_size=3, pool_type='max', pool_stride=2) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.reduce_logsumexp.md b/tensorflow2fluid/doc/tf.nn.reduce_logsumexp.md deleted file mode 100644 index b40fc39..0000000 --- a/tensorflow2fluid/doc/tf.nn.reduce_logsumexp.md +++ /dev/null @@ -1,29 +0,0 @@ -## tf.math.reduce_logsumexp - -### [tf.math.reduce_logsumexp](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/reduce_logsumexp) -``` python -tf.math.log_softmax( - logits, - axis=None, - name=None, - dim=None -) -``` - -### PaddlePaddle实现 -PaddlePaddle中目前无对应接口,可使用如下代码实现 -``` python -def reduce_logsumexp(inputs, axis=None, keepdims=None): - net_0 = fluid.layers.exp(inputs) - net_1 = fluid.layers.reduce_sum(net_0, dim=axis, keep_dim=keepdims) - net_2 = fluid.layers.log(net_1) - return net_2 -``` - -### 代码示例 -``` python -inputs = fluid.layers.data(dtype='float32', shape=[1000], name='inputs') - -# 调用上述自定义函数 -result = reduce_logsumexp(inputs) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.rnn.GRUCell.md b/tensorflow2fluid/doc/tf.nn.rnn.GRUCell.md deleted file mode 100644 index b6a0c60..0000000 --- a/tensorflow2fluid/doc/tf.nn.rnn.GRUCell.md +++ /dev/null @@ -1,83 +0,0 @@ -## tf.contrib.rnn.GRUCell - -### [tf.nn.rnn.GRUCell](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/rnn_cell/GRUCell) - -```python -__init__( - num_units, - activation=None, - reuse=None, - kernel_initializer=None, - bias_initializer=None, - name=None, - dtype=None, - **kwargs -) -``` - -### [paddle.fluid.layers.gru_unit](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#gru-unit) - -```python -paddle.fluid.layers.gru_unit( - input, - hidden, - size, - param_attr=None, - bias_attr=None, - activation='tanh', - gate_activation='sigmoid', - origin_mode=False -) -``` - -### 功能差异 - -#### 实现方式 -TensorFlow:GRU的实现方式见论文[Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation](http://arxiv.org/abs/1406.1078); -PaddlePaddle:GRU有两种实现方式,当设置`origin_mode=False`时,与TensorFlow实现方式一致;当设置`origin_mode=True`时,实现方式则参考论文[Empirical Evaluation of -Gated Recurrent Neural Networks -on Sequence Modeling](https://arxiv.org/pdf/1412.3555.pdf)。 - - -#### 使用方式 -TensorFlow:首先定义`GRUCell`对象,定义对象时只需要指定单元数`num_units`;由于`GRUCell`内部定义了`__call__`方法,因而其对象是可调用对象,直接使用`step_output, cur_state = cell(step_input, last_state)`的形式,可以计算得到当前步的输出与状态; - -PaddlePaddle:提供op形式的调用接口,通常与[paddle.fluid.layers.DynamicRNN](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#dynamicrnn)配合使用,以获取序列中的单步输入。**注意,为了提高`gru_unit`的计算效率,用户在使用该接口时需要遵从如下约定:假设要指定的GRU单元数为`num_units`,则`size`以及`input.shape[-1]`必须为`3*num_units`,`hidden.shape[-1]`为`num_units`,见如下代码示例小节。** - -#### 返回值 -TensorFlow:返回一个二元组,分别是当前时刻的输出值与隐藏状态,实际上输出值与隐藏状态为相同的tensor; -PaddlePaddle:返回一个三元组,即`(hidden_value, reset_hidden_value, gate_value)`。后面两个元素为内部使用,用户可以只关注第一个元素。 - - -### 代码示例 -``` -emb_size = 32 -emb_vocab = 10000 -num_unit_0 = 10 - -data = 
fluid.layers.data(name='input', shape=[1], dtype='int64', lod_level=1) -embedding = fluid.layers.embedding(input=data, size=[emb_vocab, emb_size], - is_sparse=False) - -# 为了调用gru_unit,输入最后的维度必须为实际单元数的3倍 -emb_fc = layers.fc(embedding, num_unit_0 * 3) - -drnn = fluid.layers.DynamicRNN() -with drnn.block(): - word = drnn.step_input(emb_fc) - - # 指定上一时刻的隐状态,单元数为num_unit_0 - prev_hid0 = drnn.memory(shape=[num_unit_0]) - - # 执行gru_unit计算,num_unit_0 为实际的单元数 - cur_hid0, _, _ = layers.gru_unit(word, prev_hid0, num_unit_0 * 3) - - # 更新隐状态 - drnn.update_memory(prev_hid0, cur_hid0) - - drnn.output(cur_hid0) - -out = drnn() -last = fluid.layers.sequence_last_step(out) - -``` diff --git a/tensorflow2fluid/doc/tf.nn.rnn_cell.LSTMCell.md b/tensorflow2fluid/doc/tf.nn.rnn_cell.LSTMCell.md deleted file mode 100644 index e4ccbdd..0000000 --- a/tensorflow2fluid/doc/tf.nn.rnn_cell.LSTMCell.md +++ /dev/null @@ -1,88 +0,0 @@ -## tf.nn.rnn_cell.LSTMCell - -### [tf.nn.rnn_cell.LSTMCell](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/rnn_cell/LSTMCell) - -```python -tf.nn.rnn_cell.LSTMCell( - num_units, - use_peepholes=False, - cell_clip=None, - initializer=None, - num_proj=None, - proj_clip=None, - num_unit_shards=None, - num_proj_shards=None, - forget_bias=1.0, - state_is_tuple=True, - activation=None, - reuse=None, - name=None, - dtype=None, - **kwargs -) -``` - -### [paddle.fluid.layers.lstm_unit](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#lstm-unit) - -```python -paddle.fluid.layers.lstm_unit( - x_t, - hidden_t_prev, - cell_t_prev, - forget_bias=0.0, - param_attr=None, - bias_attr=None, - name=None -) -``` - -### 功能差异 - -#### 使用方式 -TensorFlow:首先定义`LSTMCell`对象,定义对象时只需要指定单元数`num_units`;由于`LSTMCell`内部定义了`__call__`方法,因而其对象是可调用对象,直接使用`step_output, cur_state = cell(step_input, last_state)`的形式,可以计算得到当前步的输出与状态; - -PaddlePaddle:提供op形式的调用接口,通常与[paddle.fluid.layers.DynamicRNN](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#dynamicrnn)配合使用,以获取序列中的单步输入。**注意,`lstm_unit`通过`cell_t_prev`最后一个维度来确定lstm的单元数,同时要求`hidden_t_prev`与`cell_t_prev`最后的维度相同。** - -#### 窥孔连接 - -TensorFlow:通过设置`use_peepholes`选择LSTM的实现是否进行窥孔连接; -PaddlePaddle:只提供非窥孔连接的LSTM实现。 - -#### 输出变换 -TensorFlow:第一个返回值为`step_output`。当`num_proj`非空时,由`hidden_state`经过`fc`变换后得到`step_output`;而当`num_proj`为空时,则直接返回`hidden_step`作为`step_output`; -PaddlePaddle:第一个返回值为`hidden_state`,不涉及输出变换。 - -#### cell_state -TensorFlow:第二个返回值为`cell_state`。`cell_state`由真实的`cell_state`与`hidden_state`一起构成:当`state_id_tuple`为`True`时,返回真实的`cell_state`与`hidden_state`组成的`tuple`;反之,则返回`concat([cell_state, hidden_state], axis=1)`; -PaddlePaddle:第二个返回值为真实的`cell_state`。 - -### 代码示例 -``` -# embedding 是一个rank为2,lod_level为1的LoDTensor - -num_unit_0 = 32 -drnn = fluid.layers.DynamicRNN() -with drnn.block(): - word = drnn.step_input(embedding) - - # 记录hidden_state与cell_state,初始状态使用零向量 - prev_hid0 = drnn.memory(shape=[num_unit_0]) - prev_cell0 = drnn.memory(shape=[num_unit_0]) - - # 执行lstm计算 - cur_hid0, cur_cell0 = layers.lstm_unit(word, prev_hid0, prev_cell0) - - # 更新hidden_state与cell_state - drnn.update_memory(prev_hid0, cur_hid0) - drnn.update_memory(prev_cell0, cur_cell0) - - # 输出每个时刻的hidden_state - drnn.output(cur_hid0) - -# 获取每个时刻的输出 -out = drnn() - -# 获取最后时刻的输出 -last = fluid.layers.sequence_last(out) - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.rnn_cell.MultiRNNCell.md b/tensorflow2fluid/doc/tf.nn.rnn_cell.MultiRNNCell.md deleted file mode 100644 index 1686414..0000000 --- 
a/tensorflow2fluid/doc/tf.nn.rnn_cell.MultiRNNCell.md +++ /dev/null @@ -1,59 +0,0 @@ -## tf.nn.rnn_cell.MultiRNNCell - -### [tf.nn.rnn_cell.MultiRNNCell](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/rnn_cell/MultiRNNCell) - -```python -__init__( - cells, - state_is_tuple=True -) -``` - -### PaddlePaddle实现 -在Tensorflow中,用户通过定义多个单独的`RNNCell`生成一个`cell`列表,进而调用`MultiRNNCell`,可以实现一个多层RNN网络的功能。PaddlePaddle并没有提供一个对应的接口,用户可以在`DynamicRNN`的block中,通过组合多个RNN相关的`unit`实现类似的功能,可参考代码示例。 - - -### 代码示例 -``` -# 如下代码片段实现两层lstm网络,第一层单元数为32,第二层单元数为16 -num_unit_0 = 32 -num_unit_1 = 16 - -emb_size = 12 -emb_vocab = 10000 - -data = fluid.layers.data(name='input', shape=[1], dtype='int64', lod_level=1) -embedding = fluid.layers.embedding(input=data, size=[emb_vocab, emb_size]) - -drnn = fluid.layers.DynamicRNN() -with drnn.block(): - # 定义单步输入 - word = drnn.step_input(embedding) - - # 定义第一层lstm的hidden_state, cell_state - prev_hid0 = drnn.memory(shape=[num_unit_0]) - prev_cell0 = drnn.memory(shape=[num_unit_0]) - - # 定义第二层lstm的hidden_state, cell_state - prev_hid1 = drnn.memory(shape=[num_unit_1]) - prev_cell1 = drnn.memory(shape=[num_unit_1]) - - # 执行两层lstm运算 - cur_hid0, cur_cell0 = layers.lstm_unit(word, prev_hid0, prev_cell0) - cur_hid1, cur_cell1 = layers.lstm_unit(cur_hid0, prev_hid1, prev_cell1) - - # 更新第一层lstm的hidden_state, cell_state - drnn.update_memory(prev_hid0, cur_hid0) - drnn.update_memory(prev_cell0, cur_cell0) - - # 更新第二层lstm的hidden_state, cell_state - drnn.update_memory(prev_hid1, cur_hid1) - drnn.update_memory(prev_cell1, cur_cell1) - - drnn.output(cur_hid1) - -out = drnn() -last = fluid.layers.sequence_last_step(out) - - -``` diff --git a/tensorflow2fluid/doc/tf.nn.separable_conv2d.md b/tensorflow2fluid/doc/tf.nn.separable_conv2d.md deleted file mode 100644 index a9814e5..0000000 --- a/tensorflow2fluid/doc/tf.nn.separable_conv2d.md +++ /dev/null @@ -1,33 +0,0 @@ -## tf.nn.separable_conv2d - -### [tf.nn.separable_conv2d](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/separable_conv2d) -``` python -tf.nn.separable_conv2d( - input, - depthwise_filter, - pointwise_filter, - strides, - padding, - rate=None, - name=None, - data_format=None -) -``` - -### PaddlePaddle实现 -PaddlePaddle中目前无对应接口,可使用如下代码实现,在如下代码中只考虑了基本的`strides`参数,其它参数如`padding`在PaddlePaddle中使用机制 -以及输入输出和卷积核格式与TensorFlow存在差异,可参考文档[tf.layers.conv2d](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.layers.conv2d.md)和[tf.nn.depthwise_conv2d](https://github.com/PaddlePaddle/X2Paddle/blob/master/tensorflow2fluid/doc/tf.nn.depthwise_conv2d.md)中的说明。 -``` python -# TensorFlow中separable_conv2d的使用 -depthwise_filter = tf.random_uniform([4, 4, 3, 1], 0.0, 1.0) -pointwise_filter = tf.random_uniform([1, 1, 3, 5], 0.0, 1.0) -result = tf.nn.separable_conv2d(input, depthwise_filter, pointwise_filter, - strides=[1, 1, 1, 1], padding='VALID') - -# PaddlePaddle中对应如上代码实现separable_conv2d -depthwise_result = fluid.layers.conv2d(input, 3, filter_size=[4, 4], - stride=[1, 1], groups=3, bias_attr=False) -pointwise_result = fluid.layers.conv2d(depthwise_result, filter_size=[1, 1], - stride=[1, 1], bias_attr=False) - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.nn.softmax_cross_entropy_with_logits.md b/tensorflow2fluid/doc/tf.nn.softmax_cross_entropy_with_logits.md deleted file mode 100644 index 06f1b10..0000000 --- a/tensorflow2fluid/doc/tf.nn.softmax_cross_entropy_with_logits.md +++ /dev/null @@ -1,49 +0,0 @@ -## tf.nn.softmax_cross_entropy_with_logits - -### 
[tf.nn.rnn_cell.MultiRNNCell](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/softmax_cross_entropy_with_logits) - -```python -tf.nn.softmax_cross_entropy_with_logits( - _sentinel=None, - labels=None, - logits=None, - dim=-1, - name=None -) -``` - -### [paddle.fluid.layers.softmax_with_cross_entropy](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#softmax-with-cross-entropy) -```python -paddle.fluid.layers.softmax_with_cross_entropy( - logits, - label, - soft_label=False, - ignore_index=-100, - numeric_stable_mode=False, - return_softmax=False, - axis=-1 -) -``` - -### 功能差异 - -#### 标签类型 -TensorFlow:`labels`只能使用软标签,其`shape`为`[batch, num_classes]`,表示样本在各个类别上的概率分布; - -PaddlePaddle:通过设置`soft_label`,可以选择软标签或者硬标签。当使用硬标签时,`label`的`shape`为`[batch, 1]`,`dtype`为`int64`;当使用软标签时,其`shape`为`[batch, num_classes]`,`dtype`为`int64`。 - -#### 返回值 -TensorFlow:返回`batch`中各个样本的log loss; - -PaddlePaddle:当`return_softmax`为`False`时,返回`batch`中各个样本的log loss;当`return_softmax`为`True`时,再额外返回`logtis`的归一化值。 - - -### 代码示例 -``` -# logits的shape为[32, 10], dtype为float32; label的shape为[32, 1], dtype为int64 - -# loss的shape为[32, 1], dtype为float32 -loss = fluid.layers.softmax_with_cross_entropy(logits, label, soft_label=False) - - -``` diff --git a/tensorflow2fluid/doc/tf.nn.top_k.md b/tensorflow2fluid/doc/tf.nn.top_k.md deleted file mode 100644 index 327d8d7..0000000 --- a/tensorflow2fluid/doc/tf.nn.top_k.md +++ /dev/null @@ -1,34 +0,0 @@ -## tf.nn.top_k - -### [tf.nn.top_k](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/nn/top_k) -``` python -tf.math.top_k( - input, - k=1, - sorted=True, - name=None -) -``` - -### [paddle.fluid.layers.topk](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#topk) -``` python -paddle.fluid.layers.topk( - input, - k, - name=None -) -``` - -### 功能差异 -#### 参数差异 -TensorFlow: 通过设置`sorted`参数,对返回的值与下标设置是否进行降序排序;`k`默认为1。 -PaddlePaddle: 对返回的top-k tensor进行降序排序;`k`没有默认值,必须设置。 - -### 代码示例 -```python -# 输入 tensor t 为[[2,6,3],[3,0,8]] - -# 当k=2时,输出 tensor out 为[[6,3], [8,3]],index为[[1,2],[2,0]] -out, index = fluid.layers.topk(t, k=1) - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.one_hot.md b/tensorflow2fluid/doc/tf.one_hot.md deleted file mode 100644 index f2c56a0..0000000 --- a/tensorflow2fluid/doc/tf.one_hot.md +++ /dev/null @@ -1,40 +0,0 @@ -## tf.one_hot - -### [tf.one_hot](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/one_hot) -``` python -tf.one_hot( - indices, - depth, - on_value=None, - off_value=None, - axis=None, - dtype=None, - name=None -) -``` - -### [paddle.fluid.layers.one_hot](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#one-hot) -``` python -layers.one_hot(; - input, - depth -) -``` - -### 功能差异 -#### 输入格式 -TensorFlow:indices shape 没有限定;支持设置on与off的值; - -PaddlePaddle:input限定为2-D tensor,shape为(batch, 1)。 - -#### 参数种类 -TensorFlow:可以配置`on_value`和`off_value`,默认为`1`和`0`; -PaddlePaddle:无对应配置选项,即为默认的`1`和`0`。 - -### 代码示例 -```python -# 输入 tensor t 为[[1],[2]] - -# depth 为3时,输出 tensor out 为[[0, 1, 0], [0, 0, 1]] -out = fluid.layers.one_hot(t, 3) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.pad.md b/tensorflow2fluid/doc/tf.pad.md deleted file mode 100644 index 71cc774..0000000 --- a/tensorflow2fluid/doc/tf.pad.md +++ /dev/null @@ -1,36 +0,0 @@ -## tf.pad - -### [tf.pad](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/pad) -``` python -tf.pad( - tensor, - paddings, - mode='CONSTANT', - name=None, - constant_values=0 -) -``` - -### 
[paddle.fluid.layers.pad](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#cn-api-fluid-layers-pad) -``` python -paddle.fluid.layers.pad( - x, - paddings, - pad_value=0.0, - name=None -) -``` - -### 功能差异 -#### padding方式 -TensorFlow:支持采用三种模式进行padding,不同padding模式决定pad的值是什么,包括constant、symmetric和reflect。padding的shape为(rank, 2),表示每一维前后padding的长度 - -PaddlePaddle:目前仅支持采用常量进行padding;指定padding长度时,采用一个一维列表表示,其长度为输入rank的两倍,连续的两个值表示某维度上前、后进行padding的长度 - -### 代码示例 -```python -# 输入 tensor t 为[[1,2],[3,4]] - -# 第0维前面padding长度为0,后面padding长度为1;第1维前面padding长度为1,后面padding长度为2 -out = fluid.layers.pad(t, paddings=[0,1,1,2]) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.placeholder.md b/tensorflow2fluid/doc/tf.placeholder.md deleted file mode 100644 index a17015d..0000000 --- a/tensorflow2fluid/doc/tf.placeholder.md +++ /dev/null @@ -1,38 +0,0 @@ -## tf.placeholder - -### [tf.placeholder](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/placeholder) -``` python -tf.placeholder( - dtype, - shape=None, - name=None -) -``` - -### [paddle.fluid.layers.data](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#cn-api-fluid-layers-data) -``` python -paddle.fluid.layers.data( - name, - shape, - append_batch_size=True, - dtype='float32', - lod_level=0, - type=VarType.LOD_TENSOR, - stop_gradient=True) -``` - -### 功能差异 -#### Batch维度处理 -TensorFlow: 对于shape中的batch维度,需要用户使用`None`指定; -PaddlePaddle: 将第1维设置为`-1`表示batch维度;如若第1维为正数,则会默认在最前面插入batch维度,如若要避免batch维,可将参数`append_batch_size`设为`False`。 - - -### 代码示例 -```python - -# 创建输入型tensor out,其shape为[-1, 3, 4], 数据类型为float32 -out = fluid.layers.data('out', shape=[3, 4], dtype='float32') - -# 创建输入型tensor out,其shape为[3, -1, 4], 数据类型为float32 -out = fluid.layers.data('out', shape=[3, -1, 4], append_batch_size=False, dtype='float32') -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.pow.md b/tensorflow2fluid/doc/tf.pow.md deleted file mode 100644 index afd39b0..0000000 --- a/tensorflow2fluid/doc/tf.pow.md +++ /dev/null @@ -1,36 +0,0 @@ -## tf.pow - -### [tf.pow](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/pow) - -```python -tf.math.pow( - x, - y, - name=None -) -``` - -### [paddle.fluid.layers.pow](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#pow) - -```python -paddle.fluid.layers.pow( - x, - factor=1.0, - name=None -) -``` - -### 功能差异 - -#### 参数类型 - -TensorFlow:`x`与`y`为shape相同的tensor,执行element-wise求幂操作; - -PaddlePaddle:`x`为tensor,`factor`为浮点数,返回值为`x`每个元素执行按照`factor`执行求幂操作得到的tensor。 - -### 代码示例 -``` -# x为张量 [2, 3] -out = fluid.layers.pow(x, 2.0) # [4,9] - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.print.md b/tensorflow2fluid/doc/tf.print.md deleted file mode 100644 index 8989fa7..0000000 --- a/tensorflow2fluid/doc/tf.print.md +++ /dev/null @@ -1,50 +0,0 @@ -## tf.print - -### [tf.print](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/print) - -```python -tf.print( - *inputs, - **kwargs -) -``` - -### [paddle.fluid.layers.Print](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#print) -```python -paddle.fluid.layers.Print( - input, - first_n=-1, - message=None, - summarize=-1, - print_tensor_name=True, - print_tensor_type=True, - print_tensor_shape=True, - print_tensor_lod=True, - print_phase='both' -) -``` - -### 功能差异 - -#### 使用方式 -TensorFlow:在`graph`模式下,该op的运行决定于是否直接被运行,或者作为直接运行的其他op的依赖;在`eager`模式下,该op在被调用后会自动运行; - -PaddlePaddle:在被调用后,该op被添加到代码块,之后执行到代码块时将自动运行。 - -#### input类型 
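Editor's note: a minimal sketch of the input-type difference described below, assuming `x` is an existing tensor in each framework (this snippet is an editorial addition, not part of the original page):

```python
# TensorFlow: tf.print accepts a mix of python values and tensors
tf.print("value of x:", x)

# PaddlePaddle: fluid.layers.Print takes a single tensor; extra text goes into `message`
fluid.layers.Print(x, message="value of x")
```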
-TensorFlow:可以是python primitives,也可以是tensor或其与python primitives的组合; - -PaddlePaddle:只可以是tensor。 - -#### 梯度打印 -TensorFlow:不支持; -PaddlePaddle:通过设置`print_phase`,可以控制是否打印`input`的梯度。 - - -### 代码示例 -``` -# input 是任意paddle tensor - -# 打印input的内容,如果有梯度的话也将打印梯度 -print(input, message="content of input") -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.reshape.md b/tensorflow2fluid/doc/tf.reshape.md deleted file mode 100644 index e7e0dd7..0000000 --- a/tensorflow2fluid/doc/tf.reshape.md +++ /dev/null @@ -1,39 +0,0 @@ -## tf.reshape - -### [tf.reshape](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/reshape) -``` python -tf.reshape( - tensor, - shape, - name=None -) -``` - -### [paddle.fluid.layers.reshape](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#cn-api-fluid-layers-reshape) -``` python -paddle.fluid.layers.reshape( - x, - shape, - actual_shape=None, - act=None, - inplace=False, - name=None) -``` - -### 功能差异: - -#### shape标记差别 -TensorFlow: shape 中可以使用单独一个-1,表示待推断的维度; -PaddlePaddle: shape 中除了可以使用单独一个-1表示待推断维度外,还能使用0,表示在输入tensor原来的shape中对应位置的维度。注意,0的下标不能超过原来tensor的rank。 - - -## 代码示例 -```python -# 输入 tensor t 的 shape 为[3, 4] - -# 输出 tensor out 的 shape 为[2,6] -out = fluid.layers.reshape(t, [-1, 6]) - -# 输出 tensor out 的 shape 为[3, 2, 2] -out = fluid.layers.reshape(t, [0, 2, 2]) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.reverse_sequence.md b/tensorflow2fluid/doc/tf.reverse_sequence.md deleted file mode 100644 index 084fe12..0000000 --- a/tensorflow2fluid/doc/tf.reverse_sequence.md +++ /dev/null @@ -1,47 +0,0 @@ -## tf.reverse_sequence - -### [tf.reverse_sequence](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/reverse_sequence) - -```python -tf.reverse_sequence( - input, - seq_lengths, - seq_axis=None, - batch_axis=None, - name=None, - seq_dim=None, - batch_dim=None -) -``` - -### [paddle.fluid.layers.sequence_reverse](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#paddle.fluid.layers.sequence_reverse) - -```python -paddle.fluid.layers.sequence_reverse( - x, - name=None -) -``` - -### 功能差异 - -#### 输入格式 - -Tensorflow:`reverse_sequence`中,`input`是一个带padding的tensor,每个序列都会被填充到相同长度; -PaddlePaddle:`sequence_reverse`中,`x`是一个[LoDTensor](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/fluid_cn.html#lodtensor), -不需要进行填充; - -#### 参数类型 - -Tensorflow:通过`seq_axis`和`batch_axis`指定序列维度与batch维度;同时使用`seq_lengths`来表示每个序列的长度,属于序列的部分会被翻转,padding部分则被保留; -PaddlePaddle:由于`LoDTensor`本身已经携带序列信息,因而不需要用户提供除了输入tensor外的额外参数; - -### 代码示例 -``` -# x是shape为[5, 6]的LoDTensor,其LoD信息为{0, 2, 5},表示两个序列,长度分别是2和3 - -# out同样也是shape为[5, 6]的LoDTensor,LoD信息为{0, 2, 5},表示两个序列 -# out[0:2, 6] = x[2:0:-1, 6] -# out[2:5, 6] = x[5:2:-1, 6] -out = fluid.layers.sequence_reverse(x) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.scatter_update.md b/tensorflow2fluid/doc/tf.scatter_update.md deleted file mode 100644 index 7223986..0000000 --- a/tensorflow2fluid/doc/tf.scatter_update.md +++ /dev/null @@ -1,47 +0,0 @@ -## tf.scatter_update - -### [tf.scatter_update](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/scatter_update) - -```python -tf.scatter_update( - ref, - indices, - updates, - use_locking=True, - name=None -) -``` - -### [paddle.fluid.layers.scatter](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#scatter) - -```python -paddle.fluid.layers.scatter( - input, - index, - updates, - name=None, - overwrite=True -) -``` - -### 功能差异 - -#### 参数类型 - 
-Tensorflow:`indices`支持任意维度,可以是变量,也可以是常量; -PaddlePaddle:`index`只支持1-d Variable。 - -#### 其他 -Tensorflow:`updates`支持numpy-style broadcasting; -PaddlePaddle:`updates`要求其rank与`input`相同,同时`updates.shape[0]`等于`index.shape[0]`。此外`overwrite`参数提供了当存在重复index时,两种不同的梯度更新策略。 - -### 代码示例 -``` -# x是dtype为float32, shape为[3,9,5]的张量 - -# 将x[1:,:,:]置为1,并返回更新后的张量 -out = layers.scatter(x, - index=layers.assign(np.array([1,2], dtype='int32')), - updates=layers.assign(np.ones((2,9,5), dtype='float32'))) - -``` diff --git a/tensorflow2fluid/doc/tf.slice.md b/tensorflow2fluid/doc/tf.slice.md deleted file mode 100644 index 04e1dd1..0000000 --- a/tensorflow2fluid/doc/tf.slice.md +++ /dev/null @@ -1,42 +0,0 @@ -## tf.slice - -### [tf.slice](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/slice) -``` python -tf.slice( - input_, - begin, - size, - name=None -) -``` - -### [paddle.fluid.layers.slice](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#cn-api-fluid-layers-slice) -``` python -paddle.fluid.layers.slice( - input, - axes, - starts, - ends -) -``` - -### 功能差异 -#### 参数类型 -TensorFlow:`begin/size`可以是python list,也可以是变量类型; -PaddlePaddle:`axes/starts/ends`只能是python list。 - -#### 参数种类 -TensorFlow:使用`begin`指定要开始截取tensor的位置,使用`size`指定截取长度,必须描述所有的轴; -PaddlePaddle:采用`axes`指定要操作的轴,未指定的轴默认全部截取,使用`starts`、`ends`分别指定截取tensor的开始与结束位置,注意采用的是先闭后开[start, end)的写法。 - - -### 代码示例 -```python -# 输入 tensor t 为[[0,1,2,3],[4,5,6,7],[8,9,10,11]] - -# 输出 tensor out 为[[1,2],[5,6]] -out = fluid.layers.slice(t, axes=[0,1], starts=[0,1], ends=[2,3]) - -# 输出 tensor out 为[[1,2],[5,6],[9,10]] -out = fluid.layers.slice(t, axes=[1], starts=[1], ends=[3]) -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.split.md b/tensorflow2fluid/doc/tf.split.md deleted file mode 100644 index c0b7b94..0000000 --- a/tensorflow2fluid/doc/tf.split.md +++ /dev/null @@ -1,42 +0,0 @@ -## tf.split - -### [tf.split](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/split) - -```python -tf.split( - value, - num_or_size_splits, - axis=0, - num=None, - name='split' -) -``` - -### [paddle.fluid.layers.split](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#split) - -```python -paddle.fluid.layers.split( - input, - num_or_sections, - dim=-1, - name=None -) -``` - -### 功能差异 - -#### 返回值类型 - -TensorFlow:`split`函数返回的结果均保存在一个tensor类型的值中; - -PaddlePaddle:`split`返回`list`类型结果,长度为`num_or_sections`。 - -### 代码示例 -``` -# x是shape为[3,9,5]的张量: -x0, x1, x2 = fluid.layers.split(x, num_or_sections=3, dim=1) -x0.shape # [3, 3, 5] -x1.shape # [3, 3, 5] -x2.shape # [3, 3, 5] - -``` \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.squared_difference.md b/tensorflow2fluid/doc/tf.squared_difference.md deleted file mode 100644 index 5218257..0000000 --- a/tensorflow2fluid/doc/tf.squared_difference.md +++ /dev/null @@ -1,27 +0,0 @@ -## tf.squared_difference - -### [tf.squared_diffenrece](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/math/squared_difference) -``` python -tf.math.squared_difference( - x, - y, - name=None -) -``` - -### PaddlePaddle实现 -PaddlePaddle中目前无对应接口,可使用如下代码实现 -``` python -def squared_difference(x, y): - net_0 = fluid.layers.elementwise_sub(x, y) - net_1 = fluid.layers.elementwise_mul(net_0, net_0) - return net_1 -``` - -### 代码示例 -``` python -input_x = fluid.layers.data(dtype='float32', shape=[1000], name='input_x') -input_y = fluid.layers.data(dtype='float32', shape=[1000], name='input_y') -# 调用上述自定义函数 -result = squared_difference(input_x, input_y) -``` \ No 
newline at end of file diff --git a/tensorflow2fluid/doc/tf.stop_gradient.md b/tensorflow2fluid/doc/tf.stop_gradient.md deleted file mode 100644 index 1f7397c..0000000 --- a/tensorflow2fluid/doc/tf.stop_gradient.md +++ /dev/null @@ -1,17 +0,0 @@ -## tf.stop_gradient - -### [tf.stop_gradient](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/stop_gradient) -``` python -tf.stop_gradient( - input, - name=None -) -``` - -### PaddlePaddle实现 -TensorFlow中,使用`stop_gradient`表示该tensor不需要进行bp。而在PaddlePaddle中,每个tensor具有`stop_gradient`的属性,用户可以将该属性直接设置成`True`/`False`。 - -## 代码示例 -```python -# 将tensor t设置成不需要bp -t.stop_gradient = True \ No newline at end of file diff --git a/tensorflow2fluid/doc/tf.while_loop.md b/tensorflow2fluid/doc/tf.while_loop.md deleted file mode 100644 index f734fda..0000000 --- a/tensorflow2fluid/doc/tf.while_loop.md +++ /dev/null @@ -1,56 +0,0 @@ -## tf.while_loop - -### [tf.while_loop](https://www.tensorflow.org/versions/r1.13/api_docs/python/tf/while_loop) - -```python -tf.while_loop( - cond, - body, - loop_vars, - shape_invariants=None, - parallel_iterations=10, - back_prop=True, - swap_memory=False, - name=None, - maximum_iterations=None, - return_same_structure=False -) -``` - -### [paddle.fluid.layers.While](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/layers_cn.html#while) -```python -paddle.fluid.layers.While( - cond, - is_test=False, - name=None -) -``` - -### 功能差异 - -#### 使用方式 -TensorFlow:用户通过函数的方式定义`cond`和`body`,在循环体中操纵的是循环变量`loop_vars`,返回值为`tensor`或其`list`; -PaddlePaddle:用户通过op的方式定义`cond`,然后在`block`中实现循环体。*注意,在循环体中用户需要更新`cond`,具体可参数代码示例*。 - -#### 其他 -TensorFlow:支持设置最大迭代次数`maximum_iterations`及并行迭代`parallel_iterations`; -PaddlePaddle:不涉及最大迭代次数及并行。 - - -### 代码示例 -``` -# 如下代码片段实现从0到5循环叠加i -i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) -limit = fluid.layers.fill_constant(shape=[1], dtype='int64', value=5) - -# 定义条件 -cond = layers.less_than(x=i, y=limit) -while_op = layers.While(cond=cond) - -# 定义循环体 -with while_op.block(): - # 更新i - i = layers.increment(x=i, in_place=True) - # 更新条件状态 - layers.less_than(x=i, y=limit, cond=cond) -``` \ No newline at end of file diff --git a/tensorflow2fluid/tf2fluid/__init__.py b/tensorflow2fluid/tf2fluid/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tensorflow2fluid/tf2fluid/convert.py b/tensorflow2fluid/tf2fluid/convert.py deleted file mode 100644 index 4f0ec10..0000000 --- a/tensorflow2fluid/tf2fluid/convert.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
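# Editor's note (not part of the original file): based on the command-line flags
# defined below, a typical invocation of this converter would look roughly like
# the following; the model path and node names are placeholders, not values
# taken from the repository:
#
#   python convert.py --pb_file model.pb --in_nodes inputs --input_shape None,224,224,3 \
#       --output_nodes outputs --input_format NHWC --save_dir paddle_model --use_cuda False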
- -from paddle_emitter import PaddleEmitter -from tensorflow_parser import TensorflowCkptParser -from tensorflow_parser import TensorflowPbParser -from six import text_type as _text_type -from utils import * -import argparse -import logging -import os -logging.basicConfig(level=logging.DEBUG) - - -def _get_parser(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--meta_file", - "-m", - type=_text_type, - default=None, - help="meta file path for checkpoint format") - parser.add_argument( - "--ckpt_dir", - "-c", - type=_text_type, - default=None, - help="checkpoint directory") - parser.add_argument( - "--pb_file", - "-p", - type=_text_type, - default=None, - help="pb model file path") - parser.add_argument( - "--in_nodes", - "-i", - type=_text_type, - nargs="+", - default=None, - help="input nodes name") - parser.add_argument( - "--input_shape", - "-is", - type=_text_type, - nargs="+", - default=None, - help="input tensor shape") - parser.add_argument( - "--output_nodes", - "-o", - type=_text_type, - nargs="+", - default=None, - help="output nodes name") - parser.add_argument( - "--save_dir", - "-s", - type=_text_type, - default=None, - help="path to save transformed paddle model") - parser.add_argument( - "--input_format", - "-sf", - type=_text_type, - default=None, - help="input data format(NHWC/NCHW or OTHER)") - parser.add_argument( - "--use_cuda", - "-u", - type=_text_type, - default="True", - help="True for use gpu") - return parser - - -def run(args): - if args.meta_file is None and args.pb_file is None: - raise Exception("Need to define --meta_file or --pb_file") - if args.input_format is None: - raise Exception("Input format need to be defined(NHWC, NCHW or OTHER)") - assert args.use_cuda == "True" or args.use_cuda == "False" - if args.use_cuda == "False": - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - - if args.input_format == "NHWC": - input_format = NHWC - elif args.input_format == "NCHW": - input_format = NCHW - elif args.input_format == "OTHER": - input_format = OTHER - else: - raise Exception("Can not identify input format(NHWC/NCHW/OTHER)") - - assert args.in_nodes is not None - assert args.output_nodes is not None - assert args.input_shape is not None - assert args.save_dir is not None - - if not os.path.exists(args.save_dir): - os.makedirs(args.save_dir) - - input_shape = list() - for shape_str in args.input_shape: - items = shape_str.split(',') - for i in range(len(items)): - if items[i] != "None": - items[i] = int(items[i]) - else: - items[i] = None - - input_shape.append(items) - - logging.info("Loading tensorflow model...") - if args.meta_file is not None: - parser = TensorflowCkptParser(args.meta_file, args.ckpt_dir, - args.output_nodes, input_shape, - args.in_nodes, input_format) - else: - parser = TensorflowPbParser(args.pb_file, args.output_nodes, - input_shape, args.in_nodes, input_format) - logging.info("Tensorflow model loaded!") - - emitter = PaddleEmitter(parser, args.save_dir) - emitter.run() - - open(args.save_dir + "/__init__.py", "w").close() - - -def _main(): - parser = _get_parser() - args = parser.parse_args() - run(args) - - -if __name__ == "__main__": - _main() diff --git a/tensorflow2fluid/tf2fluid/framework_pb2.py b/tensorflow2fluid/tf2fluid/framework_pb2.py deleted file mode 100644 index 35115d7..0000000 --- a/tensorflow2fluid/tf2fluid/framework_pb2.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: framework.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='framework.proto', - package='paddle.framework.proto', - syntax='proto2', - serialized_pb=_b('\n\x0f\x66ramework.proto\x12\x16paddle.framework.proto\"\x1d\n\x07Version\x12\x12\n\x07version\x18\x01 \x01(\x03:\x01\x30\"\xec\x03\n\x06OpDesc\x12\x0c\n\x04type\x18\x03 \x02(\t\x12\x32\n\x06inputs\x18\x01 \x03(\x0b\x32\".paddle.framework.proto.OpDesc.Var\x12\x33\n\x07outputs\x18\x02 \x03(\x0b\x32\".paddle.framework.proto.OpDesc.Var\x12\x32\n\x05\x61ttrs\x18\x04 \x03(\x0b\x32#.paddle.framework.proto.OpDesc.Attr\x12\x18\n\tis_target\x18\x05 \x01(\x08:\x05\x66\x61lse\x1a\xef\x01\n\x04\x41ttr\x12\x0c\n\x04name\x18\x01 \x02(\t\x12.\n\x04type\x18\x02 \x02(\x0e\x32 .paddle.framework.proto.AttrType\x12\t\n\x01i\x18\x03 \x01(\x05\x12\t\n\x01\x66\x18\x04 \x01(\x02\x12\t\n\x01s\x18\x05 \x01(\t\x12\x0c\n\x04ints\x18\x06 \x03(\x05\x12\x0e\n\x06\x66loats\x18\x07 \x03(\x02\x12\x0f\n\x07strings\x18\x08 \x03(\t\x12\t\n\x01\x62\x18\n \x01(\x08\x12\r\n\x05\x62ools\x18\x0b \x03(\x08\x12\x11\n\tblock_idx\x18\x0c \x01(\x05\x12\t\n\x01l\x18\r \x01(\x03\x12\x12\n\nblocks_idx\x18\x0e \x03(\x05\x12\r\n\x05longs\x18\x0f \x03(\x03\x1a+\n\x03Var\x12\x11\n\tparameter\x18\x01 \x02(\t\x12\x11\n\targuments\x18\x02 \x03(\t\"\xb3\x03\n\x07OpProto\x12\x0c\n\x04type\x18\x01 \x02(\t\x12\x33\n\x06inputs\x18\x02 \x03(\x0b\x32#.paddle.framework.proto.OpProto.Var\x12\x34\n\x07outputs\x18\x03 \x03(\x0b\x32#.paddle.framework.proto.OpProto.Var\x12\x33\n\x05\x61ttrs\x18\x04 \x03(\x0b\x32$.paddle.framework.proto.OpProto.Attr\x12\x0f\n\x07\x63omment\x18\x05 \x02(\t\x1ax\n\x03Var\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0f\n\x07\x63omment\x18\x02 \x02(\t\x12\x19\n\nduplicable\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cintermediate\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0b\x64ispensable\x18\x05 \x01(\x08:\x05\x66\x61lse\x1ao\n\x04\x41ttr\x12\x0c\n\x04name\x18\x01 \x02(\t\x12.\n\x04type\x18\x02 \x02(\x0e\x32 .paddle.framework.proto.AttrType\x12\x0f\n\x07\x63omment\x18\x03 \x02(\t\x12\x18\n\tgenerated\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xda\x08\n\x07VarType\x12\x32\n\x04type\x18\x01 \x02(\x0e\x32$.paddle.framework.proto.VarType.Type\x12\x41\n\rselected_rows\x18\x02 \x01(\x0b\x32*.paddle.framework.proto.VarType.TensorDesc\x12\x41\n\nlod_tensor\x18\x03 \x01(\x0b\x32-.paddle.framework.proto.VarType.LoDTensorDesc\x12H\n\x0ctensor_array\x18\x04 \x01(\x0b\x32\x32.paddle.framework.proto.VarType.LoDTensorArrayDesc\x12:\n\x06reader\x18\x05 \x01(\x0b\x32*.paddle.framework.proto.VarType.ReaderDesc\x12\x34\n\x05tuple\x18\x07 \x01(\x0b\x32%.paddle.framework.proto.VarType.Tuple\x1aS\n\nTensorDesc\x12\x37\n\tdata_type\x18\x01 \x02(\x0e\x32$.paddle.framework.proto.VarType.Type\x12\x0c\n\x04\x64ims\x18\x02 \x03(\x03\x1a\x61\n\rLoDTensorDesc\x12:\n\x06tensor\x18\x01 \x02(\x0b\x32*.paddle.framework.proto.VarType.TensorDesc\x12\x14\n\tlod_level\x18\x02 \x01(\x05:\x01\x30\x1a\x66\n\x12LoDTensorArrayDesc\x12:\n\x06tensor\x18\x01 
\x02(\x0b\x32*.paddle.framework.proto.VarType.TensorDesc\x12\x14\n\tlod_level\x18\x02 \x01(\x05:\x01\x30\x1aO\n\nReaderDesc\x12\x41\n\nlod_tensor\x18\x01 \x03(\x0b\x32-.paddle.framework.proto.VarType.LoDTensorDesc\x1a\x43\n\x05Tuple\x12:\n\x0c\x65lement_type\x18\x01 \x03(\x0e\x32$.paddle.framework.proto.VarType.Type\"\xa2\x02\n\x04Type\x12\x08\n\x04\x42OOL\x10\x00\x12\t\n\x05INT16\x10\x01\x12\t\n\x05INT32\x10\x02\x12\t\n\x05INT64\x10\x03\x12\x08\n\x04\x46P16\x10\x04\x12\x08\n\x04\x46P32\x10\x05\x12\x08\n\x04\x46P64\x10\x06\x12\n\n\x06SIZE_T\x10\x13\x12\t\n\x05UINT8\x10\x14\x12\x08\n\x04INT8\x10\x15\x12\x0e\n\nLOD_TENSOR\x10\x07\x12\x11\n\rSELECTED_ROWS\x10\x08\x12\x12\n\x0e\x46\x45\x45\x44_MINIBATCH\x10\t\x12\x0e\n\nFETCH_LIST\x10\n\x12\x0f\n\x0bSTEP_SCOPES\x10\x0b\x12\x12\n\x0eLOD_RANK_TABLE\x10\x0c\x12\x14\n\x10LOD_TENSOR_ARRAY\x10\r\x12\x0e\n\nPLACE_LIST\x10\x0e\x12\n\n\x06READER\x10\x0f\x12\x07\n\x03RAW\x10\x11\x12\t\n\x05TUPLE\x10\x12\"b\n\x07VarDesc\x12\x0c\n\x04name\x18\x01 \x02(\t\x12-\n\x04type\x18\x02 \x02(\x0b\x32\x1f.paddle.framework.proto.VarType\x12\x1a\n\x0bpersistable\x18\x03 \x01(\x08:\x05\x66\x61lse\"\xa7\x01\n\tBlockDesc\x12\x0b\n\x03idx\x18\x01 \x02(\x05\x12\x12\n\nparent_idx\x18\x02 \x02(\x05\x12-\n\x04vars\x18\x03 \x03(\x0b\x32\x1f.paddle.framework.proto.VarDesc\x12+\n\x03ops\x18\x04 \x03(\x0b\x32\x1e.paddle.framework.proto.OpDesc\x12\x1d\n\x11\x66orward_block_idx\x18\x05 \x01(\x05:\x02-1\"r\n\x0bProgramDesc\x12\x31\n\x06\x62locks\x18\x01 \x03(\x0b\x32!.paddle.framework.proto.BlockDesc\x12\x30\n\x07version\x18\x02 \x01(\x0b\x32\x1f.paddle.framework.proto.Version*\x94\x01\n\x08\x41ttrType\x12\x07\n\x03INT\x10\x00\x12\t\n\x05\x46LOAT\x10\x01\x12\n\n\x06STRING\x10\x02\x12\x08\n\x04INTS\x10\x03\x12\n\n\x06\x46LOATS\x10\x04\x12\x0b\n\x07STRINGS\x10\x05\x12\x0b\n\x07\x42OOLEAN\x10\x06\x12\x0c\n\x08\x42OOLEANS\x10\x07\x12\t\n\x05\x42LOCK\x10\x08\x12\x08\n\x04LONG\x10\t\x12\n\n\x06\x42LOCKS\x10\n\x12\t\n\x05LONGS\x10\x0b\x42\x02H\x03') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_ATTRTYPE = _descriptor.EnumDescriptor( - name='AttrType', - full_name='paddle.framework.proto.AttrType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='INT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FLOAT', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STRING', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INTS', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FLOATS', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STRINGS', index=5, number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BOOLEAN', index=6, number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BOOLEANS', index=7, number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BLOCK', index=8, number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LONG', index=9, number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BLOCKS', index=10, number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LONGS', index=11, number=11, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=2511, - serialized_end=2659, -) -_sym_db.RegisterEnumDescriptor(_ATTRTYPE) - -AttrType = 
enum_type_wrapper.EnumTypeWrapper(_ATTRTYPE) -INT = 0 -FLOAT = 1 -STRING = 2 -INTS = 3 -FLOATS = 4 -STRINGS = 5 -BOOLEAN = 6 -BOOLEANS = 7 -BLOCK = 8 -LONG = 9 -BLOCKS = 10 -LONGS = 11 - - -_VARTYPE_TYPE = _descriptor.EnumDescriptor( - name='Type', - full_name='paddle.framework.proto.VarType.Type', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='BOOL', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INT16', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INT32', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INT64', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FP16', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FP32', index=5, number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FP64', index=6, number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SIZE_T', index=7, number=19, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='UINT8', index=8, number=20, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INT8', index=9, number=21, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LOD_TENSOR', index=10, number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SELECTED_ROWS', index=11, number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FEED_MINIBATCH', index=12, number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FETCH_LIST', index=13, number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STEP_SCOPES', index=14, number=11, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LOD_RANK_TABLE', index=15, number=12, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LOD_TENSOR_ARRAY', index=16, number=13, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PLACE_LIST', index=17, number=14, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READER', index=18, number=15, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RAW', index=19, number=17, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TUPLE', index=20, number=18, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1832, - serialized_end=2122, -) -_sym_db.RegisterEnumDescriptor(_VARTYPE_TYPE) - - -_VERSION = _descriptor.Descriptor( - name='Version', - full_name='paddle.framework.proto.Version', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='version', full_name='paddle.framework.proto.Version.version', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=43, - serialized_end=72, -) - - -_OPDESC_ATTR = _descriptor.Descriptor( - name='Attr', - full_name='paddle.framework.proto.OpDesc.Attr', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='name', full_name='paddle.framework.proto.OpDesc.Attr.name', index=0, - number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', full_name='paddle.framework.proto.OpDesc.Attr.type', index=1, - number=2, type=14, cpp_type=8, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='i', full_name='paddle.framework.proto.OpDesc.Attr.i', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='f', full_name='paddle.framework.proto.OpDesc.Attr.f', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='s', full_name='paddle.framework.proto.OpDesc.Attr.s', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ints', full_name='paddle.framework.proto.OpDesc.Attr.ints', index=5, - number=6, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='floats', full_name='paddle.framework.proto.OpDesc.Attr.floats', index=6, - number=7, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='strings', full_name='paddle.framework.proto.OpDesc.Attr.strings', index=7, - number=8, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='b', full_name='paddle.framework.proto.OpDesc.Attr.b', index=8, - number=10, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bools', full_name='paddle.framework.proto.OpDesc.Attr.bools', index=9, - number=11, type=8, cpp_type=7, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='block_idx', full_name='paddle.framework.proto.OpDesc.Attr.block_idx', index=10, - number=12, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='l', 
full_name='paddle.framework.proto.OpDesc.Attr.l', index=11, - number=13, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blocks_idx', full_name='paddle.framework.proto.OpDesc.Attr.blocks_idx', index=12, - number=14, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='longs', full_name='paddle.framework.proto.OpDesc.Attr.longs', index=13, - number=15, type=3, cpp_type=2, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=283, - serialized_end=522, -) - -_OPDESC_VAR = _descriptor.Descriptor( - name='Var', - full_name='paddle.framework.proto.OpDesc.Var', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parameter', full_name='paddle.framework.proto.OpDesc.Var.parameter', index=0, - number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='arguments', full_name='paddle.framework.proto.OpDesc.Var.arguments', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=524, - serialized_end=567, -) - -_OPDESC = _descriptor.Descriptor( - name='OpDesc', - full_name='paddle.framework.proto.OpDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='paddle.framework.proto.OpDesc.type', index=0, - number=3, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inputs', full_name='paddle.framework.proto.OpDesc.inputs', index=1, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='outputs', full_name='paddle.framework.proto.OpDesc.outputs', index=2, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='attrs', full_name='paddle.framework.proto.OpDesc.attrs', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='is_target', full_name='paddle.framework.proto.OpDesc.is_target', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_OPDESC_ATTR, _OPDESC_VAR, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=75, - serialized_end=567, -) - - -_OPPROTO_VAR = _descriptor.Descriptor( - name='Var', - full_name='paddle.framework.proto.OpProto.Var', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='paddle.framework.proto.OpProto.Var.name', index=0, - number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='comment', full_name='paddle.framework.proto.OpProto.Var.comment', index=1, - number=2, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='duplicable', full_name='paddle.framework.proto.OpProto.Var.duplicable', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='intermediate', full_name='paddle.framework.proto.OpProto.Var.intermediate', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dispensable', full_name='paddle.framework.proto.OpProto.Var.dispensable', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=772, - serialized_end=892, -) - -_OPPROTO_ATTR = _descriptor.Descriptor( - name='Attr', - full_name='paddle.framework.proto.OpProto.Attr', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='paddle.framework.proto.OpProto.Attr.name', index=0, - number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', full_name='paddle.framework.proto.OpProto.Attr.type', index=1, - number=2, type=14, cpp_type=8, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( 
- name='comment', full_name='paddle.framework.proto.OpProto.Attr.comment', index=2, - number=3, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='generated', full_name='paddle.framework.proto.OpProto.Attr.generated', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=894, - serialized_end=1005, -) - -_OPPROTO = _descriptor.Descriptor( - name='OpProto', - full_name='paddle.framework.proto.OpProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='paddle.framework.proto.OpProto.type', index=0, - number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inputs', full_name='paddle.framework.proto.OpProto.inputs', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='outputs', full_name='paddle.framework.proto.OpProto.outputs', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='attrs', full_name='paddle.framework.proto.OpProto.attrs', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='comment', full_name='paddle.framework.proto.OpProto.comment', index=4, - number=5, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_OPPROTO_VAR, _OPPROTO_ATTR, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=570, - serialized_end=1005, -) - - -_VARTYPE_TENSORDESC = _descriptor.Descriptor( - name='TensorDesc', - full_name='paddle.framework.proto.VarType.TensorDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='data_type', full_name='paddle.framework.proto.VarType.TensorDesc.data_type', index=0, - number=1, type=14, cpp_type=8, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dims', full_name='paddle.framework.proto.VarType.TensorDesc.dims', index=1, - number=2, type=3, cpp_type=2, 
label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1393, - serialized_end=1476, -) - -_VARTYPE_LODTENSORDESC = _descriptor.Descriptor( - name='LoDTensorDesc', - full_name='paddle.framework.proto.VarType.LoDTensorDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tensor', full_name='paddle.framework.proto.VarType.LoDTensorDesc.tensor', index=0, - number=1, type=11, cpp_type=10, label=2, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lod_level', full_name='paddle.framework.proto.VarType.LoDTensorDesc.lod_level', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1478, - serialized_end=1575, -) - -_VARTYPE_LODTENSORARRAYDESC = _descriptor.Descriptor( - name='LoDTensorArrayDesc', - full_name='paddle.framework.proto.VarType.LoDTensorArrayDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tensor', full_name='paddle.framework.proto.VarType.LoDTensorArrayDesc.tensor', index=0, - number=1, type=11, cpp_type=10, label=2, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lod_level', full_name='paddle.framework.proto.VarType.LoDTensorArrayDesc.lod_level', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1577, - serialized_end=1679, -) - -_VARTYPE_READERDESC = _descriptor.Descriptor( - name='ReaderDesc', - full_name='paddle.framework.proto.VarType.ReaderDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='lod_tensor', full_name='paddle.framework.proto.VarType.ReaderDesc.lod_tensor', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1681, - serialized_end=1760, -) - -_VARTYPE_TUPLE = _descriptor.Descriptor( - name='Tuple', - full_name='paddle.framework.proto.VarType.Tuple', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - 
name='element_type', full_name='paddle.framework.proto.VarType.Tuple.element_type', index=0, - number=1, type=14, cpp_type=8, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1762, - serialized_end=1829, -) - -_VARTYPE = _descriptor.Descriptor( - name='VarType', - full_name='paddle.framework.proto.VarType', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='paddle.framework.proto.VarType.type', index=0, - number=1, type=14, cpp_type=8, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='selected_rows', full_name='paddle.framework.proto.VarType.selected_rows', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lod_tensor', full_name='paddle.framework.proto.VarType.lod_tensor', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tensor_array', full_name='paddle.framework.proto.VarType.tensor_array', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reader', full_name='paddle.framework.proto.VarType.reader', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tuple', full_name='paddle.framework.proto.VarType.tuple', index=5, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_VARTYPE_TENSORDESC, _VARTYPE_LODTENSORDESC, _VARTYPE_LODTENSORARRAYDESC, _VARTYPE_READERDESC, _VARTYPE_TUPLE, ], - enum_types=[ - _VARTYPE_TYPE, - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1008, - serialized_end=2122, -) - - -_VARDESC = _descriptor.Descriptor( - name='VarDesc', - full_name='paddle.framework.proto.VarDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='paddle.framework.proto.VarDesc.name', index=0, - number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', 
full_name='paddle.framework.proto.VarDesc.type', index=1, - number=2, type=11, cpp_type=10, label=2, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='persistable', full_name='paddle.framework.proto.VarDesc.persistable', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2124, - serialized_end=2222, -) - - -_BLOCKDESC = _descriptor.Descriptor( - name='BlockDesc', - full_name='paddle.framework.proto.BlockDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='idx', full_name='paddle.framework.proto.BlockDesc.idx', index=0, - number=1, type=5, cpp_type=1, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='parent_idx', full_name='paddle.framework.proto.BlockDesc.parent_idx', index=1, - number=2, type=5, cpp_type=1, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='vars', full_name='paddle.framework.proto.BlockDesc.vars', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ops', full_name='paddle.framework.proto.BlockDesc.ops', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='forward_block_idx', full_name='paddle.framework.proto.BlockDesc.forward_block_idx', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2225, - serialized_end=2392, -) - - -_PROGRAMDESC = _descriptor.Descriptor( - name='ProgramDesc', - full_name='paddle.framework.proto.ProgramDesc', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='blocks', full_name='paddle.framework.proto.ProgramDesc.blocks', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='version', full_name='paddle.framework.proto.ProgramDesc.version', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2394, - serialized_end=2508, -) - -_OPDESC_ATTR.fields_by_name['type'].enum_type = _ATTRTYPE -_OPDESC_ATTR.containing_type = _OPDESC -_OPDESC_VAR.containing_type = _OPDESC -_OPDESC.fields_by_name['inputs'].message_type = _OPDESC_VAR -_OPDESC.fields_by_name['outputs'].message_type = _OPDESC_VAR -_OPDESC.fields_by_name['attrs'].message_type = _OPDESC_ATTR -_OPPROTO_VAR.containing_type = _OPPROTO -_OPPROTO_ATTR.fields_by_name['type'].enum_type = _ATTRTYPE -_OPPROTO_ATTR.containing_type = _OPPROTO -_OPPROTO.fields_by_name['inputs'].message_type = _OPPROTO_VAR -_OPPROTO.fields_by_name['outputs'].message_type = _OPPROTO_VAR -_OPPROTO.fields_by_name['attrs'].message_type = _OPPROTO_ATTR -_VARTYPE_TENSORDESC.fields_by_name['data_type'].enum_type = _VARTYPE_TYPE -_VARTYPE_TENSORDESC.containing_type = _VARTYPE -_VARTYPE_LODTENSORDESC.fields_by_name['tensor'].message_type = _VARTYPE_TENSORDESC -_VARTYPE_LODTENSORDESC.containing_type = _VARTYPE -_VARTYPE_LODTENSORARRAYDESC.fields_by_name['tensor'].message_type = _VARTYPE_TENSORDESC -_VARTYPE_LODTENSORARRAYDESC.containing_type = _VARTYPE -_VARTYPE_READERDESC.fields_by_name['lod_tensor'].message_type = _VARTYPE_LODTENSORDESC -_VARTYPE_READERDESC.containing_type = _VARTYPE -_VARTYPE_TUPLE.fields_by_name['element_type'].enum_type = _VARTYPE_TYPE -_VARTYPE_TUPLE.containing_type = _VARTYPE -_VARTYPE.fields_by_name['type'].enum_type = _VARTYPE_TYPE -_VARTYPE.fields_by_name['selected_rows'].message_type = _VARTYPE_TENSORDESC -_VARTYPE.fields_by_name['lod_tensor'].message_type = _VARTYPE_LODTENSORDESC -_VARTYPE.fields_by_name['tensor_array'].message_type = _VARTYPE_LODTENSORARRAYDESC -_VARTYPE.fields_by_name['reader'].message_type = _VARTYPE_READERDESC -_VARTYPE.fields_by_name['tuple'].message_type = _VARTYPE_TUPLE -_VARTYPE_TYPE.containing_type = _VARTYPE -_VARDESC.fields_by_name['type'].message_type = _VARTYPE -_BLOCKDESC.fields_by_name['vars'].message_type = _VARDESC -_BLOCKDESC.fields_by_name['ops'].message_type = _OPDESC -_PROGRAMDESC.fields_by_name['blocks'].message_type = _BLOCKDESC -_PROGRAMDESC.fields_by_name['version'].message_type = _VERSION -DESCRIPTOR.message_types_by_name['Version'] = _VERSION -DESCRIPTOR.message_types_by_name['OpDesc'] = _OPDESC -DESCRIPTOR.message_types_by_name['OpProto'] = _OPPROTO -DESCRIPTOR.message_types_by_name['VarType'] = _VARTYPE -DESCRIPTOR.message_types_by_name['VarDesc'] = _VARDESC -DESCRIPTOR.message_types_by_name['BlockDesc'] = _BLOCKDESC -DESCRIPTOR.message_types_by_name['ProgramDesc'] = _PROGRAMDESC -DESCRIPTOR.enum_types_by_name['AttrType'] = _ATTRTYPE - -Version = _reflection.GeneratedProtocolMessageType('Version', (_message.Message,), dict( - DESCRIPTOR = _VERSION, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.Version) - )) -_sym_db.RegisterMessage(Version) - -OpDesc = _reflection.GeneratedProtocolMessageType('OpDesc', (_message.Message,), dict( - - Attr = _reflection.GeneratedProtocolMessageType('Attr', (_message.Message,), dict( - DESCRIPTOR = _OPDESC_ATTR, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc.Attr) - )) - , - - Var = _reflection.GeneratedProtocolMessageType('Var', (_message.Message,), dict( - DESCRIPTOR = _OPDESC_VAR, - __module__ = 
'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc.Var) - )) - , - DESCRIPTOR = _OPDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc) - )) -_sym_db.RegisterMessage(OpDesc) -_sym_db.RegisterMessage(OpDesc.Attr) -_sym_db.RegisterMessage(OpDesc.Var) - -OpProto = _reflection.GeneratedProtocolMessageType('OpProto', (_message.Message,), dict( - - Var = _reflection.GeneratedProtocolMessageType('Var', (_message.Message,), dict( - DESCRIPTOR = _OPPROTO_VAR, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto.Var) - )) - , - - Attr = _reflection.GeneratedProtocolMessageType('Attr', (_message.Message,), dict( - DESCRIPTOR = _OPPROTO_ATTR, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto.Attr) - )) - , - DESCRIPTOR = _OPPROTO, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto) - )) -_sym_db.RegisterMessage(OpProto) -_sym_db.RegisterMessage(OpProto.Var) -_sym_db.RegisterMessage(OpProto.Attr) - -VarType = _reflection.GeneratedProtocolMessageType('VarType', (_message.Message,), dict( - - TensorDesc = _reflection.GeneratedProtocolMessageType('TensorDesc', (_message.Message,), dict( - DESCRIPTOR = _VARTYPE_TENSORDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.TensorDesc) - )) - , - - LoDTensorDesc = _reflection.GeneratedProtocolMessageType('LoDTensorDesc', (_message.Message,), dict( - DESCRIPTOR = _VARTYPE_LODTENSORDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.LoDTensorDesc) - )) - , - - LoDTensorArrayDesc = _reflection.GeneratedProtocolMessageType('LoDTensorArrayDesc', (_message.Message,), dict( - DESCRIPTOR = _VARTYPE_LODTENSORARRAYDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.LoDTensorArrayDesc) - )) - , - - ReaderDesc = _reflection.GeneratedProtocolMessageType('ReaderDesc', (_message.Message,), dict( - DESCRIPTOR = _VARTYPE_READERDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.ReaderDesc) - )) - , - - Tuple = _reflection.GeneratedProtocolMessageType('Tuple', (_message.Message,), dict( - DESCRIPTOR = _VARTYPE_TUPLE, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.Tuple) - )) - , - DESCRIPTOR = _VARTYPE, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType) - )) -_sym_db.RegisterMessage(VarType) -_sym_db.RegisterMessage(VarType.TensorDesc) -_sym_db.RegisterMessage(VarType.LoDTensorDesc) -_sym_db.RegisterMessage(VarType.LoDTensorArrayDesc) -_sym_db.RegisterMessage(VarType.ReaderDesc) -_sym_db.RegisterMessage(VarType.Tuple) - -VarDesc = _reflection.GeneratedProtocolMessageType('VarDesc', (_message.Message,), dict( - DESCRIPTOR = _VARDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.VarDesc) - )) -_sym_db.RegisterMessage(VarDesc) - -BlockDesc = _reflection.GeneratedProtocolMessageType('BlockDesc', (_message.Message,), dict( - DESCRIPTOR = _BLOCKDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.BlockDesc) - )) -_sym_db.RegisterMessage(BlockDesc) - -ProgramDesc = 
_reflection.GeneratedProtocolMessageType('ProgramDesc', (_message.Message,), dict( - DESCRIPTOR = _PROGRAMDESC, - __module__ = 'framework_pb2' - # @@protoc_insertion_point(class_scope:paddle.framework.proto.ProgramDesc) - )) -_sym_db.RegisterMessage(ProgramDesc) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) -# @@protoc_insertion_point(module_scope) diff --git a/tensorflow2fluid/tf2fluid/graph.py b/tensorflow2fluid/tf2fluid/graph.py deleted file mode 100644 index 79b839c..0000000 --- a/tensorflow2fluid/tf2fluid/graph.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from utils import * -import collections - - -class GraphNode(object): - def __init__(self, layer, layer_name=None): - self.inputs = list() - self.outputs = list() - self.layer = layer - self.ref_name = None - self.output_name = None - if layer_name is not None: - self.layer_name = layer_name - else: - self.layer_name = layer.name - - def __hash__(self): - return hash(self.layer.name) - - def __eq__(self, other): - if self.layer.name == other.layer.name: - return True - return False - - -class Graph(object): - def __init__(self, model): - self.node_map = collections.OrderedDict() - self.input_nodes = list() - self.output_nodes = list() - self.topological_sort = list() - self.model = model - self.name_generator = NameGenerator() - - def build(self, input_format): - self._make_input_nodes() - self._make_output_nodes() - self._get_topological_sort() - self._gen_newname_for_nodes() - - def _make_input_nodes(self): - for name, node in self.node_map.items(): - if len(node.outputs) == 0 and len(node.inputs) == 0: - continue - node.left_inputs = len(node.inputs) - if len(node.inputs) == 0: - self.input_nodes.append(name) - - def _make_output_nodes(self): - for name, node in self.node_map.items(): - if len(node.outputs) == 0 and len(node.inputs) == 0: - continue - if len(node.outputs) == 0: - self.output_nodes.append(name) - - def _get_topological_sort(self): - self.topological_sort = self.input_nodes[:] - idx = 0 - while idx < len(self.topological_sort): - current_node = self.node_map[self.topological_sort[idx]] - for next_node in current_node.outputs: - next_node_info = self.node_map[next_node.layer_name] - next_node_info.left_inputs -= 1 - if next_node_info.left_inputs == 0: - self.topological_sort.append(next_node.layer_name) - idx += 1 - - def _gen_newname_for_nodes(self): - for node_name in self.topological_sort: - node = self.node_map[node_name] - ref_name = self.name_generator.get_name(node) - - if node.layer_type == 'split' or node.layer_type == 'splitv': - index = '0' - if len(node_name.split(':')) == 2: - index = node_name.split(':')[-1] - ref_name += '[{}]'.format(index) - - self.node_map[node.layer.name].ref_name = ref_name - self.node_map[node.layer.name].output_name = ref_name.split('[')[0] - - for node_name, node in self.node_map.items(): - ref_name 
= self.name_generator.get_name(node) - if node.layer_type == 'split' or node.layer_type == 'splitv': - index = '0' - if len(node_name.split(':')) == 2: - index = node_name.split(':')[-1] - ref_name += '[{}]'.format(index) - self.node_map[node_name].ref_name = ref_name - self.node_map[node_name].output_name = ref_name.split('[')[0] - - def get_node(self, name): - if name not in self.node_map: - raise Exception("Graph doesn't have node [%s]." % name) - else: - return self.node_map[name] - - def _make_connection(self, src, dst): - if src.layer_name == dst.layer_name or src.layer_name not in \ - self.node_map or dst.layer_name not in self.node_map: - raise Exception('Warning: Node not exist or there is a self-loop') - self.node_map[dst.layer_name].inputs.append(src) - self.node_map[src.layer_name].outputs.append(dst) diff --git a/tensorflow2fluid/tf2fluid/model_loader.py b/tensorflow2fluid/tf2fluid/model_loader.py deleted file mode 100644 index e81b804..0000000 --- a/tensorflow2fluid/tf2fluid/model_loader.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.fluid as fluid -import sys - - -class ModelLoader(object): - def __init__(self, model_dir, use_cuda=False): - sys.path.append(model_dir) - mymodel = __import__("mymodel") - self.model = mymodel.Model() - self.model.build() - self.inputs = self.model.inputs - self.outputs = self.model.outputs - if use_cuda: - self.exe = fluid.Executor(fluid.CUDAPlace(0)) - else: - self.exe = fluid.Executor(fluid.CPUPlace()) - self.exe.run(fluid.default_startup_program()) - - var_list = list() - global_block = fluid.default_main_program().global_block() - with open(model_dir + "/save_var.list") as f: - for line in f: - try: - var = global_block.var(line.strip()) - var_list.append(var) - except: - pass - fluid.io.load_vars(self.exe, model_dir, vars=var_list) - self.program = fluid.default_main_program() - - def save_inference_model(self, save_dir): - fluid.io.save_inference_model(save_dir, self.model.inputs, - self.model.outputs, self.exe) - - def inference(self, feed_dict): - result = self.exe.run( - self.program, feed=feed_dict, fetch_list=self.model.outputs) - return result diff --git a/tensorflow2fluid/tf2fluid/paddle_emitter.py b/tensorflow2fluid/tf2fluid/paddle_emitter.py deleted file mode 100644 index 42a98d2..0000000 --- a/tensorflow2fluid/tf2fluid/paddle_emitter.py +++ /dev/null @@ -1,1080 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from tensorflow.core.framework import attr_value_pb2 -from tensorflow.python.framework import tensor_util -from utils import * -from functools import * -from six import string_types as _string_types -import framework_pb2 as framework -import logging -import math -import struct -import numpy -logging.basicConfig(level=logging.DEBUG) - - -class PaddleEmitter(object): - def __init__(self, parser, save_dir): - self.graph = parser.tf_graph - self.weights = parser.weights - self.infer = parser.infer - self.inputs_sample_data = dict() - self.save_dir = save_dir - self.body_code = "" - self.tab = " " * 4 - - self.outputs = parser.outputs - self.inputs = parser.inputs - outputs = list() - for output in self.outputs: - while True: - if output in self.graph.identity_relation: - output = self.graph.identity_relation[output] - else: - break - outputs.append(output) - self.outputs = outputs - - @staticmethod - def compute_padding_size(in_size, filter_size, stride): - new_size = int(math.ceil(in_size * 1.0 / stride)) - pad_size = (new_size - 1) * stride + filter_size - in_size - pad_0 = int(pad_size / 2) - pad_1 = pad_size - pad_0 - return [pad_0, pad_1] - - def check_op(self, node_name_list): - uncovered_ops = set() - for name in node_name_list: - node = self.graph.get_node(name) - if len(node.inputs) == 0 and len(node.outputs) == 0: - continue - if not hasattr(self, "emit_" + node.layer_type): - uncovered_ops.add(node.layer_type) - if len(uncovered_ops) > 0: - logging.error("{} OP are not supported".format(len(uncovered_ops))) - for op in uncovered_ops: - logging.error("Unsupported OP: {}".format(op)) - return False - return True - - # trick method to solve NHWC problem - def get_axis(self, node1, node2): - shape1 = node1.shape_dim_size - shape2 = node2.shape_dim_size - if shape1 == 4 and shape2 == 1 and node1.data_format == NHWC: - axis = 1 - elif shape2 == 4 and shape1 == 1 and node2.data_format == NHWC: - axis = 1 - else: - axis = -1 - return axis - - def elementwise(self, node, op): - data1 = node.inputs[0] - data2 = node.inputs[1] - axis = self.get_axis(data1, data2) - shape1 = self.infer.get_tensor_shape(data1.layer) - shape2 = self.infer.get_tensor_shape(data2.layer) - - op = "elementwise_" + op - if shape2.shape[0] == shape1.shape[0]: - if (shape1 == shape2).all(): - param_attr = { - 'x': data1.ref_name, - 'y': data2.ref_name, - } - node.code.add_layer(op, None, node.output_name, param_attr) - return - - index1_not_one = list(numpy.argwhere(shape1 != 1).flatten()) - index1_one = list(numpy.argwhere(shape1 == 1).flatten()) - perm1 = range(shape1.shape[0]) - perm2 = range(shape1.shape[0]) - if len(index1_one) != 0: - perm1 = index1_not_one + index1_one - - index2_not_one = list(numpy.argwhere(shape2 != 1).flatten()) - index2_one = list(numpy.argwhere(shape2 == 1).flatten()) - if len(index2_one) != 0: - perm2 = index2_not_one + index2_one - - perm = list(numpy.array(perm1)[numpy.array(perm2)]) - if perm != range(shape1.shape[0]): - param_attr = {"perm": perm} - node.code.add_layer("transpose", data1.ref_name, "temp1", - param_attr) - node.code.add_layer("transpose", data2.ref_name, "temp2", - param_attr) - if len(index2_one) > len(index1_one): - param_attr = {"x": "temp1", "y": "temp2"} - else: - param_attr = {"x": "temp2", "y": "temp1"} - node.code.add_layer(op, None, node.output_name, param_attr) - perm = sorted(range(len(perm)), key=lambda k: perm[k]) - param_attr = {"perm": perm} - 
node.code.add_layer("transpose", node.output_name, - node.output_name, param_attr) - else: - if len(index2_one) > len(index1_one): - param_attr = {"x": data1.ref_name, "y": data2.ref_name} - else: - param_attr = {"x": data2.ref_name, "y": data1.ref_name} - node.code.add_layer(op, None, node.output_name, param_attr) - else: - param_attr = { - "x": data1.ref_name, - "y": data2.ref_name, - "axis": axis - } - if shape2.shape[0] > shape1.shape[0]: - param_attr = { - "x": data2.ref_name, - "y": data1.ref_name, - "axis": axis - } - node.code.add_layer(op, None, node.output_name, param_attr) - - def export_weights(self, weight, paddle_var_name, dir): - self.save_var_set.add(paddle_var_name) - numpy_dtype_map = { - "int16": framework.VarType.INT16, - "int32": framework.VarType.INT32, - "int64": framework.VarType.INT64, - "float16": framework.VarType.FP16, - "float32": framework.VarType.FP32, - "float64": framework.VarType.FP64 - } - struct_write_format = { - "int16": "h", - "int32": "i", - "int64": "q", - "float16": "e", - "float32": "f", - "float64": "d" - } - shape = weight.shape - filew = open(dir + "/" + paddle_var_name, "wb") - filew.write(struct.pack('i', 0)) - filew.write(struct.pack('L', 0)) - filew.write(struct.pack('i', 0)) - tensor_desc = framework.VarType.TensorDesc() - if str(weight.dtype) in numpy_dtype_map: - tensor_desc.data_type = numpy_dtype_map[str(weight.dtype)] - else: - raise Exception("Unexpected array dtype [{}]".format(weight.dtype)) - tensor_desc.dims.extend(shape) - desc_size = tensor_desc.ByteSize() - filew.write(struct.pack('i', desc_size)) - filew.write(tensor_desc.SerializeToString()) - weight.tofile(filew) - filew.close() - - @property - def header_code(self): - code = list() - code.append("import paddle.fluid.layers as layers") - code.append("import paddle.fluid as fluid") - code.append("import numpy") - code.append("") - code.append("class Model(object):") - code.append(" def build(self):") - return code - - def add_codes(self, indent, codes): - if isinstance(codes, _string_types): - codes = codes.strip().split("\n") - if not isinstance(codes, list): - raise Exception("Unexpected error!") - for code in codes: - self.body_code += (self.tab * indent) + code + "\n" - - def run(self): - node = self.graph.tf_graph.node[0] - self.add_codes(0, self.header_code) - - self.save_var_set = set() - - # filter branch nodes, like 'split:1' - translate_nodes = [] - for node in self.graph.topological_sort: - if node.count(':') == 0: - translate_nodes.append(node) - - # check if exists unsupported OPs in model - if not self.check_op(translate_nodes): - return - - # ref_name.txt record relationship between - # paddle value name and tensorflow value name - ref_name_recorder = open(self.save_dir + "/ref_name.info", 'w') - - total_nodes_num = len(translate_nodes) - translated_nodes_count = 1 - for node in translate_nodes: - logging.info("TotalNum:{},TraslatedNum:{},CurrentNode:{}".format( - total_nodes_num, translated_nodes_count, node)) - current_node = self.graph.get_node(node) - ref_name_recorder.write("{}\t{}\n".format( - current_node.layer_name, current_node.output_name)) - translated_nodes_count += 1 - - # skip isolated nodes - if len(current_node.inputs) == 0 and len( - current_node.outputs) == 0: - continue - - op = current_node.layer_type - if hasattr(self, "emit_" + op): - func = getattr(self, "emit_" + op) - func(current_node) - else: - raise Exception("Unknow node op: {}".format(op)) - ref_name_recorder.close() - - # merge all the generated python codes - for node in 
translate_nodes: - codes = self.graph.get_node(node).code.gen_codes() - self.add_codes(2, codes) - - # add return value codes - outs = [] - for node in self.outputs: - outs.append(self.graph.get_node(node).output_name) - self.add_codes( - 2, "# {} : {}".format( - self.graph.get_node(node).output_name, - self.graph.get_node(node).layer_name)) - input_code = "self.inputs = {}".format([str(s) for s in self.inputs]) - output_code = "self.outputs = [{}]".format(", ".join(outs)) - self.add_codes(2, input_code) - self.add_codes(2, output_code) - - # write python code to file "my_model.py" - filew = open(self.save_dir + "/mymodel.py", 'w') - filew.write(self.body_code) - filew.close() - - # file "save_var.list" records name of dumped variables - filew = open(self.save_dir + "/save_var.list", 'w') - for var in self.save_var_set: - filew.write(var + '\n') - filew.close() - - logging.info("Model translated!") - return self.body_code - - def emit_placeholder(self, node): - shape = list(self.infer.get_tensor_shape(node.layer)) - - self.inputs_sample_data[node.layer_name] = [] - if shape[0] < 0 or shape[0] is None: - self.batch_node = node - for i in range(1, 4): - sample_data = numpy.random.random_sample([i] + shape[1:]) - self.inputs_sample_data[node.layer_name].append(sample_data) - else: - for i in range(1, 4): - sample_data = numpy.random.random_sample(shape) - self.inputs_sample_data[node.layer_name].append(sample_data) - - if node.data_format == NHWC and len(shape) == 4: - shape = [shape[0], shape[3], shape[1], shape[2]] - - param_attr = { - "name": "\'{}\'".format(node.ref_name), - "shape": shape, - "dtype": "\'{}\'".format(node.dtype), - "append_batch_size": False - } - node.code.add_layer("data", None, node.output_name, param_attr) - - def emit_const(self, node): - value = self.infer.get_const_tensor_value(node.layer) - shape = list(value.shape) - - try: - dtype = node.dtype - except: - return [] - - node.code.add_str("#{} {} {}".format(node.layer_name, node.ref_name, - value.shape)) - if value.size == 1: - param_attr = { - "shape": [1], - "value": value.flatten()[0], - "dtype": "\'{}\'".format(dtype), - } - node.code.add_layer("fill_constant", None, node.output_name, - param_attr) - else: - param_attr = { - "shape": shape, - "name": "\'{}\'".format(node.ref_name), - "dtype": "\'{}\'".format(dtype) - } - if node.dtype.startswith('int'): - param_attr["default_initializer"] = \ - "fluid.initializer.Constant(0)" - node.code.add_layer("create_parameter", None, node.output_name, - param_attr) - self.export_weights(value, node.ref_name, self.save_dir) - - def emit_conv2d(self, node): - data = node.inputs[0] - kernel = node.inputs[1] - - if len(kernel.outputs) == 1: - kernel.code.clear() - - padding_mode = node.get_attr("padding") - strides = node.get_attr("strides")[2:4] - k_shape = list(self.infer.get_tensor_shape(kernel.layer)) - input_shape = list(self.infer.get_tensor_shape(data.layer)) - input_h, input_w = input_shape[2:4] - k_h, k_w, channel, kernel_num = k_shape - if node.data_format == NHWC: - input_h, input_w = input_shape[1:3] - strides = node.get_attr("strides")[1:3] - - if kernel.layer_name in self.weights: - weight = self.weights[kernel.layer_name] - self.weights[kernel.layer_name] = numpy.transpose( - weight, (3, 2, 0, 1)) - self.export_weights(self.weights[kernel.layer_name], - kernel.ref_name, self.save_dir) - - conv2d_param = { - "num_filters": kernel_num, - "filter_size": [k_h, k_w], - "stride": strides, - "param_attr": "\'{}\'".format(kernel.ref_name), - "bias_attr": False - } - - 
if padding_mode == SAME: - pad_h = self.compute_padding_size(input_h, k_h, strides[0]) - pad_w = self.compute_padding_size(input_w, k_w, strides[1]) - if len(set(pad_h)) == 1 and len(set(pad_w)) == 1: - conv2d_param["padding"] = [pad_h[0], pad_w[0]] - node.code.add_layer("conv2d", data.ref_name, node.output_name, - conv2d_param) - else: - pad_param = {"paddings": pad_h + pad_w} - node.code.add_layer("pad2d", data.ref_name, node.output_name, - pad_param) - node.code.add_layer("conv2d", node.output_name, - node.output_name, conv2d_param) - else: - node.code.add_layer("conv2d", data.ref_name, node.output_name, - conv2d_param) - - def emit_variablev2(self, node): - shape = list(self.infer.get_tensor_shape(node.layer)) - - node.code.add_str("# variable[{}]:\t{}".format(node.output_name, - node.layer_name)) - - if node.layer_name in self.weights: - self.export_weights(self.weights[node.layer_name], node.ref_name, - self.save_dir) - - param_attr = { - "name": "\'{}\'".format(node.ref_name), - "shape": shape, - "dtype": "\'{}\'".format(node.dtype) - } - if node.dtype.startswith('int'): - param_attr["default_initializer"] = "fluid.initializer.Constant(0)" - node.code.add_layer("create_parameter", None, node.output_name, - param_attr) - - def emit_biasadd(self, node): - data = node.inputs[0] - bias = node.inputs[1] - - if bias.layer_name in self.weights: - self.export_weights(self.weights[bias.layer_name], bias.ref_name, - self.save_dir) - - self.emit_variablev2(bias) - param_attr = {"x": data.ref_name, "y": bias.ref_name, "axis": 1} - node.code.add_layer("elementwise_add", None, node.output_name, - param_attr) - - def emit_relu(self, node): - data = node.inputs[0] - node.code.add_layer("relu", data.ref_name, node.output_name) - - def emit_maxpool(self, node): - data = node.inputs[0] - padding_mode = node.get_attr("padding") - input_shape = list(self.infer.get_tensor_shape(data.layer)) - input_h, input_w = input_shape[2:4] - strides = node.get_attr("strides")[2:4] - pool_size = node.get_attr("ksize")[2:4] - if node.data_format == NHWC: - input_h, input_w = input_shape[1:3] - strides = node.get_attr("strides")[1:3] - pool_size = node.get_attr("ksize")[1:3] - - pool_param = { - "pool_size": pool_size, - "pool_type": "\'max\'", - "pool_stride": strides, - } - - if padding_mode == SAME: - pad_h = self.compute_padding_size(input_h, pool_size[0], - strides[0]) - pad_w = self.compute_padding_size(input_w, pool_size[1], - strides[1]) -# pad_right = pad_w[0] + pad_w[1] -# pad_bottom = pad_h[0] + pad_h[1] - if (pad_h[0] + pad_h[1]) % 2 != 0: - pad_h[1] += pad_h[0] - pad_h[0] = 0 - if (pad_w[0] + pad_w[1]) % 2 != 0: - pad_w[1] += pad_w[0] - pad_w[0] = 0 - #padding = [0, pad_bottom, 0, pad_right] - padding = pad_h + pad_w - pad_param = {"paddings": padding, "pad_value":-1000000.0} - node.code.add_layer("pad2d", data.ref_name, node.output_name, - pad_param) - node.code.add_layer("pool2d", node.output_name, node.output_name, - pool_param) - else: - node.code.add_layer("pool2d", data.ref_name, node.output_name, - pool_param) - - def emit_squeeze(self, node): - data = node.inputs[0] - axis = node.get_attr("squeeze_dims") - input_shape_len = data.shape_dim_size - if node.data_format == NHWC and input_shape_len == 4: - for i in range(0, len(axis)): - if axis[i] > 0: - axis[i] = (axis[i] + 1) % 4 + int((axis[i] + 1) / 4) - param_attr = {"axes": axis} - node.code.add_layer("squeeze", data.ref_name, node.output_name, - param_attr) - - def emit_add(self, node): - return self.elementwise(node, "add") - - def 
emit_mean(self, node): - data = node.inputs[0] - reduce_idx = node.inputs[1] - reduce_idx.code.clear() - idxs = list( - self.infer.get_const_tensor_value(reduce_idx.layer).flatten()) - data_shape_len = data.shape_dim_size - keep_dims = node.layer.attr['keep_dims'].b - if node.data_format == NHWC and data_shape_len == 4: - for i in range(len(idxs)): - if idxs[i] > 0: - idxs[i] = (idxs[i] + 1) % 4 + int((idxs[i] + 1) / 4) - param_attr = {"dim": list(idxs), "keep_dim": keep_dims} - node.code.add_layer("reduce_mean", data.ref_name, node.output_name, - param_attr) - - def emit_fusedbatchnorm(self, node): - data = node.inputs[0] - gamma = node.inputs[1] - beta = node.inputs[2] - moving_mean = node.inputs[3] - moving_variance = node.inputs[4] - if len(gamma.outputs) == 1: - gamma.code.clear() - if len(beta.outputs) == 1: - beta.code.clear() - if len(moving_mean.outputs) == 1: - moving_mean.code.clear() - if len(moving_variance.outputs) == 1: - moving_variance.code.clear() - - epsilon = round(node.get_attr('epsilon'), 6) - is_training = node.get_attr('is_training') - - if gamma.layer_name in self.weights: - self.export_weights(self.weights[gamma.layer_name], gamma.ref_name, - self.save_dir) - if beta.layer_name in self.weights: - self.export_weights(self.weights[beta.layer_name], beta.ref_name, - self.save_dir) - if moving_mean.layer_name in self.weights: - self.export_weights(self.weights[moving_mean.layer_name], - moving_mean.ref_name, self.save_dir) - if moving_variance.layer_name in self.weights: - self.export_weights(self.weights[moving_variance.layer_name], - moving_variance.ref_name, self.save_dir) - - param_attr = { - "epsilon": epsilon, - "param_attr": "\'{}\'".format(gamma.ref_name), - "bias_attr": "\'{}\'".format(beta.ref_name), - "moving_mean_name": "\'{}\'".format(moving_mean.ref_name), - "moving_variance_name": "\'{}\'".format(moving_variance.ref_name), - "is_test": not is_training - } - node.code.add_layer("batch_norm", data.ref_name, node.output_name, - param_attr) - - def emit_concatv2(self, node): - input_shape_len = node.inputs[0].shape_dim_size - axis = node.inputs[-1] - axis.code.clear() - axis = self.infer.get_const_tensor_value(axis.layer) - if axis < 0: - axis = input_shape_len + axis - if node.data_format == NHWC and input_shape_len == 4: - if axis > 0: - axis = (axis + 1) % 4 + int((axis + 1) / 4) - num_tensor = len(node.inputs) - 1 - input_list = [input.ref_name for input in node.inputs[:num_tensor]] - input_list = "[{}]".format(", ".join(input_list)) - param_attr = {"axis": axis} - node.code.add_layer("concat", input_list, node.output_name, param_attr) - - def emit_avgpool(self, node): - data = node.inputs[0] - padding_mode = node.get_attr("padding") - input_shape = list(self.infer.get_tensor_shape(data.layer)) - strides = node.get_attr("strides")[2:4] - pool_size = node.get_attr("ksize")[2:4] - input_h, input_w = input_shape[2:4] - - if node.data_format == NHWC: - strides = node.get_attr("strides")[1:3] - pool_size = node.get_attr("ksize")[1:3] - input_h, input_w = input_shape[1:3] - - param_attr = { - "pool_size": pool_size, - "pool_stride": strides, - "pool_type": "\'avg\'" - } - - if padding_mode == SAME: - pad_h = self.compute_padding_size(input_h, pool_size[0], - strides[0]) - pad_w = self.compute_padding_size(input_w, pool_size[1], - strides[0]) - if len(set(pad_h)) == 1 and len(set(pad_w)) == 1: - padding = [pad_h[0], pad_w[0]] - param_attr["pool_padding"] = padding - else: - pad_param = {"paddings": pad_h + pad_w} - node.code.add_layer("pad2d", data.ref_name, 
node.output_name, - pad_param) - node.code.add_layer("pool2d", node.output_name, - node.output_name, param_attr) - return - node.code.add_layer("pool2d", data.ref_name, node.output_name, - param_attr) - - def emit_rsqrt(self, node): - data = node.inputs[0] - pow_param = {"factor": -1.0} - node.code.add_layer("sqrt", data.ref_name, node.output_name) - node.code.add_layer("pow", node.output_name, node.output_name, - pow_param) - - def emit_mul(self, node): - return self.elementwise(node, "mul") - - def emit_sub(self, node): - data1 = node.inputs[0] - data2 = node.inputs[1] - axis = self.get_axis(data1, data2) - data1_shape = list(self.infer.get_tensor_shape(data1.layer)) - data2_shape = list(self.infer.get_tensor_shape(data2.layer)) - param_attr = {"x": data1.ref_name, "y": data2.ref_name, "axis": axis} - if len(data1_shape) == 4 and len(data2_shape) == 4 \ - and node.data_format == NHWC: - if data1_shape[-1] != data2_shape[-1]: - node.code.add_layer("transpose", data1.ref_name, "temp1", - {"perm": [0, 2, 3, 1]}) - node.code.add_layer("transpose", data2.ref_name, "temp2", - {"perm": [0, 2, 3, 1]}) - param_attr = {"x": "temp1", "y": "temp2", "axis": -1} - node.code.add_layer("elementwise_sub", None, node.output_name, - param_attr) - node.code.add_layer("transpose", node.output_name, - node.output_name, {"perm": [0, 3, 1, 2]}) - else: - node.code.add_layer("elementwise_sub", None, node.output_name, - param_attr) - - def emit_shape(self, node): - data = node.inputs[0] - input_shape_len = data.shape_dim_size - if input_shape_len == 4 and node.data_format == NHWC: - param = {"perm": [0, 2, 3, 1]} - node.code.add_layer("transpose", data.ref_name, node.output_name, - param) - node.code.add_layer("shape", node.output_name, node.output_name) - else: - node.code.add_layer("shape", data.ref_name, node.output_name) - param = {"dtype": "\'int32\'"} - node.code.add_layer("cast", node.output_name, node.output_name, param) - - def emit_pad(self, node): - data = node.inputs[0] - padding = node.inputs[1] - padding.code.clear() - padding = padding.layer.attr['value'].tensor - padding = tensor_util.MakeNdarray(padding).astype('int32') - if node.data_format == NHWC and padding.shape[0] == 4: - padding = padding[[0, 3, 1, 2]] - param_attr = {"paddings": list(padding.flatten())} - node.code.add_layer("pad", data.ref_name, node.output_name, param_attr) - - def emit_stridedslice(self, node): - data = node.inputs[0] - begin = node.inputs[1] - end = node.inputs[2] - strides = node.inputs[3] - begin.code.clear() - end.code.clear() - strides.code.clear() - - begin = list(self.infer.get_const_tensor_value(begin.layer).flatten()) - end = list(self.infer.get_const_tensor_value(end.layer).flatten()) - strides = list( - self.infer.get_const_tensor_value(strides.layer).flatten()) - - for i in range(len(strides)): - assert strides[i] == 1 - - if len(set(end)) == 1 and end[0] == 0: - output_shape = list(self.infer.get_tensor_shape(node.layer)) - if node.data_format == NHWC and len(output_shape) == 4: - output_shape = [output_shape[0], - output_shape[3], - output_shape[1], - output_shape[2]] - begin = [begin[0], begin[3], begin[1], begin[2]] - param = {"shape":output_shape, "offsets":begin} - node.code.add_layer("crop", data.ref_name, - node.output_name, param) - else: - param = {"axes": range(len(begin)), "starts": begin, "ends": end} - node.code.add_layer("slice", data.ref_name, - node.output_name, param) - - def emit_resizenearestneighbor(self, node): - data = node.inputs[0] - resize_shape = node.inputs[1] - 
resize_shape.code.clear() - align_corners = node.get_attr('align_corners') - - resize_shape = list(self.infer.get_shape_tensor(resize_shape.layer)) - param_attr = { - "align_corners": align_corners, - "out_shape": resize_shape - } - node.code.add_layer("resize_nearest", data.ref_name, node.output_name, - param_attr) - - def emit_maximum(self, node): - return self.elementwise(node, "max") - - def emit_minimum(self, node): - return self.elementwise(node, "min") - - def emit_sigmoid(self, node): - data = node.inputs[0] - node.code.add_layer("sigmoid", data.ref_name, node.output_name) - - def emit_pack(self, node): - inputs = [input.ref_name for input in node.inputs] - inputs = "[{}]".format(", ".join(inputs)) - node.code.add_layer("stack", inputs, node.output_name) - - def emit_reshape(self, node): - data = node.inputs[0] - shape = node.inputs[1] - input_shape_len = data.shape_dim_size - output_shape = list(self.infer.get_tensor_shape(node.layer)) - - shape = self.infer.get_shape_tensor(shape.layer, output_shape) - - reshape_param = {"shape": list(shape)} - if node.data_format == NHWC and input_shape_len == 4: - param_attr = {"perm": [0, 2, 3, 1]} - node.code.add_layer("transpose", data.ref_name, node.output_name, - param_attr) - node.code.add_layer("reshape", node.output_name, node.output_name, - reshape_param) - if len(shape) == 4: - param_attr = {"perm": [0, 3, 1, 2]} - node.code.add_layer("transpose", node.output_name, - node.output_name, param_attr) - else: - node.code.add_layer("reshape", data.ref_name, node.output_name, - reshape_param) - - def emit_conv2dbackpropinput(self, node): - output_shape = node.inputs[0] - kernel = node.inputs[1] - data = node.inputs[2] - if len(kernel.outputs) == 1: - kernel.code.clear() - output_shape.code.clear() - padding_mode = node.get_attr("padding") - strides = node.get_attr("strides")[2:4] - k_shape = self.infer.get_tensor_shape(kernel.layer) - k_h, k_w, k_num, channel = k_shape - if node.data_format == NHWC: - strides = node.get_attr("strides")[1:3] - - padding = [0, 0] - if padding_mode == SAME: - padding = [int(val) for val in [(k_h - strides[0]) / 2, \ - (k_w - strides[1]) / 2]] - - if kernel.layer_name in self.weights: - weight = self.weights[kernel.layer_name] - self.weights[kernel.layer_name] = numpy.transpose( - weight, (3, 2, 0, 1)) - self.export_weights(self.weights[kernel.layer_name], - kernel.ref_name, self.save_dir) - - output_shape = list(self.infer.get_shape_tensor(output_shape.layer)) - if node.data_format == NHWC and len(output_shape) == 4: - output_shape = [ - output_shape[0], output_shape[3], output_shape[1], - output_shape[2] - ] - - param_attr = { - "num_filters": k_num, - "filter_size": [k_h, k_w], - "padding": padding, - "stride": strides, - "param_attr": "\'{}\'".format(kernel.ref_name), - "bias_attr": False - } - node.code.add_layer("conv2d_transpose", data.ref_name, - node.output_name, param_attr) - if padding_mode == SAME: - param_attr = {"shape": list(output_shape)} - node.code.add_layer("crop", node.output_name, node.output_name, - param_attr) - - def emit_depthwiseconv2dnative(self, node): - data = node.inputs[0] - kernel = node.inputs[1] - if len(kernel.outputs) == 1: - kernel.code.clear() - - padding_mode = node.get_attr("padding") - strides = node.get_attr("strides")[2:4] - k_shape = self.infer.get_tensor_shape(kernel.layer) - input_shape = self.infer.get_tensor_shape(data.layer) - input_h, input_w = input_shape[2:4] - k_h, k_w, in_channels, channel_multiplier = k_shape - if node.data_format == NHWC: - strides = 
node.get_attr("strides")[1:3] - input_h, input_w = input_shape[1:3] - groups = channel_multiplier * in_channels - - if kernel.layer_name in self.weights: - weight = self.weights[kernel.layer_name] - self.weights[kernel.layer_name] = numpy.transpose( - weight, (2, 3, 0, 1)) - self.export_weights(self.weights[kernel.layer_name], - kernel.ref_name, self.save_dir) - conv_param = { - "num_filters": in_channels, - "filter_size": [k_h, k_w], - "stride": strides, - "groups": groups, - "param_attr": "\'{}\'".format(kernel.ref_name), - "bias_attr": False - } - if padding_mode == SAME: - pad_h = self.compute_padding_size(input_h, k_h, strides[0]) - pad_w = self.compute_padding_size(input_w, k_w, strides[1]) - if len(set(pad_h)) == 1 and len(set(pad_w)) == 1: - padding = [pad_h[0], pad_w[0]] - conv_param["padding"] = padding - node.code.add_layer("conv2d", data.ref_name, node.output_name, - conv_param) - else: - pad_param = {"paddings": pad_h + pad_w} - node.code.add_layer("pad2d", data.ref_name, node.output_name, - pad_param) - node.code.add_layer("conv2d", node.output_name, - node.output_name, conv_param) - else: - node.code.add_layer("conv2d", data.ref_name, node.output_name, - conv_param) - - def emit_softmax(self, node): - data = node.inputs[0] - node.code.add_layer("softmax", data.ref_name, node.output_name) - - def emit_matmul(self, node): - data0 = node.inputs[0] - data1 = node.inputs[1] - transpose_a = node.get_attr('transpose_a') - transpose_b = node.get_attr('transpose_b') - param_attr = { - "x": data0.ref_name, - "y": data1.ref_name, - "transpose_x": transpose_a, - "transpose_y": transpose_b - } - node.code.add_layer("matmul", None, node.output_name, param_attr) - - def emit_transpose(self, node): - data = node.inputs[0] - perm = node.inputs[1] - perm.code.clear() - perm = list(self.infer.get_shape_tensor(perm.layer)) - if node.data_format == NHWC and len(perm) == 4: - if perm == [0, 3, 1, 2]: - self.graph.set_data_format(node, NCHW) - node.code.add_str("{} = {}".format(node.output_name, - data.ref_name)) - else: - raise Exception("Unexpected situation in OP transpose") - elif node.data_format == NCHW and len(perm) == 4: - if perm == [0, 2, 3, 1]: - self.graph.set_data_format(node, NHWC) - node.code.add_str("{} = {}".format(node.output_name, - data.ref_name)) - else: - raise Exception("Unexpected situation in OP transpose") - else: - param_attr = {"perm": perm} - node.code.add_layer("transpose", data.ref_name, node.output_name, - param_attr) - - def emit_randomuniform(self, node): - shape = node.inputs[0] - shape = self.infer.get_shape_tensor(shape.layer) - if node.data_format == NHWC and len(shape) == 4: - shape = shape[[0, 3, 1, 2]] - batch_index = list(numpy.argwhere(shape < 0).flatten()) - shape = list(shape) - param_attr = { - "shape": shape, - "dtype": "\'float32\'", - "min": 0.00001, - "max": 0.99999 - } - if len(batch_index) > 1: - raise Exception("More than one dimension value less than zero") - if len(batch_index) == 0: - node.code.add_layer("uniform_random", None, node.output_name, - param_attr) - else: - param_attr["input_dim_idx"] = batch_index[0] - node.code.add_layer("uniform_random_batch_size_like", - self.batch_node.ref_name, node.output_name, - param_attr) - - def emit_floor(self, node): - data = node.inputs[0] - node.code.add_layer("floor", data.ref_name, node.output_name) - - def emit_exp(self, node): - data = node.inputs[0] - node.code.add_layer("exp", data.ref_name, node.output_name) - - def emit_floordiv(self, node): - self.emit_div(node) - param = {"dtype": 
"\'float32\'"} - node.code.add_layer("cast", node.output_name, node.output_name, param) - node.code.add_layer("floor", node.output_name, node.output_name) - - def emit_div(self, node): - data1 = node.inputs[0] - data2 = node.inputs[1] - axis = self.get_axis(data1, data2) - data1_shape = self.infer.get_tensor_shape(data1.layer) - data2_shape = self.infer.get_tensor_shape(data2.layer) - div_param = {"x": data1.ref_name, "y": data2.ref_name, "axis": axis} - if len(data1_shape) == 4 and len(data2_shape) == 4 \ - and node.data_format == NHWC: - if data1_shape[-1] != data2_shape[-1]: - perm = {"perm": [0, 2, 3, 1]} - node.code.add_layer("transpose", data1.ref_name, "temp1", perm) - node.code.add_layer("transpose", data2.ref_name, "temp2", perm) - div_param["x"] = "temp1" - div_param["y"] = "temp2" - div_param["axis"] = -1 - node.code.add_layer("elementwise_div", None, node.output_name, - div_param) - - def emit_realdiv(self, node): - return self.emit_div(node) - - def emit_slice(self, node): - data = node.inputs[0] - begin = node.inputs[1] - size = node.inputs[2] - begin.code.clear() - size.code.clear() - begin = list(self.infer.get_shape_tensor(begin.layer)) - size = list(self.infer.get_shape_tensor(size.layer)) - - input_shape = self.infer.get_tensor_shape(data.layer) - if len(numpy.argwhere(input_shape < 0).flatten()) > 1: - input_shape = list(self.infer.get_tensor_shape(data.layer)) - - assert len(begin) == len(input_shape) and len(size) == len(input_shape) - - if node.data_format == NHWC and len(input_shape) == 4: - begin = [begin[0], begin[3], begin[1], begin[2]] - size = [size[0], size[3], size[1], size[2]] - input_shape = [ - input_shape[0], input_shape[3], input_shape[1], input_shape[2] - ] - - for i in range(len(size)): - if size[i] < 0: - size[i] = input_shape[i] - begin[i] - param_attr = {"shape": size, "offsets": begin} - node.code.add_layer("crop", data.ref_name, node.output_name, - param_attr) - - def emit_sum(self, node): - data = node.inputs[0] - reduce_idx = node.inputs[1] - reduce_idx.code.clear() - idxs = tensor_util.MakeNdarray( - reduce_idx.layer.attr['value'].tensor).astype('int32').flatten() - data_shape_len = data.shape_dim_size - keep_dims = node.layer.attr['keep_dims'].b - if node.data_format == NHWC and data_shape_len == 4: - for i in range(idxs.shape[0]): - if idxs[i] > 0: - idxs[i] = (idxs[i] + 1) % 4 + int((idxs[i] + 1) / 4) - param = {"dim": list(idxs), "keep_dim": keep_dims} - node.code.add_layer("reduce_sum", data.ref_name, node.output_name, - param) - - def emit_max(self, node): - data = node.inputs[0] - reduce_idx = node.inputs[1] - reduce_idx.code.clear() - idxs = tensor_util.MakeNdarray( - reduce_idx.layer.attr['value'].tensor).astype('int32').flatten() - data_shape_len = data.shape_dim_size - keep_dims = node.layer.attr['keep_dims'].b - if node.data_format == NHWC and data_shape_len == 4: - for i in range(idxs.shape[0]): - if idxs[i] > 0: - idxs[i] = (idxs[i] + 1) % 4 + int((idxs[i] + 1) / 4) - param = {"dim": list(idxs), "keep_dim": keep_dims} - node.code.add_layer("reduce_max", data.ref_name, node.output_name, - param) - - def emit_fill(self, node): - shape = node.inputs[0] - shape.code.clear() - value = node.inputs[1] - value.code.clear() - - shape = list(self.infer.get_shape_tensor(shape.layer)) - value = list(self.infer.get_const_tensor_value(value.layer).flatten()) - assert len(value) == 1 - value = value[0] - - if node.data_format == NHWC and len(shape) == 4: - shape = [shape[0], shape[3], shape[1], shape[2]] - - param = { - "shape": shape, - 
"dtype": "\'{}\'".format(value.dtype), - "value": value - } - if shape[0] < 0: - node.code.add_layer("fill_constant_batch_size_like", - self.batch_node.ref_name, node.output_name, - param) - else: - node.code.add_layer("fill_constant", None, node.output_name, param) - - def emit_range(self, node): - start = node.inputs[0] - end = node.inputs[1] - delta = node.inputs[2] - start.code.clear() - end.code.clear() - delta.code.clear() - - start = self.infer.get_const_tensor_value(start.layer) - end = self.infer.get_const_tensor_value(end.layer) - delta = self.infer.get_const_tensor_value(delta.layer) - np_code = "np_array = numpy.arange({}, {}, {}).astype(\'{}\')".format( - start, end, delta, delta.dtype) - node.code.add_str(np_code) - node.code.add_layer("assign", "np_array", node.output_name) - - def emit_tile(self, node): - data = node.inputs[0] - expand_times = node.inputs[1] - expand_times.code.clear() - expand_times = list( - self.infer.get_const_tensor_value(expand_times.layer)) - param = {"expand_times": expand_times} - node.code.add_layer("expand", data.ref_name, node.output_name, param) - - def emit_splitv(self, node): - data = node.inputs[0] - num_sections = node.inputs[1] - num_sections.code.clear() - split_dim = node.inputs[2] - split_dim.code.clear() - num_sections = self.infer.get_const_tensor_value(num_sections.layer) - split_dim = self.infer.get_const_tensor_value(split_dim.layer) - input_shape = self.infer.get_tensor_shape(data.layer) - if split_dim < 0: - split_dim += len(input_shape) - - index = numpy.argwhere(num_sections < 0).flatten() - if index.shape[0] > 1: - raise Exception("More than one dimension less than 0") - if index.shape[0] == 1: - num_sections[index[0]] = input_shape[split_dim] - numpy.sum( - num_sections) + num_sections[index[0]] - param = {"num_or_sections": list(num_sections), "dim": split_dim} - node.code.add_layer("split", data.ref_name, node.output_name, param) - - def emit_expanddims(self, node): - data = node.inputs[0] - dim = node.inputs[1] - dim.code.clear() - dim = self.infer.get_const_tensor_value(dim.layer) - param = {"axes":[dim]} - node.code.add_layer("unsqueeze", data.ref_name, node.output_name, param) - - def emit_cast(self, node): - data = node.inputs[0] - dtype_map = {1: "float32", 3: "int32", 9: "int64"} - dtype = node.get_attr("DstT") - if dtype in dtype_map: - dtype = dtype_map[dtype] - else: - raise Exception("Unknow dtype: {}".format(dtype)) - param = {"dtype":"\'{}\'".format(dtype)} - node.code.add_layer("cast", data.ref_name, node.output_name, param) diff --git a/tensorflow2fluid/tf2fluid/tensorflow_graph.py b/tensorflow2fluid/tf2fluid/tensorflow_graph.py deleted file mode 100644 index fb88c84..0000000 --- a/tensorflow2fluid/tf2fluid/tensorflow_graph.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from graph import GraphNode, Graph -from tensorflow.core.framework import attr_value_pb2 -from utils import * - - -class TensorflowGraphNode(GraphNode): - dtype_map = {1: "float32", 3: "int32", 9: "int64"} - - def __init__(self, layer, input_format, layer_name=None): - super(TensorflowGraphNode, self).__init__(layer, layer_name) - self.codes = list() - self.code = FluidCode() - self.ref_as_const = 0 - self.data_format = input_format - - @property - def layer_type(self): - return self.layer.op.lower() - - @property - def shape_dim_size(self): - shape = self.layer.attr['_output_shapes'] - return len(shape.list.shape[0].dim) - - @property - def dtype(self): - dtype = self.get_attr("dtype") - if dtype in self.dtype_map: - dtype = self.dtype_map[dtype] - else: - raise Exception("Unknow dtype: {}".format(dtype)) - return dtype - - def get_attr(self, name, default_value=None): - if name in self.layer.attr: - attr = self.layer.attr[name] - field = attr.WhichOneof('value') - val = getattr(attr, field) if field else default_value - if isinstance(val, attr_value_pb2.AttrValue.ListValue): - result = list(val.ListFields()[0][1]) - for i in range(len(result)): - if isinstance(result[i], int): - result[i] = int(result[i]) - try: - if isinstance(result[i], long): - result[i] = int(result[i]) - except: - pass - return result - else: - return val if isinstance(val, bytes) else val - else: - return default_value - - def clear_code(self): - self.code.clear() - - -class TensorflowGraph(Graph): - useless_type = ['identity', 'placeholderwithdefault', 'switch', 'merge'] - - def __init__(self, tf_graph): - super(TensorflowGraph, self).__init__(tf_graph) - self.tf_graph = tf_graph - self.identity_relation = dict() - - def build(self, input_format): - skip_node = set(['const']) - for i, layer in enumerate(self.tf_graph.node): - self.node_map[layer.name] = TensorflowGraphNode( - layer, input_format) - - for i, layer in enumerate(self.tf_graph.node): - if layer.op.lower() in skip_node: - continue - for pred in layer.input: - if pred not in self.node_map and pred.split( - ':')[0] in self.node_map: - pred_node = self.node_map[pred.split(':')[0]] - if pred_node.layer_type == "switch": - self._make_connection(pred_node, - self.node_map[layer.name]) - elif pred_node.layer_type == "split" or \ - pred_node.layer_type == "splitv": - self.node_map[pred] = TensorflowGraphNode( - pred_node.layer, input_format, pred) - self._make_connection(self.node_map[pred], - self.node_map[layer.name]) - self._make_connection(pred_node, self.node_map[pred]) - else: - raise Exception("\nUnsupported situation(name:[{}]," \ - "OP[{}])".format(layer.name, layer.op)) - - elif pred in self.node_map: - self._make_connection(self.node_map[pred], - self.node_map[layer.name]) - - else: - raise Exception("input: {} not in node_map".format(pred)) - super(TensorflowGraph, self).build(input_format) - - self._process_useless_nodes() - self._check_dataformat(input_format) - - def _check_dataformat(self, input_format): - for i in range(len(self.topological_sort)): - current_node = self.node_map[self.topological_sort[i]] - if 'data_format'.encode() in current_node.layer.attr: - s = current_node.layer.attr['data_format'].s - if s != NHWC and s != NCHW: - raise Exception('Unkown dataformat {}'.format(s)) - self.set_data_format(current_node, s) - - def _process_useless_nodes(self): - remove_index = list() - for i in range(len(self.topological_sort)): - name = self.topological_sort[i] - current_node = self.node_map[name] - if current_node.layer_type in 
self.useless_type: - input = current_node.inputs[0] - self.identity_relation[current_node.layer.name] = input.layer.name - for node in current_node.outputs: - for k in range(0, len(node.inputs)): - if node.inputs[k] == current_node: - node.inputs[k] = input - if node not in input.outputs: - input.outputs.append(node) - input.outputs.remove(current_node) - del self.node_map[name] - if name in self.output_nodes: - self.output_nodes.remove(name) - if name in self.input_nodes: - self.input_nodes.remove(name) - remove_index.append(i) - - remove_index.sort(reverse=True) - for i in range(len(remove_index)): - del self.topological_sort[remove_index[i]] - - def set_data_format(self, node, data_format): - assert data_format == 'NHWC'.encode() or data_format == 'NCHW'.encode() - if node.data_format == data_format: - return - node.data_format = data_format - if len(node.outputs) == 0: - return - for output in node.outputs: - self.set_data_format(output, data_format) diff --git a/tensorflow2fluid/tf2fluid/tensorflow_parser.py b/tensorflow2fluid/tf2fluid/tensorflow_parser.py deleted file mode 100644 index 26d8a8d..0000000 --- a/tensorflow2fluid/tf2fluid/tensorflow_parser.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import tensorflow as tf -from tensorflow_graph import TensorflowGraph -from tensorflow.python.framework import tensor_util -from tensorflow.python.tools import strip_unused_lib -from tensorflow.python.framework import dtypes -import logging -import numpy - - -class TensorflowCkptParser(object): - def __init__(self, - meta_file, - checkpoint_file, - dest_nodes, - input_shape=None, - in_nodes=None, - input_format="NCHW".encode()): - graph_def = None - self.weights = None - self.inputs = in_nodes - self.outputs = dest_nodes - sess = tf.Session() - if meta_file is None: - raise Exception("meta_file must be provided") - new_saver = tf.train.import_meta_graph(meta_file) - if checkpoint_file is not None: - self.weights = dict() - new_saver.restore(sess, - tf.train.latest_checkpoint(checkpoint_file)) - for var in tf.global_variables(): - value = var.eval(sess) - self.weights[var.name.split(':')[0]] = value - - self.infer = ModelInfer(sess) - graph_def, ver = tf.get_default_graph()._as_graph_def(add_shapes=True) - - if in_nodes is not None and input_shape is not None: - graph_def = strip_unused_lib.strip_unused( - input_graph_def=graph_def, - input_node_names=in_nodes, - output_node_names=dest_nodes, - placeholder_type_enum=dtypes.float32.as_datatype_enum) - - for node in graph_def.node: - if node.name in in_nodes: - index = in_nodes.index(node.name) - shape = [tf.Dimension(x) for x in input_shape[index]] - shape_proto = tf.TensorShape(shape).as_proto() - node.attr['_output_shapes'].list.shape.pop() - node.attr['_output_shapes'].list.shape.extend( - [shape_proto]) - self.infer.gen_sample_data(node.name, input_shape[index]) - - self.tf_graph = TensorflowGraph(graph_def) - else: - raise Exception('in_nodes and output_nodes need be provided') - - self.tf_graph.build(input_format) - - -class TensorflowPbParser(object): - def __init__(self, - pb_file, - dest_nodes, - input_shape=None, - in_nodes=None, - input_format="NCHW".encode()): - with open(pb_file, 'rb') as f: - serialized = f.read() - tf.reset_default_graph() - original_graph_def = tf.GraphDef() - original_graph_def.ParseFromString(serialized) - self.inputs = list() - self.outputs = dest_nodes - - sess = tf.Session(graph=tf.get_default_graph()) - sess.run(tf.global_variables_initializer()) - self.infer = ModelInfer(sess) - - original_graph_def = strip_unused_lib.strip_unused( - input_graph_def=original_graph_def, - input_node_names=in_nodes, - output_node_names=dest_nodes, - placeholder_type_enum=dtypes.float32.as_datatype_enum) - - graph_def = tf.GraphDef() - graph_def.ParseFromString(original_graph_def.SerializeToString()) - in_type_list = dict() - for node in graph_def.node: - if node.name in in_nodes: - in_type_list[node.name] = node.attr['dtype'].type - - input_shape = list(input_shape) - if not isinstance(input_shape[0], list): - input_shape = [input_shape] - - input_map = dict() - for i in range(len(input_shape)): - if in_type_list[in_nodes[i]] == 1 or in_type_list[ - in_nodes[i]] == 0: - dtype = tf.float32 - x = tf.placeholder(dtype, shape=input_shape[i]) - elif in_type_list[in_nodes[i]] == 3: - dtype = tf.int32 - x = tf.placehoder(dtype, shape=input_shape[i]) - else: - raise Exception("Unexpected dtype for input, only support " \ - "float32 and int32 now") - input_map[in_nodes[i] + ":0"] = x - self.inputs.append(x.name.split(':')[0]) - self.infer.gen_sample_data(x.name, input_shape[i]) - - tf.import_graph_def(graph_def, name="", input_map=input_map) - graph_def = tf.get_default_graph()._as_graph_def(add_shapes=True)[0] - - 
self.tf_graph = TensorflowGraph(graph_def) - self.tf_graph.build(input_format) - - self.weights = dict() - for node in graph_def.node: - if node.op.lower() == "const": - try: - node.attr['value'].tensor.tensor_content - weight = tensor_util.MakeNdarray(node.attr['value'].tensor) - self.weights[node.name] = weight - except: - continue - - -class ModelInfer(object): - """ Trick method for tensorflow2fluid - There are some Operators in PaddlePaddle not support - tensor as parameter, like reshape/transpose, Because these - parameters should be fixed in PaddlePaddle. So we - provide 'ModelInfer' here to solove this problem. - """ - - def __init__(self, sess): - self.sess = sess - self.inputs_sample_data = dict() - - def gen_sample_data(self, tensor_name, shape): - self.inputs_sample_data[tensor_name] = list() - if shape[0] is None or shape[0] < 0: - for i in range(1, 4): - data = numpy.random.random_sample([i] + shape[1:]) - self.inputs_sample_data[tensor_name].append(data) - else: - for i in range(1, 4): - data = numpy.random.random_sample(shape) - self.inputs_sample_data[tensor_name].append(data) - - def get_shape_tensor(self, layer, output_shape=None): - """ return value of shape parameter - return value of shape parameter which are tensor type - in tensorflow model - """ - - tensor_name = layer.name - if len(tensor_name.split(':')) < 2: - tensor_name = tensor_name + ':0' - output_tensor = self.sess.graph.get_tensor_by_name(tensor_name) - - tensor_values = [] - for i in range(0, 3): - inputs_tensors = dict() - for name, values in self.inputs_sample_data.items(): - if len(name.split(':')) < 2: - name = name + ':0' - tensor = self.sess.graph.get_tensor_by_name(name) - inputs_tensors[tensor] = values[i] - r, = self.sess.run([output_tensor], inputs_tensors) - tensor_values.append(r.flatten()) - - compare01 = (tensor_values[0] == tensor_values[1]) - compare12 = (tensor_values[1] == tensor_values[2]) - - if compare01.all() and compare12.all(): - return tensor_values[0] - - if (compare01 == compare12).all(): - index = numpy.argwhere(compare01 == False).flatten() - if index.shape[0] != 1: - raise Exception("There's not only one unstable dimension") - tensor_values[0][index[0]] = -1 - - index = numpy.argwhere(tensor_values[0] < 0).flatten() - if index.shape[0] > 2: - raise Exception("There's more than two values less than zero") - if index.shape[0] == 2: - if output_shape is None: - raise Exception("Need output_shape parameter, " \ - "get_shape_tensor(tensor_name, output_shape)") - tensor_values[0][index[1]] = output_shape[index[1]] - return tensor_values[0] - else: - raise Exception("Can not infer a stable shape tensor value") - - def get_tensor_shape(self, layer): - shape = layer.attr['_output_shapes'].list.shape[0] - shape = numpy.array([dim.size for dim in shape.dim]) - if numpy.argwhere(shape < 0).shape[0] <= 1 and len(shape) != 0: - return shape - tensor_name = layer.name - if len(tensor_name.split(':')) < 2: - tensor_name = tensor_name + ':0' - output_tensor = self.sess.graph.get_tensor_by_name(tensor_name) - - shapes = [] - for i in range(0, 3): - inputs_tensors = dict() - for name, values in self.inputs_sample_data.items(): - if len(name.split(':')) < 2: - name = name + ':0' - tensor = self.sess.graph.get_tensor_by_name(name) - inputs_tensors[tensor] = values[i] - r, = self.sess.run([output_tensor], inputs_tensors) - shapes.append(numpy.array(r.shape)) - - compare01 = (shapes[0] == shapes[1]) - compare12 = (shapes[1] == shapes[2]) - - if compare01.all() and compare12.all(): - return 
shapes[0] - - if (compare01 == compare12).all(): - index = numpy.argwhere(compare01 == False).flatten() - if index.shape[0] != 1: - raise Exception("There's not only one unstable dimension") - if index[0] != 0: - raise Exception("Batch size not in the first dimension") - shapes[0][0] = -1 - return shapes[0] - else: - raise Exception("Can not infer a stable tensor shape, failed!") - - def get_const_tensor_value(self, layer): - tensor_name = layer.name - if len(tensor_name.split(':')) < 2: - tensor_name = tensor_name + ':0' - output_tensor = self.sess.graph.get_tensor_by_name(tensor_name) - - result = [] - for i in range(0, 3): - inputs_tensors = dict() - for name, values in self.inputs_sample_data.items(): - if len(name.split(':')) < 2: - name = name + ':0' - tensor = self.sess.graph.get_tensor_by_name(name) - inputs_tensors[tensor] = values[i] - r, = self.sess.run([output_tensor], inputs_tensors) - result.append(r) - - compare01 = (result[0] == result[1]) - compare12 = (result[1] == result[2]) - - if compare01.all() and compare12.all(): - return result[0] - else: - raise Exception("Can not infer a stable constant tensor value") diff --git a/tensorflow2fluid/tf2fluid/utils.py b/tensorflow2fluid/tf2fluid/utils.py deleted file mode 100644 index cc344b9..0000000 --- a/tensorflow2fluid/tf2fluid/utils.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NHWC = 'NHWC'.encode() -NCHW = 'NCHW'.encode() -OTHER = 'OTHER'.encode() -SAME = 'SAME'.encode() -VALID = 'VALID'.encode() - - -class NameGenerator(object): - def __init__(self): - self.param_index = 0 - self.net_index = 0 - self.const_index = 0 - self.names = dict() - - def get_name(self, node): - ref_name = None - op_name = node.layer_type - - if node.layer.name in self.names: - return self.names[node.layer.name] - - if op_name == "variablev2": - ref_name = "param_" + str(self.param_index) - self.param_index += 1 - elif op_name == "placeholder": - ref_name = node.layer.name - elif op_name == "const": - ref_name = "const_" + str(self.const_index) - self.const_index += 1 - elif op_name.lower() == "identity": - ref_name = self.names[node.layer.input[0]] - else: - ref_name = "net_" + str(self.net_index) - self.net_index += 1 - self.names[node.layer.name] = ref_name - return ref_name - - -class LayerCode(object): - def __init__(self): - self.op = None - self.param_attr = dict() - self.input = None - self.output = None - self.str_code = None - - def get_str_code(self): - if self.str_code is not None: - return self.str_code - - layer_code0 = "" - if self.output is not None: - layer_code0 = layer_code0 + self.output + " = " - layer_code0 += "layers." 
- - layer_code1 = self.op + "(" - if self.input is not None: - layer_code1 = layer_code1 + self.input + ", " - - layer_code2 = "" - for k, v in self.param_attr.items(): - layer_code2 = layer_code2 + k + "=" + "{}".format(v) + ", " - layer_code2 = layer_code2.strip(", ") - - layer_code = ( - layer_code0 + layer_code1 + layer_code2).strip(", ") + ")" - return layer_code - - -class FluidCode(object): - def __init__(self): - self.codes = list() - - def add_layer(self, op, input, output, param_attr=None): - if param_attr is None: - param_attr = dict() - layer_code = LayerCode() - layer_code.op = op - layer_code.input = input - layer_code.output = output - layer_code.param_attr = param_attr - self.codes.append(layer_code) - - def add_str(self, str_code): - layer_code = LayerCode() - layer_code.str_code = str_code - self.codes.append(layer_code) - - def clear(self): - self.codes = list() - - def gen_codes(self): - res = list() - if len(self.codes) == 0: - return [] - for code in self.codes: - if isinstance(code, LayerCode): - res.append(code.get_str_code()) - else: - raise Exception("Unexcept situation!") - return res diff --git a/tensorflow2fluid/vgg_translate_tutorial.ipynb b/tensorflow2fluid/vgg_translate_tutorial.ipynb deleted file mode 100644 index 14c5ef6..0000000 --- a/tensorflow2fluid/vgg_translate_tutorial.ipynb +++ /dev/null @@ -1,396 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tutorial : tensorflow2fluid转换VGG_16模型\n", - "\n", - "VGG_16是CV领域的一个经典模型,本文档以tensorflow/models下的[VGG_16](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py)为例,展示如何将TensorFlow训练好的模型转换为PaddlePaddle模型。 \n", - "### 下载预训练模型" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "ename": "AttributeError", - "evalue": "module 'urllib' has no attribute 'urlretrieve'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0murl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mfetch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0murllib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0murlretrieve\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"./vgg_16.tar.gz\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mschedule\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m: module 'urllib' has no attribute 'urlretrieve'" - ] - } - ], - "source": [ - "import urllib\n", - "import sys\n", - "def schedule(a, b, c):\n", - " per = 100.0 * a * b / c\n", - " per = int(per)\n", - " sys.stderr.write(\"\\rDownload percentage %.2f%%\" % per)\n", - " sys.stderr.flush()\n", - "\n", - "url = \"http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz\"\n", - "fetch = urllib.urlretrieve(url, \"./vgg_16.tar.gz\", schedule)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 解压下载的压缩文件" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import tarfile\n", - "with tarfile.open(\"./vgg_16.tar.gz\", \"r:gz\") as f:\n", 
- " file_names = f.getnames()\n", - " for file_name in file_names:\n", - " f.extract(file_name, \"./\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 保存模型为checkpoint格式\n", - "\n", - "tensorflow2fluid目前支持checkpoint格式的模型或者是将网络结构和参数序列化的pb格式模型,上面下载的`vgg_16.ckpt`仅仅存储了模型参数,因此我们需要重新加载参数,并将网络结构和参数一起保存为checkpoint模型\n", - "\n", - "**注意:下面的代码里,运行TensorFlow模型和将TensorFlow模型转换为PaddlePaddle模型,依赖TensorFlow**" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from vgg_16.ckpt\n" - ] - } - ], - "source": [ - "import tensorflow.contrib.slim as slim\n", - "from tensorflow.contrib.slim.nets import vgg\n", - "import tensorflow as tf\n", - "import numpy\n", - "\n", - "with tf.Session() as sess:\n", - " inputs = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3], name=\"inputs\")\n", - " logits, endpoint = vgg.vgg_16(inputs, num_classes=1000, is_training=False)\n", - " load_model = slim.assign_from_checkpoint_fn(\"vgg_16.ckpt\", slim.get_model_variables(\"vgg_16\"))\n", - " load_model(sess)\n", - " \n", - " numpy.random.seed(13)\n", - " data = numpy.random.rand(5, 224, 224, 3)\n", - " input_tensor = sess.graph.get_tensor_by_name(\"inputs:0\")\n", - " output_tensor = sess.graph.get_tensor_by_name(\"vgg_16/fc8/squeezed:0\")\n", - " result = sess.run([output_tensor], {input_tensor:data})\n", - " numpy.save(\"tensorflow.npy\", numpy.array(result))\n", - " \n", - " saver = tf.train.Saver()\n", - " saver.save(sess, \"./checkpoint/model\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 将模型转换为PaddlePaddle模型\n", - "\n", - "**注意**:部分OP在转换时,需要将参数写入文件;或者是运行tensorflow模型进行infer,获取tensor值。两种情况下均会消耗一定的时间用于IO或计算,对于后一种情况,建议转换模型时将`use_cuda`参数设为`True`,加快infer速度\n", - "\n", - "可以通过下面的**模型转换python脚本**在代码中设置参数,在python脚本中进行模型转换。或者一般可以通过如下的命令行方式进行转换,\n", - "``` python\n", - "# 通过命令行也可进行模型转换\n", - "python tf2fluid/convert.py --meta_file checkpoint/model.meta --ckpt_dir checkpoint \\\n", - " --in_nodes inputs --input_shape None,224,224,3 \\\n", - " --output_nodes vgg_16/fc8/squeezed --use_cuda True \\\n", - " --input_format NHWC --save_dir paddle_model\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### 模型转换python脚本" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:root:Loading tensorflow model...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from checkpoint/model\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from checkpoint/model\n", - "INFO:root:Tensorflow model loaded!\n", - "INFO:root:TotalNum:86,TraslatedNum:1,CurrentNode:inputs\n", - "INFO:root:TotalNum:86,TraslatedNum:2,CurrentNode:vgg_16/conv1/conv1_1/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:3,CurrentNode:vgg_16/conv1/conv1_1/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:4,CurrentNode:vgg_16/conv1/conv1_2/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:5,CurrentNode:vgg_16/conv1/conv1_2/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:6,CurrentNode:vgg_16/conv2/conv2_1/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:7,CurrentNode:vgg_16/conv2/conv2_1/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:8,CurrentNode:vgg_16/conv2/conv2_2/weights\n", - 
"INFO:root:TotalNum:86,TraslatedNum:9,CurrentNode:vgg_16/conv2/conv2_2/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:10,CurrentNode:vgg_16/conv3/conv3_1/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:11,CurrentNode:vgg_16/conv3/conv3_1/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:12,CurrentNode:vgg_16/conv3/conv3_2/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:13,CurrentNode:vgg_16/conv3/conv3_2/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:14,CurrentNode:vgg_16/conv3/conv3_3/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:15,CurrentNode:vgg_16/conv3/conv3_3/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:16,CurrentNode:vgg_16/conv4/conv4_1/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:17,CurrentNode:vgg_16/conv4/conv4_1/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:18,CurrentNode:vgg_16/conv4/conv4_2/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:19,CurrentNode:vgg_16/conv4/conv4_2/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:20,CurrentNode:vgg_16/conv4/conv4_3/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:21,CurrentNode:vgg_16/conv4/conv4_3/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:22,CurrentNode:vgg_16/conv5/conv5_1/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:23,CurrentNode:vgg_16/conv5/conv5_1/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:24,CurrentNode:vgg_16/conv5/conv5_2/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:25,CurrentNode:vgg_16/conv5/conv5_2/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:26,CurrentNode:vgg_16/conv5/conv5_3/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:27,CurrentNode:vgg_16/conv5/conv5_3/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:28,CurrentNode:vgg_16/fc6/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:29,CurrentNode:vgg_16/fc6/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:30,CurrentNode:vgg_16/fc7/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:31,CurrentNode:vgg_16/fc7/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:32,CurrentNode:vgg_16/fc8/weights\n", - "INFO:root:TotalNum:86,TraslatedNum:33,CurrentNode:vgg_16/fc8/biases\n", - "INFO:root:TotalNum:86,TraslatedNum:34,CurrentNode:vgg_16/conv1/conv1_1/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:35,CurrentNode:vgg_16/conv1/conv1_1/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:36,CurrentNode:vgg_16/conv1/conv1_1/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:37,CurrentNode:vgg_16/conv1/conv1_2/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:38,CurrentNode:vgg_16/conv1/conv1_2/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:39,CurrentNode:vgg_16/conv1/conv1_2/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:40,CurrentNode:vgg_16/pool1/MaxPool\n", - "INFO:root:TotalNum:86,TraslatedNum:41,CurrentNode:vgg_16/conv2/conv2_1/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:42,CurrentNode:vgg_16/conv2/conv2_1/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:43,CurrentNode:vgg_16/conv2/conv2_1/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:44,CurrentNode:vgg_16/conv2/conv2_2/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:45,CurrentNode:vgg_16/conv2/conv2_2/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:46,CurrentNode:vgg_16/conv2/conv2_2/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:47,CurrentNode:vgg_16/pool2/MaxPool\n", - "INFO:root:TotalNum:86,TraslatedNum:48,CurrentNode:vgg_16/conv3/conv3_1/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:49,CurrentNode:vgg_16/conv3/conv3_1/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:50,CurrentNode:vgg_16/conv3/conv3_1/Relu\n", - 
"INFO:root:TotalNum:86,TraslatedNum:51,CurrentNode:vgg_16/conv3/conv3_2/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:52,CurrentNode:vgg_16/conv3/conv3_2/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:53,CurrentNode:vgg_16/conv3/conv3_2/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:54,CurrentNode:vgg_16/conv3/conv3_3/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:55,CurrentNode:vgg_16/conv3/conv3_3/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:56,CurrentNode:vgg_16/conv3/conv3_3/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:57,CurrentNode:vgg_16/pool3/MaxPool\n", - "INFO:root:TotalNum:86,TraslatedNum:58,CurrentNode:vgg_16/conv4/conv4_1/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:59,CurrentNode:vgg_16/conv4/conv4_1/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:60,CurrentNode:vgg_16/conv4/conv4_1/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:61,CurrentNode:vgg_16/conv4/conv4_2/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:62,CurrentNode:vgg_16/conv4/conv4_2/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:63,CurrentNode:vgg_16/conv4/conv4_2/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:64,CurrentNode:vgg_16/conv4/conv4_3/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:65,CurrentNode:vgg_16/conv4/conv4_3/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:66,CurrentNode:vgg_16/conv4/conv4_3/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:67,CurrentNode:vgg_16/pool4/MaxPool\n", - "INFO:root:TotalNum:86,TraslatedNum:68,CurrentNode:vgg_16/conv5/conv5_1/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:69,CurrentNode:vgg_16/conv5/conv5_1/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:70,CurrentNode:vgg_16/conv5/conv5_1/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:71,CurrentNode:vgg_16/conv5/conv5_2/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:72,CurrentNode:vgg_16/conv5/conv5_2/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:73,CurrentNode:vgg_16/conv5/conv5_2/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:74,CurrentNode:vgg_16/conv5/conv5_3/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:75,CurrentNode:vgg_16/conv5/conv5_3/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:76,CurrentNode:vgg_16/conv5/conv5_3/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:77,CurrentNode:vgg_16/pool5/MaxPool\n", - "INFO:root:TotalNum:86,TraslatedNum:78,CurrentNode:vgg_16/fc6/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:79,CurrentNode:vgg_16/fc6/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:80,CurrentNode:vgg_16/fc6/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:81,CurrentNode:vgg_16/fc7/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:82,CurrentNode:vgg_16/fc7/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:83,CurrentNode:vgg_16/fc7/Relu\n", - "INFO:root:TotalNum:86,TraslatedNum:84,CurrentNode:vgg_16/fc8/Conv2D\n", - "INFO:root:TotalNum:86,TraslatedNum:85,CurrentNode:vgg_16/fc8/BiasAdd\n", - "INFO:root:TotalNum:86,TraslatedNum:86,CurrentNode:vgg_16/fc8/squeezed\n", - "INFO:root:Model translated!\n" - ] - } - ], - "source": [ - "import tf2fluid.convert as convert\n", - "import argparse\n", - "parser = convert._get_parser()\n", - "parser.meta_file = \"checkpoint/model.meta\"\n", - "parser.ckpt_dir = \"checkpoint\"\n", - "parser.in_nodes = [\"inputs\"]\n", - "parser.input_shape = [\"None,224,224,3\"]\n", - "parser.output_nodes = [\"vgg_16/fc8/squeezed\"]\n", - "parser.use_cuda = \"True\"\n", - "parser.input_format = \"NHWC\"\n", - "parser.save_dir = \"paddle_model\"\n", - "\n", - "convert.run(parser)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": 
[ - "### 加载转换后的PaddlePaddle模型,并进行预测\n", - "需要注意的是,转换后的PaddlePaddle CV模型**输入格式为NCHW**\n", - "\n", - "**注意:下面代码用于运行转换后的PaddlePaddle模型,并与TensorFlow计算结果对比diff,因此依赖PaddlePaddle**" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "ExecuteTime": { - "end_time": "2019-03-15T05:51:40.544737Z", - "start_time": "2019-03-15T05:51:27.857863Z" - } - }, - "outputs": [], - "source": [ - "import numpy\n", - "import tf2fluid.model_loader as ml\n", - "\n", - "model = ml.ModelLoader(\"paddle_model\", use_cuda=False)\n", - "\n", - "numpy.random.seed(13)\n", - "data = numpy.random.rand(5, 224, 224, 3).astype(\"float32\")\n", - "# NHWC -> NCHW\n", - "data = numpy.transpose(data, (0, 3, 1, 2))\n", - "\n", - "results = model.inference(feed_dict={model.inputs[0]:data})\n", - "\n", - "numpy.save(\"paddle.npy\", numpy.array(results))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 对比转换前后模型之前的预测结果diff" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "ExecuteTime": { - "end_time": "2019-03-15T05:52:02.126718Z", - "start_time": "2019-03-15T05:52:02.115849Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "6.67572e-06\n" - ] - } - ], - "source": [ - "import numpy\n", - "paddle_result = numpy.load(\"paddle.npy\")\n", - "tensorflow_result = numpy.load(\"tensorflow.npy\")\n", - "diff = numpy.fabs(paddle_result - tensorflow_result)\n", - "print(numpy.max(diff))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 需要注意的点\n", - "1. 转换后的模型需要注意输入格式,PaddlePaddle中输入格式需为NCHW格式 \n", - "2. 此例中不涉及到输入中间层,如卷积层的输出,需要了解的是PaddlePaddle中的卷积层输出,卷积核的`shape`与Tensorflow有差异 \n", - "3. 模型转换完后,检查转换前后模型的diff,在本例中,测试得到的最大diff满足转换需求 " - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "tensorflow", - "language": "python", - "name": "tensorflow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.6" - }, - "varInspector": { - "cols": { - "lenName": 16, - "lenType": 16, - "lenVar": 40 - }, - "kernels_config": { - "python": { - "delete_cmd_postfix": "", - "delete_cmd_prefix": "del ", - "library": "var_list.py", - "varRefreshCmd": "print(var_dic_list())" - }, - "r": { - "delete_cmd_postfix": ") ", - "delete_cmd_prefix": "rm(", - "library": "var_list.r", - "varRefreshCmd": "cat(var_dic_list()) " - } - }, - "types_to_exclude": [ - "module", - "function", - "builtin_function_or_method", - "instance", - "_Feature" - ], - "window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} -- GitLab