diff --git a/mace/core/operator.cc b/mace/core/operator.cc
index ea8dab31de9be2869f2dd6f447f22eb3894ed288..38a220eb4b2dcf1cf2091bc9a4f38742648cc977 100644
--- a/mace/core/operator.cc
+++ b/mace/core/operator.cc
@@ -85,6 +85,7 @@ extern void Register_MatMul(OperatorRegistry *op_registry);
 extern void Register_Pooling(OperatorRegistry *op_registry);
 extern void Register_Proposal(OperatorRegistry *op_registry);
 extern void Register_PSROIAlign(OperatorRegistry *op_registry);
+extern void Register_ReOrganize(OperatorRegistry *op_registry);
 extern void Register_Reshape(OperatorRegistry *op_registry);
 extern void Register_ResizeBilinear(OperatorRegistry *op_registry);
 extern void Register_Slice(OperatorRegistry *op_registry);
@@ -120,6 +121,7 @@ OperatorRegistry::OperatorRegistry() {
   ops::Register_Pooling(this);
   ops::Register_Proposal(this);
   ops::Register_PSROIAlign(this);
+  ops::Register_ReOrganize(this);
   ops::Register_Reshape(this);
   ops::Register_ResizeBilinear(this);
   ops::Register_Slice(this);
diff --git a/mace/kernels/reorganize.h b/mace/kernels/reorganize.h
new file mode 100644
index 0000000000000000000000000000000000000000..68c772090d5db75c5cf609da23ea82f2ccc844eb
--- /dev/null
+++ b/mace/kernels/reorganize.h
@@ -0,0 +1,84 @@
+//
+// Copyright (c) 2017 XiaoMi All rights reserved.
+//
+#ifndef MACE_KERNELS_REORGANIZE_H_
+#define MACE_KERNELS_REORGANIZE_H_
+
+#include <vector>
+
+#include "mace/core/future.h"
+#include "mace/core/runtime/opencl/cl2_header.h"
+#include "mace/core/tensor.h"
+
+namespace mace {
+namespace kernels {
+
+template <DeviceType D, typename T>
+struct ReOrganizeFunctor {
+  void operator()(const Tensor *input,
+                  const std::vector<index_t> &out_shape,
+                  Tensor *output,
+                  StatsFuture *future) {
+    const bool w2c = out_shape[3] > input->dim(3);
+
+    const index_t height = input->dim(1);
+    const index_t input_width = input->dim(2);
+    const index_t input_chan = input->dim(3);
+    const index_t output_width = output->dim(2);
+    const index_t output_chan = output->dim(3);
+
+    const T *input_ptr = input->data<T>();
+    T *output_ptr = output->mutable_data<T>();
+
+    if (w2c) {
+      MACE_CHECK((out_shape[3] % input->dim(3)) == 0);
+      const index_t multiplier = out_shape[3] / input->dim(3);
+#pragma omp parallel for collapse(4)
+      for (index_t n = 0; n < out_shape[0]; ++n) {
+        for (index_t h = 0; h < out_shape[1]; ++h) {
+          for (index_t w = 0; w < out_shape[2]; ++w) {
+            for (index_t c = 0; c < out_shape[3]; ++c) {
+              const index_t out_offset =
+                  ((n * height + h) * output_width + w)
+                      * output_chan + c;
+              const index_t in_w_idx = w + (c % multiplier) * output_width;
+              const index_t in_chan_idx = c / multiplier;
+              const index_t in_offset =
+                  ((n * height + h) * input_width + in_w_idx)
+                      * input_chan + in_chan_idx;
+              output_ptr[out_offset] = input_ptr[in_offset];
+            }
+          }
+        }
+      }
+    } else {
+      MACE_CHECK((input->dim(3) % out_shape[3]) == 0);
+      const index_t multiplier = input->dim(3) / out_shape[3];
+
+#pragma omp parallel for collapse(4)
+      for (index_t n = 0; n < out_shape[0]; ++n) {
+        for (index_t h = 0; h < out_shape[1]; ++h) {
+          for (index_t w = 0; w < out_shape[2]; ++w) {
+            for (index_t c = 0; c < out_shape[3]; ++c) {
+              const index_t out_offset =
+                  ((n * height + h) * output_width + w)
+                      * output_chan + c;
+              const index_t in_w_idx = w % input_width;
+              const index_t in_chan_idx = w / input_width + c * multiplier;
+              const index_t in_offset =
+                  ((n * height + h) * input_width + in_w_idx)
+                      * input_chan + in_chan_idx;
+              output_ptr[out_offset] = input_ptr[in_offset];
+            }
+          }
+        }
+      }
+    }
+
+  }
+};
+
+}  // namespace kernels
+}  // namespace mace
+
+#endif  // MACE_KERNELS_REORGANIZE_H_
diff --git a/mace/ops/proposal.h b/mace/ops/proposal.h
index 06dcc8a1b02b030a82e8bf5508421f0342decc46..6bd1c15917b57e7334a604986ec40cb03471871c 100644
--- a/mace/ops/proposal.h
+++ b/mace/ops/proposal.h
@@ -16,12 +16,12 @@ class ProposalOp : public Operator<D, T> {
  public:
   ProposalOp(const OperatorDef &operator_def, Workspace *ws)
       : Operator<D, T>(operator_def, ws),
-        functor_(OperatorBase::GetSingleArgument<int>("min_size", 0),
-                 OperatorBase::GetSingleArgument<float>("nms_thresh", 0),
-                 OperatorBase::GetSingleArgument<int>("pre_nms_top_n", 0),
-                 OperatorBase::GetSingleArgument<int>("post_nms_top_n", 0),
+        functor_(OperatorBase::GetSingleArgument<int>("min_size", 16),
+                 OperatorBase::GetSingleArgument<float>("nms_thresh", 0.7),
+                 OperatorBase::GetSingleArgument<int>("pre_nms_top_n", 6000),
+                 OperatorBase::GetSingleArgument<int>("post_nms_top_n", 300),
                  OperatorBase::GetSingleArgument<int>("feat_stride", 0),
-                 OperatorBase::GetSingleArgument<int>("base_size", 16),
+                 OperatorBase::GetSingleArgument<int>("base_size", 12),
                  OperatorBase::GetRepeatedArgument<int>("scales"),
                  OperatorBase::GetRepeatedArgument<float>("ratios")) {}
 
diff --git a/mace/ops/reorganize.cc b/mace/ops/reorganize.cc
new file mode 100644
index 0000000000000000000000000000000000000000..794464cfb473005cb4dc76271bc470227191d104
--- /dev/null
+++ b/mace/ops/reorganize.cc
@@ -0,0 +1,19 @@
+//
+// Copyright (c) 2017 XiaoMi All rights reserved.
+//
+
+#include "mace/ops/reorganize.h"
+
+namespace mace {
+namespace ops {
+
+void Register_ReOrganize(OperatorRegistry *op_registry) {
+  REGISTER_OPERATOR(op_registry, OpKeyBuilder("ReOrganize")
+                                     .Device(DeviceType::CPU)
+                                     .TypeConstraint<float>("T")
+                                     .Build(),
+                    ReOrganizeOp<DeviceType::CPU, float>);
+}
+
+}  // namespace ops
+}  // namespace mace
diff --git a/mace/ops/reorganize.h b/mace/ops/reorganize.h
new file mode 100644
index 0000000000000000000000000000000000000000..63b6110701a9c477982ef38f54489479ead89a1b
--- /dev/null
+++ b/mace/ops/reorganize.h
@@ -0,0 +1,71 @@
+//
+// Copyright (c) 2017 XiaoMi All rights reserved.
+//
+
+#ifndef MACE_OPS_REORGANIZE_H_
+#define MACE_OPS_REORGANIZE_H_
+
+#include <vector>
+
+#include "mace/core/operator.h"
+#include "mace/kernels/reorganize.h"
+
+namespace mace {
+namespace ops {
+
+template <DeviceType D, typename T>
+class ReOrganizeOp : public Operator<D, T> {
+ public:
+  ReOrganizeOp(const OperatorDef &op_def, Workspace *ws)
+      : Operator<D, T>(op_def, ws),
+        shape_(OperatorBase::GetRepeatedArgument<int64_t>("shape")) {}
+
+  bool Run(StatsFuture *future) override {
+    const Tensor *input = this->Input(INPUT);
+    const index_t num_dims = shape_.size();
+    int unknown_idx = -1;
+    index_t product = 1;
+    std::vector<index_t> out_shape;
+
+    for (int i = 0; i < num_dims; ++i) {
+      if (shape_[i] == -1) {
+        MACE_CHECK(unknown_idx == -1) << "Only one input size may be -1";
+        unknown_idx = i;
+        out_shape.push_back(1);
+      } else {
+        MACE_CHECK(shape_[i] >= 0) << "Shape must be non-negative: "
+                                   << shape_[i];
+        out_shape.push_back(shape_[i]);
+        product *= shape_[i];
+      }
+    }
+
+    if (unknown_idx != -1) {
+      MACE_CHECK(product != 0)
+          << "Cannot infer shape if there is zero shape size.";
+      const index_t missing = input->size() / product;
+      MACE_CHECK(missing * product == input->size())
+          << "Input size not match reshaped tensor size";
+      out_shape[unknown_idx] = missing;
+    }
+
+    Tensor *output = this->Output(OUTPUT);
+    output->Resize(out_shape);
+
+    functor_(input, out_shape, output, future);
+    return true;
+  }
+
+ private:
+  std::vector<int64_t> shape_;
+  kernels::ReOrganizeFunctor<D, T> functor_;
+
+ protected:
+  OP_INPUT_TAGS(INPUT);
+  OP_OUTPUT_TAGS(OUTPUT);
+};
+
+}  // namespace ops
+}  // namespace mace
+
+#endif  // MACE_OPS_REORGANIZE_H_
diff --git a/mace/ops/reorganize_test.cc b/mace/ops/reorganize_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..68e0886718d8728371878eff2eaa2e2b505d22d6
--- /dev/null
+++ b/mace/ops/reorganize_test.cc
@@ -0,0 +1,107 @@
+//
+// Copyright (c) 2017 XiaoMi All rights reserved.
+// + +#include "gmock/gmock.h" +#include "mace/core/operator.h" +#include "mace/ops/ops_test_util.h" + +namespace mace { +namespace ops { +namespace test { + +class ReOrganizeTest : public OpsTestBase {}; + +void TestReOrganize(const std::vector &input_shape, + const std::vector &input_data, + const std::vector &output_shape, + const std::vector &output_data) { + const std::vector out_shape(output_shape.begin(), output_shape.end()); + + // Construct graph + OpsTestNet net; + + OpDefBuilder("ReOrganize", "ReOrganizeTest") + .Input("Input") + .Output("Output") + .AddIntsArg("shape", out_shape) + .Finalize(net.NewOperatorDef()); + + // Add input data + net.AddInputFromArray("Input", + input_shape, input_data); + + // Run + net.RunOp(); + + auto output = net.GetTensor("Output"); + + EXPECT_THAT(output->shape(), ::testing::ContainerEq(output_shape)); + + const float *output_ptr = output->data(); + int size = output->size(); + for (int i = 0; i < size; ++i) { + ASSERT_EQ(output_data[i], output_ptr[i]) << "With Index " << i; + } + + // Reverse reorganzie + const std::vector in_shape(input_shape.begin(), input_shape.end()); + OpDefBuilder("ReOrganize", "ReOrganizeTest") + .Input("Input") + .Output("Output") + .AddIntsArg("shape", in_shape) + .Finalize(net.NewOperatorDef()); + + // Add input data + net.AddInputFromArray("Input", + output_shape, output_data); + + // Run + net.RunOp(); + + output = net.GetTensor("Output"); + + EXPECT_THAT(output->shape(), ::testing::ContainerEq(input_shape)); + + output_ptr = output->data(); + size = output->size(); + for (int i = 0; i < size; ++i) { + ASSERT_EQ(input_data[i], output_ptr[i]) << "With Index " << i; + } +} + +TEST_F(ReOrganizeTest, Simple) { + TestReOrganize({1, 1, 4, 6}, + {0, 4, 8, 12, 16, 20, + 1, 5, 9, 13, 17, 21, + 2, 6, 10, 14, 18, 22, + 3, 7, 11, 15, 19, 23}, + {1, 1, 8, 3}, + {0, 8, 16, 1, 9, 17, 2, 10, 18, 3, 11, 19, + 4, 12, 20, 5, 13, 21, 6, 14, 22, 7, 15, 23}); + TestReOrganize({1, 1, 5, 6}, + {0, 5, 10, 15, 20, 25, + 1, 6, 11, 16, 21, 26, + 2, 7, 12, 17, 22, 27, + 3, 8, 13, 18, 23, 28, + 4, 9, 14, 19, 24, 29}, + {1, 1, 10, 3}, + {0, 10, 20, 1, 11, 21, 2, 12, 22, 3, 13, 23, + 4, 14, 24, 5, 15, 25, 6, 16, 26, 7, 17, 27, + 8, 18, 28, 9, 19, 29}); +} + +TEST_F(ReOrganizeTest, Complex) { + TestReOrganize({1, 2, 2, 6}, + {0, 4, 8, 12, 16, 20, + 1, 5, 9, 13, 17, 21, + 2, 6, 10, 14, 18, 22, + 3, 7, 11, 15, 19, 23}, + {1, 2, 6, 2}, + {0, 12, 1, 13, 4, 16, 5, 17, 8, 20, 9, 21, + 2, 14, 3, 15, 6, 18, 7, 19, 10, 22, 11, 23}); +} + +} // namespace test +} // namespace ops +} // namespace mace diff --git a/mace/proto/caffe.proto b/mace/proto/caffe.proto index 22764abc33fda32026bf436b685d79aa18ade460..cec617b99b8a5cde9e93e2bb14be0cab21794908 100644 --- a/mace/proto/caffe.proto +++ b/mace/proto/caffe.proto @@ -404,7 +404,9 @@ message LayerParameter { optional ParameterParameter parameter_param = 145; optional PoolingParameter pooling_param = 121; optional PowerParameter power_param = 122; + optional ProposalParameter proposal_param = 8266713; optional PReLUParameter prelu_param = 131; + optional PSROIAlignParameter psroi_align_param = 1490; optional PythonParameter python_param = 130; optional RecurrentParameter recurrent_param = 146; optional ReductionParameter reduction_param = 136; @@ -944,6 +946,19 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } +// Message that stores parameters used by ProposalLayer +message ProposalParameter { + optional uint32 feat_stride = 1 [default = 16]; + repeated uint32 scales = 2; + repeated float 
+}
+
+message PSROIAlignParameter {
+  required float spatial_scale = 1;
+  required int32 output_dim = 2; // output channel number
+  required int32 group_size = 3; // number of groups to encode position-sensitive score maps
+}
+
 message PythonParameter {
   optional string module = 1;
   optional string layer = 2;
diff --git a/mace/python/tools/caffe_converter_lib.py b/mace/python/tools/caffe_converter_lib.py
index 5db1be84f8fd5ba79962f498de31551645477fb8..7c7cd9abd71cb8b4720f782ffc71835033c3e97c 100644
--- a/mace/python/tools/caffe_converter_lib.py
+++ b/mace/python/tools/caffe_converter_lib.py
@@ -784,21 +784,89 @@ class CaffeConverter(object):
     self.net_def.op.extend([op_def])
     self.resolved_ops.add(op.name)
 
+  def convert_reshape(self, op):
+    op_def = self.CommonConvert(op, 'ReOrganize')
+    input_shape = op.parents[0].output_shape_map[op.layer.bottom[0]]
+    output_shape = input_shape
+    shape_param = np.asarray(op.layer.reshape_param.shape.dim)[[0, 3, 2, 1]]
+    print shape_param
+    for i in range(len(shape_param)):
+      if shape_param[i] != 0:
+        output_shape[i] = shape_param[i]
+    shape_arg = op_def.arg.add()
+    shape_arg.name = 'shape'
+    shape_arg.ints.extend(output_shape)
+    op.output_shape_map[op.layer.top[0]] = output_shape
+    self.add_output_shape(op_def, output_shape)
+    op_def.output.extend([op.name + ':0'])
+    self.net_def.op.extend([op_def])
+    self.resolved_ops.add(op.name)
+
+  def convert_proposal_op(self, op):
+    assert self.device == 'cpu'
+    op_def = self.CommonConvert(op, op.type)
+    if op.layer.HasField('proposal_param'):
+      proposal_param = op.layer.proposal_param
+      feat_stride_arg = op_def.arg.add()
+      feat_stride_arg.name = 'feat_stride'
+      feat_stride_arg.i = proposal_param.feat_stride
+      scales_arg = op_def.arg.add()
+      scales_arg.name = 'scales'
+      scales_arg.ints.extend(list(proposal_param.scales))
+      ratios_arg = op_def.arg.add()
+      ratios_arg.name = 'ratios'
+      ratios_arg.floats.extend(list(proposal_param.ratios))
+    output_shape = op.parents[0].output_shape_map[op.layer.bottom[0]]
+    op.output_shape_map[op.layer.top[0]] = output_shape
+    self.add_output_shape(op_def, output_shape)
+    op_def.output.extend([op.name + ':0'])
+    self.net_def.op.extend([op_def])
+    self.resolved_ops.add(op.name)
+
+  def convert_psroi_align(self, op):
+    assert self.device == 'cpu'
+    op_def = self.CommonConvert(op, op.type)
+    if op.layer.HasField('psroi_align_param'):
+      psroi_align_param = op.layer.psroi_align_param
+      spatial_scale_arg = op_def.arg.add()
+      spatial_scale_arg.name = 'spatial_scale'
+      spatial_scale_arg.f = psroi_align_param.spatial_scale
+      output_dim_arg = op_def.arg.add()
+      output_dim_arg.name = 'output_dim'
+      output_dim_arg.i = psroi_align_param.output_dim
+      group_size_arg = op_def.arg.add()
+      group_size_arg.name = 'group_size'
+      group_size_arg.i = psroi_align_param.group_size
+    output_shape = op.parents[0].output_shape_map[op.layer.bottom[0]]
+    op.output_shape_map[op.layer.top[0]] = output_shape
+    self.add_output_shape(op_def, output_shape)
+    op_def.output.extend([op.name + ':0'])
+    self.net_def.op.extend([op_def])
+    self.resolved_ops.add(op.name)
+
   def replace_in_out_name(self, input_names, output_names, is_single):
     in_names = set([input_name + ":0" for input_name in input_names])
     out_names = set([output_name + ":0" for output_name in output_names])
     if is_single:
       for op in self.net_def.op:
-        if len(op.input) > 0 and op.input[0] in in_names:
-          op.input[0] = MACE_INPUT_NODE_NAME + ':0'
-        if len(op.output) > 0 and op.output[0] in out_names:
-          op.output[0] = MACE_OUTPUT_NODE_NAME + ':0'
+        for i in range(len(op.input)):
+          if op.input[i] in in_names:
+            op.input[i] = MACE_INPUT_NODE_NAME + ':0'
+        for i in range(len(op.output)):
+          if op.output[i] in out_names:
+            op.output[i] = MACE_OUTPUT_NODE_NAME + ':0'
     else:
       for op in self.net_def.op:
-        if len(op.input) > 0 and op.input[0] in in_names:
-          op.input[0] = MACE_INPUT_NODE_NAME + '_' + op.input[0]
-        if len(op.output) > 0 and op.output[0] in out_names:
-          op.output[0] = MACE_OUTPUT_NODE_NAME + '_' + op.output[0]
+        for i in range(len(op.input)):
+          if op.input[i] in in_names:
+            op.input[i] = MACE_INPUT_NODE_NAME + '_' + op.input[i]
+          if op.input[i] in out_names:
+            op.input[i] = MACE_OUTPUT_NODE_NAME + '_' + op.input[i]
+        for i in range(len(op.output)):
+          if op.output[i] in in_names:
+            op.output[i] = MACE_INPUT_NODE_NAME + '_' + op.output[i]
+          if op.output[i] in out_names:
+            op.output[i] = MACE_OUTPUT_NODE_NAME + '_' + op.output[i]
 
   def add_input_op_shape(self, input_nodes, input_shapes):
     assert len(input_nodes) == len(input_shapes)
@@ -843,10 +911,16 @@ class CaffeConverter(object):
         self.convert_concat(op)
       elif op.type == 'Eltwise':
         self.convert_eltwise(op)
-      elif op.type in ['Softmax']:
-        self.convert_normal_op(op)
       elif op.type == 'Slice':
        self.convert_slice(op)
+      elif op.type == 'Reshape':
+        self.convert_reshape(op)
+      elif op.type == 'Proposal':
+        self.convert_proposal_op(op)
+      elif op.type == 'PSROIAlign':
+        self.convert_psroi_align(op)
+      elif op.type in ['Softmax']:
+        self.convert_normal_op(op)
       else:
         raise Exception('Unknown Op: %s, type: %s' % (op.name, op.type))
 
diff --git a/tools/env.sh b/tools/env.sh
index f48787a8956ac79349379207b054b0e7c4723e5f..e61180e1dd32a0fca24c886e9aab2cf2a5542c53 100644
--- a/tools/env.sh
+++ b/tools/env.sh
@@ -2,8 +2,6 @@
 
 LIBMACE_TAG=`git describe --abbrev=0 --tags`
 MACE_SOURCE_DIR=`/bin/pwd`
-INPUT_FILE_NAME="model_input"
-OUTPUT_FILE_NAME="model_out"
 PHONE_DATA_DIR="/data/local/tmp/mace_run"
 KERNEL_DIR="${PHONE_DATA_DIR}/cl/"
 CODEGEN_DIR=${MACE_SOURCE_DIR}/mace/codegen
diff --git a/tools/mace_tools.py b/tools/mace_tools.py
index 5afe2ee168dd9e1eb69ced19a5653d64204d5d34..c9a22f6472e33f8b8245cee9da5796c32d5d5e1d 100644
--- a/tools/mace_tools.py
+++ b/tools/mace_tools.py
@@ -14,6 +14,7 @@ import subprocess
 import sys
 import urllib
 import yaml
+import re
 
 import adb_tools
 
@@ -64,13 +65,37 @@ def clear_env(target_soc):
   command = "bash tools/clear_env.sh {}".format(target_soc)
   run_command(command)
 
+def input_file_name(input_name):
+  return os.environ['INPUT_FILE_NAME'] + '_' + \
+         re.sub('[^0-9a-zA-Z]+', '_', input_name)
 
-def generate_random_input(target_soc, model_output_dir):
+def generate_random_input(target_soc, model_output_dir,
+                          input_names, input_files):
   generate_data_or_not = True
   command = "bash tools/validate_tools.sh {} {} {}".format(
       target_soc, model_output_dir, int(generate_data_or_not))
   run_command(command)
+
+  input_name_list = []
+  input_file_list = []
+  if isinstance(input_names, list):
+    input_name_list.extend(input_names)
+  else:
+    input_name_list.append(input_names)
+  if isinstance(input_files, list):
+    input_file_list.extend(input_files)
+  else:
+    input_file_list.append(input_files)
+  assert len(input_file_list) == len(input_name_list)
+  for i in range(len(input_file_list)):
+    if input_file_list[i] is not None:
+      dst_input_file = model_output_dir + '/' + input_file_name(input_name_list[i])
+      if input_file_list[i].startswith("http://") or \
+         input_file_list[i].startswith("https://"):
+        urllib.urlretrieve(input_file_list[i], dst_input_file)
+      else:
+        print 'Copy input data:', dst_input_file
+        shutil.copy(input_file_list[i], dst_input_file)
 
 def generate_model_code():
   command = "bash tools/generate_model_code.sh"
   run_command(command)
@@ -215,6 +240,13 @@
       help="SoCs to build, comma seperated list (getprop ro.board.platform)")
   return parser.parse_known_args()
 
+def set_environment(configs):
+  os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"])
+  os.environ["VLOG_LEVEL"] = str(configs["vlog_level"])
+  os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(
+      FLAGS.config))[0]
+  os.environ['INPUT_FILE_NAME'] = "model_input"
+  os.environ['OUTPUT_FILE_NAME'] = "model_out"
 
 def main(unused_args):
   configs = parse_model_configs()
@@ -223,10 +255,7 @@ def main(unused_args):
     FLAGS.round = 1
     FLAGS.restart_round = 1
 
-  os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"])
-  os.environ["VLOG_LEVEL"] = str(configs["vlog_level"])
-  os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(
-      FLAGS.config))[0]
+  set_environment(configs)
 
   if FLAGS.mode == "build" or FLAGS.mode == "all":
     # Remove previous output dirs
@@ -266,6 +295,7 @@ def main(unused_args):
       skip_validation = configs["models"][model_name].get(
          "skip_validation", 0)
      model_config = configs["models"][model_name]
+      input_file_list = model_config.get("input_files", [])
       for key in model_config:
         if key in ['input_nodes', 'output_nodes'] and isinstance(
             model_config[key], list):
@@ -310,7 +340,8 @@ def main(unused_args):
 
       if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate"\
          or FLAGS.mode == "benchmark" or FLAGS.mode == "all":
-        generate_random_input(target_soc, model_output_dir)
+        generate_random_input(target_soc, model_output_dir,
+                              model_config['input_nodes'], input_file_list)
 
       if FLAGS.mode == "build" or FLAGS.mode == "all":
         generate_model_code()
@@ -336,7 +367,7 @@
   if FLAGS.mode == "throughput_test":
     merged_lib_file = FLAGS.output_dir + "/%s/%s/libmace_%s.%s.a" % \
         (os.environ["PROJECT_NAME"], target_abi, os.environ["PROJECT_NAME"], target_soc)
-    generate_random_input(target_soc, FLAGS.output_dir)
+    generate_random_input(target_soc, FLAGS.output_dir, [], [])
     for model_name in configs["models"]:
       runtime = configs["models"][model_name]["runtime"]
       os.environ["%s_MODEL_TAG" % runtime.upper()] = model_name
diff --git a/tools/validate.py b/tools/validate.py
index 4aaceacdc3fe8c5b52108e40e850c8d737bad208..d46284dcfc01067cbd2641877592c107cce8f460 100644
--- a/tools/validate.py
+++ b/tools/validate.py
@@ -97,14 +97,17 @@ def validate_caffe_model(input_names, input_shapes, output_names, output_shapes)
     input_value = load_data(FLAGS.input_file + "_" + input_names[i])
     input_value = input_value.reshape(input_shapes[i]).transpose((0, 3, 1, 2))
     input_blob_name = input_names[i]
-    if input_names[i] in net.top_names:
-      input_blob_name = net.top_names[input_names[i]][0]
+    try:
+      if input_names[i] in net.top_names:
+        input_blob_name = net.top_names[input_names[i]][0]
+    except ValueError:
+      pass
     net.blobs[input_blob_name].data[0] = input_value
 
   net.forward()
 
   for i in range(len(output_names)):
-    value = net.blobs[net.top_names[output_names[i]][0]].data[0]
+    value = net.blobs[net.top_names[output_names[i]][0]].data
     out_shape = output_shapes[i]
     out_shape[1], out_shape[2], out_shape[3] = out_shape[3], out_shape[1], out_shape[2]
     value = value.reshape(out_shape).transpose((0, 2, 3, 1))