diff --git a/lite/core/mir/subgraph/subgraph_detector.cc b/lite/core/mir/subgraph/subgraph_detector.cc
index 0af34219da915a327bd600676c57b64b2a1282b0..99aedee511812e298af3b58ab91e0ffe5f3dbbf8 100644
--- a/lite/core/mir/subgraph/subgraph_detector.cc
+++ b/lite/core/mir/subgraph/subgraph_detector.cc
@@ -463,6 +463,38 @@ void SubgraphFuser::InsertNewNode(SSAGraph *graph,
                                                      idata_var_names);
   subgraph_op_desc.SetAttr<std::vector<std::string>>("output_data_names",
                                                      odata_var_names);
+
+  // Set input/output scale values of input/output var nodes for
+  // type_precision_cast_pass.
+  std::vector<float> input_data_scales;
+  std::vector<float> output_data_scales;
+  for (auto &var_node : idata_var_nodes) {
+    auto any_op_node = var_node->outlinks.front();
+    CHECK(any_op_node->IsStmt());
+    auto &any_inst = any_op_node->AsStmt();
+    if (any_inst.op_info()->HasAttr("input_scale")) {
+      input_data_scales.push_back(
+          any_inst.op_info()->GetAttr<float>("input_scale"));
+    }
+  }
+  for (auto &var_node : odata_var_nodes) {
+    auto any_op_node = var_node->inlinks.front();
+    CHECK(any_op_node->IsStmt());
+    auto &any_inst = any_op_node->AsStmt();
+    if (any_inst.op_info()->HasAttr("output_scale")) {
+      output_data_scales.push_back(
+          any_inst.op_info()->GetAttr<float>("output_scale"));
+    }
+  }
+  if (input_data_scales.size() > 0) {
+    subgraph_op_desc.SetAttr<std::vector<float>>("input_data_scales",
+                                                 input_data_scales);
+  }
+  if (output_data_scales.size() > 0) {
+    subgraph_op_desc.SetAttr<std::vector<float>>("output_data_scales",
+                                                 output_data_scales);
+  }
+
   // Set all of the inputs and outputs to the target subgraph op
   // To prevent vars are removed in RuntimeProgram::UpdateVarsOfProgram()
   std::vector<std::string> input_var_names;
diff --git a/lite/kernels/apu/bridges/softmax_op.cc b/lite/kernels/apu/bridges/softmax_op.cc
index 6a289ac987b9fa300cb548d190b6e46b67f24c44..13aadcddc64c5cd46f722547d215a1f06ce21941 100644
--- a/lite/kernels/apu/bridges/softmax_op.cc
+++ b/lite/kernels/apu/bridges/softmax_op.cc
@@ -104,7 +104,7 @@ int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   // Add out operand
   NeuronOperandType outType;
   outType.type = NEURON_TENSOR_QUANT8_ASYMM;
-  outType.scale = out_scale / 127;
+  outType.scale = out_scale;
   outType.zeroPoint = 128;
   outType.dimensionCount = x_dims.size();
   outType.dimensions = &dims_x[0];
diff --git a/lite/utils/string.h b/lite/utils/string.h
index b1aaf5d6c56d8931c4ad416f9d38c947abc68dd8..e326c54b53d24793742a02c7abeed64fdb0fefe4 100644
--- a/lite/utils/string.h
+++ b/lite/utils/string.h
@@ -60,13 +60,6 @@ static std::string to_string(const T& v) {
   return ss.str();
 }
 
-static std::string to_string(int index) {
-  const int BUFFER_LENGTH = 15;
-  char buffer[BUFFER_LENGTH];
-  snprintf(buffer, sizeof(buffer), "%d", index);
-  return std::string(buffer);
-}
-
 template <typename T>
 static T parse_string(const std::string& v) {
   return v;