未验证 提交 cc225b4e 编写于 作者: T teng 提交者: GitHub

fix typo (#1202)

* fix typo
上级 64d90a12
......@@ -288,7 +288,7 @@ static int cpu_describe(struct device* device, struct vector* allowed_ops, struc
}
if (NULL == blocked_ops)
{
TLOG_ERR("Error: Allowed op list pointer is NULL\n");
TLOG_ERR("Error: Blocked op list pointer is NULL\n");
}
for (int i = OP_GENERIC + 1; i < OP_BUILTIN_LAST - 1; i++)
......
......@@ -69,7 +69,7 @@ static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct ex
ret = ref_concat_int8(ir_graph, ir_node, concat_param->axis);
}
else
TLOG_ERR("Input data type %d not to be supported.\n", output_tensor->data_type);
TLOG_ERR("Output data type %d not to be supported.\n", output_tensor->data_type);
return ret;
}
......
......@@ -57,7 +57,7 @@ static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct ex
if (deconv_dw_run(input_tensor, weight_tensor, bias_tensor, output_tensor, deconv_param, num_thread, cpu_affinity) < 0)
{
TLOG_ERR("hcl conv run failed\n");
TLOG_ERR("hcl deconv run failed\n");
// set_tengine_errno(EFAULT);
return -1;
}
......
......@@ -967,7 +967,7 @@ static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct ex
input_chan_0, input_hw_0, input1_count4, exec_graph->num_thread, input_hw_1, eltwise_param);
else
{
TLOG_ERR("Input data type %d not to be supported.\n", output_tensor->data_type);
TLOG_ERR("Output data type %d not to be supported.\n", output_tensor->data_type);
return -1;
}
......
......@@ -98,7 +98,7 @@ static int ref_scatter_fp32(float* input, float* output, int* indices, float* up
{
int axis = op_param->axis;
bool is_onnx = op_param->is_onnx;
TLOG_ERR("indices %f %f \n", updates[0], updates[1]);
TLOG_ERR("updates %f %f \n", updates[0], updates[1]);
TLOG_ERR("indices %d %d \n", indices[0], indices[1]);
int outSize = 1;
for (int i = 0; i < op_param->dim_size; i++)
......
......@@ -63,7 +63,7 @@ void relu6_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_uint2vo
void CUDAEngine::AddClipNode(struct graph* ir_graph, struct node* ir_node)
{
TLOG_INFO("Tengine GPU: Support OP(%d) OP_RELU.\n", ir_node->index);
TLOG_INFO("Tengine GPU: Support OP(%d) OP_CLIP.\n", ir_node->index);
relu6_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map);
this->ops.push_back(std::bind(&relu6_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map));
}
......@@ -68,7 +68,7 @@ void eltwisesum_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_ui
void CUDAEngine::AddEltwiseNode(struct graph* ir_graph, struct node* ir_node)
{
TLOG_INFO("Tengine GPU: Support OP(%d) OP_RELU.\n", ir_node->index);
TLOG_INFO("Tengine GPU: Support OP(%d) OP_ELTWISE.\n", ir_node->index);
eltwisesum_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map);
this->ops.push_back(std::bind(&eltwisesum_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map));
}
......@@ -10,7 +10,7 @@ extern "C"
bool OCLEngine::AddSliceNode(struct node* ir_node)
{
TLOG_INFO("Tengine OpenCL: Support OP_FLATTEN(%d).\n", ir_node->index);
TLOG_INFO("Tengine OpenCL: Support OP_SLICE(%d).\n", ir_node->index);
struct graph* ir_graph = ir_node->graph;
......
......@@ -96,7 +96,7 @@ nvdla::priv::canonical_ast::Node * ODLAEngine::AddDeconvlutionNode(struct node*
break;
}
default:
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of conv(id: %d, name: %s).\n", conv_weight->data_type, ir_node->index, ir_node->name);
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of deconv(id: %d, name: %s).\n", conv_weight->data_type, ir_node->index, ir_node->name);
return nullptr;
}
......@@ -138,7 +138,7 @@ nvdla::priv::canonical_ast::Node * ODLAEngine::AddDeconvlutionNode(struct node*
break;
}
default:
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of conv(id: %d, name: %s).\n", conv_bias->data_type, ir_node->index, ir_node->name);
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of deconv(id: %d, name: %s).\n", conv_bias->data_type, ir_node->index, ir_node->name);
return nullptr;
}
......
......@@ -66,7 +66,7 @@ nvdla::priv::canonical_ast::Node * ODLAEngine::AddFullyConnectionNode(struct nod
{
if (weight_tensor->quant_param_num != weight_tensor->dims[0])
{
fprintf(stderr, "Tengine: Unsupported weight quant channel of conv(id: %d, name: %s).\n", ir_node->index, ir_node->name);
fprintf(stderr, "Tengine: Unsupported weight quant channel of fc(id: %d, name: %s).\n", ir_node->index, ir_node->name);
return nullptr;
}
......@@ -104,7 +104,7 @@ nvdla::priv::canonical_ast::Node * ODLAEngine::AddFullyConnectionNode(struct nod
break;
}
default:
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of conv(id: %d, name: %s).\n", weight_tensor->data_type, ir_node->index, ir_node->name);
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of fc(id: %d, name: %s).\n", weight_tensor->data_type, ir_node->index, ir_node->name);
return nullptr;
}
......@@ -147,7 +147,7 @@ nvdla::priv::canonical_ast::Node * ODLAEngine::AddFullyConnectionNode(struct nod
break;
}
default:
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of conv(id: %d, name: %s).\n", bias_tensor->data_type, ir_node->index, ir_node->name);
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of fc(id: %d, name: %s).\n", bias_tensor->data_type, ir_node->index, ir_node->name);
return nullptr;
}
......
......@@ -83,7 +83,7 @@ bool TensorRTEngine::AddCropNode(struct graph* ir_graph, struct node* node)
nvinfer1::IPaddingLayer* layer = this->network->addPadding(*crop_input_tensor, prePadding, postPadding);
if (nullptr == layer)
{
fprintf(stderr, "Tengine: Add Flatten(id: %d, name: %s) layer failed.\n", node->index, node->name);
fprintf(stderr, "Tengine: Add Crop(id: %d, name: %s) layer failed.\n", node->index, node->name);
return false;
}
......
......@@ -103,7 +103,7 @@ bool TensorRTEngine::AddDeConvolutionNode(struct graph* ir_graph, struct node *n
break;
}
default:
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of conv(id: %d, name: %s).\n", deconv_weight->data_type, node->index, node->name);
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of deconv(id: %d, name: %s).\n", deconv_weight->data_type, node->index, node->name);
return false;
}
......@@ -143,7 +143,7 @@ bool TensorRTEngine::AddDeConvolutionNode(struct graph* ir_graph, struct node *n
break;
}
default:
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of conv(id: %d, name: %s).\n",
fprintf(stderr, "Tengine: Unsupported weight quant data type(%d) of deconv(id: %d, name: %s).\n",
deconv_bias->data_type, node->index, node->name);
return false;
}
......
......@@ -37,13 +37,13 @@ bool TensorRTEngine::AddReductionNode(struct graph* ir_graph, struct node* node)
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);
if (nullptr == input_tensor || nullptr == output_tensor)
{
fprintf(stderr, "Tengine: Get input & output for Reshape(id: %d, name: %s) layer failed.\n", node->index, node->name);
fprintf(stderr, "Tengine: Get input & output for Reduction(id: %d, name: %s) layer failed.\n", node->index, node->name);
return false;
}
if (!check_if_input_in_map(input_tensor->index, this->tensor_swap_map))
{
fprintf(stderr, "Tengine: Query input for Reshape(id: %d, name: %s) layer failed.\n", node->index, node->name);
fprintf(stderr, "Tengine: Query input for Reduction(id: %d, name: %s) layer failed.\n", node->index, node->name);
return false;
}
......
......@@ -64,7 +64,7 @@ bool TensorRTEngine::AddResizeNode(struct graph* ir_graph, struct node* node)
nvinfer1::IResizeLayer* layer = this->network->addResize(*interp_input_tensor);
if (nullptr == layer)
{
fprintf(stderr, "Tengine: Add Interp(id: %d, name: %s) layer failed.\n", node->index, node->name);
fprintf(stderr, "Tengine: Add Resize(id: %d, name: %s) layer failed.\n", node->index, node->name);
return false;
}
......
......@@ -37,13 +37,13 @@ bool TensorRTEngine::AddUpSampleNode(struct graph* ir_graph, struct node* node)
struct tensor* upsample_output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);
if (nullptr == upsample_input || nullptr == upsample_output)
{
fprintf(stderr, "Tengine: Get input & output for Interp(id: %d, name: %s) layer failed.\n", node->index, node->name);
fprintf(stderr, "Tengine: Get input & output for Upsample(id: %d, name: %s) layer failed.\n", node->index, node->name);
return false;
}
if (!check_if_input_in_map(upsample_input->index, this->tensor_swap_map))
{
fprintf(stderr, "Tengine: Query input for Interp(id: %d, name: %s) layer failed.\n", node->index, node->name);
fprintf(stderr, "Tengine: Query input for Upsample(id: %d, name: %s) layer failed.\n", node->index, node->name);
return false;
}
......
......@@ -260,7 +260,7 @@ int TensorRTEngine::Build(struct subgraph* subgraph)
{
if (!AddInterpNode(ir_graph, ir_node))
{
TLOG_ERR("Tengine: Cannot add FullyConnected op(%d).\n", ir_node->index);
TLOG_ERR("Tengine: Cannot add Interp op(%d).\n", ir_node->index);
return -6;
}
break;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请注册