diff --git a/lite/kernels/npu/bridges/dropout_op.cc b/lite/kernels/npu/bridges/dropout_op.cc
index 505a20ee7f2e1f814a414e04b048b0bc0f8d1857..9bf7d3bbca00fb1c6bce964184ec36215a783ba0 100644
--- a/lite/kernels/npu/bridges/dropout_op.cc
+++ b/lite/kernels/npu/bridges/dropout_op.cc
@@ -34,8 +34,6 @@ int DropoutConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto x_name = op_info->Input("X").front();
   auto x = scope->FindMutableTensor(x_name);
   auto x_dims = x->dims();
-  auto x_rank = x_dims.size();
-  CHECK_GE(x_rank, 2);
 
   auto out_name = op_info->Output("Out").front();
 
@@ -45,9 +43,6 @@ int DropoutConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   float scale = 1 - dropout_prob;
   if (dropout_implementation == "upscale_in_train") {
     scale = 1.f;
   }
-  // HiAI only support [n, c, 1, 1] for the shape of scale
-  std::vector<int64_t> scale_shape = {
-      1, x_rank < 3 ? 1 : x_dims[x_rank - 3], 1, 1};
   // X node
   std::shared_ptr<Node> x_node = nullptr;
@@ -61,11 +56,7 @@ int DropoutConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto scale_node = graph->Add<ge::op::Scale>(out_name);
   auto scale_op = scale_node->data<ge::op::Scale>();
   scale_op->set_input_x(*x_node->data());
-  scale_op->set_attr_axis(1);
-
-  // Add filter node(fill with scale)
-  auto filter_node = graph->Add(out_name + "/filter", scale, scale_shape);
-  scale_op->set_input_filter(*filter_node->data());
+  scale_op->set_attr_filler_value(scale);
   return REBUILD_WHEN_SHAPE_CHANGED;
 }
 
diff --git a/lite/kernels/npu/bridges/reduce_mean_op.cc b/lite/kernels/npu/bridges/reduce_mean_op.cc
index b2fcd4742989f8d47fce3e3ef643dc32eb5ce5ea..c1cffe09ec10f6b641a47ee6bcd05758c08a81fc 100644
--- a/lite/kernels/npu/bridges/reduce_mean_op.cc
+++ b/lite/kernels/npu/bridges/reduce_mean_op.cc
@@ -43,7 +43,7 @@ int ReduceMeanConverter(void* ctx, OpLite* op, KernelBase* kernel) {
       dim[i] += x_dims.size();
     }
   }
-  std::sort(dim.begin(), dim.end());
+  std::stable_sort(dim.begin(), dim.end());
 
   // X node
   std::shared_ptr<Node> x_node = nullptr;