Unverified commit 95265d5c, authored by Aurelius84 and committed by GitHub

[Yaml] Fix topk yaml compilation problem on Windows (#41082)

* [Yaml] Fix topk yaml compilation on Windows

* fix make_shared

* fix conflict
Parent 5c1631f2
@@ -1107,10 +1107,12 @@ static std::string GenerateGradNodeCreationContent(
   size_t bwd_in_slot_num = out_vars.size();
   size_t bwd_out_slot_num = in_vars.size();
   const char* GRAD_OP_NODE_TEMPLATE =
-      " auto grad_node = std::make_shared<GradNode%s>(%d, %d);\n";
+      " auto grad_node = std::shared_ptr<GradNode%s>(new GradNode%s(%d, "
+      "%d));\n";
   grad_node_creation_str += " // Create GradOpNode\n";
-  grad_node_creation_str += paddle::string::Sprintf(
-      GRAD_OP_NODE_TEMPLATE, op_type, bwd_in_slot_num, bwd_out_slot_num);
+  grad_node_creation_str +=
+      paddle::string::Sprintf(GRAD_OP_NODE_TEMPLATE, op_type, op_type,
+                              bwd_in_slot_num, bwd_out_slot_num);
   grad_node_creation_str += "\n";
   VLOG(6) << "Generated GradOpNode construction";
...
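The template change above switches the generated construction from `std::make_shared<GradNode...>` to `std::shared_ptr<GradNode...>(new GradNode...(...))`, because some grad nodes hold an `experimental::Scalar` whose `complex128` storage is manually aligned, and `make_shared`'s fused allocation may not honor that extended alignment on every toolchain. A minimal sketch of the two forms, using a hypothetical over-aligned payload and node type (not the real Paddle classes):

```cpp
#include <cstddef>
#include <memory>

// Hypothetical over-aligned payload, standing in for the manually aligned
// complex128 data held by experimental::Scalar inside some grad nodes.
struct alignas(32) Payload {
  double real;
  double imag;
};

// Hypothetical grad node; the real GradNode types take the backward
// input/output slot counts, as in the generated code above.
struct GradNodeExample {
  GradNodeExample(std::size_t bwd_in_slots, std::size_t bwd_out_slots)
      : in_slots(bwd_in_slots), out_slots(bwd_out_slots) {}
  std::size_t in_slots;
  std::size_t out_slots;
  Payload data;  // requires 32-byte alignment
};

int main() {
  // make_shared fuses the control block and the object into one allocation;
  // on toolchains without full aligned-new support that allocation may ignore
  // the extended alignment of Payload.
  auto fused = std::make_shared<GradNodeExample>(1, 2);

  // Separate new-expression: operator new for GradNodeExample is responsible
  // for the alignment. This is the form the generator now emits.
  auto split = std::shared_ptr<GradNodeExample>(new GradNodeExample(1, 2));

  return fused && split ? 0 : 1;
}
```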
@@ -650,8 +650,12 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
         num_backward_inputs = len(forward_outputs_position_map.keys())
         num_backward_outputs = len(forward_inputs_position_map.keys())
         grad_node_name = GetGradNodeName(forward_api_name)
-        node_construction_str = f" auto grad_node = std::make_shared<{grad_node_name}>({num_backward_inputs}, {num_backward_outputs});"
+        # NOTE(Aurelius84): DO NOT use make_shared here, because some Nodes contain experimental::Scalar,
+        # which holds "complex128" as data. "complex128" is memory-aligned manually, but make_shared
+        # requests MEMALIGN for allocation (maybe).
+        # See https://stackoverflow.com/questions/31228656/how-can-shared-ptr-disrupt-alignment
+        # and https://github.com/MRtrix3/mrtrix3/issues/957
+        node_construction_str = f" auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"

         # SetAttributes
         set_attributes_list = []
...
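As a quick illustrative check of the reasoning in the NOTE above (hypothetical types, not Paddle code): constructing the node through a plain new-expression keeps an over-aligned member correctly placed.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>

// Hypothetical stand-in for a manually aligned complex128 value.
struct alignas(16) Complex128 {
  double real;
  double imag;
};

// Hypothetical node holding such a value, constructed the way the
// generated code now does it.
struct GradNodeDemo {
  GradNodeDemo(std::size_t bwd_in, std::size_t bwd_out)
      : in_slots(bwd_in), out_slots(bwd_out), scalar{0.0, 0.0} {}
  std::size_t in_slots;
  std::size_t out_slots;
  Complex128 scalar;
};

int main() {
  auto node = std::shared_ptr<GradNodeDemo>(new GradNodeDemo(2, 1));
  // The new-expression guarantees alignof(GradNodeDemo), so the embedded
  // Complex128 member lands on a 16-byte boundary.
  assert(reinterpret_cast<std::uintptr_t>(&node->scalar) %
             alignof(Complex128) == 0);
  return 0;
}
```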
@@ -1373,7 +1373,15 @@
   backward : tile_grad
   # no_need_buffer : x

-# trace
+- api : top_k
+  args : (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true)
+  output : Tensor(out), Tensor(indices)
+  infer_meta :
+    func : TopKInferMeta
+  kernel :
+    func : top_k
+  backward : top_k_grad
+
 - api : trace
   args : (Tensor x, int offset, int axis1, int axis2)
   output : Tensor
...
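For readers unfamiliar with the yaml code generator, here is a toy, self-contained sketch (not Paddle code; only the names taken from the entry above are real) of how the `top_k` entry's fields map to a forward API: `args` becomes the parameter list with the same defaults, `output` the returned tensors, `infer_meta` derives the output shapes, and `kernel` performs the actual compute.

```cpp
#include <cstdint>
#include <tuple>
#include <vector>

// Toy stand-ins; not the Paddle Tensor/Scalar types.
struct Tensor { std::vector<int64_t> dims; };
struct Scalar { int64_t value; };

// infer_meta : TopKInferMeta -- derives the output shapes from x, k, axis.
void TopKInferMetaSketch(const Tensor& x, const Scalar& k, int axis,
                         Tensor* out, Tensor* indices) {
  out->dims = x.dims;
  indices->dims = x.dims;
  if (!out->dims.empty()) {
    int a = axis < 0 ? static_cast<int>(out->dims.size()) + axis : axis;
    out->dims[a] = k.value;
    indices->dims[a] = k.value;
  }
}

// api : top_k
// args   -> parameter list (same defaults as the yaml entry)
// output -> Tensor(out), Tensor(indices)
std::tuple<Tensor, Tensor> top_k(const Tensor& x, const Scalar& k,
                                 int axis = -1, bool largest = true,
                                 bool sorted = true) {
  Tensor out, indices;
  TopKInferMetaSketch(x, k, axis, &out, &indices);
  // kernel : top_k would fill out/indices here; omitted in this sketch.
  (void)largest;
  (void)sorted;
  return std::make_tuple(out, indices);
}

int main() {
  Tensor x{{4, 10}};
  auto result = top_k(x, Scalar{3});
  return std::get<0>(result).dims[1] == 3 ? 0 : 1;
}
```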
@@ -866,6 +866,16 @@
   kernel :
     func : tile_grad

+- backward_api : top_k_grad
+  forward : top_k (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true) -> Tensor(out), Tensor(indices)
+  args : (Tensor x, Tensor indices, Tensor out_grad, Scalar k = -1, int axis = -1, bool largest = true, bool sorted = true)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : top_k_grad
+
 - backward_api : trace_grad
   forward : trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, int offset, int axis1, int axis2)
...
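Similarly, a toy sketch (not Paddle code) of the `top_k_grad` entry: `UnchangedInferMeta` with `param : [x]` gives `x_grad` the same meta as `x`, and the `top_k_grad` kernel would scatter `out_grad` back through `indices`.

```cpp
#include <cstdint>
#include <vector>

// Toy stand-ins; not the Paddle Tensor/Scalar types.
struct Tensor { std::vector<int64_t> dims; };
struct Scalar { int64_t value; };

// infer_meta : UnchangedInferMeta, param : [x]
// x_grad simply inherits x's meta (shape) in this sketch.
void UnchangedInferMetaSketch(const Tensor& x, Tensor* x_grad) {
  x_grad->dims = x.dims;
}

// backward_api : top_k_grad
// args   -> (x, indices, out_grad, k = -1, axis = -1, largest, sorted)
// output -> Tensor(x_grad)
Tensor top_k_grad(const Tensor& x, const Tensor& indices,
                  const Tensor& out_grad, const Scalar& k = Scalar{-1},
                  int axis = -1, bool largest = true, bool sorted = true) {
  Tensor x_grad;
  UnchangedInferMetaSketch(x, &x_grad);
  // kernel : top_k_grad would scatter out_grad into x_grad at the positions
  // recorded in indices; omitted in this sketch.
  (void)indices;
  (void)out_grad;
  (void)k;
  (void)axis;
  (void)largest;
  (void)sorted;
  return x_grad;
}

int main() {
  Tensor x{{4, 10}};
  Tensor indices{{4, 3}};
  Tensor out_grad{{4, 3}};
  Tensor x_grad = top_k_grad(x, indices, out_grad);
  return x_grad.dims == x.dims ? 0 : 1;
}
```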