Unverified commit 962f67d2, authored by chen2016013, committed by GitHub

[IR] Generate pd_op.parsed.yaml from pd_op.yaml (#56674)

* Generate pd_op.parsed.yaml from pd_op.yaml

* Generate pd_op.parsed.yaml from pd_op.yaml

* fix bug

* bug fix

* bug fix

* bug fix

* Add new ops to pd_ops.yaml & change the location where pd_ops.parsed.yaml is stored

* Fix path dependency bug & add .gitignore file

* fix bug - compat input args in save_combine op

* fix compat file

* fix set_value_with_tensor yaml

* split backward op in original yaml file

* add send_v2 & recv_v2
Parent 61abe526
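For orientation, the new build rule added below can also be reproduced by hand. A minimal sketch (an assumption, not part of the commit: run from the Paddle source root, using the same script, flags and paths as the added add_custom_command):

    # Sketch only: regenerate the parsed op descriptions the way the new CMake rule does.
    import os
    import subprocess
    import sys

    ir_dir = "paddle/fluid/ir/dialect/paddle_dialect/ir"
    gen_dir = os.path.join(ir_dir, "generated")
    parse_op = "paddle/fluid/operators/generator/parse_op.py"

    os.makedirs(gen_dir, exist_ok=True)
    # Forward ops: pd_ops.yaml -> pd_ops.parsed.yaml
    subprocess.check_call([
        sys.executable, parse_op,
        "--op_yaml_path", os.path.join(ir_dir, "pd_ops.yaml"),
        "--output_path", os.path.join(gen_dir, "pd_ops.parsed.yaml"),
    ])
    # Backward ops: pd_ops_backward.yaml -> pd_ops_backward.parsed.yaml
    subprocess.check_call([
        sys.executable, parse_op,
        "--op_yaml_path", os.path.join(ir_dir, "pd_ops_backward.yaml"),
        "--output_path", os.path.join(gen_dir, "pd_ops_backward.parsed.yaml"),
        "--backward",
    ])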
@@ -2,6 +2,9 @@ set(PD_DIALECT_BINARY_DIR
     "${PADDLE_BINARY_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir")
 # Generate pd_dialect files defining op using op_gen_file
+set(op_gen_parsed_yaml_file
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parse_op.py)
 set(op_gen_file
     ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/op_gen.py)
 set(op_compat_yaml_file ${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml)

@@ -23,11 +26,22 @@ set(fused_op_forward_yaml_file
 set(fused_op_backward_yaml_file
     ${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/fused_backward.parsed.yaml
 )
-set(op_yaml_file3
-    ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.yaml)
+set(pd_op_forward_yaml_file
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops.yaml)
+set(pd_op_backward_yaml_file
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops_backward.yaml
+)
+set(parsed_op_dir
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir/generated)
+set(op_yaml_file3 ${parsed_op_dir}/pd_ops.parsed.yaml)
+set(op_yaml_file4 ${parsed_op_dir}/pd_ops_backward.parsed.yaml)
 set(op_yaml_files
-    ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${fused_op_forward_yaml_file},${fused_op_backward_yaml_file},${op_yaml_file3}
+    ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${fused_op_forward_yaml_file},${fused_op_backward_yaml_file},${op_yaml_file3},${op_yaml_file4}
 )
 set(op_namespace paddle,dialect)
 set(dialect_name pd)

@@ -39,6 +53,15 @@ set(op_source_file_tmp ${op_source_file}.tmp)
 set(op_vjp_source_file ${PD_DIALECT_BINARY_DIR}/pd_op_vjp.cc)
 set(op_vjp_source_file_tmp ${op_vjp_source_file}.tmp)

+add_custom_command(
+  OUTPUT ${op_yaml_file3} ${op_yaml_file4}
+  COMMAND ${CMAKE_COMMAND} -E make_directory ${parsed_op_dir}
+  COMMAND ${PYTHON_EXECUTABLE} ${op_gen_parsed_yaml_file} --op_yaml_path
+          ${pd_op_forward_yaml_file} --output_path ${op_yaml_file3}
+  COMMAND ${PYTHON_EXECUTABLE} ${op_gen_parsed_yaml_file} --op_yaml_path
+          ${pd_op_backward_yaml_file} --output_path ${op_yaml_file4} --backward
+  VERBATIM)
+
 add_custom_command(
   OUTPUT ${op_header_file} ${op_source_file} ${op_vjp_source_file}
   COMMAND

@@ -55,9 +78,14 @@ add_custom_command(
           ${op_vjp_source_file}
   COMMENT
     "copy_if_different ${op_header_file} ${op_source_file} ${op_vjp_source_file}"
-  DEPENDS ${op_gen_file} ${op_forward_yaml_file1} ${op_forward_yaml_file2}
-          ${op_backward_yaml_file1} ${op_backward_yaml_file2}
+  DEPENDS ${op_gen_file}
+          ${op_forward_yaml_file1}
+          ${op_forward_yaml_file2}
+          ${op_backward_yaml_file1}
+          ${op_backward_yaml_file2}
           ${op_compat_yaml_file}
+          ${op_yaml_file3}
+          ${op_yaml_file4}
   VERBATIM)

 set(api_gen_file

@@ -79,9 +107,14 @@ add_custom_command(
   COMMAND ${CMAKE_COMMAND} -E copy_if_different ${api_source_file_tmp}
           ${api_source_file}
   COMMENT "copy_if_different ${api_header_file} ${api_source_file}"
-  DEPENDS ${api_gen_file} ${op_forward_yaml_file1} ${op_forward_yaml_file2}
-          ${op_backward_yaml_file1} ${op_backward_yaml_file2}
+  DEPENDS ${api_gen_file}
+          ${op_forward_yaml_file1}
+          ${op_forward_yaml_file2}
+          ${op_backward_yaml_file1}
+          ${op_backward_yaml_file2}
           ${op_compat_yaml_file}
+          ${op_yaml_file3}
+          ${op_yaml_file4}
   VERBATIM)

 set(python_c_gen_file

@@ -105,9 +138,14 @@ add_custom_command(
   COMMAND ${CMAKE_COMMAND} -E copy_if_different ${python_c_source_file_tmp}
           ${python_c_source_file}
   COMMENT "copy_if_different ${python_c_header_file} ${python_c_source_file}"
-  DEPENDS ${python_c_gen_file} ${op_forward_yaml_file1} ${op_forward_yaml_file2}
-          ${op_backward_yaml_file1} ${op_backward_yaml_file2}
+  DEPENDS ${python_c_gen_file}
+          ${op_forward_yaml_file1}
+          ${op_forward_yaml_file2}
+          ${op_backward_yaml_file1}
+          ${op_backward_yaml_file2}
           ${op_compat_yaml_file}
+          ${op_yaml_file3}
+          ${op_yaml_file4}
   VERBATIM)

 add_custom_target(static_op_function_gen ALL DEPENDS ${python_c_header_file}
...
- op : add_n_
  args : (Tensor[] inputs)
  output : Tensor(out)
  infer_meta:
    func: AddNInferMeta
    param: [inputs]
  kernel:
    func: add_n
    param: [inputs]
  backward: add_n_grad

- op : add_n_with_kernel
  args : (Tensor[] inputs)
  output : Tensor(out)
  infer_meta:
    func: AddNInferMeta
    param: [inputs]
  kernel:
    func: add_n
    param: [inputs]
  backward: add_n_grad

- op : assert
  args : (Tensor cond, Tensor[] data, int64_t summarize = -1)
  output :
  kernel :
    func : assert
    param : [cond, data, summarize]
    data_type : cond

- op : assign_value
  args : (int[] shape, DataType dtype, Scalar[] values, Place place = {})
  output : Tensor(out)
  infer_meta :
    func : AssignValueInferMeta
    param: [shape, dtype]
  kernel :
    func : assign_value
    param : [shape, dtype, values]
    backend: place>
    data_type : dtype

- op : embedding_grad_sparse
  args : (Tensor x, Tensor weight, Tensor out_grad, int64_t padding_idx = -1, bool sparse = false)
  output : SelectedRows(weight_grad)
  infer_meta:
    func: EmbeddingGradSparseInferMeta
    param: [weight]
  kernel:
    func: embedding_sparse_grad
    param: [x, weight, out_grad, padding_idx, sparse]
    data_type : weight

- op : feed
  args : (str name, int col)
  output : Tensor(out)

- op : fetch
  args : (Tensor x, str name, int col)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : fetch
    param : [x]

- op : load_combine
  args : (str file_path, bool load_as_fp16, bool model_from_memory)
  output : Tensor[](Out)
  kernel:
    func: load_combine
    param: [file_path, load_as_fp16, model_from_memory]
  optional : Out

- op : lod_array_length
  args : (Tensor[] x)
  output : Tensor(out)

- op : print
  args : (Tensor in, int first_n, str message, int summarize, bool print_tensor_name = true, bool print_tensor_type = true, bool print_tensor_shape = true, bool print_tensor_layout = true, bool print_tensor_lod = true, str print_phase = "BOTH", bool is_forward = true)
  output : Tensor(out)
  infer_meta:
    func: UnchangedInferMeta
    param: [in]
  kernel :
    func : print_kernel
    param: [in, first_n, message, summarize, print_tensor_name, print_tensor_type, print_tensor_shape, print_tensor_layout, print_tensor_lod, print_phase, is_forward]

- op : recv_v2
  args : (int[] out_shape = {}, DataType dtype = DataType::FLOAT32, int peer = 0, int ring_id = 0, bool use_calc_stream = false, bool dynamic_shape = false)
  output : Tensor(out)
  infer_meta:
    func: RecvV2InferMeta
    param: [peer, dtype, out_shape]
  kernel :
    func : recv_v2
    param : [ring_id, dynamic_shape, peer, out_shape, dtype, use_calc_stream]
    data_type : dtype

- op : save_combine
  args : (Tensor[] x, str file_path, bool overwrite, bool save_as_fp16, bool save_to_memory)
  output : Tensor(out)
  kernel:
    func: save_combine_tensor
    param: [x, file_path, overwrite, save_as_fp16, save_to_memory]
  optional : out

- op : send_v2
  args : (Tensor x, int ring_id = 0, int peer = 0, bool use_calc_stream = false, bool dynamic_shape = false)
  output :
  infer_meta:
    func: SendV2InferMeta
    param: [peer, ring_id]
  kernel :
    func : send_v2
    param : [x, ring_id, dynamic_shape, peer, use_calc_stream]

- op : set_value
  args : (Tensor x, int64_t[] starts, int64_t[] ends, int64_t[] steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes, int64_t[] shape, Scalar[] values)
  output : Tensor(out)
  infer_meta:
    func: SetValueInferMeta
    param: [x]
  kernel:
    func: set_value
    param: [x, starts, ends, steps, axes, decrease_axes, none_axes, shape, values]
  inplace: (x -> out)
  backward: set_value_grad

- op : set_value_with_tensor
  args : (Tensor x, Tensor values, int64_t[] starts, int64_t[] ends, int64_t[] steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
  output : Tensor(out)
  infer_meta:
    func: SetValueInferMeta
    param: [x]
  kernel:
    func: set_value_with_tensor
    param: [x, values, starts, ends, steps, axes, decrease_axes, none_axes]
  inplace: (x -> out)
  backward: set_value_grad

- op : shadow_feed
  args : (Tensor x)
  output : Tensor(out)
  infer_meta:
    func: UnchangedInferMeta
    param: [x]
  kernel:
    func: shadow_feed
    param: [x]

- op : share_buffer_
  args : (Tensor[] x, bool[] share_dims_and_dtype = {})
  output : Tensor[](out){x.size()}, Tensor[](xout){x.size()}

- op : write_to_array
  args : (Tensor i, Tensor x)
  output : Tensor[](out)
  backward: write_to_array_grad

- backward_op : set_value_grad
  args : (Tensor out_grad, Tensor values, int64_t[] starts, int64_t[] ends, int64_t[] steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
  output : Tensor(x_grad), Tensor(values_grad)
  infer_meta:
    func: SetValueGradInferMeta
    param: [out_grad, values]
  kernel:
    func: set_value_grad
    param: [out_grad, starts, ends, steps, axes, decrease_axes, none_axes]
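Note that some of the ops above (assert, send_v2) declare no output at all, which is why parse_outputs() in parse_op.py is taught to tolerate a missing value further down. A small standalone check of how such an entry loads (PyYAML assumed available; not part of this commit):

    import textwrap

    import yaml

    entry = textwrap.dedent(
        """\
        - op : assert
          args : (Tensor cond, Tensor[] data, int64_t summarize = -1)
          output :
          kernel :
            func : assert
            param : [cond, data, summarize]
            data_type : cond
        """
    )
    ops = yaml.safe_load(entry)
    # An empty `output :` loads as None, not as an empty string or list.
    assert ops[0]["output"] is None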
@@ -122,7 +122,6 @@ set(generated_static_argument_mapping_path
     ${CMAKE_SOURCE_DIR}/paddle/phi/ops/compat/generated_static_sig.cc)
 set(generated_sparse_argument_mapping_path
     ${CMAKE_SOURCE_DIR}/paddle/phi/ops/compat/generated_sparse_sig.cc)
-
 execute_process(
   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/paddle/fluid/operators/generator
   COMMAND ${CMAKE_COMMAND} -E make_directory ${parsed_op_dir} RESULTS_VARIABLE

@@ -139,6 +138,7 @@ message(
     - ${legacy_bw_op_yaml_file}
     - ${fused_op_yaml_file}
     - ${static_op_yaml_file}")
+
 execute_process(
   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/paddle/fluid/operators/generator
   COMMAND ${PYTHON_EXECUTABLE} parse_op.py --op_yaml_path ${op_yaml_file}
...
@@ -150,6 +150,8 @@ def parse_output(op_name: str, s: str) -> Dict[str, str]:

 def parse_outputs(op_name: str, outputs: str) -> List[Dict]:
+    if outputs is None:
+        return []
     outputs = parse_plain_list(outputs, sep=",")
     output_items = []
     for output in outputs:

@@ -169,6 +171,7 @@ def parse_candidates(s: str) -> Dict[str, Any]:
     delimiter = ">" if ">" in s else ","
     ordered = delimiter == ">"
     candidates = parse_plain_list(s, delimiter)
+    candidates = list(filter(None, candidates))
     return {"ordered": ordered, "candidates": candidates}
@@ -523,14 +526,20 @@ def parse_op_entry(op_entry: Dict[str, Any], name_field="op"):

     if is_base_op:
         # kernel
-        kernel = parse_kernel(op_name, op_entry["kernel"])
-        if kernel["param"] is None:
-            kernel["param"] = input_names + attr_names
+        if "kernel" in op_entry:
+            kernel = parse_kernel(op_name, op_entry["kernel"])
+            if kernel["param"] is None:
+                kernel["param"] = input_names + attr_names
+            op.update({"kernel": kernel})

         # infer meta
-        infer_meta = parse_infer_meta(op_entry["infer_meta"])
-        if infer_meta["param"] is None:
-            infer_meta["param"] = copy(kernel["param"])
+        if "infer_meta" in op_entry:
+            infer_meta = parse_infer_meta(op_entry["infer_meta"])
+            if infer_meta["param"] is None:
+                infer_meta["param"] = copy(kernel["param"])
+            op.update({"infer_meta": infer_meta})
+        # else:
+        #     assert(outputs == []), f"No infer_meta is given in {op_name}."

         # inplace
         if "inplace" in op_entry:

@@ -544,8 +553,6 @@ def parse_op_entry(op_entry: Dict[str, Any], name_field="op"):
             view_pairs = None
         op.update(
             {
-                "infer_meta": infer_meta,
-                "kernel": kernel,
                 "inplace": inplace_pairs,
                 "view": view_pairs,
             }
...
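Net effect of the parse_op_entry() change: kernel and infer_meta are now optional, so entries such as feed and lod_array_length, which declare neither, parse without a KeyError. A reduced sketch of that control flow, with a hypothetical helper standing in for the real parsing functions:

    def parse_base_op_sketch(op_entry: dict) -> dict:
        # Hypothetical stand-in for parse_op_entry(); only the optional-block logic is shown.
        op = {"name": op_entry["op"]}
        if "kernel" in op_entry:
            op["kernel"] = op_entry["kernel"]          # parse_kernel(...) in the real code
        if "infer_meta" in op_entry:
            op["infer_meta"] = op_entry["infer_meta"]  # parse_infer_meta(...) in the real code
        return op


    assert "kernel" not in parse_base_op_sketch({"op": "feed"})
    assert "infer_meta" in parse_base_op_sketch(
        {"op": "fetch", "infer_meta": {"func": "UnchangedInferMeta"}, "kernel": {"func": "fetch"}}
    )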
@@ -79,7 +79,11 @@ opmaker_attr_types_map = {
     'str[]': 'std::vector<std::string>',
 }

-output_type_map = {'Tensor': 'Tensor', 'Tensor[]': 'std::vector<Tensor>'}
+output_type_map = {
+    'Tensor': 'Tensor',
+    'Tensor[]': 'std::vector<Tensor>',
+    'SelectedRows': 'SelectedRows',
+}

 # ------------------------------ phi attr ------------------------------
 phi_attr_types_map = attr_types_map.copy()
...
@@ -2375,6 +2375,10 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]

+- op : save_combine
+  inputs :
+    {x : X}
+
 - op : scale
   backward : scale_grad
   inputs :
...
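The save_combine entry above only records a name mapping: the argument spelled x in pd_ops.yaml corresponds to the legacy OpMaker input X (the "compat input args" fix mentioned in the commit message). A hedged illustration of what the mapping expresses, not the generator's actual data structure:

    # Assumed shape of the loaded compat entry; the real generator builds richer records.
    compat_entry = {"op": "save_combine", "inputs": {"x": "X"}}

    def legacy_input_name(entry: dict, arg: str) -> str:
        # Fall back to the new-style name when no mapping is given.
        return entry.get("inputs", {}).get(arg, arg)

    assert legacy_input_name(compat_entry, "x") == "X"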