Unverified · Commit c5d3bc0e authored by zyfncg, committed by GitHub

support heterogeneous tensor for kernel in yaml (#42898)

Parent 7306d1fb
@@ -904,10 +904,8 @@ def monkey_patch_varbase():
                 #[1, 2, 3, 4, 5]
         """
-        if self.is_sparse_coo():
-            return _C_ops.final_state_sparse_coo_values(self)
-        elif self.is_sparse_csr():
-            return _C_ops.final_state_sparse_csr_values(self)
+        if self.is_sparse_coo() or self.is_sparse_csr():
+            return _C_ops.final_state_sparse_values(self)
         else:
             raise ValueError(
                 "only SparseCooTensor and SparseCsrTensor have method values")
......
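Editor's note: after this hunk, the patched Tensor.values() routes both sparse layouts through a single C op instead of two layout-specific ones. A minimal usage sketch (assuming the to_sparse_coo method patched alongside values() in this file; exact method names on this branch are an assumption):

# Sketch: Tensor.values() now serves COO and CSR alike via final_state_sparse_values.
import paddle

# to_sparse_coo(sparse_dim) is assumed to be among the patched sparse methods.
coo = paddle.to_tensor([[0.0, 1.0], [2.0, 0.0]]).to_sparse_coo(2)
print(coo.values())  # dense tensor holding the non-zero values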
@@ -47,10 +47,8 @@ def relu(x, name=None):
    assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
-    if x.is_sparse_coo():
-        return _C_ops.final_state_sparse_coo_relu(x)
-    elif x.is_sparse_csr():
-        return _C_ops.final_state_sparse_csr_relu(x)
+    if x.is_sparse_coo() or x.is_sparse_csr():
+        return _C_ops.final_state_sparse_relu(x)
     else:
         raise ValueError(
             "Currently, sparse.relu only support the input of SparseCooTensor or SparseCsrTensor"
@@ -87,10 +85,8 @@ def tanh(x, name=None):
    assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
-    if x.is_sparse_coo():
-        return _C_ops.final_state_sparse_coo_tanh(x)
-    elif x.is_sparse_csr():
-        return _C_ops.final_state_sparse_csr_tanh(x)
+    if x.is_sparse_coo() or x.is_sparse_csr():
+        return _C_ops.final_state_sparse_tanh(x)
     else:
         raise ValueError(
             "Currently, sparse.tanh only support the input of SparseCooTensor or SparseCsrTensor"
@@ -127,10 +123,8 @@ def sqrt(x, name=None):
    assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
-    if x.is_sparse_coo():
-        return _C_ops.final_state_sparse_coo_sqrt(x)
-    elif x.is_sparse_csr():
-        return _C_ops.final_state_sparse_csr_sqrt(x)
+    if x.is_sparse_coo() or x.is_sparse_csr():
+        return _C_ops.final_state_sparse_sqrt(x)
     else:
         raise ValueError(
             "Currently, sparse.sqrt only support the input of SparseCooTensor or SparseCsrTensor"
@@ -167,10 +161,8 @@ def sin(x, name=None):
    assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
-    if x.is_sparse_coo():
-        return _C_ops.final_state_sparse_coo_sin(x)
-    elif x.is_sparse_csr():
-        return _C_ops.final_state_sparse_csr_sin(x)
+    if x.is_sparse_coo() or x.is_sparse_csr():
+        return _C_ops.final_state_sparse_sin(x)
     else:
         raise ValueError(
             "Currently, sparse.sin only support the input of SparseCooTensor or SparseCsrTensor"
......
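Editor's note: with these hunks, each sparse unary API has one Python entry point per op regardless of layout; the per-layout choice now happens at the kernel-dispatch level. A hedged usage sketch (assuming these functions live under paddle.incubate.sparse on this branch; the import path and tensor conversion methods are assumptions):

# Sketch: relu accepts SparseCooTensor or SparseCsrTensor through one entry point.
import paddle
from paddle.incubate import sparse  # assumed module path for this branch

x = paddle.to_tensor([[-1.0, 0.0], [2.0, -3.0]])
out_coo = sparse.relu(x.to_sparse_coo(2))   # routed to the sparse_coo_relu kernel
out_csr = sparse.relu(x.to_sparse_csr())    # routed to the sparse_csr_relu kernel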
@@ -45,7 +45,8 @@ class BaseAPI(object):
             'infer_meta'])
         self.kernel = self.parse_kernel(api_item_yaml['kernel'])
         self.support_selected_rows_kernel = False if len(self.kernel[
-            'func']) == 1 else True
+            'func']) == 1 or not self.kernel['func'][1].endswith(
+                '_sr') else True
         self.data_transform = self.parse_data_transform(api_item_yaml)
         self.inplace_map, self.view_map = self.parse_inplace_and_view(
             api_item_yaml)
@@ -248,13 +249,15 @@ class BaseAPI(object):
         #   backend : str, the names of param to choose the kernel backend, default is None
         #   layout : str, the names of param to choose the kernel layout, default is None
         #   data_type : str, the names of param to choose the kernel data_type, default is None
+        #   dispatch : {}, the key is kernel_func, the value is type of inputs and outputs for kernel (example: {kernel_name : (['dense','sparse_coo']#input,['sparse_coo']#output)})
         kernel = {
             'func': [],
             'param': None,
             'backend': None,
             'layout': None,
             'data_type': None,
-            'use_gpudnn': 'false'
+            'use_gpudnn': 'false',
+            'dispatch': {}
         }
         if 'backend' in kernel_config and len(kernel_config['backend']) > 0:
             kernel['backend'] = kernel_config['backend']
@@ -268,17 +271,21 @@ class BaseAPI(object):
             kernel['use_gpudnn'] = kernel_config['use_gpudnn']
             if isinstance(kernel['use_gpudnn'], bool):
                 kernel['use_gpudnn'] = str(kernel['use_gpudnn']).lower()
-        kernel['func'] = [
-            kernel_fn.strip() for kernel_fn in kernel_config['func'].split(',')
-        ]
-        if len(kernel['func']) == 2:
-            assert kernel['func'][0] == self.api, \
-                f"{self.api} : Kernel func error: If kernel has two func config, the name of first func should be same with api name({self.api}), \
-                  but now is {kernel['func'][0]}."
-            assert kernel['func'][1].endswith('_sr'), \
-                f"{self.api} : Kernel func error: If kernel has two func config, the name of second func should be a selected_rows kernel (the func name endwith '_sr'), \
-                  but now is {kernel['func'][1]}."
+        kernel_funcs = re.compile(r'([a-zA-Z0-9_]+)\s*({[^}]+})?').findall(
+            kernel_config['func'])
+
+        def parse_kernel_in_out_type(in_out_str):
+            if len(in_out_str) == 0:
+                return None
+            tmp_in_out_list = in_out_str[1:-1].split('->')
+            inputs = [item.strip() for item in tmp_in_out_list[0].split(',')]
+            outputs = [item.strip() for item in tmp_in_out_list[1].split(',')]
+            return (inputs, outputs)
+
+        for func_item in kernel_funcs:
+            kernel['func'].append(func_item[0])
+            kernel['dispatch'][func_item[0]] = parse_kernel_in_out_type(
+                func_item[1])
+
         return kernel
......
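Editor's note: the rewritten parse_kernel accepts the new kernel_name{input_types -> output_types} syntax and records per-kernel tensor types under kernel['dispatch']. A self-contained sketch of the same parsing, fed the relu entry from sparse_api.yaml below (the compact dict-comprehension wrapper is mine; the regex and the in/out split mirror the diff):

# Sketch: how the new regex decomposes a heterogeneous 'func' entry.
import re

func_str = ('sparse_coo_relu{sparse_coo -> sparse_coo}, '
            'sparse_csr_relu{sparse_csr -> sparse_csr}')

# Same pattern as parse_kernel: a kernel name plus an optional {...} type block.
kernel_funcs = re.compile(r'([a-zA-Z0-9_]+)\s*({[^}]+})?').findall(func_str)

def parse_kernel_in_out_type(in_out_str):
    if len(in_out_str) == 0:
        return None
    in_part, out_part = in_out_str[1:-1].split('->')
    return ([t.strip() for t in in_part.split(',')],
            [t.strip() for t in out_part.split(',')])

dispatch = {name: parse_kernel_in_out_type(io) for name, io in kernel_funcs}
print(dispatch)
# {'sparse_coo_relu': (['sparse_coo'], ['sparse_coo']),
#  'sparse_csr_relu': (['sparse_csr'], ['sparse_csr'])}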
 - api : conv3d
   args : (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
-  output : Tensor(out@SparseCooTensor), Tensor(rulebook@DenseTensor)
+  output : Tensor(out), Tensor(rulebook)
   kernel :
-    func : sparse_conv3d
+    func : sparse_conv3d{sparse_coo, dense -> sparse_coo, dense}
     layout : x
   intermediate : rulebook
   backward : conv3d_grad

-- api : coo_relu
-  args : (Tensor x)
-  output : Tensor(out@SparseCooTensor)
-  kernel :
-    func : sparse_coo_relu
-    layout : x
-  backward : sparse_coo_relu_grad
-
-- api : coo_sin
-  args : (Tensor x)
-  output : Tensor(out@SparseCooTensor)
-  kernel :
-    func : sparse_coo_sin
-    layout : x
-  backward : sparse_coo_sin_grad
-
-- api : coo_sqrt
-  args : (Tensor x)
-  output : Tensor(out@SparseCooTensor)
-  kernel :
-    func : sparse_coo_sqrt
-    layout : x
-  backward : sparse_coo_sqrt_grad
-
-- api : coo_tanh
-  args : (Tensor x)
-  output : Tensor(out@SparseCooTensor)
-  kernel :
-    func : sparse_coo_tanh
-    layout : x
-  backward : sparse_coo_tanh_grad
-
 - api : coo_to_dense
   args : (Tensor x)
-  output : Tensor(out@DenseTensor)
+  output : Tensor(out)
   invoke : to_dense_impl(x)
   backward : coo_to_dense_grad

-- api : coo_values
-  args : (Tensor x)
-  output : Tensor(out@DenseTensor)
-  kernel :
-    func : coo_values
-    layout : x
-  backward : coo_values_grad
-
 - api : create_sparse_coo_tensor
   args : (Tensor values, Tensor indices, IntArray dense_shape)
-  output : Tensor(out@SparseCooTensor)
+  output : Tensor(out)
   kernel :
-    func : sparse_coo_tensor
+    func : sparse_coo_tensor{dense, dense -> sparse_coo}
     layout : values
     data_type : values
   backward : create_sparse_coo_tensor_grad

-- api : csr_relu
-  args : (Tensor x)
-  output : Tensor(out@SparseCsrTensor)
-  kernel :
-    func : sparse_csr_relu
-    layout : x
-
-- api : csr_sin
-  args : (Tensor x)
-  output : Tensor(out@SparseCsrTensor)
-  kernel :
-    func : sparse_csr_sin
-    layout : x
-
-- api : csr_sqrt
-  args : (Tensor x)
-  output : Tensor(out@SparseCsrTensor)
-  kernel :
-    func : sparse_csr_sqrt
-    layout : x
-
-- api : csr_tanh
-  args : (Tensor x)
-  output : Tensor(out@SparseCsrTensor)
-  kernel :
-    func : sparse_csr_tanh
-    layout : x
-
-- api : csr_values
-  args : (Tensor x)
-  output : Tensor(out@DenseTensor)
-  kernel :
-    func : csr_values
-    layout : x
-
-- api : dense_to_coo
-  args : (Tensor x, int64_t sparse_dim)
-  output : Tensor(out@SparseCooTensor)
-  invoke : to_sparse_coo_impl(x, sparse_dim)
-  backward : dense_to_coo_grad
+- api : dense_to_coo
+  args : (Tensor x, int64_t sparse_dim)
+  output : Tensor(out)
+  invoke : to_sparse_coo_impl(x, sparse_dim)
+  backward : dense_to_coo_grad
+
+- api : relu
+  args : (Tensor x)
+  output : Tensor(out)
+  kernel :
+    func : sparse_coo_relu{sparse_coo -> sparse_coo},
+           sparse_csr_relu{sparse_csr -> sparse_csr}
+    layout : x
+  backward : relu_grad
+
+- api : sin
+  args : (Tensor x)
+  output : Tensor(out@SparseCooTensor)
+  kernel :
+    func : sparse_coo_sin {sparse_coo -> sparse_coo},
+           sparse_csr_sin {sparse_csr -> sparse_csr}
+    layout : x
+  backward : sin_grad
+
+- api : sqrt
+  args : (Tensor x)
+  output : Tensor(out)
+  kernel :
+    func : sparse_coo_sqrt{sparse_coo -> sparse_coo},
+           sparse_csr_sqrt{sparse_csr -> sparse_csr}
+    layout : x
+  backward : sqrt_grad
+
+- api : tanh
+  args : (Tensor x)
+  output : Tensor(out)
+  kernel :
+    func : sparse_coo_tanh{sparse_coo -> sparse_coo},
+           sparse_csr_tanh{sparse_csr -> sparse_csr}
+    layout : x
+  backward : tanh_grad

 - api : to_dense
   args : (Tensor x)
-  output : Tensor(out@DenseTensor)
+  output : Tensor(out)
   invoke : to_dense_impl(x)

 - api : to_sparse_coo
   args : (Tensor x, int64_t sparse_dim)
-  output : Tensor(out@SparseCooTensor)
+  output : Tensor(out)
   invoke : to_sparse_coo_impl(x, sparse_dim)

 - api : to_sparse_csr
   args : (Tensor x)
-  output : Tensor(out@SparseCsrTensor)
+  output : Tensor(out)
   invoke : to_sparse_csr_impl(x)

+- api : values
+  args : (Tensor x)
+  output : Tensor(out)
+  kernel :
+    func : coo_values{sparse_coo -> dense},
+           csr_values{sparse_csr -> dense}
+    layout : x
+  backward : values_grad
+
 - api: maxpool
   args : (Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides)
-  output : Tensor(out@SparseCooTensor), Tensor(rulebook@DenseTensor)
+  output : Tensor(out), Tensor(rulebook)
   kernel :
-    func : sparse_maxpool
+    func : sparse_maxpool{sparse_coo -> sparse_coo, dense}
     layout : x
   intermediate : rulebook
   backward : sparse_maxpool_grad
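Editor's note: given the yaml above, each api entry's kernels and their tensor types end up in the parsed kernel dict from api_base.py. For the new 'values' api, the relevant slice of that structure would look roughly like this (a sketch per the 'dispatch' comment in parse_kernel; fields other than 'func' and 'dispatch' are omitted):

# Sketch of the parsed result for 'values' (illustrative, not generator output verbatim).
values_kernel = {
    'func': ['coo_values', 'csr_values'],
    'dispatch': {
        'coo_values': (['sparse_coo'], ['dense']),
        'csr_values': (['sparse_csr'], ['dense']),
    },
}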
@@ -47,6 +47,11 @@ class SparseAPI(ForwardAPI):
         output_names = []
         output_create = ""
         return_type = self.get_return_type_with_intermediate(inplace_flag)
+        output_type_map = {
+            'dense': 'TensorType::DENSE_TENSOR',
+            'sparse_coo': 'TensorType::SPARSE_COO',
+            'sparse_csr': 'TensorType::SPARSE_CSR'
+        }

         if len(output_type_list) == 1:
             kernel_output = 'kernel_out'
@@ -56,7 +61,7 @@ class SparseAPI(ForwardAPI):
                 'names'][0] in self.inplace_map else ""
             output_create = f"""
    {return_type} api_output{inplace_assign};
-   auto* kernel_out = {set_out_func}(&api_output, {self.get_kernel_tensor_out_type(self.outputs['names'][0])});"""
+   auto* kernel_out = {set_out_func}(&api_output, {output_type_map[output_type_list[0]]});"""

         elif len(output_type_list) > 1:
             output_create = f"""
@@ -67,7 +72,6 @@ class SparseAPI(ForwardAPI):
    {return_type} api_output{{"""

             for out_name in self.outputs['names']:
-                out_name = out_name.split('@')[0]
                 if out_name in self.inplace_map:
                     output_create = output_create + self.inplace_map[
                         out_name] + ', '
@@ -79,7 +83,7 @@ class SparseAPI(ForwardAPI):
                 kernel_output = kernel_output + f'kernel_out_{i}, '
                 output_names.append(f'kernel_out_{i}')
                 output_create = output_create + f"""
-   auto* kernel_out_{i} = {set_out_func}(&std::get<{i}>(api_output), {self.get_kernel_tensor_out_type(self.outputs['names'][i])});"""
+   auto* kernel_out_{i} = {set_out_func}(&std::get<{i}>(api_output), {output_type_map[output_type_list[i]]});"""

             kernel_output = kernel_output[:-2]
         else:
@@ -139,18 +143,19 @@ class SparseAPI(ForwardAPI):
         return kernel_context_code

-    def gen_sparse_kernel_code(self, inplace_flag=False):
+    def gen_sparse_kernel_code(self, kernel_name, inplace_flag=False):
         _, kernel_output_names, output_create = self.gene_output(
-            self.outputs['types'], 'SetSparseKernelOutput', '', inplace_flag)
+            self.kernel['dispatch'][kernel_name][1], 'SetSparseKernelOutput',
+            '', inplace_flag)

         kernel_context_code = self.gen_sparse_kernel_context(
             kernel_output_names)
         return_code = "" if len(self.gene_return_code(
         )) == 0 else " " + self.gene_return_code()
         return f"""
-  auto phi_kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
-      "{self.kernel['func'][0]}", {{kernel_backend, kernel_layout, kernel_data_type}});
   VLOG(6) << "{self.api} api sparse kernel key: [" << kernel_backend << ", " << kernel_layout << ", "<< kernel_data_type << "]";
+  auto phi_kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
+      "{kernel_name}", {{kernel_backend, kernel_layout, kernel_data_type}});
   VLOG(6) << "{self.api} api sparse kernel: " << phi_kernel;

   auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
@@ -158,16 +163,50 @@ class SparseAPI(ForwardAPI):
 {output_create}
 {kernel_context_code}

   phi_kernel(&kernel_context);
 {return_code}"""

+    def get_condition_code(self, kernel_name):
+        assert self.kernel['dispatch'][kernel_name], \
+            f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'conv3d' in sparse_api.yaml."
+        input_types = self.kernel['dispatch'][kernel_name][0]
+        sparse_type_map = {
+            'sparse_coo': 'DataLayout::SPARSE_COO',
+            'sparse_csr': 'DataLayout::SPARSE_CSR'
+        }
+        condition_list = []
+        for i, in_type in enumerate(input_types):
+            if in_type == "dense":
+                condition_list.append(
+                    f"phi::DenseTensor::classof({self.inputs['names'][i]}.impl().get())"
+                )
+            else:
+                condition_list.append(
+                    f"{self.inputs['names'][i]}.layout() == {sparse_type_map[in_type]}"
+                )
+        return " && ".join(condition_list)
+
+    def gene_dispatch_code(self, kernel_name, inplace_flag=False):
+        return f"""
+  if ({self.get_condition_code(kernel_name)}) {{
+{self.gen_sparse_kernel_code(kernel_name, inplace_flag)}
+  }}
+"""
     def gene_base_api_code(self, inplace_flag=False):
         api_func_name = self.get_api_func_name()
         if inplace_flag and api_func_name[-1] != '_':
             api_func_name += '_'
+        kernel_dispatch_code = f"{self.gene_kernel_select()}\n"
+        for kernel_name in self.kernel['func']:
+            kernel_dispatch_code += self.gene_dispatch_code(kernel_name,
+                                                            inplace_flag)

         return f"""
 PADDLE_API {self.get_return_type()} {api_func_name}({self.get_define_args()}) {{
-{self.gene_kernel_select()}
-{self.gen_sparse_kernel_code(inplace_flag)}
+{kernel_dispatch_code}
+  PADDLE_THROW(phi::errors::Unimplemented(
+      "The kernel of ({self.api}) for input tensors is unimplemented, please check the type of input tensors."));
 }}
 """
......
 - backward_api : conv3d_grad
   forward : conv3d (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm) -> Tensor(out@SparseCooTensor), Tensor(rulebook@DenseTensor)
   args : (Tensor x, Tensor kernel, Tensor rulebook, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
-  output : Tensor(x_grad@SparseCooTensor), Tensor(kernel_grad@DenseTensor)
+  output : Tensor(x_grad), Tensor(kernel_grad)
   kernel :
-    func : sparse_conv3d_grad
+    func : sparse_conv3d_grad{sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}

 - backward_api : coo_to_dense_grad
-  forward : coo_to_dense(Tensor x) -> Tensor(out@DenseTensor)
+  forward : coo_to_dense(Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
-  output : Tensor(x_grad@SparseCooTensor)
+  output : Tensor(x_grad)
   kernel :
-    func : sparse_coo_to_dense_grad
+    func : sparse_coo_to_dense_grad{sparse_coo, dense-> sparse_coo}

-- backward_api : coo_values_grad
-  forward : coo_values(Tensor x) -> Tensor(out@DenseTensor)
-  args : (Tensor x, Tensor out_grad)
-  output : Tensor(x_grad@SparseCooTensor)
-  kernel :
-    func : coo_values_grad
-
 - backward_api : create_sparse_coo_tensor_grad
-  forward : create_sparse_coo_tensor(Tensor values, Tensor indices, IntArray dense_shape) -> Tensor(out@SparseCooTensor)
+  forward : create_sparse_coo_tensor(Tensor values, Tensor indices, IntArray dense_shape) -> Tensor(out)
   args : (Tensor indices, Tensor out_grad)
-  output : Tensor(values_grad@DenseTensor)
+  output : Tensor(values_grad)
   kernel :
-    func : sparse_coo_tensor_grad
+    func : sparse_coo_tensor_grad{dense, sparse_coo -> dense}

 - backward_api : dense_to_coo_grad
-  forward : dense_to_coo(Tensor x, int64_t sparse_dim) -> Tensor(out@SparseCooTensor)
+  forward : dense_to_coo(Tensor x, int64_t sparse_dim) -> Tensor(out)
   args : (Tensor out_grad)
-  output : Tensor(x_grad@DenseTensor)
+  output : Tensor(x_grad)
   invoke : to_dense_impl(out_grad)

-- backward_api : sparse_coo_relu_grad
-  forward : sparse_coo_relu(Tensor x) -> Tensor(out@SparseCooTensor)
-  args : (Tensor out, Tensor out_grad)
-  output : Tensor(x_grad@SparseCooTensor)
-  kernel :
-    func : sparse_coo_relu_grad
+- backward_api : relu_grad
+  forward : relu(Tensor x) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad)
+  output : Tensor(x_grad)
+  kernel :
+    func : sparse_coo_relu_grad {sparse_coo, sparse_coo -> sparse_coo}

-- backward_api : sparse_coo_sin_grad
-  forward : sparse_coo_sin(Tensor x) -> Tensor(out@SparseCooTensor)
-  args : (Tensor x, Tensor out_grad)
-  output : Tensor(x_grad@SparseCooTensor)
-  kernel :
-    func : sparse_coo_sin_grad
+- backward_api : sin_grad
+  forward : sin(Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  kernel :
+    func : sparse_coo_sin_grad {sparse_coo, sparse_coo -> sparse_coo}
+
+- backward_api : sparse_maxpool_grad
+  forward : sparse_maxpool(Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides) -> Tensor(out), Tensor(rulebook)
+  args : (Tensor x, Tensor rulebook, Tensor out, Tensor out_grad, int[] kernel_sizes)
+  output : Tensor(x_grad)
+  kernel :
+    func : sparse_maxpool_grad {sparse_coo, dense, sparse_coo, sparse_coo -> sparse_coo}

-- backward_api : sparse_coo_sqrt_grad
-  forward : sparse_coo_sqrt(Tensor x) -> Tensor(out@SparseCooTensor)
-  args : (Tensor out, Tensor out_grad)
-  output : Tensor(x_grad@SparseCooTensor)
-  kernel :
-    func : sparse_coo_sqrt_grad
+- backward_api : sqrt_grad
+  forward : sqrt(Tensor x) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad)
+  output : Tensor(x_grad)
+  kernel :
+    func : sparse_coo_sqrt_grad {sparse_coo, sparse_coo -> sparse_coo}

-- backward_api : sparse_coo_tanh_grad
-  forward : sparse_coo_tanh(Tensor x) -> Tensor(out@SparseCooTensor)
-  args : (Tensor out, Tensor out_grad)
-  output : Tensor(x_grad@SparseCooTensor)
-  kernel :
-    func : sparse_coo_tanh_grad
+- backward_api : tanh_grad
+  forward : tanh(Tensor x) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad)
+  output : Tensor(x_grad)
+  kernel :
+    func : sparse_coo_tanh_grad {sparse_coo, sparse_coo -> sparse_coo}

-- backward_api : sparse_maxpool_grad
-  forward : sparse_maxpool(Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides) -> Tensor(out@SparseCooTensor), Tensor(rulebook@DenseTensor)
-  args : (Tensor x, Tensor rulebook, Tensor out, Tensor out_grad, int[] kernel_sizes)
-  output : Tensor(x_grad@SparseCooTensor)
-  kernel :
-    func : sparse_maxpool_grad
+- backward_api : values_grad
+  forward : coo_values(Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  kernel :
+    func : coo_values_grad{sparse_coo, dense-> sparse_coo}
@@ -35,7 +35,7 @@ class SparseBackwardAPI(SparseAPI, BackwardAPI):
         return BackwardAPI.get_return_type(self)

     def gene_return_code(self):
-        return ""
+        return "return;"

     def gene_api_declaration(self):
         return SparseAPI.gene_api_declaration(self)
@@ -54,6 +54,11 @@ class SparseBackwardAPI(SparseAPI, BackwardAPI):
         kernel_output = ""
         output_names = []
         output_create = ""
+        output_type_map = {
+            'dense': 'TensorType::DENSE_TENSOR',
+            'sparse_coo': 'TensorType::SPARSE_COO',
+            'sparse_csr': 'TensorType::SPARSE_CSR'
+        }

         if len(output_type_list) == 1:
             kernel_output = 'kernel_out'
@@ -62,7 +67,7 @@ class SparseBackwardAPI(SparseAPI, BackwardAPI):
                 0]] if inplace_flag and self.inplace_map is not None and self.outputs[
                 'names'][0] in self.inplace_map else ""
             output_create = f"""
-   auto kernel_out = {set_out_func}({self.outputs['names'][0].split('@')[0]}, {self.get_kernel_tensor_out_type(self.outputs['names'][0])});"""
+   auto kernel_out = {set_out_func}({self.outputs['names'][0]}, {output_type_map[output_type_list[0]]});"""

         elif len(output_type_list) > 1:
             output_create = ""
@@ -76,7 +81,7 @@ class SparseBackwardAPI(SparseAPI, BackwardAPI):
    *{self.outputs['names'][i]} = {self.inplace_map[self.outputs['names'][i]]};"""
                 output_create = output_create + f"""
-   auto kernel_out_{i} = {set_out_func}({self.outputs['names'][i].split('@')[0]}, {self.get_kernel_tensor_out_type(self.outputs['names'][i])});"""
+   auto kernel_out_{i} = {set_out_func}({self.outputs['names'][i]}, {output_type_map[output_type_list[i]]});"""

             kernel_output = kernel_output[:-2]
         else:
......