Unverified commit f4da2d4d, authored by HongyuJia, committed by GitHub

[phi] Transfer fluid bicubic_interp_v2 to phi bicubic_interp (add yaml) (#45151)

* Transfer the bicubic_interp op to phi; rename bicubic_interp_v2 to bicubic_interp

* Test the final_state_bicubic_interp API

* Make the test case match the imperative (dygraph) case
Parent 951010a2
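For orientation: this migration does not change user-facing behavior. Bicubic resampling is still requested through paddle.nn.functional.interpolate; only the underlying kernel name and dispatch path change. A minimal usage sketch (shapes and values are illustrative, not taken from this commit):

    import paddle
    import paddle.nn.functional as F

    # NCHW input; bicubic upsampling to 12x12 is routed to the
    # phi bicubic_interp kernel after this change.
    x = paddle.rand([1, 3, 6, 6])
    out = F.interpolate(x, size=[12, 12], mode='bicubic', align_corners=False)
    print(out.shape)  # [1, 3, 12, 12]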
......
@@ -353,6 +353,17 @@
     func : bce_loss
   backward : bce_loss_grad
 
+- api : bicubic_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : bicubic_interp
+    data_type : x
+  backward : bicubic_interp_grad
+
 - api : bilinear_tensor_product
   args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
   output : Tensor
......
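The yaml entry above is consumed by the code generator: args fixes the positional order of the generated eager call, infer_meta names the shape/dtype inference function (InterpolateInferMeta), optional marks the three auxiliary tensors that may be None, and data_type : x selects the kernel dtype from the input. A hedged sketch of the resulting eager call; the argument order is the one the yaml prescribes (and the Python dispatch later in this commit mirrors), but the attribute values here are illustrative:

    out = _C_ops.final_state_bicubic_interp(
        x,           # Tensor x
        None,        # Tensor out_size (optional)
        None,        # Tensor[] size_tensor (optional)
        None,        # Tensor scale_tensor (optional)
        'NCHW',      # str data_layout
        -1, 12, 12,  # int out_d, out_h, out_w (-1 meaning unset; illustrative)
        [],          # float[] scale
        'bicubic',   # str interp_method
        False,       # bool align_corners
        1)           # int align_mode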
......
@@ -283,6 +283,18 @@
     func : bce_loss_grad
   inplace : (out_grad -> input_grad)
 
+- backward_api : bicubic_interp_grad
+  forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : bicubic_interp_grad
+    data_type : output_grad
+
 - backward_api : bilinear_tensor_product_grad
   forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
......
......
@@ -75,7 +75,9 @@ const std::unordered_set<std::string> deprecated_op_names(
      "linear_interp",
      "linear_interp_grad",
      "nearest_interp",
-     "nearest_interp_grad"});
+     "nearest_interp_grad",
+     "bicubic_interp",
+     "bicubic_interp_grad"});
 
 class DefaultKernelSignatureMap {
  public:
......
......
@@ -1072,7 +1072,7 @@ PD_REGISTER_KERNEL(linear_interp_grad,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2_grad,
+PD_REGISTER_KERNEL(bicubic_interp_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::BicubicInterpGradKernel,
......
......
@@ -1229,12 +1229,8 @@ PD_REGISTER_KERNEL(linear_interp,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::BicubicInterpKernel,
-                   float,
-                   double) {
+PD_REGISTER_KERNEL(
+    bicubic_interp, CPU, ALL_LAYOUT, phi::BicubicInterpKernel, float, double) {
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
......
@@ -1605,7 +1605,7 @@ PD_REGISTER_KERNEL(linear_interp_grad,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2_grad,
+PD_REGISTER_KERNEL(bicubic_interp_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::BicubicInterpGradKernel,
......
......
@@ -1481,7 +1481,7 @@ PD_REGISTER_KERNEL(linear_interp,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2,
+PD_REGISTER_KERNEL(bicubic_interp,
                    GPU,
                    ALL_LAYOUT,
                    phi::BicubicInterpKernel,
......
......
@@ -77,7 +77,7 @@ KernelSignature LinearInterpOpArgumentMapping(
 
 KernelSignature BicubicInterpOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("bicubic_interp_v2",
+  return KernelSignature("bicubic_interp",
                          {"X", "OutSize", "SizeTensor", "Scale"},
                          {"data_layout",
                           "out_d",
......
@@ -151,7 +151,7 @@ KernelSignature LinearInterpGradOpArgumentMapping(
 
 KernelSignature BicubicInterpGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("bicubic_interp_v2_grad",
+  return KernelSignature("bicubic_interp_grad",
                          {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
                          {"data_layout",
                           "out_d",
......
@@ -168,9 +168,11 @@ KernelSignature BicubicInterpGradOpArgumentMapping(
 PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2, linear_interp);
 PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2, nearest_interp);
+PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2, bicubic_interp);
 PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2_grad, linear_interp_grad);
 PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2_grad, nearest_interp_grad);
+PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2_grad, bicubic_interp_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(bilinear_interp_v2,
                            phi::BilinearInterpOpArgumentMapping);
......
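PD_REGISTER_BASE_KERNEL_NAME keeps backward compatibility for programs that still record the legacy operator: the static graph continues to emit bicubic_interp_v2 ops (see the append_op call in the Python hunk below), and this mapping resolves them to the renamed phi kernel at execution time. A hedged static-graph sketch, assuming a Paddle build that includes this change:

    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        x = paddle.static.data('x', shape=[1, 3, 6, 6], dtype='float32')
        y = F.interpolate(x, size=[12, 12], mode='bicubic')
    # The recorded op type keeps the legacy name; only the kernel is renamed.
    print([op.type for op in main.block(0).ops])  # includes 'bicubic_interp_v2'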
......
@@ -619,7 +619,17 @@ def interpolate(x,
             else:
                 out = _C_ops.nearest_interp_v2(x, *dy_attr)
         elif resample_type == "bicubic":
-            out = _C_ops.bicubic_interp_v2(x, *dy_attr)
+            if in_dygraph_mode():
+                out = _C_ops.final_state_bicubic_interp(
+                    x, inputs['OutSize'] if 'OutSize' in inputs else None,
+                    inputs['SizeTensor'] if 'SizeTensor' in inputs else None,
+                    inputs['Scale'] if 'Scale' in inputs else None,
+                    attrs['data_layout'], attrs['out_d'], attrs['out_h'],
+                    attrs['out_w'], attrs['scale'] if 'scale' in attrs else [],
+                    attrs['interp_method'], attrs['align_corners'],
+                    attrs['align_mode'])
+            else:
+                out = _C_ops.bicubic_interp_v2(x, *dy_attr)
         return out
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(type='{}_interp_v2'.format(resample_type),
......
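The dispatch added above fires only in the eager dygraph mode (in_dygraph_mode()); legacy dygraph tracing and static graphs keep the old bicubic_interp_v2 route, which is why the tests compare the two paths. A small dygraph check in the spirit of the commit message's testcase bullets (shapes are assumptions, not taken from the actual test file):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()  # eager mode: exercises final_state_bicubic_interp
    x_np = np.random.rand(2, 3, 8, 8).astype('float32')
    out = F.interpolate(paddle.to_tensor(x_np), scale_factor=2, mode='bicubic')
    assert out.shape == [2, 3, 16, 16]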