Unverified commit af7439ad, authored by Jiabin Yang, committed by GitHub

[Cherry-pick] Enabled test_imperative_star_gan_with_gradient_penalty.py under eager mode (#41994)

* cherry-pick python/paddle/utils/code_gen/backward.yaml

* remove unsupported yaml
Co-authored-by: Zhanlue Yang <jim19930609@gmail.com>
Parent: 0c141322
@@ -32,6 +32,7 @@ ops_to_fill_zero_for_empty_grads = set([
    "add_triple_grad",
    "multiply_double_grad",
    "multiply_triple_grad",
    "conv2d_grad_grad",
])

# For API dispatch used at python-level
......
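The hunk above adds `conv2d_grad_grad` to `ops_to_fill_zero_for_empty_grads` in the eager code generator. A rough sketch of the idea, with a hypothetical helper that is not part of this patch: ops on this list get zero tensors substituted for higher-order gradient inputs that arrive empty, so the double-grad kernel never reads an uninitialized tensor.

```python
# Illustrative only; the helper below is hypothetical, not from the patch.
ops_to_fill_zero_for_empty_grads = {"multiply_double_grad", "conv2d_grad_grad"}


def fill_empty_grads(op_name, grads, zeros_like):
    """Replace missing gradient slots with zeros for ops that require it."""
    if op_name not in ops_to_fill_zero_for_empty_grads:
        return grads
    return [g if g is not None else zeros_like() for g in grads]
```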
@@ -118,7 +118,6 @@ class TensorWrapper {
p_ab_autograd_meta->SetGradNode(new_grad_node);
}
recovered_tensor.set_autograd_meta(p_ab_autograd_meta);
return recovered_tensor;
}
}
......
@@ -20,11 +20,11 @@ namespace phi {
template <typename T, typename Context>
void ConvGradGradKernel(const Context& dev_ctx,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const DenseTensor& out_grad,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& paddding_algorithm,
@@ -34,9 +34,9 @@ void ConvGradGradKernel(const Context& dev_ctx,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search,
DenseTensor* out_grad_grad,
DenseTensor* input_grad,
DenseTensor* filter_grad);
DenseTensor* filter_grad,
DenseTensor* out_grad_grad);
template <typename T, typename Context>
void Conv3DGradGradKernel(const Context& dev_ctx,
......
@@ -39,11 +39,11 @@ void Conv3DGradGradKernel(const Context& ctx,
DenseTensor* input_grad,
DenseTensor* filter_grad) {
ConvGradGradKernel<T>(ctx,
input_grad_grad,
filter_grad_grad,
out_grad,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
@@ -53,9 +53,9 @@ void Conv3DGradGradKernel(const Context& ctx,
use_addto,
workspace_size_MB,
exhaustive_search_t,
out_grad_grad,
input_grad,
filter_grad);
filter_grad,
out_grad_grad);
}
} // namespace phi
......
@@ -44,11 +44,11 @@ namespace phi {
template <typename T, typename Context>
void ConvCudnnGradGradKernel(
const Context& ctx,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const DenseTensor& out_grad,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
@@ -58,9 +58,9 @@ void ConvCudnnGradGradKernel(
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* out_grad_grad,
DenseTensor* input_grad,
DenseTensor* filter_grad) {
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
auto X = &input;
auto W = &filter;
auto dO = &out_grad;
@@ -689,11 +689,11 @@ void DepthwiseConvCudnnGradGradKernel(
DenseTensor* input_grad,
DenseTensor* filter_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input_grad_grad,
filter_grad_grad,
out_grad,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
@@ -703,9 +703,9 @@ void DepthwiseConvCudnnGradGradKernel(
use_addto,
workspace_size_MB,
exhaustive_search_t,
out_grad_grad,
input_grad,
filter_grad);
filter_grad,
out_grad_grad);
}
template <typename T, typename Context>
@@ -729,11 +729,11 @@ void Conv3DCudnnGradGradKernel(
DenseTensor* input_grad,
DenseTensor* filter_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input_grad_grad,
filter_grad_grad,
out_grad,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
@@ -743,9 +743,9 @@ void Conv3DCudnnGradGradKernel(
use_addto,
workspace_size_MB,
exhaustive_search_t,
out_grad_grad,
input_grad,
filter_grad);
filter_grad,
out_grad_grad);
}
} // namespace phi
......
@@ -26,11 +26,11 @@ namespace phi {
template <typename T, typename Context>
void ConvGradGradKernel(const Context& dev_ctx,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const DenseTensor& out_grad,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const std::vector<int>& strides_t,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
@@ -40,9 +40,9 @@ void ConvGradGradKernel(const Context& dev_ctx,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search,
DenseTensor* out_grad_grad,
DenseTensor* input_grad,
DenseTensor* filter_grad) {
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
const DenseTensor* X = &input;
const DenseTensor* dY = &out_grad;
const DenseTensor* ddX = input_grad_grad.get_ptr();
......
@@ -62,7 +62,7 @@ KernelSignature Conv2dGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
KernelSignature Conv2dDoubleGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("conv2d_grad_grad",
{"DDInput", "DDFilter", "DOutput", "Input", "Filter"},
{"Input", "Filter", "DOutput", "DDInput", "DDFilter"},
{"strides",
"paddings",
"padding_algorithm",
@@ -72,7 +72,7 @@ KernelSignature Conv2dDoubleGradOpArgumentMapping(
"use_addto",
"workspace_size_MB",
"exhaustive_search"},
{"DDOutput", "DInput", "DFilter"});
{"DInput", "DFilter", "DDOutput"});
}
} // namespace phi
......
@@ -590,6 +590,46 @@ class TestStarGANWithGradientPenalty(unittest.TestCase):
        if fluid.is_compiled_with_cuda():
            self.place_test(fluid.CUDAPlace(0))

    def place_test(self, place):
        cfg = Config(place, False)
        dataset = create_mnist_dataset(cfg)
        dataset = paddle.reader.cache(dataset)

        fluid_dygraph_loss = []
        with fluid.dygraph.guard(cfg.place):
            fluid_dygraph_model = DyGraphTrainModel(cfg)
            for batch_id, (image_real, label_org,
                           label_trg) in enumerate(dataset()):
                loss = fluid_dygraph_model.run(image_real, label_org, label_trg)
                fluid_dygraph_loss.append(loss)

        eager_dygraph_loss = []
        with _test_eager_guard():
            with fluid.dygraph.guard(cfg.place):
                eager_dygraph_model = DyGraphTrainModel(cfg)
                for batch_id, (image_real, label_org,
                               label_trg) in enumerate(dataset()):
                    loss = eager_dygraph_model.run(image_real, label_org,
                                                   label_trg)
                    eager_dygraph_loss.append(loss)

        for (g_loss_f, d_loss_f), (g_loss_e, d_loss_e) in zip(
                fluid_dygraph_loss, eager_dygraph_loss):
            self.assertEqual(g_loss_f, g_loss_e)
            self.assertEqual(d_loss_f, d_loss_e)

    def test_all_cases(self):
        self.func_main()


class TestStarGANWithGradientPenaltyLegacy(unittest.TestCase):
    def func_main(self):
        self.place_test(fluid.CPUPlace())
        if fluid.is_compiled_with_cuda():
            self.place_test(fluid.CUDAPlace(0))

    def place_test(self, place):
        cfg = Config(place)
@@ -617,8 +657,7 @@ class TestStarGANWithGradientPenalty(unittest.TestCase):
            self.assertEqual(d_loss_s, d_loss_d)

    def test_all_cases(self):
        if _in_legacy_dygraph():
            self.func_main()
        self.func_main()
if __name__ == '__main__':
......
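The re-enabled test exercises conv2d double grad through a gradient penalty: a first-order gradient is taken with `create_graph=True` and then backpropagated through. A minimal standalone sketch of that pattern (the model, shapes, and values below are assumptions, not taken from the test):

```python
import paddle

conv = paddle.nn.Conv2D(in_channels=3, out_channels=8, kernel_size=3, padding=1)
x = paddle.rand([4, 3, 16, 16])
x.stop_gradient = False

y = conv(x)
# First-order gradient w.r.t. the input, kept in the graph.
(dx,) = paddle.grad(outputs=[y.sum()], inputs=[x], create_graph=True)

# Gradient penalty; backward() here routes through conv2d_grad_grad.
penalty = ((paddle.norm(dx.reshape([4, -1]), p=2, axis=1) - 1.0) ** 2).mean()
penalty.backward()
```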
@@ -434,7 +434,10 @@ def leaky_relu(x, negative_slope=0.01, name=None):
            out = F.leaky_relu(x) # [-0.02, 0., 1.]

    """
    if in_dynamic_mode():
    if in_dygraph_mode():
        return _C_ops.final_state_leaky_relu(x, negative_slope)

    if _in_legacy_dygraph():
        return _C_ops.leaky_relu(x, 'alpha', negative_slope)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
......
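In eager mode `leaky_relu` now dispatches to the final-state op, and the `leaky_relu_double_grad` entry added to backward.yaml below supplies its second-order rule. A small hedged example of a call that reaches it (values are illustrative):

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-1.0, 0.5, 2.0], stop_gradient=False)
y = F.leaky_relu(x, negative_slope=0.01)

# First-order gradient kept in the graph ...
(gx,) = paddle.grad([y.sum()], [x], create_graph=True)
# ... so this second backward pass uses leaky_relu_double_grad.
gx.sum().backward()
```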
@@ -257,6 +257,19 @@
  args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor(input_grad), Tensor(filter_grad)
  invoke : conv2d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
  backward : conv2d_grad_grad

- backward_api : conv2d_grad_grad
  forward : conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(grad_input), Tensor(grad_filter)
  args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param: [input, filter, grad_out]
  kernel :
    func : conv2d_grad_grad
    use_cudnn : true
  optional : grad_input_grad, grad_filter_grad

- backward_api : conv2d_transpose_grad
  forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
@@ -739,6 +752,16 @@
    func : label_smooth_grad
  optional : prior_dist

- backward_api : leaky_relu_double_grad
  forward : leaky_relu_grad (Tensor x, Tensor grad_out, float alpha) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad, float alpha)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [grad_x_grad]
  kernel :
    func : leaky_relu_double_grad

- backward_api : leaky_relu_grad
  forward : leaky_relu (Tensor x, float alpha) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float alpha)
@@ -748,6 +771,7 @@
    param : [x]
  kernel :
    func : leaky_relu_grad
  backward : leaky_relu_double_grad

- backward_api : lerp_grad
  forward : lerp (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
......