Unverified commit 267b218f, authored by cyber-pioneer, committed by GitHub

add sin triple grad operator (#47753)

Parent f369b2b1
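For orientation, the sketch below is not part of the diff: each nested call to paddle.grad over paddle.sin peels off one more analytic derivative, and the third call is what the new sin_triple_grad kernel serves. A minimal eager-mode sketch, assuming a build that includes this commit:

import paddle

x = paddle.to_tensor(1.0, dtype='float64', stop_gradient=False)
y = paddle.sin(x)

dy = paddle.grad(y, x, create_graph=True)[0]    # cos(x)   -> sin_grad
d2y = paddle.grad(dy, x, create_graph=True)[0]  # -sin(x)  -> sin_double_grad
d3y = paddle.grad(d2y, x)[0]                    # -cos(x)  -> sin_triple_grad

print(float(d3y))  # expected to be close to -cos(1.0) ≈ -0.5403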
......@@ -36,6 +36,8 @@ ops_to_fill_zero_for_empty_grads = set(
"batch_norm_double_grad",
"tanh_double_grad",
"tanh_triple_grad",
"sin_double_grad",
"sin_triple_grad",
"subtract_double_grad",
"divide_double_grad",
"log_double_grad",
......
......@@ -661,6 +661,7 @@
param : [x, x]
kernel :
func : sin_double_grad
backward : sin_triple_grad
inplace : (grad_x_grad -> grad_out_grad)
- backward_op : sin_grad
......@@ -675,6 +676,17 @@
backward : sin_double_grad
inplace : (out_grad -> x_grad)
- backward_op : sin_triple_grad
forward : sin_double_grad (Tensor x, Tensor grad_out_forward, Tensor grad_x_grad_forward) -> Tensor(grad_x), Tensor(grad_out_grad)
args : (Tensor x, Tensor grad_out_forward, Tensor grad_x_grad_forward, Tensor grad_x_grad, Tensor grad_out_grad_grad)
output : Tensor(x_grad), Tensor(grad_out_forward_grad), Tensor(grad_x_grad_forward_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param : [x, x, grad_x_grad_forward]
kernel :
func : sin_triple_grad
inplace : (grad_x_grad_forward -> grad_out_forward_grad)
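# Note on the sin_triple_grad entry above: its tensor names map onto the
# functor comment in activation_functor.h as grad_out_forward ~ d1y,
# grad_x_grad_forward ~ d2d1x, grad_x_grad ~ d3d2x, grad_out_grad_grad ~ d3d2d1y;
# the outputs are x_grad ~ d3x, grad_out_forward_grad ~ d3d1y,
# grad_x_grad_forward_grad ~ d3d2d1x.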
- backward_op : sinh_grad
forward : sinh (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......
......@@ -831,7 +831,7 @@
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : sin
backward : sin_grad, sin_double_grad
backward : sin_grad, sin_double_grad, sin_triple_grad
inputs :
x : X
outputs :
......
......@@ -107,6 +107,17 @@ void TanhTripleGradKernel(const Context& dev_ctx,
DenseTensor* d_dout,
DenseTensor* d_ddx);
template <typename T, typename Context>
void SinTripleGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& dout,
const DenseTensor& ddx,
const DenseTensor& d_dx_new,
const DenseTensor& d_ddout,
DenseTensor* d_x_new,
DenseTensor* d_dout,
DenseTensor* d_ddx);
template <typename T, typename Context>
void LeakyReluDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& x,
......
......@@ -345,6 +345,15 @@ PD_REGISTER_KERNEL(sin_double_grad,
phi::dtype::float16,
int,
int64_t) {}
PD_REGISTER_KERNEL(sin_triple_grad,
CPU,
ALL_LAYOUT,
phi::SinTripleGradKernel,
float,
double,
phi::dtype::float16,
int,
int64_t) {}
PD_REGISTER_ACTIVATION_GRAD_KERNEL(softsign_grad, SoftsignGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_grad, SigmoidGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_double_grad, SigmoidDoubleGradKernel)
......
......@@ -138,6 +138,64 @@ struct SinDoubleGradFunctor : public BaseActivationFunctor<T> {
static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
// 1st reverse grad
// y = sin(x)
// x --> y
// d1x = d1y * cos(x)
//
// 2nd reverse grad
// x, d1y --> d1x
// d2x = -sin(x) * d1y * d2d1x
// d2d1y = cos(x) * d2d1x
//
// 3rd reverse grad
// x, d1y, d2d1x --> d2x, d2d1y
// d3x = -cos(x) * d1y * d2d1x * d3d2x - sin(x) * d2d1x * d3d2d1y
// d3d1y = -sin(x) * d2d1x * d3d2x
// d3d2d1x = -sin(x) * d1y * d3d2x + cos(x) * d3d2d1y
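//
// Equivalently, with f = d2x = -sin(x) * d1y * d2d1x and
// g = d2d1y = cos(x) * d2d1x, and upstream gradients d3d2x (w.r.t. d2x)
// and d3d2d1y (w.r.t. d2d1y), the chain rule gives
//   d3x     = df/dx * d3d2x       + dg/dx * d3d2d1y
//   d3d1y   = df/d(d1y) * d3d2x   + dg/d(d1y) * d3d2d1y
//   d3d2d1x = df/d(d2d1x) * d3d2x + dg/d(d2d1x) * d3d2d1y
// which expand to the three formulas above.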
template <typename T>
struct SinTripleGradFunctor : public BaseActivationFunctor<T> {
template <typename Device>
void operator()(const Device& dev,
const DenseTensor* X,
const DenseTensor* ddX,
const DenseTensor* dOut,
const DenseTensor* d_DDOut,
const DenseTensor* d_dx_New,
DenseTensor* d_d_Out,
DenseTensor* d_x_New,
DenseTensor* d_DDx) const {
auto* d = dev.eigen_device();
auto x = EigenVector<T>::Flatten(
GET_DATA_SAFELY(X, "Input", "x", "SinTripleGrad"));
auto d2d1x = EigenVector<T>::Flatten(
GET_DATA_SAFELY(ddX, "Input", "d2d1x", "SinTripleGrad"));
auto d1y = EigenVector<T>::Flatten(
GET_DATA_SAFELY(dOut, "Input", "d1y", "SinTripleGrad"));
auto d3d2d1y = EigenVector<T>::Flatten(
GET_DATA_SAFELY(d_DDOut, "Input", "d3d2d1y", "SinTripleGrad"));
auto d3d2x = EigenVector<T>::Flatten(
GET_DATA_SAFELY(d_dx_New, "Input", "d3d2x", "SinTripleGrad"));
auto d3x = EigenVector<T>::Flatten(
GET_DATA_SAFELY(d_x_New, "Output", "d3x", "SinTripleGrad"));
d3x.device(*d) = -x.unaryExpr(Cosine<T>()) * d1y * d2d1x * d3d2x -
x.unaryExpr(Sine<T>()) * d2d1x * d3d2d1y;
auto d3d1y = EigenVector<T>::Flatten(
GET_DATA_SAFELY(d_d_Out, "Output", "d3d1y", "SinTripleGrad"));
d3d1y.device(*d) = -x.unaryExpr(Sine<T>()) * d2d1x * d3d2x;
auto d3d2d1x = EigenVector<T>::Flatten(
GET_DATA_SAFELY(d_DDx, "Output", "d3d2d1x", "SinTripleGrad"));
d3d2d1x.device(*d) = -x.unaryExpr(Sine<T>()) * d1y * d3d2x +
x.unaryExpr(Cosine<T>()) * d3d2d1y;
}
static constexpr ActBwdOpFwdDeps FwdDeps() {
return ActBwdOpFwdDeps::kDepX;  // the functor reads the forward input x, matching SinDoubleGradFunctor
}
};
// reciprocal(x) = 1 / x
template <typename T>
struct ReciprocalFunctor : public BaseActivationFunctor<T> {
......
......@@ -427,6 +427,16 @@ PD_REGISTER_KERNEL(sin_double_grad,
int64_t,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(sin_triple_grad,
GPU,
ALL_LAYOUT,
phi::SinTripleGradKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
PD_REGISTER_ACTIVATION_GRAD_KERNEL(softsign_grad, SoftsignGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_grad, SigmoidGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_double_grad, SigmoidDoubleGradKernel)
......
......@@ -467,4 +467,38 @@ void SinDoubleGradKernel(const Context& dev_ctx,
functor(dev_ctx, &x, &dout, &ddx, dx, ddout);
}
template <typename T, typename Context>
void SinTripleGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& dout,
const DenseTensor& ddx,
const DenseTensor& d_dx_new,
const DenseTensor& d_ddout,
DenseTensor* d_x_new,
DenseTensor* d_dout,
DenseTensor* d_ddx) {
if (d_dout) {
d_dout->Resize(x.dims());
dev_ctx.template Alloc<T>(d_dout);
}
if (d_x_new) {
d_x_new->Resize(x.dims());
dev_ctx.template Alloc<T>(d_x_new);
}
if (d_ddx) {
d_ddx->Resize(ddx.dims());
dev_ctx.template Alloc<T>(d_ddx);
}
funcs::SinTripleGradFunctor<T> functor;
functor(dev_ctx,
&x,
&ddx,
&dout,
&d_ddout,
&d_dx_new, // input
d_dout,
d_x_new,
d_ddx); // output
}
} // namespace phi
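As a sanity check on the closed-form expressions in the functor comment, the following standalone NumPy sketch (illustrative only, not part of the diff; names mirror the comment) compares them against central finite differences of the second-order outputs:

import numpy as np

rng = np.random.default_rng(0)
x, d1y, d2d1x, d3d2x, d3d2d1y = rng.standard_normal(5)
eps = 1e-6

def second_order(x, d1y, d2d1x):
    # Outputs of the second reverse grad.
    d2x = -np.sin(x) * d1y * d2d1x
    d2d1y = np.cos(x) * d2d1x
    return d2x, d2d1y

def obj(x, d1y, d2d1x):
    # Scalar whose gradients w.r.t. (x, d1y, d2d1x) are the triple-grad outputs.
    d2x, d2d1y = second_order(x, d1y, d2d1x)
    return d2x * d3d2x + d2d1y * d3d2d1y

# Closed-form expressions from the functor comment.
d3x = -np.cos(x) * d1y * d2d1x * d3d2x - np.sin(x) * d2d1x * d3d2d1y
d3d1y = -np.sin(x) * d2d1x * d3d2x
d3d2d1x = -np.sin(x) * d1y * d3d2x + np.cos(x) * d3d2d1y

def fd_grad(i):
    # Central finite difference of obj w.r.t. argument i.
    plus, minus = [x, d1y, d2d1x], [x, d1y, d2d1x]
    plus[i] += eps
    minus[i] -= eps
    return (obj(*plus) - obj(*minus)) / (2 * eps)

for analytic, i in [(d3x, 0), (d3d1y, 1), (d3d2d1x, 2)]:
    assert np.isclose(analytic, fd_grad(i), atol=1e-5)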
......@@ -565,5 +565,37 @@ class TestPowDoubleGradCheck2(unittest.TestCase):
            self.func(p)


class TestSinTripleGradCheck(unittest.TestCase):
    def sin_wrapper(self, x):
        return paddle.sin(x[0])

    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.0005
        dtype = np.float64
        x = layers.data('x', shape, False, dtype=dtype)
        x.persistable = True
        y = layers.sin(x)
        x_arr = np.random.random(shape).astype(dtype)
        x_arr[np.abs(x_arr) < 0.005] = 0.002
        gradient_checker.triple_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(
            self.sin_wrapper, [x], y, x_init=x_arr, place=place
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()