Unverified commit 02912ce7, authored by oyjxer, committed by GitHub

【NPU】Fix npu kernel elementwise_div_grad (#31753)

Parent e6af7c0d
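The bug being fixed is in the y-gradient path of the NPU elementwise_div kernel. For out = x / y, the chain rule gives (a standard derivation, added here for context; it is not part of the commit message):

```latex
\mathrm{out} = \frac{x}{y}, \qquad
\frac{\partial\,\mathrm{out}}{\partial y} = -\frac{x}{y^{2}} = -\frac{\mathrm{out}}{y},
\qquad
dy = d\mathrm{out}\cdot\left(-\frac{\mathrm{out}}{y}\right)
```

The diff below rewrites the gradient weight `y_grad_w` from the old `Mul(out, y_power)` formulation to an explicit `Neg` followed by `Div`, i.e. `y_grad_w = -out / y`.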
...
@@ -110,9 +110,15 @@ class ElementwiseDivGradNPUKernel : public framework::OpKernel<T> {
     if (dy) {
       dy->mutable_data<T>(place);
-      Tensor y_grad_w(x->type());
+      Tensor neg_out(y->type());
+      neg_out.mutable_data<T>(y->dims(), place);
+      auto neg_out_runner = NpuOpRunner("Neg", {*out},
+                                        {neg_out}, {});
+      neg_out_runner.Run(stream);
+
+      Tensor y_grad_w(y->type());
       y_grad_w.mutable_data<T>(y->dims(), place);
-      auto y_grad_w_runner = NpuOpRunner("Mul", {*out, y_power},
+      auto y_grad_w_runner = NpuOpRunner("Div", {neg_out, *y},
                                          {y_grad_w}, {});
       y_grad_w_runner.Run(stream);
...
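As a sanity check, here is a minimal NumPy sketch (not part of the commit; the shapes and the final multiplication by dout are assumptions, since the surrounding kernel code is truncated) of what the new Neg + Div sequence computes for the y-gradient weight:

```python
import numpy as np

# Inputs for out = x / y (elementwise), with an upstream gradient dout.
x = np.random.rand(3, 4).astype(np.float32) + 0.5
y = np.random.rand(3, 4).astype(np.float32) + 0.5
dout = np.random.rand(3, 4).astype(np.float32)

out = x / y

# New kernel sequence: Neg then Div gives the weight -out / y ...
neg_out = -out            # the "Neg" runner
y_grad_w = neg_out / y    # the "Div" runner

# ... which matches the analytic derivative d(x/y)/dy = -x / y**2.
dy = dout * y_grad_w
assert np.allclose(dy, dout * (-x / y**2), rtol=1e-5)
```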
...
@@ -56,12 +56,24 @@ class TestElementwiseDiv(OpTest):
     def test_check_output(self):
         self.check_output_with_place(self.place, check_dygraph=False)
 
-    # TODO(ascendrc): Div grad test
-    # def test_check_grad(self):
-    #     if self.dtype == np.float16:
-    #         return
-    #     self.check_grad(['X'], 'Out')
+    def test_check_grad_normal(self):
+        self.check_grad_with_place(
+            self.place, ['X', 'Y'],
+            'Out',
+            max_relative_error=0.007,
+            check_dygraph=False)
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad_with_place(
+            self.place, ['Y'],
+            'Out',
+            max_relative_error=0.007,
+            no_grad_set=set("X"),
+            check_dygraph=False)
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad_with_place(
+            self.place, ['X'], 'Out', no_grad_set=set("Y"), check_dygraph=False)
 
 @unittest.skipIf(not paddle.is_compiled_with_npu(),
...
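The check_grad_with_place calls compare the operator's analytic gradient against a finite-difference estimate, accepting deviations up to max_relative_error. A minimal sketch of that idea for the y-gradient of division (my illustration of the principle, not OpTest's actual implementation):

```python
import numpy as np

def numeric_grad_y(x, y, dout, eps=1e-3):
    # Central finite difference of sum(dout * (x / y)) w.r.t. y.
    return dout * ((x / (y + eps) - x / (y - eps)) / (2 * eps))

x = np.random.rand(3, 4).astype(np.float64) + 0.5
y = np.random.rand(3, 4).astype(np.float64) + 0.5
dout = np.ones_like(x)

analytic = dout * (-x / y**2)      # the formula the fixed kernel implements
numeric = numeric_grad_y(x, y, dout)
max_rel_err = np.abs(analytic - numeric).max() / np.abs(numeric).max()
assert max_rel_err < 0.007         # same tolerance the test uses
```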
...
@@ -123,7 +135,7 @@ class TestElementwiseDivNet(unittest.TestCase):
         e = paddle.multiply(a, b)
         f = paddle.multiply(c, d)
         f.stop_gradient = True
-        g = paddle.divide(e, f)
+        g = fluid.layers.elementwise_div(e, f)
         fc_1 = fluid.layers.fc(input=g, size=128)
         prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
...