Unverified commit 79d71234, authored by FlyingQianMM, committed by GitHub

Correct CPU gradients of the argsort op (#22739)

* Correct CPU gradients of the argsort op, form a network to test its forward and backward process, test=develop

* fix dynamic threshold error in test_argsort_op, test=develop
Parent: 2b80e9a7
@@ -81,13 +81,13 @@ static void FullAssign(Type input_height, Type input_width, int input_dim,
      auto e_input = EigenVector<T>::Flatten(*input);
      auto e_indices = EigenVector<Type>::Flatten(*indices);
      for (Type j = 0; j < input_width; ++j) {
-        t_out[i * input_width + e_indices(j)] = e_input(e_indices(j));
+        t_out[i * input_width + e_indices(j)] = e_input(j);
      }
    } else {
      auto e_input = EigenMatrix<T>::Reshape(*input, input_dim - 1);
      auto e_indices = EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
-        t_out[i * input_width + e_indices(i, j)] = e_input(i, e_indices(i, j));
+        t_out[i * input_width + e_indices(i, j)] = e_input(i, j);
      }
    }
  }
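The one-line change in each branch above is the whole fix: in the argsort backward pass, FullAssign scatters the incoming gradient value at sorted position j back to the original position e_indices(j). The old code read e_input(e_indices(j)) instead of e_input(j), which only agrees with the correct result when the indices form the identity permutation. A minimal numpy sketch of the two behaviors (illustration only, not part of the patch):

import numpy as np

x = np.array([0.3, 0.1, 0.2])
indices = np.argsort(x)              # [1, 2, 0]; sorted[j] == x[indices[j]]
dout = np.array([10.0, 20.0, 30.0])  # upstream gradient w.r.t. the sorted output

# Fixed rule: route the gradient at sorted slot j back to original slot indices[j].
dx_fixed = np.empty_like(dout)
dx_fixed[indices] = dout             # dx_fixed[indices[j]] = dout[j]

# Old rule: gathered dout[indices[j]] instead of dout[j] before scattering.
dx_buggy = np.empty_like(dout)
dx_buggy[indices] = dout[indices]    # collapses to dx_buggy == dout

print(dx_fixed)  # [30. 10. 20.]: the gradient permuted back through the sort
print(dx_buggy)  # [10. 20. 30.]: left in upstream order, hence the wrong CPU gradients

The rewritten unit test below catches this by differentiating through fluid.layers.argsort and comparing against numerical gradients.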
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,34 +15,176 @@
from __future__ import print_function

import unittest
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
import numpy as np
-from op_test import OpTest
+import six
import paddle.fluid.core as core
+from paddle.fluid import ParamAttr
+from paddle.fluid.framework import Program, grad_var_name
+from paddle.fluid.executor import Executor
+from paddle.fluid.backward import append_backward
-class TestArgsortOp(OpTest):
-    def setUp(self):
-        self.init_axis()
-        self.init_datatype()
-        self.init_direction()
-        x = np.random.random((2, 3, 4, 5, 10)).astype(self.dtype)
-        self.attrs = {'axis': self.axis, 'descending': self.descending}
-        if self.axis < 0:
-            self.axis = self.axis + len(x.shape)
+np.random.seed(123)
+
+
+class PyArgsort(object):
+    def __init__(self, input_shape, axis, descending, dtype):
+        self.x = np.random.random(input_shape).astype(dtype)
+        self.label = np.random.random(input_shape).astype(dtype)
+        if axis < 0:
+            self.axis = axis + len(self.x.shape)
+        else:
+            self.axis = axis
+        self.descending = descending
+
+    def forward(self):
        if self.descending:
            self.indices = np.flip(
                np.argsort(
-                    x, kind='quicksort', axis=self.axis), self.axis)
-            self.out = np.flip(
+                    self.x, kind='quicksort', axis=self.axis), self.axis)
+            self.sorted_x = np.flip(
                np.sort(
-                    x, kind='quicksort', axis=self.axis), self.axis)
+                    self.x, kind='quicksort', axis=self.axis), self.axis)
        else:
-            self.indices = np.argsort(x, kind='quicksort', axis=self.axis)
-            self.out = np.sort(x, kind='quicksort', axis=self.axis)
+            self.indices = np.argsort(self.x, kind='quicksort', axis=self.axis)
+            self.sorted_x = np.sort(self.x, kind='quicksort', axis=self.axis)
+        self.loss = self.sorted_x * self.label
+        self.loss = np.sum(self.loss)
+        out = (np.array(
+            self.indices, dtype=self.indices.dtype), np.array(
+                self.sorted_x, dtype=self.sorted_x.dtype), np.array(
+                    [self.loss], dtype=self.loss.dtype))
+        return out
-        self.op_type = "argsort"
-        self.inputs = {'X': x}
-        self.outputs = {'Indices': self.indices, 'Out': self.out}
+
+
+def create_tensor(np_data, place):
+    tensor = core.LoDTensor()
+    tensor.set(np_data, place)
+    return tensor
+class TestArgsortOpCPU(unittest.TestCase):
+    def setup_program(self):
+        self.main_program = Program()
+        self.startup_program = Program()
+        self.init_place()
+
+    def setUp(self):
+        self.init_axis()
+        self.init_datatype()
+        self.init_direction()
+        self.init_inputshape()
+        self.setup_program()
+        self.feed_data_field = {"x", "label"}
+        self.grad_data_field = {"x"}
+        self.py_argsort = PyArgsort(self.input_shape, self.axis,
+                                    self.descending, self.dtype)
+        with fluid.program_guard(self.main_program, self.startup_program):
+            x = fluid.layers.data(
+                name="x", shape=self.input_shape, dtype=self.dtype)
+            x.stop_gradient = False
+            label = fluid.layers.data(
+                name="label", shape=self.input_shape, dtype=self.dtype)
+            self.sorted_x, self.index = fluid.layers.argsort(
+                input=x, axis=self.axis, descending=self.descending)
+            self.sorted_x.stop_gradient = False
+            loss = fluid.layers.elementwise_mul(self.sorted_x, label)
+            self.loss = fluid.layers.reduce_sum(loss)
+
+    def forward(self):
+        self.feed_map = {
+            x: create_tensor(getattr(self.py_argsort, x), self.place)
+            for x in self.feed_data_field
+        }
+        exe = Executor(self.place)
+        out = exe.run(self.main_program,
+                      feed=self.feed_map,
+                      fetch_list=[self.index, self.sorted_x, self.loss])
+        return out
+
+    def backward(self):
+        self.feed_map = {
+            x: create_tensor(getattr(self.py_argsort, x), self.place)
+            for x in self.feed_data_field
+        }
+        fetch_list = [
+            self.main_program.global_block().var(grad_var_name(x))
+            for x in self.grad_data_field
+        ]
+        exe = Executor(self.place)
+        out = exe.run(self.main_program,
+                      feed=self.feed_map,
+                      fetch_list=fetch_list,
+                      return_numpy=False)
+        return out
+
+    def test_backward(self, numeric_grad_delta=1e-5, max_relative_error=1e-7):
+        self.check_forward()
+        with fluid.program_guard(self.main_program, self.startup_program):
+            append_backward(self.loss)
+        ana_grad = [np.array(x) for x in self.backward()]
+        num_grad = self.get_numerical_gradient(delta=numeric_grad_delta)
+        self.assert_is_close(
+            num_grad,
+            ana_grad,
+            'x',
+            max_relative_error=max_relative_error,
+            msg_prefix="Gradient Check On %s" % str(self.place))
+
+    def check_forward(self):
+        pd_outputs = self.forward()
+        py_outputs = self.py_argsort.forward()
+        for pd_output, py_output in zip(pd_outputs, py_outputs):
+            self.assertEqual(pd_output.shape, py_output.shape)
+            self.assertTrue(
+                np.allclose(
+                    pd_output, py_output, atol=0, equal_nan=False))
+
+    def get_numerical_gradient(self, delta=1e-7):
+        if self.dtype == 'float16':
+            delta = np.array(delta).astype(np.float16)
+        feed_list = [getattr(self.py_argsort, x) for x in self.grad_data_field]
+        grad_list = [np.zeros_like(x) for x in feed_list]
+        for feed, grad in zip(feed_list, grad_list):
+            for f, g in np.nditer([feed, grad], op_flags=['readwrite']):
+                o = float(f)
+                f[...] = o + delta
+                y_pos = self.forward()[2]
+                f[...] = o - delta
+                y_neg = self.forward()[2]
+                f[...] = o
+                dout_dfeed = (y_pos - y_neg) / (delta * 2)
+                g[...] = dout_dfeed[0]
+        return grad_list
+
+    def assert_is_close(self, numeric_grads, analytic_grads, names,
+                        max_relative_error, msg_prefix):
+        for a, b, name in six.moves.zip(numeric_grads, analytic_grads, names):
+            abs_a = np.abs(a)
+            abs_a[abs_a < 1e-3] = 1
+            diff_mat = np.abs(a - b) / abs_a
+            max_diff = np.max(diff_mat)
+
+            def err_msg():
+                offset = np.argmax(diff_mat > max_relative_error)
+                return ("%s error, %s variable %s max gradient diff %f over limit %f, "
+                        "the first error element is %d, expected %f, but got %f.") \
+                    % ('argsort', msg_prefix, name, max_diff, max_relative_error,
+                       offset, a.flatten()[offset], b.flatten()[offset])

+            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def init_axis(self):
        self.axis = -1
@@ -53,111 +195,127 @@ class TestArgsortOp(OpTest):
    def init_direction(self):
        self.descending = False

-    def test_check_output(self):
-        self.check_output()
+    def init_inputshape(self):
+        self.input_shape = (2, 2, 2, 2, 3)
+
+    def init_place(self):
+        self.place = core.CPUPlace()

-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+
+class TestArgsortOpGPU(TestArgsortOpCPU):
+    def init_place(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
-class TestArgsortOpAxis0(TestArgsortOp):
+class TestArgsortOpAxis0CPU(TestArgsortOpCPU):
    def init_axis(self):
        self.axis = 0


-class TestArgsortOpAxis1(TestArgsortOp):
+class TestArgsortOpAxis0GPU(TestArgsortOpGPU):
    def init_axis(self):
-        self.axis = 1
+        self.axis = 0


-class TestArgsortOpAxis2(TestArgsortOp):
+class TestArgsortOpAxis1CPU(TestArgsortOpCPU):
    def init_axis(self):
-        self.axis = 2
+        self.axis = 1


-class TestArgsortOpAxisNeg1(TestArgsortOp):
+class TestArgsortOpAxis1GPU(TestArgsortOpGPU):
    def init_axis(self):
-        self.axis = -1
+        self.axis = 1


-class TestArgsortOpAxisNeg2(TestArgsortOp):
+class TestArgsortOpAxis2CPU(TestArgsortOpCPU):
    def init_axis(self):
-        self.axis = -2
+        self.axis = 2


-class TestArgsortOpFP16(TestArgsortOp):
-    def init_datatype(self):
-        if core.is_compiled_with_cuda():
-            self.dtype = 'float16'
+class TestArgsortOpAxis2GPU(TestArgsortOpGPU):
+    def init_axis(self):
+        self.axis = 2

-    def test_check_output(self):
-        pass

-    def test_check_output_with_place(self):
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=1e-5)
+class TestArgsortOpAxisNeg1CPU(TestArgsortOpCPU):
+    def init_axis(self):
+        self.axis = -1


-class TestArgsortOpFP16Axis0(TestArgsortOpFP16):
+class TestArgsortOpAxisNeg1GPU(TestArgsortOpGPU):
    def init_axis(self):
-        self.axis = 0
+        self.axis = -1


-class TestArgsortOpFP16Axis2(TestArgsortOpFP16):
+class TestArgsortOpAxisNeg2CPU(TestArgsortOpCPU):
    def init_axis(self):
-        self.axis = 2
+        self.axis = -2


-class TestArgsortOpFP16AxisNeg2(TestArgsortOpFP16):
+class TestArgsortOpAxisNeg2GPU(TestArgsortOpGPU):
    def init_axis(self):
        self.axis = -2


-class TestArgsortOpFP16Axis4Neg4(TestArgsortOpFP16):
-    def init_axis(self):
-        self.axis = -4
+class TestArgsortOpDescendingAxisCPU(TestArgsortOpCPU):
+    def init_direction(self):
+        self.descending = True


-class TestArgsortOpDescendingAxis(TestArgsortOp):
+class TestArgsortOpDescendingAxisGPU(TestArgsortOpGPU):
    def init_direction(self):
        self.descending = True


-class TestArgsortOpDescendingAxis0(TestArgsortOpAxis0):
+class TestArgsortOpDescendingAxis0CPU(TestArgsortOpAxis0CPU):
    def init_direction(self):
        self.descending = True


-class TestArgsortOpDescendingAxis1(TestArgsortOpAxis1):
+class TestArgsortOpDescendingAxis0GPU(TestArgsortOpAxis0GPU):
    def init_direction(self):
        self.descending = True


-class TestArgsortOpDescendingAxis2(TestArgsortOpAxis2):
+class TestArgsortOpDescendingAxis1CPU(TestArgsortOpAxis1CPU):
    def init_direction(self):
        self.descending = True


-class TestArgsortOpDescendingAxisNeg1(TestArgsortOpAxisNeg1):
+class TestArgsortOpDescendingAxis1GPU(TestArgsortOpAxis1GPU):
    def init_direction(self):
        self.descending = True


-class TestArgsortOpDescendingAxisNeg2(TestArgsortOpAxisNeg2):
+class TestArgsortOpDescendingAxis2CPU(TestArgsortOpAxis2CPU):
    def init_direction(self):
        self.descending = True


-class TestArgsortOpFP32Axis(TestArgsortOp):
-    def init_datatype(self):
-        self.dtype = "float32"
+class TestArgsortOpDescendingAxis2GPU(TestArgsortOpAxis2GPU):
+    def init_direction(self):
+        self.descending = True


-class TestArgsortOpFP32DescendingAxis(TestArgsortOp):
-    def init_datatype(self):
-        self.dtype = "float32"
+class TestArgsortOpDescendingAxisNeg1CPU(TestArgsortOpAxisNeg1CPU):
+    def init_direction(self):
+        self.descending = True


+class TestArgsortOpDescendingAxisNeg1GPU(TestArgsortOpAxisNeg1GPU):
+    def init_direction(self):
+        self.descending = True
+
+
+class TestArgsortOpDescendingAxisNeg2CPU(TestArgsortOpAxisNeg2CPU):
+    def init_direction(self):
+        self.descending = True
+
+
+class TestArgsortOpDescendingAxisNeg2GPU(TestArgsortOpAxisNeg2GPU):
+    def init_direction(self):
+        self.descending = True
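Because the diff above is a test rewrite rather than prose, a compact standalone restatement of what test_backward verifies may help. The sketch below is an assumed simplification (hand-picked, well-separated values keep the sort order stable under the perturbation, whereas the real test feeds random tensors through the fluid program on CPU and GPU); it checks the scatter-rule gradient against the same central-difference estimate, (y_pos - y_neg) / (delta * 2), used in get_numerical_gradient:

import numpy as np

x = np.array([0.3, 0.1, 0.9, 0.5, 0.7, 0.2])      # well-separated inputs
label = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

def loss(x):
    # Mirrors the test network: loss = reduce_sum(sort(x) * label).
    return np.sum(np.sort(x) * label)

# Analytic gradient via the fixed scatter rule: dx[indices[j]] = label[j].
indices = np.argsort(x)
dx_analytic = np.empty_like(x)
dx_analytic[indices] = label

# Central-difference estimate, element by element, as in get_numerical_gradient.
delta = 1e-5
dx_numeric = np.zeros_like(x)
for k in range(x.size):
    orig = x[k]
    x[k] = orig + delta
    y_pos = loss(x)
    x[k] = orig - delta
    y_neg = loss(x)
    x[k] = orig
    dx_numeric[k] = (y_pos - y_neg) / (delta * 2)

assert np.allclose(dx_analytic, dx_numeric, rtol=1e-6, atol=1e-8)

The central difference is used rather than a forward difference because its truncation error shrinks quadratically in delta, which is what lets the test hold the analytic gradient to a max_relative_error as tight as 1e-7.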