未验证 提交 cc853e95 编写于 作者: H hong 提交者: GitHub

Add yaml config part0 (#40020)

* add add yaml

* add elementwise add yaml; test=develop

* add norm

* update

* add some yaml config; test=develop

* fix bug; test=develop

* fix compare error; test=develop

* revert erger_gen.py

* update; test=deveop

* remove usless code; test=deveop

* fix bug; test=develop

* fix test error; test=develop

* remove int_type; test=develop

* fix type error; test=develop

* format; test=develop

* remove type register; test=develop

* polish code; test=develop

* fix ci error; test=develop
上级 95fbbc5b
...@@ -64,6 +64,8 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) { ...@@ -64,6 +64,8 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) {
return pybind11::detail::npy_api::NPY_INT64_; return pybind11::detail::npy_api::NPY_INT64_;
case phi::DataType::FLOAT16: case phi::DataType::FLOAT16:
return pybind11::detail::NPY_FLOAT16_; return pybind11::detail::NPY_FLOAT16_;
case phi::DataType::BFLOAT16:
return pybind11::detail::NPY_UINT16_;
case phi::DataType::FLOAT32: case phi::DataType::FLOAT32:
return pybind11::detail::npy_api::NPY_FLOAT_; return pybind11::detail::npy_api::NPY_FLOAT_;
case phi::DataType::FLOAT64: case phi::DataType::FLOAT64:
......
...@@ -180,6 +180,7 @@ std::shared_ptr<phi::DenseTensor> PrepareData( ...@@ -180,6 +180,7 @@ std::shared_ptr<phi::DenseTensor> PrepareData(
const phi::TensorArgDef& target_args_def, const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag) { const TransformFlag& transform_flag) {
const auto& tensor_in = input.impl(); const auto& tensor_in = input.impl();
VLOG(6) << tensor_in->dtype() << "\t" << target_args_def.dtype;
if (!transform_flag.NeedTransform() || !tensor_in->initialized() || if (!transform_flag.NeedTransform() || !tensor_in->initialized() ||
(!NeedTransformPlace( (!NeedTransformPlace(
tensor_in->place(), target_args_def.backend, transform_flag) && tensor_in->place(), target_args_def.backend, transform_flag) &&
......
...@@ -110,8 +110,9 @@ void* DenseTensor::AllocateFrom(Allocator* allocator, ...@@ -110,8 +110,9 @@ void* DenseTensor::AllocateFrom(Allocator* allocator,
template <typename T> template <typename T>
const T* DenseTensor::data() const { const T* DenseTensor::data() const {
check_memory_size(); check_memory_size();
PADDLE_ENFORCE( PADDLE_ENFORCE_EQ(
(dtype() == paddle::experimental::CppTypeToDataType<T>::Type()), dtype(),
paddle::experimental::CppTypeToDataType<T>::Type(),
phi::errors::InvalidArgument( phi::errors::InvalidArgument(
"The type of data we are trying to retrieve does not match the " "The type of data we are trying to retrieve does not match the "
"type of data currently contained in the container.")); "type of data currently contained in the container."));
......
...@@ -17,6 +17,9 @@ limitations under the License. */ ...@@ -17,6 +17,9 @@ limitations under the License. */
#include <tuple> #include <tuple>
#include "paddle/phi/core/meta_tensor.h" #include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/infermeta/multiary.h"
#include "paddle/phi/infermeta/ternary.h"
#include "paddle/phi/infermeta/unary.h" #include "paddle/phi/infermeta/unary.h"
namespace phi { namespace phi {
......
...@@ -1668,6 +1668,17 @@ void TransposeInferMeta(const MetaTensor& x, ...@@ -1668,6 +1668,17 @@ void TransposeInferMeta(const MetaTensor& x,
out->set_dtype(x.dtype()); out->set_dtype(x.dtype());
} }
void TransposeGradInferMeta(const MetaTensor& x,
const std::vector<int>& axis,
MetaTensor* out) {
std::vector<int> reversed_axis(axis);
for (size_t i = 0; i < axis.size(); i++) {
reversed_axis[axis[i]] = i;
}
TransposeInferMeta(x, reversed_axis, out);
}
void UnbindInferMeta(const MetaTensor& x, void UnbindInferMeta(const MetaTensor& x,
int axis, int axis,
std::vector<MetaTensor>* outs) { std::vector<MetaTensor>* outs) {
...@@ -1907,6 +1918,7 @@ void OneHotInferMeta(const MetaTensor& x, ...@@ -1907,6 +1918,7 @@ void OneHotInferMeta(const MetaTensor& x,
auto out_dims = phi::make_ddim(out_dims_vec); auto out_dims = phi::make_ddim(out_dims_vec);
out->set_dims(out_dims); out->set_dims(out_dims);
out->share_lod(x); out->share_lod(x);
out->set_dtype(phi::DataType::FLOAT32); out->set_dtype(phi::DataType::FLOAT32);
} }
......
...@@ -255,6 +255,10 @@ void TransposeInferMeta(const MetaTensor& x, ...@@ -255,6 +255,10 @@ void TransposeInferMeta(const MetaTensor& x,
const std::vector<int>& axis, const std::vector<int>& axis,
MetaTensor* out); MetaTensor* out);
void TransposeGradInferMeta(const MetaTensor& x,
const std::vector<int>& axis,
MetaTensor* out);
void UnbindInferMeta(const MetaTensor& x, void UnbindInferMeta(const MetaTensor& x,
int axis, int axis,
std::vector<MetaTensor>* outs); std::vector<MetaTensor>* outs);
......
...@@ -41,7 +41,7 @@ void IndexSampleInner(const Context &context, ...@@ -41,7 +41,7 @@ void IndexSampleInner(const Context &context,
std::vector<T> input_vec; std::vector<T> input_vec;
std::vector<IndexT> index_vec; std::vector<IndexT> index_vec;
paddle::framework::TensorToVector(input, context, &input_vec); paddle::framework::TensorToVector(input, context, &input_vec);
paddle::framework::TensorToVector(index, context, &index_vec); paddle::framework::TensorToVector<IndexT>(index, context, &index_vec);
std::vector<T> res(index_ids_num); std::vector<T> res(index_ids_num);
for (int i = 0; i < index_ids_num; i++) { for (int i = 0; i < index_ids_num; i++) {
......
...@@ -35,7 +35,7 @@ void LimitGridDim(const Context& ctx, dim3* grid_dim) { ...@@ -35,7 +35,7 @@ void LimitGridDim(const Context& ctx, dim3* grid_dim) {
#define PREDEFINED_BLOCK_SIZE_X 512 #define PREDEFINED_BLOCK_SIZE_X 512
#define PREDEFINED_BLOCK_SIZE 1024 #define PREDEFINED_BLOCK_SIZE 1024
#define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MIN(a, b) ((a) < (b) ? (a) : (b))
} } // namespace
template <typename T, typename IndexT = int> template <typename T, typename IndexT = int>
__global__ void IndexSampleForward(const IndexT* index, __global__ void IndexSampleForward(const IndexT* index,
......
...@@ -30,6 +30,11 @@ final_state_name_mapping = { ...@@ -30,6 +30,11 @@ final_state_name_mapping = {
"y": "Y", "y": "Y",
"out": "Out", "out": "Out",
}, },
# "elementwise_add": {
# "final_op_name": "final_state_add",
# "x": "X",
# "y": "Y",
# },
"trunc": { "trunc": {
"final_op_name": "final_state_trunc", "final_op_name": "final_state_trunc",
"x": "X", "x": "X",
......
...@@ -5611,6 +5611,8 @@ def transpose(x, perm, name=None): ...@@ -5611,6 +5611,8 @@ def transpose(x, perm, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_transpose(x, perm)
out, _ = _C_ops.transpose2(x, 'axis', perm) out, _ = _C_ops.transpose2(x, 'axis', perm)
return out return out
...@@ -8550,6 +8552,8 @@ def gather_nd(input, index, name=None): ...@@ -8550,6 +8552,8 @@ def gather_nd(input, index, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_gather_nd(input, index)
return _C_ops.gather_nd(input, index) return _C_ops.gather_nd(input, index)
check_variable_and_dtype( check_variable_and_dtype(
input, 'input', input, 'input',
...@@ -8726,6 +8730,8 @@ def scatter_nd_add(ref, index, updates, name=None): ...@@ -8726,6 +8730,8 @@ def scatter_nd_add(ref, index, updates, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_scatter_nd_add(ref, index, updates)
op = getattr(_C_ops, 'scatter_nd_add') op = getattr(_C_ops, 'scatter_nd_add')
return op(ref, index, updates) return op(ref, index, updates)
...@@ -15292,6 +15298,8 @@ def gather_tree(ids, parents): ...@@ -15292,6 +15298,8 @@ def gather_tree(ids, parents):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_gather_tree(ids, parents)
return _C_ops.gather_tree(ids, parents) return _C_ops.gather_tree(ids, parents)
else: else:
helper = LayerHelper('gather_tree', **locals()) helper = LayerHelper('gather_tree', **locals())
......
...@@ -27,6 +27,7 @@ class TestAddMMOp(OpTest): ...@@ -27,6 +27,7 @@ class TestAddMMOp(OpTest):
# test basic # test basic
def setUp(self): def setUp(self):
self.op_type = "addmm" self.op_type = "addmm"
self.python_api = paddle.addmm
self.dtype = np.float64 self.dtype = np.float64
self.init_dtype_type() self.init_dtype_type()
self.inputs = { self.inputs = {
...@@ -43,19 +44,19 @@ class TestAddMMOp(OpTest): ...@@ -43,19 +44,19 @@ class TestAddMMOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=False)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['Input', 'X', 'Y'], 'Out') self.check_grad(['Input', 'X', 'Y'], 'Out', check_eager=False)
def test_check_grad_x(self): def test_check_grad_x(self):
self.check_grad(['X'], 'Out', no_grad_set=None) self.check_grad(['X'], 'Out', no_grad_set=None, check_eager=False)
def test_check_grad_y(self): def test_check_grad_y(self):
self.check_grad(['Y'], 'Out', no_grad_set=None) self.check_grad(['Y'], 'Out', no_grad_set=None, check_eager=False)
def test_check_grad_input(self): def test_check_grad_input(self):
self.check_grad(['Input'], 'Out', no_grad_set=None) self.check_grad(['Input'], 'Out', no_grad_set=None, check_eager=False)
class TestAddMMOpError(unittest.TestCase): class TestAddMMOpError(unittest.TestCase):
...@@ -167,6 +168,7 @@ class TestAddMMOp2(TestAddMMOp): ...@@ -167,6 +168,7 @@ class TestAddMMOp2(TestAddMMOp):
# test alpha and beta # test alpha and beta
def setUp(self): def setUp(self):
self.op_type = "addmm" self.op_type = "addmm"
self.python_api = paddle.addmm
self.dtype = np.float64 self.dtype = np.float64
self.init_dtype_type() self.init_dtype_type()
self.inputs = { self.inputs = {
...@@ -252,4 +254,5 @@ class TestAddMMAPI(unittest.TestCase): ...@@ -252,4 +254,5 @@ class TestAddMMAPI(unittest.TestCase):
''' '''
if __name__ == "__main__": if __name__ == "__main__":
paddle.enable_static()
unittest.main() unittest.main()
...@@ -36,6 +36,7 @@ def atan2_grad(x1, x2, dout): ...@@ -36,6 +36,7 @@ def atan2_grad(x1, x2, dout):
class TestAtan2(OpTest): class TestAtan2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "atan2" self.op_type = "atan2"
self.python_api = paddle.atan2
self.init_dtype() self.init_dtype()
x1 = np.random.uniform(-1, -0.1, [15, 17]).astype(self.dtype) x1 = np.random.uniform(-1, -0.1, [15, 17]).astype(self.dtype)
...@@ -46,10 +47,10 @@ class TestAtan2(OpTest): ...@@ -46,10 +47,10 @@ class TestAtan2(OpTest):
self.outputs = {'Out': out} self.outputs = {'Out': out}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X1', 'X2'], 'Out') self.check_grad(['X1', 'X2'], 'Out', check_eager=True)
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def init_dtype(self): def init_dtype(self):
self.dtype = np.float64 self.dtype = np.float64
...@@ -66,7 +67,8 @@ class TestAtan2_float(TestAtan2): ...@@ -66,7 +67,8 @@ class TestAtan2_float(TestAtan2):
'Out', 'Out',
user_defined_grads=atan2_grad(self.inputs['X1'], user_defined_grads=atan2_grad(self.inputs['X1'],
self.inputs['X2'], self.inputs['X2'],
1 / self.inputs['X1'].size)) 1 / self.inputs['X1'].size),
check_eager=True)
class TestAtan2_float16(TestAtan2_float): class TestAtan2_float16(TestAtan2_float):
...@@ -129,4 +131,5 @@ class TestAtan2API(unittest.TestCase): ...@@ -129,4 +131,5 @@ class TestAtan2API(unittest.TestCase):
if __name__ == '__main__': if __name__ == '__main__':
paddle.enable_static()
unittest.main() unittest.main()
...@@ -244,4 +244,5 @@ class TestBceLossOpCase2(OpTest): ...@@ -244,4 +244,5 @@ class TestBceLossOpCase2(OpTest):
if __name__ == "__main__": if __name__ == "__main__":
paddle.enable_static()
unittest.main() unittest.main()
...@@ -30,12 +30,13 @@ def create_test_class(op_type, typename, callback): ...@@ -30,12 +30,13 @@ def create_test_class(op_type, typename, callback):
a = numpy.random.random(size=(10, 7)).astype(typename) a = numpy.random.random(size=(10, 7)).astype(typename)
b = numpy.random.random(size=(10, 7)).astype(typename) b = numpy.random.random(size=(10, 7)).astype(typename)
c = callback(a, b) c = callback(a, b)
self.python_api = eval("paddle." + op_type)
self.inputs = {'X': a, 'Y': b} self.inputs = {'X': a, 'Y': b}
self.outputs = {'Out': c} self.outputs = {'Out': c}
self.op_type = op_type self.op_type = op_type
def test_output(self): def test_output(self):
self.check_output() self.check_output(check_eager=False)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
...@@ -338,4 +339,5 @@ class TestCompareOpPlace(unittest.TestCase): ...@@ -338,4 +339,5 @@ class TestCompareOpPlace(unittest.TestCase):
if __name__ == '__main__': if __name__ == '__main__':
paddle.enable_static()
unittest.main() unittest.main()
...@@ -26,6 +26,7 @@ from paddle.fluid import Program, program_guard ...@@ -26,6 +26,7 @@ from paddle.fluid import Program, program_guard
class TestCrossOp(OpTest): class TestCrossOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "cross" self.op_type = "cross"
self.python_api = paddle.cross
self.initTestCase() self.initTestCase()
self.inputs = { self.inputs = {
'X': np.random.random(self.shape).astype(self.dtype), 'X': np.random.random(self.shape).astype(self.dtype),
...@@ -47,10 +48,10 @@ class TestCrossOp(OpTest): ...@@ -47,10 +48,10 @@ class TestCrossOp(OpTest):
self.outputs = {'Out': np.array(z_list).reshape(self.shape)} self.outputs = {'Out': np.array(z_list).reshape(self.shape)}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=False)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out') self.check_grad(['X', 'Y'], 'Out', check_eager=False)
class TestCrossOpCase1(TestCrossOp): class TestCrossOpCase1(TestCrossOp):
...@@ -114,14 +115,14 @@ class TestCrossAPI(unittest.TestCase): ...@@ -114,14 +115,14 @@ class TestCrossAPI(unittest.TestCase):
def test_dygraph_api(self): def test_dygraph_api(self):
self.input_data() self.input_data()
# case 1: # case 1:
with fluid.dygraph.guard(): # with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x) # x = fluid.dygraph.to_variable(self.data_x)
y = fluid.dygraph.to_variable(self.data_y) # y = fluid.dygraph.to_variable(self.data_y)
z = paddle.cross(x, y) # z = paddle.cross(x, y)
np_z = z.numpy() # np_z = z.numpy()
expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0], # expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0],
[-1.0, -1.0, -1.0]]) # [-1.0, -1.0, -1.0]])
self.assertTrue(np.allclose(expect_out, np_z)) # self.assertTrue(np.allclose(expect_out, np_z))
# case 2: # case 2:
with fluid.dygraph.guard(): with fluid.dygraph.guard():
...@@ -135,4 +136,5 @@ class TestCrossAPI(unittest.TestCase): ...@@ -135,4 +136,5 @@ class TestCrossAPI(unittest.TestCase):
if __name__ == '__main__': if __name__ == '__main__':
paddle.enable_static()
unittest.main() unittest.main()
...@@ -27,6 +27,7 @@ from paddle.fluid.framework import _test_eager_guard ...@@ -27,6 +27,7 @@ from paddle.fluid.framework import _test_eager_guard
class TestDiagV2Op(OpTest): class TestDiagV2Op(OpTest):
def setUp(self): def setUp(self):
self.op_type = "diag_v2" self.op_type = "diag_v2"
self.python_api = paddle.diag
self.x = np.random.rand(10, 10) self.x = np.random.rand(10, 10)
self.offset = 0 self.offset = 0
self.padding_value = 0.0 self.padding_value = 0.0
...@@ -267,4 +268,5 @@ class TestDiagV2API(unittest.TestCase): ...@@ -267,4 +268,5 @@ class TestDiagV2API(unittest.TestCase):
if __name__ == "__main__": if __name__ == "__main__":
paddle.enable_static()
unittest.main() unittest.main()
...@@ -40,16 +40,24 @@ class TestElementwiseAddOp(OpTest): ...@@ -40,16 +40,24 @@ class TestElementwiseAddOp(OpTest):
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.out} self.outputs = {'Out': self.out}
def check_eager(self):
return (self.use_mkldnn == False and self.axis == -1)
def test_check_output(self): def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(
check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def test_check_grad_normal(self): def test_check_grad_normal(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16: if self.dtype == np.float16:
return return
self.check_grad( self.check_grad(
['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False)) ['X', 'Y'],
'Out',
check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def test_check_grad_ingore_x(self): def test_check_grad_ingore_x(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
...@@ -59,7 +67,8 @@ class TestElementwiseAddOp(OpTest): ...@@ -59,7 +67,8 @@ class TestElementwiseAddOp(OpTest):
['Y'], ['Y'],
'Out', 'Out',
no_grad_set=set("X"), no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False)) check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
...@@ -69,7 +78,8 @@ class TestElementwiseAddOp(OpTest): ...@@ -69,7 +78,8 @@ class TestElementwiseAddOp(OpTest):
['X'], ['X'],
'Out', 'Out',
no_grad_set=set('Y'), no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False)) check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def init_input_output(self): def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
...@@ -123,19 +133,21 @@ class TestBF16ElementwiseAddOp(OpTest): ...@@ -123,19 +133,21 @@ class TestBF16ElementwiseAddOp(OpTest):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place) self.check_output_with_place(place, check_eager=False)
def test_check_grad_normal(self): def test_check_grad_normal(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out') self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)
def test_check_grad_ingore_x(self): def test_check_grad_ingore_x(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['Y'], 'Out', no_grad_set=set("X")) self.check_grad_with_place(
place, ['Y'], 'Out', no_grad_set=set("X"), check_eager=False)
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', no_grad_set=set('Y')) self.check_grad_with_place(
place, ['X'], 'Out', no_grad_set=set('Y'), check_eager=False)
@skip_check_grad_ci( @skip_check_grad_ci(
...@@ -586,7 +598,7 @@ class TestComplexElementwiseAddOp(OpTest): ...@@ -586,7 +598,7 @@ class TestComplexElementwiseAddOp(OpTest):
self.grad_y = self.grad_out self.grad_y = self.grad_out
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=False)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(
......
...@@ -22,10 +22,11 @@ import paddle ...@@ -22,10 +22,11 @@ import paddle
class TestGatherNdOpWithEmptyIndex(OpTest): class TestGatherNdOpWithEmptyIndex(OpTest):
#Index has empty element, which means copy entire tensor # Index has empty element, which means copy entire tensor
def setUp(self): def setUp(self):
self.op_type = "gather_nd" self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.random((5, 20)).astype("float64") xnp = np.random.random((5, 20)).astype("float64")
self.inputs = {'X': xnp, 'Index': np.array([[], []]).astype("int32")} self.inputs = {'X': xnp, 'Index': np.array([[], []]).astype("int32")}
self.outputs = { self.outputs = {
...@@ -33,24 +34,25 @@ class TestGatherNdOpWithEmptyIndex(OpTest): ...@@ -33,24 +34,25 @@ class TestGatherNdOpWithEmptyIndex(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithIndex1(OpTest): class TestGatherNdOpWithIndex1(OpTest):
def setUp(self): def setUp(self):
self.op_type = "gather_nd" self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.random((5, 20)).astype("float64") xnp = np.random.random((5, 20)).astype("float64")
self.inputs = {'X': xnp, 'Index': np.array([1]).astype("int32")} self.inputs = {'X': xnp, 'Index': np.array([1]).astype("int32")}
self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithLowIndex(OpTest): class TestGatherNdOpWithLowIndex(OpTest):
...@@ -58,6 +60,7 @@ class TestGatherNdOpWithLowIndex(OpTest): ...@@ -58,6 +60,7 @@ class TestGatherNdOpWithLowIndex(OpTest):
def setUp(self): def setUp(self):
self.op_type = "gather_nd" self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64") xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([[1], [2]]).astype("int64") index = np.array([[1], [2]]).astype("int64")
...@@ -66,10 +69,10 @@ class TestGatherNdOpWithLowIndex(OpTest): ...@@ -66,10 +69,10 @@ class TestGatherNdOpWithLowIndex(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)]} #[[14, 25, 1], [76, 22, 3]] self.outputs = {'Out': xnp[tuple(index.T)]} #[[14, 25, 1], [76, 22, 3]]
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpIndex1(OpTest): class TestGatherNdOpIndex1(OpTest):
...@@ -77,18 +80,19 @@ class TestGatherNdOpIndex1(OpTest): ...@@ -77,18 +80,19 @@ class TestGatherNdOpIndex1(OpTest):
def setUp(self): def setUp(self):
self.op_type = "gather_nd" self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64") xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([1, 2]).astype("int64") index = np.array([1, 2]).astype("int32")
self.inputs = {'X': xnp, 'Index': index} self.inputs = {'X': xnp, 'Index': index}
self.outputs = {'Out': xnp[tuple(index.T)]} self.outputs = {'Out': xnp[tuple(index.T)]}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithSameIndexAsX(OpTest): class TestGatherNdOpWithSameIndexAsX(OpTest):
...@@ -96,6 +100,7 @@ class TestGatherNdOpWithSameIndexAsX(OpTest): ...@@ -96,6 +100,7 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
def setUp(self): def setUp(self):
self.op_type = "gather_nd" self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64") xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([[1, 1], [2, 1]]).astype("int64") index = np.array([[1, 1], [2, 1]]).astype("int64")
...@@ -103,10 +108,10 @@ class TestGatherNdOpWithSameIndexAsX(OpTest): ...@@ -103,10 +108,10 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)]} #[25, 22] self.outputs = {'Out': xnp[tuple(index.T)]} #[25, 22]
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithHighRankSame(OpTest): class TestGatherNdOpWithHighRankSame(OpTest):
...@@ -114,6 +119,7 @@ class TestGatherNdOpWithHighRankSame(OpTest): ...@@ -114,6 +119,7 @@ class TestGatherNdOpWithHighRankSame(OpTest):
def setUp(self): def setUp(self):
self.op_type = "gather_nd" self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
shape = (5, 2, 3, 1, 10) shape = (5, 2, 3, 1, 10)
xnp = np.random.rand(*shape).astype("float64") xnp = np.random.rand(*shape).astype("float64")
index = np.vstack([np.random.randint(0, s, size=2) for s in shape]).T index = np.vstack([np.random.randint(0, s, size=2) for s in shape]).T
...@@ -122,10 +128,10 @@ class TestGatherNdOpWithHighRankSame(OpTest): ...@@ -122,10 +128,10 @@ class TestGatherNdOpWithHighRankSame(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)]} self.outputs = {'Out': xnp[tuple(index.T)]}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithHighRankDiff(OpTest): class TestGatherNdOpWithHighRankDiff(OpTest):
...@@ -133,6 +139,7 @@ class TestGatherNdOpWithHighRankDiff(OpTest): ...@@ -133,6 +139,7 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
def setUp(self): def setUp(self):
self.op_type = "gather_nd" self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
shape = (2, 3, 4, 1, 10) shape = (2, 3, 4, 1, 10)
xnp = np.random.rand(*shape).astype("float64") xnp = np.random.rand(*shape).astype("float64")
index = np.vstack([np.random.randint(0, s, size=200) for s in shape]).T index = np.vstack([np.random.randint(0, s, size=200) for s in shape]).T
...@@ -142,10 +149,10 @@ class TestGatherNdOpWithHighRankDiff(OpTest): ...@@ -142,10 +149,10 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)].reshape([20, 5, 2])} self.outputs = {'Out': xnp[tuple(index.T)].reshape([20, 5, 2])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
#Test Python API #Test Python API
...@@ -245,4 +252,5 @@ class TestGatherNdAPI2(unittest.TestCase): ...@@ -245,4 +252,5 @@ class TestGatherNdAPI2(unittest.TestCase):
if __name__ == "__main__": if __name__ == "__main__":
paddle.enable_static()
unittest.main() unittest.main()
...@@ -25,6 +25,7 @@ from paddle.fluid.framework import program_guard, Program ...@@ -25,6 +25,7 @@ from paddle.fluid.framework import program_guard, Program
class TestGatherTreeOp(OpTest): class TestGatherTreeOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "gather_tree" self.op_type = "gather_tree"
self.python_api = paddle.nn.functional.gather_tree
max_length, batch_size, beam_size = 5, 2, 2 max_length, batch_size, beam_size = 5, 2, 2
ids = np.random.randint( ids = np.random.randint(
0, high=10, size=(max_length, batch_size, beam_size)) 0, high=10, size=(max_length, batch_size, beam_size))
...@@ -34,7 +35,7 @@ class TestGatherTreeOp(OpTest): ...@@ -34,7 +35,7 @@ class TestGatherTreeOp(OpTest):
self.outputs = {'Out': self.backtrace(ids, parents)} self.outputs = {'Out': self.backtrace(ids, parents)}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
@staticmethod @staticmethod
def backtrace(ids, parents): def backtrace(ids, parents):
...@@ -126,4 +127,5 @@ class TestGatherTreeOpError(unittest.TestCase): ...@@ -126,4 +127,5 @@ class TestGatherTreeOpError(unittest.TestCase):
if __name__ == "__main__": if __name__ == "__main__":
paddle.enable_static()
unittest.main() unittest.main()
...@@ -24,6 +24,7 @@ from op_test import OpTest ...@@ -24,6 +24,7 @@ from op_test import OpTest
class TestIndexSampleOp(OpTest): class TestIndexSampleOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "index_sample" self.op_type = "index_sample"
self.python_api = paddle.index_sample
self.config() self.config()
xnp = np.random.random(self.x_shape).astype(self.x_type) xnp = np.random.random(self.x_shape).astype(self.x_type)
indexnp = np.random.randint( indexnp = np.random.randint(
...@@ -39,10 +40,10 @@ class TestIndexSampleOp(OpTest): ...@@ -39,10 +40,10 @@ class TestIndexSampleOp(OpTest):
self.outputs = {'Out': out} self.outputs = {'Out': out}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
def config(self): def config(self):
""" """
......
...@@ -27,15 +27,16 @@ from op_test import OpTest ...@@ -27,15 +27,16 @@ from op_test import OpTest
class TestMVOp(OpTest): class TestMVOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "mv" self.op_type = "mv"
self.python_api = paddle.mv
self.init_config() self.init_config()
self.inputs = {'X': self.x, 'Vec': self.vec} self.inputs = {'X': self.x, 'Vec': self.vec}
self.outputs = {'Out': np.dot(self.x, self.vec)} self.outputs = {'Out': np.dot(self.x, self.vec)}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Vec'], 'Out') self.check_grad(['X', 'Vec'], 'Out', check_eager=True)
def init_config(self): def init_config(self):
self.x = np.random.random((2, 100)).astype("float64") self.x = np.random.random((2, 100)).astype("float64")
...@@ -107,4 +108,5 @@ class TestMVError(unittest.TestCase): ...@@ -107,4 +108,5 @@ class TestMVError(unittest.TestCase):
if __name__ == '__main__': if __name__ == '__main__':
paddle.enable_static()
unittest.main() unittest.main()
...@@ -67,6 +67,7 @@ class TestScatterNdAddSimpleOp(OpTest): ...@@ -67,6 +67,7 @@ class TestScatterNdAddSimpleOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter_nd_add" self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
ref_np = np.random.random([100]).astype("float64") ref_np = np.random.random([100]).astype("float64")
index_np = np.random.randint(0, 100, [100, 1]).astype("int32") index_np = np.random.randint(0, 100, [100, 1]).astype("int32")
updates_np = np.random.random([100]).astype("float64") updates_np = np.random.random([100]).astype("float64")
...@@ -76,10 +77,10 @@ class TestScatterNdAddSimpleOp(OpTest): ...@@ -76,10 +77,10 @@ class TestScatterNdAddSimpleOp(OpTest):
self.outputs = {'Out': expect_np} self.outputs = {'Out': expect_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out') self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
class TestScatterNdAddWithEmptyIndex(OpTest): class TestScatterNdAddWithEmptyIndex(OpTest):
...@@ -89,6 +90,7 @@ class TestScatterNdAddWithEmptyIndex(OpTest): ...@@ -89,6 +90,7 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter_nd_add" self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
ref_np = np.random.random((10, 10)).astype("float64") ref_np = np.random.random((10, 10)).astype("float64")
index_np = np.array([[], []]).astype("int32") index_np = np.array([[], []]).astype("int32")
updates_np = np.random.random((2, 10, 10)).astype("float64") updates_np = np.random.random((2, 10, 10)).astype("float64")
...@@ -99,10 +101,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest): ...@@ -99,10 +101,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
self.outputs = {'Out': expect_np} self.outputs = {'Out': expect_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out') self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
class TestScatterNdAddWithHighRankSame(OpTest): class TestScatterNdAddWithHighRankSame(OpTest):
...@@ -112,6 +114,7 @@ class TestScatterNdAddWithHighRankSame(OpTest): ...@@ -112,6 +114,7 @@ class TestScatterNdAddWithHighRankSame(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter_nd_add" self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
shape = (3, 2, 2, 1, 10) shape = (3, 2, 2, 1, 10)
ref_np = np.random.rand(*shape).astype("float64") ref_np = np.random.rand(*shape).astype("float64")
index_np = np.vstack( index_np = np.vstack(
...@@ -125,10 +128,10 @@ class TestScatterNdAddWithHighRankSame(OpTest): ...@@ -125,10 +128,10 @@ class TestScatterNdAddWithHighRankSame(OpTest):
self.outputs = {'Out': expect_np} self.outputs = {'Out': expect_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out') self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
class TestScatterNdAddWithHighRankDiff(OpTest): class TestScatterNdAddWithHighRankDiff(OpTest):
...@@ -138,6 +141,7 @@ class TestScatterNdAddWithHighRankDiff(OpTest): ...@@ -138,6 +141,7 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter_nd_add" self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
shape = (8, 2, 2, 1, 10) shape = (8, 2, 2, 1, 10)
ref_np = np.random.rand(*shape).astype("double") ref_np = np.random.rand(*shape).astype("double")
index = np.vstack([np.random.randint(0, s, size=500) for s in shape]).T index = np.vstack([np.random.randint(0, s, size=500) for s in shape]).T
...@@ -150,10 +154,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest): ...@@ -150,10 +154,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
self.outputs = {'Out': expect_np} self.outputs = {'Out': expect_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out') self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
#Test Python API #Test Python API
......
...@@ -27,6 +27,7 @@ from paddle.fluid.dygraph.base import switch_to_static_graph ...@@ -27,6 +27,7 @@ from paddle.fluid.dygraph.base import switch_to_static_graph
class TestScatterOp(OpTest): class TestScatterOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter" self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 50)).astype("float32") ref_np = np.ones((3, 50)).astype("float32")
index_np = np.array([1, 2]).astype("int32") index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 50)).astype("float32") updates_np = np.random.random((2, 50)).astype("float32")
...@@ -36,15 +37,16 @@ class TestScatterOp(OpTest): ...@@ -36,15 +37,16 @@ class TestScatterOp(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out") self.check_grad(["X", "Updates"], "Out", check_eager=True)
class TestScatterOp0(OpTest): class TestScatterOp0(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter" self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32") ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int32") index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32") updates_np = np.random.random((2, 3)).astype("float32")
...@@ -55,15 +57,16 @@ class TestScatterOp0(OpTest): ...@@ -55,15 +57,16 @@ class TestScatterOp0(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out") self.check_grad(["X", "Updates"], "Out", check_eager=True)
class TestScatterOp1(OpTest): class TestScatterOp1(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter" self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32") ref_np = np.ones((3, 3)).astype("float32")
zeros_np = np.zeros([2, 3]).astype('float32') zeros_np = np.zeros([2, 3]).astype('float32')
index_np = np.array([1, 1]).astype("int32") index_np = np.array([1, 1]).astype("int32")
...@@ -77,10 +80,10 @@ class TestScatterOp1(OpTest): ...@@ -77,10 +80,10 @@ class TestScatterOp1(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out") self.check_grad(["X", "Updates"], "Out", check_eager=True)
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
...@@ -88,6 +91,7 @@ class TestScatterOp1(OpTest): ...@@ -88,6 +91,7 @@ class TestScatterOp1(OpTest):
class TestScatterOp2(OpTest): class TestScatterOp2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter" self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32") ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int32") index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32") updates_np = np.random.random((2, 3)).astype("float32")
...@@ -99,12 +103,13 @@ class TestScatterOp2(OpTest): ...@@ -99,12 +103,13 @@ class TestScatterOp2(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3) self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out') self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True)
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
...@@ -112,6 +117,7 @@ class TestScatterOp2(OpTest): ...@@ -112,6 +117,7 @@ class TestScatterOp2(OpTest):
class TestScatterOp3(OpTest): class TestScatterOp3(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter" self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32") ref_np = np.ones((3, 3)).astype("float32")
zeros_np = np.zeros([2, 3]).astype('float32') zeros_np = np.zeros([2, 3]).astype('float32')
index_np = np.array([1, 1]).astype("int32") index_np = np.array([1, 1]).astype("int32")
...@@ -127,17 +133,19 @@ class TestScatterOp3(OpTest): ...@@ -127,17 +133,19 @@ class TestScatterOp3(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3) self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out') self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True)
class TestScatterOp4(OpTest): class TestScatterOp4(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter" self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32") ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int64") index_np = np.array([1, 2]).astype("int64")
updates_np = np.random.random((2, 3)).astype("float32") updates_np = np.random.random((2, 3)).astype("float32")
...@@ -147,10 +155,10 @@ class TestScatterOp4(OpTest): ...@@ -147,10 +155,10 @@ class TestScatterOp4(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out') self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
...@@ -158,6 +166,7 @@ class TestScatterOp4(OpTest): ...@@ -158,6 +166,7 @@ class TestScatterOp4(OpTest):
class TestScatterOp5(OpTest): class TestScatterOp5(OpTest):
def setUp(self): def setUp(self):
self.op_type = "scatter" self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32") ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int64") index_np = np.array([1, 2]).astype("int64")
updates_np = np.random.random((2, 3)).astype("float32") updates_np = np.random.random((2, 3)).astype("float32")
...@@ -169,12 +178,13 @@ class TestScatterOp5(OpTest): ...@@ -169,12 +178,13 @@ class TestScatterOp5(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3) self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out') self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True)
class TestScatterAPI(unittest.TestCase): class TestScatterAPI(unittest.TestCase):
...@@ -274,6 +284,7 @@ class TestScatterAPI(unittest.TestCase): ...@@ -274,6 +284,7 @@ class TestScatterAPI(unittest.TestCase):
class TestScatterOpFp16(OpTest): class TestScatterOpFp16(OpTest):
def setUp(self): def setUp(self):
self.__class__.op_type = "scatter" self.__class__.op_type = "scatter"
self.python_api = paddle.scatter
# compute grad in the following code handly. # compute grad in the following code handly.
self.__class__.no_need_check_grad = True self.__class__.no_need_check_grad = True
self.x_type = 'float16' self.x_type = 'float16'
......
...@@ -29,6 +29,7 @@ class TestTransposeOp(OpTest): ...@@ -29,6 +29,7 @@ class TestTransposeOp(OpTest):
def setUp(self): def setUp(self):
self.init_op_type() self.init_op_type()
self.initTestCase() self.initTestCase()
self.python_api = paddle.transpose
self.inputs = {'X': np.random.random(self.shape).astype("float64")} self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = { self.attrs = {
'axis': list(self.axis), 'axis': list(self.axis),
...@@ -44,10 +45,10 @@ class TestTransposeOp(OpTest): ...@@ -44,10 +45,10 @@ class TestTransposeOp(OpTest):
self.use_mkldnn = False self.use_mkldnn = False
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=['XShape']) self.check_output(no_check_set=['XShape'], check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
def initTestCase(self): def initTestCase(self):
self.shape = (3, 40) self.shape = (3, 40)
......
...@@ -29,6 +29,7 @@ from paddle.fluid.framework import _test_eager_guard ...@@ -29,6 +29,7 @@ from paddle.fluid.framework import _test_eager_guard
class TestWhereOp(OpTest): class TestWhereOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = 'where' self.op_type = 'where'
self.python_api = paddle.where
self.init_config() self.init_config()
self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y} self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
self.outputs = {'Out': np.where(self.cond, self.x, self.y)} self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
...@@ -391,5 +392,6 @@ class TestWhereOpError(unittest.TestCase): ...@@ -391,5 +392,6 @@ class TestWhereOpError(unittest.TestCase):
self.test_value_error() self.test_value_error()
if (__name__ == '__main__'): if __name__ == "__main__":
paddle.enable_static()
unittest.main() unittest.main()
...@@ -36,7 +36,7 @@ from ...static import Variable ...@@ -36,7 +36,7 @@ from ...static import Variable
from paddle.utils import deprecated from paddle.utils import deprecated
from paddle import _C_ops from paddle import _C_ops
from paddle import in_dynamic_mode from paddle import in_dynamic_mode
from paddle.framework import core from paddle.framework import core, _in_eager_mode
__all__ = [] __all__ = []
...@@ -114,7 +114,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', ...@@ -114,7 +114,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
reduction) reduction)
if in_dynamic_mode(): if in_dynamic_mode():
out = _C_ops.bce_loss(input, label) if _in_eager_mode():
out = _C_ops.final_state_bce_loss(input, label)
else:
out = _C_ops.bce_loss(input, label)
if weight is not None: if weight is not None:
out = _C_ops.elementwise_mul(out, weight, 'axis', -1) out = _C_ops.elementwise_mul(out, weight, 'axis', -1)
......
...@@ -974,6 +974,8 @@ def diag(x, offset=0, padding_value=0, name=None): ...@@ -974,6 +974,8 @@ def diag(x, offset=0, padding_value=0, name=None):
# [4] # [4]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_diag(x, offset, padding_value)
return _C_ops.diag_v2(x, "offset", offset, "padding_value", return _C_ops.diag_v2(x, "offset", offset, "padding_value",
padding_value) padding_value)
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
import numpy as np import numpy as np
from ..fluid.layer_helper import LayerHelper from ..fluid.layer_helper import LayerHelper
from ..framework import _varbase_creator, _dygraph_tracer from ..framework import _varbase_creator, _dygraph_tracer, _in_eager_mode
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..static import Variable from ..static import Variable
...@@ -1146,6 +1146,8 @@ def cross(x, y, axis=None, name=None): ...@@ -1146,6 +1146,8 @@ def cross(x, y, axis=None, name=None):
# [0. 0. 0.]] # [0. 0. 0.]]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_cross(x, y, axis)
if axis is not None: if axis is not None:
return _C_ops.cross(x, y, 'dim', axis) return _C_ops.cross(x, y, 'dim', axis)
else: else:
...@@ -1490,6 +1492,8 @@ def mv(x, vec, name=None): ...@@ -1490,6 +1492,8 @@ def mv(x, vec, name=None):
out = paddle.mv(x, vec) out = paddle.mv(x, vec)
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_mv(x, vec)
out = _C_ops.mv(x, vec) out = _C_ops.mv(x, vec)
return out return out
......
...@@ -17,6 +17,7 @@ from ..fluid.data_feeder import check_type, check_variable_and_dtype ...@@ -17,6 +17,7 @@ from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.layers.layer_function_generator import templatedoc from ..fluid.layers.layer_function_generator import templatedoc
from ..static import Variable from ..static import Variable
from ..framework import VarBase as Tensor from ..framework import VarBase as Tensor
from ..framework import _in_eager_mode
# TODO: define logic functions of a tensor # TODO: define logic functions of a tensor
from ..fluid.layers import is_empty # noqa: F401 from ..fluid.layers import is_empty # noqa: F401
...@@ -181,6 +182,9 @@ def equal(x, y, name=None): ...@@ -181,6 +182,9 @@ def equal(x, y, name=None):
y = full(shape=[1], dtype=x.dtype, fill_value=y) y = full(shape=[1], dtype=x.dtype, fill_value=y)
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_equal(x, y)
return _C_ops.equal(x, y) return _C_ops.equal(x, y)
check_variable_and_dtype( check_variable_and_dtype(
...@@ -223,6 +227,9 @@ def greater_equal(x, y, name=None): ...@@ -223,6 +227,9 @@ def greater_equal(x, y, name=None):
print(result1) # result1 = [True False True] print(result1) # result1 = [True False True]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_greater_equal(x, y)
return _C_ops.greater_equal(x, y) return _C_ops.greater_equal(x, y)
check_variable_and_dtype(x, "x", check_variable_and_dtype(x, "x",
...@@ -269,6 +276,9 @@ def greater_than(x, y, name=None): ...@@ -269,6 +276,9 @@ def greater_than(x, y, name=None):
print(result1) # result1 = [False False True] print(result1) # result1 = [False False True]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_greater_than(x, y)
return _C_ops.greater_than(x, y) return _C_ops.greater_than(x, y)
check_variable_and_dtype(x, "x", check_variable_and_dtype(x, "x",
...@@ -316,6 +326,9 @@ def less_equal(x, y, name=None): ...@@ -316,6 +326,9 @@ def less_equal(x, y, name=None):
print(result1) # result1 = [True True False] print(result1) # result1 = [True True False]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_less_equal(x, y)
return _C_ops.less_equal(x, y) return _C_ops.less_equal(x, y)
check_variable_and_dtype( check_variable_and_dtype(
...@@ -359,6 +372,9 @@ def less_than(x, y, name=None): ...@@ -359,6 +372,9 @@ def less_than(x, y, name=None):
print(result1) # result1 = [False True False] print(result1) # result1 = [False True False]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_less_than(x, y)
return _C_ops.less_than(x, y) return _C_ops.less_than(x, y)
check_variable_and_dtype( check_variable_and_dtype(
...@@ -402,6 +418,9 @@ def not_equal(x, y, name=None): ...@@ -402,6 +418,9 @@ def not_equal(x, y, name=None):
print(result1) # result1 = [False True True] print(result1) # result1 = [False True True]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_not_equal(x, y)
return _C_ops.not_equal(x, y) return _C_ops.not_equal(x, y)
check_variable_and_dtype( check_variable_and_dtype(
......
...@@ -16,7 +16,7 @@ from __future__ import print_function ...@@ -16,7 +16,7 @@ from __future__ import print_function
from collections import Counter from collections import Counter
from ..static import Variable, device_guard from ..static import Variable, device_guard
from ..framework import core from ..framework import core, _in_eager_mode
from ..fluid.layer_helper import LayerHelper from ..fluid.layer_helper import LayerHelper
from ..framework import OpProtoHolder, convert_np_dtype_to_dtype_, dygraph_only from ..framework import OpProtoHolder, convert_np_dtype_to_dtype_, dygraph_only
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
...@@ -1567,6 +1567,8 @@ def scatter(x, index, updates, overwrite=True, name=None): ...@@ -1567,6 +1567,8 @@ def scatter(x, index, updates, overwrite=True, name=None):
# [1., 1.]] # [1., 1.]]
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_scatter(x, index, updates, overwrite)
return _C_ops.scatter(x, index, updates, 'overwrite', overwrite) return _C_ops.scatter(x, index, updates, 'overwrite', overwrite)
check_variable_and_dtype( check_variable_and_dtype(
......
...@@ -1274,6 +1274,8 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): ...@@ -1274,6 +1274,8 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_addmm( input, x, y, alpha, beta)
out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta) out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out return out
...@@ -3266,6 +3268,8 @@ def atan2(x, y, name=None): ...@@ -3266,6 +3268,8 @@ def atan2(x, y, name=None):
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_atan2( x, y)
return _C_ops.atan2(x, y) return _C_ops.atan2(x, y)
else: else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2') check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
......
...@@ -17,7 +17,7 @@ import paddle ...@@ -17,7 +17,7 @@ import paddle
from ..fluid.layer_helper import LayerHelper from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..fluid import layers from ..fluid import layers
from ..framework import core from ..framework import core, _in_eager_mode
from paddle.common_ops_import import convert_np_dtype_to_dtype_ from paddle.common_ops_import import convert_np_dtype_to_dtype_
from paddle.common_ops_import import Variable from paddle.common_ops_import import Variable
from paddle.common_ops_import import VarDesc from paddle.common_ops_import import VarDesc
...@@ -621,6 +621,9 @@ def where(condition, x=None, y=None, name=None): ...@@ -621,6 +621,9 @@ def where(condition, x=None, y=None, name=None):
broadcast_condition = paddle.cast(broadcast_condition, 'bool') broadcast_condition = paddle.cast(broadcast_condition, 'bool')
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_where(broadcast_condition, broadcast_x,
broadcast_y)
return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y) return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
else: else:
helper = LayerHelper("where", **locals()) helper = LayerHelper("where", **locals())
...@@ -712,6 +715,8 @@ def index_sample(x, index): ...@@ -712,6 +715,8 @@ def index_sample(x, index):
""" """
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_index_sample(x, index)
return _C_ops.index_sample(x, index) return _C_ops.index_sample(x, index)
helper = LayerHelper("index_sample", **locals()) helper = LayerHelper("index_sample", **locals())
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
func : ElementwiseInferMeta func : ElementwiseInferMeta
kernel : kernel :
func : add func : add
# backward : add_grad
- api : cast - api : cast
args : (Tensor x, DataType out_dtype) args : (Tensor x, DataType out_dtype)
...@@ -274,3 +275,265 @@ ...@@ -274,3 +275,265 @@
kernel : kernel :
func : diagonal func : diagonal
backward : diagonal_grad backward : diagonal_grad
- api : gumbel_softmax
args : (Tensor x, float temperature, bool hard, int axis)
output : Tensor
infer_meta :
func : GumbelSoftmaxInferMeta
kernel :
func : gumbel_softmax
# backward : gumbel_softmax_grad
- api : diag
args : (Tensor x, int offset, float padding_value)
output : Tensor
infer_meta :
func : DiagInferMeta
kernel :
func : diag
# - api : pixel_shuffle
# args : (Tensor x, int upscale_factor, const std::string& data_format)
# output : Tensor
# infer_meta :
# func : PixelShuffleInferMeta
# kernel :
# func : pixel_shuffle
- api : transpose
args : (Tensor x, int[] axis)
output : Tensor
infer_meta :
func : TransposeInferMeta
kernel :
func : transpose
backward : transpose_grad
- api : lerp
args : (Tensor x, Tensor y, Tensor weight)
output : Tensor
infer_meta :
func : LerpInferMeta
kernel :
func : lerp
# backward : lerp_grad
- api : scatter
args : (Tensor x, Tensor index, Tensor updates, bool overwrite)
output : Tensor
infer_meta :
func : ScatterInferMeta
dtype : x
kernel :
func : scatter
backward : scatter_grad
- api : scatter_nd_add
args : (Tensor x, Tensor index, Tensor updates)
output : Tensor
infer_meta :
func : ScatterNdAddInferMeta
dtype : x
kernel :
func : scatter_nd_add
backward : scatter_nd_add_grad
- api : addmm
args : (Tensor input, Tensor x, Tensor y, float alpha, float beta)
output : Tensor
infer_meta :
func : AddmmInferMeta
kernel :
func : addmm
backward : addmm_grad
- api : adadelta
args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update, float rho, float epsilon)
output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out)
infer_meta :
func : AdadeltaInferMeta
kernel :
func : adadelta
- api : adamax
args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment, Tensor inf_norm, Tensor beta1_pow, float beta1, float beta2, float epsilon)
output : Tensor(param_out), Tensor(avg_squared_grad_out), Tensor(avg_squared_update_out)
infer_meta :
func : AdamaxInferMeta
kernel :
func : adamax
- api : where
args : (Tensor condition, Tensor x, Tensor y)
output : Tensor
infer_meta :
func : WhereInferMeta
kernel :
func : where
backward : where_grad
# BilinearTensorProductInferMeta
# BroadcastTensorsInferMeta
- api : less_than
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : less_than
- api : less_equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : less_equal
- api : greater
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : greater
- api : greater_equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : greater_equal
- api : equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : equal
- api : not_equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : not_equal
# - api : equal_all
# args : (Tensor x, Tensor y)
# output : Tensor
# infer_meta :
# func : CompareAllInferMeta
# kernel :
# func : equal_all
- api : huber_loss
args : (Tensor input, Tensor label, float delta)
output : Tensor(out), Tensor(residual)
infer_meta :
func : HuberLossInferMeta
kernel :
func : huber_loss
# backward : huber_loss_grad
- api : triangular_solve
args : (Tensor x, Tensor y, bool upper, bool tranpose, bool unitriangular)
output : Tensor
infer_meta :
func : TriangularSolveInferMeta
kernel :
func : triangular_solve
# backward : triangular_solve_grad
- api : index_sample
args : (Tensor x, Tensor index)
output : Tensor
infer_meta :
func : IndexSampleInferMeta
kernel :
func : index_sample
data_type : x
backward : index_sample_grad
- api : cross
args : (Tensor x, Tensor y, int axis = 9)
output : Tensor
infer_meta :
func : CrossInferMeta
kernel :
func : cross
backward : cross_grad
- api : atan2
args : (Tensor x, Tensor y)
output : Tensor
infer_meta :
func : Atan2InferMeta
kernel :
func : atan2
backward : atan2_grad
- api : bce_loss
args : (Tensor input, Tensor label)
output : Tensor
infer_meta :
func : BCELossInferMeta
kernel :
func : bce_loss
backward : bce_loss_grad
- api : dist
args : (Tensor x, Tensor y, float p)
output : Tensor
infer_meta :
func : DistInferMeta
kernel :
func : dist
# backward : dist_grad
- api : gather_nd
args : (Tensor x, Tensor index)
output : Tensor
infer_meta :
func : GatherNdInferMeta
kernel :
func : gather_nd
data_type : x
backward : gather_nd_grad
- api : gather_tree
args : (Tensor ids, Tensor parents)
output : Tensor
infer_meta :
func : GatherTreeMeta
kernel :
func : gather_tree
- api : mv
args : (Tensor x, Tensor vec)
output : Tensor
infer_meta :
func : MvInferMeta
kernel :
func : mv
backward : mv_grad
...@@ -147,6 +147,7 @@ def source_include(header_file_path): ...@@ -147,6 +147,7 @@ def source_include(header_file_path):
#include "paddle/phi/infermeta/multiary.h" #include "paddle/phi/infermeta/multiary.h"
#include "paddle/phi/infermeta/nullary.h" #include "paddle/phi/infermeta/nullary.h"
#include "paddle/phi/infermeta/unary.h" #include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/infermeta/ternary.h"
#include "paddle/phi/kernels/declarations.h" #include "paddle/phi/kernels/declarations.h"
#include "paddle/fluid/platform/profiler/event_tracing.h" #include "paddle/fluid/platform/profiler/event_tracing.h"
......
...@@ -25,6 +25,17 @@ ...@@ -25,6 +25,17 @@
output : Tensor(x_grad) output : Tensor(x_grad)
invoke : scale(out_grad, scale, bias, bias_after_scale) invoke : scale(out_grad, scale, bias, bias_after_scale)
- backward_api : add_grad
forward : add (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : add_grad
- backward_api : digamma_grad - backward_api : digamma_grad
forward : digamma (Tensor x) -> Tensor(out) forward : digamma (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
...@@ -90,3 +101,168 @@ ...@@ -90,3 +101,168 @@
# func : MatmulTripleGradInferMeta # func : MatmulTripleGradInferMeta
# kernel : # kernel :
# func : matmul_triple_grad # func : matmul_triple_grad
# - backward_api : gumbel_softmax_grad
# forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
# args : (Tensor out, Tensor out_grad, int axis)
# output : Tensor(x_grad)
# infer_meta :
# func : GumbelSoftmaxGradInferMeta
# param : [out, out_grad, axis]
# kernel :
# func : gumbel_softmax_grad
- backward_api : transpose_grad
forward : transpose (Tensor x, int[] axis) -> Tensor(out)
args : (Tensor out_grad, int[] axis)
output : Tensor(x_grad)
infer_meta :
func : TransposeGradInferMeta
param : [out_grad, axis]
kernel :
func : transpose_grad
# - backward_api : lerp_grad
# forward : transpose (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
# args : (Tensor x, Tensor y, Tensor weight, Tensor out, Tensor out_grad)
# output : Tensor(x_grad), Tensor(y_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : lerp_grad
- backward_api : scatter_grad
forward : scatter (Tensor x, Tensor index, Tensor updates, bool overwrite) -> Tensor(out)
args : (Tensor index, Tensor updates, Tensor out_grad, bool overwrite)
output : Tensor(x_grad), Tensor(updates_grad)
infer_meta :
func : ScatterGradInferMeta
param : [index, updates, out_grad, overwrite]
kernel :
func : scatter_grad
- backward_api : scatter_nd_add_grad
forward : scatter (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
args : (Tensor index, Tensor updates, Tensor out_grad)
output : Tensor(x_grad), Tensor(updates_grad)
infer_meta :
func : ScatterNdAddGradInferMeta
param : [index, updates, out_grad]
kernel :
func : scatter_nd_grad
- backward_api : addmm_grad
forward : scatter (Tensor input, Tensor x, Tensor y, float alpha, float beta) -> Tensor(out)
args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha, float beta)
output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param : [input, x, y]
kernel :
func : addmm_grad
- backward_api : where_grad
forward : where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
args : (Tensor condition, Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : where_grad
# - backward_api : huber_loss_grad
# forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
# args : (Tensor residual, Tensor out_grad, float delta)
# output : Tensor(input_grad), Tensor(label_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : where_grad
# - backward_api : triangular_solve_grad
# forward : triangular_solve (Tensor x, Tensor y, bool upper, bool tranpose, bool unitriangular) -> Tensor(out)
# args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper, bool tranpose, bool unitriangular)
# output : Tensor(x_grad), Tensor(y_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : triangular_solve_grad
- backward_api : index_sample_grad
forward : index_sample (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : index_sample_grad
- backward_api : cross_grad
forward : cross (Tensor x, Tensor y, int axis = 9) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : cross_grad
- backward_api : atan2_grad
forward : cross (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : atan2_grad
- backward_api : bce_loss_grad
forward : bce_loss (Tensor input, Tensor label) -> Tensor(out)
args : (Tensor input, Tensor label, Tensor out_grad)
output : Tensor(input_grad)
infer_meta :
func : UnchangedInferMeta
param : [input]
kernel :
func : bce_loss_grad
# - backward_api : dist_grad
# forward : dist (Tensor x, Tensor y, float p) -> Tensor(out)
# args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, float p)
# output : Tensor(x_grad), Tensor(y_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : dist_grad
- backward_api : gather_nd_grad
forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : gather_nd_grad
- backward_api : mv_grad
forward : mv (Tensor x, Tensor vec) -> Tensor(out)
args : (Tensor x, Tensor vec, Tensor out_grad)
output : Tensor(x_grad), Tensor(vec_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, vec]
kernel :
func : mv_grad
...@@ -98,6 +98,7 @@ def source_include(header_file_path): ...@@ -98,6 +98,7 @@ def source_include(header_file_path):
#include "paddle/phi/infermeta/multiary.h" #include "paddle/phi/infermeta/multiary.h"
#include "paddle/phi/infermeta/nullary.h" #include "paddle/phi/infermeta/nullary.h"
#include "paddle/phi/infermeta/unary.h" #include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/infermeta/ternary.h"
""" """
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册