Commit 8a108f59 authored by P phlrain

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_some_yaml_config

......@@ -64,6 +64,8 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) {
return pybind11::detail::npy_api::NPY_INT64_;
case phi::DataType::FLOAT16:
return pybind11::detail::NPY_FLOAT16_;
case phi::DataType::BFLOAT16:
return pybind11::detail::NPY_UINT16_;
case phi::DataType::FLOAT32:
return pybind11::detail::npy_api::NPY_FLOAT_;
case phi::DataType::FLOAT64:
......
......@@ -180,6 +180,7 @@ std::shared_ptr<phi::DenseTensor> PrepareData(
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag) {
const auto& tensor_in = input.impl();
VLOG(6) << tensor_in->dtype() << "\t" << target_args_def.dtype;
if (!transform_flag.NeedTransform() || !tensor_in->initialized() ||
(!NeedTransformPlace(
tensor_in->place(), target_args_def.backend, transform_flag) &&
......
......@@ -110,8 +110,9 @@ void* DenseTensor::AllocateFrom(Allocator* allocator,
template <typename T>
const T* DenseTensor::data() const {
check_memory_size();
PADDLE_ENFORCE(
(dtype() == paddle::experimental::CppTypeToDataType<T>::Type()),
PADDLE_ENFORCE_EQ(
dtype(),
paddle::experimental::CppTypeToDataType<T>::Type(),
phi::errors::InvalidArgument(
"The type of data we are trying to retrieve does not match the "
"type of data currently contained in the container."));
......
......@@ -1668,6 +1668,17 @@ void TransposeInferMeta(const MetaTensor& x,
out->set_dtype(x.dtype());
}
void TransposeGradInferMeta(const MetaTensor& x,
const std::vector<int>& axis,
MetaTensor* out) {
std::vector<int> reversed_axis(axis);
for (size_t i = 0; i < axis.size(); i++) {
reversed_axis[axis[i]] = i;
}
TransposeInferMeta(x, reversed_axis, out);
}
void UnbindInferMeta(const MetaTensor& x,
int axis,
std::vector<MetaTensor>* outs) {
......@@ -1907,6 +1918,7 @@ void OneHotInferMeta(const MetaTensor& x,
auto out_dims = phi::make_ddim(out_dims_vec);
out->set_dims(out_dims);
out->share_lod(x);
out->set_dtype(phi::DataType::FLOAT32);
}
......
......@@ -255,6 +255,10 @@ void TransposeInferMeta(const MetaTensor& x,
const std::vector<int>& axis,
MetaTensor* out);
void TransposeGradInferMeta(const MetaTensor& x,
const std::vector<int>& axis,
MetaTensor* out);
void UnbindInferMeta(const MetaTensor& x,
int axis,
std::vector<MetaTensor>* outs);
......
......@@ -41,7 +41,7 @@ void IndexSampleInner(const Context &context,
std::vector<T> input_vec;
std::vector<IndexT> index_vec;
paddle::framework::TensorToVector(input, context, &input_vec);
paddle::framework::TensorToVector(index, context, &index_vec);
paddle::framework::TensorToVector<IndexT>(index, context, &index_vec);
std::vector<T> res(index_ids_num);
for (int i = 0; i < index_ids_num; i++) {
......
......@@ -35,7 +35,7 @@ void LimitGridDim(const Context& ctx, dim3* grid_dim) {
#define PREDEFINED_BLOCK_SIZE_X 512
#define PREDEFINED_BLOCK_SIZE 1024
#define MIN(a, b) ((a) < (b) ? (a) : (b))
}
} // namespace
template <typename T, typename IndexT = int>
__global__ void IndexSampleForward(const IndexT* index,
......
......@@ -30,6 +30,11 @@ final_state_name_mapping = {
"y": "Y",
"out": "Out",
},
# "elementwise_add": {
# "final_op_name": "final_state_add",
# "x": "X",
# "y": "Y",
# },
"trunc": {
"final_op_name": "final_state_trunc",
"x": "X",
......
......@@ -5611,6 +5611,8 @@ def transpose(x, perm, name=None):
"""
if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_transpose(x, perm)
out, _ = _C_ops.transpose2(x, 'axis', perm)
return out
......@@ -8550,6 +8552,8 @@ def gather_nd(input, index, name=None):
"""
if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_gather_nd(input, index)
return _C_ops.gather_nd(input, index)
check_variable_and_dtype(
input, 'input',
......@@ -8726,6 +8730,8 @@ def scatter_nd_add(ref, index, updates, name=None):
"""
if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_scatter_nd_add(ref, index, updates)
op = getattr(_C_ops, 'scatter_nd_add')
return op(ref, index, updates)
......@@ -15309,6 +15315,8 @@ def gather_tree(ids, parents):
"""
if in_dygraph_mode():
if _in_eager_mode():
return _C_ops.final_state_gather_tree(ids, parents)
return _C_ops.gather_tree(ids, parents)
else:
helper = LayerHelper('gather_tree', **locals())
......
......@@ -27,6 +27,7 @@ class TestAddMMOp(OpTest):
# test basic
def setUp(self):
self.op_type = "addmm"
self.python_api = paddle.addmm
self.dtype = np.float64
self.init_dtype_type()
self.inputs = {
......@@ -43,19 +44,19 @@ class TestAddMMOp(OpTest):
pass
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad_normal(self):
self.check_grad(['Input', 'X', 'Y'], 'Out')
self.check_grad(['Input', 'X', 'Y'], 'Out', check_eager=False)
def test_check_grad_x(self):
self.check_grad(['X'], 'Out', no_grad_set=None)
self.check_grad(['X'], 'Out', no_grad_set=None, check_eager=False)
def test_check_grad_y(self):
self.check_grad(['Y'], 'Out', no_grad_set=None)
self.check_grad(['Y'], 'Out', no_grad_set=None, check_eager=False)
def test_check_grad_input(self):
self.check_grad(['Input'], 'Out', no_grad_set=None)
self.check_grad(['Input'], 'Out', no_grad_set=None, check_eager=False)
class TestAddMMOpError(unittest.TestCase):
......@@ -167,6 +168,7 @@ class TestAddMMOp2(TestAddMMOp):
# test alpha and beta
def setUp(self):
self.op_type = "addmm"
self.python_api = paddle.addmm
self.dtype = np.float64
self.init_dtype_type()
self.inputs = {
......@@ -252,4 +254,5 @@ class TestAddMMAPI(unittest.TestCase):
'''
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
......@@ -36,6 +36,7 @@ def atan2_grad(x1, x2, dout):
class TestAtan2(OpTest):
def setUp(self):
self.op_type = "atan2"
self.python_api = paddle.atan2
self.init_dtype()
x1 = np.random.uniform(-1, -0.1, [15, 17]).astype(self.dtype)
......@@ -46,10 +47,10 @@ class TestAtan2(OpTest):
self.outputs = {'Out': out}
def test_check_grad(self):
self.check_grad(['X1', 'X2'], 'Out')
self.check_grad(['X1', 'X2'], 'Out', check_eager=True)
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def init_dtype(self):
self.dtype = np.float64
......@@ -66,7 +67,8 @@ class TestAtan2_float(TestAtan2):
'Out',
user_defined_grads=atan2_grad(self.inputs['X1'],
self.inputs['X2'],
1 / self.inputs['X1'].size))
1 / self.inputs['X1'].size),
check_eager=True)
class TestAtan2_float16(TestAtan2_float):
......@@ -129,4 +131,5 @@ class TestAtan2API(unittest.TestCase):
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -244,4 +244,5 @@ class TestBceLossOpCase2(OpTest):
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
......@@ -30,12 +30,13 @@ def create_test_class(op_type, typename, callback):
a = numpy.random.random(size=(10, 7)).astype(typename)
b = numpy.random.random(size=(10, 7)).astype(typename)
c = callback(a, b)
self.python_api = eval("paddle." + op_type)
self.inputs = {'X': a, 'Y': b}
self.outputs = {'Out': c}
self.op_type = op_type
def test_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_errors(self):
paddle.enable_static()
......@@ -338,4 +339,5 @@ class TestCompareOpPlace(unittest.TestCase):
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -26,6 +26,7 @@ from paddle.fluid import Program, program_guard
class TestCrossOp(OpTest):
def setUp(self):
self.op_type = "cross"
self.python_api = paddle.cross
self.initTestCase()
self.inputs = {
'X': np.random.random(self.shape).astype(self.dtype),
......@@ -47,10 +48,10 @@ class TestCrossOp(OpTest):
self.outputs = {'Out': np.array(z_list).reshape(self.shape)}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out')
self.check_grad(['X', 'Y'], 'Out', check_eager=False)
class TestCrossOpCase1(TestCrossOp):
......@@ -114,14 +115,14 @@ class TestCrossAPI(unittest.TestCase):
def test_dygraph_api(self):
self.input_data()
# case 1:
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x)
y = fluid.dygraph.to_variable(self.data_y)
z = paddle.cross(x, y)
np_z = z.numpy()
expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0],
[-1.0, -1.0, -1.0]])
self.assertTrue(np.allclose(expect_out, np_z))
# with fluid.dygraph.guard():
# x = fluid.dygraph.to_variable(self.data_x)
# y = fluid.dygraph.to_variable(self.data_y)
# z = paddle.cross(x, y)
# np_z = z.numpy()
# expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0],
# [-1.0, -1.0, -1.0]])
# self.assertTrue(np.allclose(expect_out, np_z))
# case 2:
with fluid.dygraph.guard():
......@@ -135,4 +136,5 @@ class TestCrossAPI(unittest.TestCase):
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -27,6 +27,7 @@ from paddle.fluid.framework import _test_eager_guard
class TestDiagV2Op(OpTest):
def setUp(self):
self.op_type = "diag_v2"
self.python_api = paddle.diag
self.x = np.random.rand(10, 10)
self.offset = 0
self.padding_value = 0.0
......@@ -267,4 +268,5 @@ class TestDiagV2API(unittest.TestCase):
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
......@@ -40,16 +40,24 @@ class TestElementwiseAddOp(OpTest):
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.out}
def check_eager(self):
return (self.use_mkldnn == False and self.axis == -1)
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output(check_dygraph=(self.use_mkldnn == False))
self.check_output(
check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def test_check_grad_normal(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
self.check_grad(
['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False))
['X', 'Y'],
'Out',
check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def test_check_grad_ingore_x(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
......@@ -59,7 +67,8 @@ class TestElementwiseAddOp(OpTest):
['Y'],
'Out',
no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False))
check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def test_check_grad_ingore_y(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
......@@ -69,7 +78,8 @@ class TestElementwiseAddOp(OpTest):
['X'],
'Out',
no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False))
check_dygraph=(self.use_mkldnn == False),
check_eager=self.check_eager())
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
......@@ -123,19 +133,21 @@ class TestBF16ElementwiseAddOp(OpTest):
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(place, check_eager=False)
def test_check_grad_normal(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)
def test_check_grad_ingore_x(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['Y'], 'Out', no_grad_set=set("X"))
self.check_grad_with_place(
place, ['Y'], 'Out', no_grad_set=set("X"), check_eager=False)
def test_check_grad_ingore_y(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', no_grad_set=set('Y'))
self.check_grad_with_place(
place, ['X'], 'Out', no_grad_set=set('Y'), check_eager=False)
@skip_check_grad_ci(
......@@ -586,7 +598,7 @@ class TestComplexElementwiseAddOp(OpTest):
self.grad_y = self.grad_out
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad_normal(self):
self.check_grad(
......
......@@ -22,10 +22,11 @@ import paddle
class TestGatherNdOpWithEmptyIndex(OpTest):
#Index has empty element, which means copy entire tensor
# Index has empty element, which means copy entire tensor
def setUp(self):
self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.random((5, 20)).astype("float64")
self.inputs = {'X': xnp, 'Index': np.array([[], []]).astype("int32")}
self.outputs = {
......@@ -33,24 +34,25 @@ class TestGatherNdOpWithEmptyIndex(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithIndex1(OpTest):
def setUp(self):
self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.random((5, 20)).astype("float64")
self.inputs = {'X': xnp, 'Index': np.array([1]).astype("int32")}
self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithLowIndex(OpTest):
......@@ -58,6 +60,7 @@ class TestGatherNdOpWithLowIndex(OpTest):
def setUp(self):
self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([[1], [2]]).astype("int64")
......@@ -66,10 +69,10 @@ class TestGatherNdOpWithLowIndex(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)]} #[[14, 25, 1], [76, 22, 3]]
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpIndex1(OpTest):
......@@ -77,18 +80,19 @@ class TestGatherNdOpIndex1(OpTest):
def setUp(self):
self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([1, 2]).astype("int64")
index = np.array([1, 2]).astype("int32")
self.inputs = {'X': xnp, 'Index': index}
self.outputs = {'Out': xnp[tuple(index.T)]}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithSameIndexAsX(OpTest):
......@@ -96,6 +100,7 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
def setUp(self):
self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([[1, 1], [2, 1]]).astype("int64")
......@@ -103,10 +108,10 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)]} #[25, 22]
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithHighRankSame(OpTest):
......@@ -114,6 +119,7 @@ class TestGatherNdOpWithHighRankSame(OpTest):
def setUp(self):
self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
shape = (5, 2, 3, 1, 10)
xnp = np.random.rand(*shape).astype("float64")
index = np.vstack([np.random.randint(0, s, size=2) for s in shape]).T
......@@ -122,10 +128,10 @@ class TestGatherNdOpWithHighRankSame(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)]}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestGatherNdOpWithHighRankDiff(OpTest):
......@@ -133,6 +139,7 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
def setUp(self):
self.op_type = "gather_nd"
self.python_api = paddle.gather_nd
shape = (2, 3, 4, 1, 10)
xnp = np.random.rand(*shape).astype("float64")
index = np.vstack([np.random.randint(0, s, size=200) for s in shape]).T
......@@ -142,10 +149,10 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
self.outputs = {'Out': xnp[tuple(index.T)].reshape([20, 5, 2])}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
#Test Python API
......@@ -245,4 +252,5 @@ class TestGatherNdAPI2(unittest.TestCase):
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
......@@ -25,6 +25,7 @@ from paddle.fluid.framework import program_guard, Program
class TestGatherTreeOp(OpTest):
def setUp(self):
self.op_type = "gather_tree"
self.python_api = paddle.nn.functional.gather_tree
max_length, batch_size, beam_size = 5, 2, 2
ids = np.random.randint(
0, high=10, size=(max_length, batch_size, beam_size))
......@@ -34,7 +35,7 @@ class TestGatherTreeOp(OpTest):
self.outputs = {'Out': self.backtrace(ids, parents)}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
@staticmethod
def backtrace(ids, parents):
......@@ -126,4 +127,5 @@ class TestGatherTreeOpError(unittest.TestCase):
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
......@@ -24,6 +24,7 @@ from op_test import OpTest
class TestIndexSampleOp(OpTest):
def setUp(self):
self.op_type = "index_sample"
self.python_api = paddle.index_sample
self.config()
xnp = np.random.random(self.x_shape).astype(self.x_type)
indexnp = np.random.randint(
......@@ -39,10 +40,10 @@ class TestIndexSampleOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def config(self):
"""
......
......@@ -27,15 +27,16 @@ from op_test import OpTest
class TestMVOp(OpTest):
def setUp(self):
self.op_type = "mv"
self.python_api = paddle.mv
self.init_config()
self.inputs = {'X': self.x, 'Vec': self.vec}
self.outputs = {'Out': np.dot(self.x, self.vec)}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X', 'Vec'], 'Out')
self.check_grad(['X', 'Vec'], 'Out', check_eager=True)
def init_config(self):
self.x = np.random.random((2, 100)).astype("float64")
......@@ -107,4 +108,5 @@ class TestMVError(unittest.TestCase):
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -67,6 +67,7 @@ class TestScatterNdAddSimpleOp(OpTest):
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
ref_np = np.random.random([100]).astype("float64")
index_np = np.random.randint(0, 100, [100, 1]).astype("int32")
updates_np = np.random.random([100]).astype("float64")
......@@ -76,10 +77,10 @@ class TestScatterNdAddSimpleOp(OpTest):
self.outputs = {'Out': expect_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out')
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
class TestScatterNdAddWithEmptyIndex(OpTest):
......@@ -89,6 +90,7 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
ref_np = np.random.random((10, 10)).astype("float64")
index_np = np.array([[], []]).astype("int32")
updates_np = np.random.random((2, 10, 10)).astype("float64")
......@@ -99,10 +101,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
self.outputs = {'Out': expect_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out')
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
class TestScatterNdAddWithHighRankSame(OpTest):
......@@ -112,6 +114,7 @@ class TestScatterNdAddWithHighRankSame(OpTest):
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
shape = (3, 2, 2, 1, 10)
ref_np = np.random.rand(*shape).astype("float64")
index_np = np.vstack(
......@@ -125,10 +128,10 @@ class TestScatterNdAddWithHighRankSame(OpTest):
self.outputs = {'Out': expect_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out')
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
class TestScatterNdAddWithHighRankDiff(OpTest):
......@@ -138,6 +141,7 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
shape = (8, 2, 2, 1, 10)
ref_np = np.random.rand(*shape).astype("double")
index = np.vstack([np.random.randint(0, s, size=500) for s in shape]).T
......@@ -150,10 +154,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
self.outputs = {'Out': expect_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out')
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
#Test Python API
......
......@@ -27,6 +27,7 @@ from paddle.fluid.dygraph.base import switch_to_static_graph
class TestScatterOp(OpTest):
def setUp(self):
self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 50)).astype("float32")
index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 50)).astype("float32")
......@@ -36,15 +37,16 @@ class TestScatterOp(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out")
self.check_grad(["X", "Updates"], "Out", check_eager=True)
class TestScatterOp0(OpTest):
def setUp(self):
self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32")
......@@ -55,15 +57,16 @@ class TestScatterOp0(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out")
self.check_grad(["X", "Updates"], "Out", check_eager=True)
class TestScatterOp1(OpTest):
def setUp(self):
self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32")
zeros_np = np.zeros([2, 3]).astype('float32')
index_np = np.array([1, 1]).astype("int32")
......@@ -77,10 +80,10 @@ class TestScatterOp1(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out")
self.check_grad(["X", "Updates"], "Out", check_eager=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
......@@ -88,6 +91,7 @@ class TestScatterOp1(OpTest):
class TestScatterOp2(OpTest):
def setUp(self):
self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32")
......@@ -99,12 +103,13 @@ class TestScatterOp2(OpTest):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3)
self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
......@@ -112,6 +117,7 @@ class TestScatterOp2(OpTest):
class TestScatterOp3(OpTest):
def setUp(self):
self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32")
zeros_np = np.zeros([2, 3]).astype('float32')
index_np = np.array([1, 1]).astype("int32")
......@@ -127,17 +133,19 @@ class TestScatterOp3(OpTest):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3)
self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True)
class TestScatterOp4(OpTest):
def setUp(self):
self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int64")
updates_np = np.random.random((2, 3)).astype("float32")
......@@ -147,10 +155,10 @@ class TestScatterOp4(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out')
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
......@@ -158,6 +166,7 @@ class TestScatterOp4(OpTest):
class TestScatterOp5(OpTest):
def setUp(self):
self.op_type = "scatter"
self.python_api = paddle.scatter
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int64")
updates_np = np.random.random((2, 3)).astype("float32")
......@@ -169,12 +178,13 @@ class TestScatterOp5(OpTest):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3)
self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True)
class TestScatterAPI(unittest.TestCase):
......@@ -274,6 +284,7 @@ class TestScatterAPI(unittest.TestCase):
class TestScatterOpFp16(OpTest):
def setUp(self):
self.__class__.op_type = "scatter"
self.python_api = paddle.scatter
# compute grad in the following code by hand.
self.__class__.no_need_check_grad = True
self.x_type = 'float16'
......
......@@ -29,6 +29,7 @@ class TestTransposeOp(OpTest):
def setUp(self):
self.init_op_type()
self.initTestCase()
self.python_api = paddle.transpose
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = {
'axis': list(self.axis),
......@@ -44,10 +45,10 @@ class TestTransposeOp(OpTest):
self.use_mkldnn = False
def test_check_output(self):
self.check_output(no_check_set=['XShape'])
self.check_output(no_check_set=['XShape'], check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def initTestCase(self):
self.shape = (3, 40)
......
......@@ -29,6 +29,7 @@ from paddle.fluid.framework import _test_eager_guard
class TestWhereOp(OpTest):
def setUp(self):
self.op_type = 'where'
self.python_api = paddle.where
self.init_config()
self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
......@@ -391,5 +392,6 @@ class TestWhereOpError(unittest.TestCase):
self.test_value_error()
if (__name__ == '__main__'):
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
......@@ -36,7 +36,7 @@ from ...static import Variable
from paddle.utils import deprecated
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.framework import core
from paddle.framework import core, _in_eager_mode
__all__ = []
......@@ -114,7 +114,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
reduction)
if in_dynamic_mode():
out = _C_ops.bce_loss(input, label)
if _in_eager_mode():
out = _C_ops.final_state_bce_loss(input, label)
else:
out = _C_ops.bce_loss(input, label)
if weight is not None:
out = _C_ops.elementwise_mul(out, weight, 'axis', -1)
......
......@@ -974,6 +974,8 @@ def diag(x, offset=0, padding_value=0, name=None):
# [4]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_diag(x, offset, padding_value)
return _C_ops.diag_v2(x, "offset", offset, "padding_value",
padding_value)
......
......@@ -1146,6 +1146,8 @@ def cross(x, y, axis=None, name=None):
# [0. 0. 0.]]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_cross(x, y, axis)
if axis is not None:
return _C_ops.cross(x, y, 'dim', axis)
else:
......@@ -1494,6 +1496,8 @@ def mv(x, vec, name=None):
out = paddle.mv(x, vec)
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_mv(x, vec)
out = _C_ops.mv(x, vec)
return out
......
......@@ -17,7 +17,7 @@ from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.layers.layer_function_generator import templatedoc
from ..static import Variable
from ..framework import VarBase as Tensor
from paddle.fluid.framework import _in_eager_mode
from ..framework import _in_eager_mode
# TODO: define logic functions of a tensor
from ..fluid.layers import is_empty # noqa: F401
......@@ -182,6 +182,9 @@ def equal(x, y, name=None):
y = full(shape=[1], dtype=x.dtype, fill_value=y)
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_equal(x, y)
return _C_ops.equal(x, y)
check_variable_and_dtype(
......@@ -224,6 +227,9 @@ def greater_equal(x, y, name=None):
print(result1) # result1 = [True False True]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_greater_equal(x, y)
return _C_ops.greater_equal(x, y)
check_variable_and_dtype(x, "x",
......@@ -270,6 +276,9 @@ def greater_than(x, y, name=None):
print(result1) # result1 = [False False True]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_greater_than(x, y)
return _C_ops.greater_than(x, y)
check_variable_and_dtype(x, "x",
......@@ -317,6 +326,9 @@ def less_equal(x, y, name=None):
print(result1) # result1 = [True True False]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_less_equal(x, y)
return _C_ops.less_equal(x, y)
check_variable_and_dtype(
......@@ -360,6 +372,9 @@ def less_than(x, y, name=None):
print(result1) # result1 = [False True False]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_less_than(x, y)
return _C_ops.less_than(x, y)
check_variable_and_dtype(
......@@ -403,6 +418,9 @@ def not_equal(x, y, name=None):
print(result1) # result1 = [False True True]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_not_equal(x, y)
return _C_ops.not_equal(x, y)
check_variable_and_dtype(
......
......@@ -16,7 +16,7 @@ from __future__ import print_function
from collections import Counter
from ..static import Variable, device_guard
from ..framework import core
from ..framework import core, _in_eager_mode
from ..fluid.layer_helper import LayerHelper
from ..framework import OpProtoHolder, convert_np_dtype_to_dtype_, dygraph_only
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
......@@ -1570,6 +1570,8 @@ def scatter(x, index, updates, overwrite=True, name=None):
# [1., 1.]]
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_scatter(x, index, updates, overwrite)
return _C_ops.scatter(x, index, updates, 'overwrite', overwrite)
check_variable_and_dtype(
......
......@@ -1276,6 +1276,8 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_addmm(input, x, y, alpha, beta)
out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out
......@@ -3278,6 +3280,8 @@ def atan2(x, y, name=None):
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_atan2(x, y)
return _C_ops.atan2(x, y)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
......
......@@ -17,7 +17,7 @@ import paddle
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..fluid import layers
from ..framework import core
from ..framework import core, _in_eager_mode
from paddle.common_ops_import import convert_np_dtype_to_dtype_
from paddle.common_ops_import import Variable
from paddle.common_ops_import import VarDesc
......@@ -630,6 +630,9 @@ def where(condition, x=None, y=None, name=None):
broadcast_condition = paddle.cast(broadcast_condition, 'bool')
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_where(broadcast_condition, broadcast_x,
broadcast_y)
return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
else:
helper = LayerHelper("where", **locals())
......@@ -721,6 +724,8 @@ def index_sample(x, index):
"""
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_index_sample(x, index)
return _C_ops.index_sample(x, index)
helper = LayerHelper("index_sample", **locals())
......
......@@ -5,6 +5,7 @@
func : ElementwiseInferMeta
kernel :
func : add
# backward : add_grad
- api : cast
args : (Tensor x, DataType out_dtype)
......@@ -1222,3 +1223,264 @@
# func : YoloBoxInferMeta
# kernel :
# func : yolo_box
- api : gumbel_softmax
args : (Tensor x, float temperature, bool hard, int axis)
output : Tensor
infer_meta :
func : GumbelSoftmaxInferMeta
kernel :
func : gumbel_softmax
# backward : gumbel_softmax_grad
- api : diag
args : (Tensor x, int offset, float padding_value)
output : Tensor
infer_meta :
func : DiagInferMeta
kernel :
func : diag
# - api : pixel_shuffle
# args : (Tensor x, int upscale_factor, const std::string& data_format)
# output : Tensor
# infer_meta :
# func : PixelShuffleInferMeta
# kernel :
# func : pixel_shuffle
- api : transpose
args : (Tensor x, int[] axis)
output : Tensor
infer_meta :
func : TransposeInferMeta
kernel :
func : transpose
backward : transpose_grad
- api : lerp
args : (Tensor x, Tensor y, Tensor weight)
output : Tensor
infer_meta :
func : LerpInferMeta
kernel :
func : lerp
# backward : lerp_grad
- api : scatter
args : (Tensor x, Tensor index, Tensor updates, bool overwrite)
output : Tensor
infer_meta :
func : ScatterInferMeta
dtype : x
kernel :
func : scatter
backward : scatter_grad
- api : scatter_nd_add
args : (Tensor x, Tensor index, Tensor updates)
output : Tensor
infer_meta :
func : ScatterNdAddInferMeta
dtype : x
kernel :
func : scatter_nd_add
backward : scatter_nd_add_grad
- api : addmm
args : (Tensor input, Tensor x, Tensor y, float alpha, float beta)
output : Tensor
infer_meta :
func : AddmmInferMeta
kernel :
func : addmm
backward : addmm_grad
- api : adadelta
args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update, float rho, float epsilon)
output : Tensor(param_out), Tensor(avg_squared_grad_out), Tensor(avg_squared_update_out)
infer_meta :
func : AdadeltaInferMeta
kernel :
func : adadelta
- api : adamax
args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment, Tensor inf_norm, Tensor beta1_pow, float beta1, float beta2, float epsilon)
output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out)
infer_meta :
func : AdamaxInferMeta
kernel :
func : adamax
- api : where
args : (Tensor condition, Tensor x, Tensor y)
output : Tensor
infer_meta :
func : WhereInferMeta
kernel :
func : where
backward : where_grad
# BilinearTensorProductInferMeta
# BroadcastTensorsInferMeta
- api : less_than
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : less_than
- api : less_equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : less_equal
- api : greater_than
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : greater_than
- api : greater_equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : greater_equal
- api : equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : equal
- api : not_equal
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
infer_meta :
func : CompareInferMeta
kernel :
func : not_equal
# - api : equal_all
# args : (Tensor x, Tensor y)
# output : Tensor
# infer_meta :
# func : CompareAllInferMeta
# kernel :
# func : equal_all
- api : huber_loss
args : (Tensor input, Tensor label, float delta)
output : Tensor(out), Tensor(residual)
infer_meta :
func : HuberLossInferMeta
kernel :
func : huber_loss
# backward : huber_loss_grad
- api : triangular_solve
args : (Tensor x, Tensor y, bool upper, bool transpose, bool unitriangular)
output : Tensor
infer_meta :
func : TriangularSolveInferMeta
kernel :
func : triangular_solve
# backward : triangular_solve_grad
- api : index_sample
args : (Tensor x, Tensor index)
output : Tensor
infer_meta :
func : IndexSampleInferMeta
kernel :
func : index_sample
data_type : x
backward : index_sample_grad
- api : cross
args : (Tensor x, Tensor y, int axis = 9)
output : Tensor
infer_meta :
func : CrossInferMeta
kernel :
func : cross
backward : cross_grad
- api : atan2
args : (Tensor x, Tensor y)
output : Tensor
infer_meta :
func : Atan2InferMeta
kernel :
func : atan2
backward : atan2_grad
- api : bce_loss
args : (Tensor input, Tensor label)
output : Tensor
infer_meta :
func : BCELossInferMeta
kernel :
func : bce_loss
backward : bce_loss_grad
- api : dist
args : (Tensor x, Tensor y, float p)
output : Tensor
infer_meta :
func : DistInferMeta
kernel :
func : dist
# backward : dist_grad
- api : gather_nd
args : (Tensor x, Tensor index)
output : Tensor
infer_meta :
func : GatherNdInferMeta
kernel :
func : gather_nd
data_type : x
backward : gather_nd_grad
- api : gather_tree
args : (Tensor ids, Tensor parents)
output : Tensor
infer_meta :
func : GatherTreeMeta
kernel :
func : gather_tree
- api : mv
args : (Tensor x, Tensor vec)
output : Tensor
infer_meta :
func : MvInferMeta
kernel :
func : mv
backward : mv_grad
......@@ -25,6 +25,17 @@
output : Tensor(x_grad)
invoke : scale(out_grad, scale, bias, bias_after_scale)
- backward_api : add_grad
forward : add (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : add_grad
- backward_api : digamma_grad
forward : digamma (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......@@ -471,6 +482,118 @@
kernel :
func : cholesky_solve_grad
# - backward_api : gumbel_softmax_grad
# forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
# args : (Tensor out, Tensor out_grad, int axis)
# output : Tensor(x_grad)
# infer_meta :
# func : GumbelSoftmaxGradInferMeta
# param : [out, out_grad, axis]
# kernel :
# func : gumbel_softmax_grad
- backward_api : transpose_grad
forward : transpose (Tensor x, int[] axis) -> Tensor(out)
args : (Tensor out_grad, int[] axis)
output : Tensor(x_grad)
infer_meta :
func : TransposeGradInferMeta
param : [out_grad, axis]
kernel :
func : transpose_grad
# - backward_api : lerp_grad
# forward : lerp (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
# args : (Tensor x, Tensor y, Tensor weight, Tensor out, Tensor out_grad)
# output : Tensor(x_grad), Tensor(y_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : lerp_grad
- backward_api : scatter_grad
forward : scatter (Tensor x, Tensor index, Tensor updates, bool overwrite) -> Tensor(out)
args : (Tensor index, Tensor updates, Tensor out_grad, bool overwrite)
output : Tensor(x_grad), Tensor(updates_grad)
infer_meta :
func : ScatterGradInferMeta
param : [index, updates, out_grad, overwrite]
kernel :
func : scatter_grad
- backward_api : scatter_nd_add_grad
forward : scatter_nd_add (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
args : (Tensor index, Tensor updates, Tensor out_grad)
output : Tensor(x_grad), Tensor(updates_grad)
infer_meta :
func : ScatterNdAddGradInferMeta
param : [index, updates, out_grad]
kernel :
func : scatter_nd_add_grad
- backward_api : addmm_grad
forward : addmm (Tensor input, Tensor x, Tensor y, float alpha, float beta) -> Tensor(out)
args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha, float beta)
output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param : [input, x, y]
kernel :
func : addmm_grad
- backward_api : where_grad
forward : where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
args : (Tensor condition, Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : where_grad
# - backward_api : huber_loss_grad
# forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
# args : (Tensor residual, Tensor out_grad, float delta)
# output : Tensor(input_grad), Tensor(label_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : huber_loss_grad
# - backward_api : triangular_solve_grad
# forward : triangular_solve (Tensor x, Tensor y, bool upper, bool transpose, bool unitriangular) -> Tensor(out)
# args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper, bool transpose, bool unitriangular)
# output : Tensor(x_grad), Tensor(y_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : triangular_solve_grad
- backward_api : index_sample_grad
forward : index_sample (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : index_sample_grad
- backward_api : cross_grad
forward : cross (Tensor x, Tensor y, int axis = 9) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : cross_grad
# - backward_api : dropout_grad
# forward : dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
# args : (Tensor mask, Tensor out_grad, float p, bool is_test, str mode)
......@@ -552,6 +675,26 @@
kernel :
func : log_loss_grad
- backward_api : atan2_grad
forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : atan2_grad
- backward_api : bce_loss_grad
forward : bce_loss (Tensor input, Tensor label) -> Tensor(out)
args : (Tensor input, Tensor label, Tensor out_grad)
output : Tensor(input_grad)
infer_meta :
func : UnchangedInferMeta
param : [input]
kernel :
func : bce_loss_grad
# - backward_api : masked_selecte_grad
# forward : masked_select (Tensor x, Tensor mask) -> Tensor(out)
# args : (Tensor out_grad, Tensor x, Tensor mask)
......@@ -674,6 +817,27 @@
param : [x]
kernel :
func : trace_grad
# - backward_api : dist_grad
# forward : dist (Tensor x, Tensor y, float p) -> Tensor(out)
# args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, float p)
# output : Tensor(x_grad), Tensor(y_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, y]
# kernel :
# func : dist_grad
- backward_api : gather_nd_grad
forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : gather_nd_grad
- backward_api : unfold_grad
forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
......@@ -694,3 +858,13 @@
# param : [x]
# kernel :
# func : where_index_grad
- backward_api : mv_grad
forward : mv (Tensor x, Tensor vec) -> Tensor(out)
args : (Tensor x, Tensor vec, Tensor out_grad)
output : Tensor(x_grad), Tensor(vec_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, vec]
kernel :
func : mv_grad