Unverified commit 16bfcd18, authored by xiongkun, committed by GitHub

[Yaml] transfer around 22 ops yaml file and pass the final state OpTest. (#41024)

* 1. add the python api grad 2. add final and intermediate state vlog 3. change the python_api error logic

* add python api or close the check_eager=True

* fix the compatibility
Parent: 2012aeb6
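For context, the operator tests touched in this commit all follow the same pattern: an OpTest subclass records the Python API that wraps the operator and passes check_eager so the final-state (eager) code path is exercised alongside the legacy one. A minimal sketch of that pattern, assuming the repo's op_test harness is importable; the class name TestLgammaEagerExample is illustrative and not part of this commit:

    import math
    import numpy as np
    import paddle
    from op_test import OpTest

    class TestLgammaEagerExample(OpTest):  # illustrative name
        def setUp(self):
            self.op_type = 'lgamma'
            # python_api tells the harness which Python call wraps the op,
            # so check_eager=True can route the test through the final-state path.
            self.python_api = paddle.lgamma
            x = np.random.random((5, 20)).astype('float64') + 1
            self.inputs = {'X': x}
            self.outputs = {'Out': np.vectorize(math.lgamma)(x)}

        def test_check_output(self):
            self.check_output(check_eager=True)

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_eager=True)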
@@ -933,7 +933,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
     bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
-  } else if (type_name == "paddle.Tensor") {
+  } else if (type_name == "Tensor") {
     paddle::experimental::Tensor& value = GetTensorFromPyObject(
         op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
     return paddle::experimental::Scalar(value);
......
@@ -1374,8 +1374,8 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
 void PReluInferMeta(const MetaTensor& x,
                     const MetaTensor& alpha,
-                    const std::string& mode,
                     const std::string& data_format,
+                    const std::string& mode,
                     MetaTensor* out,
                     MetaConfig config) {
   auto x_dim = x.dims();
......
@@ -196,10 +196,10 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
 void PReluInferMeta(const MetaTensor& x,
                     const MetaTensor& alpha,
-                    const std::string& mode,
                     const std::string& data_format,
+                    const std::string& mode,
                     MetaTensor* out,
-                    MetaConfig config);
+                    MetaConfig config = MetaConfig());
 void SearchsortedInferMeta(const MetaTensor& sorted_sequence,
                            const MetaTensor& value,
......
@@ -29,21 +29,28 @@ void AllCloseKernel(const Context& dev_ctx,
                     const Scalar& atol,
                     bool equal_nan,
                     DenseTensor* out) {
-  PADDLE_ENFORCE_EQ(
-      rtol.dtype(),
-      DataType::FLOAT64,
-      phi::errors::InvalidArgument(
-          "Input (Rtol) type must be double, but get %s.", rtol.dtype()));
-  PADDLE_ENFORCE_EQ(
-      atol.dtype(),
-      DataType::FLOAT64,
-      phi::errors::InvalidArgument(
-          "Input (Atol) type must be double, but get %s.", atol.dtype()));
+  double rtol_v, atol_v;
+  if (rtol.dtype() == DataType::FLOAT64) {
+    rtol_v = rtol.to<double>();
+  } else if (rtol.dtype() == DataType::FLOAT32) {
+    rtol_v = rtol.to<float>();
+  } else {
+    PADDLE_THROW(phi::errors::InvalidArgument(
+        "Input (Rtol) type must be double or float, but get %s.",
+        rtol.dtype()));
+  }
+  if (atol.dtype() == DataType::FLOAT64) {
+    atol_v = atol.to<double>();
+  } else if (atol.dtype() == DataType::FLOAT32) {
+    atol_v = atol.to<float>();
+  } else {
+    PADDLE_THROW(phi::errors::InvalidArgument(
+        "Input (Atol) type must be double or float, but get %s.",
+        atol.dtype()));
+  }
+  VLOG(3) << "rtol and atol is : " << rtol_v << " " << atol_v;
   auto* in_a = x.data<T>();
   auto* in_b = y.data<T>();
-  auto rtol_v = rtol.to<double>();
-  auto atol_v = atol.to<double>();
   auto* out_data = dev_ctx.template Alloc<bool>(out);
   *out_data = true;
......
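The kernel change above (mirrored for the GPU kernel further down) relaxes the dtype check on the Rtol/Atol scalar inputs: instead of requiring double, the kernel now converts either float32 or float64. From the Python side the public API is unchanged; a small usage sketch, where the printed value is the expected result rather than something taken from the diff:

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0], dtype='float32')
    y = paddle.to_tensor([1.0, 2.0, 3.0 + 1e-6], dtype='float32')
    # rtol/atol reach the phi kernel as Scalar attributes; with this change the
    # kernel accepts them whether they arrive as float or double.
    print(paddle.allclose(x, y, rtol=1e-05, atol=1e-08))  # expected: Tensor([True])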
@@ -46,9 +46,9 @@ static void kthvalueAssign(const Type& input_height,
 template <typename T, typename Context>
 void KthvalueGradKernel(const Context& dev_ctx,
-                        const DenseTensor& d_out,
                         const DenseTensor& x,
                         const DenseTensor& indices,
+                        const DenseTensor& d_out,
                         int k,
                         int axis,
                         bool keepdim,
......
@@ -24,8 +24,8 @@ void PReluGradKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& alpha,
                      const DenseTensor& out_grad,
-                     const std::string& mode,
                      const std::string& data_format,
+                     const std::string& mode,
                      DenseTensor* x_grad,
                      DenseTensor* alpha_grad) {
   const T* alpha_ptr = alpha.data<T>();
......
@@ -23,8 +23,8 @@ template <typename T, typename Context>
 void PReluKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const DenseTensor& alpha,
-                 const std::string& mode,
                  const std::string& data_format,
+                 const std::string& mode,
                  DenseTensor* out) {
   const T* x_ptr = x.data<T>();
   const T* alpha_ptr = alpha.data<T>();
......
@@ -51,21 +51,28 @@ void AllCloseKernel(const Context& dev_ctx,
                     const Scalar& atol,
                     bool equal_nan,
                     DenseTensor* out) {
-  PADDLE_ENFORCE_EQ(
-      rtol.dtype(),
-      DataType::FLOAT64,
-      phi::errors::InvalidArgument(
-          "Input (Rtol) type must be double, but get %s.", rtol.dtype()));
-  PADDLE_ENFORCE_EQ(
-      atol.dtype(),
-      DataType::FLOAT64,
-      phi::errors::InvalidArgument(
-          "Input (Atol) type must be double, but get %s.", atol.dtype()));
+  double rtol_v, atol_v;
+  if (rtol.dtype() == DataType::FLOAT64) {
+    rtol_v = rtol.to<double>();
+  } else if (rtol.dtype() == DataType::FLOAT32) {
+    rtol_v = rtol.to<float>();
+  } else {
+    PADDLE_THROW(phi::errors::InvalidArgument(
+        "Input (Rtol) type must be double or float, but get %s.",
+        rtol.dtype()));
+  }
+  if (atol.dtype() == DataType::FLOAT64) {
+    atol_v = atol.to<double>();
+  } else if (atol.dtype() == DataType::FLOAT32) {
+    atol_v = atol.to<float>();
+  } else {
+    PADDLE_THROW(phi::errors::InvalidArgument(
+        "Input (Atol) type must be double or float, but get %s.",
+        atol.dtype()));
+  }
+  VLOG(3) << "rtol and atol is : " << rtol_v << " " << atol_v;
   const T* in_data = x.data<T>();
   const T* other_data = y.data<T>();
-  auto rtol_v = rtol.to<double>();
-  auto atol_v = atol.to<double>();
   bool* out_data = dev_ctx.template Alloc<bool>(out);
   int num = x.numel();
......
@@ -34,9 +34,9 @@ static int getBlockSize(int col) {
 template <typename T, typename Context>
 void KthvalueGradKernel(const Context& dev_ctx,
-                        const DenseTensor& d_out,
                         const DenseTensor& x,
                         const DenseTensor& indices,
+                        const DenseTensor& d_out,
                         int k,
                         int axis,
                         bool keepdim,
......
@@ -102,8 +102,8 @@ void PReluGradKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& alpha,
                      const DenseTensor& out_grad,
-                     const std::string& mode,
                      const std::string& data_format,
+                     const std::string& mode,
                      DenseTensor* x_grad,
                      DenseTensor* alpha_grad) {
   dev_ctx.template Alloc<T>(x_grad);
......
@@ -24,8 +24,8 @@ template <typename T, typename Context>
 void PReluKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const DenseTensor& alpha,
-                 const std::string& mode,
                  const std::string& data_format,
+                 const std::string& mode,
                  DenseTensor* out) {
   const T* x_ptr = x.data<T>();
   T* o_ptr = dev_ctx.template Alloc<T>(out);
......
@@ -33,8 +33,8 @@ struct LgammaGradFunctor {
 };
 template <typename T, typename Context>
 void LgammaGradKernel(const Context& dev_ctx,
-                      const DenseTensor& d_out,
                       const DenseTensor& x,
+                      const DenseTensor& d_out,
                       DenseTensor* d_x) {
   auto numel = d_out.numel();
   auto* dout_data = d_out.data<T>();
......
@@ -19,7 +19,6 @@
 namespace phi {
 template <typename T, typename Context>
-// XKTODO (change name)
 void KLDivLossGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
                          const DenseTensor& label,
......
@@ -20,9 +20,9 @@
 namespace phi {
 template <typename T, typename Context>
 void KthvalueGradKernel(const Context& dev_ctx,
-                        const DenseTensor& d_out,
                         const DenseTensor& x,
                         const DenseTensor& indices,
+                        const DenseTensor& d_out,
                         int k,
                         int axis,
                         bool keepdim,
......
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void LgammaGradKernel(const Context& dev_ctx,
-                      const DenseTensor& d_out,
                       const DenseTensor& x,
+                      const DenseTensor& d_out,
                       DenseTensor* d_x);
 } // namespace phi
@@ -24,8 +24,8 @@ void PReluGradKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& alpha,
                      const DenseTensor& out_grad,
-                     const std::string& mode,
                      const std::string& data_format,
+                     const std::string& mode,
                      DenseTensor* x_grad,
                      DenseTensor* alpha_grad);
 } // namespace phi
@@ -22,7 +22,7 @@ template <typename T, typename Context>
 void PReluKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const DenseTensor& alpha,
-                 const std::string& mode,
                  const std::string& data_format,
+                 const std::string& mode,
                  DenseTensor* out);
 } // namespace phi
@@ -20,7 +20,7 @@ namespace phi {
 KernelSignature KthvalueGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("kthvalue_grad",
-                         {GradVarName("Out"), "X", "Indices"},
+                         {"X", "Indices", GradVarName("Out")},
                          {"k", "axis", "keepdim"},
                          {GradVarName("X")});
 }
......
@@ -18,7 +18,7 @@ namespace phi {
 KernelSignature LgammaGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "lgamma_grad", {GradVarName("Out"), "X"}, {}, {GradVarName("X")});
+      "lgamma_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")});
 }
 } // namespace phi
......
@@ -16,13 +16,19 @@
 namespace phi {
+KernelSignature PReluOpArgumentMapping(const ArgumentMappingContext& ctx) {
+  return KernelSignature(
+      "prelu", {"X", "Alpha"}, {"data_format", "mode"}, {"Out"});
+}
 KernelSignature PReluGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
   return KernelSignature("prelu_grad",
                          {"X", "Alpha", GradVarName("Out")},
-                         {"mode", "data_format"},
+                         {"data_format", "mode"},
                          {GradVarName("X"), GradVarName("Alpha")});
 }
 } // namespace phi
+PD_REGISTER_ARG_MAPPING_FN(prelu, phi::PReluOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(prelu_grad, phi::PReluGradOpArgumentMapping);
@@ -23,6 +23,7 @@ from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
 from ..layer_helper import LayerHelper
 from ..data_feeder import check_variable_and_dtype
+from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
 from paddle import _C_ops
 __all__ = [
......
@@ -25,6 +25,7 @@ import six
 import paddle
 from ..layer_helper import LayerHelper
+from paddle.fluid.framework import _in_legacy_dygraph
 from ..initializer import Normal, Constant, NumpyArrayInitializer
 from ..framework import Variable, OpProtoHolder, _non_static_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags, _in_legacy_dygraph, in_dygraph_mode
 from .. import dygraph_utils
@@ -6427,7 +6428,9 @@ def squeeze(input, axes, name=None):
             y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_squeeze(input, axes)[1]
+    if _in_legacy_dygraph():
         out, _ = _C_ops.squeeze2(input, 'axes', axes)
         return out
@@ -6488,8 +6491,10 @@ def unsqueeze(input, axes, name=None):
            item.numpy().item(0) if isinstance(item, Variable) else item
            for item in axes
        ]
-        out, _ = _C_ops.unsqueeze2(input, 'axes', axes)
-        return out
+        if _in_legacy_dygraph():
+            out, _ = _C_ops.unsqueeze2(input, 'axes', axes)
+            return out
+        return _C_ops.final_state_unsqueeze(input, axes)[1]
     check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
     check_variable_and_dtype(input, 'input', [
@@ -8910,7 +8915,9 @@ def log(x, name=None):
             res = paddle.log(x)
             # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_log(x)
+    if _in_legacy_dygraph():
         return _C_ops.log(x)
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
......
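The squeeze, unsqueeze, and log changes above all apply the same dispatch pattern: the new eager mode ("final state") calls the generated final_state op, legacy dygraph keeps the old _C_ops call, and static graph falls through to the existing LayerHelper code. A minimal sketch of that pattern, using log as the example; the function name log_dispatch_example is illustrative:

    from paddle import _C_ops
    from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph

    def log_dispatch_example(x):
        if in_dygraph_mode():
            # New eager path: generated final-state op.
            return _C_ops.final_state_log(x)
        if _in_legacy_dygraph():
            # Old dygraph path: legacy C++ op call.
            return _C_ops.log(x)
        # Static graph: fall through to the existing LayerHelper-based code,
        # which is omitted in this sketch.
        raise NotImplementedError('static-graph branch omitted in this sketch')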
@@ -50,6 +50,7 @@ class TestActivation(OpTest):
         self.op_type = "exp"
         self.init_dtype()
         self.init_kernel_type()
+        self.check_eager = False
         np.random.seed(2049)
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
@@ -59,12 +60,18 @@ class TestActivation(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output()
+        check_eager = False
+        if hasattr(self, 'check_eager'):
+            check_eager = self.check_eager
+        self.check_output(check_eager=check_eager)
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        check_eager = False
+        if hasattr(self, 'check_eager'):
+            check_eager = self.check_eager
+        self.check_grad(['X'], 'Out', check_eager=check_eager)
     def init_dtype(self):
         self.dtype = np.float64
@@ -876,6 +883,8 @@ def ref_softshrink(x, threshold=0.5):
 class TestSoftshrink(TestActivation):
     def setUp(self):
         self.op_type = "softshrink"
+        self.check_eager = True
+        self.python_api = paddle.nn.functional.softshrink
         self.init_dtype()
         threshold = 0.8
@@ -890,7 +899,7 @@ class TestSoftshrink(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 class TestSoftshrinkAPI(unittest.TestCase):
@@ -1050,6 +1059,8 @@ class TestAbs(TestActivation):
 class TestCeil(TestActivation):
     def setUp(self):
         self.op_type = "ceil"
+        self.check_eager = True
+        self.python_api = paddle.ceil
         self.init_dtype()
         np.random.seed(1024)
@@ -1067,6 +1078,8 @@ class TestCeil(TestActivation):
 class TestFloor(TestActivation):
     def setUp(self):
         self.op_type = "floor"
+        self.check_eager = True
+        self.python_api = paddle.floor
         self.init_dtype()
         np.random.seed(1024)
@@ -1263,6 +1276,8 @@ class TestAtanh(TestActivation):
 class TestRound(TestActivation):
     def setUp(self):
         self.op_type = "round"
+        self.check_eager = True
+        self.python_api = paddle.round
         self.init_dtype()
         np.random.seed(1024)
@@ -2075,6 +2090,8 @@ class TestReciprocal(TestActivation):
 class TestLog(TestActivation):
     def setUp(self):
         self.op_type = "log"
+        self.check_eager = True
+        self.python_api = paddle.log
         self.init_dtype()
         np.random.seed(1024)
@@ -2087,7 +2104,7 @@ class TestLog(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
     def test_error(self):
         in1 = fluid.layers.data(
@@ -2102,6 +2119,8 @@ class TestLog(TestActivation):
 class TestLog2(TestActivation):
     def setUp(self):
         self.op_type = "log2"
+        self.check_eager = True
+        self.python_api = paddle.log2
         self.init_dtype()
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
@@ -2113,7 +2132,7 @@ class TestLog2(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
     def test_error(self):
         in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
@@ -2151,6 +2170,8 @@ class TestLog2(TestActivation):
 class TestLog10(TestActivation):
     def setUp(self):
         self.op_type = "log10"
+        self.check_eager = True
+        self.python_api = paddle.log10
         self.init_dtype()
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
@@ -2162,7 +2183,7 @@ class TestLog10(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
     def test_error(self):
         in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
@@ -2200,6 +2221,8 @@ class TestLog10(TestActivation):
 class TestLog1p(TestActivation):
     def setUp(self):
         self.op_type = "log1p"
+        self.check_eager = True
+        self.python_api = paddle.log1p
         self.init_dtype()
         np.random.seed(1024)
@@ -2212,7 +2235,7 @@ class TestLog1p(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
     def test_api(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
@@ -2298,6 +2321,8 @@ class TestSquareBF16(OpTest):
 class TestPow(TestActivation):
     def setUp(self):
         self.op_type = "pow"
+        self.python_api = paddle.pow
+        self.check_eager = False
         self.init_dtype()
         np.random.seed(1024)
@@ -2311,12 +2336,14 @@ class TestPow(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
 class TestPow_factor_tensor(TestActivation):
     def setUp(self):
         self.op_type = "pow"
+        self.check_eager = False
+        self.python_api = paddle.pow
         self.init_dtype()
         np.random.seed(1024)
@@ -2332,12 +2359,12 @@ class TestPow_factor_tensor(TestActivation):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
     def test_api(self):
         input = np.random.uniform(1, 2, [11, 17]).astype("float32")
......
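In the activation-test hunks above, the base TestActivation now defaults check_eager to False and reads the flag via hasattr in test_check_output/test_check_grad, so individual subclasses opt in by setting the flag together with python_api. A short sketch of the opt-in, modeled on the TestCeil hunk; the class name is illustrative and the inputs are elided:

    import paddle

    class TestCeilEagerExample(TestActivation):  # illustrative; mirrors TestCeil above
        def setUp(self):
            self.op_type = "ceil"
            # Opt in: the base test only forwards check_eager=True when the
            # subclass sets this flag and names the matching Python API.
            self.check_eager = True
            self.python_api = paddle.ceil
            self.init_dtype()
            # ... inputs/outputs set up as in the original TestCeil ...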
@@ -29,6 +29,7 @@ class TestAllcloseOp(OpTest):
     def setUp(self):
         self.set_args()
         self.op_type = "allclose"
+        self.python_api = paddle.allclose
         self.inputs = {
             'Input': self.input,
             'Other': self.other,
@@ -48,7 +49,7 @@ class TestAllcloseOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 class TestAllcloseOpException(TestAllcloseOp):
@@ -56,28 +57,28 @@ class TestAllcloseOpException(TestAllcloseOp):
         def test_rtol_num():
             self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_rtol_num)
         def test_rtol_type():
             self.inputs['Rtol'] = np.array([5]).astype("int32")
             self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_rtol_type)
         def test_atol_num():
             self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_atol_num)
         def test_atol_type():
             self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([8]).astype("int32")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_atol_type)
......
@@ -46,7 +46,7 @@ class TestComplexAbsOp(OpTest):
         self.grad_x = self.grad_out * (self.x / np.abs(self.x))
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output(check_eager=False)
     def test_check_grad(self):
         self.check_grad(
@@ -54,7 +54,7 @@ class TestComplexAbsOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True)
+            check_eager=False)
 class TestComplexAbsOpZeroValues(OpTest):
@@ -80,7 +80,7 @@ class TestComplexAbsOpZeroValues(OpTest):
         self.grad_x = np.zeros(self.shape, self.dtype)
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output(check_eager=False)
     def test_check_grad(self):
         self.check_grad(
@@ -88,7 +88,7 @@ class TestComplexAbsOpZeroValues(OpTest):
             'Out',
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True)
+            check_eager=False)
 class TestAbs(unittest.TestCase):
@@ -133,7 +133,7 @@ class TestRealAbsOp(OpTest):
         self.grad_x = self.grad_out * (self.x / np.abs(self.x))
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output(check_eager=False)
     def test_check_grad(self):
         self.check_grad(
@@ -141,7 +141,7 @@ class TestRealAbsOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True)
+            check_eager=False)
 if __name__ == '__main__':
......
@@ -73,6 +73,7 @@ class TestCumprod(OpTest):
         self.init_params()
         self.init_dtype()
         self.op_type = "cumprod"
+        self.python_api = paddle.cumprod
         self.inputs = {'X': None}
         self.outputs = {'Out': None}
         self.attrs = {'dim': None}
@@ -110,7 +111,7 @@ class TestCumprod(OpTest):
         for dim in range(-len(self.shape), len(self.shape)):
             for zero_num in self.zero_nums:
                 self.prepare_inputs_outputs_attrs(dim, zero_num)
-                self.check_output()
+                self.check_output(check_eager=True)
     # test backward.
     def test_check_grad(self):
@@ -119,13 +120,14 @@ class TestCumprod(OpTest):
                 self.prepare_inputs_outputs_attrs(dim, zero_num)
                 self.init_grad_input_output(dim)
                 if self.dtype == np.float64:
-                    self.check_grad(['X'], 'Out')
+                    self.check_grad(['X'], 'Out', check_eager=True)
                 else:
                     self.check_grad(
                         ['X'],
                         'Out',
                         user_defined_grads=[self.grad_x],
-                        user_defined_grad_outputs=[self.grad_out])
+                        user_defined_grad_outputs=[self.grad_out],
+                        check_eager=True)
     # test float32 case.
......
@@ -125,6 +125,7 @@ class TestElementwiseFmaxOp(OpTest):
     def setUp(self):
         """setUp"""
         self.op_type = "elementwise_fmax"
+        self.python_api = paddle.fmax
         # If x and y have the same value, the max() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
@@ -136,21 +137,29 @@ class TestElementwiseFmaxOp(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
+            ['Y'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set("X"),
+            check_eager=True)
     def test_check_grad_ingore_y(self):
         """test_check_grad_ingore_y"""
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
+            ['X'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set('Y'),
+            check_eager=True)
 class TestElementwiseFmax2Op(OpTest):
@@ -159,6 +168,7 @@ class TestElementwiseFmax2Op(OpTest):
     def setUp(self):
         """setUp"""
         self.op_type = "elementwise_fmax"
+        self.python_api = paddle.fmax
         # If x and y have the same value, the max() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
@@ -172,18 +182,26 @@ class TestElementwiseFmax2Op(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
+            ['Y'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set("X"),
+            check_eager=True)
     def test_check_grad_ingore_y(self):
         """test_check_grad_ingore_y"""
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
+            ['X'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set('Y'),
+            check_eager=True)
@@ -127,6 +127,7 @@ class TestElementwiseFminOp(OpTest):
     def setUp(self):
         """setUp"""
         self.op_type = "elementwise_fmin"
+        self.python_api = paddle.fmin
         # If x and y have the same value, the min() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
@@ -138,21 +139,29 @@ class TestElementwiseFminOp(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
+            ['Y'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set("X"),
+            check_eager=True)
     def test_check_grad_ingore_y(self):
         """test_check_grad_ingore_y"""
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
+            ['X'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set('Y'),
+            check_eager=True)
 class TestElementwiseFmin2Op(OpTest):
@@ -161,6 +170,7 @@ class TestElementwiseFmin2Op(OpTest):
     def setUp(self):
         """setUp"""
         self.op_type = "elementwise_fmin"
+        self.python_api = paddle.fmin
         # If x and y have the same value, the min() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
@@ -174,21 +184,29 @@ class TestElementwiseFmin2Op(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
+            ['Y'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set("X"),
+            check_eager=True)
     def test_check_grad_ingore_y(self):
         """test_check_grad_ingore_y"""
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
+            ['X'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set('Y'),
+            check_eager=True)
 if __name__ == "__main__":
......
@@ -33,6 +33,7 @@ def gather_numpy(x, index, axis):
 class TestGatherOp(OpTest):
     def setUp(self):
         self.op_type = "gather"
+        self.python_api = paddle.gather
         self.config()
         xnp = np.random.random(self.x_shape).astype(self.x_type)
         self.inputs = {
@@ -42,10 +43,10 @@ class TestGatherOp(OpTest):
         self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)
     def config(self):
         """
@@ -120,6 +121,7 @@ class TestCase6(TestGatherOp):
 class TestGatherBF16Op(OpTest):
     def setUp(self):
         self.op_type = "gather"
+        self.python_api = paddle.gather
         self.dtype = np.uint16
         self.config()
         xnp = np.random.random(self.x_shape).astype(np.float32)
@@ -134,10 +136,10 @@ class TestGatherBF16Op(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=False)
     def config(self):
         """
@@ -153,6 +155,7 @@ class TestGatherBF16Op(OpTest):
 class TestGatherOp1(OpTest):
     def setUp(self):
         self.op_type = "gather"
+        self.python_api = paddle.gather
         self.config()
         xnp = np.random.random(self.x_shape).astype(self.x_type)
         axis_np = np.array(self.axis).astype(self.index_type)
@@ -162,10 +165,10 @@ class TestGatherOp1(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)
     def config(self):
         """
......
@@ -30,6 +30,7 @@ class TestIscloseOp(OpTest):
         paddle.enable_static()
         self.set_args()
         self.op_type = "isclose"
+        self.python_api = paddle.isclose
         self.inputs = {
             'Input': self.input,
             'Other': self.other,
@@ -49,7 +50,7 @@ class TestIscloseOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 class TestIscloseOpException(TestIscloseOp):
@@ -57,28 +58,28 @@ class TestIscloseOpException(TestIscloseOp):
         def test_rtol_num():
             self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_rtol_num)
         def test_rtol_type():
             self.inputs['Rtol'] = np.array([5]).astype("int32")
             self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_rtol_type)
         def test_atol_num():
             self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_atol_num)
         def test_atol_type():
            self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
            self.inputs['Atol'] = np.array([8]).astype("int32")
-            self.check_output()
+            self.check_output(check_eager=True)
         self.assertRaises(ValueError, test_atol_type)
@@ -211,7 +212,7 @@ class TestIscloseOpFloat64(TestIscloseOp):
         self.equal_nan = False
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 class TestIscloseOpLargeDimInput(TestIscloseOp):
......
@@ -17,6 +17,7 @@ import paddle
 import unittest
 import numpy as np
 from op_test import OpTest
+from paddle.nn.functional import kl_div
 def kldiv_loss(x, target, reduction):
@@ -40,6 +41,7 @@ class TestKLDivLossOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'kldiv_loss'
+        self.python_api = kl_div
         x = np.random.uniform(-10, 10, self.x_shape).astype('float64')
         target = np.random.uniform(-10, 10, self.x_shape).astype('float64')
@@ -53,10 +55,11 @@ class TestKLDivLossOp(OpTest):
         self.outputs = {'Loss': loss.astype('float64')}
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad(self):
-        self.check_grad(['X'], 'Loss', no_grad_set=set(["Target"]))
+        self.check_grad(
+            ['X'], 'Loss', no_grad_set=set(["Target"]), check_eager=True)
     def initTestCase(self):
         self.x_shape = (4, 5, 5)
......
@@ -41,6 +41,7 @@ class TestKthvalueOp(OpTest):
     def setUp(self):
         self.op_type = "kthvalue"
+        self.python_api = paddle.kthvalue
         self.dtype = np.float64
         self.input_data = np.random.random((2, 1, 2, 4, 10))
         self.init_args()
@@ -52,11 +53,11 @@ class TestKthvalueOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad(self):
         paddle.enable_static()
-        self.check_grad(set(['X']), 'Out')
+        self.check_grad(set(['X']), 'Out', check_eager=True)
 class TestKthvalueOpWithKeepdim(OpTest):
@@ -67,6 +68,7 @@ class TestKthvalueOpWithKeepdim(OpTest):
     def setUp(self):
         self.init_args()
         self.op_type = "kthvalue"
+        self.python_api = paddle.kthvalue
         self.dtype = np.float64
         self.input_data = np.random.random((1, 3, 2, 4, 10))
         self.inputs = {'X': self.input_data}
@@ -77,11 +79,11 @@ class TestKthvalueOpWithKeepdim(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad(self):
         paddle.enable_static()
-        self.check_grad(set(['X']), 'Out')
+        self.check_grad(set(['X']), 'Out', check_eager=True)
 class TestKthvalueOpKernels(unittest.TestCase):
......
@@ -24,6 +24,7 @@ paddle.enable_static()
 class TestLgammaOp(OpTest):
     def setUp(self):
         self.op_type = 'lgamma'
+        self.python_api = paddle.lgamma
         self.init_dtype_type()
         shape = (5, 20)
         data = np.random.random(shape).astype(self.dtype) + 1
@@ -38,10 +39,10 @@ class TestLgammaOp(OpTest):
         self.dtype = np.float64
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7, check_eager=True)
 class TestLgammaOpFp32(TestLgammaOp):
@@ -49,7 +50,8 @@ class TestLgammaOpFp32(TestLgammaOp):
         self.dtype = np.float32
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=0.005)
+        self.check_grad(
+            ['X'], 'Out', numeric_grad_delta=0.005, check_eager=True)
 if __name__ == "__main__":
......
@@ -42,6 +42,7 @@ def ref_log_softmax_grad(x, axis):
 class TestLogSoftmaxOp(OpTest):
     def setUp(self):
         self.op_type = 'log_softmax'
+        self.python_api = F.log_softmax
         self.dtype = 'float64'
         self.shape = [2, 3, 4, 5]
         self.axis = -1
@@ -59,10 +60,11 @@ class TestLogSoftmaxOp(OpTest):
         pass
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
+        self.check_grad(
+            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True)
 class TestLogSoftmaxShape(TestLogSoftmaxOp):
@@ -80,6 +82,7 @@ class TestLogSoftmaxAxis(TestLogSoftmaxOp):
 class TestLogSoftmaxBF16Op(OpTest):
     def setUp(self):
         self.op_type = 'log_softmax'
+        self.python_api = F.log_softmax
         self.dtype = np.uint16
         self.shape = [2, 3, 4, 5]
         self.axis = -1
@@ -94,12 +97,14 @@ class TestLogSoftmaxBF16Op(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_eager=True)
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], ['Out'], user_defined_grads=[self.x_grad])
+            place, ['X'], ['Out'],
+            user_defined_grads=[self.x_grad],
+            check_eager=True)
 class TestNNLogSoftmaxAPI(unittest.TestCase):
......
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest, skip_check_grad_ci, check_out_dtype
 import paddle
+from paddle.fluid.framework import _test_eager_guard
 import paddle.fluid.core as core
@@ -86,6 +87,10 @@ class ApiMaxTest(unittest.TestCase):
         z_expected = np.array(np.max(np_x, axis=0))
         self.assertEqual((np_z == z_expected).all(), True)
+    def test_eager_api(self):
+        with _test_eager_guard():
+            self.test_imperative_api()
     def test_big_dimension(self):
         paddle.disable_static()
         x = paddle.rand(shape=[2, 2, 2, 2, 2, 2, 2])
......
@@ -25,9 +25,22 @@ from paddle.fluid import Program, program_guard
 np.random.seed(10)
+def mean_wrapper(x, axis=None, keepdim=False, reduce_all=False):
+    if reduce_all == True:
+        return paddle.mean(x, range(len(x.shape)), keepdim)
+    return paddle.mean(x, axis, keepdim)
+def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False):
+    if reduce_all == True:
+        return paddle.mean(x, range(len(x.shape)), keepdim)
+    return paddle.mean(x, axis, keepdim)
 class TestMeanOp(OpTest):
     def setUp(self):
         self.op_type = "mean"
+        self.python_api = mean_wrapper
         self.dtype = np.float64
         self.init_dtype_type()
         self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
@@ -37,10 +50,10 @@ class TestMeanOp(OpTest):
         pass
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
     def test_checkout_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 class TestMeanOpError(unittest.TestCase):
@@ -117,6 +130,7 @@ def ref_reduce_mean_grad(x, axis, dtype):
 class TestReduceMeanOp(OpTest):
     def setUp(self):
         self.op_type = 'reduce_mean'
+        self.python_api = reduce_mean_wrapper
         self.dtype = 'float64'
         self.shape = [2, 3, 4, 5]
         self.axis = [0]
@@ -145,7 +159,7 @@ class TestReduceMeanOp(OpTest):
     def test_check_output(self):
         if self.dtype != 'float16':
-            self.check_output()
+            self.check_output(check_eager=True)
         else:
             if not core.is_compiled_with_cuda():
                 return
@@ -154,7 +168,7 @@ class TestReduceMeanOp(OpTest):
     def test_check_grad(self):
         if self.dtype != 'float16':
-            self.check_grad(['X'], ['Out'])
+            self.check_grad(['X'], ['Out'], check_eager=True)
         else:
             return
             if not core.is_compiled_with_cuda():
@@ -175,6 +189,7 @@ class TestReduceMeanOp(OpTest):
 class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
     def setUp(self):
         self.op_type = 'reduce_mean'
+        self.python_api = reduce_mean_wrapper
         self.dtype = 'float64'
         self.shape = [2, 3, 4, 5]
......
@@ -19,6 +19,7 @@ import numpy as np
 from op_test import OpTest, skip_check_grad_ci, check_out_dtype
 import paddle
 import paddle.fluid.core as core
+from paddle.fluid.framework import _test_eager_guard
 class ApiMinTest(unittest.TestCase):
@@ -86,6 +87,10 @@ class ApiMinTest(unittest.TestCase):
         z_expected = np.array(np.min(np_x, axis=0))
         self.assertEqual((np_z == z_expected).all(), True)
+    def test_eager_api(self):
+        with _test_eager_guard():
+            self.test_imperative_api()
 class TestOutDtype(unittest.TestCase):
     def test_min(self):
......
...@@ -62,6 +62,7 @@ class TestModeOp(OpTest): ...@@ -62,6 +62,7 @@ class TestModeOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "mode" self.op_type = "mode"
self.python_api = paddle.mode
self.dtype = np.float64 self.dtype = np.float64
np.random.seed(666) np.random.seed(666)
self.input_data = np.random.rand(2, 64, 1) self.input_data = np.random.rand(2, 64, 1)
...@@ -73,11 +74,11 @@ class TestModeOp(OpTest): ...@@ -73,11 +74,11 @@ class TestModeOp(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out') self.check_grad(set(['X']), 'Out', check_eager=True)
class TestModeOpLastdim(OpTest): class TestModeOpLastdim(OpTest):
...@@ -86,6 +87,7 @@ class TestModeOpLastdim(OpTest): ...@@ -86,6 +87,7 @@ class TestModeOpLastdim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "mode" self.op_type = "mode"
self.python_api = paddle.mode
self.dtype = np.float64 self.dtype = np.float64
np.random.seed(666) np.random.seed(666)
self.input_data = np.random.rand(2, 1, 1, 2, 30) self.input_data = np.random.rand(2, 1, 1, 2, 30)
...@@ -97,11 +99,11 @@ class TestModeOpLastdim(OpTest): ...@@ -97,11 +99,11 @@ class TestModeOpLastdim(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out') self.check_grad(set(['X']), 'Out', check_eager=True)
class TestModeOpKernels(unittest.TestCase): class TestModeOpKernels(unittest.TestCase):
......
...@@ -20,6 +20,24 @@ from op_test import OpTest, convert_float_to_uint16 ...@@ -20,6 +20,24 @@ from op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
# hack method for test p_norm final state
# (i.e. a test-only wrapper used to exercise the p_norm final-state API)
def p_norm_python_api(x,
p=2.0,
axis=-1,
epsilon=1e-12,
keepdim=False,
as_vector=False):
if in_dygraph_mode():
return _C_ops.final_state_p_norm(x, p, axis, epsilon, keepdim,
as_vector)
if _in_legacy_dygraph():
return _C_ops.p_norm(x, 'axis', axis, 'porder',
float(p), 'keepdim', keepdim, 'epsilon', epsilon,
'as_vector', as_vector)
def p_norm(x, axis, porder, keepdims=False, reduce_all=False): def p_norm(x, axis, porder, keepdims=False, reduce_all=False):
...@@ -110,6 +128,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp): ...@@ -110,6 +128,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
class TestPnormOp(OpTest): class TestPnormOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "p_norm" self.op_type = "p_norm"
self.python_api = p_norm_python_api
self.init_test_case() self.init_test_case()
x = (np.random.random(self.shape) + 0.5).astype(self.dtype) x = (np.random.random(self.shape) + 0.5).astype(self.dtype)
norm = p_norm(x, self.axis, self.porder, self.keepdim, self.asvector) norm = p_norm(x, self.axis, self.porder, self.keepdim, self.asvector)
...@@ -125,10 +144,10 @@ class TestPnormOp(OpTest): ...@@ -125,10 +144,10 @@ class TestPnormOp(OpTest):
self.gradient = self.calc_gradient() self.gradient = self.calc_gradient()
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
def init_test_case(self): def init_test_case(self):
self.shape = [2, 3, 4, 5] self.shape = [2, 3, 4, 5]
...@@ -287,6 +306,7 @@ class TestPnormOpFP161(TestPnormOpFP16): ...@@ -287,6 +306,7 @@ class TestPnormOpFP161(TestPnormOpFP16):
class TestPnormBF16Op(OpTest): class TestPnormBF16Op(OpTest):
def setUp(self): def setUp(self):
self.op_type = "p_norm" self.op_type = "p_norm"
self.python_api = p_norm_python_api
self.init_test_case() self.init_test_case()
self.x = (np.random.random(self.shape) + 0.5).astype(np.float32) self.x = (np.random.random(self.shape) + 0.5).astype(np.float32)
self.norm = p_norm(self.x, self.axis, self.porder, self.keepdim, self.norm = p_norm(self.x, self.axis, self.porder, self.keepdim,
...@@ -304,12 +324,15 @@ class TestPnormBF16Op(OpTest): ...@@ -304,12 +324,15 @@ class TestPnormBF16Op(OpTest):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3) self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
place, ['X'], 'Out', user_defined_grads=self.gradient) place, ['X'],
'Out',
user_defined_grads=self.gradient,
check_eager=True)
def init_test_case(self): def init_test_case(self):
self.shape = [2, 3, 4, 5] self.shape = [2, 3, 4, 5]
......
...@@ -20,6 +20,7 @@ import paddle.nn.functional as F ...@@ -20,6 +20,7 @@ import paddle.nn.functional as F
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
import numpy as np import numpy as np
from paddle.fluid.framework import _test_eager_guard
def p_normalize(x, axis=1, p=2, epsilon=1e-12, keepdims=True): def p_normalize(x, axis=1, p=2, epsilon=1e-12, keepdims=True):
...@@ -87,6 +88,12 @@ class TestNNFunctionalNormalize(unittest.TestCase): ...@@ -87,6 +88,12 @@ class TestNNFunctionalNormalize(unittest.TestCase):
with fluid.program_guard(fluid.Program()): with fluid.program_guard(fluid.Program()):
self.run_static() self.run_static()
def test_cpu_eager(self):
with _test_eager_guard():
paddle.disable_static(place=paddle.fluid.CPUPlace())
self.run_imperative()
paddle.enable_static()
def test_gpu(self): def test_gpu(self):
if not fluid.core.is_compiled_with_cuda(): if not fluid.core.is_compiled_with_cuda():
return return
...@@ -98,6 +105,15 @@ class TestNNFunctionalNormalize(unittest.TestCase): ...@@ -98,6 +105,15 @@ class TestNNFunctionalNormalize(unittest.TestCase):
with fluid.program_guard(fluid.Program()): with fluid.program_guard(fluid.Program()):
self.run_static(use_gpu=True) self.run_static(use_gpu=True)
def test_gpu_eager(self):
with _test_eager_guard():
if not fluid.core.is_compiled_with_cuda():
return
paddle.disable_static(place=paddle.fluid.CUDAPlace(0))
self.run_imperative()
paddle.enable_static()
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
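The eager tests added above simply rerun the existing imperative test bodies under the eager guard. A condensed sketch of that idiom, assuming a test case that already has an imperative helper method (the class and check here are illustrative):

import unittest

import paddle
from paddle.fluid.framework import _test_eager_guard


class EagerReuseExample(unittest.TestCase):
    def run_imperative(self):
        # Any dygraph-only checks; here a trivial normalize-style call.
        x = paddle.to_tensor([[3.0, 4.0]])
        y = paddle.nn.functional.normalize(x, axis=1)
        self.assertEqual(y.shape, [1, 2])

    def test_dygraph(self):
        paddle.disable_static()
        self.run_imperative()
        paddle.enable_static()

    def test_eager(self):
        # Same body, but executed under the eager (final state) mode guard.
        paddle.disable_static()
        with _test_eager_guard():
            self.run_imperative()
        paddle.enable_static()


if __name__ == '__main__':
    unittest.main()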
...@@ -30,6 +30,7 @@ class TestPad3dOp(OpTest): ...@@ -30,6 +30,7 @@ class TestPad3dOp(OpTest):
self.variable_paddings = False self.variable_paddings = False
self.initTestCase() self.initTestCase()
self.op_type = "pad3d" self.op_type = "pad3d"
self.python_api = paddle.nn.functional.pad
self.inputs = {'X': np.random.random(self.shape).astype("float64")} self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = {} self.attrs = {}
if self.variable_paddings: if self.variable_paddings:
...@@ -72,10 +73,10 @@ class TestPad3dOp(OpTest): ...@@ -72,10 +73,10 @@ class TestPad3dOp(OpTest):
self.outputs = {'Out': out} self.outputs = {'Out': out}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
def initTestCase(self): def initTestCase(self):
self.shape = (2, 3, 4, 5, 6) self.shape = (2, 3, 4, 5, 6)
......
...@@ -157,6 +157,7 @@ class PReluTest(OpTest): ...@@ -157,6 +157,7 @@ class PReluTest(OpTest):
self.init_input_shape() self.init_input_shape()
self.init_attr() self.init_attr()
self.op_type = "prelu" self.op_type = "prelu"
self.python_api = paddle.nn.functional.prelu
x_np = np.random.uniform(-1, 1, self.x_shape).astype(self.dtype) x_np = np.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
# Since zero point in prelu is not differentiable, avoid randomize # Since zero point in prelu is not differentiable, avoid randomize
...@@ -207,10 +208,10 @@ class PReluTest(OpTest): ...@@ -207,10 +208,10 @@ class PReluTest(OpTest):
self.attrs = {'mode': "channel", "data_format": "NCHW"} self.attrs = {'mode': "channel", "data_format": "NCHW"}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=False)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Alpha'], 'Out') self.check_grad(['X', 'Alpha'], 'Out', check_eager=False)
@skip_check_grad_ci( @skip_check_grad_ci(
...@@ -373,7 +374,8 @@ def create_test_fp16_class(parent, ...@@ -373,7 +374,8 @@ def create_test_fp16_class(parent,
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_output_with_place(place, atol=atol) self.check_output_with_place(
place, atol=atol, check_eager=False)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -381,7 +383,8 @@ def create_test_fp16_class(parent, ...@@ -381,7 +383,8 @@ def create_test_fp16_class(parent,
self.check_grad_with_place( self.check_grad_with_place(
place, ['X', 'Alpha'], place, ['X', 'Alpha'],
'Out', 'Out',
max_relative_error=max_relative_error) max_relative_error=max_relative_error,
check_eager=False)
cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op") cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
TestPReluFp16Case.__name__ = cls_name TestPReluFp16Case.__name__ = cls_name
......
...@@ -172,6 +172,7 @@ class TestMaxOp(OpTest): ...@@ -172,6 +172,7 @@ class TestMaxOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_max" self.op_type = "reduce_max"
self.python_api = paddle.max
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-1]} self.attrs = {'dim': [-1]}
self.outputs = { self.outputs = {
...@@ -179,7 +180,7 @@ class TestMaxOp(OpTest): ...@@ -179,7 +180,7 @@ class TestMaxOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
@skip_check_grad_ci( @skip_check_grad_ci(
...@@ -190,6 +191,7 @@ class TestMinOp(OpTest): ...@@ -190,6 +191,7 @@ class TestMinOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_min" self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [2]} self.attrs = {'dim': [2]}
self.outputs = { self.outputs = {
...@@ -197,7 +199,7 @@ class TestMinOp(OpTest): ...@@ -197,7 +199,7 @@ class TestMinOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestMin6DOp(OpTest): class TestMin6DOp(OpTest):
...@@ -205,6 +207,7 @@ class TestMin6DOp(OpTest): ...@@ -205,6 +207,7 @@ class TestMin6DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_min" self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = { self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64") 'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
} }
...@@ -214,7 +217,7 @@ class TestMin6DOp(OpTest): ...@@ -214,7 +217,7 @@ class TestMin6DOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestMin8DOp(OpTest): class TestMin8DOp(OpTest):
...@@ -222,6 +225,7 @@ class TestMin8DOp(OpTest): ...@@ -222,6 +225,7 @@ class TestMin8DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_min" self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = { self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64") 'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
} }
...@@ -231,7 +235,7 @@ class TestMin8DOp(OpTest): ...@@ -231,7 +235,7 @@ class TestMin8DOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestProdOp(OpTest): class TestProdOp(OpTest):
...@@ -302,17 +306,19 @@ class TestProd8DOp(OpTest): ...@@ -302,17 +306,19 @@ class TestProd8DOp(OpTest):
class TestAllOp(OpTest): class TestAllOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_all" self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].all()} self.outputs = {'Out': self.inputs['X'].all()}
self.attrs = {'reduce_all': True} self.attrs = {'reduce_all': True}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAll8DOp(OpTest): class TestAll8DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_all" self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = { self.inputs = {
'X': np.random.randint(0, 2, 'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
...@@ -321,23 +327,25 @@ class TestAll8DOp(OpTest): ...@@ -321,23 +327,25 @@ class TestAll8DOp(OpTest):
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAllOpWithDim(OpTest): class TestAllOpWithDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_all" self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, )} self.attrs = {'dim': (1, )}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAll8DOpWithDim(OpTest): class TestAll8DOpWithDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_all" self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = { self.inputs = {
'X': np.random.randint(0, 2, 'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
...@@ -346,12 +354,13 @@ class TestAll8DOpWithDim(OpTest): ...@@ -346,12 +354,13 @@ class TestAll8DOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAllOpWithKeepDim(OpTest): class TestAllOpWithKeepDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_all" self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1], 'keep_dim': True} self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = { self.outputs = {
...@@ -360,12 +369,13 @@ class TestAllOpWithKeepDim(OpTest): ...@@ -360,12 +369,13 @@ class TestAllOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAll8DOpWithKeepDim(OpTest): class TestAll8DOpWithKeepDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_all" self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = { self.inputs = {
'X': np.random.randint(0, 2, 'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
...@@ -377,7 +387,7 @@ class TestAll8DOpWithKeepDim(OpTest): ...@@ -377,7 +387,7 @@ class TestAll8DOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAllOpError(unittest.TestCase): class TestAllOpError(unittest.TestCase):
...@@ -395,17 +405,19 @@ class TestAllOpError(unittest.TestCase): ...@@ -395,17 +405,19 @@ class TestAllOpError(unittest.TestCase):
class TestAnyOp(OpTest): class TestAnyOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_any" self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].any()} self.outputs = {'Out': self.inputs['X'].any()}
self.attrs = {'reduce_all': True} self.attrs = {'reduce_all': True}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAny8DOp(OpTest): class TestAny8DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_any" self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = { self.inputs = {
'X': np.random.randint(0, 2, 'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
...@@ -414,23 +426,25 @@ class TestAny8DOp(OpTest): ...@@ -414,23 +426,25 @@ class TestAny8DOp(OpTest):
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAnyOpWithDim(OpTest): class TestAnyOpWithDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_any" self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]} self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].any(axis=1)} self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAny8DOpWithDim(OpTest): class TestAny8DOpWithDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_any" self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = { self.inputs = {
'X': np.random.randint(0, 2, 'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
...@@ -439,12 +453,13 @@ class TestAny8DOpWithDim(OpTest): ...@@ -439,12 +453,13 @@ class TestAny8DOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAnyOpWithKeepDim(OpTest): class TestAnyOpWithKeepDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_any" self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, ), 'keep_dim': True} self.attrs = {'dim': (1, ), 'keep_dim': True}
self.outputs = { self.outputs = {
...@@ -453,12 +468,13 @@ class TestAnyOpWithKeepDim(OpTest): ...@@ -453,12 +468,13 @@ class TestAnyOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAny8DOpWithKeepDim(OpTest): class TestAny8DOpWithKeepDim(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_any" self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = { self.inputs = {
'X': np.random.randint(0, 2, 'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
...@@ -470,7 +486,7 @@ class TestAny8DOpWithKeepDim(OpTest): ...@@ -470,7 +486,7 @@ class TestAny8DOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestAnyOpError(unittest.TestCase): class TestAnyOpError(unittest.TestCase):
...@@ -600,6 +616,7 @@ class TestReduceMaxOpMultiAxises(OpTest): ...@@ -600,6 +616,7 @@ class TestReduceMaxOpMultiAxises(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_max" self.op_type = "reduce_max"
self.python_api = paddle.max
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1]} self.attrs = {'dim': [-2, -1]}
self.outputs = { self.outputs = {
...@@ -607,7 +624,7 @@ class TestReduceMaxOpMultiAxises(OpTest): ...@@ -607,7 +624,7 @@ class TestReduceMaxOpMultiAxises(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
@skip_check_grad_ci( @skip_check_grad_ci(
...@@ -618,6 +635,7 @@ class TestReduceMinOpMultiAxises(OpTest): ...@@ -618,6 +635,7 @@ class TestReduceMinOpMultiAxises(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_min" self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1, 2]} self.attrs = {'dim': [1, 2]}
self.outputs = { self.outputs = {
...@@ -625,7 +643,7 @@ class TestReduceMinOpMultiAxises(OpTest): ...@@ -625,7 +643,7 @@ class TestReduceMinOpMultiAxises(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestKeepDimReduceSumMultiAxises(OpTest): class TestKeepDimReduceSumMultiAxises(OpTest):
......
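The reduce_all/any/min/max tests above only switch check_eager on; at the public API level the same reductions are reachable directly. A small usage sketch of the boolean reductions these tests cover (input values are illustrative):

import paddle

x = paddle.to_tensor([[True, False, True],
                      [True, True, True]])

print(paddle.all(x))                        # False: not every element is True
print(paddle.all(x, axis=1))                # [False, True], per-row reduction
print(paddle.any(x, axis=0, keepdim=True))  # [[True, True, True]], rank kept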
...@@ -27,6 +27,10 @@ paddle.enable_static() ...@@ -27,6 +27,10 @@ paddle.enable_static()
class TestSqueezeOp(OpTest): class TestSqueezeOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "squeeze2" self.op_type = "squeeze2"
self.python_api = paddle.squeeze
self.python_out_sig = [
"Out"
]  # python_out_sig is the customized output signature.
self.init_test_case() self.init_test_case()
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")} self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs() self.init_attrs()
...@@ -36,10 +40,10 @@ class TestSqueezeOp(OpTest): ...@@ -36,10 +40,10 @@ class TestSqueezeOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=['XShape']) self.check_output(no_check_set=['XShape'], check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out") self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self): def init_test_case(self):
self.ori_shape = (1, 3, 1, 40) self.ori_shape = (1, 3, 1, 40)
......
...@@ -29,6 +29,8 @@ class TestUnsqueezeOp(OpTest): ...@@ -29,6 +29,8 @@ class TestUnsqueezeOp(OpTest):
def setUp(self): def setUp(self):
self.init_test_case() self.init_test_case()
self.op_type = "unsqueeze2" self.op_type = "unsqueeze2"
self.python_api = paddle.unsqueeze
self.python_out_sig = ["Out"]
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")} self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs() self.init_attrs()
self.outputs = { self.outputs = {
...@@ -37,10 +39,10 @@ class TestUnsqueezeOp(OpTest): ...@@ -37,10 +39,10 @@ class TestUnsqueezeOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=["XShape"]) self.check_output(no_check_set=["XShape"], check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out") self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self): def init_test_case(self):
self.ori_shape = (3, 40) self.ori_shape = (3, 40)
...@@ -88,6 +90,8 @@ class TestUnsqueezeOp_AxesTensorList(OpTest): ...@@ -88,6 +90,8 @@ class TestUnsqueezeOp_AxesTensorList(OpTest):
def setUp(self): def setUp(self):
self.init_test_case() self.init_test_case()
self.op_type = "unsqueeze2" self.op_type = "unsqueeze2"
self.python_out_sig = ["Out"]
self.python_api = paddle.unsqueeze
axes_tensor_list = [] axes_tensor_list = []
for index, ele in enumerate(self.axes): for index, ele in enumerate(self.axes):
...@@ -105,10 +109,10 @@ class TestUnsqueezeOp_AxesTensorList(OpTest): ...@@ -105,10 +109,10 @@ class TestUnsqueezeOp_AxesTensorList(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=["XShape"]) self.check_output(no_check_set=["XShape"], check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out") self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self): def init_test_case(self):
self.ori_shape = (20, 5) self.ori_shape = (20, 5)
...@@ -152,6 +156,8 @@ class TestUnsqueezeOp_AxesTensor(OpTest): ...@@ -152,6 +156,8 @@ class TestUnsqueezeOp_AxesTensor(OpTest):
def setUp(self): def setUp(self):
self.init_test_case() self.init_test_case()
self.op_type = "unsqueeze2" self.op_type = "unsqueeze2"
self.python_out_sig = ["Out"]
self.python_api = paddle.unsqueeze
self.inputs = { self.inputs = {
"X": np.random.random(self.ori_shape).astype("float64"), "X": np.random.random(self.ori_shape).astype("float64"),
...@@ -164,10 +170,10 @@ class TestUnsqueezeOp_AxesTensor(OpTest): ...@@ -164,10 +170,10 @@ class TestUnsqueezeOp_AxesTensor(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=["XShape"]) self.check_output(no_check_set=["XShape"], check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out") self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self): def init_test_case(self):
self.ori_shape = (20, 5) self.ori_shape = (20, 5)
......
...@@ -23,7 +23,7 @@ from ...tensor.math import multiply ...@@ -23,7 +23,7 @@ from ...tensor.math import multiply
import warnings import warnings
from ...fluid.layer_helper import LayerHelper from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import convert_np_dtype_to_dtype_ from ...fluid.framework import convert_np_dtype_to_dtype_
from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle import paddle
from paddle import _C_ops, in_dynamic_mode from paddle import _C_ops, in_dynamic_mode
...@@ -519,7 +519,9 @@ def prelu(x, weight, data_format="NCHW", name=None): ...@@ -519,7 +519,9 @@ def prelu(x, weight, data_format="NCHW", name=None):
1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]." 1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
mode = 'channel' mode = 'channel'
if in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_prelu(x, weight, data_format, mode)
if _in_legacy_dygraph():
return _C_ops.prelu(x, weight, 'mode', mode, 'data_format', data_format) return _C_ops.prelu(x, weight, 'mode', mode, 'data_format', data_format)
helper = LayerHelper('prelu', **locals()) helper = LayerHelper('prelu', **locals())
...@@ -578,9 +580,10 @@ def relu_(x, name=None): ...@@ -578,9 +580,10 @@ def relu_(x, name=None):
Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``. Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_relu`. Please refer to :ref:`api_nn_cn_relu`.
""" """
if paddle.fluid.framework._in_eager_mode_: if in_dygraph_mode():
return _C_ops.final_state_relu_(x) return _C_ops.final_state_relu_(x)
return _C_ops.relu_(x) if _in_legacy_dygraph():
return _C_ops.relu_(x)
def log_sigmoid(x, name=None): def log_sigmoid(x, name=None):
...@@ -1092,7 +1095,9 @@ def softshrink(x, threshold=0.5, name=None): ...@@ -1092,7 +1095,9 @@ def softshrink(x, threshold=0.5, name=None):
"The threshold must be no less than zero. Received: {}.".format( "The threshold must be no less than zero. Received: {}.".format(
threshold)) threshold))
if in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_soft_shrink(x, threshold)
if _in_legacy_dygraph():
return _C_ops.softshrink(x, 'lambda', threshold) return _C_ops.softshrink(x, 'lambda', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
...@@ -1371,10 +1376,12 @@ def log_softmax(x, axis=-1, dtype=None, name=None): ...@@ -1371,10 +1376,12 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)): if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype) dtype = convert_np_dtype_to_dtype_(dtype)
if in_dynamic_mode(): if _non_static_mode():
if dtype is not None: if dtype is not None:
x = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype) x = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return _C_ops.log_softmax(x, 'axis', axis) if _in_legacy_dygraph():
return _C_ops.log_softmax(x, 'axis', axis)
return _C_ops.final_state_log_softmax(x, axis)
if dtype is None: if dtype is None:
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
......
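The activation changes above all share the same three-way dispatch: eager mode calls the generated final state op, legacy dygraph keeps the old _C_ops call, and static graph falls through to the existing LayerHelper path. A condensed sketch of that shape using the softshrink case from this diff; softshrink_dispatch is an illustrative name and the static branch is deliberately omitted because this change does not touch it.

from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph


def softshrink_dispatch(x, threshold=0.5):
    # Eager (final state) mode: positional attributes, generated binding.
    if in_dygraph_mode():
        return _C_ops.final_state_soft_shrink(x, threshold)
    # Legacy dygraph: attributes passed as name/value string pairs.
    if _in_legacy_dygraph():
        return _C_ops.softshrink(x, 'lambda', threshold)
    # Static graph: the pre-existing LayerHelper path (unchanged here)
    # would be taken instead.
    raise NotImplementedError('static-graph path omitted in this sketch')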
...@@ -38,6 +38,7 @@ from paddle import _C_ops ...@@ -38,6 +38,7 @@ from paddle import _C_ops
from paddle.framework import in_dynamic_mode from paddle.framework import in_dynamic_mode
from paddle.tensor.creation import full from paddle.tensor.creation import full
from paddle.framework import core from paddle.framework import core
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.static import default_main_program from paddle.static import default_main_program
__all__ = [] __all__ = []
...@@ -1352,8 +1353,11 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): ...@@ -1352,8 +1353,11 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
if in_dynamic_mode(): if in_dynamic_mode():
if isinstance(pad, Variable): if isinstance(pad, Variable):
pad = pad.numpy() pad = pad.numpy()
out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value, if _in_legacy_dygraph():
"data_format", data_format, "name", name) out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
"data_format", data_format, "name", name)
else:
out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
else: else:
attrs = {'mode': mode, 'value': value, 'data_format': data_format} attrs = {'mode': mode, 'value': value, 'data_format': data_format}
inputs = {'X': [x]} inputs = {'X': [x]}
......
...@@ -921,8 +921,11 @@ def kl_div(input, label, reduction='mean', name=None): ...@@ -921,8 +921,11 @@ def kl_div(input, label, reduction='mean', name=None):
label.dtype) == 'float32': label.dtype) == 'float32':
label = paddle.cast(label, 'float64') label = paddle.cast(label, 'float64')
if paddle.in_dynamic_mode(): if _non_static_mode():
out = _C_ops.kldiv_loss(input, label, 'reduction', 'none') if _in_legacy_dygraph():
out = _C_ops.kldiv_loss(input, label, 'reduction', 'none')
else:
out = _C_ops.final_state_kldiv_loss(input, label, 'none')
if reduction == 'mean': if reduction == 'mean':
out = paddle.mean(out) out = paddle.mean(out)
elif reduction == 'sum': elif reduction == 'sum':
......
...@@ -24,6 +24,7 @@ from ...fluid import dygraph_utils ...@@ -24,6 +24,7 @@ from ...fluid import dygraph_utils
import numbers import numbers
from paddle import _C_ops from paddle import _C_ops
from paddle import in_dynamic_mode from paddle import in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
__all__ = [] __all__ = []
...@@ -78,7 +79,12 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None): ...@@ -78,7 +79,12 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
# [[0. 0.24253564 0.37139067] # [[0. 0.24253564 0.37139067]
# [1. 0.97014254 0.9284767 ]] # [1. 0.97014254 0.9284767 ]]
""" """
if in_dynamic_mode(): if in_dygraph_mode():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
out = _C_ops.final_state_p_norm(x, float(p), axis, epsilon, True, False)
return x / _C_ops.elementwise_max(out, eps)
if _in_legacy_dygraph():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype) eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
out = _C_ops.p_norm(x, 'axis', axis, 'porder', out = _C_ops.p_norm(x, 'axis', axis, 'porder',
float(p), 'keepdim', True, 'epsilon', epsilon) float(p), 'keepdim', True, 'epsilon', epsilon)
......
...@@ -20,6 +20,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_type ...@@ -20,6 +20,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_type
from ...fluid.layer_helper import LayerHelper from ...fluid.layer_helper import LayerHelper
from paddle import _C_ops from paddle import _C_ops
from paddle import in_dynamic_mode from paddle import in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
__all__ = [] __all__ = []
...@@ -78,7 +79,12 @@ class PairwiseDistance(Layer): ...@@ -78,7 +79,12 @@ class PairwiseDistance(Layer):
check_type(self.keepdim, 'keepdim', (bool), 'PairwiseDistance') check_type(self.keepdim, 'keepdim', (bool), 'PairwiseDistance')
def forward(self, x, y): def forward(self, x, y):
if in_dynamic_mode(): if in_dygraph_mode():
sub = _C_ops.elementwise_sub(x, y)
return _C_ops.final_state_p_norm(sub, self.p, 1, self.epsilon,
self.keepdim, False)
if _in_legacy_dygraph():
sub = _C_ops.elementwise_sub(x, y) sub = _C_ops.elementwise_sub(x, y)
return _C_ops.p_norm(sub, 'axis', 1, 'porder', self.p, 'keepdim', return _C_ops.p_norm(sub, 'axis', 1, 'porder', self.p, 'keepdim',
self.keepdim, 'epsilon', self.epsilon) self.keepdim, 'epsilon', self.epsilon)
......
...@@ -288,10 +288,16 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): ...@@ -288,10 +288,16 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
axis (int, optional): None for last dimension. axis (int, optional): None for last dimension.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
if axis is None: axis = -1
return _C_ops.final_state_p_norm(input, porder, axis, 1e-12,
keepdim, asvector)
if _in_legacy_dygraph():
if axis is None: axis = -1 if axis is None: axis = -1
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
'keepdim', keepdim, 'asvector', asvector) 'keepdim', keepdim, 'asvector', asvector)
if porder is not None: if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm') check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None: if axis is not None:
......
...@@ -122,11 +122,12 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): ...@@ -122,11 +122,12 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
# [True] # [True]
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_allclose(x, y, rtol, atol, equal_nan)
if _in_legacy_dygraph():
return _C_ops.allclose(x, y, 'rtol', return _C_ops.allclose(x, y, 'rtol',
str(rtol), 'atol', str(rtol), 'atol',
str(atol), 'equal_nan', equal_nan) str(atol), 'equal_nan', equal_nan)
check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose') check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose') check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
check_type(rtol, 'rtol', float, 'allclose') check_type(rtol, 'rtol', float, 'allclose')
...@@ -678,7 +679,9 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): ...@@ -678,7 +679,9 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
# [True, True] # [True, True]
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_isclose(x, y, rtol, atol, equal_nan)
if _in_legacy_dygraph():
return _C_ops.isclose(x, y, 'rtol', return _C_ops.isclose(x, y, 'rtol',
str(rtol), 'atol', str(rtol), 'atol',
str(atol), 'equal_nan', equal_nan) str(atol), 'equal_nan', equal_nan)
......
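After this change the dynamic-graph branches of allclose and isclose pass rtol/atol as scalars instead of strings. A brief usage sketch at the public API level, which is what the new final state branches ultimately serve (values illustrative):

import paddle

x = paddle.to_tensor([10000.0, 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])

# Elementwise comparison vs. a single reduced result.
print(paddle.isclose(x, y, rtol=1e-05, atol=1e-08))   # [True, False]
print(paddle.allclose(x, y, rtol=1e-05, atol=1e-08))  # False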
...@@ -1409,7 +1409,9 @@ def gather(x, index, axis=None, name=None): ...@@ -1409,7 +1409,9 @@ def gather(x, index, axis=None, name=None):
if axis is None: if axis is None:
axis = 0 axis = 0
if paddle.in_dynamic_mode(): #if in_dygraph_mode():
#return _C_ops.final_state_gather(x, index, axis)
if _non_static_mode():
axis = axis.item() if isinstance(axis, paddle.Tensor) else axis axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False) return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
......
...@@ -28,7 +28,7 @@ from paddle.tensor.attribute import _complex_to_real_dtype ...@@ -28,7 +28,7 @@ from paddle.tensor.attribute import _complex_to_real_dtype
import paddle import paddle
from paddle.static import Variable from paddle.static import Variable
from ..framework import core from ..framework import core
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode
from ..framework import _varbase_creator, convert_np_dtype_to_dtype_ from ..framework import _varbase_creator, convert_np_dtype_to_dtype_
from ..fluid.layer_helper import LayerHelper from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
...@@ -150,7 +150,17 @@ def pow(x, y, name=None): ...@@ -150,7 +150,17 @@ def pow(x, y, name=None):
""" """
# in dynamic graph mode # in dynamic graph mode
if paddle.in_dynamic_mode(): #if in_dygraph_mode():
#if isinstance(y, (int, float)):
#return _C_ops.final_state_pow(x, y)
#elif isinstance(y, (paddle.Tensor, Variable)):
#return _elementwise_op_in_dygraph(
#x, y, axis=-1, act=None, op_name='elementwise_pow')
#else:
#raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
#if _in_legacy_dygraph():
if _non_static_mode():
if isinstance(y, (int, float)): if isinstance(y, (int, float)):
return _C_ops.pow(x, 'factor', y) return _C_ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)): elif isinstance(y, (paddle.Tensor, Variable)):
...@@ -719,7 +729,9 @@ def fmax(x, y, name=None): ...@@ -719,7 +729,9 @@ def fmax(x, y, name=None):
op_type = 'elementwise_fmax' op_type = 'elementwise_fmax'
axis = -1 axis = -1
act = None act = None
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_fmax(x, y, axis)
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type) x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
...@@ -780,7 +792,9 @@ def fmin(x, y, name=None): ...@@ -780,7 +792,9 @@ def fmin(x, y, name=None):
op_type = 'elementwise_fmin' op_type = 'elementwise_fmin'
axis = -1 axis = -1
act = None act = None
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_fmin(x, y, axis)
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type) x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
...@@ -1711,7 +1725,11 @@ def max(x, axis=None, keepdim=False, name=None): ...@@ -1711,7 +1725,11 @@ def max(x, axis=None, keepdim=False, name=None):
""" """
reduce_all, axis = _get_reduce_all_value(axis) reduce_all, axis = _get_reduce_all_value(axis)
if paddle.in_dynamic_mode(): if in_dygraph_mode():
if reduce_all:
axis = range(len(x.shape))
return _C_ops.final_state_max(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim, return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all) 'reduce_all', reduce_all)
...@@ -1811,7 +1829,12 @@ def min(x, axis=None, keepdim=False, name=None): ...@@ -1811,7 +1829,12 @@ def min(x, axis=None, keepdim=False, name=None):
""" """
reduce_all, axis = _get_reduce_all_value(axis) reduce_all, axis = _get_reduce_all_value(axis)
if paddle.in_dynamic_mode(): if in_dygraph_mode():
if reduce_all:
axis = range(len(x.shape))
return _C_ops.final_state_min(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim, return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all) 'reduce_all', reduce_all)
...@@ -2081,7 +2104,9 @@ def log1p(x, name=None): ...@@ -2081,7 +2104,9 @@ def log1p(x, name=None):
# [[0.], [0.6931472]] # [[0.], [0.6931472]]
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_log1p(x)
if _in_legacy_dygraph():
return _C_ops.log1p(x) return _C_ops.log1p(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p") check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
...@@ -2130,7 +2155,9 @@ def log2(x, name=None): ...@@ -2130,7 +2155,9 @@ def log2(x, name=None):
res = paddle.log2(x_i) res = paddle.log2(x_i)
print(res) # [1.0] print(res) # [1.0]
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_log2(x)
if _in_legacy_dygraph():
return _C_ops.log2(x) return _C_ops.log2(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2") check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
...@@ -2180,7 +2207,9 @@ def log10(x, name=None): ...@@ -2180,7 +2207,9 @@ def log10(x, name=None):
res = paddle.log10(x_i) res = paddle.log10(x_i)
print(res) # [1.0] print(res) # [1.0]
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_log10(x)
if _in_legacy_dygraph():
return _C_ops.log10(x) return _C_ops.log10(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10") check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
...@@ -2667,7 +2696,9 @@ def cumprod(x, dim=None, dtype=None, name=None): ...@@ -2667,7 +2696,9 @@ def cumprod(x, dim=None, dtype=None, name=None):
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype): if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = cast(x, dtype) x = cast(x, dtype)
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_cumprod(x, dim)
if _in_legacy_dygraph():
return _C_ops.cumprod(x, 'dim', dim) return _C_ops.cumprod(x, 'dim', dim)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod') check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
...@@ -3028,7 +3059,12 @@ def all(x, axis=None, keepdim=False, name=None): ...@@ -3028,7 +3059,12 @@ def all(x, axis=None, keepdim=False, name=None):
else: else:
reduce_all_flag = False reduce_all_flag = False
if paddle.in_dynamic_mode(): if in_dygraph_mode():
if reduce_all_flag:
axis = range(len(x.shape))
return _C_ops.final_state_all(x, axis, keepdim)
if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0] axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim, return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag) 'reduce_all', reduce_all_flag)
...@@ -3120,7 +3156,12 @@ def any(x, axis=None, keepdim=False, name=None): ...@@ -3120,7 +3156,12 @@ def any(x, axis=None, keepdim=False, name=None):
else: else:
reduce_all_flag = False reduce_all_flag = False
if paddle.in_dynamic_mode(): if in_dygraph_mode():
if reduce_all_flag:
axis = range(len(x.shape))
return _C_ops.final_state_any(x, axis, keepdim)
if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0] axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim, return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag) 'reduce_all', reduce_all_flag)
......
...@@ -518,7 +518,9 @@ def mode(x, axis=-1, keepdim=False, name=None): ...@@ -518,7 +518,9 @@ def mode(x, axis=-1, keepdim=False, name=None):
# [1, 0]])) # [1, 0]]))
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_mode(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.mode(x, "axis", axis, "keepdim", keepdim) return _C_ops.mode(x, "axis", axis, "keepdim", keepdim)
helper = LayerHelper("mode", **locals()) helper = LayerHelper("mode", **locals())
...@@ -1002,11 +1004,16 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None): ...@@ -1002,11 +1004,16 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
# [[0, 2], # [[0, 2],
# [1, 2]])) # [1, 2]]))
""" """
if paddle.in_dynamic_mode(): if _non_static_mode():
if axis is not None: if axis is not None:
return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim", keepdim) if _in_legacy_dygraph():
return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim",
keepdim)
return _C_ops.final_state_kthvalue(x, k, axis, keepdim)
else: else:
return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim) if _in_legacy_dygraph():
return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
return _C_ops.final_state_kthvalue(x, k, -1, keepdim)
helper = LayerHelper("kthvalue", **locals()) helper = LayerHelper("kthvalue", **locals())
inputs = {"X": [x]} inputs = {"X": [x]}
......
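mode and kthvalue both return a (values, indices) pair; the branches above only change which underlying op is invoked, not the results. A short usage sketch (input values illustrative):

import paddle

x = paddle.to_tensor([[2.0, 2.0, 3.0],
                      [1.0, 4.0, 4.0]])

values, indices = paddle.mode(x, axis=-1)                # most frequent value per row
k_values, k_indices = paddle.kthvalue(x, k=2, axis=-1)   # 2nd smallest per row

print(values)    # [2., 4.]
print(k_values)  # [2., 4.]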
...@@ -18,6 +18,7 @@ import numpy as np ...@@ -18,6 +18,7 @@ import numpy as np
from ..static import Variable from ..static import Variable
from ..fluid.layer_helper import LayerHelper from ..fluid.layer_helper import LayerHelper
from ..framework import core from ..framework import core
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from .search import where from .search import where
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle import paddle
...@@ -87,7 +88,11 @@ def mean(x, axis=None, keepdim=False, name=None): ...@@ -87,7 +88,11 @@ def mean(x, axis=None, keepdim=False, name=None):
if axis is None or len(axis) == 0: if axis is None or len(axis) == 0:
axis = [0] axis = [0]
if paddle.in_dynamic_mode(): if in_dygraph_mode():
if reduce_all:
axis = range(len(x.shape))
return _C_ops.final_state_mean(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim, return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all) 'reduce_all', reduce_all)
......
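The eager branch above translates reduce_all into an explicit list of all axes, because the final state reduce kernels take the axes positionally rather than a reduce_all flag. A tiny check of that equivalence at the public API level (shapes illustrative):

import paddle

x = paddle.rand([2, 3, 4])

full = paddle.mean(x)                                    # reduce over all axes
explicit = paddle.mean(x, axis=list(range(len(x.shape))))

# Both are the same full reduction; the eager branch simply makes the
# axis list explicit before calling the final state kernel.
print(float(full), float(explicit))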
...@@ -72,6 +72,31 @@ ...@@ -72,6 +72,31 @@
func : addmm func : addmm
backward : addmm_grad backward : addmm_grad
- api : all
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : all
- api : allclose
args : (Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
output : Tensor(out)
infer_meta :
func : AllValueCompareInferMeta
param: [x, y]
kernel :
func : allclose
- api : any
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : any
# arg_max # arg_max
- api : argmax - api : argmax
args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype) args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype)
...@@ -235,6 +260,15 @@ ...@@ -235,6 +260,15 @@
data_type : x data_type : x
backward : cast_grad backward : cast_grad
- api : ceil
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : ceil
backward : ceil_grad
# cholesky # cholesky
- api : cholesky - api : cholesky
args : (Tensor x, bool upper) args : (Tensor x, bool upper)
...@@ -306,6 +340,16 @@ ...@@ -306,6 +340,16 @@
func : cross func : cross
backward : cross_grad backward : cross_grad
- api : cumprod
args : (Tensor x, int dim)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : cumprod
backward : cumprod_grad
# cumsum # cumsum
- api : cumsum - api : cumsum
args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse) args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
...@@ -458,6 +502,35 @@ ...@@ -458,6 +502,35 @@
kernel : kernel :
func : flip func : flip
- api : floor
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : floor
backward : floor_grad
- api : fmax
args : (Tensor x, Tensor y, int axis)
output : Tensor(out)
infer_meta :
param: [x, y]
func : ElementwiseInferMeta
kernel :
func : fmax
backward : fmax_grad
- api : fmin
args : (Tensor x, Tensor y, int axis)
output : Tensor(out)
infer_meta :
param: [x, y]
func : ElementwiseInferMeta
kernel :
func : fmin
backward : fmin_grad
- api : full - api : full
args : (IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) args : (IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output: Tensor output: Tensor
...@@ -500,6 +573,16 @@ ...@@ -500,6 +573,16 @@
kernel : kernel :
func : gather_tree func : gather_tree
- api : gelu
args : (Tensor x, bool approximate)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu
backward : gelu_grad
- api : greater - api : greater
args : (Tensor x, Tensor y, int axis = -1) args : (Tensor x, Tensor y, int axis = -1)
output : Tensor output : Tensor
...@@ -594,6 +677,15 @@ ...@@ -594,6 +677,15 @@
kernel : kernel :
func : is_empty func : is_empty
- api : isclose
args : (Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
output : Tensor(out)
infer_meta :
func : ValueCompareInferMeta
param: [x, y]
kernel :
func : isclose
# isfinite # isfinite
- api : isfinite - api : isfinite
args : (Tensor x) args : (Tensor x)
...@@ -621,6 +713,25 @@ ...@@ -621,6 +713,25 @@
kernel : kernel :
func : isnan, isnan_sr func : isnan, isnan_sr
- api : kldiv_loss
args : (Tensor x, Tensor label, str reduction)
output : Tensor(out)
infer_meta :
func : KLDivInferMeta
kernel :
func : kldiv_loss
data_type : x
backward : kldiv_loss_grad
- api : kthvalue
args : (Tensor x, int k, int axis, bool keepdim)
output : Tensor(out), Tensor(indices)
infer_meta :
func : KthvalueInferMeta
kernel :
func : kthvalue
backward : kthvalue_grad
# leaky_relu # leaky_relu
- api : leaky_relu - api : leaky_relu
args : (Tensor x, float alpha) args : (Tensor x, float alpha)
...@@ -657,6 +768,51 @@ ...@@ -657,6 +768,51 @@
kernel : kernel :
func : less_than func : less_than
- api : lgamma
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : lgamma
backward : lgamma_grad
- api : log
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log
backward: log_grad
- api : log10
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log10
backward: log10_grad
- api : log1p
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log1p
backward: log1p_grad
- api : log2
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log2
backward: log2_grad
# log_loss # log_loss
- api : log_loss - api : log_loss
args : (Tensor input, Tensor label, float epsilon) args : (Tensor input, Tensor label, float epsilon)
...@@ -667,6 +823,15 @@ ...@@ -667,6 +823,15 @@
func : log_loss func : log_loss
backward : log_loss_grad backward : log_loss_grad
- api : log_softmax
args : (Tensor x, int axis)
output : Tensor(out)
infer_meta :
func : UnchangedInferMetaCheckAxis
kernel :
func : log_softmax
backward : log_softmax_grad
# logical_and # logical_and
- api : logical_and - api : logical_and
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
...@@ -744,6 +909,15 @@ ...@@ -744,6 +909,15 @@
func : matrix_power func : matrix_power
backward : matrix_power_grad backward : matrix_power_grad
- api : max
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : max
backward : max_grad
- api : maximum - api : maximum
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor(out) output : Tensor(out)
...@@ -754,12 +928,22 @@ ...@@ -754,12 +928,22 @@
backward : maximum_grad backward : maximum_grad
- api : mean - api : mean
args : (Tensor x, int64_t[] axis={}, bool keep_dim=false) args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor output : Tensor(out)
infer_meta : infer_meta :
func : ReduceInferMeta func : ReduceInferMeta
kernel : kernel :
func : mean func : mean
backward : mean_grad
- api : min
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : min
backward : min_grad
- api : minimum - api : minimum
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
...@@ -770,6 +954,15 @@ ...@@ -770,6 +954,15 @@
func : minimum func : minimum
backward : minimum_grad backward : minimum_grad
- api : mode
args : (Tensor x, int axis, bool keepdim)
output : Tensor(out), Tensor(indices)
infer_meta :
func : ModeInferMeta
kernel :
func : mode
backward : mode_grad
- api : modulo - api : modulo
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor output : Tensor
...@@ -838,6 +1031,15 @@ ...@@ -838,6 +1031,15 @@
output : Tensor output : Tensor
invoke : full_like(x, 1, dtype, place) invoke : full_like(x, 1, dtype, place)
- api : p_norm
args : (Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false)
output : Tensor(out)
infer_meta :
func : PNormInferMeta
kernel :
func : p_norm
backward : p_norm_grad
# pad # pad
- api : pad - api : pad
args : (Tensor x, int[] paddings, float pad_value) args : (Tensor x, int[] paddings, float pad_value)
...@@ -848,6 +1050,15 @@ ...@@ -848,6 +1050,15 @@
func : pad func : pad
# backward : pad_grad # backward : pad_grad
- api : pad3d
args : (Tensor x, IntArray paddings, str mode, float pad_value, str data_format)
output : Tensor(out)
infer_meta :
func : Pad3dInferMeta
kernel :
func : pad3d
backward : pad3d_grad
# pixel_shuffle # pixel_shuffle
- api : pixel_shuffle - api : pixel_shuffle
args : (Tensor x, int upscale_factor, str data_format) args : (Tensor x, int upscale_factor, str data_format)
...@@ -875,6 +1086,15 @@ ...@@ -875,6 +1086,15 @@
kernel: kernel:
func : pool2d func : pool2d
- api : prelu
args : (Tensor x, Tensor alpha, str data_format, str mode)
output : Tensor(out)
infer_meta :
func : PReluInferMeta
kernel :
func : prelu
backward : prelu_grad
# put_along_axis # put_along_axis
- api : put_along_axis - api : put_along_axis
args : (Tensor x, Tensor index, Tensor value, int axis, str reduce) args : (Tensor x, Tensor index, Tensor value, int axis, str reduce)
...@@ -927,6 +1147,15 @@ ...@@ -927,6 +1147,15 @@
intermediate : xshape intermediate : xshape
backward: reshape_grad backward: reshape_grad
- api : round
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : round
backward : round_grad
- api : scale
  args : (Tensor x, Scalar scale, float bias, bool bias_after_scale)
  output : Tensor
@@ -1107,6 +1336,16 @@
  func : square
  backward : square_grad
- api : squeeze
args : (Tensor x, int[] axes)
output : Tensor(xshape), Tensor(out)
infer_meta :
func : SqueezeInferMeta
kernel :
func : squeeze
view: (x -> out)
backward : squeeze_grad
- api : strided_slice
  args : (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides)
  output : Tensor
@@ -1256,6 +1495,16 @@
  backward : unfold_grad
# no_need_buffer : x
- api : unsqueeze
args : (Tensor x, IntArray axes)
output : Tensor(xshape), Tensor(out)
infer_meta :
func : UnsqueezeInferMeta
kernel :
func : unsqueeze
view: (x -> out)
backward : unsqueeze_grad
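A minimal usage sketch (illustrative only, not part of this diff), assuming paddle.squeeze and paddle.unsqueeze map to the two entries above; xshape is an auxiliary output kept for the backward pass, and out is declared as a view of x:

```python
# Illustrative sketch; assumes paddle.squeeze/unsqueeze dispatch to the
# squeeze/unsqueeze entries above.
import paddle

x = paddle.rand([1, 3, 1, 5])
y = paddle.squeeze(x, axis=[0, 2])   # drop the size-1 dims -> [3, 5]
z = paddle.unsqueeze(y, axis=0)      # add a leading dim back -> [1, 3, 5]
print(y.shape, z.shape)
```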
# viterbi_decode
- api : viterbi_decode
  args : (Tensor input, Tensor transition, Tensor length, bool include_bos_eos_tag)
...
@@ -142,6 +142,16 @@
    func : cast_grad
    data_type : out_grad
- backward_api : ceil_grad
forward : ceil(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : ceil_grad
- backward_api : cholesky_grad
  forward : cholesky (Tensor x, bool upper) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, bool upper)
@@ -192,6 +202,25 @@
  kernel :
    func : cross_grad
- backward_api : cumprod_grad
forward : cumprod (Tensor x, int dim) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int dim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : cumprod_grad
# - backward_api : gumbel_softmax_grad
# forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
# args : (Tensor out, Tensor out_grad, int axis)
# output : Tensor(x_grad)
# infer_meta :
# func : GumbelSoftmaxGradInferMeta
# param : [out, out_grad, axis]
# kernel :
# func : gumbel_softmax_grad
- backward_api : diagonal_grad
  forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
@@ -273,6 +302,36 @@
  kernel :
    func : erfinv_grad
- backward_api : floor_grad
forward : floor(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : floor_grad
- backward_api : fmax_grad
forward : fmax(Tensor x, Tensor y, int axis) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, y]
kernel :
func : fmax_grad
- backward_api : fmin_grad
forward : fmin(Tensor x, Tensor y, int axis) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, y]
kernel :
func : fmin_grad
- backward_api : gather_nd_grad
  forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad)
@@ -283,6 +342,16 @@
  kernel :
    func : gather_nd_grad
- backward_api : gelu_grad
forward : gelu(Tensor x, bool approximate) -> Tensor(out)
args : (Tensor x, Tensor out_grad, bool approximate)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu_grad
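A minimal sketch (illustrative only, not part of this diff), assuming the eager final-state autograd wires paddle.nn.functional.gelu to the gelu_grad entry above:

```python
# Illustrative sketch; x.grad should be produced by the gelu_grad kernel when
# running in eager mode with the final-state API.
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([0.5, -1.0, 2.0], stop_gradient=False)
y = F.gelu(x, approximate=False)
y.sum().backward()
print(x.grad)   # d(sum(gelu(x)))/dx
```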
- backward_api : hard_shrink_grad
  forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float threshold)
@@ -314,6 +383,26 @@
    func : index_sample_grad
    data_type : out_grad
- backward_api : kldiv_loss_grad
forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : kldiv_loss_grad
- backward_api : kthvalue_grad
forward : kthvalue(Tensor x, int k, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
args : (Tensor x, Tensor indices, Tensor out_grad, int k, int axis, bool keepdim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : kthvalue_grad
- backward_api : label_smooth_grad
  forward : label_smooth (Tensor label, Tensor prior_dist, float epsilon) -> Tensor(out)
  args : (Tensor out_grad, float epsilon)
@@ -345,6 +434,56 @@
  kernel :
    func : lerp_grad
- backward_api : lgamma_grad
forward : lgamma(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : lgamma_grad
- backward_api : log10_grad
forward : log10 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log10_grad
- backward_api : log1p_grad
forward : log1p (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log1p_grad
- backward_api : log2_grad
forward : log2 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log2_grad
- backward_api : log_grad
forward : log (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log_grad
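A minimal sketch (illustrative only, not part of this diff), assuming the log-family backward entries above are reached through paddle.grad in eager mode:

```python
# Illustrative sketch; the gradient of log(x) is 1/x, which log_grad computes.
import paddle

x = paddle.to_tensor([1.0, 2.0, 4.0], stop_gradient=False)
y = paddle.log(x)
(dx,) = paddle.grad(outputs=[y.sum()], inputs=[x])
print(dx)   # expected [1.0, 0.5, 0.25]
```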
- backward_api : log_loss_grad
  forward : log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
  args : (Tensor input, Tensor label, Tensor out_grad, float epsilon)
@@ -355,6 +494,16 @@
  kernel :
    func : log_loss_grad
- backward_api : log_softmax_grad
forward : log_softmax(Tensor x, int axis) -> Tensor(out)
args : (Tensor out, Tensor out_grad, int axis)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out]
kernel :
func : log_softmax_grad
- backward_api : logsigmoid_grad
  forward : logsigmoid (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
@@ -408,6 +557,16 @@
  kernel :
    func : matrix_power_grad
- backward_api : max_grad
forward: max (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : max_grad
- backward_api : maximum_grad
  forward : maximum(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
@@ -418,6 +577,26 @@
  kernel :
    func : maximum_grad
- backward_api : mean_grad
forward: mean (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : mean_grad
- backward_api : min_grad
forward: min (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : min_grad
- backward_api : minimum_grad
  forward : minimum(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
@@ -428,6 +607,16 @@
  kernel :
    func : minimum_grad
- backward_api : mode_grad
forward : mode(Tensor x, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
args : (Tensor x, Tensor indices, Tensor out_grad, int axis, bool keepdim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : mode_grad
- backward_api : modulo_grad
  forward : add (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
@@ -470,6 +659,36 @@
    data_type : input
  optional : weight
- backward_api : p_norm_grad
forward : p_norm(Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, float porder, int axis, float epsilon, bool keepdim, bool asvector)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : p_norm_grad
- backward_api : pad3d_grad
forward : pad3d(Tensor x, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pad3d_grad
- backward_api : prelu_grad
forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
output : Tensor(x_grad), Tensor(alpha_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, alpha]
kernel :
func : prelu_grad
- backward_api : psroi_pool_grad
  forward : psroi_pool (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale ) -> Tensor(out)
  args : (Tensor x, Tensor rois, Tensor rois_num, Tensor out_grad, int pooled_weight, int pooled_width, int output_channels, float spatial_scale)
@@ -537,6 +756,16 @@
    backend: out_grad
    layout: out_grad
- backward_api : round_grad
forward : round(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : round_grad
- backward_api : scale_grad
  forward : scale (Tensor x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(out)
  args : (Tensor out_grad, Scalar scale, float bias=0.0, bool bias_after_scale=true)
@@ -680,6 +909,16 @@
  kernel :
    func : square_grad
- backward_api : squeeze_grad
forward : squeeze(Tensor x, int[] axes) -> Tensor(xshape), Tensor(out)
args : (Tensor xshape, Tensor out_grad, int[] axes)
output : Tensor(x_grad)
infer_meta :
func : KernelWithXShapeInferMeta
param: [xshape]
kernel :
func : squeeze_grad
- backward_api : strided_slice_grad
  forward : strided_slice (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int[] axes, IntArray starts, IntArray ends, IntArray strides)
@@ -810,6 +1049,16 @@
  kernel :
    func : unfold_grad
- backward_api : unsqueeze_grad
forward : unsqueeze(Tensor x, IntArray axes) -> Tensor(xshape), Tensor(out)
args : (Tensor xshape, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : KernelWithXShapeInferMeta
param: [xshape]
kernel :
func : unsqueeze_grad
- backward_api : where_grad
  forward : where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor condition, Tensor x, Tensor y, Tensor out_grad)
...