Unverified commit 16bfcd18, authored by xiongkun, committed by GitHub

[Yaml] transfer around 22 ops yaml file and pass the final state OpTest. (#41024)

* 1. Add the Python API grad. 2. Add final- and intermediate-state VLOG. 3. Change the python_api error logic.

* Add the python_api, or otherwise disable check_eager=True

* Fix compatibility issues
Parent 2012aeb6
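The test-file hunks below all follow one pattern: the OpTest subclass gains a python_api attribute pointing at the corresponding public Python function, and check_output/check_grad receive a check_eager flag (True where the final-state path is ready, False where it is not). A minimal sketch of that pattern, using a hypothetical TestFooOp and paddle.foo as stand-ins rather than a real test from this commit:

import numpy as np
import paddle
from op_test import OpTest


class TestFooOp(OpTest):
    def setUp(self):
        self.op_type = "foo"              # legacy operator name
        self.python_api = paddle.foo      # hypothetical public API used for the eager check
        x = np.random.random((5, 6)).astype('float64')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}         # placeholder reference output for the sketch

    def test_check_output(self):
        # check_eager=True additionally runs the check through the final-state eager path
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)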
......@@ -933,7 +933,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
return paddle::experimental::Scalar(value);
} else if (type_name == "paddle.Tensor") {
} else if (type_name == "Tensor") {
paddle::experimental::Tensor& value = GetTensorFromPyObject(
op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
return paddle::experimental::Scalar(value);
......
......@@ -1374,8 +1374,8 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
void PReluInferMeta(const MetaTensor& x,
const MetaTensor& alpha,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
MetaTensor* out,
MetaConfig config) {
auto x_dim = x.dims();
......
......@@ -196,10 +196,10 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
void PReluInferMeta(const MetaTensor& x,
const MetaTensor& alpha,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
MetaTensor* out,
MetaConfig config);
MetaConfig config = MetaConfig());
void SearchsortedInferMeta(const MetaTensor& sorted_sequence,
const MetaTensor& value,
......
......@@ -29,21 +29,28 @@ void AllCloseKernel(const Context& dev_ctx,
const Scalar& atol,
bool equal_nan,
DenseTensor* out) {
PADDLE_ENFORCE_EQ(
rtol.dtype(),
DataType::FLOAT64,
phi::errors::InvalidArgument(
"Input (Rtol) type must be double, but get %s.", rtol.dtype()));
PADDLE_ENFORCE_EQ(
atol.dtype(),
DataType::FLOAT64,
phi::errors::InvalidArgument(
"Input (Atol) type must be double, but get %s.", atol.dtype()));
double rtol_v, atol_v;
if (rtol.dtype() == DataType::FLOAT64) {
rtol_v = rtol.to<double>();
} else if (rtol.dtype() == DataType::FLOAT32) {
rtol_v = rtol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Rtol) type must be double or float, but get %s.",
rtol.dtype()));
}
if (atol.dtype() == DataType::FLOAT64) {
atol_v = atol.to<double>();
} else if (atol.dtype() == DataType::FLOAT32) {
atol_v = atol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Atol) type must be double or float, but get %s.",
atol.dtype()));
}
VLOG(3) << "rtol and atol is : " << rtol_v << " " << atol_v;
auto* in_a = x.data<T>();
auto* in_b = y.data<T>();
auto rtol_v = rtol.to<double>();
auto atol_v = atol.to<double>();
auto* out_data = dev_ctx.template Alloc<bool>(out);
*out_data = true;
......
......@@ -46,9 +46,9 @@ static void kthvalueAssign(const Type& input_height,
template <typename T, typename Context>
void KthvalueGradKernel(const Context& dev_ctx,
const DenseTensor& d_out,
const DenseTensor& x,
const DenseTensor& indices,
const DenseTensor& d_out,
int k,
int axis,
bool keepdim,
......
......@@ -24,8 +24,8 @@ void PReluGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const DenseTensor& out_grad,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
DenseTensor* x_grad,
DenseTensor* alpha_grad) {
const T* alpha_ptr = alpha.data<T>();
......
......@@ -23,8 +23,8 @@ template <typename T, typename Context>
void PReluKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
DenseTensor* out) {
const T* x_ptr = x.data<T>();
const T* alpha_ptr = alpha.data<T>();
......
......@@ -51,21 +51,28 @@ void AllCloseKernel(const Context& dev_ctx,
const Scalar& atol,
bool equal_nan,
DenseTensor* out) {
PADDLE_ENFORCE_EQ(
rtol.dtype(),
DataType::FLOAT64,
phi::errors::InvalidArgument(
"Input (Rtol) type must be double, but get %s.", rtol.dtype()));
PADDLE_ENFORCE_EQ(
atol.dtype(),
DataType::FLOAT64,
phi::errors::InvalidArgument(
"Input (Atol) type must be double, but get %s.", atol.dtype()));
double rtol_v, atol_v;
if (rtol.dtype() == DataType::FLOAT64) {
rtol_v = rtol.to<double>();
} else if (rtol.dtype() == DataType::FLOAT32) {
rtol_v = rtol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Rtol) type must be double or float, but get %s.",
rtol.dtype()));
}
if (atol.dtype() == DataType::FLOAT64) {
atol_v = atol.to<double>();
} else if (atol.dtype() == DataType::FLOAT32) {
atol_v = atol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Atol) type must be double or float, but get %s.",
atol.dtype()));
}
VLOG(3) << "rtol and atol is : " << rtol_v << " " << atol_v;
const T* in_data = x.data<T>();
const T* other_data = y.data<T>();
auto rtol_v = rtol.to<double>();
auto atol_v = atol.to<double>();
bool* out_data = dev_ctx.template Alloc<bool>(out);
int num = x.numel();
......
......@@ -34,9 +34,9 @@ static int getBlockSize(int col) {
template <typename T, typename Context>
void KthvalueGradKernel(const Context& dev_ctx,
const DenseTensor& d_out,
const DenseTensor& x,
const DenseTensor& indices,
const DenseTensor& d_out,
int k,
int axis,
bool keepdim,
......
......@@ -102,8 +102,8 @@ void PReluGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const DenseTensor& out_grad,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
DenseTensor* x_grad,
DenseTensor* alpha_grad) {
dev_ctx.template Alloc<T>(x_grad);
......
......@@ -24,8 +24,8 @@ template <typename T, typename Context>
void PReluKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
DenseTensor* out) {
const T* x_ptr = x.data<T>();
T* o_ptr = dev_ctx.template Alloc<T>(out);
......
......@@ -33,8 +33,8 @@ struct LgammaGradFunctor {
};
template <typename T, typename Context>
void LgammaGradKernel(const Context& dev_ctx,
const DenseTensor& d_out,
const DenseTensor& x,
const DenseTensor& d_out,
DenseTensor* d_x) {
auto numel = d_out.numel();
auto* dout_data = d_out.data<T>();
......
......@@ -19,7 +19,6 @@
namespace phi {
template <typename T, typename Context>
// XKTODO (change name)
void KLDivLossGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& label,
......
......@@ -20,9 +20,9 @@
namespace phi {
template <typename T, typename Context>
void KthvalueGradKernel(const Context& dev_ctx,
const DenseTensor& d_out,
const DenseTensor& x,
const DenseTensor& indices,
const DenseTensor& d_out,
int k,
int axis,
bool keepdim,
......
......@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context>
void LgammaGradKernel(const Context& dev_ctx,
const DenseTensor& d_out,
const DenseTensor& x,
const DenseTensor& d_out,
DenseTensor* d_x);
} // namespace phi
......@@ -24,8 +24,8 @@ void PReluGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const DenseTensor& out_grad,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
DenseTensor* x_grad,
DenseTensor* alpha_grad);
} // namespace phi
......@@ -22,7 +22,7 @@ template <typename T, typename Context>
void PReluKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const std::string& mode,
const std::string& data_format,
const std::string& mode,
DenseTensor* out);
} // namespace phi
......@@ -20,7 +20,7 @@ namespace phi {
KernelSignature KthvalueGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("kthvalue_grad",
{GradVarName("Out"), "X", "Indices"},
{"X", "Indices", GradVarName("Out")},
{"k", "axis", "keepdim"},
{GradVarName("X")});
}
......
......@@ -18,7 +18,7 @@ namespace phi {
KernelSignature LgammaGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"lgamma_grad", {GradVarName("Out"), "X"}, {}, {GradVarName("X")});
"lgamma_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")});
}
} // namespace phi
......
......@@ -16,13 +16,19 @@
namespace phi {
KernelSignature PReluOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"prelu", {"X", "Alpha"}, {"data_format", "mode"}, {"Out"});
}
KernelSignature PReluGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("prelu_grad",
{"X", "Alpha", GradVarName("Out")},
{"mode", "data_format"},
{"data_format", "mode"},
{GradVarName("X"), GradVarName("Alpha")});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(prelu, phi::PReluOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(prelu_grad, phi::PReluGradOpArgumentMapping);
......@@ -23,6 +23,7 @@ from ..proto import framework_pb2
from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from paddle import _C_ops
__all__ = [
......
......@@ -25,6 +25,7 @@ import six
import paddle
from ..layer_helper import LayerHelper
from paddle.fluid.framework import _in_legacy_dygraph
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder, _non_static_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags, _in_legacy_dygraph, in_dygraph_mode
from .. import dygraph_utils
......@@ -6427,7 +6428,9 @@ def squeeze(input, axes, name=None):
y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]
"""
if _non_static_mode():
if in_dygraph_mode():
return _C_ops.final_state_squeeze(input, axes)[1]
if _in_legacy_dygraph():
out, _ = _C_ops.squeeze2(input, 'axes', axes)
return out
......@@ -6488,8 +6491,10 @@ def unsqueeze(input, axes, name=None):
item.numpy().item(0) if isinstance(item, Variable) else item
for item in axes
]
out, _ = _C_ops.unsqueeze2(input, 'axes', axes)
return out
if _in_legacy_dygraph():
out, _ = _C_ops.unsqueeze2(input, 'axes', axes)
return out
return _C_ops.final_state_unsqueeze(input, axes)[1]
check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
check_variable_and_dtype(input, 'input', [
......@@ -8910,7 +8915,9 @@ def log(x, name=None):
res = paddle.log(x)
# [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
"""
if _non_static_mode():
if in_dygraph_mode():
return _C_ops.final_state_log(x)
if _in_legacy_dygraph():
return _C_ops.log(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
......
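The Python-layer hunks above (squeeze, unsqueeze, log) and the similar ones further below (softshrink, relu_, log_softmax, pad, kl_div, normalize, norm, allclose, isclose, fmax, fmin, max, min) apply the same three-way dispatch: new eager mode calls the generated final-state API, legacy dygraph keeps the old _C_ops call with flat attribute pairs, and static graph falls through to the existing LayerHelper code. A minimal sketch of that dispatch for a hypothetical op foo (the _C_ops names below are placeholders, not APIs added by this commit):

from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph


def foo(x, axis):
    if in_dygraph_mode():
        # new eager mode: call the auto-generated final-state binding
        return _C_ops.final_state_foo(x, axis)
    if _in_legacy_dygraph():
        # legacy dygraph tracer: attributes are passed as flat name/value pairs
        return _C_ops.foo(x, 'axis', axis)
    # the static-graph branch (LayerHelper + append_op) is unchanged by this
    # commit and omitted from this sketch
    raise NotImplementedError("static-graph branch omitted in this sketch")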
......@@ -50,6 +50,7 @@ class TestActivation(OpTest):
self.op_type = "exp"
self.init_dtype()
self.init_kernel_type()
self.check_eager = False
np.random.seed(2049)
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
......@@ -59,12 +60,18 @@ class TestActivation(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
check_eager = False
if hasattr(self, 'check_eager'):
check_eager = self.check_eager
self.check_output(check_eager=check_eager)
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
check_eager = False
if hasattr(self, 'check_eager'):
check_eager = self.check_eager
self.check_grad(['X'], 'Out', check_eager=check_eager)
def init_dtype(self):
self.dtype = np.float64
......@@ -876,6 +883,8 @@ def ref_softshrink(x, threshold=0.5):
class TestSoftshrink(TestActivation):
def setUp(self):
self.op_type = "softshrink"
self.check_eager = True
self.python_api = paddle.nn.functional.softshrink
self.init_dtype()
threshold = 0.8
......@@ -890,7 +899,7 @@ class TestSoftshrink(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestSoftshrinkAPI(unittest.TestCase):
......@@ -1050,6 +1059,8 @@ class TestAbs(TestActivation):
class TestCeil(TestActivation):
def setUp(self):
self.op_type = "ceil"
self.check_eager = True
self.python_api = paddle.ceil
self.init_dtype()
np.random.seed(1024)
......@@ -1067,6 +1078,8 @@ class TestCeil(TestActivation):
class TestFloor(TestActivation):
def setUp(self):
self.op_type = "floor"
self.check_eager = True
self.python_api = paddle.floor
self.init_dtype()
np.random.seed(1024)
......@@ -1263,6 +1276,8 @@ class TestAtanh(TestActivation):
class TestRound(TestActivation):
def setUp(self):
self.op_type = "round"
self.check_eager = True
self.python_api = paddle.round
self.init_dtype()
np.random.seed(1024)
......@@ -2075,6 +2090,8 @@ class TestReciprocal(TestActivation):
class TestLog(TestActivation):
def setUp(self):
self.op_type = "log"
self.check_eager = True
self.python_api = paddle.log
self.init_dtype()
np.random.seed(1024)
......@@ -2087,7 +2104,7 @@ class TestLog(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def test_error(self):
in1 = fluid.layers.data(
......@@ -2102,6 +2119,8 @@ class TestLog(TestActivation):
class TestLog2(TestActivation):
def setUp(self):
self.op_type = "log2"
self.check_eager = True
self.python_api = paddle.log2
self.init_dtype()
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
......@@ -2113,7 +2132,7 @@ class TestLog2(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def test_error(self):
in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
......@@ -2151,6 +2170,8 @@ class TestLog2(TestActivation):
class TestLog10(TestActivation):
def setUp(self):
self.op_type = "log10"
self.check_eager = True
self.python_api = paddle.log10
self.init_dtype()
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
......@@ -2162,7 +2183,7 @@ class TestLog10(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def test_error(self):
in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
......@@ -2200,6 +2221,8 @@ class TestLog10(TestActivation):
class TestLog1p(TestActivation):
def setUp(self):
self.op_type = "log1p"
self.check_eager = True
self.python_api = paddle.log1p
self.init_dtype()
np.random.seed(1024)
......@@ -2212,7 +2235,7 @@ class TestLog1p(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def test_api(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
......@@ -2298,6 +2321,8 @@ class TestSquareBF16(OpTest):
class TestPow(TestActivation):
def setUp(self):
self.op_type = "pow"
self.python_api = paddle.pow
self.check_eager = False
self.init_dtype()
np.random.seed(1024)
......@@ -2311,12 +2336,14 @@ class TestPow(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=self.check_eager)
class TestPow_factor_tensor(TestActivation):
def setUp(self):
self.op_type = "pow"
self.check_eager = False
self.python_api = paddle.pow
self.init_dtype()
np.random.seed(1024)
......@@ -2332,12 +2359,12 @@ class TestPow_factor_tensor(TestActivation):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=self.check_eager)
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=self.check_eager)
def test_api(self):
input = np.random.uniform(1, 2, [11, 17]).astype("float32")
......
......@@ -29,6 +29,7 @@ class TestAllcloseOp(OpTest):
def setUp(self):
self.set_args()
self.op_type = "allclose"
self.python_api = paddle.allclose
self.inputs = {
'Input': self.input,
'Other': self.other,
......@@ -48,7 +49,7 @@ class TestAllcloseOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAllcloseOpException(TestAllcloseOp):
......@@ -56,28 +57,28 @@ class TestAllcloseOpException(TestAllcloseOp):
def test_rtol_num():
self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_rtol_num)
def test_rtol_type():
self.inputs['Rtol'] = np.array([5]).astype("int32")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_rtol_type)
def test_atol_num():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_atol_num)
def test_atol_type():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([8]).astype("int32")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_atol_type)
......
......@@ -46,7 +46,7 @@ class TestComplexAbsOp(OpTest):
self.grad_x = self.grad_out * (self.x / np.abs(self.x))
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(
......@@ -54,7 +54,7 @@ class TestComplexAbsOp(OpTest):
'Out',
user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out],
check_eager=True)
check_eager=False)
class TestComplexAbsOpZeroValues(OpTest):
......@@ -80,7 +80,7 @@ class TestComplexAbsOpZeroValues(OpTest):
self.grad_x = np.zeros(self.shape, self.dtype)
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(
......@@ -88,7 +88,7 @@ class TestComplexAbsOpZeroValues(OpTest):
'Out',
user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out],
check_eager=True)
check_eager=False)
class TestAbs(unittest.TestCase):
......@@ -133,7 +133,7 @@ class TestRealAbsOp(OpTest):
self.grad_x = self.grad_out * (self.x / np.abs(self.x))
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(
......@@ -141,7 +141,7 @@ class TestRealAbsOp(OpTest):
'Out',
user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out],
check_eager=True)
check_eager=False)
if __name__ == '__main__':
......
......@@ -73,6 +73,7 @@ class TestCumprod(OpTest):
self.init_params()
self.init_dtype()
self.op_type = "cumprod"
self.python_api = paddle.cumprod
self.inputs = {'X': None}
self.outputs = {'Out': None}
self.attrs = {'dim': None}
......@@ -110,7 +111,7 @@ class TestCumprod(OpTest):
for dim in range(-len(self.shape), len(self.shape)):
for zero_num in self.zero_nums:
self.prepare_inputs_outputs_attrs(dim, zero_num)
self.check_output()
self.check_output(check_eager=True)
# test backward.
def test_check_grad(self):
......@@ -119,13 +120,14 @@ class TestCumprod(OpTest):
self.prepare_inputs_outputs_attrs(dim, zero_num)
self.init_grad_input_output(dim)
if self.dtype == np.float64:
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
else:
self.check_grad(
['X'],
'Out',
user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out])
user_defined_grad_outputs=[self.grad_out],
check_eager=True)
# test float32 case.
......
......@@ -125,6 +125,7 @@ class TestElementwiseFmaxOp(OpTest):
def setUp(self):
"""setUp"""
self.op_type = "elementwise_fmax"
self.python_api = paddle.fmax
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
......@@ -136,21 +137,29 @@ class TestElementwiseFmaxOp(OpTest):
def test_check_output(self):
"""test_check_output"""
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
"""test_check_grad_normal"""
self.check_grad(['X', 'Y'], 'Out')
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def test_check_grad_ingore_x(self):
"""test_check_grad_ingore_x"""
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"),
check_eager=True)
def test_check_grad_ingore_y(self):
"""test_check_grad_ingore_y"""
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'),
check_eager=True)
class TestElementwiseFmax2Op(OpTest):
......@@ -159,6 +168,7 @@ class TestElementwiseFmax2Op(OpTest):
def setUp(self):
"""setUp"""
self.op_type = "elementwise_fmax"
self.python_api = paddle.fmax
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
......@@ -172,18 +182,26 @@ class TestElementwiseFmax2Op(OpTest):
def test_check_output(self):
"""test_check_output"""
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
"""test_check_grad_normal"""
self.check_grad(['X', 'Y'], 'Out')
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def test_check_grad_ingore_x(self):
"""test_check_grad_ingore_x"""
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"),
check_eager=True)
def test_check_grad_ingore_y(self):
"""test_check_grad_ingore_y"""
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'),
check_eager=True)
......@@ -127,6 +127,7 @@ class TestElementwiseFminOp(OpTest):
def setUp(self):
"""setUp"""
self.op_type = "elementwise_fmin"
self.python_api = paddle.fmin
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
......@@ -138,21 +139,29 @@ class TestElementwiseFminOp(OpTest):
def test_check_output(self):
"""test_check_output"""
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
"""test_check_grad_normal"""
self.check_grad(['X', 'Y'], 'Out')
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def test_check_grad_ingore_x(self):
"""test_check_grad_ingore_x"""
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"),
check_eager=True)
def test_check_grad_ingore_y(self):
"""test_check_grad_ingore_y"""
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'),
check_eager=True)
class TestElementwiseFmin2Op(OpTest):
......@@ -161,6 +170,7 @@ class TestElementwiseFmin2Op(OpTest):
def setUp(self):
"""setUp"""
self.op_type = "elementwise_fmin"
self.python_api = paddle.fmin
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
......@@ -174,21 +184,29 @@ class TestElementwiseFmin2Op(OpTest):
def test_check_output(self):
"""test_check_output"""
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
"""test_check_grad_normal"""
self.check_grad(['X', 'Y'], 'Out')
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def test_check_grad_ingore_x(self):
"""test_check_grad_ingore_x"""
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"),
check_eager=True)
def test_check_grad_ingore_y(self):
"""test_check_grad_ingore_y"""
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'),
check_eager=True)
if __name__ == "__main__":
......
......@@ -33,6 +33,7 @@ def gather_numpy(x, index, axis):
class TestGatherOp(OpTest):
def setUp(self):
self.op_type = "gather"
self.python_api = paddle.gather
self.config()
xnp = np.random.random(self.x_shape).astype(self.x_type)
self.inputs = {
......@@ -42,10 +43,10 @@ class TestGatherOp(OpTest):
self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=False)
def config(self):
"""
......@@ -120,6 +121,7 @@ class TestCase6(TestGatherOp):
class TestGatherBF16Op(OpTest):
def setUp(self):
self.op_type = "gather"
self.python_api = paddle.gather
self.dtype = np.uint16
self.config()
xnp = np.random.random(self.x_shape).astype(np.float32)
......@@ -134,10 +136,10 @@ class TestGatherBF16Op(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=0.5)
self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=False)
def config(self):
"""
......@@ -153,6 +155,7 @@ class TestGatherBF16Op(OpTest):
class TestGatherOp1(OpTest):
def setUp(self):
self.op_type = "gather"
self.python_api = paddle.gather
self.config()
xnp = np.random.random(self.x_shape).astype(self.x_type)
axis_np = np.array(self.axis).astype(self.index_type)
......@@ -162,10 +165,10 @@ class TestGatherOp1(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=False)
def config(self):
"""
......
......@@ -30,6 +30,7 @@ class TestIscloseOp(OpTest):
paddle.enable_static()
self.set_args()
self.op_type = "isclose"
self.python_api = paddle.isclose
self.inputs = {
'Input': self.input,
'Other': self.other,
......@@ -49,7 +50,7 @@ class TestIscloseOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestIscloseOpException(TestIscloseOp):
......@@ -57,28 +58,28 @@ class TestIscloseOpException(TestIscloseOp):
def test_rtol_num():
self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_rtol_num)
def test_rtol_type():
self.inputs['Rtol'] = np.array([5]).astype("int32")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_rtol_type)
def test_atol_num():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_atol_num)
def test_atol_type():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([8]).astype("int32")
self.check_output()
self.check_output(check_eager=True)
self.assertRaises(ValueError, test_atol_type)
......@@ -211,7 +212,7 @@ class TestIscloseOpFloat64(TestIscloseOp):
self.equal_nan = False
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestIscloseOpLargeDimInput(TestIscloseOp):
......
......@@ -17,6 +17,7 @@ import paddle
import unittest
import numpy as np
from op_test import OpTest
from paddle.nn.functional import kl_div
def kldiv_loss(x, target, reduction):
......@@ -40,6 +41,7 @@ class TestKLDivLossOp(OpTest):
def setUp(self):
self.initTestCase()
self.op_type = 'kldiv_loss'
self.python_api = kl_div
x = np.random.uniform(-10, 10, self.x_shape).astype('float64')
target = np.random.uniform(-10, 10, self.x_shape).astype('float64')
......@@ -53,10 +55,11 @@ class TestKLDivLossOp(OpTest):
self.outputs = {'Loss': loss.astype('float64')}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Loss', no_grad_set=set(["Target"]))
self.check_grad(
['X'], 'Loss', no_grad_set=set(["Target"]), check_eager=True)
def initTestCase(self):
self.x_shape = (4, 5, 5)
......
......@@ -41,6 +41,7 @@ class TestKthvalueOp(OpTest):
def setUp(self):
self.op_type = "kthvalue"
self.python_api = paddle.kthvalue
self.dtype = np.float64
self.input_data = np.random.random((2, 1, 2, 4, 10))
self.init_args()
......@@ -52,11 +53,11 @@ class TestKthvalueOp(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad(set(['X']), 'Out', check_eager=True)
class TestKthvalueOpWithKeepdim(OpTest):
......@@ -67,6 +68,7 @@ class TestKthvalueOpWithKeepdim(OpTest):
def setUp(self):
self.init_args()
self.op_type = "kthvalue"
self.python_api = paddle.kthvalue
self.dtype = np.float64
self.input_data = np.random.random((1, 3, 2, 4, 10))
self.inputs = {'X': self.input_data}
......@@ -77,11 +79,11 @@ class TestKthvalueOpWithKeepdim(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad(set(['X']), 'Out', check_eager=True)
class TestKthvalueOpKernels(unittest.TestCase):
......
......@@ -24,6 +24,7 @@ paddle.enable_static()
class TestLgammaOp(OpTest):
def setUp(self):
self.op_type = 'lgamma'
self.python_api = paddle.lgamma
self.init_dtype_type()
shape = (5, 20)
data = np.random.random(shape).astype(self.dtype) + 1
......@@ -38,10 +39,10 @@ class TestLgammaOp(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7)
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7, check_eager=True)
class TestLgammaOpFp32(TestLgammaOp):
......@@ -49,7 +50,8 @@ class TestLgammaOpFp32(TestLgammaOp):
self.dtype = np.float32
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=0.005)
self.check_grad(
['X'], 'Out', numeric_grad_delta=0.005, check_eager=True)
if __name__ == "__main__":
......
......@@ -42,6 +42,7 @@ def ref_log_softmax_grad(x, axis):
class TestLogSoftmaxOp(OpTest):
def setUp(self):
self.op_type = 'log_softmax'
self.python_api = F.log_softmax
self.dtype = 'float64'
self.shape = [2, 3, 4, 5]
self.axis = -1
......@@ -59,10 +60,11 @@ class TestLogSoftmaxOp(OpTest):
pass
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
self.check_grad(
['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True)
class TestLogSoftmaxShape(TestLogSoftmaxOp):
......@@ -80,6 +82,7 @@ class TestLogSoftmaxAxis(TestLogSoftmaxOp):
class TestLogSoftmaxBF16Op(OpTest):
def setUp(self):
self.op_type = 'log_softmax'
self.python_api = F.log_softmax
self.dtype = np.uint16
self.shape = [2, 3, 4, 5]
self.axis = -1
......@@ -94,12 +97,14 @@ class TestLogSoftmaxBF16Op(OpTest):
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(place, check_eager=True)
def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X'], ['Out'], user_defined_grads=[self.x_grad])
place, ['X'], ['Out'],
user_defined_grads=[self.x_grad],
check_eager=True)
class TestNNLogSoftmaxAPI(unittest.TestCase):
......
......@@ -18,6 +18,7 @@ import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, check_out_dtype
import paddle
from paddle.fluid.framework import _test_eager_guard
import paddle.fluid.core as core
......@@ -86,6 +87,10 @@ class ApiMaxTest(unittest.TestCase):
z_expected = np.array(np.max(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
def test_eager_api(self):
with _test_eager_guard():
self.test_imperative_api()
def test_big_dimension(self):
paddle.disable_static()
x = paddle.rand(shape=[2, 2, 2, 2, 2, 2, 2])
......
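The _test_eager_guard additions in this and the later test files (test_max_op, test_min_op, test_normalize) share one shape: an existing imperative test is simply re-run inside the guard so that it also exercises eager mode. A minimal sketch with a hypothetical ApiFooTest:

import unittest

from paddle.fluid.framework import _test_eager_guard


class ApiFooTest(unittest.TestCase):
    def test_imperative_api(self):
        pass  # the existing dygraph assertions would live here

    def test_eager_api(self):
        # re-run the same imperative checks with the eager tracer enabled
        with _test_eager_guard():
            self.test_imperative_api()


if __name__ == '__main__':
    unittest.main()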
......@@ -25,9 +25,22 @@ from paddle.fluid import Program, program_guard
np.random.seed(10)
def mean_wrapper(x, axis=None, keepdim=False, reduce_all=False):
if reduce_all == True:
return paddle.mean(x, range(len(x.shape)), keepdim)
return paddle.mean(x, axis, keepdim)
def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False):
if reduce_all == True:
return paddle.mean(x, range(len(x.shape)), keepdim)
return paddle.mean(x, axis, keepdim)
class TestMeanOp(OpTest):
def setUp(self):
self.op_type = "mean"
self.python_api = mean_wrapper
self.dtype = np.float64
self.init_dtype_type()
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
......@@ -37,10 +50,10 @@ class TestMeanOp(OpTest):
pass
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_checkout_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestMeanOpError(unittest.TestCase):
......@@ -117,6 +130,7 @@ def ref_reduce_mean_grad(x, axis, dtype):
class TestReduceMeanOp(OpTest):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.dtype = 'float64'
self.shape = [2, 3, 4, 5]
self.axis = [0]
......@@ -145,7 +159,7 @@ class TestReduceMeanOp(OpTest):
def test_check_output(self):
if self.dtype != 'float16':
self.check_output()
self.check_output(check_eager=True)
else:
if not core.is_compiled_with_cuda():
return
......@@ -154,7 +168,7 @@ class TestReduceMeanOp(OpTest):
def test_check_grad(self):
if self.dtype != 'float16':
self.check_grad(['X'], ['Out'])
self.check_grad(['X'], ['Out'], check_eager=True)
else:
return
if not core.is_compiled_with_cuda():
......@@ -175,6 +189,7 @@ class TestReduceMeanOp(OpTest):
class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.dtype = 'float64'
self.shape = [2, 3, 4, 5]
......
......@@ -19,6 +19,7 @@ import numpy as np
from op_test import OpTest, skip_check_grad_ci, check_out_dtype
import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
class ApiMinTest(unittest.TestCase):
......@@ -86,6 +87,10 @@ class ApiMinTest(unittest.TestCase):
z_expected = np.array(np.min(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
def test_eager_api(self):
with _test_eager_guard():
self.test_imperative_api()
class TestOutDtype(unittest.TestCase):
def test_min(self):
......
......@@ -62,6 +62,7 @@ class TestModeOp(OpTest):
def setUp(self):
self.op_type = "mode"
self.python_api = paddle.mode
self.dtype = np.float64
np.random.seed(666)
self.input_data = np.random.rand(2, 64, 1)
......@@ -73,11 +74,11 @@ class TestModeOp(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad(set(['X']), 'Out', check_eager=True)
class TestModeOpLastdim(OpTest):
......@@ -86,6 +87,7 @@ class TestModeOpLastdim(OpTest):
def setUp(self):
self.op_type = "mode"
self.python_api = paddle.mode
self.dtype = np.float64
np.random.seed(666)
self.input_data = np.random.rand(2, 1, 1, 2, 30)
......@@ -97,11 +99,11 @@ class TestModeOpLastdim(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad(set(['X']), 'Out', check_eager=True)
class TestModeOpKernels(unittest.TestCase):
......
......@@ -20,6 +20,24 @@ from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
# hack method for test p_norm final state
def p_norm_python_api(x,
p=2.0,
axis=-1,
epsilon=1e-12,
keepdim=False,
as_vector=False):
if in_dygraph_mode():
return _C_ops.final_state_p_norm(x, p, axis, epsilon, keepdim,
as_vector)
if _in_legacy_dygraph():
return _C_ops.p_norm(x, 'axis', axis, 'porder',
float(p), 'keepdim', keepdim, 'epsilon', epsilon,
'as_vector', as_vector)
def p_norm(x, axis, porder, keepdims=False, reduce_all=False):
......@@ -110,6 +128,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
class TestPnormOp(OpTest):
def setUp(self):
self.op_type = "p_norm"
self.python_api = p_norm_python_api
self.init_test_case()
x = (np.random.random(self.shape) + 0.5).astype(self.dtype)
norm = p_norm(x, self.axis, self.porder, self.keepdim, self.asvector)
......@@ -125,10 +144,10 @@ class TestPnormOp(OpTest):
self.gradient = self.calc_gradient()
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def init_test_case(self):
self.shape = [2, 3, 4, 5]
......@@ -287,6 +306,7 @@ class TestPnormOpFP161(TestPnormOpFP16):
class TestPnormBF16Op(OpTest):
def setUp(self):
self.op_type = "p_norm"
self.python_api = p_norm_python_api
self.init_test_case()
self.x = (np.random.random(self.shape) + 0.5).astype(np.float32)
self.norm = p_norm(self.x, self.axis, self.porder, self.keepdim,
......@@ -304,12 +324,15 @@ class TestPnormBF16Op(OpTest):
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3)
self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X'], 'Out', user_defined_grads=self.gradient)
place, ['X'],
'Out',
user_defined_grads=self.gradient,
check_eager=True)
def init_test_case(self):
self.shape = [2, 3, 4, 5]
......
......@@ -20,6 +20,7 @@ import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np
from paddle.fluid.framework import _test_eager_guard
def p_normalize(x, axis=1, p=2, epsilon=1e-12, keepdims=True):
......@@ -87,6 +88,12 @@ class TestNNFunctionalNormalize(unittest.TestCase):
with fluid.program_guard(fluid.Program()):
self.run_static()
def test_cpu_eager(self):
with _test_eager_guard():
paddle.disable_static(place=paddle.fluid.CPUPlace())
self.run_imperative()
paddle.enable_static()
def test_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
......@@ -98,6 +105,15 @@ class TestNNFunctionalNormalize(unittest.TestCase):
with fluid.program_guard(fluid.Program()):
self.run_static(use_gpu=True)
def test_gpu_eager(self):
with _test_eager_guard():
if not fluid.core.is_compiled_with_cuda():
return
paddle.disable_static(place=paddle.fluid.CUDAPlace(0))
self.run_imperative()
paddle.enable_static()
if __name__ == "__main__":
unittest.main()
......@@ -30,6 +30,7 @@ class TestPad3dOp(OpTest):
self.variable_paddings = False
self.initTestCase()
self.op_type = "pad3d"
self.python_api = paddle.nn.functional.pad
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = {}
if self.variable_paddings:
......@@ -72,10 +73,10 @@ class TestPad3dOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
......
......@@ -157,6 +157,7 @@ class PReluTest(OpTest):
self.init_input_shape()
self.init_attr()
self.op_type = "prelu"
self.python_api = paddle.nn.functional.prelu
x_np = np.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
# Since zero point in prelu is not differentiable, avoid randomize
......@@ -207,10 +208,10 @@ class PReluTest(OpTest):
self.attrs = {'mode': "channel", "data_format": "NCHW"}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(['X', 'Alpha'], 'Out')
self.check_grad(['X', 'Alpha'], 'Out', check_eager=False)
@skip_check_grad_ci(
......@@ -373,7 +374,8 @@ def create_test_fp16_class(parent,
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=atol)
self.check_output_with_place(
place, atol=atol, check_eager=False)
def test_check_grad(self):
place = core.CUDAPlace(0)
......@@ -381,7 +383,8 @@ def create_test_fp16_class(parent,
self.check_grad_with_place(
place, ['X', 'Alpha'],
'Out',
max_relative_error=max_relative_error)
max_relative_error=max_relative_error,
check_eager=False)
cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
TestPReluFp16Case.__name__ = cls_name
......
......@@ -172,6 +172,7 @@ class TestMaxOp(OpTest):
def setUp(self):
self.op_type = "reduce_max"
self.python_api = paddle.max
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-1]}
self.outputs = {
......@@ -179,7 +180,7 @@ class TestMaxOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
@skip_check_grad_ci(
......@@ -190,6 +191,7 @@ class TestMinOp(OpTest):
def setUp(self):
self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [2]}
self.outputs = {
......@@ -197,7 +199,7 @@ class TestMinOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestMin6DOp(OpTest):
......@@ -205,6 +207,7 @@ class TestMin6DOp(OpTest):
def setUp(self):
self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
}
......@@ -214,7 +217,7 @@ class TestMin6DOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestMin8DOp(OpTest):
......@@ -222,6 +225,7 @@ class TestMin8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
}
......@@ -231,7 +235,7 @@ class TestMin8DOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestProdOp(OpTest):
......@@ -302,17 +306,19 @@ class TestProd8DOp(OpTest):
class TestAllOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].all()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAll8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
......@@ -321,23 +327,25 @@ class TestAll8DOp(OpTest):
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAllOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, )}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAll8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
......@@ -346,12 +354,13 @@ class TestAll8DOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAllOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
......@@ -360,12 +369,13 @@ class TestAllOpWithKeepDim(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAll8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.python_api = paddle.all
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
......@@ -377,7 +387,7 @@ class TestAll8DOpWithKeepDim(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAllOpError(unittest.TestCase):
......@@ -395,17 +405,19 @@ class TestAllOpError(unittest.TestCase):
class TestAnyOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].any()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAny8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
......@@ -414,23 +426,25 @@ class TestAny8DOp(OpTest):
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAnyOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAny8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
......@@ -439,12 +453,13 @@ class TestAny8DOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAnyOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, ), 'keep_dim': True}
self.outputs = {
......@@ -453,12 +468,13 @@ class TestAnyOpWithKeepDim(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAny8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.python_api = paddle.any
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
......@@ -470,7 +486,7 @@ class TestAny8DOpWithKeepDim(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestAnyOpError(unittest.TestCase):
......@@ -600,6 +616,7 @@ class TestReduceMaxOpMultiAxises(OpTest):
def setUp(self):
self.op_type = "reduce_max"
self.python_api = paddle.max
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1]}
self.outputs = {
......@@ -607,7 +624,7 @@ class TestReduceMaxOpMultiAxises(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
@skip_check_grad_ci(
......@@ -618,6 +635,7 @@ class TestReduceMinOpMultiAxises(OpTest):
def setUp(self):
self.op_type = "reduce_min"
self.python_api = paddle.min
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1, 2]}
self.outputs = {
......@@ -625,7 +643,7 @@ class TestReduceMinOpMultiAxises(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestKeepDimReduceSumMultiAxises(OpTest):
......
......@@ -27,6 +27,10 @@ paddle.enable_static()
class TestSqueezeOp(OpTest):
def setUp(self):
self.op_type = "squeeze2"
self.python_api = paddle.squeeze
self.python_out_sig = [
"Out"
] # python out sig is customized output signature.
self.init_test_case()
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs()
......@@ -36,10 +40,10 @@ class TestSqueezeOp(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=['XShape'])
self.check_output(no_check_set=['XShape'], check_eager=True)
def test_check_grad(self):
self.check_grad(["X"], "Out")
self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
......
......@@ -29,6 +29,8 @@ class TestUnsqueezeOp(OpTest):
def setUp(self):
self.init_test_case()
self.op_type = "unsqueeze2"
self.python_api = paddle.unsqueeze
self.python_out_sig = ["Out"]
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs()
self.outputs = {
......@@ -37,10 +39,10 @@ class TestUnsqueezeOp(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=["XShape"])
self.check_output(no_check_set=["XShape"], check_eager=True)
def test_check_grad(self):
self.check_grad(["X"], "Out")
self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self):
self.ori_shape = (3, 40)
......@@ -88,6 +90,8 @@ class TestUnsqueezeOp_AxesTensorList(OpTest):
def setUp(self):
self.init_test_case()
self.op_type = "unsqueeze2"
self.python_out_sig = ["Out"]
self.python_api = paddle.unsqueeze
axes_tensor_list = []
for index, ele in enumerate(self.axes):
......@@ -105,10 +109,10 @@ class TestUnsqueezeOp_AxesTensorList(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=["XShape"])
self.check_output(no_check_set=["XShape"], check_eager=True)
def test_check_grad(self):
self.check_grad(["X"], "Out")
self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self):
self.ori_shape = (20, 5)
......@@ -152,6 +156,8 @@ class TestUnsqueezeOp_AxesTensor(OpTest):
def setUp(self):
self.init_test_case()
self.op_type = "unsqueeze2"
self.python_out_sig = ["Out"]
self.python_api = paddle.unsqueeze
self.inputs = {
"X": np.random.random(self.ori_shape).astype("float64"),
......@@ -164,10 +170,10 @@ class TestUnsqueezeOp_AxesTensor(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=["XShape"])
self.check_output(no_check_set=["XShape"], check_eager=True)
def test_check_grad(self):
self.check_grad(["X"], "Out")
self.check_grad(["X"], "Out", check_eager=True)
def init_test_case(self):
self.ori_shape = (20, 5)
......
......@@ -23,7 +23,7 @@ from ...tensor.math import multiply
import warnings
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import convert_np_dtype_to_dtype_
from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle
from paddle import _C_ops, in_dynamic_mode
......@@ -519,7 +519,9 @@ def prelu(x, weight, data_format="NCHW", name=None):
1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
mode = 'channel'
if in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_prelu(x, weight, data_format, mode)
if _in_legacy_dygraph():
return _C_ops.prelu(x, weight, 'mode', mode, 'data_format', data_format)
helper = LayerHelper('prelu', **locals())
......@@ -578,9 +580,10 @@ def relu_(x, name=None):
Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_relu`.
"""
if paddle.fluid.framework._in_eager_mode_:
if in_dygraph_mode():
return _C_ops.final_state_relu_(x)
return _C_ops.relu_(x)
if _in_legacy_dygraph():
return _C_ops.relu_(x)
def log_sigmoid(x, name=None):
......@@ -1092,7 +1095,9 @@ def softshrink(x, threshold=0.5, name=None):
"The threshold must be no less than zero. Received: {}.".format(
threshold))
if in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_soft_shrink(x, threshold)
if _in_legacy_dygraph():
return _C_ops.softshrink(x, 'lambda', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
......@@ -1371,10 +1376,12 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dynamic_mode():
if _non_static_mode():
if dtype is not None:
x = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return _C_ops.log_softmax(x, 'axis', axis)
if _in_legacy_dygraph():
return _C_ops.log_softmax(x, 'axis', axis)
return _C_ops.final_state_log_softmax(x, axis)
if dtype is None:
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
......
......@@ -38,6 +38,7 @@ from paddle import _C_ops
from paddle.framework import in_dynamic_mode
from paddle.tensor.creation import full
from paddle.framework import core
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.static import default_main_program
__all__ = []
......@@ -1352,8 +1353,11 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
if in_dynamic_mode():
if isinstance(pad, Variable):
pad = pad.numpy()
out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
"data_format", data_format, "name", name)
if _in_legacy_dygraph():
out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
"data_format", data_format, "name", name)
else:
out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
else:
attrs = {'mode': mode, 'value': value, 'data_format': data_format}
inputs = {'X': [x]}
......
......@@ -921,8 +921,11 @@ def kl_div(input, label, reduction='mean', name=None):
label.dtype) == 'float32':
label = paddle.cast(label, 'float64')
if paddle.in_dynamic_mode():
out = _C_ops.kldiv_loss(input, label, 'reduction', 'none')
if _non_static_mode():
if _in_legacy_dygraph():
out = _C_ops.kldiv_loss(input, label, 'reduction', 'none')
else:
out = _C_ops.final_state_kldiv_loss(input, label, 'none')
if reduction == 'mean':
out = paddle.mean(out)
elif reduction == 'sum':
......
......@@ -24,6 +24,7 @@ from ...fluid import dygraph_utils
import numbers
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
__all__ = []
......@@ -78,7 +79,12 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
# [[0. 0.24253564 0.37139067]
# [1. 0.97014254 0.9284767 ]]
"""
if in_dynamic_mode():
if in_dygraph_mode():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
out = _C_ops.final_state_p_norm(x, float(p), axis, epsilon, True, False)
return x / _C_ops.elementwise_max(out, eps)
if _in_legacy_dygraph():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
out = _C_ops.p_norm(x, 'axis', axis, 'porder',
float(p), 'keepdim', True, 'epsilon', epsilon)
......
......@@ -20,6 +20,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_type
from ...fluid.layer_helper import LayerHelper
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
__all__ = []
......@@ -78,7 +79,12 @@ class PairwiseDistance(Layer):
check_type(self.keepdim, 'keepdim', (bool), 'PairwiseDistance')
def forward(self, x, y):
if in_dynamic_mode():
if in_dygraph_mode():
sub = _C_ops.elementwise_sub(x, y)
return _C_ops.final_state_p_norm(sub, self.p, 1, self.epsilon,
self.keepdim, False)
if _in_legacy_dygraph():
sub = _C_ops.elementwise_sub(x, y)
return _C_ops.p_norm(sub, 'axis', 1, 'porder', self.p, 'keepdim',
self.keepdim, 'epsilon', self.epsilon)
......
......@@ -288,10 +288,16 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
axis (int, optional): None for last dimension.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if axis is None: axis = -1
return _C_ops.final_state_p_norm(input, porder, axis, 1e-12,
keepdim, asvector)
if _in_legacy_dygraph():
if axis is None: axis = -1
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
'keepdim', keepdim, 'asvector', asvector)
if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None:
......
......@@ -122,11 +122,12 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
# [True]
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_allclose(x, y, rtol, atol, equal_nan)
if _in_legacy_dygraph():
return _C_ops.allclose(x, y, 'rtol',
str(rtol), 'atol',
str(atol), 'equal_nan', equal_nan)
check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
check_type(rtol, 'rtol', float, 'allclose')
......@@ -678,7 +679,9 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
# [True, True]
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_isclose(x, y, rtol, atol, equal_nan)
if _in_legacy_dygraph():
return _C_ops.isclose(x, y, 'rtol',
str(rtol), 'atol',
str(atol), 'equal_nan', equal_nan)
......
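Note that the legacy branch still passes rtol/atol as strings while the final-state branch forwards them directly as scalars. A hedged usage sketch of the two public APIs (values illustrative):

    import paddle

    x = paddle.to_tensor([10000.0, 1e-07])
    y = paddle.to_tensor([10000.1, 1e-08])
    print(paddle.allclose(x, y, rtol=1e-05, atol=1e-08))  # single boolean tensor
    print(paddle.isclose(x, y, rtol=1e-05, atol=1e-08))   # element-wise boolean tensor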
......@@ -1409,7 +1409,9 @@ def gather(x, index, axis=None, name=None):
if axis is None:
axis = 0
if paddle.in_dynamic_mode():
#if in_dygraph_mode():
#return _C_ops.final_state_gather(x, index, axis)
if _non_static_mode():
axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
......
......@@ -28,7 +28,7 @@ from paddle.tensor.attribute import _complex_to_real_dtype
import paddle
from paddle.static import Variable
from ..framework import core
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode
from ..framework import _varbase_creator, convert_np_dtype_to_dtype_
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
......@@ -150,7 +150,17 @@ def pow(x, y, name=None):
"""
# in dynamic graph mode
if paddle.in_dynamic_mode():
#if in_dygraph_mode():
#if isinstance(y, (int, float)):
#return _C_ops.final_state_pow(x, y)
#elif isinstance(y, (paddle.Tensor, Variable)):
#return _elementwise_op_in_dygraph(
#x, y, axis=-1, act=None, op_name='elementwise_pow')
#else:
#raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
#if _in_legacy_dygraph():
if _non_static_mode():
if isinstance(y, (int, float)):
return _C_ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)):
......@@ -719,7 +729,9 @@ def fmax(x, y, name=None):
op_type = 'elementwise_fmax'
axis = -1
act = None
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_fmax(x, y, axis)
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -780,7 +792,9 @@ def fmin(x, y, name=None):
op_type = 'elementwise_fmin'
axis = -1
act = None
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_fmin(x, y, axis)
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -1711,7 +1725,11 @@ def max(x, axis=None, keepdim=False, name=None):
"""
reduce_all, axis = _get_reduce_all_value(axis)
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if reduce_all:
axis = range(len(x.shape))
return _C_ops.final_state_max(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
......@@ -1811,7 +1829,12 @@ def min(x, axis=None, keepdim=False, name=None):
"""
reduce_all, axis = _get_reduce_all_value(axis)
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if reduce_all:
axis = range(len(x.shape))
return _C_ops.final_state_min(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
......@@ -2081,7 +2104,9 @@ def log1p(x, name=None):
# [[0.], [0.6931472]]
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_log1p(x)
if _in_legacy_dygraph():
return _C_ops.log1p(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
......@@ -2130,7 +2155,9 @@ def log2(x, name=None):
res = paddle.log2(x_i)
print(res) # [1.0]
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_log2(x)
if _in_legacy_dygraph():
return _C_ops.log2(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
......@@ -2180,7 +2207,9 @@ def log10(x, name=None):
res = paddle.log10(x_i)
print(res) # [1.0]
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_log10(x)
if _in_legacy_dygraph():
return _C_ops.log10(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
......@@ -2667,7 +2696,9 @@ def cumprod(x, dim=None, dtype=None, name=None):
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = cast(x, dtype)
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_cumprod(x, dim)
if _in_legacy_dygraph():
return _C_ops.cumprod(x, 'dim', dim)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
......@@ -3028,7 +3059,12 @@ def all(x, axis=None, keepdim=False, name=None):
else:
reduce_all_flag = False
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if reduce_all_flag:
axis = range(len(x.shape))
return _C_ops.final_state_all(x, axis, keepdim)
if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
......@@ -3120,7 +3156,12 @@ def any(x, axis=None, keepdim=False, name=None):
else:
reduce_all_flag = False
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if reduce_all_flag:
axis = range(len(x.shape))
return _C_ops.final_state_any(x, axis, keepdim)
if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
......
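All of the reduce ops above (max, min, all, any) follow the same convention in the final-state branch: when a full reduction is requested, axis is expanded to every dimension before the call. A hedged usage sketch with illustrative values:

    import paddle

    x = paddle.to_tensor([[0.2, 0.8], [0.5, 0.1]])
    print(paddle.max(x))          # reduce over all dims
    print(paddle.max(x, axis=1))  # reduce along axis 1
    print(paddle.any(x > 0.4))    # boolean reduction over all dims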
......@@ -518,7 +518,9 @@ def mode(x, axis=-1, keepdim=False, name=None):
# [1, 0]]))
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_mode(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.mode(x, "axis", axis, "keepdim", keepdim)
helper = LayerHelper("mode", **locals())
......@@ -1002,11 +1004,16 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
# [[0, 2],
# [1, 2]]))
"""
if paddle.in_dynamic_mode():
if _non_static_mode():
if axis is not None:
return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim", keepdim)
if _in_legacy_dygraph():
return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim",
keepdim)
return _C_ops.final_state_kthvalue(x, k, axis, keepdim)
else:
return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
if _in_legacy_dygraph():
return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
return _C_ops.final_state_kthvalue(x, k, -1, keepdim)
helper = LayerHelper("kthvalue", **locals())
inputs = {"X": [x]}
......
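A hedged usage sketch of the two search ops rewired above; kthvalue falls back to the last axis when axis is None, which is why the final-state call passes -1 in that branch:

    import paddle

    x = paddle.randn([2, 3, 3])
    values, indices = paddle.kthvalue(x, k=2, axis=1)
    values, indices = paddle.mode(x, axis=-1)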
......@@ -18,6 +18,7 @@ import numpy as np
from ..static import Variable
from ..fluid.layer_helper import LayerHelper
from ..framework import core
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from .search import where
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
......@@ -87,7 +88,11 @@ def mean(x, axis=None, keepdim=False, name=None):
if axis is None or len(axis) == 0:
axis = [0]
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if reduce_all:
axis = range(len(x.shape))
return _C_ops.final_state_mean(x, axis, keepdim)
if _in_legacy_dygraph():
return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
......
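A hedged usage sketch of the mean reduction above, with an illustrative shape:

    import paddle

    x = paddle.rand([2, 3, 4])
    print(paddle.mean(x))               # scalar: reduce over all dims
    print(paddle.mean(x, axis=[1, 2]))  # shape [2]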
......@@ -72,6 +72,31 @@
func : addmm
backward : addmm_grad
- api : all
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : all
- api : allclose
args : (Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
output : Tensor(out)
infer_meta :
func : AllValueCompareInferMeta
param: [x, y]
kernel :
func : allclose
- api : any
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : any
# arg_max
- api : argmax
args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype)
......@@ -235,6 +260,15 @@
data_type : x
backward : cast_grad
- api : ceil
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : ceil
backward : ceil_grad
# cholesky
- api : cholesky
args : (Tensor x, bool upper)
......@@ -306,6 +340,16 @@
func : cross
backward : cross_grad
- api : cumprod
args : (Tensor x, int dim)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : cumprod
backward : cumprod_grad
# cumsum
- api : cumsum
args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
......@@ -458,6 +502,35 @@
kernel :
func : flip
- api : floor
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : floor
backward : floor_grad
- api : fmax
args : (Tensor x, Tensor y, int axis)
output : Tensor(out)
infer_meta :
param: [x, y]
func : ElementwiseInferMeta
kernel :
func : fmax
backward : fmax_grad
- api : fmin
args : (Tensor x, Tensor y, int axis)
output : Tensor(out)
infer_meta :
param: [x, y]
func : ElementwiseInferMeta
kernel :
func : fmin
backward : fmin_grad
- api : full
args : (IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output: Tensor
......@@ -500,6 +573,16 @@
kernel :
func : gather_tree
- api : gelu
args : (Tensor x, bool approximate)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu
backward : gelu_grad
- api : greater
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor
......@@ -594,6 +677,15 @@
kernel :
func : is_empty
- api : isclose
args : (Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
output : Tensor(out)
infer_meta :
func : ValueCompareInferMeta
param: [x, y]
kernel :
func : isclose
# isfinite
- api : isfinite
args : (Tensor x)
......@@ -621,6 +713,25 @@
kernel :
func : isnan, isnan_sr
- api : kldiv_loss
args : (Tensor x, Tensor label, str reduction)
output : Tensor(out)
infer_meta :
func : KLDivInferMeta
kernel :
func : kldiv_loss
data_type : x
backward : kldiv_loss_grad
- api : kthvalue
args : (Tensor x, int k, int axis, bool keepdim)
output : Tensor(out), Tensor(indices)
infer_meta :
func : KthvalueInferMeta
kernel :
func : kthvalue
backward : kthvalue_grad
# leaky_relu
- api : leaky_relu
args : (Tensor x, float alpha)
......@@ -657,6 +768,51 @@
kernel :
func : less_than
- api : lgamma
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : lgamma
backward : lgamma_grad
- api : log
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log
backward: log_grad
- api : log10
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log10
backward: log10_grad
- api : log1p
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log1p
backward: log1p_grad
- api : log2
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log2
backward: log2_grad
# log_loss
- api : log_loss
args : (Tensor input, Tensor label, float epsilon)
......@@ -667,6 +823,15 @@
func : log_loss
backward : log_loss_grad
- api : log_softmax
args : (Tensor x, int axis)
output : Tensor(out)
infer_meta :
func : UnchangedInferMetaCheckAxis
kernel :
func : log_softmax
backward : log_softmax_grad
# logical_and
- api : logical_and
args : (Tensor x, Tensor y)
......@@ -744,6 +909,15 @@
func : matrix_power
backward : matrix_power_grad
- api : max
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : max
backward : max_grad
- api : maximum
args : (Tensor x, Tensor y)
output : Tensor(out)
......@@ -754,12 +928,22 @@
backward : maximum_grad
- api : mean
args : (Tensor x, int64_t[] axis={}, bool keep_dim=false)
output : Tensor
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : mean
backward : mean_grad
- api : min
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
infer_meta :
func : ReduceInferMeta
kernel :
func : min
backward : min_grad
- api : minimum
args : (Tensor x, Tensor y)
......@@ -770,6 +954,15 @@
func : minimum
backward : minimum_grad
- api : mode
args : (Tensor x, int axis, bool keepdim)
output : Tensor(out), Tensor(indices)
infer_meta :
func : ModeInferMeta
kernel :
func : mode
backward : mode_grad
- api : modulo
args : (Tensor x, Tensor y)
output : Tensor
......@@ -838,6 +1031,15 @@
output : Tensor
invoke : full_like(x, 1, dtype, place)
- api : p_norm
args : (Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false)
output : Tensor(out)
infer_meta :
func : PNormInferMeta
kernel :
func : p_norm
backward : p_norm_grad
# pad
- api : pad
args : (Tensor x, int[] paddings, float pad_value)
......@@ -848,6 +1050,15 @@
func : pad
# backward : pad_grad
- api : pad3d
args : (Tensor x, IntArray paddings, str mode, float pad_value, str data_format)
output : Tensor(out)
infer_meta :
func : Pad3dInferMeta
kernel :
func : pad3d
backward : pad3d_grad
# pixel_shuffle
- api : pixel_shuffle
args : (Tensor x, int upscale_factor, str data_format)
......@@ -875,6 +1086,15 @@
kernel:
func : pool2d
- api : prelu
args : (Tensor x, Tensor alpha, str data_format, str mode)
output : Tensor(out)
infer_meta :
func : PReluInferMeta
kernel :
func : prelu
backward : prelu_grad
# put_along_axis
- api : put_along_axis
args : (Tensor x, Tensor index, Tensor value, int axis, str reduce)
......@@ -927,6 +1147,15 @@
intermediate : xshape
backward: reshape_grad
- api : round
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : round
backward : round_grad
- api : scale
args : (Tensor x, Scalar scale, float bias, bool bias_after_scale)
output : Tensor
......@@ -1107,6 +1336,16 @@
func : square
backward : square_grad
- api : squeeze
args : (Tensor x, int[] axes)
output : Tensor(xshape), Tensor(out)
infer_meta :
func : SqueezeInferMeta
kernel :
func : squeeze
view: (x -> out)
backward : squeeze_grad
- api : strided_slice
args : (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides)
output : Tensor
......@@ -1256,6 +1495,16 @@
backward : unfold_grad
# no_need_buffer : x
- api : unsqueeze
args : (Tensor x, IntArray axes)
output : Tensor(xshape), Tensor(out)
infer_meta :
func : UnsqueezeInferMeta
kernel :
func : unsqueeze
view: (x -> out)
backward : unsqueeze_grad
# viterbi_decode
- api : viterbi_decode
args : (Tensor input, Tensor transition, Tensor length, bool include_bos_eos_tag)
......
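These yaml entries drive generation of the final_state_* eager ops called in the Python branches above, and the generated bindings take the yaml args positionally. A hedged sketch mirroring the allclose entry (assumes eager mode; values illustrative):

    import paddle
    from paddle import _C_ops
    from paddle.fluid.framework import in_dygraph_mode

    x = paddle.to_tensor([10000.0, 1e-07])
    y = paddle.to_tensor([10000.1, 1e-08])
    if in_dygraph_mode():
        # args follow the yaml order: (x, y, rtol, atol, equal_nan)
        out = _C_ops.final_state_allclose(x, y, 1e-05, 1e-08, False)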
......@@ -142,6 +142,16 @@
func : cast_grad
data_type : out_grad
- backward_api : ceil_grad
forward : ceil(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : ceil_grad
- backward_api : cholesky_grad
forward : cholesky (Tensor x, bool upper) -> Tensor(out)
args : (Tensor out, Tensor out_grad, bool upper)
......@@ -192,6 +202,25 @@
kernel :
func : cross_grad
- backward_api : cumprod_grad
forward : cumprod (Tensor x, int dim) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int dim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : cumprod_grad
# - backward_api : gumbel_softmax_grad
# forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
# args : (Tensor out, Tensor out_grad, int axis)
# output : Tensor(x_grad)
# infer_meta :
# func : GumbelSoftmaxGradInferMeta
# param : [out, out_grad, axis]
# kernel :
# func : gumbel_softmax_grad
- backward_api : diagonal_grad
forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
......@@ -273,6 +302,36 @@
kernel :
func : erfinv_grad
- backward_api : floor_grad
forward : floor(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : floor_grad
- backward_api : fmax_grad
forward : fmax(Tensor x, Tensor y, int axis) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, y]
kernel :
func : fmax_grad
- backward_api : fmin_grad
forward : fmin(Tensor x, Tensor y, int axis) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, y]
kernel :
func : fmin_grad
- backward_api : gather_nd_grad
forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
......@@ -283,6 +342,16 @@
kernel :
func : gather_nd_grad
- backward_api : gelu_grad
forward : gelu(Tensor x, bool approximate) -> Tensor(out)
args : (Tensor x, Tensor out_grad, bool approximate)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu_grad
- backward_api : hard_shrink_grad
forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold)
......@@ -314,6 +383,26 @@
func : index_sample_grad
data_type : out_grad
- backward_api : kldiv_loss_grad
forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : kldiv_loss_grad
- backward_api : kthvalue_grad
forward : kthvalue(Tensor x, int k, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
args : (Tensor x, Tensor indices, Tensor out_grad, int k, int axis, bool keepdim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : kthvalue_grad
- backward_api : label_smooth_grad
forward : label_smooth (Tensor label, Tensor prior_dist, float epsilon) -> Tensor(out)
args : (Tensor out_grad, float epsilon)
......@@ -345,6 +434,56 @@
kernel :
func : lerp_grad
- backward_api : lgamma_grad
forward : lgamma(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : lgamma_grad
- backward_api : log10_grad
forward : log10 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log10_grad
- backward_api : log1p_grad
forward : log1p (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log1p_grad
- backward_api : log2_grad
forward : log2 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log2_grad
- backward_api : log_grad
forward : log (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log_grad
- backward_api : log_loss_grad
forward : log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
args : (Tensor input, Tensor label, Tensor out_grad, float epsilon)
......@@ -355,6 +494,16 @@
kernel :
func : log_loss_grad
- backward_api : log_softmax_grad
forward : log_softmax(Tensor x, int axis) -> Tensor(out)
args : (Tensor out, Tensor out_grad, int axis)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out]
kernel :
func : log_softmax_grad
- backward_api : logsigmoid_grad
forward : logsigmoid (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......@@ -408,6 +557,16 @@
kernel :
func : matrix_power_grad
- backward_api : max_grad
forward: max (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : max_grad
- backward_api : maximum_grad
forward : maximum(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
......@@ -418,6 +577,26 @@
kernel :
func : maximum_grad
- backward_api : mean_grad
forward: mean (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : mean_grad
- backward_api : min_grad
forward: min (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : min_grad
- backward_api : minimum_grad
forward : minimum(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
......@@ -428,6 +607,16 @@
kernel :
func : minimum_grad
- backward_api : mode_grad
forward : mode(Tensor x, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
args : (Tensor x, Tensor indices, Tensor out_grad, int axis, bool keepdim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : mode_grad
- backward_api : modulo_grad
forward : add (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
......@@ -470,6 +659,36 @@
data_type : input
optional : weight
- backward_api : p_norm_grad
forward : p_norm(Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, float porder, int axis, float epsilon, bool keepdim, bool asvector)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : p_norm_grad
- backward_api : pad3d_grad
forward : pad3d(Tensor x, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pad3d_grad
- backward_api : prelu_grad
forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
output : Tensor(x_grad), Tensor(alpha_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, alpha]
kernel :
func : prelu_grad
- backward_api : psroi_pool_grad
forward : psroi_pool (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale ) -> Tensor(out)
args : (Tensor x, Tensor rois, Tensor rois_num, Tensor out_grad, int pooled_weight, int pooled_width, int output_channels, float spatial_scale)
......@@ -537,6 +756,16 @@
backend: out_grad
layout: out_grad
- backward_api : round_grad
forward : round(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : round_grad
- backward_api : scale_grad
forward : scale (Tensor x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(out)
args : (Tensor out_grad, Scalar scale, float bias=0.0, bool bias_after_scale=true)
......@@ -680,6 +909,16 @@
kernel :
func : square_grad
- backward_api : squeeze_grad
forward : squeeze(Tensor x, int[] axes) -> Tensor(xshape), Tensor(out)
args : (Tensor xshape, Tensor out_grad, int[] axes)
output : Tensor(x_grad)
infer_meta :
func : KernelWithXShapeInferMeta
param: [xshape]
kernel :
func : squeeze_grad
- backward_api : strided_slice_grad
forward : strided_slice (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int[] axes, IntArray starts, IntArray ends, IntArray strides)
......@@ -810,6 +1049,16 @@
kernel :
func : unfold_grad
- backward_api : unsqueeze_grad
forward : unsqueeze(Tensor x, IntArray axes) -> Tensor(xshape), Tensor(out)
args : (Tensor xshape, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : KernelWithXShapeInferMeta
param: [xshape]
kernel :
func : unsqueeze_grad
- backward_api : where_grad
forward : where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
args : (Tensor condition, Tensor x, Tensor y, Tensor out_grad)
......
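Each backward_api entry registers the gradient kernel used by eager autograd. A hedged sketch exercising one of the newly added entries (lgamma_grad) end to end, with illustrative values:

    import paddle

    x = paddle.to_tensor([2.0, 3.0], stop_gradient=False)
    y = paddle.lgamma(x)
    y.sum().backward()
    print(x.grad)  # digamma(x), produced by the lgamma_grad kernel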