Unverified · Commit 3b0e911c · authored by wanghuancoder · committed by GitHub

[Eager] dataloader2 (#41338)

* eager math op, test=develop

* eager support lookahead, test=develop

* refine, test=develop

* refine doc, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* test_paddle_multiprocessing

* refine, test=develop

* refine, test=develop

* fix bug, test=develop

* refine, test=develop

* dataloader, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* test_datasets timeout, test=develop

* refine, test=develop
Parent ceb3382b
@@ -2653,7 +2653,8 @@ static void GenerateForwardDygraphFile(const std::string& forward_cc_path,
       "#include \"paddle/fluid/eager/api/utils/global_utils.h\"\n"
       "#include \"paddle/fluid/eager/amp_utils.h\"\n"
       "#include \"paddle/fluid/eager/amp_auto_cast.h\"\n"
-      "#include \"paddle/fluid/platform/profiler/event_tracing.h\"\n\n";
+      "#include \"paddle/fluid/platform/profiler/event_tracing.h\"\n"
+      "#pragma GCC diagnostic ignored \"-Wunused-variable\"\n\n";
   std::string forward_cc_include_str =
       paddle::string::Sprintf(FORWARD_INCLUDE_TEMPLATE);
   std::ofstream forward_cc_stream(forward_cc_path, std::ios::out);
......
@@ -42,7 +42,9 @@ limitations under the License. */
 #include "pybind11/detail/internals.h"
 #pragma GCC diagnostic ignored "-Wmissing-field-initializers"
 #include "paddle/fluid/framework/python_headers.h"
+#include "paddle/fluid/memory/allocation/mmap_allocator.h"
 #include "paddle/fluid/pybind/tensor_py.h"
+#include "paddle/phi/core/ddim.h"
 namespace paddle {
 namespace pybind {
@@ -1390,6 +1392,40 @@ static PyObject* tensor__reset_grad_inplace_version(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

+static PyObject* tensor_method__share_memory(TensorObject* self, PyObject* args,
+                                             PyObject* kwargs) {
+  EAGER_TRY
+#ifndef _WIN32
+  PADDLE_ENFORCE_EQ(platform::is_cpu_place(self->tensor.inner_place()), true,
+                    platform::errors::InvalidArgument(
+                        "Sharing memory only support CPU Tensor currently"));
+  // 1. get LoDTensor
+  auto* t =
+      std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl()).get();
+  // 2. allocate shared memory
+  void* data_ptr = t->data();
+  size_t data_size =
+      t->numel() *
+      framework::SizeOfType(framework::TransToProtoVarType(t->dtype()));
+  auto shared_writer_holder =
+      memory::allocation::AllocateMemoryMapWriterAllocation(data_size);
+  // 3. maintain mmap fd set & backup ipc_name
+  const std::string& ipc_name = shared_writer_holder->ipc_name();
+  memory::allocation::MemoryMapFdSet::Instance().Insert(ipc_name);
+  // 4. copy data & reset holder
+  memory::Copy(platform::CPUPlace(), shared_writer_holder->ptr(),
+               platform::CPUPlace(), data_ptr, data_size);
+  t->ResetHolder(shared_writer_holder);
+  return ToPyObject(t);
+#else
+  PADDLE_THROW(platform::errors::PermissionDenied(
+      "Sharing memory in Windows OS is not supported currently"));
+  Py_INCREF(Py_None);
+  return Py_None;
+#endif
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* tensor__offset(TensorObject* self, PyObject* args,
                                 PyObject* kwargs) {
   EAGER_TRY
@@ -1536,6 +1572,8 @@ PyMethodDef variable_methods[] = {
     {"_reset_grad_inplace_version",
      (PyCFunction)(void (*)(void))tensor__reset_grad_inplace_version,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_share_memory", (PyCFunction)(void (*)(void))tensor_method__share_memory,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {"_offset", (PyCFunction)(void (*)(void))tensor__offset,
      METH_VARARGS | METH_KEYWORDS, NULL},
 #if defined(PADDLE_WITH_CUDA)
......
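For orientation, a minimal sketch of what the new _share_memory binding enables from Python (hypothetical usage; the method is registered above, takes no arguments, only supports CPU tensors, and raises on Windows):

    import paddle

    paddle.set_device("cpu")
    t = paddle.ones([2, 3])
    # Re-backs the tensor's storage with a memory-mapped allocation and
    # returns the underlying dense (LoD) tensor, so worker processes can
    # map the same region instead of receiving a serialized copy.
    shared_lod_tensor = t._share_memory()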
@@ -156,6 +156,17 @@ int64_t CastPyArg2AttrLong(PyObject* obj, ssize_t arg_pos) {
   }
 }

+size_t CastPyArg2AttrSize_t(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_CheckLongOrConvertToLong(&obj)) {
+    return PyLong_AsSize_t(obj);
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "long, but got %s",
+        arg_pos + 1, (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
+  }
+}
+
 float CastPyArg2AttrFloat(PyObject* obj, ssize_t arg_pos) {
   if (PyObject_CheckFloatOrConvertToFloat(&obj)) {
     return static_cast<float>(PyFloat_AsDouble(obj));
@@ -297,6 +308,51 @@ std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos) {
   return result;
 }

+std::vector<size_t> CastPyArg2VectorOfSize_t(PyObject* obj, size_t arg_pos) {
+  std::vector<size_t> result;
+  if (PyList_Check(obj)) {
+    Py_ssize_t len = PyList_Size(obj);
+    PyObject* item = nullptr;
+    for (Py_ssize_t i = 0; i < len; i++) {
+      item = PyList_GetItem(obj, i);
+      if (PyObject_CheckLongOrConvertToLong(&item)) {
+        result.emplace_back(PyLong_AsSize_t(item));
+      } else {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "argument (position %d) must be "
+            "list of int, but got %s at pos %d",
+            arg_pos + 1,
+            reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name, i));
+      }
+    }
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "list, but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+  return result;
+}
+
+std::vector<std::vector<size_t>> CastPyArg2VectorOfVectorOfSize_t(
+    PyObject* obj, size_t arg_pos) {
+  std::vector<std::vector<size_t>> result;
+  if (PyList_Check(obj)) {
+    Py_ssize_t len = PyList_Size(obj);
+    PyObject* item = nullptr;
+    for (Py_ssize_t i = 0; i < len; i++) {
+      item = PyList_GetItem(obj, i);
+      result.emplace_back(CastPyArg2VectorOfSize_t(item, arg_pos));
+    }
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "list but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+  return result;
+}
+
 platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
   platform::Place place;
   if (PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(g_place_pytype))) {
@@ -432,10 +488,10 @@ PyObject* ToPyObject(int value) { return PyLong_FromLong(value); }

 PyObject* ToPyObject(uint32_t value) { return PyLong_FromUnsignedLong(value); }

-PyObject* ToPyObject(size_t value) { return PyLong_FromLong(value); }
-
 PyObject* ToPyObject(int64_t value) { return PyLong_FromLongLong(value); }

+PyObject* ToPyObject(size_t value) { return PyLong_FromSize_t(value); }
+
 PyObject* ToPyObject(float value) { return PyLong_FromDouble(value); }

 PyObject* ToPyObject(double value) { return PyLong_FromDouble(value); }
@@ -508,6 +564,16 @@ PyObject* ToPyObject(const std::vector<int64_t>& value) {
   return result;
 }

+PyObject* ToPyObject(const std::vector<size_t>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyList_SET_ITEM(result, (Py_ssize_t)i, ToPyObject(value[i]));
+  }
+
+  return result;
+}
+
 PyObject* ToPyObject(const std::vector<float>& value) {
   PyObject* result = PyList_New((Py_ssize_t)value.size());

@@ -528,6 +594,16 @@ PyObject* ToPyObject(const std::vector<double>& value) {
   return result;
 }

+PyObject* ToPyObject(const std::vector<std::vector<size_t>>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyList_SET_ITEM(result, static_cast<Py_ssize_t>(i), ToPyObject(value[i]));
+  }
+
+  return result;
+}
+
 PyObject* ToPyObject(const std::vector<paddle::experimental::Tensor>& value,
                      bool return_py_none_if_not_initialize) {
   PyObject* result = PyList_New((Py_ssize_t)value.size());
......
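One substantive fix hidden in the hunk above: ToPyObject(size_t) previously went through PyLong_FromLong, which silently truncates on LLP64 platforms (such as 64-bit Windows) where C long is 32 bits but size_t is 64 bits; PyLong_FromSize_t preserves the full value. A quick illustration of the range at stake, in plain Python with an arbitrary example value:

    data_size = 2 ** 40                  # ~1 TiB, a plausible large byte count
    truncated = data_size & 0xFFFFFFFF   # what a 32-bit long conversion keeps
    assert truncated != data_size        # the high bits are lost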
@@ -36,6 +36,7 @@ bool PyObject_CheckStr(PyObject* obj);
 bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos);
 int CastPyArg2AttrInt(PyObject* obj, ssize_t arg_pos);
 int64_t CastPyArg2AttrLong(PyObject* obj, ssize_t arg_pos);
+size_t CastPyArg2AttrSize_t(PyObject* obj, ssize_t arg_pos);
 float CastPyArg2AttrFloat(PyObject* obj, ssize_t arg_pos);
 std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos);
 paddle::CustomOpKernelContext CastPyArg2CustomOpKernelContext(PyObject* obj,
@@ -50,14 +51,17 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos);
 std::vector<framework::LoDTensor> CastPyArg2VectorOfTensorBase(PyObject* obj,
                                                                ssize_t arg_pos);
 std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos);
+std::vector<size_t> CastPyArg2VectorOfSize_t(PyObject* obj, size_t arg_pos);
+std::vector<std::vector<size_t>> CastPyArg2VectorOfVectorOfSize_t(
+    PyObject* obj, size_t arg_pos);
 framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
                                                     ssize_t arg_pos);

 PyObject* ToPyObject(int value);
 PyObject* ToPyObject(uint32_t value);
-PyObject* ToPyObject(size_t value);
 PyObject* ToPyObject(bool value);
 PyObject* ToPyObject(int64_t value);
+PyObject* ToPyObject(size_t value);
 PyObject* ToPyObject(float value);
 PyObject* ToPyObject(double value);
 PyObject* ToPyObject(const char* value);
@@ -69,8 +73,10 @@ PyObject* ToPyObject(const paddle::experimental::Tensor& value,
 PyObject* ToPyObject(const std::vector<bool>& value);
 PyObject* ToPyObject(const std::vector<int>& value);
 PyObject* ToPyObject(const std::vector<int64_t>& value);
+PyObject* ToPyObject(const std::vector<size_t>& value);
 PyObject* ToPyObject(const std::vector<float>& value);
 PyObject* ToPyObject(const std::vector<double>& value);
+PyObject* ToPyObject(const std::vector<std::vector<size_t>>& value);
 PyObject* ToPyObject(const std::vector<paddle::experimental::Tensor>& value,
                      bool return_py_none_if_not_initialize = false);
 PyObject* ToPyObject(const platform::Place& value);
......
@@ -241,6 +241,8 @@ std::map<std::string, std::set<std::string>> op_passing_outs_map = {
     {"run_program", {"Out", "DOut", "OutScope"}},
     {"clear_float_status", {"FloatStatusOut"}},
     {"get_float_status", {"FloatStatusOut"}},
+    {"assign", {"Out"}},
+    {"assign_value", {"Out"}},
 };

 // NOTE(pangyoki): Tensor View Strategy.
......
@@ -57,7 +57,7 @@ def default_collate_fn(batch):
     if isinstance(sample, np.ndarray):
         batch = np.stack(batch, axis=0)
         return batch
-    elif isinstance(sample, paddle.Tensor):
+    elif isinstance(sample, (paddle.Tensor, core.eager.Tensor)):
         return layers.stack(batch, axis=0)
     elif isinstance(sample, numbers.Number):
         batch = np.array(batch)
@@ -99,7 +99,7 @@ def default_convert_fn(batch):
         Batched data: batched each number, numpy array and paddle.Tensor
         in input data.
     """
-    if isinstance(batch, (paddle.Tensor, np.ndarray)):
+    if isinstance(batch, (paddle.Tensor, np.ndarray, core.eager.Tensor)):
        return batch
     elif isinstance(batch, (str, bytes)):
         return batch
......
@@ -229,7 +229,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
             # pack as LoDTensorArray
             array = core.LoDTensorArray()
             for slot in batch:
-                if isinstance(slot, paddle.Tensor):
+                if isinstance(slot, (paddle.Tensor, core.eager.Tensor)):
                     slot = slot.value().get_tensor()
                 elif not isinstance(slot, core.LoDTensor):
                     tmp = core.LoDTensor()
@@ -543,7 +543,8 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
                     # LoDTensor not in shared memory is not
                     # serializable, cannot be create in workers
                     for slot in batch:
-                        if isinstance(slot, paddle.Tensor):
+                        if isinstance(slot, (paddle.Tensor,
+                                             core.eager.Tensor)):
                             slot = slot.value().get_tensor()
                         elif not isinstance(slot, core.LoDTensor):
                             tmp = core.LoDTensor()
......
@@ -36,7 +36,8 @@ def _flatten_batch(batch):
     def _flatten(batch, flat_batch, structure, field_idx):
         if isinstance(batch, Sequence):
             for field in batch:
-                if isinstance(field, (np.ndarray, paddle.Tensor)):
+                if isinstance(field, (np.ndarray, paddle.Tensor,
+                                      paddle.fluid.core.eager.Tensor)):
                     structure.append('{}{}'.format(FIELD_PREFIX, field_idx))
                     flat_batch.append(field)
                     field_idx += 1
@@ -54,7 +55,8 @@ def _flatten_batch(batch):
                     structure.append(field)
         elif isinstance(batch, Mapping):
             for k, field in batch.items():
-                if isinstance(field, (np.ndarray, paddle.Tensor)):
+                if isinstance(field, (np.ndarray, paddle.Tensor,
+                                      paddle.fluid.core.eager.Tensor)):
                     structure[k] = '{}{}'.format(FIELD_PREFIX, field_idx)
                     flat_batch.append(field)
                     field_idx += 1
......
@@ -876,9 +876,9 @@ class BilinearInitializer(Initializer):
             raise ValueError("The size of input is too big. ")

         if framework._non_static_mode():
-            out_var = _C_ops.assign_value('shape',
-                                          list(shape), 'dtype', out_dtype,
-                                          value_name, values)
+            _C_ops.assign_value(out_var, 'shape',
+                                list(shape), 'dtype', out_dtype, value_name,
+                                values)
             if var.dtype in [
                     VarDesc.VarType.FP16, VarDesc.VarType.BF16,
                     VarDesc.VarType.FP64
@@ -985,9 +985,9 @@ class NumpyArrayInitializer(Initializer):
                 "saving it to file and 'load_op' to load it")

         if framework._non_static_mode():
-            out_var = _C_ops.assign_value('shape',
-                                          list(self._value.shape), 'dtype',
-                                          out_dtype, value_name, values)
+            _C_ops.assign_value(out_var, 'shape',
+                                list(self._value.shape), 'dtype', out_dtype,
+                                value_name, values)
             if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                 var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                       'out_dtype', var.dtype)
......
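The two initializer hunks above move assign_value to the out-parameter convention enabled by the op_passing_outs_map entries earlier in this diff: the caller now constructs the output variable and passes it as the first argument instead of receiving a fresh one from the call. A minimal before/after sketch (names follow the surrounding code; out_var, out_dtype, value_name and values are assumed to be set up as in the original functions):

    # before: the op created and returned the output
    # out_var = _C_ops.assign_value('shape', list(shape), 'dtype', out_dtype,
    #                               value_name, values)

    # after: the caller supplies out_var and the op fills it in place
    _C_ops.assign_value(out_var, 'shape', list(shape), 'dtype', out_dtype,
                        value_name, values)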
@@ -22,10 +22,11 @@ import paddle
 import paddle.vision.transforms as transforms
 import paddle.fluid as fluid
 from paddle.io import *
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph


 class TestDatasetAbstract(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         dataset = Dataset()
         try:
             d = dataset[0]
@@ -39,6 +40,11 @@ class TestDatasetAbstract(unittest.TestCase):
         except NotImplementedError:
             pass

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class TestDatasetWithDiffOutputPlace(unittest.TestCase):
     def get_dataloader(self, num_workers):
@@ -60,7 +66,7 @@ class TestDatasetWithDiffOutputPlace(unittest.TestCase):
             self.assertTrue(label.place.is_cpu_place())
             break

-    def test_single_process(self):
+    def func_test_single_process(self):
         self.run_check_on_cpu()
         if paddle.is_compiled_with_cuda():
             # Get (image, label) tuple from MNIST dataset
@@ -72,7 +78,12 @@ class TestDatasetWithDiffOutputPlace(unittest.TestCase):
                 self.assertTrue(label.place.is_cuda_pinned_place())
                 break

-    def test_multi_process(self):
+    def test_single_process(self):
+        with _test_eager_guard():
+            self.func_test_single_process()
+        self.func_test_single_process()
+
+    def func_test_multi_process(self):
         # DataLoader with multi-process mode is not supported on MacOs and Windows currently
         if sys.platform != 'darwin' and sys.platform != 'win32':
             self.run_check_on_cpu()
@@ -86,6 +97,11 @@ class TestDatasetWithDiffOutputPlace(unittest.TestCase):
                 self.assertTrue(label.place.is_cuda_pinned_place())
                 break

+    def test_multi_process(self):
+        with _test_eager_guard():
+            self.func_test_multi_process()
+        self.func_test_multi_process()
+

 if __name__ == '__main__':
     unittest.main()
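The same mechanical transformation recurs in every test file below, so it is worth stating once: each original test_xxx body is renamed to func_test_xxx, and a new test_xxx wrapper runs the body twice, first under _test_eager_guard() (eager mode) and then again in legacy dygraph mode. The generic pattern, with an illustrative method name:

    def test_case(self):
        with _test_eager_guard():
            self.func_test_case()  # first pass: eager mode
        self.func_test_case()      # second pass: legacy dygraph mode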
@@ -22,6 +22,7 @@ from paddle.fluid.op import Operator
 import paddle.fluid as fluid
 import paddle
 import paddle.nn as nn
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph

 LOOKAHEAD_K = 5
 LOOKAHEAD_ALPHA = 0.2
@@ -68,7 +69,7 @@ class TestLookAhead(unittest.TestCase):
                     slow_param.all(), latest_b.all(), delta=5e-3)
                 fast_param = latest_b - SGD_LR * b_grad

-    def test_look_ahead_dygraph(self):
+    def func_test_look_ahead_dygraph(self):
         BATCH_SIZE = 16
         BATCH_NUM = 4
         EPOCH_NUM = 4
@@ -142,6 +143,11 @@ class TestLookAhead(unittest.TestCase):
         train(layer, loader, loss_fn, lookahead)

+    def test_look_ahead_dygraph(self):
+        with _test_eager_guard():
+            self.func_test_look_ahead_dygraph()
+        self.func_test_look_ahead_dygraph()
+

 if __name__ == "__main__":
     unittest.main()
@@ -19,6 +19,7 @@ import paddle
 import paddle.fluid as fluid
 import numpy as np
 import inspect
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph


 class TestMathOpPatchesVarBase(unittest.TestCase):
@@ -26,7 +27,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
         self.shape = [10, 1024]
         self.dtype = np.float32

-    def test_add(self):
+    def func_test_add(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -35,7 +36,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a + b
             self.assertTrue(np.array_equal(res.numpy(), a_np + b_np))

-    def test_sub(self):
+    def test_add(self):
+        with _test_eager_guard():
+            self.func_test_add()
+        self.func_test_add()
+
+    def func_test_sub(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -44,7 +50,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a - b
             self.assertTrue(np.array_equal(res.numpy(), a_np - b_np))

-    def test_mul(self):
+    def test_sub(self):
+        with _test_eager_guard():
+            self.func_test_sub()
+        self.func_test_sub()
+
+    def func_test_mul(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -53,7 +64,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a * b
             self.assertTrue(np.array_equal(res.numpy(), a_np * b_np))

-    def test_div(self):
+    def test_mul(self):
+        with _test_eager_guard():
+            self.func_test_mul()
+        self.func_test_mul()
+
+    def func_test_div(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -63,7 +79,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             #NOTE: Not sure why array_equal fails on windows, allclose is acceptable
             self.assertTrue(np.allclose(res.numpy(), a_np / b_np))

-    def test_add_scalar(self):
+    def test_div(self):
+        with _test_eager_guard():
+            self.func_test_div()
+        self.func_test_div()
+
+    def func_test_add_scalar(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
@@ -71,7 +92,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a + b
             self.assertTrue(np.array_equal(res.numpy(), a_np + b))

-    def test_add_scalar_reverse(self):
+    def test_add_scalar(self):
+        with _test_eager_guard():
+            self.func_test_add_scalar()
+        self.func_test_add_scalar()
+
+    def func_test_add_scalar_reverse(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
@@ -79,7 +105,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = b + a
             self.assertTrue(np.array_equal(res.numpy(), b + a_np))

-    def test_sub_scalar(self):
+    def test_add_scalar_reverse(self):
+        with _test_eager_guard():
+            self.func_test_add_scalar_reverse()
+        self.func_test_add_scalar_reverse()
+
+    def func_test_sub_scalar(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
@@ -87,7 +118,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a - b
             self.assertTrue(np.array_equal(res.numpy(), a_np - b))

-    def test_sub_scalar_reverse(self):
+    def test_sub_scalar(self):
+        with _test_eager_guard():
+            self.func_test_sub_scalar()
+        self.func_test_sub_scalar()
+
+    def func_test_sub_scalar_reverse(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
@@ -95,7 +131,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = b - a
             self.assertTrue(np.array_equal(res.numpy(), b - a_np))

-    def test_mul_scalar(self):
+    def test_sub_scalar_reverse(self):
+        with _test_eager_guard():
+            self.func_test_sub_scalar_reverse()
+        self.func_test_sub_scalar_reverse()
+
+    def func_test_mul_scalar(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
@@ -103,8 +144,13 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a * b
             self.assertTrue(np.array_equal(res.numpy(), a_np * b))

+    def test_mul_scalar(self):
+        with _test_eager_guard():
+            self.func_test_mul_scalar()
+        self.func_test_mul_scalar()
+
     # div_scalar, not equal
-    def test_div_scalar(self):
+    def func_test_div_scalar(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
@@ -112,8 +158,13 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a / b
             self.assertTrue(np.allclose(res.numpy(), a_np / b))

+    def test_div_scalar(self):
+        with _test_eager_guard():
+            self.func_test_div_scalar()
+        self.func_test_div_scalar()
+
     # pow of float type, not equal
-    def test_pow(self):
+    def func_test_pow(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -122,7 +173,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a**b
             self.assertTrue(np.allclose(res.numpy(), a_np**b_np))

-    def test_floor_div(self):
+    def test_pow(self):
+        with _test_eager_guard():
+            self.func_test_pow()
+        self.func_test_pow()
+
+    def func_test_floor_div(self):
         a_np = np.random.randint(1, 100, size=self.shape)
         b_np = np.random.randint(1, 100, size=self.shape)
         with fluid.dygraph.guard():
@@ -131,7 +187,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a // b
             self.assertTrue(np.array_equal(res.numpy(), a_np // b_np))

-    def test_mod(self):
+    def test_floor_div(self):
+        with _test_eager_guard():
+            self.func_test_floor_div()
+        self.func_test_floor_div()
+
+    def func_test_mod(self):
         a_np = np.random.randint(1, 100, size=self.shape)
         b_np = np.random.randint(1, 100, size=self.shape)
         with fluid.dygraph.guard():
@@ -140,8 +201,13 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a % b
             self.assertTrue(np.array_equal(res.numpy(), a_np % b_np))

+    def test_mod(self):
+        with _test_eager_guard():
+            self.func_test_mod()
+        self.func_test_mod()
+
     # for bitwise and/or/xor/not
-    def test_bitwise(self):
+    def func_test_bitwise(self):
         paddle.disable_static()

         x_np = np.random.randint(-100, 100, [2, 3, 5])
@@ -165,8 +231,13 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             out = ~x
             self.assertTrue(np.array_equal(out.numpy(), out_np))

+    def test_bitwise(self):
+        with _test_eager_guard():
+            self.func_test_bitwise()
+        self.func_test_bitwise()
+
     # for logical compare
-    def test_equal(self):
+    def func_test_equal(self):
         a_np = np.asarray([1, 2, 3, 4, 5])
         b_np = np.asarray([1, 2, 3, 4, 5])
         c_np = np.asarray([1, 2, 2, 4, 5])
@@ -179,7 +250,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             self.assertTrue(np.array_equal(res1.numpy(), a_np == b_np))
             self.assertTrue(np.array_equal(res2.numpy(), a_np == c_np))

-    def test_not_equal(self):
+    def test_equal(self):
+        with _test_eager_guard():
+            self.func_test_equal()
+        self.func_test_equal()
+
+    def func_test_not_equal(self):
         a_np = np.asarray([1, 2, 3, 4, 5])
         b_np = np.asarray([1, 2, 3, 4, 5])
         c_np = np.asarray([1, 2, 2, 4, 5])
@@ -192,7 +268,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             self.assertTrue(np.array_equal(res1.numpy(), a_np != b_np))
             self.assertTrue(np.array_equal(res2.numpy(), a_np != c_np))

-    def test_less_than(self):
+    def test_not_equal(self):
+        with _test_eager_guard():
+            self.func_test_not_equal()
+        self.func_test_not_equal()
+
+    def func_test_less_than(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -201,7 +282,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = (a < b)
             self.assertTrue(np.array_equal(res.numpy(), a_np < b_np))

-    def test_less_equal(self):
+    def test_less_than(self):
+        with _test_eager_guard():
+            self.func_test_less_than()
+        self.func_test_less_than()
+
+    def func_test_less_equal(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -210,7 +296,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = (a <= b)
             self.assertTrue(np.array_equal(res.numpy(), a_np <= b_np))

-    def test_greater_than(self):
+    def test_less_equal(self):
+        with _test_eager_guard():
+            self.func_test_less_equal()
+        self.func_test_less_equal()
+
+    def func_test_greater_than(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -219,7 +310,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = (a > b)
             self.assertTrue(np.array_equal(res.numpy(), a_np > b_np))

-    def test_greater_equal(self):
+    def test_greater_than(self):
+        with _test_eager_guard():
+            self.func_test_greater_than()
+        self.func_test_greater_than()
+
+    def func_test_greater_equal(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         b_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -228,27 +324,47 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = (a >= b)
             self.assertTrue(np.array_equal(res.numpy(), a_np >= b_np))

-    def test_neg(self):
+    def test_greater_equal(self):
+        with _test_eager_guard():
+            self.func_test_greater_equal()
+        self.func_test_greater_equal()
+
+    def func_test_neg(self):
         a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
             res = -a
             self.assertTrue(np.array_equal(res.numpy(), -a_np))

-    def test_float_int_long(self):
+    def test_neg(self):
+        with _test_eager_guard():
+            self.func_test_neg()
+        self.func_test_neg()
+
+    def func_test_float_int_long(self):
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(np.array([100.1]))
             self.assertTrue(float(a) == 100.1)
             self.assertTrue(int(a) == 100)
             self.assertTrue(int(a) == 100)

-    def test_len(self):
+    def test_float_int_long(self):
+        with _test_eager_guard():
+            self.func_test_float_int_long()
+        self.func_test_float_int_long()
+
+    def func_test_len(self):
         a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
             self.assertTrue(len(a) == 10)

-    def test_index(self):
+    def test_len(self):
+        with _test_eager_guard():
+            self.func_test_len()
+        self.func_test_len()
+
+    def func_test_index(self):
         with fluid.dygraph.guard():
             var1 = fluid.dygraph.to_variable(np.array([2]))
             i_tmp = 0
@@ -260,7 +376,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             str1 = "just test"
             self.assertTrue(str1[var1] == 's')

-    def test_np_left_mul(self):
+    def test_index(self):
+        with _test_eager_guard():
+            self.func_test_index()
+        self.func_test_index()
+
+    def func_test_np_left_mul(self):
         with fluid.dygraph.guard():
             t = np.sqrt(2.0 * np.pi)
             x = fluid.layers.ones((2, 2), dtype="float32")
@@ -274,7 +395,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
                     rtol=1e-05,
                     atol=0.0))

-    def test_add_different_dtype(self):
+    def test_np_left_mul(self):
+        with _test_eager_guard():
+            self.func_test_np_left_mul()
+        self.func_test_np_left_mul()
+
+    def func_test_add_different_dtype(self):
         a_np = np.random.random(self.shape).astype(np.float32)
         b_np = np.random.random(self.shape).astype(np.float16)
         with fluid.dygraph.guard():
@@ -283,7 +409,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a + b
             self.assertTrue(np.array_equal(res.numpy(), a_np + b_np))

-    def test_floordiv_different_dtype(self):
+    def test_add_different_dtype(self):
+        with _test_eager_guard():
+            self.func_test_add_different_dtype()
+        self.func_test_add_different_dtype()
+
+    def func_test_floordiv_different_dtype(self):
         a_np = np.full(self.shape, 10, np.int64)
         b_np = np.full(self.shape, 2, np.int32)
         with fluid.dygraph.guard():
@@ -292,7 +423,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             res = a // b
             self.assertTrue(np.array_equal(res.numpy(), a_np // b_np))

-    def test_astype(self):
+    def test_floordiv_different_dtype(self):
+        with _test_eager_guard():
+            self.func_test_floordiv_different_dtype()
+        self.func_test_floordiv_different_dtype()
+
+    def func_test_astype(self):
         a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
@@ -306,7 +442,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
             self.assertTrue(np.array_equal(res1.numpy(), res3.numpy()))

-    def test_conpare_op_broadcast(self):
+    def test_astype(self):
+        with _test_eager_guard():
+            self.func_test_astype()
+        self.func_test_astype()
+
+    def func_test_conpare_op_broadcast(self):
         a_np = np.random.uniform(-1, 1, [10, 1, 10]).astype(self.dtype)
         b_np = np.random.uniform(-1, 1, [1, 1, 10]).astype(self.dtype)
         with fluid.dygraph.guard():
@@ -316,7 +457,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             self.assertEqual((a != b).dtype, fluid.core.VarDesc.VarType.BOOL)
             self.assertTrue(np.array_equal((a != b).numpy(), a_np != b_np))

-    def test_tensor_patch_method(self):
+    def test_conpare_op_broadcast(self):
+        with _test_eager_guard():
+            self.func_test_conpare_op_broadcast()
+        self.func_test_conpare_op_broadcast()
+
+    def func_test_tensor_patch_method(self):
         paddle.disable_static()

         x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
         y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
@@ -590,13 +736,23 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
         self.assertTrue(inspect.ismethod(a.std))
         self.assertTrue(inspect.ismethod(a.numel))

-    def test_complex_scalar(self):
+    def test_tensor_patch_method(self):
+        with _test_eager_guard():
+            self.func_test_tensor_patch_method()
+        self.func_test_tensor_patch_method()
+
+    def func_test_complex_scalar(self):
         a_np = np.random.random(self.shape).astype(self.dtype)
         with fluid.dygraph.guard():
             a = fluid.dygraph.to_variable(a_np)
             res = 1J * a
             self.assertTrue(np.array_equal(res.numpy(), 1J * a_np))

+    def test_complex_scalar(self):
+        with _test_eager_guard():
+            self.func_test_complex_scalar()
+        self.func_test_complex_scalar()
+

 if __name__ == '__main__':
     unittest.main()
@@ -21,6 +21,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.io import Dataset, IterableDataset, TensorDataset, \
     ComposeDataset, ChainDataset, DataLoader, random_split, Subset
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph

 IMAGE_SIZE = 32
@@ -76,21 +77,28 @@ class TestTensorDataset(unittest.TestCase):
             assert len(label) == 1
             assert input.shape == [1, 3, 4]
             assert label.shape == [1, 1]
-            assert isinstance(input, paddle.Tensor)
-            assert isinstance(label, paddle.Tensor)
+            assert isinstance(input,
+                              (fluid.core.VarBase, fluid.core.eager.Tensor))
+            assert isinstance(label,
+                              (fluid.core.VarBase, fluid.core.eager.Tensor))
             assert np.allclose(input.numpy(), input_np[i])
             assert np.allclose(label.numpy(), label_np[i])

-    def test_main(self):
+    def func_test_main(self):
         places = [paddle.CPUPlace()]
         if paddle.is_compiled_with_cuda():
             places.append(paddle.CUDAPlace(0))
         for p in places:
             self.run_main(num_workers=0, places=p)

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class TestComposeDataset(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         paddle.static.default_startup_program().random_seed = 1
         paddle.static.default_main_program().random_seed = 1
@@ -108,9 +116,14 @@ class TestComposeDataset(unittest.TestCase):
         assert np.allclose(input2, input2_t)
         assert np.allclose(label2, label2_t)

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class TestRandomSplitApi(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         paddle.static.default_startup_program().random_seed = 1
         paddle.static.default_main_program().random_seed = 1
@@ -129,9 +142,14 @@ class TestRandomSplitApi(unittest.TestCase):
         self.assertTrue(len(elements_list) == 0)

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class TestRandomSplitError(unittest.TestCase):
-    def test_errors(self):
+    def func_test_errors(self):
         paddle.static.default_startup_program().random_seed = 1
         paddle.static.default_main_program().random_seed = 1
@@ -139,6 +157,11 @@ class TestRandomSplitError(unittest.TestCase):
         self.assertRaises(ValueError, paddle.io.random_split, range(5), [8])
         self.assertRaises(ValueError, paddle.io.random_split, range(5), [])

+    def test_errors(self):
+        with _test_eager_guard():
+            self.func_test_errors()
+        self.func_test_errors()
+

 class TestSubsetDataset(unittest.TestCase):
     def run_main(self, num_workers, places):
@@ -173,8 +196,10 @@ class TestSubsetDataset(unittest.TestCase):
             assert len(label) == 1
             assert input.shape == [1, 3, 4]
             assert label.shape == [1, 1]
-            assert isinstance(input, paddle.Tensor)
-            assert isinstance(label, paddle.Tensor)
+            assert isinstance(input,
+                              (fluid.core.VarBase, fluid.core.eager.Tensor))
+            assert isinstance(label,
+                              (fluid.core.VarBase, fluid.core.eager.Tensor))

         elements_list = list()
         for _, (input, label) in enumerate(dataloader()):
@@ -192,7 +217,7 @@ class TestSubsetDataset(unittest.TestCase):
         self.assertEqual(odd_list, elements_list)

-    def test_main(self):
+    def func_test_main(self):
         paddle.static.default_startup_program().random_seed = 1
         paddle.static.default_main_program().random_seed = 1
@@ -202,6 +227,11 @@ class TestSubsetDataset(unittest.TestCase):
         for p in places:
             self.run_main(num_workers=0, places=p)

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class TestChainDataset(unittest.TestCase):
     def run_main(self, num_workers, places):
@@ -227,13 +257,18 @@ class TestChainDataset(unittest.TestCase):
             assert np.allclose(label, samples[idx][1])
             idx += 1

-    def test_main(self):
+    def func_test_main(self):
         places = [paddle.CPUPlace()]
         if paddle.is_compiled_with_cuda():
             places.append(paddle.CUDAPlace(0))
         for p in places:
             self.run_main(num_workers=0, places=p)

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
 class NumpyMixTensorDataset(Dataset):
     def __init__(self, sample_num):
@@ -269,8 +304,10 @@ class TestNumpyMixTensorDataset(TestTensorDataset):
             assert len(label) == 1
             assert input.shape == [1, IMAGE_SIZE]
             assert label.shape == [1, 1]
-            assert isinstance(input, paddle.Tensor)
-            assert isinstance(label, paddle.Tensor)
+            assert isinstance(input,
+                              (fluid.core.VarBase, fluid.core.eager.Tensor))
+            assert isinstance(label,
+                              (fluid.core.VarBase, fluid.core.eager.Tensor))


 class ComplextDataset(Dataset):
@@ -325,10 +362,15 @@ class TestComplextDataset(unittest.TestCase):
             assert data[4]['a'].shape == [2]
             assert data[4]['b'].shape == [2, 2]

-    def test_main(self):
+    def func_test_main(self):
         for num_workers in [0, 2]:
             self.run_main(num_workers)

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class SingleFieldDataset(Dataset):
     def __init__(self, sample_num):
@@ -360,13 +402,19 @@ class TestSingleFieldDataset(unittest.TestCase):
             drop_last=True)

         for i, data in enumerate(dataloader()):
-            assert isinstance(data, paddle.Tensor)
+            assert isinstance(data,
+                              (fluid.core.VarBase, fluid.core.eager.Tensor))
             assert data.shape == [2, 2, 3]

-    def test_main(self):
+    def func_test_main(self):
         for num_workers in [0, 2]:
             self.run_main(num_workers)

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class SingleFieldIterableDataset(IterableDataset):
     def __init__(self, sample_num):
@@ -390,12 +438,17 @@ class TestDataLoaderGenerateStates(unittest.TestCase):
                         [2834126987, 2358157858, 1860244682, 1437227251],
                         [457190280, 2660306227, 859341110, 354512857]]

-    def test_main(self):
+    def func_test_main(self):
         from paddle.fluid.dataloader.worker import _generate_states
         for inp, outp in zip(self.inputs, self.outputs):
             out = _generate_states(*inp)
             assert out == outp

+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+

 class TestDatasetWithDropLast(unittest.TestCase):
     def run_main(self, dataset, num_samples, batch_size):
@@ -413,14 +466,24 @@ class TestDatasetWithDropLast(unittest.TestCase):
                 datas.append(data)
             assert len(datas) == steps

-    def test_map_dataset(self):
+    def func_test_map_dataset(self):
         dataset = RandomDataset(10)
         self.run_main(dataset, 10, 3)

-    def test_iterable_dataset(self):
+    def test_map_dataset(self):
+        with _test_eager_guard():
+            self.func_test_map_dataset()
+        self.func_test_map_dataset()
+
+    def func_test_iterable_dataset(self):
         dataset = RandomIterableDataset(10)
         self.run_main(dataset, 10, 3)

+    def test_iterable_dataset(self):
+        with _test_eager_guard():
+            self.func_test_iterable_dataset()
+        self.func_test_iterable_dataset()
+

 if __name__ == '__main__':
     unittest.main()
@@ -19,6 +19,7 @@ import unittest
 import time
 import paddle
 import paddle.incubate.multiprocessing as mp
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph, in_dygraph_mode
 
 REPEAT = 20
 HAS_SHM_FILES = os.path.isdir('/dev/shm')
@@ -174,26 +175,54 @@ class TestMultiprocessingBase(unittest.TestCase):
 
 class TestMultiprocessingCpu(TestMultiprocessingBase):
-    def test_pass_tensor(self):
+    def func_test_pass_tensor(self):
+        if in_dygraph_mode():
+            return
         paddle.set_device("cpu")
         self._test_sharing(repeat=REPEAT)
 
-    def test_pass_parambase(self):
+    def test_pass_tensor(self):
+        with _test_eager_guard():
+            self.func_test_pass_tensor()
+        self.func_test_pass_tensor()
+
+    def func_test_pass_parambase(self):
+        if in_dygraph_mode():
+            return
         paddle.set_device("cpu")
         self._test_sharing(repeat=1, param=True)
 
-    def test_pass_empty(self):
+    def test_pass_parambase(self):
+        with _test_eager_guard():
+            self.func_test_pass_parambase()
+        self.func_test_pass_parambase()
+
+    def func_test_pass_empty(self):
+        if in_dygraph_mode():
+            return
         paddle.set_device("cpu")
         self._test_empty()
 
+    def test_pass_empty(self):
+        with _test_eager_guard():
+            self.func_test_pass_empty()
+        self.func_test_pass_empty()
+
 
 class TestMultiprocessingGpu(TestMultiprocessingBase):
     @unittest.skipIf(not paddle.fluid.core.is_compiled_with_cuda(),
                      "core is not compiled with CUDA")
-    def test_pass_tensor(self):
+    def func_test_pass_tensor(self):
+        if in_dygraph_mode():
+            return
         paddle.set_device("gpu")
         self._test_sharing(mp.get_context("spawn"), "gpu")
 
+    def test_pass_tensor(self):
+        with _test_eager_guard():
+            self.func_test_pass_tensor()
+        self.func_test_pass_tensor()
+
 
 if __name__ == "__main__":
     unittest.main()
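Note that these multiprocessing tests add one more gate than the dataset tests: `if in_dygraph_mode(): return` turns the body into a no-op during the eager pass, so the shared-memory sharing path is still exercised only in legacy dygraph while keeping the two-pass harness uniform across the suite.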
@@ -185,9 +185,10 @@ class Dirac(Initializer):
         if framework.in_dygraph_mode():
             with fluid.dygraph.no_grad():
-                tmp_tensor = _C_ops.assign_value('shape', [len(idx_list)],
-                                                 'dtype', VarDesc.VarType.INT64,
-                                                 'int64_values', idx_list)
+                tmp_tensor = framework._varbase_creator()
+                _C_ops.assign_value(tmp_tensor, 'shape', [len(idx_list)],
+                                    'dtype', VarDesc.VarType.INT64,
+                                    'int64_values', idx_list)
                 tmp_tensor._share_underline_tensor_to(index_tensor)
         else:
             block.append_op(
@@ -207,9 +208,10 @@ class Dirac(Initializer):
         if framework.in_dygraph_mode():
             with fluid.dygraph.no_grad():
-                tmp_tensor = _C_ops.assign_value('shape', [len(value_list)],
-                                                 'dtype', VarDesc.VarType.FP32,
-                                                 'fp32_values', value_list)
+                tmp_tensor = framework._varbase_creator()
+                _C_ops.assign_value(tmp_tensor, 'shape', [len(value_list)],
+                                    'dtype', VarDesc.VarType.FP32,
+                                    'fp32_values', value_list)
                 tmp_tensor._share_underline_tensor_to(value_tensor)
         else:
             block.append_op(
...
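Both hunks make the same mechanical change: `assign_value` no longer returns a fresh tensor; the caller now allocates the output `VarBase` up front and passes it as the first positional argument. A hedged sketch of the new calling convention (the two calls and their attribute triples come from the diff; the surrounding setup is illustrative and assumes legacy dygraph mode):

```python
import paddle
from paddle import _C_ops
from paddle.fluid import framework
from paddle.fluid.core import VarDesc

idx_list = [0, 4, 8]                                   # example payload
with paddle.fluid.dygraph.no_grad():
    tmp_tensor = framework._varbase_creator()          # output allocated by the caller
    _C_ops.assign_value(tmp_tensor, 'shape', [len(idx_list)],
                        'dtype', VarDesc.VarType.INT64,
                        'int64_values', idx_list)      # fills tmp_tensor in place
```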
@@ -1126,7 +1126,7 @@ def t(input, name=None):
     return out
 
-def cross(x, y, axis=None, name=None):
+def cross(x, y, axis=9, name=None):
     """
     Computes the cross product between two tensors along an axis.
@@ -1136,7 +1136,7 @@ def cross(x, y, axis=None, name=None):
     Args:
         x (Tensor): The first input tensor.
         y (Tensor): The second input tensor.
-        axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.
+        axis (int, optional): The axis along which to compute the cross product. It defaults to 9, which indicates using the first axis found with length 3.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
...
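The sentinel `9` replaces `None` so the default can be expressed as a plain `int` attribute (presumably required by the final-state operator signature); user-visible behavior is unchanged, since the sentinel still means "use the first axis with length 3". A usage sketch:

```python
import paddle

x = paddle.to_tensor([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]])
y = paddle.to_tensor([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]])

z1 = paddle.cross(x, y)          # default: first axis of length 3 (axis 0 here)
z2 = paddle.cross(x, y, axis=1)  # explicit axis still works as before
```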
@@ -282,8 +282,7 @@ def greater_than(x, y, name=None):
             print(result1) # result1 = [False False True]
     """
     if in_dygraph_mode():
-        axis = -1 # default value
-        return _C_ops.final_state_greater_than(x, y, axis)
+        return _C_ops.final_state_greater_than(x, y, -1)
     else:
         if _in_legacy_dygraph():
             return _C_ops.greater_than(x, y)
...
@@ -205,13 +205,17 @@ def _elementwise_op_in_dygraph(x,
     def is_inplace(op_name):
         return op_name[-1] == "_"
 
-    if in_dygraph_mode():
-        op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name)
-        out = op(x, y)
-    if _in_legacy_dygraph():
+    if op_name not in OP_NAMEMAPPING.keys():
         op = getattr(_C_ops, op_name)
         out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
+    else:
+        if in_dygraph_mode():
+            op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name)
+            out = op(x, y)
+        if _in_legacy_dygraph():
+            op = getattr(_C_ops, op_name)
+            out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
 
     return dygraph_utils._append_activation_in_dygraph(
         out, act, use_mkldnn=use_mkldnn)
...
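The reordering makes the `OP_NAMEMAPPING` lookup the first gate: an op with no final-state mapping always takes the legacy `_C_ops` path, and only mapped ops choose between the eager final-state kernel and the legacy one. A hedged sketch of the resulting dispatch (the mapping entry is an assumption for illustration; only the control flow mirrors the diff):

```python
OP_NAMEMAPPING = {'elementwise_add': 'final_state_add'}   # illustrative entry

def pick_op(op_name, eager_mode):
    """Return which kernel family the new control flow would pick."""
    if op_name not in OP_NAMEMAPPING:         # no final-state kernel registered
        return 'legacy:' + op_name
    if eager_mode:                            # in_dygraph_mode()
        return 'final_state:' + OP_NAMEMAPPING[op_name]
    return 'legacy:' + op_name                # _in_legacy_dygraph()

assert pick_op('elementwise_add', True) == 'final_state:final_state_add'
assert pick_op('elementwise_pow', False) == 'legacy:elementwise_pow'
```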
@@ -47,7 +47,7 @@ set_tests_properties(test_dataset_cifar PROPERTIES TIMEOUT 120)
 set_tests_properties(test_pretrained_model PROPERTIES TIMEOUT 120)
 set_tests_properties(test_model PROPERTIES TIMEOUT 300)
 set_tests_properties(test_dataset_movielens PROPERTIES TIMEOUT 120)
-set_tests_properties(test_datasets PROPERTIES TIMEOUT 150)
+set_tests_properties(test_datasets PROPERTIES TIMEOUT 300)
 set_tests_properties(test_dataset_wmt PROPERTIES TIMEOUT 120)
 set_tests_properties(test_vision_models PROPERTIES TIMEOUT 120)
 set_tests_properties(test_dataset_uci_housing PROPERTIES TIMEOUT 120)
...
@@ -22,6 +22,7 @@ import cv2
 import paddle.vision.transforms as T
 from paddle.vision.datasets import DatasetFolder, ImageFolder, MNIST, FashionMNIST, Flowers
 from paddle.dataset.common import _check_exists_and_download
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
 
 
 class TestFolderDatasets(unittest.TestCase):
@@ -39,7 +40,7 @@ class TestFolderDatasets(unittest.TestCase):
     def tearDown(self):
         shutil.rmtree(self.data_dir)
 
-    def test_dataset(self):
+    def func_test_dataset(self):
         dataset_folder = DatasetFolder(self.data_dir)
 
         for _ in dataset_folder:
@@ -52,7 +53,12 @@ class TestFolderDatasets(unittest.TestCase):
         for _ in dataset_folder:
             pass
 
-    def test_folder(self):
+    def test_dataset(self):
+        with _test_eager_guard():
+            self.func_test_dataset()
+        self.func_test_dataset()
+
+    def func_test_folder(self):
         loader = ImageFolder(self.data_dir)
 
         for _ in loader:
@@ -64,7 +70,12 @@ class TestFolderDatasets(unittest.TestCase):
 
         assert len(loader) == 4
 
-    def test_transform(self):
+    def test_folder(self):
+        with _test_eager_guard():
+            self.func_test_folder()
+        self.func_test_folder()
+
+    def func_test_transform(self):
         def fake_transform(img):
             return img
@@ -78,7 +89,12 @@ class TestFolderDatasets(unittest.TestCase):
         for _ in loader:
             pass
 
-    def test_errors(self):
+    def test_transform(self):
+        with _test_eager_guard():
+            self.func_test_transform()
+        self.func_test_transform()
+
+    def func_test_errors(self):
         with self.assertRaises(RuntimeError):
             ImageFolder(self.empty_dir)
         with self.assertRaises(RuntimeError):
@@ -87,9 +103,14 @@ class TestFolderDatasets(unittest.TestCase):
         with self.assertRaises(ValueError):
             _check_exists_and_download('temp_paddle', None, None, None, False)
 
+    def test_errors(self):
+        with _test_eager_guard():
+            self.func_test_errors()
+        self.func_test_errors()
+
 
 class TestMNISTTest(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         transform = T.Transpose()
         mnist = MNIST(mode='test', transform=transform)
         self.assertTrue(len(mnist) == 10000)
@@ -102,9 +123,14 @@ class TestMNISTTest(unittest.TestCase):
         self.assertTrue(label.shape[0] == 1)
         self.assertTrue(0 <= int(label) <= 9)
 
+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
 
 class TestMNISTTrain(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         transform = T.Transpose()
         mnist = MNIST(mode='train', transform=transform)
         self.assertTrue(len(mnist) == 60000)
@@ -133,9 +159,14 @@ class TestMNISTTrain(unittest.TestCase):
         with self.assertRaises(ValueError):
             mnist = MNIST(mode='train', transform=transform, backend=1)
 
+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
 
 class TestFASHIONMNISTTest(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         transform = T.Transpose()
         mnist = FashionMNIST(mode='test', transform=transform)
         self.assertTrue(len(mnist) == 10000)
@@ -148,9 +179,14 @@ class TestFASHIONMNISTTest(unittest.TestCase):
         self.assertTrue(label.shape[0] == 1)
         self.assertTrue(0 <= int(label) <= 9)
 
+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
 
 class TestFASHIONMNISTTrain(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         transform = T.Transpose()
         mnist = FashionMNIST(mode='train', transform=transform)
         self.assertTrue(len(mnist) == 60000)
@@ -179,16 +215,26 @@ class TestFASHIONMNISTTrain(unittest.TestCase):
         with self.assertRaises(ValueError):
             mnist = FashionMNIST(mode='train', transform=transform, backend=1)
 
-    def test_dataset_value(self):
+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
+    def func_test_dataset_value(self):
         fmnist = FashionMNIST(mode='train')
         value = np.mean([np.array(x[0]) for x in fmnist])
 
         # 72.94035223214286 was obtained from competitive products
         np.testing.assert_allclose(value, 72.94035223214286)
 
+    def test_dataset_value(self):
+        with _test_eager_guard():
+            self.func_test_dataset_value()
+        self.func_test_dataset_value()
+
 
 class TestFlowersTrain(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         flowers = Flowers(mode='train')
         self.assertTrue(len(flowers) == 6149)
@@ -201,9 +247,14 @@ class TestFlowersTrain(unittest.TestCase):
         self.assertTrue(image.shape[2] == 3)
         self.assertTrue(label.shape[0] == 1)
 
+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
 
 class TestFlowersValid(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         flowers = Flowers(mode='valid')
         self.assertTrue(len(flowers) == 1020)
@@ -216,9 +267,14 @@ class TestFlowersValid(unittest.TestCase):
         self.assertTrue(image.shape[2] == 3)
         self.assertTrue(label.shape[0] == 1)
 
+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
 
 class TestFlowersTest(unittest.TestCase):
-    def test_main(self):
+    def func_test_main(self):
         flowers = Flowers(mode='test')
         self.assertTrue(len(flowers) == 1020)
@@ -247,6 +303,11 @@ class TestFlowersTest(unittest.TestCase):
         with self.assertRaises(ValueError):
             flowers = Flowers(mode='test', backend=1)
 
+    def test_main(self):
+        with _test_eager_guard():
+            self.func_test_main()
+        self.func_test_main()
+
 
 if __name__ == '__main__':
     unittest.main()