diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc
index 2b4683f9e778593852029ec7e9ada7390e915e8b..9883a1940567fb5f5e6ce1eed7774c7d4a90dc9e 100644
--- a/paddle/fluid/framework/lod_tensor.cc
+++ b/paddle/fluid/framework/lod_tensor.cc
@@ -53,32 +53,8 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
 }
 
 std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  if (!platform::is_cpu_place(t.place())) {
-    LoDTensor cpu_tensor;
-    cpu_tensor.set_lod(t.lod());
-    framework::TensorCopy(t, platform::CPUPlace(), &cpu_tensor);
-    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
-    auto &dev_ctx = *pool.Get(t.place());
-    dev_ctx.Wait();
-
-    os << cpu_tensor;
-    return os;
-  }
-
-  os << "dim: " << t.dims() << "\n";
-  os << "lod: " << t.lod() << "\n";
-
-  // only print first ten elements
-  int64_t size = t.numel() < 10 ? t.numel() : 10;
-  for (int64_t i = 0; i < size; ++i) {
-    if (t.type() == proto::VarType::FP32) {
-      os << t.data<float>()[i] << " ";
-    } else if (t.type() == proto::VarType::INT64) {
-      os << t.data<int64_t>()[i] << " ";
-    } else {
-      PADDLE_THROW("LoDTensor data type not in [float, int64_t]");
-    }
-  }
+  os << "\tlod: " << t.lod() << "\n";
+  os << static_cast<const Tensor &>(t) << "\n";
 
   return os;
 }
diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc
index a7f09df4917532e7261cee471c711897c8eb3447..33ef3b91866f477910b105b15014854788a070d5 100644
--- a/paddle/fluid/framework/tensor_util.cc
+++ b/paddle/fluid/framework/tensor_util.cc
@@ -491,5 +491,51 @@ void TensorFromStream(std::istream& is, Tensor* tensor,
   }
 }
 
+template <typename T>
+std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
+  auto inspect = tensor.data<T>();
+  auto element_num = tensor.numel();
+
+  os << "\tdata: [";
+  if (element_num > 0) {
+    os << inspect[0];
+    for (int j = 1; j < element_num; ++j) {
+      os << " " << inspect[j];
+    }
+  }
+  os << "]";
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const Tensor& t) {
+  os << "\tdim: " << t.dims() << "\n";
+  os << "\tlayout: " << DataLayoutToString(t.layout()) << "\n";
+
+  Tensor tensor;
+  tensor.Resize(t.dims());
+  if (platform::is_cpu_place(t.place())) {
+    tensor.ShareDataWith(t);
+  } else {
+    platform::CPUPlace place;
+    framework::TensorCopy(t, place, &tensor);
+    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+    auto& dev_ctx = *pool.Get(t.place());
+    dev_ctx.Wait();
+  }
+
+#define PrintTensorCallback(cpp_type, proto_type) \
+  do {                                            \
+    if (tensor.type() == proto_type) {            \
+      os << "\tdtype: " << proto_type << "\n";    \
+      print_tensor<cpp_type>(os, tensor);         \
+      return os;                                  \
+    }                                             \
+  } while (0)
+
+  _ForEachDataType_(PrintTensorCallback);
+  VLOG(1) << "PrintVar: unrecognized data type:" << t.type();
+  return os;
+}
+
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h
index 1ffd357e62b4bdc72dbec627c463730aa2c8f720..e382f920399ad171d2aeafc30ac8a480fd97e608 100644
--- a/paddle/fluid/framework/tensor_util.h
+++ b/paddle/fluid/framework/tensor_util.h
@@ -151,5 +151,7 @@ void TensorToVector(const Tensor& src, std::vector<T>* dst) {
   memory::Copy(dst_place, dst_ptr,
               boost::get<platform::CPUPlace>(src.place()), src_ptr, size);
 }
+
+std::ostream& operator<<(std::ostream& os, const Tensor& t);
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/platform/lodtensor_printer.cc b/paddle/fluid/platform/lodtensor_printer.cc
index 07eaf42d2d3bc20e7f7dc56bb0f4e0cc2fbac5e3..33d0fe6268046b3bcbd4addc75fcf34c03d70bf9 100644
--- a/paddle/fluid/platform/lodtensor_printer.cc
+++ b/paddle/fluid/platform/lodtensor_printer.cc
@@ -20,24 +20,6 @@ limitations under the License. */
 namespace paddle {
 namespace platform {
 
-template <typename T>
-void print_lod_tensor(const std::string& var_name,
-                      const framework::LoDTensor& lod_tensor,
-                      const std::string& print_info) {
-  auto inspect = lod_tensor.data<T>();
-  auto element_num = lod_tensor.numel();
-
-  std::ostringstream sstream;
-  sstream << print_info << "\t";
-  sstream << var_name << "\t";
-  sstream << inspect[0];
-  for (int j = 1; j < element_num; ++j) {
-    sstream << " " << inspect[j];
-  }
-
-  std::cout << sstream.str() << std::endl;
-}
-
 void PrintVar(framework::Scope* scope, const std::string& var_name,
               const std::string& print_info) {
   framework::Variable* var = scope->FindVar(var_name);
@@ -52,26 +34,11 @@ void PrintVar(framework::Scope* scope, const std::string& var_name,
     return;
   }
 
-  framework::LoDTensor printed_tensor;
-  printed_tensor.set_lod(tensor->lod());
-  printed_tensor.Resize(tensor->dims());
-  if (platform::is_cpu_place(tensor->place())) {
-    printed_tensor.ShareDataWith(*tensor);
-  } else {
-    platform::CPUPlace place;
-    framework::TensorCopy(*tensor, place, &printed_tensor);
-  }
-
-#define PrintLoDTensorCallback(cpp_type, proto_type)                    \
-  do {                                                                  \
-    if (tensor->type() == proto_type) {                                 \
-      print_lod_tensor<cpp_type>(var_name, printed_tensor, print_info); \
-      return;                                                           \
-    }                                                                   \
-  } while (0)
-
-  _ForEachDataType_(PrintLoDTensorCallback);
-  VLOG(1) << "PrintVar: unrecognized data type:" << printed_tensor.type();
+  std::ostringstream sstream;
+  sstream << print_info << "\t";
+  sstream << var_name << "\t";
+  sstream << *tensor << "\t";
+  std::cout << sstream.str() << std::endl;
 }
 
 }  // end namespace platform
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index a7036246afdac9724dd72cc12dc0825195977378..859928e3724c5c6a71716bd3df6a7ccb6793097c 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -59,6 +59,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/imperative.h"
 #include "paddle/fluid/pybind/inference_api.h"
 #include "paddle/fluid/pybind/ir.h"
+
 #ifndef _WIN32
 #include "paddle/fluid/pybind/nccl_wrapper_py.h"
 #endif
@@ -391,7 +392,12 @@ PYBIND11_MODULE(core, m) {
       .def("_get_double_element", TensorGetElement<double>)
       .def("_place", [](Tensor &self) { return self.place(); })
       .def("_dtype", [](Tensor &self) { return self.type(); })
-      .def("__getitem__", PySliceTensor, py::return_value_policy::reference);
+      .def("__getitem__", PySliceTensor, py::return_value_policy::reference)
+      .def("__str__", [](const Tensor &self) {
+        std::stringstream ostr;
+        ostr << self;
+        return ostr.str();
+      });
 
   py::class_<LoDTensor, Tensor>(m, "LoDTensor", R"DOC(
     LoDTensor is a Tensor with optional LoD information.
@@ -610,7 +616,12 @@ PYBIND11_MODULE(core, m) {
 
         Returns:
             out (Tensor): new Tensor(NOT LoDTensor).
-        )DOC");
+        )DOC")
+      .def("__str__", [](const LoDTensor &self) {
+        std::stringstream ostr;
+        ostr << self;
+        return ostr.str();
+      });
 
   py::class_<SelectedRows>(m, "SelectedRows")
       .def("__init__",
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 3f160b71f44fcd140e92674e3447bdd97cb33579..0991ea7427b77862c2bc2090c91be7d127e4b3ab 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -564,8 +564,9 @@ class Variable(object):
         """
         if in_dygraph_mode():
             # TODO(panyx0718): add more dygraph debug info.
-            return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,
-                                                     self.shape)
+            return 'name %s, dtype: %s shape: %s %s' % (
+                self.name, self.dtype, self.shape,
+                str(self._ivar.value().get_tensor()))
 
         assert isinstance(throw_on_error, bool) and isinstance(with_details,
                                                                bool)
diff --git a/python/paddle/fluid/tests/test_lod_tensor.py b/python/paddle/fluid/tests/test_lod_tensor.py
index 722b5f07b04f9374db3f262f5134347fe753ba19..9bd343c103f15de728be9e2f6caa0d644f3cda0f 100644
--- a/python/paddle/fluid/tests/test_lod_tensor.py
+++ b/python/paddle/fluid/tests/test_lod_tensor.py
@@ -15,6 +15,7 @@
 from __future__ import print_function
 
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
 import numpy as np
 import unittest
@@ -96,6 +97,23 @@ class TestLoDTensor(unittest.TestCase):
                          recursive_seq_lens)
         self.assertEqual(tensor.shape(), [10, 1])
 
+    def test_print_lodtensor(self):
+        shape = [1]
+        recursive_seq_lens = [[2, 3, 5]]
+        dict_size = 100
+        low = 0
+        high = dict_size - 1
+        tensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                             fluid.CPUPlace(), low, high)
+        print(tensor)
+        self.assertTrue(isinstance(str(tensor), str))
+
+        if core.is_compiled_with_cuda():
+            gtensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                                  fluid.CUDAPlace(0), low, high)
+            print(gtensor)
+            self.assertTrue(isinstance(str(gtensor), str))
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py
index 3c974ea460c11a49b657b724bf521d1c16f3a189..4615511ed85441551ed3a5071a8cf1d0dfe32984 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor.py
@@ -236,6 +236,26 @@ class TestTensor(unittest.TestCase):
             place = core.CUDAPlace(0)
             self.run_sliece_tensor(place)
 
+    def test_print_tensor(self):
+        scope = core.Scope()
+        var = scope.var("test_tensor")
+        place = core.CPUPlace()
+        tensor = var.get_tensor()
+        tensor._set_dims([10, 10])
+        tensor._alloc_int(place)
+        tensor_array = numpy.array(tensor)
+        self.assertEqual((10, 10), tensor_array.shape)
+        tensor_array[0, 0] = 1
+        tensor_array[2, 2] = 2
+        tensor.set(tensor_array, place)
+        print(tensor)
+        self.assertTrue(isinstance(str(tensor), str))
+
+        if core.is_compiled_with_cuda():
+            tensor.set(tensor_array, core.CUDAPlace(0))
+            print(tensor)
+            self.assertTrue(isinstance(str(tensor), str))
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py
index 35e4af2d098dcb0a4ac63e2b65982bfc9dabf803..a6c43bb83736c2d740aae7f43e4f78ec17e413c5 100644
--- a/python/paddle/fluid/tests/unittests/test_variable.py
+++ b/python/paddle/fluid/tests/unittests/test_variable.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_
+from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, in_dygraph_mode
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import numpy as np
@@ -145,6 +145,24 @@ class TestVariable(unittest.TestCase):
         if core.is_compiled_with_cuda():
             self._test_slice(core.CUDAPlace(0))
 
+    def _tostring(self):
+        b = default_main_program().current_block()
+        w = b.create_var(dtype="float64", lod_level=0)
+        print(w)
+        self.assertTrue(isinstance(str(w), str))
+
+        if core.is_compiled_with_cuda():
+            wc = b.create_var(dtype="int", lod_level=0)
+            print(wc)
+            self.assertTrue(isinstance(str(wc), str))
+
+    def test_tostring(self):
+        with fluid.dygraph.guard():
+            self._tostring()
+
+        with fluid.program_guard(default_main_program()):
+            self._tostring()
+
 
 if __name__ == '__main__':
     unittest.main()
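
Note (not part of the patch): a minimal usage sketch of what the new `__str__` bindings surface from Python. It assumes a Paddle build of this era with the patch applied; `core.LoDTensor`, `tensor.set`, `set_recursive_sequence_lengths`, and `fluid.dygraph.to_variable` are existing APIs used here only for illustration, and the printed field names mirror the `operator<<` overloads added in `lod_tensor.cc` and `tensor_util.cc`.

```python
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core

# Printing a LoDTensor now routes through the C++ operator<< added in this
# patch, which reports lod, dim, layout, dtype, and the raw data buffer.
t = core.LoDTensor()
t.set(np.arange(6, dtype='float32').reshape(2, 3), core.CPUPlace())
t.set_recursive_sequence_lengths([[1, 1]])  # lengths sum to the first dim
print(t)
assert isinstance(str(t), str)

# In dygraph mode, Variable.__str__ now appends the values of the underlying
# tensor (via self._ivar.value().get_tensor()) after name/dtype/shape.
with fluid.dygraph.guard():
    v = fluid.dygraph.to_variable(np.ones([2, 2], dtype='float32'))
    print(v)
```

Two behavioral points worth noting about the new `operator<<`: a GPU-resident tensor is first copied to CPU and the device context is drained with `dev_ctx.Wait()`, so printing synchronizes the device; and every element is printed, whereas the old `lod_tensor.cc` printer truncated output to the first ten elements, so output can be verbose for large tensors.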