Unverified Commit 6724a652, authored by wopeizl, committed by GitHub

add __str__ method for tensor and lodtensor to support print test=develop (#17588)

* add __str__ method for tensor and lodtensor to support print test=develop
Parent 887a39f0
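This commit wires C++ `operator<<` overloads for `Tensor` and `LoDTensor` into pybind11 `__str__` bindings, making both types printable from Python. A minimal usage sketch of what the change enables, assuming the Paddle 1.x `fluid` API; the output layout described in the comments is illustrative:

```python
import numpy as np
import paddle.fluid as fluid

t = fluid.LoDTensor()
t.set(np.arange(6, dtype='float32').reshape(2, 3), fluid.CPUPlace())
t.set_recursive_sequence_lengths([[1, 1]])

print(t)     # prints lod, dim, layout, dtype and the data elements
s = str(t)   # __str__ returns the same text as a Python string
```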
@@ -53,32 +53,8 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
 }
 std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  if (!platform::is_cpu_place(t.place())) {
-    LoDTensor cpu_tensor;
-    cpu_tensor.set_lod(t.lod());
-    framework::TensorCopy(t, platform::CPUPlace(), &cpu_tensor);
-    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
-    auto &dev_ctx = *pool.Get(t.place());
-    dev_ctx.Wait();
-    os << cpu_tensor;
-    return os;
-  }
-  os << "dim: " << t.dims() << "\n";
-  os << "lod: " << t.lod() << "\n";
-  // only print first ten elements
-  int64_t size = t.numel() < 10 ? t.numel() : 10;
-  for (int64_t i = 0; i < size; ++i) {
-    if (t.type() == proto::VarType::FP32) {
-      os << t.data<float>()[i] << " ";
-    } else if (t.type() == proto::VarType::INT64) {
-      os << t.data<int64_t>()[i] << " ";
-    } else {
-      PADDLE_THROW("LoDTensor data type not in [float, int64_t]");
-    }
-  }
+  os << "\tlod: " << t.lod() << "\n";
+  os << static_cast<Tensor>(t) << "\n";
   return os;
 }
@@ -491,5 +491,51 @@ void TensorFromStream(std::istream& is, Tensor* tensor,
   }
 }
+template <typename T>
+std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
+  auto inspect = tensor.data<T>();
+  auto element_num = tensor.numel();
+  os << "\tdata: [";
+  if (element_num > 0) {
+    os << inspect[0];
+    for (int j = 1; j < element_num; ++j) {
+      os << " " << inspect[j];
+    }
+  }
+  os << "]";
+  return os;
+}
+std::ostream& operator<<(std::ostream& os, const Tensor& t) {
+  os << "\tdim: " << t.dims() << "\n";
+  os << "\tlayout: " << DataLayoutToString(t.layout()) << "\n";
+  Tensor tensor;
+  tensor.Resize(t.dims());
+  if (platform::is_cpu_place(t.place())) {
+    tensor.ShareDataWith(t);
+  } else {
+    platform::CPUPlace place;
+    framework::TensorCopy(t, place, &tensor);
+    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+    auto& dev_ctx = *pool.Get(t.place());
+    dev_ctx.Wait();
+  }
+#define PrintTensorCallback(cpp_type, proto_type) \
+  do {                                            \
+    if (tensor.type() == proto_type) {            \
+      os << "\tdtype: " << proto_type << "\n";    \
+      print_tensor<cpp_type>(os, tensor);         \
+      return os;                                  \
+    }                                             \
+  } while (0)
+  _ForEachDataType_(PrintTensorCallback);
+  VLOG(1) << "PrintVar: unrecognized data type:" << t.type();
+  return os;
+}
 }  // namespace framework
 }  // namespace paddle
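The `_ForEachDataType_(PrintTensorCallback)` expansion above dispatches on the tensor's runtime dtype, so every type registered in that macro list becomes printable, not just FP32/INT64 as in the old LoDTensor printer. A quick sketch from the Python side, assuming the 1.x `fluid` API where `fluid.Tensor` aliases `LoDTensor`:

```python
import numpy as np
import paddle.fluid as fluid

t = fluid.Tensor()
t.set(np.array([[1, 2], [3, 4]], dtype='int32'), fluid.CPUPlace())
# int32 data now prints; the old printer threw for dtypes other than fp32/int64
print(t)
```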
@@ -151,5 +151,7 @@ void TensorToVector(const Tensor& src, std::vector<T>* dst) {
   memory::Copy(dst_place, dst_ptr, boost::get<platform::CPUPlace>(src.place()),
                src_ptr, size);
 }
+std::ostream& operator<<(std::ostream& os, const Tensor& t);
 }  // namespace framework
 }  // namespace paddle
@@ -20,24 +20,6 @@ limitations under the License. */
 namespace paddle {
 namespace platform {
-template <typename T>
-void print_lod_tensor(const std::string& var_name,
-                      const framework::LoDTensor& lod_tensor,
-                      const std::string& print_info) {
-  auto inspect = lod_tensor.data<T>();
-  auto element_num = lod_tensor.numel();
-  std::ostringstream sstream;
-  sstream << print_info << "\t";
-  sstream << var_name << "\t";
-  sstream << inspect[0];
-  for (int j = 1; j < element_num; ++j) {
-    sstream << " " << inspect[j];
-  }
-  std::cout << sstream.str() << std::endl;
-}
 void PrintVar(framework::Scope* scope, const std::string& var_name,
               const std::string& print_info) {
   framework::Variable* var = scope->FindVar(var_name);
@@ -52,26 +34,11 @@ void PrintVar(framework::Scope* scope, const std::string& var_name,
     return;
   }
-  framework::LoDTensor printed_tensor;
-  printed_tensor.set_lod(tensor->lod());
-  printed_tensor.Resize(tensor->dims());
-  if (platform::is_cpu_place(tensor->place())) {
-    printed_tensor.ShareDataWith(*tensor);
-  } else {
-    platform::CPUPlace place;
-    framework::TensorCopy(*tensor, place, &printed_tensor);
-  }
-#define PrintLoDTensorCallback(cpp_type, proto_type)                    \
-  do {                                                                  \
-    if (tensor->type() == proto_type) {                                 \
-      print_lod_tensor<cpp_type>(var_name, printed_tensor, print_info); \
-      return;                                                           \
-    }                                                                   \
-  } while (0)
-  _ForEachDataType_(PrintLoDTensorCallback);
-  VLOG(1) << "PrintVar: unrecognized data type:" << printed_tensor.type();
+  std::ostringstream sstream;
+  sstream << print_info << "\t";
+  sstream << var_name << "\t";
+  sstream << *tensor << "\t";
+  std::cout << sstream.str() << std::endl;
 }
 }  // end namespace platform
@@ -59,6 +59,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/imperative.h"
 #include "paddle/fluid/pybind/inference_api.h"
 #include "paddle/fluid/pybind/ir.h"
 #ifndef _WIN32
 #include "paddle/fluid/pybind/nccl_wrapper_py.h"
 #endif
@@ -391,7 +392,12 @@ PYBIND11_MODULE(core, m) {
       .def("_get_double_element", TensorGetElement<double>)
       .def("_place", [](Tensor &self) { return self.place(); })
       .def("_dtype", [](Tensor &self) { return self.type(); })
-      .def("__getitem__", PySliceTensor, py::return_value_policy::reference);
+      .def("__getitem__", PySliceTensor, py::return_value_policy::reference)
+      .def("__str__", [](const Tensor &self) {
+        std::stringstream ostr;
+        ostr << self;
+        return ostr.str();
+      });
   py::class_<LoDTensor, Tensor>(m, "LoDTensor", R"DOC(
     LoDTensor is a Tensor with optional LoD information.
@@ -610,7 +616,12 @@ PYBIND11_MODULE(core, m) {
           Returns:
               out (Tensor): new Tensor(NOT LoDTensor).
-          )DOC");
+          )DOC")
+      .def("__str__", [](const LoDTensor &self) {
+        std::stringstream ostr;
+        ostr << self;
+        return ostr.str();
+      });
   py::class_<SelectedRows>(m, "SelectedRows")
       .def("__init__",
@@ -564,8 +564,9 @@ class Variable(object):
         """
         if in_dygraph_mode():
             # TODO(panyx0718): add more dygraph debug info.
-            return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,
-                                                     self.shape)
+            return 'name %s, dtype: %s shape: %s %s' % (
+                self.name, self.dtype, self.shape,
+                str(self._ivar.value().get_tensor()))
         assert isinstance(throw_on_error, bool) and isinstance(with_details,
                                                                bool)
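With the `framework.py` change above, a dygraph `Variable`'s debug string now appends `str` of its underlying tensor. A small sketch, assuming the 1.x dygraph API (`to_variable` wraps a numpy array); the output layout is illustrative:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    v = fluid.dygraph.to_variable(np.ones([2, 2], dtype='float32'))
    # prints: name, dtype, shape, then the tensor contents via the new __str__
    print(v)
```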
@@ -15,6 +15,7 @@
 from __future__ import print_function
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
 import numpy as np
 import unittest
@@ -96,6 +97,23 @@ class TestLoDTensor(unittest.TestCase):
                                    recursive_seq_lens)
         self.assertEqual(tensor.shape(), [10, 1])
+    def test_print_lodtensor(self):
+        shape = [1]
+        recursive_seq_lens = [[2, 3, 5]]
+        dict_size = 100
+        low = 0
+        high = dict_size - 1
+        tensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                             fluid.CPUPlace(), low, high)
+        print(tensor)
+        self.assertTrue(isinstance(str(tensor), str))
+        if core.is_compiled_with_cuda():
+            gtensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                                  fluid.CUDAPlace(0), low, high)
+            print(gtensor)
+            self.assertTrue(isinstance(str(gtensor), str))
 if __name__ == '__main__':
     unittest.main()
@@ -236,6 +236,26 @@ class TestTensor(unittest.TestCase):
             place = core.CUDAPlace(0)
             self.run_sliece_tensor(place)
+    def test_print_tensor(self):
+        scope = core.Scope()
+        var = scope.var("test_tensor")
+        place = core.CPUPlace()
+        tensor = var.get_tensor()
+        tensor._set_dims([10, 10])
+        tensor._alloc_int(place)
+        tensor_array = numpy.array(tensor)
+        self.assertEqual((10, 10), tensor_array.shape)
+        tensor_array[0, 0] = 1
+        tensor_array[2, 2] = 2
+        tensor.set(tensor_array, place)
+        print(tensor)
+        self.assertTrue(isinstance(str(tensor), str))
+        if core.is_compiled_with_cuda():
+            tensor.set(tensor_array, core.CUDAPlace(0))
+            print(tensor)
+            self.assertTrue(isinstance(str(tensor), str))
 if __name__ == '__main__':
     unittest.main()
@@ -15,7 +15,7 @@
 from __future__ import print_function
 import unittest
-from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_
+from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, in_dygraph_mode
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import numpy as np
@@ -145,6 +145,24 @@ class TestVariable(unittest.TestCase):
         if core.is_compiled_with_cuda():
             self._test_slice(core.CUDAPlace(0))
+    def _tostring(self):
+        b = default_main_program().current_block()
+        w = b.create_var(dtype="float64", lod_level=0)
+        print(w)
+        self.assertTrue(isinstance(str(w), str))
+        if core.is_compiled_with_cuda():
+            wc = b.create_var(dtype="int", lod_level=0)
+            print(wc)
+            self.assertTrue(isinstance(str(wc), str))
+    def test_tostring(self):
+        with fluid.dygraph.guard():
+            self._tostring()
+        with fluid.program_guard(default_main_program()):
+            self._tostring()
 if __name__ == '__main__':
     unittest.main()