Unverified commit b83ffda4, authored by Y yuyang18

Try hide APIs

Parent 5832e817
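This commit renames the Tensor methods exposed through pybind11 with a leading underscore, so they read as internal helpers rather than public Python API. For a caller of these bindings the change looks roughly like the sketch below (a minimal illustration based only on the renames in this diff; `tensor` stands for any fluid Tensor/LoDTensor object and `place` for a Place object):

# before this commit (public-looking names)
tensor.set_dims([1000, 784])
tensor.alloc_float(place)
value = tensor.get_float_element(0)

# after this commit (underscore-prefixed, treated as internal)
tensor._set_dims([1000, 784])
tensor._alloc_float(place)
value = tensor._get_float_element(0)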
@@ -78,37 +78,37 @@ PYBIND11_PLUGIN(core) {
 py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
 .def_buffer(
 [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
-.def("get_dims",
+.def("_get_dims",
 [](const Tensor &self) { return vectorize(self.dims()); })
-.def("set_dims",
+.def("_set_dims",
 [](Tensor &self, const std::vector<int64_t> &dim) {
 self.Resize(make_ddim(dim));
 })
-.def("set_layout",
+.def("_set_layout",
 [](Tensor &self, const std::string &layout) {
 self.set_layout(StringToDataLayout(layout));
 })
-.def("alloc_float",
+.def("_alloc_float",
 [](Tensor &self, paddle::platform::CUDAPlace &place) {
 self.mutable_data<float>(place);
 })
-.def("alloc_float",
+.def("_alloc_float",
 [](Tensor &self, paddle::platform::CPUPlace &place) {
 self.mutable_data<float>(place);
 })
-.def("alloc_int",
+.def("_alloc_int",
 [](Tensor &self, paddle::platform::CPUPlace &place) {
 self.mutable_data<int>(place);
 })
-.def("alloc_int",
+.def("_alloc_int",
 [](Tensor &self, paddle::platform::CUDAPlace &place) {
 self.mutable_data<int>(place);
 })
-.def("alloc_int",
+.def("_alloc_int",
 [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
 self.mutable_data<int>(place);
 })
-.def("alloc_float",
+.def("_alloc_float",
 [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
 self.mutable_data<float>(place);
 })
@@ -136,11 +136,11 @@ PYBIND11_PLUGIN(core) {
 .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
 #endif
 .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
-.def("set_float_element", TensorSetElement<float>)
-.def("get_float_element", TensorGetElement<float>)
-.def("set_double_element", TensorSetElement<double>)
-.def("get_double_element", TensorGetElement<double>)
-.def("dtype", [](Tensor &self) { return ToDataType(self.type()); });
+.def("_set_float_element", TensorSetElement<float>)
+.def("_get_float_element", TensorGetElement<float>)
+.def("_set_double_element", TensorSetElement<double>)
+.def("_get_double_element", TensorGetElement<double>)
+.def("_dtype", [](Tensor &self) { return ToDataType(self.type()); });
 py::class_<LoDTensor, Tensor>(m, "LoDTensor")
 .def_buffer(
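The hunks above only rename the bindings; their behavior is unchanged. A minimal usage sketch of the renamed methods from Python, assuming paddle.fluid.core is importable as core (everything other than the bound method names is illustrative):

import numpy
import paddle.fluid.core as core

t = core.LoDTensor()                     # LoDTensor inherits the Tensor bindings above
t._set_dims([2, 3])
t._alloc_float(core.CPUPlace())          # place-specific overloads pick CPU memory here
t._set_float_element(0, 1.5)             # flat-index element access
assert t._get_float_element(0) == 1.5
assert t._dtype() == core.VarDesc.VarType.FP32
arr = numpy.array(t)                     # built via the buffer protocol (def_buffer / CastToPyBuffer)
assert arr.shape == (2, 3)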
@@ -60,8 +60,8 @@ def get_numeric_gradient(place,
 return np.array(sum).mean()
 tensor_to_check = scope.find_var(input_to_check).get_tensor()
-tensor_size = product(tensor_to_check.get_dims())
-tensor_to_check_dtype = tensor_to_check.dtype()
+tensor_size = product(tensor_to_check.shape())
+tensor_to_check_dtype = tensor_to_check._dtype()
 if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
 tensor_to_check_dtype = np.float32
 elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
@@ -74,15 +74,15 @@ def get_numeric_gradient(place,
 def __get_elem__(tensor, i):
 if tensor_to_check_dtype == np.float32:
-return tensor.get_float_element(i)
+return tensor._get_float_element(i)
 else:
-return tensor.get_double_element(i)
+return tensor._get_double_element(i)
 def __set_elem__(tensor, i, e):
 if tensor_to_check_dtype == np.float32:
-tensor.set_float_element(i, e)
+tensor._set_float_element(i, e)
 else:
-tensor.set_double_element(i, e)
+tensor._set_double_element(i, e)
 # we only compute gradient of one element each time.
 # we use a for loop to compute the gradient of every element.
@@ -107,7 +107,7 @@ def get_numeric_gradient(place,
 __set_elem__(tensor_to_check, i, origin)
 gradient_flat[i] = (y_pos - y_neg) / delta / 2
-return gradient_flat.reshape(tensor_to_check.get_dims())
+return gradient_flat.reshape(tensor_to_check.shape())
 class OpTest(unittest.TestCase):
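The hunks above are pieces of get_numeric_gradient, which estimates gradients by central differences: perturb one element at a time through __set_elem__, re-evaluate the output, and divide the difference by 2 * delta. Below is a self-contained sketch of the same technique on a plain numpy array (it does not use the Tensor bindings; the function and variable names are illustrative, not the OpTest helper itself):

import numpy as np

def numeric_gradient(f, x, delta=0.005):
    # central difference: grad_i ~= (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta)
    x = np.array(x, dtype=np.float64)     # own contiguous copy, so ravel() below is a view
    flat = x.ravel()
    grad = np.zeros_like(flat)
    for i in range(flat.size):
        origin = flat[i]
        flat[i] = origin + delta
        y_pos = f(x)
        flat[i] = origin - delta
        y_neg = f(x)
        flat[i] = origin                  # restore the element, as the loop above does
        grad[i] = (y_pos - y_neg) / delta / 2
    return grad.reshape(x.shape)

# e.g. numeric_gradient(lambda a: float((a ** 2).sum()), [1.0, 2.0]) is close to [2.0, 4.0]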
@@ -125,7 +125,7 @@ class OpTest(unittest.TestCase):
 @classmethod
 def tearDownClass(cls):
-'''Restore random seeds'''
+"""Restore random seeds"""
 np.random.set_state(cls._np_rand_state)
 random.setstate(cls._py_rand_state)
@@ -26,7 +26,7 @@ class TestTensor(unittest.TestCase):
 tensor = var.get_tensor()
 tensor.set_dims([1000, 784])
-tensor.alloc_int(place)
+tensor._alloc_int(place)
 tensor_array = numpy.array(tensor)
 self.assertEqual((1000, 784), tensor_array.shape)
 tensor_array[3, 9] = 1
@@ -45,7 +45,7 @@ class TestTensor(unittest.TestCase):
 tensor = var.get_tensor()
 tensor.set_dims([1000, 784])
-tensor.alloc_float(place)
+tensor._alloc_float(place)
 tensor_array = numpy.array(tensor)
 self.assertEqual((1000, 784), tensor_array.shape)
@@ -64,7 +64,7 @@ class TestTensor(unittest.TestCase):
 lod_tensor = var_lod.get_tensor()
 lod_tensor.set_dims([4, 4, 6])
-lod_tensor.alloc_int(place)
+lod_tensor._alloc_int(place)
 array = numpy.array(lod_tensor)
 array[0, 0, 0] = 3
 array[3, 3, 5] = 10
@@ -85,7 +85,7 @@ class TestTensor(unittest.TestCase):
 lod_tensor = var_lod.get_tensor()
 lod_tensor.set_dims([5, 2, 3, 4])
-lod_tensor.alloc_float(place)
+lod_tensor._alloc_float(place)
 tensor_array = numpy.array(lod_tensor)
 self.assertEqual((5, 2, 3, 4), tensor_array.shape)
@@ -104,14 +104,13 @@ class TestTensor(unittest.TestCase):
 self.assertListEqual(lod_py, lod)
 def test_lod_tensor_init(self):
-scope = core.Scope()
 place = core.CPUPlace()
 lod_py = [[2, 1], [1, 2, 2]]
 lod_tensor = core.LoDTensor()
 lod_tensor.set_dims([5, 2, 3, 4])
 lod_tensor.set_recursive_sequence_lengths(lod_py)
-lod_tensor.alloc_float(place)
+lod_tensor._alloc_float(place)
 tensor_array = numpy.array(lod_tensor)
 tensor_array[0, 0, 0, 0] = 1.0
 tensor_array[0, 0, 0, 1] = 2.0
@@ -131,7 +130,7 @@ class TestTensor(unittest.TestCase):
 lod_tensor.set_dims([5, 2, 3, 4])
 lod_tensor.set_recursive_sequence_lengths(lod_py)
-lod_tensor.alloc_float(place)
+lod_tensor._alloc_float(place)
 tensor_array = numpy.array(lod_tensor)
 tensor_array[0, 0, 0, 0] = 1.0
 tensor_array[0, 0, 0, 1] = 2.0
@@ -150,14 +149,14 @@ class TestTensor(unittest.TestCase):
 tensor = var.get_tensor()
 tensor.set_dims([0, 1])
-tensor.alloc_float(place)
+tensor._alloc_float(place)
 tensor_array = numpy.array(tensor)
 self.assertEqual((0, 1), tensor_array.shape)
 if core.is_compiled_with_cuda():
 gpu_place = core.CUDAPlace(0)
-tensor.alloc_float(gpu_place)
+tensor._alloc_float(gpu_place)
 tensor_array = numpy.array(tensor)
 self.assertEqual((0, 1), tensor_array.shape)
@@ -75,7 +75,7 @@ def set_input(scope, op, inputs, place):
 if isinstance(var, tuple):
 tensor.set_recursive_sequence_lengths(var[1])
 var = var[0]
-tensor.set_dims(var.shape)
+tensor._set_dims(var.shape)
 tensor.set(var, place)
 elif isinstance(var, float):
 scope.find_var(var_name).set_float(var)
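set_input above feeds numpy inputs into scope variables; when an input is a (ndarray, recursive_sequence_lengths) tuple it also sets the LoD before copying the data. A minimal sketch of the same pattern for a single LoDTensor, assuming paddle.fluid.core is importable as core (the data and sequence lengths are made-up example values):

import numpy
import paddle.fluid.core as core

t = core.LoDTensor()
data = numpy.random.random((5, 1)).astype("float32")
t.set_recursive_sequence_lengths([[2, 3]])   # two sequences of length 2 and 3 (2 + 3 = 5 rows)
t._set_dims(data.shape)
t.set(data, core.CPUPlace())                 # copy the numpy values into the tensor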