diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index cc846a1d2f8902e499b6ab5bc1916e4d9d8a4b09..d8dc421bed711cfc1a149592c24b11c4ef115ec9
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -87,37 +87,37 @@ PYBIND11_PLUGIN(core) {
   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(
           [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
-      .def("get_dims",
+      .def("_get_dims",
            [](const Tensor &self) { return vectorize(self.dims()); })
-      .def("set_dims",
+      .def("_set_dims",
            [](Tensor &self, const std::vector<int> &dim) {
              self.Resize(make_ddim(dim));
            })
-      .def("set_layout",
+      .def("_set_layout",
            [](Tensor &self, const std::string &layout) {
              self.set_layout(StringToDataLayout(layout));
            })
-      .def("alloc_float",
+      .def("_alloc_float",
            [](Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<float>(place);
            })
-      .def("alloc_float",
+      .def("_alloc_float",
            [](Tensor &self, paddle::platform::CPUPlace &place) {
              self.mutable_data<float>(place);
            })
-      .def("alloc_int",
+      .def("_alloc_int",
            [](Tensor &self, paddle::platform::CPUPlace &place) {
              self.mutable_data<int>(place);
            })
-      .def("alloc_int",
+      .def("_alloc_int",
            [](Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<int>(place);
            })
-      .def("alloc_int",
+      .def("_alloc_int",
            [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
              self.mutable_data<int>(place);
            })
-      .def("alloc_float",
+      .def("_alloc_float",
            [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
              self.mutable_data<float>(place);
            })
@@ -145,11 +145,11 @@ PYBIND11_PLUGIN(core) {
       .def("set", PyCUDAPinnedTensorSetFromArray)
 #endif
       .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
-      .def("set_float_element", TensorSetElement<float>)
-      .def("get_float_element", TensorGetElement<float>)
-      .def("set_double_element", TensorSetElement<double>)
-      .def("get_double_element", TensorGetElement<double>)
-      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });
+      .def("_set_float_element", TensorSetElement<float>)
+      .def("_get_float_element", TensorGetElement<float>)
+      .def("_set_double_element", TensorSetElement<double>)
+      .def("_get_double_element", TensorGetElement<double>)
+      .def("_dtype", [](Tensor &self) { return ToDataType(self.type()); });

   py::class_<LoDTensor, Tensor>(m, "LoDTensor")
       .def_buffer(
diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index 18e2f3045e272fb4712391f87bffd3f367c1c744..2a8e3d410add466436524d8cc7714fce955af2b5 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -31,7 +31,7 @@ class BaseErrorClipAttr(object):
     def __str__(self):
         raise NotImplementedError()

-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         raise NotImplementedError()

@@ -67,7 +67,7 @@ class ErrorClipByValue(BaseErrorClipAttr):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)

-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         clip_op_desc = block.desc.append_op()
         clip_op_desc.set_type("clip")
         clip_op_desc.set_input("X", [grad_name])
@@ -90,17 +90,17 @@ def error_clip_callback(block, context):
                 "Variable's error_clip should be an instance of BaseErrorClipAttr or None."
             )
         if error_clip is not None:
-            error_clip.append_clip_op(block, grad_n)
+            error_clip._append_clip_op(block, grad_n)


 class BaseGradientClipAttr(object):
     def __str__(self):
         raise NotImplementedError()

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         raise NotImplementedError()

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         raise NotImplementedError()

@@ -108,10 +108,10 @@ class NullGradientClipAttr(BaseGradientClipAttr):
     def __str__(self):
         return "Null"

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         return param, grad

@@ -153,10 +153,10 @@ class GradientClipByValue(BaseGradientClipAttr):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip(x=grad, min=self.min, max=self.max)
         return param, new_grad

@@ -199,10 +199,10 @@ class GradientClipByNorm(BaseGradientClipAttr):
     def __str__(self):
         return "ByNorm, clip_norm=%f" % self.clip_norm

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip_by_norm(x=grad, max_norm=self.clip_norm)
         return param, new_grad

@@ -257,7 +257,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         return "ByGlobalNorm, group_name=%s, clip_norm=%f" % (self.group_name,
                                                               self.clip_norm)

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         if self.group_name not in context:
             context[self.group_name] = []
             context[self.group_name + "_clip_value"] = self.clip_norm
@@ -274,7 +274,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):

         self.context = context

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         group_scale_name = self.group_name + "_scale"
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])
@@ -336,12 +336,12 @@ def append_gradient_clip_ops(param_grad):
                     "clip attribute should be an instance of BaseGradientClipAttr"
                 )

-            clip_attr.process_context(context=context, param=p, grad=g)
+            clip_attr._process_context(context=context, param=p, grad=g)

     res = []
     for p, g in param_grad:
         with p.block.program.optimized_guard(p):
-            res.append(clip_attr.create_operators(param=p, grad=g))
+            res.append(clip_attr._create_operators(param=p, grad=g))

     return res
diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index 86efd1ff51cf29485ee28b4d60ffb1439af1aad9..de752d1daeb6bc725cf6eff1bb74a786e2ad6b95 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -68,11 +68,11 @@ class LayerHelper(object):
     @property
     def param_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('param_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('param_attr', None))

     @property
     def bias_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('bias_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('bias_attr', None))

     def multiple_param_attr(self, length):
         param_attr = self.param_attr
@@ -262,11 +262,11 @@ class LayerHelper(object):
             g_param = self.startup_program.global_block().create_parameter(
                 dtype=dtype,
                 shape=g_param_shape,
-                **g_param_attr.to_kwargs(with_initializer=False))
+                **g_param_attr._to_kwargs(with_initializer=False))
             v_param = self.startup_program.global_block().create_parameter(
                 dtype=dtype,
                 shape=v_param_shape,
-                **v_param_attr.to_kwargs(with_initializer=True))
+                **v_param_attr._to_kwargs(with_initializer=True))
             __norm_except_dim(
                 x=v_param,
                 out=g_param,
@@ -275,9 +275,9 @@ class LayerHelper(object):

             # Add weight normalization to main_program
             g_param = self.main_program.global_block().create_parameter(
-                dtype=dtype, shape=g_param_shape, **g_param_attr.to_kwargs())
+                dtype=dtype, shape=g_param_shape, **g_param_attr._to_kwargs())
             v_param = self.main_program.global_block().create_parameter(
-                dtype=dtype, shape=v_param_shape, **v_param_attr.to_kwargs())
+                dtype=dtype, shape=v_param_shape, **v_param_attr._to_kwargs())
             w_param = __weight_normalize(g_param, v_param, dim=attr.dim)
             return w_param
@@ -296,11 +296,11 @@ class LayerHelper(object):

         if default_initializer is None and attr.initializer is None:
             if is_bias:
-                attr.set_default_bias_initializer()
+                attr._set_default_bias_initializer()
             else:
-                attr.set_default_param_initializer()
+                attr._set_default_param_initializer()
         else:
-            attr.set_default_initializer(default_initializer)
+            attr._set_default_initializer(default_initializer)

         # If weight normalization is set, insert extra parameters and ops.
         # Refer to https://arxiv.org/pdf/1602.07868.pdf
@@ -310,9 +310,9 @@ class LayerHelper(object):
             return param

         self.startup_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True))
+            dtype=dtype, shape=shape, **attr._to_kwargs(with_initializer=True))
         return self.main_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs())
+            dtype=dtype, shape=shape, **attr._to_kwargs())

     def get_parameter(self, name):
         param = self.main_program.global_block().var(name)
diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py
index 0a42b9fca8dba7a11b414990be6c04c93158864f..4a61f85ec4b5c5108ded31632af75dbbdaaaba71 100644
--- a/python/paddle/fluid/param_attr.py
+++ b/python/paddle/fluid/param_attr.py
@@ -67,7 +67,7 @@ class ParamAttr(object):
         self.gradient_clip = gradient_clip
         self.model_average = do_model_average

-    def set_default_initializer(self, initializer):
+    def _set_default_initializer(self, initializer):
         """
         Set the default initializer, the initializer should be Constant,
         Uniform, Normal, Xavier, MSRA.
@@ -88,7 +88,7 @@ class ParamAttr(object):

         self.initializer = initializer

-    def set_default_param_initializer(self):
+    def _set_default_param_initializer(self):
         """
         Set the default initializer for the parameter with Xavier.

@@ -98,9 +98,9 @@ class ParamAttr(object):
         Returns:
             None.
         """
-        self.set_default_initializer(Xavier())
+        self._set_default_initializer(Xavier())

-    def set_default_bias_initializer(self):
+    def _set_default_bias_initializer(self):
         """
         Set the default initializer for the bias with Constant(0.0).

@@ -110,10 +110,10 @@ class ParamAttr(object):
         Returns:
             None.
         """
-        self.set_default_initializer(Constant(0.0))
+        self._set_default_initializer(Constant(0.0))

     @staticmethod
-    def to_attr(arg):
+    def _to_attr(arg):
         """
         Create ParamAttr[s].
@@ -131,7 +131,7 @@ class ParamAttr(object):
         if arg is None:
             return ParamAttr()
         elif isinstance(arg, list) or isinstance(arg, tuple):
-            return [ParamAttr.to_attr(a) for a in arg]
+            return [ParamAttr._to_attr(a) for a in arg]
         elif isinstance(arg, ParamAttr):
             return arg
         elif isinstance(arg, str) or isinstance(arg, unicode):
@@ -141,11 +141,11 @@ class ParamAttr(object):
         elif isinstance(arg, WeightDecayRegularizer):
             return ParamAttr(regularizer=arg)
         elif isinstance(arg, bool):
-            return ParamAttr.to_attr(None) if arg else False
+            return ParamAttr._to_attr(None) if arg else False
         else:
             raise TypeError("{0} cast to ParamAttr".format(type(arg)))

-    def to_kwargs(self, with_initializer=False):
+    def _to_kwargs(self, with_initializer=False):
         """
         Returns the attributes of this parameter.
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index dac474d5ee76590a75311d6bf2c4cb2fe85b6c40..53f35f5cc062b4da431be19e4484f316bb37be9f 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -15,10 +15,7 @@
 import framework
 from . import core

-__all__ = [
-    'append_regularization_ops', 'L1Decay', 'L2Decay', 'L1DecayRegularizer',
-    'L2DecayRegularizer'
-]
+__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']


 def append_regularization_ops(parameters_and_grads, regularization=None):
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index e056ef9952a519d6c4d580b27f1118a3a91f13af..6824ede82b74c4e9783682149db870a471c35079 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -60,8 +60,8 @@ def get_numeric_gradient(place,
         return np.array(sum).mean()

     tensor_to_check = scope.find_var(input_to_check).get_tensor()
-    tensor_size = product(tensor_to_check.get_dims())
-    tensor_to_check_dtype = tensor_to_check.dtype()
+    tensor_size = product(tensor_to_check.shape())
+    tensor_to_check_dtype = tensor_to_check._dtype()
     if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
         tensor_to_check_dtype = np.float32
     elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
@@ -74,15 +74,15 @@ def get_numeric_gradient(place,

     def __get_elem__(tensor, i):
         if tensor_to_check_dtype == np.float32:
-            return tensor.get_float_element(i)
+            return tensor._get_float_element(i)
         else:
-            return tensor.get_double_element(i)
+            return tensor._get_double_element(i)

     def __set_elem__(tensor, i, e):
         if tensor_to_check_dtype == np.float32:
-            tensor.set_float_element(i, e)
+            tensor._set_float_element(i, e)
         else:
-            tensor.set_double_element(i, e)
+            tensor._set_double_element(i, e)

     # we only compute gradient of one element each time.
     # we use a for loop to compute the gradient of every element.
@@ -107,7 +107,7 @@ def get_numeric_gradient(place,
         __set_elem__(tensor_to_check, i, origin)
         gradient_flat[i] = (y_pos - y_neg) / delta / 2

-    return gradient_flat.reshape(tensor_to_check.get_dims())
+    return gradient_flat.reshape(tensor_to_check.shape())


 class OpTest(unittest.TestCase):
@@ -125,7 +125,7 @@ class OpTest(unittest.TestCase):

     @classmethod
     def tearDownClass(cls):
-        '''Restore random seeds'''
+        """Restore random seeds"""
         np.random.set_state(cls._np_rand_state)
         random.setstate(cls._py_rand_state)
diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py
index a62ee9596d0f6c58135b4a13249b638e84e63c3c..fcb2612326e74cf6417aa93f2691154c79b5e44c 100644
--- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py
@@ -129,7 +129,6 @@ def create_or_get_tensor(scope, var_name, var, place):
     if var is not None:
         assert isinstance(var, np.ndarray)
         tensor.set_recursive_sequence_lengths([])
-        tensor.set_dims(var.shape)
         tensor.set(var, place)
     return tensor
diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py
index 92e718662dfd7998be3ede2994f160059679fa8a..31af1245720405ee067a0acf3575e3ae86372c13 100644
--- a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py
+++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py
@@ -65,10 +65,10 @@ class TestDyRnnStaticInput(unittest.TestCase):
         return self._lodtensor_to_ndarray(fetch_outs[0])

     def _lodtensor_to_ndarray(self, lod_tensor):
-        dims = lod_tensor.get_dims()
+        dims = lod_tensor.shape()
         ndarray = np.zeros(shape=dims).astype('float32')
         for i in xrange(np.product(dims)):
-            ndarray.ravel()[i] = lod_tensor.get_float_element(i)
+            ndarray.ravel()[i] = lod_tensor._get_float_element(i)
         return ndarray, lod_tensor.recursive_sequence_lengths()

     def build_graph(self, only_forward=False):
@@ -185,19 +185,19 @@ class TestDyRnnStaticInput(unittest.TestCase):

         actual_gradients, actual_lod = self.fetch_value(static_input_grad)

-        static_input_shape = self.static_input_tensor.get_dims()
+        static_input_shape = self.static_input_tensor.shape()
         numeric_gradients = np.zeros(shape=static_input_shape).astype('float32')
         # calculate numeric gradients
         tensor_size = np.product(static_input_shape)
         for i in xrange(tensor_size):
-            origin = self.static_input_tensor.get_float_element(i)
+            origin = self.static_input_tensor._get_float_element(i)
             x_pos = origin + self._delta
-            self.static_input_tensor.set_float_element(i, x_pos)
+            self.static_input_tensor._set_float_element(i, x_pos)
             y_pos = self.fetch_value(loss)[0][0]
             x_neg = origin - self._delta
-            self.static_input_tensor.set_float_element(i, x_neg)
+            self.static_input_tensor._set_float_element(i, x_neg)
             y_neg = self.fetch_value(loss)[0][0]
-            self.static_input_tensor.set_float_element(i, origin)
+            self.static_input_tensor._set_float_element(i, origin)
             numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2
         self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001))
         self.assertTrue(
diff --git a/python/paddle/fluid/tests/unittests/test_selected_rows.py b/python/paddle/fluid/tests/unittests/test_selected_rows.py
index 3d7b86787fbf0a855bcd86b8a873c9134cb1d5cc..f504a06ffff8cb636498652554fca05e22bb905d 100644
--- a/python/paddle/fluid/tests/unittests/test_selected_rows.py
+++ b/python/paddle/fluid/tests/unittests/test_selected_rows.py
@@ -40,12 +40,12 @@ class TestSelectedRows(unittest.TestCase):
         # compare tensor
         self.assertAlmostEqual(2.0,
-                               selected_rows.get_tensor().get_float_element(0))
+                               selected_rows.get_tensor()._get_float_element(0))
         self.assertAlmostEqual(1.0,
-                               selected_rows.get_tensor().get_float_element(1))
+                               selected_rows.get_tensor()._get_float_element(1))
         self.assertAlmostEqual(
             4.0,
-            selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
+            selected_rows.get_tensor()._get_float_element(2 * row_numel + 8))


 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
index b779f0fb014bbba62927754ea6f36828a32e6c0a..24bc2cbaf86e8ed2c6a359c4c4d9a1e1507df746 100644
--- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
+++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
@@ -45,8 +45,8 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):

     def sum_lodtensor(self, tensor):
         sum_res = 0.0
-        for i in xrange(np.product(tensor.get_dims())):
-            sum_res += tensor.get_float_element(i)
+        for i in xrange(np.product(tensor.shape())):
+            sum_res += tensor._get_float_element(i)
         return sum_res
diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py
index f17edd3025b17549892bbd47935a1d2452cefac3..5ccc876ae8e6e20f76c77c1892f4de59d72bffc8 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor.py
@@ -25,8 +25,8 @@ class TestTensor(unittest.TestCase):

         tensor = var.get_tensor()

-        tensor.set_dims([1000, 784])
-        tensor.alloc_int(place)
+        tensor._set_dims([1000, 784])
+        tensor._alloc_int(place)
         tensor_array = numpy.array(tensor)
         self.assertEqual((1000, 784), tensor_array.shape)
         tensor_array[3, 9] = 1
@@ -44,8 +44,8 @@ class TestTensor(unittest.TestCase):

         tensor = var.get_tensor()

-        tensor.set_dims([1000, 784])
-        tensor.alloc_float(place)
+        tensor._set_dims([1000, 784])
+        tensor._alloc_float(place)
         tensor_array = numpy.array(tensor)
         self.assertEqual((1000, 784), tensor_array.shape)

@@ -63,8 +63,8 @@ class TestTensor(unittest.TestCase):
         var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()

-        lod_tensor.set_dims([4, 4, 6])
-        lod_tensor.alloc_int(place)
+        lod_tensor._set_dims([4, 4, 6])
+        lod_tensor._alloc_int(place)
         array = numpy.array(lod_tensor)
         array[0, 0, 0] = 3
         array[3, 3, 5] = 10
@@ -84,8 +84,8 @@ class TestTensor(unittest.TestCase):
         var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()

-        lod_tensor.set_dims([5, 2, 3, 4])
-        lod_tensor.alloc_float(place)
+        lod_tensor._set_dims([5, 2, 3, 4])
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         self.assertEqual((5, 2, 3, 4), tensor_array.shape)

@@ -104,14 +104,13 @@ class TestTensor(unittest.TestCase):
         self.assertListEqual(lod_py, lod)

     def test_lod_tensor_init(self):
-        scope = core.Scope()
         place = core.CPUPlace()
         lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()

-        lod_tensor.set_dims([5, 2, 3, 4])
+        lod_tensor._set_dims([5, 2, 3, 4])
         lod_tensor.set_recursive_sequence_lengths(lod_py)
-        lod_tensor.alloc_float(place)
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
         tensor_array[0, 0, 0, 1] = 2.0
@@ -129,9 +128,9 @@ class TestTensor(unittest.TestCase):
         lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()

-        lod_tensor.set_dims([5, 2, 3, 4])
+        lod_tensor._set_dims([5, 2, 3, 4])
         lod_tensor.set_recursive_sequence_lengths(lod_py)
-        lod_tensor.alloc_float(place)
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
         tensor_array[0, 0, 0, 1] = 2.0
@@ -149,15 +148,15 @@ class TestTensor(unittest.TestCase):

         tensor = var.get_tensor()

-        tensor.set_dims([0, 1])
-        tensor.alloc_float(place)
+        tensor._set_dims([0, 1])
+        tensor._alloc_float(place)
         tensor_array = numpy.array(tensor)
         self.assertEqual((0, 1), tensor_array.shape)

         if core.is_compiled_with_cuda():
             gpu_place = core.CUDAPlace(0)
-            tensor.alloc_float(gpu_place)
+            tensor._alloc_float(gpu_place)
             tensor_array = numpy.array(tensor)
             self.assertEqual((0, 1), tensor_array.shape)
diff --git a/python/paddle/fluid/tests/unittests/testsuite.py b/python/paddle/fluid/tests/unittests/testsuite.py
index a995ee10f29a714b674fae4b31070e6ba2ca9953..55c6e54906e739ef0bc953fa5c9e9641ec575ccf 100644
--- a/python/paddle/fluid/tests/unittests/testsuite.py
+++ b/python/paddle/fluid/tests/unittests/testsuite.py
@@ -75,7 +75,7 @@ def set_input(scope, op, inputs, place):
             if isinstance(var, tuple):
                 tensor.set_recursive_sequence_lengths(var[1])
                 var = var[0]
-            tensor.set_dims(var.shape)
+            tensor._set_dims(var.shape)
             tensor.set(var, place)
         elif isinstance(var, float):
             scope.find_var(var_name).set_float(var)
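
For reference, a minimal sketch of what callers look like after this rename, mirroring the updated test_tensor.py cases above (illustrative only; it assumes a paddle.fluid build that includes this patch):

    import numpy
    import paddle.fluid.core as core

    scope = core.Scope()
    place = core.CPUPlace()
    tensor = scope.var("test_tensor").get_tensor()

    # Previously public spelling, removed by this patch:
    #   tensor.set_dims([1000, 784]); tensor.alloc_float(place)
    # Underscore-prefixed (framework-internal) spelling introduced here:
    tensor._set_dims([1000, 784])
    tensor._alloc_float(place)

    tensor_array = numpy.array(tensor)
    assert tensor_array.shape == (1000, 784)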