diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py
index 77e6fa89003b6ff365e3fd0c98852ede0a4ab5ef..ae00e9f54c3a0e7da65c7738931ce3345bca98b2 100644
--- a/demo/mnist/api_train.py
+++ b/demo/mnist/api_train.py
@@ -71,8 +71,8 @@ def main():
     assert isinstance(updater, api.ParameterUpdater)
 
     # define network
-    images = paddle_v2.layer.data(name='pixel', type=dp.dense_vector(784))
-    label = paddle_v2.layer.data(name='label', type=dp.integer_value(10))
+    images = paddle_v2.layer.data(name='pixel', data_type=dp.dense_vector(784))
+    label = paddle_v2.layer.data(name='label', data_type=dp.integer_value(10))
     hidden1 = paddle_v2.layer.fc(input=images, size=200)
     hidden2 = paddle_v2.layer.fc(input=hidden1, size=200)
     inference = paddle_v2.layer.fc(input=hidden2,
diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py
index 979cc3f1b9eb22d00cb49fe5ca2eab95dff78b56..b46cf1c8700ba8de2f9e5f8af3320ed6e1d9f7a9 100644
--- a/demo/mnist/api_train_v2.py
+++ b/demo/mnist/api_train_v2.py
@@ -16,8 +16,8 @@ def main():
     paddle.init(use_gpu=False, trainer_count=1)
 
     # define network topology
-    images = paddle.layer.data(name='pixel', type=dense_vector(784))
-    label = paddle.layer.data(name='label', type=integer_value(10))
+    images = paddle.layer.data(name='pixel', data_type=dense_vector(784))
+    label = paddle.layer.data(name='label', data_type=integer_value(10))
     hidden1 = paddle.layer.fc(input=images, size=200)
     hidden2 = paddle.layer.fc(input=hidden1, size=200)
     inference = paddle.layer.fc(input=hidden2,
diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py
index e006b789222c0191d2e20506f1161323570e9b51..511b3e7457c261c2aed69c3a55ab34cd06bd4a78 100644
--- a/python/paddle/v2/layer.py
+++ b/python/paddle/v2/layer.py
@@ -165,26 +165,22 @@ So we also need to implement some special LayerV2.
 """
 
 
 class DataLayerV2(Layer):
-    def __init__(self, name, type, **kwargs):
-        self.__method_name__ = 'data_layer'
-
-        assert isinstance(type, dp.InputType)
+    def __init__(self, name, data_type, **kwargs):
+        assert isinstance(data_type, dp.InputType)
 
-        # get data_size from type.dim
-        args = dict()
-        for key in kwargs:
-            args[key] = kwargs[key]
-        args['size'] = type.dim
-        self.__args__ = args
+        self.__method_name__ = 'data_layer'
+        self.__kwargs__ = kwargs
+        self.__data_size__ = data_type.dim
 
         super(DataLayerV2, self).__init__(name=name, parent_layers=dict())
 
     def to_proto_impl(self, **kwargs):
         args = dict()
+        args['size'] = self.__data_size__
         for each in kwargs:
             args[each] = kwargs[each]
-        for each in self.__args__:
-            args[each] = self.__args__[each]
+        for each in self.__kwargs__:
+            args[each] = self.__kwargs__[each]
         return getattr(conf_helps, self.__method_name__)(name=self.name,
                                                          **args)
@@ -202,8 +198,8 @@ cross_entropy_cost = __convert_to_v2__(
     parent_names=['input', 'label'])
 
 if __name__ == '__main__':
-    pixel = data(name='pixel', type=dp.dense_vector(784))
-    label = data(name='label', type=dp.integer_value(10))
+    pixel = data(name='pixel', data_type=dp.dense_vector(784))
+    label = data(name='label', data_type=dp.integer_value(10))
     hidden = fc(input=pixel, size=100, act=conf_helps.SigmoidActivation())
     inference = fc(input=hidden, size=10, act=conf_helps.SoftmaxActivation())
     maxid = max_id(input=inference)
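
Note: for reference, a minimal usage sketch of the renamed keyword. The two
import lines are assumptions (they mirror the `paddle` and `dp` aliases used
in the files above, but the actual imports are not shown in this diff):

    import paddle.v2 as paddle                   # assumed v2 package path
    import paddle.trainer.PyDataProvider2 as dp  # assumed source of the `dp` alias

    paddle.init(use_gpu=False, trainer_count=1)

    # `data_type` (formerly `type`) must be a dp.InputType; DataLayerV2 now
    # stores data_type.dim separately and injects it as `size` when
    # to_proto_impl() builds the layer config.
    images = paddle.layer.data(name='pixel', data_type=dp.dense_vector(784))
    label = paddle.layer.data(name='label', data_type=dp.integer_value(10))
    hidden = paddle.layer.fc(input=images, size=200)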