diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py
index a23ddfaca011e2057af15fc4c559080c29eecc73..8a612cbc66d54ba3a3a9a444f85626812940cf6a 100644
--- a/demo/mnist/api_train_v2.py
+++ b/demo/mnist/api_train_v2.py
@@ -41,7 +41,7 @@ def main():
     trainer.train(
         train_data_reader=train_reader,
-        topology=cost,
+        cost=cost,
         parameters=parameters,
         event_handler=event_handler,
         batch_size=32,  # batch size should be refactor in Data reader
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index 3bf2128e16b5ad17968bd4c8debf577b86c50414..be33b910807ff30eb8ea9b37d5b172a556a1c3f5 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -62,7 +62,7 @@ class SGD(ITrainer):
     def train(self,
               train_data_reader,
-              topology,
+              cost,
               parameters,
               num_passes=1,
               test_data_reader=None,
@@ -73,7 +73,7 @@ class SGD(ITrainer):
         Training method. Will train num_passes of input data.
 
         :param train_data_reader:
-        :param topology: cost layers, use one or more Layers to represent it.
+        :param cost: cost layers, to be optimized.
         :param parameters: The parameter pools.
         :param num_passes: The total train passes.
         :param test_data_reader:
@@ -86,7 +86,7 @@ class SGD(ITrainer):
         if event_handler is None:
             event_handler = default_event_handler
 
-        topology = Topology(topology)
+        topology = Topology(cost)
         __check_train_args__(**locals())