Commit a66052c6 authored by Helin Wang

improve trainer API

- The trainer and inferencer will load params from disk if the param_path
  argument passed to their constructor is not None.

- Remove params.py; we will expose core.Scope to the user if needed
  (e.g., for GANs). For now we will not expose it, unless we clearly
  know that doing so is necessary to support GANs.

- Add `save_params` to Trainer (a TODO item).

- Rename "network" to "program".
Parent 6d5e582d
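For orientation, here is a minimal sketch of how the reworked API is intended to be used. Trainer.train and Inferencer.infer are still stubs in this commit, and the network, dataset, and save path below are purely illustrative:

import paddle
import paddle.fluid as fluid

def inference_program():
    # Illustrative network: a one-layer linear regression.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    return fluid.layers.fc(input=x, size=1)

def train_program():
    # Must return a scalar loss.
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = inference_program()
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    return fluid.layers.mean(cost)

place = fluid.CPUPlace()
trainer = fluid.Trainer(
    train_program,
    fluid.optimizer.SGD(learning_rate=0.001),
    param_path=None,  # None: initialize fresh parameters instead of loading
    place=place)
trainer.train(
    reader=paddle.batch(paddle.dataset.uci_housing.train(), batch_size=20),
    num_epochs=10,
    event_handler=lambda event: None)
trainer.save_params("fit_a_line.model")  # the save method added below

inferencer = fluid.Inferencer(
    inference_program, param_path="fit_a_line.model", place=place)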
@@ -21,7 +21,11 @@ import executor
from executor import *
import trainer
from trainer import *
from trainer import Trainer
from trainer import BeginEpochEvent
from trainer import EndEpochEvent
from trainer import BeginStepEvent
from trainer import EndStepEvent
import inferencer
from inferencer import Inferencer
......
@@ -12,18 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import core
__all__ = ['Inferencer', ]
class Inferencer(object):
def __init__(self, network_func, params, place=None):
def __init__(self, network_func, param_path=None, place=None):
# 1. we need to generate a framework.Program by calling
# network_func. Reference: fluid.program_guard in test_word2vec.py
# 2. move the default_main_program to self.program.
# 3. run the default_startup program.
self.params = params
# 4. load params from param_path into scope
self.scope = core.Scope()
self.place = place
def infer(self, inputs):
......
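The constructor above is still a stub. A rough sketch of how its four numbered steps could be filled in, assuming fluid.io.load_persistables (the function its comment references) and glossing over the open question of loading into self.scope rather than the executor's default scope:

import paddle.fluid as fluid
from paddle.fluid import core, executor, framework

class Inferencer(object):
    def __init__(self, network_func, param_path=None, place=None):
        self.scope = core.Scope()
        self.place = place
        # Steps 1-2: build the inference program by calling network_func.
        self.startup_program = framework.Program()
        self.program = framework.Program()
        with framework.program_guard(self.program, self.startup_program):
            network_func()
        # Step 3: run the startup program to create the variables.
        exe = executor.Executor(place)
        exe.run(self.startup_program, scope=self.scope)
        # Step 4: load persisted parameters from disk, if a path was given.
        if param_path:
            fluid.io.load_persistables(
                exe, dirname=param_path, main_program=self.program)

The same load-from-param_path step applies to Trainer.__init__ further down, where it is likewise left as a TODO.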
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
__all__ = ['Params', ]
class Params(object):
def __init__(self, path=None):
self.scope = core.Scope()
if path:
self._load(path)
def _load(self, path):
# reference: load_persistables in io.py
pass
def save(self, path):
# reference: save_persistables in io.py
pass
def add_params(self, scope):
# take the keys from the scope,
# if not already exists in self.scope,
# add the key and value into self.scope.
pass
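To see what removing Params means for callers, compare the old and new Inferencer invocations from the word2vec test in the next hunks (partial comes from functools; both call sites appear verbatim below):

# Before this change: parameters travel through an explicit Params object.
params = fluid.Params(save_path)
inferencer = fluid.Inferencer(inference_network, params, place=place)

# After this change: the Inferencer loads parameters itself from param_path.
inferencer = fluid.Inferencer(
    partial(inference_program, is_sparse), param_path=save_path, place=place)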
@@ -39,7 +39,7 @@ word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
def inference_network(is_sparse):
def inference_program(is_sparse):
first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
@@ -79,9 +79,9 @@ def inference_network(is_sparse):
return predict_word
def train_network(is_sparse):
def train_program(is_sparse):
next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
predict_word = inference_network(is_sparse)
predict_word = inference_program(is_sparse)
cost = fluid.layers.cross_entropy(input=predict_word, label=next_word)
avg_cost = fluid.layers.mean(cost)
return avg_cost
@@ -100,23 +100,25 @@ def train(use_cuda, is_sparse, save_path):
word_dict, N))
if avg_cost < 5.0:
trainer.params.save(save_path)
trainer.save_params(save_path)
return
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.Trainer(
partial(train_network, is_sparse),
partial(train_program, is_sparse),
fluid.optimizer.SGD(learning_rate=0.001),
place=place)
trainer.train(
reader=train_reader, num_epochs=100, event_handler=event_handler)
def infer(use_cuda, save_path):
params = fluid.Params(save_path)
def infer(use_cuda, is_sparse, save_path):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(inference_network, params, place=place)
inferencer = fluid.Inferencer(
partial(inference_program, is_sparse),
param_path=save_path,
place=place)
lod = [0, 1]
first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
@@ -138,7 +140,7 @@ def main(use_cuda, is_sparse):
save_path = "word2vec.inference.model"
train(use_cuda, is_sparse, save_path)
infer(use_cuda, save_path)
infer(use_cuda, is_sparse, save_path)
if __name__ == '__main__':
......
@@ -56,23 +56,22 @@ class Trainer(object):
"""
Args:
network_func(callable): A function which will return loss. The loss must be a scalar.
program_func(callable): A function which will return loss. The loss must be a scalar.
optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer
param_path: The path to load parameters from; None means parameters are freshly initialized.
place: The device place of this trainer.
"""
def __init__(self, network_func, optimizer, params=None, place=None):
def __init__(self, program_func, optimizer, param_path=None, place=None):
# 1. we need to generate a framework.Program by calling
# network_func. Reference: fluid.program_guard in
# program_func. Reference: fluid.program_guard in
# test_word2vec.py
self.scope = self._get_scope_from_params(params)
self.scope = core.Scope()
self.startup_program = framework.Program()
self.train_program = framework.Program()
with framework.program_guard(self.train_program, self.startup_program):
loss = network_func()
loss = program_func()
if not isinstance(optimizer, opt_module.Optimizer):
raise TypeError(
"The optimizer should be an instance of Optimizer")
@@ -84,14 +83,13 @@ class Trainer(object):
# 2. move the default_main_program to self.program and run the
# default_startup program on an empty core.Scope()
# Run startup program
if params is None:
exe = executor.Executor(place)
exe.run(self.startup_program, scope=self.scope)
exe = executor.Executor(place)
exe.run(self.startup_program, scope=self.scope)
# 3. call self.params.add_vars with the initialized scope, it
# will add the new vars of the initialized scope into
# self.params.
# TODO(yuyang): This depends on parameters implementation.
if param_path:
# load params from param_path into scope
# TODO(yuyang): This depends on parameters implementation.
pass
# TODO(helin): support distributed training
@@ -124,19 +122,9 @@ class Trainer(object):
def test(self, reader):
pass
def _get_scope_from_params(self, params):
"""
Get Scope from parameter object.
Args:
params(Parameter|None): The parameter object instance. Could be None.
Returns: New scope if params is None. Or params.scope()
NOTE: This method is WIP. Not fully implemented.
"""
if params is None:
return core.Scope() # new scope when params is None
else:
raise NotImplementedError("Not implemented right now.")
def save_params(self, param_path):
# reference: save_persistables in io.py
pass
@staticmethod
def _check_and_get_place(place):
......
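Finally, a possible body for the new save_params stub, assuming fluid.io.save_persistables (which its comment references) and that the trainer records its device as self.place; the committed code leaves both the body and the scope handling open:

import paddle.fluid as fluid
from paddle.fluid import executor

def save_params(self, param_path):
    # Persist every persistable variable of the train program to param_path.
    exe = executor.Executor(self.place)
    fluid.io.save_persistables(
        exe, dirname=param_path, main_program=self.train_program)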