/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <functional>
#include <memory>
#include <vector>

#include "paddle/legacy/parameter/AverageOptimizer.h"
#include "paddle/legacy/parameter/FirstOrderOptimizer.h"
#include "paddle/legacy/parameter/OptimizerFunctions.h"
#include "paddle/legacy/parameter/OptimizerWithRegularizer.h"
#include "paddle/legacy/parameter/Parameter.h"
#include "paddle/legacy/parameter/Regularizer.h"
#include "paddle/legacy/utils/Util.h"

namespace paddle {

/**
 * \brief A parameter updater that uses multiple threads to update parameters.
   This parameter updater handles GPU and CPU updates differently,
   because at the current moment, the merging on CPU is happening on the
   main thread, and the its parameter size can be much larger than the one GPU.
   Thus, for GPU, the parameter updates happens in updateImpl() function, which
Q
qiaolongfei 已提交
36 37
   is called by gradient machines as a callback function supplied to backward()
   and forwardBackward().
Z
zhangjinchao01 已提交
38 39 40 41
   For CPU, the parameter updates happens in separate threads maintained by this
   class.
 */
class SgdThreadUpdater : public ParameterUpdater {
W
Wu Yi 已提交
42
 public:
Z
zhangjinchao01 已提交
43 44 45 46 47 48 49
  explicit SgdThreadUpdater(const OptimizationConfig& optConfig);
  virtual ~SgdThreadUpdater() {}

  // Use the startPass() function of the base optimizer.
  virtual void startPass();

  // Use the finishPass() function of the base optimizer.
50
  virtual bool finishPass();
Z
zhangjinchao01 已提交
51

Y
Yu Yang 已提交
52
  virtual void init(const std::vector<ParameterPtr>& parameters);
Z
zhangjinchao01 已提交
53 54 55 56 57 58 59
  virtual PassType startBatch(int64_t batchSize);
  // Call finishBatch for each optimizer.
  virtual void finishBatch(real cost);
  virtual void catchUpWith();
  virtual void apply();
  virtual void restore();

W
Wu Yi 已提交
60
 protected:
Z
zhangjinchao01 已提交
61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76
  // This is the function that will be eventualy called by the GradientMachine.
  // used only for GPU update.
  virtual void updateImpl(Parameter* para);
  OptimizationConfig config_;
  int64_t numSamplesProcessed_;

  // One optimizers for each parameter.
  std::vector<std::unique_ptr<ParameterOptimizer>> optimizers_;

  // The update function for CPU sparse parameters.
  void threadUpdateSparse(int tid, size_t numThreads, Parameter* para);

  // The update function for CPU dense parameters.
  void threadUpdateDense(int tid, size_t numThreads, Parameter* para);
  // The update function for after update operations, such as averager.
  void threadTraverse(const ParameterOptimizer::TraverseCallback& callback,
77 78 79
                      int tid,
                      size_t numThreads,
                      Parameter* para);
X
xuwei06 已提交
80
  typedef std::function<const ParameterOptimizer::TraverseCallback(Parameter*)>
81
      GetTraverseCallback;
Z
zhangjinchao01 已提交
82 83 84 85
  void traverse(GetTraverseCallback getTraverseCallback);
};

}  // namespace paddle