/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/parameter/AverageOptimizer.h"
#include "paddle/parameter/FirstOrderOptimizer.h"
#include "paddle/parameter/OptimizerFunctions.h"
#include "paddle/parameter/OptimizerWithRegularizer.h"
#include "paddle/parameter/Parameter.h"
#include "paddle/parameter/Regularizer.h"
#include "paddle/utils/Util.h"

#include <memory>
#include <vector>

namespace paddle {

/**
 * \brief A parameter updater that uses multiple threads to update parameters.
   This parameter updater handles GPU and CPU updates differently, because at
   the current moment, the merging on CPU happens on the main thread, and its
   parameter size can be much larger than the one on GPU. Thus, for GPU, the
   parameter updates happen in the updateImpl() function, which is called by
   gradient machines as a callback function supplied to backward() and
   forwardBackward().
   For CPU, the parameter updates happen in separate threads maintained by
   this class.
 */
class SgdThreadUpdater : public ParameterUpdater {
42
 public:
43 44 45 46 47 48 49
  explicit SgdThreadUpdater(const OptimizationConfig& optConfig);
  virtual ~SgdThreadUpdater() {}

  // Use the startPass() function of the base optimizer.
  virtual void startPass();

  // Use the finishPass() function of the base optimizer.
50
  virtual bool finishPass();
51

52
  virtual void init(const std::vector<ParameterPtr>& parameters);
53 54 55 56 57 58 59
  virtual PassType startBatch(int64_t batchSize);
  // Call finishBatch for each optimizer.
  virtual void finishBatch(real cost);
  virtual void catchUpWith();
  virtual void apply();
  virtual void restore();

60
 protected:
61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76
  // This is the function that will be eventualy called by the GradientMachine.
  // used only for GPU update.
  virtual void updateImpl(Parameter* para);
  OptimizationConfig config_;
  int64_t numSamplesProcessed_;

  // One optimizers for each parameter.
  std::vector<std::unique_ptr<ParameterOptimizer>> optimizers_;

  // The update function for CPU sparse parameters.
  void threadUpdateSparse(int tid, size_t numThreads, Parameter* para);

  // The update function for CPU dense parameters.
  void threadUpdateDense(int tid, size_t numThreads, Parameter* para);
  // The update function for after update operations, such as averager.
  void threadTraverse(const ParameterOptimizer::TraverseCallback& callback,
77 78 79
                      int tid,
                      size_t numThreads,
                      Parameter* para);
X
xuwei06 已提交
80
  typedef std::function<const ParameterOptimizer::TraverseCallback(Parameter*)>
81
      GetTraverseCallback;
82 83 84 85
  void traverse(GetTraverseCallback getTraverseCallback);
};

}  // namespace paddle