提交 d6a27ade 编写于 作者: T tensor-tang

Add an OpenMP-parallelized SGD update to speed up training on CPUs

上级 330e9929
......@@ -92,6 +92,28 @@ public:
const T* getData() const { return this->data_; }
T* getData() { return this->data_; }
#ifdef PADDLE_USE_MKLDNN
/**
* sgd update with openmp to speedup
*/
/**
 * @brief SGD update with momentum and weight decay, parallelized with OpenMP.
 *
 * Element-wise, for every index i:
 *   mom[i] = momentum * mom[i] - learningRate * grad[i]
 *            - (learningRate * decayRate) * val[i]
 *   val[i] += mom[i]
 *
 * @param gradVec      gradient vector; read-only, assumed same size as this
 *                     vector -- TODO(review) confirm callers guarantee this.
 * @param momVec       momentum vector, updated in place.
 * @param learningRate learning rate applied to gradient and decay terms.
 * @param momentum     momentum coefficient.
 * @param decayRate    weight-decay coefficient (folded into learningRate
 *                     below, so it is taken by value and mutated locally).
 */
void sgdUpdateWithOMP(VectorT& gradVec,
                      VectorT& momVec,
                      T learningRate,
                      T momentum,
                      T decayRate) {
  size_t size = this->getSize();
  T* val = this->getData();
  const T* grd = gradVec.getData();
  T* mom = momVec.getData();
  // Hoist the loop-invariant product out of the parallel loop.
  decayRate *= learningRate;
  // Use a signed loop index: OpenMP versions before 3.0 only accept signed
  // integral loop counters in a `parallel for` canonical loop, so a size_t
  // index fails to compile on pre-3.0 toolchains.
#pragma omp parallel for
  for (long long i = 0; i < static_cast<long long>(size); ++i) {
    mom[i] = momentum * mom[i] - learningRate * grd[i] - decayRate * val[i];
    val[i] += mom[i];
  }
}
#endif
virtual void zeroMem() = 0;
// set all elements to value
virtual void reset(const T& value) = 0;
......
......@@ -37,6 +37,15 @@ public:
real torch_learningRate = optConfig_.learning_method() == "torch_momentum"
? 1.0 - paraConfig.momentum()
: 1.0;
#ifdef PADDLE_USE_MKLDNN
vecs[PARAMETER_VALUE]->sgdUpdateWithOMP(
*vecs[PARAMETER_GRADIENT],
*vecs[PARAMETER_MOMENTUM],
learningRate_ * paraConfig.learning_rate() *
(firstTime_ ? 1.0 : torch_learningRate),
paraConfig.momentum(),
applyDecay_ ? paraConfig.decay_rate() : 0);
#else
vecs[PARAMETER_VALUE]->sgdUpdate(
*vecs[PARAMETER_GRADIENT],
*vecs[PARAMETER_MOMENTUM],
......@@ -44,6 +53,7 @@ public:
(firstTime_ ? 1.0 : torch_learningRate),
paraConfig.momentum(),
applyDecay_ ? paraConfig.decay_rate() : 0);
#endif
}
virtual void finishBatch() { firstTime_ = false; }
};
......
......@@ -30,6 +30,9 @@ void sgdUpdateCpu(real learningRate,
const real* grad,
real* momentumVec) {
decayRate *= learningRate;
#ifdef PADDLE_USE_MKLDNN
#pragma omp parallel for
#endif
for (size_t i = 0; i < size; ++i) {
momentumVec[i] = momentum * momentumVec[i] - learningRate * grad[i] -
decayRate * value[i];
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册