diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/gserver/dataproviders/PyDataProvider2.cpp
index e3e4457f9b72c5edb8082fdf378ae662b4aee42f..b4215bb307cc31ce64bb724986b88fdc20bbbf45 100644
--- a/paddle/gserver/dataproviders/PyDataProvider2.cpp
+++ b/paddle/gserver/dataproviders/PyDataProvider2.cpp
@@ -390,9 +390,7 @@ private:
 
     if (this->loadThread_) {  // wait poolActualSize < poolSize;
       std::unique_lock<std::mutex> l(mtx_);
-      pushCV_.wait(l, [this, additionalBatchSize] {
-        return this->poolActualSize_ < poolSize_;
-      });
+      pushCV_.wait(l, [this] { return this->poolActualSize_ < poolSize_; });
     }
 
     {
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
index 3f46cc98cdef17d14c253c732814bcba005fd667..b8d4d28f0f309a5f7348605e8d35e160e7fd5552 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -52,7 +52,7 @@
   } else {
     numDevices_ = 0;
   }
-  ParamInitCallback mainParamInitCb = [this](int paramId, Parameter* para) {
+  ParamInitCallback mainParamInitCb = [](int paramId, Parameter* para) {
     // only create buf for CPU parameters
     // GPU parameters will be created in each thread
     if (para->useGpu()) return;
diff --git a/paddle/gserver/layers/RecurrentLayerGroup.cpp b/paddle/gserver/layers/RecurrentLayerGroup.cpp
index 27e8b5868e6d85cf004945d7cb086d6d57487f9f..44b57185c5a5fa7703ca477b990a73cdad2c2aa1 100644
--- a/paddle/gserver/layers/RecurrentLayerGroup.cpp
+++ b/paddle/gserver/layers/RecurrentLayerGroup.cpp
@@ -72,7 +72,7 @@
   setNeedGradient(true);
 
   network_.reset(new RecurrentGradientMachine(config_.name(), rootNetwork));
-  ParamInitCallback cb = [this, rootNetwork](int paramId, Parameter* para) {
+  ParamInitCallback cb = [rootNetwork](int paramId, Parameter* para) {
     para->enableSharedType(
         PARAMETER_VALUE,
         rootNetwork->getParameters()[paramId]->getBuf(PARAMETER_VALUE),
diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp
index cfdaf8998b04e0307bc442dec0df734452634c67..94522f718a0c19bfc704ca92eddef5c5a9cb6919 100644
--- a/paddle/parameter/Argument.cpp
+++ b/paddle/parameter/Argument.cpp
@@ -325,12 +325,12 @@ void Argument::concat(const std::vector<Argument>& args,
         ->copyFrom(*src->subVec(srcStartRow, size), stream);
   };
 
-  auto copyStrs = [batchSize, stream](SVectorPtr& dst,
-                                      const SVectorPtr& src,
-                                      int desStartRow,
-                                      int srcStartRow,
-                                      int size,
-                                      bool useGpu) {
+  auto copyStrs = [batchSize](SVectorPtr& dst,
+                              const SVectorPtr& src,
+                              int desStartRow,
+                              int srcStartRow,
+                              int size,
+                              bool useGpu) {
     if (!src) {
       dst.reset();
       return;
@@ -413,7 +413,7 @@ void Argument::concat(const std::vector<Argument>& args,
     dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
   };
 
-  auto copyStrs = [batchSize, stream](
+  auto copyStrs = [batchSize](
       SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
     if (!src) {
       dst.reset();
diff --git a/paddle/parameter/AverageOptimizer.cpp b/paddle/parameter/AverageOptimizer.cpp
index 75998d81dd9c8be35fe45e903dc1cd69068f83c6..82a7fed6c6451b8908851f2d039f17b9dc513818 100644
--- a/paddle/parameter/AverageOptimizer.cpp
+++ b/paddle/parameter/AverageOptimizer.cpp
@@ -81,9 +81,9 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::needSpecialTraversal(
   if (numUpdates_ % kMaxNumAccumulates == 0) {
     // Move the sum to a different buffer to avoid loss of precision
     // due to too many sums.
-    callbacks.emplace_back([this](const VectorPtr vecs[],
-                                  const ParameterConfig& config,
-                                  size_t sparseId) {
+    callbacks.emplace_back([](const VectorPtr vecs[],
+                              const ParameterConfig& config,
+                              size_t sparseId) {
       vecs[PARAMETER_SUM2]->add(*vecs[PARAMETER_SUM1]);
       vecs[PARAMETER_SUM1]->zeroMem();
     });
@@ -94,9 +94,9 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::needSpecialTraversal(
     if (auto callback = this->startCatchUpWith()) {
       callbacks.emplace_back(callback);
     }
-    callbacks.emplace_back([this](const VectorPtr vecs[],
-                                  const ParameterConfig& config,
-                                  size_t sparseId) {
+    callbacks.emplace_back([](const VectorPtr vecs[],
+                              const ParameterConfig& config,
+                              size_t sparseId) {
       vecs[PARAMETER_SUM3]->add(*vecs[PARAMETER_SUM1], *vecs[PARAMETER_SUM2]);
       vecs[PARAMETER_SUM1]->zeroMem();
       vecs[PARAMETER_SUM2]->zeroMem();
diff --git a/paddle/parameter/FirstOrderOptimizer.cpp b/paddle/parameter/FirstOrderOptimizer.cpp
index 5e280bcac3389179181d2eda58c08e579e867ecc..182e833405e8f8bc3a4c9ffddbf628040f9cceaa 100644
--- a/paddle/parameter/FirstOrderOptimizer.cpp
+++ b/paddle/parameter/FirstOrderOptimizer.cpp
@@ -145,9 +145,9 @@ AdagradParameterOptimizer::needSpecialTraversal(
   if (numUpdates_ % kMaxNumAccumulates == 0) {
     // Move the sum to a different buffer to avoid loss of precision
     // due to too many sums.
-    return [this](const VectorPtr vecs[],
-                  const ParameterConfig& config,
-                  size_t sparseId) {
+    return [](const VectorPtr vecs[],
+              const ParameterConfig& config,
+              size_t sparseId) {
       vecs[PARAMETER_GRADIENT_SQURESUM]->add(
          *vecs[PARAMETER_GRADIENT_SQURESUM1]);
       vecs[PARAMETER_GRADIENT_SQURESUM1]->zeroMem();