Commit 87f9191e authored by: F fengjiayi

fix Clang compile errors

Parent d67b9ced
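Each hunk in this change removes a lambda capture that the lambda body never uses. Clang diagnoses unused lambda captures (-Wunused-lambda-capture), and with -Werror in the build flags the diagnostic becomes a compile error; keeping only the captures the body actually references makes the build clean. A minimal sketch of the pattern (hypothetical code, not taken from these sources):

// Hypothetical example: 'unused' is captured but never referenced in the
// lambda body, which Clang flags (-Wunused-lambda-capture) and which
// -Werror turns into a hard compile error.
#include <cstdio>

int main() {
  int used = 1;
  int unused = 2;  // only here to illustrate the capture below
  // Before the fix (rejected under -Werror, 'unused' is never referenced):
  //   auto cb = [used, unused] { std::printf("%d\n", used); };
  // After the fix: capture only what the body needs.
  auto cb = [used] { std::printf("%d\n", used); };
  cb();
  (void)unused;  // keep the example itself warning-clean
  return 0;
}

The hunks below apply the same trimming throughout: [this, additionalBatchSize] becomes [this], [this] becomes [] where the body touches no members, and [batchSize, stream] becomes [batchSize].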
@@ -390,9 +390,7 @@ private:
     if (this->loadThread_) {  // wait poolActualSize < poolSize;
       std::unique_lock<std::mutex> l(mtx_);
-      pushCV_.wait(l, [this, additionalBatchSize] {
-        return this->poolActualSize_ < poolSize_;
-      });
+      pushCV_.wait(l, [this] { return this->poolActualSize_ < poolSize_; });
     }
     {
...
@@ -52,7 +52,7 @@ MultiGradientMachine::MultiGradientMachine(const ModelConfig& config,
   } else {
     numDevices_ = 0;
   }
-  ParamInitCallback mainParamInitCb = [this](int paramId, Parameter* para) {
+  ParamInitCallback mainParamInitCb = [](int paramId, Parameter* para) {
     // only create buf for CPU parameters
     // GPU parameters will be created in each thread
     if (para->useGpu()) return;
...
@@ -72,7 +72,7 @@ void RecurrentLayerGroup::initSubNetwork(
   setNeedGradient(true);
   network_.reset(new RecurrentGradientMachine(config_.name(), rootNetwork));
-  ParamInitCallback cb = [this, rootNetwork](int paramId, Parameter* para) {
+  ParamInitCallback cb = [rootNetwork](int paramId, Parameter* para) {
     para->enableSharedType(
         PARAMETER_VALUE,
         rootNetwork->getParameters()[paramId]->getBuf(PARAMETER_VALUE),
...
@@ -325,12 +325,12 @@ void Argument::concat(const std::vector<Argument>& args,
         ->copyFrom(*src->subVec(srcStartRow, size), stream);
   };
-  auto copyStrs = [batchSize, stream](SVectorPtr& dst,
-                                      const SVectorPtr& src,
-                                      int desStartRow,
-                                      int srcStartRow,
-                                      int size,
-                                      bool useGpu) {
+  auto copyStrs = [batchSize](SVectorPtr& dst,
+                              const SVectorPtr& src,
+                              int desStartRow,
+                              int srcStartRow,
+                              int size,
+                              bool useGpu) {
     if (!src) {
       dst.reset();
       return;
@@ -413,7 +413,7 @@ void Argument::concat(const std::vector<Argument>& args,
     dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
   };
-  auto copyStrs = [batchSize, stream](
+  auto copyStrs = [batchSize](
       SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
     if (!src) {
       dst.reset();
...
@@ -81,9 +81,9 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::needSpecialTraversal(
   if (numUpdates_ % kMaxNumAccumulates == 0) {
     // Move the sum to a different buffer to avoid loss of precision
     // due to too many sums.
-    callbacks.emplace_back([this](const VectorPtr vecs[],
-                                  const ParameterConfig& config,
-                                  size_t sparseId) {
+    callbacks.emplace_back([](const VectorPtr vecs[],
+                              const ParameterConfig& config,
+                              size_t sparseId) {
       vecs[PARAMETER_SUM2]->add(*vecs[PARAMETER_SUM1]);
       vecs[PARAMETER_SUM1]->zeroMem();
     });
@@ -94,9 +94,9 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::needSpecialTraversal(
   if (auto callback = this->startCatchUpWith()) {
     callbacks.emplace_back(callback);
   }
-  callbacks.emplace_back([this](const VectorPtr vecs[],
-                                const ParameterConfig& config,
-                                size_t sparseId) {
+  callbacks.emplace_back([](const VectorPtr vecs[],
+                            const ParameterConfig& config,
+                            size_t sparseId) {
     vecs[PARAMETER_SUM3]->add(*vecs[PARAMETER_SUM1], *vecs[PARAMETER_SUM2]);
     vecs[PARAMETER_SUM1]->zeroMem();
     vecs[PARAMETER_SUM2]->zeroMem();
...
@@ -145,9 +145,9 @@ AdagradParameterOptimizer::needSpecialTraversal(
   if (numUpdates_ % kMaxNumAccumulates == 0) {
     // Move the sum to a different buffer to avoid loss of precision
     // due to too many sums.
-    return [this](const VectorPtr vecs[],
-                  const ParameterConfig& config,
-                  size_t sparseId) {
+    return [](const VectorPtr vecs[],
+              const ParameterConfig& config,
+              size_t sparseId) {
       vecs[PARAMETER_GRADIENT_SQURESUM]->add(
           *vecs[PARAMETER_GRADIENT_SQURESUM1]);
       vecs[PARAMETER_GRADIENT_SQURESUM1]->zeroMem();
...