Unverified commit ba97194c authored by Tao Luo, committed by GitHub

Merge pull request #10165 from JiayiFeng/fix_Clang_compile_error

fix Clang compile errors
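Background note (not part of the original patch): newer Clang versions emit -Wunused-lambda-capture when a lambda captures a variable (or `this`) that its body never references, and with warnings promoted to errors this breaks the build. Each hunk below therefore just drops the unused capture. A minimal, self-contained sketch of the warning and the fix, with illustrative names that are not from the Paddle sources:

```cpp
// Illustrative only -- not code from the Paddle repository.
// Compile with: clang++ -std=c++11 -Wunused-lambda-capture -Werror demo.cc
#include <cstdio>

int main() {
  int used = 1;
  int unused = 2;

  // Would fail to build: 'unused' is captured but never referenced in the
  // lambda body (the same applies to a superfluous 'this' capture).
  //   auto bad = [used, unused] { return used; };

  // The pattern applied throughout this PR: capture only what the body uses.
  auto good = [used] { return used; };
  std::printf("%d\n", good());

  (void)unused;  // silence the ordinary unused-variable warning in this demo
  return 0;
}
```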
@@ -390,9 +390,7 @@ private:
   if (this->loadThread_) {  // wait poolActualSize < poolSize;
     std::unique_lock<std::mutex> l(mtx_);
-    pushCV_.wait(l, [this, additionalBatchSize] {
-      return this->poolActualSize_ < poolSize_;
-    });
+    pushCV_.wait(l, [this] { return this->poolActualSize_ < poolSize_; });
   }
   {
@@ -52,7 +52,7 @@ MultiGradientMachine::MultiGradientMachine(const ModelConfig& config,
   } else {
     numDevices_ = 0;
   }
-  ParamInitCallback mainParamInitCb = [this](int paramId, Parameter* para) {
+  ParamInitCallback mainParamInitCb = [](int paramId, Parameter* para) {
     // only create buf for CPU parameters
     // GPU parameters will be created in each thread
     if (para->useGpu()) return;
@@ -72,7 +72,7 @@ void RecurrentLayerGroup::initSubNetwork(
   setNeedGradient(true);
   network_.reset(new RecurrentGradientMachine(config_.name(), rootNetwork));
-  ParamInitCallback cb = [this, rootNetwork](int paramId, Parameter* para) {
+  ParamInitCallback cb = [rootNetwork](int paramId, Parameter* para) {
     para->enableSharedType(
         PARAMETER_VALUE,
         rootNetwork->getParameters()[paramId]->getBuf(PARAMETER_VALUE),
@@ -325,12 +325,12 @@ void Argument::concat(const std::vector<Argument>& args,
         ->copyFrom(*src->subVec(srcStartRow, size), stream);
   };
-  auto copyStrs = [batchSize, stream](SVectorPtr& dst,
-                                      const SVectorPtr& src,
-                                      int desStartRow,
-                                      int srcStartRow,
-                                      int size,
-                                      bool useGpu) {
+  auto copyStrs = [batchSize](SVectorPtr& dst,
+                              const SVectorPtr& src,
+                              int desStartRow,
+                              int srcStartRow,
+                              int size,
+                              bool useGpu) {
     if (!src) {
       dst.reset();
       return;
@@ -413,7 +413,7 @@ void Argument::concat(const std::vector<Argument>& args,
     dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
   };
-  auto copyStrs = [batchSize, stream](
+  auto copyStrs = [batchSize](
       SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
     if (!src) {
       dst.reset();
@@ -81,9 +81,9 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::needSpecialTraversal(
   if (numUpdates_ % kMaxNumAccumulates == 0) {
     // Move the sum to a different buffer to avoid loss of precision
     // due to too many sums.
-    callbacks.emplace_back([this](const VectorPtr vecs[],
-                                  const ParameterConfig& config,
-                                  size_t sparseId) {
+    callbacks.emplace_back([](const VectorPtr vecs[],
+                              const ParameterConfig& config,
+                              size_t sparseId) {
       vecs[PARAMETER_SUM2]->add(*vecs[PARAMETER_SUM1]);
       vecs[PARAMETER_SUM1]->zeroMem();
     });
@@ -94,9 +94,9 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::needSpecialTraversal(
   if (auto callback = this->startCatchUpWith()) {
     callbacks.emplace_back(callback);
   }
-  callbacks.emplace_back([this](const VectorPtr vecs[],
-                                const ParameterConfig& config,
-                                size_t sparseId) {
+  callbacks.emplace_back([](const VectorPtr vecs[],
+                            const ParameterConfig& config,
+                            size_t sparseId) {
     vecs[PARAMETER_SUM3]->add(*vecs[PARAMETER_SUM1], *vecs[PARAMETER_SUM2]);
     vecs[PARAMETER_SUM1]->zeroMem();
     vecs[PARAMETER_SUM2]->zeroMem();
@@ -145,9 +145,9 @@ AdagradParameterOptimizer::needSpecialTraversal(
   if (numUpdates_ % kMaxNumAccumulates == 0) {
     // Move the sum to a different buffer to avoid loss of precision
    // due to too many sums.
-    return [this](const VectorPtr vecs[],
-                  const ParameterConfig& config,
-                  size_t sparseId) {
+    return [](const VectorPtr vecs[],
+              const ParameterConfig& config,
+              size_t sparseId) {
      vecs[PARAMETER_GRADIENT_SQURESUM]->add(
          *vecs[PARAMETER_GRADIENT_SQURESUM1]);
      vecs[PARAMETER_GRADIENT_SQURESUM1]->zeroMem();