Commit c53d2142 authored by Luo Tao

Fix compiler warnings (signed/unsigned comparisons) from MKLDNNLayer and related files

Parent 9928eb81
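
All of the hunks below silence the same class of warning: `-Wsign-compare`, which fires when a signed `int` (or a signed integer literal) is compared against a `size_t`, such as the return value of `std::vector::size()` or `lod_element()`. A minimal standalone sketch of the pattern and the two fixes the patch applies (the names and values here are illustrative, not taken from the patch):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> ins = {1, 2, 3};
  int k = 2;

  // Comparing a signed int against vector::size() (a size_t) triggers
  // -Wsign-compare:  if (k < ins.size()) ...

  // Fix 1 (used in the MultiplexGPUKernel and beam-search test hunks):
  // first verify the signed value is nonnegative, then cast it to the
  // unsigned side of the comparison.
  assert(k >= 0);
  assert((size_t)k < ins.size());

  // Fix 2 (used in the CHECK_EQ hunks): give the literal an unsigned
  // suffix so both operands already have the same type.
  assert(ins.size() == 3UL);
  return 0;
}
```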

@@ -36,8 +36,8 @@ TEST(LoDTensor, LoDInGPU) {
   lod_tensor.mutable_data<float>(place);
   lod_tensor.set_lod(src_lod);
-  CHECK_EQ(lod_tensor.lod_element(0, 2), 4);
-  CHECK_EQ(lod_tensor.lod_element(0, 4), 8);
+  CHECK_EQ(lod_tensor.lod_element(0, 2), 4UL);
+  CHECK_EQ(lod_tensor.lod_element(0, 4), 8UL);
   auto lod = lod_tensor.lod();

@@ -28,7 +28,7 @@ bool MKLDNNConvLayer::init(const LayerMap& layerMap,
   if (!MKLDNNLayer::init(layerMap, parameterMap)) {
     return false;
   }
-  CHECK_EQ(inputLayers_.size(), 1) << "Only support one input layer yet";
+  CHECK_EQ(inputLayers_.size(), 1UL) << "Only support one input layer yet";
   CHECK_EQ(inputLayers_.size(), parameters_.size());
   CHECK(config_.shared_biases()) << "Only support shared biases yet";

@@ -28,7 +28,7 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap,
     return false;
   }
-  CHECK_EQ(inputLayers_.size(), 1) << "Only support one input layer yet";
+  CHECK_EQ(inputLayers_.size(), 1UL) << "Only support one input layer yet";
   CHECK_EQ(inputLayers_.size(), parameters_.size());
   CHECK(!parameters_[0]->isSparse()) << "Do not support sparse yet";

@@ -228,7 +228,7 @@ void genGroundTruth(vector<SingleBeamExpansion>& beamExpansions,
         curBeam.groundTruth[j] = *(start + n);
         curBeam.inBeam[j] = 1;
       } else {
-        CHECK_LE(curBeam.rowIdxInBeam[j] + 1,
+        CHECK_LE((size_t)curBeam.rowIdxInBeam[j] + 1,
                  curBeam.subSeqStartPos.size() - 1);
         int start = curBeam.subSeqStartPos[curBeam.rowIdxInBeam[j]];
         int end = curBeam.subSeqStartPos[curBeam.rowIdxInBeam[j] + 1];

@@ -42,7 +42,7 @@ class MultiplexGPUKernel : public framework::OpKernel {
     for (auto i = 0; i < rows; i++) {
       int32_t k = index[i];
       PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative.");
-      PADDLE_ENFORCE_LT(k, ins.size(),
+      PADDLE_ENFORCE_LT((size_t)k, ins.size(),
                         "index exceeds the number of candidate tensors.");
       memory::Copy(place, out->data<T>() + i * cols, place,
                    ins[k]->data<T>() + i * cols, cols * sizeof(T), stream);
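
One detail worth noting about the cast-based fixes above: the nonnegativity check (`PADDLE_ENFORCE_GE` and `CHECK_LE`'s surrounding logic) runs on the value while it is still signed, and the cast to `size_t` happens only in the upper-bound comparison. A minimal sketch of why that ordering gives the better diagnostic (values here are hypothetical):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  int k = -1;    // illustrative bad index
  size_t n = 3;  // illustrative container size

  // Casting first wraps the negative value to SIZE_MAX, so the
  // upper-bound comparison still fails, but an error raised from it
  // would blame the upper bound rather than the negative index.
  std::printf("(size_t)k < n: %d\n", (size_t)k < n);  // prints 0

  // Checking on the signed side first reports the actual fault,
  // which is why the GE check precedes the cast in the kernel above.
  if (k < 0) std::printf("index must be nonnegative.\n");
  return 0;
}
```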