diff --git a/paddle/function/cross_map_normal_op.cpp b/paddle/function/cross_map_normal_op.cpp index a9c7693830542f0e0d852f629d210b92a5bf2069..74094bc4fc8052aba0ae955217311e28eda7c2a7 100644 --- a/paddle/function/cross_map_normal_op.cpp +++ b/paddle/function/cross_map_normal_op.cpp @@ -128,11 +128,11 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(1, inputs.size()); - CHECK_EQ(2, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(1, static_cast<int>(inputs.size())); + CHECK_EQ(2, static_cast<int>(outputs.size())); + CHECK_EQ(0, static_cast<int>(inouts.size())); - CHECK_EQ(inputs[0].dims_.size(), 4); + CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 4); for (size_t i = 0; i < inputs[0].dims_.size(); i++) { CHECK_EQ(inputs[0].dims_[i], outputs[0].dims_[i]); CHECK_EQ(inputs[0].dims_[i], outputs[1].dims_[i]); @@ -180,11 +180,11 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(4, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(4, static_cast<int>(inputs.size())); + CHECK_EQ(1, static_cast<int>(outputs.size())); + CHECK_EQ(0, static_cast<int>(inouts.size())); - CHECK_EQ(inputs[0].dims_.size(), 4); + CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 4); for (size_t i = 0; i < inputs[0].dims_.size(); i++) { CHECK_EQ(inputs[0].dims_[i], inputs[1].dims_[i]); CHECK_EQ(inputs[0].dims_[i], inputs[2].dims_[i]); diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/gserver/layers/ConvProjection.cpp index e1c4b91ace21522a3bc640dfc4eaa1a42668ed02..0281170bc59855f6f4d2f4212523275a92d202d5 100644 --- a/paddle/gserver/layers/ConvProjection.cpp +++ b/paddle/gserver/layers/ConvProjection.cpp @@ -130,7 +130,8 @@ void ConvProjection::reshapeTensorDesc(int batchSize) { void ConvProjection::reshape(int batchSize) { size_t width = calOutputSize(); CHECK_EQ(width, out_->value->getWidth()); - CHECK_EQ(channels_ * imageH_ * imageW_, 
in_->value->getWidth()) + CHECK_EQ(static_cast<size_t>(channels_ * imageH_ * imageW_), + in_->value->getWidth()) << "Wrong input size for convolution" << " channels=" << channels_ << " imageH=" << imageH_ << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth(); diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index 57c176810fddf96828c210807673b7d1a3c739c0..ae016e74eaa84f7c43a30c09c8c4577e25360c4e 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -310,7 +310,7 @@ void initDataLayer(TestConfig testConf, testConf.inputDefs[i].labelSeqStartPositions; if (labelSeqStartPositions.size() != 0) { CHECK(!sequenceStartPositions); - CHECK_GE(labelSeqStartPositions.size(), 2); + CHECK_GE(static_cast<int>(labelSeqStartPositions.size()), 2); sequenceStartPositions = ICpuGpuVector::create(labelSeqStartPositions.size(), useGpu); diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 7f5fcb670b70aed9f0a04180d344556a0390122f..e000c6994407cd73fec35b2bd1e644308ff25a8c 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -114,8 +114,8 @@ TEST(Layer, batchNorm) { bnLayer->forward(PASS_GC); convLayer->forward(PASS_GC); - CHECK_EQ(convLayer->getOutputValue()->getHeight(), 100); - CHECK_EQ(convLayer->getOutputValue()->getWidth(), 576); + CHECK_EQ(static_cast<int>(convLayer->getOutputValue()->getHeight()), 100); + CHECK_EQ(static_cast<int>(convLayer->getOutputValue()->getWidth()), 576); } int main(int argc, char** argv) { diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp index 5f8bc5ecd0f77efc6dcda0330f124ca6cab7f277..7e193eb31a03e6a6b8b0b02e89608a0e02b9e248 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/gserver/tests/test_PyDataProvider2.cpp @@ -293,7 +293,7 @@ TEST(PyDataProvider2, can_over_batch_size) { while (true) { int64_t 
realBatchSize = provider->getNextBatchInternal(batchSize, &batch); if (realBatchSize) { - CHECK_LE(realBatchSize, batchSize); + CHECK_LE(static_cast<size_t>(realBatchSize), batchSize); } else { break; } diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 50d2e3eb671028c8169321fcd85fe25735c11a14..b281d5eb02f6d5ee46b3f4155b98c738f05d6640 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -2268,7 +2268,7 @@ void CpuMatrix::contextProjectionBackward(Matrix* inputGrad, int64_t inputDim = inputGrad ? inputGrad->getWidth() : weightGrad ? weightGrad->getWidth() : 0; - CHECK_EQ(getWidth(), inputDim * contextLength); + CHECK_EQ(getWidth(), static_cast<size_t>(inputDim * contextLength)); const int* starts = sequence.getData(); size_t numSequences = sequence.getSize() - 1;