Commit de3c1527 authored by: liaogang

Fix glog check type mismatch

Parent: 46cadaea
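Every hunk below applies the same one-line pattern: glog's CHECK_EQ / CHECK_GE / CHECK_LE macros compare their two arguments through a comparison template, so mixing a signed integer literal on one side with an unsigned value (size_t, std::vector::size_type) on the other triggers a signed/unsigned comparison warning, which becomes a build failure under -Werror. The fix is to static_cast one operand so both sides have the same type. A minimal standalone sketch of the problem and the fix (illustration only, not part of this commit; the file name, build flags, and the inputs vector are hypothetical):

// sketch.cpp -- why these casts are needed (assumed build:
//   g++ -std=c++11 -Wall -Werror sketch.cpp -lglog).
#include <glog/logging.h>
#include <vector>

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  std::vector<int> inputs = {7};  // hypothetical stand-in for Arguments

  // Before the fix: compares a signed int literal with the unsigned
  // std::vector size_type inside CHECK_EQ's expansion, tripping
  // -Wsign-compare (an error under -Werror):
  //   CHECK_EQ(1, inputs.size());

  // After the fix: cast the unsigned operand so both sides are int.
  CHECK_EQ(1, static_cast<int>(inputs.size()));
  return 0;
}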
@@ -128,11 +128,11 @@ public:
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
-    CHECK_EQ(1, inputs.size());
-    CHECK_EQ(2, outputs.size());
-    CHECK_EQ(0, inouts.size());
-    CHECK_EQ(inputs[0].dims_.size(), 4);
+    CHECK_EQ(1, static_cast<int>(inputs.size()));
+    CHECK_EQ(2, static_cast<int>(outputs.size()));
+    CHECK_EQ(0, static_cast<int>(inouts.size()));
+    CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 4);
     for (size_t i = 0; i < inputs[0].dims_.size(); i++) {
       CHECK_EQ(inputs[0].dims_[i], outputs[0].dims_[i]);
       CHECK_EQ(inputs[0].dims_[i], outputs[1].dims_[i]);
@@ -180,11 +180,11 @@ public:
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
-    CHECK_EQ(4, inputs.size());
-    CHECK_EQ(1, outputs.size());
-    CHECK_EQ(0, inouts.size());
-    CHECK_EQ(inputs[0].dims_.size(), 4);
+    CHECK_EQ(4, static_cast<int>(inputs.size()));
+    CHECK_EQ(1, static_cast<int>(outputs.size()));
+    CHECK_EQ(0, static_cast<int>(inouts.size()));
+    CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 4);
     for (size_t i = 0; i < inputs[0].dims_.size(); i++) {
       CHECK_EQ(inputs[0].dims_[i], inputs[1].dims_[i]);
       CHECK_EQ(inputs[0].dims_[i], inputs[2].dims_[i]);
...
@@ -130,7 +130,8 @@ void ConvProjection::reshapeTensorDesc(int batchSize) {
 void ConvProjection::reshape(int batchSize) {
   size_t width = calOutputSize();
   CHECK_EQ(width, out_->value->getWidth());
-  CHECK_EQ(channels_ * imageH_ * imageW_, in_->value->getWidth())
+  CHECK_EQ(static_cast<size_t>(channels_ * imageH_ * imageW_),
+           in_->value->getWidth())
       << "Wrong input size for convolution"
       << " channels=" << channels_ << " imageH=" << imageH_
       << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth();
...
@@ -310,7 +310,7 @@ void initDataLayer(TestConfig testConf,
         testConf.inputDefs[i].labelSeqStartPositions;
     if (labelSeqStartPositions.size() != 0) {
       CHECK(!sequenceStartPositions);
-      CHECK_GE(labelSeqStartPositions.size(), 2);
+      CHECK_GE(static_cast<int>(labelSeqStartPositions.size()), 2);
       sequenceStartPositions =
           ICpuGpuVector::create(labelSeqStartPositions.size(), useGpu);
...
@@ -114,8 +114,8 @@ TEST(Layer, batchNorm) {
   bnLayer->forward(PASS_GC);
   convLayer->forward(PASS_GC);
 
-  CHECK_EQ(convLayer->getOutputValue()->getHeight(), 100);
-  CHECK_EQ(convLayer->getOutputValue()->getWidth(), 576);
+  CHECK_EQ(static_cast<int>(convLayer->getOutputValue()->getHeight()), 100);
+  CHECK_EQ(static_cast<int>(convLayer->getOutputValue()->getWidth()), 576);
 }
 
 int main(int argc, char** argv) {
...
@@ -293,7 +293,7 @@ TEST(PyDataProvider2, can_over_batch_size) {
   while (true) {
     int64_t realBatchSize = provider->getNextBatchInternal(batchSize, &batch);
     if (realBatchSize) {
-      CHECK_LE(realBatchSize, batchSize);
+      CHECK_LE(static_cast<size_t>(realBatchSize), batchSize);
     } else {
       break;
     }
...
@@ -2268,7 +2268,7 @@ void CpuMatrix::contextProjectionBackward(Matrix* inputGrad,
   int64_t inputDim = inputGrad ? inputGrad->getWidth()
                                : weightGrad ? weightGrad->getWidth() : 0;
-  CHECK_EQ(getWidth(), inputDim * contextLength);
+  CHECK_EQ(getWidth(), static_cast<size_t>(inputDim * contextLength));
   const int* starts = sequence.getData();
   size_t numSequences = sequence.getSize() - 1;
...