diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc
index 33166d9ce23a4a345fc00a65adf63281b13643c3..6b4e46f56a0c9c9836c5b353ec9c554454ab0491 100644
--- a/paddle/memory/detail/system_allocator.cc
+++ b/paddle/memory/detail/system_allocator.cc
@@ -41,7 +41,16 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) {
 
   index = 0;  // unlock memory
 
-  void* p = malloc(size);
+  void* p;
+
+#ifdef PADDLE_USE_MKLDNN
+  // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
+  // memory alignment
+  PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0);
+#else
+  PADDLE_ENFORCE_EQ(posix_memalign(&p, 32ul, size), 0);
+#endif
+  PADDLE_ENFORCE(p, "Fail to allocate CPU memory: size = %d .", size);
 
   if (p != nullptr) {
     if (FLAGS_use_pinned_memory) {
diff --git a/paddle/operators/nccl_op_test.cu b/paddle/operators/nccl_op_test.cu
index 80c50a28a9e5d560fc693c518b9e62091ddc5724..e5927d56ae7cfbd09e941c993041af46ecd8d70d 100644
--- a/paddle/operators/nccl_op_test.cu
+++ b/paddle/operators/nccl_op_test.cu
@@ -185,7 +185,7 @@ TEST_F(NCCLTester, ncclAllReduceOp) {
         recv_tensor.numel() * sizeof(float),
         static_cast<p::CUDADeviceContext *>(dev_ctxs[i])->stream());
 
-    for (size_t j = 0; j < f::product(kDims); ++j) {
+    for (int64_t j = 0; j < f::product(kDims); ++j) {
       ASSERT_NEAR(ct[j], result, 1e-5);
     }
   }
@@ -234,7 +234,7 @@ TEST_F(NCCLTester, ncclReduceOp) {
         recv_tensor.numel() * sizeof(float),
         static_cast<p::CUDADeviceContext *>(dev_ctxs[kRoot])->stream());
 
-    for (int j = 0; j < f::product(kDims); ++j) {
+    for (int64_t j = 0; j < f::product(kDims); ++j) {
       ASSERT_NEAR(ct[j], result, 1e-5);
     }
   }
@@ -282,7 +282,7 @@ TEST_F(NCCLTester, ncclBcastOp) {
         recv_tensor.numel() * sizeof(float),
         static_cast<p::CUDADeviceContext *>(dev_ctxs[idx])->stream());
 
-    for (size_t j = 0; j < f::product(kDims); ++j) {
+    for (int64_t j = 0; j < f::product(kDims); ++j) {
      ASSERT_NEAR(ct[j], result, 1e-5);
     }
   }
diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc
index eda8226480a66ae1a631391e9335db04604039c5..9213cc7a85822e4c78ef72aec2bf86d2edac023a 100644
--- a/paddle/operators/reshape_op.cc
+++ b/paddle/operators/reshape_op.cc
@@ -36,7 +36,7 @@ class ReshapeOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty.");
     auto x_dims = ctx->GetInputDim("X");
     // TODO(qiao) change batch_size
-    for (int i = 1; i < shape.size(); ++i) {
+    for (size_t i = 1; i < shape.size(); ++i) {
       PADDLE_ENFORCE(shape[i] > 0,
                      "Each dimension of shape "
                      "must be positiv except the first.");
diff --git a/paddle/operators/save_load_op_test.cc b/paddle/operators/save_load_op_test.cc
index fe2b15ec09c6d29ad5f78e5c36f534c6a88497e6..a57466a48d4d6016fe2618d19fdca4c4f667124a 100644
--- a/paddle/operators/save_load_op_test.cc
+++ b/paddle/operators/save_load_op_test.cc
@@ -34,7 +34,7 @@ TEST(SaveLoadOp, CPU) {
 
   tensor->set_lod(expect_lod);
   int* expect = tensor->mutable_data<int>(place);
-  for (size_t i = 0; i < paddle::framework::product(tensor->dims()); ++i) {
+  for (int64_t i = 0; i < tensor->numel(); ++i) {
     expect[i] = static_cast<int>(i);
   }
   paddle::framework::AttributeMap attrs;
@@ -50,7 +50,7 @@ TEST(SaveLoadOp, CPU) {
       "load", {}, {{"Out", {"out_var"}}}, attrs);
   load_op->Run(scope, ctx);
   int* actual = target->data<int>();
-  for (size_t i = 0; i < paddle::framework::product(tensor->dims()); ++i) {
+  for (int64_t i = 0; i < tensor->numel(); ++i) {
     EXPECT_EQ(expect[i], actual[i]);
   }
   auto& actual_lod = target->lod();
@@ -60,4 +60,4 @@ TEST(SaveLoadOp, CPU) {
       EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]);
     }
   }
-}
\ No newline at end of file
+}
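For readers skimming the patch, here is a minimal, self-contained sketch of the aligned-allocation pattern the first hunk introduces; the `AlignedAlloc` helper and the `main` driver below are hypothetical and not part of the patch, while the `PADDLE_USE_MKLDNN` macro and the 4096/32-byte alignment values come from it. The hunk replaces plain `malloc` with `posix_memalign`, page-aligning (4096 bytes) when MKL-DNN is enabled, as its header recommends, and using 32-byte alignment otherwise. The remaining hunks only adjust loop-index types to match what each loop bound returns (`framework::product` and `Tensor::numel` return a signed `int64_t`, `std::vector::size` an unsigned `size_t`), avoiding signed/unsigned comparison warnings.

```cpp
#include <cstdio>
#include <cstdlib>

// Hypothetical helper mirroring the idea of the CPUAllocator::Alloc change:
// page-align (4096 B) when built with MKL-DNN, otherwise align to 32 B.
void* AlignedAlloc(size_t size) {
  void* p = nullptr;
#ifdef PADDLE_USE_MKLDNN
  constexpr size_t alignment = 4096;  // mkldnn.hpp recommends page alignment
#else
  constexpr size_t alignment = 32;
#endif
  // posix_memalign returns 0 on success; the alignment must be a power of
  // two and a multiple of sizeof(void*), which both values above satisfy.
  if (posix_memalign(&p, alignment, size) != 0) {
    return nullptr;  // the patch turns this case into a PADDLE_ENFORCE failure
  }
  return p;
}

int main() {
  void* p = AlignedAlloc(1024);
  std::printf("allocated %p\n", p);
  std::free(p);  // memory from posix_memalign is released with free()
  return 0;
}
```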