Commit b8872121 authored by fengjiayi

fix broadcast_op_test and reduce_op_test

Parent bcf260e1
@@ -40,7 +40,7 @@ option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
 option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
 option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
-option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF)
+option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON)
 option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
 option(WITH_STYLE_CHECK "Compile PaddlePaddle with style check" ON)
 option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
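The CMakeLists.txt hunk flips the default so unit tests are built. For reference, the same switch can also be set per build tree on the configure line instead of editing the file; a typical (illustrative) invocation from a build directory would be:

    cmake .. -DWITH_TESTING=ON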
......
@@ -139,7 +139,7 @@ struct TestBroadcastOpHandle {
       PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal.");
       f::Tensor result_tensor;
-      f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor);
+      f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor, true);
       float* ct = result_tensor.mutable_data<float>(cpu_place);
       for (int64_t i = 0; i < f::product(kDims); ++i) {
@@ -185,7 +185,7 @@ struct TestBroadcastOpHandle {
       }
       f::Tensor result_tensor;
-      f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor);
+      f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor, true);
       float* ct = result_tensor.data<float>();
       for (int64_t i = 0; i < f::product(kDims); ++i) {
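In both hunks the test reads result_tensor on the host immediately after the copy. The trailing true appears to select a synchronous copy: if TensorCopy only enqueues the transfer on the device context's stream, the verification loop can race against an unfinished device-to-host copy. A minimal stand-alone C++ sketch of that flag's semantics (FakeStream and AsyncCopy are illustrative names, not Paddle's API):

    #include <algorithm>
    #include <future>
    #include <vector>

    // Toy stand-in for a device stream with one outstanding copy.
    struct FakeStream {
      std::future<void> pending;
      void Wait() {
        if (pending.valid()) pending.get();  // block until the copy finishes
      }
    };

    // Analogue of the assumed TensorCopy behavior: enqueue an asynchronous
    // copy; when `sync` is true, wait for it before returning -- which is
    // what the added `true` argument in the tests requests.
    void AsyncCopy(const std::vector<float>& src, std::vector<float>* dst,
                   FakeStream* stream, bool sync) {
      dst->assign(src.size(), 0.0f);
      stream->pending = std::async(std::launch::async, [&src, dst] {
        std::copy(src.begin(), src.end(), dst->begin());
      });
      if (sync) stream->Wait();
    }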
......
@@ -194,7 +194,8 @@ struct TestReduceOpHandle {
     }
     f::Tensor result_tensor;
-    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
+    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor,
+                  true);
     float *ct = result_tensor.data<float>();
     for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
@@ -239,7 +240,8 @@ struct TestReduceOpHandle {
     auto &rt = out_var->Get<f::LoDTensor>();
     f::Tensor result_tensor;
-    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
+    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor,
+                  true);
     float *ct = result_tensor.data<float>();
     for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
......
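The reduce hunks apply the same fix, with the call now wrapped across two lines. Continuing the sketch above, a hypothetical driver shows the pattern the tests rely on: validate the data only after the synchronous copy has returned.

    #include <cassert>

    int main() {
      std::vector<float> src(8, 1.0f), dst;
      FakeStream stream;
      AsyncCopy(src, &dst, &stream, /*sync=*/true);
      // Safe only because sync=true: the copy has completed, mirroring how
      // the tests read result_tensor.data<float>() right after TensorCopy.
      for (float v : dst) assert(v == 1.0f);
      return 0;
    }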