Unverified commit 71fa3ca9, authored by fengjiayi, committed by GitHub

Merge pull request #10232 from JiayiFeng/fix_unittests

Fix unittests
@@ -139,7 +139,7 @@ struct TestBroadcastOpHandle {
     PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal.");
     f::Tensor result_tensor;
-    f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor);
+    f::TensorCopySync(out_tensor, cpu_place, &result_tensor);
     float* ct = result_tensor.mutable_data<float>(cpu_place);
     for (int64_t i = 0; i < f::product(kDims); ++i) {
@@ -185,7 +185,7 @@ struct TestBroadcastOpHandle {
     }
     f::Tensor result_tensor;
-    f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor);
+    f::TensorCopySync(rt, cpu_place, &result_tensor);
     float* ct = result_tensor.data<float>();
     for (int64_t i = 0; i < f::product(kDims); ++i) {
...
@@ -66,8 +66,7 @@ void FetchOpHandle::RunImpl() {
     auto &t = var->Get<framework::LoDTensor>();
     if (platform::is_gpu_place(t.place())) {
 #ifdef PADDLE_WITH_CUDA
-      TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i], true);
-      dev_ctxes_.at(t.place())->Wait();
+      TensorCopySync(t, cpu, &tensors_[i]);
 #endif
     } else {
       tensors_[i].ShareDataWith(t);
...
@@ -194,7 +194,7 @@ struct TestReduceOpHandle {
     }
     f::Tensor result_tensor;
-    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
+    f::TensorCopySync(rt, cpu_place, &result_tensor);
     float *ct = result_tensor.data<float>();
     for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
@@ -239,7 +239,7 @@ struct TestReduceOpHandle {
     auto &rt = out_var->Get<f::LoDTensor>();
     f::Tensor result_tensor;
-    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
+    f::TensorCopySync(rt, cpu_place, &result_tensor);
     float *ct = result_tensor.data<float>();
     for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
...
@@ -20,7 +20,7 @@ namespace paddle {
 namespace framework {
 void TensorCopy(const Tensor& src, const platform::Place& dst_place,
-                const platform::DeviceContext& ctx, Tensor* dst, bool sync) {
+                const platform::DeviceContext& ctx, Tensor* dst) {
   VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
           << dst_place;
   src.check_memory_size();
@@ -48,9 +48,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
     auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
     PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
     auto stream =
-        sync ? nullptr
-             : reinterpret_cast<const platform::CUDADeviceContext&>(ctx)
-                   .stream();
+        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
     memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
   } else if (platform::is_cpu_place(src_place) &&
              platform::is_gpu_place(dst_place)) {
@@ -61,9 +59,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
     auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
     PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place);
     auto stream =
-        sync ? nullptr
-             : reinterpret_cast<const platform::CUDADeviceContext&>(ctx)
-                   .stream();
+        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
     memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
   } else if (platform::is_gpu_place(src_place) &&
              platform::is_gpu_place(dst_place)) {
@@ -72,9 +68,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
     auto ctx_place = ctx.GetPlace();
     PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
     auto stream =
-        sync ? nullptr
-             : reinterpret_cast<const platform::CUDADeviceContext&>(ctx)
-                   .stream();
+        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
     memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
   }
 #endif
@@ -92,6 +86,41 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
   TensorCopy(src, dst_place, *dev_ctx, dst);
 }

+void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
+                    Tensor* dst) {
+  VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
+          << " to " << dst_place;
+  src.check_memory_size();
+  dst->Resize(src.dims());
+  dst->set_layout(src.layout());
+  auto src_place = src.place();
+  auto src_ptr = src.data<void>();
+  auto dst_ptr = dst->mutable_data(dst_place, src.type());
+  auto size = src.numel() * SizeOfType(src.type());
+  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
+    memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
+                 boost::get<platform::CPUPlace>(src_place), src_ptr, size);
+  }
+#ifdef PADDLE_WITH_CUDA
+  else if (platform::is_gpu_place(src_place) &&  // NOLINT
+           platform::is_cpu_place(dst_place)) {
+    auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
+    auto dst_cpu_place = boost::get<platform::CPUPlace>(dst_place);
+    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
+  } else if (platform::is_cpu_place(src_place) &&
+             platform::is_gpu_place(dst_place)) {
+    auto src_cpu_place = boost::get<platform::CPUPlace>(src_place);
+    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
+    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr);
+  } else if (platform::is_gpu_place(src_place) &&
+             platform::is_gpu_place(dst_place)) {
+    auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
+    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
+    memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
+  }
+#endif
+}
+
 template <typename Predicate, typename DevCtx>
 struct AnyDTypeVisitor {
   Predicate predicate_;
...
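Note on the new helper: TensorCopySync always hands a nullptr stream to memory::Copy on the CUDA paths, so the copy blocks and the destination tensor is valid as soon as the call returns. Callers that previously paired an asynchronous TensorCopy with an explicit Wait() can therefore collapse the pattern into a single call. A minimal migration sketch (illustrative only, not part of this diff; gpu_tensor, cpu_tensor, and dev_ctx are hypothetical names):

    // Before: asynchronous copy on the context's stream, then an explicit wait.
    //   framework::TensorCopy(gpu_tensor, platform::CPUPlace(), dev_ctx, &cpu_tensor);
    //   dev_ctx.Wait();
    // After: one blocking call; the data can be read on the host immediately.
    framework::TensorCopySync(gpu_tensor, platform::CPUPlace(), &cpu_tensor);
    const float* host_ptr = cpu_tensor.data<float>();  // safe to dereference right away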
@@ -24,10 +24,11 @@ namespace paddle {
 namespace framework {
 void TensorCopy(const Tensor& src, const platform::Place& dst_place,
-                const platform::DeviceContext& ctx, Tensor* dst,
-                bool sync = false);
+                const platform::DeviceContext& ctx, Tensor* dst);
 void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                 Tensor* dst);
+void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
+                    Tensor* dst);
 template <typename T>
 void TensorFromVector(const std::vector<T>& src,
...
@@ -57,10 +57,7 @@ class FetchOp : public framework::OperatorBase {
     // FIXME(yuyang18): Should we assume the fetch operator always generate
     // CPU outputs?
-    auto &dev_ctx = *pool.Get(src_item.place());
-    TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item);
-    dev_ctx.Wait();
+    TensorCopySync(src_item, platform::CPUPlace(), &dst_item);
     dst_item.set_lod(src_item.lod());
     VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name;
...
@@ -72,8 +72,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    TensorCopySync(input_a_cpu, Place(), &input_a);
+    TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   std::vector<Tensor> input;
@@ -89,7 +89,7 @@ void testConcat() {
   int* out_ptr;
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(out, CPUPlace(), *context, &out_cpu);
+    TensorCopySync(out, CPUPlace(), &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -144,8 +144,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    TensorCopySync(input_a_cpu, Place(), &input_a);
+    TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   input.clear();
@@ -159,7 +159,7 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(out, CPUPlace(), *context, &out_cpu);
+    TensorCopySync(out, CPUPlace(), &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -216,8 +216,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    TensorCopySync(input_a_cpu, Place(), &input_a);
+    TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   input.clear();
@@ -231,7 +231,7 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(out, CPUPlace(), *context, &out_cpu);
+    TensorCopySync(out, CPUPlace(), &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -290,8 +290,8 @@ void testConcat() {
   }
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(input_a_cpu, Place(), *context, &input_a);
-    TensorCopy(input_b_cpu, Place(), *context, &input_b);
+    TensorCopySync(input_a_cpu, Place(), &input_a);
+    TensorCopySync(input_b_cpu, Place(), &input_b);
   }
   input.clear();
@@ -305,7 +305,7 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopy(out, CPUPlace(), *context, &out_cpu);
+    TensorCopySync(out, CPUPlace(), &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
...
@@ -63,7 +63,7 @@ void testIm2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     input = input_tmp;
   } else {
-    TensorCopy(input_tmp, *place, *context, &input);
+    TensorCopySync(input_tmp, *place, &input);
   }
   output_cfo.mutable_data<float>(
       {1, filter_size, filter_size, output_height, output_width}, *place);
@@ -88,7 +88,7 @@ void testIm2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     out_cfo_ptr = output_cfo.data<float>();
   } else {
-    TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp);
+    TensorCopySync(output_cfo, paddle::platform::CPUPlace(), &output_tmp);
     out_cfo_ptr = output_tmp.data<float>();
   }
   for (int i = 0; i < 6; ++i) {
@@ -99,7 +99,7 @@ void testIm2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     out_ocf_ptr = output_ocf.data<float>();
   } else {
-    TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp);
+    TensorCopySync(output_ocf, paddle::platform::CPUPlace(), &output_tmp);
     out_ocf_ptr = output_tmp.data<float>();
   }
@@ -120,7 +120,7 @@ void testIm2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     input = input_tmp;
   } else {
-    TensorCopy(input_tmp, *place, *context, &input);
+    TensorCopySync(input_tmp, *place, &input);
   }
   col2im(*context, output_cfo, dilation, stride, padding, &input);
@@ -129,7 +129,7 @@ void testIm2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     in_ptr = input.data<float>();
   } else {
-    TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp);
+    TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp);
     in_ptr = input_tmp.data<float>();
   }
   for (int i = 0; i < 6; ++i) {
@@ -141,7 +141,7 @@ void testIm2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     input = input_tmp;
   } else {
-    TensorCopy(input_tmp, *place, *context, &input);
+    TensorCopySync(input_tmp, *place, &input);
   }
   col2im_ocf(*context, output_ocf, dilation, stride, padding, &input);
@@ -149,7 +149,7 @@ void testIm2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     in_ptr = input.data<float>();
   } else {
-    TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp);
+    TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp);
     in_ptr = input_tmp.data<float>();
   }
   for (int i = 0; i < 6; ++i) {
...
@@ -40,15 +40,15 @@ TEST(math_function, notrans_mul_trans_fp32) {
   float arr[6] = {0, 1, 2, 3, 4, 5};
   memcpy(input1_ptr, arr, 6 * sizeof(float));
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input1, gpu_place, context, &input2_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input1, gpu_place, &input2_gpu);
   out_gpu.mutable_data<float>({2, 2}, gpu_place);
   paddle::operators::math::matmul<CUDADeviceContext, float>(
       context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0);
-  TensorCopy(out_gpu, cpu_place, context, &out);
+  TensorCopySync(out_gpu, cpu_place, &out);
   float* out_ptr = out.data<float>();
   context.Wait();
@@ -80,8 +80,8 @@ TEST(math_function, notrans_mul_trans_fp16) {
   float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
   fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input1, gpu_place, context, &input2_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input1, gpu_place, &input2_gpu);
   out_gpu.mutable_data<float16>({2, 2}, gpu_place);
@@ -89,7 +89,7 @@ TEST(math_function, notrans_mul_trans_fp16) {
       context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu,
       float16(0));
-  TensorCopy(out_gpu, cpu_place, context, &out);
+  TensorCopySync(out_gpu, cpu_place, &out);
   float16* out_ptr = out.data<float16>();
   context.Wait();
@@ -117,15 +117,15 @@ TEST(math_function, trans_mul_notrans_fp32) {
   float arr[6] = {0, 1, 2, 3, 4, 5};
   memcpy(input1_ptr, arr, 6 * sizeof(float));
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input1, gpu_place, context, &input2_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input1, gpu_place, &input2_gpu);
   out_gpu.mutable_data<float>({3, 3}, gpu_place);
   paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float>(
       context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0);
-  TensorCopy(out_gpu, cpu_place, context, &out);
+  TensorCopySync(out_gpu, cpu_place, &out);
   float* out_ptr = out.data<float>();
   context.Wait();
@@ -162,8 +162,8 @@ TEST(math_function, trans_mul_notrans_fp16) {
   float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
   fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input1, gpu_place, context, &input2_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input1, gpu_place, &input2_gpu);
   out_gpu.mutable_data<float16>({3, 3}, gpu_place);
@@ -171,7 +171,7 @@ TEST(math_function, trans_mul_notrans_fp16) {
       context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu,
       float16(0));
-  TensorCopy(out_gpu, cpu_place, context, &out);
+  TensorCopySync(out_gpu, cpu_place, &out);
   float16* out_ptr = out.data<float16>();
   context.Wait();
@@ -214,9 +214,9 @@ TEST(math_function, gemm_notrans_cublas_fp32) {
   float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
   memcpy(input3_ptr, arr3, 8 * sizeof(float));
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input2, gpu_place, context, &input2_gpu);
-  TensorCopy(input3, gpu_place, context, &input3_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input2, gpu_place, &input2_gpu);
+  TensorCopySync(input3, gpu_place, &input3_gpu);
   float* a = input1_gpu.data<float>();
   float* b = input2_gpu.data<float>();
   float* c = input3_gpu.mutable_data<float>(gpu_place);
@@ -224,7 +224,7 @@ TEST(math_function, gemm_notrans_cublas_fp32) {
   paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>(
       context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4);
-  TensorCopy(input3_gpu, cpu_place, context, &input3);
+  TensorCopySync(input3_gpu, cpu_place, &input3);
   // numpy code:
   // a = np.arange(6).reshape(2, 3)
@@ -274,9 +274,9 @@ TEST(math_function, gemm_notrans_cublas_fp16) {
   float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place);
   fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input2, gpu_place, context, &input2_gpu);
-  TensorCopy(input3, gpu_place, context, &input3_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input2, gpu_place, &input2_gpu);
+  TensorCopySync(input3, gpu_place, &input3_gpu);
   float16* a = input1_gpu.data<float16>();
   float16* b = input2_gpu.data<float16>();
   float16* c = input3_gpu.mutable_data<float16>(gpu_place);
@@ -285,7 +285,7 @@ TEST(math_function, gemm_notrans_cublas_fp16) {
       context, false, false, m, n, k, float16(1), a, 3, b + 1, 4, float16(1),
       c + 1, 4);
-  TensorCopy(input3_gpu, cpu_place, context, &input3);
+  TensorCopySync(input3_gpu, cpu_place, &input3);
   // numpy code:
   // a = np.arange(6).reshape(2, 3)
@@ -332,9 +332,9 @@ TEST(math_function, gemm_trans_cublas_fp32) {
   float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
   memcpy(input3_ptr, arr3, 8 * sizeof(float));
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input2, gpu_place, context, &input2_gpu);
-  TensorCopy(input3, gpu_place, context, &input3_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input2, gpu_place, &input2_gpu);
+  TensorCopySync(input3, gpu_place, &input3_gpu);
   float* a = input1_gpu.data<float>();
   float* b = input2_gpu.data<float>();
   float* c = input3_gpu.mutable_data<float>(gpu_place);
@@ -342,7 +342,7 @@ TEST(math_function, gemm_trans_cublas_fp32) {
   paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>(
       context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4);
-  TensorCopy(input3_gpu, cpu_place, context, &input3);
+  TensorCopySync(input3_gpu, cpu_place, &input3);
   context.Wait();
   EXPECT_EQ(input3_ptr[0], 0);
@@ -386,9 +386,9 @@ TEST(math_function, gemm_trans_cublas_fp16) {
   float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place);
   fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});
-  TensorCopy(input1, gpu_place, context, &input1_gpu);
-  TensorCopy(input2, gpu_place, context, &input2_gpu);
-  TensorCopy(input3, gpu_place, context, &input3_gpu);
+  TensorCopySync(input1, gpu_place, &input1_gpu);
+  TensorCopySync(input2, gpu_place, &input2_gpu);
+  TensorCopySync(input3, gpu_place, &input3_gpu);
   float16* a = input1_gpu.data<float16>();
   float16* b = input2_gpu.data<float16>();
   float16* c = input3_gpu.mutable_data<float16>(gpu_place);
@@ -397,7 +397,7 @@ TEST(math_function, gemm_trans_cublas_fp16) {
      context, false, true, m, n, k, float16(1), a, 3, b + 3, 3, float16(1),
      c + 1, 4);
-  TensorCopy(input3_gpu, cpu_place, context, &input3);
+  TensorCopySync(input3_gpu, cpu_place, &input3);
   context.Wait();
   EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
@@ -441,14 +441,14 @@ void GemvTest(int m, int n, bool trans) {
     data_b[i] = static_cast<T>(i);
   }
-  TensorCopy(mat_a, gpu_place, context, &g_mat_a);
-  TensorCopy(vec_b, gpu_place, context, &g_vec_b);
+  TensorCopySync(mat_a, gpu_place, &g_mat_a);
+  TensorCopySync(vec_b, gpu_place, &g_vec_b);
   paddle::operators::math::gemv<CUDADeviceContext, T>(
       context, trans, static_cast<int>(m), static_cast<int>(n), 1., g_data_a,
       g_data_b, 0., g_data_c);
-  TensorCopy(g_vec_c, cpu_place, context, &vec_c);
+  TensorCopySync(g_vec_c, cpu_place, &vec_c);
   if (!trans) {
     for (int i = 0; i < m; ++i) {
...
@@ -72,7 +72,7 @@ void testVol2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     input = input_tmp;
   } else {
-    paddle::framework::TensorCopy(input_tmp, *place, *context, &input);
+    paddle::framework::TensorCopySync(input_tmp, *place, &input);
   }
   output.mutable_data<float>({1, filter_size, filter_size, filter_size,
                               output_depth, output_height, output_width},
@@ -86,7 +86,7 @@ void testVol2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     out_cfo_ptr = output.data<float>();
   } else {
-    TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp);
+    TensorCopySync(output, paddle::platform::CPUPlace(), &output_tmp);
     out_cfo_ptr = output_tmp.data<float>();
   }
@@ -100,7 +100,7 @@ void testVol2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     input = input_tmp;
   } else {
-    TensorCopy(input_tmp, *place, *context, &input);
+    TensorCopySync(input_tmp, *place, &input);
   }
   paddle::operators::math::Col2VolFunctor<DeviceContext, float> col2vol;
@@ -110,7 +110,7 @@ void testVol2col() {
   if (paddle::platform::is_cpu_place(*place)) {
     in_ptr = input.data<float>();
   } else {
-    TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp);
+    TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp);
     in_ptr = input_tmp.data<float>();
   }
...
@@ -228,10 +228,8 @@ TEST_F(NCCLTester, ncclReduceOp) {
   result_tensor->Resize(kDims);
   auto *ct = result_tensor->mutable_data<float>(cpu_place);
-  paddle::memory::Copy(
-      cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt,
-      recv_tensor.numel() * sizeof(float),
-      static_cast<p::CUDADeviceContext *>(dev_ctxs_[kRoot])->stream());
+  paddle::memory::Copy(cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt,
+                       recv_tensor.numel() * sizeof(float), nullptr);
   for (int64_t j = 0; j < f::product(kDims); ++j) {
     ASSERT_NEAR(ct[j], expected_result, 1e-5);
...
@@ -168,11 +168,10 @@ void DoubleBufferReader::PrefetchThreadFunc() {
     }
     if (platform::is_gpu_place(place_)) {
       auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id];
-      auto* gpu_ctx = ctxs_[cached_tensor_id].get();
       gpu_batch.resize(cpu_batch.size());
       for (size_t i = 0; i < cpu_batch.size(); ++i) {
-        framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i],
-                              true);
+        // TODO(fengjiayi): Use asynchronous TensorCopy instead
+        framework::TensorCopySync(cpu_batch[i], place_, &gpu_batch[i]);
         gpu_batch[i].set_lod(cpu_batch[i].lod());
       }
     }
...
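For context on the TODO above: under the post-change API, an asynchronous variant of this prefetch loop could enqueue all copies on one device context and wait once per batch before publishing it. A rough sketch under that assumption (illustrative only, not part of this commit; it assumes the reader keeps a per-buffer device context, here called gpu_ctx, which this diff removes):

    // Hypothetical asynchronous prefetch copy; gpu_ctx is an assumed
    // platform::DeviceContext* owned by the reader, not present after this change.
    for (size_t i = 0; i < cpu_batch.size(); ++i) {
      framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i]);
      gpu_batch[i].set_lod(cpu_batch[i].lod());
    }
    gpu_ctx->Wait();  // make the whole batch visible before it is handed to consumers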
@@ -130,10 +130,8 @@ class ReshapeKernel : public framework::OpKernel<T> {
       auto *shape_data = shape_tensor->data<int>();
       framework::Tensor cpu_shape_tensor;
       if (platform::is_gpu_place(ctx.GetPlace())) {
-        TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(),
-                   &cpu_shape_tensor);
+        TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor);
         shape_data = cpu_shape_tensor.data<int>();
-        ctx.device_context().Wait();
       }
       auto shape =
           std::vector<int>(shape_data, shape_data + shape_tensor->numel());
@@ -152,9 +150,7 @@ class ReshapeKernel : public framework::OpKernel<T> {
     out->Resize(out_dims);
     if (!inplace) {
       out->mutable_data<T>(ctx.GetPlace());
-      framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out);
-      ctx.device_context().Wait();
-      // TensorCopy will resize to in_dims.
+      framework::TensorCopySync(*in, ctx.GetPlace(), out);
       out->Resize(out_dims);
     } else {
       out->ShareDataWith(*in);
...