//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "gtest/gtest.h"

#include "paddle/fluid/platform/device_context.h"

namespace paddle {
namespace framework {
namespace details {
namespace f = paddle::framework;
namespace p = paddle::platform;

// test data amount
const f::DDim kDims = {20, 20};

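// Test fixture that builds per-device scopes, device contexts, and a
// ReduceOpHandle, then checks the reduce result on CPU or GPU.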
struct TestReduceOpHandle {
  bool use_gpu_;
  Scope g_scope_;
  std::vector<Scope *> local_scopes_;
  std::unique_ptr<OpHandleBase> op_handle_;
  std::vector<std::unique_ptr<VarHandleBase>> vars_;
  std::vector<p::Place> gpu_list_;
  std::vector<std::unique_ptr<p::DeviceContext>> ctxs_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif

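  // Block until every device context (and, when built with CUDA, the NCCL
  // context map) has finished its pending work.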
  void WaitAll() {
    for (size_t j = 0; j < ctxs_.size(); ++j) {
      ctxs_[j]->Wait();
    }
#ifdef PADDLE_WITH_CUDA
    if (nccl_ctxs_) {
      nccl_ctxs_->WaitAll();
    }
#endif
  }

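  // Create one device context per place: every visible CUDA device when
  // use_gpu is true (requires more than one device), otherwise eight CPU
  // places.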
  void InitCtxOnGpu(bool use_gpu) {
    use_gpu_ = use_gpu;
    if (use_gpu) {
#ifdef PADDLE_WITH_CUDA
      int count = p::GetCUDADeviceCount();
      if (count <= 1) {
        LOG(WARNING) << "Cannot test multi-gpu Reduce, because the CUDA "
                        "device count is "
                     << count;
        exit(0);
      }
      for (int i = 0; i < count; ++i) {
        auto p = p::CUDAPlace(i);
        gpu_list_.push_back(p);
        ctxs_.emplace_back(new p::CUDADeviceContext(p));
      }
      nccl_ctxs_.reset(new platform::NCCLContextMap(gpu_list_));
#else
      PADDLE_THROW("CUDA is not support.");
#endif
    } else {
      int count = 8;
      for (int i = 0; i < count; ++i) {
        auto p = p::CPUPlace();
        gpu_list_.push_back(p);
        ctxs_.emplace_back(new p::CPUDeviceContext(p));
      }
#ifdef PADDLE_WITH_CUDA
      nccl_ctxs_.reset(nullptr);
#endif
    }
  }

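  // Create a local scope per place, construct the ReduceOpHandle (backed by
  // NCCL contexts on GPU), and wire up its input/output variable handles.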
  void InitReduceOp(size_t input_scope_idx) {
    for (size_t j = 0; j < gpu_list_.size(); ++j) {
      local_scopes_.push_back(&(g_scope_.NewScope()));
      local_scopes_[j]->Var("out");
    }
    local_scopes_[input_scope_idx]->Var("input");

    if (use_gpu_) {
#ifdef PADDLE_WITH_CUDA
      op_handle_.reset(
          new ReduceOpHandle(local_scopes_, gpu_list_, nccl_ctxs_.get()));
#else
      PADDLE_THROW("CUDA is not support.");
#endif
    } else {
#ifdef PADDLE_WITH_CUDA
      op_handle_.reset(
          new ReduceOpHandle(local_scopes_, gpu_list_, nccl_ctxs_.get()));
#else
      op_handle_.reset(new ReduceOpHandle(local_scopes_, gpu_list_));
#endif
    }

    // add input
    for (size_t j = 0; j < gpu_list_.size(); ++j) {
      if (!use_gpu_) {
        op_handle_->SetDeviceContext(gpu_list_[j], ctxs_[j].get());
      }
      auto *in_var_handle = new VarHandle(1, j, "input", gpu_list_[j]);
      in_var_handle->generated_op_ = nullptr;
      vars_.emplace_back(in_var_handle);
      op_handle_->AddInput(in_var_handle);
    }

    // add dummy var
    vars_.emplace_back(new DummyVarHandle());
    DummyVarHandle *in_dummy_var_handle =
        static_cast<DummyVarHandle *>(vars_.back().get());
    in_dummy_var_handle->generated_op_ = nullptr;
    op_handle_->AddInput(in_dummy_var_handle);

    // add output
    auto *out_var_handle =
        new VarHandle(2, input_scope_idx, "out", gpu_list_[input_scope_idx]);
    vars_.emplace_back(out_var_handle);
    op_handle_->AddOutput(out_var_handle);

    // add dummy var
    vars_.emplace_back(new DummyVarHandle());
    DummyVarHandle *dummy_var_handle =
        static_cast<DummyVarHandle *>(vars_.back().get());
    op_handle_->AddOutput(dummy_var_handle);
  }

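  // Fill each scope's "input" SelectedRows with identical rows and values,
  // run the reduce, and expect the output to gather every input's rows.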
  void TestReduceSelectedRows(size_t output_scope_idx) {
    int height = kDims[0] * 2;
    std::vector<int64_t> rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1,
                              2, 4, 6, 3, 1, 1, 1,  1, 3, 7};
    std::vector<float> send_vector(f::product(kDims));
    for (size_t k = 0; k < send_vector.size(); ++k) {
      send_vector[k] = k;
    }

    for (size_t input_scope_idx = 0; input_scope_idx < gpu_list_.size();
         ++input_scope_idx) {
      auto in_var = local_scopes_[input_scope_idx]->Var("input");
      auto in_selected_rows = in_var->GetMutable<f::SelectedRows>();
      auto value = in_selected_rows->mutable_value();
      value->mutable_data<float>(kDims, gpu_list_[input_scope_idx]);

      in_selected_rows->set_height(height);
      in_selected_rows->set_rows(rows);

      paddle::framework::TensorFromVector<float>(
          send_vector, *(ctxs_[input_scope_idx]), value);
      value->Resize(kDims);
    }

    auto out_var = local_scopes_[output_scope_idx]->Var("out");
    auto out_selected_rows = out_var->GetMutable<f::SelectedRows>();

    auto in_var = local_scopes_[output_scope_idx]->Var("input");
    auto in_selected_rows = in_var->GetMutable<f::SelectedRows>();

    out_selected_rows->mutable_value()->ShareDataWith(
        in_selected_rows->value());

    op_handle_->Run(false);

    WaitAll();

    p::CPUPlace cpu_place;

    auto &out_select_rows = out_var->Get<f::SelectedRows>();
    auto &rt = out_select_rows.value();

    PADDLE_ENFORCE_EQ(out_select_rows.height(), height, "height is not equal.");
    for (size_t k = 0; k < out_select_rows.rows().size(); ++k) {
      PADDLE_ENFORCE_EQ(out_select_rows.rows()[k], rows[k % rows.size()]);
    }

    f::Tensor result_tensor;
    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
    float *ct = result_tensor.data<float>();

    for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
      ASSERT_NEAR(ct[j], send_vector[j % send_vector.size()], 1e-5);
    }
  }

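  // Fill each scope's "input" LoDTensor with identical values, run the
  // reduce, and expect the output to be the element-wise sum over devices.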
  void TestReduceLodTensors(size_t output_scope_idx) {
    std::vector<float> send_vector(static_cast<size_t>(f::product(kDims)));
    for (size_t k = 0; k < send_vector.size(); ++k) {
      send_vector[k] = k;
    }
    f::LoD lod{{0, 10, 20}};

    for (size_t input_scope_idx = 0; input_scope_idx < gpu_list_.size();
         ++input_scope_idx) {
      auto in_var = local_scopes_[input_scope_idx]->Var("input");
      auto in_lod_tensor = in_var->GetMutable<f::LoDTensor>();
      in_lod_tensor->mutable_data<float>(kDims, gpu_list_[input_scope_idx]);
      in_lod_tensor->set_lod(lod);

      paddle::framework::TensorFromVector<float>(
          send_vector, *(ctxs_[input_scope_idx]), in_lod_tensor);
    }

    auto out_var = local_scopes_[output_scope_idx]->Var("out");
    auto out_lodtensor = out_var->GetMutable<f::LoDTensor>();

    auto in_var = local_scopes_[output_scope_idx]->Var("input");
    auto &in_lodtensor = in_var->Get<f::LoDTensor>();

    out_lodtensor->ShareDataWith(in_lodtensor);

    op_handle_->Run(false);

    WaitAll();

    p::CPUPlace cpu_place;

    auto &rt = out_var->Get<f::LoDTensor>();

    f::Tensor result_tensor;
    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
    float *ct = result_tensor.data<float>();

    for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
      ASSERT_NEAR(ct[j], send_vector[j] * gpu_list_.size(), 1e-5);
    }
  }
};

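// The CPU variants always run; the GPU variants are compiled only when
// PADDLE_WITH_CUDA is defined.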
TEST(ReduceTester, TestCPUReduceTestSelectedRows) {
  TestReduceOpHandle test_op;
  size_t input_scope_idx = 0;
  test_op.InitCtxOnGpu(false);
  test_op.InitReduceOp(input_scope_idx);
  test_op.TestReduceSelectedRows(input_scope_idx);
}
TEST(ReduceTester, TestCPUReduceTestLodTensor) {
  TestReduceOpHandle test_op;
  size_t input_scope_idx = 0;
  test_op.InitCtxOnGpu(false);
  test_op.InitReduceOp(input_scope_idx);
  test_op.TestReduceLodTensors(input_scope_idx);
}
#ifdef PADDLE_WITH_CUDA

TEST(ReduceTester, TestGPUReduceTestSelectedRows) {
  TestReduceOpHandle test_op;
  size_t input_scope_idx = 0;
  test_op.InitCtxOnGpu(true);
  test_op.InitReduceOp(input_scope_idx);
  test_op.TestReduceSelectedRows(input_scope_idx);
}

TEST(ReduceTester, TestGPUReduceTestLodTensor) {
  TestReduceOpHandle test_op;
  size_t input_scope_idx = 0;
  test_op.InitCtxOnGpu(true);
  test_op.InitReduceOp(input_scope_idx);
  test_op.TestReduceLodTensors(input_scope_idx);
}
#endif

}  // namespace details
}  // namespace framework
}  // namespace paddle