/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <unistd.h>
#include <string>
#include <thread>  // NOLINT

#include "gtest/gtest.h"
#include "paddle/fluid/distributed/ps.pb.h"
#include "paddle/fluid/distributed/ps/service/brpc_ps_client.h"
#include "paddle/fluid/distributed/ps/service/brpc_ps_server.h"
#include "paddle/fluid/distributed/ps/service/env.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
namespace distributed {
class DownpourBrpcClosure;
class PSClient;
class PSServer;
}  // namespace distributed
namespace framework {
class Variable;
}  // namespace framework
}  // namespace paddle

namespace phi {
class DenseTensor;
}  // namespace phi

namespace framework = paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;

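// Registers the variables the test needs in the scope: the input "x" and its
// gradient "x@GRAD". The tensors themselves are allocated and filled in
// InitTensorsOnClient.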
void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) {
  auto x_var = scope->Var("x");
  x_var->GetMutable<framework::LoDTensor>();
  auto x_g_var = scope->Var("x@GRAD");
  x_g_var->GetMutable<framework::LoDTensor>();
}

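// Fills "x" with rows_numel ones and "x@GRAD" with ones laid out for the
// sparse gradient push (see the g_size comment below).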
void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place,
                         int64_t rows_numel) {
  CreateVarsOnScope(scope, place);

  auto x_var = scope->Var("x")->GetMutable<framework::LoDTensor>();
  float* x_ptr =
      x_var->mutable_data<float>(framework::DDim({1, rows_numel}), *place);
  for (int64_t i = 0; i < rows_numel; ++i) x_ptr[i] = 1.0;

  // Hard-coded gradient size: key_num * (fea_dim + 3) = 10 * 13 = 130, i.e.
  // fea_dim values per key plus three extra slots (show/clk/slot).
  auto g_size = rows_numel + 30;
  auto x_g_var = scope->Var("x@GRAD")->GetMutable<framework::LoDTensor>();
  float* x_g_ptr =
      x_g_var->mutable_data<float>(framework::DDim({1, g_size}), *place);
  for (int64_t i = 0; i < g_size; ++i) x_g_ptr[i] = 1.0;
}

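// Builds the sparse table configuration shared by server and worker: a
// MemorySparseTable with 10 shards and a SparseAccessor (fea_dim = 10,
// embedx_dim = 9) using the naive SGD rule with learning_rate = 1.0 for both
// embed and embedx.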
void GetDownpourSparseTableProto(
    ::paddle::distributed::TableParameter* sparse_table_proto) {
  sparse_table_proto->set_table_id(0);
  sparse_table_proto->set_table_class("MemorySparseTable");
  sparse_table_proto->set_shard_num(10);
  ::paddle::distributed::TableAccessorParameter* accessor_config =
      sparse_table_proto->mutable_accessor();

  accessor_config->set_accessor_class("SparseAccessor");
  accessor_config->set_fea_dim(10);
  accessor_config->set_embedx_dim(9);
  accessor_config->set_embedx_threshold(0);
  accessor_config->mutable_ctr_accessor_param()->set_nonclk_coeff(0.2);
  accessor_config->mutable_ctr_accessor_param()->set_click_coeff(1);
  accessor_config->mutable_ctr_accessor_param()->set_base_threshold(0.5);
  accessor_config->mutable_ctr_accessor_param()->set_delta_threshold(0.2);
  accessor_config->mutable_ctr_accessor_param()->set_delta_keep_days(16);
  accessor_config->mutable_ctr_accessor_param()->set_show_click_decay_rate(
      0.99);

  accessor_config->mutable_embed_sgd_param()->set_name("SparseNaiveSGDRule");
  auto* naive_param =
      accessor_config->mutable_embed_sgd_param()->mutable_naive();
  naive_param->set_learning_rate(1.0);
  naive_param->set_initial_range(0.3);
  naive_param->add_weight_bounds(-10.0);
  naive_param->add_weight_bounds(10.0);

  accessor_config->mutable_embedx_sgd_param()->set_name("SparseNaiveSGDRule");
  naive_param = accessor_config->mutable_embedx_sgd_param()->mutable_naive();
  naive_param->set_learning_rate(1.0);
  naive_param->set_initial_range(0.3);
  naive_param->add_weight_bounds(-10.0);
  naive_param->add_weight_bounds(10.0);
}

::paddle::distributed::PSParameter GetServerProto() {
  // Generate server proto desc
  ::paddle::distributed::PSParameter server_fleet_desc;
  ::paddle::distributed::ServerParameter* server_proto =
      server_fleet_desc.mutable_server_param();
  ::paddle::distributed::DownpourServerParameter* downpour_server_proto =
      server_proto->mutable_downpour_server_param();
  ::paddle::distributed::ServerServiceParameter* server_service_proto =
      downpour_server_proto->mutable_service_param();
  server_service_proto->set_service_class("BrpcPsService");
  server_service_proto->set_server_class("BrpcPsServer");
  server_service_proto->set_client_class("BrpcPsClient");
  server_service_proto->set_start_server_port(0);
  server_service_proto->set_server_thread_num(12);

  ::paddle::distributed::TableParameter* sparse_table_proto =
      downpour_server_proto->add_downpour_table_param();
  GetDownpourSparseTableProto(sparse_table_proto);
  return server_fleet_desc;
}

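// The worker proto mirrors the server proto: the same sparse table
// description on both the worker and server sides, plus the brpc service
// parameters the client needs to reach the server.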
::paddle::distributed::PSParameter GetWorkerProto() {
  ::paddle::distributed::PSParameter worker_fleet_desc;
  ::paddle::distributed::WorkerParameter* worker_proto =
      worker_fleet_desc.mutable_worker_param();

  ::paddle::distributed::DownpourWorkerParameter* downpour_worker_proto =
      worker_proto->mutable_downpour_worker_param();

  ::paddle::distributed::TableParameter* worker_sparse_table_proto =
      downpour_worker_proto->add_downpour_table_param();
  GetDownpourSparseTableProto(worker_sparse_table_proto);

  ::paddle::distributed::ServerParameter* server_proto =
      worker_fleet_desc.mutable_server_param();
  ::paddle::distributed::DownpourServerParameter* downpour_server_proto =
      server_proto->mutable_downpour_server_param();
  ::paddle::distributed::ServerServiceParameter* server_service_proto =
      downpour_server_proto->mutable_service_param();
  server_service_proto->set_service_class("BrpcPsService");
  server_service_proto->set_server_class("BrpcPsServer");
  server_service_proto->set_client_class("BrpcPsClient");
  server_service_proto->set_start_server_port(0);
  server_service_proto->set_server_thread_num(12);

  ::paddle::distributed::TableParameter* server_sparse_table_proto =
      downpour_server_proto->add_downpour_table_param();
  GetDownpourSparseTableProto(server_sparse_table_proto);

  return worker_fleet_desc;
}

/*-------------------------------------------------------------------------*/

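// State shared between the server thread and the client: the server
// endpoint, the serialized host list used to build the PS environment, and
// handles to the server and client instances.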
std::string ip_ = "127.0.0.1";
uint32_t port_ = 4209;

std::vector<std::string> host_sign_list_;

std::shared_ptr<paddle::distributed::PSServer> pserver_ptr_;

std::shared_ptr<paddle::distributed::PSClient> worker_ptr_;

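// Starts a BrpcPsServer on ip_:port_, registered through the PS environment
// built from host_sign_list_; the program list is empty because this test
// only exercises the sparse table.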
void RunServer() {
  ::paddle::distributed::PSParameter server_proto = GetServerProto();

  auto _ps_env = paddle::distributed::PaddlePSEnvironment();
  _ps_env.SetPsServers(&host_sign_list_, 1);
  pserver_ptr_ = std::shared_ptr<paddle::distributed::PSServer>(
      paddle::distributed::PSServerFactory::Create(server_proto));
  std::vector<framework::ProgramDesc> empty_vec;
  framework::ProgramDesc empty_prog;
  empty_vec.push_back(empty_prog);
  pserver_ptr_->Configure(server_proto, _ps_env, 0, empty_vec);
  pserver_ptr_->Start(ip_, port_);
}

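// Creates a BrpcPsClient and connects it to the servers registered in
// host_sign_list_.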
void RunClient(std::map<uint64_t, std::vector<paddle::distributed::Region>>&
                   dense_regions) {
  ::paddle::distributed::PSParameter worker_proto = GetWorkerProto();
  paddle::distributed::PaddlePSEnvironment _ps_env;
  auto servers_ = host_sign_list_.size();
  _ps_env.SetPsServers(&host_sign_list_, servers_);
  worker_ptr_ = std::shared_ptr<paddle::distributed::PSClient>(
      paddle::distributed::PSClientFactory::Create(worker_proto));
  worker_ptr_->Configure(worker_proto, dense_regions, _ps_env, 0);
}

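// End-to-end flow: start a server, pull initial values for 10 keys, push
// all-ones gradients twice, pull again, and check that the second push moved
// every weight down by exactly learning_rate * grad = 1.0.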
void RunBrpcPushSparse() {
  setenv("http_proxy", "", 1);
  setenv("https_proxy", "", 1);
  auto ph_host = paddle::distributed::PSHost(ip_, port_, 0);
  host_sign_list_.push_back(ph_host.SerializeToString());

  // Start Server
  std::thread server_thread(RunServer);
  sleep(1);

  // Start Client
  framework::Scope client_scope;
  platform::CPUPlace place;
  InitTensorsOnClient(&client_scope, &place, 100);
  std::map<uint64_t, std::vector<paddle::distributed::Region>> dense_regions;
  dense_regions.insert(
      std::pair<uint64_t, std::vector<paddle::distributed::Region>>(0, {}));
  auto regions = dense_regions[0];
  framework::Variable* var = client_scope.FindVar("x");
  framework::LoDTensor* tensor = var->GetMutable<framework::LoDTensor>();

  RunClient(dense_regions);
  std::vector<uint64_t> fea_keys(10);
  std::vector<float> fea_values(100);
  std::vector<float> fea_temp_values(100);
  std::vector<float*> fea_value_ptr(10);
  std::vector<float*> fea_temp_value_ptr(10);

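  // Map each of the 10 keys to its 10-float slot in the flat value buffers.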
  for (size_t idx = 0; idx < fea_keys.size(); ++idx) {
    fea_keys[idx] = (uint64_t)idx;
    fea_value_ptr[idx] = fea_values.data() + idx * 10;
    fea_temp_value_ptr[idx] = fea_temp_values.data() + idx * 10;
  }

  /*-----------------------Test Server Init----------------------------------*/
  LOG(INFO) << "Run pull_sparse_param";
  auto pull_status = worker_ptr_->PullSparse(
      fea_value_ptr.data(), 0, fea_keys.data(), fea_keys.size(), true);
  pull_status.wait();

  /*-----------------------Test Push Grad----------------------------------*/
  // First push: expands and initializes embedx (embedx_threshold is 0).
  paddle::distributed::DownpourBrpcClosure* closure_push_grad =
      new paddle::distributed::DownpourBrpcClosure(1, [&](void* done) {
        int ret = 0;
        auto* closure = (paddle::distributed::DownpourBrpcClosure*)done;
        for (size_t i = 0; i < 1; ++i) {
          if (closure->check_response(
                  i, paddle::distributed::PS_PUSH_SPARSE_TABLE) != 0) {
            ret = -1;
            break;
          }
        }
        closure->set_promise_value(ret);
      });

  framework::Variable* g_var = client_scope.FindVar("x@GRAD");
  framework::LoDTensor* g_tensor = g_var->GetMutable<framework::LoDTensor>();

  LOG(INFO) << "Run push_sparse_grad";
  std::vector<float*> push_g_vec;
  for (auto i = 0; i < static_cast<int>(fea_keys.size()); ++i) {
    push_g_vec.push_back(g_tensor->data<float>() + i * 13);
  }
  auto push_grad_status = worker_ptr_->PushSparseRawGradient(
      0, fea_keys.data(), (const float**)push_g_vec.data(), fea_keys.size(),
      closure_push_grad);
  push_grad_status.wait();

  // Pull the values back after the first push.
  pull_status = worker_ptr_->PullSparse(fea_value_ptr.data(), 0,
                                        fea_keys.data(), fea_keys.size(), true);
  pull_status.wait();

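  // Closure for the second push; like the first, it only verifies the RPC
  // return code.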
  paddle::distributed::DownpourBrpcClosure* closure_push_grad1 =
      new paddle::distributed::DownpourBrpcClosure(1, [&](void* done) {
        int ret = 0;
        auto* closure = (paddle::distributed::DownpourBrpcClosure*)done;
        for (size_t i = 0; i < 1; ++i) {
          if (closure->check_response(
                  i, paddle::distributed::PS_PUSH_SPARSE_TABLE) != 0) {
            ret = -1;
            break;
          }
        }
        closure->set_promise_value(ret);
      });

  // Push again; this time embedx is actually updated.
  push_grad_status = worker_ptr_->PushSparseRawGradient(
      0, fea_keys.data(), (const float**)push_g_vec.data(), fea_keys.size(),
      closure_push_grad1);
  push_grad_status.wait();

  // Pull the updated values.
  auto pull_update_status = worker_ptr_->PullSparse(
      fea_temp_value_ptr.data(), 0, fea_keys.data(), fea_keys.size(), true);
  pull_update_status.wait();

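  // With the naive SGD rule configured above (learning_rate = 1.0) and
  // all-ones gradients, the push between the two pulls should have lowered
  // every weight by exactly 1.0.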
  for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
    EXPECT_FLOAT_EQ(fea_temp_values[idx], fea_values[idx] - 1.0);
  }

  LOG(INFO) << "Run stop_server";
  worker_ptr_->StopServer();
  LOG(INFO) << "Run finalize_worker";
  worker_ptr_->FinalizeWorker();
  server_thread.join();
}

TEST(RunBrpcPushSparse, Run) { RunBrpcPushSparse(); }