// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/platform/macros.h"

namespace paddle {
namespace framework {
class OperatorBase;
}
namespace distributed {

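// A TaskNode describes one node of the distributed task graph: the role and
// rank it runs under, the operators (or ProgramDesc) it executes, how often it
// runs, and its upstream/downstream dependencies together with the buffer size
// recorded for each dependency.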
class TaskNode final {
 public:
  using OperatorBase = paddle::framework::OperatorBase;
  TaskNode(int32_t role, int64_t rank, int64_t task_id, int64_t max_run_times,
           int64_t max_slot_nums);
  TaskNode(int32_t role, const std::vector<OperatorBase*>& ops, int64_t rank,
           int64_t task_id, int64_t max_run_times, int64_t max_slot_nums);
  TaskNode(const paddle::framework::ProgramDesc& program, int64_t rank,
           int64_t max_run_times, int64_t max_slot_nums);
  ~TaskNode() = default;

  int64_t rank() const { return rank_; }
  int64_t task_id() const { return task_id_; }
  int32_t role() const { return role_; }
  int64_t max_run_times() const { return max_run_times_; }
  int64_t max_slot_nums() const { return max_slot_nums_; }
  int64_t run_per_steps() const { return run_per_steps_; }
  int64_t run_at_offset() const { return run_at_offset_; }
  int64_t reply_up_per_steps() const { return reply_up_per_steps_; }
  int64_t send_down_per_steps() const { return send_down_per_steps_; }
  const std::unordered_map<int64_t, int64_t>& upstream() const {
    return upstream_;
  }
  const std::unordered_map<int64_t, int64_t>& downstream() const {
    return downstream_;
  }
  const std::string& type() const { return type_; }
  const paddle::framework::ProgramDesc& program() const { return program_; }
  const std::vector<OperatorBase*>& ops() const { return ops_; }
  const std::vector<std::unique_ptr<OperatorBase>>& unique_ops() const {
    return ops_vec_;
  }
  const std::unordered_map<const OperatorBase*, std::vector<std::string>>&
  unused_vars() const {
    return unused_vars_;
  }

  void SetRunPerSteps(int64_t value);
  void SetRunAtOffset(int64_t value);
  void SetReplyUpPerSteps(int64_t value);
  void SetSendDownPerSteps(int64_t value);
  void SetType(const std::string& type) { type_ = type; }
  void SetUnusedVars(
      const std::unordered_map<const OperatorBase*, std::vector<std::string>>&
          unused_vars) {
    unused_vars_ = unused_vars;
  }

  // Register an upstream/downstream dependency; buff_size is the buffer size
  // recorded for that task (see the task_id -> buff_size maps below).
  bool AddUpstreamTask(int64_t task_id, int64_t buff_size = 1);
  bool AddDownstreamTask(int64_t task_id, int64_t buff_size = 1);
  std::string DebugString() const;

 private:
  DISABLE_COPY_AND_ASSIGN(TaskNode);
  TaskNode() = default;
  // ops_ will be removed in the future
  std::vector<OperatorBase*> ops_;
  // Maps task_id -> buff_size.
  std::unordered_map<int64_t, int64_t> upstream_;
  std::unordered_map<int64_t, int64_t> downstream_;
  framework::ProgramDesc program_;
  std::vector<std::unique_ptr<OperatorBase>> ops_vec_;
  std::unordered_map<const OperatorBase*, std::vector<std::string>>
      unused_vars_;

  int32_t role_;
  int64_t rank_;
  int64_t task_id_;
  int64_t max_run_times_;
  int64_t max_slot_nums_;

  int64_t run_per_steps_{1};
  int64_t run_at_offset_{0};
  // One input produces output multiple times.
  int64_t reply_up_per_steps_{1};
  // One output requires input multiple times.
  int64_t send_down_per_steps_{1};

  std::string type_;
};
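
// A minimal construction sketch (illustrative only: the argument values, the
// operator list, and the way a surrounding executor would wire nodes together
// are assumptions, not something this header prescribes):
//
//   std::vector<TaskNode::OperatorBase*> ops;  // hypothetical operator list
//   TaskNode node(/*role=*/0, ops, /*rank=*/0, /*task_id=*/1,
//                 /*max_run_times=*/1, /*max_slot_nums=*/1);
//   node.AddUpstreamTask(/*task_id=*/0, /*buff_size=*/2);
//   node.AddDownstreamTask(/*task_id=*/2);
//   node.SetType("Compute");  // type string is an assumed example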

}  // namespace distributed
}  // namespace paddle