未验证 提交 e4cfa60c 编写于 作者: L LiYuRio 提交者: GitHub

add interpreter job and plan (#54219)

上级 e8735ddf
......@@ -38,3 +38,8 @@ cc_library(
interpreter
SRCS ${INTERPRETER_SRCS}
DEPS standalone_executor ${INTERPRETER_DEPS})
# Execution-plan library for the standalone executor (Job + Plan).
# Depends only on the framework proto definitions, not on the interpreter.
cc_library(
plan
SRCS plan.cc
DEPS framework_proto)
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <cstdint>
#include <string>
#include <utility>

#include "paddle/phi/core/macros.h"
namespace paddle {
namespace framework {
// A Job is one schedulable unit of work for the standalone executor: it
// carries a job type (e.g. "forward", "backward", "optimizer") and the id of
// the micro batch it operates on. Jobs are assembled into a Plan and handed
// to the executor; see plan.h.
class Job final {
 public:
  // Constructs a job of the given type. The micro-batch id starts at -1
  // (i.e. "not assigned yet"); callers set it via SetMicroBatchId().
  // Takes `type` by value and moves it in, so callers that pass an rvalue
  // avoid a copy while lvalue callers behave exactly as before.
  explicit Job(std::string type) : type_(std::move(type)), micro_batch_id_(-1) {}
  ~Job() = default;

  // Returns the job type string supplied at construction.
  const std::string& GetJobType() const { return type_; }

  // Returns the currently assigned micro-batch id (-1 if never set).
  int64_t GetMicroBatchId() const { return micro_batch_id_; }

  // Assigns the micro batch this job belongs to.
  void SetMicroBatchId(int64_t micro_batch_id) {
    micro_batch_id_ = micro_batch_id;
  }

 private:
  // Jobs are identity objects shared by pointer (see Plan); copying them
  // would silently fork state, so copy/assign are disabled.
  DISABLE_COPY_AND_ASSIGN(Job);

  std::string type_;
  int64_t micro_batch_id_;
};
} // namespace framework
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/new_executor/interpreter/plan.h"
#include "paddle/fluid/framework/new_executor/interpreter/job.h"
#include "paddle/fluid/framework/program_desc.h"
namespace paddle {
namespace framework {
const std::vector<Job*>& Plan::GetJobList() const { return job_list_; }
// Returns the mapping from job type (e.g. "forward") to the ProgramDesc that
// implements that job type (non-owning pointers).
const std::unordered_map<std::string, ProgramDesc*>& Plan::GetTypeToProgram()
    const {
  return type_to_program_;
}
} // namespace framework
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/phi/core/macros.h"
namespace paddle {
namespace framework {
class ProgramDesc;
class Job;
// A Plan is the schedule the standalone executor runs: an ordered list of
// Jobs plus a map from each job type to the ProgramDesc implementing it.
// Accessor definitions live in plan.cc.
class Plan final {
 public:
  // NOTE(review): both containers store raw pointers that the Plan copies but
  // does not own; presumably the caller (the Python side via pybind) keeps the
  // Job and ProgramDesc objects alive for the Plan's lifetime — confirm.
  Plan(const std::vector<Job*>& job_list,
       const std::unordered_map<std::string, ProgramDesc*>& type_to_program)
      : job_list_(job_list), type_to_program_(type_to_program) {}
  ~Plan() = default;

  // Ordered jobs to execute.
  const std::vector<Job*>& GetJobList() const;
  // Job type -> program implementing that job type.
  const std::unordered_map<std::string, ProgramDesc*>& GetTypeToProgram() const;

 private:
  // A Plan is shared by reference once built; copying is disabled.
  DISABLE_COPY_AND_ASSIGN(Plan);

  std::vector<Job*> job_list_;
  std::unordered_map<std::string, ProgramDesc*> type_to_program_;
};
} // namespace framework
} // namespace paddle
......@@ -34,6 +34,7 @@ set(PYBIND_DEPS
cost_model
cuda_graph_with_memory_pool
fleet_executor
plan
global_utils
phi_utils
phi
......
......@@ -53,6 +53,8 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/new_executor/executor_statistics.h"
#include "paddle/fluid/framework/new_executor/interpreter/job.h"
#include "paddle/fluid/framework/new_executor/interpreter/plan.h"
#include "paddle/fluid/framework/new_executor/standalone_executor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h"
......@@ -1854,6 +1856,21 @@ All parameter, weight, gradient are variables in Paddle.
return py::cast(std::move(ret));
});
// Expose framework::Job to Python as core.job: constructed from a type
// string, with getters/setter mirroring the C++ interface.
py::class_<framework::Job>(m, "job")
    .def(py::init<const std::string &>(), py::arg("type"))
    .def("type", &framework::Job::GetJobType)
    .def("micro_batch_id", &framework::Job::GetMicroBatchId)
    .def("set_micro_batch_id", &framework::Job::SetMicroBatchId);

// Expose framework::Plan to Python as core.plan: built from a job list and a
// {job type -> ProgramDesc} map. The Plan keeps raw pointers, so the Python
// caller must keep the jobs and programs alive while the plan is in use.
py::class_<framework::Plan>(m, "plan")
    .def(py::init<const std::vector<Job *> &,
                  const std::unordered_map<std::string,
                                           framework::ProgramDesc *> &>(),
         py::arg("job_list"),
         py::arg("type_to_program"))
    .def("job_list", &framework::Plan::GetJobList)
    .def("type_to_program", &framework::Plan::GetTypeToProgram);
m.def("init_gflags", framework::InitGflags);
m.def("init_glog", framework::InitGLOG);
m.def("init_memory_method", framework::InitMemoryMethod);
......
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from paddle import static
from paddle.fluid import core
class TestStandaloneExecutorPlan(unittest.TestCase):
    def test_standalone_executor_plan(self):
        """Smoke-test the core.job / core.plan pybind bindings."""
        batch_id = 0
        stage_names = ["forward", "backward", "optimizer"]

        # One job per pipeline stage, each tagged with micro-batch 0.
        jobs = [core.job(name) for name in stage_names]
        for job in jobs:
            job.set_micro_batch_id(batch_id)

        # The bindings must echo back exactly what was set.
        self.assertEqual(jobs[0].micro_batch_id(), batch_id)
        self.assertEqual(jobs[0].type(), "forward")

        # Map every stage to its own (empty) program and assemble the plan.
        type_to_program = {
            name: static.Program().desc for name in stage_names
        }
        plan = core.plan(jobs, type_to_program)

        # The plan must hand back the same jobs and programs it was given.
        self.assertEqual(plan.job_list(), jobs)
        self.assertEqual(plan.type_to_program(), type_to_program)
# Allow running this test file directly as a script.
if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册