Unverified · Commit d33e99fe authored by huangxu96, committed by GitHub

static cost model (#36775)

Add a static CostModel. The static cost data is taken from the op benchmark system.
Parent bf2839f5
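For orientation, the interface introduced by this commit can be driven roughly as follows. This is a minimal sketch based only on the methods and test code shown in the diff below; it assumes a Paddle build in which paddle.cost_model is installed together with the bundled static_op_benchmark.json.

# Minimal usage sketch of the static cost model added by this commit
# (derived from the diff below; not an official example).
from paddle.cost_model import CostModel

cost_model = CostModel()

# Load the bundled static benchmark data (static_op_benchmark.json).
cost_model.static_cost_data()

# Query the recorded GPU time for an op. Each call returns a dict with an
# "op_time" value (convertible to float; presumably milliseconds) and the
# benchmark "config" string it was measured under.
conv2d_fwd = cost_model.get_static_op_time("conv2d")
conv2d_bwd = cost_model.get_static_op_time("conv2d", forward=False)
conv2d_fp16 = cost_model.get_static_op_time("conv2d", dtype="float16")

print(conv2d_fwd["op_time"], conv2d_fwd["config"])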
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cost_model import CostModel # noqa: F401
__all__ = ['CostModel']
@@ -15,6 +15,8 @@
import paddle
import paddle.static as static
import numpy as np
import json
import os
from paddle.fluid import core
@@ -36,7 +38,6 @@ class CostModel():
        paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
        print("main program is: {}".format(main_program))
        #print("start up program is: {}".format(startup_program))
        return startup_program, main_program
@@ -44,7 +45,7 @@
                        startup_program,
                        main_program,
                        device='gpu',
                        fetch_cost_list=['time', 'memory']):
                        fetch_cost_list=['time']):
        place = paddle.set_device('gpu')
        x = np.random.random(size=(10, 1)).astype('float32')
@@ -53,17 +54,33 @@
        exe.run(startup_program)
        paddle.fluid.profiler.start_profiler("All")
        exe.run(main_program, feed={"X": x}, fetch_list=[])
        # core.CostModel.ProfileMeasure(main_program, device)
        print("core:<<<<<<<")
        cost_model = core.CostModel()
        cost_data = cost_model.ProfileMeasure(device)
        # cost_list = self.stop_cost_model()
        # return cost_list

    def static_cost_data(self):
        static_cost_data_path = os.path.join(
            os.path.dirname(__file__), "static_op_benchmark.json")
        with open(static_cost_data_path, 'r') as load_f:
            load_dict = json.load(load_f)
        self._static_cost_data = load_dict
        # return all static cost data
        return load_dict

    def get_static_op_time(self, op_name, forward=True, dtype="float32"):
        # If forward is True, return the op's forward time; otherwise return its backward time.
        if op_name is None:
            raise ValueError(
                'op_name should not be empty when you want to get static op time'
            )
        op_cost = {}
        for op_data in self._static_cost_data:
            if (op_data["op"] == op_name) and (dtype in op_data["config"]):
                if forward:
                    op_cost["op_time"] = op_data["paddle_gpu_time"]
                else:
                    op_cost["op_time"] = op_data["paddle_gpu_time_backward"]
                op_cost["config"] = op_data["config"]
        return op_cost


cost_model = CostModel()
startup_program, main_program = cost_model.build_program()
cost_model.profile_measure(startup_program, main_program)
This source diff could not be displayed because it is too large. You can view the blob instead.
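The file above (too large for the diff viewer; presumably static_op_benchmark.json, which setup.py.in below packages and static_cost_data() loads) supplies the records that get_static_op_time filters. Judging only from the keys accessed in the code, each record looks roughly like the sketch below; the concrete values and config string are hypothetical.

# Hypothetical shape of one record in static_op_benchmark.json, inferred from
# the keys read by get_static_op_time(); the values shown here are made up.
example_record = {
    "op": "abs",
    "config": "x (Variable) - dtype: float32, shape: [16, 128]",
    "paddle_gpu_time": "0.0123",           # forward GPU time reported by the benchmark
    "paddle_gpu_time_backward": "0.0251",  # backward GPU time reported by the benchmark
}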
@@ -18,6 +18,7 @@ import unittest
import paddle
import paddle.fluid.core as core
from paddle.cost_model import CostModel
paddle.enable_static()
@@ -51,6 +52,41 @@ class TestCostModel(unittest.TestCase):
        self.assertGreaterEqual(cost_data.get_whole_time_ms(),
                                fc_op_time + mean_op_time)
    def test_static_op_benchmark_cost_model(self):
        op_name = "abs"
        cost_model = CostModel()
        # init static data
        cost_model.static_cost_data()

        abs_op_cost = cost_model.get_static_op_time(op_name)
        abs_op_time = abs_op_cost["op_time"]
        abs_op_config = abs_op_cost["config"]
        print("abs_op_time:", abs_op_time)
        print("abs_op_config:", abs_op_config)
        self.assertGreater(float(abs_op_time), 0)

        conv2d_op_cost = cost_model.get_static_op_time("conv2d")
        conv2d_op_time = conv2d_op_cost["op_time"]
        conv2d_op_config = conv2d_op_cost["config"]
        self.assertGreater(float(conv2d_op_time), 0)
        print("conv2d_op_time:", conv2d_op_time)
        print("conv2d_op_config:", conv2d_op_config)

        conv2d_backward_op_cost = cost_model.get_static_op_time(
            "conv2d", forward=False)
        conv2d_backward_op_time = conv2d_backward_op_cost["op_time"]
        conv2d_backward_op_config = conv2d_backward_op_cost["config"]
        self.assertGreater(float(conv2d_backward_op_time), 0)
        print("conv2d_backward_op_time:", conv2d_backward_op_time)
        print("conv2d_backward_op_config:", conv2d_backward_op_config)

        conv2d_fp16_op_cost = cost_model.get_static_op_time(
            "conv2d", dtype="float16")
        conv2d_fp16_op_time = conv2d_fp16_op_cost["op_time"]
        conv2d_fp16_op_config = conv2d_fp16_op_cost["config"]
        self.assertGreater(float(conv2d_fp16_op_time), 0)
        print("conv2d_fp16_op_time:", conv2d_fp16_op_time)
        print("conv2d_fp16_op_config:", conv2d_fp16_op_config)


if __name__ == '__main__':
    unittest.main()
@@ -334,6 +334,7 @@ packages=['paddle',
          'paddle.fluid.incubate.fleet.collective',
          'paddle.fluid.incubate.fleet.utils',
          'paddle.amp',
          'paddle.cost_model',
          'paddle.hapi',
          'paddle.vision',
          'paddle.vision.models',
@@ -404,6 +405,7 @@ if os.name != 'nt':
else:
    package_data={'paddle.fluid': ['${FLUID_CORE_NAME}' + '.pyd', '${FLUID_CORE_NAME}' + '.lib']}

package_data['paddle.fluid'] += ['${PADDLE_BINARY_DIR}/python/paddle/cost_model/static_op_benchmark.json']

if '${HAS_NOAVX_CORE}' == 'ON':
    package_data['paddle.fluid'] += ['core_noavx' + ('.so' if os.name != 'nt' else '.pyd')]
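Note that static_op_benchmark.json has to appear in package_data because cost_model.py resolves it relative to its own location (os.path.dirname(__file__)); without this entry the JSON would not be shipped in the built wheel, and static_cost_data() would fail to find it in an installed Paddle.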