Unverified commit ea6716a5, authored by: W wanghuancoder, committed by: GitHub

Add a check for fluid.data() variables with no feed data (#25858)

* add a check for fluid.data() variables with no feed data, test=develop

* Add testcase for feed check, test=develop
Parent 3ec0bcbb
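For context, a minimal sketch of how the new check surfaces to users, modeled on the test case added in this PR (assuming the fluid 1.x API; the toy network and variable names here are illustrative, not part of the commit):

import paddle.fluid as fluid

# fluid.data() marks its variable so the executor can verify it is fed.
x = fluid.data(name="x", shape=[None, 1], dtype="float32")
y = fluid.layers.fc(input=x, size=1)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# With use_prune=True and "x" missing from the feed dict, the executor
# now raises: ValueError: Need feed data for variable x
exe.run(fluid.default_main_program(),
        feed={},
        fetch_list=[y],
        use_prune=True)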
@@ -1156,6 +1156,26 @@ class Executor(object):
        compiled = isinstance(program, compiler.CompiledProgram)

        # Check whether any fluid.data() variable is missing feed data
        if use_prune:
            if compiled:
                global_block = program._program.global_block()
            else:
                global_block = program.global_block()
            for varname in global_block.vars:
                vardesc = global_block.desc.find_var(cpt.to_bytes(varname))
                varobj = global_block.vars[varname]

                # Cannot check vars built by fluid.layers.data(), because fluid.layers.data() does not set need_check_feed
                if vardesc.persistable() == False and \
                        vardesc.type() == core.VarDesc.VarType.LOD_TENSOR and \
                        vardesc.need_check_feed() == True and \
                        varobj._stop_gradient == True and \
                        varobj.is_data == True and \
                        varobj.belong_to_optimizer == False and \
                        varname not in feed:
                    raise ValueError('Need feed data for variable %s' % varname)

        acp._auto_checkpoint(self, program)

        # For backward compatibility, run directly.
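A side note on the comment in the hunk above: only fluid.data() sets the need_check_feed flag on its variable, which is why variables created by fluid.layers.data() cannot be covered by this check. A hedged illustrative snippet (assuming the fluid 1.x API; the variable names are invented for the example):

import paddle.fluid as fluid

checked = fluid.data(name="checked", shape=[None, 1], dtype="float32")
unchecked = fluid.layers.data(name="unchecked", shape=[1], dtype="float32")

block = fluid.default_main_program().global_block()
# fluid.data() sets the flag; fluid.layers.data() leaves it unset.
print(block.desc.find_var(b"checked").need_check_feed())    # expected: True
print(block.desc.find_var(b"unchecked").need_check_feed())  # expected: False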
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest
import numpy
import paddle.fluid.core as core
import paddle.fluid as fluid


class TestExecutor(unittest.TestCase):
    def net(self):
        lr = fluid.data(name="lr", shape=[1], dtype='float32')
        x = fluid.data(name="x", shape=[None, 1], dtype='float32')
        y = fluid.data(name="y", shape=[None, 1], dtype='float32')
        y_predict = fluid.layers.fc(input=x, size=1, act=None)
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        opt = fluid.optimizer.Adam(learning_rate=lr)
        opt.minimize(avg_cost)
        return lr, avg_cost

    def test_program_check_feed(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = [[1.0], [2.0], [3.0], [4.0]]
                y_true = [[2.0], [4.0], [6.0], [8.0]]
                a = 0
                with self.assertRaises(ValueError):
                    exe.run(feed={'x': train_data,
                                  'lr': a},
                            fetch_list=[lr, cost],
                            return_numpy=False,
                            use_prune=True)

    def test_compiled_program_check_feed(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                compiled_prog = fluid.CompiledProgram(
                    main_program).with_data_parallel(loss_name=cost.name)
                train_data = [[1.0], [2.0], [3.0], [4.0]]
                y_true = [[2.0], [4.0], [6.0], [8.0]]
                a = 0
                with self.assertRaises(ValueError):
                    exe.run(compiled_prog,
                            feed={'x': train_data,
                                  'lr': a},
                            fetch_list=[lr, cost],
                            return_numpy=False,
                            use_prune=True)


if __name__ == '__main__':
    unittest.main()