paddle.fluid.core.EnforceNotMet: holder_ should not be null
Created by: ThyrixYang
I've just copied the code from the official tutorial, and I got the following error:
Traceback (most recent call last):
  File "try2.py", line 181, in <module>
    feed_order=['pixel', 'label'])
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/trainer.py", line 413, in train
    feed_order)
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/trainer.py", line 469, in _train_by_executor
    self._train_by_any_executor(event_handler, exe, num_epochs, reader)
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/trainer.py", line 481, in _train_by_any_executor
    event_handler(BeginEpochEvent(epoch_id))
  File "try2.py", line 175, in event_handler
    trainer.save_params(params_dirname)
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/trainer.py", line 440, in save_params
    io.save_persistables(exe, dirname=param_path)
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/io.py", line 289, in save_persistables
    filename=filename)
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/io.py", line 167, in save_vars
    filename=filename)
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/io.py", line 198, in save_vars
    executor.run(save_program)
  File "/home/thyrix/.local/lib/python2.7/site-packages/paddle/fluid/executor.py", line 443, in run
    self.executor.run(program.desc, scope, 0, True, True)
paddle.fluid.core.EnforceNotMet: holder_ should not be null
Tensor not initialized yet when Tensor::type() is called. at [/paddle/paddle/fluid/framework/tensor.h:139]
PaddlePaddle Call Stacks:
0 0x7f524cc6dc76p paddle::platform::EnforceNotMet::EnforceNotMet(std::__exception_ptr::exception_ptr, char const*, int) + 486
1 0x7f524cc70027p paddle::framework::Tensor::type() const + 151
2 0x7f524d753248p paddle::operators::SaveOp::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) const + 1240
3 0x7f524daa218dp paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) + 205
4 0x7f524cd099efp paddle::framework::Executor::RunPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, bool, bool, bool) + 255
5 0x7f524cd0aa40p paddle::framework::Executor::Run(paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool) + 128
6 0x7f524cc8563bp void pybind11::cpp_function::initialize<pybind11::cpp_function::initialize<void, paddle::framework::Executor, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, pybind11::name, pybind11::is_method, pybind11::sibling>(void (paddle::framework::Executor::*)(paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::{lambda(paddle::framework::Executor*, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool)#1}, void, paddle::framework::Executor*, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, pybind11::name, pybind11::is_method, pybind11::sibling>(pybind11::cpp_function::initialize<void, paddle::framework::Executor, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, pybind11::name, pybind11::is_method, pybind11::sibling>(void (paddle::framework::Executor::*)(paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::{lambda(paddle::framework::Executor*, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool)#1}&&, void (*)(paddle::framework::Executor*, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::{lambda(pybind11::detail::function_call&)#3}::_FUN(pybind11::detail::function_call) + 555
7 0x7f524cc7db44p pybind11::cpp_function::dispatcher(_object*, _object*, _object*) + 2596
8 0x55ef38161d57p PyEval_EvalFrameEx + 29351
9 0x55ef381588cap PyEval_EvalCodeEx + 1754
10 0x55ef3816024ep PyEval_EvalFrameEx + 22430
11 0x55ef381588cap PyEval_EvalCodeEx + 1754
12 0x55ef3816024ep PyEval_EvalFrameEx + 22430
13 0x55ef381588cap PyEval_EvalCodeEx + 1754
14 0x55ef3816024ep PyEval_EvalFrameEx + 22430
15 0x55ef381588cap PyEval_EvalCodeEx + 1754
16 0x55ef3816024ep PyEval_EvalFrameEx + 22430
17 0x55ef3815fd72p PyEval_EvalFrameEx + 21186
18 0x55ef3815fd72p PyEval_EvalFrameEx + 21186
19 0x55ef3815fd72p PyEval_EvalFrameEx + 21186
20 0x55ef3815fd72p PyEval_EvalFrameEx + 21186
21 0x55ef381588cap PyEval_EvalCodeEx + 1754
22 0x55ef3816024ep PyEval_EvalFrameEx + 22430
23 0x55ef381588cap PyEval_EvalCodeEx + 1754
24 0x55ef381581e9p PyEval_EvalCode + 25
25 0x55ef38188bdfp
26 0x55ef38183952p PyRun_FileExFlags + 130
27 0x55ef38182dcdp PyRun_SimpleFileExFlags + 397
28 0x55ef3813258bp Py_Main + 1675
29 0x7f5291965b97p __libc_start_main + 231
30 0x55ef38131e0ap _start + 42
My code:
import paddle
import paddle.fluid as fluid
import numpy
import sys
def vgg_bn_drop(input):
    def conv_block(ipt, num_filter, groups, dropouts):
        return fluid.nets.img_conv_group(
            input=ipt,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    conv1 = conv_block(input, 64, 2, [0.3, 0])
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=512, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
    predict = fluid.layers.fc(input=fc2, size=10, act='softmax')
    return predict


def conv_bn_layer(input,
                  ch_out,
                  filter_size,
                  stride,
                  padding,
                  act='relu',
                  bias_attr=False):
    tmp = fluid.layers.conv2d(
        input=input,
        filter_size=filter_size,
        num_filters=ch_out,
        stride=stride,
        padding=padding,
        act=None,
        bias_attr=bias_attr)
    return fluid.layers.batch_norm(input=tmp, act=act)


def shortcut(input, ch_in, ch_out, stride):
    if ch_in != ch_out:
        return conv_bn_layer(input, ch_out, 1, stride, 0, None)
    else:
        return input


def basicblock(input, ch_in, ch_out, stride):
    tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
    tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
    short = shortcut(input, ch_in, ch_out, stride)
    return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')


def layer_warp(block_func, input, ch_in, ch_out, count, stride):
    tmp = block_func(input, ch_in, ch_out, stride)
    for i in range(1, count):
        tmp = block_func(tmp, ch_out, ch_out, 1)
    return tmp
def resnet_cifar10(ipt, depth=32):
    # depth should be one of 20, 32, 44, 56, 110, 1202
    assert (depth - 2) % 6 == 0
    n = (depth - 2) / 6
    nStages = {16, 64, 128}
    conv1 = conv_bn_layer(ipt, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
    pool = fluid.layers.pool2d(
        input=res3, pool_size=8, pool_type='avg', pool_stride=1)
    predict = fluid.layers.fc(input=pool, size=10, act='softmax')
    return predict


def inference_program():
    # The image is 32 * 32 with RGB representation.
    data_shape = [3, 32, 32]
    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
    predict = resnet_cifar10(images, 32)
    # predict = vgg_bn_drop(images)  # un-comment to use vgg net
    return predict


def train_program():
    predict = inference_program()
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=predict, label=label)
    return [avg_cost, accuracy]


def optimizer_program():
    return fluid.optimizer.Adam(learning_rate=0.001)
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

trainer = fluid.Trainer(
    train_func=train_program,
    optimizer_func=optimizer_program,
    place=place)

# Each batch will yield 128 images
BATCH_SIZE = 128

# Reader for training
train_reader = paddle.batch(
    paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=50000),
    batch_size=BATCH_SIZE)

# Reader for testing. A separated data set for testing.
test_reader = paddle.batch(
    paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

params_dirname = "image_classification_resnet.inference.model"

from paddle.v2.plot import Ploter

train_title = "Train cost"
test_title = "Test cost"
cost_ploter = Ploter(train_title, test_title)
step = 0


def event_handler_plot(event):
    global step
    if isinstance(event, fluid.EndStepEvent):
        if step % 1 == 0:
            cost_ploter.append(train_title, step, event.metrics[0])
            cost_ploter.plot()
        step += 1
    if isinstance(event, fluid.EndEpochEvent):
        avg_cost, accuracy = trainer.test(
            reader=test_reader,
            feed_order=['pixel', 'label'])
        cost_ploter.append(test_title, step, avg_cost)
        # save parameters
        if params_dirname is not None:
            trainer.save_params(params_dirname)


params_dirname = "image_classification_resnet.inference.model"
# event handler to track the training and testing process
def event_handler(event):
    if isinstance(event, fluid.EndStepEvent):
        if event.step % 100 == 0:
            print("\nPass %d, Batch %d, Cost %f, Acc %f" %
                  (event.epoch, event.step, event.metrics[0],
                   event.metrics[1]))
        else:
            sys.stdout.write('.')
            sys.stdout.flush()
    if isinstance(event, fluid.EndEpochEvent):
        # Test against the test dataset to get accuracy.
        avg_cost, accuracy = trainer.test(
            reader=test_reader, feed_order=['pixel', 'label'])
        print('\nTest with Pass {0}, Loss {1:2.2}, Acc {2:2.2}'.format(
            event.epoch, avg_cost, accuracy))
        # save parameters
        if params_dirname is not None:
            trainer.save_params(params_dirname)


trainer.train(
    reader=train_reader,
    num_epochs=2,
    event_handler=event_handler,
    feed_order=['pixel', 'label'])
But the linear regression code in the official tutorial worked well, so I think it's not an installation problem.
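For reference, here is a minimal sketch (a toy network and a made-up dirname, not my actual script) of the call that trainer.save_params eventually makes according to the traceback, i.e. fluid.io.save_persistables run through an executor:

import paddle.fluid as fluid

# Minimal sketch: build a tiny network, initialize its parameters by running
# the startup program, then save every persistable variable. This is the same
# path the traceback shows: save_params -> io.save_persistables -> save_vars
# -> executor.run(save_program).
x = fluid.layers.data(name='x', shape=[3, 32, 32], dtype='float32')
y = fluid.layers.fc(input=x, size=10, act='softmax')

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())  # allocates and initializes the parameters

# save_persistables expects every persistable tensor to already hold data;
# the "Tensor not initialized yet" message suggests one of mine does not.
fluid.io.save_persistables(exe, dirname="debug_params")

If I read the traceback correctly, save_params is reached while the event handler is being called for BeginEpochEvent (trainer.py line 481), i.e. before the first training step of that run, so perhaps some persistable variable in the trainer's scope has not been filled at that point, but I am not sure.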