Using dygraph (dynamic graph) mode on my own server fails with [Hint: CUDNN_STATUS_BAD_PARAM] at (/paddle/paddle/fluid/operators/batch_norm_op.cu:287)
Created by: kali20gakki
As described in the title. The full error output is:
(for_paddle) [zhangwei3@g0111 YOLOV3_Face_darknet]$ python train.py
2020-03-17 17:21:22,014-INFO: EPOCH: 1000, batch_size: 5
W0317 17:21:23.146333 121560 device_context.cc:237] Please NOTE: device: 0, CUDA Capability: 70, Driver API Version: 10.1, Runtime API Version: 10.0
W0317 17:21:23.153177 121560 device_context.cc:245] device: 0, cuDNN Version: 7.4.
W0317 17:21:23.153228 121560 device_context.cc:271] WARNING: device: 0. The installed Paddle is compiled with CUDNN 7.6, but CUDNN version in your machine is 7.4, which may cause serious incompatible bug. Please recompile or reinstall Paddle with compatible CUDNN version.
2020-03-17 17:21:24,703-INFO: set data loader...
2020-03-17 17:21:37,620-INFO: set data loader sucessfully!
2020-03-17 17:21:37,620-INFO: start training...
Traceback (most recent call last):
File "train.py", line 188, in <module>
train()
File "train.py", line 92, in train
outputs = model(img)
File "/home/zhangwei3/.conda/envs/for_paddle/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py", line 304, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/dat01/zhangwei3/YOLOV3_Face_darknet/models/yolov3.py", line 399, in forward
blocks = self.block(inputs) # blocks: C0 C1 C2
File "/home/zhangwei3/.conda/envs/for_paddle/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py", line 304, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/dat01/zhangwei3/YOLOV3_Face_darknet/models/yolov3.py", line 227, in forward
out = self.conv0(inputs)
File "/home/zhangwei3/.conda/envs/for_paddle/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py", line 304, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/dat01/zhangwei3/YOLOV3_Face_darknet/models/yolov3.py", line 73, in forward
out = self.batch_norm(out)
File "/home/zhangwei3/.conda/envs/for_paddle/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py", line 304, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/home/zhangwei3/.conda/envs/for_paddle/lib/python3.7/site-packages/paddle/fluid/dygraph/nn.py", line 1176, in forward
outs = core.ops.batch_norm(inputs, attrs, outputs)
paddle.fluid.core_avx.EnforceNotMet:
--------------------------------------------
C++ Call Stacks (More useful to developers):
--------------------------------------------
0 std::string paddle::platform::GetTraceBackString<char const*>(char const*&&, char const*, int)
1 paddle::platform::EnforceNotMet::EnforceNotMet(std::__exception_ptr::exception_ptr, char const*, int)
2 paddle::operators::BatchNormKernel<paddle::platform::CUDADeviceContext, float>::Compute(paddle::framework::ExecutionContext const&) const
3 std::_Function_handler<void (paddle::framework::ExecutionContext const&), paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CUDAPlace, false, 0ul, paddle::operators::BatchNormKernel<paddle::platform::CUDADeviceContext, float>, paddle::operators::BatchNormKernel<paddle::platform::CUDADeviceContext, double>, paddle::operators::BatchNormKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16> >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&)
4 paddle::imperative::PreparedOp::Run(std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const*, std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const*, std::unordered_map<std::string, boost::variant<boost::blank, int, float, std::string, std::vector<int, std::allocator<int> >, std::vector<float, std::allocator<float> >, std::vector<std::string, std::allocator<std::string> >, bool, std::vector<bool, std::allocator<bool> >, paddle::framework::BlockDesc*, long, std::vector<paddle::framework::BlockDesc*, std::allocator<paddle::framework::BlockDesc*> >, std::vector<long, std::allocator<long> >, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_>, std::hash<std::string>, std::equal_to<std::string>, std::allocator<std::pair<std::string const, boost::variant<boost::blank, int, float, std::string, std::vector<int, std::allocator<int> >, std::vector<float, std::allocator<float> >, std::vector<std::string, std::allocator<std::string> >, bool, std::vector<bool, std::allocator<bool> >, paddle::framework::BlockDesc*, long, std::vector<paddle::framework::BlockDesc*, std::allocator<paddle::framework::BlockDesc*> >, std::vector<long, std::allocator<long> >, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> > > > const*)
5 paddle::imperative::OpBase::Run(std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&, std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&)
6 paddle::imperative::Tracer::TraceOp(std::string const&, std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&, std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&, std::unordered_map<std::string, boost::variant<boost::blank, int, float, std::string, std::vector<int, std::allocator<int> >, std::vector<float, std::allocator<float> >, std::vector<std::string, std::allocator<std::string> >, bool, std::vector<bool, std::allocator<bool> >, paddle::framework::BlockDesc*, long, std::vector<paddle::framework::BlockDesc*, std::allocator<paddle::framework::BlockDesc*> >, std::vector<long, std::allocator<long> >, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_>, std::hash<std::string>, std::equal_to<std::string>, std::allocator<std::pair<std::string const, boost::variant<boost::blank, int, float, std::string, std::vector<int, std::allocator<int> >, std::vector<float, std::allocator<float> >, std::vector<std::string, std::allocator<std::string> >, bool, std::vector<bool, std::allocator<bool> >, paddle::framework::BlockDesc*, long, std::vector<paddle::framework::BlockDesc*, std::allocator<paddle::framework::BlockDesc*> >, std::vector<long, std::allocator<long> >, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> > > >)
----------------------
Error Message Summary:
----------------------
Error: An error occurred here. There is no accurate error hint for this error yet. We are continuously in the process of increasing hint for this kind of error check. It would be helpful if you could inform us of how this conversion went by opening a github issue. And we will resolve it with high priority.
- New issue link: https://github.com/PaddlePaddle/Paddle/issues/new
- Recommended issue content: all error stack information
[Hint: CUDNN_STATUS_BAD_PARAM] at (/paddle/paddle/fluid/operators/batch_norm_op.cu:287)
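To help narrow this down, here is a minimal dygraph sketch that exercises the same Conv2D -> BatchNorm call path the traceback goes through. The `ConvBNLayer` name, channel counts, and the 416x416 input shape are illustrative assumptions, not the actual YOLOV3_Face_darknet code. If this small case also raises CUDNN_STATUS_BAD_PARAM, the issue is probably the environment rather than the model; the warning above says the installed Paddle was compiled with cuDNN 7.6 while the machine has cuDNN 7.4, so running `paddle.fluid.install_check.run_check()` and aligning the cuDNN version as the warning suggests may be worth trying first.

```python
# Minimal sketch (assumed shapes/names) to test Conv2D + BatchNorm in dygraph on GPU.
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D, BatchNorm


class ConvBNLayer(fluid.dygraph.Layer):
    """A single conv + batch-norm block, mirroring the call path in the traceback."""

    def __init__(self, ch_in, ch_out, filter_size=3, stride=1, padding=1):
        super(ConvBNLayer, self).__init__()
        self.conv = Conv2D(num_channels=ch_in,
                           num_filters=ch_out,
                           filter_size=filter_size,
                           stride=stride,
                           padding=padding,
                           bias_attr=False)
        self.batch_norm = BatchNorm(num_channels=ch_out)

    def forward(self, inputs):
        out = self.conv(inputs)
        out = self.batch_norm(out)  # the call that raises in the traceback above
        return out


if __name__ == "__main__":
    with fluid.dygraph.guard(fluid.CUDAPlace(0)):
        layer = ConvBNLayer(ch_in=3, ch_out=32)
        # NCHW float32 input; batch_size=5 matches the training log, 416x416 is assumed
        x = fluid.dygraph.to_variable(
            np.random.rand(5, 3, 416, 416).astype("float32"))
        y = layer(x)
        print(y.shape)
```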