Traceback (most recent call last):
File "/home/aistudio/work/resnet50/train_fast.py", line 66, in
opt.minimize(loss_train)
File "</opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/decorator.py:decorator-gen-36>", line 2, in minimize
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/wrapped_decorator.py", line 25, inimpl
return wrapped_func(args,kwargs)
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py", line 78, inimpl*
return func(*args,**kwargs)
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py", line 685, in minimize
loss, startup_program=startup_program, params_grads=params_grads)
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py", line 615, in apply_optimize
self.regularization)
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/regularizer.py", line 80, in append_regularization_ops
outputs={"Out": new_grad})
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py", line 2479, in append_op
kwargs.get("stop_gradient", False))
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/tracer.py", line 47, in trace_op
not stop_gradient)
C++ Call Stacks (More useful to developers):
0 std::string paddle::platform::GetTraceBackString<std::string const&>(std::string const&, char const*, int)
1 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int)
2 paddle::framework::Tensor::check_memory_size() const
3 paddle::framework::EigenVector<float, 1, long>::Flatten(paddle::framework::Tensor&)
4 void paddle::operators::SumToLoDTensor<float>(paddle::framework::ExecutionContext const&)
5 std::_Function_handler<void (paddle::framework::ExecutionContext const&), paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CUDAPlace, false, 0ul, paddle::operators::SumKernel<paddle::platform::CUDADeviceContext, float>, paddle::operators::SumKernel<paddle::platform::CUDADeviceContext, double>, paddle::operators::SumKernel<paddle::platform::CUDADeviceContext, int>, paddle::operators::SumKernel<paddle::platform::CUDADeviceContext, long>, paddle::operators::SumKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16> >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&)
6 paddle::imperative::PreparedOp::Run()
7 paddle::imperative::OpBase::Run(std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&, std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&)
8 paddle::imperative::Tracer::TraceOp(std::string const&, std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&, std::map<std::string, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > >, std::less<std::string>, std::allocator<std::pair<std::string const, std::vector<std::shared_ptr<paddle::imperative::VarBase>, std::allocator<std::shared_ptr<paddle::imperative::VarBase> > > > > > const&, std::unordered_map<std::string, boost::variant<boost::blank, int, float, std::string, std::vector<int, std::allocator<int> >, std::vector<float, std::allocator<float> >, std::vector<std::string, std::allocator<std::string> >, bool, std::vector<bool, std::allocator<bool> >, paddle::framework::BlockDesc*, long, std::vector<paddle::framework::BlockDesc*, std::allocator<paddle::framework::BlockDesc*> >, std::vector<long, std::allocator<long> >, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_>, std::hash<std::string>, std::equal_to<std::string>, std::allocator<std::pair<std::string const, boost::variant<boost::blank, int, float, std::string, std::vector<int, std::allocator<int> >, std::vector<float, std::allocator<float> >, std::vector<std::string, std::allocator<std::string> >, bool, std::vector<bool, std::allocator<bool> >, paddle::framework::BlockDesc*, long, std::vector<paddle::framework::BlockDesc*, std::allocator<paddle::framework::BlockDesc*> >, std::vector<long, std::allocator<long> >, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> > > >, paddle::platform::Place const&, bool)
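
For reference, the failing call reduces to roughly the sketch below: a hypothetical stand-in assuming a Paddle 1.7-era fluid dygraph API (the real train_fast.py trains ResNet50; see the AI Studio project linked in the answers). The Tensor::check_memory_size() failure inside the sum kernel suggests a gradient tensor with no allocated memory reached the sum op that append_regularization_ops inserts when the optimizer carries weight decay.

# Hypothetical reduction of train_fast.py; the model and hyperparameters
# are stand-ins, only the call pattern from the traceback is kept.
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CUDAPlace(0)):
    model = fluid.dygraph.Linear(32, 10)  # stand-in for the ResNet50 model
    opt = fluid.optimizer.SGDOptimizer(
        learning_rate=0.01,
        parameter_list=model.parameters(),
        # Weight decay makes minimize() call append_regularization_ops,
        # which inserts the sum op visible in the C++ stack above.
        regularization=fluid.regularizer.L2Decay(1e-4))

    x = fluid.dygraph.to_variable(
        np.random.rand(4, 32).astype("float32"))
    label = fluid.dygraph.to_variable(
        np.random.randint(0, 10, size=(4, 1)).astype("int64"))

    logits = model(x)
    loss = fluid.layers.softmax_with_cross_entropy(logits, label)
    loss_train = fluid.layers.mean(loss)

    loss_train.backward()
    opt.minimize(loss_train)  # line 66 of train_fast.py fails in here
    model.clear_gradients()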
5 answers

Answer 1 (hof1towb):
This looks like a dygraph trace error. @phlrain, could you take a look?
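
A quick way to confirm the program really is tracing in dygraph mode (assuming the public Paddle 1.x fluid API):

import paddle.fluid as fluid

# in_dygraph_mode() is True only inside a dygraph guard, where the
# imperative tracer from the stack above is active.
with fluid.dygraph.guard():
    print(fluid.in_dygraph_mode())  # expected: True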
Answer 2 (yduiuuwa):
@phlrain The error occurs when running in dygraph mode.
Answer 3 (knsnq2tg):
Could you share the Paddle version you are using, and code that reproduces the error?
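
For example, assuming a standard Paddle 1.x install, the version and install health can be reported with:

import paddle
import paddle.fluid as fluid

print(paddle.__version__)        # installed Paddle version, e.g. "1.7.0"
fluid.install_check.run_check()  # sanity-checks the local install (incl. GPU)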
Answer 4 (hfwmuf9z):
@phlrain https://aistudio.baidu.com/aistudio/projectdetail/268453
Answer 5 (9q78igpj):
@phlrain Have you found the cause yet?