Paddle Invoke operator elementwise_sub_grad error

7uzetpgm  于 2021-12-07  发布在  Java
关注(0)|答案(2)|浏览(416)
  • 版本、环境信息:

   1)PaddlePaddle版本:1.5.1
   2)CPU/GPU:
   3)系统环境:mac 10.13.6
   4)Python3.6
   
运行如下代码报错

import paddle.fluid as fluid
import numpy as np
def test(Loss,shape,pred_type,target_type):
    main_program = fluid.Program()
    start_program = fluid.Program()
    pred_np = np.ones(shape, pred_type)
    target_np = np.ones(shape, target_type)
    with fluid.program_guard(main_program, start_program):
        optimizer = fluid.optimizer.SGD(0.1)
        pred = fluid.layers.data('pred', shape, append_batch_size=False,dtype=pred_type)
        target = fluid.layers.data('target', shape, append_batch_size=False, dtype=target_type)
        loss = Loss(pred, target)
        loss=fluid.layers.reduce_sum(loss)
        optimizer.minimize(loss)
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(start_program)
    out = exe.run(main_program, feed={'pred': pred_np, 'target': target_np}, fetch_list=[loss.name])
    name=Loss.name#Loss.__class__.__name__
    print('name:{}|loss:{}'.format(name,out))
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
    pred_w = pred[:, 2] - pred[:, 0] + 1
    pred_h = pred[:, 3] - pred[:, 1] + 1

    target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
    target_ctry = (target[:, 1] + target[:, 3]) * 0.5
    target_w = target[:, 2] - target[:, 0] + 1
    target_h = target[:, 3] - target[:, 1] + 1

    dx = target_ctrx - pred_ctrx
    dy = target_ctry - pred_ctry
    #IOU(x)=max(0,{w_t-2x|delta_x|}/{w_t+2x|delta_x|}
    loss_dx = 1 - fluid.layers.elementwise_max(
        (target_w - 2 * fluid.layers.abs(dx)) /
        (target_w + 2 * fluid.layers.abs(dx) + eps), fluid.layers.zeros_like(dx))
    loss_dy = 1 - fluid.layers.elementwise_max(
        (target_h - 2 * fluid.layers.abs(dy)) /
        (target_h + 2 * fluid.layers.abs(dy) + eps), fluid.layers.zeros_like(dy))
    #IOU(w)=min(w/w_t,w_t/w)
    loss_dw = 1 - fluid.layers.elementwise_min(target_w / (pred_w + eps), pred_w /
                            (target_w + eps))
    loss_dh = 1 - fluid.layers.elementwise_min(target_h / (pred_h + eps), pred_h /
                            (target_h + eps))
    loss_comb = fluid.layers.stack([loss_dx, loss_dy, loss_dw, loss_dh],
                            axis=-1)#.view(loss_dx.size(0), -1)
    loss_comb=fluid.layers.reshape(loss_comb,[-1,1],inplace=True)#loss_dx.shap[0]

    betas=fluid.layers.fill_constant_batch_size_like(loss_comb,loss_comb.shape,dtype='float32',value=beta)
    cond=fluid.layers.less_than(loss_comb,betas)
    ie=fluid.layers.IfElse(cond)
    with ie.true_block():
        true_loss=ie.input(loss_comb)
        out=0.5*true_loss*true_loss/beta
        ie.output(out)
    with ie.false_block():
        false_loss=ie.input(loss_comb)
        out=false_loss-0.5*beta
        ie.output(out)
    loss=ie()#List[]
    return loss[0]
if __name__ == '__main__':
    test(bounded_iou_loss,[6,4],'float32','float32')

报错信息如下

File "/Users/baidu/PycharmProjects/Region Proposal by Guided Anchoring paddlepaddle/Models/Losses/test.py", line 77, in <module>
    test(bounded_iou_loss,[6,4],'float32','float32')
  File "/Users/baidu/PycharmProjects/Region Proposal by Guided Anchoring paddlepaddle/Models/Losses/test.py", line 20, in test
    out = exe.run(main_program, feed={'pred': pred_np, 'target': target_np}, fetch_list=[loss.name])
  File "/opt/Anaconda/anaconda3/envs/paddle/lib/python3.6/site-packages/paddle/fluid/executor.py", line 651, in run
    use_program_cache=use_program_cache)
  File "/opt/Anaconda/anaconda3/envs/paddle/lib/python3.6/site-packages/paddle/fluid/executor.py", line 749, in _run
    exe.run(program.desc, scope, 0, True, True, fetch_var_name)
paddle.fluid.core_avx.EnforceNotMet: Invoke operator elementwise_sub_grad error.
Python Callstacks: 
  File "/opt/Anaconda/anaconda3/envs/paddle/lib/python3.6/site-packages/paddle/fluid/framework.py", line 1771, in append_op
    attrs=kwargs.get("attrs", None))
  File "/opt/Anaconda/anaconda3/envs/paddle/lib/python3.6/site-packages/paddle/fluid/layers/math_op_patch.py", line 149, in __impl__
    attrs={'axis': axis})
  File "/Users/baidu/PycharmProjects/Region Proposal by Guided Anchoring paddlepaddle/Models/Losses/test.py", line 69, in bounded_iou_loss
    out=false_loss-0.5*beta
  File "/Users/baidu/PycharmProjects/Region Proposal by Guided Anchoring paddlepaddle/Models/Losses/test.py", line 15, in test
    loss = Loss(pred, target)
  File "/Users/baidu/PycharmProjects/Region Proposal by Guided Anchoring paddlepaddle/Models/Losses/test.py", line 77, in <module>
    test(bounded_iou_loss,[6,4],'float32','float32')
C++ Callstacks: 
holder_ should not be null
Tensor not initialized yet when Tensor::type() is called. at [/home/teamcity/work/ef54dc8a5b211854/paddle/fluid/framework/tensor.h:139]
PaddlePaddle Call Stacks: 
0          0x127c8c3e4p void paddle::platform::EnforceNotMet::Init<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, char const*, int) + 628
1          0x127c8c110p paddle::platform::EnforceNotMet::EnforceNotMet(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, char const*, int) + 80
2          0x127c8330cp paddle::framework::Tensor::type() const + 108
3          0x128a7e7aep paddle::operators::ElementwiseOpGrad::GetExpectedKernelType(paddle::framework::ExecutionContext const&) const + 142
4          0x1290440b5p paddle::framework::OperatorWithKernel::ChooseKernel(paddle::framework::RuntimeContext const&, paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) const + 229
5          0x129043bddp paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&, paddle::framework::RuntimeContext*) const + 109
6          0x129043ac8p paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) const + 488
7          0x12903fdd5p paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) + 357
8          0x127dfb6efp paddle::framework::Executor::RunPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, bool, bool, bool) + 335
9          0x127dfb18cp paddle::framework::Executor::Run(paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&, bool) + 172
10         0x127cee1edp void pybind11::cpp_function::initialize<paddle::pybind::pybind11_init_core_avx(pybind11::module&)::$_84, void, paddle::framework::Executor&, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&, pybind11::name, pybind11::is_method, pybind11::sibling>(paddle::pybind::pybind11_init_core_avx(pybind11::module&)::$_84&&, void (*)(paddle::framework::Executor&, paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::'lambda'(pybind11::detail::function_call&)::__invoke(pybind11::detail::function_call&) + 301
11         0x127c6ba08p pybind11::cpp_function::dispatcher(_object*, _object*, _object*) + 3400
12         0x10683700ap _PyCFunction_FastCallDict + 362
13         0x1067be1d1p _PyObject_FastCallKeywords + 385
14         0x10690c488p call_function + 392
15         0x106909ef2p _PyEval_EvalFrameDefault + 46818
16         0x1068fd659p _PyEval_EvalCodeWithName + 425
17         0x10690c7eap fast_function + 362
18         0x10690c3ecp call_function + 236
19         0x106909fa2p _PyEval_EvalFrameDefault + 46994
20         0x1068fd659p _PyEval_EvalCodeWithName + 425
21         0x10690c7eap fast_function + 362
22         0x10690c3ecp call_function + 236
23         0x106909fa2p _PyEval_EvalFrameDefault + 46994
24         0x10690c73cp fast_function + 188
25         0x10690c3ecp call_function + 236
26         0x106909ef2p _PyEval_EvalFrameDefault + 46818
27         0x1068fd659p _PyEval_EvalCodeWithName + 425
28         0x10695610cp PyRun_FileExFlags + 252
29         0x1069555e4p PyRun_SimpleFileExFlags + 372
30         0x10697c2a6p Py_Main + 3734
31         0x1067ae009p main + 313
32      0x7fff53d30015p start + 1
7uhlpewt

7uhlpewt1#

问题出错是你的代码里IfElse condition ,全部数据判断进入了true_block,没有任何数据进入false_block,所以反向传播的时候梯度会报错。IfElse 这个问我们会尽快解决。为了不耽误您的使用,你可以尝试将传入的数据改为随机值,让true_block 和false_block均有数据进入, 比如将:

pred_np = np.ones(shape, pred_type)
   target_np = np.ones(shape, target_type)

换为:

pred_np = np.random.randint(0,5,shape).astype(pred_type)
    target_np = np.random.randint(0,5,shape).astype(target_type)

并把name 去掉(Loss 没有name 函数)

name=Loss.name#Loss.__class__.__name__

即可成功运行。

h79rfbju

h79rfbju2#

问题是如果在bp过程中学到了全是true或者全是false该怎么办

相关问题