Paddle prediction (C++ version) fails with error "Tensor holds no memory. Call Tensor::mutable_data first."

vs3odd8k posted on 2021-12-07 in Java

System information

- PaddlePaddle version (e.g. 1.1) or CommitID: 1.8
- GPU, including CUDA/cuDNN version: NVIDIA Tesla V100-PCIE, CUDA 10.0, cuDNN 7
- OS Platform (e.g. Mac OS 10.14): Ubuntu 18.04, Linux
- Python version: 3.8.3
- API information:

I trained the OCR attention model on GPU with Paddle 1.8, then converted it into the C++ version. When running prediction, the following error occurs:

I1223 08:05:24.510879 26118 analysis_predictor.cc:496] ======= optimize end =======
I1223 08:05:24.510918 26118 naive_executor.cc:95] --- skip [feed], feed -> init_scores
I1223 08:05:24.510921 26118 naive_executor.cc:95] --- skip [feed], feed -> init_ids
I1223 08:05:24.510923 26118 naive_executor.cc:95] --- skip [feed], feed -> pixel
I1223 08:05:24.511201 26118 naive_executor.cc:95] --- skip [save_infer_model/scale_0.tmp_0], fetch -> fetch
W1223 08:05:24.511250 26118 device_context.cc:252] Please NOTE: device: 0, CUDA Capability: 70, Driver API Version: 10.2, Runtime API Version: 10.0
W1223 08:05:24.514454 26118 device_context.cc:260] device: 0, cuDNN Version: 7.6.
terminate called after throwing an instance of 'paddle::platform::EnforceNotMet'
what():

C++ Call Stacks (More useful to developers):

0 std::string paddle::platform::GetTraceBackString<std::string const&>(std::string const&, char const*, int)
1 paddle::framework::Tensor::check_memory_size() const
2 paddle::framework::TensorCopy(paddle::framework::Tensor const&, paddle::platform::Place const&, paddle::platform::DeviceContext const&, paddle::framework::Tensor*)
3 paddle::operators::ReadFromArrayOp::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
4 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool)
6 paddle::framework::Executor::RunPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, bool, bool, bool)
7 paddle::operators::WhileOp::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
8 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
9 paddle::framework::NaiveExecutor::Run()
10 paddle::AnalysisPredictor::ZeroCopyRun()

Python Call Stacks (More useful to users):

File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/site-packages/paddle/fluid/framework.py", line 2610, in append_op
attrs=kwargs.get("attrs", None))
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op
return self.main_program.current_block().append_op(*args,**kwargs)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/site-packages/paddle/fluid/layers/control_flow.py", line 1794, in array_read
outputs={'Out': [out]})
File "/ssd4/moshuojie/code/ocr_recognition/attention_model.py", line 301, in attention_infer
pre_ids = fluid.layers.array_read(array=ids_array, i=counter)
File "/ssd4/moshuojie/code/ocr_recognition/infer.py", line 67, in inference
ids = infer(images, num_classes, use_cudnn=True if args.use_gpu else False)
File "/ssd4/moshuojie/code/ocr_recognition/infer.py", line 202, in main
inference(args)
File "/ssd4/moshuojie/code/ocr_recognition/infer.py", line 206, in
main()
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
File "/home/mk/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "/home/mk/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 430, in main
run()
File "/home/mk/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/main.py", line 45, in
cli.main()
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"main", mod_spec)

Error Message Summary:

Error: Tensor holds no memory. Call Tensor::mutable_data first.

[Hint: holder_ should not be null.] at (/home/george/paddle/paddle/fluid/framework/tensor.cc:23)
[operator < read_from_array > error]
Aborted (core dumped)
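Editor's note: the C++ stack ends in read_from_array inside a WhileOp, and the feed log above lists init_ids and init_scores as feed targets alongside pixel, so the tensor that "holds no memory" is most likely one of those two decoder feeds never being set before ZeroCopyRun. For reference, a minimal sketch of how those feeds are built as LoD tensors in the Python pipeline (the helper name is hypothetical, mirroring what get_attention_feeder_for_infer does):

import numpy as np
import paddle.fluid as fluid

def build_init_feeds(batch_size, place, sos=0):
    # Two-level length-based LoD: one beam branch per sample.
    lod = [[1] * batch_size, [1] * batch_size]
    # init_ids starts every sequence with the <sos> token id.
    init_ids = fluid.create_lod_tensor(
        np.full((batch_size, 1), sos, dtype='int64'), lod, place)
    # init_scores starts every beam with score 1.0.
    init_scores = fluid.create_lod_tensor(
        np.ones((batch_size, 1), dtype='float32'), lod, place)
    return {'init_ids': init_ids, 'init_scores': init_scores}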

hrysbysz 1#

Hi! We've received your issue; please be patient while we arrange technicians to answer your question as soon as possible. Please double-check that you have provided a clear problem description, reproduction code, environment & version, and error messages. You may also check out the official API docs, FAQ, historical GitHub Issues, and the AI community to get an answer. Have a nice day!

mcvgt66p 2#

Please post the complete infer.py code.

hrirmatl 3#

@WenmuZhou The infer.py code:

def inference(args):
    """OCR inference"""
    if args.model == "crnn_ctc":
        infer = ctc_infer
        get_feeder_data = get_ctc_feeder_for_infer
    else:
        infer = attention_infer
        get_feeder_data = get_attention_feeder_for_infer
    eos = 1
    sos = 0
    num_classes = data_reader.num_classes()
    data_shape = data_reader.data_shape()

    # define network
    if len(list(data_shape)) == 3:
        data_shape = [None] + list(data_shape)
    images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
    ids = infer(images, num_classes, use_cudnn=True if args.use_gpu else False)
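    # Editor's note (assumption based on the feed log in the error report):
    # for the attention model, attention_infer additionally declares the
    # 'init_ids' and 'init_scores' feed variables internally, so the saved
    # program has three inputs even though only 'pixel' is declared here.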

    # data reader
    infer_reader = data_reader.inference(
        batch_size=args.batch_size,
        infer_images_dir=args.input_images_dir,
        infer_list_file=args.input_images_list,
        cycle=True if args.iterations > 0 else False,
        model=args.model)

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)

    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # load dictionary
    dict_map = None
    if args.dict is not None and os.path.isfile(args.dict):
        dict_map = {}
        with open(args.dict) as dict_file:
            for i, word in enumerate(dict_file):
                dict_map[i] = word.strip()
        print("Loaded dict from %s" % args.dict)

    # load init model
    model_dir = args.model_path
    fluid.load(
        program=fluid.default_main_program(),
        model_path=model_dir,
        executor=exe,
        var_list=fluid.io.get_program_parameter(fluid.default_main_program()))
    print("Init model from: %s." % args.model_path)

    fluid.io.save_inference_model(dirname="./mobilenet/",
                                  feeded_var_names=['pixel'],
                                  target_vars=[ids], executor=exe,
                                  model_filename='model',
                                  params_filename='params')
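    # Editor's sketch (hedged; the extra input names are taken from the
    # C++ feed log): listing all three feed targets documents what a C++
    # caller must supply, e.g.:
    # fluid.io.save_inference_model(
    #     dirname="./mobilenet/",
    #     feeded_var_names=['pixel', 'init_ids', 'init_scores'],
    #     target_vars=[ids], executor=exe,
    #     model_filename='model', params_filename='params')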

    batch_times = []
    iters = 0
    for data in infer_reader():
        feed_dict = get_feeder_data(data, place)
        if args.iterations > 0 and iters == args.iterations + args.skip_batch_num:
            break
        if iters < args.skip_batch_num:
            print("Warm-up iteration")
        if iters == args.skip_batch_num:
            profiler.reset_profiler()

        start = time.time()
        result = exe.run(fluid.default_main_program(),
                         feed=feed_dict,
                         fetch_list=[ids],
                         return_numpy=False)
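        # Editor's note: this Python run succeeds because get_feeder_data
        # (get_attention_feeder_for_infer here) supplies init_ids and
        # init_scores in feed_dict; the C++ predictor presumably needs the
        # same feeds, otherwise read_from_array sees an empty tensor.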
        indexes = prune(np.array(result[0]).flatten(), 0, 1)
        batch_time = time.time() - start
        fps = args.batch_size / batch_time
        batch_times.append(batch_time)
        if dict_map is not None:
            print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % (
                iters,
                batch_time,
                fps,
                [dict_map[index] for index in indexes], ))
        else:
            print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % (
                iters,
                batch_time,
                fps,
                indexes, ))

        iters += 1

    latencies = batch_times[args.skip_batch_num:]
    latency_avg = np.average(latencies)
    latency_pc99 = np.percentile(latencies, 99)
    fpses = np.divide(args.batch_size, latencies)
    fps_avg = np.average(fpses)
    fps_pc99 = np.percentile(fpses, 1)

    # Benchmark output
    print('\nTotal examples (incl. warm-up): %d' % (iters * args.batch_size))
    print('average latency: %.5f s, 99pc latency: %.5f s' % (latency_avg,
                                                             latency_pc99))
    print('average fps: %.5f, fps for 99pc latency: %.5f' % (fps_avg, fps_pc99))

def prune(words, sos, eos):
    """Remove unused tokens in prediction result."""
    start_index = 0
    end_index = len(words)
    if sos in words:
        start_index = np.where(words == sos)[0][0] + 1
    if eos in words:
        end_index = np.where(words == eos)[0][0]
    return words[start_index:end_index]
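# Editor's example: prune(np.array([0, 5, 7, 1, 3]), 0, 1) returns
# array([5, 7]); everything up to and including the first sos (0) and
# from the first eos (1) onward is dropped.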

def main():
    args = parser.parse_args()
    print_arguments(args)
    check_gpu(args.use_gpu)
    if args.profile:
        if args.use_gpu:
            with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
                inference(args)
        else:
            with profiler.profiler("CPU", sorted_key='total') as cpuprof:
                inference(args)
    else:
        inference(args)
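Editor's note: a minimal verification sketch (hedged: the feed names come from the C++ feed log, and the image shape is an assumption) that loads the exported model back in Python and runs it with all three feeds; if this passes while the C++ run still aborts, the C++ side is most likely missing the init_ids / init_scores LoD tensors:

import numpy as np
import paddle.fluid as fluid

place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
# Load the model exported by save_inference_model above.
program, feed_names, fetch_targets = fluid.io.load_inference_model(
    dirname="./mobilenet/", executor=exe,
    model_filename='model', params_filename='params')
print(feed_names)  # expect ['pixel', 'init_ids', 'init_scores'] per the log

# One sample, one beam branch: two-level length-based LoD.
lod = [[1], [1]]
init_ids = fluid.create_lod_tensor(
    np.zeros((1, 1), dtype='int64'), lod, place)
init_scores = fluid.create_lod_tensor(
    np.ones((1, 1), dtype='float32'), lod, place)
img = np.random.rand(1, 1, 48, 384).astype('float32')  # shape assumed

result = exe.run(program,
                 feed={'pixel': img,
                       'init_ids': init_ids,
                       'init_scores': init_scores},
                 fetch_list=fetch_targets,
                 return_numpy=False)
print(np.array(result[0]).flatten())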
