Paddle: linker errors when testing inference with the prebuilt Paddle library on Ubuntu 16.04

syqv5f0l · posted 2021-11-29

(1). ############################# Environment #############################
ubuntu16.04, python=3.6.10

(2). ############################# Prebuilt library version info #############################
paddle-1.8
GIT COMMIT ID: 1e01335
WITH_MKL: ON
WITH_MKLDNN: ON
WITH_GPU: ON
CUDA version: 10.0
CUDNN version: v7.6
CXX compiler version: 4.8.2

GIT COMMIT ID: 5c2b925
WITH_MKL: ON
WITH_MKLDNN: ON
WITH_GPU: ON
CUDA version: 10.0
CUDNN version: v7.6
CXX compiler version: 4.8.2

Prebuilt Paddle inference package: ubuntu14.04_cuda10.0_cudnn7_avx_mkl (tried both packages; their version info is shown above)
https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html

(3). ############################# Model download #############################
http://paddle-inference-dist.cdn.bcebos.com/tensorrt_test/mobilenet.tar.gz

(4). ############################# CMakeLists.txt #############################
cmake_minimum_required (VERSION 3.8)

project ("Paddle")

project(cpp_inference_demo CXX C)
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
option(USE_TENSORRT "Compile demo with TensorRT." OFF)

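# MSVC-only helper: rewrites /MD to /MT in the compiler flags; it is a no-op on a Linux build like this one.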
macro(safe_set_static_flag)
  foreach(flag_var
          CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
          CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
    if(${flag_var} MATCHES "/MD")
      string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
    endif()
  endforeach()
endmacro()

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -g")
set(CMAKE_STATIC_LIBRARY_PREFIX "")
message("flags" ${CMAKE_CXX_FLAGS})

set(PADDLE_LIB "/home/yuan/XSpace/Env-Test/Paddle/fluid_inference")
set(DEMO_NAME "mobilenet_test")
set(CUDA_LIB "/usr/local/cuda-10.0/lib64")
set(CUDNN_LIB "/usr/local/cuda-10.0/lib64")
set(CMAKE_SHARED_LIBRARY_SUFFIX ".so")

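# Note: these guards never fire as written, because PADDLE_LIB and DEMO_NAME are
# hard-coded above; they only matter if those set() calls are replaced by -D flags.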
if(NOT DEFINED PADDLE_LIB)
  message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED DEMO_NAME)
  message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
endif()

if(USE_TENSORRT AND WITH_GPU)
  include_directories("${TENSORRT_ROOT}/include")
  link_directories("${TENSORRT_ROOT}/lib")
endif()

include_directories(${PADDLE_LIB})
include_directories(${PADDLE_LIB}/third_party/install/protobuf/include)
include_directories(${PADDLE_LIB}/third_party/install/glog/include)
include_directories(${PADDLE_LIB}/third_party/install/gflags/include)
include_directories(${PADDLE_LIB}/third_party/install/xxhash/include)
include_directories(${PADDLE_LIB}/third_party/install/zlib/include)
include_directories(${PADDLE_LIB}/third_party/boost)
include_directories(${PADDLE_LIB}/third_party/eigen3)

add_executable(${DEMO_NAME} pp_test.cc)

if(WITH_MKL)
  include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
  set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
               ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
  set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
  if(EXISTS ${MKLDNN_PATH})
    include_directories("${MKLDNN_PATH}/include")
    set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
  endif()
else()
  set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()

# Note: libpaddle_inference_api.so/.a must be put before libpaddle_fluid.so/.a

if(WITH_STATIC_LIB)
  set(DEPS
      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
  set(DEPS
      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()

set(EXTERNAL_LIB "-lrt -ldl -lpthread")
set(DEPS ${DEPS}
    ${MATH_LIB} ${MKLDNN_LIB}
    glog gflags protobuf z xxhash
    ${EXTERNAL_LIB})

if(WITH_GPU)
  if(USE_TENSORRT)
    set(DEPS ${DEPS}
        ${TENSORRT_ROOT}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
    set(DEPS ${DEPS}
        ${TENSORRT_ROOT}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
  endif()
  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
  set(DEPS ${DEPS} ${CUDA_LIB}/libcublas${CMAKE_SHARED_LIBRARY_SUFFIX})
  set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()

target_link_libraries(${DEMO_NAME} ${DEPS})

(5). ############################# Test code #############################
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "paddle/include/paddle_inference_api.h"

namespace paddle {

void CreateConfig(AnalysisConfig* config, const std::string& model_dirname) {
  // Load the model from disk
  config->SetModel(model_dirname + "/model",
                   model_dirname + "/params");
  // config->SetModel(model_dirname);
  // To load the model from memory instead, use the SetModelBuffer interface:
  // config->SetModelBuffer(prog_buffer, prog_size, params_buffer, params_size);
  config->EnableUseGpu(100 /* initial GPU memory pool in MB */, 0 /* GPU device id */);  // enable GPU inference

  /* for cpu
  config->DisableGpu();
  config->EnableMKLDNN();   // enable MKLDNN acceleration
  config->SetCpuMathLibraryNumThreads(10);
  */

  // When using ZeroCopyTensor, this must be set to false
  config->SwitchUseFeedFetchOps(false);
  // If there are multiple inputs, this must be set to true
  config->SwitchSpecifyInputNames(true);
  config->SwitchIrDebug(true);  // visual debugging; if enabled, a dot file is written after each graph optimization pass
  // config->SwitchIrOptim(false);   // defaults to true; false disables all optimizations
  // config->EnableMemoryOptim();    // enable memory/GPU-memory reuse
}

void RunAnalysis(int batch_size, std::string model_dirname) {
  // 1. Create the AnalysisConfig
  AnalysisConfig config;
  CreateConfig(&config, model_dirname);

  // 2. Create the predictor from the config and prepare input data (all zeros here)
  auto predictor = CreatePaddlePredictor(config);
  int channels = 3;
  int height = 224;
  int width = 224;
  std::vector<float> input(batch_size * channels * height * width, 0.f);

  // 3. Prepare the input
  // The ZeroCopy interface avoids redundant CPU copies during inference, improving performance
  auto input_names = predictor->GetInputNames();
  auto input_t = predictor->GetInputTensor(input_names[0]);
  input_t->Reshape({batch_size, channels, height, width});
  input_t->copy_from_cpu(input.data());

  // 4. Run the inference engine
  CHECK(predictor->ZeroCopyRun());

  // 5. Fetch the output
  std::vector<float> out_data;
  auto output_names = predictor->GetOutputNames();
  auto output_t = predictor->GetOutputTensor(output_names[0]);
  std::vector<int> output_shape = output_t->shape();
  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                std::multiplies<int>());

  out_data.resize(out_num);
  output_t->copy_to_cpu(out_data.data());
}

}  // namespace paddle

int main() {
paddle::RunAnalysis(1, "./mobilenet");
return 0;
}
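(Note: the demo assumes the mobilenet tarball from section (3) has been extracted next to the binary, so that ./mobilenet holds the model and params files passed to SetModel().)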

(6). ############################# problem #############################
yuan@Y:~/XSpace/Env-Test/Paddle/build$ cmake .. && make
flags -std=c++11 -g
-- Configuring done
-- Generating done
-- Build files have been written to: /home/yuan/XSpace/Env-Test/Paddle/build
[ 50%] Linking CXX executable mobilenet_test
CMakeFiles/mobilenet_test.dir/pp_test.cc.o: In function 'paddle::CreateConfig(paddle::AnalysisConfig*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)':
/home/yuan/XSpace/Env-Test/Paddle/pp_test.cc:14: undefined reference to 'paddle::AnalysisConfig::SetModel(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)'
collect2: error: ld returned 1 exit status
CMakeFiles/mobilenet_test.dir/build.make:91: recipe for target 'mobilenet_test' failed
make[2]: *** [mobilenet_test] Error 1
CMakeFiles/Makefile2:72: recipe for target 'CMakeFiles/mobilenet_test.dir/all' failed
make[1]: *** [CMakeFiles/mobilenet_test.dir/all] Error 2
Makefile:83: recipe for target 'all' failed
make: *** [all] Error 2
yuan@Y:~/XSpace/Env-Test/Paddle/build$
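A possible clue in the log: every unresolved symbol is a std::__cxx11 string overload, while both prebuilt packages report "CXX compiler version: 4.8.2". GCC 4.8 predates the libstdc++ dual ABI, so the library would export old-ABI std::string symbols, whereas Ubuntu 16.04's default GCC 5.x compiles the demo against the new ABI. A minimal sketch of a workaround, assuming this mismatch is the cause, is to force the old ABI in the CMakeLists.txt above:

# Assumption: the prebuilt library was built with the pre-C++11 libstdc++ ABI (GCC 4.8.2).
# Compile the demo with the old ABI so its std::string symbols match the library's.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")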

p5cysglq 1#

Can you run the official inference demo, i.e. the prediction sample at the link below?

https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/native_infer.html#a-name-c-c-a

1dkrff03 2#

My configuration:
WITH_MKL=ON
WITH_GPU=ON
USE_TENSORRT=OFF

When running, TensorRT dependency libraries are reported missing. If I don't need TensorRT, does running this demo still require the TensorRT dependency libraries?

sirbozc5 3#

I don't have TensorRT here, so the TensorRT dependency libraries are not installed.

nnsrf1az 4#

In principle TRT should not be needed, but I suspect the paddle .so references TRT symbols; in that case, if the TRT path is not specified, you get a symbol-not-found error (see the sketch below).
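If that is the case, the USE_TENSORRT branch already present in the CMakeLists above can pick up a local install. A minimal sketch, assuming a hypothetical TensorRT location:

# Hypothetical path -- TENSORRT_ROOT must contain include/ and lib/ with libnvinfer.
set(TENSORRT_ROOT "/path/to/TensorRT")
# Configure with USE_TENSORRT enabled (e.g. cmake .. -DUSE_TENSORRT=ON) so the
# CMakeLists adds the TensorRT include/link directories and appends
# libnvinfer / libnvinfer_plugin to DEPS.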

u7up0aaq 5#

Thanks, I'll install the TensorRT dependency libraries and try again.

vfhzx4xs 6#

TensorRT is installed now, but the test case still fails to compile:

-- Generating done
-- Build files have been written to: /home/yuan/XSpace/Env-Test/Paddle/sample/inference/build
Scanning dependencies of target mobilenet_test
[ 50%] Building CXX object CMakeFiles/mobilenet_test.dir/mobilenet_test.o
[100%] Linking CXX executable mobilenet_test
CMakeFiles/mobilenet_test.dir/mobilenet_test.o: In function 'paddle::PrepareTRTConfig(paddle::AnalysisConfig*, int)':
/home/yuan/XSpace/Env-Test/Paddle/sample/inference/mobilenet_test.cc:25: undefined reference to 'paddle::AnalysisConfig::SetModel(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)'
CMakeFiles/mobilenet_test.dir/mobilenet_test.o: In function '__static_initialization_and_destruction_0(int, int)':
/home/yuan/XSpace/Env-Test/Paddle/sample/inference/mobilenet_test.cc:12: undefined reference to 'google::FlagRegisterer::FlagRegisterer<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >(char const*, char const*, char const*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)'
collect2: error: ld returned 1 exit status
CMakeFiles/mobilenet_test.dir/build.make:93: recipe for target 'mobilenet_test' failed
make[2]: *** [mobilenet_test] Error 1
CMakeFiles/Makefile2:72: recipe for target 'CMakeFiles/mobilenet_test.dir/all' failed
make[1]: *** [CMakeFiles/mobilenet_test.dir/all] Error 2
Makefile:83: recipe for target 'all' failed
make: *** [all] Error 2
run_impl.sh: 28: run_impl.sh: ./mobilenet_test: not found

kgsdhlau 7#

Could you provide an example that has been tested successfully on ubuntu16.04?
