// Headers this excerpt depends on (Python 2 C API and OpenCV with the CUDA modules).
#include <Python.h>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/cudawarping.hpp>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

// JPEG_QUALITY is a compile-time constant assumed to be defined elsewhere in the original source.

void makeJPEG(PyObject* _py_list_src, int idx, int _target_size, bool _crop_to_square,
              PyObject* _py_list_tgt, bool withGpu) {
    cv::Mat _resized_mat_buffer;
    cv::cuda::GpuMat _resized_mat_buffer_gpu;
    std::vector<uchar> _output_jpeg_buffer;
    std::vector<int> _encode_params;
    _encode_params.push_back(cv::IMWRITE_JPEG_QUALITY);
    _encode_params.push_back(JPEG_QUALITY);

    /*
     * Decompress JPEG
     */
    PyObject* pySrc = PyList_GET_ITEM(_py_list_src, idx);
    uchar* src = (unsigned char*) PyString_AsString(pySrc);
    size_t src_len = PyString_GET_SIZE(pySrc);
    std::vector<uchar> src_vec(src, src + src_len);
    cv::Mat decoded_mat = cv::imdecode(cv::Mat(src_vec), cv::IMREAD_COLOR);
    assert(decoded_mat.channels() == 3);

    // Load to GPU.
    cv::cuda::GpuMat decoded_mat_gpu;
    if (withGpu) {
        decoded_mat_gpu.upload(decoded_mat);
    }

    /*
     * Resize so that the shorter side equals _target_size.
     */
    double min_dim = std::min(decoded_mat.size().height, decoded_mat.size().width);
    double scale_factor = _target_size / min_dim;
    int new_height = round(scale_factor * decoded_mat.size().height);
    int new_width = round(scale_factor * decoded_mat.size().width);
    assert((new_height == _target_size && new_width >= _target_size)
        || (new_width == _target_size && new_height >= _target_size));
    // INTER_AREA for downscaling, INTER_CUBIC for upscaling, INTER_LINEAR when the size is unchanged.
    int interpolation = scale_factor == 1 ? cv::INTER_LINEAR
                      : scale_factor > 1  ? cv::INTER_CUBIC : cv::INTER_AREA;
    if (withGpu) {
        cv::cuda::resize(decoded_mat_gpu, _resized_mat_buffer_gpu, cv::Size(new_width, new_height), 0, 0, interpolation);
        _resized_mat_buffer_gpu.download(_resized_mat_buffer);
    } else {
        cv::resize(decoded_mat, _resized_mat_buffer, cv::Size(new_width, new_height), 0, 0, interpolation);
    }

    /*
     * Conditionally center-crop to _target_size x _target_size, then compress JPEG.
     */
    if (_crop_to_square) {
        int crop_start_x = (new_width - _target_size) / 2;
        int crop_start_y = (new_height - _target_size) / 2;
        cv::Rect cropRect(crop_start_x, crop_start_y, _target_size, _target_size);
        cv::Mat cropped_mat_buffer = _resized_mat_buffer(cropRect);
        cv::imencode(".jpg", cropped_mat_buffer, _output_jpeg_buffer, _encode_params);
    } else {
        cv::imencode(".jpg", _resized_mat_buffer, _output_jpeg_buffer, _encode_params);
    }

    // Wrap the encoded bytes in a Python string and append it to the target list.
    char* output_jpeg_buffer_ptr = reinterpret_cast<char*>(&_output_jpeg_buffer[0]);
    PyObject* pyStr = PyString_FromStringAndSize(output_jpeg_buffer_ptr, _output_jpeg_buffer.size());
#pragma omp critical
    {
        PyList_Append(_py_list_tgt, pyStr);
    }
    Py_DECREF(pyStr);  // PyList_Append added its own reference.
}
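The #pragma omp critical around PyList_Append suggests makeJPEG is meant to be called from an OpenMP loop over indices; since the remaining CPython calls would also need GIL protection in a multi-threaded setting, the sketch below drives it single-threaded instead. It is a minimal, hypothetical driver rather than part of the original code: it assumes an embedded Python 2 interpreter, a made-up input file name "input.jpg", and that makeJPEG is declared as above.

// Hypothetical single-threaded driver for makeJPEG (sketch only).
#include <Python.h>
#include <cstdio>
#include <fstream>
#include <iterator>
#include <string>

int main() {
    Py_Initialize();

    // Read one JPEG file into memory (hypothetical path).
    std::ifstream in("input.jpg", std::ios::binary);
    std::string bytes((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());

    // Source list holding a single JPEG byte string, and an empty target list.
    PyObject* src_list = PyList_New(0);
    PyObject* tgt_list = PyList_New(0);
    PyObject* jpeg_str = PyString_FromStringAndSize(bytes.data(), bytes.size());
    PyList_Append(src_list, jpeg_str);  // PyList_Append takes its own reference.
    Py_DECREF(jpeg_str);

    // Resize so the short side becomes 256 px, center-crop to a square, CPU path only.
    makeJPEG(src_list, /*idx=*/0, /*_target_size=*/256, /*_crop_to_square=*/true,
             tgt_list, /*withGpu=*/false);

    // The re-encoded JPEG bytes are now the first element of tgt_list.
    PyObject* out = PyList_GET_ITEM(tgt_list, 0);
    std::printf("re-encoded JPEG: %zd bytes\n", PyString_GET_SIZE(out));

    Py_DECREF(src_list);
    Py_DECREF(tgt_list);
    Py_Finalize();
    return 0;
}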
Using OpenCV with CUDA to feed images into TensorRT (SSD model), reposted from ikevin's CSDN column:
cv::cuda::GpuMat imageGpu(cv::Size(640, 480), CV_32FC4);
cv::Mat frame, imageRGBA;
capture >> frame;  // grab a BGR frame from a cv::VideoCapture named `capture`
// Applying RGB2BGRA to the BGR frame swaps R and B and adds an alpha channel, yielding RGBA.
cv::cvtColor(frame, imageRGBA, cv::COLOR_RGB2BGRA);
// Convert to 32-bit float, 4 channels, scaling pixel values by 4 (as in the original post).
imageRGBA.convertTo(imageRGBA, CV_32FC4, 4);
imageGpu.upload(imageRGBA);
...
// This is how to obtain the device pointer to the GPU memory backing the upload above,
// i.e. the first argument passed to imageNetMean:
(float4*)imageGpu.ptr<float4>()
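One detail worth noting before handing that pointer to a CUDA routine (such as imageNetMean above, whose exact signature lives in the linked repo): GpuMat rows are pitch-aligned, so consecutive rows are step bytes apart rather than cols * sizeof(float4). The sketch below is illustrative only; preprocessKernelLaunch is a hypothetical stand-in, not a function from the original post or from OpenCV.

// Sketch: passing a GpuMat's device pointer plus its pitch to a CUDA preprocessing call.
#include <cuda_runtime.h>          // defines float4
#include <opencv2/core/cuda.hpp>   // cv::cuda::GpuMat
#include <cstdio>

// Hypothetical stand-in for the real preprocessing launcher (e.g. imageNetMean in the repo).
void preprocessKernelLaunch(float4* d_image, size_t pitch_bytes, int width, int height) {
    // Stub: a real implementation would launch the CUDA preprocessing kernel here.
    (void)d_image; (void)pitch_bytes; (void)width; (void)height;
}

void feedToPreprocess(cv::cuda::GpuMat& imageGpu) {
    // ptr<float4>() already returns float4*, so the explicit cast in the post is redundant.
    float4* d_image = imageGpu.ptr<float4>();

    // GpuMat is allocated with a pitch: step is the byte distance between rows and may be
    // larger than cols * sizeof(float4), so kernels must index rows by the pitch.
    size_t pitch_bytes = imageGpu.step;
    std::printf("pitch = %zu bytes, unpadded row = %zu bytes\n",
                pitch_bytes, imageGpu.cols * sizeof(float4));

    preprocessKernelLaunch(d_image, pitch_bytes, imageGpu.cols, imageGpu.rows);
}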
Full code: GitHub - wkexinw/tensorSSD: deep-learning SSD_300x300 model ported to TensorRT (NVIDIA Jetson TX2)
Original article: https://blog.csdn.net/jacke121/article/details/121850614