我有一个IYUV(4:2:0)格式的视频流,并试图将其转换为CVPixelBufferRef
,然后转换为CMSampleBufferRef
并以AVSampleBufferDisplayLayer
播放(需要AVPictureInPictureController
)。我已经尝试了几个版本的解决方案,但没有一个实际上工作得很好,希望有视频处理经验的人可以告诉我这里做错了什么。
全功能:
- (CMSampleBufferRef)makeSampleBufferFromTexturesWithY:(void *)yPtr U:(void *)uPtr V:(void *)vPtr yStride:(int)yStride uStride:(int)uStride vStride:(int)vStride width:(int)width height:(int)height doMirror:(BOOL)doMirror doMirrorVertical:(BOOL)doMirrorVertical
{
NSDictionary *pixelAttributes = @{(NSString *)kCVPixelBufferIOSurfacePropertiesKey:@{}}; // For 1,2,3
CVPixelBufferRef pixelBuffer = NULL;
CVReturn result;
result = CVPixelBufferCreate(kCFAllocatorDefault,
width,
height,
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange // For 1,2,3
// kCVPixelFormatType_32BGRA, // For 4.
(__bridge CFDictionaryRef)(pixelAttributes),
&pixelBuffer);
if (result != kCVReturnSuccess) {
NSLog(@"PIP: Unable to create cvpixelbuffer %d", result);
return nil;
}
/// Converter code below...
CMFormatDescriptionRef formatDesc;
result = CMVideoFormatDescriptionCreateForImageBuffer(kCFAllocatorDefault, pixelBuffer, &formatDesc);
if (result != kCVReturnSuccess) {
NSAssert(NO, @"PIP: Failed to create CMFormatDescription: %d", result);
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
return nil;
}
CMTime now = CMTimeMakeWithSeconds(CACurrentMediaTime(), 1000);
CMSampleTimingInfo timingInfo;
timingInfo.duration = CMTimeMakeWithSeconds(1, 1000);
timingInfo.presentationTimeStamp = now;
timingInfo.decodeTimeStamp = now;
@try {
if (@available(iOS 13.0, *)) {
CMSampleBufferRef sampleBuffer;
CMSampleBufferCreateReadyWithImageBuffer(kCFAllocatorDefault, pixelBuffer, formatDesc, &timingInfo, &sampleBuffer);
// CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
CVPixelBufferRelease(pixelBuffer);
pixelBuffer = nil;
// free(dest.data);
// free(uvPlane);
return sampleBuffer;
} else {
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
return nil;
}
} @catch (NSException *exception) {
NSAssert(NO, @"PIP: Failed to create CVSampleBuffer: %@", exception);
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
return nil;
}
}
字符串
以下是我找到的一些解决方案:
1.合并UV,但一半底部是绿色的。
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
uint8_t *yDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
memcpy(yDestPlane, yPtr, width * height);
CGFloat uPlaneSize = width * height / 4;
CGFloat vPlaneSize = width * height / 4;
CGFloat numberOfElementsForChroma = uPlaneSize + vPlaneSize;
// for simplicity and speed create a combined UV panel to hold the pixels
uint8_t *uvPlane = calloc(numberOfElementsForChroma, sizeof(uint8_t));
memcpy(uvPlane, uPtr, uPlaneSize);
memcpy(uvPlane += (uint8_t)(uPlaneSize), vPtr, vPlaneSize);
uint8_t *uvDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
memcpy(uvDestPlane, uvPlane, numberOfElementsForChroma);
型
1.交错U和V,图像仍然扭曲
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
uint8_t *yDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
for (int i = 0, k = 0; i < height; i ++) {
for (int j = 0; j < width; j ++) {
yDestPlane[k++] = ((unsigned char *)yPtr)[j + i * yStride];
}
}
uint8_t *uvDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
for (int row = 0, index = 0; row < height / 2; row++) {
for (int col = 0; col < width / 2; col++) {
uvDestPlane[index++] = ((unsigned char *)uPtr)[col + row * uStride];
uvDestPlane[index++] = ((unsigned char *)vPtr)[col + row * vStride];
}
}
型
1.有些类似于1。
int yPixels = yStride * height;
int uPixels = uStride * height/2;
int vPixels = vStride * height/2;
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
uint8_t *yDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
memcpy(yDestPlane, yPtr, yPixels);
uint8_t *uvDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
memcpy(uvDestPlane , uPtr, uPixels);
memcpy(uvDestPlane + uPixels, vPtr, vPixels);
型
1.使用Accelerate
将YUV转换为BGRA,然后转换为CVPixelBuffer,没有错误但没有渲染视频
vImage_Buffer srcYp = {
.width = width,
.height = height,
.rowBytes = yStride,
.data = yPtr,
};
vImage_Buffer srcCb = {
.width = width / 2,
.height = height / 2,
.rowBytes = uStride,
.data = uPtr,
};
vImage_Buffer srcCr = {
.width = width / 2,
.height = height / 2,
.rowBytes = vStride,
.data = vPtr,
};
vImage_Buffer dest;
dest.data = NULL;
dest.width = width;
dest.height = height;
vImage_Error error = kvImageNoError;
error = vImageBuffer_Init(&dest, height, width, 32, kvImagePrintDiagnosticsToConsole);
// vImage_YpCbCrPixelRange pixelRange = (vImage_YpCbCrPixelRange){ 0, 128, 255, 255, 255, 1, 255, 0 };
vImage_YpCbCrPixelRange pixelRange = { 16, 128, 235, 240, 255, 0, 255, 0 };
vImage_YpCbCrToARGB info;
error = kvImageNoError;
error = vImageConvert_YpCbCrToARGB_GenerateConversion(kvImage_YpCbCrToARGBMatrix_ITU_R_601_4,
&pixelRange,
&info,
kvImage420Yp8_Cb8_Cr8,
kvImageARGB8888,
kvImagePrintDiagnosticsToConsole);
error = kvImageNoError;
uint8_t permuteMap[4] = {3, 2, 1, 0}; // BGRA - iOS only support BGRA
error = vImageConvert_420Yp8_Cb8_Cr8ToARGB8888(&srcYp,
&srcCb,
&srcCr,
&dest,
&info,
permuteMap, // for iOS must be no NULL, mac can be NULL iOS only support BGRA
255,
kvImagePrintDiagnosticsToConsole);
if (error != kvImageNoError) {
NSAssert(NO, @"PIP: vImageConvert error %ld", error);
return nil;
}
// vImageBuffer_CopyToCVPixelBuffer will give out error destFormat bitsPerComponent = 0 is not supported
// vImage_CGImageFormat format = {
// .bitsPerComponent = 8,
// .bitsPerPixel = 32,
// .bitmapInfo = (CGBitmapInfo)kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst,
// .colorSpace = CGColorSpaceCreateDeviceRGB()
// };
// vImageCVImageFormatRef vformat = vImageCVImageFormat_CreateWithCVPixelBuffer(pixelBuffer);
//
// error = vImageBuffer_CopyToCVPixelBuffer(&dest, &format, pixelBuffer, vformat, 0, kvImagePrintDiagnosticsToConsole);
result = CVPixelBufferCreateWithBytes(kCFAllocatorDefault,
width,
height,
kCVPixelFormatType_32BGRA,
dest.data,
dest.rowBytes,
NULL,
NULL,
(__bridge CFDictionaryRef)pixelAttributes,
&pixelBuffer);
型
1条答案
按热度按时间ix0qys7i1#
我不得不求助于使用第三方库OGVKit,使它的工作与一些小的调整。解码器来自函数
(void)updatePixelBuffer420:pixelBuffer
,对YUV420数据的解码时间非常快。