c++ 如何从YUV_420_888到NV21进行快速内存拷贝

jk9hmnmh  于 2022-12-05  发布在  其他
关注(0)|答案(1)|浏览(486)

参考这篇文章,我想写一个方法将 Android 的 YUV_420_888 转换为 NV21。虽然 camera2 API 返回的图像在多数设备上本质上就是 NV21 的变体布局,但我需要一个更通用的实现。如下所示:

// Holder for one NV21 frame: a packed luma plane plus an interleaved
// chroma plane (filled by cvtYUV420ToNV21 below).
class NV21Image{
// Y (luma) plane; cvtYUV420ToNV21 fills width*height bytes.
public byte[] y;
// Interleaved V/U (chroma) plane, V first as NV21 requires;
// presumably sized width*height/2 — confirm against the allocator.
public byte[] uv;
}

/**
 * Copies a YUV_420_888 {@link Image} into NV21 layout: the packed Y plane
 * into {@code nv21.y}, followed by half-resolution interleaved V/U samples
 * (V first) into {@code nv21.uv}.
 *
 * Assumes a densely packed Y plane (pixel stride 1) and identical layout
 * for the U and V planes, as asserted below.
 *
 * @param image source image in YUV_420_888 format
 * @param nv21  destination; y must hold width*height bytes and uv
 *              width*height/2 bytes
 */
public static void cvtYUV420ToNV21(Image image, NV21Image nv21) {
        int width  = image.getWidth();
        int height = image.getHeight();
        int ySize  = width*height;
        ByteBuffer yBuffer = image.getPlanes()[0].getBuffer(); // Y
        ByteBuffer uBuffer = image.getPlanes()[1].getBuffer(); // U
        ByteBuffer vBuffer = image.getPlanes()[2].getBuffer(); // V

        int yRowStride = image.getPlanes()[0].getRowStride();
        int vRowStride = image.getPlanes()[2].getRowStride();
        int pixelStride = image.getPlanes()[2].getPixelStride();
        assert(image.getPlanes()[0].getPixelStride() == 1);
        assert(image.getPlanes()[2].getRowStride() == image.getPlanes()[1].getRowStride());
        assert(image.getPlanes()[2].getPixelStride() == image.getPlanes()[1].getPixelStride());

        // Y plane: one bulk get() per row so any row padding
        // (yRowStride > width) is skipped without a per-byte loop.
        int pos = 0;
        int yBufferPos = -yRowStride; // primed so the first iteration lands on row 0
        for (; pos<ySize; pos+=width) {
            yBufferPos += yRowStride;
            yBuffer.position(yBufferPos);
            yBuffer.get(nv21.y, pos, width);
        }

        // Chroma: exactly width/2 valid samples per row. The previous bound,
        // vRowStride / pixelStride, counted row-padding bytes as samples,
        // so whenever the stride carries alignment padding it read garbage
        // and wrote past the end of nv21.uv.
        pos = 0;
        int chromaCols = width / 2;
        for (int row=0; row<height/2; row++) {
            for (int col=0; col<chromaCols; col++) {
                int vuPos = col*pixelStride + row * vRowStride;
                nv21.uv[pos++] = vBuffer.get(vuPos); // V first (NV21 order)
                nv21.uv[pos++] = uBuffer.get(vuPos);
            }
        }
}

以上代码可以按预期工作,但在我的实时相机预览应用中非常耗时(在 Snapdragon 865 CPU 上处理 720p 约需每帧 12ms),因此我尝试改用 JNI 实现来加速它,希望从原生字节访问中获得性能收益:

// Copies a YUV_420_888 frame (one direct ByteBuffer per plane) into Java
// byte arrays in NV21 layout: yArr receives the packed Y plane, uvArr the
// half-resolution interleaved V/U samples (V first).
// Assumes the U and V planes share vRowStride and vPixelStride — the Java
// caller asserts this before invoking us.
JNIEXPORT void JNICALL
Java_com_example_Utils_nFillYUVArray(JNIEnv *env, jclass clazz, jbyteArray yArr, jbyteArray uvArr,
                                                 jobject yBuf, jobject uBuf, jobject vBuf,
                                                 jint yRowStride, jint vRowStride, jint vPixelStride, jint w, jint h) {
    auto ySrcPtr = (jbyte const*)env->GetDirectBufferAddress(yBuf);
    auto uSrcPtr = (jbyte const*)env->GetDirectBufferAddress(uBuf);
    auto vSrcPtr = (jbyte const*)env->GetDirectBufferAddress(vBuf);
    // GetDirectBufferAddress returns NULL for non-direct buffers; bail out
    // instead of dereferencing a null pointer.
    if (ySrcPtr == nullptr || uSrcPtr == nullptr || vSrcPtr == nullptr) {
        return;
    }

    // Y plane: one JNI call per row (skips the yRowStride padding).
    for (int row = 0; row < h; row++) {
        env->SetByteArrayRegion(yArr, row * w, w, ySrcPtr + row * yRowStride);
    }

    // Chroma: the previous version issued two one-byte SetByteArrayRegion
    // calls per pixel pair (~w*h/2 JNI crossings), which is what made it
    // ~107 ms/frame. Pin uvArr once and interleave directly into it instead.
    // No other JNI calls may be made while the array is pinned.
    jbyte *uvDst = (jbyte *)env->GetPrimitiveArrayCritical(uvArr, nullptr);
    if (uvDst == nullptr) {
        return; // pin failed (OOM); an exception is already pending
    }
    for (int row = 0; row < h / 2; row++) {
        jbyte const *vRow = vSrcPtr + row * vRowStride;
        jbyte const *uRow = uSrcPtr + row * vRowStride; // U shares V's stride (asserted by caller)
        jbyte *dst = uvDst + row * w;
        for (int col = 0; col < w / 2; col++) {
            dst[2 * col]     = vRow[col * vPixelStride]; // V first (NV21)
            dst[2 * col + 1] = uRow[col * vPixelStride];
        }
    }
    env->ReleasePrimitiveArrayCritical(uvArr, uvDst, 0);
}

然而,它的表现比我预期的差得多(大约每帧 107ms)。最耗时的部分是 UV 缓冲区的交错(interleave)内存复制。
所以我的问题是是否有任何方法来加速和如何解决它?

更新

当 U、V 平面的 pixelStride 为 1 或 2 时(我相信这已覆盖绝大多数情况),我成功地对其进行了加速,详见我的回答。

x6h2sr28

x6h2sr281#

正如@snachmsm所说的libyuv可能会有帮助。我找到了一个可用的API I420ToNV21,但它不能接收pixelStride参数,因为YUV_420_888不能保证U,V平面中相邻像素之间不存在间隙。
当 pixelStride 为 2 时,我成功地使用 ARM NEON 内部函数(intrinsics)对其进行了加速(降至每帧约 2.7ms):

// NEON-accelerated YUV_420_888 -> NV21 copy. yArr receives the packed Y
// plane; uvArr receives interleaved V/U samples (V first). Handles the two
// common chroma layouts: pixelStride == 2 (semi-planar, gap byte between
// samples) and pixelStride == 1 (planar, densely packed).
// NOTE(review): nGroups assumes width is a multiple of 8; trailing chroma
// columns are not handled for other widths — TODO confirm callers' sizes.
JNIEXPORT void JNICALL
Java_com_example_Utils_nFillYUVArray(JNIEnv *env, jclass clazz, jbyteArray yArr, jbyteArray uvArr,
                                                 jobject yBuf, jobject uBuf, jobject vBuf,
                                                 jint yRowStride, jint vRowStride, jint uRowStride, jint pixelStride,
                                                 jint width, jint height) {
    auto ySrcPtr = (jbyte const*)env->GetDirectBufferAddress(yBuf);
    auto uSrcPtr = (jbyte const*)env->GetDirectBufferAddress(uBuf);
    auto vSrcPtr = (jbyte const*)env->GetDirectBufferAddress(vBuf);

    // Y plane: one JNI call per row (skips the yRowStride padding).
    for(int row = 0; row < height; row++){
        env->SetByteArrayRegion(yArr, row * width, width, ySrcPtr + row * yRowStride);
    }

    constexpr int kStride = 8;              // bytes per NEON d-register load/store
    const size_t nGroups = width / kStride;
    if(pixelStride == 2){
        // Semi-planar source: each plane stores its samples at even offsets
        // (v _ v _ ...). Keep the even bytes of V, shift U into the odd
        // bytes, then OR the halves together.
        int8_t *line = (int8_t*)alignedAlloc(width, 64); // one packed VU output row
        // -1 (0xFF) in even lanes selects the chroma sample; 0 drops the gap
        // byte. A stack constant — no heap allocation needed for 8 bytes.
        const int8_t mask[kStride] = { -1, 0, -1, 0, -1, 0, -1, 0 };
        int8x8_t vm = vld1_s8(mask);

        for(int row = 0; row < height / 2; row ++){
            size_t vrowOff = row * vRowStride;
            size_t urowOff = row * uRowStride;
            for(size_t g = 0; g < nGroups; g++) {
                size_t colOff = g * kStride;
                int8x8_t v0  = vld1_s8(vSrcPtr + vrowOff + colOff); // v?v?v?v?
                int8x8_t v1  = vld1_s8(uSrcPtr + urowOff + colOff); // u?u?u?u?
                int8x8_t a0  = vand_s8(v0, vm);                     // v0v0v0v0
                int16x4_t b1 = vreinterpret_s16_s8(vand_s8(v1, vm));
                // <<8 moves each U byte into the high half of its 16-bit
                // lane, i.e. the odd byte index on little-endian (Android).
                int8x8_t a1  = vreinterpret_s8_s16(vshl_n_s16(b1, 8)); // 0u0u0u0u
                int8x8_t r   = vorr_s8(a0, a1);                        // vuvuvuvu
                vst1_s8(line + colOff, r);
            }
            env->SetByteArrayRegion(uvArr, row * width, width, line);
        }

        free(line);
    }else if(pixelStride == 1){
        // Planar source: zip 8 V bytes with 8 U bytes into 16 interleaved
        // output bytes per group.
        int8_t *line = (int8_t*)alignedAlloc(width, 64);
        for(int row = 0; row < height / 2; row ++) {
            size_t vrowOff = row * vRowStride;
            size_t urowOff = row * uRowStride;
            for(size_t g = 0; g < nGroups / 2; g++){
                size_t colOff = g * kStride; // input offset: 8 samples per plane
                int8x8_t v0 = vld1_s8(vSrcPtr + vrowOff + colOff);
                int8x8_t v1 = vld1_s8(uSrcPtr + urowOff + colOff);
                int8x8x2_t vz = vzip_s8(v0, v1); // -> 16 bytes: vuvu...

                // BUG FIX: each group emits 2*kStride output bytes, so the
                // store offset must advance twice as fast as the load offset.
                // The original stored at line+colOff / line+colOff+kStride,
                // making consecutive groups overwrite each other's 2nd half.
                vst1_s8(line + 2 * colOff, vz.val[0]);
                vst1_s8(line + 2 * colOff + kStride, vz.val[1]);
            }

            env->SetByteArrayRegion(uvArr, row * width, width, line);
        }
        free(line);
    }
}

没有对pixelStride == 1的情况进行充分测试,但我相信它会按预期工作。

相关问题