// Requires Qt (QPixmap, QImage, QDebug) and the FFmpeg headers:
// extern "C" {
// #include <libavcodec/avcodec.h>   // avcodec_alloc_frame, avpicture_*, av_malloc
// #include <libswscale/swscale.h>   // sws_getContext, sws_scale
// }

// Grab the widget contents and convert them to a QImage.
//qWarning() << "update";
QPixmap pixmap = this->grab();
QImage image = pixmap.toImage();
//qWarning() << "byteCount " << image.byteCount();

// Earlier manual conversion attempt, kept for reference:
// BYTE * rgb = new BYTE[bufsize];
// BYTE * yuv = new BYTE[bufsize * 3 / 2];
// RGB2YUV(&pStream, &yuv, 4, 640, 480);

int w = image.width();
int h = image.height();
// image.invertPixels(QImage::InvertRgb);

//==============================================================
// Source frame: wrap the QImage's RGB32 pixels in an AVFrame.
AVFrame *pFrameRGB = avcodec_alloc_frame();
// Determine the required buffer size and allocate the buffer.
int numBytes1 = avpicture_get_size(AV_PIX_FMT_RGB32, w, h);
uint8_t *buffer1 = (uint8_t *)av_malloc(numBytes1 * sizeof(uint8_t));
avpicture_fill((AVPicture *)pFrameRGB, buffer1, AV_PIX_FMT_RGB32, w, h);
// Point the frame directly at the QImage pixels instead of buffer1, so no
// copy is needed; avpicture_fill only serves to set up the linesizes here.
pFrameRGB->data[0] = image.bits();
qWarning() << "numBytes1 " << numBytes1;

// Destination frame: YUV420P buffer of the same dimensions.
AVFrame *pFrameYUV = avcodec_alloc_frame();
int numBytes2 = avpicture_get_size(AV_PIX_FMT_YUV420P, w, h);
uint8_t *buffer2 = (uint8_t *)av_malloc(numBytes2 * sizeof(uint8_t));
avpicture_fill((AVPicture *)pFrameYUV, buffer2, AV_PIX_FMT_YUV420P, w, h);
qWarning() << "numBytes2 " << numBytes2;

// Convert RGB32 -> YUV420P with libswscale.
SwsContext *rgb_to_yuv_ctx = sws_getContext(w, h, AV_PIX_FMT_RGB32,
                                            w, h, AV_PIX_FMT_YUV420P,
                                            SWS_BICUBIC, NULL, NULL, NULL);
// sws_scale(rgb_to_yuv_ctx, pFrameRGB->data, pFrameRGB->linesize, 0,
//           pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
sws_scale(rgb_to_yuv_ctx, pFrameRGB->data, pFrameRGB->linesize, 0,
          h, pFrameYUV->data, pFrameYUV->linesize);

// Hand the YUV420P data (buffer2) on to the encoder/sender.
//int rc = LE_ESVideoData(instance, buffer2, numBytes2);
//qWarning() << rc << endl;

sws_freeContext(rgb_to_yuv_ctx);
av_free(buffer1);
av_free(buffer2);
av_free(pFrameRGB);
av_free(pFrameYUV);
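Note that avcodec_alloc_frame(), avpicture_get_size(), and avpicture_fill() used above are deprecated and have been removed from current FFmpeg releases. As a minimal sketch (not from the original post), the same conversion could be written against the modern FFmpeg API roughly as follows; rgbImageToYuv420p is a hypothetical helper name, and the code assumes the QImage is Format_RGB32 (which matches AV_PIX_FMT_RGB32) with even dimensions for 4:2:0 subsampling:

// Hypothetical modernized sketch: RGB32 QImage -> YUV420P AVFrame.
extern "C" {
#include <libavutil/frame.h>
#include <libswscale/swscale.h>
}
#include <QImage>

// Returns a newly allocated YUV420P frame, or nullptr on failure.
static AVFrame *rgbImageToYuv420p(const QImage &image)
{
    const int w = image.width();
    const int h = image.height();

    AVFrame *yuv = av_frame_alloc();
    if (!yuv)
        return nullptr;
    yuv->format = AV_PIX_FMT_YUV420P;
    yuv->width  = w;
    yuv->height = h;
    if (av_frame_get_buffer(yuv, 0) < 0) {   // allocates yuv->data/linesize
        av_frame_free(&yuv);
        return nullptr;
    }

    // Wrap the QImage pixels without copying: constBits() is row 0 and
    // bytesPerLine() is the stride, so any row padding is handled.
    const uint8_t *srcData[4] = { image.constBits(), nullptr, nullptr, nullptr };
    const int srcLinesize[4] = { static_cast<int>(image.bytesPerLine()), 0, 0, 0 };

    SwsContext *ctx = sws_getContext(w, h, AV_PIX_FMT_RGB32,
                                     w, h, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (!ctx) {
        av_frame_free(&yuv);
        return nullptr;
    }
    sws_scale(ctx, srcData, srcLinesize, 0, h, yuv->data, yuv->linesize);
    sws_freeContext(ctx);
    return yuv;   // caller frees with av_frame_free(&frame)
}

Compared with the original snippet, this drops the unused RGB-side buffer entirely (sws_scale only needs the source data pointers and strides) and lets av_frame_get_buffer() manage the destination planes.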
Original source: http://blog.csdn.net/ahyswang/article/details/40318511