I am using the ffmpeg library to create an AVI file, following the muxing.c ffmpeg example with the steps below:

  • Allocate the output media context: avformat_alloc_output_context2
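A minimal sketch of this step (the filename "out.avi" is just an example and error handling is trimmed; the AVI format can also be forced by passing "avi" as the format name):

AVFormatContext *oc = NULL;
/* Sketch only: let ffmpeg pick the AVI muxer from the .avi extension. */
int ret = avformat_alloc_output_context2(&oc, NULL, NULL, "out.avi");
if (ret < 0 || !oc) {
    /* could not allocate the output media context */
}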

  • Add the video stream using the AV_CODEC_ID_H264 codec, with the following parameter set:

int AddVideoStream(AVStream *&video_st, AVFormatContext *&oc, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    char strError[STR_LENGTH_256];

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id); // codec_id = AV_CODEC_ID_H264
    if (!(*codec)) {
        sprintf(strError, "Could not find encoder for '%s' line %d\n", avcodec_get_name(codec_id), __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }

    video_st = avformat_new_stream(oc, *codec);
    if (!video_st) {
        sprintf(strError, "Could not allocate stream line %d\n", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }
    video_st->id = oc->nb_streams - 1;
    c = video_st->codec;

    avcodec_get_context_defaults3(c, *codec);
    c->codec_id = codec_id;

    c->bit_rate = 500 * 1000;
    /* Resolution must be a multiple of two. */
    c->width    = 1280;
    c->height   = 720;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    c->time_base.den = 25 * 1000;
    c->time_base.num = 1000;
    c->gop_size      = 12; // emit one intra frame at most every 12 frames
    c->pix_fmt       = STREAM_PIX_FMT;
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return RS_OK;
}

  • Open the video stream: open_video

int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;
    char strError[STR_LENGTH_256];

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        sprintf(strError, "Could not open video codec line %d", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }

    /* allocate and init a re-usable frame */
    frame = avcodec_alloc_frame();
    if (!frame) {
        sprintf(strError, "Could not allocate video frame line %d", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        sprintf(strError, "Could not allocate picture line %d", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            sprintf(strError, "Could not allocate temporary picture line %d", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
    return RS_OK;
}

  • Write the AVI stream header: avformat_write_header
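A minimal sketch of this step, assuming oc is the context allocated earlier and filename holds the output path (error handling trimmed):

/* Open the output file if the muxer needs one, then write the AVI header. */
if (!(oc->oformat->flags & AVFMT_NOFILE)) {
    ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    if (ret < 0) { /* could not open the output file */ }
}
ret = avformat_write_header(oc, NULL);
if (ret < 0) { /* could not write the stream header */ }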

  • Encode video frames: avcodec_encode_video2

Case a: the input here is BGR frames, so I encode them to H264 and pass them to the next step.

Case b: the input here is H264 compressed frames (captured from an H264 RTP stream), so I skip this step and go straight to the next one.
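For case b, the depacketized H264 data is put into an AVPacket and handed to the next step. A minimal sketch of what I mean (nalData, nalSize and frameIndex are placeholder names, not identifiers from my code):

/* Sketch only: wrap one depacketized H264 access unit in an AVPacket.
 * nalData, nalSize and frameIndex are placeholders. */
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nalData;      /* pointer to the H264 bytes captured from RTP */
pkt.size = nalSize;      /* size of this access unit in bytes */
pkt.pts  = frameIndex;   /* counted in the codec time_base (1/25 with the settings above) */
pkt.dts  = pkt.pts;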

  • Write interleaved video frames: av_interleaved_write_frame(oc, &pkt)

Case a: the packets encoded in step 5 are written correctly, without any errors.

Case b: I always get an error from av_interleaved_write_frame with the value -22, which is probably EINVAL (invalid argument). Can someone tell me what is going wrong, or which parameters I am missing here? My WriteVideoFrame function is shown below.

int WriteVideoFrame(AVFormatContext *&oc, AVStream *&st,
                    uint8_t *imageData /* BGR data input */,
                    int width,
                    int height,
                    bool isStart,
                    bool isData,
                    bool isCompressed,
                    AVPacket *packet /* H264 data input */)
{
    int ret = 0;
    char strError[STR_LENGTH_256];

    if (isCompressed == false) // case a: raw BGR data, encode it here
    {
        static struct SwsContext *sws_ctx;
        AVCodecContext *c = st->codec;

        if (isData)
        {
            if (!frame) {
                //fprintf(stderr, "Could not allocate video frame\n");
                return RS_NOT_OK;
            }
            if (isStart == true)
                frame->pts = 0;
            /* Convert the input picture to the encoder's size and pixel format. */
            if (width != c->width || height != c->height)
            {
                if (!sws_ctx)
                {
                    sws_ctx = sws_getContext(width, height,
                        AV_PIX_FMT_BGR24, c->width, c->height,
                        AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, 0, 0, 0);

                    if (!sws_ctx)
                    {
                        sprintf(strError, "Could not initialize the conversion context line %d\n", __LINE__);
                        commonGlobal->WriteRuntimeBackupLogs(strError);
                        return RS_NOT_OK;
                    }
                }
                uint8_t *inData[1] = { imageData };     // BGR24 has a single plane
                int inLinesize[1]  = { 3 * width };     // BGR stride
                sws_scale(sws_ctx, inData, inLinesize, 0, height, dst_picture.data, dst_picture.linesize);
            }
            else
                BRG24ToYUV420p(dst_picture.data, imageData, width, height); //Phong Le changed this
        }
        if (oc->oformat->flags & AVFMT_RAWPICTURE)
        {
            /* Raw video case - directly store the picture in the packet */
            AVPacket pkt;
            av_init_packet(&pkt);

            pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data = dst_picture.data[0];
            pkt.size = sizeof(AVPicture);

            ret = av_interleaved_write_frame(oc, &pkt);
            av_free_packet(&pkt);
        }
        else
        {
            /* encode the image */
            AVPacket pkt;
            int got_output;

            av_init_packet(&pkt);
            pkt.data = NULL;    // packet data will be allocated by the encoder
            pkt.size = 0;
            ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
            if (ret < 0) {
                sprintf(strError, "Error encoding video frame line %d\n", __LINE__);
                commonGlobal->WriteRuntimeBackupLogs(strError);
                av_free_packet(&pkt);
                return RS_NOT_OK;
            }

            /* If size is zero, it means the image was buffered. */
            if (got_output) {
                if (c->coded_frame->key_frame)
                    pkt.flags |= AV_PKT_FLAG_KEY;

                pkt.stream_index = st->index;

                /* Write the compressed frame to the media file. */
                ret = av_interleaved_write_frame(oc, &pkt);
            }
            else
            {
                ret = 0;
            }
            av_free_packet(&pkt);
        }
        if (ret != 0)
        {
            sprintf(strError, "Error while writing video frame line %d\n", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }
        frame->pts += av_rescale_q(1, st->codec->time_base, st->time_base);
        return RS_OK;
    }
    else // case b: already-compressed H264 data, write it directly
    {
        if (isStart == true)
            packet->pts = 0;
        else
            packet->pts += av_rescale_q(1, st->codec->time_base, st->time_base);

        ret = av_interleaved_write_frame(oc, packet);
        if (ret < 0)
        {
            sprintf(strError, "Error while writing video frame line %d\n", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }

        return RS_OK;
    }
}
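For comparison, my understanding is that in case b the packet needs at least its stream index and timestamps (rescaled to the stream time_base) set before av_interleaved_write_frame. A minimal sketch, assuming packet already holds one encoded H264 access unit with pts/dts counted in frames (isKeyframe is a placeholder flag):

/* Sketch only: fields typically set on a pre-encoded packet before muxing. */
packet->stream_index = st->index;
packet->pts = av_rescale_q(packet->pts, st->codec->time_base, st->time_base);
packet->dts = av_rescale_q(packet->dts, st->codec->time_base, st->time_base);
if (isKeyframe)
    packet->flags |= AV_PKT_FLAG_KEY;
ret = av_interleaved_write_frame(oc, packet);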

  • Close the file.
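A minimal sketch of the close step, assuming the same oc and st as above:

/* Write the AVI trailer (index), close the codec and the file, free the context. */
av_write_trailer(oc);
avcodec_close(st->codec);
if (!(oc->oformat->flags & AVFMT_NOFILE))
    avio_close(oc->pb);
avformat_free_context(oc);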

  • Case a: the AVI file is created successfully.

  • Case b: it fails.

Thanks, Tien Vo