diff --git a/src/decoder/dvpp/FFRecoder.cpp b/src/decoder/dvpp/FFRecoder.cpp
index 8a4c1ce..e426009 100644
--- a/src/decoder/dvpp/FFRecoder.cpp
+++ b/src/decoder/dvpp/FFRecoder.cpp
@@ -1,9 +1,18 @@
+// FFRecoder.cpp
 #include "FFRecoder.h"
-
 #include
 #include
 #include
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/avutil.h>
+#include <libavutil/imgutils.h>
+#include <libavutil/opt.h>
+#include <libswscale/swscale.h>
+}
+
 
 
 FFRecoder::FFRecoder()
     :width_{},
@@ -14,20 +23,17 @@ FFRecoder::FFRecoder()
     codec_ctx_{ nullptr },
     fmt_ctx_{ nullptr },
     out_stream_{ nullptr },
-    yuv_frame_{ nullptr },
-    img_convert_ctx{nullptr}
+    yuv_frame_{ nullptr }
 {
-    bFirstFrame = true;
-    last_src_pts = 0;
-    last_pts = 0;
 }
 
 FFRecoder::~FFRecoder()
 {
+    uninit();
 }
 
-bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx, const char* outfile_name)
+bool FFRecoder::init(int w, int h, int fps, int bit_rate, const char* outfile_name)
 {
     uninit();
 
@@ -36,30 +42,32 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     y_size_ = w * h;
     uv_size_ = y_size_ / 4;
 
+    m_fps = fps;
+
     // [1] Create the encoder
-    const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_HEVC);
+    const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
     if (!encoder) {
-        LOG_ERROR("Find encoder AV_CODEC_ID_H264 failed!");
+        fprintf(stderr, "Find encoder AV_CODEC_ID_H264 failed!\n");
         return false;
     }
     // Allocate the encoder context
     codec_ctx_ = avcodec_alloc_context3(encoder);
     if (!codec_ctx_) {
-        LOG_ERROR("Alloc context for encoder contx failed!");
+        fprintf(stderr, "Alloc context for encoder failed!\n");
         return false;
     }
     // Configure the encoder context
-    codec_ctx_->bit_rate = avctx->bit_rate;
+    codec_ctx_->bit_rate = bit_rate;
     codec_ctx_->width = width_;
     codec_ctx_->height = height_;
-    codec_ctx_->time_base = time_base;
-    codec_ctx_->gop_size = avctx->gop_size;
-    codec_ctx_->max_b_frames = avctx->max_b_frames;
+    codec_ctx_->time_base = AVRational{ 1, fps };
+    codec_ctx_->gop_size = 50;
+    codec_ctx_->max_b_frames = 0;
     codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
     codec_ctx_->thread_count = 4;
-    codec_ctx_->qmin = avctx->qmin;
-    codec_ctx_->qmax = avctx->qmax;
-    codec_ctx_->qcompress = avctx->qcompress;
+    codec_ctx_->qmin = 10;
+    codec_ctx_->qmax = 51;
+    codec_ctx_->qcompress = 0.6f;
     codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 
     av_opt_set(codec_ctx_->priv_data, "preset", "ultrafast", 0);
@@ -68,7 +76,7 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     // Open the encoder
     int ret = avcodec_open2(codec_ctx_, encoder, nullptr);
     if (ret < 0) {
-        LOG_ERROR("Open encoder failed!");
+        fprintf(stderr, "Open encoder failed!\n");
         return false;
     }
 
@@ -80,7 +88,6 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     out_stream_->id = 0;
     out_stream_->codecpar->codec_tag = 0;
     avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_);
-    out_stream_->time_base = { 1,30 };
 
     av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1);
 
@@ -93,113 +100,25 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     if (av_frame_get_buffer(yuv_frame_, 0) < 0) {
         av_frame_free(&yuv_frame_);
         yuv_frame_ = nullptr;
-        LOG_ERROR("Frame get buffer failed!");
-        return false;
-    }
-
-    // [5] 打开输出视频文件并写入视频头信息
-    if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) {
-        LOG_ERROR("avio_open failed!");
-        return false;
-    }
-    if (avformat_write_header(fmt_ctx_, nullptr) < 0) {
-        LOG_ERROR("Write header failed!");
-        return false;
-    }
-
-    // 计算解码后原始数据所需缓冲区大小,并分配内存空间 Determine required buffer size and allocate buffer
-    int numBytes = 
av_image_get_buffer_size(AV_PIX_FMT_YUV420P, w, h, 1); - out_buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t)); - - //pFrameOut = av_frame_alloc(); - //av_image_fill_arrays(pFrameOut->data, pFrameOut->linesize, buffer, AV_PIX_FMT_YUV420P, w, h, 1); - - img_convert_ctx = sws_getContext(avctx->width, avctx->height, avctx->pix_fmt, w, h, AV_PIX_FMT_YUV420P, - SWS_BICUBIC, nullptr, nullptr, nullptr); - - return true; -} - -bool FFRecoder::init(AVStream* stream, AVCodecContext* avctx, const char* outfile_name) { - - const AVCodec* encoder = avcodec_find_encoder(avctx->codec_id); - if (!encoder) { - LOG_ERROR("Find encoder AV_CODEC_ID_H264 failed!"); - return false; - } - // 获取解码器上下文 - codec_ctx_ = avcodec_alloc_context3(encoder); - if (!codec_ctx_) { - LOG_ERROR("Alloc context for encoder contx failed!"); + fprintf(stderr, "Frame get buffer failed!\n"); return false; } - avcodec_copy_context(codec_ctx_, avctx); - codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; - m_inStream = stream; - - // [2] 创建输出上下文 - avformat_alloc_output_context2(&fmt_ctx_, nullptr, nullptr, outfile_name); - - // [3] 添加输出视频流 - out_stream_ = avformat_new_stream(fmt_ctx_, nullptr); - - out_stream_->id = 0; - out_stream_->codecpar->codec_tag = 0; - avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_); - // out_stream_->time_base = { 1,25 }; - out_stream_->time_base = stream->time_base; - out_stream_->r_frame_rate = stream->r_frame_rate; - out_stream_->avg_frame_rate = stream->r_frame_rate; - - codec_ctx_->time_base = out_stream_->time_base; - - av_opt_set(out_stream_->codec->priv_data, "preset", "ultrafast", 0); - av_opt_set(out_stream_->codec->priv_data, "tune", "zerolatency", 0); - - // av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1); - // [5] 打开输出视频文件并写入视频头信息 if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) { - LOG_ERROR("avio_open failed!"); + fprintf(stderr, "avio_open failed!\n"); return false; } if (avformat_write_header(fmt_ctx_, nullptr) < 0) { - LOG_ERROR("Write header failed!"); + fprintf(stderr, "Write header failed!\n"); return false; } return true; } -void FFRecoder::release() { - av_write_trailer(fmt_ctx_); - - avcodec_close(fmt_ctx_->streams[0]->codec); - av_freep(&fmt_ctx_->streams[0]->codec); - av_freep(&fmt_ctx_->streams[0]); - - avio_close(fmt_ctx_->pb); - av_free(fmt_ctx_); - fmt_ctx_ = nullptr; -} - void FFRecoder::uninit() { - //if (out_buffer) { - // av_free(out_buffer); - //} - - if (yuv_frame_) { - av_frame_free(&yuv_frame_); - yuv_frame_ = nullptr; - } - - if (img_convert_ctx) { - sws_freeContext(img_convert_ctx); - img_convert_ctx = nullptr; - } - if (fmt_ctx_) { av_write_trailer(fmt_ctx_); avio_close(fmt_ctx_->pb); @@ -213,6 +132,11 @@ void FFRecoder::uninit() codec_ctx_ = nullptr; } + if (yuv_frame_) { + av_frame_free(&yuv_frame_); + yuv_frame_ = nullptr; + } + width_ = 0; height_ = 0; y_size_ = 0; @@ -250,84 +174,13 @@ bool FFRecoder::write_yuv(const uint8_t* yuv_data) return write_frame(yuv_frame_); } -void FFRecoder::update_pts(AVPacket* pkt) { - if (pkt->pts > 0) { - if (bFirstFrame) { - bFirstFrame = false; - last_src_pts = pkt->pts; - } - int64_t pkt_pts = pkt->pts; - pkt->pts = last_pts + (pkt_pts - last_src_pts); - last_src_pts = pkt_pts; - last_pts = pkt->pts; - pkt->dts = pkt->pts; - } - else { - if (bFirstFrame) { - bFirstFrame = false; - last_pts = 0; - } - pkt->pts = last_pts + 512; - last_pts = pkt->pts; - } - -} - -bool FFRecoder::write_pkt(AVPacket *pkt) { - char errbuf[64]{ 0 }; - - // av_packet_rescale_ts(pkt, 
codec_ctx_->time_base, out_stream_->time_base);
-    // update_pts(pkt);
-    // pkt->stream_index = out_stream_->index;
-
-    if(pkt->pts==AV_NOPTS_VALUE) {
-        // printf("frame_index:%d", frame_index);
-        //Write PTS
-        AVRational time_base1 = codec_ctx_->time_base;
-        //Duration between 2 frames (us)
-        int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(m_inStream->r_frame_rate);
-        //Parameters
-        pkt->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
-        pkt->dts = pkt->pts;
-        pkt->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
-        frame_index++;
-    }
-    // Convert PTS/DTS
-    pkt->pts = av_rescale_q_rnd(pkt->pts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
-    pkt->dts = av_rescale_q_rnd(pkt->dts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
-    pkt->duration = av_rescale_q(pkt->duration, codec_ctx_->time_base, out_stream_->time_base);
-
-    pkt->pos = -1;
-    pkt->stream_index = out_stream_->index;
-    fmt_ctx_->duration += pkt->duration;
-
-    // 将数据写入到输出流
-    int ret = av_write_frame(fmt_ctx_, pkt);
-    if (ret < 0) {
-        LOG_ERROR("Error while writing output packet: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
-        return false;
-    }
-    return true;
-}
-
-bool FFRecoder::write_frame(AVFrame* frame)
+bool FFRecoder::write_frame(const AVFrame* frame)
 {
-    AVFrame *pFrameOut = nullptr;
-    if (frame != nullptr && frame->format != AV_PIX_FMT_YUV420P) {
-        pFrameOut = av_frame_clone(frame);
-        pFrameOut->format = AV_PIX_FMT_YUV420P;
-        av_image_fill_arrays(pFrameOut->data, pFrameOut->linesize, out_buffer, AV_PIX_FMT_YUV420P, frame->width, frame->height, 1);
-        sws_scale(img_convert_ctx, (const unsigned char* const*)frame->data, frame->linesize, 0, frame->height, pFrameOut->data, pFrameOut->linesize);
-    }
-    else {
-        pFrameOut = frame;
-    }
     char errbuf[64]{ 0 };
     // Send the frame to the encoder
-    int ret = avcodec_send_frame(codec_ctx_, pFrameOut);
-    av_frame_free(&pFrameOut);
+    int ret = avcodec_send_frame(codec_ctx_, frame);
     if (ret < 0) {
-        LOG_ERROR("Error sending a frame to the encoder: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        fprintf(stderr, "Error sending a frame to the encoder: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
         return false;
     }
 
@@ -338,36 +191,85 @@ bool FFRecoder::write_frame(AVFrame* frame)
         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
             return true;
         else if (ret < 0) {
-            LOG_ERROR("Error encoding a frame: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
+            fprintf(stderr, "Error encoding a frame: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
             return false;
         }
         // Rescale pts into the output stream's time_base
         av_packet_rescale_ts(&pkt, codec_ctx_->time_base, out_stream_->time_base);
         pkt.stream_index = out_stream_->index;
-        update_pts(&pkt);
         // Write the packet to the output stream
         ret = av_interleaved_write_frame(fmt_ctx_, &pkt);
-        //ret = av_write_frame(fmt_ctx_, &pkt);
         av_packet_unref(&pkt);
         if (ret < 0) {
-            LOG_ERROR("Error while writing output packet: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
+            fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
             return false;
         }
-        /* av_interleaved_write_frame(fmt_ctx_, nullptr);
-        avio_flush(fmt_ctx_->pb);*/
     }
 
     return true;
 }
 
-bool FFRecoder::flush()
-{
-    return write_frame(nullptr);
+static double a2d(AVRational a) {
+    return (double)a.den / a.num;
+}
+
+void FFRecoder::calc_pkt_ts(AVPacket* pkt, int frame_index) {
+    //Duration between 2 frames (us)
+    int64_t calc_duration = (double)AV_TIME_BASE / m_fps;
+    //Parameters
+    pkt->pts = (double)(frame_index * calc_duration) / (double)(av_q2d(codec_ctx_->time_base) * AV_TIME_BASE);
+    pkt->dts = pkt->pts;
+    pkt->duration = (double)calc_duration / (double)(av_q2d(codec_ctx_->time_base) * AV_TIME_BASE);
+}
+
+bool FFRecoder::write_pkt(AVPacket* new_pkt) {
+    frame_nb++;
+    calc_pkt_ts(new_pkt, frame_nb);
+    new_pkt->pts = av_rescale_q_rnd(new_pkt->pts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->dts = av_rescale_q_rnd(new_pkt->dts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->duration = av_rescale_q(new_pkt->duration, codec_ctx_->time_base, out_stream_->time_base);
+    new_pkt->stream_index = out_stream_->index;
+    // Write the packet to the output stream
+    int ret = av_interleaved_write_frame(fmt_ctx_, new_pkt);
+
+    char errbuf[64]{ 0 };
+    if (ret < 0) {
+        fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        return false;
+    }
+
+    return true;
 }
 
-bool FFRecoder::flush_pkt()
+bool FFRecoder::write_pkt_data(const uint8_t* pkt_data, int pkt_size)
+{
+    AVPacket* new_pkt = av_packet_alloc();
+    av_new_packet(new_pkt, pkt_size);
+    memcpy(new_pkt->data, pkt_data, pkt_size);
+
+    frame_nb++;
+    calc_pkt_ts(new_pkt, frame_nb);
+    new_pkt->pts = av_rescale_q_rnd(new_pkt->pts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->dts = av_rescale_q_rnd(new_pkt->dts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->duration = av_rescale_q(new_pkt->duration, codec_ctx_->time_base, out_stream_->time_base);
+    new_pkt->stream_index = out_stream_->index;
+    // Write the packet to the output stream
+    int ret = av_interleaved_write_frame(fmt_ctx_, new_pkt);
+
+    av_packet_free(&new_pkt);
+    new_pkt = nullptr;
+
+    char errbuf[64]{ 0 };
+    if (ret < 0) {
+        fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        return false;
+    }
+
+    return true;
+}
+
+bool FFRecoder::flush()
 {
-    return av_write_frame(fmt_ctx_, nullptr);
+    return write_frame(nullptr);
 }
 
 bool FFRecoder::bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p)
@@ -375,25 +277,30 @@
     // Allocate the conversion context
     thread_local std::tuple<int, int, int> params{ 0, 0, 0 };
     thread_local std::unique_ptr<SwsContext, decltype(&sws_freeContext)> sws_context{ nullptr, &sws_freeContext };
-
-    std::tuple<int, int, int> new_params{ width_, height_, av_image_get_linesize(AV_PIX_FMT_YUV420P, width_, 0) };
+
+    std::tuple<int, int, int> new_params{ width_, height_, av_image_get_linesize(AV_PIX_FMT_YUV420P, width_, 0) };
     if (!sws_context || params != new_params)
     {
         sws_context.reset(sws_getContext(width_, height_, AV_PIX_FMT_BGR24, width_, height_,
             AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr));
         params = new_params;
     }
+
     // Convert the pixel format
     const int stride = std::get<2>(params); // stride of one row of the Y plane
-    //const int ret = sws_scale(sws_context.get(),
-    //    std::array{ buf_bgr }.data(),/* bgr数据只有一个平面 */
-    //    std::array{ width_ * 3 }.data(),/* BGR所以图像宽度*3 */
-    //    0, height_,
-    //    std::array{ buf_420p, buf_420p + y_size_, buf_420p + y_size_ + uv_size_ }.data(),/* YUV三个平面的起始地址 */
-    //    std::array{ stride, stride / 2, stride / 2 }.data());/* YUV每个平面中一行的宽度 */
-    const int rgba_linesize = width_ * 3;
-    int yuv_linesize[3] = { stride, stride / 2, stride / 2 };
-    int ret = sws_scale(sws_context.get(), (const uint8_t* const*)buf_bgr, &rgba_linesize, 0, height_, (uint8_t* const*)buf_420p, yuv_linesize);
-
-    return 0;
+    const int ret = sws_scale(sws_context.get(),
+        &buf_bgr, /* BGR data has a single plane */
+        std::array<int, 1>{ width_ * 3 }.data(), /* BGR, so the stride is width * 3 */
+        0, height_,
+        std::array<uint8_t*, 3>{ buf_420p, buf_420p + y_size_, buf_420p + y_size_ + uv_size_ }.data(), /* start addresses of the three YUV planes */
+        std::array<int, 3>{ stride, stride / 2, stride / 2 }.data()
+    ); /* row stride of each YUV plane */
+
+    return ret >= 0;
+}
+
+bool FFRecoder::close()
+{
+    flush();
+    uninit();
+    return true;
+}
\ No newline at end of file
diff --git a/src/decoder/dvpp/FFRecoder.h b/src/decoder/dvpp/FFRecoder.h
index dfee0e4..c960aff 100644
--- a/src/decoder/dvpp/FFRecoder.h
+++ b/src/decoder/dvpp/FFRecoder.h
@@ -1,30 +1,31 @@
 #pragma once
 #include
-#include "depend_headers.h"
+struct AVFrame;
+struct AVStream;
+struct AVCodecContext;
+struct AVFormatContext;
+struct AVPacket;
 
 class FFRecoder
 {
 public:
     FFRecoder();
-    virtual ~FFRecoder();
+    ~FFRecoder();
 
-    bool init(int w, int h, AVRational time_base, AVCodecContext* avctx, const char* outfile_name);
+    bool init(int w, int h, int fps, int bit_rate, const char* outfile_name);
     void uninit();
     bool write_image(const uint8_t* bgr);
     bool write_yuv(const uint8_t* yuv_data);
-    bool write_frame(AVFrame* frame);
+    bool write_frame(const AVFrame* frame);
+    bool write_pkt(AVPacket* pkt);
+    bool write_pkt_data(const uint8_t* data, int size);
     bool flush();
-
-    // AVPacket 方式
-    bool init(AVStream* stream, AVCodecContext* avctx, const char* outfile_name);
-    bool write_pkt(AVPacket *pkt);
-    bool flush_pkt();
-    void release();
+    bool close();
 
 private:
     bool bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p);
-    void update_pts(AVPacket* pkt);
+    void calc_pkt_ts(AVPacket* pkt, int frame_index);
 
 private:
     int width_;
@@ -37,17 +38,7 @@ private:
     AVStream* out_stream_;
     AVFrame* yuv_frame_;
 
-    SwsContext * img_convert_ctx;
-    //AVFrame* pFrameOut;
-    uint8_t * out_buffer;
-
-    bool bFirstFrame;
-    int64_t last_src_pts;
-    int64_t last_pts;
-
-    int64_t first_pts;
-    int64_t first_dts;
+    int m_fps{1};
 
-    int64_t frame_index{0};
-    AVStream* m_inStream;
+    int frame_nb{0};
 };
\ No newline at end of file
diff --git a/src/decoder/dvpp/FFRecoder2.cpp b/src/decoder/dvpp/FFRecoder2.cpp
deleted file mode 100644
index f9dd526..0000000
--- a/src/decoder/dvpp/FFRecoder2.cpp
+++ /dev/null
@@ -1,269 +0,0 @@
-// FFRecoder2.cpp
-#include "FFRecoder2.h"
-#include
-#include
-#include
-
-extern "C" {
-#include
-#include
-#include
-#include
-#include
-#include
-}
-
-
-FFRecoder2::FFRecoder2()
-    :width_{},
-    height_{},
-    y_size_{},
-    uv_size_{},
-    pts_{},
-    codec_ctx_{ nullptr },
-    fmt_ctx_{ nullptr },
-    out_stream_{ nullptr },
-    yuv_frame_{ nullptr }
-{
-}
-
-FFRecoder2::~FFRecoder2()
-{
-    uninit();
-}
-
-
-bool FFRecoder2::init(int w, int h, int fps, int bit_rate, const char* outfile_name)
-{
-    uninit();
-
-    width_ = w;
-    height_ = h;
-    y_size_ = w * h;
-    uv_size_ = y_size_ / 4;
-
-    // [1] 创建解码器
-    const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
-    if (!encoder) {
-        fprintf(stderr, "Find encoder AV_CODEC_ID_H264 failed!\n");
-        return false;
-    }
-    // 获取解码器上下文
-    codec_ctx_ = avcodec_alloc_context3(encoder);
-    if (!codec_ctx_) {
-        fprintf(stderr, "Alloc context for encoder contx failed!\n");
-        return false;
-    }
-    // 设置解码器上下文参数
-    codec_ctx_->bit_rate = bit_rate;
-    codec_ctx_->width = width_;
-    codec_ctx_->height = height_;
-    codec_ctx_->time_base = AVRational{ 1, fps };
-    codec_ctx_->gop_size = 50;
-    codec_ctx_->max_b_frames = 0;
-    
codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P; - codec_ctx_->thread_count = 4; - codec_ctx_->qmin = 10; - codec_ctx_->qmax = 51; - codec_ctx_->qcompress = 0.6f; - codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; - - //av_opt_set(codec_ctx_->priv_data, "preset", "ultrafast", 0); - av_opt_set(codec_ctx_->priv_data, "tune", "zerolatency", 0); - - // 打开解码器 - int ret = avcodec_open2(codec_ctx_, encoder, nullptr); - if (ret < 0) { - fprintf(stderr, "Open encoder failed!\n"); - return false; - } - - // [2] 创建输出上下文 - avformat_alloc_output_context2(&fmt_ctx_, nullptr, nullptr, outfile_name); - - // [3] 添加输出视频流 - out_stream_ = avformat_new_stream(fmt_ctx_, nullptr); - out_stream_->id = 0; - out_stream_->codecpar->codec_tag = 0; - avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_); - - av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1); - - // 创建YUV格式帧 - yuv_frame_ = av_frame_alloc(); - yuv_frame_->format = AV_PIX_FMT_YUV420P; - yuv_frame_->width = width_; - yuv_frame_->height = height_; - // 为创建的YUV帧分配内存 - if (av_frame_get_buffer(yuv_frame_, 0) < 0) { - av_frame_free(&yuv_frame_); - yuv_frame_ = nullptr; - fprintf(stderr, "Frame get buffer failed!\n"); - return false; - } - - // [5] 打开输出视频文件并写入视频头信息 - if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) { - fprintf(stderr, "avio_open failed!\n"); - return false; - } - if (avformat_write_header(fmt_ctx_, nullptr) < 0) { - fprintf(stderr, "Write header failed!\n"); - return false; - } - - return true; -} - -void FFRecoder2::uninit() -{ - if (fmt_ctx_) { - av_write_trailer(fmt_ctx_); - avio_close(fmt_ctx_->pb); - avformat_free_context(fmt_ctx_); - fmt_ctx_ = nullptr; - } - - if (codec_ctx_) { - avcodec_close(codec_ctx_); - avcodec_free_context(&codec_ctx_); - codec_ctx_ = nullptr; - } - - if (yuv_frame_) { - av_frame_free(&yuv_frame_); - yuv_frame_ = nullptr; - } - - width_ = 0; - height_ = 0; - y_size_ = 0; - uv_size_ = 0; - pts_ = 0; -} - -bool FFRecoder2::write_image(const uint8_t* bgr) -{ - // 分配YUV格式数据的内存 - thread_local std::vector yuv_data; - if (yuv_data.size() != y_size_ * 3 / 2) { - yuv_data.resize(y_size_ * 3 / 2); - } - // BGR格式转YUV格式 - bgr_to_yuv420p(bgr, yuv_data.data()); - - return write_yuv(yuv_data.data()); -} - -bool FFRecoder2::write_yuv(const uint8_t* yuv_data) -{ - //拷贝YUV数据到帧,由于帧数据存在内存对齐,故需逐行拷贝 - for (int i = 0; i < height_; i++) { - memcpy(yuv_frame_->data[0] + i * yuv_frame_->linesize[0], yuv_data + width_ * i, width_); - } - const int uv_stride = width_ / 2; - for (int i = 0; i < height_ / 2; i++) { - memcpy(yuv_frame_->data[1] + i * yuv_frame_->linesize[1], yuv_data + y_size_ + uv_stride * i, uv_stride); - memcpy(yuv_frame_->data[2] + i * yuv_frame_->linesize[2], yuv_data + y_size_ + uv_size_ + uv_stride * i, uv_stride); - } - - yuv_frame_->pts = pts_++; - - return write_frame(yuv_frame_); -} - -bool FFRecoder2::write_frame(const AVFrame* frame) -{ - char errbuf[64]{ 0 }; - // 将帧数据发送到编码器 - int ret = avcodec_send_frame(codec_ctx_, frame); - if (ret < 0) { - fprintf(stderr, "Error sending a frame to the encoder: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret)); - return false; - } - - while (true) { - AVPacket pkt{ 0 }; - // 获取编码后的数据 - ret = avcodec_receive_packet(codec_ctx_, &pkt); - if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) - return true; - else if (ret < 0) { - fprintf(stderr, "Error encoding a frame: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret)); - return false; - } - // 将pts缩放到输出流的time_base上 - av_packet_rescale_ts(&pkt, codec_ctx_->time_base, 
out_stream_->time_base); - pkt.stream_index = out_stream_->index; - // 将数据写入到输出流 - ret = av_interleaved_write_frame(fmt_ctx_, &pkt); - av_packet_unref(&pkt); - if (ret < 0) { - fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret)); - return false; - } - } - - return true; -} - -static double a2d(AVRational a) { - return a.den / a.num; -} - -bool FFRecoder2::write_pkt(AVPacket* pkt) { - frame_nb++; - pkt->duration = int(a2d(codec_ctx_->time_base)); - pkt->pts = frame_nb; - // 将pts缩放到输出流的time_base上 - av_packet_rescale_ts(pkt, codec_ctx_->time_base, out_stream_->time_base); - pkt->stream_index = out_stream_->index; - // 将数据写入到输出流 - int ret = av_interleaved_write_frame(fmt_ctx_, pkt); - - char errbuf[64]{ 0 }; - if (ret < 0) { - fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret)); - return false; - } - - return true; -} - -bool FFRecoder2::flush() -{ - return write_frame(nullptr); -} - -bool FFRecoder2::bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p) -{ - // 分配转换上下文 - thread_local std::tuple params{ 0, 0, 0 }; - thread_local std::unique_ptr sws_context{ nullptr, &sws_freeContext }; - - std::tuple new_params{ width_, height_, av_image_get_linesize(AV_PIX_FMT_YUV420P, width_, 0) }; - if (!sws_context || params != new_params) - { - sws_context.reset(sws_getContext(width_, height_, AV_PIX_FMT_BGR24, width_, height_, - AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr)); - params = new_params; - } - - // 转换格式 - const int stride = std::get<2>(params);//Y平面一行的数据长度 - const int ret = sws_scale(sws_context.get(), - &buf_bgr,/* bgr数据只有一个平面 */ - std::array {width_ * 3}.data(),/* BGR所以图像宽度*3 */ - 0, height_, - std::array{ buf_420p, buf_420p + y_size_, buf_420p + y_size_ + uv_size_ }.data(),/* YUV三个平面的起始地址 */ - std::array{ stride, stride / 2, stride / 2 }.data() - );/* YUV每个平面中一行的宽度 */ - - return ret >= 0; -} - -bool FFRecoder2::close() -{ - flush(); - uninit(); -} \ No newline at end of file diff --git a/src/decoder/dvpp/FFRecoder2.h b/src/decoder/dvpp/FFRecoder2.h deleted file mode 100644 index 309aa81..0000000 --- a/src/decoder/dvpp/FFRecoder2.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once -#include - -class AVFrame; -class AVStream; -class AVCodecContext; -class AVFormatContext; -class AVPacket; - -class FFRecoder2 -{ -public: - FFRecoder2(); - ~FFRecoder2(); - - bool init(int w, int h, int fps, int bit_rate, const char* outfile_name); - void uninit(); - bool write_image(const uint8_t* bgr); - bool write_yuv(const uint8_t* yuv_data); - bool write_frame(const AVFrame* frame); - bool write_pkt(AVPacket* pkt); - bool flush(); - bool close(); - -private: - bool bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p); - -private: - int width_; - int height_; - int y_size_; - int uv_size_; - int pts_; - AVCodecContext* codec_ctx_; - AVFormatContext* fmt_ctx_; - AVStream* out_stream_; - AVFrame* yuv_frame_; - - int frame_nb{0}; -}; \ No newline at end of file diff --git a/src/decoder/dvpp/FFRecoderTaskManager.cpp b/src/decoder/dvpp/FFRecoderTaskManager.cpp index e2ce460..0b870bf 100644 --- a/src/decoder/dvpp/FFRecoderTaskManager.cpp +++ b/src/decoder/dvpp/FFRecoderTaskManager.cpp @@ -1,11 +1,6 @@ #include "FFRecoderTaskManager.h" #include -struct RecodeThreadParam { - FFRecoderTaskManager* _this; - RecodeParam param; -}; - static long get_cur_time() { chrono::time_point tpMicro @@ -69,68 +64,14 @@ bool FFRecoderTaskManager::init(int w, int h, int 
fps, int bit_rate) { return true; } -static AVPacket* packet_clone(AVPacket* pkt) { - AVPacket *new_pkt = av_packet_alloc(); - av_init_packet( new_pkt ); - av_new_packet(new_pkt, pkt->size); - memcpy(new_pkt->data, pkt->data, pkt->size); - new_pkt->size = pkt->size; - // new_pkt->pts = pkt->pts; - // new_pkt->dts = pkt->dts; - // new_pkt->stream_index = pkt->stream_index; - // new_pkt->duration = pkt->duration; - // new_pkt->pos = pkt->pos; - // new_pkt->flags = pkt->flags; - // av_copy_packet_side_data(new_pkt, pkt); - return new_pkt; -} - -static AVPacket* copy_packet(const AVPacket* src) -{ - AVPacket* dst = av_packet_alloc(); // 分配内存 - if (!dst) { - return NULL; - } - - // 复制所有字段 - av_packet_ref(dst, src); - - // 复制音视频数据 - dst->data = (uint8_t*)av_malloc(src->size); - memcpy(dst->data, src->data, src->size); - dst->size = src->size; - return dst; -} - void FFRecoderTaskManager::cache_pkt(AVPacket* pkt, long long frame_nb, string dec_name){ if(m_bExit) { // 任务退出了就不再缓存数据了 return; } - // 考虑到一个AVPacket中的数据并不很大,为减少与解码模块的耦合度,方便管理,这里做一个clone - // AVPacket *new_pkt = copy_packet(pkt); - - DataPacket* newDataPkt = new DataPacket(); - newDataPkt->pkt = pkt; - newDataPkt->frame_nb = frame_nb; - - if(is_key_frame(pkt)){ - // 越来越大的值 - newDataPkt->isKeyFrame = true; - LOG_INFO("[{}] - key frame_nb: {}", dec_name, frame_nb); - } else { - newDataPkt->isKeyFrame = false; - } - - AVPacket* npkt = newDataPkt->pkt; - if(npkt == nullptr) { - return ; - } else if (npkt->data == nullptr || npkt->size <= 0){ - return ; - } - std::lock_guard l_info(m_pkt_list_short_mtx); + DataPacket* newDataPkt = new DataPacket(pkt->data, pkt->size, frame_nb, is_key_frame(pkt)); m_pkt_list_short.push_back(newDataPkt); } @@ -277,20 +218,11 @@ void FFRecoderTaskManager::recode_thread() { break; } - auto it = m_pkt_list.begin(); - while (it != it_data) { - DataPacket* dataPkt = m_pkt_list.front(); - delete dataPkt; - dataPkt = nullptr; - m_pkt_list.pop_front(); - it = m_pkt_list.begin(); - } - LOG_INFO("start frame_nb: {}", (*it_data)->frame_nb); string file_name = recoderinfo.recoderPath; - FFRecoder2 ffrecoder; + FFRecoder ffrecoder; bool bInit = ffrecoder.init(m_width, m_height, m_fps, m_bit_rate, file_name.c_str()); if (!bInit) { LOG_ERROR("ffrecoder init error : {} {} {}", recoderinfo.task_id, recoderinfo.object_id, recoderinfo.frame_nb); @@ -308,16 +240,15 @@ void FFRecoderTaskManager::recode_thread() { if(dataPkt->frame_nb > recoderinfo.frame_nb) { break; } - AVPacket* pkt = dataPkt->pkt; - if(pkt == nullptr) { - LOG_ERROR("{} pkt is nullptr", recoderinfo.task_id); - continue; - } else if (pkt->data == nullptr || pkt->size <= 0){ - LOG_ERROR("{} pkt data is nullptr or size is {}", recoderinfo.task_id, pkt->size); + + if (dataPkt->pkt_data == nullptr || dataPkt->pkt_size <= 0){ + LOG_ERROR("{} pkt data is nullptr or size is {}", recoderinfo.task_id, dataPkt->pkt_size); continue; } - ffrecoder.write_pkt(pkt); + // LOG_INFO("ref count: {}", av_buffer_get_ref_count(pkt->buf)); + + ffrecoder.write_pkt_data(dataPkt->pkt_data, dataPkt->pkt_size); count++; end_frame_nb = (*it_save)->frame_nb; } diff --git a/src/decoder/dvpp/FFRecoderTaskManager.h b/src/decoder/dvpp/FFRecoderTaskManager.h index d30486f..dd731e0 100644 --- a/src/decoder/dvpp/FFRecoderTaskManager.h +++ b/src/decoder/dvpp/FFRecoderTaskManager.h @@ -1,4 +1,4 @@ -#include "FFRecoder2.h" +#include "FFRecoder.h" #include "../../ai_platform/common_header.h" #include "depend_headers.h" @@ -12,11 +12,6 @@ using namespace std; -struct RecodeParam { - AVRational time_base; - 
RecoderInfo recoderInfo;
-    AVCodecContext* avctx;
-};
 
 typedef std::function mq_callback_t;
 
@@ -65,7 +60,7 @@ private:
 
     mq_callback_t mq_publish_func;
 
-    // FFRecoder2
+    // FFRecoder
     int m_width;
     int m_height;
     int m_fps;
diff --git a/src/decoder/dvpp/VpcUtils.cpp b/src/decoder/dvpp/VpcUtils.cpp
index 84dde71..bb8e332 100644
--- a/src/decoder/dvpp/VpcUtils.cpp
+++ b/src/decoder/dvpp/VpcUtils.cpp
@@ -32,7 +32,6 @@ int VpcUtils::init(int devId){
 
     m_devId = devId;
 
-    aclrtSetDevice(m_devId);
     aclrtCreateContext(&context_, m_devId);
     CHECK_AND_RETURN(aclrtSetCurrentContext(context_), "aclrtSetCurrentContext failed");
 
@@ -54,7 +53,6 @@ int VpcUtils::init(int devId){
 
 DvppDataMemory* VpcUtils::convert2bgr(acldvppPicDesc *inputDesc_, int out_width, int out_height, bool key_frame){
 
-    aclrtSetDevice(m_devId);
     aclrtSetCurrentContext(context_);
 
     int out_buf_width = ALIGN_UP(out_width, 16) * 3;
@@ -107,7 +105,6 @@ DvppDataMemory* VpcUtils::convert2bgr(acldvppPicDesc *inputDesc_, int out_width,
 
 DvppDataMemory* VpcUtils::convert2bgr(DvppDataMemory* inMem){
 
-    aclrtSetDevice(m_devId);
     aclrtSetCurrentContext(context_);
 
     int out_width = inMem->getWidth();
@@ -174,7 +171,6 @@ DvppDataMemory* VpcUtils::convert2bgr(DvppDataMemory* inMem){
 
 DvppDataMemory* VpcUtils::resize(acldvppPicDesc *inputDesc_, int out_width, int out_height){
 
-    aclrtSetDevice(m_devId);
     aclrtSetCurrentContext(context_);
 
     int out_buf_width = ALIGN_UP(out_width, 16);
diff --git a/src/decoder/dvpp/depend_headers.h b/src/decoder/dvpp/depend_headers.h
index bfb25be..ef987ad 100644
--- a/src/decoder/dvpp/depend_headers.h
+++ b/src/decoder/dvpp/depend_headers.h
@@ -27,9 +27,10 @@ extern "C" {
     #include "libavutil/samplefmt.h"
     #include "libavformat/avformat.h"
     #include "libavcodec/avcodec.h"
-    #include <libavcodec/bsf.h>
-    #include <libavutil/opt.h>
-    #include <libavutil/timestamp.h>
+    #include "libavcodec/bsf.h"
+    #include "libavutil/opt.h"
+    #include "libavutil/timestamp.h"
+    #include "libswscale/swscale.h"
 }
 
@@ -40,16 +41,28 @@ extern "C" {
 
 struct DataPacket {
-    AVPacket* pkt {nullptr};
+    uint8_t *pkt_data{nullptr};
+    int pkt_size{0};
     unsigned long long frame_nb{0};
     bool isKeyFrame{false};
 
+    DataPacket(uint8_t *data, int size, unsigned long long frameNb, bool isKey) {
+        pkt_data = (uint8_t*)malloc(size);
+        memcpy(pkt_data, data, size);
+        pkt_size = size;
+        frame_nb = frameNb;
+        isKeyFrame = isKey;
+    }
+
     ~DataPacket(){
-        if(pkt != nullptr) {
+        if(pkt_data != nullptr) {
             // LOG_INFO("free frame_nb:{}", frame_nb);
-            av_packet_free(&pkt);
-            pkt = nullptr;
+            free(pkt_data);
+            pkt_data = nullptr;
         }
+        pkt_size = 0;
+        frame_nb = 0;
+        isKeyFrame = false;
     }
 };
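
Note on usage: with this change, FFRecoderTaskManager::recode_thread() drives FFRecoder purely through the packet path — init(w, h, fps, bit_rate, outfile), then write_pkt_data() for each cached packet, then close(). The sketch below shows that call sequence in isolation. It is only an illustration, not part of the patch: get_next_access_unit() and the 1920x1080 / 25 fps / 4 Mbps parameters are made-up placeholders, and the buffers handed to write_pkt_data() are assumed to be complete encoded frames from the same stream the recorder was configured for, starting on a keyframe.

// Hypothetical driver code -- not part of this diff.
#include <cstdint>
#include <vector>
#include "FFRecoder.h"

// Placeholder packet source: returns one complete encoded frame per call,
// or an empty vector when the stream is exhausted.
static std::vector<uint8_t> get_next_access_unit() { return {}; }

int main() {
    FFRecoder recoder;
    // Parameters are examples only; they must match the cached stream.
    if (!recoder.init(1920, 1080, 25, 4 * 1024 * 1024, "./recode.mp4")) {
        return 1;
    }

    for (auto au = get_next_access_unit(); !au.empty(); au = get_next_access_unit()) {
        // write_pkt_data() copies the bytes into an AVPacket, stamps pts/dts/duration
        // from the internal frame counter and fps, rescales them to the stream
        // time_base, and muxes the packet into the output file.
        recoder.write_pkt_data(au.data(), static_cast<int>(au.size()));
    }

    // close() drains the encoder via flush() and writes the trailer via uninit().
    recoder.close();
    return 0;
}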