// FFRecoder.cpp
#include "FFRecoder.h"

#include <tuple>
#include <array>
#include <vector>


FFRecoder::FFRecoder()
	:width_{},
	height_{},
	y_size_{},
	uv_size_{},
	pts_{},
	codec_ctx_{ nullptr },
	fmt_ctx_{ nullptr },
	out_stream_{ nullptr },
	yuv_frame_{ nullptr },
	img_convert_ctx{nullptr}
{
	bFirstFrame = true;
	last_src_pts = 0;
	last_pts = 0;
	// Initialize the members used by the stream-copy path and frame conversion,
	// so uninit()/write_pkt() never see indeterminate values.
	out_buffer = nullptr;
	frame_index = 0;
}

FFRecoder::~FFRecoder()
{
}


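// Transcoding initializer: set up an HEVC encoder (bit rate, GOP size,
// B-frames and quantizer settings are inherited from the source decoder
// context `avctx`), create the output muxer, allocate a reusable YUV420P
// frame, and open a swscale context that converts decoded frames to YUV420P.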
bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx, const char* outfile_name)
{
	uninit();

	width_ = w;
	height_ = h;
	y_size_ = w * h;
	uv_size_ = y_size_ / 4;

	// [1] Create the encoder
	const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_HEVC);
	if (!encoder) {
		LOG_ERROR("Find encoder AV_CODEC_ID_HEVC failed!");
		return false;
	}
	// Allocate the encoder context
	codec_ctx_ = avcodec_alloc_context3(encoder);
	if (!codec_ctx_) {
		LOG_ERROR("Alloc context for encoder failed!");
		return false;
	}
	// Set the encoder context parameters
	codec_ctx_->bit_rate = avctx->bit_rate;
	codec_ctx_->width = width_;
	codec_ctx_->height = height_;
	codec_ctx_->time_base = time_base;
	codec_ctx_->gop_size = avctx->gop_size;
	codec_ctx_->max_b_frames = avctx->max_b_frames;
	codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
	codec_ctx_->thread_count = 4;
	codec_ctx_->qmin = avctx->qmin;
	codec_ctx_->qmax = avctx->qmax;
	codec_ctx_->qcompress = avctx->qcompress;
	codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

	av_opt_set(codec_ctx_->priv_data, "preset", "ultrafast", 0);
	av_opt_set(codec_ctx_->priv_data, "tune", "zerolatency", 0);

	// Open the encoder
	int ret = avcodec_open2(codec_ctx_, encoder, nullptr);
	if (ret < 0) {
		LOG_ERROR("Open encoder failed!");
		return false;
	}

	// [2] Create the output context
	if (avformat_alloc_output_context2(&fmt_ctx_, nullptr, nullptr, outfile_name) < 0 || !fmt_ctx_) {
		LOG_ERROR("Alloc output context failed!");
		return false;
	}

	// [3] Add the output video stream
	out_stream_ = avformat_new_stream(fmt_ctx_, nullptr);
	out_stream_->id = 0;
	out_stream_->codecpar->codec_tag = 0;
	avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_);
	out_stream_->time_base = { 1,30 };

	av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1);

	// [4] Allocate a YUV frame
	yuv_frame_ = av_frame_alloc();
	yuv_frame_->format = AV_PIX_FMT_YUV420P;
	yuv_frame_->width = width_;
	yuv_frame_->height = height_;
	// Allocate the buffers for the YUV frame
	if (av_frame_get_buffer(yuv_frame_, 0) < 0) {
		av_frame_free(&yuv_frame_);
		yuv_frame_ = nullptr;
		LOG_ERROR("Frame get buffer failed!");
		return false;
	}

	// [5] Open the output file and write the header
	if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) {
		LOG_ERROR("avio_open failed!");
		return false;
	}
	if (avformat_write_header(fmt_ctx_, nullptr) < 0) {
		LOG_ERROR("Write header failed!");
		return false;
	}

	// Determine the buffer size required for the raw YUV data and allocate it
	int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, w, h, 1);
	out_buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

	//pFrameOut = av_frame_alloc();
	//av_image_fill_arrays(pFrameOut->data, pFrameOut->linesize, buffer, AV_PIX_FMT_YUV420P, w, h, 1);

	img_convert_ctx = sws_getContext(avctx->width, avctx->height, avctx->pix_fmt, w, h, AV_PIX_FMT_YUV420P,
		SWS_BICUBIC, nullptr, nullptr, nullptr);

	return true;
}

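// Stream-copy initializer: the encoder context only serves as a holder for the
// codec parameters and time base copied from `avctx`; packets fed through
// write_pkt() are remuxed into the output without re-encoding.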
bool FFRecoder::init(AVStream* stream, AVCodecContext* avctx, const char* outfile_name) {

	const AVCodec* encoder = avcodec_find_encoder(avctx->codec_id);
	if (!encoder) {
		LOG_ERROR("Find encoder for the input codec failed!");
		return false;
	}
	// Allocate the encoder context
	codec_ctx_ = avcodec_alloc_context3(encoder);
	if (!codec_ctx_) {
		LOG_ERROR("Alloc context for encoder failed!");
		return false;
	}

	// avcodec_copy_context() is deprecated (removed in FFmpeg 5.0);
	// copy the settings through codec parameters instead.
	AVCodecParameters* par = avcodec_parameters_alloc();
	avcodec_parameters_from_context(par, avctx);
	avcodec_parameters_to_context(codec_ctx_, par);
	avcodec_parameters_free(&par);
	codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	m_inStream = stream;

	// [2] Create the output context
	if (avformat_alloc_output_context2(&fmt_ctx_, nullptr, nullptr, outfile_name) < 0 || !fmt_ctx_) {
		LOG_ERROR("Alloc output context failed!");
		return false;
	}

	// [3] Add the output video stream
	out_stream_ = avformat_new_stream(fmt_ctx_, nullptr);

	out_stream_->id = 0;
	out_stream_->codecpar->codec_tag = 0;
	avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_);
	// out_stream_->time_base = { 1,25 };
	out_stream_->time_base = stream->time_base;
	out_stream_->r_frame_rate = stream->r_frame_rate;
	out_stream_->avg_frame_rate = stream->r_frame_rate;

	codec_ctx_->time_base = out_stream_->time_base;

	// Set the x264/x265 private options on the encoder context;
	// AVStream::codec is deprecated and no longer available.
	av_opt_set(codec_ctx_->priv_data, "preset", "ultrafast", 0);
	av_opt_set(codec_ctx_->priv_data, "tune", "zerolatency", 0);

	// av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1);

	// [4] Open the output file and write the header
	if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) {
		LOG_ERROR("avio_open failed!");
		return false;
	}
	if (avformat_write_header(fmt_ctx_, nullptr) < 0) {
		LOG_ERROR("Write header failed!");
		return false;
	}

	return true;
}

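// Tear down the muxer only (trailer, I/O, format context); counterpart of the
// stream-copy init() overload, which allocates no frames or scaler.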
void FFRecoder::release() {
	av_write_trailer(fmt_ctx_);

	// avformat_free_context() frees the streams and their parameters itself;
	// freeing them by hand through the deprecated AVStream::codec risks a double free.
	avio_closep(&fmt_ctx_->pb);
	avformat_free_context(fmt_ctx_);
	fmt_ctx_ = nullptr;
}

void FFRecoder::uninit()
{
	if (out_buffer) {
		av_free(out_buffer);
		out_buffer = nullptr;
	}

	if (yuv_frame_) {
		av_frame_free(&yuv_frame_);
		yuv_frame_ = nullptr;
	}

	if (img_convert_ctx) {
		sws_freeContext(img_convert_ctx);
		img_convert_ctx = nullptr;
	}

	if (fmt_ctx_) {
		av_write_trailer(fmt_ctx_);
		avio_close(fmt_ctx_->pb);
		avformat_free_context(fmt_ctx_);
		fmt_ctx_ = nullptr;
	}

	if (codec_ctx_) {
		avcodec_close(codec_ctx_);
		avcodec_free_context(&codec_ctx_);
		codec_ctx_ = nullptr;
	}

	width_ = 0;
	height_ = 0;
	y_size_ = 0;
	uv_size_ = 0;
	pts_ = 0;
}

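// Encode one BGR24 image: convert it to YUV420P in a thread-local scratch
// buffer, then hand it to write_yuv().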
bool FFRecoder::write_image(const uint8_t* bgr)
{
	// Thread-local scratch buffer for the converted YUV data
	thread_local std::vector<uint8_t> yuv_data;
	if (yuv_data.size() != y_size_ * 3 / 2) {
		yuv_data.resize(y_size_ * 3 / 2);
	}
	// Convert BGR to YUV420P
	bgr_to_yuv420p(bgr, yuv_data.data());

	return write_yuv(yuv_data.data());
}

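// Copy packed YUV420P data into the reusable AVFrame and encode it.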
bool FFRecoder::write_yuv(const uint8_t* yuv_data)
{
	// Copy the YUV data into the frame; the frame's rows may carry alignment
	// padding, so each plane is copied line by line.
	for (int i = 0; i < height_; i++) {
		memcpy(yuv_frame_->data[0] + i * yuv_frame_->linesize[0], yuv_data + width_ * i, width_);
	}
	const int uv_stride = width_ / 2;
	for (int i = 0; i < height_ / 2; i++) {
		memcpy(yuv_frame_->data[1] + i * yuv_frame_->linesize[1], yuv_data + y_size_ + uv_stride * i, uv_stride);
		memcpy(yuv_frame_->data[2] + i * yuv_frame_->linesize[2], yuv_data + y_size_ + uv_size_ + uv_stride * i, uv_stride);
	}

	yuv_frame_->pts = pts_++;

	return write_frame(yuv_frame_);
}

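// Rebase packet timestamps so the output starts at zero and stays monotonic:
// source pts deltas are preserved when a pts is present; otherwise pts
// advances by a fixed 512-tick step per packet.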
void FFRecoder::update_pts(AVPacket* pkt) {
	if (pkt->pts > 0) {
		if (bFirstFrame) {
			bFirstFrame = false;
			last_src_pts = pkt->pts;
		}
		int64_t pkt_pts = pkt->pts;
		pkt->pts = last_pts + (pkt_pts - last_src_pts);
		last_src_pts = pkt_pts;
		last_pts = pkt->pts;
		pkt->dts = pkt->pts;
	}
	else {
		if (bFirstFrame) {
			bFirstFrame = false;
			last_pts = 0;
		}
		pkt->pts = last_pts + 512;
		last_pts = pkt->pts;
	}
	
}

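// Write an already-encoded packet (stream-copy path). Packets without a pts
// get one synthesized from the input frame rate; all timestamps are then
// rescaled from the codec time base to the output stream time base.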
bool FFRecoder::write_pkt(AVPacket *pkt) {
	char errbuf[64]{ 0 };

	// av_packet_rescale_ts(pkt, codec_ctx_->time_base, out_stream_->time_base);
	// update_pts(pkt);
	// pkt->stream_index = out_stream_->index;

	if (pkt->pts == AV_NOPTS_VALUE) {
		// No timestamp from the source: synthesize one from the input frame rate.
		AVRational time_base1 = codec_ctx_->time_base;
		// Duration between two frames, in AV_TIME_BASE units (microseconds)
		int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(m_inStream->r_frame_rate);
		pkt->pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
		pkt->dts = pkt->pts;
		pkt->duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
		frame_index++;
	}
	// Convert PTS/DTS
	pkt->pts = av_rescale_q_rnd(pkt->pts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt->dts = av_rescale_q_rnd(pkt->dts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt->duration = av_rescale_q(pkt->duration, codec_ctx_->time_base, out_stream_->time_base);
	
	pkt->pos = -1;
	pkt->stream_index = out_stream_->index;
	fmt_ctx_->duration += pkt->duration;
	
	// Write the packet to the output stream
	int ret = av_write_frame(fmt_ctx_, pkt);
	if (ret < 0) {
		LOG_ERROR("Error while writing output packet: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
		return false;
	}
	return true;
}

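// Encode one frame and drain all resulting packets into the muxer; input that
// is not already YUV420P is converted via swscale first. A null frame puts
// the encoder into drain mode (used by flush()).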
bool FFRecoder::write_frame(AVFrame* frame)
{
	AVFrame* pFrameOut = nullptr;
	bool cloned = false;
	if (frame != nullptr && frame->format != AV_PIX_FMT_YUV420P) {
		// Convert non-YUV420P input into the pre-allocated out_buffer first.
		pFrameOut = av_frame_clone(frame);
		pFrameOut->format = AV_PIX_FMT_YUV420P;
		av_image_fill_arrays(pFrameOut->data, pFrameOut->linesize, out_buffer, AV_PIX_FMT_YUV420P, frame->width, frame->height, 1);
		sws_scale(img_convert_ctx, (const unsigned char* const*)frame->data, frame->linesize, 0, frame->height, pFrameOut->data, pFrameOut->linesize);
		cloned = true;
	}
	else {
		pFrameOut = frame;
	}
	char errbuf[64]{ 0 };
	// Send the frame to the encoder
	int ret = avcodec_send_frame(codec_ctx_, pFrameOut);
	// Free only the clone; freeing the caller's frame here would leave it dangling.
	if (cloned)
		av_frame_free(&pFrameOut);
	if (ret < 0) {
		LOG_ERROR("Error sending a frame to the encoder: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
		return false;
	}

	while (true) {
		AVPacket pkt{ 0 };
		// Fetch an encoded packet from the encoder
		ret = avcodec_receive_packet(codec_ctx_, &pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return true;
		else if (ret < 0) {
			LOG_ERROR("Error encoding a frame: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
			return false;
		}
		// Rescale the timestamps to the output stream's time_base
		av_packet_rescale_ts(&pkt, codec_ctx_->time_base, out_stream_->time_base);
		pkt.stream_index = out_stream_->index;
		update_pts(&pkt);
		// Write the packet to the output stream
		ret = av_interleaved_write_frame(fmt_ctx_, &pkt);
		//ret = av_write_frame(fmt_ctx_, &pkt);
		av_packet_unref(&pkt);
		if (ret < 0) {
			LOG_ERROR("Error while writing output packet: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
			return false;
		}
	/*	av_interleaved_write_frame(fmt_ctx_, nullptr);
		avio_flush(fmt_ctx_->pb);*/
	}

	return true;
}

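// Drain the encoder at end of stream by sending a null frame through write_frame().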
bool FFRecoder::flush()
{
	return write_frame(nullptr);
}

bool FFRecoder::flush_pkt()
{
	// av_write_frame(nullptr) flushes the muxer's buffers; negative means error.
	return av_write_frame(fmt_ctx_, nullptr) >= 0;
}

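// Convert a packed BGR24 buffer to planar YUV420P with libswscale. The
// SwsContext is cached per thread and rebuilt only when the geometry changes.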
bool FFRecoder::bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p)
{
	// Allocate the conversion context lazily
	thread_local std::tuple<int,int,int> params{ 0, 0, 0 };
	thread_local std::unique_ptr<SwsContext, decltype(&sws_freeContext)> sws_context{ nullptr, &sws_freeContext };

	std::tuple<int, int, int> new_params{ width_, height_, av_image_get_linesize(AV_PIX_FMT_YUV420P, width_, 0) };
	if (!sws_context || params != new_params)
	{
		sws_context.reset(sws_getContext(width_, height_, AV_PIX_FMT_BGR24, width_, height_,
			AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr));
		params = new_params;
	}
	// Convert
	const int stride = std::get<2>(params); // line size of the Y plane
	// sws_scale() takes arrays of plane pointers and line sizes: BGR24 is a
	// single packed plane of width_ * 3 bytes per row, while YUV420P has three
	// planes with half-width chroma rows.
	const uint8_t* src_planes[1] = { buf_bgr };
	const int src_linesize[1] = { width_ * 3 };
	uint8_t* dst_planes[3] = { buf_420p, buf_420p + y_size_, buf_420p + y_size_ + uv_size_ };
	const int dst_linesize[3] = { stride, stride / 2, stride / 2 };
	const int ret = sws_scale(sws_context.get(), src_planes, src_linesize, 0, height_, dst_planes, dst_linesize);

	// sws_scale() returns the height of the output slice on success.
	return ret == height_;
}
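
// ---------------------------------------------------------------------------
// Usage sketch (hypothetical, not part of this file): driving the transcoding
// path with BGR input. `in_time_base`, `decoder_ctx`, and next_bgr_frame() are
// placeholders for the caller's decoder setup and frame source.
//
//   FFRecoder recoder;
//   if (recoder.init(1920, 1080, in_time_base, decoder_ctx, "out.mp4")) {
//       while (const uint8_t* bgr = next_bgr_frame())
//           recoder.write_image(bgr);   // BGR24 -> YUV420P -> HEVC
//       recoder.flush();                // drain the encoder
//       recoder.uninit();               // write trailer, close the file
//   }
// ---------------------------------------------------------------------------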