Commit 7ded47c99f4181b042780c4d0fa9215f471b8b37

Authored by Hu Chunming
2 parents f6cb983a 09a835c9

Merge branch 'hotfix-stream' into release_v2.1

.vscode/launch.json
@@ -104,8 +104,8 @@
     "args": [],
     "stopAtEntry": false,
     "externalConsole": false,
-    "cwd": "/home/cmhu/vpt_ascend_arm/src/demo",
-    "program": "/home/cmhu/vpt_ascend_arm/src/demo/build/Debug/outDebug",
+    "cwd": "/home/cmhu/vpt_ascend_arm/src/decoder/dvpp",
+    "program": "/home/cmhu/vpt_ascend_arm/src/decoder/dvpp/build/Debug/outDebug",
     "MIMode": "gdb",
     "miDebuggerPath": "gdb",
     "setupCommands": [
build/src/Makefile
@@ -23,7 +23,7 @@ JRTP_ROOT = $(THIRDPARTY_ROOT)/gb28181_3rd/jrtp_export
 DEFS = -DENABLE_DVPP_INTERFACE -DWITH_FACE_DET_SS -DPOST_USE_RABBITMQ -DUSE_VILLAGE
 # DEFS = -DENABLE_DVPP_INTERFACE -DUSE_VILLAGE
 
-include_dir=-I/usr/local/Ascend/ascend-toolkit/6.3.RC1/aarch64-linux/include \
+include_dir=-I/usr/local/Ascend/ascend-toolkit/latest/include \
     -I $(SRC_ROOT)/common \
     -I $(SPDLOG_ROOT)/include \
     -I $(OPENCV_ROOT)/include \
@@ -46,7 +46,7 @@ LIBS= -L $(SPDLOG_ROOT)/lib -l:libspdlog.a \
     -L $(RABBITMQ_CLIENT_ROOT)/lib/aarch64-linux-gnu -l:librabbitmq.a \
     -L $(AUTHORITY_DIR)/lib -latlaslic \
     -L $(JRTP_ROOT)/jrtplib/lib -l:libjrtp.a \
-    -L/usr/local/Ascend/ascend-toolkit/6.3.RC1/runtime/lib64 -lacl_dvpp -lascendcl \
+    -L/usr/local/Ascend/ascend-toolkit/latest/runtime/lib64 -lacl_dvpp -lascendcl \
    -L $(BOOST_ROOT)/lib -lboost_system \
 
 
src/ai_engine_module/VPTProcess.cpp
@@ -70,6 +70,10 @@ int VPTProcess::process_gpu(sy_img * batch_img, vector<string>& tasklist,
     }
 
     do{
+
+        aclrtSetDevice(m_devId);
+        int ret = aclrtSetCurrentContext(m_algorthim_ctx);
+
        /* When there are too many streams, split the work into sub-batches of at most max_batchsize */
        int cur_batch_size = m_max_batchsize;
        int cycleTimes = batchsize / cur_batch_size + (batchsize % cur_batch_size == 0 ? 0 : 1);
@@ -80,9 +84,6 @@ int VPTProcess::process_gpu(sy_img * batch_img, vector<string>& tasklist,
        int startbatch = c*cur_batch_size;
 
        vpt_result *real_res = vpt_det_result + startbatch;
-
-        // aclrtSetDevice(m_devId);
-        int ret = aclrtSetCurrentContext(m_algorthim_ctx);
        if(ACL_SUCCESS != ret){
            break;
        }
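
Note on the VPTProcess.cpp hunk above: the device/context binding now happens once, before the sub-batch loop, instead of once per sub-batch. A minimal, self-contained sketch of that pattern (not the project code; `process_batches` is a placeholder and only the standard ACL runtime calls are assumed):

```cpp
// Sketch: bind the NPU device and context once, then run every sub-batch under it.
#include "acl/acl.h"

int process_batches(int dev_id, aclrtContext ctx, int batchsize, int max_batchsize) {
    aclrtSetDevice(dev_id);
    if (aclrtSetCurrentContext(ctx) != ACL_SUCCESS) {
        return -1;                                     // fail before queuing any work
    }
    int cycles = batchsize / max_batchsize + (batchsize % max_batchsize == 0 ? 0 : 1);
    for (int c = 0; c < cycles; ++c) {
        int start = c * max_batchsize;                 // offset of this sub-batch
        (void)start;                                   // per-batch inference would run here
    }
    return 0;
}
```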
src/decoder/dvpp/FFRecoder.cpp
+// FFRecoder.cpp
 #include "FFRecoder.h"
-
 #include <tuple>
 #include <array>
 #include <vector>
 
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/opt.h>
+#include <libavutil/timestamp.h>
+#include <libavutil/imgutils.h>
+#include <libswscale/swscale.h>
+}
+
 
 FFRecoder::FFRecoder()
     :width_{},
@@ -14,20 +23,17 @@ FFRecoder::FFRecoder()
     codec_ctx_{ nullptr },
     fmt_ctx_{ nullptr },
     out_stream_{ nullptr },
-    yuv_frame_{ nullptr },
-    img_convert_ctx{nullptr}
+    yuv_frame_{ nullptr }
 {
-    bFirstFrame = true;
-    last_src_pts = 0;
-    last_pts = 0;
 }
 
 FFRecoder::~FFRecoder()
 {
+    uninit();
 }
 
 
-bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx, const char* outfile_name)
+bool FFRecoder::init(int w, int h, int fps, int bit_rate, const char* outfile_name)
 {
     uninit();
 
@@ -36,30 +42,32 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     y_size_ = w * h;
     uv_size_ = y_size_ / 4;
 
+    m_fps = fps;
+
     // [1] Find the encoder
-    const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_HEVC);
+    const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
     if (!encoder) {
-        LOG_ERROR("Find encoder AV_CODEC_ID_H264 failed!");
+        fprintf(stderr, "Find encoder AV_CODEC_ID_H264 failed!\n");
         return false;
     }
     // Allocate the encoder context
     codec_ctx_ = avcodec_alloc_context3(encoder);
     if (!codec_ctx_) {
-        LOG_ERROR("Alloc context for encoder contx failed!");
+        fprintf(stderr, "Alloc context for encoder contx failed!\n");
         return false;
     }
     // Configure the encoder context
-    codec_ctx_->bit_rate = avctx->bit_rate;
+    codec_ctx_->bit_rate = bit_rate;
     codec_ctx_->width = width_;
     codec_ctx_->height = height_;
-    codec_ctx_->time_base = time_base;
-    codec_ctx_->gop_size = avctx->gop_size;
-    codec_ctx_->max_b_frames = avctx->max_b_frames;
+    codec_ctx_->time_base = AVRational{ 1, fps };
+    codec_ctx_->gop_size = 50;
+    codec_ctx_->max_b_frames = 0;
     codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
     codec_ctx_->thread_count = 4;
-    codec_ctx_->qmin = avctx->qmin;
-    codec_ctx_->qmax = avctx->qmax;
-    codec_ctx_->qcompress = avctx->qcompress;
+    codec_ctx_->qmin = 10;
+    codec_ctx_->qmax = 51;
+    codec_ctx_->qcompress = 0.6f;
     codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 
     av_opt_set(codec_ctx_->priv_data, "preset", "ultrafast", 0);
@@ -68,7 +76,7 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     // Open the encoder
     int ret = avcodec_open2(codec_ctx_, encoder, nullptr);
     if (ret < 0) {
-        LOG_ERROR("Open encoder failed!");
+        fprintf(stderr, "Open encoder failed!\n");
         return false;
     }
 
@@ -80,7 +88,6 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     out_stream_->id = 0;
     out_stream_->codecpar->codec_tag = 0;
     avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_);
-    out_stream_->time_base = { 1,30 };
 
     av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1);
 
@@ -93,113 +100,25 @@ bool FFRecoder::init(int w, int h, AVRational time_base, AVCodecContext* avctx,
     if (av_frame_get_buffer(yuv_frame_, 0) < 0) {
         av_frame_free(&yuv_frame_);
         yuv_frame_ = nullptr;
-        LOG_ERROR("Frame get buffer failed!");
-        return false;
-    }
-
-    // [5] Open the output video file and write the header
-    if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) {
-        LOG_ERROR("avio_open failed!");
-        return false;
-    }
-    if (avformat_write_header(fmt_ctx_, nullptr) < 0) {
-        LOG_ERROR("Write header failed!");
-        return false;
-    }
-
-    // Determine the buffer size required for the raw (decoded) data and allocate it
-    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, w, h, 1);
-    out_buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
-
-    //pFrameOut = av_frame_alloc();
-    //av_image_fill_arrays(pFrameOut->data, pFrameOut->linesize, buffer, AV_PIX_FMT_YUV420P, w, h, 1);
-
-    img_convert_ctx = sws_getContext(avctx->width, avctx->height, avctx->pix_fmt, w, h, AV_PIX_FMT_YUV420P,
-        SWS_BICUBIC, nullptr, nullptr, nullptr);
-
-    return true;
-}
-
-bool FFRecoder::init(AVStream* stream, AVCodecContext* avctx, const char* outfile_name) {
-
-    const AVCodec* encoder = avcodec_find_encoder(avctx->codec_id);
-    if (!encoder) {
-        LOG_ERROR("Find encoder AV_CODEC_ID_H264 failed!");
-        return false;
-    }
-    // Allocate the encoder context
-    codec_ctx_ = avcodec_alloc_context3(encoder);
-    if (!codec_ctx_) {
-        LOG_ERROR("Alloc context for encoder contx failed!");
+        fprintf(stderr, "Frame get buffer failed!\n");
         return false;
     }
 
-    avcodec_copy_context(codec_ctx_, avctx);
-    codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-    m_inStream = stream;
-
-    // [2] Create the output context
-    avformat_alloc_output_context2(&fmt_ctx_, nullptr, nullptr, outfile_name);
-
-    // [3] Add the output video stream
-    out_stream_ = avformat_new_stream(fmt_ctx_, nullptr);
-
-    out_stream_->id = 0;
-    out_stream_->codecpar->codec_tag = 0;
-    avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_);
-    // out_stream_->time_base = { 1,25 };
-    out_stream_->time_base = stream->time_base;
-    out_stream_->r_frame_rate = stream->r_frame_rate;
-    out_stream_->avg_frame_rate = stream->r_frame_rate;
-
-    codec_ctx_->time_base = out_stream_->time_base;
-
-    av_opt_set(out_stream_->codec->priv_data, "preset", "ultrafast", 0);
-    av_opt_set(out_stream_->codec->priv_data, "tune", "zerolatency", 0);
-
-    // av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1);
-
     // [5] Open the output video file and write the header
     if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) {
-        LOG_ERROR("avio_open failed!");
+        fprintf(stderr, "avio_open failed!\n");
         return false;
     }
     if (avformat_write_header(fmt_ctx_, nullptr) < 0) {
-        LOG_ERROR("Write header failed!");
+        fprintf(stderr, "Write header failed!\n");
         return false;
     }
 
     return true;
 }
 
-void FFRecoder::release() {
-    av_write_trailer(fmt_ctx_);
-
-    avcodec_close(fmt_ctx_->streams[0]->codec);
-    av_freep(&fmt_ctx_->streams[0]->codec);
-    av_freep(&fmt_ctx_->streams[0]);
-
-    avio_close(fmt_ctx_->pb);
-    av_free(fmt_ctx_);
-    fmt_ctx_ = nullptr;
-}
-
 void FFRecoder::uninit()
 {
-    //if (out_buffer) {
-    //    av_free(out_buffer);
-    //}
-
-    if (yuv_frame_) {
-        av_frame_free(&yuv_frame_);
-        yuv_frame_ = nullptr;
-    }
-
-    if (img_convert_ctx) {
-        sws_freeContext(img_convert_ctx);
-        img_convert_ctx = nullptr;
-    }
-
     if (fmt_ctx_) {
         av_write_trailer(fmt_ctx_);
         avio_close(fmt_ctx_->pb);
@@ -213,6 +132,11 @@ void FFRecoder::uninit()
         codec_ctx_ = nullptr;
     }
 
+    if (yuv_frame_) {
+        av_frame_free(&yuv_frame_);
+        yuv_frame_ = nullptr;
+    }
+
     width_ = 0;
     height_ = 0;
     y_size_ = 0;
@@ -250,84 +174,13 @@ bool FFRecoder::write_yuv(const uint8_t* yuv_data)
     return write_frame(yuv_frame_);
 }
 
-void FFRecoder::update_pts(AVPacket* pkt) {
-    if (pkt->pts > 0) {
-        if (bFirstFrame) {
-            bFirstFrame = false;
-            last_src_pts = pkt->pts;
-        }
-        int64_t pkt_pts = pkt->pts;
-        pkt->pts = last_pts + (pkt_pts - last_src_pts);
-        last_src_pts = pkt_pts;
-        last_pts = pkt->pts;
-        pkt->dts = pkt->pts;
-    }
-    else {
-        if (bFirstFrame) {
-            bFirstFrame = false;
-            last_pts = 0;
-        }
-        pkt->pts = last_pts + 512;
-        last_pts = pkt->pts;
-    }
-
-}
-
-bool FFRecoder::write_pkt(AVPacket *pkt) {
-    char errbuf[64]{ 0 };
-
-    // av_packet_rescale_ts(pkt, codec_ctx_->time_base, out_stream_->time_base);
-    // update_pts(pkt);
-    // pkt->stream_index = out_stream_->index;
-
-    if(pkt->pts==AV_NOPTS_VALUE) {
-        // printf("frame_index:%d", frame_index);
-        //Write PTS
-        AVRational time_base1 = codec_ctx_->time_base;
-        //Duration between 2 frames (us)
-        int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(m_inStream->r_frame_rate);
-        //Parameters
-        pkt->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
-        pkt->dts = pkt->pts;
-        pkt->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
-        frame_index++;
-    }
-    // Convert PTS/DTS
-    pkt->pts = av_rescale_q_rnd(pkt->pts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
-    pkt->dts = av_rescale_q_rnd(pkt->dts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
-    pkt->duration = av_rescale_q(pkt->duration, codec_ctx_->time_base, out_stream_->time_base);
-
-    pkt->pos = -1;
-    pkt->stream_index = out_stream_->index;
-    fmt_ctx_->duration += pkt->duration;
-
-    // Write the packet to the output stream
-    int ret = av_write_frame(fmt_ctx_, pkt);
-    if (ret < 0) {
-        LOG_ERROR("Error while writing output packet: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
-        return false;
-    }
-    return true;
-}
-
-bool FFRecoder::write_frame(AVFrame* frame)
+bool FFRecoder::write_frame(const AVFrame* frame)
 {
-    AVFrame *pFrameOut = nullptr;
-    if (frame != nullptr && frame->format != AV_PIX_FMT_YUV420P) {
-        pFrameOut = av_frame_clone(frame);
-        pFrameOut->format = AV_PIX_FMT_YUV420P;
-        av_image_fill_arrays(pFrameOut->data, pFrameOut->linesize, out_buffer, AV_PIX_FMT_YUV420P, frame->width, frame->height, 1);
-        sws_scale(img_convert_ctx, (const unsigned char* const*)frame->data, frame->linesize, 0, frame->height, pFrameOut->data, pFrameOut->linesize);
-    }
-    else {
-        pFrameOut = frame;
-    }
     char errbuf[64]{ 0 };
     // Send the frame to the encoder
-    int ret = avcodec_send_frame(codec_ctx_, pFrameOut);
-    av_frame_free(&pFrameOut);
+    int ret = avcodec_send_frame(codec_ctx_, frame);
     if (ret < 0) {
-        LOG_ERROR("Error sending a frame to the encoder: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        fprintf(stderr, "Error sending a frame to the encoder: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
         return false;
     }
 
@@ -338,36 +191,85 @@ bool FFRecoder::write_frame(AVFrame* frame)
     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
         return true;
     else if (ret < 0) {
-        LOG_ERROR("Error encoding a frame: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        fprintf(stderr, "Error encoding a frame: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
         return false;
     }
     // Rescale the pts onto the output stream's time_base
     av_packet_rescale_ts(&pkt, codec_ctx_->time_base, out_stream_->time_base);
     pkt.stream_index = out_stream_->index;
-    update_pts(&pkt);
     // Write the packet to the output stream
     ret = av_interleaved_write_frame(fmt_ctx_, &pkt);
-    //ret = av_write_frame(fmt_ctx_, &pkt);
     av_packet_unref(&pkt);
     if (ret < 0) {
-        LOG_ERROR("Error while writing output packet: {}", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
        return false;
     }
-    /* av_interleaved_write_frame(fmt_ctx_, nullptr);
-    avio_flush(fmt_ctx_->pb);*/
     }
 
     return true;
 }
 
-bool FFRecoder::flush()
-{
-    return write_frame(nullptr);
+static double a2d(AVRational a) {
+    return a.den / a.num;
+}
+
+void FFRecoder::calc_pkt_ts(AVPacket* pkt, int frame_index) {
+    //Duration between 2 frames (us)
+    int64_t calc_duration=(double)AV_TIME_BASE/m_fps;
+    //Parameters
+    pkt->pts=(double)(frame_index*calc_duration)/(double)(av_q2d(codec_ctx_->time_base)*AV_TIME_BASE);
+    pkt->dts=pkt->pts;
+    pkt->duration=(double)calc_duration/(double)(av_q2d(codec_ctx_->time_base)*AV_TIME_BASE);
+}
+
+bool FFRecoder::write_pkt(AVPacket* new_pkt) {
+    frame_nb++;
+    calc_pkt_ts(new_pkt, frame_nb);
+    new_pkt->pts = av_rescale_q_rnd(new_pkt->pts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->dts = av_rescale_q_rnd(new_pkt->dts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->duration = av_rescale_q(new_pkt->duration, codec_ctx_->time_base, out_stream_->time_base);
+    new_pkt->stream_index = out_stream_->index;
+    // Write the packet to the output stream
+    int ret = av_interleaved_write_frame(fmt_ctx_, new_pkt);
+
+    char errbuf[64]{ 0 };
+    if (ret < 0) {
+        fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        return false;
+    }
+
+    return true;
 }
 
-bool FFRecoder::flush_pkt()
+bool FFRecoder::write_pkt_data(const uint8_t* pkt_data, int pkt_size) {
+    AVPacket* new_pkt = av_packet_alloc();
+    av_new_packet(new_pkt, pkt_size);
+    memcpy(new_pkt->data, pkt_data, pkt_size);
+
+    frame_nb++;
+    calc_pkt_ts(new_pkt, frame_nb);
+    new_pkt->pts = av_rescale_q_rnd(new_pkt->pts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->dts = av_rescale_q_rnd(new_pkt->dts, codec_ctx_->time_base, out_stream_->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    new_pkt->duration = av_rescale_q(new_pkt->duration, codec_ctx_->time_base, out_stream_->time_base);
+    new_pkt->stream_index = out_stream_->index;
+    // Write the packet to the output stream
+    int ret = av_interleaved_write_frame(fmt_ctx_, new_pkt);
+
+    av_packet_free(&new_pkt);
+    new_pkt = nullptr;
+
+    char errbuf[64]{ 0 };
+    if (ret < 0) {
+        fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
+        return false;
+    }
+
+    return true;
+}
+
+bool FFRecoder::flush()
 {
-    return av_write_frame(fmt_ctx_, nullptr);
+    return write_frame(nullptr);
 }
 
 bool FFRecoder::bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p)
@@ -375,25 +277,30 @@ bool FFRecoder::bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_
     // Allocate the conversion context
     thread_local std::tuple<int,int,int> params{ 0, 0, 0 };
     thread_local std::unique_ptr<SwsContext, decltype(&sws_freeContext)> sws_context{ nullptr, &sws_freeContext };
-
-    std::tuple<int, int, int> new_params{ width_, height_, av_image_get_linesize(AV_PIX_FMT_YUV420P, width_, 0) };
+
+    std::tuple<int,int,int> new_params{ width_, height_, av_image_get_linesize(AV_PIX_FMT_YUV420P, width_, 0) };
     if (!sws_context || params != new_params)
     {
         sws_context.reset(sws_getContext(width_, height_, AV_PIX_FMT_BGR24, width_, height_,
             AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr));
         params = new_params;
     }
+
     // Convert the pixel format
     const int stride = std::get<2>(params); // length of one row of the Y plane
-    //const int ret = sws_scale(sws_context.get(),
-    //    std::array<const uint8_t* const>{ buf_bgr }.data(), /* BGR data has a single plane */
-    //    std::array{ width_ * 3 }.data(), /* BGR, so width * 3 */
-    //    0, height_,
-    //    std::array{ buf_420p, buf_420p + y_size_, buf_420p + y_size_ + uv_size_ }.data(), /* start addresses of the three YUV planes */
-    //    std::array{ stride, stride / 2, stride / 2 }.data()); /* row width of each YUV plane */
-    const int rgba_linesize = width_ * 3;
-    int yuv_linesize[3] = { stride, stride / 2, stride / 2 };
-    int ret = sws_scale(sws_context.get(), (const uint8_t* const*)buf_bgr, &rgba_linesize, 0, height_, (uint8_t* const*)buf_420p, yuv_linesize);
-
-    return 0;
+    const int ret = sws_scale(sws_context.get(),
+        &buf_bgr, /* BGR data has a single plane */
+        std::array<int, 1> {width_ * 3}.data(), /* BGR, so width * 3 */
+        0, height_,
+        std::array<uint8_t* const, 3>{ buf_420p, buf_420p + y_size_, buf_420p + y_size_ + uv_size_ }.data(), /* start addresses of the three YUV planes */
+        std::array<int, 3>{ stride, stride / 2, stride / 2 }.data()
+        ); /* row width of each YUV plane */
+
+    return ret >= 0;
+}
+
+bool FFRecoder::close()
+{
+    flush();
+    uninit();
 }
\ No newline at end of file
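
For reference, the timestamp arithmetic in the new `calc_pkt_ts()` collapses to whole frame indices when `codec_ctx_->time_base` is `{1, fps}` (as set in `init()`): the per-frame duration in microseconds is `AV_TIME_BASE / fps`, and dividing by `av_q2d(time_base) * AV_TIME_BASE` cancels it out again. A small standalone check of that arithmetic (plain C++, FFmpeg not required; `AV_TIME_BASE` is written out as 1000000):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t AV_TIME_BASE_US = 1000000;   // FFmpeg's AV_TIME_BASE
    const int fps = 25;
    const double time_base = 1.0 / fps;        // av_q2d(AVRational{1, fps})

    for (int frame_index = 1; frame_index <= 3; ++frame_index) {
        int64_t calc_duration = (double)AV_TIME_BASE_US / fps;   // 40000 us per frame
        int64_t pts      = (double)(frame_index * calc_duration) / (time_base * AV_TIME_BASE_US);
        int64_t duration = (double)calc_duration / (time_base * AV_TIME_BASE_US);
        // With time_base == 1/fps this is simply pts == frame_index and duration == 1,
        // which av_rescale_q_rnd() then maps onto out_stream_->time_base.
        printf("frame %d: pts=%lld duration=%lld\n", frame_index, (long long)pts, (long long)duration);
    }
    return 0;
}
```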
src/decoder/dvpp/FFRecoder.h
 #pragma once
 #include <memory>
 
-#include "depend_headers.h"
+class AVFrame;
+class AVStream;
+class AVCodecContext;
+class AVFormatContext;
+class AVPacket;
 
 class FFRecoder
 {
 public:
     FFRecoder();
-    virtual ~FFRecoder();
+    ~FFRecoder();
 
-    bool init(int w, int h, AVRational time_base, AVCodecContext* avctx, const char* outfile_name);
+    bool init(int w, int h, int fps, int bit_rate, const char* outfile_name);
     void uninit();
     bool write_image(const uint8_t* bgr);
     bool write_yuv(const uint8_t* yuv_data);
-    bool write_frame(AVFrame* frame);
+    bool write_frame(const AVFrame* frame);
+    bool write_pkt(AVPacket* pkt);
+    bool write_pkt_data(const uint8_t* data, int size);
     bool flush();
-
-    // AVPacket-based interface
-    bool init(AVStream* stream, AVCodecContext* avctx, const char* outfile_name);
-    bool write_pkt(AVPacket *pkt);
-    bool flush_pkt();
-    void release();
+    bool close();
 
 private:
     bool bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p);
-    void update_pts(AVPacket* pkt);
+    void calc_pkt_ts(AVPacket* pkt, int frame_index);
 
 private:
     int width_;
@@ -37,17 +38,7 @@ private:
     AVStream* out_stream_;
     AVFrame* yuv_frame_;
 
-    SwsContext * img_convert_ctx;
-    //AVFrame* pFrameOut;
-    uint8_t * out_buffer;
-
-    bool bFirstFrame;
-    int64_t last_src_pts;
-    int64_t last_pts;
-
-    int64_t first_pts;
-    int64_t first_dts;
+    int m_fps{1};
 
-    int64_t frame_index{0};
-    AVStream* m_inStream;
+    int frame_nb{0};
 };
\ No newline at end of file
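
A minimal usage sketch of the FFRecoder interface declared above (the output path, resolution and the all-black BGR frames are placeholders, not project code):

```cpp
#include <cstdint>
#include <vector>
#include "FFRecoder.h"

void record_demo(int w, int h) {
    FFRecoder recoder;
    if (!recoder.init(w, h, /*fps=*/25, /*bit_rate=*/4000000, "/tmp/demo.mp4"))
        return;

    std::vector<uint8_t> bgr(static_cast<size_t>(w) * h * 3, 0);  // dummy black frame
    for (int i = 0; i < 100; ++i)
        recoder.write_image(bgr.data());   // BGR -> YUV420P -> encode -> mux

    recoder.close();                       // flush() + uninit(), writes the trailer
}
```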
src/decoder/dvpp/FFRecoder2.cpp deleted
-// FFRecoder2.cpp
-#include "FFRecoder2.h"
-#include <tuple>
-#include <array>
-#include <vector>
-
-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavutil/opt.h>
-#include <libavutil/timestamp.h>
-#include <libavutil/imgutils.h>
-#include <libswscale/swscale.h>
-}
-
-
-FFRecoder2::FFRecoder2()
-    :width_{},
-    height_{},
-    y_size_{},
-    uv_size_{},
-    pts_{},
-    codec_ctx_{ nullptr },
-    fmt_ctx_{ nullptr },
-    out_stream_{ nullptr },
-    yuv_frame_{ nullptr }
-{
-}
-
-FFRecoder2::~FFRecoder2()
-{
-    uninit();
-}
-
-
-bool FFRecoder2::init(int w, int h, int fps, int bit_rate, const char* outfile_name)
-{
-    uninit();
-
-    width_ = w;
-    height_ = h;
-    y_size_ = w * h;
-    uv_size_ = y_size_ / 4;
-
-    // [1] Find the encoder
-    const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
-    if (!encoder) {
-        fprintf(stderr, "Find encoder AV_CODEC_ID_H264 failed!\n");
-        return false;
-    }
-    // Allocate the encoder context
-    codec_ctx_ = avcodec_alloc_context3(encoder);
-    if (!codec_ctx_) {
-        fprintf(stderr, "Alloc context for encoder contx failed!\n");
-        return false;
-    }
-    // Configure the encoder context
-    codec_ctx_->bit_rate = bit_rate;
-    codec_ctx_->width = width_;
-    codec_ctx_->height = height_;
-    codec_ctx_->time_base = AVRational{ 1, fps };
-    codec_ctx_->gop_size = 50;
-    codec_ctx_->max_b_frames = 0;
-    codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
-    codec_ctx_->thread_count = 4;
-    codec_ctx_->qmin = 10;
-    codec_ctx_->qmax = 51;
-    codec_ctx_->qcompress = 0.6f;
-    codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-
-    //av_opt_set(codec_ctx_->priv_data, "preset", "ultrafast", 0);
-    av_opt_set(codec_ctx_->priv_data, "tune", "zerolatency", 0);
-
-    // Open the encoder
-    int ret = avcodec_open2(codec_ctx_, encoder, nullptr);
-    if (ret < 0) {
-        fprintf(stderr, "Open encoder failed!\n");
-        return false;
-    }
-
-    // [2] Create the output context
-    avformat_alloc_output_context2(&fmt_ctx_, nullptr, nullptr, outfile_name);
-
-    // [3] Add the output video stream
-    out_stream_ = avformat_new_stream(fmt_ctx_, nullptr);
-    out_stream_->id = 0;
-    out_stream_->codecpar->codec_tag = 0;
-    avcodec_parameters_from_context(out_stream_->codecpar, codec_ctx_);
-
-    av_dump_format(fmt_ctx_, out_stream_->id, outfile_name, 1);
-
-    // Create the YUV frame
-    yuv_frame_ = av_frame_alloc();
-    yuv_frame_->format = AV_PIX_FMT_YUV420P;
-    yuv_frame_->width = width_;
-    yuv_frame_->height = height_;
-    // Allocate memory for the YUV frame
-    if (av_frame_get_buffer(yuv_frame_, 0) < 0) {
-        av_frame_free(&yuv_frame_);
-        yuv_frame_ = nullptr;
-        fprintf(stderr, "Frame get buffer failed!\n");
-        return false;
-    }
-
-    // [5] Open the output video file and write the header
-    if (avio_open(&fmt_ctx_->pb, outfile_name, AVIO_FLAG_WRITE) < 0) {
-        fprintf(stderr, "avio_open failed!\n");
-        return false;
-    }
-    if (avformat_write_header(fmt_ctx_, nullptr) < 0) {
-        fprintf(stderr, "Write header failed!\n");
-        return false;
-    }
-
-    return true;
-}
-
-void FFRecoder2::uninit()
-{
-    if (fmt_ctx_) {
-        av_write_trailer(fmt_ctx_);
-        avio_close(fmt_ctx_->pb);
-        avformat_free_context(fmt_ctx_);
-        fmt_ctx_ = nullptr;
-    }
-
-    if (codec_ctx_) {
-        avcodec_close(codec_ctx_);
-        avcodec_free_context(&codec_ctx_);
-        codec_ctx_ = nullptr;
-    }
-
-    if (yuv_frame_) {
-        av_frame_free(&yuv_frame_);
-        yuv_frame_ = nullptr;
-    }
-
-    width_ = 0;
-    height_ = 0;
-    y_size_ = 0;
-    uv_size_ = 0;
-    pts_ = 0;
-}
-
-bool FFRecoder2::write_image(const uint8_t* bgr)
-{
-    // Allocate the YUV buffer
-    thread_local std::vector<uint8_t> yuv_data;
-    if (yuv_data.size() != y_size_ * 3 / 2) {
-        yuv_data.resize(y_size_ * 3 / 2);
-    }
-    // Convert BGR to YUV
-    bgr_to_yuv420p(bgr, yuv_data.data());
-
-    return write_yuv(yuv_data.data());
-}
-
-bool FFRecoder2::write_yuv(const uint8_t* yuv_data)
-{
-    // Copy the YUV data into the frame row by row, since the frame buffers are memory-aligned
-    for (int i = 0; i < height_; i++) {
-        memcpy(yuv_frame_->data[0] + i * yuv_frame_->linesize[0], yuv_data + width_ * i, width_);
-    }
-    const int uv_stride = width_ / 2;
-    for (int i = 0; i < height_ / 2; i++) {
-        memcpy(yuv_frame_->data[1] + i * yuv_frame_->linesize[1], yuv_data + y_size_ + uv_stride * i, uv_stride);
-        memcpy(yuv_frame_->data[2] + i * yuv_frame_->linesize[2], yuv_data + y_size_ + uv_size_ + uv_stride * i, uv_stride);
-    }
-
-    yuv_frame_->pts = pts_++;
-
-    return write_frame(yuv_frame_);
-}
-
-bool FFRecoder2::write_frame(const AVFrame* frame)
-{
-    char errbuf[64]{ 0 };
-    // Send the frame to the encoder
-    int ret = avcodec_send_frame(codec_ctx_, frame);
-    if (ret < 0) {
-        fprintf(stderr, "Error sending a frame to the encoder: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
-        return false;
-    }
-
-    while (true) {
-        AVPacket pkt{ 0 };
-        // Fetch the encoded packet
-        ret = avcodec_receive_packet(codec_ctx_, &pkt);
-        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
-            return true;
-        else if (ret < 0) {
-            fprintf(stderr, "Error encoding a frame: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
-            return false;
-        }
-        // Rescale the pts onto the output stream's time_base
-        av_packet_rescale_ts(&pkt, codec_ctx_->time_base, out_stream_->time_base);
-        pkt.stream_index = out_stream_->index;
-        // Write the packet to the output stream
-        ret = av_interleaved_write_frame(fmt_ctx_, &pkt);
-        av_packet_unref(&pkt);
-        if (ret < 0) {
-            fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
-            return false;
-        }
-    }
-
-    return true;
-}
-
-static double a2d(AVRational a) {
-    return a.den / a.num;
-}
-
-bool FFRecoder2::write_pkt(AVPacket* pkt) {
-    frame_nb++;
-    pkt->duration = int(a2d(codec_ctx_->time_base));
-    pkt->pts = frame_nb;
-    // Rescale the pts onto the output stream's time_base
-    av_packet_rescale_ts(pkt, codec_ctx_->time_base, out_stream_->time_base);
-    pkt->stream_index = out_stream_->index;
-    // Write the packet to the output stream
-    int ret = av_interleaved_write_frame(fmt_ctx_, pkt);
-
-    char errbuf[64]{ 0 };
-    if (ret < 0) {
-        fprintf(stderr, "Error while writing output packet: %s\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
-        return false;
-    }
-
-    return true;
-}
-
-bool FFRecoder2::flush()
-{
-    return write_frame(nullptr);
-}
-
-bool FFRecoder2::bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p)
-{
-    // Allocate the conversion context
-    thread_local std::tuple<int,int,int> params{ 0, 0, 0 };
-    thread_local std::unique_ptr<SwsContext, decltype(&sws_freeContext)> sws_context{ nullptr, &sws_freeContext };
-
-    std::tuple<int,int,int> new_params{ width_, height_, av_image_get_linesize(AV_PIX_FMT_YUV420P, width_, 0) };
-    if (!sws_context || params != new_params)
-    {
-        sws_context.reset(sws_getContext(width_, height_, AV_PIX_FMT_BGR24, width_, height_,
-            AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr));
-        params = new_params;
-    }
-
-    // Convert the pixel format
-    const int stride = std::get<2>(params); // length of one row of the Y plane
-    const int ret = sws_scale(sws_context.get(),
-        &buf_bgr, /* BGR data has a single plane */
-        std::array<int, 1> {width_ * 3}.data(), /* BGR, so width * 3 */
-        0, height_,
-        std::array<uint8_t* const, 3>{ buf_420p, buf_420p + y_size_, buf_420p + y_size_ + uv_size_ }.data(), /* start addresses of the three YUV planes */
-        std::array<int, 3>{ stride, stride / 2, stride / 2 }.data()
-        ); /* row width of each YUV plane */
-
-    return ret >= 0;
-}
-
-bool FFRecoder2::close()
-{
-    flush();
-    uninit();
-}
\ No newline at end of file
src/decoder/dvpp/FFRecoder2.h deleted
-#pragma once
-#include <memory>
-
-class AVFrame;
-class AVStream;
-class AVCodecContext;
-class AVFormatContext;
-class AVPacket;
-
-class FFRecoder2
-{
-public:
-    FFRecoder2();
-    ~FFRecoder2();
-
-    bool init(int w, int h, int fps, int bit_rate, const char* outfile_name);
-    void uninit();
-    bool write_image(const uint8_t* bgr);
-    bool write_yuv(const uint8_t* yuv_data);
-    bool write_frame(const AVFrame* frame);
-    bool write_pkt(AVPacket* pkt);
-    bool flush();
-    bool close();
-
-private:
-    bool bgr_to_yuv420p(const uint8_t* const buf_bgr, uint8_t* const buf_420p);
-
-private:
-    int width_;
-    int height_;
-    int y_size_;
-    int uv_size_;
-    int pts_;
-    AVCodecContext* codec_ctx_;
-    AVFormatContext* fmt_ctx_;
-    AVStream* out_stream_;
-    AVFrame* yuv_frame_;
-
-    int frame_nb{0};
-};
\ No newline at end of file
src/decoder/dvpp/FFRecoderTaskManager.cpp
 #include "FFRecoderTaskManager.h"
 #include <chrono>
 
-struct RecodeThreadParam {
-    FFRecoderTaskManager* _this;
-    RecodeParam param;
-};
-
 static long get_cur_time() {
 
     chrono::time_point<chrono::system_clock, chrono::milliseconds> tpMicro
@@ -69,68 +64,14 @@ bool FFRecoderTaskManager::init(int w, int h, int fps, int bit_rate) {
     return true;
 }
 
-static AVPacket* packet_clone(AVPacket* pkt) {
-    AVPacket *new_pkt = av_packet_alloc();
-    av_init_packet( new_pkt );
-    av_new_packet(new_pkt, pkt->size);
-    memcpy(new_pkt->data, pkt->data, pkt->size);
-    new_pkt->size = pkt->size;
-    // new_pkt->pts = pkt->pts;
-    // new_pkt->dts = pkt->dts;
-    // new_pkt->stream_index = pkt->stream_index;
-    // new_pkt->duration = pkt->duration;
-    // new_pkt->pos = pkt->pos;
-    // new_pkt->flags = pkt->flags;
-    // av_copy_packet_side_data(new_pkt, pkt);
-    return new_pkt;
-}
-
-static AVPacket* copy_packet(const AVPacket* src)
-{
-    AVPacket* dst = av_packet_alloc(); // allocate the packet
-    if (!dst) {
-        return NULL;
-    }
-
-    // Copy all fields
-    av_packet_ref(dst, src);
-
-    // Copy the audio/video data
-    dst->data = (uint8_t*)av_malloc(src->size);
-    memcpy(dst->data, src->data, src->size);
-    dst->size = src->size;
-    return dst;
-}
-
 void FFRecoderTaskManager::cache_pkt(AVPacket* pkt, long long frame_nb, string dec_name){
     if(m_bExit) {
         // Once the task has exited, stop caching data
         return;
     }
 
-    // Since the data in one AVPacket is not large, clone it here to reduce coupling with the decoder module and simplify management
-    // AVPacket *new_pkt = copy_packet(pkt);
-
-    DataPacket* newDataPkt = new DataPacket();
-    newDataPkt->pkt = pkt;
-    newDataPkt->frame_nb = frame_nb;
-
-    if(is_key_frame(pkt)){
-        // a monotonically increasing value
-        newDataPkt->isKeyFrame = true;
-        LOG_INFO("[{}] - key frame_nb: {}", dec_name, frame_nb);
-    } else {
-        newDataPkt->isKeyFrame = false;
-    }
-
-    AVPacket* npkt = newDataPkt->pkt;
-    if(npkt == nullptr) {
-        return ;
-    } else if (npkt->data == nullptr || npkt->size <= 0){
-        return ;
-    }
-
     std::lock_guard<std::mutex> l_info(m_pkt_list_short_mtx);
+    DataPacket* newDataPkt = new DataPacket(pkt->data, pkt->size, frame_nb, is_key_frame(pkt));
     m_pkt_list_short.push_back(newDataPkt);
 }
 
@@ -277,20 +218,11 @@ void FFRecoderTaskManager::recode_thread() {
             break;
         }
 
-        auto it = m_pkt_list.begin();
-        while (it != it_data) {
-            DataPacket* dataPkt = m_pkt_list.front();
-            delete dataPkt;
-            dataPkt = nullptr;
-            m_pkt_list.pop_front();
-            it = m_pkt_list.begin();
-        }
-
         LOG_INFO("start frame_nb: {}", (*it_data)->frame_nb);
 
         string file_name = recoderinfo.recoderPath;
 
-        FFRecoder2 ffrecoder;
+        FFRecoder ffrecoder;
         bool bInit = ffrecoder.init(m_width, m_height, m_fps, m_bit_rate, file_name.c_str());
         if (!bInit) {
             LOG_ERROR("ffrecoder init error : {} {} {}", recoderinfo.task_id, recoderinfo.object_id, recoderinfo.frame_nb);
@@ -308,16 +240,15 @@ void FFRecoderTaskManager::recode_thread() {
             if(dataPkt->frame_nb > recoderinfo.frame_nb) {
                 break;
             }
-            AVPacket* pkt = dataPkt->pkt;
-            if(pkt == nullptr) {
-                LOG_ERROR("{} pkt is nullptr", recoderinfo.task_id);
-                continue;
-            } else if (pkt->data == nullptr || pkt->size <= 0){
-                LOG_ERROR("{} pkt data is nullptr or size is {}", recoderinfo.task_id, pkt->size);
+
+            if (dataPkt->pkt_data == nullptr || dataPkt->pkt_size <= 0){
+                LOG_ERROR("{} pkt data is nullptr or size is {}", recoderinfo.task_id, dataPkt->pkt_size);
                 continue;
             }
 
-            ffrecoder.write_pkt(pkt);
+            // LOG_INFO("ref count: {}", av_buffer_get_ref_count(pkt->buf));
+
+            ffrecoder.write_pkt_data(dataPkt->pkt_data, dataPkt->pkt_size);
             count++;
             end_frame_nb = (*it_save)->frame_nb;
         }
src/decoder/dvpp/FFRecoderTaskManager.h
-#include "FFRecoder2.h"
+#include "FFRecoder.h"
 
 #include "../../ai_platform/common_header.h"
 #include "depend_headers.h"
@@ -12,11 +12,6 @@
 
 using namespace std;
 
-struct RecodeParam {
-    AVRational time_base;
-    RecoderInfo recoderInfo;
-    AVCodecContext* avctx;
-};
 
 typedef std::function<bool(const char *msg)> mq_callback_t;
 
@@ -65,7 +60,7 @@
 
     mq_callback_t mq_publish_func;
 
-    // FFRecoder2
+    // FFRecoder
     int m_width;
     int m_height;
     int m_fps;
src/decoder/dvpp/VpcUtils.cpp
@@ -17,10 +17,6 @@ VpcUtils::VpcUtils(){
 }
 
 VpcUtils::~VpcUtils(){
-    if(nullptr != stream_){
-        aclrtDestroyStream(stream_);
-    }
-
     if(context_){
         aclrtDestroyContext(context_);
     }
@@ -36,11 +32,9 @@ int VpcUtils::init(int devId){
 
     m_devId = devId;
 
-    aclrtSetDevice(m_devId);
     aclrtCreateContext(&context_, m_devId);
 
     CHECK_AND_RETURN(aclrtSetCurrentContext(context_), "aclrtSetCurrentContext failed");
-    CHECK_AND_RETURN(aclrtCreateStream(&stream_), "aclrtCreateStream failed! ");
 
     dvppChannelDesc_ = acldvppCreateChannelDesc();
 
@@ -59,7 +53,6 @@ int VpcUtils::init(int devId){
 
 DvppDataMemory* VpcUtils::convert2bgr(acldvppPicDesc *inputDesc_, int out_width, int out_height, bool key_frame){
 
-    aclrtSetDevice(m_devId);
     aclrtSetCurrentContext(context_);
 
     int out_buf_width = ALIGN_UP(out_width, 16) * 3;
@@ -79,6 +72,8 @@ DvppDataMemory* VpcUtils::convert2bgr(acldvppPicDesc *inputDesc_, int out_width,
     acldvppSetPicDescSize(outputDesc_, out_buf_size);
 
     aclError ret = ACL_ERROR_NONE;
+    aclrtStream stream_{nullptr};
+    aclrtCreateStream(&stream_);
     do{
        // 9. Launch the asynchronous color-space conversion, then call aclrtSynchronizeStream to block until every task on the stream has finished
        ret = acldvppVpcConvertColorAsync(dvppChannelDesc_, inputDesc_, outputDesc_, stream_);
@@ -93,6 +88,11 @@ DvppDataMemory* VpcUtils::convert2bgr(acldvppPicDesc *inputDesc_, int out_width,
        }
     }while(0);
 
+    if(nullptr != stream_){
+        aclrtDestroyStream(stream_);
+        stream_ = nullptr;
+    }
+
     acldvppDestroyPicDesc(outputDesc_);
 
     if(ret != ACL_ERROR_NONE){
@@ -105,7 +105,6 @@ DvppDataMemory* VpcUtils::convert2bgr(acldvppPicDesc *inputDesc_, int out_width,
 
 DvppDataMemory* VpcUtils::convert2bgr(DvppDataMemory* inMem){
 
-    aclrtSetDevice(m_devId);
     aclrtSetCurrentContext(context_);
 
     int out_width = inMem->getWidth();
@@ -138,6 +137,8 @@ DvppDataMemory* VpcUtils::convert2bgr(DvppDataMemory* inMem){
     acldvppSetPicDescSize(outputDesc_, out_buf_size);
 
     aclError ret = ACL_ERROR_NONE;
+    aclrtStream stream_{nullptr};
+    aclrtCreateStream(&stream_);
     do{
        // 9. Launch the asynchronous color-space conversion, then call aclrtSynchronizeStream to block until every task on the stream has finished
        ret = acldvppVpcConvertColorAsync(dvppChannelDesc_, inputDesc_, outputDesc_, stream_);
@@ -152,6 +153,11 @@ DvppDataMemory* VpcUtils::convert2bgr(DvppDataMemory* inMem){
        }
     }while(0);
 
+    if(nullptr != stream_){
+        aclrtDestroyStream(stream_);
+        stream_ = nullptr;
+    }
+
     acldvppDestroyPicDesc(outputDesc_);
     acldvppDestroyPicDesc(inputDesc_);
 
@@ -165,7 +171,6 @@ DvppDataMemory* VpcUtils::convert2bgr(DvppDataMemory* inMem){
 
 DvppDataMemory* VpcUtils::resize(acldvppPicDesc *inputDesc_, int out_width, int out_height){
 
-    aclrtSetDevice(m_devId);
     aclrtSetCurrentContext(context_);
 
     int out_buf_width = ALIGN_UP(out_width, 16);
@@ -187,6 +192,8 @@ DvppDataMemory* VpcUtils::resize(acldvppPicDesc *inputDesc_, int out_width, int
     acldvppResizeConfig *resizeConfig_ = acldvppCreateResizeConfig();
 
     aclError ret = ACL_ERROR_NONE;
+    aclrtStream stream_{nullptr};
+    aclrtCreateStream(&stream_);
     do{
        // 9. Launch the asynchronous resize, then call aclrtSynchronizeStream to block until every task on the stream has finished
        ret = acldvppVpcResizeAsync(dvppChannelDesc_, inputDesc_, outputDesc_, resizeConfig_, stream_);
@@ -201,6 +208,11 @@ DvppDataMemory* VpcUtils::resize(acldvppPicDesc *inputDesc_, int out_width, int
        }
     }while(0);
 
+    if(nullptr != stream_){
+        aclrtDestroyStream(stream_);
+        stream_ = nullptr;
+    }
+
     acldvppDestroyResizeConfig(resizeConfig_);
     acldvppDestroyPicDesc(outputDesc_);
 
src/decoder/dvpp/VpcUtils.h
@@ -16,7 +16,6 @@ public:
 
 private:
     aclrtContext context_{nullptr};
-    aclrtStream stream_{nullptr};
     int m_devId;
     acldvppChannelDesc *dvppChannelDesc_ {nullptr};
     string m_dec_name;
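
The VpcUtils (and JpegUtil) changes drop the long-lived `stream_` member and instead create a short-lived stream around each asynchronous DVPP call. A minimal sketch of that create / launch / synchronize / destroy pattern, assuming only the ACL runtime API (`launch_async` stands in for calls such as `acldvppVpcConvertColorAsync` or `acldvppVpcResizeAsync`):

```cpp
#include "acl/acl.h"

// Run one asynchronous DVPP task on a temporary stream and wait for it.
template <typename LaunchFn>
aclError run_on_temp_stream(LaunchFn launch_async) {
    aclrtStream stream = nullptr;
    aclError ret = aclrtCreateStream(&stream);
    if (ret != ACL_SUCCESS) return ret;

    ret = launch_async(stream);                  // enqueue the async DVPP op
    if (ret == ACL_SUCCESS)
        ret = aclrtSynchronizeStream(stream);    // block until the op completes

    aclrtDestroyStream(stream);                  // always release the stream
    return ret;
}
```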
src/decoder/dvpp/depend_headers.h
@@ -27,9 +27,10 @@ extern "C" {
     #include "libavutil/samplefmt.h"
     #include "libavformat/avformat.h"
     #include "libavcodec/avcodec.h"
-    #include <libavutil/opt.h>
-    #include <libavutil/timestamp.h>
-    #include <libswscale/swscale.h>
+    #include "libavcodec/bsf.h"
+    #include "libavutil/opt.h"
+    #include "libavutil/timestamp.h"
+    #include "libswscale/swscale.h"
 }
 
 
@@ -40,16 +41,28 @@ extern "C" {
 
 
 struct DataPacket {
-    AVPacket* pkt {nullptr};
+    uint8_t *pkt_data{nullptr};
+    int pkt_size{0};
     unsigned long long frame_nb{0};
     bool isKeyFrame{false};
 
+    DataPacket(uint8_t *data, int size, unsigned long long frameNb, bool isKey) {
+        pkt_data = (uint8_t*) malloc(size);
+        memcpy(pkt_data, data, size);
+        pkt_size = size;
+        frame_nb = frameNb;
+        isKeyFrame = isKey;
+    }
+
     ~DataPacket(){
-        if(pkt != nullptr) {
+        if(pkt_data != nullptr) {
             // LOG_INFO("free frame_nb:{}", frame_nb);
-            av_packet_free(&pkt);
-            pkt = nullptr;
+            free(pkt_data);
+            pkt_data = nullptr;
        }
+        pkt_size = 0;
+        frame_nb = 0;
+        isKeyFrame = false;
    }
 };
 
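
DataPacket now owns a plain malloc'd copy of the packet payload instead of an AVPacket, so the cached bytes outlive whatever the decoder does with its own packet. A small sketch of how a caller might hand it an AVPacket (illustration only; `cache_example` is not part of the code above):

```cpp
#include <list>
#include "depend_headers.h"   // defines DataPacket and pulls in the FFmpeg headers

void cache_example(std::list<DataPacket*>& cache, AVPacket* pkt,
                   unsigned long long frame_nb, bool is_key) {
    // The constructor deep-copies pkt->data, so the AVPacket can be unreferenced afterwards.
    cache.push_back(new DataPacket(pkt->data, pkt->size, frame_nb, is_key));
    av_packet_unref(pkt);
}
```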
src/util/JpegUtil.cpp
@@ -14,7 +14,6 @@ int JpegUtil::jpeg_init(int32_t devId){
     /* 2.Run the management resource application, including Device, Context, Stream */
     aclrtSetDevice(deviceId_);
     aclrtCreateContext(&context_, deviceId_);
-    aclrtCreateStream(&stream_);
 
     // Prepare the channel
     dvppChannelDesc_ = acldvppCreateChannelDesc();
@@ -35,14 +34,6 @@ void JpegUtil::jpeg_release(){
     ret = acldvppDestroyChannelDesc(dvppChannelDesc_);
     dvppChannelDesc_ = nullptr;
 
-    if (stream_ != nullptr) {
-        ret = aclrtDestroyStream(stream_);
-        if (ret != ACL_SUCCESS) {
-            LOG_ERROR("destroy stream failed");
-        }
-        stream_ = nullptr;
-    }
-
     acldvppDestroyJpegeConfig(jpegeConfig_);
 
     if (context_ != nullptr) {
@@ -96,6 +87,8 @@ bool JpegUtil::jpeg_encode(acldvppPicDesc *encodeInputDesc_, string out_file_nam
     }
 
     bool bRet = false;
+    aclrtStream stream_{nullptr};
+    aclrtCreateStream(&stream_);
     do {
        // 9. Launch the asynchronous encode, then call aclrtSynchronizeStream to block until every task on the stream has finished
        aclRet = acldvppJpegEncodeAsync(dvppChannelDesc_, encodeInputDesc_, encodeOutBufferDev_, &outBufferSize, jpegeConfig_, stream_);
@@ -136,6 +129,14 @@ bool JpegUtil::jpeg_encode(acldvppPicDesc *encodeInputDesc_, string out_file_nam
 
        bRet = true;
     } while (0);
+
+    if (stream_ != nullptr) {
+        ret = aclrtDestroyStream(stream_);
+        if (ret != ACL_SUCCESS) {
+            LOG_ERROR("destroy stream failed");
+        }
+        stream_ = nullptr;
+    }
 
     // Free the input/output device memory
     (void)acldvppFree(encodeOutBufferDev_);
src/util/JpegUtil.h
@@ -23,7 +23,6 @@
 private:
     int32_t deviceId_;
     aclrtContext context_;
-    aclrtStream stream_;
     acldvppChannelDesc *dvppChannelDesc_;
     acldvppJpegeConfig *jpegeConfig_ ;
 };