/*
 * MultiSourceProcess.h
 * @Description: main pipeline class plus task-management operations
 */
//#pragma once
#ifndef __MultiSourceVideoProcess_H__
#define __MultiSourceVideoProcess_H__
#include <iostream>
#include <stdlib.h>
#include "../FFNvDecoder/FFNvDecoderManager.h"
#include "../FFNvDecoder/cuda_kernels.h"
#include "VPTProcess.h"
#include "common.h"
#include "nvml.h"
#include "opencv2/highgui/highgui.hpp"
#include "time.h"
#include <boost/thread/thread.hpp>
#include <deque>
#include <map>
#include <queue>
#include <set>
#include <string>
#include <vector>
#include "mvpt_process_assist.h"
#include <atomic>
#include <boost/thread/thread_pool.hpp>
#include <mutex>
#include <thread>
#include <pthread.h>
#include "../reprocessing_module/save_snapshot_reprocessing.h"
#include "../reprocessing_module/snapshot_reprocessing.h"
#include "task_param_manager.h"
#ifdef POST_USE_RABBITMQ
#include "../reprocessing_module/mq_manager.hpp"
#endif
#include "../ai_engine_module/ai_engine_module.h"
#ifdef WITH_SECOND_PROCESS
#include "../ai_engine_module/human_gather_statistics.h"
#include "../ai_engine_module/pedestrian_safety_det.hpp"
#include "../ai_engine_module/pedestrian_vehicle_retrograde.hpp"
#include "../ai_engine_module/pedestrian_vehicle_trespass.hpp"
#endif
#include "ErrorInfo.h"
#include <condition_variable>
#ifdef WITH_FACE_DET_SS
#include "face_det_ai_engine.h"
#endif
#include "GpuRgbMemory.hpp"
#ifdef _MSC_VER
#ifdef _DEBUG
#pragma comment(lib, "opencv_world310d.lib")
#else
#pragma comment(lib, "opencv_world310.lib")
#endif
#endif
using namespace cv;
// using namespace std;
using std::deque;
using std::map;
using std::queue;
using std::set;
using std::string;
using std::vector;
#ifndef _MSC_VER
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
#include <unistd.h> // usleep() for the Sleep() shim below
#define Sleep(a) usleep((a)*1000)
// typedef int BOOL;
#define BOOL bool
typedef unsigned int DWORD;
typedef void *LPVOID;
#endif
#ifdef _DEBUG
#define DEBUG_MSG(msg, ...) \
{ \
printf("%s %s [%d]: ", __FILE__, __FUNCTION__, __LINE__); \
printf(msg, ##__VA_ARGS__); \
printf("\n"); \
}
#else
#define DEBUG_MSG(msg, ...)
#endif
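// Usage sketch (illustrative only; `task_id` and `frame_cnt` below are hypothetical):
// in debug builds the macro prefixes the message with file/function/line and appends a
// newline; in release builds it expands to nothing.
//   DEBUG_MSG("task %s decoded %d frames", task_id.c_str(), frame_cnt);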
#define MAXLENGTH 416
#define MINLENGTH 224
//#define PROCESSHEIGHT 224
//#define PROCESSWIDTH 416
//#define DATASIZE PROCESSWIDTH * PROCESSHEIGHT * 3
#define THREAD_COUNT 30
#define SNAPSHOTFRAME 15
#define LOSTMAXFRAMECCOUNT 4
#define SCALE_OUT 10 // when checking a target box's initial position, expand outward a bit beyond the minimum distance
enum TaskState {
PLAY,
PAUSE,
FINISH,
DECODEERROR // the decode thread may report an error; on error, decoding for that channel is terminated immediately
};
struct task_resource {
DxDecoderWrap *taskcuvid;       // decoder wrapper for this channel
TaskState task_state;           // current playback / processing state
DxGPUFrame task_algorithm_data; // latest GPU frame handed to the algorithms
};
struct Operator {
string changeTaskID;
const char *videoFileName;
const char *resultFolderLittleName;
const char *resultFolderName;
int algor_counts;
TaskOperator changeTaskOperator;
};
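// User data handed to the decoder callback: it bundles the task parameters with an opaque
// pointer (presumably the owning CMultiSourceProcess) so post_decode_thread() can recover
// both when a decoded AVFrame arrives.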
struct decode_cbk_userdata {
task_param _cur_task_param;
void *opaque;
};
class CMultiSourceProcess {
public:
CMultiSourceProcess();
~CMultiSourceProcess();
int InitAlgorthim(tsl_aiplatform_param vptParam);
void *GetVPT_Handle() const {
return VPT_Handle_;
};
#ifdef POST_USE_RABBITMQ
int AddMqConn(mq_type_t mq_type, rabbitmq_conn_params_t mq_conn_param);
int GetTaskStatus(const string taskID);
#endif
/* task api */
bool add_task_operation(task_param _cur_task_param);
int AddOperator(task_param tparam);
void AddOperator(string taskID, int taskOper);
void OperatorTask();
bool HasNewTask() const {
return !TaskOperatorQ.empty();
}
void PauseTask(const string taskID);
void RestartTask(const string taskID);
void FinishTask(const string taskID, const bool delete_snapshot);
bool DeleteTaskQ(const string taskID);
/* decode api */
void FinishDecode(const string taskID);
bool FinishDecode(std::pair<const std::string, task_resource> &iter);
/* sync api */
int WaitAndPauseTask(const string taskID, const int max_timeout_ms);
int WaitAndFinishTask(const string taskID, const int max_timeout_ms);
int WaitAndRestartTask(const string taskID, const int max_timeout_ms);
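// Usage sketch for the synchronous variants (illustrative; `proc`, `task_id`, the 3000 ms
// timeout and the 0 == success convention are assumptions, see ErrorInfo.h for the actual codes):
//   if (proc.WaitAndPauseTask(task_id, 3000) != 0) {
//       // timed out or the task was not found; handle the error
//   }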
/* frame process api */
int FinishProcessThread();
int everyframe_process(vector<string> &task_in_play_id, sy_img *images, vector<onelevel_det_result> &ol_det_result);
public:
void algorthim_process_thread(const string gpuid);
void post_decode_thread(task_param _cur_task_param, AVFrame * gpuFrame);
void decode_finished_thread(task_param _cur_task_param);
private:
void startProcessByGpuid(const string gpuid);
bool task_has_vpt_algor(const std::string &task_id);
bool task_has_face_algor(const std::string &task_id);
void cuda_free_wrap(sy_img &img);
// VPT
void algorthim_vpt(vector<string>& task_list, sy_img *batch_img);
// Pedestrian safety analysis algorithms
void algorthim_pedestrian_safety(vector<string>& task_list, vector<sy_img>& vpt_interest_imgs, vector<onelevel_det_result>& vptResult);
// Wrong-way (retrograde) driving & illegal-trespass algorithm modules
void algorthim_retrograde_trespass(vector<string>& vpt_interest_task_id, vector<sy_img>& vpt_interest_imgs, vector<onelevel_det_result>& vptResult
,vector<vector<int>>& deleteObjectID);
// For the snapshot algorithm: objects whose tracks have ended return their final result (current algorithm result + snapshot save path)
void algorithm_snapshot(vector<string>& vpt_interest_task_id, vector<vector<int>> deleteObjectID);
// Fight / fall detection algorithm module
void algorithm_fight_fall(vector<string>& vpt_interest_task_id, vector<sy_img>& vpt_interest_imgs, vector<onelevel_det_result>& vptResult);
// Delivery-rider (takeaway member) classification module
void algorithm_takeaway_member_cls(vector<string>& vpt_interest_task_id, vector<sy_img>& vpt_interest_imgs, vector<onelevel_det_result>& vptResult);
// Face detection & snapshot algorithm module
void algorthim_face_detect(vector<string>& task_list, sy_img *batch_img);
// Algorithm steps that run on a track's final frame
int endframe_obj_process(const OBJ_KEY &obj_key, algorithm_type_t algor_type);
/* Implements snapshot saving (nothing is written at this point: the GPU image is copied to host memory,
 * then either saved locally right away or pushed into a cache queue for asynchronous saving. Which path
 * is used depends on the requirement: alarm-type results must be saved synchronously, analysis-type
 * results may be saved asynchronously and returned afterwards.) */
bool save_snapshot_process(const OBJ_KEY &obj_key, const algorithm_type_t &algorithm_type, const sy_img &ori_img,
const sy_img &roi_img, const long long id, const std::string &json_str,
bool enable_async = true, const bool ori_img_is_in_gpu = true,
const bool roi_img_is_in_gpu = true);
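// Call sketch (illustrative; obj_key, algor_type, the images, obj_id and json_str are all
// placeholders): alarm-type results would pass enable_async = false to force a synchronous
// save, while analysis-type results can keep the asynchronous default.
//   save_snapshot_process(obj_key, algor_type, ori_img, roi_img, obj_id, json_str,
//                         /*enable_async=*/false);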
private:
map<string, pthread_t*> gpuProcessthreadMap;
boost::thread ProcessThread;
std::mutex _tx_add_task;
deque<Operator> TaskOperatorQ;
int capacity;
double gpu_total_memory;
boost::thread thread_;
void *authority_handle{nullptr};
public: /* These should arguably not be public, but the thread functions use the data below and writing a getter for each member would be overly cumbersome */
map<string, task_resource> system_all_tasks_;
void *VPT_Handle_{nullptr};
int section_batch_size_;
int licence_status_;
int thread_status_;
int gpu_id_;
int AddTaskSucFlag; // 0: initial state, 1: task added successfully, -1: failed to add the task
int TaskInPlay;
int TotalTask;
set<string> TaskInPlayID;
map<string, uint> task_id_to_processed_frame_;
// vector<onelevel_det_result> VPTResult;
std::atomic<bool> ProcessFlag;
bool SourceFlag;
unsigned char *imgDataDevice;
void *FrameTemp;
char *mModeSnapshotVideo;
char *mModeSnapshotLittle;
string viewTaskID;
map<int, set<int>> objDelete;
FINISH_CALLBACK taskFinishCallbackFunc;
OBJECT_INFO_CALLBACK taskObjInfoCallbackFunc;
bool beginSaveSnapshot;
boost::thread_group saveSnapshotsThreadGroup;
std::mutex taskMutex;
std::condition_variable taskCondVar;
bool AttributionAnalysis; // ensures that only one secondary attribute analysis runs per frame
snapshot_reprocessing *m_snapshot_reprocessing{nullptr};
task_param_manager *m_task_param_manager{nullptr};
#ifdef WITH_SECOND_PROCESS
//! TODO: replace these with a model manager.
ai_engine_module::fight_fall_cls::FightfallCls fight_fall_cls_;
ai_engine_module::takeaway_member_classification::TakeawayMemberCls takeaway_member_;
ai_engine_module::human_gather_statistics::human_gather_statistics m_human_gather_statistics;
ai_engine_module::pedestrian_safety_det::PedestrianSafetyDetector pedestrian_safety_detector_;
ai_engine_module::pedestrian_vehicle_retrograde::PedestrianVehicleRetrograde pedestrian_vehicle_retrograde_;
ai_engine_module::pedestrian_vehicle_trespass::PedestrianVehicleTrespass pedestrian_vehicle_trespass_;
#endif
#ifdef POST_USE_RABBITMQ
mq::Manager *mq_manager_{nullptr};
#endif
#ifdef WITH_FACE_DET_SS
face_det_ai_engine m_face_det_ai_engine; // face detection
#endif
private:
base_reprocessing_unit *m_save_snapshot_reprocessing{nullptr};
private:
queue<GpuRgbMemory*> m_queueRgbData;
std::mutex m_QueueMtx;
};
static CMultiSourceProcess mainProcess; // file-scope instance (internal linkage): each translation unit including this header gets its own copy
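// End-to-end usage sketch (a minimal outline only; the field names of tsl_aiplatform_param and
// task_param live elsewhere, and the task id / timeout values below are placeholders):
//   tsl_aiplatform_param init_param;           // GPU id, model paths, ...
//   mainProcess.InitAlgorthim(init_param);     // create decoder / VPT resources
//   task_param tparam;                         // stream source, task id, enabled algorithms
//   if (mainProcess.add_task_operation(tparam)) {
//       // frames flow through post_decode_thread() and algorthim_process_thread()
//       mainProcess.WaitAndFinishTask("some-task-id", 5000);
//   }
//   mainProcess.FinishProcessThread();         // stop the processing loop before shutdown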
#if 0
struct CUVID_USERDATA {
int id;
void* opaque;
};
struct CUVID_DATA {
float* pData;
int nWidth;
int nHeight;
int nDatasize;
};
struct SNAPSHOT_PROCESS_UNIT
{
vector<DxGPUFrame> imgBig;
vector<DxGPUFrame> imgSmall;
vector<onelevel_det_result> imgVPTResult;
};
struct VideoHeightWidth
{
double height;
double width;
};
struct VideoObjectSS
{
unsigned char* obj_person;
unsigned char* obj_bike;
unsigned char* obj_car;
VideoObjectSS() : obj_person(NULL), obj_bike(NULL), obj_car(NULL) {}
};
template <typename T> class MyAtomic;
template<typename T>
bool operator == (MyAtomic<T>& d1, T& d2);
template<typename T>
bool operator == (MyAtomic<T>& d1, MyAtomic<T>& d2);
template <typename T>
class MyAtomic
{
public:
MyAtomic() {};
MyAtomic(const T& d) { data.store(d); };
MyAtomic(const MyAtomic& d) { data.store(d.data.load()); };
MyAtomic& operator =(T d) { data.store(d); return *this; };
MyAtomic& operator =(MyAtomic& d) { data.store(d.data.load()); return *this; };
MyAtomic& operator +=(T d) { data.fetch_add(d); return *this; };
MyAtomic& operator +=(MyAtomic& d) { data.fetch_add(d); return *this; };
operator int() { return data.load(); }
friend bool operator ==<T> (MyAtomic<T>& d1, T& d2);
friend bool operator ==<T> (MyAtomic<T>& d1, MyAtomic<T>& d2);
private:
std::atomic<T> data;
};
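// Usage sketch (illustrative): MyAtomic adds copy construction/assignment on top of
// std::atomic<T> so it can be stored inside copyable structs such as Task below.
//   MyAtomic<int> frame_count(0);
//   frame_count += 1;            // atomic fetch_add
//   int seen = frame_count;      // operator int() performs an atomic load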
template<typename T>
bool operator == (MyAtomic<T>& d1, T& d2)
{
return d1.data.load() == d2;
}
template<typename T>
bool operator == (MyAtomic<T>& d1, MyAtomic<T>& d2)
{
return d1.data.load() == d2.data.load();
}
struct Task {
int taskID;
const char* taskFileSource;
TaskState taskState;
DxDecoderWrap* taskTcuvid;
DxGPUFrame task_algorithm_data; // the new framework does no resize here; for now the backed-up original-resolution frame is fed to the algorithms
float* taskDataToRT;
bool taskHasBackup;
//VideoHeightWidth taskHeightWidthRatio;
VideoHeightWidth taskHeightWidth;
MyAtomic<int> taskFrameCount;
int taskTotalFrameCount;
SNAPSHOT_CALLBACK taskObjCallbackFunc;
REALTIME_CALLBACK taskRealTimeCallbackFunc;
cv::Mat frameImage;
char* folderNameLittle;
char* folderName;
sy_rect task_min_boxsize[DETECTTYPE];
};
#endif
#endif