Commit a164dbead2ef67dd4d1bd3db0d3b91fc4c479c7a

Authored by Hu Chunming
1 parent b6e71beb

Sync HUMAN_TIMING_SNAPSHOT, NONMOTOR_VEHICLE_TIMING_SNAPSHOT, VEHICLE_TIMING_SNAPSHOT
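The patch replaces the vehicle-only multi-object snapshot path (algorithm_vehicle_relult / get_vehicle_snapshot, keyed on VEHICLE_SNAPSHOT) with a generic timing-snapshot path parameterized by algorithm_type_t, invoked once per type from CMultiSourceProcess::algorthim_vpt. A loop-form sketch of that per-frame dispatch (the patch writes the three calls out explicitly; the sketch assumes it sits inside that member function, where vec_vptMem and vptResult are in scope):

```cpp
// Sketch only: equivalent to the three calls added in algorthim_vpt below.
for (algorithm_type_t t : {algorithm_type_t::HUMAN_TIMING_SNAPSHOT,
                           algorithm_type_t::VEHICLE_TIMING_SNAPSHOT,
                           algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT}) {
    algorithm_timing_snapshot(vec_vptMem, vptResult, t);
}
```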

src/ai_engine_module/ai_engine_module.h
1 1 #pragma once
2 2  
  3 +#include "./ai_engine_header.h"
  4 +
3 5 #include "./VPTProcess.h"
4 6 #include "./face_det_ai_engine.h"
5 7 #include "./pedestrian_vehicle_retrograde.h"
... ...
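ai_engine_header.h itself is not part of this diff, but the rest of the patch constructs ai_engine_module::obj_key_t with {object_id, task_id, algor_type}, reads obj_key.obj_id / obj_key.task_id, and stores the keys in a std::set. A minimal sketch consistent with that usage (assumption only; the real definition may differ, and algorithm_type_t comes from src/ai_platform/header.h):

```cpp
#include <string>
#include <tuple>

namespace ai_engine_module {
// Sketch: mirrors how obj_key_t is used in this patch, not its actual definition.
struct obj_key_t {
    int obj_id;                   // per-object track id (obj.object_id / j in the patch)
    std::string task_id;          // video task id
    algorithm_type_t algor_type;  // which timing-snapshot algorithm produced the key

    // std::set<obj_key_t> requires a strict weak ordering.
    bool operator<(const obj_key_t &o) const {
        return std::tie(obj_id, task_id, algor_type) <
               std::tie(o.obj_id, o.task_id, o.algor_type);
    }
};
}  // namespace ai_engine_module
```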
src/ai_engine_module/face_detect.h 100755 → 100644
src/ai_platform/MultiSourceProcess.cpp
... ... @@ -631,7 +631,9 @@ int CMultiSourceProcess::algorthim_vpt(vector<DeviceMemory*> vec_gpuMem){
631 631 /* for snapshot algorithm. For targets whose trajectory has ended, return the final result (current algorithm result + snapshot save path) */
632 632 vehicle_snapshot(vpt_interest_task_id, deleteObjectID);
633 633 #else
634   - algorithm_vehicle_relult(vec_vptMem, vptResult, deleteObjectID);
  634 + algorithm_timing_snapshot(vec_vptMem, vptResult, algorithm_type_t::HUMAN_TIMING_SNAPSHOT);
  635 + algorithm_timing_snapshot(vec_vptMem, vptResult, algorithm_type_t::VEHICLE_TIMING_SNAPSHOT);
  636 + algorithm_timing_snapshot(vec_vptMem, vptResult, algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT);
635 637  
636 638 send_locus_finished_msg(vpt_interest_task_id, deleteObjectID);
637 639 #endif
... ... @@ -706,9 +708,9 @@ void CMultiSourceProcess::trace_record(vector<DeviceMemory*>& vec_gpuMem, vector
706 708 }
707 709 }
708 710  
709   -int CMultiSourceProcess::algorithm_vehicle_relult(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& vptResult, vector<vector<int>>& delete_object_id) {
  711 +int CMultiSourceProcess::algorithm_timing_snapshot(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& vptResult, algorithm_type_t algor_type) {
710 712  
711   - vector<multi_obj_data_t> results = m_snapshot_reprocessing->get_vehicle_snapshot(vec_devMem, vptResult, skip_frame_);
  713 + vector<multi_obj_data_t> results = m_snapshot_reprocessing->get_timing_snapshot(vec_devMem, vptResult, algor_type);
712 714  
713 715 VPCUtil* pVPCUtil = VPCUtil::getInstance();
714 716 for (auto &result : results) {
... ... @@ -717,9 +719,9 @@ int CMultiSourceProcess::algorithm_vehicle_relult(vector<DeviceMemory*> vec_devM
717 719 }
718 720 auto task_id = result.task_id;
719 721 auto task_other_params = m_task_param_manager->get_task_other_param(task_id);
720   - const auto &algor_other_params = task_other_params->find(algorithm_type_t::VEHICLE_SNAPSHOT);
  722 + const auto &algor_other_params = task_other_params->find(algor_type);
721 723 if (algor_other_params == task_other_params->end()) {
722   - LOG_ERROR("[Error] taskId {} not found algor {}", task_id.c_str(), (int)algorithm_type_t::VEHICLE_SNAPSHOT);
  724 + LOG_ERROR("[Error] taskId {} not found algor {}", task_id.c_str(), (int)algor_type);
723 725 continue;
724 726 }
725 727 const algor_basic_config_param_t *basic_param = algor_other_params->second->basic_param;
... ... @@ -756,7 +758,7 @@ int CMultiSourceProcess::algorithm_vehicle_relult(vector<DeviceMemory*> vec_devM
756 758 strcpy(new_obj_ss_info.video_image_path, fpath_origin.c_str());
757 759 strcpy(new_obj_ss_info.snapshot_image_path, fpath_roi.c_str());
758 760 new_obj_ss_info.nFinished = 0;
759   - string json_str = helpers::gen_json::gen_multi_obj_json(algorithm_type_t::VEHICLE_SNAPSHOT, new_obj_ss_info);
  761 + string json_str = helpers::gen_json::gen_multi_obj_json(algor_type, new_obj_ss_info);
760 762  
761 763 ImgSaveInfo save_info;
762 764 save_info.file_path = fpath_roi;
... ... @@ -764,7 +766,7 @@ int CMultiSourceProcess::algorithm_vehicle_relult(vector<DeviceMemory*> vec_devM
764 766 save_info.json_str = json_str;
765 767 m_save_snapshot_reprocessing->reprocessing_process_wo_locus_async(save_info);
766 768  
767   - OBJ_KEY obj_key{task_id, obj.object_id};
  769 + ai_engine_module::obj_key_t obj_key{obj.object_id, task_id, algor_type};
768 770 // Save records of objects that have a trajectory; a message must be sent when the trajectory ends
769 771 std::lock_guard<std::mutex> l(m_total_mutex);
770 772 m_total_snapshot_info_multi_object.insert(obj_key);
... ... @@ -784,31 +786,44 @@ void CMultiSourceProcess::send_locus_finished_msg(vector<string>& vpt_interest_t
784 786 string task_id = *task_iter;
785 787 for (int &j : deleteObjectID[i]) // loop algor type.
786 788 {
787   - OBJ_KEY obj_key = {task_id, j};
788   -
789 789 auto task_param_ptr = m_task_param_manager->get_task_algor_param(task_id);
790 790  
791 791 // If snapshot capture is enabled for this channel's task, start capturing and saving; if not enabled, release the GPU memory resources
792   - if (task_param_ptr->vehicle_algors.find(algorithm_type_t::VEHICLE_SNAPSHOT) != task_param_ptr->vehicle_algors.end()) {
793   - std::lock_guard<std::mutex> l(m_total_mutex);
794   - if (m_total_snapshot_info_multi_object.find(obj_key) != m_total_snapshot_info_multi_object.end()) {
795   - video_object_snapshot new_obj_ss_info;
796   - new_obj_ss_info.object_id = j;
797   - new_obj_ss_info.nFinished = 1;
798   - strcpy(new_obj_ss_info.task_id, task_id.c_str());
799   - string json_str = helpers::gen_json::gen_multi_obj_json(algorithm_type_t::VEHICLE_SNAPSHOT, new_obj_ss_info);
800   - // Notify of the finished trajectory
801   - ImgSaveInfo save_info;
802   - save_info.json_str = json_str;
803   - m_save_snapshot_reprocessing->reprocessing_process_wo_locus_async(save_info);
804   -
805   - m_total_snapshot_info_multi_object.erase(obj_key);
806   - }
  792 + if (task_param_ptr->vehicle_algors.find(algorithm_type_t::VEHICLE_TIMING_SNAPSHOT) != task_param_ptr->vehicle_algors.end()) {
  793 + ai_engine_module::obj_key_t obj_key = {j, task_id, algorithm_type_t::VEHICLE_TIMING_SNAPSHOT};
  794 + timing_snapshot_finish_msg(obj_key, algorithm_type_t::VEHICLE_TIMING_SNAPSHOT);
  795 + }
  796 +
  797 + if (task_param_ptr->vehicle_algors.find(algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT) != task_param_ptr->vehicle_algors.end()) {
  798 + ai_engine_module::obj_key_t obj_key = {j, task_id, algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT};
  799 + timing_snapshot_finish_msg(obj_key, algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT);
  800 + }
  801 +
  802 + if (task_param_ptr->vehicle_algors.find(algorithm_type_t::HUMAN_TIMING_SNAPSHOT) != task_param_ptr->vehicle_algors.end()) {
  803 + ai_engine_module::obj_key_t obj_key = {j, task_id, algorithm_type_t::HUMAN_TIMING_SNAPSHOT};
  804 + timing_snapshot_finish_msg(obj_key, algorithm_type_t::HUMAN_TIMING_SNAPSHOT);
807 805 }
808 806 }
809 807 }
810 808 }
811 809  
  810 +void CMultiSourceProcess::timing_snapshot_finish_msg(ai_engine_module::obj_key_t obj_key, algorithm_type_t algor_type) {
  811 + std::lock_guard<std::mutex> l(m_total_mutex);
  812 + if (m_total_snapshot_info_multi_object.find(obj_key) != m_total_snapshot_info_multi_object.end()) {
  813 + video_object_snapshot new_obj_ss_info;
  814 + new_obj_ss_info.object_id = obj_key.obj_id;
  815 + new_obj_ss_info.nFinished = 1;
  816 + strcpy(new_obj_ss_info.task_id, obj_key.task_id.c_str());
  817 + string json_str = helpers::gen_json::gen_multi_obj_json(algor_type, new_obj_ss_info);
  818 + // Notify of the finished trajectory
  819 + ImgSaveInfo save_info;
  820 + save_info.json_str = json_str;
  821 + m_save_snapshot_reprocessing->reprocessing_process_wo_locus_async(save_info);
  822 +
  823 + m_total_snapshot_info_multi_object.erase(obj_key);
  824 + }
  825 +}
  826 +
812 827 // for snapshot algorithm. For targets whose trajectory has ended, return the final result (current algorithm result + snapshot save path)
813 828 void CMultiSourceProcess::vehicle_snapshot(vector<string>& vpt_interest_task_id, vector<vector<int>> deleteObjectID) {
814 829 auto task_iter = vpt_interest_task_id.begin();
... ...
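The three per-type blocks added to send_locus_finished_msg repeat the same pattern: check membership in vehicle_algors, then call timing_snapshot_finish_msg. A loop-form sketch, assuming it stands in for those blocks inside the inner loop where task_param_ptr, task_id and j are in scope:

```cpp
// Sketch only: equivalent to the three per-type blocks in send_locus_finished_msg.
for (algorithm_type_t t : {algorithm_type_t::VEHICLE_TIMING_SNAPSHOT,
                           algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT,
                           algorithm_type_t::HUMAN_TIMING_SNAPSHOT}) {
    if (task_param_ptr->vehicle_algors.find(t) != task_param_ptr->vehicle_algors.end()) {
        timing_snapshot_finish_msg(ai_engine_module::obj_key_t{j, task_id, t}, t);
    }
}
```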
src/ai_platform/MultiSourceProcess.h
... ... @@ -69,8 +69,12 @@ private:
69 69 bool task_has_vpt_algor(const std::string &task_id);
70 70 void clear_finished_task();
71 71 bool finish_task(const string taskID, const bool delete_snapshot);
72   - int algorithm_vehicle_relult(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& vptResult, vector<vector<int>>& delete_object_id);
  72 +
  73 + int algorithm_timing_snapshot(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& vptResult, algorithm_type_t algor_type);
  74 +
73 75 void send_locus_finished_msg(vector<string>& vpt_interest_task_id, vector<vector<int>> deleteObjectID);
  76 + void timing_snapshot_finish_msg(ai_engine_module::obj_key_t obj_key, algorithm_type_t algor_type);
  77 +
74 78 void vehicle_snapshot(vector<string>& vpt_interest_task_id, vector<vector<int>> deleteObjectID);
75 79 void vehicle_locus_finished(const OBJ_KEY obj_key);
76 80  
... ... @@ -115,7 +119,7 @@ private:
115 119  
116 120 int m_batch_size{1};
117 121  
118   - set<OBJ_KEY> m_total_snapshot_info_multi_object;
  122 + set<ai_engine_module::obj_key_t> m_total_snapshot_info_multi_object;
119 123 mutex m_total_mutex;
120 124  
121 125 mutex m_TotalObjMtx ;
... ...
src/ai_platform/header.h
... ... @@ -34,6 +34,7 @@ enum class algorithm_type_t {
34 34 PEDESTRIAN_TRESPASS = 211,
35 35 ROAD_WORK_DET = 212, // 221026byzsh road construction occupying the lane
36 36  
  37 + HUMAN_TIMING_SNAPSHOT = 213, // timed snapshot along a pedestrian trajectory
37 38 HUMAN_LINGER = 214, // person loitering
38 39 HUMAN_REGION_GATHER = 215, // crowd gathering in a region
39 40 HUMAN_REGION_DISMISS = 216, // people fleeing / dispersing
... ... @@ -44,6 +45,7 @@ enum class algorithm_type_t {
44 45 HUMAN_LEAVE_REGION = 222, // leaving the region
45 46  
46 47 VEHICLE_SNAPSHOT = 301,
  48 + VEHICLE_TIMING_SNAPSHOT = 302, // timed snapshot along a vehicle trajectory
47 49 VEHICLE_RETROGRADE = 310,
48 50 VEHICLE_TRESPASS = 311,
49 51 VEHICLE_GATHER = 312, // vehicle gathering
... ... @@ -53,6 +55,7 @@ enum class algorithm_type_t {
53 55  
54 56 NONMOTOR_VEHICLE_SNAPSHOT = 401,
55 57 TAKEAWAY_MEMBER_CLASSIFICATION = 402,
  58 + NONMOTOR_VEHICLE_TIMING_SNAPSHOT = 403, // timed snapshot along a non-motor vehicle trajectory
56 59  
57 60 FLOW_STATISTICS = 500, // for count statistics
58 61 NONMOTOR_VEHICLE_NOHELMET = 501,// e-bike/motorcycle rider without a helmet
... ...
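The new enum values slot into the existing numeric blocks: 213 beside the 2xx human algorithms, 302 next to VEHICLE_SNAPSHOT = 301, and 403 in the 4xx non-motor block. An illustrative helper (not part of the patch) that centralizes the "is this one of the three timing-snapshot algorithms" check the diff repeats in MultiSourceProcess.cpp and the JNI switch:

```cpp
// Illustrative only; not part of the patch.
inline bool is_timing_snapshot(algorithm_type_t t) {
    return t == algorithm_type_t::HUMAN_TIMING_SNAPSHOT ||
           t == algorithm_type_t::VEHICLE_TIMING_SNAPSHOT ||
           t == algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT;
}
```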
src/reprocessing_module/snapshot_reprocessing.cpp
... ... @@ -32,91 +32,6 @@ static void box_expansion(video_object_info& obj_info, float expand_ratio, int f
32 32 obj_info.bottom = min(obj_info.bottom + expansion_height, frame_height - 1);
33 33 }
34 34  
35   -/* Get snapshot images of person/vehicle/object targets */
36   -vector<multi_obj_data_t> snapshot_reprocessing::get_vehicle_snapshot(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& ol_det_result, int skip_frame)
37   -{
38   - // Filter to keep only vehicles
39   - filter_vehicle(ol_det_result);
40   -
41   - map<string, algor_open_config_param> && algor_config_param = m_task_param_manager->get_task_algor_params();
42   - map<string, map<algo_type, task_param_manager::algo_param_type_t_*>> && algor_param = m_task_param_manager->get_task_other_params();
43   -
44   - vector<multi_obj_data_t> results;
45   - int idx = 0;
46   - for (auto memPtr : vec_devMem)
47   - {
48   - string task_id = memPtr->getId();
49   - map<string, std::vector<video_object_info>> taskid_to_obj;
50   - if (algor_config_param.count(task_id) && algor_config_param[task_id].vehicle_algors.count(algorithm_type_t::VEHICLE_SNAPSHOT))
51   - {
52   - task_param_manager::algo_param_type_t_* cur_task_params = algor_param[task_id][algorithm_type_t::VEHICLE_SNAPSHOT];
53   - if (!cur_task_params || !cur_task_params->basic_param || !cur_task_params->basic_param->adapt_param)
54   - {
55   - continue;
56   - }
57   -
58   - // polygon region
59   - auto adapt_param = cur_task_params->basic_param->adapt_param;
60   - if (adapt_param->points_count <= 0) {
61   - continue;
62   - }
63   -
64   - // how many frames apart to save the same target
65   - int snap_frame_interval = ((algor_config_param_snapshot*)cur_task_params->algor_param)->snap_frame_interval;
66   -
67   - onelevel_det_result &cur_task_ol_detres = ol_det_result[idx];
68   -
69   - for (int c = 0; c < cur_task_ol_detres.obj_count; c++)
70   - {
71   - det_objinfo det_obj = cur_task_ol_detres.obj[c];
72   - if(snap_frame_interval > 0 && det_obj.num % snap_frame_interval >= skip_frame){
73   - continue;
74   - }
75   -
76   - sy_point center;
77   - center.x_ = (det_obj.left + det_obj.right) * 0.5;
78   - center.y_ = det_obj.bottom;
79   -
80   - int type_index = det_obj.index;
81   - if ((type_index == 4 || type_index == 5 || type_index == 6 || type_index ==7 || type_index ==8)
82   - && common::isInPolygon(adapt_param->points, adapt_param->points_count, center))
83   - {
84   - video_object_info obj_info;
85   - obj_info.top = det_obj.top;
86   - obj_info.left = det_obj.left;
87   - obj_info.right = det_obj.right;
88   - obj_info.bottom = det_obj.bottom;
89   - obj_info.confidence = det_obj.confidence;
90   - obj_info.index = type_index;
91   - obj_info.object_id = det_obj.id;
92   -
93   - int frame_height = memPtr->getHeight();
94   - int frame_width = memPtr->getWidth();
95   - box_expansion(obj_info, EXPANSION_PROPORTION, frame_width, frame_height);
96   -
97   - taskid_to_obj[task_id].emplace_back(std::move(obj_info));
98   - }
99   - }
100   -
101   - if (taskid_to_obj.size() > 0)
102   - {
103   - static long long gid_ = 0;
104   -
105   - multi_obj_data_t data;
106   - data.memPtr = memPtr;
107   - data.task_id = task_id;
108   - data.objs = std::move(taskid_to_obj[task_id]);
109   - data.id = gid_++;
110   - results.emplace_back(std::move(data));
111   - LOG_TRACE("{} {} snap_frame_interval:{}", task_id.c_str(), (int)algorithm_type_t::VEHICLE_SNAPSHOT, snap_frame_interval);
112   - }
113   - }
114   -
115   - idx++;
116   - }
117   - return results;
118   -}
119   -
120 35 void snapshot_reprocessing::screen_effective_snapshot(vector<onelevel_det_result> &_onelevel_det_result){
121 36 map<string, algor_open_config_param> algor_param = m_task_param_manager->get_task_algor_params();
122 37  
... ... @@ -775,6 +690,97 @@ int snapshot_reprocessing::update_face_bestsnapshot(vector<DeviceMemory*> vec_de
775 690 return 0;
776 691 }
777 692  
  693 +// algor_type: HUMAN_TIMING_SNAPSHOT, NONMOTOR_VEHICLE_TIMING_SNAPSHOT, VEHICLE_TIMING_SNAPSHOT
  694 +vector<multi_obj_data_t> snapshot_reprocessing::get_timing_snapshot(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& ol_det_result, algorithm_type_t algor_type)
  695 +{
  696 + map<string, algor_open_config_param> && algor_config_param = m_task_param_manager->get_task_algor_params();
  697 + map<string, map<algo_type, task_param_manager::algo_param_type_t_*>> && algor_param = m_task_param_manager->get_task_other_params();
  698 +
  699 + vector<multi_obj_data_t> results;
  700 + for (size_t idx = 0; idx < vec_devMem.size(); idx++)
  701 + {
  702 + DeviceMemory* memPtr = vec_devMem[idx];
  703 + std::string task_id = memPtr->getId();
  704 +
  705 + map<string, std::vector<video_object_info>> taskid_to_obj;
  706 + if (algor_config_param.count(task_id) && algor_config_param[task_id].vehicle_algors.count(algor_type))
  707 + {
  708 + task_param_manager::algo_param_type_t_* cur_task_params = algor_param[task_id][algor_type];
  709 + if (!cur_task_params || !cur_task_params->basic_param || !cur_task_params->basic_param->adapt_param)
  710 + {
  711 + continue;
  712 + }
  713 +
  714 + // polygon region
  715 + auto adapt_param = cur_task_params->basic_param->adapt_param;
  716 + if (adapt_param->points_count <= 0) {
  717 + continue;
  718 + }
  719 +
  720 + // how many frames apart to save the same target
  721 + int snap_frame_interval = ((algor_config_param_snapshot*)cur_task_params->algor_param)->snap_frame_interval;
  722 +
  723 + onelevel_det_result &cur_task_ol_detres = ol_det_result[idx];
  724 +
  725 + for (int c = 0; c < cur_task_ol_detres.obj_count; c++)
  726 + {
  727 + det_objinfo det_obj = cur_task_ol_detres.obj[c];
  728 + if(snap_frame_interval > 0 && det_obj.num % snap_frame_interval){
  729 + continue;
  730 + }
  731 +
  732 + int type_index = det_obj.index;
  733 + if (algor_type == algorithm_type_t::HUMAN_TIMING_SNAPSHOT && algor_index_table["human"].find(type_index) == algor_index_table["human"].end()) {
  734 + continue;
  735 + } else if (algor_type == algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT && algor_index_table["nonmotor_vehicle"].find(type_index) == algor_index_table["nonmotor_vehicle"].end()) {
  736 + continue;
  737 + } else if (algor_type == algorithm_type_t::VEHICLE_TIMING_SNAPSHOT && algor_index_table["vehicle"].find(type_index) == algor_index_table["vehicle"].end()) {
  738 + continue;
  739 + } else if (algor_type != algorithm_type_t::HUMAN_TIMING_SNAPSHOT && algor_type != algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT && algor_type != algorithm_type_t::VEHICLE_TIMING_SNAPSHOT) {
  740 + continue; // algor_type is not a timing-snapshot type handled here: skip
  741 + }
  742 +
  743 + sy_point center;
  744 + center.x_ = (det_obj.left + det_obj.right) * 0.5;
  745 + center.y_ = det_obj.bottom;
  746 + if (!common::isInPolygon(adapt_param->points, adapt_param->points_count, center)) {
  747 + continue;
  748 + }
  749 +
  750 + video_object_info obj_info;
  751 + obj_info.top = det_obj.top;
  752 + obj_info.left = det_obj.left;
  753 + obj_info.right = det_obj.right;
  754 + obj_info.bottom = det_obj.bottom;
  755 + obj_info.confidence = det_obj.confidence;
  756 + obj_info.index = type_index;
  757 + obj_info.object_id = det_obj.id;
  758 +
  759 + int frame_height = memPtr->getHeight();
  760 + int frame_width = memPtr->getWidth();
  761 + box_expansion(obj_info, EXPANSION_PROPORTION, frame_width, frame_height);
  762 +
  763 + taskid_to_obj[task_id].emplace_back(std::move(obj_info));
  764 + }
  765 +
  766 + if (taskid_to_obj.size() > 0)
  767 + {
  768 + static long long gid_ = 0;
  769 +
  770 + multi_obj_data_t data;
  771 + data.memPtr = memPtr;
  772 + data.task_id = task_id;
  773 + data.objs = std::move(taskid_to_obj[task_id]);
  774 + data.id = gid_++;
  775 + results.emplace_back(std::move(data));
  776 + }
  777 + }
  778 + }
  779 +
  780 + return results;
  781 +}
  782 +
  783 +
778 784 map<OBJ_KEY, OBJ_VALUE> snapshot_reprocessing::get_total_snapshot_info(){
779 785 return total_snapshot_info;
780 786 }
... ...
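Note that the interval check changes along with the rename: the removed get_vehicle_snapshot skipped a detection when det_obj.num % snap_frame_interval >= skip_frame, while get_timing_snapshot keeps a detection only when the counter is an exact multiple of the interval. A standalone illustration of the new filter (assuming det_obj.num is the per-object frame counter the code uses; its definition is outside this diff):

```cpp
#include <cstdio>

// Standalone sketch of the interval filter used in get_timing_snapshot.
int main() {
    const int snap_frame_interval = 25;  // example; configured per task via algor_config_param_snapshot
    for (int num = 0; num < 80; ++num) {
        // Same condition as the patch: skip unless num is a multiple of the interval.
        if (snap_frame_interval > 0 && num % snap_frame_interval) continue;
        std::printf("snapshot at num=%d\n", num);  // prints 0, 25, 50, 75
    }
    return 0;
}
```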
src/reprocessing_module/snapshot_reprocessing.h
... ... @@ -68,7 +68,7 @@ public:
68 68 snapshot_reprocessing(int devId);
69 69 ~snapshot_reprocessing();
70 70  
71   - vector<multi_obj_data_t> get_vehicle_snapshot(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& ol_det_result, int skip_frame);
  71 + vector<multi_obj_data_t> get_timing_snapshot(vector<DeviceMemory*> vec_devMem, vector<onelevel_det_result>& ol_det_result, algorithm_type_t algor_type);
72 72  
73 73 void screen_effective_snapshot(vector<onelevel_det_result> &_onelevel_det_result);
74 74 void filter_vehicle(vector<onelevel_det_result> &_onelevel_det_result);
... ...
src/tsl_aiplatform_jni/AiEngineNativeInterface.cpp
... ... @@ -404,9 +404,9 @@ JNIEXPORT jint JNICALL Java_com_objecteye_nativeinterface_TSLAiEngineNativeInter
404 404 case algorithm_type_t::FACE_SNAPSHOT:
405 405 case algorithm_type_t::HUMAN_SNAPSHOT:
406 406 case algorithm_type_t::VEHICLE_SNAPSHOT:
407   - // case algorithm_type_t::HUMAN_TIMING_SNAPSHOT:
408   - // case algorithm_type_t::VEHICLE_TIMING_SNAPSHOT:
409   - // case algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT:
  407 + case algorithm_type_t::HUMAN_TIMING_SNAPSHOT:
  408 + case algorithm_type_t::VEHICLE_TIMING_SNAPSHOT:
  409 + case algorithm_type_t::NONMOTOR_VEHICLE_TIMING_SNAPSHOT:
410 410 case algorithm_type_t::NONMOTOR_VEHICLE_SNAPSHOT: {
411 411 jfieldID fid = env->GetFieldID(cls_AlgorConfigParam, "threshold", "F");
412 412 jfieldID fid_snap_frame_interval = env->GetFieldID(cls_AlgorConfigParam, "snap_frame_interval", "I");
... ...