diff --git a/test/main.cpp b/test/main.cpp index 8bfbf07..9e3ddb8 100644 --- a/test/main.cpp +++ b/test/main.cpp @@ -76,6 +76,7 @@ algorithm_type_t algor_index_to_algor_type(const int &idx) { } } +#ifdef POST_USE_RABBITMQ void init_mq_conn(void *handle) { for (auto key : {mq_type_t::ALARM_MQ, mq_type_t::GET_TASK_MQ, mq_type_t::HEART_BEAT_MQ}) { rabbitmq_conn_params_t mq_conn_params; @@ -111,6 +112,7 @@ void init_mq_conn(void *handle) { fprintf(stderr, "ip is %s port is %d\n", mq_conn_params.ip, mq_conn_params.port); } } +#endif const char* ipc_url = "/home/cmhu/tongtu/tsl_aiplatform_project_with_screenshot/data/duan1.avi"; diff --git a/tsl_aiplatform/ai_platform/GpuRgbMemory.hpp b/tsl_aiplatform/ai_platform/GpuRgbMemory.hpp index 500e4ec..a474949 100644 --- a/tsl_aiplatform/ai_platform/GpuRgbMemory.hpp +++ b/tsl_aiplatform/ai_platform/GpuRgbMemory.hpp @@ -18,7 +18,7 @@ public: gpuid = _gpuid; timestamp = helpers::timer::get_timestamp(); - // cudaSetDevice(atoi(gpuid.c_str())); + cudaSetDevice(atoi(gpuid.c_str())); CHECK(cudaMalloc((void **)&pHwRgb, size * sizeof(unsigned char))); } diff --git a/tsl_aiplatform/ai_platform/MultiSourceProcess.cpp b/tsl_aiplatform/ai_platform/MultiSourceProcess.cpp index 127c98c..9eca760 100644 --- a/tsl_aiplatform/ai_platform/MultiSourceProcess.cpp +++ b/tsl_aiplatform/ai_platform/MultiSourceProcess.cpp @@ -9,6 +9,8 @@ #include "authority.h" +#include "NvJpegEncoder.hpp" + #ifdef AUTHORIZATION #include #ifdef _MSC_VER @@ -261,6 +263,9 @@ int CMultiSourceProcess::InitAlgorthim(tsl_aiplatform_param vptParam) { #endif // #ifdef __linux__ #endif // #ifdef AUTHORIZATION + // 启动算法处理线程 + startProcessByGpuid(); + return ret; } @@ -329,8 +334,6 @@ bool CMultiSourceProcess::AddTask(task_param _cur_task_param){ // 所有参数都准备好之后再启动解码 pDecManager->startDecodeByName(config.name); cout << "started task: " << config.name << endl; - // 启动算法处理线程 - startProcessByGpuid(); } // 启动算法处理线程 @@ -352,8 +355,14 @@ void 
CMultiSourceProcess::post_decode_thread(decode_cbk_userdata* userPtr, AVFra FFNvDecoder* dec = (FFNvDecoder*)ptr->opaque1; if (gpuFrame->format == AV_PIX_FMT_CUDA && dec != nullptr){ - cout << "decode task: " << dec->getName() << " gpuid: " << gpu_id_ << endl; + // cout << "decode task: " << dec->getName() << " gpuid: " << gpu_id_ << endl; GpuRgbMemory* gpuMem = new GpuRgbMemory(3, gpuFrame->width, gpuFrame->height, dec->getName(), gpu_id_ , true); + + if (gpuMem->getMem() == nullptr){ + cout << "new GpuRgbMemory failed !!!" << endl; + return; + } + // cudaSetDevice(atoi(gpu_id_.c_str())); // cuda_common::setColorSpace( ITU_709, 0 ); @@ -431,7 +440,11 @@ void CMultiSourceProcess::cuda_free_wrap(sy_img &img) { // 算法处理函数,由算法线程调用 void CMultiSourceProcess::algorthim_process_thread(){ + cudaSetDevice(atoi(gpu_id_.c_str())); + map<string, int> task_id_to_n_frame; + + int sum = 0; while(true){ /* step1. 授权check */ if (licence_status_ <= -3) { @@ -470,6 +483,10 @@ void CMultiSourceProcess::algorthim_process_thread(){ batch_img[i].set_data(gpuMem->getWidth(), gpuMem->getHeight(), gpuMem->getChannel(), gpuMem->getMem()); task_list.push_back(gpuMem->getId()); ++task_id_to_n_frame[gpuMem->getId()]; + + // string path = "/home/cmhu/data2/test/" + gpuMem->getId() + "_" + to_string(sum) + ".jpg"; + // saveJpeg(path.c_str(), gpuMem->getMem(), gpuMem->getWidth(), gpuMem->getHeight(), nullptr); // 验证 CUDAToRGB + // sum ++; } m_QueueMtx.unlock(); @@ -511,6 +528,8 @@ void CMultiSourceProcess::algorthim_process_thread(){ LOG_DEBUG("{}", msg); } #endif + + cout << "algorthim_process_thread end. 
" << endl; } // VPT 检测 @@ -669,7 +688,6 @@ void CMultiSourceProcess::algorithm_snapshot(vector& vpt_interest_task_i /* 轨迹结束帧需要做的算法模块 */ int CMultiSourceProcess::endframe_obj_process(const OBJ_KEY &obj_key, algorithm_type_t algor_type) { auto task_param_ptr = m_task_param_manager->get_task_algor_param(obj_key.video_id); - auto task_other_param_ptr = m_task_param_manager->get_task_other_param(obj_key.video_id); // 该路任务开启了抓拍功能 开始抓拍保存;若未开启抓拍,清空显存资源 if ((task_param_ptr->human_algors.find(algorithm_type_t::HUMAN_SNAPSHOT) != task_param_ptr->human_algors.end() || @@ -884,6 +902,7 @@ int CMultiSourceProcess::endframe_obj_process(const OBJ_KEY &obj_key, algorithm_ } #endif + auto task_other_param_ptr = m_task_param_manager->get_task_other_param(obj_key.video_id); /* 开启人脸抓拍分析算法模块,获取该目标的算法分析结果 返回结果+快照 最后释放资源 */ if (task_other_param_ptr->find(algorithm_type_t::FACE_SNAPSHOT) != task_other_param_ptr->end() && algor_type == algorithm_type_t::FACE_SNAPSHOT) { @@ -1095,8 +1114,10 @@ void CMultiSourceProcess::algorthim_face_detect(vector& task_list, sy_im std::vector facedet_result(image_size); std::vector> face_deleteObjectID(image_size); +#ifdef WITH_FACE_DET_SS m_face_det_ai_engine.ai_engine_process_batch2(face_det_interest_task_id, face_det_interest_imgs.data(), face_det_interest_imgs.size(), facedet_result, face_deleteObjectID); +#endif #if 0 // accum @@ -1129,6 +1150,7 @@ void CMultiSourceProcess::algorthim_face_detect(vector& task_list, sy_im #endif } +#ifdef POST_USE_RABBITMQ /* MQ队列的初始化 */ int CMultiSourceProcess::AddMqConn(mq_type_t mq_type, rabbitmq_conn_params_t mq_conn_param) { /* 初始化MQ队列 */ @@ -1170,6 +1192,7 @@ int CMultiSourceProcess::GetTaskStatus(const string taskID) { return SUCCESS; } +#endif bool CMultiSourceProcess::PauseTask(const string taskID) { FFNvDecoderManager* pDecManager = FFNvDecoderManager::getInstance(); @@ -1182,15 +1205,12 @@ bool CMultiSourceProcess::RestartTask(const string taskID){ } bool CMultiSourceProcess::finish_task(const string taskID, 
const bool delete_snapshot){ - FFNvDecoderManager* pDecManager = FFNvDecoderManager::getInstance(); - bool ret = pDecManager->closeDecoderByName(taskID); - #ifdef POST_USE_RABBITMQ auto json_str = helpers::gen_json::gen_office_task_heart_beat_json({taskID}); mq_manager_->publish(mq_type_t::HEART_BEAT_MQ, json_str.c_str(), true); #endif - #ifdef WITH_FACE_DET_SS +#ifdef WITH_FACE_DET_SS // 人脸任务结束 auto task_param_ptr = m_task_param_manager->get_task_algor_param(taskID); if (task_param_ptr->human_face_algors.find(algorithm_type_t::FACE_SNAPSHOT) != @@ -1201,7 +1221,14 @@ bool CMultiSourceProcess::finish_task(const string taskID, const bool delete_sna m_task_param_manager->delete_task_param(taskID); - if (delete_snapshot) { + return true; +} + +bool CMultiSourceProcess::FinishTask(const string taskID){ + FFNvDecoderManager* pDecManager = FFNvDecoderManager::getInstance(); + bool ret = pDecManager->closeDecoderByName(taskID); + + if (ret){ m_snapshot_reprocessing->delete_finishtask_snapshot(taskID); ((save_snapshot_reprocessing *)m_save_snapshot_reprocessing)->delete_finishtask(taskID); } @@ -1209,10 +1236,6 @@ bool CMultiSourceProcess::finish_task(const string taskID, const bool delete_sna return ret; } -bool CMultiSourceProcess::FinishTask(const string taskID){ - return finish_task(taskID,true); -} - void CMultiSourceProcess::CloseAllTask(){ m_bfinish = true; diff --git a/tsl_aiplatform/ai_platform/NvJpegEncoder.hpp b/tsl_aiplatform/ai_platform/NvJpegEncoder.hpp new file mode 100644 index 0000000..6f10135 --- /dev/null +++ b/tsl_aiplatform/ai_platform/NvJpegEncoder.hpp @@ -0,0 +1,94 @@ +/* +*主要用于测试显卡上的数据是否正常 +*/ + +#include <nvjpeg.h> + +#include <iostream> +#include <fstream> +#include <vector> + + +#define CHECK_NVJPEG(S) do {nvjpegStatus_t status; \ + status = S; \ + if (status != NVJPEG_STATUS_SUCCESS ) std::cout << __LINE__ <<" CHECK_NVJPEG - status = " << status << std::endl; \ + } while (false) + + +int saveJpeg(const char * filepath, unsigned char* d_srcBGR, int width, int height, cudaStream_t 
stream) +{ + nvjpegHandle_t nvjpeg_handle; + nvjpegEncoderState_t encoder_state; + nvjpegEncoderParams_t encoder_params; + + cudaEvent_t ev_start, ev_end; + cudaEventCreate(&ev_start); + cudaEventCreate(&ev_end); + + nvjpegImage_t input; + nvjpegInputFormat_t input_format = NVJPEG_INPUT_BGRI; + int image_width = width; + int image_height = height; + + // int channel_size = image_width * image_height; + // for (int i = 0; i < 3; i++) + // { + // input.pitch[i] = image_width; + // (cudaMalloc((void**)&(input.channel[i]), channel_size)); + // (cudaMemset(input.channel[i], 50 * 40 * i, channel_size)); + // } + + input.channel[0] = d_srcBGR; + input.pitch[0] = image_width * 3; + + nvjpegBackend_t backend = NVJPEG_BACKEND_DEFAULT; + + CHECK_NVJPEG(nvjpegCreate(backend, nullptr, &nvjpeg_handle)); + + CHECK_NVJPEG(nvjpegEncoderParamsCreate(nvjpeg_handle, &encoder_params, stream)); + CHECK_NVJPEG(nvjpegEncoderStateCreate(nvjpeg_handle, &encoder_state, stream)); + + // set params + CHECK_NVJPEG(nvjpegEncoderParamsSetEncoding(encoder_params, nvjpegJpegEncoding_t::NVJPEG_ENCODING_PROGRESSIVE_DCT_HUFFMAN, stream)); + CHECK_NVJPEG(nvjpegEncoderParamsSetOptimizedHuffman(encoder_params, 1, stream)); + CHECK_NVJPEG(nvjpegEncoderParamsSetQuality(encoder_params, 70, stream)); + CHECK_NVJPEG(nvjpegEncoderParamsSetSamplingFactors(encoder_params, nvjpegChromaSubsampling_t::NVJPEG_CSS_420, stream)); + + cudaEventRecord(ev_start); + CHECK_NVJPEG(nvjpegEncodeImage(nvjpeg_handle, encoder_state, encoder_params, &input, input_format, image_width, image_height, stream)); + cudaEventRecord(ev_end); + + std::vector<unsigned char> obuffer; + size_t length; + CHECK_NVJPEG(nvjpegEncodeRetrieveBitstream( + nvjpeg_handle, + encoder_state, + NULL, + &length, + stream)); + + obuffer.resize(length); + CHECK_NVJPEG(nvjpegEncodeRetrieveBitstream( + nvjpeg_handle, + encoder_state, + obuffer.data(), + &length, + stream)); + + cudaEventSynchronize(ev_end); + + // 用完销毁,避免显存泄露 + nvjpegEncoderParamsDestroy(encoder_params); + 
nvjpegEncoderStateDestroy(encoder_state); + nvjpegDestroy(nvjpeg_handle); + + float ms; + cudaEventElapsedTime(&ms, ev_start, ev_end); + // std::cout << "time spend " << ms << " ms" << std::endl; + + std::ofstream outputFile(filepath, std::ios::out | std::ios::binary); + outputFile.write(reinterpret_cast<const char *>(obuffer.data()), static_cast<std::streamsize>(length)); + outputFile.close(); + + return 0; +} \ No newline at end of file diff --git a/tsl_aiplatform/ai_platform/header.h b/tsl_aiplatform/ai_platform/header.h index cb11a9d..e069551 100644 --- a/tsl_aiplatform/ai_platform/header.h +++ b/tsl_aiplatform/ai_platform/header.h @@ -445,7 +445,7 @@ enum ai_log_level { #endif -#define POST_USE_RABBITMQ +// #define POST_USE_RABBITMQ #ifdef POST_USE_RABBITMQ /** diff --git a/tsl_aiplatform/ai_platform/stl_aiplatform.cpp b/tsl_aiplatform/ai_platform/stl_aiplatform.cpp index c4e5167..293fe29 100644 --- a/tsl_aiplatform/ai_platform/stl_aiplatform.cpp +++ b/tsl_aiplatform/ai_platform/stl_aiplatform.cpp @@ -24,7 +24,6 @@ int add_mq_conn(void *handle, mq_type_t tstatus, rabbitmq_conn_params_t mq_conn_ int res = tools->AddMqConn(tstatus, mq_conn_param); return res; } -#endif int get_task_status(void *handle, char *task_id) @@ -33,6 +32,7 @@ int get_task_status(void *handle, char *task_id) int res = tools->GetTaskStatus(task_id); return res; } +#endif int add_task(void *handle, task_param param) diff --git a/tsl_aiplatform/ai_platform/stl_aiplatform.h b/tsl_aiplatform/ai_platform/stl_aiplatform.h index 6f13208..a34e9bb 100644 --- a/tsl_aiplatform/ai_platform/stl_aiplatform.h +++ b/tsl_aiplatform/ai_platform/stl_aiplatform.h @@ -42,11 +42,13 @@ extern "C" * @return TSL_AIPLATFORM_API */ TSL_AIPLATFORM_API int add_mq_conn(void *handle, mq_type_t tstatus, rabbitmq_conn_params_t mq_conn_param); -#endif + TSL_AIPLATFORM_API int get_task_status(void *handle, char *task_id); + +#endif + /************************************************************************* * FUNCTION: add_task * PURPOSE: 添加任务 diff --git 
a/tsl_aiplatform/helpers/helpers.h b/tsl_aiplatform/helpers/helpers.h index 5eb1e96..81e6ec9 100644 --- a/tsl_aiplatform/helpers/helpers.h +++ b/tsl_aiplatform/helpers/helpers.h @@ -16,5 +16,6 @@ #include "logger.hpp" #include "str_helper.hpp" #include "os_helper.hpp" +#include "timer.hpp"