#pragma once

#include <cstdint>
#include <memory>
#include <vector>

#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "AlgorithmResult.h"
#include "buffers.h"

//! Deleter for TensorRT objects held in smart pointers.
struct InferDeleter
{
    template <typename T>
    void operator()(T* obj) const
    {
        delete obj;
    }
};

template <typename T>
using SampleUniquePtr = std::unique_ptr<T, InferDeleter>;

class DogPoseDetectorOnnx
{
public:
    DogPoseDetectorOnnx();
    ~DogPoseDetectorOnnx();

    bool init();
    std::vector<AlgorithmResult> detect(unsigned char* pGpuBgb, int src_width, int src_height);

private:
    bool m_bUseFP16{true};
    int32_t m_dlaCore{-1};                               //!< Specify the DLA core to run the network on.

    std::shared_ptr<nvinfer1::IRuntime> mRuntime;        //!< The TensorRT runtime used to deserialize the engine
    std::shared_ptr<nvinfer1::ICudaEngine> mEngine;      //!< The TensorRT engine used to run the network

    void** m_data_buffer;                                //!< Pointers to the engine's input/output binding buffers
    int m_input_node_index;
    nvinfer1::Dims m_input_node_dim;
    int m_output_node_index;
    nvinfer1::Dims m_output_node_dim;

    SampleUniquePtr<nvinfer1::IExecutionContext> context;
};
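
//! Example usage (a minimal sketch; it assumes the BGR frame has already been
//! uploaded to device memory, e.g. via cudaMalloc/cudaMemcpy, and that
//! AlgorithmResult is the element type returned by detect()):
//!
//!   DogPoseDetectorOnnx detector;
//!   if (!detector.init())
//!       return;                            // engine build/deserialization failed
//!   std::vector<AlgorithmResult> results =
//!       detector.detect(d_bgrFrame,        // d_bgrFrame: GPU pointer to the BGR image (hypothetical name)
//!                       frameWidth, frameHeight);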