// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"

#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"

using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;

namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    namespace raw {
        template <class T, class EltwiseOp, class ActivationOp, std::size_t N>
        __global__ void eltwise_op_generic_op_vec(Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params eltwise_params, const typename ActivationOp::Params act_params) {
            using vector_type = get_vector_type_t<T, N>;

            auto output_vPtr = vector_type::get_pointer(output.data());
            auto x_vPtr = vector_type::get_pointer(x.data());
            auto y_vPtr = vector_type::get_pointer(y.data());

            EltwiseOp eltwise_op(eltwise_params);
            ActivationOp activation_op(act_params);

            /* each iteration handles N consecutive elements: eltwise op first, then the activation */
            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
                vector_type vec_x, vec_y;
                v_load(vec_x, x_vPtr[i]);
                v_load(vec_y, y_vPtr[i]);
                for (int j = 0; j < vec_x.size(); j++)
                    vec_x.data[j] = activation_op(eltwise_op(vec_x.data[j], vec_y.data[j]));
                v_store(output_vPtr[i], vec_x);
            }
        }
    } /* namespace raw */

    template <class T, class EltwiseOp, class ActivationOp, std::size_t N> static
    void launch_vectorized_eltwise_op_generic_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& eltwise_params, const typename ActivationOp::Params& act_params) {
        CV_Assert(is_fully_aligned<T>(output, N));
        CV_Assert(is_fully_aligned<T>(x, N));
        CV_Assert(is_fully_aligned<T>(y, N));

        auto kernel = raw::eltwise_op_generic_op_vec<T, EltwiseOp, ActivationOp, N>;
        auto policy = make_policy(kernel, output.size() / N, 0, stream);
        launch_kernel(kernel, policy, output, x, y, eltwise_params, act_params);
    }

    template <class T, class EltwiseOp, class ActivationOp> static
    void eltwise_op_generic_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& eltwise_params = {}, const typename ActivationOp::Params& act_params = {}) {
        CV_Assert(output.size() == x.size());
        CV_Assert(output.size() == y.size());

        /* dispatch to the widest vector width the tensors' alignment allows */
        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(x, 4) && is_fully_aligned<T>(y, 4)) {
            launch_vectorized_eltwise_op_generic_op<T, EltwiseOp, ActivationOp, 4>(stream, output, x, y, eltwise_params, act_params);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(x, 2) && is_fully_aligned<T>(y, 2)) {
            launch_vectorized_eltwise_op_generic_op<T, EltwiseOp, ActivationOp, 2>(stream, output, x, y, eltwise_params, act_params);
        } else {
            launch_vectorized_eltwise_op_generic_op<T, EltwiseOp, ActivationOp, 1>(stream, output, x, y, eltwise_params, act_params);
        }
    }

    template <class T>
    void eltwise_sum_2_relu(const Stream& stream, Span<T> output, View<T> x, View<T> y, T slope) {
        eltwise_op_generic_op<T, SumFunctor<T>, ReLUFunctor<T>>(stream, output, x, y, {}, {slope});
    }

    template <class T>
    void eltwise_sum_2_clipped_relu(const Stream& stream, Span<T> output, View<T> x, View<T> y, T floor, T ceiling) {
        CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
        eltwise_op_generic_op<T, SumFunctor<T>, ClippedReLUFunctor<T>>(stream, output, x, y, {}, {floor, ceiling});
    }

    template <class T>
    void eltwise_sum_2_tanh(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
        eltwise_op_generic_op<T, SumFunctor<T>, TanHFunctor<T>>(stream, output, x, y);
    }

    template <class T>
    void eltwise_sum_2_swish(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
        eltwise_op_generic_op<T, SumFunctor<T>, SwishFunctor<T>>(stream, output, x, y);
    }

    template <class T>
    void eltwise_sum_2_mish(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
        eltwise_op_generic_op<T, SumFunctor<T>, MishFunctor<T>>(stream, output, x, y);
    }

    template <class T>
    void eltwise_sum_2_sigmoid(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
        eltwise_op_generic_op<T, SumFunctor<T>, SigmoidFunctor<T>>(stream, output, x, y);
    }
    template <class T>
    void eltwise_sum_2_power(const Stream& stream, Span<T> output, View<T> x, View<T> y, T exp, T scale, T shift) {
        eltwise_op_generic_op<T, SumFunctor<T>, PowerFunctor<T>>(stream, output, x, y, {}, {exp, scale, shift});
    }

#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
    template void eltwise_sum_2_relu<__half>(const Stream&, Span<__half>, View<__half>, View<__half>, __half);
    template void eltwise_sum_2_clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, View<__half>, __half, __half);
    template void eltwise_sum_2_tanh<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
    template void eltwise_sum_2_swish<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
    template void eltwise_sum_2_mish<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
    template void eltwise_sum_2_sigmoid<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
    template void eltwise_sum_2_power<__half>(const Stream&, Span<__half>, View<__half>, View<__half>, __half, __half, __half);
#endif

    template void eltwise_sum_2_relu<float>(const Stream&, Span<float>, View<float>, View<float>, float);
    template void eltwise_sum_2_clipped_relu<float>(const Stream&, Span<float>, View<float>, View<float>, float, float);
    template void eltwise_sum_2_tanh<float>(const Stream&, Span<float>, View<float>, View<float>);
    template void eltwise_sum_2_swish<float>(const Stream&, Span<float>, View<float>, View<float>);
    template void eltwise_sum_2_mish<float>(const Stream&, Span<float>, View<float>, View<float>);
    template void eltwise_sum_2_sigmoid<float>(const Stream&, Span<float>, View<float>, View<float>);
    template void eltwise_sum_2_power<float>(const Stream&, Span<float>, View<float>, View<float>, float, float, float);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */