// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"

#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"

#include "../cuda4dnn/kernels/scale_shift.hpp"

#include <cstddef>
#include <cstdint>

using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;

namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    namespace raw {
        /* applies the activation functor element-wise; each thread processes N
         * elements per iteration using vectorized loads and stores
         */
        template <class T, class ActivationOp, std::size_t N>
        __global__ void generic_op_vec(Span<T> output, View<T> input, const typename ActivationOp::Params params) {
            using vector_type = get_vector_type_t<T, N>;

            auto output_vPtr = vector_type::get_pointer(output.data());
            auto input_vPtr = vector_type::get_pointer(input.data());

            ActivationOp activation_op(params);

            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
                vector_type vec;
                v_load(vec, input_vPtr[i]);
                for (int j = 0; j < vector_type::size(); j++)
                    vec.data[j] = activation_op(vec.data[j]);
                v_store(output_vPtr[i], vec);
            }
        }

        /* channelwise ReLU (PReLU): the slope applied to an element is selected by
         * the channel it belongs to; `inner_size` is the number of elements per channel
         */
        template <class T, std::size_t N>
        __global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) {
            using vector_type = get_vector_type_t<T, N>;

            auto output_vPtr = vector_type::get_pointer(output.data());
            auto input_vPtr = vector_type::get_pointer(input.data());

            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
                const index_type c = (i / inner_size) % slope.size();

                vector_type vec;
                v_load(vec, input_vPtr[i]);
                for (int j = 0; j < vector_type::size(); j++)
                    vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c];
                v_store(output_vPtr[i], vec);
            }
        }
    } /* namespace raw */

    template <class T, class ActivationOp, std::size_t N> static
    void launch_vectorized_generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params) {
        CV_Assert(is_fully_aligned<T>(output, N));
        CV_Assert(is_fully_aligned<T>(input, N));

        auto kernel = raw::generic_op_vec<T, ActivationOp, N>;
        auto policy = make_policy(kernel, output.size() / N, 0, stream);
        launch_kernel(kernel, policy, output, input, params);
    }

    /* dispatches to the widest vectorized kernel that the tensors' alignment permits */
    template <class T, class ActivationOp> static
    void generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params = {}) {
        CV_Assert(input.size() == output.size());

        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
            launch_vectorized_generic_op<T, ActivationOp, 4>(stream, output, input, params);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
            launch_vectorized_generic_op<T, ActivationOp, 2>(stream, output, input, params);
        } else {
            launch_vectorized_generic_op<T, ActivationOp, 1>(stream, output, input, params);
        }
    }
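    /* The ActivationOp template parameter must provide a nested `Params` struct and a
     * device-callable `operator()`, as the functors in "functors.hpp" do. A minimal
     * sketch of that contract (illustrative only; `example_functor` is hypothetical
     * and not part of functors.hpp):
     *
     *   template <class T>
     *   struct example_functor {
     *       struct Params { T slope; };                // aggregate, so {} and {slope} both work
     *       __device__ example_functor(const Params& params) : slope{params.slope} { }
     *       __device__ T operator()(T value) { return value >= T(0) ? value : slope * value; }
     *       T slope;
     *   };
     */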
    template <class T>
    void relu(const Stream& stream, Span<T> output, View<T> input, T slope) {
        generic_op<T, ReLUFunctor<T>>(stream, output, input, {slope});
    }

    template <class T>
    void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) {
        CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
        generic_op<T, ClippedReLUFunctor<T>>(stream, output, input, {floor, ceiling});
    }

    template <class T>
    void tanh(const Stream& stream, Span<T> output, View<T> input) {
        generic_op<T, TanHFunctor<T>>(stream, output, input);
    }

    template <class T>
    void swish(const Stream& stream, Span<T> output, View<T> input) {
        generic_op<T, SwishFunctor<T>>(stream, output, input);
    }

    template <class T>
    void mish(const Stream& stream, Span<T> output, View<T> input) {
        generic_op<T, MishFunctor<T>>(stream, output, input);
    }

    template <class T>
    void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
        generic_op<T, SigmoidFunctor<T>>(stream, output, input);
    }

    template <class T>
    void elu(const Stream& stream, Span<T> output, View<T> input) {
        generic_op<T, ELUFunctor<T>>(stream, output, input);
    }

    template <class T>
    void bnll(const Stream& stream, Span<T> output, View<T> input) {
        generic_op<T, BNLLFunctor<T>>(stream, output, input);
    }

    template <class T>
    void abs(const Stream& stream, Span<T> output, View<T> input) {
        generic_op<T, AbsValFunctor<T>>(stream, output, input);
    }

    template <class T>
    void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) {
        CV_Assert(input.size() == output.size());

        /* an exponent of one reduces the power function to an affine transform,
         * which the cheaper scale-shift kernel handles
         */
        if (static_cast<float>(exp) == 1.0f) {
            scale1_with_bias1(stream, output, input, scale, shift);
            return;
        }

        generic_op<T, PowerFunctor<T>>(stream, output, input, {exp, scale, shift});
    }

    template <class T>
    void exp(const Stream& stream, Span<T> output, View<T> input, T normScale, T normShift) {
        generic_op<T, ExpFunctor<T>>(stream, output, input, {normScale, normShift});
    }

    /* half-precision instantiations require compute capability 5.3 or higher */
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
    template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half);
    template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
    template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
    template void swish<__half>(const Stream&, Span<__half>, View<__half>);
    template void mish<__half>(const Stream&, Span<__half>, View<__half>);
    template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
    template void elu<__half>(const Stream&, Span<__half>, View<__half>);
    template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
    template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
    template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
    template void exp<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
#endif

    template void relu<float>(const Stream&, Span<float>, View<float>, float);
    template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float);
    template void tanh<float>(const Stream&, Span<float>, View<float>);
    template void swish<float>(const Stream&, Span<float>, View<float>);
    template void mish<float>(const Stream&, Span<float>, View<float>);
    template void sigmoid<float>(const Stream&, Span<float>, View<float>);
    template void elu<float>(const Stream&, Span<float>, View<float>);
    template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
    template void bnll<float>(const Stream&, Span<float>, View<float>);
    template void power<float>(const Stream&, Span<float>, View<float>, float, float, float);
    template void exp<float>(const Stream&, Span<float>, View<float>, float, float);

    template <class T, std::size_t N> static
    void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
        CV_Assert(is_fully_aligned<T>(output, N));
        CV_Assert(is_fully_aligned<T>(input, N));
        CV_Assert(inner_size % N == 0);

        auto kernel = raw::axiswise_relu_vec<T, N>;
        auto policy = make_policy(kernel, output.size() / N, 0, stream);
        launch_kernel(kernel, policy, output, input, inner_size / N, slope);
    }

    template <class T>
    void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
        CV_Assert(input.size() == output.size());

        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
            launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
            launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope);
        } else {
            launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope);
        }
    }

#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
    template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>);
#endif
    template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */
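/* Usage sketch (hypothetical caller, not part of this file): the cuda4dnn activation
 * layers invoke these kernels with spans over device memory. Assuming `stream`,
 * `output` and `input` are a valid csl::Stream, csl::Span<float> and csl::View<float>
 * of equal size:
 *
 *   cv::dnn::cuda4dnn::kernels::relu<float>(stream, output, input, 0.0f);               // plain ReLU
 *   cv::dnn::cuda4dnn::kernels::clipped_relu<float>(stream, output, input, 0.0f, 6.0f); // ReLU6
 */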