// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"

#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"

#include <opencv2/core.hpp>

#include <cstddef>

using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;

namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    namespace raw {
        /* broadcast add: output[i] = input[i] + bias[(i / inner_size) % bias.size()],
         * processed in vectors of N elements
         */
        template <class T, std::size_t N>
        __global__ void biasN_vec(Span<T> output, View<T> input, size_type inner_size, View<T> bias)
        {
            using vector_type = get_vector_type_t<T, N>;

            auto output_vPtr = vector_type::get_pointer(output.data());
            auto input_vPtr = vector_type::get_pointer(input.data());

            inner_size /= vector_type::size();
            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
                const index_type bias_idx = (i / inner_size) % bias.size();

                vector_type vec;
                v_load(vec, input_vPtr[i]);
                for (int j = 0; j < vec.size(); j++)
                    vec.data[j] = vec.data[j] + bias[bias_idx];
                v_store(output_vPtr[i], vec);
            }
        }

        /* broadcast multiply: output[i] = input[i] * weights[(i / inner_size) % weights.size()] */
        template <class T, std::size_t N>
        __global__ void scaleN_vec(Span<T> output, View<T> input, size_type inner_size, View<T> weights)
        {
            using vector_type = get_vector_type_t<T, N>;

            auto output_vPtr = vector_type::get_pointer(output.data());
            auto input_vPtr = vector_type::get_pointer(input.data());

            inner_size /= vector_type::size();
            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
                const index_type scale_idx = (i / inner_size) % weights.size();

                vector_type vec;
                v_load(vec, input_vPtr[i]);
                for (int j = 0; j < vec.size(); j++)
                    vec.data[j] = vec.data[j] * weights[scale_idx];
                v_store(output_vPtr[i], vec);
            }
        }

        /* uniform affine transform: output[i] = alpha * input[i] + beta */
        template <class T, std::size_t N>
        __global__ void scale1_with_bias1_vec(Span<T> output, View<T> input, T alpha, T beta)
        {
            using vector_type = get_vector_type_t<T, N>;

            auto output_vPtr = vector_type::get_pointer(output.data());
            auto input_vPtr = vector_type::get_pointer(input.data());

            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
                vector_type vec;
                v_load(vec, input_vPtr[i]);
                for (int j = 0; j < vec.size(); j++)
                    vec.data[j] = alpha * vec.data[j] + beta;
                v_store(output_vPtr[i], vec);
            }
        }

        /* fused broadcast multiply-add: output[i] = input[i] * weights[idx] + bias[idx],
         * where idx = (i / inner_size) % weights.size()
         */
        template <class T, std::size_t N>
        __global__ void scaleN_with_biasN_vec(Span<T> output, View<T> input, size_type inner_size, View<T> weights, View<T> bias)
        {
            using vector_type = get_vector_type_t<T, N>;

            auto output_vPtr = vector_type::get_pointer(output.data());
            auto input_vPtr = vector_type::get_pointer(input.data());

            inner_size /= vector_type::size();
            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
                const index_type scale_idx = (i / inner_size) % weights.size();

                vector_type vec;
                v_load(vec, input_vPtr[i]);
                for (int j = 0; j < vec.size(); j++)
                    vec.data[j] = vec.data[j] * weights[scale_idx] + bias[scale_idx];
                v_store(output_vPtr[i], vec);
            }
        }
    }

    template <class T, std::size_t N> static
    void launch_biasN_vec_kernel(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> bias) {
        CV_Assert(is_fully_aligned<T>(output, N));
        CV_Assert(is_fully_aligned<T>(input, N));
        /* N must divide inner_size so that a vector never straddles a bias boundary */
        CV_Assert(inner_size % N == 0);

        auto kernel = raw::biasN_vec<T, N>;
        auto policy = make_policy(kernel, output.size() / N, 0, stream);
        launch_kernel(kernel, policy, output, input, inner_size, bias);
    }

    template <class T>
    void biasN(
        const Stream& stream,
        TensorSpan<T> output,
        TensorView<T> input, std::size_t inner_size,
        TensorView<T> bias)
    {
        CV_Assert(is_shape_same(input, output));

        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
            launch_biasN_vec_kernel<T, 4>(stream, output, input, inner_size, bias);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
            launch_biasN_vec_kernel<T, 2>(stream, output, input, inner_size, bias);
        } else {
            launch_biasN_vec_kernel<T, 1>(stream, output, input, inner_size, bias);
        }
    }

#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
    template void biasN<__half>(const Stream&, TensorSpan<__half>, TensorView<__half>, std::size_t, TensorView<__half>);
#endif
    template void biasN<float>(const Stream&, TensorSpan<float>, TensorView<float>, std::size_t, TensorView<float>);
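
    /* Reference semantics for biasN (scalar CPU sketch, assuming contiguous
     * NCHW storage with inner_size == H * W and one bias entry per channel;
     * `biasN_ref` is a hypothetical name used only for illustration):
     *
     *   void biasN_ref(float* output, const float* input, std::size_t size,
     *                  std::size_t inner_size, const float* bias, std::size_t bias_size)
     *   {
     *       for (std::size_t i = 0; i < size; i++)
     *           output[i] = input[i] + bias[(i / inner_size) % bias_size];
     *   }
     */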

    template <class T, std::size_t N> static
    void launch_scaleN_vec_kernel(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> weights) {
        CV_Assert(is_fully_aligned<T>(output, N));
        CV_Assert(is_fully_aligned<T>(input, N));
        CV_Assert(inner_size % N == 0);

        auto kernel = raw::scaleN_vec<T, N>;
        auto policy = make_policy(kernel, output.size() / N, 0, stream);
        launch_kernel(kernel, policy, output, input, inner_size, weights);
    }

    template <class T>
    void scaleN(
        const Stream& stream,
        TensorSpan<T> output,
        TensorView<T> input, std::size_t inner_size,
        TensorView<T> weights)
    {
        CV_Assert(is_shape_same(input, output));

        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
            launch_scaleN_vec_kernel<T, 4>(stream, output, input, inner_size, weights);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
            launch_scaleN_vec_kernel<T, 2>(stream, output, input, inner_size, weights);
        } else {
            launch_scaleN_vec_kernel<T, 1>(stream, output, input, inner_size, weights);
        }
    }

#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
    template void scaleN<__half>(const Stream&, TensorSpan<__half>, TensorView<__half>, std::size_t, TensorView<__half>);
#endif
    template void scaleN<float>(const Stream&, TensorSpan<float>, TensorView<float>, std::size_t, TensorView<float>);

    template <class T, std::size_t N> static
    void launch_scale1_with_bias1_vec_kernel(const Stream& stream, Span<T> output, View<T> input, T alpha, T beta) {
        CV_Assert(is_fully_aligned<T>(output, N));
        CV_Assert(is_fully_aligned<T>(input, N));

        auto kernel = raw::scale1_with_bias1_vec<T, N>;
        auto policy = make_policy(kernel, output.size() / N, 0, stream);
        launch_kernel(kernel, policy, output, input, alpha, beta);
    }

    template <class T>
    void scale1_with_bias1(const Stream& stream, Span<T> output, View<T> input, T alpha, T beta) {
        CV_Assert(output.size() == input.size());

        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
            launch_scale1_with_bias1_vec_kernel<T, 4>(stream, output, input, alpha, beta);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
            launch_scale1_with_bias1_vec_kernel<T, 2>(stream, output, input, alpha, beta);
        } else {
            launch_scale1_with_bias1_vec_kernel<T, 1>(stream, output, input, alpha, beta);
        }
    }

#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
    template void scale1_with_bias1<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
#endif
    template void scale1_with_bias1<float>(const Stream&, Span<float>, View<float>, float, float);
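
    /* Dispatch sketch: each entry point picks the widest vector specialization
     * whose alignment requirements the buffers satisfy; for T = float, <T, 4>
     * corresponds to a float4-style 16-byte memory transaction and <T, 1> is
     * the always-safe scalar fallback. A hypothetical host-side call applying
     * y = 2x + 1 element-wise, assuming device spans `out` and `in` of equal size:
     *
     *   kernels::scale1_with_bias1<float>(stream, out, in, 2.0f, 1.0f);
     */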

    template <class T, std::size_t N> static
    void launch_scaleN_with_biasN_vec_kernel(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> weights, View<T> bias) {
        CV_Assert(is_fully_aligned<T>(output, N));
        CV_Assert(is_fully_aligned<T>(input, N));
        CV_Assert(inner_size % N == 0);

        auto kernel = raw::scaleN_with_biasN_vec<T, N>;
        auto policy = make_policy(kernel, output.size() / N, 0, stream);
        launch_kernel(kernel, policy, output, input, inner_size, weights, bias);
    }

    template <class T>
    void scaleN_with_biasN(
        const Stream& stream,
        TensorSpan<T> output,
        TensorView<T> input, std::size_t inner_size,
        TensorView<T> weights, TensorView<T> bias)
    {
        CV_Assert(is_shape_same(input, output));
        CV_Assert(weights.size() == bias.size());

        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
            launch_scaleN_with_biasN_vec_kernel<T, 4>(stream, output, input, inner_size, weights, bias);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
            launch_scaleN_with_biasN_vec_kernel<T, 2>(stream, output, input, inner_size, weights, bias);
        } else {
            launch_scaleN_with_biasN_vec_kernel<T, 1>(stream, output, input, inner_size, weights, bias);
        }
    }

#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
    template void scaleN_with_biasN<__half>(const Stream&, TensorSpan<__half>, TensorView<__half>, std::size_t, TensorView<__half>, TensorView<__half>);
#endif
    template void scaleN_with_biasN<float>(const Stream&, TensorSpan<float>, TensorView<float>, std::size_t, TensorView<float>, TensorView<float>);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */
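
/* Usage sketch for scaleN_with_biasN: it applies the fused per-channel affine
 * transform y = weights[c] * x + bias[c], e.g. a BatchNorm folded into scale
 * and shift terms. Hypothetical call for an NCHW tensor, assuming
 * weights.size() == bias.size() == C:
 *
 *   kernels::scaleN_with_biasN<float>(stream, output, input, H * W, weights, bias);
 */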