Blame view

3rdparty/opencv-4.5.4/modules/imgproc/src/imgwarp.avx2.cpp 4.27 KB
f4334277   Hu Chunming   提交3rdparty
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
  /*M///////////////////////////////////////////////////////////////////////////////////////
  //
  //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
  //
  //  By downloading, copying, installing or using the software you agree to this license.
  //  If you do not agree to this license, do not download, install,
  //  copy or use the software.
  //
  //
  //                           License Agreement
  //                For Open Source Computer Vision Library
  //
  // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
  // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
  // Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
  // Third party copyrights are property of their respective owners.
  //
  // Redistribution and use in source and binary forms, with or without modification,
  // are permitted provided that the following conditions are met:
  //
  //   * Redistribution's of source code must retain the above copyright notice,
  //     this list of conditions and the following disclaimer.
  //
  //   * Redistribution's in binary form must reproduce the above copyright notice,
  //     this list of conditions and the following disclaimer in the documentation
  //     and/or other materials provided with the distribution.
  //
  //   * The name of the copyright holders may not be used to endorse or promote products
  //     derived from this software without specific prior written permission.
  //
  // This software is provided by the copyright holders and contributors "as is" and
  // any express or implied warranties, including, but not limited to, the implied
  // warranties of merchantability and fitness for a particular purpose are disclaimed.
  // In no event shall the Intel Corporation or contributors be liable for any direct,
  // indirect, incidental, special, exemplary, or consequential damages
  // (including, but not limited to, procurement of substitute goods or services;
  // loss of use, data, or profits; or business interruption) however caused
  // and on any theory of liability, whether in contract, strict liability,
  // or tort (including negligence or otherwise) arising in any way out of
  // the use of this software, even if advised of the possibility of such damage.
  //
  //M*/
  
  /* ////////////////////////////////////////////////////////////////////
  //
  //  Geometrical transforms on images and matrices: rotation, zoom etc.
  //
  // */
  
  #include "precomp.hpp"
  #include "imgwarp.hpp"
  
  namespace cv
  {
  namespace opt_AVX2
  {
  
  // Computes one block line of the warpAffine coordinate/interpolation tables
  // using AVX2, 16 pixels per iteration.
  //
  // Inputs:
  //   adelta, bdelta - per-column fixed-point x/y increments (AB_BITS fractional bits)
  //   X0, Y0         - fixed-point row offsets added to every element
  //   bw             - number of pixels in the block line
  // Outputs:
  //   xy    - interleaved int16 (x, y) integer source coordinates, 2 shorts per pixel
  //   alpha - per-pixel packed interpolation-table index: (fy << INTER_BITS) | fx
  // Returns the number of pixels actually processed (largest multiple of 16
  // <= bw); the scalar caller is expected to finish the remaining tail.
  int warpAffineBlockline(int *adelta, int *bdelta, short* xy, short* alpha, int X0, int Y0, int bw)
  {
      // Fixed-point precision of adelta/bdelta; at least 10 bits and never
      // less than INTER_BITS so the shifts below are non-negative.
      const int AB_BITS = MAX(10, (int)INTER_BITS);
      int x1 = 0;
      // Mask selecting the INTER_BITS fractional bits (INTER_TAB_SIZE == 1 << INTER_BITS).
      __m256i fxy_mask = _mm256_set1_epi32(INTER_TAB_SIZE - 1);
      // Row offsets broadcast to all 8 lanes.
      __m256i XX = _mm256_set1_epi32(X0), YY = _mm256_set1_epi32(Y0);
      for (; x1 <= bw - 16; x1 += 16)
      {
          __m256i tx0, tx1, ty0, ty1;
          // Two batches of 8: absolute fixed-point coordinates = delta + offset.
          tx0 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(adelta + x1)), XX);
          ty0 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(bdelta + x1)), YY);
          tx1 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(adelta + x1 + 8)), XX);
          ty1 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(bdelta + x1 + 8)), YY);
  
          // Drop precision from AB_BITS to INTER_BITS fractional bits
          // (arithmetic shift preserves the sign of negative coordinates).
          tx0 = _mm256_srai_epi32(tx0, AB_BITS - INTER_BITS);
          ty0 = _mm256_srai_epi32(ty0, AB_BITS - INTER_BITS);
          tx1 = _mm256_srai_epi32(tx1, AB_BITS - INTER_BITS);
          ty1 = _mm256_srai_epi32(ty1, AB_BITS - INTER_BITS);
  
          // Fractional parts (low INTER_BITS bits), narrowed int32 -> int16.
          // NOTE: _mm256_packs_epi32 packs within each 128-bit lane, so the
          // 16 results come out in lane-interleaved order; fixed up below.
          __m256i fx_ = _mm256_packs_epi32(_mm256_and_si256(tx0, fxy_mask),
              _mm256_and_si256(tx1, fxy_mask));
          __m256i fy_ = _mm256_packs_epi32(_mm256_and_si256(ty0, fxy_mask),
              _mm256_and_si256(ty1, fxy_mask));
          // Integer parts, saturated to int16 (same in-lane pack ordering as xy
          // store order, so no permute is needed for tx0/ty0).
          tx0 = _mm256_packs_epi32(_mm256_srai_epi32(tx0, INTER_BITS),
              _mm256_srai_epi32(tx1, INTER_BITS));
          ty0 = _mm256_packs_epi32(_mm256_srai_epi32(ty0, INTER_BITS),
              _mm256_srai_epi32(ty1, INTER_BITS));
          // Combine fractions into a single table index: (fy << INTER_BITS) + fx.
          fx_ = _mm256_adds_epi16(fx_, _mm256_slli_epi16(fy_, INTER_BITS));
          // Swap the two middle 64-bit quarters (order 3,1,2,0) to undo the
          // in-lane interleaving of the packs above before the linear store.
          fx_ = _mm256_permute4x64_epi64(fx_, (3 << 6) + (1 << 4) + (2 << 2) + 0);
  
          // Interleave integer x/y into (x, y) short pairs: unpacklo/unpackhi on
          // the lane-ordered packs yields the 16 pairs in sequential pixel order.
          _mm256_storeu_si256((__m256i*)(xy + x1 * 2), _mm256_unpacklo_epi16(tx0, ty0));
          _mm256_storeu_si256((__m256i*)(xy + x1 * 2 + 16), _mm256_unpackhi_epi16(tx0, ty0));
          _mm256_storeu_si256((__m256i*)(alpha + x1), fx_);
      }
      // Clear the upper YMM halves to avoid AVX->SSE transition penalties in
      // subsequent non-VEX code.
      _mm256_zeroupper();
      return x1;
  }
  
  }
  }
  /* End of file. */