;*****************************************************************************
;* dct-64.asm: x86_64 transform and zigzag
;*****************************************************************************
;* Copyright (C) 2003-2024 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*          Laurent Aimar <fenrir@via.ecp.fr>
;*          Min Chen <chenm001@163.com>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

cextern pd_32
cextern pw_pixel_max
cextern pw_2
cextern pw_m2
cextern pw_32
cextern hsub_mul

; in: size, m0..m7, temp, temp
; out: m0..m7
%macro DCT8_1D 11
    SUMSUB_BA %1, %6, %5, %11 ; %6=s34, %5=d34
    SUMSUB_BA %1, %7, %4, %11 ; %7=s25, %4=d25
    SUMSUB_BA %1, %8, %3, %11 ; %8=s16, %3=d16
    SUMSUB_BA %1, %9, %2, %11 ; %9=s07, %2=d07

    SUMSUB_BA %1, %7, %8, %11 ; %7=a1, %8=a3
    SUMSUB_BA %1, %6, %9, %11 ; %6=a0, %9=a2

    psra%1   m%10, m%2, 1
    padd%1   m%10, m%2
    padd%1   m%10, m%3
    padd%1   m%10, m%4 ; %10=a4

    psra%1   m%11, m%5, 1
    padd%1   m%11, m%5
    padd%1   m%11, m%3
    psub%1   m%11, m%4 ; %11=a7

    SUMSUB_BA %1, %5, %2
    psub%1   m%2, m%4
    psub%1   m%5, m%3
    psra%1   m%4, 1
    psra%1   m%3, 1
    psub%1   m%2, m%4 ; %2=a5
    psub%1   m%5, m%3 ; %5=a6

    psra%1   m%3, m%11, 2
    padd%1   m%3, m%10 ; %3=b1
    psra%1   m%10, 2
    psub%1   m%10, m%11 ; %10=b7

    SUMSUB_BA %1, %7, %6, %11 ; %7=b0, %6=b4

    psra%1   m%4, m%8, 1
    padd%1   m%4, m%9 ; %4=b2
    psra%1   m%9, 1
    psub%1   m%9, m%8 ; %9=b6

    psra%1   m%8, m%5, 2
    padd%1   m%8, m%2 ; %8=b3
    psra%1   m%2, 2
    psub%1   m%5, m%2 ; %5=b5

    SWAP %2, %7, %5, %8, %9, %10
%endmacro

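; in: size, m0..m7, temp, temp
; out: m0..m7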
%macro IDCT8_1D 11
    SUMSUB_BA %1, %6, %2, %10 ; %6=a0, %2=a2

    psra%1   m%10, m%3, 1
    padd%1   m%10, m%3
    padd%1   m%10, m%5
    padd%1   m%10, m%7  ; %10=a7

    psra%1   m%11, m%4, 1
    psub%1   m%11, m%8 ; %11=a4
    psra%1   m%8, 1
    padd%1   m%8, m%4  ; %8=a6

    psra%1   m%4, m%7, 1
    padd%1   m%4, m%7
    padd%1   m%4, m%9
    psub%1   m%4, m%3  ; %4=a5

    psub%1   m%3, m%5
    psub%1   m%7, m%5
    padd%1   m%3, m%9
    psub%1   m%7, m%9
    psra%1   m%5, 1
    psra%1   m%9, 1
    psub%1   m%3, m%5  ; %3=a3
    psub%1   m%7, m%9  ; %7=a1

    psra%1   m%5, m%10, 2
    padd%1   m%5, m%7  ; %5=b1
    psra%1   m%7, 2
    psub%1   m%10, m%7  ; %10=b7

    SUMSUB_BA %1, %8, %6, %7  ; %8=b0, %6=b6
    SUMSUB_BA %1, %11, %2, %7 ; %11=b2, %2=b4

    psra%1   m%9, m%4, 2
    padd%1   m%9, m%3 ; %9=b3
    psra%1   m%3, 2
    psub%1   m%3, m%4 ; %3=b5

    SUMSUB_BA %1, %10, %8, %7 ; %10=c0, %8=c7
    SUMSUB_BA %1, %3, %11, %7 ; %3=c1,  %11=c6
    SUMSUB_BA %1, %9, %2, %7  ; %9=c2,  %2=c5
    SUMSUB_BA %1, %5, %6, %7  ; %5=c3,  %6=c4

    SWAP %11, %4
    SWAP  %2, %10, %7
    SWAP  %4, %9, %8
%endmacro

%if HIGH_BIT_DEPTH

%macro SUB8x8_DCT8 0
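;-----------------------------------------------------------------------------
; void sub8x8_dct8( dctcoef dct[8][8], pixel *pix1, pixel *pix2 )
; (high bit depth: dctcoef is 32-bit, pixel is 16-bit)
;-----------------------------------------------------------------------------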
cglobal sub8x8_dct8, 3,3,14
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    LOAD_DIFF8x4 0,1,2,3, none,none, r1, r2
    LOAD_DIFF8x4 4,5,6,7, none,none, r1, r2

    DCT8_1D w, 0,1,2,3,4,5,6,7, 8,9

    TRANSPOSE4x4W 0,1,2,3,8
    WIDEN_SXWD 0,8
    WIDEN_SXWD 1,9
    WIDEN_SXWD 2,10
    WIDEN_SXWD 3,11
    DCT8_1D d, 0,8,1,9,2,10,3,11, 12,13
    mova  [r0+0x00], m0
    mova  [r0+0x20], m8
    mova  [r0+0x40], m1
    mova  [r0+0x60], m9
    mova  [r0+0x80], m2
    mova  [r0+0xA0], m10
    mova  [r0+0xC0], m3
    mova  [r0+0xE0], m11

    TRANSPOSE4x4W 4,5,6,7,0
    WIDEN_SXWD 4,0
    WIDEN_SXWD 5,1
    WIDEN_SXWD 6,2
    WIDEN_SXWD 7,3
    DCT8_1D d,4,0,5,1,6,2,7,3, 8,9
    mova  [r0+0x10], m4
    mova  [r0+0x30], m0
    mova  [r0+0x50], m5
    mova  [r0+0x70], m1
    mova  [r0+0x90], m6
    mova  [r0+0xB0], m2
    mova  [r0+0xD0], m7
    mova  [r0+0xF0], m3
    ret
%endmacro ; SUB8x8_DCT8

INIT_XMM sse2
SUB8x8_DCT8
INIT_XMM sse4
SUB8x8_DCT8
INIT_XMM avx
SUB8x8_DCT8

%macro ADD8x8_IDCT8 0
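;-----------------------------------------------------------------------------
; void add8x8_idct8( pixel *p_dst, dctcoef dct[8][8] )
; (high bit depth: dctcoef is 32-bit, pixel is 16-bit)
;-----------------------------------------------------------------------------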
cglobal add8x8_idct8, 2,2,16
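    ; offset r1 to the middle of the 256-byte int32 coefficient block so
    ; every load/store below fits in an 8-bit displacement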
    add r1, 128
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    mova     m0, [r1-128]
    mova     m1, [r1-96]
    mova     m2, [r1-64]
    mova     m3, [r1-32]
    mova     m4, [r1+ 0]
    mova     m5, [r1+32]
    mova     m6, [r1+64]
    mova     m7, [r1+96]
    IDCT8_1D d,0,1,2,3,4,5,6,7,8,9
    TRANSPOSE4x4D 0,1,2,3,8
    TRANSPOSE4x4D 4,5,6,7,8
    paddd     m0, [pd_32]
    paddd     m4, [pd_32]
    mova [r1+64], m6
    mova [r1+96], m7
    mova      m8, [r1-112]
    mova      m9, [r1-80]
    mova     m10, [r1-48]
    mova     m11, [r1-16]
    mova     m12, [r1+16]
    mova     m13, [r1+48]
    mova     m14, [r1+80]
    mova     m15, [r1+112]
    IDCT8_1D d,8,9,10,11,12,13,14,15,6,7
    TRANSPOSE4x4D 8,9,10,11,6
    TRANSPOSE4x4D 12,13,14,15,6
    IDCT8_1D d,0,1,2,3,8,9,10,11,6,7
    mova [r1-112], m8
    mova  [r1-80], m9
    mova       m6, [r1+64]
    mova       m7, [r1+96]
    IDCT8_1D d,4,5,6,7,12,13,14,15,8,9
    pxor       m8, m8
    mova       m9, [pw_pixel_max]
    STORE_DIFF m0, m4, m8, m9, [r0+0*FDEC_STRIDEB]
    STORE_DIFF m1, m5, m8, m9, [r0+1*FDEC_STRIDEB]
    STORE_DIFF m2, m6, m8, m9, [r0+2*FDEC_STRIDEB]
    STORE_DIFF m3, m7, m8, m9, [r0+3*FDEC_STRIDEB]
    mova       m0, [r1-112]
    mova       m1, [r1-80]
    STORE_DIFF  m0, m12, m8, m9, [r0+4*FDEC_STRIDEB]
    STORE_DIFF  m1, m13, m8, m9, [r0+5*FDEC_STRIDEB]
    STORE_DIFF m10, m14, m8, m9, [r0+6*FDEC_STRIDEB]
    STORE_DIFF m11, m15, m8, m9, [r0+7*FDEC_STRIDEB]
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

%else ; !HIGH_BIT_DEPTH

%macro DCT_SUB8 0
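;-----------------------------------------------------------------------------
; void sub8x8_dct( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------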
cglobal sub8x8_dct, 3,3,10
    add r2, 4*FDEC_STRIDE
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 9
    LOAD_DIFF8x4 0, 1, 2, 3, 8, 9, r1, r2-4*FDEC_STRIDE
    LOAD_DIFF8x4 4, 5, 6, 7, 8, 9, r1, r2-4*FDEC_STRIDE
    DCT4_1D 0, 1, 2, 3, 8
    TRANSPOSE2x4x4W 0, 1, 2, 3, 8
    DCT4_1D 4, 5, 6, 7, 8
    TRANSPOSE2x4x4W 4, 5, 6, 7, 8
    DCT4_1D 0, 1, 2, 3, 8
    STORE_DCT 0, 1, 2, 3, r0, 0
    DCT4_1D 4, 5, 6, 7, 8
    STORE_DCT 4, 5, 6, 7, r0, 64
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal sub8x8_dct8, 3,3,11
    add r2, 4*FDEC_STRIDE
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 10
    LOAD_DIFF8x4  0, 1, 2, 3, 4, 10, r1, r2-4*FDEC_STRIDE
    LOAD_DIFF8x4  4, 5, 6, 7, 8, 10, r1, r2-4*FDEC_STRIDE
    DCT8_1D    w, 0,1,2,3,4,5,6,7,8,9
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    DCT8_1D    w, 0,1,2,3,4,5,6,7,8,9
    movdqa  [r0+0x00], m0
    movdqa  [r0+0x10], m1
    movdqa  [r0+0x20], m2
    movdqa  [r0+0x30], m3
    movdqa  [r0+0x40], m4
    movdqa  [r0+0x50], m5
    movdqa  [r0+0x60], m6
    movdqa  [r0+0x70], m7
    ret
%endmacro

INIT_XMM sse2
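; for the sse2 build, movaps/movlhps are drop-in replacements for
; movdqa/punpcklqdq on register operands and have shorter encodings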
%define movdqa movaps
%define punpcklqdq movlhps
DCT_SUB8
%undef movdqa
%undef punpcklqdq
INIT_XMM ssse3
DCT_SUB8
INIT_XMM avx
DCT_SUB8
INIT_XMM xop
DCT_SUB8

INIT_YMM avx2
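;-----------------------------------------------------------------------------
; void sub16x16_dct8( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------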
cglobal sub16x16_dct8, 3,3,10
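    ; offset r0 so each 16x8 pass can store both 128-byte halves of its
    ; output with 8-bit displacements ([r0-0x80+x] and [r0+x])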
    add  r0, 128
    add  r2, 4*FDEC_STRIDE
    call .sub16x8_dct8
    add  r0, 256
    add  r1, FENC_STRIDE*8
    add  r2, FDEC_STRIDE*8
    call .sub16x8_dct8
    RET
.sub16x8_dct8:
    LOAD_DIFF16x2_AVX2 0, 1, 2, 3, 0, 1
    LOAD_DIFF16x2_AVX2 2, 3, 4, 5, 2, 3
    LOAD_DIFF16x2_AVX2 4, 5, 6, 7, 4, 5
    LOAD_DIFF16x2_AVX2 6, 7, 8, 9, 6, 7
    DCT8_1D    w, 0,1,2,3,4,5,6,7,8,9
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    DCT8_1D    w, 0,1,2,3,4,5,6,7,8,9
    mova    [r0-0x80+0x00], xm0
    vextracti128 [r0+0x00], m0, 1
    mova    [r0-0x80+0x10], xm1
    vextracti128 [r0+0x10], m1, 1
    mova    [r0-0x80+0x20], xm2
    vextracti128 [r0+0x20], m2, 1
    mova    [r0-0x80+0x30], xm3
    vextracti128 [r0+0x30], m3, 1
    mova    [r0-0x80+0x40], xm4
    vextracti128 [r0+0x40], m4, 1
    mova    [r0-0x80+0x50], xm5
    vextracti128 [r0+0x50], m5, 1
    mova    [r0-0x80+0x60], xm6
    vextracti128 [r0+0x60], m6, 1
    mova    [r0-0x80+0x70], xm7
    vextracti128 [r0+0x70], m7, 1
    ret

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2,11
    add r0, 4*FDEC_STRIDE
    pxor m7, m7
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 9
    movdqa  m0, [r1+0x00]
    movdqa  m1, [r1+0x10]
    movdqa  m2, [r1+0x20]
    movdqa  m3, [r1+0x30]
    movdqa  m4, [r1+0x40]
    movdqa  m5, [r1+0x50]
    movdqa  m6, [r1+0x60]
    movdqa  m7, [r1+0x70]
    IDCT8_1D      w,0,1,2,3,4,5,6,7,8,10
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    paddw         m0, [pw_32] ; rounding for the >>6 at the end
    IDCT8_1D      w,0,1,2,3,4,5,6,7,8,10
    DIFFx2 m0, m1, m8, m9, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]
    DIFFx2 m2, m3, m8, m9, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]
    DIFFx2 m4, m5, m8, m9, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]
    DIFFx2 m6, m7, m8, m9, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]
    STORE_IDCT m1, m3, m5, m7
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

;-----------------------------------------------------------------------------
; void add8x8_idct( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
%macro ADD8x8 0
cglobal add8x8_idct, 2,2,11
    add  r0, 4*FDEC_STRIDE
    pxor m7, m7
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 9
    mova   m0, [r1+ 0]
    mova   m2, [r1+16]
    mova   m1, [r1+32]
    mova   m3, [r1+48]
    SBUTTERFLY qdq, 0, 1, 4
    SBUTTERFLY qdq, 2, 3, 4
    mova   m4, [r1+64]
    mova   m6, [r1+80]
    mova   m5, [r1+96]
    mova   m7, [r1+112]
    SBUTTERFLY qdq, 4, 5, 8
    SBUTTERFLY qdq, 6, 7, 8
    IDCT4_1D w,0,1,2,3,8,10
    TRANSPOSE2x4x4W 0,1,2,3,8
    IDCT4_1D w,4,5,6,7,8,10
    TRANSPOSE2x4x4W 4,5,6,7,8
    paddw m0, [pw_32]
    IDCT4_1D w,0,1,2,3,8,10
    paddw m4, [pw_32]
    IDCT4_1D w,4,5,6,7,8,10
    DIFFx2 m0, m1, m8, m9, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]
    DIFFx2 m2, m3, m8, m9, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]
    DIFFx2 m4, m5, m8, m9, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]
    DIFFx2 m6, m7, m8, m9, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]
    STORE_IDCT m1, m3, m5, m7
    ret
%endmacro ; ADD8x8

INIT_XMM sse2
ADD8x8
INIT_XMM avx
ADD8x8

%endif ; !HIGH_BIT_DEPTH