;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA 32
; index vector for vpermps: reverses the order of the 8 floats in a ymm
pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0

SECTION .text
;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
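; Rough C equivalent (an illustrative sketch; the scalar reference lives in
; libavutil/float_dsp.c). The unrolled loop below consumes 64 bytes (16
; floats) per pass, so len is expected to be a multiple of 16 and the
; pointers aligned suitably for mova:
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i];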
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea lenq, [lend*4 - 64]   ; byte offset of the last 64-byte block
ALIGN 16
.loop:
    ; unrolled to 64 bytes per iteration regardless of register width
%assign a 0
%rep 32/mmsize
    mova      m0,     [src0q + lenq + (a+0)*mmsize]
    mova      m1,     [src0q + lenq + (a+1)*mmsize]
    mulps     m0, m0, [src1q + lenq + (a+0)*mmsize]
    mulps     m1, m1, [src1q + lenq + (a+1)*mmsize]
    mova      [dstq + lenq + (a+0)*mmsize], m0
    mova      [dstq + lenq + (a+1)*mmsize], m1
%assign a a+2
%endrep

    sub       lenq, 64
    jge       .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif
;-----------------------------------------------------------------------------
; void vector_dmul(double *dst, const double *src0, const double *src1, int len)
;-----------------------------------------------------------------------------
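; Rough C equivalent (illustrative sketch): the double-precision analogue of
; vector_fmul above; the loop consumes mmsize*4 bytes per pass:
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i];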
%macro VECTOR_DMUL 0
cglobal vector_dmul, 4,4,4, dst, src0, src1, len
    lea       lenq, [lend*8 - mmsize*4]   ; len is in doubles, 8 bytes each
ALIGN 16
.loop:
    movaps    m0,     [src0q + lenq + 0*mmsize]
    movaps    m1,     [src0q + lenq + 1*mmsize]
    movaps    m2,     [src0q + lenq + 2*mmsize]
    movaps    m3,     [src0q + lenq + 3*mmsize]
    mulpd     m0, m0, [src1q + lenq + 0*mmsize]
    mulpd     m1, m1, [src1q + lenq + 1*mmsize]
    mulpd     m2, m2, [src1q + lenq + 2*mmsize]
    mulpd     m3, m3, [src1q + lenq + 3*mmsize]
    movaps    [dstq + lenq + 0*mmsize], m0
    movaps    [dstq + lenq + 1*mmsize], m1
    movaps    [dstq + lenq + 2*mmsize], m2
    movaps    [dstq + lenq + 3*mmsize], m3

    sub       lenq, mmsize*4
    jge       .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL
%endif
;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
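; Rough C equivalent (illustrative sketch): multiply-accumulate of a vector
; by a scalar coefficient, 64 bytes per loop pass:
;     for (int i = 0; i < len; i++)
;         dst[i] += src[i] * mul;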
%macro VECTOR_FMAC_SCALAR 0
; on UNIX64 the scalar already arrives in xmm0, so only three GPR args are used
%if UNIX64
cglobal vector_fmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    SWAP 0, 2               ; mul is passed in xmm2 on WIN64
%endif
    shufps      xm0, xm0, 0 ; splat the scalar across the register
%if cpuflag(avx)
    vinsertf128  m0, m0, xm0, 1
%endif ; cpuflag
%endif
    lea    lenq, [lend*4-64]
.loop:
%if cpuflag(fma3)
    mova     m1,     [dstq+lenq]
    mova     m2,     [dstq+lenq+1*mmsize]
    fmaddps  m1, m0, [srcq+lenq], m1
    fmaddps  m2, m0, [srcq+lenq+1*mmsize], m2
%else ; cpuflag
    mulps    m1, m0, [srcq+lenq]
    mulps    m2, m0, [srcq+lenq+1*mmsize]
%if mmsize < 32
    mulps    m3, m0, [srcq+lenq+2*mmsize]
    mulps    m4, m0, [srcq+lenq+3*mmsize]
%endif ; mmsize
    addps    m1, m1, [dstq+lenq]
    addps    m2, m2, [dstq+lenq+1*mmsize]
%if mmsize < 32
    addps    m3, m3, [dstq+lenq+2*mmsize]
    addps    m4, m4, [dstq+lenq+3*mmsize]
%endif ; mmsize
%endif ; cpuflag
    mova  [dstq+lenq], m1
    mova  [dstq+lenq+1*mmsize], m2
%if mmsize < 32
    mova  [dstq+lenq+2*mmsize], m3
    mova  [dstq+lenq+3*mmsize], m4
%endif ; mmsize
    sub    lenq, 64
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMAC_SCALAR
%endif
;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
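; Rough C equivalent (illustrative sketch):
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;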
%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    movss    m0, mulm
%elif WIN64
    SWAP 0, 2               ; mul is passed in xmm2 on WIN64
%endif
    shufps   m0, m0, 0      ; splat the scalar across the register
    lea    lenq, [lend*4-mmsize]
.loop:
    mova     m1, [srcq+lenq]
    mulps    m1, m0
    mova  [dstq+lenq], m1
    sub    lenq, mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR
;------------------------------------------------------------------------------
; void ff_vector_dmac_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
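; Rough C equivalent (illustrative sketch): double-precision variant of
; vector_fmac_scalar:
;     for (int i = 0; i < len; i++)
;         dst[i] += src[i] * mul;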
%macro VECTOR_DMAC_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmac_scalar, 2,4,5, dst, src, mul, len, lenaddr
    mov          lenq, lenaddrm
    VBROADCASTSD   m0, mulm
%else
%if UNIX64
cglobal vector_dmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
    SWAP 0, 2               ; mul is passed in xmm2 on WIN64
%endif
    movlhps       xm0, xm0  ; duplicate the scalar into both halves
%if cpuflag(avx)
    vinsertf128    m0, m0, xm0, 1
%endif
%endif
    lea    lenq, [lend*8-mmsize*4]
.loop:
%if cpuflag(fma3)
    movaps   m1,     [dstq+lenq]
    movaps   m2,     [dstq+lenq+1*mmsize]
    movaps   m3,     [dstq+lenq+2*mmsize]
    movaps   m4,     [dstq+lenq+3*mmsize]
    fmaddpd  m1, m0, [srcq+lenq], m1
    fmaddpd  m2, m0, [srcq+lenq+1*mmsize], m2
    fmaddpd  m3, m0, [srcq+lenq+2*mmsize], m3
    fmaddpd  m4, m0, [srcq+lenq+3*mmsize], m4
%else ; cpuflag
    mulpd    m1, m0, [srcq+lenq]
    mulpd    m2, m0, [srcq+lenq+1*mmsize]
    mulpd    m3, m0, [srcq+lenq+2*mmsize]
    mulpd    m4, m0, [srcq+lenq+3*mmsize]
    addpd    m1, m1, [dstq+lenq]
    addpd    m2, m2, [dstq+lenq+1*mmsize]
    addpd    m3, m3, [dstq+lenq+2*mmsize]
    addpd    m4, m4, [dstq+lenq+3*mmsize]
%endif ; cpuflag
    movaps [dstq+lenq], m1
    movaps [dstq+lenq+1*mmsize], m2
    movaps [dstq+lenq+2*mmsize], m3
    movaps [dstq+lenq+3*mmsize], m4
    sub    lenq, mmsize*4
    jge .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_DMAC_SCALAR
%endif
;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
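; Rough C equivalent (illustrative sketch):
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;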
%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov          lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSD   m0, mulm
%else
%if WIN64
    SWAP 0, 2               ; mul is passed in xmm2 on WIN64
%endif
    movlhps       xm0, xm0  ; duplicate the scalar into both halves
%if cpuflag(avx)
    vinsertf128   ym0, ym0, xm0, 1
%endif
%endif
    lea          lenq, [lend*8-2*mmsize]
.loop:
    mulpd          m1, m0, [srcq+lenq       ]
    mulpd          m2, m0, [srcq+lenq+mmsize]
    movaps [dstq+lenq       ], m1
    movaps [dstq+lenq+mmsize], m2
    sub          lenq, 2*mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif
;-----------------------------------------------------------------------------
; vector_fmul_window(float *dst, const float *src0,
;                    const float *src1, const float *win, int len);
;-----------------------------------------------------------------------------
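; Rough C equivalent (illustrative sketch; matches the scalar reference in
; libavutil/float_dsp.c): an MDCT-style overlap of src0 and src1 under the
; window win, written from both ends of dst:
;     dst += len; win += len; src0 += len;
;     for (int i = -len, j = len - 1; i < 0; i++, j--) {
;         float s0 = src0[i], s1 = src1[j], wi = win[i], wj = win[j];
;         dst[i] = s0 * wj - s1 * wi;
;         dst[j] = s0 * wi + s1 * wj;
;     }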
INIT_XMM sse
cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
    shl     lend, 2
    lea    len1q, [lenq - mmsize]
    add    src0q, lenq
    add     dstq, lenq
    add     winq, lenq
    neg     lenq
.loop:
    mova      m0, [winq  + lenq]
    mova      m4, [src0q + lenq]
    mova      m1, [winq  + len1q]
    mova      m5, [src1q + len1q]
    ; reverse the window and src1 vectors read from the top end
    shufps    m1, m1, 0x1b
    shufps    m5, m5, 0x1b
    mova      m2, m0
    mova      m3, m1
    mulps     m2, m4              ; win[i]*src0[i]
    mulps     m3, m5              ; win[j]*src1[j]
    mulps     m1, m4              ; win[j]*src0[i]
    mulps     m0, m5              ; win[i]*src1[j]
    addps     m2, m3
    subps     m1, m0
    shufps    m2, m2, 0x1b        ; re-reverse before storing at the top end
    mova      [dstq + lenq], m1
    mova      [dstq + len1q], m2
    sub    len1q, mmsize
    add     lenq, mmsize
    jl .loop
    RET
;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
;                 const float *src2, int len)
;-----------------------------------------------------------------------------
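; Rough C equivalent (illustrative sketch):
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i] + src2[i];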
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
    mova    m0,   [src0q + lenq]
    mova    m1,   [src0q + lenq + mmsize]
%if cpuflag(fma3)
    mova    m2,     [src2q + lenq]
    mova    m3,     [src2q + lenq + mmsize]
    fmaddps m0, m0, [src1q + lenq], m2
    fmaddps m1, m1, [src1q + lenq + mmsize], m3
%else
    mulps   m0, m0, [src1q + lenq]
    mulps   m1, m1, [src1q + lenq + mmsize]
    addps   m0, m0, [src2q + lenq]
    addps   m1, m1, [src2q + lenq + mmsize]
%endif
    mova    [dstq + lenq], m0
    mova    [dstq + lenq + mmsize], m1

    sub     lenq, 2*mmsize
    jge     .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_ADD
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMUL_ADD
%endif
;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
;-----------------------------------------------------------------------------
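; Rough C equivalent (illustrative sketch): src1 is read back to front,
; which is what the shuffles/permutes in the loop below implement:
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[len - 1 - i];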
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
%if cpuflag(avx2)
    movaps  m2, [pd_reverse]     ; permutation that reverses a whole ymm
%endif
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
%if cpuflag(avx2)
    vpermps m0, m2, [src1q]
    vpermps m1, m2, [src1q+mmsize]
%elif cpuflag(avx)
    ; no cross-lane shuffle on plain avx: swap the 128-bit halves,
    ; then reverse within each lane
    vmovaps     xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps     m0, m0, m0, q0123
    vmovaps     xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps     m1, m1, m1, q0123
%else
    mova    m0, [src1q]
    mova    m1, [src1q + mmsize]
    shufps  m0, m0, 0x1b
    shufps  m1, m1, 0x1b
%endif
    mulps   m0, m0, [src0q + lenq + mmsize]
    mulps   m1, m1, [src0q + lenq]
    movaps  [dstq + lenq + mmsize], m0
    movaps  [dstq + lenq], m1
    add     src1q, 2*mmsize
    sub     lenq,  2*mmsize
    jge     .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_REVERSE
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
VECTOR_FMUL_REVERSE
%endif
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
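; Rough C equivalent (illustrative sketch):
;     float p = 0.0f;
;     for (int i = 0; i < len; i++)
;         p += v1[i] * v2[i];
;     return p;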
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    shl   offsetd, 2
    add       v1q, offsetq
    add       v2q, offsetq
    neg   offsetq
    xorps    xmm0, xmm0
.loop:
    movaps   xmm1, [v1q+offsetq]
    mulps    xmm1, [v2q+offsetq]
    addps    xmm0, xmm1
    add   offsetq, 16
    js .loop
    ; horizontal sum of the four partial products in xmm0
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss     r0m,  xmm0
    fld dword r0m
%endif
    RET
INIT_YMM fma3
cglobal scalarproduct_float, 3,5,8, v1, v2, size, len, offset

    ; main loop: 32 floats (128 bytes) per pass, four independent
    ; fused multiply-add accumulators to hide fma latency
    movups  m4, [v1q+offsetq]
    movups  m5, [v1q+offsetq + 32]
    movups  m6, [v1q+offsetq + 64]
    movups  m7, [v1q+offsetq + 96]
    fmaddps m0, m4, [v2q+offsetq      ], m0
    fmaddps m1, m5, [v2q+offsetq +  32], m1
    fmaddps m2, m6, [v2q+offsetq +  64], m2
    fmaddps m3, m7, [v2q+offsetq +  96], m3

    ; fold the upper 128-bit lane into the lower one before reducing
    vextractf128 xmm2, m0, 1

    ; 16-float stage
    movups  m4, [v1q+offsetq]
    movups  m5, [v1q+offsetq + 32]
    fmaddps m0, m4, [v2q+offsetq], m0
    fmaddps m1, m5, [v2q+offsetq + 32], m1

    vextractf128 xmm2, m0, 1

    ; 8-float stage
    movups  m4, [v1q+offsetq]
    fmaddps m0, m4, [v2q+offsetq], m0

    vextractf128 xmm2, m0, 1

    ; final 4-float xmm tail loop
    movaps  xmm1, [v1q+offsetq]
    mulps   xmm1, [v2q+offsetq]
;---------------------------------------------------------------------------------
; double scalarproduct_double(const double *v1, const double *v2, size_t len)
;---------------------------------------------------------------------------------
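; Rough C equivalent (illustrative sketch):
;     double p = 0.0;
;     for (size_t i = 0; i < len; i++)
;         p += v1[i] * v2[i];
;     return p;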
%macro SCALARPRODUCT_DOUBLE 0
cglobal scalarproduct_double, 3,3,8, v1, v2, offset
    shl   offsetq, 3             ; len is in doubles, 8 bytes each
    add       v1q, offsetq
    add       v2q, offsetq
    neg   offsetq
    ; four independent accumulators
    xorpd      m0, m0
    xorpd      m1, m1
    xorpd      m2, m2
    xorpd      m3, m3
ALIGN 16
.loop:
    movapd     m4, [v1q+offsetq+mmsize*0]
    movapd     m5, [v1q+offsetq+mmsize*1]
    movapd     m6, [v1q+offsetq+mmsize*2]
    movapd     m7, [v1q+offsetq+mmsize*3]
    mulpd      m4, [v2q+offsetq+mmsize*0]
    mulpd      m5, [v2q+offsetq+mmsize*1]
    mulpd      m6, [v2q+offsetq+mmsize*2]
    mulpd      m7, [v2q+offsetq+mmsize*3]
    addpd      m0, m4
    addpd      m1, m5
    addpd      m2, m6
    addpd      m3, m7
    add   offsetq, mmsize*4
    jl .loop
    ; horizontal sum of the accumulators
    addpd      m0, m1
    addpd      m2, m3
    addpd      m0, m2
%if mmsize == 32
    vextractf128 xm1, m0, 1
    addpd     xm0, xm1
%endif
    movhlps   xm1, xm0
    addsd     xm0, xm1
%if ARCH_X86_32
    movsd     r0m, xm0
    fld qword r0m
%endif
    RET
%endmacro

INIT_XMM sse2
SCALARPRODUCT_DOUBLE
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
SCALARPRODUCT_DOUBLE
%endif
;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
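; Rough C equivalent (illustrative sketch): an in-place sum/difference
; butterfly over the two vectors:
;     for (int i = 0; i < len; i++) {
;         float t  = src0[i] - src1[i];
;         src0[i] += src1[i];
;         src1[i]  = t;
;     }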
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
    shl       lend, 2
    add      src0q, lenq
    add      src1q, lenq
    neg       lenq
.loop:
    mova        m0, [src0q + lenq]
    mova        m1, [src1q + lenq]
    subps       m2, m0, m1
    addps       m0, m0, m1
    mova [src1q + lenq], m2
    mova [src0q + lenq], m0
    add       lenq, mmsize
    jl .loop
    RET