30 static void imdct36_blocks_ ## CPU(float *out, float *buf, float *in, int count, int switch_point, int block_type);\
31 void ff_imdct36_float_ ## CPU(float *out, float *buf, float *in, float *win);
50 #if HAVE_6REGS && HAVE_SSE_INLINE
/* Multiply-accumulate: rt += ra * rb.  Fully parenthesized (including the
 * whole expansion) so the macro stays safe if it ever appears inside a
 * larger expression; behavior for existing statement-style uses is
 * unchanged. */
#define MACS(rt, ra, rb) ((rt) += (ra) * (rb))
/* Multiply-subtract: rt -= ra * rb.  Fully parenthesized for the same
 * reason as MACS. */
#define MLSS(rt, ra, rb) ((rt) -= (ra) * (rb))
/* Apply "op" (MACS or MLSS) to eight window/sample pairs spaced 64 floats
 * apart, accumulating into "sum".  The do/while(0) wrapper turns the eight
 * statements into a single statement, so the macro is safe after an
 * unbraced if/else and requires the usual trailing semicolon at the call
 * site. */
#define SUM8(op, sum, w, p)                   \
    do {                                      \
        op(sum, (w)[0 * 64], (p)[0 * 64]);    \
        op(sum, (w)[1 * 64], (p)[1 * 64]);    \
        op(sum, (w)[2 * 64], (p)[2 * 64]);    \
        op(sum, (w)[3 * 64], (p)[3 * 64]);    \
        op(sum, (w)[4 * 64], (p)[4 * 64]);    \
        op(sum, (w)[5 * 64], (p)[5 * 64]);    \
        op(sum, (w)[6 * 64], (p)[6 * 64]);    \
        op(sum, (w)[7 * 64], (p)[7 * 64]);    \
    } while (0)
68 const float *win2,
float *sum1,
float *sum2,
int len)
71 const float *win1a = win1+
len;
72 const float *win2a = win2+
len;
73 const float *bufa = buf+
len;
74 float *sum1a = sum1+
len;
75 float *sum2a = sum2+
len;
79 "movaps " #a "(%1,%0), %%xmm1 \n\t" \
80 "movaps " #a "(%3,%0), %%xmm2 \n\t" \
81 "mulps %%xmm2, %%xmm1 \n\t" \
82 "subps %%xmm1, %%xmm0 \n\t" \
83 "mulps " #b "(%2,%0), %%xmm2 \n\t" \
84 "subps %%xmm2, %%xmm4 \n\t" \
88 "xorps %%xmm0, %%xmm0 \n\t"
89 "xorps %%xmm4, %%xmm4 \n\t"
100 "movaps %%xmm0, (%4,%0) \n\t"
101 "movaps %%xmm4, (%5,%0) \n\t"
105 :
"r"(win1a),
"r"(win2a),
"r"(bufa),
"r"(sum1a),
"r"(sum2a)
111 static void apply_window_mp3(
float *
in,
float *win,
int *unused,
float *
out,
123 "movaps 0(%0), %%xmm0 \n\t" \
124 "movaps 16(%0), %%xmm1 \n\t" \
125 "movaps 32(%0), %%xmm2 \n\t" \
126 "movaps 48(%0), %%xmm3 \n\t" \
127 "movaps %%xmm0, 0(%1) \n\t" \
128 "movaps %%xmm1, 16(%1) \n\t" \
129 "movaps %%xmm2, 32(%1) \n\t" \
130 "movaps %%xmm3, 48(%1) \n\t" \
131 "movaps 64(%0), %%xmm0 \n\t" \
132 "movaps 80(%0), %%xmm1 \n\t" \
133 "movaps 96(%0), %%xmm2 \n\t" \
134 "movaps 112(%0), %%xmm3 \n\t" \
135 "movaps %%xmm0, 64(%1) \n\t" \
136 "movaps %%xmm1, 80(%1) \n\t" \
137 "movaps %%xmm2, 96(%1) \n\t" \
138 "movaps %%xmm3, 112(%1) \n\t"
139 ::
"r"(
in),
"r"(in+512)
144 apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);
146 SUM8(
MACS, suma[0], win + 32, in + 48);
/* Emit SSE text for one group of four output samples of the synthesis
 * window overlap:
 *   out1 = reverse(sumd) - suma   (shufps $0x1b reverses the four floats)
 *   out2 = reverse(sumc) + sumb
 * sum{a,b,c,d} / out{1,2} are byte offsets pasted into the operand
 * strings; %0 is the output base, %1-%4 the four sum arrays. */
#define SUMS(suma, sumb, sumc, sumd, out1, out2) \
    "movups " #sumd "(%4), %%xmm0   \n\t" \
    "shufps $0x1b, %%xmm0, %%xmm0   \n\t" \
    "subps " #suma "(%1), %%xmm0    \n\t" \
    "movaps %%xmm0," #out1 "(%0)    \n\t" \
\
    "movups " #sumc "(%3), %%xmm0   \n\t" \
    "shufps $0x1b, %%xmm0, %%xmm0   \n\t" \
    "addps " #sumb "(%2), %%xmm0    \n\t" \
    "movaps %%xmm0," #out2 "(%0)    \n\t"
165 SUMS( 0, 48, 4, 52, 0, 112)
166 SUMS(16, 32, 20, 36, 16, 96)
167 SUMS(32, 16, 36, 20, 32, 80)
168 SUMS(48, 0, 52, 4, 48, 64)
171 :"
r"(&suma[0]), "
r"(&sumb[0]), "
r"(&sumc[0]), "
r"(&sumd[0])
177 float *out2 = out + 32 * incr;
182 *out = -suma[ j] + sumd[16-j];
183 *out2 = sumb[16-j] + sumc[ j];
190 SUM8(
MLSS, sum, win + 16 + 32, in + 32);
/* Instantiate imdct36_blocks_<CPU1>(): run the 4-wide SIMD kernel
 * ff_four_imdct36_float_<CPU2>() over groups of four granules, then finish
 * the remaining 0-3 granules one at a time with ff_imdct36_float_<CPU1>().
 * NOTE(review): several interior lines of this macro were lost in
 * extraction; the pointer-advance bookkeeping and closing braces below are
 * a reconstruction -- verify against the upstream source before merging. */
#define DECL_IMDCT_BLOCKS(CPU1, CPU2)                                     \
static void imdct36_blocks_ ## CPU1(float *out, float *buf, float *in,    \
                                    int count, int switch_point,          \
                                    int block_type)                       \
{                                                                         \
    int j;                                                                \
    /* multiple of 4: handled by the four-at-a-time SIMD kernel */        \
    int align_end = count - (count & 3);                                  \
    for (j = 0; j < align_end; j += 4) {                                  \
        LOCAL_ALIGNED_16(float, tmpbuf, [1024]);                          \
        /* short/long window switch applies only to the first block */    \
        float *win = mdct_win_sse[switch_point && j < 4][block_type];     \
        ff_four_imdct36_float_ ## CPU2(out, buf, in, win, tmpbuf);        \
        in  += 4 * 18;                                                    \
        buf += 4 * 18;                                                    \
        out += 4;                                                         \
    }                                                                     \
    /* scalar tail: remaining 0-3 granules */                             \
    for (; j < count; j++) {                                              \
        int win_idx = (switch_point && j < 2) ? 0 : block_type;           \
        /* odd granules use the +4 (sign-alternated) window variant */    \
        float *win = ff_mdct_win_float[win_idx + (4 & -(j & 1))];         \
        ff_imdct36_float_ ## CPU1(out, buf, in, win);                     \
        buf += 18;                                                        \
        in  += 18;                                                        \
        out++;                                                            \
    }                                                                     \
}
231 DECL_IMDCT_BLOCKS(
sse,
sse)
233 DECL_IMDCT_BLOCKS(sse2,
sse)
234 DECL_IMDCT_BLOCKS(sse3,
sse)
235 DECL_IMDCT_BLOCKS(ssse3,
sse)
237 #if HAVE_AVX_EXTERNAL
238 DECL_IMDCT_BLOCKS(avx,avx)
247 for (j = 0; j < 4; j++) {
248 for (i = 0; i < 40; i ++) {
260 #if HAVE_6REGS && HAVE_SSE_INLINE
283 #if HAVE_AVX_EXTERNAL
void(* imdct36_blocks_float)(float *out, float *buf, float *in, int count, int switch_point, int block_type)
#define DECLARE_ALIGNED(n, t, v)
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
#define EXTERNAL_SSE(flags)
float ff_mdct_win_float[8][MDCT_BUF_SIZE]
Macro definitions for various function/variable attributes.
#define EXTERNAL_SSE3(flags)
#define EXTERNAL_SSE2(flags)
#define SUM8(op, sum, w, p)
common internal API header
static float mdct_win_sse[2][4][4 *40]
#define INLINE_SSE(flags)
static void(*const apply_window[4])(AVFloatDSPContext *fdsp, SingleChannelElement *sce, const float *audio)
av_cold void ff_mpadsp_init_x86(MPADSPContext *s)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
#define EXTERNAL_SSSE3(flags)
void(* apply_window_float)(float *synth_buf, float *window, int *dither_state, float *samples, int incr)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win, float *tmpbuf)
void ff_four_imdct36_float_sse(float *out, float *buf, float *in, float *win, float *tmpbuf)
#define LOCAL_ALIGNED_16(t, v,...)
#define EXTERNAL_AVX(flags)