void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
} FFTContext;
-int fft_init(FFTContext *s, int nbits, int inverse);
-void fft_permute(FFTContext *s, FFTComplex *z);
-void fft_calc_c(FFTContext *s, FFTComplex *z);
-void fft_calc_sse(FFTContext *s, FFTComplex *z);
-void fft_calc_altivec(FFTContext *s, FFTComplex *z);
+int ff_fft_init(FFTContext *s, int nbits, int inverse);
+void ff_fft_permute(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
-static inline void fft_calc(FFTContext *s, FFTComplex *z)
+static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
/* Thin dispatch wrapper: forwards to the implementation selected at init
 * time via the s->fft_calc function pointer (C, SSE, or AltiVec variant —
 * presumably chosen in ff_fft_init(); confirm against the full file). */
{
    s->fft_calc(s, z);
}
-void fft_end(FFTContext *s);
+void ff_fft_end(FFTContext *s);
/* MDCT computation */
printf("IFFT");
else
printf("FFT");
- fft_init(s, fft_nbits, do_inverse);
+ ff_fft_init(s, fft_nbits, do_inverse);
fft_ref_init(fft_nbits, do_inverse);
}
printf(" %d test\n", fft_size);
}
} else {
memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
- fft_permute(s, tab);
- fft_calc(s, tab);
+ ff_fft_permute(s, tab);
+ ff_fft_calc(s, tab);
fft_ref(tab_ref, tab1, fft_nbits);
check_diff((float *)tab_ref, (float *)tab, fft_size * 2);
}
} else {
memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
- fft_calc(s, tab);
+ ff_fft_calc(s, tab);
}
}
duration = gettime() - time_start;
if (do_mdct) {
ff_mdct_end(m);
} else {
- fft_end(s);
+ ff_fft_end(s);
}
return 0;
}
* The size of the FFT is 2^nbits. If inverse is TRUE, inverse FFT is
* done
*/
-int fft_init(FFTContext *s, int nbits, int inverse)
+int ff_fft_init(FFTContext *s, int nbits, int inverse)
{
int i, j, m, n;
float alpha, c1, s1, s2;
s->exptab[i].re = c1;
s->exptab[i].im = s1;
}
- s->fft_calc = fft_calc_c;
+ s->fft_calc = ff_fft_calc_c;
s->exptab1 = NULL;
/* compute constant table for HAVE_SSE version */
} while (nblocks != 0);
av_freep(&s->exptab);
#if defined(HAVE_MMX)
- s->fft_calc = fft_calc_sse;
+ s->fft_calc = ff_fft_calc_sse;
#else
- s->fft_calc = fft_calc_altivec;
+ s->fft_calc = ff_fft_calc_altivec;
#endif
}
}
}
/**
- * Do a complex FFT with the parameters defined in fft_init(). The
+ * Do a complex FFT with the parameters defined in ff_fft_init(). The
* input data must be permuted before with s->revtab table. No
* 1.0/sqrt(n) normalization is done.
*/
-void fft_calc_c(FFTContext *s, FFTComplex *z)
+void ff_fft_calc_c(FFTContext *s, FFTComplex *z)
{
int ln = s->nbits;
int j, np, np2;
}
/**
- * Do the permutation needed BEFORE calling fft_calc()
+ * Do the permutation needed BEFORE calling ff_fft_calc()
*/
-void fft_permute(FFTContext *s, FFTComplex *z)
+void ff_fft_permute(FFTContext *s, FFTComplex *z)
{
int j, k, np;
FFTComplex tmp;
}
}
-void fft_end(FFTContext *s)
+void ff_fft_end(FFTContext *s)
{
av_freep(&s->revtab);
av_freep(&s->exptab);
#endif
/* XXX: handle reverse case */
-void fft_calc_sse(FFTContext *s, FFTComplex *z)
+void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
{
int ln = s->nbits;
int j, np, np2;
s->tcos[i] = -cos(alpha);
s->tsin[i] = -sin(alpha);
}
- if (fft_init(&s->fft, s->nbits - 2, inverse) < 0)
+ if (ff_fft_init(&s->fft, s->nbits - 2, inverse) < 0)
goto fail;
return 0;
fail:
in1 += 2;
in2 -= 2;
}
- fft_calc(&s->fft, z);
+ ff_fft_calc(&s->fft, z);
/* post rotation + reordering */
/* XXX: optimize */
CMUL(x[j].re, x[j].im, re, im, -tcos[n8 + i], tsin[n8 + i]);
}
- fft_calc(&s->fft, x);
+ ff_fft_calc(&s->fft, x);
/* post rotation */
for(i=0;i<n4;i++) {
{
av_freep(&s->tcos);
av_freep(&s->tsin);
- fft_end(&s->fft);
+ ff_fft_end(&s->fft);
}
unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][powerpc_data_total];
/* list below must match enum in dsputil_ppc.h */
static unsigned char* perfname[] = {
- "fft_calc_altivec",
+ "ff_fft_calc_altivec",
"gmc1_altivec",
"dct_unquantize_h263_altivec",
"fdct_altivec",
/**
- * Do a complex FFT with the parameters defined in fft_init(). The
+ * Do a complex FFT with the parameters defined in ff_fft_init(). The
* input data must be permuted before with s->revtab table. No
* 1.0/sqrt(n) normalization is done.
* AltiVec-enabled
* that successive MUL + ADD/SUB have been merged into
* fused multiply-add ('vec_madd' in altivec)
*/
-void fft_calc_altivec(FFTContext *s, FFTComplex *z)
+void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
{
POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE