#include "fft_cufft.h"
+// Construct the cuFFT backend: enable mapped (zero-copy) host memory and
+// create the cuBLAS handle used by inverse() for result scaling.
+cuFFT::cuFFT()
+{
+ // NOTE(review): cudaSetDeviceFlags must run before the device context is
+ // active — assumes no earlier CUDA call has been made; confirm.
+ CudaSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));
+ // Handle is reused for every inverse() call and destroyed in ~cuFFT().
+ cudaErrorCheck(cublasCreate(&cublas));
+}
+
+// Create a batched 2D real-to-complex (R2C) plan for `howmany`
+// m_height x m_width images, bound to the per-thread CUDA stream.
+// Each output row holds m_width/2+1 complex values (Hermitian symmetry).
+cufftHandle cuFFT::create_plan_fwd(uint howmany) const
+{
+ int rank = 2;
+ int n[] = {(int)m_height, (int)m_width};
+ // Batch-to-batch distances: dense real input h*w, complex output h*(w/2+1).
+ int idist = m_height * m_width, odist = m_height * (m_width / 2 + 1);
+ int istride = 1, ostride = 1;
+ // Input embedding equals the logical size; output embedding is the
+ // truncated complex layout.
+ int *inembed = n, onembed[] = {(int)m_height, (int)m_width / 2 + 1};
+
+ cufftHandle plan;
+ cudaErrorCheck(cufftPlanMany(&plan, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_R2C, howmany));
+ cudaErrorCheck(cufftSetStream(plan, cudaStreamPerThread));
+ return plan;
+}
+
+// Create a batched 2D complex-to-real (C2R) plan for `howmany`
+// m_height x m_width images — the exact mirror of create_plan_fwd(),
+// with input/output distances and embeddings swapped.
+cufftHandle cuFFT::create_plan_inv(uint howmany) const
+{
+ int rank = 2;
+ int n[] = {(int)m_height, (int)m_width};
+ // Batch-to-batch distances: complex input h*(w/2+1), dense real output h*w.
+ int idist = m_height * (m_width / 2 + 1), odist = m_height * m_width;
+ int istride = 1, ostride = 1;
+ int inembed[] = {(int)m_height, (int)m_width / 2 + 1}, *onembed = n;
+
+ cufftHandle plan;
+ cudaErrorCheck(cufftPlanMany(&plan, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2R, howmany));
+ cudaErrorCheck(cufftSetStream(plan, cudaStreamPerThread));
+ return plan;
+}
+
+
void cuFFT::init(unsigned width, unsigned height, unsigned num_of_feats, unsigned num_of_scales)
{
- m_width = width;
- m_height = height;
- m_num_of_feats = num_of_feats;
- m_num_of_scales = num_of_scales;
+ // Let the base class record the dimensions, then pre-create every plan
+ // the other methods dispatch on.
+ Fft::init(width, height, num_of_feats, num_of_scales);
+
 std::cout << "FFT: cuFFT" << std::endl;
+
+ // Single-image forward, per-feature forward, and 1-channel inverse plans.
+ plan_f = create_plan_fwd(1);
+ plan_fw = create_plan_fwd(m_num_of_feats);
+ plan_i_1ch = create_plan_inv(1);
+
+#ifdef BIG_BATCH
+ // Batched variants that process all scales in a single cuFFT call.
+ plan_f_all_scales = create_plan_fwd(m_num_of_scales);
+ plan_fw_all_scales = create_plan_fwd(m_num_of_scales * m_num_of_feats);
+ plan_i_all_scales = create_plan_inv(m_num_of_scales);
+#endif
}
-void cuFFT::set_window(const cv::Mat &window)
+// Cache the cosine window applied to every feature plane in forward_window().
+void cuFFT::set_window(const MatDynMem &window)
{
- m_window = window;
+ // Base-class bookkeeping first (presumably size validation — confirm).
+ Fft::set_window(window);
+ m_window = window;
}
-ComplexMat cuFFT::forward(const cv::Mat &input)
+// Forward 2D FFT (R2C) of real_input into complex_result on
+// cudaStreamPerThread: single-image plan for one scale, all-scales plan
+// under BIG_BATCH.
+// NOTE(review): without BIG_BATCH, a multi-scale input (size[0] > 1)
+// silently performs no transform — confirm callers never hit that case.
+void cuFFT::forward(const MatScales &real_input, ComplexMat &complex_result)
{
- cv::Mat complex_result;
- cv::dft(input, complex_result, cv::DFT_COMPLEX_OUTPUT);
- return ComplexMat(complex_result);
+ Fft::forward(real_input, complex_result);
+ // deviceMem() is non-const, so constness is cast away to get the GPU pointer.
+ auto in = static_cast<cufftReal *>(const_cast<MatScales&>(real_input).deviceMem());
+
+ if (real_input.size[0] == 1)
+ cudaErrorCheck(cufftExecR2C(plan_f, in, complex_result.get_dev_data()));
+#ifdef BIG_BATCH
+ else
+ cudaErrorCheck(cufftExecR2C(plan_f_all_scales, in, complex_result.get_dev_data()));
+#endif
+}
-ComplexMat cuFFT::forward_window(const std::vector<cv::Mat> &input)
+// Window every feature plane (feat .* m_window) into `temp`, then run the
+// batched forward FFT on temp's device memory into complex_result.
+void cuFFT::forward_window(MatScaleFeats &feat, ComplexMat &complex_result, MatScaleFeats &temp)
{
- int n_channels = input.size();
- ComplexMat result(input[0].rows, input[0].cols, n_channels);
+ Fft::forward_window(feat, complex_result, temp);
+
+ cufftReal *temp_data = temp.deviceMem();
+ uint n_scales = feat.size[0];
- for (int i = 0; i < n_channels; ++i) {
- cv::Mat complex_result;
- cv::dft(input[i].mul(m_window), complex_result, cv::DFT_COMPLEX_OUTPUT);
- result.set_channel(i, complex_result);
+ for (uint s = 0; s < n_scales; ++s) {
+ for (uint ch = 0; ch < uint(feat.size[1]); ++ch) {
+ cv::Mat feat_plane = feat.plane(s, ch);
+ cv::Mat temp_plane = temp.plane(s, ch);
+ // NOTE(review): relies on MatExpr assignment writing into
+ // temp_plane's existing buffer (sizes/types must match) so the
+ // windowed values land in temp's device-visible memory — confirm.
+ temp_plane = feat_plane.mul(m_window);
+ }
}
- return result;
+
+ if (n_scales == 1)
+ cudaErrorCheck(cufftExecR2C(plan_fw, temp_data, complex_result.get_dev_data()));
+#ifdef BIG_BATCH
+ else
+ cudaErrorCheck(cufftExecR2C(plan_fw_all_scales, temp_data, complex_result.get_dev_data()));
+#endif
+ // Block until the transform finishes before callers touch the result.
+ CudaSafeCall(cudaStreamSynchronize(cudaStreamPerThread));
}
-cv::Mat cuFFT::inverse(const ComplexMat &inputf)
+// Inverse 2D FFT (C2R) of complex_input into real_result: 1-channel plan for
+// a single channel, all-scales plan under BIG_BATCH, then scale by 1/(w*h)
+// because cuFFT inverse transforms are unnormalized.
+void cuFFT::inverse(ComplexMat &complex_input, MatScales &real_result)
{
- cv::Mat real_result;
- if (inputf.n_channels == 1) {
- cv::dft(inputf.to_cv_mat(), real_result, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
- } else {
- std::vector<cv::Mat> mat_channels = inputf.to_cv_mat_vector();
- std::vector<cv::Mat> ifft_mats(inputf.n_channels);
- for (int i = 0; i < inputf.n_channels; ++i) {
- cv::dft(mat_channels[i], ifft_mats[i], cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
- }
- cv::merge(ifft_mats, real_result);
- }
- return real_result;
+ Fft::inverse(complex_input, real_result);
+
+ uint n_channels = complex_input.n_channels;
+ cufftComplex *in = reinterpret_cast<cufftComplex *>(complex_input.get_p_data());
+ cufftReal *out = real_result.deviceMem();
+ float alpha = 1.0f / (m_width * m_height);
+
+ if (n_channels == 1)
+ cudaErrorCheck(cufftExecC2R(plan_i_1ch, in, out));
+#ifdef BIG_BATCH
+ // `else` is required here (matching forward/forward_window): without it,
+ // a 1-channel input would also run the all-scales plan, which reads and
+ // writes m_num_of_scales batches — far past the 1-channel buffers.
+ else
+ cudaErrorCheck(cufftExecC2R(plan_i_all_scales, in, out));
+#endif
+ // cuFFT does not normalize inverse transforms (unlike cv::DFT_SCALE),
+ // so divide every element by the transform size.
+ cudaErrorCheck(cublasSscal(cublas, real_result.total(), &alpha, out, 1));
+ CudaSafeCall(cudaStreamSynchronize(cudaStreamPerThread));
}
cuFFT::~cuFFT()
{
+ // Release the cuBLAS handle and every plan created in init().
+ cudaErrorCheck(cublasDestroy(cublas));
+
+ cudaErrorCheck(cufftDestroy(plan_f));
+ cudaErrorCheck(cufftDestroy(plan_fw));
+ cudaErrorCheck(cufftDestroy(plan_i_1ch));
+#ifdef BIG_BATCH
+ cudaErrorCheck(cufftDestroy(plan_f_all_scales));
+ cudaErrorCheck(cufftDestroy(plan_fw_all_scales));
+ cudaErrorCheck(cufftDestroy(plan_i_all_scales));
+#endif
}