// NOTE(review): this region is a diff fragment ('-'/'+' prefixed lines), not plain C++;
// only comments are added here, the patch lines themselves are untouched.
// The change reorders the constructor's mem-initializer list, moving
// p_cell_size(cell_size) to the front — presumably so the initializers match the
// members' declaration order in the class and silence a -Wreorder warning;
// TODO confirm against the actual member declaration order in the header.
// Note the list still uses `*new FFT()` / `*new Kcf_Tracker_Private` to bind
// reference members to heap objects — an intentional leak-or-owned-elsewhere
// pattern; can't tell ownership from this hunk, verify the destructor deletes them.
KCF_Tracker::KCF_Tracker(double padding, double kernel_sigma, double lambda, double interp_factor,
double output_sigma_factor, int cell_size)
- : fft(*new FFT()), p_padding(padding), p_output_sigma_factor(output_sigma_factor), p_kernel_sigma(kernel_sigma),
- p_lambda(lambda), p_interp_factor(interp_factor), p_cell_size(cell_size), d(*new Kcf_Tracker_Private)
+ : p_cell_size(cell_size), fft(*new FFT()), p_padding(padding), p_output_sigma_factor(output_sigma_factor), p_kernel_sigma(kernel_sigma),
+ p_lambda(lambda), p_interp_factor(interp_factor), d(*new Kcf_Tracker_Private)
{
}
alphaf_den = (p_xf * xfconj);
} else {
// Kernel Ridge Regression, calculate alphas (in Fourier domain)
- const uint num_scales = BIG_BATCH_MODE ? p_num_scales : 1;
cv::Size sz(Fft::freq_size(p_roi));
- ComplexMat kf(sz.height, sz.width, num_scales);
- (*gaussian_correlation)(*this, kf, p_model_xf, p_model_xf, p_kernel_sigma, true);
+ ComplexMat kf(sz.height, sz.width, 1);
+ (*gaussian_correlation)(kf, p_model_xf, p_model_xf, p_kernel_sigma, true, *this);
DEBUG_PRINTM(kf);
p_model_alphaf_num = p_yf * kf;
p_model_alphaf_den = kf * (kf + p_lambda);
p_roi.height = p_windows_size.height / p_cell_size;
p_scales.clear();
- if (m_use_scale)
- for (int i = -int(p_num_scales) / 2; i <= int(p_num_scales) / 2; ++i)
- p_scales.push_back(std::pow(p_scale_step, i));
- else
- p_scales.push_back(1.);
+ for (int i = -int(p_num_scales) / 2; i <= int(p_num_scales) / 2; ++i)
+ p_scales.push_back(std::pow(p_scale_step, i));
#ifdef CUFFT
if (p_roi.height * (p_roi.width / 2 + 1) > 1024) {
std::cerr << "cuFFT supports only Gaussian kernel." << std::endl;
std::exit(EXIT_FAILURE);
}
- CudaSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));
#else
p_xf.create(p_roi.height, p_roi.height / 2 + 1, p_num_of_feats);
#endif
d.threadctxs.emplace_back(p_roi, p_num_of_feats, p_num_scales);
#endif
- gaussian_correlation.reset(
- new GaussianCorrelation(IF_BIG_BATCH(p_num_scales, 1), p_roi));
+ gaussian_correlation.reset(new GaussianCorrelation(1, p_roi));
p_current_scale = 1.;
tmp.w *= p_current_scale;
tmp.h *= p_current_scale;
- if (p_resize_image) tmp.scale(1 / p_downscale_factor);
+ if (p_resize_image)
+ tmp.scale(1 / p_downscale_factor);
if (p_fit_to_pw2) {
tmp.scale_x(1 / p_scale_factor_x);
tmp.scale_y(1 / p_scale_factor_y);
}
}
#else
- // FIXME: Iterate correctly in big batch mode - perhaps have only one element in the list
for (uint j = 0; j < p_scales.size(); ++j) {
if (d.threadctxs[0].max[j].response > max) {
max = d.threadctxs[0].max[j].response;
it.async_res.wait();
#else // !ASYNC
- // FIXME: Iterate correctly in big batch mode - perhaps have only one element in the list
NORMAL_OMP_PARALLEL_FOR
for (uint i = 0; i < d.threadctxs.size(); ++i)
d.threadctxs[i].track(*this, input_rgb, input_gray);
if (kcf.m_use_linearkernel) {
kzf = zf.mul(kcf.p_model_alphaf).sum_over_channels();
} else {
- gaussian_correlation(kcf, kzf, zf, kcf.p_model_xf, kcf.p_kernel_sigma);
+ gaussian_correlation(kzf, zf, kcf.p_model_xf, kcf.p_kernel_sigma, false, kcf);
DEBUG_PRINTM(kzf);
kzf = kzf.mul(kcf.p_model_alphaf);
}
return patch;
}
-void KCF_Tracker::GaussianCorrelation::operator()(const KCF_Tracker &kcf, ComplexMat &result, const ComplexMat &xf,
- const ComplexMat &yf, double sigma, bool auto_correlation)
+void KCF_Tracker::GaussianCorrelation::operator()(ComplexMat &result, const ComplexMat &xf, const ComplexMat &yf,
+ double sigma, bool auto_correlation, const KCF_Tracker &kcf)
{
TRACE("");
xf.sqr_norm(xf_sqr_norm);
float numel_xf_inv = 1.f / (xf.cols * xf.rows * (xf.channels() / xf.n_scales));
for (uint i = 0; i < xf.n_scales; ++i) {
cv::Mat plane = ifft_res.plane(i);
+ DEBUG_PRINT(ifft_res.plane(i));
cv::exp(-1. / (sigma * sigma) * cv::max((xf_sqr_norm[i] + yf_sqr_norm[0] - 2 * ifft_res.plane(i))
* numel_xf_inv, 0), plane);
DEBUG_PRINTM(plane);