#include <algorithm>
#include "threadctx.hpp"
#include "debug.h"
+#include <limits>
#ifdef FFTW
#include "fft_fftw.h"
n = std::max(lower, std::min(n, upper));
}
+#if CV_MAJOR_VERSION < 3
+// Compatibility shim: OpenCV 2.x does not provide operator/ for cv::Size_
+// (added in OpenCV 3), so define element-wise division of a size by a
+// scalar here. Guarded so it does not clash with the OpenCV 3+ definition.
+template<typename _Tp> static inline
+cv::Size_<_Tp> operator / (const cv::Size_<_Tp>& a, _Tp b)
+{
+    return cv::Size_<_Tp>(a.width / b, a.height / b);
+}
+#endif
+
class Kcf_Tracker_Private {
friend KCF_Tracker;
std::vector<ThreadCtx> threadctxs;
// obtain a sub-window for training
// TODO: Move Mats outside from here
MatScaleFeats patch_feats(1, p_num_of_feats, feature_size);
- DEBUG_PRINT(patch_feats);
MatScaleFeats temp(1, p_num_of_feats, feature_size);
get_features(input_rgb, input_gray, p_current_center.x, p_current_center.y,
p_windows_size.width, p_windows_size.height,
p_current_scale).copyTo(patch_feats.scale(0));
DEBUG_PRINT(patch_feats);
- fft.forward_window(patch_feats, p_xf, temp);
- DEBUG_PRINTM(p_xf);
- p_model_xf = p_model_xf * (1. - interp_factor) + p_xf * interp_factor;
- DEBUG_PRINTM(p_model_xf);
-
- ComplexMat alphaf_num, alphaf_den;
+ fft.forward_window(patch_feats, model->xf, temp);
+ DEBUG_PRINTM(model->xf);
+ model->model_xf = model->model_xf * (1. - interp_factor) + model->xf * interp_factor;
+ DEBUG_PRINTM(model->model_xf);
if (m_use_linearkernel) {
- ComplexMat xfconj = p_xf.conj();
- alphaf_num = xfconj.mul(p_yf);
- alphaf_den = (p_xf * xfconj);
+ ComplexMat xfconj = model->xf.conj();
+ model->model_alphaf_num = xfconj.mul(model->yf);
+ model->model_alphaf_den = (model->xf * xfconj);
} else {
// Kernel Ridge Regression, calculate alphas (in Fourier domain)
cv::Size sz(Fft::freq_size(feature_size));
ComplexMat kf(sz.height, sz.width, 1);
- (*gaussian_correlation)(kf, p_model_xf, p_model_xf, p_kernel_sigma, true, *this);
+ (*gaussian_correlation)(kf, model->model_xf, model->model_xf, p_kernel_sigma, true, *this);
DEBUG_PRINTM(kf);
- p_model_alphaf_num = p_yf * kf;
- p_model_alphaf_den = kf * (kf + p_lambda);
+ model->model_alphaf_num = model->yf * kf;
+ model->model_alphaf_den = kf * (kf + p_lambda);
}
- p_model_alphaf = p_model_alphaf_num / p_model_alphaf_den;
- DEBUG_PRINTM(p_model_alphaf);
+ model->model_alphaf = model->model_alphaf_num / model->model_alphaf_den;
+ DEBUG_PRINTM(model->model_alphaf);
// p_model_alphaf = p_yf / (kf + p_lambda); //equation for fast training
}
p_scales.push_back(std::pow(p_scale_step, i));
#ifdef CUFFT
- if (Fft::freq_size(feature_size).area() > 1024) {
- std::cerr << "Window after forward FFT is too big for CUDA kernels. Plese use -f to set "
- "the window dimensions so its size is less or equal to "
- << 1024 * p_cell_size * p_cell_size * 2 + 1
- << " pixels. Currently the size of the window is: " << fit_size
- << " which is " << fit_size.area() << " pixels. " << std::endl;
- std::exit(EXIT_FAILURE);
- }
-
if (m_use_linearkernel) {
std::cerr << "cuFFT supports only Gaussian kernel." << std::endl;
std::exit(EXIT_FAILURE);
}
#endif
-#if defined(CUFFT) || defined(FFTW)
- uint width = feature_size.width / 2 + 1;
-#else
- uint width = feature_size.width;
-#endif
- p_model_xf.create(feature_size.height, width, p_num_of_feats);
- p_yf.create(feature_size.height, width, 1);
- p_xf.create(feature_size.height, width, p_num_of_feats);
+ model.reset(new Model(Fft::freq_size(feature_size), p_num_of_feats));
#ifndef BIG_BATCH
for (auto scale: p_scales)
d.threadctxs.emplace_back(feature_size, p_num_of_feats, p_num_scales);
#endif
- gaussian_correlation.reset(new GaussianCorrelation(1, feature_size));
+ gaussian_correlation.reset(new GaussianCorrelation(1, p_num_of_feats, feature_size));
p_current_center = p_init_pose.center();
p_current_scale = 1.;
// window weights, i.e. labels
MatScales gsl(1, feature_size);
gaussian_shaped_labels(p_output_sigma, feature_size.width, feature_size.height).copyTo(gsl.plane(0));
- fft.forward(gsl, p_yf);
- DEBUG_PRINTM(p_yf);
+ fft.forward(gsl, model->yf);
+ DEBUG_PRINTM(model->yf);
// train initial model
train(input_rgb, input_gray, 1.0);
double KCF_Tracker::findMaxReponse(uint &max_idx, cv::Point2d &new_location) const
{
double max = -1.;
+ max_idx = std::numeric_limits<uint>::max();
+
#ifndef BIG_BATCH
for (uint j = 0; j < d.threadctxs.size(); ++j) {
if (d.threadctxs[j].max.response > max) {
}
}
#endif
+ assert(max_idx < IF_BIG_BATCH(p_scales.size(), d.threadctxs.size()));
+
+ if (m_visual_debug) {
+ int w = 100; //feature_size.width;
+ int h = 100; //feature_size.height;
+ cv::Mat all_responses(h * p_num_scales, w * p_num_angles,
+ d.threadctxs[0].response.type(), cv::Scalar::all(0));
+ for (size_t i = 0; i < p_num_scales; ++i) {
+ for (size_t j = 0; j < p_num_angles; ++j) {
+ cv::Mat tmp = d.threadctxs[IF_BIG_BATCH(0, p_num_angles * i + j)].response.plane(IF_BIG_BATCH(p_num_angles * i + j, 0));
+ tmp = circshift(tmp, -tmp.cols/2, -tmp.rows/2);
+ cv::resize(tmp, tmp, cv::Size(w, h));
+ cv::Mat resp_roi(all_responses, cv::Rect(j * w, i * h, w, h));
+ tmp.copyTo(resp_roi);
+ }
+ }
+ cv::namedWindow("All responses", CV_WINDOW_AUTOSIZE);
+ cv::imshow("All responses", all_responses);
+ }
+
cv::Point2i &max_response_pt = IF_BIG_BATCH(d.threadctxs[0].max[max_idx].loc, d.threadctxs[max_idx].max.loc);
cv::Mat max_response_map = IF_BIG_BATCH(d.threadctxs[0].response.plane(max_idx), d.threadctxs[max_idx].response.plane(0));
DEBUG_PRINTM(zf);
if (kcf.m_use_linearkernel) {
- kzf = zf.mul(kcf.p_model_alphaf).sum_over_channels();
+ kzf = zf.mul(kcf.model->model_alphaf).sum_over_channels();
} else {
- gaussian_correlation(kzf, zf, kcf.p_model_xf, kcf.p_kernel_sigma, false, kcf);
+ gaussian_correlation(kzf, zf, kcf.model->model_xf, kcf.p_kernel_sigma, false, kcf);
DEBUG_PRINTM(kzf);
- kzf = kzf.mul(kcf.p_model_alphaf);
+ kzf = kzf.mul(kcf.model->model_alphaf);
}
kcf.fft.inverse(kzf, response);
return rot_labels;
}
-cv::Mat KCF_Tracker::circshift(const cv::Mat &patch, int x_rot, int y_rot)
+cv::Mat KCF_Tracker::circshift(const cv::Mat &patch, int x_rot, int y_rot) const
{
cv::Mat rot_patch(patch.size(), CV_32FC1);
cv::Mat tmp_x_rot(patch.size(), CV_32FC1);
{
TRACE("");
xf.sqr_norm(xf_sqr_norm);
+ DEBUG_PRINTM(xf_sqr_norm[0]);
if (auto_correlation) {
yf_sqr_norm = xf_sqr_norm;
} else {
yf.sqr_norm(yf_sqr_norm);
}
+ DEBUG_PRINTM(yf_sqr_norm[0]);
xyf = auto_correlation ? xf.sqr_mag() : xf * yf.conj(); // xf.muln(yf.conj());
DEBUG_PRINTM(xyf);
DEBUG_PRINTM(xyf_sum);
kcf.fft.inverse(xyf_sum, ifft_res);
DEBUG_PRINTM(ifft_res);
-#ifdef CUFFT
+#if 0 && defined(CUFFT)
// FIXME
cuda_gaussian_correlation(ifft_res.deviceMem(), k.deviceMem(), xf_sqr_norm.deviceMem(),
auto_correlation ? xf_sqr_norm.deviceMem() : yf_sqr_norm.deviceMem(), sigma,