rtime.felk.cvut.cz Git - hercules2020/kcf.git/commitdiff
Integration of ThreadCtx into rotation tracking.
author Shanigen <vkaraf@gmail.com>
Tue, 11 Sep 2018 12:23:27 +0000 (14:23 +0200)
committer Shanigen <vkaraf@gmail.com>
Tue, 11 Sep 2018 12:23:27 +0000 (14:23 +0200)
main_vot.cpp
src/CMakeLists.txt
src/complexmat.cuh
src/fft_fftw.cpp
src/kcf.cpp
src/kcf.h
src/threadctx.hpp

index 7a78f34bea9812f2e414faae1f6fd119e98984c6..cf378445f84b5ec57ab0d2802eb95b4a40e7e6e0 100644 (file)
--- a/main_vot.cpp
+++ b/main_vot.cpp
@@ -9,11 +9,11 @@
 double calcAccuracy(std::string line, cv::Rect bb_rect, cv::Rect &groundtruth_rect)
 {
     std::vector<float> numbers;
-    std::istringstream s( line );
+    std::istringstream s(line);
     float x;
     char ch;
 
-    while (s >> x){
+    while (s >> x) {
         numbers.push_back(x);
         s >> ch;
     }
@@ -22,38 +22,34 @@ double calcAccuracy(std::string line, cv::Rect bb_rect, cv::Rect &groundtruth_re
     double y1 = std::min(numbers[1], std::min(numbers[3], std::min(numbers[5], numbers[7])));
     double y2 = std::max(numbers[1], std::max(numbers[3], std::max(numbers[5], numbers[7])));
 
-    groundtruth_rect = cv::Rect(x1, y1, x2-x1, y2-y1);
+    groundtruth_rect = cv::Rect(x1, y1, x2 - x1, y2 - y1);
 
     double rects_intersection = (groundtruth_rect & bb_rect).area();
     double rects_union = (groundtruth_rect | bb_rect).area();
-    double accuracy = rects_intersection/rects_union;
+    double accuracy = rects_intersection / rects_union;
 
     return accuracy;
 }
 
 int main(int argc, char *argv[])
 {
-    //load region, images and prepare for output
+    // load region, images and prepare for output
     std::string region, images, output;
     int visualize_delay = -1, fit_size_x = -1, fit_size_y = -1;
     KCF_Tracker tracker;
 
     while (1) {
         int option_index = 0;
-        static struct option long_options[] = {
-            {"debug",     no_argument,       0,  'd' },
-            {"visualDebug",     no_argument,       0,  'p' },
-            {"help",      no_argument,       0,  'h' },
-            {"output",    required_argument, 0,  'o' },
-            {"visualize", optional_argument, 0,  'v' },
-            {"fit",       optional_argument, 0,  'f' },
-            {0,           0,                 0,  0 }
-        };
-
-        int c = getopt_long(argc, argv, "dphv::f::o:",
-                        long_options, &option_index);
-        if (c == -1)
-            break;
+        static struct option long_options[] = {{"debug", no_argument, 0, 'd'},
+                                               {"visualDebug", no_argument, 0, 'p'},
+                                               {"help", no_argument, 0, 'h'},
+                                               {"output", required_argument, 0, 'o'},
+                                               {"visualize", optional_argument, 0, 'v'},
+                                               {"fit", optional_argument, 0, 'f'},
+                                               {0, 0, 0, 0}};
+
+        int c = getopt_long(argc, argv, "dphv::f::o:", long_options, &option_index);
+        if (c == -1) break;
 
         switch (c) {
         case 'd':
@@ -64,16 +60,18 @@ int main(int argc, char *argv[])
             visualize_delay = 500;
             break;
         case 'h':
-            std::cerr << "Usage: \n"
-                      << argv[0] << " [options]\n"
-                      << argv[0] << " [options] <directory>\n"
-                      << argv[0] << " [options] <path/to/region.txt or groundtruth.txt> <path/to/images.txt> [path/to/output.txt]\n"
-                      << "Options:\n"
-                      << " --visualize | -v[delay_ms]\n"
-                      << " --output    | -o <output.txt>\n"
-                      << " --debug     | -d\n"
-                      << " --visualDebug | -p\n"
-                      << " --fit       | -f[WxH]\n";
+            std::cerr
+                << "Usage: \n"
+                << argv[0] << " [options]\n"
+                << argv[0] << " [options] <directory>\n"
+                << argv[0]
+                << " [options] <path/to/region.txt or groundtruth.txt> <path/to/images.txt> [path/to/output.txt]\n"
+                << "Options:\n"
+                << " --visualize | -v[delay_ms]\n"
+                << " --output    | -o <output.txt>\n"
+                << " --debug     | -d\n"
+                << " --visualDebug | -p\n"
+                << " --fit       | -f[WxH]\n";
             exit(0);
             break;
         case 'o':
@@ -90,7 +88,7 @@ int main(int argc, char *argv[])
             sizes.erase(0, pos + delimiter.length());
 
             fit_size_x = stol(first_argument);
-           fit_size_y = stol(sizes);
+            fit_size_y = stol(sizes);
             break;
         }
     }
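
The --fit/-f handler above splits its optional "WxH" argument on the literal "x" and converts both halves with stol(); first_argument, which the unshown lines just above assign from the text before the delimiter, holds the width part. A small sketch of that split with an illustrative argument value:

#include <iostream>
#include <string>

int main()
{
    std::string sizes = "640x480";             // illustrative optarg value for -f
    const std::string delimiter = "x";

    size_t pos = sizes.find(delimiter);
    std::string first_argument = sizes.substr(0, pos);   // "640"
    sizes.erase(0, pos + delimiter.length());            // leaves "480"

    int fit_size_x = std::stol(first_argument);
    int fit_size_y = std::stol(sizes);

    std::cout << fit_size_x << " x " << fit_size_y << std::endl;
    return 0;
}
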
@@ -105,8 +103,7 @@ int main(int argc, char *argv[])
     case 0:
         region = access("groundtruth.txt", F_OK) == 0 ? "groundtruth.txt" : "region.txt";
         images = "images.txt";
-        if (output.empty())
-            output = "output.txt";
+        if (output.empty()) output = "output.txt";
         break;
     case 2:
         // Fall through
@@ -136,7 +133,7 @@ int main(int argc, char *argv[])
 
     cv::Mat image;
 
-    //img = firts frame, initPos = initial position in the first frame
+    // img = first frame, initPos = initial position in the first frame
     cv::Rect init_rect = vot_io.getInitRectangle();
     vot_io.outputBoundingBox(init_rect);
     vot_io.getNextImage(image);
@@ -147,16 +144,16 @@ int main(int argc, char *argv[])
     cv::Rect bb_rect;
     double avg_time = 0., sum_accuracy = 0.;
     int frames = 0;
-    while (vot_io.getNextImage(image) == 1){
+    while (vot_io.getNextImage(image) == 1) {
         double time_profile_counter = cv::getCPUTickCount();
         tracker.track(image);
         time_profile_counter = cv::getCPUTickCount() - time_profile_counter;
-         std::cout << "  -> speed : " <<  time_profile_counter/((double)cvGetTickFrequency()*1000) << "ms. per frame";
-        avg_time += time_profile_counter/((double)cvGetTickFrequency()*1000);
+        std::cout << "  -> speed : " << time_profile_counter / ((double)cvGetTickFrequency() * 1000) << "ms. per frame";
+        avg_time += time_profile_counter / ((double)cvGetTickFrequency() * 1000);
         frames++;
 
         bb = tracker.getBBox();
-        bb_rect = cv::Rect(bb.cx - bb.w/2., bb.cy - bb.h/2., bb.w, bb.h);
+        bb_rect = cv::Rect(bb.cx - bb.w / 2., bb.cy - bb.h / 2., bb.w, bb.h);
         vot_io.outputBoundingBox(bb_rect);
 
         if (groundtruth_stream.is_open()) {
@@ -165,8 +162,7 @@ int main(int argc, char *argv[])
 
             cv::Rect groundtruthRect;
             double accuracy = calcAccuracy(line, bb_rect, groundtruthRect);
-            if (visualize_delay >= 0)
-                cv::rectangle(image, groundtruthRect, CV_RGB(255, 0,0), 1);
+            if (visualize_delay >= 0) cv::rectangle(image, groundtruthRect, CV_RGB(255, 0, 0), 1);
             std::cout << ", accuracy: " << accuracy;
             sum_accuracy += accuracy;
         }
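
The loop above times each tracker.track() call with OpenCV tick counters and accumulates both the per-frame time and, when a groundtruth file is open, the per-frame accuracy. A reduced sketch of the timing pattern using the C++-API counters cv::getTickCount()/cv::getTickFrequency() (ticks per second) in place of the legacy cvGetTickFrequency(); the blur call merely stands in for tracker.track(image):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

int main()
{
    cv::Mat image(480, 640, CV_8UC3, cv::Scalar::all(127));
    double avg_time = 0.;
    int frames = 0;

    for (int i = 0; i < 50; ++i) {
        int64 t0 = cv::getTickCount();
        cv::Mat work;
        cv::blur(image, work, cv::Size(15, 15));   // stand-in for tracker.track(image)
        double ms = (cv::getTickCount() - t0) * 1000. / cv::getTickFrequency();

        avg_time += ms;
        frames++;
    }

    std::cout << "Average processing speed " << avg_time / frames << "ms. ("
              << 1. / (avg_time / frames) * 1000 << " fps)" << std::endl;
    return 0;
}
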
@@ -176,48 +172,50 @@ int main(int argc, char *argv[])
         if (visualize_delay >= 0) {
             cv::Point pt(bb.cx, bb.cy);
             cv::Size size(bb.w, bb.h);
-            cv::RotatedRect rotatedRectangle(pt,size, bb.a);
+            cv::RotatedRect rotatedRectangle(pt, size, bb.a);
 
             cv::Point2f vertices[4];
             rotatedRectangle.points(vertices);
 
             for (int i = 0; i < 4; i++)
-                cv::line(image, vertices[i], vertices[(i+1)%4], cv::Scalar(0,255,0), 2);
-//             cv::rectangle(image, cv::Rect(bb.cx - bb.w/2., bb.cy - bb.h/2., bb.w, bb.h), CV_RGB(0,255,0), 2);
-            std::string angle = std::to_string (bb.a);
-            angle.erase ( angle.find_last_not_of('0') + 1, std::string::npos );
-            angle.erase ( angle.find_last_not_of('.') + 1, std::string::npos );
-            cv::putText(image, "Frame: " + std::to_string(frames) + " " + angle + " angle", cv::Point(0, image.rows-1), cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0,255,0),2,cv::LINE_AA);
+                cv::line(image, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 2);
+            //             cv::rectangle(image, cv::Rect(bb.cx - bb.w/2., bb.cy - bb.h/2., bb.w, bb.h), CV_RGB(0,255,0),
+            //             2);
+            std::string angle = std::to_string(bb.a);
+            angle.erase(angle.find_last_not_of('0') + 1, std::string::npos);
+            angle.erase(angle.find_last_not_of('.') + 1, std::string::npos);
+            cv::putText(image, "Frame: " + std::to_string(frames) + " " + angle + " angle",
+                        cv::Point(0, image.rows - 1), cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0, 255, 0), 2);
             cv::imshow("output", image);
             int ret = cv::waitKey(visualize_delay);
-            if (visualize_delay > 0 && ret != -1 && ret != 255)
-                break;
+            if (visualize_delay > 0 && ret != -1 && ret != 255) break;
         }
 
-//        std::stringstream s;
-//        std::string ss;
-//        int countTmp = frames;
-//        s << "imgs" << "/img" << (countTmp/10000);
-//        countTmp = countTmp%10000;
-//        s << (countTmp/1000);
-//        countTmp = countTmp%1000;
-//        s << (countTmp/100);
-//        countTmp = countTmp%100;
-//        s << (countTmp/10);
-//        countTmp = countTmp%10;
-//        s << (countTmp);
-//        s << ".jpg";
-//        s >> ss;
-//        //set image output parameters
-//        std::vector<int> compression_params;
-//        compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
-//        compression_params.push_back(90);
-//        cv::imwrite(ss.c_str(), image, compression_params);
+        //        std::stringstream s;
+        //        std::string ss;
+        //        int countTmp = frames;
+        //        s << "imgs" << "/img" << (countTmp/10000);
+        //        countTmp = countTmp%10000;
+        //        s << (countTmp/1000);
+        //        countTmp = countTmp%1000;
+        //        s << (countTmp/100);
+        //        countTmp = countTmp%100;
+        //        s << (countTmp/10);
+        //        countTmp = countTmp%10;
+        //        s << (countTmp);
+        //        s << ".jpg";
+        //        s >> ss;
+        //        //set image output parameters
+        //        std::vector<int> compression_params;
+        //        compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
+        //        compression_params.push_back(90);
+        //        cv::imwrite(ss.c_str(), image, compression_params);
     }
 
-    std::cout << "Average processing speed " << avg_time/frames <<  "ms. (" << 1./(avg_time/frames)*1000 << " fps)" << std::endl;
+    std::cout << "Average processing speed " << avg_time / frames << "ms. (" << 1. / (avg_time / frames) * 1000
+              << " fps)" << std::endl;
     if (groundtruth_stream.is_open()) {
-        std::cout << "Average accuracy: " << sum_accuracy/frames << std::endl;
+        std::cout << "Average accuracy: " << sum_accuracy / frames << std::endl;
         groundtruth_stream.close();
     }
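
The visualization branch in the loop above draws the rotated bounding box by asking a cv::RotatedRect for its four corners and connecting consecutive vertices with cv::line, then overlays the frame number and angle with cv::putText. A self-contained sketch of that drawing step; the window name, box geometry and label text are illustrative:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::Mat image(480, 640, CV_8UC3, cv::Scalar::all(40));

    // Illustrative box: centre, size and angle as reported by tracker.getBBox().
    cv::RotatedRect box(cv::Point2f(320.f, 240.f), cv::Size2f(160.f, 90.f), 25.f);

    cv::Point2f vertices[4];
    box.points(vertices);   // the four corners, in order
    for (int i = 0; i < 4; i++)
        cv::line(image, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 2);

    cv::putText(image, "angle: 25", cv::Point(0, image.rows - 1),
                cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0, 255, 0), 2);

    cv::imshow("output", image);
    cv::waitKey(0);
    return 0;
}
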
 
index 27ca0a93fddd9666d6fc60918784a4a41941b99c..901f0dcae82048da05644889a0e8a338a58e1152 100644 (file)
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -91,9 +91,9 @@ set_target_properties(kcf PROPERTIES VERSION 1.0.0 SOVERSION 1)
 IF(FFT STREQUAL "fftw")
   target_link_libraries(kcf ${FFTW_LDFLAGS})
   IF(OPENMP)
-    target_link_libraries(kcf fftw3_omp)
+    target_link_libraries(kcf fftw3f_omp)
   ELSEIF(NOT ASYNC)
-    target_link_libraries(kcf fftw3_threads)
+    target_link_libraries(kcf fftw3f_threads)
   ENDIF()
 ENDIF() #FFTW
 
index 57ccee3f937dc20b71f46a60c0570b76a55139b5..4d3723b0609a0629124f501a8386b6f7dd09941c 100644 (file)
--- a/src/complexmat.cuh
+++ b/src/complexmat.cuh
@@ -106,7 +106,8 @@ class ComplexMat {
     // text output
     friend std::ostream &operator<<(std::ostream &os, const ComplexMat &mat)
     {
-        float *data_cpu = reinterpret_cast<float*>(malloc(mat.rows * mat.cols * mat.n_channels * sizeof(cufftComplex)));
+        float *data_cpu =
+            reinterpret_cast<float *>(malloc(mat.rows * mat.cols * mat.n_channels * sizeof(cufftComplex)));
         CudaSafeCall(cudaMemcpy(data_cpu, mat.p_data, mat.rows * mat.cols * mat.n_channels * sizeof(cufftComplex),
                                 cudaMemcpyDeviceToHost));
         // for (int i = 0; i < mat.n_channels; ++i){
index ee1335ba710ec252aead19045b582155a742aaeb..a3108844ad22a70394a46e72dbdc35563bda4621 100644 (file)
--- a/src/fft_fftw.cpp
+++ b/src/fft_fftw.cpp
@@ -6,13 +6,17 @@
 #include <omp.h>
 #endif
 
-#if !defined(ASYNC) && !defined(OPENMP) && !defined(CUFFTW)
-#define FFTW_PLAN_WITH_THREADS() fftw_plan_with_nthreads(4);
+#if (defined(BIG_BATCH) && !defined(CUFFTW)) || (!defined(ASYNC) && !defined(OPENMP) && !defined(CUFFTW))
+#define FFTW_PLAN_WITH_THREADS() fftwf_plan_with_nthreads(4);
+#define FFTW_INIT_THREAD() fftwf_init_threads();
+#define FFTW_CLEAN_THREADS() fftwf_cleanup_threads();
 #else
 #define FFTW_PLAN_WITH_THREADS()
+#define FFTW_INIT_THREAD()
+#define FFTW_CLEAN_THREADS()
 #endif
 
-Fftw::Fftw(){}
+Fftw::Fftw() {}
 
 void Fftw::init(unsigned width, unsigned height, unsigned num_of_feats, unsigned num_of_scales, bool big_batch_mode)
 {
@@ -22,20 +26,20 @@ void Fftw::init(unsigned width, unsigned height, unsigned num_of_feats, unsigned
     m_num_of_scales = num_of_scales;
     m_big_batch_mode = big_batch_mode;
 
-#if (!defined(ASYNC) && !defined(CUFFTW)) && defined(OPENMP)
-    fftw_init_threads();
-#endif // OPENMP
-
 #ifndef CUFFTW
     std::cout << "FFT: FFTW" << std::endl;
 #else
     std::cout << "FFT: cuFFTW" << std::endl;
 #endif
-    fftwf_cleanup();
+
+    FFTW_INIT_THREAD();
+
     // FFT forward one scale
     {
         cv::Mat in_f = cv::Mat::zeros(int(m_height), int(m_width), CV_32FC1);
         ComplexMat out_f(int(m_height), m_width / 2 + 1, 1);
+
+        FFTW_PLAN_WITH_THREADS();
         plan_f = fftwf_plan_dft_r2c_2d(int(m_height), int(m_width), reinterpret_cast<float *>(in_f.data),
                                        reinterpret_cast<fftwf_complex *>(out_f.get_p_data()), FFTW_PATIENT);
     }
@@ -245,4 +249,5 @@ Fftw::~Fftw()
         fftwf_destroy_plan(plan_fw_all_scales);
         fftwf_destroy_plan(plan_i_1ch_all_scales);
     }
+    FFTW_CLEAN_THREADS();
 }
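
The FFTW_INIT_THREAD / FFTW_PLAN_WITH_THREADS / FFTW_CLEAN_THREADS macros above expand to the single-precision FFTW threading calls only in the configurations that need them. The expected ordering is: fftwf_init_threads() once before any planning, fftwf_plan_with_nthreads() before each threaded plan is created, and fftwf_cleanup_threads() after the plans are destroyed. A minimal sketch of that sequence for one forward real-to-complex plan; the transform size, thread count and link line are illustrative assumptions (it links fftw3f and fftw3f_threads, matching the fftw3f_omp/fftw3f_threads change in CMakeLists.txt earlier in this commit):

// Build sketch (assumption): g++ demo.cpp -lfftw3f_threads -lfftw3f -lpthread
#include <fftw3.h>
#include <cstring>

int main()
{
    const int h = 128, w = 128;   // illustrative transform size

    fftwf_init_threads();         // once, before any threaded planning
    fftwf_plan_with_nthreads(4);  // applies to plans created afterwards

    float *in = fftwf_alloc_real(h * w);
    fftwf_complex *out = fftwf_alloc_complex(h * (w / 2 + 1));

    fftwf_plan plan = fftwf_plan_dft_r2c_2d(h, w, in, out, FFTW_PATIENT);
    std::memset(in, 0, sizeof(float) * h * w);   // planning may clobber the arrays
    fftwf_execute(plan);

    fftwf_destroy_plan(plan);
    fftwf_free(in);
    fftwf_free(out);
    fftwf_cleanup_threads();      // releases FFTW's thread resources
    return 0;
}
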
index 8c08ef0a25cfed48c588b3ffc34a022d1dc35469..81a1798f87310ebab3cc3a1a1bbbff9975891d44 100644 (file)
--- a/src/kcf.cpp
+++ b/src/kcf.cpp
@@ -19,7 +19,6 @@
 #include <omp.h>
 #endif // OPENMP
 
-<<<<<<< HEAD
 #define DEBUG_PRINT(obj)                                                                                               \
     if (m_debug) {                                                                                                     \
         std::cout << #obj << " @" << __LINE__ << std::endl << (obj) << std::endl;                                      \
     }
 #define DEBUG_PRINTM(obj)                                                                                              \
     if (m_debug) {                                                                                                     \
         std::cout << #obj << " @" << __LINE__ << " " << (obj).size() << " CH: " << (obj).channels() << std::endl       \
                   << (obj) << std::endl;                                                                               \
     }
-=======
-#define DEBUG_PRINT(obj) if (m_debug || m_visual_debug) {std::cout << #obj << " @" << __LINE__ << std::endl << (obj) << std::endl;}
-#define DEBUG_PRINTM(obj) if (m_debug) {std::cout << #obj << " @" << __LINE__ << " " << (obj).size() << " CH: " << (obj).channels() << std::endl << (obj) << std::endl;}
->>>>>>> Addded visual debug mode and also modified the rotation tracking implementation.
 
 KCF_Tracker::KCF_Tracker(double padding, double kernel_sigma, double lambda, double interp_factor,
                          double output_sigma_factor, int cell_size)
@@ -132,24 +127,19 @@ void KCF_Tracker::init(cv::Mat &img, const cv::Rect &bbox, int fit_size_x, int f
     p_windows_size.width = int(round(p_pose.w * (1. + p_padding) / p_cell_size) * p_cell_size);
     p_windows_size.height = int(round(p_pose.h * (1. + p_padding) / p_cell_size) * p_cell_size);
 
-    p_num_of_feats = 31;
-    if (m_use_color) p_num_of_feats += 3;
-    if (m_use_cnfeat) p_num_of_feats += 10;
-    p_roi_width = p_windows_size.width / p_cell_size;
-    p_roi_height = p_windows_size.height / p_cell_size;
-
     p_scales.clear();
     if (m_use_scale)
         for (int i = -p_num_scales / 2; i <= p_num_scales / 2; ++i)
             p_scales.push_back(std::pow(p_scale_step, i));
     else
         p_scales.push_back(1.);
-    
-     if (m_use_angle)
-        for (int i = p_angle_min; i <=p_angle_max ; i += p_angle_step)
+
+    if (m_use_angle) {
+        for (int i = p_angle_min; i <= p_angle_max; i += p_angle_step)
             p_angles.push_back(i);
-    else
+    } else {
         p_angles.push_back(0);
+    }
 
 #ifdef CUFFT
     if (p_windows_size.height / p_cell_size * (p_windows_size.width / p_cell_size / 2 + 1) > 1024) {
@@ -165,7 +155,12 @@ void KCF_Tracker::init(cv::Mat &img, const cv::Rect &bbox, int fit_size_x, int f
         std::cerr << "cuFFT supports only Gaussian kernel." << std::endl;
         std::exit(EXIT_FAILURE);
     }
+
+    p_roi_width = p_windows_size.width / p_cell_size;
+    p_roi_height = p_windows_size.height / p_cell_size;
+
     CudaSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));
+
     p_rot_labels_data = DynMem(
         ((uint(p_windows_size.width) / p_cell_size) * (uint(p_windows_size.height) / p_cell_size)) * sizeof(float));
     p_rot_labels = cv::Mat(p_windows_size.height / int(p_cell_size), p_windows_size.width / int(p_cell_size), CV_32FC1,
@@ -192,9 +187,9 @@ void KCF_Tracker::init(cv::Mat &img, const cv::Rect &bbox, int fit_size_x, int f
     for (int i = 0; i < max; ++i) {
         if (m_use_big_batch && i == 1) {
             p_threadctxs.emplace_back(
-                new ThreadCtx(p_windows_size, p_cell_size, p_num_of_feats * p_num_scales, p_num_scales));
+                new ThreadCtx(p_windows_size, p_cell_size, p_num_of_feats * p_scales.size() * p_angles.size(), p_scales.size(), p_angles.size()));
         } else {
-            p_threadctxs.emplace_back(new ThreadCtx(p_windows_size, p_cell_size, p_num_of_feats, 1));
+            p_threadctxs.emplace_back(new ThreadCtx(p_windows_size, p_cell_size, p_num_of_feats));
         }
     }
 
@@ -214,22 +209,35 @@ void KCF_Tracker::init(cv::Mat &img, const cv::Rect &bbox, int fit_size_x, int f
     p_output_sigma = std::sqrt(p_pose.w * p_pose.h) * p_output_sigma_factor / static_cast<double>(p_cell_size);
 
     fft.init(uint(p_windows_size.width / p_cell_size), uint(p_windows_size.height / p_cell_size), uint(p_num_of_feats),
-             uint(p_num_scales), m_use_big_batch);
+             uint(p_scales.size() * p_angles.size()), m_use_big_batch);
     fft.set_window(cosine_window_function(p_windows_size.width / p_cell_size, p_windows_size.height / p_cell_size));
 
     // window weights, i.e. labels
     fft.forward(
-        gaussian_shaped_labels(p_output_sigma, p_windows_size.width / p_cell_size, p_windows_size.height / p_cell_size), p_yf,
-        m_use_cuda ? p_rot_labels_data.deviceMem() : nullptr, p_threadctxs.front()->stream);
+        gaussian_shaped_labels(p_output_sigma, p_windows_size.width / p_cell_size, p_windows_size.height / p_cell_size),
+        p_yf, m_use_cuda ? p_rot_labels_data.deviceMem() : nullptr, p_threadctxs.front()->stream);
     DEBUG_PRINTM(p_yf);
 
     // obtain a sub-window for training initial model
     p_threadctxs.front()->patch_feats.clear();
-    get_features(input_rgb, input_gray, int(p_pose.cx), int(p_pose.cy), p_windows_size.width, p_windows_size.height,
-                 *p_threadctxs.front());
+
+    int size_x_scaled = floor(p_windows_size.width);
+    int size_y_scaled = floor(p_windows_size.height);
+
+    cv::Mat patch_gray = get_subwindow(input_gray, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+    geometric_transformations(patch_gray, p_windows_size.width, p_windows_size.height, 1, 0, false);
+
+    cv::Mat patch_rgb = cv::Mat::zeros(size_y_scaled, size_x_scaled, CV_32F);
+    if ((m_use_color || m_use_cnfeat) && input_rgb.channels() == 3) {
+        patch_rgb = get_subwindow(input_rgb, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+        geometric_transformations(patch_rgb, p_windows_size.width, p_windows_size.height, 1, 0, false);
+    }
+
+    get_features(patch_rgb, patch_gray, *p_threadctxs.front());
     fft.forward_window(p_threadctxs.front()->patch_feats, p_model_xf, p_threadctxs.front()->fw_all,
                        m_use_cuda ? p_threadctxs.front()->data_features.deviceMem() : nullptr, p_threadctxs.front()->stream);
     DEBUG_PRINTM(p_model_xf);
+
 #if !defined(BIG_BATCH) && defined(CUFFT) && (defined(ASYNC) || defined(OPENMP))
     p_threadctxs.front()->model_xf = p_model_xf;
     p_threadctxs.front()->model_xf.set_stream(p_threadctxs.front()->stream);
@@ -336,7 +344,8 @@ void KCF_Tracker::track(cv::Mat &img)
     }
 
     double max_response = -1.;
-    int scale_index = 0;
+    uint scale_index = 0;
+    uint angle_index = 0;
     cv::Point2i *max_response_pt = nullptr;
     cv::Mat *max_response_map = nullptr;
 
@@ -355,39 +364,69 @@ void KCF_Tracker::track(cv::Mat &img)
                 max_response = (*it)->max_response;
                 max_response_pt = &(*it)->max_loc;
                 max_response_map = &(*it)->response;
-                scale_index = int(index);
+                scale_index = index;
             }
         }
     } else {
         uint start = m_use_big_batch ? 1 : 0;
-        uint end = m_use_big_batch ? 2 : uint(p_num_scales);
+        uint end1 = m_use_big_batch ? 2 : uint(p_scales.size());
+        uint end2 = m_use_big_batch ? 1 : uint(p_angles.size());
         NORMAL_OMP_PARALLEL_FOR
-        for (uint i = start; i < end; ++i) {
+        for (uint i = start; i < end1; ++i) {
             auto it = p_threadctxs.begin();
             std::advance(it, i);
-            scale_track(*(*it), input_rgb, input_gray, this->p_scales[i]);
-
-            if (m_use_big_batch) {
-                for (size_t j = 0; j < p_scales.size(); ++j) {
-                    if ((*it)->max_responses[j] > max_response) {
-                        max_response = (*it)->max_responses[j];
-                        max_response_pt = &(*it)->max_locs[j];
-                        max_response_map = &(*it)->response_maps[j];
-                        scale_index = int(j);
+            for (size_t j = 0; j < end2; ++j) {
+                scale_track(*(*it), input_rgb, input_gray, this->p_scales[i], this->p_angles[j]);
+
+                if (m_use_big_batch) {
+                    for (uint x = 0; x < p_scales.size(); ++x) {
+                        for (uint k = 0; k < p_angles.size(); ++k) {
+                            if ((*it)->max_responses[x+k] > max_response) {
+                                max_response = (*it)->max_responses[x+k];
+                                max_response_pt = &(*it)->max_locs[x+k];
+                                max_response_map = &(*it)->response_maps[x+k];
+                                scale_index = x;
+                                angle_index = k;
+                            }
+                        }
                     }
-                }
-            } else {
-                NORMAL_OMP_CRITICAL
-                {
-                    if ((*it)->max_response > max_response) {
-                        max_response = (*it)->max_response;
-                        max_response_pt = &(*it)->max_loc;
-                        max_response_map = &(*it)->response;
-                        scale_index = int(i);
+                } else {
+                    NORMAL_OMP_CRITICAL
+                    {
+                        if ((*it)->max_response > max_response) {
+                            max_response = (*it)->max_response;
+                            max_response_pt = &(*it)->max_loc;
+                            max_response_map = &(*it)->response;
+                            scale_index = i;
+                            angle_index = j;
+                        }
                     }
                 }
             }
         }
+        if (m_visual_debug) {
+            cv::Mat all_responses(cv::Size(p_angles.size() * p_debug_image_size, p_scales.size() * p_debug_image_size),
+                                  p_debug_scale_responses[0].type(), cv::Scalar::all(0));
+            cv::Mat all_subwindows(cv::Size(p_angles.size() * p_debug_image_size, p_scales.size() * p_debug_image_size),
+                                   p_debug_subwindows[0].type(), cv::Scalar::all(0));
+            for (size_t i = 0; i < p_scales.size(); ++i) {
+                for (size_t j = 0; j < p_angles.size(); ++j) {
+                    cv::Mat in_roi(all_responses, cv::Rect(j * p_debug_image_size, i * p_debug_image_size,
+                                                           p_debug_image_size, p_debug_image_size));
+                    p_debug_scale_responses[5 * i + j].copyTo(in_roi);
+                    in_roi = all_subwindows(cv::Rect(j * p_debug_image_size, i * p_debug_image_size, p_debug_image_size,
+                                                     p_debug_image_size));
+                    p_debug_subwindows[5 * i + j].copyTo(in_roi);
+                }
+            }
+            cv::namedWindow("All subwindows", CV_WINDOW_AUTOSIZE);
+            cv::imshow("All subwindows", all_subwindows);
+            cv::namedWindow("All responses", CV_WINDOW_AUTOSIZE);
+            cv::imshow("All responses", all_responses);
+            cv::waitKey();
+            p_debug_scale_responses.clear();
+            p_debug_subwindows.clear();
+        }
     }
 
     DEBUG_PRINTM(*max_response_map);
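
The visual-debug block above tiles the per-scale / per-angle debug images into a single mosaic: the flat vectors are indexed row-major (scale index times the number of angles plus the angle index; the code above hard-codes 5 for the angle count), and each tile is copied into a cv::Rect ROI of the large image. A reduced sketch of the same tiling; the tile size and grid dimensions are illustrative and the generic row-major index is written out:

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

int main()
{
    const int tile = 100;                      // plays the role of p_debug_image_size
    const size_t n_scales = 3, n_angles = 5;   // illustrative grid

    // Flat, row-major list of small debug images (scale-major, angle-minor),
    // like p_debug_scale_responses / p_debug_subwindows.
    std::vector<cv::Mat> tiles;
    for (size_t i = 0; i < n_scales * n_angles; ++i)
        tiles.push_back(cv::Mat(tile, tile, CV_8UC1, cv::Scalar(double(i) * 17)));

    cv::Mat mosaic(cv::Size(int(n_angles) * tile, int(n_scales) * tile), tiles[0].type(), cv::Scalar::all(0));
    for (size_t i = 0; i < n_scales; ++i)
        for (size_t j = 0; j < n_angles; ++j) {
            cv::Mat roi(mosaic, cv::Rect(int(j) * tile, int(i) * tile, tile, tile));
            tiles[i * n_angles + j].copyTo(roi);   // generic row-major index into the flat vector
        }

    cv::imshow("All responses", mosaic);
    cv::waitKey();
    return 0;
}
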
@@ -410,10 +449,8 @@ void KCF_Tracker::track(cv::Mat &img)
     p_pose.cx += p_current_scale * p_cell_size * double(new_location.x);
     p_pose.cy += p_current_scale * p_cell_size * double(new_location.y);
 
-    if (m_visual_debug) {
+    if (m_visual_debug)
         std::cout << "New p_pose, cx: " << p_pose.cx << " cy: " << p_pose.cy << std::endl;
-        cv::waitKey();
-    }
 
     if (p_fit_to_pw2) {
         if (p_pose.cx < 0) p_pose.cx = 0;
@@ -436,14 +473,28 @@ void KCF_Tracker::track(cv::Mat &img)
     if (p_current_scale < p_min_max_scale[0]) p_current_scale = p_min_max_scale[0];
     if (p_current_scale > p_min_max_scale[1]) p_current_scale = p_min_max_scale[1];
 
-    // TODO Missing angle_index
-    //            int tmp_angle = p_current_angle + p_angles[angle_index];
-    //            p_current_angle = tmp_angle < 0 ? -std::abs(tmp_angle)%360 : tmp_angle%360;
+    p_current_angle = (p_current_angle + p_angles[angle_index]) < 0
+                          ? -std::abs(p_current_angle + p_angles[angle_index]) % 360
+                          : (p_current_angle + p_angles[angle_index]) % 360;
 
     // obtain a subwindow for training at newly estimated target position
     p_threadctxs.front()->patch_feats.clear();
-    get_features(input_rgb, input_gray, int(p_pose.cx), int(p_pose.cy), p_windows_size.width, p_windows_size.height,
-                 *p_threadctxs.front(), p_current_scale, p_current_angle);
+
+    int size_x_scaled = floor(p_windows_size.width * p_current_scale);
+    int size_y_scaled = floor(p_windows_size.height * p_current_scale);
+
+    cv::Mat patch_gray = get_subwindow(input_gray, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+    geometric_transformations(patch_gray, p_windows_size.width, p_windows_size.height, p_current_scale, p_current_angle,
+                              false);
+
+    cv::Mat patch_rgb = cv::Mat::zeros(size_y_scaled, size_x_scaled, CV_32F);
+    if ((m_use_color || m_use_cnfeat) && input_rgb.channels() == 3) {
+        patch_rgb = get_subwindow(input_rgb, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+        geometric_transformations(patch_rgb, p_windows_size.width, p_windows_size.height, p_current_scale,
+                                  p_current_angle, false);
+    }
+
+    get_features(patch_rgb, patch_gray, *p_threadctxs.front());
     fft.forward_window(p_threadctxs.front()->patch_feats, p_xf, p_threadctxs.front()->fw_all,
                        m_use_cuda ? p_threadctxs.front()->data_features.deviceMem() : nullptr,
                        p_threadctxs.front()->stream);
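
The ternary added above keeps p_current_angle inside the open interval (-360, 360): the selected angle step is added to the current angle and the sum is reduced modulo 360 while keeping its sign. A small sketch of that wrap with a few illustrative inputs:

#include <cstdlib>
#include <iostream>

// Same wrapping rule as the tracker update: reduce modulo 360, keep the sign.
static int wrap_angle(int current, int step)
{
    int tmp = current + step;
    return tmp < 0 ? -(std::abs(tmp) % 360) : tmp % 360;
}

int main()
{
    std::cout << wrap_angle(350, 20) << "\n";    // 370  -> 10
    std::cout << wrap_angle(-350, -20) << "\n";  // -370 -> -10
    std::cout << wrap_angle(10, -30) << "\n";    // -20  -> -20
    return 0;
}
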
@@ -481,19 +532,46 @@ void KCF_Tracker::track(cv::Mat &img)
 #endif
 }
 
-void KCF_Tracker::scale_track(ThreadCtx &vars, cv::Mat &input_rgb, cv::Mat &input_gray, double scale)
+void KCF_Tracker::scale_track(ThreadCtx &vars, cv::Mat &input_rgb, cv::Mat &input_gray, double scale, int angle)
 {
     if (m_use_big_batch) {
         vars.patch_feats.clear();
+        std::cout << "WE ARE HERE BOIS" <<std::endl;
         BIG_BATCH_OMP_PARALLEL_FOR
-        for (uint i = 0; i < uint(p_num_scales); ++i) {
-            get_features(input_rgb, input_gray, int(this->p_pose.cx), int(this->p_pose.cy), this->p_windows_size.width,
-                         this->p_windows_size.height, vars, this->p_current_scale * this->p_scales[i]);
+        for (uint i = 0; i < this->p_scales.size(); ++i) {
+            for (uint j = 0; j < this->p_angles.size(); ++j) {
+                int size_x_scaled = floor(this->p_windows_size.width * this->p_current_scale * this->p_scales[i]);
+                int size_y_scaled = floor(this->p_windows_size.height * this->p_current_scale * this->p_scales[i]);
+
+                cv::Mat patch_gray =
+                    get_subwindow(input_gray, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+                geometric_transformations(patch_gray, p_windows_size.width, p_windows_size.height,
+                                          p_current_scale * this->p_scales[i], p_current_angle + this->p_angles[j]);
+
+                cv::Mat patch_rgb = cv::Mat::zeros(size_y_scaled, size_x_scaled, CV_32F);
+                if ((m_use_color || m_use_cnfeat) && input_rgb.channels() == 3) {
+                    patch_rgb =
+                        get_subwindow(input_rgb, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+                    geometric_transformations(patch_rgb, p_windows_size.width, p_windows_size.height,
+                                              p_current_scale * this->p_scales[i], p_current_angle + this->p_angles[j]);
+                }
+                get_features(patch_rgb, patch_gray, vars);
+            }
         }
     } else {
+        int size_x_scaled = floor(this->p_windows_size.width * this->p_current_scale * scale);
+        int size_y_scaled = floor(this->p_windows_size.height * this->p_current_scale * scale);
+
+        cv::Mat patch_gray = get_subwindow(input_gray, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+        geometric_transformations(patch_gray, p_windows_size.width, p_windows_size.height, p_current_scale * scale);
+
+        cv::Mat patch_rgb = cv::Mat::zeros(size_y_scaled, size_x_scaled, CV_32F);
+        if ((m_use_color || m_use_cnfeat) && input_rgb.channels() == 3) {
+            patch_rgb = get_subwindow(input_rgb, this->p_pose.cx, this->p_pose.cy, size_x_scaled, size_y_scaled);
+            geometric_transformations(patch_rgb, p_windows_size.width, p_windows_size.height, p_current_scale * scale, p_current_angle + angle);
+        }
         vars.patch_feats.clear();
-        get_features(input_rgb, input_gray, int(this->p_pose.cx), int(this->p_pose.cy), this->p_windows_size.width,
-                     this->p_windows_size.height, vars, this->p_current_scale *scale);
+        get_features(patch_rgb, patch_gray, vars);
     }
 
     fft.forward_window(vars.patch_feats, vars.zf, vars.fw_all, m_use_cuda ? vars.data_features.deviceMem() : nullptr,
@@ -550,27 +628,17 @@ void KCF_Tracker::scale_track(ThreadCtx &vars, cv::Mat &input_rgb, cv::Mat &inpu
 
 // ****************************************************************************
 
-void KCF_Tracker::get_features(cv::Mat &input_rgb, cv::Mat &input_gray, int cx, int cy, int size_x, int size_y,
-                               ThreadCtx &vars, double scale, int angle)
+void KCF_Tracker::get_features(cv::Mat & patch_rgb, cv::Mat & patch_gray, ThreadCtx &vars)
 {
-    int size_x_scaled = int(floor(size_x * scale));
-    int size_y_scaled = int(floor(size_y * scale));
-
-    cv::Mat patch_gray = get_subwindow(input_gray, cx, cy, size_x_scaled, size_y_scaled);
-    cv::Mat patch_rgb = get_subwindow(input_rgb, cx, cy, size_x_scaled, size_y_scaled);
-
-    geometric_transformations(patch_gray, scale, size_x, size_y, angle);
 
     // get hog(Histogram of Oriented Gradients) features
     FHoG::extract(patch_gray, vars, 2, p_cell_size, 9);
 
     // get color rgb features (simple r,g,b channels)
     std::vector<cv::Mat> color_feat;
-    if ((m_use_color || m_use_cnfeat) && input_rgb.channels() == 3)
-        geometric_transformations(patch_rgb, scale, size_x, size_y, angle);
 
-    if (m_use_color && input_rgb.channels() == 3) {
-        // use rgb color space
+    if (m_use_color && patch_rgb.channels() == 3) {
+        //use rgb color space
         cv::Mat patch_rgb_norm;
         patch_rgb.convertTo(patch_rgb_norm, CV_32F, 1. / 255., -0.5);
         cv::Mat ch1(patch_rgb_norm.size(), CV_32FC1);
@@ -581,7 +649,7 @@ void KCF_Tracker::get_features(cv::Mat &input_rgb, cv::Mat &input_gray, int cx,
         color_feat.insert(color_feat.end(), rgb.begin(), rgb.end());
     }
 
-    if (m_use_cnfeat && input_rgb.channels() == 3) {
+    if (m_use_cnfeat && patch_rgb.channels() == 3) {
         std::vector<cv::Mat> cn_feat = CNFeat::extract(patch_rgb);
         color_feat.insert(color_feat.end(), cn_feat.begin(), cn_feat.end());
     }
@@ -703,7 +771,7 @@ cv::Mat KCF_Tracker::cosine_window_function(int dim1, int dim2)
 // Returns sub-window of image input centered at [cx, cy] coordinates),
 // with size [width, height]. If any pixels are outside of the image,
 // they will replicate the values at the borders.
-cv::Mat KCF_Tracker::get_subwindow(const cv::Mat &input, int cx, int cy, int width, int height/*, int angle*/)
+cv::Mat KCF_Tracker::get_subwindow(const cv::Mat &input, int cx, int cy, int width, int height)
 {
     cv::Mat patch;
 
@@ -742,37 +810,10 @@ cv::Mat KCF_Tracker::get_subwindow(const cv::Mat &input, int cx, int cy, int wid
     } else
         y2 += height % 2;
 
-    //     cv::Point2f center(x1+width/2, y1+height/2);
-    //     cv::Mat r = getRotationMatrix2D(center, angle, 1.0);
-    //
-    //     cv::Mat input_clone = input.clone();
-    //
-    //     cv::warpAffine(input_clone, input_clone, r, cv::Size(input_clone.cols, input_clone.rows), cv::INTER_LINEAR,
-    //     cv::BORDER_CONSTANT);
-    cv::Mat input_clone;
-    if (m_visual_debug) {
-        input_clone = input.clone();
-        cv::rectangle(input_clone, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(0, 255, 0));
-        cv::line(input_clone, cv::Point(0, (input_clone.rows - 1) / 2),
-                 cv::Point(input_clone.cols - 1, (input_clone.rows - 1) / 2), cv::Scalar(0, 0, 255));
-        cv::line(input_clone, cv::Point((input_clone.cols - 1) / 2, 0),
-                 cv::Point((input_clone.cols - 1) / 2, input_clone.rows - 1), cv::Scalar(0, 0, 255));
-
-        cv::imshow("Patch before copyMakeBorder", input_clone);
-    }
-
     if (x2 - x1 == 0 || y2 - y1 == 0)
         patch = cv::Mat::zeros(height, width, CV_32FC1);
-    else {
-        cv::copyMakeBorder(input(cv::Range(y1, y2), cv::Range(x1, x2)), patch, top, bottom, left, right,
-                           cv::BORDER_REPLICATE);
-        if (m_visual_debug) {
-            cv::Mat patch_dummy;
-            cv::copyMakeBorder(input_clone(cv::Range(y1, y2), cv::Range(x1, x2)), patch_dummy, top, bottom, left, right,
-                               cv::BORDER_REPLICATE);
-            cv::imshow("Patch after copyMakeBorder", patch_dummy);
-        }
-    }
+    else
+        cv::copyMakeBorder(input(cv::Range(y1, y2), cv::Range(x1, x2)), patch, top, bottom, left, right, cv::BORDER_REPLICATE);
 
     // sanity check
     assert(patch.cols == width && patch.rows == height);
@@ -780,22 +821,16 @@ cv::Mat KCF_Tracker::get_subwindow(const cv::Mat &input, int cx, int cy, int wid
     return patch;
 }
 
-void KCF_Tracker::geometric_transformations(cv::Mat &patch, double scale, int size_x, int size_y, int angle)
+void KCF_Tracker::geometric_transformations(cv::Mat &patch, int size_x, int size_y, double scale, int angle,
+                                            bool search)
 {
     if (m_use_angle) {
         cv::Point2f center((patch.cols - 1) / 2., (patch.rows - 1) / 2.);
         cv::Mat r = cv::getRotationMatrix2D(center, angle, 1.0);
 
         cv::warpAffine(patch, patch, r, cv::Size(patch.cols, patch.rows), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
-
-        if (m_visual_debug) {
-            cv::Mat patch_copy = patch.clone();
-            cv::namedWindow("Patch RGB copy", CV_WINDOW_AUTOSIZE);
-            cv::putText(patch_copy, std::to_string(angle), cv::Point(0, patch_copy.rows - 1),
-                        cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(0, 255, 0), 2, cv::LINE_AA);
-            cv::imshow("Rotated patch", patch_copy);
-        }
     }
+
     // resize to default size
     if (patch.channels() != 3) {
         if (scale > 1.) {
@@ -811,11 +846,31 @@ void KCF_Tracker::geometric_transformations(cv::Mat &patch, double scale, int si
         } else {
             cv::resize(patch, patch, cv::Size(size_x / p_cell_size, size_y / p_cell_size), 0., 0., cv::INTER_LINEAR);
         }
+        if (m_visual_debug && search) {
+            cv::Mat input_clone = patch.clone();
+            cv::resize(input_clone, input_clone, cv::Size(p_debug_image_size, p_debug_image_size), 0., 0.,
+                       cv::INTER_LINEAR);
+
+            std::string angle_string = std::to_string(p_current_angle + angle);
+            if (p_count % 5 == 0) {
+                std::string scale_string = std::to_string(scale);
+                scale_string.erase(scale_string.find_last_not_of('0') + 1, std::string::npos);
+                scale_string.erase(scale_string.find_last_not_of('.') + 1, std::string::npos);
+                cv::putText(input_clone, scale_string, cv::Point(0, 10), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.5,
+                            cv::Scalar(0, 255, 0), 1);
+            }
+
+            cv::putText(input_clone, angle_string, cv::Point(1, input_clone.rows - 5), cv::FONT_HERSHEY_COMPLEX_SMALL,
+                        0.5, cv::Scalar(0, 255, 0), 1);
+
+            p_debug_subwindows.push_back(input_clone);
+            p_count += 1;
+        }
     }
 }
 
-void KCF_Tracker::gaussian_correlation(struct ThreadCtx &vars, const ComplexMat &xf, const ComplexMat &yf,
-                                       double sigma, bool auto_correlation)
+void KCF_Tracker::gaussian_correlation(struct ThreadCtx &vars, const ComplexMat &xf, const ComplexMat &yf, double sigma,
+                                       bool auto_correlation)
 {
 #ifdef CUFFT
     xf.sqr_norm(vars.xf_sqr_norm.deviceMem());
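
geometric_transformations(), shown in the two hunks above, rotates the extracted patch about its own centre with cv::getRotationMatrix2D + cv::warpAffine (replicating the border) and then resizes it to the fixed template size in cells. A standalone sketch of those two steps on a synthetic patch; the patch size, angle and cell size are illustrative:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    const int size_x = 96, size_y = 96, cell_size = 4;   // illustrative template size
    const int angle = 20;

    cv::Mat patch(120, 120, CV_32FC1, cv::Scalar(0.5f));  // stands in for get_subwindow() output

    // Rotate about the patch centre, filling uncovered pixels from the border.
    cv::Point2f center((patch.cols - 1) / 2.f, (patch.rows - 1) / 2.f);
    cv::Mat r = cv::getRotationMatrix2D(center, angle, 1.0);
    cv::warpAffine(patch, patch, r, cv::Size(patch.cols, patch.rows), cv::INTER_LINEAR, cv::BORDER_REPLICATE);

    // Resize to the fixed feature-extraction size (one value per HOG cell).
    cv::resize(patch, patch, cv::Size(size_x / cell_size, size_y / cell_size), 0., 0., cv::INTER_LINEAR);

    return 0;
}
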
index 7f4c559cbb3b865597f946c8705b9d4770802ccc..9af1364af76a6fbb6ba40df31e7f0758177060aa 100644 (file)
--- a/src/kcf.h
+++ b/src/kcf.h
 #include "threadctx.hpp"
 #include "pragmas.h"
 
-struct BBox_c
-{
+struct BBox_c {
     double cx, cy, w, h, a;
 
     inline void scale(double factor)
     {
         cx *= factor;
         cy *= factor;
-        w  *= factor;
-        h  *= factor;
+        w *= factor;
+        h *= factor;
     }
 
     inline void scale_x(double factor)
     {
         cx *= factor;
-        w  *= factor;
+        w *= factor;
     }
 
     inline void scale_y(double factor)
     {
         cy *= factor;
-        h  *= factor;
-    }
-
-    inline cv::Rect get_rect()
-    {
-        return cv::Rect(int(cx-w/2.), int(cy-h/2.), int(w), int(h));
+        h *= factor;
     }
 
+    inline cv::Rect get_rect() { return cv::Rect(int(cx - w / 2.), int(cy - h / 2.), int(w), int(h)); }
 };
 
-class KCF_Tracker
-{
-public:
-    bool m_debug {false};
-       bool m_visual_debug {false};
-    bool m_use_scale {false};
-    bool m_use_angle {true}; //Works only when m_use_scale is off and m_use_subpixel_localization too and used on RotatingBox dataset.
-    bool m_use_color {true};
+class KCF_Tracker {
+  public:
+    bool m_debug{false};
+    bool m_visual_debug{false};
+    bool m_use_scale{true};
+    bool m_use_angle{true}; // Does not work with the FFTW BIG_BATCH build
+    bool m_use_color{true};
 #ifdef ASYNC
-    bool m_use_multithreading {true};
+    bool m_use_multithreading{true};
 #else
-    bool m_use_multithreading {false};
-#endif //ASYNC
-    bool m_use_subpixel_localization {true};
-    bool m_use_subgrid_scale {true};
-    bool m_use_cnfeat {true};
-    bool m_use_linearkernel {false};
+    bool m_use_multithreading{false};
+#endif // ASYNC
+    bool m_use_subpixel_localization{true};
+    bool m_use_subgrid_scale{true};
+    bool m_use_cnfeat{true};
+    bool m_use_linearkernel{false};
 #ifdef BIG_BATCH
-    bool m_use_big_batch {true};
+    bool m_use_big_batch{true};
 #else
-    bool m_use_big_batch {false};
+    bool m_use_big_batch{false};
 #endif
 #ifdef CUFFT
-    bool m_use_cuda {true};
+    bool m_use_cuda{true};
 #else
-    bool m_use_cuda {false};
+    bool m_use_cuda{false};
 #endif
 
     /*
@@ -87,20 +81,21 @@ public:
     output_sigma_factor ... spatial bandwidth (proportional to target)  (0.1)
     cell_size           ... hog cell size                               (4)
     */
-    KCF_Tracker(double padding, double kernel_sigma, double lambda, double interp_factor, double output_sigma_factor, int cell_size);
+    KCF_Tracker(double padding, double kernel_sigma, double lambda, double interp_factor, double output_sigma_factor,
+                int cell_size);
     KCF_Tracker();
     ~KCF_Tracker();
 
     // Init/re-init methods
-    void init(cv::Mat & img, const cv::Rect & bbox, int fit_size_x, int fit_size_y);
-    void setTrackerPose(BBox_c & bbox, cv::Mat & img, int fit_size_x, int fit_size_y);
-    void updateTrackerPosition(BBox_c & bbox);
+    void init(cv::Mat &img, const cv::Rect &bbox, int fit_size_x, int fit_size_y);
+    void setTrackerPose(BBox_c &bbox, cv::Mat &img, int fit_size_x, int fit_size_y);
+    void updateTrackerPosition(BBox_c &bbox);
 
     // frame-to-frame object tracking
-    void track(cv::Mat & img);
+    void track(cv::Mat &img);
     BBox_c getBBox();
 
-private:
+  private:
     Fft &fft;
 
     BBox_c p_pose;
@@ -115,51 +110,60 @@ private:
     double p_padding = 1.5;
     double p_output_sigma_factor = 0.1;
     double p_output_sigma;
-    double p_kernel_sigma = 0.5;    //def = 0.5
-    double p_lambda = 1e-4;         //regularization in learning step
-    double p_interp_factor = 0.02;  //def = 0.02, linear interpolation factor for adaptation
-    int p_cell_size = 4;            //4 for hog (= bin_size)
+    double p_kernel_sigma = 0.5;   // def = 0.5
+    double p_lambda = 1e-4;        // regularization in learning step
+    double p_interp_factor = 0.02; // def = 0.02, linear interpolation factor for adaptation
+    int p_cell_size = 4;           // 4 for hog (= bin_size)
     cv::Size p_windows_size;
-    int p_num_scales {7};
+    int p_num_scales{7};
     double p_scale_step = 1.02;
     double p_current_scale = 1.;
     double p_min_max_scale[2];
     std::vector<double> p_scales;
-    int p_num_angles {5};
+    int p_num_angles{5};
     int p_current_angle = 0;
-    int p_angle_min  = -20, p_angle_max = 20;
+    int p_angle_min = -20, p_angle_max = 20;
     int p_angle_step = 10;
-    std::vector<double> p_angles;
+    std::vector<int> p_angles;
 
-    //for big batch
-    int p_num_of_feats;
+    // for visual debug
+    int p_debug_image_size = 100;
+    int p_count = 0;
+    std::vector<cv::Mat> p_debug_scale_responses;
+    std::vector<cv::Mat> p_debug_subwindows;
+
+    // for big batch
+    int p_num_of_feats = 31 + (m_use_color ? 3 : 0) + (m_use_cnfeat ? 10 : 0);
+
+    // for CUDA
     int p_roi_height, p_roi_width;
 
     std::list<std::unique_ptr<ThreadCtx>> p_threadctxs;
 
-    //CUDA compability
+    // CUDA compability
     cv::Mat p_rot_labels;
     DynMem p_rot_labels_data;
 
-    //model
+    // model
     ComplexMat p_yf;
     ComplexMat p_model_alphaf;
     ComplexMat p_model_alphaf_num;
     ComplexMat p_model_alphaf_den;
     ComplexMat p_model_xf;
     ComplexMat p_xf;
-    //helping functions
-    void scale_track(ThreadCtx & vars, cv::Mat & input_rgb, cv::Mat & input_gray, double scale);
-    cv::Mat get_subwindow(const cv::Mat & input, int cx, int cy, int size_x, int size_y);
+    // helping functions
+    void scale_track(ThreadCtx &vars, cv::Mat &input_rgb, cv::Mat &input_gray, double scale, int angle = 0);
+    cv::Mat get_subwindow(const cv::Mat &input, int cx, int cy, int size_x, int size_y);
     cv::Mat gaussian_shaped_labels(double sigma, int dim1, int dim2);
-    void gaussian_correlation(struct ThreadCtx &vars, const ComplexMat & xf, const ComplexMat & yf, double sigma, bool auto_correlation = false);
-    cv::Mat circshift(const cv::Mat & patch, int x_rot, int y_rot);
+    void gaussian_correlation(struct ThreadCtx &vars, const ComplexMat &xf, const ComplexMat &yf, double sigma,
+                              bool auto_correlation = false);
+    cv::Mat circshift(const cv::Mat &patch, int x_rot, int y_rot);
     cv::Mat cosine_window_function(int dim1, int dim2);
-    void get_features(cv::Mat & input_rgb, cv::Mat & input_gray, int cx, int cy, int size_x, int size_y, ThreadCtx & vars, double scale = 1., int angle = 0);
-    void geometric_transformations(cv::Mat & patch,  double scale,int size_x, int size_y, int angle);
-    cv::Point2f sub_pixel_peak(cv::Point & max_loc, cv::Mat & response);
+    void get_features(cv::Mat &patch_rgb, cv::Mat &patch_gray, ThreadCtx &vars);
+    void geometric_transformations(cv::Mat &patch, int size_x, int size_y, double scale = 1, int angle = 0,
+                                   bool search = true);
+    cv::Point2f sub_pixel_peak(cv::Point &max_loc, cv::Mat &response);
     double sub_grid_scale(int index = -1);
-
 };
 
-#endif //KCF_HEADER_6565467831231
+#endif // KCF_HEADER_6565467831231
index 0e172c91460e376681c0007eedde5fd9cfa6717a..26ddef8e72d77c268b02eafe4c49c84c125d024f 100644 (file)
--- a/src/threadctx.hpp
+++ b/src/threadctx.hpp
@@ -15,26 +15,26 @@ typedef int *cudaStream_t;
 
 struct ThreadCtx {
   public:
-    ThreadCtx(cv::Size windows_size, uint cell_size, uint num_of_feats, uint num_of_scales = 1)
+    ThreadCtx(cv::Size windows_size, uint cell_size, uint num_of_feats, uint num_of_scales = 1, uint num_of_angles = 1)
     {
-        this->xf_sqr_norm = DynMem(num_of_scales * sizeof(float));
+        this->xf_sqr_norm = DynMem(num_of_scales * num_of_angles * sizeof(float));
         this->yf_sqr_norm = DynMem(sizeof(float));
         this->patch_feats.reserve(uint(num_of_feats));
 
         uint cells_size =
             ((uint(windows_size.width) / cell_size) * (uint(windows_size.height) / cell_size)) * sizeof(float);
 
-#if  !defined(BIG_BATCH) && defined(CUFFT) && (defined(ASYNC) || defined(OPENMP))
+#if !defined(BIG_BATCH) && defined(CUFFT) && (defined(ASYNC) || defined(OPENMP))
         CudaSafeCall(cudaStreamCreate(&this->stream));
 #endif
 
 #if defined(CUFFT) || defined(FFTW)
-        this->gauss_corr_res = DynMem(cells_size * num_of_scales);
+        this->gauss_corr_res = DynMem(cells_size * num_of_scales * num_of_angles);
         this->data_features = DynMem(cells_size * num_of_feats);
 
         uint width_freq = (uint(windows_size.width) / cell_size) / 2 + 1;
 
-        this->in_all = cv::Mat(windows_size.height / int(cell_size) * int(num_of_scales),
+        this->in_all = cv::Mat(windows_size.height / int(cell_size) * int(num_of_scales) * int(num_of_angles),
                                windows_size.width / int(cell_size), CV_32F, this->gauss_corr_res.hostMem());
 
         this->fw_all = cv::Mat((windows_size.height / int(cell_size)) * int(num_of_feats),
@@ -46,30 +46,31 @@ struct ThreadCtx {
 #endif
 
         this->data_i_features = DynMem(cells_size * num_of_feats);
-        this->data_i_1ch = DynMem(cells_size * num_of_scales);
+        this->data_i_1ch = DynMem(cells_size * num_of_scales * num_of_angles);
 
         this->ifft2_res = cv::Mat(windows_size.height / int(cell_size), windows_size.width / int(cell_size),
                                   CV_32FC(int(num_of_feats)), this->data_i_features.hostMem());
 
         this->response = cv::Mat(windows_size.height / int(cell_size), windows_size.width / int(cell_size),
-                                 CV_32FC(int(num_of_scales)), this->data_i_1ch.hostMem());
+                                 CV_32FC(int(num_of_scales * num_of_angles)), this->data_i_1ch.hostMem());
 
         this->patch_feats.reserve(num_of_feats);
 
 #ifdef CUFFT
-        this->zf.create(uint(windows_size.height) / cell_size, width_freq, num_of_feats, num_of_scales, this->stream);
-        this->kzf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales, this->stream);
-        this->kf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales, this->stream);
+        this->zf.create(uint(windows_size.height) / cell_size, width_freq, num_of_feats, num_of_scales * num_of_angles,
+                        this->stream);
+        this->kzf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales * num_of_angles, this->stream);
+        this->kf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales * num_of_angles, this->stream);
 #else
-        this->zf.create(uint(windows_size.height) / cell_size, width_freq, num_of_feats, num_of_scales);
-        this->kzf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales);
-        this->kf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales);
+        this->zf.create(uint(windows_size.height) / cell_size, width_freq, num_of_feats, num_of_scales * num_of_angles);
+        this->kzf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales * num_of_angles);
+        this->kf.create(uint(windows_size.height) / cell_size, width_freq, num_of_scales * num_of_angles);
 #endif
 
         if (num_of_scales > 1) {
-            this->max_responses.reserve(uint(num_of_scales));
-            this->max_locs.reserve(uint(num_of_scales));
-            this->response_maps.reserve(uint(num_of_scales));
+            this->max_responses.reserve(uint(num_of_scales * num_of_angles));
+            this->max_locs.reserve(uint(num_of_scales * num_of_angles));
+            this->response_maps.reserve(uint(num_of_scales * num_of_angles));
         }
     }
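
Every per-scale buffer in the constructor above is now sized for the full scale-by-angle grid (num_of_scales * num_of_angles planes). A reduced sketch of that sizing with plain host buffers; DynMem and ComplexMat are project types, so std::vector stands in for them, and the window, cell and feature counts are illustrative:

#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    const cv::Size windows_size(96, 96);
    const unsigned cell_size = 4, num_of_feats = 44;
    const unsigned num_of_scales = 7, num_of_angles = 5;

    const unsigned cells = (windows_size.width / cell_size) * (windows_size.height / cell_size);
    const unsigned planes = num_of_scales * num_of_angles;   // one response plane per (scale, angle) pair

    std::vector<float> xf_sqr_norm(planes);                  // was num_of_scales only
    std::vector<float> gauss_corr_res(cells * planes);
    std::vector<float> data_features(cells * num_of_feats);

    // One multi-channel response map, one channel per (scale, angle) pair,
    // backed by the flat buffer above.
    cv::Mat response(windows_size.height / int(cell_size), windows_size.width / int(cell_size),
                     CV_32FC(int(planes)), gauss_corr_res.data());

    std::cout << "response planes: " << response.channels() << std::endl;
    return 0;
}
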