DecodeImageM CvMat*
CvMat buf
int iscolor CV_LOAD_IMAGE_COLOR
+
+# cvaux: HOG-based multi-scale object (pedestrian) detection
+HOGDetectMultiScale CvSeq*
+ CvArr image
+ CvMemStorage storage
+ CvArr svm_classifier NULL
+ CvSize win_stride cvSize(0,0)
+ double hit_threshold 0
+ double scale 1.05
+ int group_threshold 2
+ CvSize padding cvSize(0,0)
+ CvSize win_size cvSize(64,128)
+ CvSize block_size cvSize(16,16)
+ CvSize block_stride cvSize(8,8)
+ CvSize cell_size cvSize(8,8)
+ int nbins 9
+ int gammaCorrection 1
return Py_BuildValue("ffNN", min_val, max_val, pminloc, pmaxloc);
}
-/*static PyObject *pycvGetMinMaxHistValue(PyObject *self, PyObject *args, PyObject *kw)
-{
- CvHistogram* hist;
- PyObject *pyobj_hist = NULL;
- float min_val;
- float max_val;
- int min_loc[CV_MAX_DIM];
- int max_loc[CV_MAX_DIM];
-
- if (!PyArg_ParseTuple(args, "O", &pyobj_hist))
- return NULL;
- if (!convert_to_CvHistogram(pyobj_hist, &hist, "hist")) return NULL;
- ERRWRAP(cvGetMinMaxHistValue(hist, &min_val, &max_val, min_loc, max_loc));
- int d = cvGetDims(hist->bins);
- PyObject *pminloc = PyTuple_New(d), *pmaxloc = PyTuple_New(d);
- for (int i = 0; i < d; i++) {
- PyTuple_SetItem(pminloc, i, PyInt_FromLong(min_loc[i]));
- PyTuple_SetItem(pmaxloc, i, PyInt_FromLong(max_loc[i]));
- }
- return Py_BuildValue("ffNN", min_val, max_val, pminloc, pmaxloc);
-}*/
-
-
+// C-style wrapper around the C++ cv::HOGDescriptor so the generated Python
+// bindings (spec entry "HOGDetectMultiScale" above) can run HOG multi-scale
+// detection through the legacy CvArr/CvSeq interface.
+//   image          - input image; wrapped (not copied) via cvarrToMat
+//   storage        - memory storage the returned sequence is allocated from
+//   svm_classifier - optional SVM coefficient vector (single-row or
+//                    single-column, continuous, CV_32FC1); NULL selects the
+//                    built-in default people detector
+//   remaining args - forwarded to the HOGDescriptor ctor / detectMultiScale
+// Returns: CvSeq whose elements are cv::Rect detections (may be empty).
+static CvSeq* cvHOGDetectMultiScale( const CvArr* image, CvMemStorage* storage,
+ const CvArr* svm_classifier=NULL, CvSize win_stride=cvSize(0,0),
+ double hit_threshold=0, double scale=1.05,
+ int group_threshold=2, CvSize padding=cvSize(0,0),
+ CvSize win_size=cvSize(64,128), CvSize block_size=cvSize(16,16),
+ CvSize block_stride=cvSize(8,8), CvSize cell_size=cvSize(8,8),
+ int nbins=9, int gammaCorrection=1 )
+{
+ cv::HOGDescriptor hog(win_size, block_size, block_stride, cell_size, nbins, 1, -1, cv::HOGDescriptor::L2Hys, 0.2, gammaCorrection!=0);
+ // cvSize(0,0) is the "unset" sentinel (matches the binding's default);
+ // fall back to the block stride as detectMultiScale requires a real stride.
+ if(win_stride.width == 0 && win_stride.height == 0)
+ win_stride = block_stride;
+ cv::Mat img = cv::cvarrToMat(image);
+ std::vector<cv::Rect> found;
+ if(svm_classifier)
+ {
+ // Validate the user-supplied detector: must be a continuous 1-D float
+ // vector, then copy it into the vector<float> setSVMDetector expects.
+ CvMat stub, *m = cvGetMat(svm_classifier, &stub);
+ int sz = m->cols*m->rows;
+ CV_Assert(CV_IS_MAT_CONT(m->type) && (m->cols == 1 || m->rows == 1) && CV_MAT_TYPE(m->type) == CV_32FC1);
+ std::vector<float> w(sz);
+ std::copy(m->data.fl, m->data.fl + sz, w.begin());
+ hog.setSVMDetector(w);
+ }
+ else
+ hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
+ hog.detectMultiScale(img, found, hit_threshold, win_stride, padding, scale, group_threshold);
+ // Repackage the C++ result as a CvSeq of cv::Rect for the old-style API;
+ // the seq lives in `storage`, so the caller controls its lifetime.
+ CvSeq* seq = cvCreateSeq(cv::DataType<cv::Rect>::type, sizeof(CvSeq), sizeof(cv::Rect), storage);
+ if(found.size())
+ cvSeqPushMulti(seq, &found[0], (int)found.size());
+ return seq;
+}
static int zero = 0;
MY_DEFINE_EXAMPLE(facedetect facedetect.cpp)
MY_DEFINE_EXAMPLE(ffilldemo ffilldemo.c)
MY_DEFINE_EXAMPLE(fback fback.cpp)
+ MY_DEFINE_EXAMPLE(fback_c fback_c.c)
MY_DEFINE_EXAMPLE(find_obj find_obj.cpp)
MY_DEFINE_EXAMPLE(fitellipse fitellipse.cpp)
MY_DEFINE_EXAMPLE(houghlines houghlines.c)
--- /dev/null
+#undef _GLIBCXX_DEBUG
+
+#include "cv.h"
+#include "highgui.h"
+
+// Visualizes a dense optical-flow field on top of `cflowmap`: every `step`
+// pixels it draws a line from (x,y) to (x,y)+flow(y,x) and a filled dot at
+// the sample point. `flow` holds per-pixel CvPoint2D32f displacements and is
+// indexed with the same (row, col) grid as cflowmap.
+// NOTE(review): `scale` is accepted but never applied to fxy — it looks like
+// the displacement was meant to be multiplied by it before drawing; confirm
+// against the C++ fback.cpp sample before changing.
+void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
+ double scale, CvScalar color)
+{
+ int x, y;
+ for( y = 0; y < cflowmap->rows; y += step)
+ for( x = 0; x < cflowmap->cols; x += step)
+ {
+ CvPoint2D32f fxy = CV_MAT_ELEM(*flow, CvPoint2D32f, y, x);
+ cvLine(cflowmap, cvPoint(x,y), cvPoint(cvRound(x+fxy.x), cvRound(y+fxy.y)),
+ color, 1, 8, 0);
+ cvCircle(cflowmap, cvPoint(x,y), 2, color, -1, 8, 0);
+ }
+}
+
+// Demo entry point: grabs frames from camera 0, computes Farneback dense
+// optical flow between consecutive grayscale frames, and shows the flow
+// field overlaid on the previous frame. Any key press exits.
+int main(int argc, char** argv)
+{
+ CvCapture* capture = cvCreateCameraCapture(0);
+
+ if( !capture )
+ return -1;
+
+ CvMat* prevgray = 0, *gray = 0, *flow = 0, *cflow = 0;
+ cvNamedWindow("flow", 1);
+
+ for(;;)
+ {
+ // On the very first frame there is no previous image yet, so flow
+ // computation is skipped below (firstFrame is latched before allocation).
+ int firstFrame = gray == 0;
+ IplImage* frame = cvQueryFrame(capture);
+ if(!frame)
+ break;
+ if(!gray)
+ {
+ // Lazy allocation: buffer sizes depend on the camera's frame size,
+ // which is only known once the first frame arrives.
+ gray = cvCreateMat(frame->height, frame->width, CV_8UC1);
+ prevgray = cvCreateMat(gray->rows, gray->cols, gray->type);
+ flow = cvCreateMat(gray->rows, gray->cols, CV_32FC2);
+ cflow = cvCreateMat(gray->rows, gray->cols, CV_8UC3);
+ }
+ cvCvtColor(frame, gray, CV_BGR2GRAY);
+
+ if( !firstFrame )
+ {
+ // prevgray -> gray flow; visualize on a BGR copy of the previous frame.
+ cvCalcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
+ cvCvtColor(prevgray, cflow, CV_GRAY2BGR);
+ drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
+ cvShowImage("flow", cflow);
+ }
+ if(cvWaitKey(30)>=0)
+ break;
+ {
+ // Recycle buffers: current frame becomes "previous" for the next pass.
+ CvMat* temp;
+ CV_SWAP(prevgray, gray, temp);
+ }
+ }
+ // NOTE(review): gray/prevgray/flow/cflow are never cvReleaseMat'd and the
+ // window is never destroyed — tolerable in a sample since process exit
+ // reclaims everything, but worth a cleanup pass.
+ cvReleaseCapture(&capture);
+ return 0;
+}
--- /dev/null
+import sys
+from cv import *
+
+def inside(r, q):
+ """Return True if rect r lies strictly inside rect q.
+ Both rects use the ((x, y), (w, h)) layout produced by HOGDetectMultiScale.
+ """
+ (rx, ry), (rw, rh) = r
+ (qx, qy), (qw, qh) = q
+ return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
+
+# Input handling: argv[1] is either a single image, or (if it doesn't load as
+# an image) a text file listing one image path per line.
+try:
+ img = LoadImage(sys.argv[1])
+except:
+ try:
+ f = open(sys.argv[1], "rt")
+ except:
+ print "cannot read " + sys.argv[1]
+ sys.exit(-1)
+ imglist = list(f.readlines())
+else:
+ imglist = [sys.argv[1]]
+
+NamedWindow("people detection demo", 1)
+storage = CreateMemStorage(0)
+
+for name in imglist:
+ n = name.strip()
+ print n
+ try:
+ img = LoadImage(n)
+ except:
+ # Skip unreadable entries (e.g. blank lines in the list file).
+ continue
+
+ #ClearMemStorage(storage)
+ # Run the HOG people detector (default SVM, since no classifier is passed).
+ found = list(HOGDetectMultiScale(img, storage, win_stride=(8,8),
+ padding=(32,32), scale=1.05, group_threshold=2))
+ # Drop detections fully contained in another detection (duplicate hits at
+ # different scales on the same person).
+ found_filtered = []
+ for r in found:
+ insidef = False
+ for q in found:
+ if inside(r, q):
+ insidef = True
+ break
+ if not insidef:
+ found_filtered.append(r)
+ for r in found_filtered:
+ (rx, ry), (rw, rh) = r
+ # Shrink the raw HOG window a bit: the detector's 64x128 window includes
+ # margin around the person, so the drawn box is tightened for display.
+ tl = (rx + int(rw*0.1), ry + int(rh*0.07))
+ br = (rx + int(rw*0.9), ry + int(rh*0.87))
+ Rectangle(img, tl, br, (0, 255, 0), 3)
+
+ ShowImage("people detection demo", img)
+ c = WaitKey(0)
+ if c == ord('q'):
+ break