const CvArr* tilted_sum, double scale );
/* runs the cascade on the specified window */
-CVAPI(int) cvRunHaarClassifierCascade( CvHaarClassifierCascade* cascade,
- CvPoint pt, int start_stage CV_DEFAULT(0));
+CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade,
+ CvPoint pt, int start_stage CV_DEFAULT(0));
/****************************************************************************************\
* Camera Calibration, Pose Estimation and Stereo *
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef _CV_HPP_
-#define _CV_HPP_
-
-#ifdef __cplusplus
-
-namespace cv
-{
-
-enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT,
- BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_REFLECT_101=IPL_BORDER_REFLECT_101,
- BORDER_REFLECT101=BORDER_REFLECT_101, BORDER_WRAP=IPL_BORDER_WRAP,
- BORDER_TRANSPARENT, BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 };
-
-CV_EXPORTS int borderInterpolate( int p, int len, int borderType );
-
-struct CV_EXPORTS BaseRowFilter
-{
- BaseRowFilter();
- virtual ~BaseRowFilter();
- virtual void operator()(const uchar* src, uchar* dst,
- int width, int cn) = 0;
- int ksize, anchor;
-};
-
-
-struct CV_EXPORTS BaseColumnFilter
-{
- BaseColumnFilter();
- virtual ~BaseColumnFilter();
- virtual void operator()(const uchar** src, uchar* dst, int dststep,
- int dstcount, int width) = 0;
- virtual void reset();
- int ksize, anchor;
-};
-
-
-struct CV_EXPORTS BaseFilter
-{
- BaseFilter();
- virtual ~BaseFilter();
- virtual void operator()(const uchar** src, uchar* dst, int dststep,
- int dstcount, int width, int cn) = 0;
- virtual void reset();
- Size ksize;
- Point anchor;
-};
-
-
-struct CV_EXPORTS FilterEngine
-{
- FilterEngine();
- FilterEngine(const Ptr<BaseFilter>& _filter2D,
- const Ptr<BaseRowFilter>& _rowFilter,
- const Ptr<BaseColumnFilter>& _columnFilter,
- int srcType, int dstType, int bufType,
- int _rowBorderType=BORDER_REPLICATE,
- int _columnBorderType=-1,
- const Scalar& _borderValue=Scalar());
- virtual ~FilterEngine();
- void init(const Ptr<BaseFilter>& _filter2D,
- const Ptr<BaseRowFilter>& _rowFilter,
- const Ptr<BaseColumnFilter>& _columnFilter,
- int srcType, int dstType, int bufType,
- int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,
- const Scalar& _borderValue=Scalar());
- virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);
- virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),
- bool isolated=false, int maxBufRows=-1);
- virtual int proceed(const uchar* src, int srcStep, int srcCount,
- uchar* dst, int dstStep);
- virtual void apply( const Mat& src, Mat& dst,
- const Rect& srcRoi=Rect(0,0,-1,-1),
- Point dstOfs=Point(0,0),
- bool isolated=false);
- bool isSeparable() const { return filter2D.obj == 0; }
- int remainingInputRows() const;
- int remainingOutputRows() const;
-
- int srcType, dstType, bufType;
- Size ksize;
- Point anchor;
- int maxWidth;
- Size wholeSize;
- Rect roi;
- int dx1, dx2;
- int rowBorderType, columnBorderType;
- Vector<int> borderTab;
- int borderElemSize;
- Vector<uchar> ringBuf;
- Vector<uchar> srcRow;
- Vector<uchar> constBorderValue;
- Vector<uchar> constBorderRow;
- int bufStep, startY, startY0, endY, rowCount, dstY;
- Vector<uchar*> rows;
-
- Ptr<BaseFilter> filter2D;
- Ptr<BaseRowFilter> rowFilter;
- Ptr<BaseColumnFilter> columnFilter;
-};
-
-enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2,
- KERNEL_SMOOTH=4, KERNEL_INTEGER=8 };
-
-CV_EXPORTS int getKernelType(const Mat& kernel, Point anchor);
-
-CV_EXPORTS Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType,
- const Mat& kernel, int anchor,
- int symmetryType);
-
-CV_EXPORTS Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType,
- const Mat& kernel, int anchor,
- int symmetryType, double delta=0,
- int bits=0);
-
-CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,
- const Mat& kernel,
- Point anchor=Point(-1,-1),
- double delta=0, int bits=0);
-
-CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,
- const Mat& rowKernel, const Mat& columnKernel,
- Point _anchor=Point(-1,-1), double delta=0,
- int _rowBorderType=BORDER_DEFAULT,
- int _columnBorderType=-1,
- const Scalar& _borderValue=Scalar());
-
-CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,
- const Mat& kernel, Point _anchor=Point(-1,-1),
- double delta=0, int _rowBorderType=BORDER_DEFAULT,
- int _columnBorderType=-1, const Scalar& _borderValue=Scalar());
-
-CV_EXPORTS Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );
-
-CV_EXPORTS Ptr<FilterEngine> createGaussianFilter( int type, Size ksize,
- double sigma1, double sigma2=0,
- int borderType=BORDER_DEFAULT);
-
-CV_EXPORTS void getDerivKernels( Mat& kx, Mat& ky, int dx, int dy, int ksize,
- bool normalize=false, int ktype=CV_32F );
-
-CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,
- int dx, int dy, int ksize,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,
- int ksize, int anchor=-1);
-CV_EXPORTS Ptr<BaseColumnFilter> getColumnSumFilter(int sumType, int dstType,
- int ksize, int anchor=-1,
- double scale=1);
-CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksize,
- Point anchor=Point(-1,-1),
- bool normalize=true,
- int borderType=BORDER_DEFAULT);
-
-enum { MORPH_ERODE=0, MORPH_DILATE=1, MORPH_OPEN=2, MORPH_CLOSE=3,
- MORPH_GRADIENT=4, MORPH_TOPHAT=5, MORPH_BLACKHAT=6 };
-
-CV_EXPORTS Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1);
-CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1);
-CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel,
- Point anchor=Point(-1,-1));
-
-static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }
-
-CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, const Mat& kernel,
- Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT,
- int _columnBorderType=-1,
- const Scalar& _borderValue=morphologyDefaultBorderValue());
-
-enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };
-CV_EXPORTS Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1));
-
-CV_EXPORTS void copyMakeBorder( const Mat& src, Mat& dst,
- int top, int bottom, int left, int right,
- int borderType );
-
-CV_EXPORTS void medianBlur( const Mat& src, Mat& dst, int ksize );
-CV_EXPORTS void GaussianBlur( const Mat& src, Mat& dst, Size ksize,
- double sigma1, double sigma2=0,
- int borderType=BORDER_DEFAULT );
-CV_EXPORTS void bilateralFilter( const Mat& src, Mat& dst, int d,
- double sigmaColor, double sigmaSpace,
- int borderType=BORDER_DEFAULT );
-CV_EXPORTS void boxFilter( const Mat& src, Mat& dst, int ddepth,
- Size ksize, Point anchor=Point(-1,-1),
- bool normalize=true,
- int borderType=BORDER_DEFAULT );
-static inline void blur( const Mat& src, Mat& dst,
- Size ksize, Point anchor=Point(-1,-1),
- int borderType=BORDER_DEFAULT )
-{
- boxFilter( src, dst, -1, ksize, anchor, true, borderType );
-}
-
-CV_EXPORTS void filter2D( const Mat& src, Mat& dst, int ddepth,
- const Mat& kernel, Point anchor=Point(-1,-1),
- double delta=0, int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void sepFilter2D( const Mat& src, Mat& dst, int ddepth,
- const Mat& kernelX, const Mat& kernelY,
- Point anchor=Point(-1,-1),
- double delta=0, int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void Sobel( const Mat& src, Mat& dst, int ddepth,
- int dx, int dy, int ksize=3,
- double scale=1, double delta=0,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void Scharr( const Mat& src, Mat& dst, int ddepth,
- int dx, int dy, double scale=1, double delta=0,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void Laplacian( const Mat& src, Mat& dst, int ddepth,
- int ksize=1, double scale=1, double delta=0,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void erode( const Mat& src, Mat& dst, const Mat& kernel,
- Point anchor=Point(-1,-1), int iterations=1,
- int borderType=BORDER_CONSTANT,
- const Scalar& borderValue=morphologyDefaultBorderValue() );
-CV_EXPORTS void dilate( const Mat& src, Mat& dst, const Mat& kernel,
- Point anchor=Point(-1,-1), int iterations=1,
- int borderType=BORDER_CONSTANT,
- const Scalar& borderValue=morphologyDefaultBorderValue() );
-CV_EXPORTS void morphologyEx( const Mat& src, Mat& dst, int op, const Mat& kernel,
- Point anchor=Point(-1,-1), int iterations=1,
- int borderType=BORDER_CONSTANT,
- const Scalar& borderValue=morphologyDefaultBorderValue() );
-
-enum { INTER_NEAREST=0, INTER_LINEAR=1, INTER_CUBIC=2, INTER_AREA=3,
- INTER_LANCZOS4=4, INTER_MAX=7, WARP_INVERSE_MAP=16 };
-
-CV_EXPORTS void resize( const Mat& src, Mat& dst,
- Size dsize=Size(), double fx=0, double fy=0,
- int interpolation=INTER_LINEAR );
-
-CV_EXPORTS void warpAffine( const Mat& src, Mat& dst,
- const Mat& M, Size dsize,
- int flags=INTER_LINEAR,
- int borderMode=BORDER_CONSTANT,
- const Scalar& borderValue=Scalar());
-CV_EXPORTS void warpPerspective( const Mat& src, Mat& dst,
- const Mat& M, Size dsize,
- int flags=INTER_LINEAR,
- int borderMode=BORDER_CONSTANT,
- const Scalar& borderValue=Scalar());
-
-CV_EXPORTS void remap( const Mat& src, Mat& dst, const Mat& map1, const Mat& map2,
- int interpolation, int borderMode=BORDER_CONSTANT,
- const Scalar& borderValue=Scalar());
-
-CV_EXPORTS void convertMaps( const Mat& map1, const Mat& map2, Mat& dstmap1, Mat& dstmap2,
- int dstmap1type, bool nninterpolation=false );
-
-CV_EXPORTS Mat getRotationMatrix2D( Point2f center, double angle, double scale );
-CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] );
-CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] );
-
-CV_EXPORTS void integral( const Mat& src, Mat& sum, int sdepth=-1 );
-CV_EXPORTS void integral( const Mat& src, Mat& sum, Mat& sqsum, int sdepth=-1 );
-CV_EXPORTS void integral( const Mat& src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth=-1 );
-
-CV_EXPORTS void accumulate( const Mat& src, Mat& dst, const Mat& mask=Mat() );
-CV_EXPORTS void accumulateSquare( const Mat& src, Mat& dst, const Mat& mask=Mat() );
-CV_EXPORTS void accumulateProduct( const Mat& src1, const Mat& src2,
- Mat& dst, const Mat& mask=Mat() );
-CV_EXPORTS void accumulateWeighted( const Mat& src, Mat& dst,
- double alpha, const Mat& mask=Mat() );
-
-enum { THRESH_BINARY=0, THRESH_BINARY_INV=1, THRESH_TRUNC=2, THRESH_TOZERO=3,
- THRESH_TOZERO_INV=4, THRESH_MASK=7, THRESH_OTSU=8 };
-
-CV_EXPORTS double threshold( const Mat& src, Mat& dst, double thresh, double maxval, int type );
-
-enum { ADAPTIVE_THRESH_MEAN_C=0, ADAPTIVE_THRESH_GAUSSIAN_C=1 };
-
-CV_EXPORTS void adaptiveThreshold( const Mat& src, Mat& dst, double maxValue,
- int adaptiveMethod, int thresholdType,
- int blockSize, double C );
-
-CV_EXPORTS void pyrDown( const Mat& src, Mat& dst, const Size& dstsize=Size());
-CV_EXPORTS void pyrUp( const Mat& src, Mat& dst, const Size& dstsize=Size());
-CV_EXPORTS void buildPyramid( const Mat& src, Vector<Mat>& dst, int maxlevel );
-
-
-CV_EXPORTS void undistort( const Mat& src, Mat& dst, const Mat& cameraMatrix,
- const Mat& distCoeffs, const Mat& newCameraMatrix=Mat() );
-CV_EXPORTS void initUndistortRectifyMap( const Mat& cameraMatrix, const Mat& distCoeffs,
- const Mat& R, const Mat& newCameraMatrix,
- Size size, int m1type, Mat& map1, Mat& map2 );
-CV_EXPORTS Mat_<double> getDefaultNewCameraMatrix( const Mat_<double>& A, Size imgsize=Size(),
- bool centerPrincipalPoint=false );
-
-enum { OPTFLOW_USE_INITIAL_FLOW=4, OPTFLOW_FARNEBACK_GAUSSIAN=256 };
-
-CV_EXPORTS void calcOpticalFlowPyrLK( const Mat& prevImg, const Mat& nextImg,
- const Vector<Point2f>& prevPts,
- Vector<Point2f>& nextPts,
- Vector<bool>& status, Vector<float>& err,
- Size winSize=Size(15,15), int maxLevel=3,
- TermCriteria criteria=TermCriteria(
- TermCriteria::COUNT+TermCriteria::EPS,
- 30, 0.01),
- double derivLambda=0.5,
- int flags=0 );
-
-CV_EXPORTS void calcOpticalFlowFarneback( const Mat& prev0, const Mat& next0,
- Mat& flow0, double pyr_scale, int levels, int winsize,
- int iterations, int poly_n, double poly_sigma, int flags );
-
-
-CV_EXPORTS void calcHist( const Vector<Mat>& images, const Vector<int>& channels,
- const Mat& mask, MatND& hist, const Vector<int>& histSize,
- const Vector<Vector<float> >& ranges,
- bool uniform=true, bool accumulate=false );
-
-CV_EXPORTS void calcHist( const Vector<Mat>& images, const Vector<int>& channels,
- const Mat& mask, SparseMat& hist, const Vector<int>& histSize,
- const Vector<Vector<float> >& ranges,
- bool uniform=true, bool accumulate=false );
-
-CV_EXPORTS void calcBackProject( const Vector<Mat>& images, const Vector<int>& channels,
- const MatND& hist, Mat& backProject,
- const Vector<Vector<float> >& ranges,
- double scale=1, bool uniform=true );
-
-CV_EXPORTS void calcBackProject( const Vector<Mat>& images, const Vector<int>& channels,
- const SparseMat& hist, Mat& backProject,
- const Vector<Vector<float> >& ranges,
- double scale=1, bool uniform=true );
-
-CV_EXPORTS double compareHist( const MatND& H1, const MatND& H2, int method );
-
-CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method );
-
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////
-
-struct CV_EXPORTS CvLevMarq
-{
- CvLevMarq();
- CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- ~CvLevMarq();
- void init( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
- bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
-
- void clear();
- void step();
- enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
-
- CvMat* mask;
- CvMat* prevParam;
- CvMat* param;
- CvMat* J;
- CvMat* err;
- CvMat* JtJ;
- CvMat* JtJN;
- CvMat* JtErr;
- CvMat* JtJV;
- CvMat* JtJW;
- double prevErrNorm, errNorm;
- int lambdaLg10;
- CvTermCriteria criteria;
- int state;
- int iters;
- bool completeSymmFlag;
-};
-
-
-// 2009-01-12, Xavier Delacour <xavier.delacour@gmail.com>
-
-struct lsh_hash {
- int h1, h2;
-};
-
-struct CvLSHOperations {
- virtual ~CvLSHOperations() {}
-
- virtual int vector_add(const void* data) = 0;
- virtual void vector_remove(int i) = 0;
- virtual const void* vector_lookup(int i) = 0;
- virtual void vector_reserve(int n) = 0;
- virtual unsigned int vector_count() = 0;
-
- virtual void hash_insert(lsh_hash h, int l, int i) = 0;
- virtual void hash_remove(lsh_hash h, int l, int i) = 0;
- virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0;
-};
-
-
-#endif /* __cplusplus */
-
-#endif /* _CV_HPP_ */
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#ifndef _CV_HPP_\r
+#define _CV_HPP_\r
+\r
+#ifdef __cplusplus\r
+\r
+namespace cv\r
+{\r
+\r
+enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT,\r
+ BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_REFLECT_101=IPL_BORDER_REFLECT_101,\r
+ BORDER_REFLECT101=BORDER_REFLECT_101, BORDER_WRAP=IPL_BORDER_WRAP,\r
+ BORDER_TRANSPARENT, BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 };\r
+\r
+CV_EXPORTS int borderInterpolate( int p, int len, int borderType );\r
+\r
+struct CV_EXPORTS BaseRowFilter\r
+{\r
+ BaseRowFilter();\r
+ virtual ~BaseRowFilter();\r
+ virtual void operator()(const uchar* src, uchar* dst,\r
+ int width, int cn) = 0;\r
+ int ksize, anchor;\r
+};\r
+\r
+\r
+struct CV_EXPORTS BaseColumnFilter\r
+{\r
+ BaseColumnFilter();\r
+ virtual ~BaseColumnFilter();\r
+ virtual void operator()(const uchar** src, uchar* dst, int dststep,\r
+ int dstcount, int width) = 0;\r
+ virtual void reset();\r
+ int ksize, anchor;\r
+};\r
+\r
+\r
+struct CV_EXPORTS BaseFilter\r
+{\r
+ BaseFilter();\r
+ virtual ~BaseFilter();\r
+ virtual void operator()(const uchar** src, uchar* dst, int dststep,\r
+ int dstcount, int width, int cn) = 0;\r
+ virtual void reset();\r
+ Size ksize;\r
+ Point anchor;\r
+};\r
+\r
+\r
+struct CV_EXPORTS FilterEngine\r
+{\r
+ FilterEngine();\r
+ FilterEngine(const Ptr<BaseFilter>& _filter2D,\r
+ const Ptr<BaseRowFilter>& _rowFilter,\r
+ const Ptr<BaseColumnFilter>& _columnFilter,\r
+ int srcType, int dstType, int bufType,\r
+ int _rowBorderType=BORDER_REPLICATE,\r
+ int _columnBorderType=-1,\r
+ const Scalar& _borderValue=Scalar());\r
+ virtual ~FilterEngine();\r
+ void init(const Ptr<BaseFilter>& _filter2D,\r
+ const Ptr<BaseRowFilter>& _rowFilter,\r
+ const Ptr<BaseColumnFilter>& _columnFilter,\r
+ int srcType, int dstType, int bufType,\r
+ int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,\r
+ const Scalar& _borderValue=Scalar());\r
+ virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);\r
+ virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),\r
+ bool isolated=false, int maxBufRows=-1);\r
+ virtual int proceed(const uchar* src, int srcStep, int srcCount,\r
+ uchar* dst, int dstStep);\r
+ virtual void apply( const Mat& src, Mat& dst,\r
+ const Rect& srcRoi=Rect(0,0,-1,-1),\r
+ Point dstOfs=Point(0,0),\r
+ bool isolated=false);\r
+ bool isSeparable() const { return filter2D.obj == 0; }\r
+ int remainingInputRows() const;\r
+ int remainingOutputRows() const;\r
+ \r
+ int srcType, dstType, bufType;\r
+ Size ksize;\r
+ Point anchor;\r
+ int maxWidth;\r
+ Size wholeSize;\r
+ Rect roi;\r
+ int dx1, dx2;\r
+ int rowBorderType, columnBorderType;\r
+ Vector<int> borderTab;\r
+ int borderElemSize;\r
+ Vector<uchar> ringBuf;\r
+ Vector<uchar> srcRow;\r
+ Vector<uchar> constBorderValue;\r
+ Vector<uchar> constBorderRow;\r
+ int bufStep, startY, startY0, endY, rowCount, dstY;\r
+ Vector<uchar*> rows;\r
+ \r
+ Ptr<BaseFilter> filter2D;\r
+ Ptr<BaseRowFilter> rowFilter;\r
+ Ptr<BaseColumnFilter> columnFilter;\r
+};\r
+\r
+enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2,\r
+ KERNEL_SMOOTH=4, KERNEL_INTEGER=8 };\r
+\r
+CV_EXPORTS int getKernelType(const Mat& kernel, Point anchor);\r
+\r
+CV_EXPORTS Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType,\r
+ const Mat& kernel, int anchor,\r
+ int symmetryType);\r
+\r
+CV_EXPORTS Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType,\r
+ const Mat& kernel, int anchor,\r
+ int symmetryType, double delta=0,\r
+ int bits=0);\r
+\r
+CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,\r
+ const Mat& kernel,\r
+ Point anchor=Point(-1,-1),\r
+ double delta=0, int bits=0);\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,\r
+ const Mat& rowKernel, const Mat& columnKernel,\r
+ Point _anchor=Point(-1,-1), double delta=0,\r
+ int _rowBorderType=BORDER_DEFAULT,\r
+ int _columnBorderType=-1,\r
+ const Scalar& _borderValue=Scalar());\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,\r
+ const Mat& kernel, Point _anchor=Point(-1,-1),\r
+ double delta=0, int _rowBorderType=BORDER_DEFAULT,\r
+ int _columnBorderType=-1, const Scalar& _borderValue=Scalar());\r
+\r
+CV_EXPORTS Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createGaussianFilter( int type, Size ksize,\r
+ double sigma1, double sigma2=0,\r
+ int borderType=BORDER_DEFAULT);\r
+\r
+CV_EXPORTS void getDerivKernels( Mat& kx, Mat& ky, int dx, int dy, int ksize,\r
+ bool normalize=false, int ktype=CV_32F );\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,\r
+ int dx, int dy, int ksize,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,\r
+ int ksize, int anchor=-1);\r
+CV_EXPORTS Ptr<BaseColumnFilter> getColumnSumFilter(int sumType, int dstType,\r
+ int ksize, int anchor=-1,\r
+ double scale=1);\r
+CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksize,\r
+ Point anchor=Point(-1,-1),\r
+ bool normalize=true,\r
+ int borderType=BORDER_DEFAULT);\r
+\r
+enum { MORPH_ERODE=0, MORPH_DILATE=1, MORPH_OPEN=2, MORPH_CLOSE=3,\r
+ MORPH_GRADIENT=4, MORPH_TOPHAT=5, MORPH_BLACKHAT=6 };\r
+\r
+CV_EXPORTS Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1);\r
+CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1);\r
+CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel,\r
+ Point anchor=Point(-1,-1));\r
+\r
+static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT,\r
+ int _columnBorderType=-1,\r
+ const Scalar& _borderValue=morphologyDefaultBorderValue());\r
+\r
+enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };\r
+CV_EXPORTS Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1));\r
+\r
+CV_EXPORTS void copyMakeBorder( const Mat& src, Mat& dst,\r
+ int top, int bottom, int left, int right,\r
+ int borderType );\r
+\r
+CV_EXPORTS void medianBlur( const Mat& src, Mat& dst, int ksize );\r
+CV_EXPORTS void GaussianBlur( const Mat& src, Mat& dst, Size ksize,\r
+ double sigma1, double sigma2=0,\r
+ int borderType=BORDER_DEFAULT );\r
+CV_EXPORTS void bilateralFilter( const Mat& src, Mat& dst, int d,\r
+ double sigmaColor, double sigmaSpace,\r
+ int borderType=BORDER_DEFAULT );\r
+CV_EXPORTS void boxFilter( const Mat& src, Mat& dst, int ddepth,\r
+ Size ksize, Point anchor=Point(-1,-1),\r
+ bool normalize=true,\r
+ int borderType=BORDER_DEFAULT );\r
+static inline void blur( const Mat& src, Mat& dst,\r
+ Size ksize, Point anchor=Point(-1,-1),\r
+ int borderType=BORDER_DEFAULT )\r
+{\r
+ boxFilter( src, dst, -1, ksize, anchor, true, borderType );\r
+}\r
+\r
+CV_EXPORTS void filter2D( const Mat& src, Mat& dst, int ddepth,\r
+ const Mat& kernel, Point anchor=Point(-1,-1),\r
+ double delta=0, int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void sepFilter2D( const Mat& src, Mat& dst, int ddepth,\r
+ const Mat& kernelX, const Mat& kernelY,\r
+ Point anchor=Point(-1,-1),\r
+ double delta=0, int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void Sobel( const Mat& src, Mat& dst, int ddepth,\r
+ int dx, int dy, int ksize=3,\r
+ double scale=1, double delta=0,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void Scharr( const Mat& src, Mat& dst, int ddepth,\r
+ int dx, int dy, double scale=1, double delta=0,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void Laplacian( const Mat& src, Mat& dst, int ddepth,\r
+ int ksize=1, double scale=1, double delta=0,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void Canny( const Mat& image, Mat& edges,\r
+                       double threshold1, double threshold2,\r
+ int apertureSize=3, bool L2gradient=false );\r
+\r
+CV_EXPORTS void cornerMinEigenVal( const Mat& src, Mat& dst,\r
+ int blockSize, int ksize=3,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void cornerHarris( const Mat& src, Mat& dst, int blockSize,\r
+ int ksize, double k,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void cornerEigenValsAndVecs( const Mat& src, Mat& dst,\r
+ int blockSize, int ksize,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void preCornerDetect( const Mat& src, Mat& dst, int ksize,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void cornerSubPix( const Mat& image, Vector<Point2f>& corners,\r
+ Size winSize, Size zeroZone,\r
+ TermCriteria criteria );\r
+\r
+CV_EXPORTS void goodFeaturesToTrack( const Mat& image, Vector<Point2f>& corners,\r
+ int maxCorners, double qualityLevel, double minDistance,\r
+ const Mat& mask=Mat(), int blockSize=3,\r
+ bool useHarrisDetector=false, double k=0.04 );\r
+\r
+CV_EXPORTS void HoughLines( Mat& image, Vector<Vec2f>& lines,\r
+ double rho, double theta, int threshold,\r
+ double srn=0, double stn=0 );\r
+\r
+CV_EXPORTS void HoughLinesP( Mat& image, Vector<Vec4i>& lines,\r
+ double rho, double theta, int threshold,\r
+ double minLineLength=0, double maxLineGap=0 );\r
+\r
+CV_EXPORTS void HoughCircles( Mat& image, Vector<Vec3f>& circles,\r
+ int method, double dp, double minDist,\r
+ double param1=100, double param2=100,\r
+ int minRadius=0, int maxRadius=0 );\r
+\r
+CV_EXPORTS void erode( const Mat& src, Mat& dst, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int iterations=1,\r
+ int borderType=BORDER_CONSTANT,\r
+ const Scalar& borderValue=morphologyDefaultBorderValue() );\r
+CV_EXPORTS void dilate( const Mat& src, Mat& dst, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int iterations=1,\r
+ int borderType=BORDER_CONSTANT,\r
+ const Scalar& borderValue=morphologyDefaultBorderValue() );\r
+CV_EXPORTS void morphologyEx( const Mat& src, Mat& dst, int op, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int iterations=1,\r
+ int borderType=BORDER_CONSTANT,\r
+ const Scalar& borderValue=morphologyDefaultBorderValue() );\r
+\r
+enum { INTER_NEAREST=0, INTER_LINEAR=1, INTER_CUBIC=2, INTER_AREA=3,\r
+ INTER_LANCZOS4=4, INTER_MAX=7, WARP_INVERSE_MAP=16 };\r
+\r
+CV_EXPORTS void resize( const Mat& src, Mat& dst,\r
+ Size dsize=Size(), double fx=0, double fy=0,\r
+ int interpolation=INTER_LINEAR );\r
+\r
+CV_EXPORTS void warpAffine( const Mat& src, Mat& dst,\r
+ const Mat& M, Size dsize,\r
+ int flags=INTER_LINEAR,\r
+ int borderMode=BORDER_CONSTANT,\r
+ const Scalar& borderValue=Scalar());\r
+CV_EXPORTS void warpPerspective( const Mat& src, Mat& dst,\r
+ const Mat& M, Size dsize,\r
+ int flags=INTER_LINEAR,\r
+ int borderMode=BORDER_CONSTANT,\r
+ const Scalar& borderValue=Scalar());\r
+\r
+CV_EXPORTS void remap( const Mat& src, Mat& dst, const Mat& map1, const Mat& map2,\r
+ int interpolation, int borderMode=BORDER_CONSTANT,\r
+ const Scalar& borderValue=Scalar());\r
+\r
+CV_EXPORTS void convertMaps( const Mat& map1, const Mat& map2, Mat& dstmap1, Mat& dstmap2,\r
+ int dstmap1type, bool nninterpolation=false );\r
+\r
+CV_EXPORTS Mat getRotationMatrix2D( Point2f center, double angle, double scale );\r
+CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] );\r
+CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] );\r
+\r
+CV_EXPORTS void getRectSubPix( const Mat& image, Size patchSize,\r
+ Point2f center, Mat& patch, int patchType=-1 );\r
+\r
+CV_EXPORTS void integral( const Mat& src, Mat& sum, int sdepth=-1 );\r
+CV_EXPORTS void integral( const Mat& src, Mat& sum, Mat& sqsum, int sdepth=-1 );\r
+CV_EXPORTS void integral( const Mat& src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth=-1 );\r
+\r
+CV_EXPORTS void accumulate( const Mat& src, Mat& dst, const Mat& mask=Mat() );\r
+CV_EXPORTS void accumulateSquare( const Mat& src, Mat& dst, const Mat& mask=Mat() );\r
+CV_EXPORTS void accumulateProduct( const Mat& src1, const Mat& src2,\r
+ Mat& dst, const Mat& mask=Mat() );\r
+CV_EXPORTS void accumulateWeighted( const Mat& src, Mat& dst,\r
+ double alpha, const Mat& mask=Mat() );\r
+\r
+enum { THRESH_BINARY=0, THRESH_BINARY_INV=1, THRESH_TRUNC=2, THRESH_TOZERO=3,\r
+ THRESH_TOZERO_INV=4, THRESH_MASK=7, THRESH_OTSU=8 };\r
+\r
+CV_EXPORTS double threshold( const Mat& src, Mat& dst, double thresh, double maxval, int type );\r
+\r
+enum { ADAPTIVE_THRESH_MEAN_C=0, ADAPTIVE_THRESH_GAUSSIAN_C=1 };\r
+\r
+CV_EXPORTS void adaptiveThreshold( const Mat& src, Mat& dst, double maxValue,\r
+ int adaptiveMethod, int thresholdType,\r
+ int blockSize, double C );\r
+\r
+CV_EXPORTS void pyrDown( const Mat& src, Mat& dst, const Size& dstsize=Size());\r
+CV_EXPORTS void pyrUp( const Mat& src, Mat& dst, const Size& dstsize=Size());\r
+CV_EXPORTS void buildPyramid( const Mat& src, Vector<Mat>& dst, int maxlevel );\r
+\r
+\r
+CV_EXPORTS void undistort( const Mat& src, Mat& dst, const Mat& cameraMatrix,\r
+ const Mat& distCoeffs, const Mat& newCameraMatrix=Mat() );\r
+CV_EXPORTS void initUndistortRectifyMap( const Mat& cameraMatrix, const Mat& distCoeffs,\r
+ const Mat& R, const Mat& newCameraMatrix,\r
+ Size size, int m1type, Mat& map1, Mat& map2 );\r
+CV_EXPORTS Mat getDefaultNewCameraMatrix( const Mat& cameraMatrix, Size imgsize=Size(),\r
+ bool centerPrincipalPoint=false );\r
+\r
+enum { OPTFLOW_USE_INITIAL_FLOW=4, OPTFLOW_FARNEBACK_GAUSSIAN=256 };\r
+\r
+CV_EXPORTS void calcOpticalFlowPyrLK( const Mat& prevImg, const Mat& nextImg,\r
+ const Vector<Point2f>& prevPts,\r
+ Vector<Point2f>& nextPts,\r
+ Vector<bool>& status, Vector<float>& err,\r
+ Size winSize=Size(15,15), int maxLevel=3,\r
+ TermCriteria criteria=TermCriteria(\r
+ TermCriteria::COUNT+TermCriteria::EPS,\r
+ 30, 0.01),\r
+ double derivLambda=0.5,\r
+ int flags=0 );\r
+\r
+CV_EXPORTS void calcOpticalFlowFarneback( const Mat& prev0, const Mat& next0,\r
+ Mat& flow0, double pyr_scale, int levels, int winsize,\r
+ int iterations, int poly_n, double poly_sigma, int flags );\r
+ \r
+ \r
+CV_EXPORTS void calcHist( const Vector<Mat>& images, const Vector<int>& channels,\r
+ const Mat& mask, MatND& hist, const Vector<int>& histSize,\r
+ const Vector<Vector<float> >& ranges,\r
+ bool uniform=true, bool accumulate=false );\r
+\r
+CV_EXPORTS void calcHist( const Vector<Mat>& images, const Vector<int>& channels,\r
+ const Mat& mask, SparseMat& hist, const Vector<int>& histSize,\r
+ const Vector<Vector<float> >& ranges,\r
+ bool uniform=true, bool accumulate=false );\r
+ \r
+CV_EXPORTS void calcBackProject( const Vector<Mat>& images, const Vector<int>& channels,\r
+ const MatND& hist, Mat& backProject,\r
+ const Vector<Vector<float> >& ranges,\r
+ double scale=1, bool uniform=true );\r
+ \r
+CV_EXPORTS void calcBackProject( const Vector<Mat>& images, const Vector<int>& channels,\r
+ const SparseMat& hist, Mat& backProject,\r
+ const Vector<Vector<float> >& ranges,\r
+ double scale=1, bool uniform=true );\r
+\r
+CV_EXPORTS double compareHist( const MatND& H1, const MatND& H2, int method );\r
+\r
+CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method );\r
+\r
+CV_EXPORTS void equalizeHist( const Mat& src, Mat& dst );\r
+\r
+CV_EXPORTS void watershed( const Mat& image, Mat& markers );\r
+\r
+enum { INPAINT_NS=CV_INPAINT_NS, INPAINT_TELEA=CV_INPAINT_TELEA };\r
+\r
+CV_EXPORTS void inpaint( const Mat& src, const Mat& inpaintMask,\r
+ Mat& dst, double inpaintRange, int flags );\r
+\r
+CV_EXPORTS void distanceTransform( const Mat& src, Mat& dst, Mat& labels,\r
+ int distanceType, int maskSize );\r
+\r
+CV_EXPORTS void distanceTransform( const Mat& src, Mat& dst,\r
+ int distanceType, int maskSize );\r
+\r
+enum { FLOODFILL_FIXED_RANGE = 1 << 16,\r
+ FLOODFILL_MASK_ONLY = 1 << 17 };\r
+\r
+CV_EXPORTS int floodFill( Mat& image,\r
+ Point seedPoint, Scalar newVal, Rect* rect=0,\r
+ Scalar loDiff=Scalar(), Scalar upDiff=Scalar(),\r
+ int flags=4 );\r
+\r
+CV_EXPORTS int floodFill( Mat& image, Mat& mask,\r
+ Point seedPoint, Scalar newVal, Rect* rect=0,\r
+ Scalar loDiff=Scalar(), Scalar upDiff=Scalar(),\r
+ int flags=4 );\r
+\r
+CV_EXPORTS void cvtColor( const Mat& src, Mat& dst, int code, int dstCn=0 );\r
+\r
+struct CV_EXPORTS Moments\r
+{\r
+ Moments();\r
+ Moments(double m00, double m10, double m01, double m20, double m11,\r
+ double m02, double m30, double m21, double m12, double m03 );\r
+ Moments( const CvMoments& moments );\r
+ operator CvMoments() const;\r
+ \r
+ double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; // spatial moments\r
+ double mu20, mu11, mu02, mu30, mu21, mu12, mu03; // central moments\r
+ double nu20, nu11, nu02, nu30, nu21, nu12, nu03; // central normalized moments\r
+};\r
+\r
+CV_EXPORTS Moments moments( const Mat& image, bool binaryImage=false );\r
+\r
+CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] );\r
+\r
+enum { TM_SQDIFF=CV_TM_SQDIFF, TM_SQDIFF_NORMED=CV_TM_SQDIFF_NORMED,\r
+ TM_CCORR=CV_TM_CCORR, TM_CCORR_NORMED=CV_TM_CCORR_NORMED,\r
+ TM_CCOEFF=CV_TM_CCOEFF, TM_CCOEFF_NORMED=CV_TM_CCOEFF_NORMED };\r
+\r
+CV_EXPORTS void matchTemplate( const Mat& image, const Mat& templ, Mat& result, int method );\r
+\r
+enum { RETR_EXTERNAL=CV_RETR_EXTERNAL, RETR_LIST=CV_RETR_LIST,\r
+ RETR_CCOMP=CV_RETR_CCOMP, RETR_TREE=CV_RETR_TREE };\r
+\r
+enum { CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE,\r
+ CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE,\r
+ CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1,\r
+ CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS };\r
+\r
+CV_EXPORTS Vector<Vector<Point> >\r
+ findContours( const Mat& image, Vector<Vec4i>& hierarchy,\r
+ int mode, int method, Point offset=Point());\r
+\r
+CV_EXPORTS Vector<Vector<Point> >\r
+ findContours( const Mat& image, int mode, int method, Point offset=Point());\r
+\r
+CV_EXPORTS void\r
+ drawContours( Mat& image, const Vector<Vector<Point> >& contours,\r
+ const Scalar& color, int thickness=1,\r
+ int lineType=8, const Vector<Vec4i>& hierarchy=Vector<Vec4i>(),\r
+ int maxLevel=1, Point offset=Point() );\r
+\r
+CV_EXPORTS void approxPolyDP( const Vector<Point>& curve,\r
+ Vector<Point>& approxCurve,\r
+ double epsilon, bool closed );\r
+CV_EXPORTS void approxPolyDP( const Vector<Point2f>& curve,\r
+ Vector<Point2f>& approxCurve,\r
+ double epsilon, bool closed );\r
+\r
+CV_EXPORTS double arcLength( const Vector<Point>& curve, bool closed );\r
+CV_EXPORTS double arcLength( const Vector<Point2f>& curve, bool closed );\r
+\r
+CV_EXPORTS Rect boundingRect( const Vector<Point>& points );\r
+CV_EXPORTS Rect boundingRect( const Vector<Point2f>& points );\r
+\r
+CV_EXPORTS double contourArea( const Vector<Point>& contour );\r
+CV_EXPORTS double contourArea( const Vector<Point2f>& contour );\r
+\r
+CV_EXPORTS RotatedRect minAreaRect( const Vector<Point>& points );\r
+CV_EXPORTS RotatedRect minAreaRect( const Vector<Point2f>& points );\r
+\r
+CV_EXPORTS void minEnclosingCircle( const Vector<Point>& points,\r
+ Point2f center, float& radius );\r
+CV_EXPORTS void minEnclosingCircle( const Vector<Point2f>& points,\r
+ Point2f center, float& radius );\r
+\r
+CV_EXPORTS Moments moments( const Vector<Point>& points );\r
+CV_EXPORTS Moments moments( const Vector<Point2f>& points );\r
+\r
+CV_EXPORTS double matchShapes( const Vector<Point2f>& contour1,\r
+ const Vector<Point2f>& contour2,\r
+ int method, double parameter );\r
+CV_EXPORTS double matchShapes( const Vector<Point>& contour1,\r
+ const Vector<Point>& contour2,\r
+ int method, double parameter );\r
+\r
+CV_EXPORTS void convexHull( const Vector<Point>& points,\r
+ Vector<int>& hull, bool clockwise=false );\r
+CV_EXPORTS void convexHull( const Vector<Point>& points,\r
+ Vector<Point>& hull, bool clockwise=false );\r
+CV_EXPORTS void convexHull( const Vector<Point2f>& points,\r
+ Vector<int>& hull, bool clockwise=false );\r
+CV_EXPORTS void convexHull( const Vector<Point2f>& points,\r
+ Vector<Point2f>& hull, bool clockwise=false );\r
+\r
+CV_EXPORTS bool isContourConvex( const Vector<Point>& contour );\r
+CV_EXPORTS bool isContourConvex( const Vector<Point2f>& contour );\r
+\r
+CV_EXPORTS RotatedRect fitEllipse( const Vector<Point>& points );\r
+CV_EXPORTS RotatedRect fitEllipse( const Vector<Point2f>& points );\r
+\r
+CV_EXPORTS Vec4f fitLine( const Vector<Point> points, int distType,\r
+ double param, double reps, double aeps );\r
+CV_EXPORTS Vec4f fitLine( const Vector<Point2f> points, int distType,\r
+ double param, double reps, double aeps );\r
+CV_EXPORTS Vec6f fitLine( const Vector<Point3f> points, int distType,\r
+ double param, double reps, double aeps );\r
+\r
+CV_EXPORTS double pointPolygonTest( const Vector<Point>& contour,\r
+ Point2f pt, bool measureDist );\r
+CV_EXPORTS double pointPolygonTest( const Vector<Point2f>& contour,\r
+ Point2f pt, bool measureDist );\r
+\r
+CV_EXPORTS Mat estimateRigidTransform( const Vector<Point2f>& A,\r
+ const Vector<Point2f>& B,\r
+ bool fullAffine );\r
+\r
+CV_EXPORTS void updateMotionHistory( const Mat& silhouette, Mat& mhi,\r
+ double timestamp, double duration );\r
+\r
+CV_EXPORTS void calcMotionGradient( const Mat& mhi, Mat& mask,\r
+ Mat& orientation,\r
+ double delta1, double delta2,\r
+ int apertureSize=3 );\r
+\r
+CV_EXPORTS double calcGlobalOrientation( const Mat& orientation, const Mat& mask,\r
+ const Mat& mhi, double timestamp,\r
+ double duration );\r
+// TODO: need good API for cvSegmentMotion\r
+\r
+CV_EXPORTS RotatedRect CAMShift( const Mat& probImage, Rect& window,\r
+ TermCriteria criteria );\r
+\r
+CV_EXPORTS int MeanShift( const Mat& probImage, Rect& window,\r
+ TermCriteria criteria );\r
+\r
+struct CV_EXPORTS KalmanFilter\r
+{\r
+ KalmanFilter();\r
+ KalmanFilter(int dynamParams, int measureParams, int controlParams=0);\r
+ void init(int dynamParams, int measureParams, int controlParams=0);\r
+\r
+ const Mat& predict(const Mat& control=Mat());\r
+ const Mat& correct(const Mat& measurement);\r
+\r
+ Mat statePre; // predicted state (x'(k)):\r
+ // x(k)=A*x(k-1)+B*u(k)\r
+ Mat statePost; // corrected state (x(k)):\r
+ // x(k)=x'(k)+K(k)*(z(k)-H*x'(k))\r
+ Mat transitionMatrix; // state transition matrix (A)\r
+ Mat controlMatrix; // control matrix (B)\r
+ // (it is not used if there is no control)\r
+ Mat measurementMatrix; // measurement matrix (H)\r
+ Mat processNoiseCov; // process noise covariance matrix (Q)\r
+ Mat measurementNoiseCov;// measurement noise covariance matrix (R)\r
+ Mat errorCovPre; // priori error estimate covariance matrix (P'(k)):\r
+ // P'(k)=A*P(k-1)*At + Q)*/\r
+ Mat gain; // Kalman gain matrix (K(k)):\r
+ // K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)\r
+ Mat errorCovPost; // posteriori error estimate covariance matrix (P(k)):\r
+ // P(k)=(I-K(k)*H)*P'(k)\r
+ Mat temp1; // temporary matrices\r
+ Mat temp2;\r
+ Mat temp3;\r
+ Mat temp4;\r
+ Mat temp5;\r
+};\r
+\r
+\r
+///////////////////////////// Object Detection ////////////////////////////\r
+\r
+template<> inline void Ptr<CvHaarClassifierCascade>::delete_obj()\r
+{ cvReleaseHaarClassifierCascade(&obj); }\r
+\r
+struct CV_EXPORTS HaarClassifierCascade\r
+{\r
+ enum { DO_CANNY_PRUNING = CV_HAAR_DO_CANNY_PRUNING,\r
+ SCALE_IMAGE = CV_HAAR_SCALE_IMAGE,\r
+ FIND_BIGGEST_OBJECT = CV_HAAR_FIND_BIGGEST_OBJECT,\r
+ DO_ROUGH_SEARCH = CV_HAAR_DO_ROUGH_SEARCH };\r
+ \r
+ HaarClassifierCascade();\r
+ HaarClassifierCascade(const String& filename);\r
+ bool load(const String& filename);\r
+\r
+ void detectMultiScale( const Mat& image,\r
+ Vector<Rect>& objects,\r
+ double scaleFactor=1.1,\r
+ int minNeighbors=3, int flags=0,\r
+ Size minSize=Size());\r
+\r
+ int runAt(Point pt, int startStage=0, int nstages=0) const;\r
+\r
+ void setImages( const Mat& sum, const Mat& sqsum,\r
+ const Mat& tiltedSum, double scale );\r
+ \r
+ Ptr<CvHaarClassifierCascade> cascade;\r
+};\r
+\r
+CV_EXPORTS void undistortPoints( const Vector<Point2f>& src, Vector<Point2f>& dst,\r
+ const Mat& cameraMatrix, const Mat& distCoeffs,\r
+ const Mat& R=Mat(), const Mat& P=Mat());\r
+\r
+CV_EXPORTS Mat Rodrigues(const Mat& src);\r
+CV_EXPORTS Mat Rodrigues(const Mat& src, Mat& jacobian);\r
+\r
+enum { LMEDS=4, RANSAC=8 };\r
+\r
+CV_EXPORTS Mat findHomography( const Vector<Point2f>& srcPoints,\r
+ const Vector<Point2f>& dstPoints,\r
+ Vector<bool>& mask, int method=0,\r
+ double ransacReprojThreshold=0 );\r
+\r
+CV_EXPORTS Mat findHomography( const Vector<Point2f>& srcPoints,\r
+ const Vector<Point2f>& dstPoints,\r
+ int method=0, double ransacReprojThreshold=0 );\r
+\r
+/* Computes RQ decomposition for 3x3 matrices */\r
+CV_EXPORTS void RQDecomp3x3( const Mat& M, Mat& R, Mat& Q );\r
+CV_EXPORTS Vec3d RQDecomp3x3( const Mat& M, Mat& R, Mat& Q,\r
+ Mat& Qx, Mat& Qy, Mat& Qz );\r
+\r
+CV_EXPORTS void decomposeProjectionMatrix( const Mat& projMatrix, Mat& cameraMatrix,\r
+ Mat& rotMatrix, Mat& transVect );\r
+CV_EXPORTS void decomposeProjectionMatrix( const Mat& projMatrix, Mat& cameraMatrix,\r
+ Mat& rotMatrix, Mat& transVect,\r
+ Mat& rotMatrixX, Mat& rotMatrixY,\r
+ Mat& rotMatrixZ, Vec3d& eulerAngles );\r
+\r
+CV_EXPORTS void matMulDeriv( const Mat& A, const Mat& B, Mat& dABdA, Mat& dABdB );\r
+\r
+CV_EXPORTS void composeRT( const Mat& rvec1, const Mat& tvec1,\r
+ const Mat& rvec2, const Mat& tvec2,\r
+ Mat& rvec3, Mat& tvec3 );\r
+\r
+CV_EXPORTS void composeRT( const Mat& rvec1, const Mat& tvec1,\r
+ const Mat& rvec2, const Mat& tvec2,\r
+ Mat& rvec3, Mat& tvec3,\r
+ Mat& dr3dr1, Mat& dr3dt1,\r
+ Mat& dr3dr2, Mat& dr3dt2,\r
+ Mat& dt3dr1, Mat& dt3dt1,\r
+ Mat& dt3dr2, Mat& dt3dt2 );\r
+\r
+CV_EXPORTS void projectPoints( const Vector<Point3f>& objectPoints,\r
+ const Mat& rvec, const Mat& tvec,\r
+ const Mat& cameraMatrix,\r
+ const Mat& distCoeffs,\r
+ Vector<Point2f>& imagePoints );\r
+\r
+CV_EXPORTS void projectPoints( const Vector<Point3f>& objectPoints,\r
+ const Mat& rvec, const Mat& tvec,\r
+ const Mat& cameraMatrix,\r
+ const Mat& distCoeffs,\r
+ Vector<Point2f>& imagePoints,\r
+ Mat& dpdrot, Mat& dpdt, Mat& dpdf,\r
+ Mat& dpdc, Mat& dpddist,\r
+ double aspectRatio=0 );\r
+\r
+CV_EXPORTS void solvePnP( const Vector<Point3f>& objectPoints,\r
+ const Vector<Point2f>& imagePoints,\r
+ const Mat& cameraMatrix,\r
+ const Mat& distCoeffs,\r
+ Mat& rvec, Mat& tvec,\r
+ bool useExtrinsicGuess=false );\r
+\r
+CV_EXPORTS Mat initCameraMatrix2D( const Vector<Vector<Point3f> >& objectPoints,\r
+ const Vector<Vector<Point2f> >& imagePoints,\r
+ Size imageSize, double aspectRatio=1. );\r
+\r
+enum { CALIB_CB_ADAPTIVE_THRESH = CV_CALIB_CB_ADAPTIVE_THRESH,\r
+ CALIB_CB_NORMALIZE_IMAGE = CV_CALIB_CB_NORMALIZE_IMAGE,\r
+ CALIB_CB_FILTER_QUADS = CV_CALIB_CB_FILTER_QUADS };\r
+\r
+CV_EXPORTS bool findChessboardCorners( const Mat& image, Size patternSize,\r
+ Vector<Point2f>& corners,\r
+ int flags=CV_CALIB_CB_ADAPTIVE_THRESH+\r
+ CV_CALIB_CB_NORMALIZE_IMAGE );\r
+\r
+CV_EXPORTS void drawChessboardCorners( Mat& image, Size patternSize,\r
+ const Vector<Point2f>& corners,\r
+ bool patternWasFound );\r
+\r
+enum\r
+{\r
+ CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,\r
+ CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,\r
+ CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,\r
+ CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,\r
+ CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,\r
+ CALIB_FIX_K1 = CV_CALIB_FIX_K1,\r
+ CALIB_FIX_K2 = CV_CALIB_FIX_K2,\r
+ CALIB_FIX_K3 = CV_CALIB_FIX_K3,\r
+ // only for stereo\r
+ CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,\r
+ CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,\r
+ // for stereo rectification\r
+ CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY\r
+};\r
+\r
+CV_EXPORTS void calibrateCamera( const Vector<Vector<Point3f> >& objectPoints,\r
+ const Vector<Vector<Point2f> >& imagePoints,\r
+ Size imageSize,\r
+ Mat& cameraMatrix, Mat& distCoeffs,\r
+ Vector<Mat>& rvecs, Vector<Mat>& tvecs,\r
+ int flags=0 );\r
+\r
+CV_EXPORTS void calibrationMatrixValues( const Mat& cameraMatrix,\r
+ Size imageSize,\r
+ double apertureWidth,\r
+ double apertureHeight,\r
+ double& fovx,\r
+ double& fovy,\r
+ double& focalLength,\r
+ Point2d& principalPoint,\r
+ double& aspectRatio );\r
+\r
+CV_EXPORTS void stereoCalibrate( const Vector<Vector<Point3f> >& objectPoints,\r
+ const Vector<Vector<Point2f> >& imagePoints1,\r
+ const Vector<Vector<Point2f> >& imagePoints2,\r
+ Mat& cameraMatrix1, Mat& distCoeffs1,\r
+ Mat& cameraMatrix2, Mat& distCoeffs2,\r
+ Size imageSize, Mat& R, Mat& T,\r
+ Mat& E, Mat& F,\r
+ TermCriteria criteria = TermCriteria(TermCriteria::COUNT+\r
+ TermCriteria::EPS, 30, 1e-6),\r
+ int flags=CALIB_FIX_INTRINSIC );\r
+\r
+CV_EXPORTS void stereoRectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,\r
+ const Mat& cameraMatrix2, const Mat& distCoeffs2,\r
+ Size imageSize, const Mat& R, const Mat& T,\r
+ Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,\r
+ int flags=CALIB_ZERO_DISPARITY );\r
+\r
+CV_EXPORTS bool stereoRectifyUncalibrated( const Vector<Point2f>& points1,\r
+ const Vector<Point2f>& points2,\r
+ const Mat& F, Size imgSize,\r
+ Mat& H1, Mat& H2,\r
+ double threshold=5 );\r
+\r
+CV_EXPORTS void convertPointsHomogeneous( const Vector<Point2f>& src,\r
+ Vector<Point3f>& dst );\r
+CV_EXPORTS void convertPointsHomogeneous( const Vector<Point3f>& src,\r
+ Vector<Point2f>& dst );\r
+\r
+enum\r
+{ \r
+ FM_7POINT = CV_FM_7POINT,\r
+ FM_8POINT = CV_FM_8POINT,\r
+ FM_LMEDS = CV_FM_LMEDS,\r
+ FM_RANSAC = CV_FM_RANSAC\r
+};\r
+\r
+CV_EXPORTS Mat findFundamentalMat( const Vector<Point2f>& points1,\r
+ const Vector<Point2f>& points2,\r
+ Vector<bool>& mask,\r
+ int method=FM_RANSAC,\r
+ double param1=3., double param2=0.99 );\r
+\r
+CV_EXPORTS Mat findFundamentalMat( const Vector<Point2f>& points1,\r
+ const Vector<Point2f>& points2,\r
+ int method=FM_RANSAC,\r
+ double param1=3., double param2=0.99 );\r
+\r
+CV_EXPORTS void computeCorrespondEpilines( const Vector<Point2f>& points1,\r
+ int whichImage, const Mat& F,\r
+ Vector<Vec3f>& lines );\r
+\r
+template<> inline void Ptr<CvStereoBMState>::delete_obj()\r
+{ cvReleaseStereoBMState(&obj); }\r
+\r
+// Block matching stereo correspondence algorithm\r
+struct CV_EXPORTS StereoBM\r
+{\r
+ enum { NORMALIZED_RESPONSE = CV_STEREO_BM_NORMALIZED_RESPONSE,\r
+ BASIC_PRESET=CV_STEREO_BM_BASIC,\r
+ FISH_EYE_PRESET=CV_STEREO_BM_FISH_EYE,\r
+ NARROW_PRESET=CV_STEREO_BM_NARROW };\r
+ \r
+ StereoBM();\r
+ StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);\r
+ void init(int preset, int ndisparities=0, int SADWindowSize=21);\r
+ void operator()( const Mat& left, const Mat& right, Mat& disparity );\r
+\r
+ Ptr<CvStereoBMState> state;\r
+};\r
+\r
+CV_EXPORTS void reprojectImageTo3D( const Mat& disparity,\r
+ Mat& _3dImage, const Mat& Q,\r
+ bool handleMissingValues=false );\r
+\r
+struct CV_EXPORTS SURFKeypoint : public CvSURFPoint\r
+{\r
+ SURFKeypoint() { pt=Point2f(); laplacian=size=0; dir=hessian=0; }\r
+ SURFKeypoint(Point2f _pt, int _laplacian, int _size, float _dir=0.f, float _hessian=0.f)\r
+ { pt = _pt; laplacian = _laplacian; size = _size; dir = _dir; hessian = _hessian; }\r
+};\r
+\r
+struct CV_EXPORTS SURF : public CvSURFParams\r
+{\r
+ SURF();\r
+ SURF(double _hessianThreshold, bool _extended=false);\r
+\r
+ int descriptorSize() const;\r
+ void operator()(const Mat& img, const Mat& mask,\r
+ Vector<SURFKeypoint>& keypoints) const;\r
+ void operator()(const Mat& img, const Mat& mask,\r
+ Vector<SURFKeypoint>& keypoints,\r
+ Vector<float>& descriptors,\r
+ bool useProvidedKeypoints=false) const;\r
+};\r
+\r
+\r
+struct CV_EXPORTS MSER : public CvMSERParams\r
+{\r
+ MSER();\r
+ MSER( int _delta, int _min_area, int _max_area,\r
+ float _max_variation, float _min_diversity,\r
+ int _max_evolution, double _area_threshold,\r
+ double _min_margin, int _edge_blur_size );\r
+ Vector<Vector<Point> > operator()(Mat& image, const Mat& mask) const;\r
+};\r
+\r
+struct CV_EXPORTS StarKeypoint : public CvStarKeypoint\r
+{\r
+ StarKeypoint() { pt = Point(); size = 0; response = 0.f; }\r
+ StarKeypoint(Point _pt, int _size, float _response)\r
+ {\r
+ pt = _pt; size = _size; response = _response;\r
+ }\r
+};\r
+\r
+struct CV_EXPORTS StarDetector : CvStarDetectorParams\r
+{\r
+ StarDetector();\r
+ StarDetector(int _maxSize, int _responseThreshold,\r
+ int _lineThresholdProjected,\r
+ int _lineThresholdBinarized,\r
+ int _suppressNonmaxSize);\r
+\r
+ void operator()(const Mat& image, Vector<StarKeypoint>& keypoints) const;\r
+};\r
+\r
+}\r
+\r
+//////////////////////////////////////////////////////////////////////////////////////////\r
+\r
+struct CV_EXPORTS CvLevMarq\r
+{\r
+ CvLevMarq();\r
+ CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=\r
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),\r
+ bool completeSymmFlag=false );\r
+ ~CvLevMarq();\r
+ void init( int nparams, int nerrs, CvTermCriteria criteria=\r
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),\r
+ bool completeSymmFlag=false );\r
+ bool update( const CvMat*& param, CvMat*& J, CvMat*& err );\r
+ bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );\r
+\r
+ void clear();\r
+ void step();\r
+ enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };\r
+\r
+ CvMat* mask;\r
+ CvMat* prevParam;\r
+ CvMat* param;\r
+ CvMat* J;\r
+ CvMat* err;\r
+ CvMat* JtJ;\r
+ CvMat* JtJN;\r
+ CvMat* JtErr;\r
+ CvMat* JtJV;\r
+ CvMat* JtJW;\r
+ double prevErrNorm, errNorm;\r
+ int lambdaLg10;\r
+ CvTermCriteria criteria;\r
+ int state;\r
+ int iters;\r
+ bool completeSymmFlag;\r
+};\r
+\r
+\r
+// 2009-01-12, Xavier Delacour <xavier.delacour@gmail.com>\r
+\r
+struct lsh_hash {\r
+ int h1, h2;\r
+};\r
+\r
+struct CvLSHOperations {\r
+ virtual ~CvLSHOperations() {}\r
+\r
+ virtual int vector_add(const void* data) = 0;\r
+ virtual void vector_remove(int i) = 0;\r
+ virtual const void* vector_lookup(int i) = 0;\r
+ virtual void vector_reserve(int n) = 0;\r
+ virtual unsigned int vector_count() = 0;\r
+\r
+ virtual void hash_insert(lsh_hash h, int l, int i) = 0;\r
+ virtual void hash_remove(lsh_hash h, int l, int i) = 0;\r
+ virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0;\r
+};\r
+\r
+\r
+#endif /* __cplusplus */\r
+\r
+#endif /* _CV_HPP_ */\r
+\r
+/* End of file. */\r
__END__;
}
+namespace cv
+{
+\r
+bool findChessboardCorners( const Mat& image, Size patternSize,\r
+ Vector<Point2f>& corners, int flags )\r
+{\r
+ int count = patternSize.area()*2;\r
+ corners.resize(count);\r
+ CvMat _image = image;\r
+ bool ok = cvFindChessboardCorners(&_image, patternSize,\r
+ (CvPoint2D32f*)&corners[0], &count, flags ) > 0;\r
+ corners.resize(count);\r
+ return ok;\r
+}\r
+\r
+void drawChessboardCorners( Mat& image, Size patternSize,\r
+ const Vector<Point2f>& corners,\r
+ bool patternWasFound )\r
+{\r
+ CvMat _image = image;\r
+ cvDrawChessboardCorners( &_image, patternSize, (CvPoint2D32f*)&corners[0],\r
+ corners.size(), patternWasFound );\r
+}\r
+
+}
/* End of file. */
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-
-/*
- This is stright-forward port v3 of Matlab calibration engine by Jean-Yves Bouguet
- that is (in a large extent) based on the paper:
- Z. Zhang. "A flexible new technique for camera calibration".
- IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.
-
- The 1st initial port was done by Valery Mosyagin.
-*/
-
-CvLevMarq::CvLevMarq()
-{
- mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = 0;
- lambdaLg10 = 0; state = DONE;
- criteria = cvTermCriteria(0,0,0);
- iters = 0;
- completeSymmFlag = false;
-}
-
-CvLevMarq::CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )
-{
- mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = 0;
- init(nparams, nerrs, criteria0, _completeSymmFlag);
-}
-
-void CvLevMarq::clear()
-{
- cvReleaseMat(&mask);
- cvReleaseMat(&prevParam);
- cvReleaseMat(¶m);
- cvReleaseMat(&J);
- cvReleaseMat(&err);
- cvReleaseMat(&JtJ);
- cvReleaseMat(&JtJN);
- cvReleaseMat(&JtErr);
- cvReleaseMat(&JtJV);
- cvReleaseMat(&JtJW);
-}
-
-CvLevMarq::~CvLevMarq()
-{
- clear();
-}
-
-void CvLevMarq::init( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )
-{
- if( !param || param->rows != nparams || nerrs != (err ? err->rows : 0) )
- clear();
- mask = cvCreateMat( nparams, 1, CV_8U );
- cvSet(mask, cvScalarAll(1));
- prevParam = cvCreateMat( nparams, 1, CV_64F );
- param = cvCreateMat( nparams, 1, CV_64F );
- JtJ = cvCreateMat( nparams, nparams, CV_64F );
- JtJN = cvCreateMat( nparams, nparams, CV_64F );
- JtJV = cvCreateMat( nparams, nparams, CV_64F );
- JtJW = cvCreateMat( nparams, 1, CV_64F );
- JtErr = cvCreateMat( nparams, 1, CV_64F );
- if( nerrs > 0 )
- {
- J = cvCreateMat( nerrs, nparams, CV_64F );
- err = cvCreateMat( nerrs, 1, CV_64F );
- }
- prevErrNorm = DBL_MAX;
- lambdaLg10 = -3;
- criteria = criteria0;
- if( criteria.type & CV_TERMCRIT_ITER )
- criteria.max_iter = MIN(MAX(criteria.max_iter,1),1000);
- else
- criteria.max_iter = 30;
- if( criteria.type & CV_TERMCRIT_EPS )
- criteria.epsilon = MAX(criteria.epsilon, 0);
- else
- criteria.epsilon = DBL_EPSILON;
- state = STARTED;
- iters = 0;
- completeSymmFlag = _completeSymmFlag;
-}
-
-bool CvLevMarq::update( const CvMat*& _param, CvMat*& _J, CvMat*& _err )
-{
- double change;
-
- _J = _err = 0;
-
- assert( err != 0 );
- if( state == DONE )
- {
- _param = param;
- return false;
- }
-
- if( state == STARTED )
- {
- _param = param;
- cvZero( J );
- cvZero( err );
- _J = J;
- _err = err;
- state = CALC_J;
- return true;
- }
-
- if( state == CALC_J )
- {
- cvMulTransposed( J, JtJ, 1 );
- cvGEMM( J, err, 1, 0, 0, JtErr, CV_GEMM_A_T );
- cvCopy( param, prevParam );
- step();
- if( iters == 0 )
- prevErrNorm = cvNorm(err, 0, CV_L2);
- _param = param;
- cvZero( err );
- _err = err;
- state = CHECK_ERR;
- return true;
- }
-
- assert( state == CHECK_ERR );
- errNorm = cvNorm( err, 0, CV_L2 );
- if( errNorm > prevErrNorm )
- {
- lambdaLg10++;
- step();
- _param = param;
- cvZero( err );
- _err = err;
- state = CHECK_ERR;
- return true;
- }
-
- lambdaLg10 = MAX(lambdaLg10-1, -16);
- if( ++iters >= criteria.max_iter ||
- (change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )
- {
- _param = param;
- state = DONE;
- return true;
- }
-
- prevErrNorm = errNorm;
- _param = param;
- cvZero(J);
- _J = J;
- _err = err;
- state = CALC_J;
- return true;
-}
-
-
-bool CvLevMarq::updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, double*& _errNorm )
-{
- double change;
-
- assert( err == 0 );
- if( state == DONE )
- {
- _param = param;
- return false;
- }
-
- if( state == STARTED )
- {
- _param = param;
- cvZero( JtJ );
- cvZero( JtErr );
- errNorm = 0;
- _JtJ = JtJ;
- _JtErr = JtErr;
- _errNorm = &errNorm;
- state = CALC_J;
- return true;
- }
-
- if( state == CALC_J )
- {
- cvCopy( param, prevParam );
- step();
- _param = param;
- prevErrNorm = errNorm;
- errNorm = 0;
- _errNorm = &errNorm;
- state = CHECK_ERR;
- return true;
- }
-
- assert( state == CHECK_ERR );
- if( errNorm > prevErrNorm )
- {
- lambdaLg10++;
- step();
- _param = param;
- errNorm = 0;
- _errNorm = &errNorm;
- state = CHECK_ERR;
- return true;
- }
-
- lambdaLg10 = MAX(lambdaLg10-1, -16);
- if( ++iters >= criteria.max_iter ||
- (change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )
- {
- _param = param;
- state = DONE;
- return false;
- }
-
- prevErrNorm = errNorm;
- cvZero( JtJ );
- cvZero( JtErr );
- _param = param;
- _JtJ = JtJ;
- _JtErr = JtErr;
- state = CALC_J;
- return true;
-}
-
-void CvLevMarq::step()
-{
- const double LOG10 = log(10.);
- double lambda = exp(lambdaLg10*LOG10);
- int i, j, nparams = param->rows;
-
- for( i = 0; i < nparams; i++ )
- if( mask->data.ptr[i] == 0 )
- {
- double *row = JtJ->data.db + i*nparams, *col = JtJ->data.db + i;
- for( j = 0; j < nparams; j++ )
- row[j] = col[j*nparams] = 0;
- JtErr->data.db[i] = 0;
- }
-
- if( !err )
- cvCompleteSymm( JtJ, completeSymmFlag );
-#if 1
- cvCopy( JtJ, JtJN );
- for( i = 0; i < nparams; i++ )
- JtJN->data.db[(nparams+1)*i] *= 1. + lambda;
-#else
- cvSetIdentity(JtJN, cvRealScalar(lambda));
- cvAdd( JtJ, JtJN, JtJN );
-#endif
- cvSVD( JtJN, JtJW, 0, JtJV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
- cvSVBkSb( JtJW, JtJV, JtJV, JtErr, param, CV_SVD_U_T + CV_SVD_V_T );
- for( i = 0; i < nparams; i++ )
- param->data.db[i] = prevParam->data.db[i] - (mask->data.ptr[i] ? param->data.db[i] : 0);
-}
-
-// reimplementation of dAB.m
-CV_IMPL void
-cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB )
-{
- CV_FUNCNAME( "cvCalcMatMulDeriv" );
-
- __BEGIN__;
-
- int i, j, M, N, L;
- int bstep;
-
- CV_ASSERT( CV_IS_MAT(A) && CV_IS_MAT(B) );
- CV_ASSERT( CV_ARE_TYPES_EQ(A, B) &&
- (CV_MAT_TYPE(A->type) == CV_32F || CV_MAT_TYPE(A->type) == CV_64F) );
- CV_ASSERT( A->cols == B->rows );
-
- M = A->rows;
- L = A->cols;
- N = B->cols;
- bstep = B->step/CV_ELEM_SIZE(B->type);
-
- if( dABdA )
- {
- CV_ASSERT( CV_ARE_TYPES_EQ(A, dABdA) &&
- dABdA->rows == A->rows*B->cols && dABdA->cols == A->rows*A->cols );
- }
-
- if( dABdB )
- {
- CV_ASSERT( CV_ARE_TYPES_EQ(A, dABdB) &&
- dABdB->rows == A->rows*B->cols && dABdB->cols == B->rows*B->cols );
- }
-
- if( CV_MAT_TYPE(A->type) == CV_32F )
- {
- for( i = 0; i < M*N; i++ )
- {
- int i1 = i / N, i2 = i % N;
-
- if( dABdA )
- {
- float* dcda = (float*)(dABdA->data.ptr + dABdA->step*i);
- const float* b = (const float*)B->data.ptr + i2;
-
- for( j = 0; j < M*L; j++ )
- dcda[j] = 0;
- for( j = 0; j < L; j++ )
- dcda[i1*L + j] = b[j*bstep];
- }
-
- if( dABdB )
- {
- float* dcdb = (float*)(dABdB->data.ptr + dABdB->step*i);
- const float* a = (const float*)(A->data.ptr + A->step*i1);
-
- for( j = 0; j < L*N; j++ )
- dcdb[j] = 0;
- for( j = 0; j < L; j++ )
- dcdb[j*N + i2] = a[j];
- }
- }
- }
- else
- {
- for( i = 0; i < M*N; i++ )
- {
- int i1 = i / N, i2 = i % N;
-
- if( dABdA )
- {
- double* dcda = (double*)(dABdA->data.ptr + dABdA->step*i);
- const double* b = (const double*)B->data.ptr + i2;
-
- for( j = 0; j < M*L; j++ )
- dcda[j] = 0;
- for( j = 0; j < L; j++ )
- dcda[i1*L + j] = b[j*bstep];
- }
-
- if( dABdB )
- {
- double* dcdb = (double*)(dABdB->data.ptr + dABdB->step*i);
- const double* a = (const double*)(A->data.ptr + A->step*i1);
-
- for( j = 0; j < L*N; j++ )
- dcdb[j] = 0;
- for( j = 0; j < L; j++ )
- dcdb[j*N + i2] = a[j];
- }
- }
- }
-
- __END__;
-}
-
-// reimplementation of compose_motion.m
-// Composes two rigid-body transforms: [R3|t3] = [R2|t2]*[R1|t1], where each
-// rotation is given as a 3x1 Rodrigues vector. Optionally outputs the
-// Jacobians of the composed rvec/tvec w.r.t. each input rvec/tvec.
-// Note dr3/dt1, dr3/dt2 are identically zero and dt3/dt2 is identity, so
-// those outputs are filled without entering the main computation blocks.
-CV_IMPL void
-cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
- const CvMat* _rvec2, const CvMat* _tvec2,
- CvMat* _rvec3, CvMat* _tvec3,
- CvMat* dr3dr1, CvMat* dr3dt1,
- CvMat* dr3dr2, CvMat* dr3dt2,
- CvMat* dt3dr1, CvMat* dt3dt1,
- CvMat* dt3dr2, CvMat* dt3dt2 )
-{
- CV_FUNCNAME( "cvComposeRT" );
-
- __BEGIN__;
-
- double _r1[3], _r2[3];
- double _R1[9], _d1[9*3], _R2[9], _d2[9*3];
- CvMat r1 = cvMat(3,1,CV_64F,_r1), r2 = cvMat(3,1,CV_64F,_r2);
- CvMat R1 = cvMat(3,3,CV_64F,_R1), R2 = cvMat(3,3,CV_64F,_R2);
- CvMat dR1dr1 = cvMat(9,3,CV_64F,_d1), dR2dr2 = cvMat(9,3,CV_64F,_d2);
-
- CV_ASSERT( CV_IS_MAT(_rvec1) && CV_IS_MAT(_rvec2) );
-
- CV_ASSERT( CV_MAT_TYPE(_rvec1->type) == CV_32F ||
- CV_MAT_TYPE(_rvec1->type) == CV_64F );
-
- CV_ASSERT( _rvec1->rows == 3 && _rvec1->cols == 1 && CV_ARE_SIZES_EQ(_rvec1, _rvec2) );
-
- cvConvert( _rvec1, &r1 );
- cvConvert( _rvec2, &r2 );
-
- // rotation matrices and their 9x3 derivatives w.r.t. the rotation vectors
- cvRodrigues2( &r1, &R1, &dR1dr1 );
- cvRodrigues2( &r2, &R2, &dR2dr2 );
-
- // BUGFIX: the condition previously read "dr3dr1 || dr3dr1" (duplicated),
- // so requesting only dr3dr2 skipped this whole block and left dr3dr2
- // uninitialized.
- if( _rvec3 || dr3dr1 || dr3dr2 )
- {
- double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3];
- double _W1[9*3], _W2[3*3];
- CvMat r3 = cvMat(3,1,CV_64F,_r3), R3 = cvMat(3,3,CV_64F,_R3);
- CvMat dR3dR1 = cvMat(9,9,CV_64F,_dR3dR1), dR3dR2 = cvMat(9,9,CV_64F,_dR3dR2);
- CvMat dr3dR3 = cvMat(3,9,CV_64F,_dr3dR3);
- CvMat W1 = cvMat(3,9,CV_64F,_W1), W2 = cvMat(3,3,CV_64F,_W2);
-
- // R3 = R2*R1, with d(R3)/dR2 and d(R3)/dR1
- cvMatMul( &R2, &R1, &R3 );
- cvCalcMatMulDeriv( &R2, &R1, &dR3dR2, &dR3dR1 );
-
- cvRodrigues2( &R3, &r3, &dr3dR3 );
-
- if( _rvec3 )
- cvConvert( &r3, _rvec3 );
-
- // chain rule: dr3/dr1 = dr3/dR3 * dR3/dR1 * dR1/dr1
- if( dr3dr1 )
- {
- cvMatMul( &dr3dR3, &dR3dR1, &W1 );
- cvMatMul( &W1, &dR1dr1, &W2 );
- cvConvert( &W2, dr3dr1 );
- }
-
- // chain rule: dr3/dr2 = dr3/dR3 * dR3/dR2 * dR2/dr2
- if( dr3dr2 )
- {
- cvMatMul( &dr3dR3, &dR3dR2, &W1 );
- cvMatMul( &W1, &dR2dr2, &W2 );
- cvConvert( &W2, dr3dr2 );
- }
- }
-
- // the composed rotation does not depend on either translation
- if( dr3dt1 )
- cvZero( dr3dt1 );
- if( dr3dt2 )
- cvZero( dr3dt2 );
-
- if( _tvec3 || dt3dr2 || dt3dt1 )
- {
- double _t1[3], _t2[3], _t3[3], _dxdR2[3*9], _dxdt1[3*3], _W3[3*3];
- CvMat t1 = cvMat(3,1,CV_64F,_t1), t2 = cvMat(3,1,CV_64F,_t2);
- CvMat t3 = cvMat(3,1,CV_64F,_t3);
- CvMat dxdR2 = cvMat(3, 9, CV_64F, _dxdR2);
- CvMat dxdt1 = cvMat(3, 3, CV_64F, _dxdt1);
- CvMat W3 = cvMat(3, 3, CV_64F, _W3);
-
- CV_ASSERT( CV_IS_MAT(_tvec1) && CV_IS_MAT(_tvec2) );
- CV_ASSERT( CV_ARE_SIZES_EQ(_tvec1, _tvec2) && CV_ARE_SIZES_EQ(_tvec1, _rvec1) );
-
- cvConvert( _tvec1, &t1 );
- cvConvert( _tvec2, &t2 );
- // t3 = R2*t1 + t2
- cvMatMulAdd( &R2, &t1, &t2, &t3 );
-
- if( _tvec3 )
- cvConvert( &t3, _tvec3 );
-
- if( dt3dr2 || dt3dt1 )
- {
- cvCalcMatMulDeriv( &R2, &t1, &dxdR2, &dxdt1 );
- if( dt3dr2 )
- {
- cvMatMul( &dxdR2, &dR2dr2, &W3 );
- cvConvert( &W3, dt3dr2 );
- }
- if( dt3dt1 )
- cvConvert( &dxdt1, dt3dt1 );
- }
- }
-
- // t3 = R2*t1 + t2  =>  dt3/dt2 = I, and t3 does not depend on r1
- if( dt3dt2 )
- cvSetIdentity( dt3dt2 );
- if( dt3dr1 )
- cvZero( dt3dr1 );
-
- __END__;
-}
-
-// Converts a rotation vector to a rotation matrix (Rodrigues' formula) or
-// back. Direction is chosen from the source size: 3x1/1x3 -> 3x3 matrix,
-// 3x3 matrix -> 3x1/1x3 vector. Optionally computes the 3x9 or 9x3 Jacobian
-// of the output w.r.t. the input. Returns 1 on success.
-CV_IMPL int
-cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
-{
- int result = 0;
-
- // BUGFIX: the diagnostic name was misspelled "cvRogrigues2", so every
- // error raised here reported a wrong function name.
- CV_FUNCNAME( "cvRodrigues2" );
-
- __BEGIN__;
-
- int depth, elem_size;
- int i, k;
- double J[27];
- CvMat _J = cvMat( 3, 9, CV_64F, J );
-
- if( !CV_IS_MAT(src) )
- CV_ERROR( !src ? CV_StsNullPtr : CV_StsBadArg, "Input argument is not a valid matrix" );
-
- if( !CV_IS_MAT(dst) )
- CV_ERROR( !dst ? CV_StsNullPtr : CV_StsBadArg,
- "The first output argument is not a valid matrix" );
-
- depth = CV_MAT_DEPTH(src->type);
- elem_size = CV_ELEM_SIZE(depth);
-
- if( depth != CV_32F && depth != CV_64F )
- CV_ERROR( CV_StsUnsupportedFormat, "The matrices must have 32f or 64f data type" );
-
- if( !CV_ARE_DEPTHS_EQ(src, dst) )
- CV_ERROR( CV_StsUnmatchedFormats, "All the matrices must have the same data type" );
-
- if( jacobian )
- {
- if( !CV_IS_MAT(jacobian) )
- CV_ERROR( CV_StsBadArg, "Jacobian is not a valid matrix" );
-
- if( !CV_ARE_DEPTHS_EQ(src, jacobian) || CV_MAT_CN(jacobian->type) != 1 )
- CV_ERROR( CV_StsUnmatchedFormats, "Jacobian must have 32fC1 or 64fC1 datatype" );
-
- if( (jacobian->rows != 9 || jacobian->cols != 3) &&
- (jacobian->rows != 3 || jacobian->cols != 9))
- CV_ERROR( CV_StsBadSize, "Jacobian must be 3x9 or 9x3" );
- }
-
- // vector -> matrix direction
- if( src->cols == 1 || src->rows == 1 )
- {
- double rx, ry, rz, theta;
- int step = src->rows > 1 ? src->step / elem_size : 1;
-
- if( src->rows + src->cols*CV_MAT_CN(src->type) - 1 != 3 )
- CV_ERROR( CV_StsBadSize, "Input matrix must be 1x3, 3x1 or 3x3" );
-
- if( dst->rows != 3 || dst->cols != 3 || CV_MAT_CN(dst->type) != 1 )
- CV_ERROR( CV_StsBadSize, "Output matrix must be 3x3, single-channel floating point matrix" );
-
- if( depth == CV_32F )
- {
- rx = src->data.fl[0];
- ry = src->data.fl[step];
- rz = src->data.fl[step*2];
- }
- else
- {
- rx = src->data.db[0];
- ry = src->data.db[step];
- rz = src->data.db[step*2];
- }
- theta = sqrt(rx*rx + ry*ry + rz*rz);
-
- if( theta < DBL_EPSILON )
- {
- // near-zero rotation: R = I; the Jacobian is that of the
- // linearized map R ~ I + [r]_x at r = 0
- cvSetIdentity( dst );
-
- if( jacobian )
- {
- memset( J, 0, sizeof(J) );
- J[5] = J[15] = J[19] = -1;
- J[7] = J[11] = J[21] = 1;
- }
- }
- else
- {
- const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
-
- double c = cos(theta);
- double s = sin(theta);
- double c1 = 1. - c;
- double itheta = theta ? 1./theta : 0.;
-
- // normalize the axis
- rx *= itheta; ry *= itheta; rz *= itheta;
-
- double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };
- double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };
- double R[9];
- CvMat _R = cvMat( 3, 3, CV_64F, R );
-
- // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
- // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
- for( k = 0; k < 9; k++ )
- R[k] = c*I[k] + c1*rrt[k] + s*_r_x_[k];
-
- cvConvert( &_R, dst );
-
- if( jacobian )
- {
- // derivatives of r*rT and [r_x] w.r.t. the (normalized)
- // components rx, ry, rz, one 3x3 slab per component
- double drrt[] = { rx+rx, ry, rz, ry, 0, 0, rz, 0, 0,
- 0, rx, 0, rx, ry+ry, rz, 0, rz, 0,
- 0, 0, rx, 0, 0, ry, rx, ry, rz+rz };
- double d_r_x_[] = { 0, 0, 0, 0, 0, -1, 0, 1, 0,
- 0, 0, 1, 0, 0, 0, -1, 0, 0,
- 0, -1, 0, 1, 0, 0, 0, 0, 0 };
- for( i = 0; i < 3; i++ )
- {
- double ri = i == 0 ? rx : i == 1 ? ry : rz;
- double a0 = -s*ri, a1 = (s - 2*c1*itheta)*ri, a2 = c1*itheta;
- double a3 = (c - s*itheta)*ri, a4 = s*itheta;
- for( k = 0; k < 9; k++ )
- J[i*9+k] = a0*I[k] + a1*rrt[k] + a2*drrt[i*9+k] +
- a3*_r_x_[k] + a4*d_r_x_[i*9+k];
- }
- }
- }
- }
- // matrix -> vector direction
- else if( src->cols == 3 && src->rows == 3 )
- {
- double R[9], U[9], V[9], W[3], rx, ry, rz;
- CvMat _R = cvMat( 3, 3, CV_64F, R );
- CvMat _U = cvMat( 3, 3, CV_64F, U );
- CvMat _V = cvMat( 3, 3, CV_64F, V );
- CvMat _W = cvMat( 3, 1, CV_64F, W );
- double theta, s, c;
- int step = dst->rows > 1 ? dst->step / elem_size : 1;
-
- if( (dst->rows != 1 || dst->cols*CV_MAT_CN(dst->type) != 3) &&
- (dst->rows != 3 || dst->cols != 1 || CV_MAT_CN(dst->type) != 1))
- CV_ERROR( CV_StsBadSize, "Output matrix must be 1x3 or 3x1" );
-
- cvConvert( src, &_R );
- if( !cvCheckArr( &_R, CV_CHECK_RANGE+CV_CHECK_QUIET, -100, 100 ) )
- {
- // wildly out-of-range input: return zero vector/Jacobian
- cvZero(dst);
- if( jacobian )
- cvZero(jacobian);
- EXIT;
- }
-
- // project the input onto the closest true rotation: R = U*V^T
- cvSVD( &_R, &_W, &_U, &_V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
- cvGEMM( &_U, &_V, 1, 0, 0, &_R, CV_GEMM_A_T );
-
- // 2*sin(theta)*axis from the skew-symmetric part of R
- rx = R[7] - R[5];
- ry = R[2] - R[6];
- rz = R[3] - R[1];
-
- s = sqrt((rx*rx + ry*ry + rz*rz)*0.25);
- c = (R[0] + R[4] + R[8] - 1)*0.5;
- c = c > 1. ? 1. : c < -1. ? -1. : c;
- theta = acos(c);
-
- if( s < 1e-5 )
- {
- // theta near 0 or near pi; the generic formula is singular here
- double t;
-
- if( c > 0 )
- rx = ry = rz = 0;
- else
- {
- // theta ~ pi: recover the axis from the diagonal of R
- t = (R[0] + 1)*0.5;
- rx = sqrt(MAX(t,0.));
- t = (R[4] + 1)*0.5;
- ry = sqrt(MAX(t,0.))*(R[1] < 0 ? -1. : 1.);
- t = (R[8] + 1)*0.5;
- rz = sqrt(MAX(t,0.))*(R[2] < 0 ? -1. : 1.);
- if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R[5] > 0) != (ry*rz > 0) )
- rz = -rz;
- theta /= sqrt(rx*rx + ry*ry + rz*rz);
- rx *= theta;
- ry *= theta;
- rz *= theta;
- }
-
- if( jacobian )
- {
- memset( J, 0, sizeof(J) );
- if( c > 0 )
- {
- J[5] = J[15] = J[19] = -0.5;
- J[7] = J[11] = J[21] = 0.5;
- }
- }
- }
- else
- {
- double vth = 1/(2*s);
-
- if( jacobian )
- {
- double t, dtheta_dtr = -1./s;
- // var1 = [vth;theta]
- // var = [om1;var1] = [om1;vth;theta]
- double dvth_dtheta = -vth*c/s;
- double d1 = 0.5*dvth_dtheta*dtheta_dtr;
- double d2 = 0.5*dtheta_dtr;
- // dvar1/dR = dvar1/dtheta*dtheta/dR = [dvth/dtheta; 1] * dtheta/dtr * dtr/dR
- double dvardR[5*9] =
- {
- 0, 0, 0, 0, 0, 1, 0, -1, 0,
- 0, 0, -1, 0, 0, 0, 1, 0, 0,
- 0, 1, 0, -1, 0, 0, 0, 0, 0,
- d1, 0, 0, 0, d1, 0, 0, 0, d1,
- d2, 0, 0, 0, d2, 0, 0, 0, d2
- };
- // var2 = [om;theta]
- double dvar2dvar[] =
- {
- vth, 0, 0, rx, 0,
- 0, vth, 0, ry, 0,
- 0, 0, vth, rz, 0,
- 0, 0, 0, 0, 1
- };
- double domegadvar2[] =
- {
- theta, 0, 0, rx*vth,
- 0, theta, 0, ry*vth,
- 0, 0, theta, rz*vth
- };
-
- CvMat _dvardR = cvMat( 5, 9, CV_64FC1, dvardR );
- CvMat _dvar2dvar = cvMat( 4, 5, CV_64FC1, dvar2dvar );
- CvMat _domegadvar2 = cvMat( 3, 4, CV_64FC1, domegadvar2 );
- double t0[3*5];
- CvMat _t0 = cvMat( 3, 5, CV_64FC1, t0 );
-
- cvMatMul( &_domegadvar2, &_dvar2dvar, &_t0 );
- cvMatMul( &_t0, &_dvardR, &_J );
-
- // transpose every row of _J (treat the rows as 3x3 matrices)
- CV_SWAP(J[1], J[3], t); CV_SWAP(J[2], J[6], t); CV_SWAP(J[5], J[7], t);
- CV_SWAP(J[10], J[12], t); CV_SWAP(J[11], J[15], t); CV_SWAP(J[14], J[16], t);
- CV_SWAP(J[19], J[21], t); CV_SWAP(J[20], J[24], t); CV_SWAP(J[23], J[25], t);
- }
-
- vth *= theta;
- rx *= vth; ry *= vth; rz *= vth;
- }
-
- if( depth == CV_32F )
- {
- dst->data.fl[0] = (float)rx;
- dst->data.fl[step] = (float)ry;
- dst->data.fl[step*2] = (float)rz;
- }
- else
- {
- dst->data.db[0] = rx;
- dst->data.db[step] = ry;
- dst->data.db[step*2] = rz;
- }
- }
-
- if( jacobian )
- {
- // _J is always 3x9; transpose when the caller asked for 9x3
- if( depth == CV_32F )
- {
- if( jacobian->rows == _J.rows )
- cvConvert( &_J, jacobian );
- else
- {
- float Jf[3*9];
- CvMat _Jf = cvMat( _J.rows, _J.cols, CV_32FC1, Jf );
- cvConvert( &_J, &_Jf );
- cvTranspose( &_Jf, jacobian );
- }
- }
- else if( jacobian->rows == _J.rows )
- cvCopy( &_J, jacobian );
- else
- cvTranspose( &_J, jacobian );
- }
-
- result = 1;
-
- __END__;
-
- return result;
-}
-
-
-// Projects 3D object points to the image plane given extrinsics (r_vec,
-// t_vec), intrinsics A and optional distortion coefficients (k1 k2 p1 p2
-// [k3]). Optionally computes the Jacobians of the image points w.r.t. the
-// rotation (dpdr), translation (dpdt), focal lengths (dpdf), principal
-// point (dpdc) and distortion coefficients (dpdk). aspectRatio > 0 fixes
-// fx = fy*aspectRatio.
-CV_IMPL void
-cvProjectPoints2( const CvMat* objectPoints,
- const CvMat* r_vec,
- const CvMat* t_vec,
- const CvMat* A,
- const CvMat* distCoeffs,
- CvMat* imagePoints, CvMat* dpdr,
- CvMat* dpdt, CvMat* dpdf,
- CvMat* dpdc, CvMat* dpdk,
- double aspectRatio )
-{
- CvMat *_M = 0, *_m = 0;
- CvMat *_dpdr = 0, *_dpdt = 0, *_dpdc = 0, *_dpdf = 0, *_dpdk = 0;
-
- CV_FUNCNAME( "cvProjectPoints2" );
-
- __BEGIN__;
-
- int i, j, count;
- int calc_derivatives;
- const CvPoint3D64f* M;
- CvPoint2D64f* m;
- double r[3], R[9], dRdr[27], t[3], a[9], k[5] = {0,0,0,0,0}, fx, fy, cx, cy;
- CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
- CvMat _R = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );
- double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0;
- int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, dpdc_step = 0;
- bool fixedAspectRatio = aspectRatio > FLT_EPSILON;
-
- if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(r_vec) ||
- !CV_IS_MAT(t_vec) || !CV_IS_MAT(A) ||
- /*!CV_IS_MAT(distCoeffs) ||*/ !CV_IS_MAT(imagePoints) )
- CV_ERROR( CV_StsBadArg, "One of required arguments is not a valid matrix" );
-
- count = MAX(objectPoints->rows, objectPoints->cols);
-
- // reuse the input/output arrays directly when they already are contiguous
- // 64f point lists; otherwise convert through temporaries
- if( CV_IS_CONT_MAT(objectPoints->type) && CV_MAT_DEPTH(objectPoints->type) == CV_64F &&
- ((objectPoints->rows == 1 && CV_MAT_CN(objectPoints->type) == 3) ||
- (objectPoints->rows == count && CV_MAT_CN(objectPoints->type)*objectPoints->cols == 3)))
- _M = (CvMat*)objectPoints;
- else
- {
- CV_CALL( _M = cvCreateMat( 1, count, CV_64FC3 ));
- CV_CALL( cvConvertPointsHomogeneous( objectPoints, _M ));
- }
-
- if( CV_IS_CONT_MAT(imagePoints->type) && CV_MAT_DEPTH(imagePoints->type) == CV_64F &&
- ((imagePoints->rows == 1 && CV_MAT_CN(imagePoints->type) == 2) ||
- (imagePoints->rows == count && CV_MAT_CN(imagePoints->type)*imagePoints->cols == 2)))
- _m = imagePoints;
- else
- CV_CALL( _m = cvCreateMat( 1, count, CV_64FC2 ));
-
- M = (CvPoint3D64f*)_M->data.db;
- m = (CvPoint2D64f*)_m->data.db;
-
- if( (CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F) ||
- (((r_vec->rows != 1 && r_vec->cols != 1) ||
- r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3) &&
- ((r_vec->rows != 3 && r_vec->cols != 3) || CV_MAT_CN(r_vec->type) != 1)))
- CV_ERROR( CV_StsBadArg, "Rotation must be represented by 1x3 or 3x1 "
- "floating-point rotation vector, or 3x3 rotation matrix" );
-
- if( r_vec->rows == 3 && r_vec->cols == 3 )
- {
- // 3x3 matrix input: go through the vector form to get dR/dr, then
- // keep the caller's exact matrix.
- _r = cvMat( 3, 1, CV_64FC1, r );
- CV_CALL( cvRodrigues2( r_vec, &_r ));
- CV_CALL( cvRodrigues2( &_r, &_R, &_dRdr ));
- // BUGFIX: was cvCopy, which fails when r_vec is CV_32FC1 (allowed by
- // the check above) because _R is CV_64F; cvConvert handles both depths
- cvConvert( r_vec, &_R );
- }
- else
- {
- _r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r );
- CV_CALL( cvConvert( r_vec, &_r ));
- CV_CALL( cvRodrigues2( &_r, &_R, &_dRdr ) );
- }
-
- if( (CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F) ||
- (t_vec->rows != 1 && t_vec->cols != 1) ||
- t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )
- CV_ERROR( CV_StsBadArg,
- "Translation vector must be 1x3 or 3x1 floating-point vector" );
-
- _t = cvMat( t_vec->rows, t_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), t );
- CV_CALL( cvConvert( t_vec, &_t ));
-
- if( (CV_MAT_TYPE(A->type) != CV_64FC1 && CV_MAT_TYPE(A->type) != CV_32FC1) ||
- A->rows != 3 || A->cols != 3 )
- CV_ERROR( CV_StsBadArg, "Instrinsic parameters must be 3x3 floating-point matrix" );
-
- CV_CALL( cvConvert( A, &_a ));
- fx = a[0]; fy = a[4];
- cx = a[2]; cy = a[5];
-
- if( fixedAspectRatio )
- fx = fy*aspectRatio;
-
- if( distCoeffs )
- {
- if( !CV_IS_MAT(distCoeffs) ||
- (CV_MAT_DEPTH(distCoeffs->type) != CV_64F &&
- CV_MAT_DEPTH(distCoeffs->type) != CV_32F) ||
- (distCoeffs->rows != 1 && distCoeffs->cols != 1) ||
- (distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&
- distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5) )
- CV_ERROR( CV_StsBadArg,
- "Distortion coefficients must be 1x4, 4x1, 1x5 or 5x1 floating-point vector" );
-
- _k = cvMat( distCoeffs->rows, distCoeffs->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k );
- CV_CALL( cvConvert( distCoeffs, &_k ));
- }
-
- // validate each requested Jacobian; use it directly if 64f, otherwise
- // compute into a 64f temporary and convert at the end
- if( dpdr )
- {
- if( !CV_IS_MAT(dpdr) ||
- (CV_MAT_TYPE(dpdr->type) != CV_32FC1 &&
- CV_MAT_TYPE(dpdr->type) != CV_64FC1) ||
- dpdr->rows != count*2 || dpdr->cols != 3 )
- CV_ERROR( CV_StsBadArg, "dp/drot must be 2Nx3 floating-point matrix" );
-
- if( CV_MAT_TYPE(dpdr->type) == CV_64FC1 )
- _dpdr = dpdr;
- else
- CV_CALL( _dpdr = cvCreateMat( 2*count, 3, CV_64FC1 ));
- dpdr_p = _dpdr->data.db;
- dpdr_step = _dpdr->step/sizeof(dpdr_p[0]);
- }
-
- if( dpdt )
- {
- if( !CV_IS_MAT(dpdt) ||
- (CV_MAT_TYPE(dpdt->type) != CV_32FC1 &&
- CV_MAT_TYPE(dpdt->type) != CV_64FC1) ||
- dpdt->rows != count*2 || dpdt->cols != 3 )
- CV_ERROR( CV_StsBadArg, "dp/dT must be 2Nx3 floating-point matrix" );
-
- if( CV_MAT_TYPE(dpdt->type) == CV_64FC1 )
- _dpdt = dpdt;
- else
- CV_CALL( _dpdt = cvCreateMat( 2*count, 3, CV_64FC1 ));
- dpdt_p = _dpdt->data.db;
- dpdt_step = _dpdt->step/sizeof(dpdt_p[0]);
- }
-
- if( dpdf )
- {
- if( !CV_IS_MAT(dpdf) ||
- (CV_MAT_TYPE(dpdf->type) != CV_32FC1 && CV_MAT_TYPE(dpdf->type) != CV_64FC1) ||
- dpdf->rows != count*2 || dpdf->cols != 2 )
- CV_ERROR( CV_StsBadArg, "dp/df must be 2Nx2 floating-point matrix" );
-
- if( CV_MAT_TYPE(dpdf->type) == CV_64FC1 )
- _dpdf = dpdf;
- else
- CV_CALL( _dpdf = cvCreateMat( 2*count, 2, CV_64FC1 ));
- dpdf_p = _dpdf->data.db;
- dpdf_step = _dpdf->step/sizeof(dpdf_p[0]);
- }
-
- if( dpdc )
- {
- if( !CV_IS_MAT(dpdc) ||
- (CV_MAT_TYPE(dpdc->type) != CV_32FC1 && CV_MAT_TYPE(dpdc->type) != CV_64FC1) ||
- dpdc->rows != count*2 || dpdc->cols != 2 )
- CV_ERROR( CV_StsBadArg, "dp/dc must be 2Nx2 floating-point matrix" );
-
- if( CV_MAT_TYPE(dpdc->type) == CV_64FC1 )
- _dpdc = dpdc;
- else
- CV_CALL( _dpdc = cvCreateMat( 2*count, 2, CV_64FC1 ));
- dpdc_p = _dpdc->data.db;
- dpdc_step = _dpdc->step/sizeof(dpdc_p[0]);
- }
-
- if( dpdk )
- {
- if( !CV_IS_MAT(dpdk) ||
- (CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) ||
- dpdk->rows != count*2 || (dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
- // BUGFIX: message used to say "dp/df" (copied from the dpdf branch)
- CV_ERROR( CV_StsBadArg, "dp/dk must be 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
-
- if( !distCoeffs )
- CV_ERROR( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not" );
-
- if( CV_MAT_TYPE(dpdk->type) == CV_64FC1 )
- _dpdk = dpdk;
- else
- CV_CALL( _dpdk = cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 ));
- dpdk_p = _dpdk->data.db;
- dpdk_step = _dpdk->step/sizeof(dpdk_p[0]);
- }
-
- calc_derivatives = dpdr || dpdt || dpdf || dpdc || dpdk;
-
- for( i = 0; i < count; i++ )
- {
- // rigid transform to camera coordinates
- double X = M[i].x, Y = M[i].y, Z = M[i].z;
- double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
- double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
- double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
- double r2, r4, r6, a1, a2, a3, cdist;
- double xd, yd;
-
- // perspective division (z == 0 treated as z == 1 to avoid div-by-zero)
- z = z ? 1./z : 1;
- x *= z; y *= z;
-
- // radial (k1,k2,k3) and tangential (p1,p2) distortion
- r2 = x*x + y*y;
- r4 = r2*r2;
- r6 = r4*r2;
- a1 = 2*x*y;
- a2 = r2 + 2*x*x;
- a3 = r2 + 2*y*y;
- cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
- xd = x*cdist + k[2]*a1 + k[3]*a2;
- yd = y*cdist + k[2]*a3 + k[3]*a1;
-
- m[i].x = xd*fx + cx;
- m[i].y = yd*fy + cy;
-
- if( calc_derivatives )
- {
- if( dpdc_p )
- {
- dpdc_p[0] = 1; dpdc_p[1] = 0;
- dpdc_p[dpdc_step] = 0;
- dpdc_p[dpdc_step+1] = 1;
- dpdc_p += dpdc_step*2;
- }
-
- if( dpdf_p )
- {
- // with a fixed aspect ratio only fy is free: dfx = aspectRatio*dfy
- if( fixedAspectRatio )
- {
- dpdf_p[0] = 0; dpdf_p[1] = xd*aspectRatio;
- dpdf_p[dpdf_step] = 0;
- dpdf_p[dpdf_step+1] = yd;
- }
- else
- {
- dpdf_p[0] = xd; dpdf_p[1] = 0;
- dpdf_p[dpdf_step] = 0;
- dpdf_p[dpdf_step+1] = yd;
- }
- dpdf_p += dpdf_step*2;
- }
-
- if( dpdk_p )
- {
- // columns: k1, k2, [p1, p2, [k3]]
- dpdk_p[0] = fx*x*r2;
- dpdk_p[1] = fx*x*r4;
- dpdk_p[dpdk_step] = fy*y*r2;
- dpdk_p[dpdk_step+1] = fy*y*r4;
- if( _dpdk->cols > 2 )
- {
- dpdk_p[2] = fx*a1;
- dpdk_p[3] = fx*a2;
- dpdk_p[dpdk_step+2] = fy*a3;
- dpdk_p[dpdk_step+3] = fy*a1;
- if( _dpdk->cols > 4 )
- {
- dpdk_p[4] = fx*x*r6;
- dpdk_p[dpdk_step+4] = fy*y*r6;
- }
- }
- dpdk_p += dpdk_step*2;
- }
-
- if( dpdt_p )
- {
- double dxdt[] = { z, 0, -x*z }, dydt[] = { 0, z, -y*z };
- for( j = 0; j < 3; j++ )
- {
- double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j];
- double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt;
- double da1dt = 2*(x*dydt[j] + y*dxdt[j]);
- double dmxdt = fx*(dxdt[j]*cdist + x*dcdist_dt +
- k[2]*da1dt + k[3]*(dr2dt + 2*x*dxdt[j]));
- double dmydt = fy*(dydt[j]*cdist + y*dcdist_dt +
- k[2]*(dr2dt + 2*y*dydt[j]) + k[3]*da1dt);
- dpdt_p[j] = dmxdt;
- dpdt_p[dpdt_step+j] = dmydt;
- }
- dpdt_p += dpdt_step*2;
- }
-
- if( dpdr_p )
- {
- // chain through dR/dr computed by cvRodrigues2
- double dx0dr[] =
- {
- X*dRdr[0] + Y*dRdr[1] + Z*dRdr[2],
- X*dRdr[9] + Y*dRdr[10] + Z*dRdr[11],
- X*dRdr[18] + Y*dRdr[19] + Z*dRdr[20]
- };
- double dy0dr[] =
- {
- X*dRdr[3] + Y*dRdr[4] + Z*dRdr[5],
- X*dRdr[12] + Y*dRdr[13] + Z*dRdr[14],
- X*dRdr[21] + Y*dRdr[22] + Z*dRdr[23]
- };
- double dz0dr[] =
- {
- X*dRdr[6] + Y*dRdr[7] + Z*dRdr[8],
- X*dRdr[15] + Y*dRdr[16] + Z*dRdr[17],
- X*dRdr[24] + Y*dRdr[25] + Z*dRdr[26]
- };
- for( j = 0; j < 3; j++ )
- {
- double dxdr = z*(dx0dr[j] - x*dz0dr[j]);
- double dydr = z*(dy0dr[j] - y*dz0dr[j]);
- double dr2dr = 2*x*dxdr + 2*y*dydr;
- double dcdist_dr = k[0]*dr2dr + 2*k[1]*r2*dr2dr + 3*k[4]*r4*dr2dr;
- double da1dr = 2*(x*dydr + y*dxdr);
- double dmxdr = fx*(dxdr*cdist + x*dcdist_dr +
- k[2]*da1dr + k[3]*(dr2dr + 2*x*dxdr));
- double dmydr = fy*(dydr*cdist + y*dcdist_dr +
- k[2]*(dr2dr + 2*y*dydr) + k[3]*da1dr);
- dpdr_p[j] = dmxdr;
- dpdr_p[dpdr_step+j] = dmydr;
- }
- dpdr_p += dpdr_step*2;
- }
- }
- }
-
- // copy results back through the temporaries where they were used
- if( _m != imagePoints )
- cvConvertPointsHomogeneous( _m, imagePoints );
- if( _dpdr != dpdr )
- cvConvert( _dpdr, dpdr );
- if( _dpdt != dpdt )
- cvConvert( _dpdt, dpdt );
- if( _dpdf != dpdf )
- cvConvert( _dpdf, dpdf );
- if( _dpdc != dpdc )
- cvConvert( _dpdc, dpdc );
- if( _dpdk != dpdk )
- cvConvert( _dpdk, dpdk );
-
- __END__;
-
- if( _M != objectPoints )
- cvReleaseMat( &_M );
- if( _m != imagePoints )
- cvReleaseMat( &_m );
- if( _dpdr != dpdr )
- cvReleaseMat( &_dpdr );
- if( _dpdt != dpdt )
- cvReleaseMat( &_dpdt );
- if( _dpdf != dpdf )
- cvReleaseMat( &_dpdf );
- if( _dpdc != dpdc )
- cvReleaseMat( &_dpdc );
- if( _dpdk != dpdk )
- cvReleaseMat( &_dpdk );
-}
-
-
-// Estimates the camera pose (rvec, tvec) from 3D-2D point correspondences.
-// Initialization: either the caller's guess (useExtrinsicGuess), a
-// homography-based method when the points are (near-)planar, or DLT
-// otherwise; the result is then refined with Levenberg-Marquardt
-// minimization of the reprojection error (at most max_iter iterations).
-CV_IMPL void
-cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
- const CvMat* imagePoints, const CvMat* A,
- const CvMat* distCoeffs,
- CvMat* rvec, CvMat* tvec,
- int useExtrinsicGuess )
-{
- const int max_iter = 20;
- // NOTE(review): the outer _J is never allocated (the loop below declares
- // its own _J), so the cvReleaseMat(&_J) at the end is a no-op.
- CvMat *_M = 0, *_Mxy = 0, *_m = 0, *_mn = 0, *_L = 0, *_J = 0;
-
- CV_FUNCNAME( "cvFindExtrinsicCameraParams2" );
-
- __BEGIN__;
-
- int i, count;
- double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
- double MM[9], U[9], V[9], W[3];
- CvScalar Mc;
- // param packs the 6 unknowns: rotation vector (param[0..2]) and
- // translation (param[3..5]); _r and _t alias into it.
- double param[6];
- CvMat _A = cvMat( 3, 3, CV_64F, a );
- CvMat _Ar = cvMat( 3, 3, CV_64F, ar );
- CvMat _R = cvMat( 3, 3, CV_64F, R );
- CvMat _r = cvMat( 3, 1, CV_64F, param );
- CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );
- CvMat _Mc = cvMat( 1, 3, CV_64F, Mc.val );
- CvMat _MM = cvMat( 3, 3, CV_64F, MM );
- CvMat _U = cvMat( 3, 3, CV_64F, U );
- CvMat _V = cvMat( 3, 3, CV_64F, V );
- CvMat _W = cvMat( 3, 1, CV_64F, W );
- CvMat _param = cvMat( 6, 1, CV_64F, param );
- CvMat _dpdr, _dpdt;
-
- CV_ASSERT( CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) &&
- CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) );
-
- count = MAX(objectPoints->cols, objectPoints->rows);
- CV_CALL( _M = cvCreateMat( 1, count, CV_64FC3 ));
- CV_CALL( _m = cvCreateMat( 1, count, CV_64FC2 ));
-
- CV_CALL( cvConvertPointsHomogeneous( objectPoints, _M ));
- CV_CALL( cvConvertPointsHomogeneous( imagePoints, _m ));
- CV_CALL( cvConvert( A, &_A ));
-
- CV_ASSERT( (CV_MAT_DEPTH(rvec->type) == CV_64F || CV_MAT_DEPTH(rvec->type) == CV_32F) &&
- (rvec->rows == 1 || rvec->cols == 1) && rvec->rows*rvec->cols*CV_MAT_CN(rvec->type) == 3 );
-
- CV_ASSERT( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) &&
- (tvec->rows == 1 || tvec->cols == 1) && tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 );
-
- CV_CALL( _mn = cvCreateMat( 1, count, CV_64FC2 ));
- CV_CALL( _Mxy = cvCreateMat( 1, count, CV_64FC2 ));
-
- // normalize image points
- // (unapply the intrinsic matrix transformation and distortion)
- cvUndistortPoints( _m, _mn, &_A, distCoeffs, 0, &_Ar );
-
- if( useExtrinsicGuess )
- {
- // start LM from the caller-provided pose
- CvMat _r_temp = cvMat(rvec->rows, rvec->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
- CvMat _t_temp = cvMat(tvec->rows, tvec->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3);
- cvConvert( rvec, &_r_temp );
- cvConvert( tvec, &_t_temp );
- }
- else
- {
- // analyze the 3D point cloud: eigen-structure of its scatter matrix
- // tells whether the points are (near-)planar
- Mc = cvAvg(_M);
- cvReshape( _M, _M, 1, count );
- cvMulTransposed( _M, &_MM, 1, &_Mc );
- cvSVD( &_MM, &_W, 0, &_V, CV_SVD_MODIFY_A + CV_SVD_V_T );
-
- // initialize extrinsic parameters
- if( W[2]/W[1] < 1e-3 || count < 4 )
- {
- // a planar structure case (all M's lie in the same plane)
- double tt[3], h[9], h1_norm, h2_norm;
- CvMat* R_transform = &_V;
- CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
- CvMat _H = cvMat( 3, 3, CV_64F, h );
- CvMat _h1, _h2, _h3;
-
- if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
- cvSetIdentity( R_transform );
-
- if( cvDet(R_transform) < 0 )
- cvScale( R_transform, R_transform, -1 );
-
- // bring the points into the plane's own coordinate frame
- cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );
-
- for( i = 0; i < count; i++ )
- {
- const double* Rp = R_transform->data.db;
- const double* Tp = T_transform.data.db;
- const double* src = _M->data.db + i*3;
- double* dst = _Mxy->data.db + i*2;
-
- dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
- dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
- }
-
- // homography plane->normalized image gives [r1 r2 t] up to scale
- cvFindHomography( _Mxy, _mn, &_H );
-
- cvGetCol( &_H, &_h1, 0 );
- _h2 = _h1; _h2.data.db++;
- _h3 = _h2; _h3.data.db++;
- h1_norm = sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
- h2_norm = sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);
-
- cvScale( &_h1, &_h1, 1./h1_norm );
- cvScale( &_h2, &_h2, 1./h2_norm );
- cvScale( &_h3, &_t, 2./(h1_norm + h2_norm));
- // complete the rotation: r3 = r1 x r2
- cvCrossProduct( &_h1, &_h2, &_h3 );
-
- // round-trip through Rodrigues form to project _H onto a rotation
- cvRodrigues2( &_H, &_r );
- cvRodrigues2( &_r, &_H );
- // compose with the plane-frame transform to get the full pose
- cvMatMulAdd( &_H, &T_transform, &_t, &_t );
- cvMatMul( &_H, R_transform, &_R );
- cvRodrigues2( &_R, &_r );
- }
- else
- {
- // non-planar structure. Use DLT method
- double* L;
- double LL[12*12], LW[12], LV[12*12], sc;
- CvMat _LL = cvMat( 12, 12, CV_64F, LL );
- CvMat _LW = cvMat( 12, 1, CV_64F, LW );
- CvMat _LV = cvMat( 12, 12, CV_64F, LV );
- CvMat _RRt, _RR, _tt;
- CvPoint3D64f* M = (CvPoint3D64f*)_M->data.db;
- CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db;
-
- // two rows of the DLT system per correspondence
- CV_CALL( _L = cvCreateMat( 2*count, 12, CV_64F ));
- L = _L->data.db;
-
- for( i = 0; i < count; i++, L += 24 )
- {
- double x = -mn[i].x, y = -mn[i].y;
- L[0] = L[16] = M[i].x;
- L[1] = L[17] = M[i].y;
- L[2] = L[18] = M[i].z;
- L[3] = L[19] = 1.;
- L[4] = L[5] = L[6] = L[7] = 0.;
- L[12] = L[13] = L[14] = L[15] = 0.;
- L[8] = x*M[i].x;
- L[9] = x*M[i].y;
- L[10] = x*M[i].z;
- L[11] = x;
- L[20] = y*M[i].x;
- L[21] = y*M[i].y;
- L[22] = y*M[i].z;
- L[23] = y;
- }
-
- // the solution is the null-space vector: last row of V from SVD(L^T L)
- cvMulTransposed( _L, &_LL, 1 );
- cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
- _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
- cvGetCols( &_RRt, &_RR, 0, 3 );
- cvGetCol( &_RRt, &_tt, 3 );
- if( cvDet(&_RR) < 0 )
- cvScale( &_RRt, &_RRt, -1 );
- sc = cvNorm(&_RR);
- // project _RR onto the closest true rotation and rescale t accordingly
- cvSVD( &_RR, &_W, &_U, &_V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
- cvGEMM( &_U, &_V, 1, 0, 0, &_R, CV_GEMM_A_T );
- cvScale( &_tt, &_t, cvNorm(&_R)/sc );
- cvRodrigues2( &_R, &_r );
- cvReleaseMat( &_L );
- }
- }
-
- cvReshape( _M, _M, 3, 1 );
- cvReshape( _mn, _mn, 2, 1 );
-
- // refine extrinsic parameters using iterative algorithm
- {
- CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true);
- cvCopy( &_param, solver.param );
-
- for(;;)
- {
- CvMat *_J = 0, *_err = 0;
- const CvMat *__param = 0;
- bool proceed = solver.update( __param, _J, _err );
- cvCopy( __param, &_param );
- if( !proceed || !_err )
- break;
- cvReshape( _err, _err, 2, 1 );
- if( _J )
- {
- // solver wants the Jacobian: Jacobian columns 0-2 are d/dr,
- // 3-5 are d/dt, filled by cvProjectPoints2
- cvGetCols( _J, &_dpdr, 0, 3 );
- cvGetCols( _J, &_dpdt, 3, 6 );
- cvProjectPoints2( _M, &_r, &_t, &_A, distCoeffs,
- _err, &_dpdr, &_dpdt, 0, 0, 0 );
- }
- else
- {
- cvProjectPoints2( _M, &_r, &_t, &_A, distCoeffs,
- _err, 0, 0, 0, 0, 0 );
- }
- // residual = projected - observed
- cvSub(_err, _m, _err);
- cvReshape( _err, _err, 1, 2*count );
- }
- cvCopy( solver.param, &_param );
- }
-
- // write back with the caller's layout and depth
- _r = cvMat( rvec->rows, rvec->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
- _t = cvMat( tvec->rows, tvec->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3 );
-
- cvConvert( &_r, rvec );
- cvConvert( &_t, tvec );
-
- __END__;
-
- cvReleaseMat( &_M );
- cvReleaseMat( &_Mxy );
- cvReleaseMat( &_m );
- cvReleaseMat( &_mn );
- cvReleaseMat( &_L );
- cvReleaseMat( &_J );
-}
-
-
-// Computes an initial estimate of the camera intrinsic matrix from several
-// views of a planar calibration pattern. The principal point is fixed at the
-// image center; focal lengths are estimated from per-view homographies via
-// vanishing-point constraints. aspectRatio != 0 forces fx = aspectRatio*fy.
-CV_IMPL void
-cvInitIntrinsicParams2D( const CvMat* objectPoints,
- const CvMat* imagePoints,
- const CvMat* npoints,
- CvSize imageSize,
- CvMat* cameraMatrix,
- double aspectRatio )
-{
- // NOTE(review): _allK is declared and released but never allocated or
- // used (cvReleaseMat on a null pointer is a no-op).
- CvMat *_A = 0, *_b = 0, *_allH = 0, *_allK = 0;
-
- CV_FUNCNAME( "cvInitIntrinsicParams2D" );
-
- __BEGIN__;
-
- int i, j, pos, nimages, total, ni = 0;
- double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
- double H[9], f[2];
- CvMat _a = cvMat( 3, 3, CV_64F, a );
- CvMat _H = cvMat( 3, 3, CV_64F, H );
- CvMat _f = cvMat( 2, 1, CV_64F, f );
-
- assert( CV_MAT_TYPE(npoints->type) == CV_32SC1 &&
- CV_IS_MAT_CONT(npoints->type) );
- nimages = npoints->rows + npoints->cols - 1;
-
- if( (CV_MAT_TYPE(objectPoints->type) != CV_32FC3 &&
- CV_MAT_TYPE(objectPoints->type) != CV_64FC3) ||
- (CV_MAT_TYPE(imagePoints->type) != CV_32FC2 &&
- CV_MAT_TYPE(imagePoints->type) != CV_64FC2) )
- CV_ERROR( CV_StsUnsupportedFormat, "Both object points and image points must be 2D" );
-
- if( objectPoints->rows != 1 || imagePoints->rows != 1 )
- CV_ERROR( CV_StsBadSize, "object points and image points must be a single-row matrices" );
-
- // least-squares system: two constraint rows per view, unknowns 1/fx^2, 1/fy^2
- _A = cvCreateMat( 2*nimages, 2, CV_64F );
- _b = cvCreateMat( 2*nimages, 1, CV_64F );
- // fix the principal point at the image center
- a[2] = (imageSize.width - 1)*0.5;
- a[5] = (imageSize.height - 1)*0.5;
- _allH = cvCreateMat( nimages, 9, CV_64F );
-
- // NOTE(review): total is computed but not used below
- total = cvRound(cvSum(npoints).val[0]);
-
- // extract vanishing points in order to obtain initial value for the focal length
- for( i = 0, pos = 0; i < nimages; i++, pos += ni )
- {
- double* Ap = _A->data.db + i*4;
- double* bp = _b->data.db + i*2;
- ni = npoints->data.i[i];
- double h[3], v[3], d1[3], d2[3];
- double n[4] = {0,0,0,0};
- CvMat _m, _M;
- cvGetCols( objectPoints, &_M, pos, pos + ni );
- cvGetCols( imagePoints, &_m, pos, pos + ni );
-
- cvFindHomography( &_M, &_m, &_H );
- memcpy( _allH->data.db + i*9, H, sizeof(H) );
-
- // shift the homography so the principal point is at the origin
- H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2];
- H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5];
-
- // h, v: images of the pattern's x/y directions; d1, d2: diagonals
- for( j = 0; j < 3; j++ )
- {
- double t0 = H[j*3], t1 = H[j*3+1];
- h[j] = t0; v[j] = t1;
- d1[j] = (t0 + t1)*0.5;
- d2[j] = (t0 - t1)*0.5;
- n[0] += t0*t0; n[1] += t1*t1;
- n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j];
- }
-
- for( j = 0; j < 4; j++ )
- n[j] = 1./sqrt(n[j]);
-
- for( j = 0; j < 3; j++ )
- {
- h[j] *= n[0]; v[j] *= n[1];
- d1[j] *= n[2]; d2[j] *= n[3];
- }
-
- // orthogonality constraints h.v = 0 and d1.d2 = 0 in camera space
- Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1];
- Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1];
- bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2];
- }
-
- // solve for (1/fx^2, 1/fy^2) in the least-squares sense
- cvSolve( _A, _b, &_f, CV_NORMAL + CV_SVD );
- a[0] = sqrt(fabs(1./f[0]));
- a[4] = sqrt(fabs(1./f[1]));
- if( aspectRatio != 0 )
- {
- // enforce the requested aspect ratio, preserving fx + fy
- double tf = (a[0] + a[4])/(aspectRatio + 1.);
- a[0] = aspectRatio*tf;
- a[4] = tf;
- }
-
- cvConvert( &_a, cameraMatrix );
-
- __END__;
-
- cvReleaseMat( &_A );
- cvReleaseMat( &_b );
- cvReleaseMat( &_allH );
- cvReleaseMat( &_allK );
-}
-
-
-/* finds intrinsic and extrinsic camera parameters
- from a few views of known calibration pattern */
-CV_IMPL void
-cvCalibrateCamera2( const CvMat* objectPoints,
- const CvMat* imagePoints,
- const CvMat* npoints,
- CvSize imageSize,
- CvMat* cameraMatrix, CvMat* distCoeffs,
- CvMat* rvecs, CvMat* tvecs,
- int flags )
-{
- const int NINTRINSIC = 9;
- CvMat *_M = 0, *_m = 0, *_Ji = 0, *_Je = 0, *_err = 0;
- CvLevMarq solver;
-
- CV_FUNCNAME( "cvCalibrateCamera2" );
-
- __BEGIN__;
-
- double A[9], k[5] = {0,0,0,0,0};
- CvMat _A = cvMat(3, 3, CV_64F, A), _k;
- int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
- double aspectRatio = 0.;
-
- // 0. check the parameters & allocate buffers
- if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints) ||
- !CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix) || !CV_IS_MAT(distCoeffs) )
- CV_ERROR( CV_StsBadArg, "One of required vector arguments is not a valid matrix" );
-
- if( imageSize.width <= 0 || imageSize.height <= 0 )
- CV_ERROR( CV_StsOutOfRange, "image width and height must be positive" );
-
- if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
- (npoints->rows != 1 && npoints->cols != 1) )
- CV_ERROR( CV_StsUnsupportedFormat,
- "the array of point counters must be 1-dimensional integer vector" );
-
- nimages = npoints->rows*npoints->cols;
- npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);
-
- if( rvecs )
- {
- cn = CV_MAT_CN(rvecs->type);
- if( !CV_IS_MAT(rvecs) ||
- (CV_MAT_DEPTH(rvecs->type) != CV_32F && CV_MAT_DEPTH(rvecs->type) != CV_64F) ||
- ((rvecs->rows != nimages || (rvecs->cols*cn != 3 && rvecs->cols*cn != 9)) &&
- (rvecs->rows != 1 || rvecs->cols != nimages || cn != 3)) )
- CV_ERROR( CV_StsBadArg, "the output array of rotation vectors must be 3-channel "
- "1xn or nx1 array or 1-channel nx3 or nx9 array, where n is the number of views" );
- }
-
- if( tvecs )
- {
- cn = CV_MAT_CN(tvecs->type);
- if( !CV_IS_MAT(tvecs) ||
- (CV_MAT_DEPTH(tvecs->type) != CV_32F && CV_MAT_DEPTH(tvecs->type) != CV_64F) ||
- ((tvecs->rows != nimages || tvecs->cols*cn != 3) &&
- (tvecs->rows != 1 || tvecs->cols != nimages || cn != 3)) )
- CV_ERROR( CV_StsBadArg, "the output array of translation vectors must be 3-channel "
- "1xn or nx1 array or 1-channel nx3 array, where n is the number of views" );
- }
-
- if( (CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 &&
- CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) ||
- cameraMatrix->rows != 3 || cameraMatrix->cols != 3 )
- CV_ERROR( CV_StsBadArg,
- "Intrinsic parameters must be 3x3 floating-point matrix" );
-
- if( (CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 &&
- CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||
- (distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
- (distCoeffs->cols*distCoeffs->rows != 4 &&
- distCoeffs->cols*distCoeffs->rows != 5) )
- CV_ERROR( CV_StsBadArg,
- "Distortion coefficients must be 4x1, 1x4, 5x1 or 1x5 floating-point matrix" );
-
- for( i = 0; i < nimages; i++ )
- {
- ni = npoints->data.i[i*npstep];
- if( ni < 4 )
- {
- char buf[100];
- sprintf( buf, "The number of points in the view #%d is < 4", i );
- CV_ERROR( CV_StsOutOfRange, buf );
- }
- maxPoints = MAX( maxPoints, ni );
- total += ni;
- }
-
- CV_CALL( _M = cvCreateMat( 1, total, CV_64FC3 ));
- CV_CALL( _m = cvCreateMat( 1, total, CV_64FC2 ));
-
- CV_CALL( cvConvertPointsHomogeneous( objectPoints, _M ));
- CV_CALL( cvConvertPointsHomogeneous( imagePoints, _m ));
-
- nparams = NINTRINSIC + nimages*6;
- CV_CALL( _Ji = cvCreateMat( maxPoints*2, NINTRINSIC, CV_64FC1 ));
- CV_CALL( _Je = cvCreateMat( maxPoints*2, 6, CV_64FC1 ));
- CV_CALL( _err = cvCreateMat( maxPoints*2, 1, CV_64FC1 ));
- cvZero( _Ji );
-
- _k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
- if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) == 4 )
- flags |= CV_CALIB_FIX_K3;
-
- // 1. initialize intrinsic parameters & LM solver
- if( flags & CV_CALIB_USE_INTRINSIC_GUESS )
- {
- cvConvert( cameraMatrix, &_A );
- if( A[0] <= 0 || A[4] <= 0 )
- CV_ERROR( CV_StsOutOfRange, "Focal length (fx and fy) must be positive" );
- if( A[2] < 0 || A[2] >= imageSize.width ||
- A[5] < 0 || A[5] >= imageSize.height )
- CV_ERROR( CV_StsOutOfRange, "Principal point must be within the image" );
- if( fabs(A[1]) > 1e-5 )
- CV_ERROR( CV_StsOutOfRange, "Non-zero skew is not supported by the function" );
- if( fabs(A[3]) > 1e-5 || fabs(A[6]) > 1e-5 ||
- fabs(A[7]) > 1e-5 || fabs(A[8]-1) > 1e-5 )
- CV_ERROR( CV_StsOutOfRange,
- "The intrinsic matrix must have [fx 0 cx; 0 fy cy; 0 0 1] shape" );
- A[1] = A[3] = A[6] = A[7] = 0.;
- A[8] = 1.;
-
- if( flags & CV_CALIB_FIX_ASPECT_RATIO )
- aspectRatio = A[0]/A[4];
- cvConvert( distCoeffs, &_k );
- }
- else
- {
- CvScalar mean, sdv;
- cvAvgSdv( _M, &mean, &sdv );
- if( fabs(mean.val[2]) > 1e-5 || fabs(sdv.val[2]) > 1e-5 )
- CV_ERROR( CV_StsBadArg,
- "For non-planar calibration rigs the initial intrinsic matrix must be specified" );
- for( i = 0; i < total; i++ )
- ((CvPoint3D64f*)_M->data.db)[i].z = 0.;
-
- if( flags & CV_CALIB_FIX_ASPECT_RATIO )
- {
- aspectRatio = cvmGet(cameraMatrix,0,0);
- aspectRatio /= cvmGet(cameraMatrix,1,1);
- if( aspectRatio < 0.01 || aspectRatio > 100 )
- CV_ERROR( CV_StsOutOfRange,
- "The specified aspect ratio (=A[0][0]/A[1][1]) is incorrect" );
- }
- cvInitIntrinsicParams2D( _M, _m, npoints, imageSize, &_A, aspectRatio );
- }
-
- solver.init( nparams, 0, cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) );
-
- {
- double* param = solver.param->data.db;
- uchar* mask = solver.mask->data.ptr;
-
- param[0] = A[0]; param[1] = A[4]; param[2] = A[2]; param[3] = A[5];
- param[4] = k[0]; param[5] = k[1]; param[6] = k[2]; param[7] = k[3];
- param[8] = k[4];
-
- if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
- mask[0] = mask[1] = 0;
- if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )
- mask[2] = mask[3] = 0;
- if( flags & CV_CALIB_ZERO_TANGENT_DIST )
- {
- param[6] = param[7] = 0;
- mask[6] = mask[7] = 0;
- }
- if( flags & CV_CALIB_FIX_K1 )
- mask[4] = 0;
- if( flags & CV_CALIB_FIX_K2 )
- mask[5] = 0;
- if( flags & CV_CALIB_FIX_K3 )
- mask[8] = 0;
- }
-
- // 2. initialize extrinsic parameters
- for( i = 0, pos = 0; i < nimages; i++, pos += ni )
- {
- CvMat _Mi, _mi, _ri, _ti;
- ni = npoints->data.i[i*npstep];
-
- cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
- cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
-
- cvGetCols( _M, &_Mi, pos, pos + ni );
- cvGetCols( _m, &_mi, pos, pos + ni );
-
- cvFindExtrinsicCameraParams2( &_Mi, &_mi, &_A, &_k, &_ri, &_ti );
- }
-
- // 3. run the optimization
- for(;;)
- {
- const CvMat* _param = 0;
- CvMat *_JtJ = 0, *_JtErr = 0;
- double* _errNorm = 0;
- bool proceed = solver.updateAlt( _param, _JtJ, _JtErr, _errNorm );
- double *param = solver.param->data.db, *pparam = solver.prevParam->data.db;
-
- if( flags & CV_CALIB_FIX_ASPECT_RATIO )
- {
- param[0] = param[1]*aspectRatio;
- pparam[0] = pparam[1]*aspectRatio;
- }
-
- A[0] = param[0]; A[4] = param[1];
- A[2] = param[2]; A[5] = param[3];
- k[0] = param[4]; k[1] = param[5]; k[2] = param[6];
- k[3] = param[7];
- k[4] = param[8];
-
- if( !proceed )
- break;
-
- for( i = 0, pos = 0; i < nimages; i++, pos += ni )
- {
- CvMat _Mi, _mi, _ri, _ti, _dpdr, _dpdt, _dpdf, _dpdc, _dpdk, _mp, _part;
- ni = npoints->data.i[i*npstep];
-
- cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
- cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
-
- cvGetCols( _M, &_Mi, pos, pos + ni );
- cvGetCols( _m, &_mi, pos, pos + ni );
-
- _Je->rows = _Ji->rows = _err->rows = ni*2;
- cvGetCols( _Je, &_dpdr, 0, 3 );
- cvGetCols( _Je, &_dpdt, 3, 6 );
- cvGetCols( _Ji, &_dpdf, 0, 2 );
- cvGetCols( _Ji, &_dpdc, 2, 4 );
- cvGetCols( _Ji, &_dpdk, 4, NINTRINSIC );
- cvReshape( _err, &_mp, 2, 1 );
-
- if( _JtJ || _JtErr )
- {
- cvProjectPoints2( &_Mi, &_ri, &_ti, &_A, &_k, &_mp, &_dpdr, &_dpdt,
- (flags & CV_CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
- (flags & CV_CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
- (flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
- }
- else
- cvProjectPoints2( &_Mi, &_ri, &_ti, &_A, &_k, &_mp );
-
- cvSub( &_mp, &_mi, &_mp );
-
- if( _JtJ || _JtErr )
- {
- cvGetSubRect( _JtJ, &_part, cvRect(0,0,NINTRINSIC,NINTRINSIC) );
- cvGEMM( _Ji, _Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
-
- cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,NINTRINSIC+i*6,6,6) );
- cvGEMM( _Je, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );
-
- cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,0,6,NINTRINSIC) );
- cvGEMM( _Ji, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );
-
- cvGetRows( _JtErr, &_part, 0, NINTRINSIC );
- cvGEMM( _Ji, _err, 1, &_part, 1, &_part, CV_GEMM_A_T );
-
- cvGetRows( _JtErr, &_part, NINTRINSIC + i*6, NINTRINSIC + (i+1)*6 );
- cvGEMM( _Je, _err, 1, 0, 0, &_part, CV_GEMM_A_T );
- }
-
- if( _errNorm )
- {
- double errNorm = cvNorm( &_mp, 0, CV_L2 );
- *_errNorm += errNorm*errNorm;
- }
- }
- }
-
- // 4. store the results
- cvConvert( &_A, cameraMatrix );
- cvConvert( &_k, distCoeffs );
-
- for( i = 0; i < nimages; i++ )
- {
- CvMat src, dst;
- if( rvecs )
- {
- src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 );
- if( rvecs->rows == nimages && rvecs->cols*CV_MAT_CN(rvecs->type) == 9 )
- {
- dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type),
- rvecs->data.ptr + rvecs->step*i );
- cvRodrigues2( &src, &_A );
- cvConvert( &_A, &dst );
- }
- else
- {
- dst = cvMat( 3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 ?
- rvecs->data.ptr + i*CV_ELEM_SIZE(rvecs->type) :
- rvecs->data.ptr + rvecs->step*i );
- cvConvert( &src, &dst );
- }
- }
- if( tvecs )
- {
- src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );
- dst = cvMat( 3, 1, CV_MAT_TYPE(tvecs->type), tvecs->rows == 1 ?
- tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :
- tvecs->data.ptr + tvecs->step*i );
- cvConvert( &src, &dst );
- }
- }
-
- __END__;
-
- cvReleaseMat( &_M );
- cvReleaseMat( &_m );
- cvReleaseMat( &_Ji );
- cvReleaseMat( &_Je );
- cvReleaseMat( &_err );
-}
-
-
-void cvCalibrationMatrixValues( const CvMat *calibMatr, CvSize imgSize,
- double apertureWidth, double apertureHeight, double *fovx, double *fovy,
- double *focalLength, CvPoint2D64f *principalPoint, double *pasp )
-{
- double alphax, alphay, mx, my;
- int imgWidth = imgSize.width, imgHeight = imgSize.height;
-
- CV_FUNCNAME("cvCalibrationMatrixValues");
- __BEGIN__;
-
- /* Validate parameters. */
-
- if(calibMatr == 0)
- CV_ERROR(CV_StsNullPtr, "Some of parameters is a NULL pointer!");
-
- if(!CV_IS_MAT(calibMatr))
- CV_ERROR(CV_StsUnsupportedFormat, "Input parameters must be a matrices!");
-
- if(calibMatr->cols != 3 || calibMatr->rows != 3)
- CV_ERROR(CV_StsUnmatchedSizes, "Size of matrices must be 3x3!");
-
- alphax = cvmGet(calibMatr, 0, 0);
- alphay = cvmGet(calibMatr, 1, 1);
- assert(imgWidth != 0 && imgHeight != 0 && alphax != 0.0 && alphay != 0.0);
-
- /* Calculate pixel aspect ratio. */
- if(pasp)
- *pasp = alphay / alphax;
-
- /* Calculate number of pixel per realworld unit. */
-
- if(apertureWidth != 0.0 && apertureHeight != 0.0) {
- mx = imgWidth / apertureWidth;
- my = imgHeight / apertureHeight;
- } else {
- mx = 1.0;
- my = *pasp;
- }
-
- /* Calculate fovx and fovy. */
-
- if(fovx)
- *fovx = 2 * atan(imgWidth / (2 * alphax)) * 180.0 / CV_PI;
-
- if(fovy)
- *fovy = 2 * atan(imgHeight / (2 * alphay)) * 180.0 / CV_PI;
-
- /* Calculate focal length. */
-
- if(focalLength)
- *focalLength = alphax / mx;
-
- /* Calculate principle point. */
-
- if(principalPoint)
- *principalPoint = cvPoint2D64f(cvmGet(calibMatr, 0, 2) / mx, cvmGet(calibMatr, 1, 2) / my);
-
- __END__;
-}
-
-
-//////////////////////////////// Stereo Calibration ///////////////////////////////////
-
-static int dbCmp( const void* _a, const void* _b )
-{
- double a = *(const double*)_a;
- double b = *(const double*)_b;
-
- return (a > b) - (a < b);
-}
-
-
-void cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1,
- const CvMat* _imagePoints2, const CvMat* _npoints,
- CvMat* _cameraMatrix1, CvMat* _distCoeffs1,
- CvMat* _cameraMatrix2, CvMat* _distCoeffs2,
- CvSize imageSize, CvMat* _R, CvMat* _T,
- CvMat* _E, CvMat* _F,
- CvTermCriteria termCrit, int flags )
-{
- const int NINTRINSIC = 9;
- CvMat* npoints = 0;
- CvMat* err = 0;
- CvMat* J_LR = 0;
- CvMat* Je = 0;
- CvMat* Ji = 0;
- CvMat* imagePoints[2] = {0,0};
- CvMat* objectPoints = 0;
- CvMat* RT0 = 0;
- CvLevMarq solver;
-
- CV_FUNCNAME( "cvStereoCalibrate" );
-
- __BEGIN__;
-
- double A[2][9], dk[2][5]={{0,0,0,0,0},{0,0,0,0,0}}, rlr[9];
- CvMat K[2], Dist[2], om_LR, T_LR;
- CvMat R_LR = cvMat(3, 3, CV_64F, rlr);
- int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;
- int nparams;
- bool recomputeIntrinsics = false;
- double aspectRatio[2] = {0,0};
-
- CV_ASSERT( CV_IS_MAT(_imagePoints1) && CV_IS_MAT(_imagePoints2) &&
- CV_IS_MAT(_objectPoints) && CV_IS_MAT(_npoints) &&
- CV_IS_MAT(_R) && CV_IS_MAT(_T) );
-
- CV_ASSERT( CV_ARE_TYPES_EQ(_imagePoints1, _imagePoints2) &&
- CV_ARE_DEPTHS_EQ(_imagePoints1, _objectPoints) );
-
- CV_ASSERT( (_npoints->cols == 1 || _npoints->rows == 1) &&
- CV_MAT_TYPE(_npoints->type) == CV_32SC1 );
-
- nimages = _npoints->cols + _npoints->rows - 1;
- npoints = cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type );
- cvCopy( _npoints, npoints );
-
- for( i = 0, pointsTotal = 0; i < nimages; i++ )
- {
- maxPoints = MAX(maxPoints, npoints->data.i[i]);
- pointsTotal += npoints->data.i[i];
- }
-
- objectPoints = cvCreateMat( _objectPoints->rows, _objectPoints->cols,
- CV_64FC(CV_MAT_CN(_objectPoints->type)));
- cvConvert( _objectPoints, objectPoints );
- cvReshape( objectPoints, objectPoints, 3, 1 );
-
- for( k = 0; k < 2; k++ )
- {
- const CvMat* points = k == 0 ? _imagePoints1 : _imagePoints2;
- const CvMat* cameraMatrix = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
- const CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
-
- int cn = CV_MAT_CN(_imagePoints1->type);
- CV_ASSERT( (CV_MAT_DEPTH(_imagePoints1->type) == CV_32F ||
- CV_MAT_DEPTH(_imagePoints1->type) == CV_64F) &&
- ((_imagePoints1->rows == pointsTotal && _imagePoints1->cols*cn == 2) ||
- (_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );
-
- K[k] = cvMat(3,3,CV_64F,A[k]);
- Dist[k] = cvMat(1,5,CV_64F,dk[k]);
-
- imagePoints[k] = cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type)));
- cvConvert( points, imagePoints[k] );
- cvReshape( imagePoints[k], imagePoints[k], 2, 1 );
-
- if( flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS|
- CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_FIX_FOCAL_LENGTH) )
- cvConvert( cameraMatrix, &K[k] );
-
- if( flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS|
- CV_CALIB_FIX_K1|CV_CALIB_FIX_K2|CV_CALIB_FIX_K3) )
- {
- CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
- cvConvert( distCoeffs, &tdist );
- }
-
- if( !(flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS)))
- {
- cvCalibrateCamera2( objectPoints, imagePoints[k],
- npoints, imageSize, &K[k], &Dist[k], 0, 0, flags );
- }
- }
-
- if( flags & CV_CALIB_SAME_FOCAL_LENGTH )
- {
- static const int avg_idx[] = { 0, 4, 2, 5, -1 };
- for( k = 0; avg_idx[k] >= 0; k++ )
- A[0][avg_idx[k]] = A[1][avg_idx[k]] = (A[0][avg_idx[k]] + A[1][avg_idx[k]])*0.5;
- }
-
- if( flags & CV_CALIB_FIX_ASPECT_RATIO )
- {
- for( k = 0; k < 2; k++ )
- aspectRatio[k] = A[k][0]/A[k][4];
- }
-
- recomputeIntrinsics = (flags & CV_CALIB_FIX_INTRINSIC) == 0;
-
- err = cvCreateMat( maxPoints*2, 1, CV_64F );
- Je = cvCreateMat( maxPoints*2, 6, CV_64F );
- J_LR = cvCreateMat( maxPoints*2, 6, CV_64F );
- Ji = cvCreateMat( maxPoints*2, NINTRINSIC, CV_64F );
- cvZero( Ji );
-
- // we optimize for the inter-camera R(3),t(3), then, optionally,
- // for intrinisic parameters of each camera ((fx,fy,cx,cy,k1,k2,p1,p2) ~ 8 parameters).
- nparams = 6*(nimages+1) + (recomputeIntrinsics ? NINTRINSIC*2 : 0);
-
- // storage for initial [om(R){i}|t{i}] (in order to compute the median for each component)
- RT0 = cvCreateMat( 6, nimages, CV_64F );
-
- solver.init( nparams, 0, termCrit );
- if( recomputeIntrinsics )
- {
- uchar* imask = solver.mask->data.ptr + nparams - NINTRINSIC*2;
- if( flags & CV_CALIB_FIX_ASPECT_RATIO )
- imask[0] = imask[NINTRINSIC] = 0;
- if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
- imask[0] = imask[1] = imask[NINTRINSIC] = imask[NINTRINSIC+1] = 0;
- if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )
- imask[2] = imask[3] = imask[NINTRINSIC+2] = imask[NINTRINSIC+3] = 0;
- if( flags & CV_CALIB_ZERO_TANGENT_DIST )
- imask[6] = imask[7] = imask[NINTRINSIC+6] = imask[NINTRINSIC+7] = 0;
- if( flags & CV_CALIB_FIX_K1 )
- imask[4] = imask[NINTRINSIC+4] = 0;
- if( flags & CV_CALIB_FIX_K2 )
- imask[5] = imask[NINTRINSIC+5] = 0;
- if( flags & CV_CALIB_FIX_K3 )
- imask[8] = imask[NINTRINSIC+8] = 0;
- }
-
- /*
- Compute initial estimate of pose
-
- For each image, compute:
- R(om) is the rotation matrix of om
- om(R) is the rotation vector of R
- R_ref = R(om_right) * R(om_left)'
- T_ref_list = [T_ref_list; T_right - R_ref * T_left]
- om_ref_list = {om_ref_list; om(R_ref)]
-
- om = median(om_ref_list)
- T = median(T_ref_list)
- */
- for( i = ofs = 0; i < nimages; ofs += ni, i++ )
- {
- ni = npoints->data.i[i];
- CvMat objpt_i;
- double _om[2][3], r[2][9], t[2][3];
- CvMat om[2], R[2], T[2], imgpt_i[2];
-
- objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
- for( k = 0; k < 2; k++ )
- {
- imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
- om[k] = cvMat(3, 1, CV_64F, _om[k]);
- R[k] = cvMat(3, 3, CV_64F, r[k]);
- T[k] = cvMat(3, 1, CV_64F, t[k]);
-
- // FIXME: here we ignore activePoints[k] because of
- // the limited API of cvFindExtrnisicCameraParams2
- cvFindExtrinsicCameraParams2( &objpt_i, &imgpt_i[k], &K[k], &Dist[k], &om[k], &T[k] );
- cvRodrigues2( &om[k], &R[k] );
- if( k == 0 )
- {
- // save initial om_left and T_left
- solver.param->data.db[(i+1)*6] = _om[0][0];
- solver.param->data.db[(i+1)*6 + 1] = _om[0][1];
- solver.param->data.db[(i+1)*6 + 2] = _om[0][2];
- solver.param->data.db[(i+1)*6 + 3] = t[0][0];
- solver.param->data.db[(i+1)*6 + 4] = t[0][1];
- solver.param->data.db[(i+1)*6 + 5] = t[0][2];
- }
- }
- cvGEMM( &R[1], &R[0], 1, 0, 0, &R[0], CV_GEMM_B_T );
- cvGEMM( &R[0], &T[0], -1, &T[1], 1, &T[1] );
- cvRodrigues2( &R[0], &T[0] );
- RT0->data.db[i] = t[0][0];
- RT0->data.db[i + nimages] = t[0][1];
- RT0->data.db[i + nimages*2] = t[0][2];
- RT0->data.db[i + nimages*3] = t[1][0];
- RT0->data.db[i + nimages*4] = t[1][1];
- RT0->data.db[i + nimages*5] = t[1][2];
- }
-
- // find the medians and save the first 6 parameters
- for( i = 0; i < 6; i++ )
- {
- qsort( RT0->data.db + i*nimages, nimages, CV_ELEM_SIZE(RT0->type), dbCmp );
- solver.param->data.db[i] = nimages % 2 != 0 ? RT0->data.db[i*nimages + nimages/2] :
- (RT0->data.db[i*nimages + nimages/2 - 1] + RT0->data.db[i*nimages + nimages/2])*0.5;
- }
-
- if( recomputeIntrinsics )
- for( k = 0; k < 2; k++ )
- {
- double* iparam = solver.param->data.db + (nimages+1)*6 + k*NINTRINSIC;
- if( flags & CV_CALIB_ZERO_TANGENT_DIST )
- dk[k][2] = dk[k][3] = 0;
- iparam[0] = A[k][0]; iparam[1] = A[k][4]; iparam[2] = A[k][2]; iparam[3] = A[k][5];
- iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];
- iparam[7] = dk[k][3]; iparam[8] = dk[k][4];
- }
-
- om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);
- T_LR = cvMat(3, 1, CV_64F, solver.param->data.db + 3);
-
- for(;;)
- {
- const CvMat* param = 0;
- CvMat tmpimagePoints;
- CvMat *JtJ = 0, *JtErr = 0;
- double* errNorm = 0;
- double _omR[3], _tR[3];
- double _dr3dr1[9], _dr3dr2[9], /*_dt3dr1[9],*/ _dt3dr2[9], _dt3dt1[9], _dt3dt2[9];
- CvMat dr3dr1 = cvMat(3, 3, CV_64F, _dr3dr1);
- CvMat dr3dr2 = cvMat(3, 3, CV_64F, _dr3dr2);
- //CvMat dt3dr1 = cvMat(3, 3, CV_64F, _dt3dr1);
- CvMat dt3dr2 = cvMat(3, 3, CV_64F, _dt3dr2);
- CvMat dt3dt1 = cvMat(3, 3, CV_64F, _dt3dt1);
- CvMat dt3dt2 = cvMat(3, 3, CV_64F, _dt3dt2);
- CvMat om[2], T[2], imgpt_i[2];
- CvMat dpdrot_hdr, dpdt_hdr, dpdf_hdr, dpdc_hdr, dpdk_hdr;
- CvMat *dpdrot = &dpdrot_hdr, *dpdt = &dpdt_hdr, *dpdf = 0, *dpdc = 0, *dpdk = 0;
-
- if( !solver.updateAlt( param, JtJ, JtErr, errNorm ))
- break;
-
- cvRodrigues2( &om_LR, &R_LR );
- om[1] = cvMat(3,1,CV_64F,_omR);
- T[1] = cvMat(3,1,CV_64F,_tR);
-
- if( recomputeIntrinsics )
- {
- double* iparam = solver.param->data.db + (nimages+1)*6;
- double* ipparam = solver.prevParam->data.db + (nimages+1)*6;
- dpdf = &dpdf_hdr;
- dpdc = &dpdc_hdr;
- dpdk = &dpdk_hdr;
- if( flags & CV_CALIB_SAME_FOCAL_LENGTH )
- {
- iparam[NINTRINSIC] = iparam[0];
- iparam[NINTRINSIC+1] = iparam[1];
- ipparam[NINTRINSIC] = ipparam[0];
- ipparam[NINTRINSIC+1] = ipparam[1];
- }
- if( flags & CV_CALIB_FIX_ASPECT_RATIO )
- {
- iparam[0] = iparam[1]*aspectRatio[0];
- iparam[NINTRINSIC] = iparam[NINTRINSIC+1]*aspectRatio[1];
- ipparam[0] = ipparam[1]*aspectRatio[0];
- ipparam[NINTRINSIC] = ipparam[NINTRINSIC+1]*aspectRatio[1];
- }
- for( k = 0; k < 2; k++ )
- {
- A[k][0] = iparam[k*NINTRINSIC+0];
- A[k][4] = iparam[k*NINTRINSIC+1];
- A[k][2] = iparam[k*NINTRINSIC+2];
- A[k][5] = iparam[k*NINTRINSIC+3];
- dk[k][0] = iparam[k*NINTRINSIC+4];
- dk[k][1] = iparam[k*NINTRINSIC+5];
- dk[k][2] = iparam[k*NINTRINSIC+6];
- dk[k][3] = iparam[k*NINTRINSIC+7];
- dk[k][4] = iparam[k*NINTRINSIC+8];
- }
- }
-
- for( i = ofs = 0; i < nimages; ofs += ni, i++ )
- {
- ni = npoints->data.i[i];
- CvMat objpt_i, _part;
-
- om[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6);
- T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3);
-
- if( JtJ || JtErr )
- cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1], &dr3dr1, 0,
- &dr3dr2, 0, 0, &dt3dt1, &dt3dr2, &dt3dt2 );
- else
- cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1] );
-
- objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
- err->rows = Je->rows = J_LR->rows = Ji->rows = ni*2;
- cvReshape( err, &tmpimagePoints, 2, 1 );
-
- cvGetCols( Ji, &dpdf_hdr, 0, 2 );
- cvGetCols( Ji, &dpdc_hdr, 2, 4 );
- cvGetCols( Ji, &dpdk_hdr, 4, NINTRINSIC );
- cvGetCols( Je, &dpdrot_hdr, 0, 3 );
- cvGetCols( Je, &dpdt_hdr, 3, 6 );
-
- for( k = 0; k < 2; k++ )
- {
- double maxErr, l2err;
- imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
-
- if( JtJ || JtErr )
- cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k],
- &tmpimagePoints, dpdrot, dpdt, dpdf, dpdc, dpdk,
- (flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio[k] : 0);
- else
- cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], &tmpimagePoints );
- cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints );
-
- l2err = cvNorm( &tmpimagePoints, 0, CV_L2 );
- maxErr = cvNorm( &tmpimagePoints, 0, CV_C );
-
- if( JtJ || JtErr )
- {
- int iofs = (nimages+1)*6 + k*NINTRINSIC, eofs = (i+1)*6;
- assert( JtJ && JtErr );
-
- if( k == 1 )
- {
- // d(err_{x|y}R) ~ de3
- // convert de3/{dr3,dt3} => de3{dr1,dt1} & de3{dr2,dt2}
- for( p = 0; p < ni*2; p++ )
- {
- CvMat de3dr3 = cvMat( 1, 3, CV_64F, Je->data.ptr + Je->step*p );
- CvMat de3dt3 = cvMat( 1, 3, CV_64F, de3dr3.data.db + 3 );
- CvMat de3dr2 = cvMat( 1, 3, CV_64F, J_LR->data.ptr + J_LR->step*p );
- CvMat de3dt2 = cvMat( 1, 3, CV_64F, de3dr2.data.db + 3 );
- double _de3dr1[3], _de3dt1[3];
- CvMat de3dr1 = cvMat( 1, 3, CV_64F, _de3dr1 );
- CvMat de3dt1 = cvMat( 1, 3, CV_64F, _de3dt1 );
-
- cvMatMul( &de3dr3, &dr3dr1, &de3dr1 );
- cvMatMul( &de3dt3, &dt3dt1, &de3dt1 );
-
- cvMatMul( &de3dr3, &dr3dr2, &de3dr2 );
- cvMatMulAdd( &de3dt3, &dt3dr2, &de3dr2, &de3dr2 );
-
- cvMatMul( &de3dt3, &dt3dt2, &de3dt2 );
-
- cvCopy( &de3dr1, &de3dr3 );
- cvCopy( &de3dt1, &de3dt3 );
- }
-
- cvGetSubRect( JtJ, &_part, cvRect(0, 0, 6, 6) );
- cvGEMM( J_LR, J_LR, 1, &_part, 1, &_part, CV_GEMM_A_T );
-
- cvGetSubRect( JtJ, &_part, cvRect(eofs, 0, 6, 6) );
- cvGEMM( J_LR, Je, 1, 0, 0, &_part, CV_GEMM_A_T );
-
- cvGetRows( JtErr, &_part, 0, 6 );
- cvGEMM( J_LR, err, 1, &_part, 1, &_part, CV_GEMM_A_T );
- }
-
- cvGetSubRect( JtJ, &_part, cvRect(eofs, eofs, 6, 6) );
- cvGEMM( Je, Je, 1, &_part, 1, &_part, CV_GEMM_A_T );
-
- cvGetRows( JtErr, &_part, eofs, eofs + 6 );
- cvGEMM( Je, err, 1, &_part, 1, &_part, CV_GEMM_A_T );
-
- if( recomputeIntrinsics )
- {
- cvGetSubRect( JtJ, &_part, cvRect(iofs, iofs, NINTRINSIC, NINTRINSIC) );
- cvGEMM( Ji, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
- cvGetSubRect( JtJ, &_part, cvRect(iofs, eofs, NINTRINSIC, 6) );
- cvGEMM( Je, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
- if( k == 1 )
- {
- cvGetSubRect( JtJ, &_part, cvRect(iofs, 0, NINTRINSIC, 6) );
- cvGEMM( J_LR, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
- }
- cvGetRows( JtErr, &_part, iofs, iofs + NINTRINSIC );
- cvGEMM( Ji, err, 1, &_part, 1, &_part, CV_GEMM_A_T );
- }
- }
-
- if( errNorm )
- *errNorm += l2err*l2err;
- }
- }
- }
-
- cvRodrigues2( &om_LR, &R_LR );
- if( _R->rows == 1 || _R->cols == 1 )
- cvConvert( &om_LR, _R );
- else
- cvConvert( &R_LR, _R );
- cvConvert( &T_LR, _T );
-
- if( recomputeIntrinsics )
- {
- cvConvert( &K[0], _cameraMatrix1 );
- cvConvert( &K[1], _cameraMatrix2 );
-
- for( k = 0; k < 2; k++ )
- {
- CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
- CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
- cvConvert( &tdist, distCoeffs );
- }
- }
-
- if( _E || _F )
- {
- double* t = T_LR.data.db;
- double tx[] =
- {
- 0, -t[2], t[1],
- t[2], 0, -t[0],
- -t[1], t[0], 0
- };
- CvMat Tx = cvMat(3, 3, CV_64F, tx);
- double e[9], f[9];
- CvMat E = cvMat(3, 3, CV_64F, e);
- CvMat F = cvMat(3, 3, CV_64F, f);
- cvMatMul( &Tx, &R_LR, &E );
- if( _E )
- cvConvert( &E, _E );
- if( _F )
- {
- double ik[9];
- CvMat iK = cvMat(3, 3, CV_64F, ik);
- cvInvert(&K[1], &iK);
- cvGEMM( &iK, &E, 1, 0, 0, &E, CV_GEMM_A_T );
- cvInvert(&K[0], &iK);
- cvMatMul(&E, &iK, &F);
- cvConvertScale( &F, _F, fabs(f[8]) > 0 ? 1./f[8] : 1 );
- }
- }
-
- __END__;
-
- cvReleaseMat( &npoints );
- cvReleaseMat( &err );
- cvReleaseMat( &J_LR );
- cvReleaseMat( &Je );
- cvReleaseMat( &Ji );
- cvReleaseMat( &RT0 );
- cvReleaseMat( &objectPoints );
- cvReleaseMat( &imagePoints[0] );
- cvReleaseMat( &imagePoints[1] );
-}
-
-
-void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
- const CvMat* _distCoeffs1, const CvMat* _distCoeffs2,
- CvSize imageSize, const CvMat* _R, const CvMat* _T,
- CvMat* _R1, CvMat* _R2, CvMat* _P1, CvMat* _P2,
- CvMat* _Q, int flags )
-{
- double _om[3], _t[3], _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];
- double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3];
- CvMat om = cvMat(3, 1, CV_64F, _om);
- CvMat t = cvMat(3, 1, CV_64F, _t);
- CvMat uu = cvMat(3, 1, CV_64F, _uu);
- CvMat r_r = cvMat(3, 3, CV_64F, _r_r);
- CvMat pp = cvMat(3, 4, CV_64F, _pp);
- CvMat ww = cvMat(3, 1, CV_64F, _ww); // temps
- CvMat wR = cvMat(3, 3, CV_64F, _wr);
- CvMat Z = cvMat(3, 1, CV_64F, _z);
- CvMat Ri = cvMat(3, 3, CV_64F, _ri);
- double nx = imageSize.width, ny = imageSize.height;
- int i, k;
-
- if( _R->rows == 3 && _R->cols == 3 )
- cvRodrigues2(_R, &om); // get vector rotation
- else
- cvConvert(_R, &om); // it's already a rotation vector
- cvConvertScale(&om, &om, -0.5); // get average rotation
- cvRodrigues2(&om, &r_r); // rotate cameras to same orientation by averaging
- cvMatMul(&r_r, _T, &t);
-
- int idx = fabs(_t[0]) > fabs(_t[1]) ? 0 : 1;
- double c = _t[idx], nt = cvNorm(&t, 0, CV_L2);
- _uu[idx] = c > 0 ? 1 : -1;
-
- // calculate global Z rotation
- cvCrossProduct(&t,&uu,&ww);
- double nw = cvNorm(&ww, 0, CV_L2);
- cvConvertScale(&ww, &ww, acos(fabs(c)/nt)/nw);
- cvRodrigues2(&ww, &wR);
-
- // apply to both views
- cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, CV_GEMM_B_T);
- cvConvert( &Ri, _R1 );
- cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, 0);
- cvConvert( &Ri, _R2 );
- cvMatMul(&r_r, _T, &t);
-
- // calculate projection/camera matrices
- // these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
- double fc_new = DBL_MAX;
- CvPoint2D64f cc_new[2] = {{0,0}, {0,0}};
-
- for( k = 0; k < 2; k++ )
- {
- const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
- const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2;
- CvPoint2D32f _pts[4];
- CvPoint3D32f _pts_3[4];
- CvMat pts = cvMat(1, 4, CV_32FC2, _pts);
- CvMat pts_3 = cvMat(1, 4, CV_32FC3, _pts_3);
- double fc, dk1 = Dk ? cvmGet(Dk, 0, 0) : 0;
-
- fc = cvmGet(A,idx^1,idx^1);
- if( dk1 < 0 )
- fc *= 1 + 0.2*dk1*(nx*nx + ny*ny)/(8*fc*fc);
- fc_new = MIN(fc_new, fc);
-
- for( i = 0; i < 4; i++ )
- {
- _pts[i].x = (float)(((i % 2) + 0.5)*nx*0.5);
- _pts[i].y = (float)(((i / 2) + 0.5)*ny*0.5);
- }
- cvUndistortPoints( &pts, &pts, A, Dk, 0, 0 );
- cvConvertPointsHomogeneous( &pts, &pts_3 );
- cvProjectPoints2( &pts_3, k == 0 ? _R1 : _R2, &Z, A, 0, &pts );
- CvScalar avg = cvAvg(&pts);
- cc_new[k].x = avg.val[0];
- cc_new[k].y = avg.val[1];
- }
-
- // vertical focal length must be the same for both images to keep the epipolar constraint
- // (for horizontal epipolar lines -- TBD: check for vertical epipolar lines)
- // use fy for fx also, for simplicity
-
- // For simplicity, set the principal points for both cameras to be the average
- // of the two principal points (either one of or both x- and y- coordinates)
- if( flags & CV_CALIB_ZERO_DISPARITY )
- {
- cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
- cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
- }
- else if( idx == 0 ) // horizontal stereo
- cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
- else // vertical stereo
- cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
-
- cvZero( &pp );
- _pp[0][0] = _pp[1][1] = fc_new;
- _pp[0][2] = cc_new[0].x;
- _pp[1][2] = cc_new[0].y;
- _pp[2][2] = 1;
- cvConvert(&pp, _P1);
-
- _pp[0][2] = cc_new[1].x;
- _pp[1][2] = cc_new[1].y;
- _pp[idx][3] = _t[idx]*fc_new; // baseline * focal length
- cvConvert(&pp, _P2);
-
- if( _Q )
- {
- double q[] =
- {
- 1, 0, 0, -cc_new[0].x,
- 0, 1, 0, -cc_new[0].y,
- 0, 0, 0, fc_new,
- 0, 0, 1./_t[idx],
- (idx == 0 ? cc_new[0].x - cc_new[1].x : cc_new[0].y - cc_new[1].y)/_t[idx]
- };
- CvMat Q = cvMat(4, 4, CV_64F, q);
- cvConvert( &Q, _Q );
- }
-}
-
-
-CV_IMPL int
-cvStereoRectifyUncalibrated(
- const CvMat* _points1, const CvMat* _points2,
- const CvMat* F0, CvSize imgSize, CvMat* _H1, CvMat* _H2, double threshold )
-{
- int result = 0;
- CvMat* _m1 = 0;
- CvMat* _m2 = 0;
- CvMat* _lines1 = 0;
- CvMat* _lines2 = 0;
-
- CV_FUNCNAME( "cvStereoCalcHomographiesFromF" );
-
- __BEGIN__;
-
- int i, j, npoints;
- double cx, cy;
- double u[9], v[9], w[9], f[9], h1[9], h2[9], h0[9], e2[3];
- CvMat E2 = cvMat( 3, 1, CV_64F, e2 );
- CvMat U = cvMat( 3, 3, CV_64F, u );
- CvMat V = cvMat( 3, 3, CV_64F, v );
- CvMat W = cvMat( 3, 3, CV_64F, w );
- CvMat F = cvMat( 3, 3, CV_64F, f );
- CvMat H1 = cvMat( 3, 3, CV_64F, h1 );
- CvMat H2 = cvMat( 3, 3, CV_64F, h2 );
- CvMat H0 = cvMat( 3, 3, CV_64F, h0 );
-
- CvPoint2D64f* m1;
- CvPoint2D64f* m2;
- CvPoint3D64f* lines1;
- CvPoint3D64f* lines2;
-
- CV_ASSERT( CV_IS_MAT(_points1) && CV_IS_MAT(_points2) &&
- (_points1->rows == 1 || _points1->cols == 1) &&
- (_points2->rows == 1 || _points2->cols == 1) &&
- CV_ARE_SIZES_EQ(_points1, _points2) );
-
- npoints = _points1->rows * _points1->cols * CV_MAT_CN(_points1->type) / 2;
-
- _m1 = cvCreateMat( _points1->rows, _points1->cols, CV_64FC(CV_MAT_CN(_points1->type)) );
- _m2 = cvCreateMat( _points2->rows, _points2->cols, CV_64FC(CV_MAT_CN(_points2->type)) );
- _lines1 = cvCreateMat( 1, npoints, CV_64FC3 );
- _lines2 = cvCreateMat( 1, npoints, CV_64FC3 );
-
- cvConvert( F0, &F );
-
- cvSVD( (CvMat*)&F, &W, &U, &V, CV_SVD_U_T + CV_SVD_V_T );
- W.data.db[8] = 0.;
- cvGEMM( &U, &W, 1, 0, 0, &W, CV_GEMM_A_T );
- cvMatMul( &W, &V, &F );
-
- cx = cvRound( (imgSize.width-1)*0.5 );
- cy = cvRound( (imgSize.height-1)*0.5 );
-
- cvZero( _H1 );
- cvZero( _H2 );
-
- cvConvert( _points1, _m1 );
- cvConvert( _points2, _m2 );
- cvReshape( _m1, _m1, 2, 1 );
- cvReshape( _m1, _m1, 2, 1 );
-
- m1 = (CvPoint2D64f*)_m1->data.ptr;
- m2 = (CvPoint2D64f*)_m2->data.ptr;
- lines1 = (CvPoint3D64f*)_lines1->data.ptr;
- lines2 = (CvPoint3D64f*)_lines2->data.ptr;
-
- if( threshold > 0 )
- {
- cvComputeCorrespondEpilines( _m1, 1, &F, _lines1 );
- cvComputeCorrespondEpilines( _m2, 2, &F, _lines2 );
-
- // measure distance from points to the corresponding epilines, mark outliers
- for( i = j = 0; i < npoints; i++ )
- {
- if( fabs(m1[i].x*lines2[i].x +
- m1[i].y*lines2[i].y +
- lines2[i].z) <= threshold &&
- fabs(m2[i].x*lines1[i].x +
- m2[i].y*lines1[i].y +
- lines1[i].z) <= threshold )
- {
- if( j > i )
- {
- m1[j] = m1[i];
- m2[j] = m2[i];
- }
- j++;
- }
- }
-
- npoints = j;
- if( npoints == 0 )
- EXIT;
- }
-
- {
- _m1->cols = _m2->cols = npoints;
- memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
- cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );
-
- double t[] =
- {
- 1, 0, -cx,
- 0, 1, -cy,
- 0, 0, 1
- };
- CvMat T = cvMat(3, 3, CV_64F, t);
- cvMatMul( &T, &E2, &E2 );
-
- int mirror = e2[0] < 0;
- double d = MAX(sqrt(e2[0]*e2[0] + e2[1]*e2[1]),DBL_EPSILON);
- double alpha = e2[0]/d;
- double beta = e2[1]/d;
- double r[] =
- {
- alpha, beta, 0,
- -beta, alpha, 0,
- 0, 0, 1
- };
- CvMat R = cvMat(3, 3, CV_64F, r);
- cvMatMul( &R, &T, &T );
- cvMatMul( &R, &E2, &E2 );
- double invf = fabs(e2[2]) < 1e-6*fabs(e2[0]) ? 0 : -e2[2]/e2[0];
- double k[] =
- {
- 1, 0, 0,
- 0, 1, 0,
- invf, 0, 1
- };
- CvMat K = cvMat(3, 3, CV_64F, k);
- cvMatMul( &K, &T, &H2 );
- cvMatMul( &K, &E2, &E2 );
-
- double it[] =
- {
- 1, 0, cx,
- 0, 1, cy,
- 0, 0, 1
- };
- CvMat iT = cvMat( 3, 3, CV_64F, it );
- cvMatMul( &iT, &H2, &H2 );
-
- memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
- cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );
-
- double e2_x[] =
- {
- 0, -e2[2], e2[1],
- e2[2], 0, -e2[0],
- -e2[1], e2[0], 0
- };
- double e2_111[] =
- {
- e2[0], e2[0], e2[0],
- e2[1], e2[1], e2[1],
- e2[2], e2[2], e2[2],
- };
- CvMat E2_x = cvMat(3, 3, CV_64F, e2_x);
- CvMat E2_111 = cvMat(3, 3, CV_64F, e2_111);
- cvMatMulAdd(&E2_x, &F, &E2_111, &H0 );
- cvMatMul(&H2, &H0, &H0);
- CvMat E1=cvMat(3, 1, CV_64F, V.data.db+6);
- cvMatMul(&H0, &E1, &E1);
-
- cvPerspectiveTransform( _m1, _m1, &H0 );
- cvPerspectiveTransform( _m2, _m2, &H2 );
- CvMat A = cvMat( 1, npoints, CV_64FC3, lines1 ), BxBy, B;
- double a[9], atb[3], x[3];
- CvMat AtA = cvMat( 3, 3, CV_64F, a );
- CvMat AtB = cvMat( 3, 1, CV_64F, atb );
- CvMat X = cvMat( 3, 1, CV_64F, x );
- cvConvertPointsHomogeneous( _m1, &A );
- cvReshape( &A, &A, 1, npoints );
- cvReshape( _m2, &BxBy, 1, npoints );
- cvGetCol( &BxBy, &B, 0 );
- cvGEMM( &A, &A, 1, 0, 0, &AtA, CV_GEMM_A_T );
- cvGEMM( &A, &B, 1, 0, 0, &AtB, CV_GEMM_A_T );
- cvSolve( &AtA, &AtB, &X, CV_SVD_SYM );
-
- double ha[] =
- {
- x[0], x[1], x[2],
- 0, 1, 0,
- 0, 0, 1
- };
- CvMat Ha = cvMat(3, 3, CV_64F, ha);
- cvMatMul( &Ha, &H0, &H1 );
- cvPerspectiveTransform( _m1, _m1, &Ha );
-
- if( mirror )
- {
- double mm[] = { -1, 0, cx*2, 0, -1, cy*2, 0, 0, 1 };
- CvMat MM = cvMat(3, 3, CV_64F, mm);
- cvMatMul( &MM, &H1, &H1 );
- cvMatMul( &MM, &H2, &H2 );
- }
-
- cvConvert( &H1, _H1 );
- cvConvert( &H2, _H2 );
-
- result = 1;
- }
-
- __END__;
-
- cvReleaseMat( &_m1 );
- cvReleaseMat( &_m2 );
- cvReleaseMat( &_lines1 );
- cvReleaseMat( &_lines2 );
-
- return result;
-}
-
-
-CV_IMPL void
-cvReprojectImageTo3D(
- const CvArr* disparityImage,
- CvArr* _3dImage, const CvMat* _Q,
- int handleMissingValues )
-{
- const double bigZ = 10000.;
- CV_FUNCNAME( "cvReprojectImageTo3D" );
-
- __BEGIN__;
-
- double q[4][4];
- CvMat Q = cvMat(4, 4, CV_64F, q);
- CvMat sstub, *src = cvGetMat( disparityImage, &sstub );
- CvMat dstub, *dst = cvGetMat( _3dImage, &dstub );
- int stype = CV_MAT_TYPE(src->type), dtype = CV_MAT_TYPE(dst->type);
- int x, y, rows = src->rows, cols = src->cols;
- float* sbuf = (float*)cvStackAlloc( cols*sizeof(sbuf[0]) );
- float* dbuf = (float*)cvStackAlloc( cols*3*sizeof(dbuf[0]) );
- double minDisparity = FLT_MAX;
-
- CV_ASSERT( CV_ARE_SIZES_EQ(src, dst) &&
- (CV_MAT_TYPE(stype) == CV_8UC1 || CV_MAT_TYPE(stype) == CV_16SC1 ||
- CV_MAT_TYPE(stype) == CV_32SC1 || CV_MAT_TYPE(stype) == CV_32FC1) &&
- (CV_MAT_TYPE(dtype) == CV_16SC3 || CV_MAT_TYPE(dtype) == CV_32SC3 ||
- CV_MAT_TYPE(dtype) == CV_32FC3) );
-
- cvConvert( _Q, &Q );
-
- // NOTE: here we quietly assume that at least one pixel in the disparity map is not defined.
- // and we set the corresponding Z's to some fixed big value.
- if( handleMissingValues )
- cvMinMaxLoc( disparityImage, &minDisparity, 0, 0, 0 );
-
- for( y = 0; y < rows; y++ )
- {
- const float* sptr = (const float*)(src->data.ptr + src->step*y);
- float* dptr0 = (float*)(dst->data.ptr + dst->step*y), *dptr = dptr0;
- double qx = q[0][1]*y + q[0][3], qy = q[1][1]*y + q[1][3];
- double qz = q[2][1]*y + q[2][3], qw = q[3][1]*y + q[3][3];
-
- if( stype == CV_8UC1 )
- {
- const uchar* sptr0 = (const uchar*)sptr;
- for( x = 0; x < cols; x++ )
- sbuf[x] = (float)sptr0[x];
- sptr = sbuf;
- }
- else if( stype == CV_16SC1 )
- {
- const short* sptr0 = (const short*)sptr;
- for( x = 0; x < cols; x++ )
- sbuf[x] = (float)sptr0[x];
- sptr = sbuf;
- }
- else if( stype == CV_32SC1 )
- {
- const int* sptr0 = (const int*)sptr;
- for( x = 0; x < cols; x++ )
- sbuf[x] = (float)sptr0[x];
- sptr = sbuf;
- }
-
- if( dtype != CV_32FC3 )
- dptr = dbuf;
-
- for( x = 0; x < cols; x++, qx += q[0][0], qy += q[1][0], qz += q[2][0], qw += q[3][0] )
- {
- double d = sptr[x];
- double iW = 1./(qw + q[3][2]*d);
- double X = (qx + q[0][2]*d)*iW;
- double Y = (qy + q[1][2]*d)*iW;
- double Z = (qz + q[2][2]*d)*iW;
- if( fabs(d-minDisparity) <= FLT_EPSILON )
- Z = bigZ;
-
- dptr[x*3] = (float)X;
- dptr[x*3+1] = (float)Y;
- dptr[x*3+2] = (float)Z;
- }
-
- if( dtype == CV_16SC3 )
- {
- for( x = 0; x < cols*3; x++ )
- {
- int ival = cvRound(dptr[x]);
- ((short*)dptr0)[x] = CV_CAST_16S(ival);
- }
- }
- else if( dtype == CV_32SC3 )
- {
- for( x = 0; x < cols*3; x++ )
- {
- int ival = cvRound(dptr[x]);
- ((int*)dptr0)[x] = ival;
- }
- }
- }
-
- __END__;
-}
-
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cv.h"\r
+\r
+/*\r
+ This is a straight-forward port v3 of Matlab calibration engine by Jean-Yves Bouguet\r
+ that is (in a large extent) based on the paper:\r
+ Z. Zhang. "A flexible new technique for camera calibration".\r
+ IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.\r
+\r
+ The 1st initial port was done by Valery Mosyagin.\r
+*/\r
+\r
+CvLevMarq::CvLevMarq()\r
+{\r
+ mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = 0;\r
+ lambdaLg10 = 0; state = DONE;\r
+ criteria = cvTermCriteria(0,0,0);\r
+ iters = 0;\r
+ completeSymmFlag = false;\r
+}\r
+\r
+CvLevMarq::CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )\r
+{\r
+ mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = 0;\r
+ init(nparams, nerrs, criteria0, _completeSymmFlag);\r
+}\r
+\r
+void CvLevMarq::clear()\r
+{\r
+ cvReleaseMat(&mask);\r
+ cvReleaseMat(&prevParam);\r
+ cvReleaseMat(&param);\r
+ cvReleaseMat(&J);\r
+ cvReleaseMat(&err);\r
+ cvReleaseMat(&JtJ);\r
+ cvReleaseMat(&JtJN);\r
+ cvReleaseMat(&JtErr);\r
+ cvReleaseMat(&JtJV);\r
+ cvReleaseMat(&JtJW);\r
+}\r
+\r
+CvLevMarq::~CvLevMarq()\r
+{\r
+ clear();\r
+}\r
+\r
+void CvLevMarq::init( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )\r
+{\r
+ if( !param || param->rows != nparams || nerrs != (err ? err->rows : 0) )\r
+ clear();\r
+ mask = cvCreateMat( nparams, 1, CV_8U );\r
+ cvSet(mask, cvScalarAll(1));\r
+ prevParam = cvCreateMat( nparams, 1, CV_64F );\r
+ param = cvCreateMat( nparams, 1, CV_64F );\r
+ JtJ = cvCreateMat( nparams, nparams, CV_64F );\r
+ JtJN = cvCreateMat( nparams, nparams, CV_64F );\r
+ JtJV = cvCreateMat( nparams, nparams, CV_64F );\r
+ JtJW = cvCreateMat( nparams, 1, CV_64F );\r
+ JtErr = cvCreateMat( nparams, 1, CV_64F );\r
+ if( nerrs > 0 )\r
+ {\r
+ J = cvCreateMat( nerrs, nparams, CV_64F );\r
+ err = cvCreateMat( nerrs, 1, CV_64F );\r
+ }\r
+ prevErrNorm = DBL_MAX;\r
+ lambdaLg10 = -3;\r
+ criteria = criteria0;\r
+ if( criteria.type & CV_TERMCRIT_ITER )\r
+ criteria.max_iter = MIN(MAX(criteria.max_iter,1),1000);\r
+ else\r
+ criteria.max_iter = 30;\r
+ if( criteria.type & CV_TERMCRIT_EPS )\r
+ criteria.epsilon = MAX(criteria.epsilon, 0);\r
+ else\r
+ criteria.epsilon = DBL_EPSILON;\r
+ state = STARTED;\r
+ iters = 0;\r
+ completeSymmFlag = _completeSymmFlag;\r
+}\r
+\r
+bool CvLevMarq::update( const CvMat*& _param, CvMat*& _J, CvMat*& _err )\r
+{\r
+ double change;\r
+\r
+ _J = _err = 0;\r
+\r
+ assert( err != 0 );\r
+ if( state == DONE )\r
+ {\r
+ _param = param;\r
+ return false;\r
+ }\r
+\r
+ if( state == STARTED )\r
+ {\r
+ _param = param;\r
+ cvZero( J );\r
+ cvZero( err );\r
+ _J = J;\r
+ _err = err;\r
+ state = CALC_J;\r
+ return true;\r
+ }\r
+\r
+ if( state == CALC_J )\r
+ {\r
+ cvMulTransposed( J, JtJ, 1 );\r
+ cvGEMM( J, err, 1, 0, 0, JtErr, CV_GEMM_A_T );\r
+ cvCopy( param, prevParam );\r
+ step();\r
+ if( iters == 0 )\r
+ prevErrNorm = cvNorm(err, 0, CV_L2);\r
+ _param = param;\r
+ cvZero( err );\r
+ _err = err;\r
+ state = CHECK_ERR;\r
+ return true;\r
+ }\r
+\r
+ assert( state == CHECK_ERR );\r
+ errNorm = cvNorm( err, 0, CV_L2 );\r
+ if( errNorm > prevErrNorm )\r
+ {\r
+ lambdaLg10++;\r
+ step();\r
+ _param = param;\r
+ cvZero( err );\r
+ _err = err;\r
+ state = CHECK_ERR;\r
+ return true;\r
+ }\r
+\r
+ lambdaLg10 = MAX(lambdaLg10-1, -16);\r
+ if( ++iters >= criteria.max_iter ||\r
+ (change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )\r
+ {\r
+ _param = param;\r
+ state = DONE;\r
+ return true;\r
+ }\r
+\r
+ prevErrNorm = errNorm;\r
+ _param = param;\r
+ cvZero(J);\r
+ _J = J;\r
+ _err = err;\r
+ state = CALC_J;\r
+ return true;\r
+}\r
+\r
+\r
+bool CvLevMarq::updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, double*& _errNorm )\r
+{\r
+ double change;\r
+\r
+ assert( err == 0 );\r
+ if( state == DONE )\r
+ {\r
+ _param = param;\r
+ return false;\r
+ }\r
+\r
+ if( state == STARTED )\r
+ {\r
+ _param = param;\r
+ cvZero( JtJ );\r
+ cvZero( JtErr );\r
+ errNorm = 0;\r
+ _JtJ = JtJ;\r
+ _JtErr = JtErr;\r
+ _errNorm = &errNorm;\r
+ state = CALC_J;\r
+ return true;\r
+ }\r
+\r
+ if( state == CALC_J )\r
+ {\r
+ cvCopy( param, prevParam );\r
+ step();\r
+ _param = param;\r
+ prevErrNorm = errNorm;\r
+ errNorm = 0;\r
+ _errNorm = &errNorm;\r
+ state = CHECK_ERR;\r
+ return true;\r
+ }\r
+\r
+ assert( state == CHECK_ERR );\r
+ if( errNorm > prevErrNorm )\r
+ {\r
+ lambdaLg10++;\r
+ step();\r
+ _param = param;\r
+ errNorm = 0;\r
+ _errNorm = &errNorm;\r
+ state = CHECK_ERR;\r
+ return true;\r
+ }\r
+\r
+ lambdaLg10 = MAX(lambdaLg10-1, -16);\r
+ if( ++iters >= criteria.max_iter ||\r
+ (change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )\r
+ {\r
+ _param = param;\r
+ state = DONE;\r
+ return false;\r
+ }\r
+\r
+ prevErrNorm = errNorm;\r
+ cvZero( JtJ );\r
+ cvZero( JtErr );\r
+ _param = param;\r
+ _JtJ = JtJ;\r
+ _JtErr = JtErr;\r
+ state = CALC_J;\r
+ return true;\r
+}\r
+\r
+void CvLevMarq::step()\r
+{\r
+ const double LOG10 = log(10.);\r
+ double lambda = exp(lambdaLg10*LOG10);\r
+ int i, j, nparams = param->rows;\r
+\r
+ for( i = 0; i < nparams; i++ )\r
+ if( mask->data.ptr[i] == 0 )\r
+ {\r
+ double *row = JtJ->data.db + i*nparams, *col = JtJ->data.db + i;\r
+ for( j = 0; j < nparams; j++ )\r
+ row[j] = col[j*nparams] = 0;\r
+ JtErr->data.db[i] = 0;\r
+ }\r
+\r
+ if( !err )\r
+ cvCompleteSymm( JtJ, completeSymmFlag );\r
+#if 1\r
+ cvCopy( JtJ, JtJN );\r
+ for( i = 0; i < nparams; i++ )\r
+ JtJN->data.db[(nparams+1)*i] *= 1. + lambda;\r
+#else\r
+ cvSetIdentity(JtJN, cvRealScalar(lambda));\r
+ cvAdd( JtJ, JtJN, JtJN );\r
+#endif\r
+ cvSVD( JtJN, JtJW, 0, JtJV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );\r
+ cvSVBkSb( JtJW, JtJV, JtJV, JtErr, param, CV_SVD_U_T + CV_SVD_V_T );\r
+ for( i = 0; i < nparams; i++ )\r
+ param->data.db[i] = prevParam->data.db[i] - (mask->data.ptr[i] ? param->data.db[i] : 0);\r
+}\r
+\r
+// reimplementation of dAB.m\r
+CV_IMPL void\r
+cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB )\r
+{\r
+ CV_FUNCNAME( "cvCalcMatMulDeriv" );\r
+\r
+ __BEGIN__;\r
+\r
+ int i, j, M, N, L;\r
+ int bstep;\r
+\r
+ CV_ASSERT( CV_IS_MAT(A) && CV_IS_MAT(B) );\r
+ CV_ASSERT( CV_ARE_TYPES_EQ(A, B) &&\r
+ (CV_MAT_TYPE(A->type) == CV_32F || CV_MAT_TYPE(A->type) == CV_64F) );\r
+ CV_ASSERT( A->cols == B->rows );\r
+\r
+ M = A->rows;\r
+ L = A->cols;\r
+ N = B->cols;\r
+ bstep = B->step/CV_ELEM_SIZE(B->type);\r
+\r
+ if( dABdA )\r
+ {\r
+ CV_ASSERT( CV_ARE_TYPES_EQ(A, dABdA) &&\r
+ dABdA->rows == A->rows*B->cols && dABdA->cols == A->rows*A->cols );\r
+ }\r
+\r
+ if( dABdB )\r
+ {\r
+ CV_ASSERT( CV_ARE_TYPES_EQ(A, dABdB) &&\r
+ dABdB->rows == A->rows*B->cols && dABdB->cols == B->rows*B->cols );\r
+ }\r
+\r
+ if( CV_MAT_TYPE(A->type) == CV_32F )\r
+ {\r
+ for( i = 0; i < M*N; i++ )\r
+ {\r
+ int i1 = i / N, i2 = i % N;\r
+\r
+ if( dABdA )\r
+ {\r
+ float* dcda = (float*)(dABdA->data.ptr + dABdA->step*i);\r
+ const float* b = (const float*)B->data.ptr + i2;\r
+\r
+ for( j = 0; j < M*L; j++ )\r
+ dcda[j] = 0;\r
+ for( j = 0; j < L; j++ )\r
+ dcda[i1*L + j] = b[j*bstep];\r
+ }\r
+\r
+ if( dABdB )\r
+ {\r
+ float* dcdb = (float*)(dABdB->data.ptr + dABdB->step*i);\r
+ const float* a = (const float*)(A->data.ptr + A->step*i1);\r
+\r
+ for( j = 0; j < L*N; j++ )\r
+ dcdb[j] = 0;\r
+ for( j = 0; j < L; j++ )\r
+ dcdb[j*N + i2] = a[j];\r
+ }\r
+ }\r
+ }\r
+ else\r
+ {\r
+ for( i = 0; i < M*N; i++ )\r
+ {\r
+ int i1 = i / N, i2 = i % N;\r
+\r
+ if( dABdA )\r
+ {\r
+ double* dcda = (double*)(dABdA->data.ptr + dABdA->step*i);\r
+ const double* b = (const double*)B->data.ptr + i2;\r
+\r
+ for( j = 0; j < M*L; j++ )\r
+ dcda[j] = 0;\r
+ for( j = 0; j < L; j++ )\r
+ dcda[i1*L + j] = b[j*bstep];\r
+ }\r
+\r
+ if( dABdB )\r
+ {\r
+ double* dcdb = (double*)(dABdB->data.ptr + dABdB->step*i);\r
+ const double* a = (const double*)(A->data.ptr + A->step*i1);\r
+\r
+ for( j = 0; j < L*N; j++ )\r
+ dcdb[j] = 0;\r
+ for( j = 0; j < L; j++ )\r
+ dcdb[j*N + i2] = a[j];\r
+ }\r
+ }\r
+ }\r
+\r
+ __END__;\r
+}\r
+\r
+// reimplementation of compose_motion.m\r
+CV_IMPL void\r
+cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,\r
+ const CvMat* _rvec2, const CvMat* _tvec2,\r
+ CvMat* _rvec3, CvMat* _tvec3,\r
+ CvMat* dr3dr1, CvMat* dr3dt1,\r
+ CvMat* dr3dr2, CvMat* dr3dt2,\r
+ CvMat* dt3dr1, CvMat* dt3dt1,\r
+ CvMat* dt3dr2, CvMat* dt3dt2 )\r
+{\r
+ CV_FUNCNAME( "cvComposeRT" );\r
+\r
+ __BEGIN__;\r
+\r
+ double _r1[3], _r2[3];\r
+ double _R1[9], _d1[9*3], _R2[9], _d2[9*3];\r
+ CvMat r1 = cvMat(3,1,CV_64F,_r1), r2 = cvMat(3,1,CV_64F,_r2);\r
+ CvMat R1 = cvMat(3,3,CV_64F,_R1), R2 = cvMat(3,3,CV_64F,_R2);\r
+ CvMat dR1dr1 = cvMat(9,3,CV_64F,_d1), dR2dr2 = cvMat(9,3,CV_64F,_d2);\r
+\r
+ CV_ASSERT( CV_IS_MAT(_rvec1) && CV_IS_MAT(_rvec2) );\r
+\r
+ CV_ASSERT( CV_MAT_TYPE(_rvec1->type) == CV_32F ||\r
+ CV_MAT_TYPE(_rvec1->type) == CV_64F );\r
+\r
+ CV_ASSERT( _rvec1->rows == 3 && _rvec1->cols == 1 && CV_ARE_SIZES_EQ(_rvec1, _rvec2) );\r
+\r
+ cvConvert( _rvec1, &r1 );\r
+ cvConvert( _rvec2, &r2 );\r
+\r
+ cvRodrigues2( &r1, &R1, &dR1dr1 );\r
+ cvRodrigues2( &r2, &R2, &dR2dr2 );\r
+\r
+ if( _rvec3 || dr3dr1 || dr3dr2 )\r
+ {\r
+ double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3];\r
+ double _W1[9*3], _W2[3*3];\r
+ CvMat r3 = cvMat(3,1,CV_64F,_r3), R3 = cvMat(3,3,CV_64F,_R3);\r
+ CvMat dR3dR1 = cvMat(9,9,CV_64F,_dR3dR1), dR3dR2 = cvMat(9,9,CV_64F,_dR3dR2);\r
+ CvMat dr3dR3 = cvMat(3,9,CV_64F,_dr3dR3);\r
+ CvMat W1 = cvMat(3,9,CV_64F,_W1), W2 = cvMat(3,3,CV_64F,_W2);\r
+\r
+ cvMatMul( &R2, &R1, &R3 );\r
+ cvCalcMatMulDeriv( &R2, &R1, &dR3dR2, &dR3dR1 );\r
+\r
+ cvRodrigues2( &R3, &r3, &dr3dR3 );\r
+\r
+ if( _rvec3 )\r
+ cvConvert( &r3, _rvec3 );\r
+\r
+ if( dr3dr1 )\r
+ {\r
+ cvMatMul( &dr3dR3, &dR3dR1, &W1 );\r
+ cvMatMul( &W1, &dR1dr1, &W2 );\r
+ cvConvert( &W2, dr3dr1 );\r
+ }\r
+\r
+ if( dr3dr2 )\r
+ {\r
+ cvMatMul( &dr3dR3, &dR3dR2, &W1 );\r
+ cvMatMul( &W1, &dR2dr2, &W2 );\r
+ cvConvert( &W2, dr3dr2 );\r
+ }\r
+ }\r
+\r
+ if( dr3dt1 )\r
+ cvZero( dr3dt1 );\r
+ if( dr3dt2 )\r
+ cvZero( dr3dt2 );\r
+\r
+ if( _tvec3 || dt3dr2 || dt3dt1 )\r
+ {\r
+ double _t1[3], _t2[3], _t3[3], _dxdR2[3*9], _dxdt1[3*3], _W3[3*3];\r
+ CvMat t1 = cvMat(3,1,CV_64F,_t1), t2 = cvMat(3,1,CV_64F,_t2);\r
+ CvMat t3 = cvMat(3,1,CV_64F,_t3);\r
+ CvMat dxdR2 = cvMat(3, 9, CV_64F, _dxdR2);\r
+ CvMat dxdt1 = cvMat(3, 3, CV_64F, _dxdt1);\r
+ CvMat W3 = cvMat(3, 3, CV_64F, _W3);\r
+\r
+ CV_ASSERT( CV_IS_MAT(_tvec1) && CV_IS_MAT(_tvec2) );\r
+ CV_ASSERT( CV_ARE_SIZES_EQ(_tvec1, _tvec2) && CV_ARE_SIZES_EQ(_tvec1, _rvec1) );\r
+\r
+ cvConvert( _tvec1, &t1 );\r
+ cvConvert( _tvec2, &t2 );\r
+ cvMatMulAdd( &R2, &t1, &t2, &t3 );\r
+\r
+ if( _tvec3 )\r
+ cvConvert( &t3, _tvec3 );\r
+\r
+ if( dt3dr2 || dt3dt1 )\r
+ {\r
+ cvCalcMatMulDeriv( &R2, &t1, &dxdR2, &dxdt1 );\r
+ if( dt3dr2 )\r
+ {\r
+ cvMatMul( &dxdR2, &dR2dr2, &W3 );\r
+ cvConvert( &W3, dt3dr2 );\r
+ }\r
+ if( dt3dt1 )\r
+ cvConvert( &dxdt1, dt3dt1 );\r
+ }\r
+ }\r
+\r
+ if( dt3dt2 )\r
+ cvSetIdentity( dt3dt2 );\r
+ if( dt3dr1 )\r
+ cvZero( dt3dr1 );\r
+\r
+ __END__;\r
+}\r
+\r
+CV_IMPL int\r
+cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )\r
+{\r
+ int result = 0;\r
+\r
+ CV_FUNCNAME( "cvRodrigues2" );\r
+\r
+ __BEGIN__;\r
+\r
+ int depth, elem_size;\r
+ int i, k;\r
+ double J[27];\r
+ CvMat _J = cvMat( 3, 9, CV_64F, J );\r
+\r
+ if( !CV_IS_MAT(src) )\r
+ CV_ERROR( !src ? CV_StsNullPtr : CV_StsBadArg, "Input argument is not a valid matrix" );\r
+\r
+ if( !CV_IS_MAT(dst) )\r
+ CV_ERROR( !dst ? CV_StsNullPtr : CV_StsBadArg,\r
+ "The first output argument is not a valid matrix" );\r
+\r
+ depth = CV_MAT_DEPTH(src->type);\r
+ elem_size = CV_ELEM_SIZE(depth);\r
+\r
+ if( depth != CV_32F && depth != CV_64F )\r
+ CV_ERROR( CV_StsUnsupportedFormat, "The matrices must have 32f or 64f data type" );\r
+\r
+ if( !CV_ARE_DEPTHS_EQ(src, dst) )\r
+ CV_ERROR( CV_StsUnmatchedFormats, "All the matrices must have the same data type" );\r
+\r
+ if( jacobian )\r
+ {\r
+ if( !CV_IS_MAT(jacobian) )\r
+ CV_ERROR( CV_StsBadArg, "Jacobian is not a valid matrix" );\r
+\r
+ if( !CV_ARE_DEPTHS_EQ(src, jacobian) || CV_MAT_CN(jacobian->type) != 1 )\r
+ CV_ERROR( CV_StsUnmatchedFormats, "Jacobian must have 32fC1 or 64fC1 datatype" );\r
+\r
+ if( (jacobian->rows != 9 || jacobian->cols != 3) &&\r
+ (jacobian->rows != 3 || jacobian->cols != 9))\r
+ CV_ERROR( CV_StsBadSize, "Jacobian must be 3x9 or 9x3" );\r
+ }\r
+\r
+ if( src->cols == 1 || src->rows == 1 )\r
+ {\r
+ double rx, ry, rz, theta;\r
+ int step = src->rows > 1 ? src->step / elem_size : 1;\r
+\r
+ if( src->rows + src->cols*CV_MAT_CN(src->type) - 1 != 3 )\r
+ CV_ERROR( CV_StsBadSize, "Input matrix must be 1x3, 3x1 or 3x3" );\r
+\r
+ if( dst->rows != 3 || dst->cols != 3 || CV_MAT_CN(dst->type) != 1 )\r
+ CV_ERROR( CV_StsBadSize, "Output matrix must be 3x3, single-channel floating point matrix" );\r
+\r
+ if( depth == CV_32F )\r
+ {\r
+ rx = src->data.fl[0];\r
+ ry = src->data.fl[step];\r
+ rz = src->data.fl[step*2];\r
+ }\r
+ else\r
+ {\r
+ rx = src->data.db[0];\r
+ ry = src->data.db[step];\r
+ rz = src->data.db[step*2];\r
+ }\r
+ theta = sqrt(rx*rx + ry*ry + rz*rz);\r
+\r
+ if( theta < DBL_EPSILON )\r
+ {\r
+ cvSetIdentity( dst );\r
+\r
+ if( jacobian )\r
+ {\r
+ memset( J, 0, sizeof(J) );\r
+ J[5] = J[15] = J[19] = -1;\r
+ J[7] = J[11] = J[21] = 1;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };\r
+\r
+ double c = cos(theta);\r
+ double s = sin(theta);\r
+ double c1 = 1. - c;\r
+ double itheta = theta ? 1./theta : 0.;\r
+\r
+ rx *= itheta; ry *= itheta; rz *= itheta;\r
+\r
+ double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };\r
+ double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };\r
+ double R[9];\r
+ CvMat _R = cvMat( 3, 3, CV_64F, R );\r
+\r
+ // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]\r
+ // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]\r
+ for( k = 0; k < 9; k++ )\r
+ R[k] = c*I[k] + c1*rrt[k] + s*_r_x_[k];\r
+\r
+ cvConvert( &_R, dst );\r
+\r
+ if( jacobian )\r
+ {\r
+ double drrt[] = { rx+rx, ry, rz, ry, 0, 0, rz, 0, 0,\r
+ 0, rx, 0, rx, ry+ry, rz, 0, rz, 0,\r
+ 0, 0, rx, 0, 0, ry, rx, ry, rz+rz };\r
+ double d_r_x_[] = { 0, 0, 0, 0, 0, -1, 0, 1, 0,\r
+ 0, 0, 1, 0, 0, 0, -1, 0, 0,\r
+ 0, -1, 0, 1, 0, 0, 0, 0, 0 };\r
+ for( i = 0; i < 3; i++ )\r
+ {\r
+ double ri = i == 0 ? rx : i == 1 ? ry : rz;\r
+ double a0 = -s*ri, a1 = (s - 2*c1*itheta)*ri, a2 = c1*itheta;\r
+ double a3 = (c - s*itheta)*ri, a4 = s*itheta;\r
+ for( k = 0; k < 9; k++ )\r
+ J[i*9+k] = a0*I[k] + a1*rrt[k] + a2*drrt[i*9+k] +\r
+ a3*_r_x_[k] + a4*d_r_x_[i*9+k];\r
+ }\r
+ }\r
+ }\r
+ }\r
+ else if( src->cols == 3 && src->rows == 3 )\r
+ {\r
+ double R[9], U[9], V[9], W[3], rx, ry, rz;\r
+ CvMat _R = cvMat( 3, 3, CV_64F, R );\r
+ CvMat _U = cvMat( 3, 3, CV_64F, U );\r
+ CvMat _V = cvMat( 3, 3, CV_64F, V );\r
+ CvMat _W = cvMat( 3, 1, CV_64F, W );\r
+ double theta, s, c;\r
+ int step = dst->rows > 1 ? dst->step / elem_size : 1;\r
+\r
+ if( (dst->rows != 1 || dst->cols*CV_MAT_CN(dst->type) != 3) &&\r
+ (dst->rows != 3 || dst->cols != 1 || CV_MAT_CN(dst->type) != 1))\r
+ CV_ERROR( CV_StsBadSize, "Output matrix must be 1x3 or 3x1" );\r
+\r
+ cvConvert( src, &_R );\r
+ if( !cvCheckArr( &_R, CV_CHECK_RANGE+CV_CHECK_QUIET, -100, 100 ) )\r
+ {\r
+ cvZero(dst);\r
+ if( jacobian )\r
+ cvZero(jacobian);\r
+ EXIT;\r
+ }\r
+\r
+ cvSVD( &_R, &_W, &_U, &_V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );\r
+ cvGEMM( &_U, &_V, 1, 0, 0, &_R, CV_GEMM_A_T );\r
+\r
+ rx = R[7] - R[5];\r
+ ry = R[2] - R[6];\r
+ rz = R[3] - R[1];\r
+\r
+ s = sqrt((rx*rx + ry*ry + rz*rz)*0.25);\r
+ c = (R[0] + R[4] + R[8] - 1)*0.5;\r
+ c = c > 1. ? 1. : c < -1. ? -1. : c;\r
+ theta = acos(c);\r
+\r
+ if( s < 1e-5 )\r
+ {\r
+ double t;\r
+\r
+ if( c > 0 )\r
+ rx = ry = rz = 0;\r
+ else\r
+ {\r
+ t = (R[0] + 1)*0.5;\r
+ rx = sqrt(MAX(t,0.));\r
+ t = (R[4] + 1)*0.5;\r
+ ry = sqrt(MAX(t,0.))*(R[1] < 0 ? -1. : 1.);\r
+ t = (R[8] + 1)*0.5;\r
+ rz = sqrt(MAX(t,0.))*(R[2] < 0 ? -1. : 1.);\r
+ if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R[5] > 0) != (ry*rz > 0) )\r
+ rz = -rz;\r
+ theta /= sqrt(rx*rx + ry*ry + rz*rz);\r
+ rx *= theta;\r
+ ry *= theta;\r
+ rz *= theta;\r
+ }\r
+\r
+ if( jacobian )\r
+ {\r
+ memset( J, 0, sizeof(J) );\r
+ if( c > 0 )\r
+ {\r
+ J[5] = J[15] = J[19] = -0.5;\r
+ J[7] = J[11] = J[21] = 0.5;\r
+ }\r
+ }\r
+ }\r
+ else\r
+ {\r
+ double vth = 1/(2*s);\r
+\r
+ if( jacobian )\r
+ {\r
+ double t, dtheta_dtr = -1./s;\r
+ // var1 = [vth;theta]\r
+ // var = [om1;var1] = [om1;vth;theta]\r
+ double dvth_dtheta = -vth*c/s;\r
+ double d1 = 0.5*dvth_dtheta*dtheta_dtr;\r
+ double d2 = 0.5*dtheta_dtr;\r
+ // dvar1/dR = dvar1/dtheta*dtheta/dR = [dvth/dtheta; 1] * dtheta/dtr * dtr/dR\r
+ double dvardR[5*9] =\r
+ {\r
+ 0, 0, 0, 0, 0, 1, 0, -1, 0,\r
+ 0, 0, -1, 0, 0, 0, 1, 0, 0,\r
+ 0, 1, 0, -1, 0, 0, 0, 0, 0,\r
+ d1, 0, 0, 0, d1, 0, 0, 0, d1,\r
+ d2, 0, 0, 0, d2, 0, 0, 0, d2\r
+ };\r
+ // var2 = [om;theta]\r
+ double dvar2dvar[] =\r
+ {\r
+ vth, 0, 0, rx, 0,\r
+ 0, vth, 0, ry, 0,\r
+ 0, 0, vth, rz, 0,\r
+ 0, 0, 0, 0, 1\r
+ };\r
+ double domegadvar2[] =\r
+ {\r
+ theta, 0, 0, rx*vth,\r
+ 0, theta, 0, ry*vth,\r
+ 0, 0, theta, rz*vth\r
+ };\r
+\r
+ CvMat _dvardR = cvMat( 5, 9, CV_64FC1, dvardR );\r
+ CvMat _dvar2dvar = cvMat( 4, 5, CV_64FC1, dvar2dvar );\r
+ CvMat _domegadvar2 = cvMat( 3, 4, CV_64FC1, domegadvar2 );\r
+ double t0[3*5];\r
+ CvMat _t0 = cvMat( 3, 5, CV_64FC1, t0 );\r
+\r
+ cvMatMul( &_domegadvar2, &_dvar2dvar, &_t0 );\r
+ cvMatMul( &_t0, &_dvardR, &_J );\r
+\r
+ // transpose every row of _J (treat the rows as 3x3 matrices)\r
+ CV_SWAP(J[1], J[3], t); CV_SWAP(J[2], J[6], t); CV_SWAP(J[5], J[7], t);\r
+ CV_SWAP(J[10], J[12], t); CV_SWAP(J[11], J[15], t); CV_SWAP(J[14], J[16], t);\r
+ CV_SWAP(J[19], J[21], t); CV_SWAP(J[20], J[24], t); CV_SWAP(J[23], J[25], t);\r
+ }\r
+\r
+ vth *= theta;\r
+ rx *= vth; ry *= vth; rz *= vth;\r
+ }\r
+\r
+ if( depth == CV_32F )\r
+ {\r
+ dst->data.fl[0] = (float)rx;\r
+ dst->data.fl[step] = (float)ry;\r
+ dst->data.fl[step*2] = (float)rz;\r
+ }\r
+ else\r
+ {\r
+ dst->data.db[0] = rx;\r
+ dst->data.db[step] = ry;\r
+ dst->data.db[step*2] = rz;\r
+ }\r
+ }\r
+\r
+ if( jacobian )\r
+ {\r
+ if( depth == CV_32F )\r
+ {\r
+ if( jacobian->rows == _J.rows )\r
+ cvConvert( &_J, jacobian );\r
+ else\r
+ {\r
+ float Jf[3*9];\r
+ CvMat _Jf = cvMat( _J.rows, _J.cols, CV_32FC1, Jf );\r
+ cvConvert( &_J, &_Jf );\r
+ cvTranspose( &_Jf, jacobian );\r
+ }\r
+ }\r
+ else if( jacobian->rows == _J.rows )\r
+ cvCopy( &_J, jacobian );\r
+ else\r
+ cvTranspose( &_J, jacobian );\r
+ }\r
+\r
+ result = 1;\r
+\r
+ __END__;\r
+\r
+ return result;\r
+}\r
+\r
+\r
+CV_IMPL void\r
+cvProjectPoints2( const CvMat* objectPoints,\r
+ const CvMat* r_vec,\r
+ const CvMat* t_vec,\r
+ const CvMat* A,\r
+ const CvMat* distCoeffs,\r
+ CvMat* imagePoints, CvMat* dpdr,\r
+ CvMat* dpdt, CvMat* dpdf,\r
+ CvMat* dpdc, CvMat* dpdk,\r
+ double aspectRatio )\r
+{\r
+ CvMat *_M = 0, *_m = 0;\r
+ CvMat *_dpdr = 0, *_dpdt = 0, *_dpdc = 0, *_dpdf = 0, *_dpdk = 0;\r
+\r
+ CV_FUNCNAME( "cvProjectPoints2" );\r
+\r
+ __BEGIN__;\r
+\r
+ int i, j, count;\r
+ int calc_derivatives;\r
+ const CvPoint3D64f* M;\r
+ CvPoint2D64f* m;\r
+ double r[3], R[9], dRdr[27], t[3], a[9], k[5] = {0,0,0,0,0}, fx, fy, cx, cy;\r
+ CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;\r
+ CvMat _R = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );\r
+ double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0;\r
+ int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, dpdc_step = 0;\r
+ bool fixedAspectRatio = aspectRatio > FLT_EPSILON;\r
+\r
+ if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(r_vec) ||\r
+ !CV_IS_MAT(t_vec) || !CV_IS_MAT(A) ||\r
+ /*!CV_IS_MAT(distCoeffs) ||*/ !CV_IS_MAT(imagePoints) )\r
+ CV_ERROR( CV_StsBadArg, "One of required arguments is not a valid matrix" );\r
+\r
+ count = MAX(objectPoints->rows, objectPoints->cols);\r
+\r
+ if( CV_IS_CONT_MAT(objectPoints->type) && CV_MAT_DEPTH(objectPoints->type) == CV_64F &&\r
+ ((objectPoints->rows == 1 && CV_MAT_CN(objectPoints->type) == 3) ||\r
+ (objectPoints->rows == count && CV_MAT_CN(objectPoints->type)*objectPoints->cols == 3)))\r
+ _M = (CvMat*)objectPoints;\r
+ else\r
+ {\r
+ CV_CALL( _M = cvCreateMat( 1, count, CV_64FC3 ));\r
+ CV_CALL( cvConvertPointsHomogeneous( objectPoints, _M ));\r
+ }\r
+\r
+ if( CV_IS_CONT_MAT(imagePoints->type) && CV_MAT_DEPTH(imagePoints->type) == CV_64F &&\r
+ ((imagePoints->rows == 1 && CV_MAT_CN(imagePoints->type) == 2) ||\r
+ (imagePoints->rows == count && CV_MAT_CN(imagePoints->type)*imagePoints->cols == 2)))\r
+ _m = imagePoints;\r
+ else\r
+ CV_CALL( _m = cvCreateMat( 1, count, CV_64FC2 ));\r
+\r
+ M = (CvPoint3D64f*)_M->data.db;\r
+ m = (CvPoint2D64f*)_m->data.db;\r
+\r
+ if( (CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F) ||\r
+ (((r_vec->rows != 1 && r_vec->cols != 1) ||\r
+ r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3) &&\r
+ ((r_vec->rows != 3 && r_vec->cols != 3) || CV_MAT_CN(r_vec->type) != 1)))\r
+ CV_ERROR( CV_StsBadArg, "Rotation must be represented by 1x3 or 3x1 "\r
+ "floating-point rotation vector, or 3x3 rotation matrix" );\r
+\r
+ if( r_vec->rows == 3 && r_vec->cols == 3 )\r
+ {\r
+ _r = cvMat( 3, 1, CV_64FC1, r );\r
+ CV_CALL( cvRodrigues2( r_vec, &_r ));\r
+ CV_CALL( cvRodrigues2( &_r, &_R, &_dRdr ));\r
+ cvCopy( r_vec, &_R );\r
+ }\r
+ else\r
+ {\r
+ _r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r );\r
+ CV_CALL( cvConvert( r_vec, &_r ));\r
+ CV_CALL( cvRodrigues2( &_r, &_R, &_dRdr ) );\r
+ }\r
+\r
+ if( (CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F) ||\r
+ (t_vec->rows != 1 && t_vec->cols != 1) ||\r
+ t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )\r
+ CV_ERROR( CV_StsBadArg,\r
+ "Translation vector must be 1x3 or 3x1 floating-point vector" );\r
+\r
+ _t = cvMat( t_vec->rows, t_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), t );\r
+ CV_CALL( cvConvert( t_vec, &_t ));\r
+\r
+ if( (CV_MAT_TYPE(A->type) != CV_64FC1 && CV_MAT_TYPE(A->type) != CV_32FC1) ||\r
+ A->rows != 3 || A->cols != 3 )\r
+ CV_ERROR( CV_StsBadArg, "Instrinsic parameters must be 3x3 floating-point matrix" );\r
+\r
+ CV_CALL( cvConvert( A, &_a ));\r
+ fx = a[0]; fy = a[4];\r
+ cx = a[2]; cy = a[5];\r
+\r
+ if( fixedAspectRatio )\r
+ fx = fy*aspectRatio;\r
+\r
+ if( distCoeffs )\r
+ {\r
+ if( !CV_IS_MAT(distCoeffs) ||\r
+ (CV_MAT_DEPTH(distCoeffs->type) != CV_64F &&\r
+ CV_MAT_DEPTH(distCoeffs->type) != CV_32F) ||\r
+ (distCoeffs->rows != 1 && distCoeffs->cols != 1) ||\r
+ (distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&\r
+ distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5) )\r
+ CV_ERROR( CV_StsBadArg,\r
+ "Distortion coefficients must be 1x4, 4x1, 1x5 or 5x1 floating-point vector" );\r
+\r
+ _k = cvMat( distCoeffs->rows, distCoeffs->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k );\r
+ CV_CALL( cvConvert( distCoeffs, &_k ));\r
+ }\r
+\r
+ if( dpdr )\r
+ {\r
+ if( !CV_IS_MAT(dpdr) ||\r
+ (CV_MAT_TYPE(dpdr->type) != CV_32FC1 &&\r
+ CV_MAT_TYPE(dpdr->type) != CV_64FC1) ||\r
+ dpdr->rows != count*2 || dpdr->cols != 3 )\r
+ CV_ERROR( CV_StsBadArg, "dp/drot must be 2Nx3 floating-point matrix" );\r
+\r
+ if( CV_MAT_TYPE(dpdr->type) == CV_64FC1 )\r
+ _dpdr = dpdr;\r
+ else\r
+ CV_CALL( _dpdr = cvCreateMat( 2*count, 3, CV_64FC1 ));\r
+ dpdr_p = _dpdr->data.db;\r
+ dpdr_step = _dpdr->step/sizeof(dpdr_p[0]);\r
+ }\r
+\r
+ if( dpdt )\r
+ {\r
+ if( !CV_IS_MAT(dpdt) ||\r
+ (CV_MAT_TYPE(dpdt->type) != CV_32FC1 &&\r
+ CV_MAT_TYPE(dpdt->type) != CV_64FC1) ||\r
+ dpdt->rows != count*2 || dpdt->cols != 3 )\r
+ CV_ERROR( CV_StsBadArg, "dp/dT must be 2Nx3 floating-point matrix" );\r
+\r
+ if( CV_MAT_TYPE(dpdt->type) == CV_64FC1 )\r
+ _dpdt = dpdt;\r
+ else\r
+ CV_CALL( _dpdt = cvCreateMat( 2*count, 3, CV_64FC1 ));\r
+ dpdt_p = _dpdt->data.db;\r
+ dpdt_step = _dpdt->step/sizeof(dpdt_p[0]);\r
+ }\r
+\r
+ if( dpdf )\r
+ {\r
+ if( !CV_IS_MAT(dpdf) ||\r
+ (CV_MAT_TYPE(dpdf->type) != CV_32FC1 && CV_MAT_TYPE(dpdf->type) != CV_64FC1) ||\r
+ dpdf->rows != count*2 || dpdf->cols != 2 )\r
+ CV_ERROR( CV_StsBadArg, "dp/df must be 2Nx2 floating-point matrix" );\r
+\r
+ if( CV_MAT_TYPE(dpdf->type) == CV_64FC1 )\r
+ _dpdf = dpdf;\r
+ else\r
+ CV_CALL( _dpdf = cvCreateMat( 2*count, 2, CV_64FC1 ));\r
+ dpdf_p = _dpdf->data.db;\r
+ dpdf_step = _dpdf->step/sizeof(dpdf_p[0]);\r
+ }\r
+\r
+ if( dpdc )\r
+ {\r
+ if( !CV_IS_MAT(dpdc) ||\r
+ (CV_MAT_TYPE(dpdc->type) != CV_32FC1 && CV_MAT_TYPE(dpdc->type) != CV_64FC1) ||\r
+ dpdc->rows != count*2 || dpdc->cols != 2 )\r
+ CV_ERROR( CV_StsBadArg, "dp/dc must be 2Nx2 floating-point matrix" );\r
+\r
+ if( CV_MAT_TYPE(dpdc->type) == CV_64FC1 )\r
+ _dpdc = dpdc;\r
+ else\r
+ CV_CALL( _dpdc = cvCreateMat( 2*count, 2, CV_64FC1 ));\r
+ dpdc_p = _dpdc->data.db;\r
+ dpdc_step = _dpdc->step/sizeof(dpdc_p[0]);\r
+ }\r
+\r
+ if( dpdk )\r
+ {\r
+ if( !CV_IS_MAT(dpdk) ||\r
+ (CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) ||\r
+ dpdk->rows != count*2 || (dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )\r
+ CV_ERROR( CV_StsBadArg, "dp/df must be 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );\r
+\r
+ if( !distCoeffs )\r
+ CV_ERROR( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not" );\r
+\r
+ if( CV_MAT_TYPE(dpdk->type) == CV_64FC1 )\r
+ _dpdk = dpdk;\r
+ else\r
+ CV_CALL( _dpdk = cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 ));\r
+ dpdk_p = _dpdk->data.db;\r
+ dpdk_step = _dpdk->step/sizeof(dpdk_p[0]);\r
+ }\r
+\r
+ calc_derivatives = dpdr || dpdt || dpdf || dpdc || dpdk;\r
+\r
+ for( i = 0; i < count; i++ )\r
+ {\r
+ double X = M[i].x, Y = M[i].y, Z = M[i].z;\r
+ double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];\r
+ double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];\r
+ double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];\r
+ double r2, r4, r6, a1, a2, a3, cdist;\r
+ double xd, yd;\r
+\r
+ z = z ? 1./z : 1;\r
+ x *= z; y *= z;\r
+\r
+ r2 = x*x + y*y;\r
+ r4 = r2*r2;\r
+ r6 = r4*r2;\r
+ a1 = 2*x*y;\r
+ a2 = r2 + 2*x*x;\r
+ a3 = r2 + 2*y*y;\r
+ cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;\r
+ xd = x*cdist + k[2]*a1 + k[3]*a2;\r
+ yd = y*cdist + k[2]*a3 + k[3]*a1;\r
+\r
+ m[i].x = xd*fx + cx;\r
+ m[i].y = yd*fy + cy;\r
+\r
+ if( calc_derivatives )\r
+ {\r
+ if( dpdc_p )\r
+ {\r
+ dpdc_p[0] = 1; dpdc_p[1] = 0;\r
+ dpdc_p[dpdc_step] = 0;\r
+ dpdc_p[dpdc_step+1] = 1;\r
+ dpdc_p += dpdc_step*2;\r
+ }\r
+\r
+ if( dpdf_p )\r
+ {\r
+ if( fixedAspectRatio )\r
+ {\r
+ dpdf_p[0] = 0; dpdf_p[1] = xd*aspectRatio;\r
+ dpdf_p[dpdf_step] = 0;\r
+ dpdf_p[dpdf_step+1] = yd;\r
+ }\r
+ else\r
+ {\r
+ dpdf_p[0] = xd; dpdf_p[1] = 0;\r
+ dpdf_p[dpdf_step] = 0;\r
+ dpdf_p[dpdf_step+1] = yd;\r
+ }\r
+ dpdf_p += dpdf_step*2;\r
+ }\r
+\r
+ if( dpdk_p )\r
+ {\r
+ dpdk_p[0] = fx*x*r2;\r
+ dpdk_p[1] = fx*x*r4;\r
+ dpdk_p[dpdk_step] = fy*y*r2;\r
+ dpdk_p[dpdk_step+1] = fy*y*r4;\r
+ if( _dpdk->cols > 2 )\r
+ {\r
+ dpdk_p[2] = fx*a1;\r
+ dpdk_p[3] = fx*a2;\r
+ dpdk_p[dpdk_step+2] = fy*a3;\r
+ dpdk_p[dpdk_step+3] = fy*a1;\r
+ if( _dpdk->cols > 4 )\r
+ {\r
+ dpdk_p[4] = fx*x*r6;\r
+ dpdk_p[dpdk_step+4] = fy*y*r6;\r
+ }\r
+ }\r
+ dpdk_p += dpdk_step*2;\r
+ }\r
+\r
+ if( dpdt_p )\r
+ {\r
+ double dxdt[] = { z, 0, -x*z }, dydt[] = { 0, z, -y*z };\r
+ for( j = 0; j < 3; j++ )\r
+ {\r
+ double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j];\r
+ double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt;\r
+ double da1dt = 2*(x*dydt[j] + y*dxdt[j]);\r
+ double dmxdt = fx*(dxdt[j]*cdist + x*dcdist_dt +\r
+ k[2]*da1dt + k[3]*(dr2dt + 2*x*dxdt[j]));\r
+ double dmydt = fy*(dydt[j]*cdist + y*dcdist_dt +\r
+ k[2]*(dr2dt + 2*y*dydt[j]) + k[3]*da1dt);\r
+ dpdt_p[j] = dmxdt;\r
+ dpdt_p[dpdt_step+j] = dmydt;\r
+ }\r
+ dpdt_p += dpdt_step*2;\r
+ }\r
+\r
+ if( dpdr_p )\r
+ {\r
+ double dx0dr[] =\r
+ {\r
+ X*dRdr[0] + Y*dRdr[1] + Z*dRdr[2],\r
+ X*dRdr[9] + Y*dRdr[10] + Z*dRdr[11],\r
+ X*dRdr[18] + Y*dRdr[19] + Z*dRdr[20]\r
+ };\r
+ double dy0dr[] =\r
+ {\r
+ X*dRdr[3] + Y*dRdr[4] + Z*dRdr[5],\r
+ X*dRdr[12] + Y*dRdr[13] + Z*dRdr[14],\r
+ X*dRdr[21] + Y*dRdr[22] + Z*dRdr[23]\r
+ };\r
+ double dz0dr[] =\r
+ {\r
+ X*dRdr[6] + Y*dRdr[7] + Z*dRdr[8],\r
+ X*dRdr[15] + Y*dRdr[16] + Z*dRdr[17],\r
+ X*dRdr[24] + Y*dRdr[25] + Z*dRdr[26]\r
+ };\r
+ for( j = 0; j < 3; j++ )\r
+ {\r
+ double dxdr = z*(dx0dr[j] - x*dz0dr[j]);\r
+ double dydr = z*(dy0dr[j] - y*dz0dr[j]);\r
+ double dr2dr = 2*x*dxdr + 2*y*dydr;\r
+ double dcdist_dr = k[0]*dr2dr + 2*k[1]*r2*dr2dr + 3*k[4]*r4*dr2dr;\r
+ double da1dr = 2*(x*dydr + y*dxdr);\r
+ double dmxdr = fx*(dxdr*cdist + x*dcdist_dr +\r
+ k[2]*da1dr + k[3]*(dr2dr + 2*x*dxdr));\r
+ double dmydr = fy*(dydr*cdist + y*dcdist_dr +\r
+ k[2]*(dr2dr + 2*y*dydr) + k[3]*da1dr);\r
+ dpdr_p[j] = dmxdr;\r
+ dpdr_p[dpdr_step+j] = dmydr;\r
+ }\r
+ dpdr_p += dpdr_step*2;\r
+ }\r
+ }\r
+ }\r
+\r
+ if( _m != imagePoints )\r
+ cvConvertPointsHomogeneous( _m, imagePoints );\r
+ if( _dpdr != dpdr )\r
+ cvConvert( _dpdr, dpdr );\r
+ if( _dpdt != dpdt )\r
+ cvConvert( _dpdt, dpdt );\r
+ if( _dpdf != dpdf )\r
+ cvConvert( _dpdf, dpdf );\r
+ if( _dpdc != dpdc )\r
+ cvConvert( _dpdc, dpdc );\r
+ if( _dpdk != dpdk )\r
+ cvConvert( _dpdk, dpdk );\r
+\r
+ __END__;\r
+\r
+ if( _M != objectPoints )\r
+ cvReleaseMat( &_M );\r
+ if( _m != imagePoints )\r
+ cvReleaseMat( &_m );\r
+ if( _dpdr != dpdr )\r
+ cvReleaseMat( &_dpdr );\r
+ if( _dpdt != dpdt )\r
+ cvReleaseMat( &_dpdt );\r
+ if( _dpdf != dpdf )\r
+ cvReleaseMat( &_dpdf );\r
+ if( _dpdc != dpdc )\r
+ cvReleaseMat( &_dpdc );\r
+ if( _dpdk != dpdk )\r
+ cvReleaseMat( &_dpdk );\r
+}\r
+\r
+\r
+/* Estimates the object pose -- rotation vector `rvec` and translation\r
+ vector `tvec` -- from 3D-2D correspondences (objectPoints -> imagePoints),\r
+ given the camera matrix A and (optionally NULL) distortion coefficients.\r
+ The initial pose comes from the caller (useExtrinsicGuess != 0), from a\r
+ homography decomposition for planar rigs, or from a DLT estimate for\r
+ non-planar rigs; it is then refined by up to max_iter Levenberg-Marquardt\r
+ iterations minimizing the reprojection error. */\r
+CV_IMPL void\r
+cvFindExtrinsicCameraParams2( const CvMat* objectPoints,\r
+ const CvMat* imagePoints, const CvMat* A,\r
+ const CvMat* distCoeffs,\r
+ CvMat* rvec, CvMat* tvec,\r
+ int useExtrinsicGuess )\r
+{\r
+ const int max_iter = 20;\r
+ CvMat *_M = 0, *_Mxy = 0, *_m = 0, *_mn = 0, *_L = 0, *_J = 0;\r
+\r
+ CV_FUNCNAME( "cvFindExtrinsicCameraParams2" );\r
+\r
+ __BEGIN__;\r
+\r
+ int i, count;\r
+ double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];\r
+ double MM[9], U[9], V[9], W[3];\r
+ CvScalar Mc;\r
+ // param holds the 6 optimized values: rotation vector (param[0..2])\r
+ // and translation vector (param[3..5]); _r/_t are views into it.\r
+ double param[6];\r
+ CvMat _A = cvMat( 3, 3, CV_64F, a );\r
+ CvMat _Ar = cvMat( 3, 3, CV_64F, ar );\r
+ CvMat _R = cvMat( 3, 3, CV_64F, R );\r
+ CvMat _r = cvMat( 3, 1, CV_64F, param );\r
+ CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );\r
+ CvMat _Mc = cvMat( 1, 3, CV_64F, Mc.val );\r
+ CvMat _MM = cvMat( 3, 3, CV_64F, MM );\r
+ CvMat _U = cvMat( 3, 3, CV_64F, U );\r
+ CvMat _V = cvMat( 3, 3, CV_64F, V );\r
+ CvMat _W = cvMat( 3, 1, CV_64F, W );\r
+ CvMat _param = cvMat( 6, 1, CV_64F, param );\r
+ CvMat _dpdr, _dpdt;\r
+\r
+ CV_ASSERT( CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) &&\r
+ CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) );\r
+\r
+ count = MAX(objectPoints->cols, objectPoints->rows);\r
+ CV_CALL( _M = cvCreateMat( 1, count, CV_64FC3 ));\r
+ CV_CALL( _m = cvCreateMat( 1, count, CV_64FC2 ));\r
+\r
+ CV_CALL( cvConvertPointsHomogeneous( objectPoints, _M ));\r
+ CV_CALL( cvConvertPointsHomogeneous( imagePoints, _m ));\r
+ CV_CALL( cvConvert( A, &_A ));\r
+\r
+ CV_ASSERT( (CV_MAT_DEPTH(rvec->type) == CV_64F || CV_MAT_DEPTH(rvec->type) == CV_32F) &&\r
+ (rvec->rows == 1 || rvec->cols == 1) && rvec->rows*rvec->cols*CV_MAT_CN(rvec->type) == 3 );\r
+\r
+ CV_ASSERT( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) &&\r
+ (tvec->rows == 1 || tvec->cols == 1) && tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 );\r
+\r
+ CV_CALL( _mn = cvCreateMat( 1, count, CV_64FC2 ));\r
+ CV_CALL( _Mxy = cvCreateMat( 1, count, CV_64FC2 ));\r
+\r
+ // normalize image points\r
+ // (unapply the intrinsic matrix transformation and distortion)\r
+ cvUndistortPoints( _m, _mn, &_A, distCoeffs, 0, &_Ar );\r
+\r
+ // choose the starting pose for the refinement below\r
+ if( useExtrinsicGuess )\r
+ {\r
+ // copy the caller-supplied rvec/tvec into param[] (converting to double)\r
+ CvMat _r_temp = cvMat(rvec->rows, rvec->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );\r
+ CvMat _t_temp = cvMat(tvec->rows, tvec->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3);\r
+ cvConvert( rvec, &_r_temp );\r
+ cvConvert( tvec, &_t_temp );\r
+ }\r
+ else\r
+ {\r
+ // analyze the scatter matrix of the object points to decide whether\r
+ // the rig is planar (smallest singular value nearly zero)\r
+ Mc = cvAvg(_M);\r
+ cvReshape( _M, _M, 1, count );\r
+ cvMulTransposed( _M, &_MM, 1, &_Mc );\r
+ cvSVD( &_MM, &_W, 0, &_V, CV_SVD_MODIFY_A + CV_SVD_V_T );\r
+\r
+ // initialize extrinsic parameters\r
+ if( W[2]/W[1] < 1e-3 || count < 4 )\r
+ {\r
+ // a planar structure case (all M's lie in the same plane)\r
+ double tt[3], h[9], h1_norm, h2_norm;\r
+ CvMat* R_transform = &_V;\r
+ CvMat T_transform = cvMat( 3, 1, CV_64F, tt );\r
+ CvMat _H = cvMat( 3, 3, CV_64F, h );\r
+ CvMat _h1, _h2, _h3;\r
+\r
+ // NOTE(review): degenerate-orientation guard -- fall back to identity\r
+ if( V[2]*V[2] + V[5]*V[5] < 1e-10 )\r
+ cvSetIdentity( R_transform );\r
+\r
+ if( cvDet(R_transform) < 0 )\r
+ cvScale( R_transform, R_transform, -1 );\r
+\r
+ cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );\r
+\r
+ // transform the object points into the plane's own (x,y) frame\r
+ for( i = 0; i < count; i++ )\r
+ {\r
+ const double* Rp = R_transform->data.db;\r
+ const double* Tp = T_transform.data.db;\r
+ const double* src = _M->data.db + i*3;\r
+ double* dst = _Mxy->data.db + i*2;\r
+\r
+ dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];\r
+ dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];\r
+ }\r
+\r
+ cvFindHomography( _Mxy, _mn, &_H );\r
+\r
+ // decompose H = [h1 h2 h3] into rotation columns and translation\r
+ cvGetCol( &_H, &_h1, 0 );\r
+ _h2 = _h1; _h2.data.db++;\r
+ _h3 = _h2; _h3.data.db++;\r
+ h1_norm = sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);\r
+ h2_norm = sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);\r
+\r
+ cvScale( &_h1, &_h1, 1./h1_norm );\r
+ cvScale( &_h2, &_h2, 1./h2_norm );\r
+ cvScale( &_h3, &_t, 2./(h1_norm + h2_norm));\r
+ cvCrossProduct( &_h1, &_h2, &_h3 );\r
+\r
+ // project the scaled H onto the rotation manifold via a\r
+ // Rodrigues round-trip (matrix -> vector -> matrix)\r
+ cvRodrigues2( &_H, &_r );\r
+ cvRodrigues2( &_r, &_H );\r
+ // map the pose back from the plane frame to the original frame\r
+ cvMatMulAdd( &_H, &T_transform, &_t, &_t );\r
+ cvMatMul( &_H, R_transform, &_R );\r
+ cvRodrigues2( &_R, &_r );\r
+ }\r
+ else\r
+ {\r
+ // non-planar structure. Use DLT method\r
+ double* L;\r
+ double LL[12*12], LW[12], LV[12*12], sc;\r
+ CvMat _LL = cvMat( 12, 12, CV_64F, LL );\r
+ CvMat _LW = cvMat( 12, 1, CV_64F, LW );\r
+ CvMat _LV = cvMat( 12, 12, CV_64F, LV );\r
+ CvMat _RRt, _RR, _tt;\r
+ CvPoint3D64f* M = (CvPoint3D64f*)_M->data.db;\r
+ CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db;\r
+\r
+ CV_CALL( _L = cvCreateMat( 2*count, 12, CV_64F ));\r
+ L = _L->data.db;\r
+\r
+ // build the 2Nx12 DLT constraint matrix (two rows per point)\r
+ for( i = 0; i < count; i++, L += 24 )\r
+ {\r
+ double x = -mn[i].x, y = -mn[i].y;\r
+ L[0] = L[16] = M[i].x;\r
+ L[1] = L[17] = M[i].y;\r
+ L[2] = L[18] = M[i].z;\r
+ L[3] = L[19] = 1.;\r
+ L[4] = L[5] = L[6] = L[7] = 0.;\r
+ L[12] = L[13] = L[14] = L[15] = 0.;\r
+ L[8] = x*M[i].x;\r
+ L[9] = x*M[i].y;\r
+ L[10] = x*M[i].z;\r
+ L[11] = x;\r
+ L[20] = y*M[i].x;\r
+ L[21] = y*M[i].y;\r
+ L[22] = y*M[i].z;\r
+ L[23] = y;\r
+ }\r
+\r
+ // the solution is the singular vector of the smallest singular value\r
+ cvMulTransposed( _L, &_LL, 1 );\r
+ cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );\r
+ _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );\r
+ cvGetCols( &_RRt, &_RR, 0, 3 );\r
+ cvGetCol( &_RRt, &_tt, 3 );\r
+ if( cvDet(&_RR) < 0 )\r
+ cvScale( &_RRt, &_RRt, -1 );\r
+ sc = cvNorm(&_RR);\r
+ // orthogonalize RR into a proper rotation and rescale t accordingly\r
+ cvSVD( &_RR, &_W, &_U, &_V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );\r
+ cvGEMM( &_U, &_V, 1, 0, 0, &_R, CV_GEMM_A_T );\r
+ cvScale( &_tt, &_t, cvNorm(&_R)/sc );\r
+ cvRodrigues2( &_R, &_r );\r
+ cvReleaseMat( &_L );\r
+ }\r
+ }\r
+\r
+ cvReshape( _M, _M, 3, 1 );\r
+ cvReshape( _mn, _mn, 2, 1 );\r
+\r
+ // refine extrinsic parameters using iterative algorithm\r
+ {\r
+ CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true);\r
+ cvCopy( &_param, solver.param );\r
+\r
+ for(;;)\r
+ {\r
+ // NOTE(review): this local _J shadows the outer _J declared at the\r
+ // top of the function; the outer one is never allocated, so the\r
+ // cvReleaseMat(&_J) in the cleanup section is a no-op.\r
+ CvMat *_J = 0, *_err = 0;\r
+ const CvMat *__param = 0;\r
+ bool proceed = solver.update( __param, _J, _err );\r
+ cvCopy( __param, &_param );\r
+ if( !proceed || !_err )\r
+ break;\r
+ cvReshape( _err, _err, 2, 1 );\r
+ if( _J )\r
+ {\r
+ cvGetCols( _J, &_dpdr, 0, 3 );\r
+ cvGetCols( _J, &_dpdt, 3, 6 );\r
+ cvProjectPoints2( _M, &_r, &_t, &_A, distCoeffs,\r
+ _err, &_dpdr, &_dpdt, 0, 0, 0 );\r
+ }\r
+ else\r
+ {\r
+ cvProjectPoints2( _M, &_r, &_t, &_A, distCoeffs,\r
+ _err, 0, 0, 0, 0, 0 );\r
+ }\r
+ // residual = projected points - measured points\r
+ cvSub(_err, _m, _err);\r
+ cvReshape( _err, _err, 1, 2*count );\r
+ }\r
+ cvCopy( solver.param, &_param );\r
+ }\r
+\r
+ // write the refined pose back in the caller's layout/depth\r
+ _r = cvMat( rvec->rows, rvec->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );\r
+ _t = cvMat( tvec->rows, tvec->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3 );\r
+\r
+ cvConvert( &_r, rvec );\r
+ cvConvert( &_t, tvec );\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &_M );\r
+ cvReleaseMat( &_Mxy );\r
+ cvReleaseMat( &_m );\r
+ cvReleaseMat( &_mn );\r
+ cvReleaseMat( &_L );\r
+ cvReleaseMat( &_J );\r
+}\r
+\r
+\r
+/* Computes an initial camera matrix for planar calibration rigs: fits a\r
+ per-view homography, derives focal-length constraints from vanishing\r
+ points, and solves the stacked system in the least-squares sense. The\r
+ principal point is fixed at the image center; aspectRatio != 0 forces\r
+ fx = aspectRatio * fy. Used to seed cvCalibrateCamera2 when no\r
+ intrinsic guess is supplied. */\r
+CV_IMPL void\r
+cvInitIntrinsicParams2D( const CvMat* objectPoints,\r
+ const CvMat* imagePoints,\r
+ const CvMat* npoints,\r
+ CvSize imageSize,\r
+ CvMat* cameraMatrix,\r
+ double aspectRatio )\r
+{\r
+ CvMat *_A = 0, *_b = 0, *_allH = 0, *_allK = 0;\r
+\r
+ CV_FUNCNAME( "cvInitIntrinsicParams2D" );\r
+\r
+ __BEGIN__;\r
+\r
+ int i, j, pos, nimages, total, ni = 0;\r
+ double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };\r
+ double H[9], f[2];\r
+ CvMat _a = cvMat( 3, 3, CV_64F, a );\r
+ CvMat _H = cvMat( 3, 3, CV_64F, H );\r
+ CvMat _f = cvMat( 2, 1, CV_64F, f );\r
+\r
+ assert( CV_MAT_TYPE(npoints->type) == CV_32SC1 &&\r
+ CV_IS_MAT_CONT(npoints->type) );\r
+ nimages = npoints->rows + npoints->cols - 1;\r
+\r
+ if( (CV_MAT_TYPE(objectPoints->type) != CV_32FC3 &&\r
+ CV_MAT_TYPE(objectPoints->type) != CV_64FC3) ||\r
+ (CV_MAT_TYPE(imagePoints->type) != CV_32FC2 &&\r
+ CV_MAT_TYPE(imagePoints->type) != CV_64FC2) )\r
+ CV_ERROR( CV_StsUnsupportedFormat, "Both object points and image points must be 2D" );\r
+\r
+ if( objectPoints->rows != 1 || imagePoints->rows != 1 )\r
+ CV_ERROR( CV_StsBadSize, "object points and image points must be a single-row matrices" );\r
+\r
+ // two focal-length constraints (rows of _A, _b) per view\r
+ _A = cvCreateMat( 2*nimages, 2, CV_64F );\r
+ _b = cvCreateMat( 2*nimages, 1, CV_64F );\r
+ // principal point assumed at the image center\r
+ a[2] = (imageSize.width - 1)*0.5;\r
+ a[5] = (imageSize.height - 1)*0.5;\r
+ _allH = cvCreateMat( nimages, 9, CV_64F );\r
+\r
+ // NOTE(review): `total` is computed but not used below in this function\r
+ total = cvRound(cvSum(npoints).val[0]);\r
+\r
+ // extract vanishing points in order to obtain initial value for the focal length\r
+ for( i = 0, pos = 0; i < nimages; i++, pos += ni )\r
+ {\r
+ double* Ap = _A->data.db + i*4;\r
+ double* bp = _b->data.db + i*2;\r
+ ni = npoints->data.i[i];\r
+ double h[3], v[3], d1[3], d2[3];\r
+ double n[4] = {0,0,0,0};\r
+ CvMat _m, _M;\r
+ cvGetCols( objectPoints, &_M, pos, pos + ni );\r
+ cvGetCols( imagePoints, &_m, pos, pos + ni );\r
+\r
+ cvFindHomography( &_M, &_m, &_H );\r
+ memcpy( _allH->data.db + i*9, H, sizeof(H) );\r
+\r
+ // shift the homography so the principal point is at the origin\r
+ H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2];\r
+ H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5];\r
+\r
+ // h, v: first two homography columns; d1, d2: their bisectors\r
+ for( j = 0; j < 3; j++ )\r
+ {\r
+ double t0 = H[j*3], t1 = H[j*3+1];\r
+ h[j] = t0; v[j] = t1;\r
+ d1[j] = (t0 + t1)*0.5;\r
+ d2[j] = (t0 - t1)*0.5;\r
+ n[0] += t0*t0; n[1] += t1*t1;\r
+ n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j];\r
+ }\r
+\r
+ for( j = 0; j < 4; j++ )\r
+ n[j] = 1./sqrt(n[j]);\r
+\r
+ for( j = 0; j < 3; j++ )\r
+ {\r
+ h[j] *= n[0]; v[j] *= n[1];\r
+ d1[j] *= n[2]; d2[j] *= n[3];\r
+ }\r
+\r
+ // orthogonality constraints on (h, v) and (d1, d2)\r
+ Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1];\r
+ Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1];\r
+ bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2];\r
+ }\r
+\r
+ // least-squares solve for (1/fx^2, 1/fy^2)\r
+ cvSolve( _A, _b, &_f, CV_NORMAL + CV_SVD );\r
+ a[0] = sqrt(fabs(1./f[0]));\r
+ a[4] = sqrt(fabs(1./f[1]));\r
+ if( aspectRatio != 0 )\r
+ {\r
+ double tf = (a[0] + a[4])/(aspectRatio + 1.);\r
+ a[0] = aspectRatio*tf;\r
+ a[4] = tf;\r
+ }\r
+\r
+ cvConvert( &_a, cameraMatrix );\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &_A );\r
+ cvReleaseMat( &_b );\r
+ cvReleaseMat( &_allH );\r
+ // NOTE(review): _allK is never allocated in this function; no-op release\r
+ cvReleaseMat( &_allK );\r
+}\r
+\r
+\r
+/* Finds the intrinsic camera parameters (3x3 camera matrix and 4 or 5\r
+ distortion coefficients) and, optionally, the per-view extrinsic\r
+ parameters (rvecs/tvecs) from several views of a known calibration\r
+ pattern. objectPoints/imagePoints concatenate the points of all views;\r
+ npoints holds the per-view point counts (each >= 4). `flags` is a\r
+ combination of the CV_CALIB_* constants controlling which parameters\r
+ are estimated vs. kept fixed. */\r
+CV_IMPL void\r
+cvCalibrateCamera2( const CvMat* objectPoints,\r
+ const CvMat* imagePoints,\r
+ const CvMat* npoints,\r
+ CvSize imageSize,\r
+ CvMat* cameraMatrix, CvMat* distCoeffs,\r
+ CvMat* rvecs, CvMat* tvecs,\r
+ int flags )\r
+{\r
+ // 4 intrinsics (fx,fy,cx,cy) + 5 distortion coefficients (k1,k2,p1,p2,k3)\r
+ const int NINTRINSIC = 9;\r
+ CvMat *_M = 0, *_m = 0, *_Ji = 0, *_Je = 0, *_err = 0;\r
+ CvLevMarq solver;\r
+\r
+ CV_FUNCNAME( "cvCalibrateCamera2" );\r
+\r
+ __BEGIN__;\r
+\r
+ double A[9], k[5] = {0,0,0,0,0};\r
+ CvMat _A = cvMat(3, 3, CV_64F, A), _k;\r
+ int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;\r
+ double aspectRatio = 0.;\r
+\r
+ // 0. check the parameters & allocate buffers\r
+ if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints) ||\r
+ !CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix) || !CV_IS_MAT(distCoeffs) )\r
+ CV_ERROR( CV_StsBadArg, "One of required vector arguments is not a valid matrix" );\r
+\r
+ if( imageSize.width <= 0 || imageSize.height <= 0 )\r
+ CV_ERROR( CV_StsOutOfRange, "image width and height must be positive" );\r
+\r
+ if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||\r
+ (npoints->rows != 1 && npoints->cols != 1) )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "the array of point counters must be 1-dimensional integer vector" );\r
+\r
+ nimages = npoints->rows*npoints->cols;\r
+ npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);\r
+\r
+ if( rvecs )\r
+ {\r
+ cn = CV_MAT_CN(rvecs->type);\r
+ if( !CV_IS_MAT(rvecs) ||\r
+ (CV_MAT_DEPTH(rvecs->type) != CV_32F && CV_MAT_DEPTH(rvecs->type) != CV_64F) ||\r
+ ((rvecs->rows != nimages || (rvecs->cols*cn != 3 && rvecs->cols*cn != 9)) &&\r
+ (rvecs->rows != 1 || rvecs->cols != nimages || cn != 3)) )\r
+ CV_ERROR( CV_StsBadArg, "the output array of rotation vectors must be 3-channel "\r
+ "1xn or nx1 array or 1-channel nx3 or nx9 array, where n is the number of views" );\r
+ }\r
+\r
+ if( tvecs )\r
+ {\r
+ cn = CV_MAT_CN(tvecs->type);\r
+ if( !CV_IS_MAT(tvecs) ||\r
+ (CV_MAT_DEPTH(tvecs->type) != CV_32F && CV_MAT_DEPTH(tvecs->type) != CV_64F) ||\r
+ ((tvecs->rows != nimages || tvecs->cols*cn != 3) &&\r
+ (tvecs->rows != 1 || tvecs->cols != nimages || cn != 3)) )\r
+ CV_ERROR( CV_StsBadArg, "the output array of translation vectors must be 3-channel "\r
+ "1xn or nx1 array or 1-channel nx3 array, where n is the number of views" );\r
+ }\r
+\r
+ if( (CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 &&\r
+ CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) ||\r
+ cameraMatrix->rows != 3 || cameraMatrix->cols != 3 )\r
+ CV_ERROR( CV_StsBadArg,\r
+ "Intrinsic parameters must be 3x3 floating-point matrix" );\r
+\r
+ if( (CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 &&\r
+ CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||\r
+ (distCoeffs->cols != 1 && distCoeffs->rows != 1) ||\r
+ (distCoeffs->cols*distCoeffs->rows != 4 &&\r
+ distCoeffs->cols*distCoeffs->rows != 5) )\r
+ CV_ERROR( CV_StsBadArg,\r
+ "Distortion coefficients must be 4x1, 1x4, 5x1 or 1x5 floating-point matrix" );\r
+\r
+ for( i = 0; i < nimages; i++ )\r
+ {\r
+ ni = npoints->data.i[i*npstep];\r
+ if( ni < 4 )\r
+ {\r
+ char buf[100];\r
+ sprintf( buf, "The number of points in the view #%d is < 4", i );\r
+ CV_ERROR( CV_StsOutOfRange, buf );\r
+ }\r
+ maxPoints = MAX( maxPoints, ni );\r
+ total += ni;\r
+ }\r
+\r
+ CV_CALL( _M = cvCreateMat( 1, total, CV_64FC3 ));\r
+ CV_CALL( _m = cvCreateMat( 1, total, CV_64FC2 ));\r
+\r
+ CV_CALL( cvConvertPointsHomogeneous( objectPoints, _M ));\r
+ CV_CALL( cvConvertPointsHomogeneous( imagePoints, _m ));\r
+\r
+ // total parameter vector: intrinsics + 6 extrinsics (rvec, tvec) per view\r
+ nparams = NINTRINSIC + nimages*6;\r
+ CV_CALL( _Ji = cvCreateMat( maxPoints*2, NINTRINSIC, CV_64FC1 ));\r
+ CV_CALL( _Je = cvCreateMat( maxPoints*2, 6, CV_64FC1 ));\r
+ CV_CALL( _err = cvCreateMat( maxPoints*2, 1, CV_64FC1 ));\r
+ cvZero( _Ji );\r
+\r
+ _k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);\r
+ // a 4-element distortion vector has no k3 => keep it fixed at 0\r
+ if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) == 4 )\r
+ flags |= CV_CALIB_FIX_K3;\r
+\r
+ // 1. initialize intrinsic parameters & LM solver\r
+ if( flags & CV_CALIB_USE_INTRINSIC_GUESS )\r
+ {\r
+ cvConvert( cameraMatrix, &_A );\r
+ if( A[0] <= 0 || A[4] <= 0 )\r
+ CV_ERROR( CV_StsOutOfRange, "Focal length (fx and fy) must be positive" );\r
+ if( A[2] < 0 || A[2] >= imageSize.width ||\r
+ A[5] < 0 || A[5] >= imageSize.height )\r
+ CV_ERROR( CV_StsOutOfRange, "Principal point must be within the image" );\r
+ if( fabs(A[1]) > 1e-5 )\r
+ CV_ERROR( CV_StsOutOfRange, "Non-zero skew is not supported by the function" );\r
+ if( fabs(A[3]) > 1e-5 || fabs(A[6]) > 1e-5 ||\r
+ fabs(A[7]) > 1e-5 || fabs(A[8]-1) > 1e-5 )\r
+ CV_ERROR( CV_StsOutOfRange,\r
+ "The intrinsic matrix must have [fx 0 cx; 0 fy cy; 0 0 1] shape" );\r
+ A[1] = A[3] = A[6] = A[7] = 0.;\r
+ A[8] = 1.;\r
+\r
+ if( flags & CV_CALIB_FIX_ASPECT_RATIO )\r
+ aspectRatio = A[0]/A[4];\r
+ cvConvert( distCoeffs, &_k );\r
+ }\r
+ else\r
+ {\r
+ // no guess given: the rig must be planar so the linear\r
+ // initialization (cvInitIntrinsicParams2D) can be used\r
+ CvScalar mean, sdv;\r
+ cvAvgSdv( _M, &mean, &sdv );\r
+ if( fabs(mean.val[2]) > 1e-5 || fabs(sdv.val[2]) > 1e-5 )\r
+ CV_ERROR( CV_StsBadArg,\r
+ "For non-planar calibration rigs the initial intrinsic matrix must be specified" );\r
+ for( i = 0; i < total; i++ )\r
+ ((CvPoint3D64f*)_M->data.db)[i].z = 0.;\r
+\r
+ // even without a full guess, FIX_ASPECT_RATIO reads fx/fy\r
+ // from the supplied cameraMatrix to fix their ratio\r
+ if( flags & CV_CALIB_FIX_ASPECT_RATIO )\r
+ {\r
+ aspectRatio = cvmGet(cameraMatrix,0,0);\r
+ aspectRatio /= cvmGet(cameraMatrix,1,1);\r
+ if( aspectRatio < 0.01 || aspectRatio > 100 )\r
+ CV_ERROR( CV_StsOutOfRange,\r
+ "The specified aspect ratio (=A[0][0]/A[1][1]) is incorrect" );\r
+ }\r
+ cvInitIntrinsicParams2D( _M, _m, npoints, imageSize, &_A, aspectRatio );\r
+ }\r
+\r
+ solver.init( nparams, 0, cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) );\r
+\r
+ {\r
+ // pack the intrinsics into the solver's parameter vector and\r
+ // zero the mask entries of parameters that must stay fixed\r
+ double* param = solver.param->data.db;\r
+ uchar* mask = solver.mask->data.ptr;\r
+\r
+ param[0] = A[0]; param[1] = A[4]; param[2] = A[2]; param[3] = A[5];\r
+ param[4] = k[0]; param[5] = k[1]; param[6] = k[2]; param[7] = k[3];\r
+ param[8] = k[4];\r
+\r
+ if( flags & CV_CALIB_FIX_FOCAL_LENGTH )\r
+ mask[0] = mask[1] = 0;\r
+ if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )\r
+ mask[2] = mask[3] = 0;\r
+ if( flags & CV_CALIB_ZERO_TANGENT_DIST )\r
+ {\r
+ param[6] = param[7] = 0;\r
+ mask[6] = mask[7] = 0;\r
+ }\r
+ if( flags & CV_CALIB_FIX_K1 )\r
+ mask[4] = 0;\r
+ if( flags & CV_CALIB_FIX_K2 )\r
+ mask[5] = 0;\r
+ if( flags & CV_CALIB_FIX_K3 )\r
+ mask[8] = 0;\r
+ }\r
+\r
+ // 2. initialize extrinsic parameters\r
+ for( i = 0, pos = 0; i < nimages; i++, pos += ni )\r
+ {\r
+ CvMat _Mi, _mi, _ri, _ti;\r
+ ni = npoints->data.i[i*npstep];\r
+\r
+ cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );\r
+ cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );\r
+\r
+ cvGetCols( _M, &_Mi, pos, pos + ni );\r
+ cvGetCols( _m, &_mi, pos, pos + ni );\r
+\r
+ cvFindExtrinsicCameraParams2( &_Mi, &_mi, &_A, &_k, &_ri, &_ti );\r
+ }\r
+\r
+ // 3. run the optimization\r
+ for(;;)\r
+ {\r
+ const CvMat* _param = 0;\r
+ CvMat *_JtJ = 0, *_JtErr = 0;\r
+ double* _errNorm = 0;\r
+ bool proceed = solver.updateAlt( _param, _JtJ, _JtErr, _errNorm );\r
+ double *param = solver.param->data.db, *pparam = solver.prevParam->data.db;\r
+\r
+ if( flags & CV_CALIB_FIX_ASPECT_RATIO )\r
+ {\r
+ param[0] = param[1]*aspectRatio;\r
+ pparam[0] = pparam[1]*aspectRatio;\r
+ }\r
+\r
+ // unpack the current intrinsics from the parameter vector\r
+ A[0] = param[0]; A[4] = param[1];\r
+ A[2] = param[2]; A[5] = param[3];\r
+ k[0] = param[4]; k[1] = param[5]; k[2] = param[6];\r
+ k[3] = param[7];\r
+ k[4] = param[8];\r
+\r
+ if( !proceed )\r
+ break;\r
+\r
+ for( i = 0, pos = 0; i < nimages; i++, pos += ni )\r
+ {\r
+ CvMat _Mi, _mi, _ri, _ti, _dpdr, _dpdt, _dpdf, _dpdc, _dpdk, _mp, _part;\r
+ ni = npoints->data.i[i*npstep];\r
+\r
+ cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );\r
+ cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );\r
+\r
+ cvGetCols( _M, &_Mi, pos, pos + ni );\r
+ cvGetCols( _m, &_mi, pos, pos + ni );\r
+\r
+ // shrink the preallocated Jacobian/error buffers to 2*ni rows\r
+ // for the current view (headers only; data is reused)\r
+ _Je->rows = _Ji->rows = _err->rows = ni*2;\r
+ cvGetCols( _Je, &_dpdr, 0, 3 );\r
+ cvGetCols( _Je, &_dpdt, 3, 6 );\r
+ cvGetCols( _Ji, &_dpdf, 0, 2 );\r
+ cvGetCols( _Ji, &_dpdc, 2, 4 );\r
+ cvGetCols( _Ji, &_dpdk, 4, NINTRINSIC );\r
+ // 2-channel view of _err used as the projected-points buffer\r
+ cvReshape( _err, &_mp, 2, 1 );\r
+\r
+ if( _JtJ || _JtErr )\r
+ {\r
+ cvProjectPoints2( &_Mi, &_ri, &_ti, &_A, &_k, &_mp, &_dpdr, &_dpdt,\r
+ (flags & CV_CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,\r
+ (flags & CV_CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,\r
+ (flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);\r
+ }\r
+ else\r
+ cvProjectPoints2( &_Mi, &_ri, &_ti, &_A, &_k, &_mp );\r
+\r
+ // residual = projected - measured\r
+ cvSub( &_mp, &_mi, &_mp );\r
+\r
+ if( _JtJ || _JtErr )\r
+ {\r
+ // accumulate this view's contribution to J^T*J and J^T*err,\r
+ // exploiting the block-sparse structure of the full Jacobian\r
+ cvGetSubRect( _JtJ, &_part, cvRect(0,0,NINTRINSIC,NINTRINSIC) );\r
+ cvGEMM( _Ji, _Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+\r
+ cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,NINTRINSIC+i*6,6,6) );\r
+ cvGEMM( _Je, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );\r
+\r
+ cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,0,6,NINTRINSIC) );\r
+ cvGEMM( _Ji, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );\r
+\r
+ cvGetRows( _JtErr, &_part, 0, NINTRINSIC );\r
+ cvGEMM( _Ji, _err, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+\r
+ cvGetRows( _JtErr, &_part, NINTRINSIC + i*6, NINTRINSIC + (i+1)*6 );\r
+ cvGEMM( _Je, _err, 1, 0, 0, &_part, CV_GEMM_A_T );\r
+ }\r
+\r
+ if( _errNorm )\r
+ {\r
+ double errNorm = cvNorm( &_mp, 0, CV_L2 );\r
+ *_errNorm += errNorm*errNorm;\r
+ }\r
+ }\r
+ }\r
+\r
+ // 4. store the results\r
+ cvConvert( &_A, cameraMatrix );\r
+ cvConvert( &_k, distCoeffs );\r
+\r
+ for( i = 0; i < nimages; i++ )\r
+ {\r
+ CvMat src, dst;\r
+ if( rvecs )\r
+ {\r
+ src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 );\r
+ if( rvecs->rows == nimages && rvecs->cols*CV_MAT_CN(rvecs->type) == 9 )\r
+ {\r
+ // nx9 layout: store the full 3x3 rotation matrix per view\r
+ dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type),\r
+ rvecs->data.ptr + rvecs->step*i );\r
+ cvRodrigues2( &src, &_A );\r
+ cvConvert( &_A, &dst );\r
+ }\r
+ else\r
+ {\r
+ dst = cvMat( 3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 ?\r
+ rvecs->data.ptr + i*CV_ELEM_SIZE(rvecs->type) :\r
+ rvecs->data.ptr + rvecs->step*i );\r
+ cvConvert( &src, &dst );\r
+ }\r
+ }\r
+ if( tvecs )\r
+ {\r
+ src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );\r
+ dst = cvMat( 3, 1, CV_MAT_TYPE(tvecs->type), tvecs->rows == 1 ?\r
+ tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :\r
+ tvecs->data.ptr + tvecs->step*i );\r
+ cvConvert( &src, &dst );\r
+ }\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &_M );\r
+ cvReleaseMat( &_m );\r
+ cvReleaseMat( &_Ji );\r
+ cvReleaseMat( &_Je );\r
+ cvReleaseMat( &_err );\r
+}\r
+\r
+\r
+/* Computes useful camera characteristics from the 3x3 camera matrix:\r
+ fields of view in degrees (fovx/fovy), focal length in world units\r
+ (focalLength), principal point in world units (principalPoint) and\r
+ pixel aspect ratio fy/fx (pasp). apertureWidth/apertureHeight give\r
+ the physical sensor size; when zero, world units degenerate to pixels.\r
+ Any of the output pointers may be NULL to skip that output. */\r
+void cvCalibrationMatrixValues( const CvMat *calibMatr, CvSize imgSize,\r
+ double apertureWidth, double apertureHeight, double *fovx, double *fovy,\r
+ double *focalLength, CvPoint2D64f *principalPoint, double *pasp )\r
+{\r
+ double alphax, alphay, mx, my;\r
+ int imgWidth = imgSize.width, imgHeight = imgSize.height;\r
+\r
+ CV_FUNCNAME("cvCalibrationMatrixValues");\r
+ __BEGIN__;\r
+\r
+ /* Validate parameters. */\r
+\r
+ if(calibMatr == 0)\r
+ CV_ERROR(CV_StsNullPtr, "Some of parameters is a NULL pointer!");\r
+\r
+ if(!CV_IS_MAT(calibMatr))\r
+ CV_ERROR(CV_StsUnsupportedFormat, "Input parameters must be a matrices!");\r
+\r
+ if(calibMatr->cols != 3 || calibMatr->rows != 3)\r
+ CV_ERROR(CV_StsUnmatchedSizes, "Size of matrices must be 3x3!");\r
+\r
+ alphax = cvmGet(calibMatr, 0, 0);\r
+ alphay = cvmGet(calibMatr, 1, 1);\r
+ assert(imgWidth != 0 && imgHeight != 0 && alphax != 0.0 && alphay != 0.0);\r
+\r
+ /* Calculate pixel aspect ratio. */\r
+ if(pasp)\r
+ *pasp = alphay / alphax;\r
+\r
+ /* Calculate number of pixel per realworld unit. */\r
+\r
+ if(apertureWidth != 0.0 && apertureHeight != 0.0) {\r
+ mx = imgWidth / apertureWidth;\r
+ my = imgHeight / apertureHeight;\r
+ } else {\r
+ mx = 1.0;\r
+ /* BUGFIX: the previous code read *pasp here, which crashed when the\r
+ caller passed pasp == NULL; compute the ratio directly instead\r
+ (identical value to what *pasp would have held). */\r
+ my = alphay / alphax;\r
+ }\r
+\r
+ /* Calculate fovx and fovy. */\r
+\r
+ if(fovx)\r
+ *fovx = 2 * atan(imgWidth / (2 * alphax)) * 180.0 / CV_PI;\r
+\r
+ if(fovy)\r
+ *fovy = 2 * atan(imgHeight / (2 * alphay)) * 180.0 / CV_PI;\r
+\r
+ /* Calculate focal length. */\r
+\r
+ if(focalLength)\r
+ *focalLength = alphax / mx;\r
+\r
+ /* Calculate principle point. */\r
+\r
+ if(principalPoint)\r
+ *principalPoint = cvPoint2D64f(cvmGet(calibMatr, 0, 2) / mx, cvmGet(calibMatr, 1, 2) / my);\r
+\r
+ __END__;\r
+}\r
+\r
+\r
+//////////////////////////////// Stereo Calibration ///////////////////////////////////\r
+\r
+// qsort() comparator for doubles: returns a negative value when a < b,\r
+// a positive value when a > b, and 0 otherwise (including NaN operands).\r
+static int dbCmp( const void* _a, const void* _b )\r
+{\r
+ double va = *(const double*)_a;\r
+ double vb = *(const double*)_b;\r
+\r
+ if( va < vb )\r
+ return -1;\r
+ if( vb < va )\r
+ return 1;\r
+ return 0;\r
+}\r
+\r
+\r
+void cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1,\r
+ const CvMat* _imagePoints2, const CvMat* _npoints,\r
+ CvMat* _cameraMatrix1, CvMat* _distCoeffs1,\r
+ CvMat* _cameraMatrix2, CvMat* _distCoeffs2,\r
+ CvSize imageSize, CvMat* _R, CvMat* _T,\r
+ CvMat* _E, CvMat* _F,\r
+ CvTermCriteria termCrit, int flags )\r
+{\r
+ const int NINTRINSIC = 9;\r
+ CvMat* npoints = 0;\r
+ CvMat* err = 0;\r
+ CvMat* J_LR = 0;\r
+ CvMat* Je = 0;\r
+ CvMat* Ji = 0;\r
+ CvMat* imagePoints[2] = {0,0};\r
+ CvMat* objectPoints = 0;\r
+ CvMat* RT0 = 0;\r
+ CvLevMarq solver;\r
+\r
+ CV_FUNCNAME( "cvStereoCalibrate" );\r
+\r
+ __BEGIN__;\r
+\r
+ double A[2][9], dk[2][5]={{0,0,0,0,0},{0,0,0,0,0}}, rlr[9];\r
+ CvMat K[2], Dist[2], om_LR, T_LR;\r
+ CvMat R_LR = cvMat(3, 3, CV_64F, rlr);\r
+ int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;\r
+ int nparams;\r
+ bool recomputeIntrinsics = false;\r
+ double aspectRatio[2] = {0,0};\r
+\r
+ CV_ASSERT( CV_IS_MAT(_imagePoints1) && CV_IS_MAT(_imagePoints2) &&\r
+ CV_IS_MAT(_objectPoints) && CV_IS_MAT(_npoints) &&\r
+ CV_IS_MAT(_R) && CV_IS_MAT(_T) );\r
+\r
+ CV_ASSERT( CV_ARE_TYPES_EQ(_imagePoints1, _imagePoints2) &&\r
+ CV_ARE_DEPTHS_EQ(_imagePoints1, _objectPoints) );\r
+\r
+ CV_ASSERT( (_npoints->cols == 1 || _npoints->rows == 1) &&\r
+ CV_MAT_TYPE(_npoints->type) == CV_32SC1 );\r
+\r
+ nimages = _npoints->cols + _npoints->rows - 1;\r
+ npoints = cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type );\r
+ cvCopy( _npoints, npoints );\r
+\r
+ for( i = 0, pointsTotal = 0; i < nimages; i++ )\r
+ {\r
+ maxPoints = MAX(maxPoints, npoints->data.i[i]);\r
+ pointsTotal += npoints->data.i[i];\r
+ }\r
+\r
+ objectPoints = cvCreateMat( _objectPoints->rows, _objectPoints->cols,\r
+ CV_64FC(CV_MAT_CN(_objectPoints->type)));\r
+ cvConvert( _objectPoints, objectPoints );\r
+ cvReshape( objectPoints, objectPoints, 3, 1 );\r
+\r
+ for( k = 0; k < 2; k++ )\r
+ {\r
+ const CvMat* points = k == 0 ? _imagePoints1 : _imagePoints2;\r
+ const CvMat* cameraMatrix = k == 0 ? _cameraMatrix1 : _cameraMatrix2;\r
+ const CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;\r
+\r
+ int cn = CV_MAT_CN(_imagePoints1->type);\r
+ CV_ASSERT( (CV_MAT_DEPTH(_imagePoints1->type) == CV_32F ||\r
+ CV_MAT_DEPTH(_imagePoints1->type) == CV_64F) &&\r
+ ((_imagePoints1->rows == pointsTotal && _imagePoints1->cols*cn == 2) ||\r
+ (_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );\r
+\r
+ K[k] = cvMat(3,3,CV_64F,A[k]);\r
+ Dist[k] = cvMat(1,5,CV_64F,dk[k]);\r
+\r
+ imagePoints[k] = cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type)));\r
+ cvConvert( points, imagePoints[k] );\r
+ cvReshape( imagePoints[k], imagePoints[k], 2, 1 );\r
+\r
+ if( flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS|\r
+ CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_FIX_FOCAL_LENGTH) )\r
+ cvConvert( cameraMatrix, &K[k] );\r
+\r
+ if( flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS|\r
+ CV_CALIB_FIX_K1|CV_CALIB_FIX_K2|CV_CALIB_FIX_K3) )\r
+ {\r
+ CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );\r
+ cvConvert( distCoeffs, &tdist );\r
+ }\r
+\r
+ if( !(flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS)))\r
+ {\r
+ cvCalibrateCamera2( objectPoints, imagePoints[k],\r
+ npoints, imageSize, &K[k], &Dist[k], 0, 0, flags );\r
+ }\r
+ }\r
+\r
+ if( flags & CV_CALIB_SAME_FOCAL_LENGTH )\r
+ {\r
+ static const int avg_idx[] = { 0, 4, 2, 5, -1 };\r
+ for( k = 0; avg_idx[k] >= 0; k++ )\r
+ A[0][avg_idx[k]] = A[1][avg_idx[k]] = (A[0][avg_idx[k]] + A[1][avg_idx[k]])*0.5;\r
+ }\r
+\r
+ if( flags & CV_CALIB_FIX_ASPECT_RATIO )\r
+ {\r
+ for( k = 0; k < 2; k++ )\r
+ aspectRatio[k] = A[k][0]/A[k][4];\r
+ }\r
+\r
+ recomputeIntrinsics = (flags & CV_CALIB_FIX_INTRINSIC) == 0;\r
+\r
+ err = cvCreateMat( maxPoints*2, 1, CV_64F );\r
+ Je = cvCreateMat( maxPoints*2, 6, CV_64F );\r
+ J_LR = cvCreateMat( maxPoints*2, 6, CV_64F );\r
+ Ji = cvCreateMat( maxPoints*2, NINTRINSIC, CV_64F );\r
+ cvZero( Ji );\r
+\r
+ // we optimize for the inter-camera R(3),t(3), then, optionally,\r
+ // for intrinisic parameters of each camera ((fx,fy,cx,cy,k1,k2,p1,p2) ~ 8 parameters).\r
+ nparams = 6*(nimages+1) + (recomputeIntrinsics ? NINTRINSIC*2 : 0);\r
+\r
+ // storage for initial [om(R){i}|t{i}] (in order to compute the median for each component)\r
+ RT0 = cvCreateMat( 6, nimages, CV_64F );\r
+\r
+ solver.init( nparams, 0, termCrit );\r
+ if( recomputeIntrinsics )\r
+ {\r
+ uchar* imask = solver.mask->data.ptr + nparams - NINTRINSIC*2;\r
+ if( flags & CV_CALIB_FIX_ASPECT_RATIO )\r
+ imask[0] = imask[NINTRINSIC] = 0;\r
+ if( flags & CV_CALIB_FIX_FOCAL_LENGTH )\r
+ imask[0] = imask[1] = imask[NINTRINSIC] = imask[NINTRINSIC+1] = 0;\r
+ if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )\r
+ imask[2] = imask[3] = imask[NINTRINSIC+2] = imask[NINTRINSIC+3] = 0;\r
+ if( flags & CV_CALIB_ZERO_TANGENT_DIST )\r
+ imask[6] = imask[7] = imask[NINTRINSIC+6] = imask[NINTRINSIC+7] = 0;\r
+ if( flags & CV_CALIB_FIX_K1 )\r
+ imask[4] = imask[NINTRINSIC+4] = 0;\r
+ if( flags & CV_CALIB_FIX_K2 )\r
+ imask[5] = imask[NINTRINSIC+5] = 0;\r
+ if( flags & CV_CALIB_FIX_K3 )\r
+ imask[8] = imask[NINTRINSIC+8] = 0;\r
+ }\r
+\r
+ /*\r
+ Compute initial estimate of pose\r
+\r
+ For each image, compute:\r
+ R(om) is the rotation matrix of om\r
+ om(R) is the rotation vector of R\r
+ R_ref = R(om_right) * R(om_left)'\r
+ T_ref_list = [T_ref_list; T_right - R_ref * T_left]\r
+ om_ref_list = {om_ref_list; om(R_ref)]\r
+\r
+ om = median(om_ref_list)\r
+ T = median(T_ref_list)\r
+ */\r
+ for( i = ofs = 0; i < nimages; ofs += ni, i++ )\r
+ {\r
+ ni = npoints->data.i[i];\r
+ CvMat objpt_i;\r
+ double _om[2][3], r[2][9], t[2][3];\r
+ CvMat om[2], R[2], T[2], imgpt_i[2];\r
+\r
+ objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);\r
+ for( k = 0; k < 2; k++ )\r
+ {\r
+ imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);\r
+ om[k] = cvMat(3, 1, CV_64F, _om[k]);\r
+ R[k] = cvMat(3, 3, CV_64F, r[k]);\r
+ T[k] = cvMat(3, 1, CV_64F, t[k]);\r
+\r
+ // FIXME: here we ignore activePoints[k] because of\r
+            // the limited API of cvFindExtrinsicCameraParams2
+ cvFindExtrinsicCameraParams2( &objpt_i, &imgpt_i[k], &K[k], &Dist[k], &om[k], &T[k] );\r
+ cvRodrigues2( &om[k], &R[k] );\r
+ if( k == 0 )\r
+ {\r
+ // save initial om_left and T_left\r
+ solver.param->data.db[(i+1)*6] = _om[0][0];\r
+ solver.param->data.db[(i+1)*6 + 1] = _om[0][1];\r
+ solver.param->data.db[(i+1)*6 + 2] = _om[0][2];\r
+ solver.param->data.db[(i+1)*6 + 3] = t[0][0];\r
+ solver.param->data.db[(i+1)*6 + 4] = t[0][1];\r
+ solver.param->data.db[(i+1)*6 + 5] = t[0][2];\r
+ }\r
+ }\r
+ cvGEMM( &R[1], &R[0], 1, 0, 0, &R[0], CV_GEMM_B_T );\r
+ cvGEMM( &R[0], &T[0], -1, &T[1], 1, &T[1] );\r
+ cvRodrigues2( &R[0], &T[0] );\r
+ RT0->data.db[i] = t[0][0];\r
+ RT0->data.db[i + nimages] = t[0][1];\r
+ RT0->data.db[i + nimages*2] = t[0][2];\r
+ RT0->data.db[i + nimages*3] = t[1][0];\r
+ RT0->data.db[i + nimages*4] = t[1][1];\r
+ RT0->data.db[i + nimages*5] = t[1][2];\r
+ }\r
+\r
+ // find the medians and save the first 6 parameters\r
+ for( i = 0; i < 6; i++ )\r
+ {\r
+ qsort( RT0->data.db + i*nimages, nimages, CV_ELEM_SIZE(RT0->type), dbCmp );\r
+ solver.param->data.db[i] = nimages % 2 != 0 ? RT0->data.db[i*nimages + nimages/2] :\r
+ (RT0->data.db[i*nimages + nimages/2 - 1] + RT0->data.db[i*nimages + nimages/2])*0.5;\r
+ }\r
+\r
+ if( recomputeIntrinsics )\r
+ for( k = 0; k < 2; k++ )\r
+ {\r
+ double* iparam = solver.param->data.db + (nimages+1)*6 + k*NINTRINSIC;\r
+ if( flags & CV_CALIB_ZERO_TANGENT_DIST )\r
+ dk[k][2] = dk[k][3] = 0;\r
+ iparam[0] = A[k][0]; iparam[1] = A[k][4]; iparam[2] = A[k][2]; iparam[3] = A[k][5];\r
+ iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];\r
+ iparam[7] = dk[k][3]; iparam[8] = dk[k][4];\r
+ }\r
+\r
+ om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);\r
+ T_LR = cvMat(3, 1, CV_64F, solver.param->data.db + 3);\r
+\r
+ for(;;)\r
+ {\r
+ const CvMat* param = 0;\r
+ CvMat tmpimagePoints;\r
+ CvMat *JtJ = 0, *JtErr = 0;\r
+ double* errNorm = 0;\r
+ double _omR[3], _tR[3];\r
+ double _dr3dr1[9], _dr3dr2[9], /*_dt3dr1[9],*/ _dt3dr2[9], _dt3dt1[9], _dt3dt2[9];\r
+ CvMat dr3dr1 = cvMat(3, 3, CV_64F, _dr3dr1);\r
+ CvMat dr3dr2 = cvMat(3, 3, CV_64F, _dr3dr2);\r
+ //CvMat dt3dr1 = cvMat(3, 3, CV_64F, _dt3dr1);\r
+ CvMat dt3dr2 = cvMat(3, 3, CV_64F, _dt3dr2);\r
+ CvMat dt3dt1 = cvMat(3, 3, CV_64F, _dt3dt1);\r
+ CvMat dt3dt2 = cvMat(3, 3, CV_64F, _dt3dt2);\r
+ CvMat om[2], T[2], imgpt_i[2];\r
+ CvMat dpdrot_hdr, dpdt_hdr, dpdf_hdr, dpdc_hdr, dpdk_hdr;\r
+ CvMat *dpdrot = &dpdrot_hdr, *dpdt = &dpdt_hdr, *dpdf = 0, *dpdc = 0, *dpdk = 0;\r
+\r
+ if( !solver.updateAlt( param, JtJ, JtErr, errNorm ))\r
+ break;\r
+\r
+ cvRodrigues2( &om_LR, &R_LR );\r
+ om[1] = cvMat(3,1,CV_64F,_omR);\r
+ T[1] = cvMat(3,1,CV_64F,_tR);\r
+\r
+ if( recomputeIntrinsics )\r
+ {\r
+ double* iparam = solver.param->data.db + (nimages+1)*6;\r
+ double* ipparam = solver.prevParam->data.db + (nimages+1)*6;\r
+ dpdf = &dpdf_hdr;\r
+ dpdc = &dpdc_hdr;\r
+ dpdk = &dpdk_hdr;\r
+ if( flags & CV_CALIB_SAME_FOCAL_LENGTH )\r
+ {\r
+ iparam[NINTRINSIC] = iparam[0];\r
+ iparam[NINTRINSIC+1] = iparam[1];\r
+ ipparam[NINTRINSIC] = ipparam[0];\r
+ ipparam[NINTRINSIC+1] = ipparam[1];\r
+ }\r
+ if( flags & CV_CALIB_FIX_ASPECT_RATIO )\r
+ {\r
+ iparam[0] = iparam[1]*aspectRatio[0];\r
+ iparam[NINTRINSIC] = iparam[NINTRINSIC+1]*aspectRatio[1];\r
+ ipparam[0] = ipparam[1]*aspectRatio[0];\r
+ ipparam[NINTRINSIC] = ipparam[NINTRINSIC+1]*aspectRatio[1];\r
+ }\r
+ for( k = 0; k < 2; k++ )\r
+ {\r
+ A[k][0] = iparam[k*NINTRINSIC+0];\r
+ A[k][4] = iparam[k*NINTRINSIC+1];\r
+ A[k][2] = iparam[k*NINTRINSIC+2];\r
+ A[k][5] = iparam[k*NINTRINSIC+3];\r
+ dk[k][0] = iparam[k*NINTRINSIC+4];\r
+ dk[k][1] = iparam[k*NINTRINSIC+5];\r
+ dk[k][2] = iparam[k*NINTRINSIC+6];\r
+ dk[k][3] = iparam[k*NINTRINSIC+7];\r
+ dk[k][4] = iparam[k*NINTRINSIC+8];\r
+ }\r
+ }\r
+\r
+ for( i = ofs = 0; i < nimages; ofs += ni, i++ )\r
+ {\r
+ ni = npoints->data.i[i];\r
+ CvMat objpt_i, _part;\r
+\r
+ om[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6);\r
+ T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3);\r
+\r
+ if( JtJ || JtErr )\r
+ cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1], &dr3dr1, 0,\r
+ &dr3dr2, 0, 0, &dt3dt1, &dt3dr2, &dt3dt2 );\r
+ else\r
+ cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1] );\r
+\r
+ objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);\r
+ err->rows = Je->rows = J_LR->rows = Ji->rows = ni*2;\r
+ cvReshape( err, &tmpimagePoints, 2, 1 );\r
+\r
+ cvGetCols( Ji, &dpdf_hdr, 0, 2 );\r
+ cvGetCols( Ji, &dpdc_hdr, 2, 4 );\r
+ cvGetCols( Ji, &dpdk_hdr, 4, NINTRINSIC );\r
+ cvGetCols( Je, &dpdrot_hdr, 0, 3 );\r
+ cvGetCols( Je, &dpdt_hdr, 3, 6 );\r
+\r
+ for( k = 0; k < 2; k++ )\r
+ {\r
+ double maxErr, l2err;\r
+ imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);\r
+\r
+ if( JtJ || JtErr )\r
+ cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k],\r
+ &tmpimagePoints, dpdrot, dpdt, dpdf, dpdc, dpdk,\r
+ (flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio[k] : 0);\r
+ else\r
+ cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], &tmpimagePoints );\r
+ cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints );\r
+\r
+ l2err = cvNorm( &tmpimagePoints, 0, CV_L2 );\r
+ maxErr = cvNorm( &tmpimagePoints, 0, CV_C );\r
+\r
+ if( JtJ || JtErr )\r
+ {\r
+ int iofs = (nimages+1)*6 + k*NINTRINSIC, eofs = (i+1)*6;\r
+ assert( JtJ && JtErr );\r
+\r
+ if( k == 1 )\r
+ {\r
+ // d(err_{x|y}R) ~ de3\r
+ // convert de3/{dr3,dt3} => de3{dr1,dt1} & de3{dr2,dt2}\r
+ for( p = 0; p < ni*2; p++ )\r
+ {\r
+ CvMat de3dr3 = cvMat( 1, 3, CV_64F, Je->data.ptr + Je->step*p );\r
+ CvMat de3dt3 = cvMat( 1, 3, CV_64F, de3dr3.data.db + 3 );\r
+ CvMat de3dr2 = cvMat( 1, 3, CV_64F, J_LR->data.ptr + J_LR->step*p );\r
+ CvMat de3dt2 = cvMat( 1, 3, CV_64F, de3dr2.data.db + 3 );\r
+ double _de3dr1[3], _de3dt1[3];\r
+ CvMat de3dr1 = cvMat( 1, 3, CV_64F, _de3dr1 );\r
+ CvMat de3dt1 = cvMat( 1, 3, CV_64F, _de3dt1 );\r
+\r
+ cvMatMul( &de3dr3, &dr3dr1, &de3dr1 );\r
+ cvMatMul( &de3dt3, &dt3dt1, &de3dt1 );\r
+\r
+ cvMatMul( &de3dr3, &dr3dr2, &de3dr2 );\r
+ cvMatMulAdd( &de3dt3, &dt3dr2, &de3dr2, &de3dr2 );\r
+\r
+ cvMatMul( &de3dt3, &dt3dt2, &de3dt2 );\r
+\r
+ cvCopy( &de3dr1, &de3dr3 );\r
+ cvCopy( &de3dt1, &de3dt3 );\r
+ }\r
+\r
+ cvGetSubRect( JtJ, &_part, cvRect(0, 0, 6, 6) );\r
+ cvGEMM( J_LR, J_LR, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+\r
+ cvGetSubRect( JtJ, &_part, cvRect(eofs, 0, 6, 6) );\r
+ cvGEMM( J_LR, Je, 1, 0, 0, &_part, CV_GEMM_A_T );\r
+\r
+ cvGetRows( JtErr, &_part, 0, 6 );\r
+ cvGEMM( J_LR, err, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+ }\r
+\r
+ cvGetSubRect( JtJ, &_part, cvRect(eofs, eofs, 6, 6) );\r
+ cvGEMM( Je, Je, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+\r
+ cvGetRows( JtErr, &_part, eofs, eofs + 6 );\r
+ cvGEMM( Je, err, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+\r
+ if( recomputeIntrinsics )\r
+ {\r
+ cvGetSubRect( JtJ, &_part, cvRect(iofs, iofs, NINTRINSIC, NINTRINSIC) );\r
+ cvGEMM( Ji, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+ cvGetSubRect( JtJ, &_part, cvRect(iofs, eofs, NINTRINSIC, 6) );\r
+ cvGEMM( Je, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+ if( k == 1 )\r
+ {\r
+ cvGetSubRect( JtJ, &_part, cvRect(iofs, 0, NINTRINSIC, 6) );\r
+ cvGEMM( J_LR, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+ }\r
+ cvGetRows( JtErr, &_part, iofs, iofs + NINTRINSIC );\r
+ cvGEMM( Ji, err, 1, &_part, 1, &_part, CV_GEMM_A_T );\r
+ }\r
+ }\r
+\r
+ if( errNorm )\r
+ *errNorm += l2err*l2err;\r
+ }\r
+ }\r
+ }\r
+\r
+ cvRodrigues2( &om_LR, &R_LR );\r
+ if( _R->rows == 1 || _R->cols == 1 )\r
+ cvConvert( &om_LR, _R );\r
+ else\r
+ cvConvert( &R_LR, _R );\r
+ cvConvert( &T_LR, _T );\r
+\r
+ if( recomputeIntrinsics )\r
+ {\r
+ cvConvert( &K[0], _cameraMatrix1 );\r
+ cvConvert( &K[1], _cameraMatrix2 );\r
+\r
+ for( k = 0; k < 2; k++ )\r
+ {\r
+ CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;\r
+ CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );\r
+ cvConvert( &tdist, distCoeffs );\r
+ }\r
+ }\r
+\r
+ if( _E || _F )\r
+ {\r
+ double* t = T_LR.data.db;\r
+ double tx[] =\r
+ {\r
+ 0, -t[2], t[1],\r
+ t[2], 0, -t[0],\r
+ -t[1], t[0], 0\r
+ };\r
+ CvMat Tx = cvMat(3, 3, CV_64F, tx);\r
+ double e[9], f[9];\r
+ CvMat E = cvMat(3, 3, CV_64F, e);\r
+ CvMat F = cvMat(3, 3, CV_64F, f);\r
+ cvMatMul( &Tx, &R_LR, &E );\r
+ if( _E )\r
+ cvConvert( &E, _E );\r
+ if( _F )\r
+ {\r
+ double ik[9];\r
+ CvMat iK = cvMat(3, 3, CV_64F, ik);\r
+ cvInvert(&K[1], &iK);\r
+ cvGEMM( &iK, &E, 1, 0, 0, &E, CV_GEMM_A_T );\r
+ cvInvert(&K[0], &iK);\r
+ cvMatMul(&E, &iK, &F);\r
+ cvConvertScale( &F, _F, fabs(f[8]) > 0 ? 1./f[8] : 1 );\r
+ }\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &npoints );\r
+ cvReleaseMat( &err );\r
+ cvReleaseMat( &J_LR );\r
+ cvReleaseMat( &Je );\r
+ cvReleaseMat( &Ji );\r
+ cvReleaseMat( &RT0 );\r
+ cvReleaseMat( &objectPoints );\r
+ cvReleaseMat( &imagePoints[0] );\r
+ cvReleaseMat( &imagePoints[1] );\r
+}\r
+\r
+\r
+void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,\r
+ const CvMat* _distCoeffs1, const CvMat* _distCoeffs2,\r
+ CvSize imageSize, const CvMat* _R, const CvMat* _T,\r
+ CvMat* _R1, CvMat* _R2, CvMat* _P1, CvMat* _P2,\r
+ CvMat* _Q, int flags )\r
+{\r
+ double _om[3], _t[3], _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];\r
+ double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3];\r
+ CvMat om = cvMat(3, 1, CV_64F, _om);\r
+ CvMat t = cvMat(3, 1, CV_64F, _t);\r
+ CvMat uu = cvMat(3, 1, CV_64F, _uu);\r
+ CvMat r_r = cvMat(3, 3, CV_64F, _r_r);\r
+ CvMat pp = cvMat(3, 4, CV_64F, _pp);\r
+ CvMat ww = cvMat(3, 1, CV_64F, _ww); // temps\r
+ CvMat wR = cvMat(3, 3, CV_64F, _wr);\r
+ CvMat Z = cvMat(3, 1, CV_64F, _z);\r
+ CvMat Ri = cvMat(3, 3, CV_64F, _ri);\r
+ double nx = imageSize.width, ny = imageSize.height;\r
+ int i, k;\r
+\r
+ if( _R->rows == 3 && _R->cols == 3 )\r
+ cvRodrigues2(_R, &om); // get vector rotation\r
+ else\r
+ cvConvert(_R, &om); // it's already a rotation vector\r
+ cvConvertScale(&om, &om, -0.5); // get average rotation\r
+ cvRodrigues2(&om, &r_r); // rotate cameras to same orientation by averaging\r
+ cvMatMul(&r_r, _T, &t);\r
+\r
+ int idx = fabs(_t[0]) > fabs(_t[1]) ? 0 : 1;\r
+ double c = _t[idx], nt = cvNorm(&t, 0, CV_L2);\r
+ _uu[idx] = c > 0 ? 1 : -1;\r
+\r
+ // calculate global Z rotation\r
+ cvCrossProduct(&t,&uu,&ww);\r
+ double nw = cvNorm(&ww, 0, CV_L2);\r
+ cvConvertScale(&ww, &ww, acos(fabs(c)/nt)/nw);\r
+ cvRodrigues2(&ww, &wR);\r
+\r
+ // apply to both views\r
+ cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, CV_GEMM_B_T);\r
+ cvConvert( &Ri, _R1 );\r
+ cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, 0);\r
+ cvConvert( &Ri, _R2 );\r
+ cvMatMul(&r_r, _T, &t);\r
+\r
+ // calculate projection/camera matrices\r
+ // these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)\r
+ double fc_new = DBL_MAX;\r
+ CvPoint2D64f cc_new[2] = {{0,0}, {0,0}};\r
+\r
+ for( k = 0; k < 2; k++ )\r
+ {\r
+ const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2;\r
+ const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2;\r
+ CvPoint2D32f _pts[4];\r
+ CvPoint3D32f _pts_3[4];\r
+ CvMat pts = cvMat(1, 4, CV_32FC2, _pts);\r
+ CvMat pts_3 = cvMat(1, 4, CV_32FC3, _pts_3);\r
+ double fc, dk1 = Dk ? cvmGet(Dk, 0, 0) : 0;\r
+\r
+ fc = cvmGet(A,idx^1,idx^1);\r
+ if( dk1 < 0 )\r
+ fc *= 1 + 0.2*dk1*(nx*nx + ny*ny)/(8*fc*fc);\r
+ fc_new = MIN(fc_new, fc);\r
+\r
+ for( i = 0; i < 4; i++ )\r
+ {\r
+ _pts[i].x = (float)(((i % 2) + 0.5)*nx*0.5);\r
+ _pts[i].y = (float)(((i / 2) + 0.5)*ny*0.5);\r
+ }\r
+ cvUndistortPoints( &pts, &pts, A, Dk, 0, 0 );\r
+ cvConvertPointsHomogeneous( &pts, &pts_3 );\r
+ cvProjectPoints2( &pts_3, k == 0 ? _R1 : _R2, &Z, A, 0, &pts );\r
+ CvScalar avg = cvAvg(&pts);\r
+ cc_new[k].x = avg.val[0];\r
+ cc_new[k].y = avg.val[1];\r
+ }\r
+\r
+ // vertical focal length must be the same for both images to keep the epipolar constraint\r
+ // (for horizontal epipolar lines -- TBD: check for vertical epipolar lines)\r
+ // use fy for fx also, for simplicity\r
+\r
+ // For simplicity, set the principal points for both cameras to be the average\r
+ // of the two principal points (either one of or both x- and y- coordinates)\r
+ if( flags & CV_CALIB_ZERO_DISPARITY )\r
+ {\r
+ cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;\r
+ cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;\r
+ }\r
+ else if( idx == 0 ) // horizontal stereo\r
+ cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;\r
+ else // vertical stereo\r
+ cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;\r
+\r
+ cvZero( &pp );\r
+ _pp[0][0] = _pp[1][1] = fc_new;\r
+ _pp[0][2] = cc_new[0].x;\r
+ _pp[1][2] = cc_new[0].y;\r
+ _pp[2][2] = 1;\r
+ cvConvert(&pp, _P1);\r
+\r
+ _pp[0][2] = cc_new[1].x;\r
+ _pp[1][2] = cc_new[1].y;\r
+ _pp[idx][3] = _t[idx]*fc_new; // baseline * focal length\r
+ cvConvert(&pp, _P2);\r
+\r
+ if( _Q )\r
+ {\r
+ double q[] =\r
+ {\r
+ 1, 0, 0, -cc_new[0].x,\r
+ 0, 1, 0, -cc_new[0].y,\r
+ 0, 0, 0, fc_new,\r
+ 0, 0, 1./_t[idx],\r
+ (idx == 0 ? cc_new[0].x - cc_new[1].x : cc_new[0].y - cc_new[1].y)/_t[idx]\r
+ };\r
+ CvMat Q = cvMat(4, 4, CV_64F, q);\r
+ cvConvert( &Q, _Q );\r
+ }\r
+}\r
+\r
+\r
+CV_IMPL int\r
+cvStereoRectifyUncalibrated(\r
+ const CvMat* _points1, const CvMat* _points2,\r
+ const CvMat* F0, CvSize imgSize, CvMat* _H1, CvMat* _H2, double threshold )\r
+{\r
+ int result = 0;\r
+ CvMat* _m1 = 0;\r
+ CvMat* _m2 = 0;\r
+ CvMat* _lines1 = 0;\r
+ CvMat* _lines2 = 0;\r
+\r
+ CV_FUNCNAME( "cvStereoCalcHomographiesFromF" );\r
+\r
+ __BEGIN__;\r
+\r
+ int i, j, npoints;\r
+ double cx, cy;\r
+ double u[9], v[9], w[9], f[9], h1[9], h2[9], h0[9], e2[3];\r
+ CvMat E2 = cvMat( 3, 1, CV_64F, e2 );\r
+ CvMat U = cvMat( 3, 3, CV_64F, u );\r
+ CvMat V = cvMat( 3, 3, CV_64F, v );\r
+ CvMat W = cvMat( 3, 3, CV_64F, w );\r
+ CvMat F = cvMat( 3, 3, CV_64F, f );\r
+ CvMat H1 = cvMat( 3, 3, CV_64F, h1 );\r
+ CvMat H2 = cvMat( 3, 3, CV_64F, h2 );\r
+ CvMat H0 = cvMat( 3, 3, CV_64F, h0 );\r
+\r
+ CvPoint2D64f* m1;\r
+ CvPoint2D64f* m2;\r
+ CvPoint3D64f* lines1;\r
+ CvPoint3D64f* lines2;\r
+\r
+ CV_ASSERT( CV_IS_MAT(_points1) && CV_IS_MAT(_points2) &&\r
+ (_points1->rows == 1 || _points1->cols == 1) &&\r
+ (_points2->rows == 1 || _points2->cols == 1) &&\r
+ CV_ARE_SIZES_EQ(_points1, _points2) );\r
+\r
+ npoints = _points1->rows * _points1->cols * CV_MAT_CN(_points1->type) / 2;\r
+\r
+ _m1 = cvCreateMat( _points1->rows, _points1->cols, CV_64FC(CV_MAT_CN(_points1->type)) );\r
+ _m2 = cvCreateMat( _points2->rows, _points2->cols, CV_64FC(CV_MAT_CN(_points2->type)) );\r
+ _lines1 = cvCreateMat( 1, npoints, CV_64FC3 );\r
+ _lines2 = cvCreateMat( 1, npoints, CV_64FC3 );\r
+\r
+ cvConvert( F0, &F );\r
+\r
+ cvSVD( (CvMat*)&F, &W, &U, &V, CV_SVD_U_T + CV_SVD_V_T );\r
+ W.data.db[8] = 0.;\r
+ cvGEMM( &U, &W, 1, 0, 0, &W, CV_GEMM_A_T );\r
+ cvMatMul( &W, &V, &F );\r
+\r
+ cx = cvRound( (imgSize.width-1)*0.5 );\r
+ cy = cvRound( (imgSize.height-1)*0.5 );\r
+\r
+ cvZero( _H1 );\r
+ cvZero( _H2 );\r
+\r
+ cvConvert( _points1, _m1 );\r
+ cvConvert( _points2, _m2 );\r
+ cvReshape( _m1, _m1, 2, 1 );\r
+ cvReshape( _m1, _m1, 2, 1 );\r
+\r
+ m1 = (CvPoint2D64f*)_m1->data.ptr;\r
+ m2 = (CvPoint2D64f*)_m2->data.ptr;\r
+ lines1 = (CvPoint3D64f*)_lines1->data.ptr;\r
+ lines2 = (CvPoint3D64f*)_lines2->data.ptr;\r
+\r
+ if( threshold > 0 )\r
+ {\r
+ cvComputeCorrespondEpilines( _m1, 1, &F, _lines1 );\r
+ cvComputeCorrespondEpilines( _m2, 2, &F, _lines2 );\r
+\r
+ // measure distance from points to the corresponding epilines, mark outliers\r
+ for( i = j = 0; i < npoints; i++ )\r
+ {\r
+ if( fabs(m1[i].x*lines2[i].x +\r
+ m1[i].y*lines2[i].y +\r
+ lines2[i].z) <= threshold &&\r
+ fabs(m2[i].x*lines1[i].x +\r
+ m2[i].y*lines1[i].y +\r
+ lines1[i].z) <= threshold )\r
+ {\r
+ if( j > i )\r
+ {\r
+ m1[j] = m1[i];\r
+ m2[j] = m2[i];\r
+ }\r
+ j++;\r
+ }\r
+ }\r
+\r
+ npoints = j;\r
+ if( npoints == 0 )\r
+ EXIT;\r
+ }\r
+\r
+ {\r
+ _m1->cols = _m2->cols = npoints;\r
+ memcpy( E2.data.db, U.data.db + 6, sizeof(e2));\r
+ cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );\r
+\r
+ double t[] =\r
+ {\r
+ 1, 0, -cx,\r
+ 0, 1, -cy,\r
+ 0, 0, 1\r
+ };\r
+ CvMat T = cvMat(3, 3, CV_64F, t);\r
+ cvMatMul( &T, &E2, &E2 );\r
+\r
+ int mirror = e2[0] < 0;\r
+ double d = MAX(sqrt(e2[0]*e2[0] + e2[1]*e2[1]),DBL_EPSILON);\r
+ double alpha = e2[0]/d;\r
+ double beta = e2[1]/d;\r
+ double r[] =\r
+ {\r
+ alpha, beta, 0,\r
+ -beta, alpha, 0,\r
+ 0, 0, 1\r
+ };\r
+ CvMat R = cvMat(3, 3, CV_64F, r);\r
+ cvMatMul( &R, &T, &T );\r
+ cvMatMul( &R, &E2, &E2 );\r
+ double invf = fabs(e2[2]) < 1e-6*fabs(e2[0]) ? 0 : -e2[2]/e2[0];\r
+ double k[] =\r
+ {\r
+ 1, 0, 0,\r
+ 0, 1, 0,\r
+ invf, 0, 1\r
+ };\r
+ CvMat K = cvMat(3, 3, CV_64F, k);\r
+ cvMatMul( &K, &T, &H2 );\r
+ cvMatMul( &K, &E2, &E2 );\r
+\r
+ double it[] =\r
+ {\r
+ 1, 0, cx,\r
+ 0, 1, cy,\r
+ 0, 0, 1\r
+ };\r
+ CvMat iT = cvMat( 3, 3, CV_64F, it );\r
+ cvMatMul( &iT, &H2, &H2 );\r
+\r
+ memcpy( E2.data.db, U.data.db + 6, sizeof(e2));\r
+ cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );\r
+\r
+ double e2_x[] =\r
+ {\r
+ 0, -e2[2], e2[1],\r
+ e2[2], 0, -e2[0],\r
+ -e2[1], e2[0], 0\r
+ };\r
+ double e2_111[] =\r
+ {\r
+ e2[0], e2[0], e2[0],\r
+ e2[1], e2[1], e2[1],\r
+ e2[2], e2[2], e2[2],\r
+ };\r
+ CvMat E2_x = cvMat(3, 3, CV_64F, e2_x);\r
+ CvMat E2_111 = cvMat(3, 3, CV_64F, e2_111);\r
+ cvMatMulAdd(&E2_x, &F, &E2_111, &H0 );\r
+ cvMatMul(&H2, &H0, &H0);\r
+ CvMat E1=cvMat(3, 1, CV_64F, V.data.db+6);\r
+ cvMatMul(&H0, &E1, &E1);\r
+\r
+ cvPerspectiveTransform( _m1, _m1, &H0 );\r
+ cvPerspectiveTransform( _m2, _m2, &H2 );\r
+ CvMat A = cvMat( 1, npoints, CV_64FC3, lines1 ), BxBy, B;\r
+ double a[9], atb[3], x[3];\r
+ CvMat AtA = cvMat( 3, 3, CV_64F, a );\r
+ CvMat AtB = cvMat( 3, 1, CV_64F, atb );\r
+ CvMat X = cvMat( 3, 1, CV_64F, x );\r
+ cvConvertPointsHomogeneous( _m1, &A );\r
+ cvReshape( &A, &A, 1, npoints );\r
+ cvReshape( _m2, &BxBy, 1, npoints );\r
+ cvGetCol( &BxBy, &B, 0 );\r
+ cvGEMM( &A, &A, 1, 0, 0, &AtA, CV_GEMM_A_T );\r
+ cvGEMM( &A, &B, 1, 0, 0, &AtB, CV_GEMM_A_T );\r
+ cvSolve( &AtA, &AtB, &X, CV_SVD_SYM );\r
+\r
+ double ha[] =\r
+ {\r
+ x[0], x[1], x[2],\r
+ 0, 1, 0,\r
+ 0, 0, 1\r
+ };\r
+ CvMat Ha = cvMat(3, 3, CV_64F, ha);\r
+ cvMatMul( &Ha, &H0, &H1 );\r
+ cvPerspectiveTransform( _m1, _m1, &Ha );\r
+\r
+ if( mirror )\r
+ {\r
+ double mm[] = { -1, 0, cx*2, 0, -1, cy*2, 0, 0, 1 };\r
+ CvMat MM = cvMat(3, 3, CV_64F, mm);\r
+ cvMatMul( &MM, &H1, &H1 );\r
+ cvMatMul( &MM, &H2, &H2 );\r
+ }\r
+\r
+ cvConvert( &H1, _H1 );\r
+ cvConvert( &H2, _H2 );\r
+\r
+ result = 1;\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &_m1 );\r
+ cvReleaseMat( &_m2 );\r
+ cvReleaseMat( &_lines1 );\r
+ cvReleaseMat( &_lines2 );\r
+\r
+ return result;\r
+}\r
+\r
+\r
+/* Reprojects a disparity image to 3D space using the 4x4 perspective
+   transformation _Q (as produced, e.g., by cvStereoRectify):
+       [X Y Z W]^T = Q * [x y disparity(x,y) 1]^T,
+       _3dImage(x,y) = (X/W, Y/W, Z/W).
+   Accepts 8u/16s/32s/32f single-channel disparity; writes 16s/32s/32f
+   3-channel output of the same size. If handleMissingValues != 0, pixels
+   whose disparity equals the minimal disparity in the map (assumed to mark
+   "unknown") get Z set to a large fixed value (10000). */
+CV_IMPL void
+cvReprojectImageTo3D(
+    const CvArr* disparityImage,
+    CvArr* _3dImage, const CvMat* _Q,
+    int handleMissingValues )
+{
+    const double bigZ = 10000.;
+    CV_FUNCNAME( "cvReprojectImageTo3D" );
+
+    __BEGIN__;
+
+    double q[4][4];
+    CvMat Q = cvMat(4, 4, CV_64F, q);
+    CvMat sstub, *src = cvGetMat( disparityImage, &sstub );
+    CvMat dstub, *dst = cvGetMat( _3dImage, &dstub );
+    int stype = CV_MAT_TYPE(src->type), dtype = CV_MAT_TYPE(dst->type);
+    int x, y, rows = src->rows, cols = src->cols;
+    // per-row scratch: sbuf holds the disparity row converted to float,
+    // dbuf holds the float result before conversion to 16s/32s outputs
+    float* sbuf = (float*)cvStackAlloc( cols*sizeof(sbuf[0]) );
+    float* dbuf = (float*)cvStackAlloc( cols*3*sizeof(dbuf[0]) );
+    double minDisparity = FLT_MAX;
+
+    CV_ASSERT( CV_ARE_SIZES_EQ(src, dst) &&
+        (CV_MAT_TYPE(stype) == CV_8UC1 || CV_MAT_TYPE(stype) == CV_16SC1 ||
+         CV_MAT_TYPE(stype) == CV_32SC1 || CV_MAT_TYPE(stype) == CV_32FC1) &&
+        (CV_MAT_TYPE(dtype) == CV_16SC3 || CV_MAT_TYPE(dtype) == CV_32SC3 ||
+        CV_MAT_TYPE(dtype) == CV_32FC3) );
+
+    cvConvert( _Q, &Q );
+
+    // NOTE: here we quietly assume that at least one pixel in the disparity map is not defined.
+    // and we set the corresponding Z's to some fixed big value.
+    if( handleMissingValues )
+        cvMinMaxLoc( disparityImage, &minDisparity, 0, 0, 0 );
+
+    for( y = 0; y < rows; y++ )
+    {
+        const float* sptr = (const float*)(src->data.ptr + src->step*y);
+        float* dptr0 = (float*)(dst->data.ptr + dst->step*y), *dptr = dptr0;
+        // start-of-row accumulators: Q * [0 y 0 1]^T; the per-column step
+        // adds the first column of Q (see the x loop below)
+        double qx = q[0][1]*y + q[0][3], qy = q[1][1]*y + q[1][3];
+        double qz = q[2][1]*y + q[2][3], qw = q[3][1]*y + q[3][3];
+
+        // widen the input row to float so the main loop is type-agnostic
+        if( stype == CV_8UC1 )
+        {
+            const uchar* sptr0 = (const uchar*)sptr;
+            for( x = 0; x < cols; x++ )
+                sbuf[x] = (float)sptr0[x];
+            sptr = sbuf;
+        }
+        else if( stype == CV_16SC1 )
+        {
+            const short* sptr0 = (const short*)sptr;
+            for( x = 0; x < cols; x++ )
+                sbuf[x] = (float)sptr0[x];
+            sptr = sbuf;
+        }
+        else if( stype == CV_32SC1 )
+        {
+            const int* sptr0 = (const int*)sptr;
+            for( x = 0; x < cols; x++ )
+                sbuf[x] = (float)sptr0[x];
+            sptr = sbuf;
+        }
+
+        // non-float outputs are computed into dbuf first, then converted below
+        if( dtype != CV_32FC3 )
+            dptr = dbuf;
+
+        for( x = 0; x < cols; x++, qx += q[0][0], qy += q[1][0], qz += q[2][0], qw += q[3][0] )
+        {
+            double d = sptr[x];
+            double iW = 1./(qw + q[3][2]*d);
+            double X = (qx + q[0][2]*d)*iW;
+            double Y = (qy + q[1][2]*d)*iW;
+            double Z = (qz + q[2][2]*d)*iW;
+            // "missing" pixels (at the minimal disparity) are pushed far away;
+            // when handleMissingValues == 0, minDisparity stays FLT_MAX and
+            // this branch never triggers
+            if( fabs(d-minDisparity) <= FLT_EPSILON )
+                Z = bigZ;
+
+            dptr[x*3] = (float)X;
+            dptr[x*3+1] = (float)Y;
+            dptr[x*3+2] = (float)Z;
+        }
+
+        // saturate/convert the float row into the requested output depth
+        if( dtype == CV_16SC3 )
+        {
+            for( x = 0; x < cols*3; x++ )
+            {
+                int ival = cvRound(dptr[x]);
+                ((short*)dptr0)[x] = CV_CAST_16S(ival);
+            }
+        }
+        else if( dtype == CV_32SC3 )
+        {
+            for( x = 0; x < cols*3; x++ )
+            {
+                int ival = cvRound(dptr[x]);
+                ((int*)dptr0)[x] = ival;
+            }
+        }
+    }
+
+    __END__;
+}
+\r
+\r
+namespace cv\r
+{\r
+\r
+Mat Rodrigues(const Mat& src)\r
+{\r
+ bool v2m = src.cols == 1 || src.rows == 1;\r
+ Mat dst(3, v2m ? 3 : 1, src.type());\r
+ CvMat _src = src, _dst = dst;\r
+ bool ok = cvRodrigues2(&_src, &_dst, 0) > 0;\r
+ if( !ok )\r
+ dst = Scalar(0);\r
+ return dst;\r
+}\r
+\r
+Mat Rodrigues(const Mat& src, Mat& jacobian)\r
+{\r
+ bool v2m = src.cols == 1 || src.rows == 1;\r
+ Mat dst(3, v2m ? 3 : 1, src.type());\r
+ jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.type());\r
+ CvMat _src = src, _dst = dst, _jacobian = jacobian;\r
+ bool ok = cvRodrigues2(&_src, &_dst, &_jacobian) > 0;\r
+ if( !ok )\r
+ dst = Scalar(0);\r
+ return dst;\r
+}\r
+\r
+void matMulDeriv( const Mat& A, const Mat& B, Mat& dABdA, Mat& dABdB )\r
+{\r
+ dABdA.create(A.rows*B.cols, A.rows*A.cols, A.type());\r
+ dABdB.create(A.rows*B.cols, B.rows*B.cols, A.type());\r
+ CvMat _A = A, _B = B, _dABdA = dABdA, _dABdB = dABdB;\r
+ cvCalcMatMulDeriv(&_A, &_B, &_dABdA, &_dABdB);\r
+}\r
+\r
+void composeRT( const Mat& rvec1, const Mat& tvec1,\r
+ const Mat& rvec2, const Mat& tvec2,\r
+ Mat& rvec3, Mat& tvec3 )\r
+{\r
+ rvec3.create(rvec1.size(), rvec1.type());\r
+ tvec3.create(tvec1.size(), tvec1.type());\r
+ CvMat _rvec1 = rvec1, _tvec1 = tvec1, _rvec2 = rvec2,\r
+ _tvec2 = tvec2, _rvec3 = rvec3, _tvec3 = tvec3;\r
+ cvComposeRT(&_rvec1, &_tvec1, &_rvec2, &_tvec2, &_rvec3, &_tvec3, 0, 0, 0, 0, 0, 0, 0, 0);\r
+}\r
+\r
+\r
+void composeRT( const Mat& rvec1, const Mat& tvec1,\r
+ const Mat& rvec2, const Mat& tvec2,\r
+ Mat& rvec3, Mat& tvec3,\r
+ Mat& dr3dr1, Mat& dr3dt1,\r
+ Mat& dr3dr2, Mat& dr3dt2,\r
+ Mat& dt3dr1, Mat& dt3dt1,\r
+ Mat& dt3dr2, Mat& dt3dt2 )\r
+{\r
+ int rtype = rvec1.type();\r
+ rvec3.create(rvec1.size(), rtype);\r
+ tvec3.create(tvec1.size(), rtype);\r
+ dr3dr1.create(3, 3, rtype); dr3dt1.create(3, 3, rtype);\r
+ dr3dr2.create(3, 3, rtype); dr3dt2.create(3, 3, rtype);\r
+ dt3dr1.create(3, 3, rtype); dt3dt1.create(3, 3, rtype);\r
+ dt3dr2.create(3, 3, rtype); dt3dt2.create(3, 3, rtype);\r
+\r
+ CvMat _rvec1 = rvec1, _tvec1 = tvec1, _rvec2 = rvec2,\r
+ _tvec2 = tvec2, _rvec3 = rvec3, _tvec3 = tvec3;\r
+ CvMat _dr3dr1 = dr3dr1, _dr3dt1 = dr3dt1, _dr3dr2 = dr3dr2, _dr3dt2 = dr3dt2;\r
+ CvMat _dt3dr1 = dt3dr1, _dt3dt1 = dt3dt1, _dt3dr2 = dt3dr2, _dt3dt2 = dt3dt2;\r
+ cvComposeRT(&_rvec1, &_tvec1, &_rvec2, &_tvec2, &_rvec3, &_tvec3,\r
+ &_dr3dr1, &_dr3dt1, &_dr3dr2, &_dr3dt2,\r
+ &_dt3dr1, &_dt3dt1, &_dt3dr2, &_dt3dt2);\r
+}\r
+\r
+void projectPoints( const Vector<Point3f>& objectPoints,\r
+ const Mat& rvec, const Mat& tvec,\r
+ const Mat& cameraMatrix,\r
+ const Mat& distCoeffs,\r
+ Vector<Point2f>& imagePoints )\r
+{\r
+ imagePoints.resize(objectPoints.size());\r
+ CvMat _objectPoints = objectPoints, _imagePoints = imagePoints;\r
+ CvMat _rvec = rvec, _tvec = tvec, _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;\r
+\r
+ cvProjectPoints2( &_objectPoints, &_rvec, &_tvec, &_cameraMatrix, &_distCoeffs,\r
+ &_imagePoints, 0, 0, 0, 0, 0, 0 );\r
+}\r
+\r
+void projectPoints( const Vector<Point3f>& objectPoints,\r
+ const Mat& rvec, const Mat& tvec,\r
+ const Mat& cameraMatrix,\r
+ const Mat& distCoeffs,\r
+ Vector<Point2f>& imagePoints,\r
+ Mat& dpdrot, Mat& dpdt, Mat& dpdf,\r
+ Mat& dpdc, Mat& dpddist,\r
+ double aspectRatio )\r
+{\r
+ size_t npoints = objectPoints.size();\r
+ imagePoints.resize(npoints);\r
+ dpdrot.create(npoints*2, 3, CV_64F);\r
+ dpdt.create(npoints*2, 3, CV_64F);\r
+ dpdf.create(npoints*2, 2, CV_64F);\r
+ dpdc.create(npoints*2, 3, CV_64F);\r
+ dpddist.create(npoints*2, distCoeffs.rows + distCoeffs.cols - 1, CV_64F);\r
+ CvMat _objectPoints = objectPoints, _imagePoints = imagePoints;\r
+ CvMat _rvec = rvec, _tvec = tvec, _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;\r
+ CvMat _dpdrot = dpdrot, _dpdt = dpdt, _dpdf = dpdf, _dpdc = dpdc, _dpddist = dpddist;\r
+\r
+ cvProjectPoints2( &_objectPoints, &_rvec, &_tvec, &_cameraMatrix, &_distCoeffs,\r
+ &_imagePoints, &_dpdrot, &_dpdt, &_dpdf, &_dpdc, &_dpddist, aspectRatio );\r
+}\r
+\r
+void solvePnP( const Vector<Point3f>& objectPoints,\r
+ const Vector<Point2f>& imagePoints,\r
+ const Mat& cameraMatrix,\r
+ const Mat& distCoeffs,\r
+ Mat& rvec, Mat& tvec,\r
+ bool useExtrinsicGuess )\r
+{\r
+ rvec.create(3, 1, CV_64F);\r
+ tvec.create(3, 1, CV_64F);\r
+ CvMat _objectPoints = objectPoints, _imagePoints = imagePoints;\r
+ CvMat _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;\r
+ CvMat _rvec = rvec, _tvec = tvec;\r
+ cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints, &_cameraMatrix,\r
+ &_distCoeffs, &_rvec, &_tvec, useExtrinsicGuess );\r
+}\r
+\r
+static void collectCalibrationData( const Vector<Vector<Point3f> >& objectPoints,\r
+ const Vector<Vector<Point2f> >& imagePoints,\r
+ const Vector<Vector<Point2f> >& imagePoints2,\r
+ Mat& objPtMat, Mat& imgPtMat, Mat* imgPtMat2,\r
+ Mat& npoints )\r
+{\r
+ size_t i, j = 0, ni = 0, nimages = objectPoints.size(), total = 0;\r
+ CV_Assert(nimages > 0 && nimages == imagePoints.size() &&\r
+ (!imgPtMat2 || nimages == imagePoints2.size()));\r
+\r
+ for( i = 0; i < nimages; i++ )\r
+ {\r
+ ni = objectPoints[i].size();\r
+ CV_Assert(ni == imagePoints[i].size() && (!imgPtMat2 || ni == imagePoints2[i].size()));\r
+ total += ni;\r
+ }\r
+\r
+ npoints.create(1, nimages, CV_32S);\r
+ objPtMat.create(1, total, objectPoints[0].type());\r
+ imgPtMat.create(1, total, imagePoints[0].type());\r
+ if( imgPtMat2 )\r
+ imgPtMat2->create(1, total, imagePoints2[0].type());\r
+\r
+ for( i = 0; i < nimages; i++, j += ni )\r
+ {\r
+ ni = objectPoints[i].size();\r
+ ((int*)npoints.data)[i] = ni;\r
+ Vector<Point3f> dstObjPt((Point3f*)objPtMat.data + j, ni);\r
+ Vector<Point2f> dstImgPt((Point2f*)imgPtMat.data + j, ni);\r
+ objectPoints[i].copyTo(dstObjPt);\r
+ imagePoints[i].copyTo(dstImgPt);\r
+ if( !imagePoints2.empty() )\r
+ {\r
+ Vector<Point2f> dstImgPt2((Point2f*)imgPtMat2->data + j, ni);\r
+ imagePoints2[i].copyTo(dstImgPt2);\r
+ }\r
+ }\r
+}\r
+\r
+Mat initCameraMatrix2D( const Vector<Vector<Point3f> >& objectPoints,\r
+ const Vector<Vector<Point2f> >& imagePoints,\r
+ Size imageSize, double aspectRatio )\r
+{\r
+ Mat objPt, imgPt, npoints, cameraMatrix(3, 3, CV_64F);\r
+ collectCalibrationData( objectPoints, imagePoints, Vector<Vector<Point2f> >(),\r
+ objPt, imgPt, 0, npoints );\r
+ CvMat _objPt = objPt, _imgPt = imgPt, _npoints = npoints, _cameraMatrix = cameraMatrix;\r
+ cvInitIntrinsicParams2D( &_objPt, &_imgPt, &_npoints,\r
+ imageSize, &_cameraMatrix, aspectRatio );\r
+ return cameraMatrix;\r
+}\r
+\r
+static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)\r
+{\r
+ Mat cameraMatrix = Mat::eye(3, 3, rtype);\r
+ if( cameraMatrix0.size() == cameraMatrix.size() )\r
+ cameraMatrix0.convertTo(cameraMatrix, rtype);\r
+ return cameraMatrix;\r
+}\r
+\r
+static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)\r
+{\r
+ Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, 5) : Size(5, 1), rtype);\r
+ if( distCoeffs0.size() == Size(1, 4) ||\r
+ distCoeffs0.size() == Size(1, 5) ||\r
+ distCoeffs0.size() == Size(4, 1) ||\r
+ distCoeffs0.size() == Size(5, 1) )\r
+ {\r
+ Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));\r
+ distCoeffs0.convertTo(dstCoeffs, rtype);\r
+ }\r
+ return distCoeffs;\r
+}\r
+\r
+void calibrateCamera( const Vector<Vector<Point3f> >& objectPoints,\r
+ const Vector<Vector<Point2f> >& imagePoints,\r
+ Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,\r
+ Vector<Mat>& rvecs, Vector<Mat>& tvecs, int flags )\r
+{\r
+ int rtype = CV_64F;\r
+ cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);\r
+ distCoeffs = prepareDistCoeffs(distCoeffs, rtype);\r
+\r
+ size_t i, nimages = objectPoints.size();\r
+ CV_Assert( nimages > 0 );\r
+ Mat objPt, imgPt, npoints, rvecM(nimages, 1, CV_32FC3), tvecM(nimages, 1, CV_32FC3);\r
+ collectCalibrationData( objectPoints, imagePoints, Vector<Vector<Point2f> >(),\r
+ objPt, imgPt, 0, npoints );\r
+ CvMat _objPt = objPt, _imgPt = imgPt, _npoints = npoints;\r
+ CvMat _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;\r
+ CvMat _rvecM = rvecM, _tvecM = tvecM;\r
+\r
+ cvCalibrateCamera2(&_objPt, &_imgPt, &_npoints, imageSize, &_cameraMatrix,\r
+ &_distCoeffs, &_rvecM, &_tvecM, flags );\r
+ rvecs.resize(nimages);\r
+ tvecs.resize(nimages);\r
+ for( i = 0; i < nimages; i++ )\r
+ {\r
+ rvecM.row(i).copyTo(rvecs[i]);\r
+ tvecM.row(i).copyTo(tvecs[i]);\r
+ }\r
+}\r
+\r
+void calibrationMatrixValues( const Mat& cameraMatrix, Size imageSize,\r
+ double apertureWidth, double apertureHeight,\r
+ double& fovx, double& fovy, double& focalLength,\r
+ Point2d& principalPoint, double& aspectRatio )\r
+{\r
+ CvMat _cameraMatrix = cameraMatrix;\r
+ cvCalibrationMatrixValues( &_cameraMatrix, imageSize, apertureWidth, apertureHeight,\r
+ &fovx, &fovy, &focalLength, (CvPoint2D64f*)&principalPoint, &aspectRatio );\r
+}\r
+\r
+void stereoCalibrate( const Vector<Vector<Point3f> >& objectPoints,\r
+ const Vector<Vector<Point2f> >& imagePoints1,\r
+ const Vector<Vector<Point2f> >& imagePoints2,\r
+ Mat& cameraMatrix1, Mat& distCoeffs1,\r
+ Mat& cameraMatrix2, Mat& distCoeffs2,\r
+ Size imageSize, Mat& R, Mat& T,\r
+ Mat& E, Mat& F, TermCriteria criteria,\r
+ int flags )\r
+{\r
+ int rtype = CV_64F;\r
+ cameraMatrix1 = prepareCameraMatrix(cameraMatrix1, rtype);\r
+ cameraMatrix2 = prepareCameraMatrix(cameraMatrix2, rtype);\r
+ distCoeffs1 = prepareDistCoeffs(distCoeffs1, rtype);\r
+ distCoeffs2 = prepareDistCoeffs(distCoeffs2, rtype);\r
+ R.create(3, 3, rtype);\r
+ T.create(3, 1, rtype);\r
+\r
+ Mat objPt, imgPt, imgPt2, npoints;\r
+ collectCalibrationData( objectPoints, imagePoints1, imagePoints2,\r
+ objPt, imgPt, &imgPt2, npoints );\r
+ CvMat _objPt = objPt, _imgPt = imgPt, _imgPt2 = imgPt2, _npoints = npoints;\r
+ CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;\r
+ CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;\r
+ CvMat _R = R, _T = T, _E = E, _F = F;\r
+\r
+ cvStereoCalibrate(&_objPt, &_imgPt, &_imgPt2, &_npoints, &_cameraMatrix1,\r
+ &_cameraMatrix2, &_distCoeffs1, &_distCoeffs2, imageSize,\r
+ &_R, &_T, &_E, &_F, criteria, flags );\r
+}\r
+\r
+void stereoRectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,\r
+ const Mat& cameraMatrix2, const Mat& distCoeffs2,\r
+ Size imageSize, const Mat& R, const Mat& T,\r
+ Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,\r
+ int flags )\r
+{\r
+ int rtype = CV_64F;\r
+ R1.create(3, 3, rtype);\r
+ R2.create(3, 3, rtype);\r
+ P1.create(3, 4, rtype);\r
+ P2.create(3, 4, rtype);\r
+ Q.create(4, 4, rtype);\r
+ CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;\r
+ CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;\r
+ CvMat _R = R, _T = T, _R1 = R1, _R2 = R2, _P1 = P1, _P2 = P2, _Q = Q;\r
+ cvStereoRectify( &_cameraMatrix1, &_cameraMatrix2, &_distCoeffs1, &_distCoeffs2,\r
+ imageSize, &_R, &_T, &_R1, &_R2, &_P1, &_P2, &_Q, flags );\r
+}\r
+\r
+bool stereoRectifyUncalibrated( const Vector<Point2f>& points1,\r
+ const Vector<Point2f>& points2,\r
+ const Mat& F, Size imgSize,\r
+ Mat& H1, Mat& H2,\r
+ double threshold )\r
+{\r
+ int rtype = CV_64F;\r
+ H1.create(3, 3, rtype);\r
+ H2.create(3, 3, rtype);\r
+ CvMat _pt1 = points1, _pt2 = points2, _F, *pF=0, _H1 = H1, _H2 = H2;\r
+ if( F.size() == Size(3, 3) )\r
+ pF = &(_F = F);\r
+ return cvStereoRectifyUncalibrated(&_pt1, &_pt2, pF, imgSize, &_H1, &_H2, threshold) > 0;\r
+}\r
+\r
+void reprojectImageTo3D( const Mat& disparity,\r
+ Mat& _3dImage, const Mat& Q,\r
+ bool handleMissingValues )\r
+{\r
+ _3dImage.create(disparity.size(), CV_32FC3);\r
+ CvMat _disparity = disparity, __3dImage = _3dImage, _Q = Q;\r
+ cvReprojectImageTo3D( &_disparity, &__3dImage, &_Q, handleMissingValues );\r
+}\r
+\r
+}\r
+\r
+/* End of file. */\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include "_cv.h"
-
-
-/*F///////////////////////////////////////////////////////////////////////////////////////
-// Name: cvMeanShift
-// Purpose: MeanShift algorithm
-// Context:
-// Parameters:
-// imgProb - 2D object probability distribution
-// windowIn - CvRect of CAMSHIFT Window intial size
-// numIters - If CAMSHIFT iterates this many times, stop
-// windowOut - Location, height and width of converged CAMSHIFT window
-// len - If != NULL, return equivalent len
-// width - If != NULL, return equivalent width
-// itersUsed - Returns number of iterations CAMSHIFT took to converge
-// Returns:
-// The function itself returns the area found
-// Notes:
-//F*/
-CV_IMPL int
-cvMeanShift( const void* imgProb, CvRect windowIn,
- CvTermCriteria criteria, CvConnectedComp* comp )
-{
- CvMoments moments;
- int i = 0, eps;
- CvMat stub, *mat = (CvMat*)imgProb;
- CvMat cur_win;
- CvRect cur_rect = windowIn;
-
- CV_FUNCNAME( "cvMeanShift" );
-
- if( comp )
- comp->rect = windowIn;
-
- moments.m00 = moments.m10 = moments.m01 = 0;
-
- __BEGIN__;
-
- CV_CALL( mat = cvGetMat( mat, &stub ));
-
- if( CV_MAT_CN( mat->type ) > 1 )
- CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );
-
- if( windowIn.height <= 0 || windowIn.width <= 0 )
- CV_ERROR( CV_StsBadArg, "Input window has non-positive sizes" );
-
- if( windowIn.x < 0 || windowIn.x + windowIn.width > mat->cols ||
- windowIn.y < 0 || windowIn.y + windowIn.height > mat->rows )
- CV_ERROR( CV_StsBadArg, "Initial window is not inside the image ROI" );
-
- CV_CALL( criteria = cvCheckTermCriteria( criteria, 1., 100 ));
-
- eps = cvRound( criteria.epsilon * criteria.epsilon );
-
- for( i = 0; i < criteria.max_iter; i++ )
- {
- int dx, dy, nx, ny;
- double inv_m00;
-
- CV_CALL( cvGetSubRect( mat, &cur_win, cur_rect ));
- CV_CALL( cvMoments( &cur_win, &moments ));
-
- /* Calculating center of mass */
- if( fabs(moments.m00) < DBL_EPSILON )
- break;
-
- inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00;
- dx = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 );
- dy = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );
-
- nx = cur_rect.x + dx;
- ny = cur_rect.y + dy;
-
- if( nx < 0 )
- nx = 0;
- else if( nx + cur_rect.width > mat->cols )
- nx = mat->cols - cur_rect.width;
-
- if( ny < 0 )
- ny = 0;
- else if( ny + cur_rect.height > mat->rows )
- ny = mat->rows - cur_rect.height;
-
- dx = nx - cur_rect.x;
- dy = ny - cur_rect.y;
- cur_rect.x = nx;
- cur_rect.y = ny;
-
- /* Check for coverage centers mass & window */
- if( dx*dx + dy*dy < eps )
- break;
- }
-
- __END__;
-
- if( comp )
- {
- comp->rect = cur_rect;
- comp->area = (float)moments.m00;
- }
-
- return i;
-}
-
-
-/*F///////////////////////////////////////////////////////////////////////////////////////
-// Name: cvCamShift
-// Purpose: CAMSHIFT algorithm
-// Context:
-// Parameters:
-// imgProb - 2D object probability distribution
-// windowIn - CvRect of CAMSHIFT Window intial size
-// criteria - criteria of stop finding window
-// windowOut - Location, height and width of converged CAMSHIFT window
-// orientation - If != NULL, return distribution orientation
-// len - If != NULL, return equivalent len
-// width - If != NULL, return equivalent width
-// area - sum of all elements in result window
-// itersUsed - Returns number of iterations CAMSHIFT took to converge
-// Returns:
-// The function itself returns the area found
-// Notes:
-//F*/
-CV_IMPL int
-cvCamShift( const void* imgProb, CvRect windowIn,
- CvTermCriteria criteria,
- CvConnectedComp* _comp,
- CvBox2D* box )
-{
- const int TOLERANCE = 10;
- CvMoments moments;
- double m00 = 0, m10, m01, mu20, mu11, mu02, inv_m00;
- double a, b, c, xc, yc;
- double rotate_a, rotate_c;
- double theta = 0, square;
- double cs, sn;
- double length = 0, width = 0;
- int itersUsed = 0;
- CvConnectedComp comp;
- CvMat cur_win, stub, *mat = (CvMat*)imgProb;
-
- CV_FUNCNAME( "cvCamShift" );
-
- comp.rect = windowIn;
-
- __BEGIN__;
-
- CV_CALL( mat = cvGetMat( mat, &stub ));
-
- CV_CALL( itersUsed = cvMeanShift( mat, windowIn, criteria, &comp ));
- windowIn = comp.rect;
-
- windowIn.x -= TOLERANCE;
- if( windowIn.x < 0 )
- windowIn.x = 0;
-
- windowIn.y -= TOLERANCE;
- if( windowIn.y < 0 )
- windowIn.y = 0;
-
- windowIn.width += 2 * TOLERANCE;
- if( windowIn.x + windowIn.width > mat->width )
- windowIn.width = mat->width - windowIn.x;
-
- windowIn.height += 2 * TOLERANCE;
- if( windowIn.y + windowIn.height > mat->height )
- windowIn.height = mat->height - windowIn.y;
-
- CV_CALL( cvGetSubRect( mat, &cur_win, windowIn ));
-
- /* Calculating moments in new center mass */
- CV_CALL( cvMoments( &cur_win, &moments ));
-
- m00 = moments.m00;
- m10 = moments.m10;
- m01 = moments.m01;
- mu11 = moments.mu11;
- mu20 = moments.mu20;
- mu02 = moments.mu02;
-
- if( fabs(m00) < DBL_EPSILON )
- EXIT;
-
- inv_m00 = 1. / m00;
- xc = cvRound( m10 * inv_m00 + windowIn.x );
- yc = cvRound( m01 * inv_m00 + windowIn.y );
- a = mu20 * inv_m00;
- b = mu11 * inv_m00;
- c = mu02 * inv_m00;
-
- /* Calculating width & height */
- square = sqrt( 4 * b * b + (a - c) * (a - c) );
-
- /* Calculating orientation */
- theta = atan2( 2 * b, a - c + square );
-
- /* Calculating width & length of figure */
- cs = cos( theta );
- sn = sin( theta );
-
- rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
- rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
- length = sqrt( rotate_a * inv_m00 ) * 4;
- width = sqrt( rotate_c * inv_m00 ) * 4;
-
- /* In case, when tetta is 0 or 1.57... the Length & Width may be exchanged */
- if( length < width )
- {
- double t;
-
- CV_SWAP( length, width, t );
- CV_SWAP( cs, sn, t );
- theta = CV_PI*0.5 - theta;
- }
-
- /* Saving results */
- if( _comp || box )
- {
- int t0, t1;
- int _xc = cvRound( xc );
- int _yc = cvRound( yc );
-
- t0 = cvRound( fabs( length * cs ));
- t1 = cvRound( fabs( width * sn ));
-
- t0 = MAX( t0, t1 ) + 2;
- comp.rect.width = MIN( t0, (mat->width - _xc) * 2 );
-
- t0 = cvRound( fabs( length * sn ));
- t1 = cvRound( fabs( width * cs ));
-
- t0 = MAX( t0, t1 ) + 2;
- comp.rect.height = MIN( t0, (mat->height - _yc) * 2 );
-
- comp.rect.x = MAX( 0, _xc - comp.rect.width / 2 );
- comp.rect.y = MAX( 0, _yc - comp.rect.height / 2 );
-
- comp.rect.width = MIN( mat->width - comp.rect.x, comp.rect.width );
- comp.rect.height = MIN( mat->height - comp.rect.y, comp.rect.height );
- comp.area = (float) m00;
- }
-
- __END__;
-
- if( _comp )
- *_comp = comp;
-
- if( box )
- {
- box->size.height = (float)length;
- box->size.width = (float)width;
- box->angle = (float)(theta*180./CV_PI);
- box->center = cvPoint2D32f( comp.rect.x + comp.rect.width*0.5f,
- comp.rect.y + comp.rect.height*0.5f);
- }
-
- return itersUsed;
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+#include "_cv.h"\r
+\r
+\r
+/*F///////////////////////////////////////////////////////////////////////////////////////\r
+// Name: cvMeanShift\r
+// Purpose: MeanShift algorithm\r
+// Context:\r
+// Parameters:\r
+// imgProb - 2D object probability distribution\r
+// windowIn - CvRect of CAMSHIFT Window initial size\r
+// numIters - If CAMSHIFT iterates this many times, stop\r
+// windowOut - Location, height and width of converged CAMSHIFT window\r
+// len - If != NULL, return equivalent len\r
+// width - If != NULL, return equivalent width\r
+// itersUsed - Returns number of iterations CAMSHIFT took to converge\r
+// Returns:\r
+// The function itself returns the area found\r
+// Notes:\r
+//F*/\r
+CV_IMPL int\r
+cvMeanShift( const void* imgProb, CvRect windowIn,\r
+ CvTermCriteria criteria, CvConnectedComp* comp )\r
+{\r
+ CvMoments moments;\r
+ int i = 0, eps;\r
+ CvMat stub, *mat = (CvMat*)imgProb;\r
+ CvMat cur_win;\r
+ CvRect cur_rect = windowIn;\r
+\r
+ CV_FUNCNAME( "cvMeanShift" );\r
+\r
+ if( comp )\r
+ comp->rect = windowIn;\r
+\r
+ moments.m00 = moments.m10 = moments.m01 = 0;\r
+\r
+ __BEGIN__;\r
+\r
+ CV_CALL( mat = cvGetMat( mat, &stub ));\r
+\r
+ if( CV_MAT_CN( mat->type ) > 1 )\r
+ CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );\r
+\r
+ if( windowIn.height <= 0 || windowIn.width <= 0 )\r
+ CV_ERROR( CV_StsBadArg, "Input window has non-positive sizes" );\r
+\r
+ if( windowIn.x < 0 || windowIn.x + windowIn.width > mat->cols ||\r
+ windowIn.y < 0 || windowIn.y + windowIn.height > mat->rows )\r
+ CV_ERROR( CV_StsBadArg, "Initial window is not inside the image ROI" );\r
+\r
+ CV_CALL( criteria = cvCheckTermCriteria( criteria, 1., 100 ));\r
+\r
+ eps = cvRound( criteria.epsilon * criteria.epsilon );\r
+\r
+ for( i = 0; i < criteria.max_iter; i++ )\r
+ {\r
+ int dx, dy, nx, ny;\r
+ double inv_m00;\r
+\r
+ CV_CALL( cvGetSubRect( mat, &cur_win, cur_rect )); \r
+ CV_CALL( cvMoments( &cur_win, &moments ));\r
+\r
+ /* Calculating center of mass */\r
+ if( fabs(moments.m00) < DBL_EPSILON )\r
+ break;\r
+\r
+ inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00;\r
+ dx = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 );\r
+ dy = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );\r
+\r
+ nx = cur_rect.x + dx;\r
+ ny = cur_rect.y + dy;\r
+\r
+ if( nx < 0 )\r
+ nx = 0;\r
+ else if( nx + cur_rect.width > mat->cols )\r
+ nx = mat->cols - cur_rect.width;\r
+\r
+ if( ny < 0 )\r
+ ny = 0;\r
+ else if( ny + cur_rect.height > mat->rows )\r
+ ny = mat->rows - cur_rect.height;\r
+\r
+ dx = nx - cur_rect.x;\r
+ dy = ny - cur_rect.y;\r
+ cur_rect.x = nx;\r
+ cur_rect.y = ny;\r
+\r
+ /* Check for coverage centers mass & window */\r
+ if( dx*dx + dy*dy < eps )\r
+ break;\r
+ }\r
+\r
+ __END__;\r
+\r
+ if( comp )\r
+ {\r
+ comp->rect = cur_rect;\r
+ comp->area = (float)moments.m00;\r
+ }\r
+\r
+ return i;\r
+}\r
+\r
+\r
+/*F///////////////////////////////////////////////////////////////////////////////////////\r
+// Name: cvCamShift\r
+// Purpose: CAMSHIFT algorithm\r
+// Context:\r
+// Parameters:\r
+// imgProb - 2D object probability distribution\r
+// windowIn - CvRect of CAMSHIFT Window initial size\r
+// criteria - criteria of stop finding window\r
+// windowOut - Location, height and width of converged CAMSHIFT window\r
+// orientation - If != NULL, return distribution orientation\r
+// len - If != NULL, return equivalent len\r
+// width - If != NULL, return equivalent width\r
+// area - sum of all elements in result window\r
+// itersUsed - Returns number of iterations CAMSHIFT took to converge\r
+// Returns:\r
+// The function itself returns the area found\r
+// Notes:\r
+//F*/\r
+CV_IMPL int\r
+cvCamShift( const void* imgProb, CvRect windowIn,\r
+ CvTermCriteria criteria,\r
+ CvConnectedComp* _comp,\r
+ CvBox2D* box )\r
+{\r
+ const int TOLERANCE = 10;\r
+ CvMoments moments;\r
+ double m00 = 0, m10, m01, mu20, mu11, mu02, inv_m00;\r
+ double a, b, c, xc, yc;\r
+ double rotate_a, rotate_c;\r
+ double theta = 0, square;\r
+ double cs, sn;\r
+ double length = 0, width = 0;\r
+ int itersUsed = 0;\r
+ CvConnectedComp comp;\r
+ CvMat cur_win, stub, *mat = (CvMat*)imgProb;\r
+\r
+ CV_FUNCNAME( "cvCamShift" );\r
+\r
+ comp.rect = windowIn;\r
+\r
+ __BEGIN__;\r
+\r
+ CV_CALL( mat = cvGetMat( mat, &stub ));\r
+\r
+ CV_CALL( itersUsed = cvMeanShift( mat, windowIn, criteria, &comp ));\r
+ windowIn = comp.rect;\r
+\r
+ windowIn.x -= TOLERANCE;\r
+ if( windowIn.x < 0 )\r
+ windowIn.x = 0;\r
+\r
+ windowIn.y -= TOLERANCE;\r
+ if( windowIn.y < 0 )\r
+ windowIn.y = 0;\r
+\r
+ windowIn.width += 2 * TOLERANCE;\r
+ if( windowIn.x + windowIn.width > mat->width )\r
+ windowIn.width = mat->width - windowIn.x;\r
+\r
+ windowIn.height += 2 * TOLERANCE;\r
+ if( windowIn.y + windowIn.height > mat->height )\r
+ windowIn.height = mat->height - windowIn.y;\r
+\r
+ CV_CALL( cvGetSubRect( mat, &cur_win, windowIn ));\r
+\r
+ /* Calculating moments in new center mass */\r
+ CV_CALL( cvMoments( &cur_win, &moments ));\r
+\r
+ m00 = moments.m00;\r
+ m10 = moments.m10;\r
+ m01 = moments.m01;\r
+ mu11 = moments.mu11;\r
+ mu20 = moments.mu20;\r
+ mu02 = moments.mu02;\r
+\r
+ if( fabs(m00) < DBL_EPSILON )\r
+ EXIT;\r
+\r
+ inv_m00 = 1. / m00;\r
+ xc = cvRound( m10 * inv_m00 + windowIn.x );\r
+ yc = cvRound( m01 * inv_m00 + windowIn.y );\r
+ a = mu20 * inv_m00;\r
+ b = mu11 * inv_m00;\r
+ c = mu02 * inv_m00;\r
+\r
+ /* Calculating width & height */\r
+ square = sqrt( 4 * b * b + (a - c) * (a - c) );\r
+\r
+ /* Calculating orientation */\r
+ theta = atan2( 2 * b, a - c + square );\r
+\r
+ /* Calculating width & length of figure */\r
+ cs = cos( theta );\r
+ sn = sin( theta );\r
+\r
+ rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;\r
+ rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;\r
+ length = sqrt( rotate_a * inv_m00 ) * 4;\r
+ width = sqrt( rotate_c * inv_m00 ) * 4;\r
+\r
+ /* In case, when theta is 0 or 1.57... the Length & Width may be exchanged */\r
+ if( length < width )\r
+ {\r
+ double t;\r
+ \r
+ CV_SWAP( length, width, t );\r
+ CV_SWAP( cs, sn, t );\r
+ theta = CV_PI*0.5 - theta;\r
+ }\r
+\r
+ /* Saving results */\r
+ if( _comp || box )\r
+ {\r
+ int t0, t1;\r
+ int _xc = cvRound( xc );\r
+ int _yc = cvRound( yc );\r
+\r
+ t0 = cvRound( fabs( length * cs ));\r
+ t1 = cvRound( fabs( width * sn ));\r
+\r
+ t0 = MAX( t0, t1 ) + 2;\r
+ comp.rect.width = MIN( t0, (mat->width - _xc) * 2 );\r
+\r
+ t0 = cvRound( fabs( length * sn ));\r
+ t1 = cvRound( fabs( width * cs ));\r
+\r
+ t0 = MAX( t0, t1 ) + 2;\r
+ comp.rect.height = MIN( t0, (mat->height - _yc) * 2 );\r
+\r
+ comp.rect.x = MAX( 0, _xc - comp.rect.width / 2 );\r
+ comp.rect.y = MAX( 0, _yc - comp.rect.height / 2 );\r
+\r
+ comp.rect.width = MIN( mat->width - comp.rect.x, comp.rect.width );\r
+ comp.rect.height = MIN( mat->height - comp.rect.y, comp.rect.height );\r
+ comp.area = (float) m00;\r
+ }\r
+\r
+ __END__;\r
+\r
+ if( _comp )\r
+ *_comp = comp;\r
+ \r
+ if( box )\r
+ {\r
+ box->size.height = (float)length;\r
+ box->size.width = (float)width;\r
+ box->angle = (float)(theta*180./CV_PI);\r
+ box->center = cvPoint2D32f( comp.rect.x + comp.rect.width*0.5f,\r
+ comp.rect.y + comp.rect.height*0.5f);\r
+ }\r
+\r
+ return itersUsed;\r
+}\r
+\r
+namespace cv\r
+{\r
+\r
+RotatedRect CAMShift( const Mat& probImage, Rect& window,\r
+ TermCriteria criteria )\r
+{\r
+ CvConnectedComp comp;\r
+ CvBox2D box;\r
+ CvMat _probImage = probImage;\r
+ cvCamShift(&_probImage, window, (CvTermCriteria)criteria, &comp, &box);\r
+ window = comp.rect;\r
+ return RotatedRect(Point2f(box.center), Size2f(box.size), box.angle);\r
+}\r
+\r
+int MeanShift( const Mat& probImage, Rect& window, TermCriteria criteria )\r
+{\r
+ CvConnectedComp comp;\r
+ CvMat _probImage = probImage;\r
+ int iters = cvMeanShift(&_probImage, window, (CvTermCriteria)criteria, &comp );\r
+ window = comp.rect;\r
+ return iters;\r
+}\r
+\r
+}\r
+\r
+/* End of file. */\r
__END__;
}
+
+void cv::cvtColor( const Mat& src, Mat& dst, int code, int dst_cn )
+{
+ switch( code )
+ {
+ case CV_BGR2BGRA:
+ case CV_RGB2BGRA:
+ case CV_BGRA2RGBA:
+ case CV_BGR5652BGRA:
+ case CV_BGR5552BGRA:
+ case CV_BGR5652RGBA:
+ case CV_BGR5552RGBA:
+ case CV_GRAY2BGRA:
+ dst_cn = 4;
+ break;
+
+ case CV_BGRA2BGR:
+ case CV_RGBA2BGR:
+ case CV_RGB2BGR:
+ case CV_BGR5652BGR:
+ case CV_BGR5552BGR:
+ case CV_BGR5652RGB:
+ case CV_BGR5552RGB:
+ case CV_GRAY2BGR:
+
+ case CV_BGR2YCrCb:
+ case CV_RGB2YCrCb:
+ case CV_BGR2XYZ:
+ case CV_RGB2XYZ:
+ case CV_BGR2HSV:
+ case CV_RGB2HSV:
+ case CV_BGR2Lab:
+ case CV_RGB2Lab:
+ case CV_BGR2Luv:
+ case CV_RGB2Luv:
+ case CV_BGR2HLS:
+ case CV_RGB2HLS:
+
+ case CV_BayerBG2BGR:
+ case CV_BayerGB2BGR:
+ case CV_BayerRG2BGR:
+ case CV_BayerGR2BGR:
+ dst_cn = 3;
+ break;
+
+ case CV_YCrCb2BGR:
+ case CV_YCrCb2RGB:
+ case CV_XYZ2BGR:
+ case CV_XYZ2RGB:
+ case CV_HSV2BGR:
+ case CV_HSV2RGB:
+ case CV_Lab2BGR:
+ case CV_Lab2RGB:
+ case CV_Luv2BGR:
+ case CV_Luv2RGB:
+ case CV_HLS2BGR:
+ case CV_HLS2RGB:
+ if( dst_cn != 4 )
+ dst_cn = 3;
+ break;
+
+ case CV_BGR2BGR565:
+ case CV_BGR2BGR555:
+ case CV_RGB2BGR565:
+ case CV_RGB2BGR555:
+ case CV_BGRA2BGR565:
+ case CV_BGRA2BGR555:
+ case CV_RGBA2BGR565:
+ case CV_RGBA2BGR555:
+ case CV_GRAY2BGR565:
+ case CV_GRAY2BGR555:
+ dst_cn = 2;
+ break;
+
+ case CV_BGR2GRAY:
+ case CV_BGRA2GRAY:
+ case CV_RGB2GRAY:
+ case CV_RGBA2GRAY:
+ case CV_BGR5652GRAY:
+ case CV_BGR5552GRAY:
+ dst_cn = 1;
+ break;
+ default:
+ CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
+ }
+
+ dst.create(src.size(), CV_MAKETYPE(src.depth(), dst_cn));
+ CvMat _src = src, _dst = dst;
+ cvCvtColor( &_src, &_dst, code );
+}
+
/* End of file. */
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include "_cv.h"
-
-/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */
-#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \
- ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \
- (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \
- (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \
- (deltas)[6] = (step), (deltas)[7] = (step) + (nch))
-
-static const CvPoint icvCodeDeltas[8] =
- { {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1} };
-
-CV_IMPL void
-cvStartReadChainPoints( CvChain * chain, CvChainPtReader * reader )
-{
- int i;
-
- CV_FUNCNAME( "cvStartReadChainPoints" );
-
- __BEGIN__;
-
- if( !chain || !reader )
- CV_ERROR( CV_StsNullPtr, "" );
-
- if( chain->elem_size != 1 || chain->header_size < (int)sizeof(CvChain))
- CV_ERROR( CV_StsBadSize, "" );
-
- cvStartReadSeq( (CvSeq *) chain, (CvSeqReader *) reader, 0 );
- CV_CHECK();
-
- reader->pt = chain->origin;
-
- for( i = 0; i < 8; i++ )
- {
- reader->deltas[i][0] = (schar) icvCodeDeltas[i].x;
- reader->deltas[i][1] = (schar) icvCodeDeltas[i].y;
- }
-
- __END__;
-}
-
-
-/* retrieves next point of the chain curve and updates reader */
-CV_IMPL CvPoint
-cvReadChainPoint( CvChainPtReader * reader )
-{
- schar *ptr;
- int code;
- CvPoint pt = { 0, 0 };
-
- CV_FUNCNAME( "cvReadChainPoint" );
-
- __BEGIN__;
-
- if( !reader )
- CV_ERROR( CV_StsNullPtr, "" );
-
- pt = reader->pt;
-
- ptr = reader->ptr;
- if( ptr )
- {
- code = *ptr++;
-
- if( ptr >= reader->block_max )
- {
- cvChangeSeqBlock( (CvSeqReader *) reader, 1 );
- ptr = reader->ptr;
- }
-
- reader->ptr = ptr;
- reader->code = (schar)code;
- assert( (code & ~7) == 0 );
- reader->pt.x = pt.x + icvCodeDeltas[code].x;
- reader->pt.y = pt.y + icvCodeDeltas[code].y;
- }
-
- __END__;
-
- return pt;
-}
-
-
-/****************************************************************************************\
-* Raster->Chain Tree (Suzuki algorithms) *
-\****************************************************************************************/
-
-typedef struct _CvContourInfo
-{
- int flags;
- struct _CvContourInfo *next; /* next contour with the same mark value */
- struct _CvContourInfo *parent; /* information about parent contour */
- CvSeq *contour; /* corresponding contour (may be 0, if rejected) */
- CvRect rect; /* bounding rectangle */
- CvPoint origin; /* origin point (where the contour was traced from) */
- int is_hole; /* hole flag */
-}
-_CvContourInfo;
-
-
-/*
- Structure that is used for sequental retrieving contours from the image.
- It supports both hierarchical and plane variants of Suzuki algorithm.
-*/
-typedef struct _CvContourScanner
-{
- CvMemStorage *storage1; /* contains fetched contours */
- CvMemStorage *storage2; /* contains approximated contours
- (!=storage1 if approx_method2 != approx_method1) */
- CvMemStorage *cinfo_storage; /* contains _CvContourInfo nodes */
- CvSet *cinfo_set; /* set of _CvContourInfo nodes */
- CvMemStoragePos initial_pos; /* starting storage pos */
- CvMemStoragePos backup_pos; /* beginning of the latest approx. contour */
- CvMemStoragePos backup_pos2; /* ending of the latest approx. contour */
- schar *img0; /* image origin */
- schar *img; /* current image row */
- int img_step; /* image step */
- CvSize img_size; /* ROI size */
- CvPoint offset; /* ROI offset: coordinates, added to each contour point */
- CvPoint pt; /* current scanner position */
- CvPoint lnbd; /* position of the last met contour */
- int nbd; /* current mark val */
- _CvContourInfo *l_cinfo; /* information about latest approx. contour */
- _CvContourInfo cinfo_temp; /* temporary var which is used in simple modes */
- _CvContourInfo frame_info; /* information about frame */
- CvSeq frame; /* frame itself */
- int approx_method1; /* approx method when tracing */
- int approx_method2; /* final approx method */
- int mode; /* contour scanning mode:
- 0 - external only
- 1 - all the contours w/o any hierarchy
- 2 - connected components (i.e. two-level structure -
- external contours and holes) */
- int subst_flag;
- int seq_type1; /* type of fetched contours */
- int header_size1; /* hdr size of fetched contours */
- int elem_size1; /* elem size of fetched contours */
- int seq_type2; /* */
- int header_size2; /* the same for approx. contours */
- int elem_size2; /* */
- _CvContourInfo *cinfo_table[126];
-}
-_CvContourScanner;
-
-#define _CV_FIND_CONTOURS_FLAGS_EXTERNAL_ONLY 1
-#define _CV_FIND_CONTOURS_FLAGS_HIERARCHIC 2
-
-/*
- Initializes scanner structure.
- Prepare image for scanning ( clear borders and convert all pixels to 0-1.
-*/
-CV_IMPL CvContourScanner
-cvStartFindContours( void* _img, CvMemStorage* storage,
- int header_size, int mode,
- int method, CvPoint offset )
-{
- int y;
- int step;
- CvSize size;
- uchar *img = 0;
- CvContourScanner scanner = 0;
- CvMat stub, *mat = (CvMat*)_img;
-
- CV_FUNCNAME( "cvStartFindContours" );
-
- __BEGIN__;
-
- if( !storage )
- CV_ERROR( CV_StsNullPtr, "" );
-
- CV_CALL( mat = cvGetMat( mat, &stub ));
-
- if( !CV_IS_MASK_ARR( mat ))
- CV_ERROR( CV_StsUnsupportedFormat, "[Start]FindContours support only 8uC1 images" );
-
- size = cvSize( mat->width, mat->height );
- step = mat->step;
- img = (uchar*)(mat->data.ptr);
-
- if( method < 0 || method > CV_CHAIN_APPROX_TC89_KCOS )
- CV_ERROR( CV_StsOutOfRange, "" );
-
- if( header_size < (int) (method == CV_CHAIN_CODE ? sizeof( CvChain ) : sizeof( CvContour )))
- CV_ERROR( CV_StsBadSize, "" );
-
- scanner = (CvContourScanner)cvAlloc( sizeof( *scanner ));
- memset( scanner, 0, sizeof( *scanner ));
-
- scanner->storage1 = scanner->storage2 = storage;
- scanner->img0 = (schar *) img;
- scanner->img = (schar *) (img + step);
- scanner->img_step = step;
- scanner->img_size.width = size.width - 1; /* exclude rightest column */
- scanner->img_size.height = size.height - 1; /* exclude bottomost row */
- scanner->mode = mode;
- scanner->offset = offset;
- scanner->pt.x = scanner->pt.y = 1;
- scanner->lnbd.x = 0;
- scanner->lnbd.y = 1;
- scanner->nbd = 2;
- scanner->mode = (int) mode;
- scanner->frame_info.contour = &(scanner->frame);
- scanner->frame_info.is_hole = 1;
- scanner->frame_info.next = 0;
- scanner->frame_info.parent = 0;
- scanner->frame_info.rect = cvRect( 0, 0, size.width, size.height );
- scanner->l_cinfo = 0;
- scanner->subst_flag = 0;
-
- scanner->frame.flags = CV_SEQ_FLAG_HOLE;
-
- scanner->approx_method2 = scanner->approx_method1 = method;
-
- if( method == CV_CHAIN_APPROX_TC89_L1 || method == CV_CHAIN_APPROX_TC89_KCOS )
- scanner->approx_method1 = CV_CHAIN_CODE;
-
- if( scanner->approx_method1 == CV_CHAIN_CODE )
- {
- scanner->seq_type1 = CV_SEQ_CHAIN_CONTOUR;
- scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
- header_size : sizeof( CvChain );
- scanner->elem_size1 = sizeof( char );
- }
- else
- {
- scanner->seq_type1 = CV_SEQ_POLYGON;
- scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
- header_size : sizeof( CvContour );
- scanner->elem_size1 = sizeof( CvPoint );
- }
-
- scanner->header_size2 = header_size;
-
- if( scanner->approx_method2 == CV_CHAIN_CODE )
- {
- scanner->seq_type2 = scanner->seq_type1;
- scanner->elem_size2 = scanner->elem_size1;
- }
- else
- {
- scanner->seq_type2 = CV_SEQ_POLYGON;
- scanner->elem_size2 = sizeof( CvPoint );
- }
-
- scanner->seq_type1 = scanner->approx_method1 == CV_CHAIN_CODE ?
- CV_SEQ_CHAIN_CONTOUR : CV_SEQ_POLYGON;
-
- scanner->seq_type2 = scanner->approx_method2 == CV_CHAIN_CODE ?
- CV_SEQ_CHAIN_CONTOUR : CV_SEQ_POLYGON;
-
- cvSaveMemStoragePos( storage, &(scanner->initial_pos) );
-
- if( method > CV_CHAIN_APPROX_SIMPLE )
- {
- scanner->storage1 = cvCreateChildMemStorage( scanner->storage2 );
- }
-
- if( mode > CV_RETR_LIST )
- {
- scanner->cinfo_storage = cvCreateChildMemStorage( scanner->storage2 );
- scanner->cinfo_set = cvCreateSet( 0, sizeof( CvSet ), sizeof( _CvContourInfo ),
- scanner->cinfo_storage );
- }
-
- /* make zero borders */
- memset( img, 0, size.width );
- memset( img + step * (size.height - 1), 0, size.width );
-
- for( y = 1, img += step; y < size.height - 1; y++, img += step )
- {
- img[0] = img[size.width - 1] = 0;
- }
-
- /* converts all pixels to 0 or 1 */
- cvThreshold( mat, mat, 0, 1, CV_THRESH_BINARY );
- CV_CHECK();
-
- __END__;
-
- if( cvGetErrStatus() < 0 )
- cvFree( &scanner );
-
- return scanner;
-}
-
-/*
- Final stage of contour processing.
- Three variants possible:
- 1. Contour, which was retrieved using border following, is added to
- the contour tree. It is the case when the icvSubstituteContour function
- was not called after retrieving the contour.
-
- 2. New contour, assigned by icvSubstituteContour function, is added to the
- tree. The retrieved contour itself is removed from the storage.
- Here two cases are possible:
- 2a. If one deals with plane variant of algorithm
- (hierarchical strucutre is not reconstructed),
- the contour is removed completely.
- 2b. In hierarchical case, the header of the contour is not removed.
- It's marked as "link to contour" and h_next pointer of it is set to
- new, substituting contour.
-
- 3. The similar to 2, but when NULL pointer was assigned by
- icvSubstituteContour function. In this case, the function removes
- retrieved contour completely if plane case and
- leaves header if hierarchical (but doesn't mark header as "link").
- ------------------------------------------------------------------------
- The 1st variant can be used to retrieve and store all the contours from the image
- (with optional convertion from chains to contours using some approximation from
- restriced set of methods). Some characteristics of contour can be computed in the
- same pass.
-
- The usage scheme can look like:
-
- icvContourScanner scanner;
- CvMemStorage* contour_storage;
- CvSeq* first_contour;
- CvStatus result;
-
- ...
-
- icvCreateMemStorage( &contour_storage, block_size/0 );
-
- ...
-
- cvStartFindContours
- ( img, contour_storage,
- header_size, approx_method,
- [external_only,]
- &scanner );
-
- for(;;)
- {
- [CvSeq* contour;]
- result = icvFindNextContour( &scanner, &contour/0 );
-
- if( result != CV_OK ) break;
-
- // calculate some characteristics
- ...
- }
-
- if( result < 0 ) goto error_processing;
-
- cvEndFindContours( &scanner, &first_contour );
- ...
-
- -----------------------------------------------------------------
-
- Second variant is more complex and can be used when someone wants store not
- the retrieved contours but transformed ones. (e.g. approximated with some
- non-default algorithm ).
-
- The scheme can be the as following:
-
- icvContourScanner scanner;
- CvMemStorage* contour_storage;
- CvMemStorage* temp_storage;
- CvSeq* first_contour;
- CvStatus result;
-
- ...
-
- icvCreateMemStorage( &contour_storage, block_size/0 );
- icvCreateMemStorage( &temp_storage, block_size/0 );
-
- ...
-
- icvStartFindContours8uC1R
- ( <img_params>, temp_storage,
- header_size, approx_method,
- [retrival_mode],
- &scanner );
-
- for(;;)
- {
- CvSeq* temp_contour;
- CvSeq* new_contour;
- result = icvFindNextContour( scanner, &temp_contour );
-
- if( result != CV_OK ) break;
-
- <approximation_function>( temp_contour, contour_storage,
- &new_contour, <parameters...> );
-
- icvSubstituteContour( scanner, new_contour );
- ...
- }
-
- if( result < 0 ) goto error_processing;
-
- cvEndFindContours( &scanner, &first_contour );
- ...
-
- ----------------------------------------------------------------------------
- Third method to retrieve contours may be applied if contours are irrelevant
- themselves but some characteristics of them are used only.
- The usage is similar to second except slightly different internal loop
-
- for(;;)
- {
- CvSeq* temp_contour;
- result = icvFindNextContour( &scanner, &temp_contour );
-
- if( result != CV_OK ) break;
-
- // calculate some characteristics of temp_contour
-
- icvSubstituteContour( scanner, 0 );
- ...
- }
-
- new_storage variable is not needed here.
-
- Two notes.
- 1. Second and third method can interleave. I.e. it is possible to
- remain contours that satisfy with some criteria and reject others.
- In hierarchic case the resulting tree is the part of original tree with
- some nodes absent. But in the resulting tree the contour1 is a child
- (may be indirect) of contour2 iff in the original tree the contour1
- is a child (may be indirect) of contour2.
-*/
-static void
-icvEndProcessContour( CvContourScanner scanner )
-{
- _CvContourInfo *l_cinfo = scanner->l_cinfo;
-
- if( l_cinfo )
- {
- if( scanner->subst_flag )
- {
- CvMemStoragePos temp;
-
- cvSaveMemStoragePos( scanner->storage2, &temp );
-
- if( temp.top == scanner->backup_pos2.top &&
- temp.free_space == scanner->backup_pos2.free_space )
- {
- cvRestoreMemStoragePos( scanner->storage2, &scanner->backup_pos );
- }
- scanner->subst_flag = 0;
- }
-
- if( l_cinfo->contour )
- {
- cvInsertNodeIntoTree( l_cinfo->contour, l_cinfo->parent->contour,
- &(scanner->frame) );
- }
- scanner->l_cinfo = 0;
- }
-}
-
-/* replaces one contour with another */
-CV_IMPL void
-cvSubstituteContour( CvContourScanner scanner, CvSeq * new_contour )
-{
- _CvContourInfo *l_cinfo;
-
- CV_FUNCNAME( "cvSubstituteContour" );
-
- __BEGIN__;
-
- if( !scanner )
- CV_ERROR( CV_StsNullPtr, "" );
-
- l_cinfo = scanner->l_cinfo;
- if( l_cinfo && l_cinfo->contour && l_cinfo->contour != new_contour )
- {
- l_cinfo->contour = new_contour;
- scanner->subst_flag = 1;
- }
-
- __END__;
-}
-
-
-/*
- marks domain border with +/-<constant> and stores the contour into CvSeq.
- method:
- <0 - chain
- ==0 - direct
- >0 - simple approximation
-*/
-static CvStatus
-icvFetchContour( schar *ptr,
- int step,
- CvPoint pt,
- CvSeq* contour,
- int _method )
-{
- const schar nbd = 2;
- int deltas[16];
- CvSeqWriter writer;
- schar *i0 = ptr, *i1, *i3, *i4 = 0;
- int prev_s = -1, s, s_end;
- int method = _method - 1;
-
- assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE );
-
- /* initialize local state */
- CV_INIT_3X3_DELTAS( deltas, step, 1 );
- memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
-
- /* initialize writer */
- cvStartAppendToSeq( contour, &writer );
-
- if( method < 0 )
- ((CvChain *) contour)->origin = pt;
-
- s_end = s = CV_IS_SEQ_HOLE( contour ) ? 0 : 4;
-
- do
- {
- s = (s - 1) & 7;
- i1 = i0 + deltas[s];
- if( *i1 != 0 )
- break;
- }
- while( s != s_end );
-
- if( s == s_end ) /* single pixel domain */
- {
- *i0 = (schar) (nbd | -128);
- if( method >= 0 )
- {
- CV_WRITE_SEQ_ELEM( pt, writer );
- }
- }
- else
- {
- i3 = i0;
- prev_s = s ^ 4;
-
- /* follow border */
- for( ;; )
- {
- s_end = s;
-
- for( ;; )
- {
- i4 = i3 + deltas[++s];
- if( *i4 != 0 )
- break;
- }
- s &= 7;
-
- /* check "right" bound */
- if( (unsigned) (s - 1) < (unsigned) s_end )
- {
- *i3 = (schar) (nbd | -128);
- }
- else if( *i3 == 1 )
- {
- *i3 = nbd;
- }
-
- if( method < 0 )
- {
- schar _s = (schar) s;
-
- CV_WRITE_SEQ_ELEM( _s, writer );
- }
- else
- {
- if( s != prev_s || method == 0 )
- {
- CV_WRITE_SEQ_ELEM( pt, writer );
- prev_s = s;
- }
-
- pt.x += icvCodeDeltas[s].x;
- pt.y += icvCodeDeltas[s].y;
-
- }
-
- if( i4 == i0 && i3 == i1 )
- break;
-
- i3 = i4;
- s = (s + 4) & 7;
- } /* end of border following loop */
- }
-
- cvEndWriteSeq( &writer );
-
- if( _method != CV_CHAIN_CODE )
- cvBoundingRect( contour, 1 );
-
- assert( (writer.seq->total == 0 && writer.seq->first == 0) ||
- writer.seq->total > writer.seq->first->count ||
- (writer.seq->first->prev == writer.seq->first &&
- writer.seq->first->next == writer.seq->first) );
-
- return CV_OK;
-}
-
-
-
-/*
- trace contour until certain point is met.
- returns 1 if met, 0 else.
-*/
-static int
-icvTraceContour( schar *ptr, int step, schar *stop_ptr, int is_hole )
-{
- int deltas[16];
- schar *i0 = ptr, *i1, *i3, *i4;
- int s, s_end;
-
- /* initialize local state */
- CV_INIT_3X3_DELTAS( deltas, step, 1 );
- memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
-
- assert( (*i0 & -2) != 0 );
-
- s_end = s = is_hole ? 0 : 4;
-
- do
- {
- s = (s - 1) & 7;
- i1 = i0 + deltas[s];
- if( *i1 != 0 )
- break;
- }
- while( s != s_end );
-
- i3 = i0;
-
- /* check single pixel domain */
- if( s != s_end )
- {
- /* follow border */
- for( ;; )
- {
- s_end = s;
-
- for( ;; )
- {
- i4 = i3 + deltas[++s];
- if( *i4 != 0 )
- break;
- }
-
- if( i3 == stop_ptr || (i4 == i0 && i3 == i1) )
- break;
-
- i3 = i4;
- s = (s + 4) & 7;
- } /* end of border following loop */
- }
- return i3 == stop_ptr;
-}
-
-
-static CvStatus
-icvFetchContourEx( schar* ptr,
- int step,
- CvPoint pt,
- CvSeq* contour,
- int _method,
- int nbd,
- CvRect* _rect )
-{
- int deltas[16];
- CvSeqWriter writer;
- schar *i0 = ptr, *i1, *i3, *i4;
- CvRect rect;
- int prev_s = -1, s, s_end;
- int method = _method - 1;
-
- assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE );
- assert( 1 < nbd && nbd < 128 );
-
- /* initialize local state */
- CV_INIT_3X3_DELTAS( deltas, step, 1 );
- memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
-
- /* initialize writer */
- cvStartAppendToSeq( contour, &writer );
-
- if( method < 0 )
- ((CvChain *)contour)->origin = pt;
-
- rect.x = rect.width = pt.x;
- rect.y = rect.height = pt.y;
-
- s_end = s = CV_IS_SEQ_HOLE( contour ) ? 0 : 4;
-
- do
- {
- s = (s - 1) & 7;
- i1 = i0 + deltas[s];
- if( *i1 != 0 )
- break;
- }
- while( s != s_end );
-
- if( s == s_end ) /* single pixel domain */
- {
- *i0 = (schar) (nbd | 0x80);
- if( method >= 0 )
- {
- CV_WRITE_SEQ_ELEM( pt, writer );
- }
- }
- else
- {
- i3 = i0;
-
- prev_s = s ^ 4;
-
- /* follow border */
- for( ;; )
- {
- s_end = s;
-
- for( ;; )
- {
- i4 = i3 + deltas[++s];
- if( *i4 != 0 )
- break;
- }
- s &= 7;
-
- /* check "right" bound */
- if( (unsigned) (s - 1) < (unsigned) s_end )
- {
- *i3 = (schar) (nbd | 0x80);
- }
- else if( *i3 == 1 )
- {
- *i3 = (schar) nbd;
- }
-
- if( method < 0 )
- {
- schar _s = (schar) s;
- CV_WRITE_SEQ_ELEM( _s, writer );
- }
- else if( s != prev_s || method == 0 )
- {
- CV_WRITE_SEQ_ELEM( pt, writer );
- }
-
- if( s != prev_s )
- {
- /* update bounds */
- if( pt.x < rect.x )
- rect.x = pt.x;
- else if( pt.x > rect.width )
- rect.width = pt.x;
-
- if( pt.y < rect.y )
- rect.y = pt.y;
- else if( pt.y > rect.height )
- rect.height = pt.y;
- }
-
- prev_s = s;
- pt.x += icvCodeDeltas[s].x;
- pt.y += icvCodeDeltas[s].y;
-
- if( i4 == i0 && i3 == i1 ) break;
-
- i3 = i4;
- s = (s + 4) & 7;
- } /* end of border following loop */
- }
-
- rect.width -= rect.x - 1;
- rect.height -= rect.y - 1;
-
- cvEndWriteSeq( &writer );
-
- if( _method != CV_CHAIN_CODE )
- ((CvContour*)contour)->rect = rect;
-
- assert( (writer.seq->total == 0 && writer.seq->first == 0) ||
- writer.seq->total > writer.seq->first->count ||
- (writer.seq->first->prev == writer.seq->first &&
- writer.seq->first->next == writer.seq->first) );
-
- if( _rect ) *_rect = rect;
-
- return CV_OK;
-}
-
-
-CvSeq *
-cvFindNextContour( CvContourScanner scanner )
-{
- schar *img0;
- schar *img;
- int step;
- int width, height;
- int x, y;
- int prev;
- CvPoint lnbd;
- CvSeq *contour = 0;
- int nbd;
- int mode;
- CvStatus result = (CvStatus) 1;
-
- CV_FUNCNAME( "cvFindNextContour" );
-
- __BEGIN__;
-
- if( !scanner )
- CV_ERROR( CV_StsNullPtr, "" );
- icvEndProcessContour( scanner );
-
- /* initialize local state */
- img0 = scanner->img0;
- img = scanner->img;
- step = scanner->img_step;
- x = scanner->pt.x;
- y = scanner->pt.y;
- width = scanner->img_size.width;
- height = scanner->img_size.height;
- mode = scanner->mode;
- lnbd = scanner->lnbd;
- nbd = scanner->nbd;
-
- prev = img[x - 1];
-
- for( ; y < height; y++, img += step )
- {
- for( ; x < width; x++ )
- {
- int p = img[x];
-
- if( p != prev )
- {
- _CvContourInfo *par_info = 0;
- _CvContourInfo *l_cinfo = 0;
- CvSeq *seq = 0;
- int is_hole = 0;
- CvPoint origin;
-
- if( !(prev == 0 && p == 1) ) /* if not external contour */
- {
- /* check hole */
- if( p != 0 || prev < 1 )
- goto resume_scan;
-
- if( prev & -2 )
- {
- lnbd.x = x - 1;
- }
- is_hole = 1;
- }
-
- if( mode == 0 && (is_hole || img0[lnbd.y * step + lnbd.x] > 0) )
- goto resume_scan;
-
- origin.y = y;
- origin.x = x - is_hole;
-
- /* find contour parent */
- if( mode <= 1 || (!is_hole && mode == 2) || lnbd.x <= 0 )
- {
- par_info = &(scanner->frame_info);
- }
- else
- {
- int lval = img0[lnbd.y * step + lnbd.x] & 0x7f;
- _CvContourInfo *cur = scanner->cinfo_table[lval - 2];
-
- assert( lval >= 2 );
-
- /* find the first bounding contour */
- while( cur )
- {
- if( (unsigned) (lnbd.x - cur->rect.x) < (unsigned) cur->rect.width &&
- (unsigned) (lnbd.y - cur->rect.y) < (unsigned) cur->rect.height )
- {
- if( par_info )
- {
- if( icvTraceContour( scanner->img0 +
- par_info->origin.y * step +
- par_info->origin.x, step, img + lnbd.x,
- par_info->is_hole ) > 0 )
- break;
- }
- par_info = cur;
- }
- cur = cur->next;
- }
-
- assert( par_info != 0 );
-
- /* if current contour is a hole and previous contour is a hole or
- current contour is external and previous contour is external then
- the parent of the contour is the parent of the previous contour else
- the parent is the previous contour itself. */
- if( par_info->is_hole == is_hole )
- {
- par_info = par_info->parent;
- /* every contour must have a parent
- (at least, the frame of the image) */
- if( !par_info )
- par_info = &(scanner->frame_info);
- }
-
- /* hole flag of the parent must differ from the flag of the contour */
- assert( par_info->is_hole != is_hole );
- if( par_info->contour == 0 ) /* removed contour */
- goto resume_scan;
- }
-
- lnbd.x = x - is_hole;
-
- cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos) );
-
- seq = cvCreateSeq( scanner->seq_type1, scanner->header_size1,
- scanner->elem_size1, scanner->storage1 );
- seq->flags |= is_hole ? CV_SEQ_FLAG_HOLE : 0;
-
- /* initialize header */
- if( mode <= 1 )
- {
- l_cinfo = &(scanner->cinfo_temp);
- result = icvFetchContour( img + x - is_hole, step,
- cvPoint( origin.x + scanner->offset.x,
- origin.y + scanner->offset.y),
- seq, scanner->approx_method1 );
- if( result < 0 )
- goto exit_func;
- }
- else
- {
- union { _CvContourInfo* ci; CvSetElem* se; } v;
- v.ci = l_cinfo;
- cvSetAdd( scanner->cinfo_set, 0, &v.se );
- l_cinfo = v.ci;
-
- result = icvFetchContourEx( img + x - is_hole, step,
- cvPoint( origin.x + scanner->offset.x,
- origin.y + scanner->offset.y),
- seq, scanner->approx_method1,
- nbd, &(l_cinfo->rect) );
- if( result < 0 )
- goto exit_func;
- l_cinfo->rect.x -= scanner->offset.x;
- l_cinfo->rect.y -= scanner->offset.y;
-
- l_cinfo->next = scanner->cinfo_table[nbd - 2];
- scanner->cinfo_table[nbd - 2] = l_cinfo;
-
- /* change nbd */
- nbd = (nbd + 1) & 127;
- nbd += nbd == 0 ? 3 : 0;
- }
-
- l_cinfo->is_hole = is_hole;
- l_cinfo->contour = seq;
- l_cinfo->origin = origin;
- l_cinfo->parent = par_info;
-
- if( scanner->approx_method1 != scanner->approx_method2 )
- {
- result = icvApproximateChainTC89( (CvChain *) seq,
- scanner->header_size2,
- scanner->storage2,
- &(l_cinfo->contour),
- scanner->approx_method2 );
- if( result < 0 )
- goto exit_func;
- cvClearMemStorage( scanner->storage1 );
- }
-
- l_cinfo->contour->v_prev = l_cinfo->parent->contour;
-
- if( par_info->contour == 0 )
- {
- l_cinfo->contour = 0;
- if( scanner->storage1 == scanner->storage2 )
- {
- cvRestoreMemStoragePos( scanner->storage1, &(scanner->backup_pos) );
- }
- else
- {
- cvClearMemStorage( scanner->storage1 );
- }
- p = img[x];
- goto resume_scan;
- }
-
- cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos2) );
- scanner->l_cinfo = l_cinfo;
- scanner->pt.x = x + 1;
- scanner->pt.y = y;
- scanner->lnbd = lnbd;
- scanner->img = (schar *) img;
- scanner->nbd = nbd;
- contour = l_cinfo->contour;
-
- result = CV_OK;
- goto exit_func;
- resume_scan:
- prev = p;
- /* update lnbd */
- if( prev & -2 )
- {
- lnbd.x = x;
- }
- } /* end of prev != p */
- } /* end of loop on x */
-
- lnbd.x = 0;
- lnbd.y = y + 1;
- x = 1;
- prev = 0;
-
- } /* end of loop on y */
-
- exit_func:
-
- if( result != 0 )
- contour = 0;
- if( result < 0 )
- CV_ERROR( result, "" );
-
- __END__;
-
- return contour;
-}
-
-
-/*
- The function add to tree the last retrieved/substituted contour,
- releases temp_storage, restores state of dst_storage (if needed), and
- returns pointer to root of the contour tree */
-CV_IMPL CvSeq *
-cvEndFindContours( CvContourScanner * _scanner )
-{
- CvContourScanner scanner;
- CvSeq *first = 0;
-
- CV_FUNCNAME( "cvFindNextContour" );
-
- __BEGIN__;
-
- if( !_scanner )
- CV_ERROR( CV_StsNullPtr, "" );
- scanner = *_scanner;
-
- if( scanner )
- {
- icvEndProcessContour( scanner );
-
- if( scanner->storage1 != scanner->storage2 )
- cvReleaseMemStorage( &(scanner->storage1) );
-
- if( scanner->cinfo_storage )
- cvReleaseMemStorage( &(scanner->cinfo_storage) );
-
- first = scanner->frame.v_next;
- cvFree( _scanner );
- }
-
- __END__;
-
- return first;
-}
-
-
-#define ICV_SINGLE 0
-#define ICV_CONNECTING_ABOVE 1
-#define ICV_CONNECTING_BELOW -1
-#define ICV_IS_COMPONENT_POINT(val) ((val) != 0)
-
-#define CV_GET_WRITTEN_ELEM( writer ) ((writer).ptr - (writer).seq->elem_size)
-
-typedef struct CvLinkedRunPoint
-{
- struct CvLinkedRunPoint* link;
- struct CvLinkedRunPoint* next;
- CvPoint pt;
-}
-CvLinkedRunPoint;
-
-
-static int
-icvFindContoursInInterval( const CvArr* src,
- /*int minValue, int maxValue,*/
- CvMemStorage* storage,
- CvSeq** result,
- int contourHeaderSize )
-{
- int count = 0;
- CvMemStorage* storage00 = 0;
- CvMemStorage* storage01 = 0;
- CvSeq* first = 0;
-
- CV_FUNCNAME( "icvFindContoursInInterval" );
-
- __BEGIN__;
-
- int i, j, k, n;
-
- uchar* src_data = 0;
- int img_step = 0;
- CvSize img_size;
-
- int connect_flag;
- int lower_total;
- int upper_total;
- int all_total;
-
- CvSeq* runs;
- CvLinkedRunPoint tmp;
- CvLinkedRunPoint* tmp_prev;
- CvLinkedRunPoint* upper_line = 0;
- CvLinkedRunPoint* lower_line = 0;
- CvLinkedRunPoint* last_elem;
-
- CvLinkedRunPoint* upper_run = 0;
- CvLinkedRunPoint* lower_run = 0;
- CvLinkedRunPoint* prev_point = 0;
-
- CvSeqWriter writer_ext;
- CvSeqWriter writer_int;
- CvSeqWriter writer;
- CvSeqReader reader;
-
- CvSeq* external_contours;
- CvSeq* internal_contours;
- CvSeq* prev = 0;
-
- if( !storage )
- CV_ERROR( CV_StsNullPtr, "NULL storage pointer" );
-
- if( !result )
- CV_ERROR( CV_StsNullPtr, "NULL double CvSeq pointer" );
-
- if( contourHeaderSize < (int)sizeof(CvContour))
- CV_ERROR( CV_StsBadSize, "Contour header size must be >= sizeof(CvContour)" );
-
- CV_CALL( storage00 = cvCreateChildMemStorage(storage));
- CV_CALL( storage01 = cvCreateChildMemStorage(storage));
-
- {
- CvMat stub, *mat;
-
- CV_CALL( mat = cvGetMat( src, &stub ));
- if( !CV_IS_MASK_ARR(mat))
- CV_ERROR( CV_StsBadArg, "Input array must be 8uC1 or 8sC1" );
- src_data = mat->data.ptr;
- img_step = mat->step;
- img_size = cvGetMatSize( mat );
- }
-
- // Create temporary sequences
- runs = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvLinkedRunPoint), storage00 );
- cvStartAppendToSeq( runs, &writer );
-
- cvStartWriteSeq( 0, sizeof(CvSeq), sizeof(CvLinkedRunPoint*), storage01, &writer_ext );
- cvStartWriteSeq( 0, sizeof(CvSeq), sizeof(CvLinkedRunPoint*), storage01, &writer_int );
-
- tmp_prev = &(tmp);
- tmp_prev->next = 0;
- tmp_prev->link = 0;
-
- // First line. None of runs is binded
- tmp.pt.y = 0;
- i = 0;
- CV_WRITE_SEQ_ELEM( tmp, writer );
- upper_line = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
-
- tmp_prev = upper_line;
- for( j = 0; j < img_size.width; )
- {
- for( ; j < img_size.width && !ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
- ;
- if( j == img_size.width )
- break;
-
- tmp.pt.x = j;
- CV_WRITE_SEQ_ELEM( tmp, writer );
- tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
- tmp_prev = tmp_prev->next;
-
- for( ; j < img_size.width && ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
- ;
-
- tmp.pt.x = j-1;
- CV_WRITE_SEQ_ELEM( tmp, writer );
- tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
- tmp_prev->link = tmp_prev->next;
- // First point of contour
- CV_WRITE_SEQ_ELEM( tmp_prev, writer_ext );
- tmp_prev = tmp_prev->next;
- }
- cvFlushSeqWriter( &writer );
- upper_line = upper_line->next;
- upper_total = runs->total - 1;
- last_elem = tmp_prev;
- tmp_prev->next = 0;
-
- for( i = 1; i < img_size.height; i++ )
- {
-//------// Find runs in next line
- src_data += img_step;
- tmp.pt.y = i;
- all_total = runs->total;
- for( j = 0; j < img_size.width; )
- {
- for( ; j < img_size.width && !ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
- ;
- if( j == img_size.width ) break;
-
- tmp.pt.x = j;
- CV_WRITE_SEQ_ELEM( tmp, writer );
- tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
- tmp_prev = tmp_prev->next;
-
- for( ; j < img_size.width && ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
- ;
-
- tmp.pt.x = j-1;
- CV_WRITE_SEQ_ELEM( tmp, writer );
- tmp_prev = tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
- }//j
- cvFlushSeqWriter( &writer );
- lower_line = last_elem->next;
- lower_total = runs->total - all_total;
- last_elem = tmp_prev;
- tmp_prev->next = 0;
-//------//
-//------// Find links between runs of lower_line and upper_line
- upper_run = upper_line;
- lower_run = lower_line;
- connect_flag = ICV_SINGLE;
-
- for( k = 0, n = 0; k < upper_total/2 && n < lower_total/2; )
- {
- switch( connect_flag )
- {
- case ICV_SINGLE:
- if( upper_run->next->pt.x < lower_run->next->pt.x )
- {
- if( upper_run->next->pt.x >= lower_run->pt.x -1 )
- {
- lower_run->link = upper_run;
- connect_flag = ICV_CONNECTING_ABOVE;
- prev_point = upper_run->next;
- }
- else
- upper_run->next->link = upper_run;
- k++;
- upper_run = upper_run->next->next;
- }
- else
- {
- if( upper_run->pt.x <= lower_run->next->pt.x +1 )
- {
- lower_run->link = upper_run;
- connect_flag = ICV_CONNECTING_BELOW;
- prev_point = lower_run->next;
- }
- else
- {
- lower_run->link = lower_run->next;
- // First point of contour
- CV_WRITE_SEQ_ELEM( lower_run, writer_ext );
- }
- n++;
- lower_run = lower_run->next->next;
- }
- break;
- case ICV_CONNECTING_ABOVE:
- if( upper_run->pt.x > lower_run->next->pt.x +1 )
- {
- prev_point->link = lower_run->next;
- connect_flag = ICV_SINGLE;
- n++;
- lower_run = lower_run->next->next;
- }
- else
- {
- prev_point->link = upper_run;
- if( upper_run->next->pt.x < lower_run->next->pt.x )
- {
- k++;
- prev_point = upper_run->next;
- upper_run = upper_run->next->next;
- }
- else
- {
- connect_flag = ICV_CONNECTING_BELOW;
- prev_point = lower_run->next;
- n++;
- lower_run = lower_run->next->next;
- }
- }
- break;
- case ICV_CONNECTING_BELOW:
- if( lower_run->pt.x > upper_run->next->pt.x +1 )
- {
- upper_run->next->link = prev_point;
- connect_flag = ICV_SINGLE;
- k++;
- upper_run = upper_run->next->next;
- }
- else
- {
- // First point of contour
- CV_WRITE_SEQ_ELEM( lower_run, writer_int );
-
- lower_run->link = prev_point;
- if( lower_run->next->pt.x < upper_run->next->pt.x )
- {
- n++;
- prev_point = lower_run->next;
- lower_run = lower_run->next->next;
- }
- else
- {
- connect_flag = ICV_CONNECTING_ABOVE;
- k++;
- prev_point = upper_run->next;
- upper_run = upper_run->next->next;
- }
- }
- break;
- }
- }// k, n
-
- for( ; n < lower_total/2; n++ )
- {
- if( connect_flag != ICV_SINGLE )
- {
- prev_point->link = lower_run->next;
- connect_flag = ICV_SINGLE;
- lower_run = lower_run->next->next;
- continue;
- }
- lower_run->link = lower_run->next;
-
- //First point of contour
- CV_WRITE_SEQ_ELEM( lower_run, writer_ext );
-
- lower_run = lower_run->next->next;
- }
-
- for( ; k < upper_total/2; k++ )
- {
- if( connect_flag != ICV_SINGLE )
- {
- upper_run->next->link = prev_point;
- connect_flag = ICV_SINGLE;
- upper_run = upper_run->next->next;
- continue;
- }
- upper_run->next->link = upper_run;
- upper_run = upper_run->next->next;
- }
- upper_line = lower_line;
- upper_total = lower_total;
- }//i
-
- upper_run = upper_line;
-
- //the last line of image
- for( k = 0; k < upper_total/2; k++ )
- {
- upper_run->next->link = upper_run;
- upper_run = upper_run->next->next;
- }
-
-//------//
-//------//Find end read contours
- external_contours = cvEndWriteSeq( &writer_ext );
- internal_contours = cvEndWriteSeq( &writer_int );
-
- for( k = 0; k < 2; k++ )
- {
- CvSeq* contours = k == 0 ? external_contours : internal_contours;
-
- cvStartReadSeq( contours, &reader );
-
- for( j = 0; j < contours->total; j++, count++ )
- {
- CvLinkedRunPoint* p_temp;
- CvLinkedRunPoint* p00;
- CvLinkedRunPoint* p01;
- CvSeq* contour;
-
- CV_READ_SEQ_ELEM( p00, reader );
- p01 = p00;
-
- if( !p00->link )
- continue;
-
- cvStartWriteSeq( CV_SEQ_ELTYPE_POINT | CV_SEQ_POLYLINE | CV_SEQ_FLAG_CLOSED,
- contourHeaderSize, sizeof(CvPoint), storage, &writer );
- do
- {
- CV_WRITE_SEQ_ELEM( p00->pt, writer );
- p_temp = p00;
- p00 = p00->link;
- p_temp->link = 0;
- }
- while( p00 != p01 );
-
- contour = cvEndWriteSeq( &writer );
- cvBoundingRect( contour, 1 );
-
- if( k != 0 )
- contour->flags |= CV_SEQ_FLAG_HOLE;
-
- if( !first )
- prev = first = contour;
- else
- {
- contour->h_prev = prev;
- prev = prev->h_next = contour;
- }
- }
- }
-
- __END__;
-
- if( !first )
- count = -1;
-
- if( result )
- *result = first;
-
- cvReleaseMemStorage(&storage00);
- cvReleaseMemStorage(&storage01);
-
- return count;
-}
-
-
-
-/*F///////////////////////////////////////////////////////////////////////////////////////
-// Name: cvFindContours
-// Purpose:
-// Finds all the contours on the bi-level image.
-// Context:
-// Parameters:
-// img - source image.
-// Non-zero pixels are considered as 1-pixels
-// and zero pixels as 0-pixels.
-// step - full width of source image in bytes.
-// size - width and height of the image in pixels
-// storage - pointer to storage where will the output contours be placed.
-// header_size - header size of resulting contours
-// mode - mode of contour retrieval.
-// method - method of approximation that is applied to contours
-// first_contour - pointer to first contour pointer
-// Returns:
-// CV_OK or error code
-// Notes:
-//F*/
-CV_IMPL int
-cvFindContours( void* img, CvMemStorage* storage,
- CvSeq** firstContour, int cntHeaderSize,
- int mode,
- int method, CvPoint offset )
-{
- CvContourScanner scanner = 0;
- CvSeq *contour = 0;
- int count = -1;
-
- CV_FUNCNAME( "cvFindContours" );
-
- __BEGIN__;
-
- if( !firstContour )
- CV_ERROR( CV_StsNullPtr, "NULL double CvSeq pointer" );
-
- if( method == CV_LINK_RUNS )
- {
- if( offset.x != 0 || offset.y != 0 )
- CV_ERROR( CV_StsOutOfRange,
- "Nonzero offset is not supported in CV_LINK_RUNS yet" );
-
- CV_CALL( count = icvFindContoursInInterval( img, storage,
- firstContour, cntHeaderSize ));
- }
- else
- {
- CV_CALL( scanner = cvStartFindContours( img, storage,
- cntHeaderSize, mode, method, offset ));
- assert( scanner );
-
- do
- {
- count++;
- contour = cvFindNextContour( scanner );
- }
- while( contour != 0 );
-
- *firstContour = cvEndFindContours( &scanner );
- }
-
- __END__;
-
- return count;
-}
-
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+#include "_cv.h"\r
+\r
+/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */\r
+#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \\r
+ ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \\r
+ (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \\r
+ (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \\r
+ (deltas)[6] = (step), (deltas)[7] = (step) + (nch))\r
+\r
+static const CvPoint icvCodeDeltas[8] =\r
+ { {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1} };\r
+\r
+CV_IMPL void\r
+cvStartReadChainPoints( CvChain * chain, CvChainPtReader * reader )\r
+{\r
+ int i;\r
+\r
+ CV_FUNCNAME( "cvStartReadChainPoints" );\r
+\r
+ __BEGIN__;\r
+\r
+ if( !chain || !reader )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+\r
+ if( chain->elem_size != 1 || chain->header_size < (int)sizeof(CvChain))\r
+ CV_ERROR( CV_StsBadSize, "" );\r
+\r
+ cvStartReadSeq( (CvSeq *) chain, (CvSeqReader *) reader, 0 );\r
+ CV_CHECK();\r
+\r
+ reader->pt = chain->origin;\r
+\r
+ for( i = 0; i < 8; i++ )\r
+ {\r
+ reader->deltas[i][0] = (schar) icvCodeDeltas[i].x;\r
+ reader->deltas[i][1] = (schar) icvCodeDeltas[i].y;\r
+ }\r
+\r
+ __END__;\r
+}\r
+\r
+\r
+/* retrieves next point of the chain curve and updates reader */\r
+CV_IMPL CvPoint\r
+cvReadChainPoint( CvChainPtReader * reader )\r
+{\r
+ schar *ptr;\r
+ int code;\r
+ CvPoint pt = { 0, 0 };\r
+\r
+ CV_FUNCNAME( "cvReadChainPoint" );\r
+\r
+ __BEGIN__;\r
+\r
+ if( !reader )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+\r
+ pt = reader->pt;\r
+\r
+ ptr = reader->ptr;\r
+ if( ptr )\r
+ {\r
+ code = *ptr++;\r
+\r
+ if( ptr >= reader->block_max )\r
+ {\r
+ cvChangeSeqBlock( (CvSeqReader *) reader, 1 );\r
+ ptr = reader->ptr;\r
+ }\r
+\r
+ reader->ptr = ptr;\r
+ reader->code = (schar)code;\r
+ assert( (code & ~7) == 0 );\r
+ reader->pt.x = pt.x + icvCodeDeltas[code].x;\r
+ reader->pt.y = pt.y + icvCodeDeltas[code].y;\r
+ }\r
+\r
+ __END__;\r
+\r
+ return pt;\r
+}\r
+\r
+\r
+/****************************************************************************************\\r
+* Raster->Chain Tree (Suzuki algorithms) *\r
+\****************************************************************************************/\r
+\r
+/* Node of the contour hierarchy built during retrieval. One node is kept
+   per traced border; nodes sharing the same border mark value are chained
+   through 'next' (see _CvContourScanner::cinfo_table). */
+typedef struct _CvContourInfo
+{
+    int flags;
+    struct _CvContourInfo *next;        /* next contour with the same mark value */
+    struct _CvContourInfo *parent;      /* information about parent contour */
+    CvSeq *contour;             /* corresponding contour (may be 0, if rejected) */
+    CvRect rect;                /* bounding rectangle */
+    CvPoint origin;             /* origin point (where the contour was traced from) */
+    int is_hole;                /* hole flag */
+}
+_CvContourInfo;
+\r
+\r
+/*
+  Structure that is used for sequential retrieving contours from the image.
+  It supports both hierarchical and plane variants of Suzuki algorithm.
+*/
+typedef struct _CvContourScanner
+{
+    CvMemStorage *storage1;     /* contains fetched contours */
+    CvMemStorage *storage2;     /* contains approximated contours
+                                   (!=storage1 if approx_method2 != approx_method1) */
+    CvMemStorage *cinfo_storage;        /* contains _CvContourInfo nodes */
+    CvSet *cinfo_set;           /* set of _CvContourInfo nodes */
+    CvMemStoragePos initial_pos;        /* starting storage pos */
+    CvMemStoragePos backup_pos; /* beginning of the latest approx. contour */
+    CvMemStoragePos backup_pos2;        /* ending of the latest approx. contour */
+    schar *img0;                /* image origin */
+    schar *img;                 /* current image row */
+    int img_step;               /* image step */
+    CvSize img_size;            /* ROI size */
+    CvPoint offset;             /* ROI offset: coordinates, added to each contour point */
+    CvPoint pt;                 /* current scanner position */
+    CvPoint lnbd;               /* position of the last met contour */
+    int nbd;                    /* current mark val */
+    _CvContourInfo *l_cinfo;    /* information about latest approx. contour */
+    _CvContourInfo cinfo_temp;  /* temporary var which is used in simple modes */
+    _CvContourInfo frame_info;  /* information about frame */
+    CvSeq frame;                /* frame itself */
+    int approx_method1;         /* approx method when tracing */
+    int approx_method2;         /* final approx method */
+    int mode;                   /* contour scanning mode:
+                                   0 - external only
+                                   1 - all the contours w/o any hierarchy
+                                   2 - connected components (i.e. two-level structure -
+                                       external contours and holes) */
+    int subst_flag;             /* set when the latest contour was substituted */
+    int seq_type1;              /* type of fetched contours */
+    int header_size1;           /* hdr size of fetched contours */
+    int elem_size1;             /* elem size of fetched contours */
+    int seq_type2;              /* */
+    int header_size2;           /* the same for approx. contours */
+    int elem_size2;             /* */
+    _CvContourInfo *cinfo_table[126];   /* contour infos indexed by border mark value - 2 */
+}
+_CvContourScanner;
+\r
+/* internal retrieval-configuration flags */
+#define _CV_FIND_CONTOURS_FLAGS_EXTERNAL_ONLY 1
+#define _CV_FIND_CONTOURS_FLAGS_HIERARCHIC 2
+\r
+/*
+   Initializes the contour scanner structure.
+   Prepares the image for scanning (clears the one-pixel border and
+   converts all pixels to 0/1). NOTE: the input image is modified in place.
+
+   _img        - single-channel 8-bit image
+   storage     - destination storage for the contours
+   header_size - header size of the output sequences
+   mode        - retrieval mode (external only / list / two-level / tree)
+   method      - approximation method (CV_CHAIN_CODE..CV_CHAIN_APPROX_TC89_KCOS)
+   offset      - offset added to every contour point (ROI compensation)
+
+   Returns the allocated scanner, or 0 if an error occurred.
+*/
+CV_IMPL CvContourScanner
+cvStartFindContours( void* _img, CvMemStorage* storage,
+                     int header_size, int mode,
+                     int method, CvPoint offset )
+{
+    int y;
+    int step;
+    CvSize size;
+    uchar *img = 0;
+    CvContourScanner scanner = 0;
+    CvMat stub, *mat = (CvMat*)_img;
+
+    CV_FUNCNAME( "cvStartFindContours" );
+
+    __BEGIN__;
+
+    if( !storage )
+        CV_ERROR( CV_StsNullPtr, "" );
+
+    CV_CALL( mat = cvGetMat( mat, &stub ));
+
+    if( !CV_IS_MASK_ARR( mat ))
+        CV_ERROR( CV_StsUnsupportedFormat, "[Start]FindContours support only 8uC1 images" );
+
+    size = cvSize( mat->width, mat->height );
+    step = mat->step;
+    img = (uchar*)(mat->data.ptr);
+
+    if( method < 0 || method > CV_CHAIN_APPROX_TC89_KCOS )
+        CV_ERROR( CV_StsOutOfRange, "" );
+
+    /* chain-code output needs only a CvChain header; every other method
+       needs at least a CvContour header */
+    if( header_size < (int) (method == CV_CHAIN_CODE ? sizeof( CvChain ) : sizeof( CvContour )))
+        CV_ERROR( CV_StsBadSize, "" );
+
+    scanner = (CvContourScanner)cvAlloc( sizeof( *scanner ));
+    memset( scanner, 0, sizeof( *scanner ));
+
+    scanner->storage1 = scanner->storage2 = storage;
+    scanner->img0 = (schar *) img;
+    scanner->img = (schar *) (img + step);
+    scanner->img_step = step;
+    scanner->img_size.width = size.width - 1;   /* exclude rightest column */
+    scanner->img_size.height = size.height - 1; /* exclude bottomost row */
+    /* note: 'mode' is assigned exactly once here (a duplicate assignment
+       further down has been removed) */
+    scanner->mode = mode;
+    scanner->offset = offset;
+    scanner->pt.x = scanner->pt.y = 1;  /* scanning starts inside the cleared border */
+    scanner->lnbd.x = 0;
+    scanner->lnbd.y = 1;
+    scanner->nbd = 2;                   /* first free border mark value */
+    scanner->frame_info.contour = &(scanner->frame);
+    scanner->frame_info.is_hole = 1;
+    scanner->frame_info.next = 0;
+    scanner->frame_info.parent = 0;
+    scanner->frame_info.rect = cvRect( 0, 0, size.width, size.height );
+    scanner->l_cinfo = 0;
+    scanner->subst_flag = 0;
+
+    scanner->frame.flags = CV_SEQ_FLAG_HOLE;
+
+    scanner->approx_method2 = scanner->approx_method1 = method;
+
+    /* TC89 approximations are applied as a post-process to fetched chains */
+    if( method == CV_CHAIN_APPROX_TC89_L1 || method == CV_CHAIN_APPROX_TC89_KCOS )
+        scanner->approx_method1 = CV_CHAIN_CODE;
+
+    if( scanner->approx_method1 == CV_CHAIN_CODE )
+    {
+        scanner->seq_type1 = CV_SEQ_CHAIN_CONTOUR;
+        scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
+            header_size : sizeof( CvChain );
+        scanner->elem_size1 = sizeof( char );
+    }
+    else
+    {
+        scanner->seq_type1 = CV_SEQ_POLYGON;
+        scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
+            header_size : sizeof( CvContour );
+        scanner->elem_size1 = sizeof( CvPoint );
+    }
+
+    scanner->header_size2 = header_size;
+
+    if( scanner->approx_method2 == CV_CHAIN_CODE )
+    {
+        scanner->seq_type2 = scanner->seq_type1;
+        scanner->elem_size2 = scanner->elem_size1;
+    }
+    else
+    {
+        scanner->seq_type2 = CV_SEQ_POLYGON;
+        scanner->elem_size2 = sizeof( CvPoint );
+    }
+
+    /* (a redundant recomputation of seq_type1/seq_type2 that produced the
+       exact same values has been removed here) */
+
+    cvSaveMemStoragePos( storage, &(scanner->initial_pos) );
+
+    /* intermediate chains for the TC89 methods go to a disposable child storage */
+    if( method > CV_CHAIN_APPROX_SIMPLE )
+    {
+        scanner->storage1 = cvCreateChildMemStorage( scanner->storage2 );
+    }
+
+    /* hierarchical modes need the set of contour-info nodes */
+    if( mode > CV_RETR_LIST )
+    {
+        scanner->cinfo_storage = cvCreateChildMemStorage( scanner->storage2 );
+        scanner->cinfo_set = cvCreateSet( 0, sizeof( CvSet ), sizeof( _CvContourInfo ),
+                                          scanner->cinfo_storage );
+    }
+
+    /* make zero borders */
+    memset( img, 0, size.width );
+    memset( img + step * (size.height - 1), 0, size.width );
+
+    for( y = 1, img += step; y < size.height - 1; y++, img += step )
+    {
+        img[0] = img[size.width - 1] = 0;
+    }
+
+    /* converts all pixels to 0 or 1 */
+    cvThreshold( mat, mat, 0, 1, CV_THRESH_BINARY );
+    CV_CHECK();
+
+    __END__;
+
+    if( cvGetErrStatus() < 0 )
+        cvFree( &scanner );
+
+    return scanner;
+}
+\r
+/*\r
+ Final stage of contour processing.\r
+ Three variants possible:\r
+ 1. Contour, which was retrieved using border following, is added to\r
+ the contour tree. It is the case when the icvSubstituteContour function\r
+ was not called after retrieving the contour.\r
+\r
+ 2. New contour, assigned by icvSubstituteContour function, is added to the\r
+ tree. The retrieved contour itself is removed from the storage.\r
+ Here two cases are possible:\r
+ 2a. If one deals with plane variant of algorithm\r
+ (hierarchical structure is not reconstructed),
+ the contour is removed completely.\r
+ 2b. In hierarchical case, the header of the contour is not removed.\r
+ It's marked as "link to contour" and h_next pointer of it is set to\r
+ new, substituting contour.\r
+\r
+ 3. The similar to 2, but when NULL pointer was assigned by\r
+ icvSubstituteContour function. In this case, the function removes\r
+ retrieved contour completely if plane case and\r
+ leaves header if hierarchical (but doesn't mark header as "link").\r
+ ------------------------------------------------------------------------\r
+ The 1st variant can be used to retrieve and store all the contours from the image\r
+ (with optional conversion from chains to contours using some approximation from a
+ restricted set of methods). Some characteristics of a contour can be computed in the
+ same pass.\r
+\r
+ The usage scheme can look like:\r
+\r
+ icvContourScanner scanner;\r
+ CvMemStorage* contour_storage;\r
+ CvSeq* first_contour;\r
+ CvStatus result;\r
+\r
+ ...\r
+\r
+ icvCreateMemStorage( &contour_storage, block_size/0 );\r
+\r
+ ...\r
+\r
+ cvStartFindContours\r
+ ( img, contour_storage,\r
+ header_size, approx_method,\r
+ [external_only,]\r
+ &scanner );\r
+\r
+ for(;;)\r
+ {\r
+ [CvSeq* contour;]\r
+ result = icvFindNextContour( &scanner, &contour/0 );\r
+\r
+ if( result != CV_OK ) break;\r
+\r
+ // calculate some characteristics\r
+ ...\r
+ }\r
+\r
+ if( result < 0 ) goto error_processing;\r
+\r
+ cvEndFindContours( &scanner, &first_contour );\r
+ ...\r
+\r
+ -----------------------------------------------------------------\r
+\r
+ Second variant is more complex and can be used when someone wants store not\r
+ the retrieved contours but transformed ones. (e.g. approximated with some\r
+ non-default algorithm ).\r
+\r
+ The scheme can be as follows:
+\r
+ icvContourScanner scanner;\r
+ CvMemStorage* contour_storage;\r
+ CvMemStorage* temp_storage;\r
+ CvSeq* first_contour;\r
+ CvStatus result;\r
+\r
+ ...\r
+\r
+ icvCreateMemStorage( &contour_storage, block_size/0 );\r
+ icvCreateMemStorage( &temp_storage, block_size/0 );\r
+\r
+ ...\r
+\r
+ icvStartFindContours8uC1R\r
+ ( <img_params>, temp_storage,\r
+ header_size, approx_method,\r
+ [retrival_mode],\r
+ &scanner );\r
+\r
+ for(;;)\r
+ {\r
+ CvSeq* temp_contour;\r
+ CvSeq* new_contour;\r
+ result = icvFindNextContour( scanner, &temp_contour );\r
+\r
+ if( result != CV_OK ) break;\r
+\r
+ <approximation_function>( temp_contour, contour_storage,\r
+ &new_contour, <parameters...> );\r
+\r
+ icvSubstituteContour( scanner, new_contour );\r
+ ...\r
+ }\r
+\r
+ if( result < 0 ) goto error_processing;\r
+\r
+ cvEndFindContours( &scanner, &first_contour );\r
+ ...\r
+\r
+ ----------------------------------------------------------------------------\r
+ Third method to retrieve contours may be applied if contours are irrelevant\r
+ themselves but some characteristics of them are used only.\r
+ The usage is similar to second except slightly different internal loop\r
+\r
+ for(;;)\r
+ {\r
+ CvSeq* temp_contour;\r
+ result = icvFindNextContour( &scanner, &temp_contour );\r
+\r
+ if( result != CV_OK ) break;\r
+\r
+ // calculate some characteristics of temp_contour\r
+\r
+ icvSubstituteContour( scanner, 0 );\r
+ ...\r
+ }\r
+\r
+ new_storage variable is not needed here.\r
+\r
+ Two notes.\r
+ 1. Second and third method can interleave. I.e. it is possible to\r
+ retain contours that satisfy some criteria and reject others.
+ In hierarchic case the resulting tree is the part of original tree with\r
+ some nodes absent. But in the resulting tree the contour1 is a child\r
+ (may be indirect) of contour2 iff in the original tree the contour1\r
+ is a child (may be indirect) of contour2.\r
+*/\r
+/* Finishes processing of the latest retrieved contour: reclaims storage
+   if the contour was substituted, links it into the contour tree, and
+   resets l_cinfo. Called before fetching the next contour and from
+   cvEndFindContours. */
+static void
+icvEndProcessContour( CvContourScanner scanner )
+{
+    _CvContourInfo *l_cinfo = scanner->l_cinfo;
+
+    if( l_cinfo )
+    {
+        if( scanner->subst_flag )
+        {
+            CvMemStoragePos temp;
+
+            cvSaveMemStoragePos( scanner->storage2, &temp );
+
+            /* if nothing was written to storage2 since the substitution,
+               roll it back to reclaim the space of the replaced contour */
+            if( temp.top == scanner->backup_pos2.top &&
+                temp.free_space == scanner->backup_pos2.free_space )
+            {
+                cvRestoreMemStoragePos( scanner->storage2, &scanner->backup_pos );
+            }
+            scanner->subst_flag = 0;
+        }
+
+        if( l_cinfo->contour )
+        {
+            /* attach the contour to its parent in the tree rooted at 'frame' */
+            cvInsertNodeIntoTree( l_cinfo->contour, l_cinfo->parent->contour,
+                                  &(scanner->frame) );
+        }
+        scanner->l_cinfo = 0;
+    }
+}
+\r
+/* Replaces the most recently retrieved contour with a user-supplied one
+   (new_contour may be 0 to drop the contour). The substitution takes
+   effect when the scanner finishes processing it (icvEndProcessContour). */
+CV_IMPL void
+cvSubstituteContour( CvContourScanner scanner, CvSeq * new_contour )
+{
+    _CvContourInfo *l_cinfo;
+
+    CV_FUNCNAME( "cvSubstituteContour" );
+
+    __BEGIN__;
+
+    if( !scanner )
+        CV_ERROR( CV_StsNullPtr, "" );
+
+    /* only the latest contour can be substituted, and only once with a
+       different sequence */
+    l_cinfo = scanner->l_cinfo;
+    if( l_cinfo && l_cinfo->contour && l_cinfo->contour != new_contour )
+    {
+        l_cinfo->contour = new_contour;
+        scanner->subst_flag = 1;
+    }
+
+    __END__;
+}
+\r
+\r
+/*
+   Marks the domain border with +/-<constant> and stores the contour into CvSeq.
+   Border following starts at pixel 'ptr' / point 'pt'.
+   method:
+     <0  - chain (store direction codes)
+     ==0 - direct (store every boundary point)
+     >0  - simple approximation (store only points where direction changes)
+*/
+static CvStatus
+icvFetchContour( schar *ptr,
+                 int step,
+                 CvPoint pt,
+                 CvSeq* contour,
+                 int _method )
+{
+    const schar nbd = 2;        /* fixed border mark used by the plain modes */
+    int deltas[16];
+    CvSeqWriter writer;
+    schar *i0 = ptr, *i1, *i3, *i4 = 0;
+    int prev_s = -1, s, s_end;
+    int method = _method - 1;
+
+    assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE );
+
+    /* initialize local state: deltas[i] is the pointer offset of the i-th
+       8-neighbor; the table is doubled so deltas[s+k] needs no masking
+       inside the inner search loop */
+    CV_INIT_3X3_DELTAS( deltas, step, 1 );
+    memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
+
+    /* initialize writer */
+    cvStartAppendToSeq( contour, &writer );
+
+    if( method < 0 )
+        ((CvChain *) contour)->origin = pt;
+
+    /* the initial search direction differs for holes and outer borders */
+    s_end = s = CV_IS_SEQ_HOLE( contour ) ? 0 : 4;
+
+    /* find the first non-zero neighbor, rotating s backwards */
+    do
+    {
+        s = (s - 1) & 7;
+        i1 = i0 + deltas[s];
+        if( *i1 != 0 )
+            break;
+    }
+    while( s != s_end );
+
+    if( s == s_end )            /* single pixel domain */
+    {
+        *i0 = (schar) (nbd | -128);     /* negative mark: "right"-bound pixel */
+        if( method >= 0 )
+        {
+            CV_WRITE_SEQ_ELEM( pt, writer );
+        }
+    }
+    else
+    {
+        i3 = i0;
+        prev_s = s ^ 4;
+
+        /* follow border */
+        for( ;; )
+        {
+            s_end = s;
+
+            /* find the next border pixel, rotating s forwards */
+            for( ;; )
+            {
+                i4 = i3 + deltas[++s];
+                if( *i4 != 0 )
+                    break;
+            }
+            s &= 7;
+
+            /* check "right" bound */
+            if( (unsigned) (s - 1) < (unsigned) s_end )
+            {
+                *i3 = (schar) (nbd | -128);
+            }
+            else if( *i3 == 1 )
+            {
+                *i3 = nbd;
+            }
+
+            if( method < 0 )
+            {
+                /* chain mode: store the direction code itself */
+                schar _s = (schar) s;
+
+                CV_WRITE_SEQ_ELEM( _s, writer );
+            }
+            else
+            {
+                /* point mode: skip the point if the simple approximation is
+                   on and the direction did not change */
+                if( s != prev_s || method == 0 )
+                {
+                    CV_WRITE_SEQ_ELEM( pt, writer );
+                    prev_s = s;
+                }
+
+                pt.x += icvCodeDeltas[s].x;
+                pt.y += icvCodeDeltas[s].y;
+
+            }
+
+            /* stop when the start pixel is re-entered through the start edge */
+            if( i4 == i0 && i3 == i1 )
+                break;
+
+            i3 = i4;
+            s = (s + 4) & 7;
+        }                       /* end of border following loop */
+    }
+
+    cvEndWriteSeq( &writer );
+
+    if( _method != CV_CHAIN_CODE )
+        cvBoundingRect( contour, 1 );
+
+    /* sanity check of the sequence block structure */
+    assert( (writer.seq->total == 0 && writer.seq->first == 0) ||
+            writer.seq->total > writer.seq->first->count ||
+            (writer.seq->first->prev == writer.seq->first &&
+             writer.seq->first->next == writer.seq->first) );
+
+    return CV_OK;
+}
+\r
+\r
+\r
+/*
+   Traces an already-marked contour until a certain pixel is met.
+   Returns 1 if the trace passes through 'stop_ptr', 0 otherwise.
+   Used by cvFindNextContour to verify that a candidate parent border
+   actually encloses a given point.
+*/
+static int
+icvTraceContour( schar *ptr, int step, schar *stop_ptr, int is_hole )
+{
+    int deltas[16];
+    schar *i0 = ptr, *i1, *i3, *i4;
+    int s, s_end;
+
+    /* initialize local state: doubled 8-neighbor pointer-offset table */
+    CV_INIT_3X3_DELTAS( deltas, step, 1 );
+    memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
+
+    /* the starting pixel must already carry a border mark */
+    assert( (*i0 & -2) != 0 );
+
+    s_end = s = is_hole ? 0 : 4;
+
+    /* find the first non-zero neighbor, rotating s backwards */
+    do
+    {
+        s = (s - 1) & 7;
+        i1 = i0 + deltas[s];
+        if( *i1 != 0 )
+            break;
+    }
+    while( s != s_end );
+
+    i3 = i0;
+
+    /* check single pixel domain */
+    if( s != s_end )
+    {
+        /* follow border */
+        for( ;; )
+        {
+            s_end = s;
+
+            /* find the next border pixel, rotating s forwards */
+            for( ;; )
+            {
+                i4 = i3 + deltas[++s];
+                if( *i4 != 0 )
+                    break;
+            }
+
+            /* stop on the target pixel or on full traversal of the border */
+            if( i3 == stop_ptr || (i4 == i0 && i3 == i1) )
+                break;
+
+            i3 = i4;
+            s = (s + 4) & 7;
+        }                       /* end of border following loop */
+    }
+    return i3 == stop_ptr;
+}
+\r
+\r
+/*
+   Extended variant of icvFetchContour: marks the border with the
+   caller-supplied value 'nbd' (so multiple borders can be distinguished
+   in hierarchical modes) and also accumulates the contour's bounding
+   rectangle, returned through '_rect'.
+*/
+static CvStatus
+icvFetchContourEx( schar* ptr,
+                   int step,
+                   CvPoint pt,
+                   CvSeq* contour,
+                   int _method,
+                   int nbd,
+                   CvRect* _rect )
+{
+    int deltas[16];
+    CvSeqWriter writer;
+    schar *i0 = ptr, *i1, *i3, *i4;
+    CvRect rect;
+    int prev_s = -1, s, s_end;
+    int method = _method - 1;
+
+    assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE );
+    assert( 1 < nbd && nbd < 128 );     /* nbd must fit in 7 bits */
+
+    /* initialize local state: doubled 8-neighbor pointer-offset table */
+    CV_INIT_3X3_DELTAS( deltas, step, 1 );
+    memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
+
+    /* initialize writer */
+    cvStartAppendToSeq( contour, &writer );
+
+    if( method < 0 )
+        ((CvChain *)contour)->origin = pt;
+
+    /* rect.width/height temporarily hold the maximum x/y coordinates */
+    rect.x = rect.width = pt.x;
+    rect.y = rect.height = pt.y;
+
+    s_end = s = CV_IS_SEQ_HOLE( contour ) ? 0 : 4;
+
+    /* find the first non-zero neighbor, rotating s backwards */
+    do
+    {
+        s = (s - 1) & 7;
+        i1 = i0 + deltas[s];
+        if( *i1 != 0 )
+            break;
+    }
+    while( s != s_end );
+
+    if( s == s_end )            /* single pixel domain */
+    {
+        *i0 = (schar) (nbd | 0x80);     /* negative mark: "right"-bound pixel */
+        if( method >= 0 )
+        {
+            CV_WRITE_SEQ_ELEM( pt, writer );
+        }
+    }
+    else
+    {
+        i3 = i0;
+
+        prev_s = s ^ 4;
+
+        /* follow border */
+        for( ;; )
+        {
+            s_end = s;
+
+            /* find the next border pixel, rotating s forwards */
+            for( ;; )
+            {
+                i4 = i3 + deltas[++s];
+                if( *i4 != 0 )
+                    break;
+            }
+            s &= 7;
+
+            /* check "right" bound */
+            if( (unsigned) (s - 1) < (unsigned) s_end )
+            {
+                *i3 = (schar) (nbd | 0x80);
+            }
+            else if( *i3 == 1 )
+            {
+                *i3 = (schar) nbd;
+            }
+
+            if( method < 0 )
+            {
+                /* chain mode: store the direction code itself */
+                schar _s = (schar) s;
+                CV_WRITE_SEQ_ELEM( _s, writer );
+            }
+            else if( s != prev_s || method == 0 )
+            {
+                CV_WRITE_SEQ_ELEM( pt, writer );
+            }
+
+            if( s != prev_s )
+            {
+                /* update bounds */
+                if( pt.x < rect.x )
+                    rect.x = pt.x;
+                else if( pt.x > rect.width )
+                    rect.width = pt.x;
+
+                if( pt.y < rect.y )
+                    rect.y = pt.y;
+                else if( pt.y > rect.height )
+                    rect.height = pt.y;
+            }
+
+            prev_s = s;
+            pt.x += icvCodeDeltas[s].x;
+            pt.y += icvCodeDeltas[s].y;
+
+            /* stop when the start pixel is re-entered through the start edge */
+            if( i4 == i0 && i3 == i1 ) break;
+
+            i3 = i4;
+            s = (s + 4) & 7;
+        }                       /* end of border following loop */
+    }
+
+    /* convert max coordinates into width/height */
+    rect.width -= rect.x - 1;
+    rect.height -= rect.y - 1;
+
+    cvEndWriteSeq( &writer );
+
+    if( _method != CV_CHAIN_CODE )
+        ((CvContour*)contour)->rect = rect;
+
+    /* sanity check of the sequence block structure */
+    assert( (writer.seq->total == 0 && writer.seq->first == 0) ||
+            writer.seq->total > writer.seq->first->count ||
+            (writer.seq->first->prev == writer.seq->first &&
+             writer.seq->first->next == writer.seq->first) );
+
+    if( _rect ) *_rect = rect;
+
+    return CV_OK;
+}
+\r
+\r
+/*
+   Retrieves the next contour from the image and returns it (0 when the
+   whole image has been scanned or an error occurred). The scanner state
+   is updated so that a subsequent call resumes the raster scan right
+   after the returned contour's origin.
+*/
+CvSeq *
+cvFindNextContour( CvContourScanner scanner )
+{
+    schar *img0;
+    schar *img;
+    int step;
+    int width, height;
+    int x, y;
+    int prev;
+    CvPoint lnbd;
+    CvSeq *contour = 0;
+    int nbd;
+    int mode;
+    CvStatus result = (CvStatus) 1;
+
+    CV_FUNCNAME( "cvFindNextContour" );
+
+    __BEGIN__;
+
+    if( !scanner )
+        CV_ERROR( CV_StsNullPtr, "" );
+    /* commit the previously returned contour before scanning further */
+    icvEndProcessContour( scanner );
+
+    /* initialize local state */
+    img0 = scanner->img0;
+    img = scanner->img;
+    step = scanner->img_step;
+    x = scanner->pt.x;
+    y = scanner->pt.y;
+    width = scanner->img_size.width;
+    height = scanner->img_size.height;
+    mode = scanner->mode;
+    lnbd = scanner->lnbd;       /* position of the last met border pixel */
+    nbd = scanner->nbd;         /* next border mark value to assign */
+
+    prev = img[x - 1];
+
+    for( ; y < height; y++, img += step )
+    {
+        for( ; x < width; x++ )
+        {
+            int p = img[x];
+
+            /* a transition between pixel values means a border pixel */
+            if( p != prev )
+            {
+                _CvContourInfo *par_info = 0;
+                _CvContourInfo *l_cinfo = 0;
+                CvSeq *seq = 0;
+                int is_hole = 0;
+                CvPoint origin;
+
+                if( !(prev == 0 && p == 1) )    /* if not external contour */
+                {
+                    /* check hole */
+                    if( p != 0 || prev < 1 )
+                        goto resume_scan;
+
+                    if( prev & -2 )     /* previous pixel already marked */
+                    {
+                        lnbd.x = x - 1;
+                    }
+                    is_hole = 1;
+                }
+
+                /* external-only mode: skip holes and nested contours */
+                if( mode == 0 && (is_hole || img0[lnbd.y * step + lnbd.x] > 0) )
+                    goto resume_scan;
+
+                origin.y = y;
+                origin.x = x - is_hole;
+
+                /* find contour parent */
+                if( mode <= 1 || (!is_hole && mode == 2) || lnbd.x <= 0 )
+                {
+                    par_info = &(scanner->frame_info);
+                }
+                else
+                {
+                    /* look the parent up by the mark of the last met border */
+                    int lval = img0[lnbd.y * step + lnbd.x] & 0x7f;
+                    _CvContourInfo *cur = scanner->cinfo_table[lval - 2];
+
+                    assert( lval >= 2 );
+
+                    /* find the first bounding contour */
+                    while( cur )
+                    {
+                        if( (unsigned) (lnbd.x - cur->rect.x) < (unsigned) cur->rect.width &&
+                            (unsigned) (lnbd.y - cur->rect.y) < (unsigned) cur->rect.height )
+                        {
+                            if( par_info )
+                            {
+                                /* retrace the candidate border to see whether
+                                   it really passes through (lnbd.x, y) */
+                                if( icvTraceContour( scanner->img0 +
+                                                     par_info->origin.y * step +
+                                                     par_info->origin.x, step, img + lnbd.x,
+                                                     par_info->is_hole ) > 0 )
+                                    break;
+                            }
+                            par_info = cur;
+                        }
+                        cur = cur->next;
+                    }
+
+                    assert( par_info != 0 );
+
+                    /* if current contour is a hole and previous contour is a hole or
+                       current contour is external and previous contour is external then
+                       the parent of the contour is the parent of the previous contour else
+                       the parent is the previous contour itself. */
+                    if( par_info->is_hole == is_hole )
+                    {
+                        par_info = par_info->parent;
+                        /* every contour must have a parent
+                           (at least, the frame of the image) */
+                        if( !par_info )
+                            par_info = &(scanner->frame_info);
+                    }
+
+                    /* hole flag of the parent must differ from the flag of the contour */
+                    assert( par_info->is_hole != is_hole );
+                    if( par_info->contour == 0 )        /* removed contour */
+                        goto resume_scan;
+                }
+
+                lnbd.x = x - is_hole;
+
+                cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos) );
+
+                seq = cvCreateSeq( scanner->seq_type1, scanner->header_size1,
+                                   scanner->elem_size1, scanner->storage1 );
+                seq->flags |= is_hole ? CV_SEQ_FLAG_HOLE : 0;
+
+                /* initialize header */
+                if( mode <= 1 )
+                {
+                    /* non-hierarchical modes reuse a single temporary node */
+                    l_cinfo = &(scanner->cinfo_temp);
+                    result = icvFetchContour( img + x - is_hole, step,
+                                              cvPoint( origin.x + scanner->offset.x,
+                                                       origin.y + scanner->offset.y),
+                                              seq, scanner->approx_method1 );
+                    if( result < 0 )
+                        goto exit_func;
+                }
+                else
+                {
+                    /* allocate a _CvContourInfo node from the set
+                       (the union avoids an incompatible-pointer cast) */
+                    union { _CvContourInfo* ci; CvSetElem* se; } v;
+                    v.ci = l_cinfo;
+                    cvSetAdd( scanner->cinfo_set, 0, &v.se );
+                    l_cinfo = v.ci;
+
+                    result = icvFetchContourEx( img + x - is_hole, step,
+                                                cvPoint( origin.x + scanner->offset.x,
+                                                         origin.y + scanner->offset.y),
+                                                seq, scanner->approx_method1,
+                                                nbd, &(l_cinfo->rect) );
+                    if( result < 0 )
+                        goto exit_func;
+                    l_cinfo->rect.x -= scanner->offset.x;
+                    l_cinfo->rect.y -= scanner->offset.y;
+
+                    /* register the node under its mark value */
+                    l_cinfo->next = scanner->cinfo_table[nbd - 2];
+                    scanner->cinfo_table[nbd - 2] = l_cinfo;
+
+                    /* change nbd */
+                    nbd = (nbd + 1) & 127;
+                    nbd += nbd == 0 ? 3 : 0;    /* wrap to 3; 0..2 are reserved */
+                }
+
+                l_cinfo->is_hole = is_hole;
+                l_cinfo->contour = seq;
+                l_cinfo->origin = origin;
+                l_cinfo->parent = par_info;
+
+                /* apply the final (TC89) approximation if it differs from
+                   the tracing one */
+                if( scanner->approx_method1 != scanner->approx_method2 )
+                {
+                    result = icvApproximateChainTC89( (CvChain *) seq,
+                                                      scanner->header_size2,
+                                                      scanner->storage2,
+                                                      &(l_cinfo->contour),
+                                                      scanner->approx_method2 );
+                    if( result < 0 )
+                        goto exit_func;
+                    cvClearMemStorage( scanner->storage1 );
+                }
+
+                l_cinfo->contour->v_prev = l_cinfo->parent->contour;
+
+                if( par_info->contour == 0 )    /* parent was removed: drop this contour too */
+                {
+                    l_cinfo->contour = 0;
+                    if( scanner->storage1 == scanner->storage2 )
+                    {
+                        cvRestoreMemStoragePos( scanner->storage1, &(scanner->backup_pos) );
+                    }
+                    else
+                    {
+                        cvClearMemStorage( scanner->storage1 );
+                    }
+                    /* the fetch marked pixels in the image; re-read the current one */
+                    p = img[x];
+                    goto resume_scan;
+                }
+
+                cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos2) );
+
+                /* save the resume position and hand the contour out */
+                scanner->l_cinfo = l_cinfo;
+                scanner->pt.x = x + 1;
+                scanner->pt.y = y;
+                scanner->lnbd = lnbd;
+                scanner->img = (schar *) img;
+                scanner->nbd = nbd;
+                contour = l_cinfo->contour;
+
+                result = CV_OK;
+                goto exit_func;
+              resume_scan:
+                prev = p;
+                /* update lnbd */
+                if( prev & -2 )
+                {
+                    lnbd.x = x;
+                }
+            }                   /* end of prev != p */
+        }                       /* end of loop on x */
+
+        /* next row: reset the last-border position and the previous pixel */
+        lnbd.x = 0;
+        lnbd.y = y + 1;
+        x = 1;
+        prev = 0;
+
+    }                           /* end of loop on y */
+
+  exit_func:
+
+    if( result != 0 )
+        contour = 0;
+    if( result < 0 )
+        CV_ERROR( result, "" );
+
+    __END__;
+
+    return contour;
+}
+\r
+\r
+/*
+   Finishes the scanning process: adds the last retrieved/substituted
+   contour to the tree, releases the temporary storages, restores the
+   state of dst_storage (if needed), and returns the pointer to the root
+   of the contour tree. '*_scanner' is freed and zeroed.
+*/
+CV_IMPL CvSeq *
+cvEndFindContours( CvContourScanner * _scanner )
+{
+    CvContourScanner scanner;
+    CvSeq *first = 0;
+
+    /* bugfix: errors are now reported under this function's own name
+       (the name was mistakenly copied from cvFindNextContour) */
+    CV_FUNCNAME( "cvEndFindContours" );
+
+    __BEGIN__;
+
+    if( !_scanner )
+        CV_ERROR( CV_StsNullPtr, "" );
+    scanner = *_scanner;
+
+    if( scanner )
+    {
+        /* commit the last contour before tearing the scanner down */
+        icvEndProcessContour( scanner );
+
+        if( scanner->storage1 != scanner->storage2 )
+            cvReleaseMemStorage( &(scanner->storage1) );
+
+        if( scanner->cinfo_storage )
+            cvReleaseMemStorage( &(scanner->cinfo_storage) );
+
+        first = scanner->frame.v_next;
+        cvFree( _scanner );
+    }
+
+    __END__;
+
+    return first;
+}
+\r
+\r
+/* run-connection states used while matching runs of adjacent lines */
+#define ICV_SINGLE 0
+#define ICV_CONNECTING_ABOVE 1
+#define ICV_CONNECTING_BELOW -1
+#define ICV_IS_COMPONENT_POINT(val) ((val) != 0)
+
+/* address of the most recently written element of a sequence writer */
+#define CV_GET_WRITTEN_ELEM( writer ) ((writer).ptr - (writer).seq->elem_size)
+
+/* endpoint of a horizontal run: 'next' chains runs in scanning order,
+   'link' stitches run endpoints together into closed contours */
+typedef struct CvLinkedRunPoint
+{
+    struct CvLinkedRunPoint* link;
+    struct CvLinkedRunPoint* next;
+    CvPoint pt;
+}
+CvLinkedRunPoint;
+\r
+\r
+/*
+   Retrieves contours by extracting horizontal runs of non-zero pixels and
+   linking run endpoints of adjacent lines (presumably the CV_LINK_RUNS
+   retrieval path of cvFindContours — confirm against the caller).
+   The resulting closed polylines are chained through h_next/h_prev; holes
+   are flagged with CV_SEQ_FLAG_HOLE. Returns the number of contours
+   (-1 if none were produced) and stores the head of the list in *result.
+*/
+static int
+icvFindContoursInInterval( const CvArr* src,
+                           /*int minValue, int maxValue,*/
+                           CvMemStorage* storage,
+                           CvSeq** result,
+                           int contourHeaderSize )
+{
+    int count = 0;
+    CvMemStorage* storage00 = 0;
+    CvMemStorage* storage01 = 0;
+    CvSeq* first = 0;
+
+    CV_FUNCNAME( "icvFindContoursInInterval" );
+
+    __BEGIN__;
+
+    int i, j, k, n;
+
+    uchar* src_data = 0;
+    int img_step = 0;
+    CvSize img_size;
+
+    int connect_flag;           /* current run-connection state (ICV_*) */
+    int lower_total;            /* number of endpoints on the lower line */
+    int upper_total;            /* number of endpoints on the upper line */
+    int all_total;
+
+    CvSeq* runs;
+    CvLinkedRunPoint tmp;
+    CvLinkedRunPoint* tmp_prev;
+    CvLinkedRunPoint* upper_line = 0;
+    CvLinkedRunPoint* lower_line = 0;
+    CvLinkedRunPoint* last_elem;
+
+    CvLinkedRunPoint* upper_run = 0;
+    CvLinkedRunPoint* lower_run = 0;
+    CvLinkedRunPoint* prev_point = 0;
+
+    CvSeqWriter writer_ext;     /* collects start points of external contours */
+    CvSeqWriter writer_int;     /* collects start points of internal contours */
+    CvSeqWriter writer;
+    CvSeqReader reader;
+
+    CvSeq* external_contours;
+    CvSeq* internal_contours;
+    CvSeq* prev = 0;
+
+    if( !storage )
+        CV_ERROR( CV_StsNullPtr, "NULL storage pointer" );
+
+    if( !result )
+        CV_ERROR( CV_StsNullPtr, "NULL double CvSeq pointer" );
+
+    if( contourHeaderSize < (int)sizeof(CvContour))
+        CV_ERROR( CV_StsBadSize, "Contour header size must be >= sizeof(CvContour)" );
+
+    CV_CALL( storage00 = cvCreateChildMemStorage(storage));
+    CV_CALL( storage01 = cvCreateChildMemStorage(storage));
+
+    {
+        CvMat stub, *mat;
+
+        CV_CALL( mat = cvGetMat( src, &stub ));
+        if( !CV_IS_MASK_ARR(mat))
+            CV_ERROR( CV_StsBadArg, "Input array must be 8uC1 or 8sC1" );
+        src_data = mat->data.ptr;
+        img_step = mat->step;
+        img_size = cvGetMatSize( mat );
+    }
+
+    // Create temporary sequences
+    runs = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvLinkedRunPoint), storage00 );
+    cvStartAppendToSeq( runs, &writer );
+
+    cvStartWriteSeq( 0, sizeof(CvSeq), sizeof(CvLinkedRunPoint*), storage01, &writer_ext );
+    cvStartWriteSeq( 0, sizeof(CvSeq), sizeof(CvLinkedRunPoint*), storage01, &writer_int );
+
+    tmp_prev = &(tmp);
+    tmp_prev->next = 0;
+    tmp_prev->link = 0;
+
+    // First line. None of runs is binded
+    tmp.pt.y = 0;
+    i = 0;
+    CV_WRITE_SEQ_ELEM( tmp, writer );
+    upper_line = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
+
+    tmp_prev = upper_line;
+    for( j = 0; j < img_size.width; )
+    {
+        /* skip zero pixels, then record the run's start point */
+        for( ; j < img_size.width && !ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
+            ;
+        if( j == img_size.width )
+            break;
+
+        tmp.pt.x = j;
+        CV_WRITE_SEQ_ELEM( tmp, writer );
+        tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
+        tmp_prev = tmp_prev->next;
+
+        /* skip non-zero pixels, then record the run's end point */
+        for( ; j < img_size.width && ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
+            ;
+
+        tmp.pt.x = j-1;
+        CV_WRITE_SEQ_ELEM( tmp, writer );
+        tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
+        tmp_prev->link = tmp_prev->next;
+        // First point of contour
+        CV_WRITE_SEQ_ELEM( tmp_prev, writer_ext );
+        tmp_prev = tmp_prev->next;
+    }
+    cvFlushSeqWriter( &writer );
+    upper_line = upper_line->next;
+    upper_total = runs->total - 1;
+    last_elem = tmp_prev;
+    tmp_prev->next = 0;
+
+    for( i = 1; i < img_size.height; i++ )
+    {
+//------// Find runs in next line
+        src_data += img_step;
+        tmp.pt.y = i;
+        all_total = runs->total;
+        for( j = 0; j < img_size.width; )
+        {
+            for( ; j < img_size.width && !ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
+                ;
+            if( j == img_size.width ) break;
+
+            tmp.pt.x = j;
+            CV_WRITE_SEQ_ELEM( tmp, writer );
+            tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
+            tmp_prev = tmp_prev->next;
+
+            for( ; j < img_size.width && ICV_IS_COMPONENT_POINT(src_data[j]); j++ )
+                ;
+
+            tmp.pt.x = j-1;
+            CV_WRITE_SEQ_ELEM( tmp, writer );
+            tmp_prev = tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
+        }//j
+        cvFlushSeqWriter( &writer );
+        lower_line = last_elem->next;
+        lower_total = runs->total - all_total;
+        last_elem = tmp_prev;
+        tmp_prev->next = 0;
+//------//
+//------// Find links between runs of lower_line and upper_line
+        upper_run = upper_line;
+        lower_run = lower_line;
+        connect_flag = ICV_SINGLE;
+
+        /* advance over run pairs; runs overlapping within one pixel
+           (8-connectivity) get their endpoints linked */
+        for( k = 0, n = 0; k < upper_total/2 && n < lower_total/2; )
+        {
+            switch( connect_flag )
+            {
+            case ICV_SINGLE:
+                if( upper_run->next->pt.x < lower_run->next->pt.x )
+                {
+                    if( upper_run->next->pt.x >= lower_run->pt.x -1 )
+                    {
+                        lower_run->link = upper_run;
+                        connect_flag = ICV_CONNECTING_ABOVE;
+                        prev_point = upper_run->next;
+                    }
+                    else
+                        upper_run->next->link = upper_run;  /* isolated run: close it on itself */
+                    k++;
+                    upper_run = upper_run->next->next;
+                }
+                else
+                {
+                    if( upper_run->pt.x <= lower_run->next->pt.x +1 )
+                    {
+                        lower_run->link = upper_run;
+                        connect_flag = ICV_CONNECTING_BELOW;
+                        prev_point = lower_run->next;
+                    }
+                    else
+                    {
+                        lower_run->link = lower_run->next;
+                        // First point of contour
+                        CV_WRITE_SEQ_ELEM( lower_run, writer_ext );
+                    }
+                    n++;
+                    lower_run = lower_run->next->next;
+                }
+                break;
+            case ICV_CONNECTING_ABOVE:
+                if( upper_run->pt.x > lower_run->next->pt.x +1 )
+                {
+                    prev_point->link = lower_run->next;
+                    connect_flag = ICV_SINGLE;
+                    n++;
+                    lower_run = lower_run->next->next;
+                }
+                else
+                {
+                    prev_point->link = upper_run;
+                    if( upper_run->next->pt.x < lower_run->next->pt.x )
+                    {
+                        k++;
+                        prev_point = upper_run->next;
+                        upper_run = upper_run->next->next;
+                    }
+                    else
+                    {
+                        connect_flag = ICV_CONNECTING_BELOW;
+                        prev_point = lower_run->next;
+                        n++;
+                        lower_run = lower_run->next->next;
+                    }
+                }
+                break;
+            case ICV_CONNECTING_BELOW:
+                if( lower_run->pt.x > upper_run->next->pt.x +1 )
+                {
+                    upper_run->next->link = prev_point;
+                    connect_flag = ICV_SINGLE;
+                    k++;
+                    upper_run = upper_run->next->next;
+                }
+                else
+                {
+                    // First point of contour
+                    CV_WRITE_SEQ_ELEM( lower_run, writer_int );
+
+                    lower_run->link = prev_point;
+                    if( lower_run->next->pt.x < upper_run->next->pt.x )
+                    {
+                        n++;
+                        prev_point = lower_run->next;
+                        lower_run = lower_run->next->next;
+                    }
+                    else
+                    {
+                        connect_flag = ICV_CONNECTING_ABOVE;
+                        k++;
+                        prev_point = upper_run->next;
+                        upper_run = upper_run->next->next;
+                    }
+                }
+                break;
+            }
+        }// k, n
+
+        /* flush the remaining lower-line runs */
+        for( ; n < lower_total/2; n++ )
+        {
+            if( connect_flag != ICV_SINGLE )
+            {
+                prev_point->link = lower_run->next;
+                connect_flag = ICV_SINGLE;
+                lower_run = lower_run->next->next;
+                continue;
+            }
+            lower_run->link = lower_run->next;
+
+            //First point of contour
+            CV_WRITE_SEQ_ELEM( lower_run, writer_ext );
+
+            lower_run = lower_run->next->next;
+        }
+
+        /* flush the remaining upper-line runs */
+        for( ; k < upper_total/2; k++ )
+        {
+            if( connect_flag != ICV_SINGLE )
+            {
+                upper_run->next->link = prev_point;
+                connect_flag = ICV_SINGLE;
+                upper_run = upper_run->next->next;
+                continue;
+            }
+            upper_run->next->link = upper_run;
+            upper_run = upper_run->next->next;
+        }
+        upper_line = lower_line;
+        upper_total = lower_total;
+    }//i
+
+    upper_run = upper_line;
+
+    //the last line of image
+    for( k = 0; k < upper_total/2; k++ )
+    {
+        upper_run->next->link = upper_run;
+        upper_run = upper_run->next->next;
+    }
+
+//------//
+//------//Find end read contours
+    external_contours = cvEndWriteSeq( &writer_ext );
+    internal_contours = cvEndWriteSeq( &writer_int );
+
+    for( k = 0; k < 2; k++ )
+    {
+        CvSeq* contours = k == 0 ? external_contours : internal_contours;
+
+        cvStartReadSeq( contours, &reader );
+
+        for( j = 0; j < contours->total; j++, count++ )
+        {
+            CvLinkedRunPoint* p_temp;
+            CvLinkedRunPoint* p00;
+            CvLinkedRunPoint* p01;
+            CvSeq* contour;
+
+            CV_READ_SEQ_ELEM( p00, reader );
+            p01 = p00;
+
+            /* a cleared link means this start point was already consumed */
+            if( !p00->link )
+                continue;
+
+            cvStartWriteSeq( CV_SEQ_ELTYPE_POINT | CV_SEQ_POLYLINE | CV_SEQ_FLAG_CLOSED,
+                             contourHeaderSize, sizeof(CvPoint), storage, &writer );
+            /* walk the link ring, clearing links as we go, until we are
+               back at the start point */
+            do
+            {
+                CV_WRITE_SEQ_ELEM( p00->pt, writer );
+                p_temp = p00;
+                p00 = p00->link;
+                p_temp->link = 0;
+            }
+            while( p00 != p01 );
+
+            contour = cvEndWriteSeq( &writer );
+            cvBoundingRect( contour, 1 );
+
+            if( k != 0 )
+                contour->flags |= CV_SEQ_FLAG_HOLE;
+
+            /* append to the flat h_next/h_prev list */
+            if( !first )
+                prev = first = contour;
+            else
+            {
+                contour->h_prev = prev;
+                prev = prev->h_next = contour;
+            }
+        }
+    }
+
+    __END__;
+
+    if( !first )
+        count = -1;     /* no contours (or an error occurred) */
+
+    if( result )
+        *result = first;
+
+    cvReleaseMemStorage(&storage00);
+    cvReleaseMemStorage(&storage01);
+
+    return count;
+}
+\r
+\r
+\r
+/*F///////////////////////////////////////////////////////////////////////////////////////\r
+// Name: cvFindContours\r
+// Purpose:\r
+// Finds all the contours on the bi-level image.\r
+// Context:\r
+// Parameters:\r
+// img - source image.\r
+// Non-zero pixels are considered as 1-pixels\r
+// and zero pixels as 0-pixels.\r
+// step - full width of source image in bytes.\r
+// size - width and height of the image in pixels\r
+// storage - pointer to storage where will the output contours be placed.\r
+// header_size - header size of resulting contours\r
+// mode - mode of contour retrieval.\r
+// method - method of approximation that is applied to contours\r
+// first_contour - pointer to first contour pointer\r
+// Returns:\r
+// CV_OK or error code\r
+// Notes:\r
+//F*/\r
+CV_IMPL int\r
+cvFindContours( void* img, CvMemStorage* storage,\r
+ CvSeq** firstContour, int cntHeaderSize,\r
+ int mode,\r
+ int method, CvPoint offset )\r
+{\r
+ CvContourScanner scanner = 0;\r
+ CvSeq *contour = 0;\r
+ // Number of contours found; -1 signals failure to the caller.\r
+ int count = -1;\r
+\r
+ CV_FUNCNAME( "cvFindContours" );\r
+\r
+ __BEGIN__;\r
+\r
+ if( !firstContour )\r
+ CV_ERROR( CV_StsNullPtr, "NULL double CvSeq pointer" );\r
+\r
+ // CV_LINK_RUNS takes the run-linking path, which does not support a\r
+ // non-zero offset (checked below) and bypasses the contour scanner.\r
+ if( method == CV_LINK_RUNS )\r
+ {\r
+ if( offset.x != 0 || offset.y != 0 )\r
+ CV_ERROR( CV_StsOutOfRange,\r
+ "Nonzero offset is not supported in CV_LINK_RUNS yet" );\r
+\r
+ CV_CALL( count = icvFindContoursInInterval( img, storage,\r
+ firstContour, cntHeaderSize ));\r
+ }\r
+ else\r
+ {\r
+ // Generic path: run the contour scanner until it is exhausted.\r
+ CV_CALL( scanner = cvStartFindContours( img, storage,\r
+ cntHeaderSize, mode, method, offset ));\r
+ assert( scanner );\r
+\r
+ do\r
+ {\r
+ count++;\r
+ contour = cvFindNextContour( scanner );\r
+ }\r
+ while( contour != 0 );\r
+\r
+ *firstContour = cvEndFindContours( &scanner );\r
+ }\r
+\r
+ __END__;\r
+\r
+ return count;\r
+}\r
+\r
+\r
+namespace cv\r
+{\r
+\r
+// Collects all contours produced by cvFindContours into a Vector of\r
+// point vectors; if `hierarchy` is non-null, fills it with one\r
+// [h_next, h_prev, v_next, v_prev] index quadruple per contour\r
+// (-1 where the corresponding link is absent).\r
+static Vector<Vector<Point> >\r
+_findContours( const Mat& image, Vector<Vec4i>* hierarchy, int mode, int method, Point offset )\r
+{\r
+ MemStorage storage(cvCreateMemStorage());\r
+ CvMat _image = image;\r
+ CvSeq* _contours = 0;\r
+ Vector<Vector<Point> > contours;\r
+ if( hierarchy )\r
+ hierarchy->clear();\r
+ cvFindContours(&_image, storage, &_contours, sizeof(CvContour), mode, method, offset);\r
+ if( !_contours )\r
+ return contours;\r
+ Seq<CvSeq*> all_contours(cvTreeToNodeSeq( _contours, sizeof(CvSeq), storage ));\r
+ size_t i, total = all_contours.size();\r
+ contours.resize(total);\r
+ SeqIterator<CvSeq*> it = all_contours.begin();\r
+ for( i = 0; i < total; i++, ++it )\r
+ {\r
+ CvSeq* c = *it;\r
+ // Store the contour's index in `color` so that the tree links can\r
+ // be translated into indices in the second pass below.\r
+ ((CvContour*)c)->color = (int)i;\r
+ Seq<Point>(c).copyTo(contours[i]);\r
+ }\r
+\r
+ if( hierarchy )\r
+ {\r
+ hierarchy->resize(total);\r
+ it = all_contours.begin();\r
+ for( i = 0; i < total; i++, ++it )\r
+ {\r
+ CvSeq* c = *it;\r
+ // BUGFIX: each link must follow its own pointer; previously\r
+ // h_prev, v_next and v_prev were all derived from c->h_next,\r
+ // producing an incorrect hierarchy for every contour.\r
+ int h_next = c->h_next ? ((CvContour*)c->h_next)->color : -1;\r
+ int h_prev = c->h_prev ? ((CvContour*)c->h_prev)->color : -1;\r
+ int v_next = c->v_next ? ((CvContour*)c->v_next)->color : -1;\r
+ int v_prev = c->v_prev ? ((CvContour*)c->v_prev)->color : -1;\r
+ (*hierarchy)[i] = Vec4i(h_next, h_prev, v_next, v_prev);\r
+ }\r
+ }\r
+\r
+ return contours;\r
+}\r
+\r
+// Public wrapper: retrieves contours together with their hierarchy.\r
+Vector<Vector<Point> >\r
+findContours( const Mat& image, Vector<Vec4i>& hierarchy, int mode, int method, Point offset )\r
+{\r
+ return _findContours(image, &hierarchy, mode, method, offset);\r
+}\r
+\r
+// Public wrapper: retrieves contours only, without hierarchy output.\r
+Vector<Vector<Point> >\r
+findContours( const Mat& image, int mode, int method, Point offset)\r
+{\r
+ return _findContours(image, 0, mode, method, offset);\r
+}\r
+\r
+// Draws the given contours into `image`. When `hierarchy` is non-empty\r
+// it is used to rebuild the contour tree so that `maxLevel` nesting\r
+// control works; otherwise the contours are drawn as a flat list.\r
+void drawContours( Mat& image, const Vector<Vector<Point> >& contours,\r
+ const Scalar& color, int thickness,\r
+ int lineType, const Vector<Vec4i>& hierarchy,\r
+ int maxLevel, Point offset )\r
+{\r
+ CvMat _image = image;\r
+\r
+ // BUGFIX: nothing to draw for an empty contour set; previously\r
+ // &seq[0] (and, for maxLevel == 0, seq[0] itself) was evaluated on\r
+ // an empty vector, which is undefined behavior.\r
+ if( contours.empty() )\r
+ return;\r
+\r
+ size_t i = 0, count = maxLevel != 0 ? contours.size() : 1;\r
+ Vector<CvSeq> seq(count);\r
+ Vector<CvSeqBlock> block(count);\r
+\r
+ // TODO: if maxLevel is < 0, we do not have to collect all the contours,\r
+ // instead we can just track down the sub-tree of interest.\r
+ for( i = 0; i < count; i++ )\r
+ {\r
+ const Vector<Point>& ci = contours[i];\r
+ cvMakeSeqHeaderForArray(CV_SEQ_POLYGON, sizeof(CvSeq), sizeof(Point),
+ !ci.empty() ? (void*)&ci[0] : 0, ci.size(), &seq[i], &block[i] );
+ }\r
+\r
+ // Without tree information, link the sequence headers into a flat\r
+ // doubly-linked list so cvDrawContours can iterate them.\r
+ if( hierarchy.empty() || maxLevel == 0 )\r
+ for( i = 0; i < count; i++ )\r
+ {\r
+ seq[i].h_next = i < count-1 ? &seq[i+1] : 0;\r
+ seq[i].h_prev = i > 0 ? &seq[i-1] : 0;\r
+ }\r
+ else\r
+ {\r
+ CV_Assert(hierarchy.size() == contours.size());\r
+ for( i = 0; i < count; i++ )\r
+ {\r
+ int h_next = hierarchy[i][0], h_prev = hierarchy[i][1],\r
+ v_next = hierarchy[i][2], v_prev = hierarchy[i][3];\r
+ seq[i].h_next = (size_t)h_next < count ? &seq[h_next] : 0;\r
+ seq[i].h_prev = (size_t)h_prev < count ? &seq[h_prev] : 0;\r
+ seq[i].v_next = (size_t)v_next < count ? &seq[v_next] : 0;\r
+ seq[i].v_prev = (size_t)v_prev < count ? &seq[v_prev] : 0;\r
+ }\r
+ }\r
+\r
+ cvDrawContours( &_image, &seq[0], color, color, maxLevel, thickness, lineType, offset );\r
+}\r
+\r
+// Approximates a polygonal curve with the Douglas-Peucker algorithm;\r
+// integer-point variant. `epsilon` is the maximum allowed deviation.\r
+void approxPolyDP( const Vector<Point>& curve,\r
+ Vector<Point>& approxCurve,\r
+ double epsilon, bool closed )\r
+{\r
+ CvMat _curve = curve;\r
+ MemStorage storage(cvCreateMemStorage());\r
+ Seq<Point> seq(cvApproxPoly(&_curve, sizeof(CvSeq), storage, CV_POLY_APPROX_DP, epsilon, closed));\r
+ seq.copyTo(approxCurve);\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+void approxPolyDP( const Vector<Point2f>& curve,\r
+ Vector<Point2f>& approxCurve,\r
+ double epsilon, bool closed )\r
+{\r
+ CvMat _curve = curve;\r
+ MemStorage storage(cvCreateMemStorage());\r
+ Seq<Point2f> seq(cvApproxPoly(&_curve, sizeof(CvSeq), storage, CV_POLY_APPROX_DP, epsilon, closed));\r
+ seq.copyTo(approxCurve);\r
+}\r
+\r
+// Computes the perimeter (closed) or length (open) of a curve.\r
+double arcLength( const Vector<Point>& curve, bool closed )\r
+{\r
+ CvMat _curve = curve;\r
+ return cvArcLength(&_curve, CV_WHOLE_SEQ, closed);\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+double arcLength( const Vector<Point2f>& curve, bool closed )\r
+{\r
+ CvMat _curve = curve;\r
+ return cvArcLength(&_curve, CV_WHOLE_SEQ, closed);\r
+}\r
+\r
+// Computes the up-right (axis-aligned) bounding rectangle of a point set.\r
+Rect boundingRect( const Vector<Point>& points )\r
+{\r
+ CvMat _points = points;\r
+ return cvBoundingRect(&_points, 0);\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+Rect boundingRect( const Vector<Point2f>& points )\r
+{\r
+ CvMat _points = points;\r
+ return cvBoundingRect(&_points, 0);\r
+}\r
+\r
+// Computes the area enclosed by a contour (as returned by cvContourArea).\r
+double contourArea( const Vector<Point>& contour )\r
+{\r
+ CvMat _contour = contour;\r
+ return cvContourArea(&_contour);\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+double contourArea( const Vector<Point2f>& contour )\r
+{\r
+ CvMat _contour = contour;\r
+ return cvContourArea(&_contour);\r
+}\r
+\r
+// Finds the minimum-area (possibly rotated) rectangle enclosing a point set.\r
+RotatedRect minAreaRect( const Vector<Point>& points )\r
+{\r
+ CvMat _points = points;\r
+ return cvMinAreaRect2(&_points, 0);\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+RotatedRect minAreaRect( const Vector<Point2f>& points )\r
+{\r
+ CvMat _points = points;\r
+ return cvMinAreaRect2(&_points, 0);\r
+}\r
+\r
+// Finds the minimal enclosing circle of a point set.\r
+// BUGFIX: `center` is now an output reference; it was previously taken\r
+// by value, so the computed circle center never reached the caller\r
+// (note that `radius` was already passed by reference).\r
+void minEnclosingCircle( const Vector<Point>& points,\r
+ Point2f& center, float& radius )\r
+{\r
+ CvMat _points = points;\r
+ cvMinEnclosingCircle( &_points, (CvPoint2D32f*)&center, &radius );\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+void minEnclosingCircle( const Vector<Point2f>& points,\r
+ Point2f& center, float& radius )\r
+{\r
+ CvMat _points = points;\r
+ cvMinEnclosingCircle( &_points, (CvPoint2D32f*)&center, &radius );\r
+}\r
+\r
+// Compares two shapes using Hu-moment based methods (see cvMatchShapes);\r
+// floating-point variant.\r
+double matchShapes( const Vector<Point2f>& contour1,\r
+ const Vector<Point2f>& contour2,\r
+ int method, double parameter )\r
+{\r
+ CvMat c1 = contour1, c2 = contour2;\r
+ return cvMatchShapes(&c1, &c2, method, parameter);\r
+}\r
+\r
+// Integer-point variant of the overload above.\r
+double matchShapes( const Vector<Point>& contour1,\r
+ const Vector<Point>& contour2,\r
+ int method, double parameter )\r
+{\r
+ CvMat c1 = contour1, c2 = contour2;\r
+ return cvMatchShapes(&c1, &c2, method, parameter);\r
+}\r
+\r
+// Computes the convex hull of an integer point set, returning the\r
+// indices of the hull points into `points` (returnPoints = 0).\r
+void convexHull( const Vector<Point>& points,\r
+ Vector<int>& hull, bool clockwise )\r
+{\r
+ hull.resize(points.size());\r
+ CvMat _points = points, _hull=hull;\r
+ cvConvexHull2(&_points, &_hull, clockwise ? CV_CLOCKWISE : CV_COUNTER_CLOCKWISE, 0);\r
+ hull.resize(_hull.cols);\r
+}\r
+\r
+// Same as above, but returns the hull vertices themselves\r
+// (returnPoints = 1).\r
+void convexHull( const Vector<Point>& points,\r
+ Vector<Point>& hull, bool clockwise )\r
+{\r
+ hull.resize(points.size());\r
+ CvMat _points = points, _hull=hull;\r
+ cvConvexHull2(&_points, &_hull, clockwise ? CV_CLOCKWISE : CV_COUNTER_CLOCKWISE, 1);\r
+ hull.resize(_hull.cols);\r
+}\r
+\r
+// Floating-point variant returning hull-point indices.\r
+void convexHull( const Vector<Point2f>& points,\r
+ Vector<int>& hull, bool clockwise )\r
+{\r
+ hull.resize(points.size());\r
+ CvMat _points = points, _hull=hull;\r
+ cvConvexHull2(&_points, &_hull, clockwise ? CV_CLOCKWISE : CV_COUNTER_CLOCKWISE, 0);\r
+ hull.resize(_hull.cols);\r
+}\r
+\r
+// Floating-point variant returning hull vertices.\r
+// BUGFIX: returnPoints must be 1 here; it was 0, which asks\r
+// cvConvexHull2 for indices while `hull` stores Point2f vertices\r
+// (mirrors the Point -> Vector<Point> overload above).\r
+void convexHull( const Vector<Point2f>& points,\r
+ Vector<Point2f>& hull, bool clockwise )\r
+{\r
+ hull.resize(points.size());\r
+ CvMat _points = points, _hull=hull;\r
+ cvConvexHull2(&_points, &_hull, clockwise ? CV_CLOCKWISE : CV_COUNTER_CLOCKWISE, 1);\r
+ hull.resize(_hull.cols);\r
+}\r
+\r
+// Returns true if the contour is convex (integer-point variant).\r
+bool isContourConvex( const Vector<Point>& contour )\r
+{\r
+ CvMat c = contour;\r
+ return cvCheckContourConvexity(&c) > 0;\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+bool isContourConvex( const Vector<Point2f>& contour )\r
+{\r
+ CvMat c = contour;\r
+ return cvCheckContourConvexity(&c) > 0;\r
+}\r
+\r
+// Fits an ellipse to a point set in the least-squares sense;\r
+// the result is returned as a rotated rectangle.\r
+RotatedRect fitEllipse( const Vector<Point>& points )\r
+{\r
+ CvMat _points = points;\r
+ return cvFitEllipse2(&_points);\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+RotatedRect fitEllipse( const Vector<Point2f>& points )\r
+{\r
+ CvMat _points = points;\r
+ return cvFitEllipse2(&_points);\r
+}\r
+\r
+// Fits a 2D line to integer points; returns (vx, vy, x0, y0).\r
+// `points` is now taken by const reference (the missing `&` previously\r
+// copied the whole vector on every call), matching every sibling\r
+// wrapper in this file; call sites are unaffected.\r
+Vec4f fitLine( const Vector<Point>& points, int distType,\r
+ double param, double reps, double aeps )\r
+{\r
+ CvMat _points = points;\r
+ Vec4f line;\r
+ cvFitLine(&_points, distType, param, reps, aeps, &line[0]);\r
+ return line;\r
+}\r
+\r
+// 2D floating-point variant of the overload above.\r
+Vec4f fitLine( const Vector<Point2f>& points, int distType,\r
+ double param, double reps, double aeps )\r
+{\r
+ CvMat _points = points;\r
+ Vec4f line;\r
+ cvFitLine(&_points, distType, param, reps, aeps, &line[0]);\r
+ return line;\r
+}\r
+\r
+// 3D variant; returns (vx, vy, vz, x0, y0, z0).\r
+Vec6f fitLine( const Vector<Point3f>& points, int distType,\r
+ double param, double reps, double aeps )\r
+{\r
+ CvMat _points = points;\r
+ Vec6f line;\r
+ cvFitLine(&_points, distType, param, reps, aeps, &line[0]);\r
+ return line;\r
+}\r
+\r
+// Tests whether `pt` is inside, outside or on the contour; when\r
+// `measureDist` is true, returns the signed distance instead of +1/0/-1.\r
+double pointPolygonTest( const Vector<Point>& contour,\r
+ Point2f pt, bool measureDist )\r
+{\r
+ CvMat c = contour;\r
+ return cvPointPolygonTest( &c, pt, measureDist );\r
+}\r
+\r
+// Floating-point variant of the overload above.\r
+double pointPolygonTest( const Vector<Point2f>& contour,\r
+ Point2f pt, bool measureDist )\r
+{\r
+ CvMat c = contour;\r
+ return cvPointPolygonTest( &c, pt, measureDist );\r
+}\r
+\r
+}\r
+\r
+/* End of file. */\r
cvFree( &buffer );
}
+// Refines the given corner locations in place to sub-pixel accuracy.\r
+void cv::cornerSubPix( const Mat& image, Vector<Point2f>& corners,\r
+ Size winSize, Size zeroZone,\r
+ TermCriteria criteria )\r
+{\r
+ // BUGFIX: guard the empty case; &corners[0] on an empty vector is\r
+ // undefined behavior and there is nothing to refine anyway.\r
+ if( corners.size() == 0 )\r
+ return;\r
+ CvMat _image = image;\r
+ cvFindCornerSubPix(&_image, (CvPoint2D32f*)&corners[0], corners.size(),\r
+ winSize, zeroZone, criteria );\r
+}\r
+
/* End of file. */
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include "_cv.h"
-
-#define ICV_DIST_SHIFT 16
-#define ICV_INIT_DIST0 (INT_MAX >> 2)
-
-static CvStatus
-icvInitTopBottom( int* temp, int tempstep, CvSize size, int border )
-{
- int i, j;
- for( i = 0; i < border; i++ )
- {
- int* ttop = (int*)(temp + i*tempstep);
- int* tbottom = (int*)(temp + (size.height + border*2 - i - 1)*tempstep);
-
- for( j = 0; j < size.width + border*2; j++ )
- {
- ttop[j] = ICV_INIT_DIST0;
- tbottom[j] = ICV_INIT_DIST0;
- }
- }
-
- return CV_OK;
-}
-
-
-static CvStatus CV_STDCALL
-icvDistanceTransform_3x3_C1R( const uchar* src, int srcstep, int* temp,
- int step, float* dist, int dststep, CvSize size, const float* metrics )
-{
- const int BORDER = 1;
- int i, j;
- const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT );
- const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT );
- const float scale = 1.f/(1 << ICV_DIST_SHIFT);
-
- srcstep /= sizeof(src[0]);
- step /= sizeof(temp[0]);
- dststep /= sizeof(dist[0]);
-
- icvInitTopBottom( temp, step, size, BORDER );
-
- // forward pass
- for( i = 0; i < size.height; i++ )
- {
- const uchar* s = src + i*srcstep;
- int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;
-
- for( j = 0; j < BORDER; j++ )
- tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0;
-
- for( j = 0; j < size.width; j++ )
- {
- if( !s[j] )
- tmp[j] = 0;
- else
- {
- int t0 = tmp[j-step-1] + DIAG_DIST;
- int t = tmp[j-step] + HV_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-step+1] + DIAG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-1] + HV_DIST;
- if( t0 > t ) t0 = t;
- tmp[j] = t0;
- }
- }
- }
-
- // backward pass
- for( i = size.height - 1; i >= 0; i-- )
- {
- float* d = (float*)(dist + i*dststep);
- int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;
-
- for( j = size.width - 1; j >= 0; j-- )
- {
- int t0 = tmp[j];
- if( t0 > HV_DIST )
- {
- int t = tmp[j+step+1] + DIAG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step] + HV_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step-1] + DIAG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+1] + HV_DIST;
- if( t0 > t ) t0 = t;
- tmp[j] = t0;
- }
- d[j] = (float)(t0 * scale);
- }
- }
-
- return CV_OK;
-}
-
-
-static CvStatus CV_STDCALL
-icvDistanceTransform_5x5_C1R( const uchar* src, int srcstep, int* temp,
- int step, float* dist, int dststep, CvSize size, const float* metrics )
-{
- const int BORDER = 2;
- int i, j;
- const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT );
- const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT );
- const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], ICV_DIST_SHIFT );
- const float scale = 1.f/(1 << ICV_DIST_SHIFT);
-
- srcstep /= sizeof(src[0]);
- step /= sizeof(temp[0]);
- dststep /= sizeof(dist[0]);
-
- icvInitTopBottom( temp, step, size, BORDER );
-
- // forward pass
- for( i = 0; i < size.height; i++ )
- {
- const uchar* s = src + i*srcstep;
- int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;
-
- for( j = 0; j < BORDER; j++ )
- tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0;
-
- for( j = 0; j < size.width; j++ )
- {
- if( !s[j] )
- tmp[j] = 0;
- else
- {
- int t0 = tmp[j-step*2-1] + LONG_DIST;
- int t = tmp[j-step*2+1] + LONG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-step-2] + LONG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-step-1] + DIAG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-step] + HV_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-step+1] + DIAG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-step+2] + LONG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j-1] + HV_DIST;
- if( t0 > t ) t0 = t;
- tmp[j] = t0;
- }
- }
- }
-
- // backward pass
- for( i = size.height - 1; i >= 0; i-- )
- {
- float* d = (float*)(dist + i*dststep);
- int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;
-
- for( j = size.width - 1; j >= 0; j-- )
- {
- int t0 = tmp[j];
- if( t0 > HV_DIST )
- {
- int t = tmp[j+step*2+1] + LONG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step*2-1] + LONG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step+2] + LONG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step+1] + DIAG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step] + HV_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step-1] + DIAG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+step-2] + LONG_DIST;
- if( t0 > t ) t0 = t;
- t = tmp[j+1] + HV_DIST;
- if( t0 > t ) t0 = t;
- tmp[j] = t0;
- }
- d[j] = (float)(t0 * scale);
- }
- }
-
- return CV_OK;
-}
-
-
-static CvStatus CV_STDCALL
-icvDistanceTransformEx_5x5_C1R( const uchar* src, int srcstep, int* temp,
- int step, float* dist, int dststep, int* labels, int lstep,
- CvSize size, const float* metrics )
-{
- const int BORDER = 2;
-
- int i, j;
- const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT );
- const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT );
- const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], ICV_DIST_SHIFT );
- const float scale = 1.f/(1 << ICV_DIST_SHIFT);
-
- srcstep /= sizeof(src[0]);
- step /= sizeof(temp[0]);
- dststep /= sizeof(dist[0]);
- lstep /= sizeof(labels[0]);
-
- icvInitTopBottom( temp, step, size, BORDER );
-
- // forward pass
- for( i = 0; i < size.height; i++ )
- {
- const uchar* s = src + i*srcstep;
- int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;
- int* lls = (int*)(labels + i*lstep);
-
- for( j = 0; j < BORDER; j++ )
- tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0;
-
- for( j = 0; j < size.width; j++ )
- {
- if( !s[j] )
- {
- tmp[j] = 0;
- //assert( lls[j] != 0 );
- }
- else
- {
- int t0 = ICV_INIT_DIST0, t;
- int l0 = 0;
-
- t = tmp[j-step*2-1] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-lstep*2-1];
- }
- t = tmp[j-step*2+1] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-lstep*2+1];
- }
- t = tmp[j-step-2] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-lstep-2];
- }
- t = tmp[j-step-1] + DIAG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-lstep-1];
- }
- t = tmp[j-step] + HV_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-lstep];
- }
- t = tmp[j-step+1] + DIAG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-lstep+1];
- }
- t = tmp[j-step+2] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-lstep+2];
- }
- t = tmp[j-1] + HV_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j-1];
- }
-
- tmp[j] = t0;
- lls[j] = l0;
- }
- }
- }
-
- // backward pass
- for( i = size.height - 1; i >= 0; i-- )
- {
- float* d = (float*)(dist + i*dststep);
- int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;
- int* lls = (int*)(labels + i*lstep);
-
- for( j = size.width - 1; j >= 0; j-- )
- {
- int t0 = tmp[j];
- int l0 = lls[j];
- if( t0 > HV_DIST )
- {
- int t = tmp[j+step*2+1] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+lstep*2+1];
- }
- t = tmp[j+step*2-1] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+lstep*2-1];
- }
- t = tmp[j+step+2] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+lstep+2];
- }
- t = tmp[j+step+1] + DIAG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+lstep+1];
- }
- t = tmp[j+step] + HV_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+lstep];
- }
- t = tmp[j+step-1] + DIAG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+lstep-1];
- }
- t = tmp[j+step-2] + LONG_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+lstep-2];
- }
- t = tmp[j+1] + HV_DIST;
- if( t0 > t )
- {
- t0 = t;
- l0 = lls[j+1];
- }
- tmp[j] = t0;
- lls[j] = l0;
- }
- d[j] = (float)(t0 * scale);
- }
- }
-
- return CV_OK;
-}
-
-
-static CvStatus
-icvGetDistanceTransformMask( int maskType, float *metrics )
-{
- if( !metrics )
- return CV_NULLPTR_ERR;
-
- switch (maskType)
- {
- case 30:
- metrics[0] = 1.0f;
- metrics[1] = 1.0f;
- break;
-
- case 31:
- metrics[0] = 1.0f;
- metrics[1] = 2.0f;
- break;
-
- case 32:
- metrics[0] = 0.955f;
- metrics[1] = 1.3693f;
- break;
-
- case 50:
- metrics[0] = 1.0f;
- metrics[1] = 1.0f;
- metrics[2] = 2.0f;
- break;
-
- case 51:
- metrics[0] = 1.0f;
- metrics[1] = 2.0f;
- metrics[2] = 3.0f;
- break;
-
- case 52:
- metrics[0] = 1.0f;
- metrics[1] = 1.4f;
- metrics[2] = 2.1969f;
- break;
- default:
- return CV_BADRANGE_ERR;
- }
-
- return CV_OK;
-}
-
-
-static void
-icvTrueDistTrans( const CvMat* src, CvMat* dst )
-{
- CvMat* buffer = 0;
-
- CV_FUNCNAME( "cvDistTransform2" );
-
- __BEGIN__;
-
- int i, m, n;
- int sstep, dstep;
- const float inf = 1e6f;
- int thread_count = cvGetNumThreads();
- int pass1_sz, pass2_sz;
-
- if( !CV_ARE_SIZES_EQ( src, dst ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- if( CV_MAT_TYPE(src->type) != CV_8UC1 ||
- CV_MAT_TYPE(dst->type) != CV_32FC1 )
- CV_ERROR( CV_StsUnsupportedFormat,
- "The input image must have 8uC1 type and the output one must have 32fC1 type" );
-
- m = src->rows;
- n = src->cols;
-
- // (see stage 1 below):
- // sqr_tab: 2*m, sat_tab: 3*m + 1, d: m*thread_count,
- pass1_sz = src->rows*(5 + thread_count) + 1;
- // (see stage 2):
- // sqr_tab & inv_tab: n each; f & v: n*thread_count each; z: (n+1)*thread_count
- pass2_sz = src->cols*(2 + thread_count*3) + thread_count;
- CV_CALL( buffer = cvCreateMat( 1, MAX(pass1_sz, pass2_sz), CV_32FC1 ));
-
- sstep = src->step;
- dstep = dst->step / sizeof(float);
-
- // stage 1: compute 1d distance transform of each column
- {
- float* sqr_tab = buffer->data.fl;
- int* sat_tab = (int*)(sqr_tab + m*2);
- const int shift = m*2;
-
- for( i = 0; i < m; i++ )
- sqr_tab[i] = (float)(i*i);
- for( i = m; i < m*2; i++ )
- sqr_tab[i] = inf;
- for( i = 0; i < shift; i++ )
- sat_tab[i] = 0;
- for( ; i <= m*3; i++ )
- sat_tab[i] = i - shift;
-
-#ifdef _OPENMP
- #pragma omp parallel for num_threads(thread_count)
-#endif
- for( i = 0; i < n; i++ )
- {
- const uchar* sptr = src->data.ptr + i + (m-1)*sstep;
- float* dptr = dst->data.fl + i;
- int* d = (int*)(sat_tab + m*3+1+m*cvGetThreadNum());
- int j, dist = m-1;
-
- for( j = m-1; j >= 0; j--, sptr -= sstep )
- {
- dist = (dist + 1) & (sptr[0] == 0 ? 0 : -1);
- d[j] = dist;
- }
-
- dist = m-1;
- for( j = 0; j < m; j++, dptr += dstep )
- {
- dist = dist + 1 - sat_tab[dist + 1 - d[j] + shift];
- d[j] = dist;
- dptr[0] = sqr_tab[dist];
- }
- }
- }
-
- // stage 2: compute modified distance transform for each row
- {
- float* inv_tab = buffer->data.fl;
- float* sqr_tab = inv_tab + n;
-
- inv_tab[0] = sqr_tab[0] = 0.f;
- for( i = 1; i < n; i++ )
- {
- inv_tab[i] = (float)(0.5/i);
- sqr_tab[i] = (float)(i*i);
- }
-
-#ifdef _OPENMP
- #pragma omp parallel for num_threads(thread_count) schedule(dynamic)
-#endif
- for( i = 0; i < m; i++ )
- {
- float* d = (float*)(dst->data.ptr + i*dst->step);
- float* f = sqr_tab + n + (n*3+1)*cvGetThreadNum();
- float* z = f + n;
- int* v = (int*)(z + n + 1);
- int p, q, k;
-
- v[0] = 0;
- z[0] = -inf;
- z[1] = inf;
- f[0] = d[0];
-
- for( q = 1, k = 0; q < n; q++ )
- {
- float fq = d[q];
- f[q] = fq;
-
- for(;;k--)
- {
- p = v[k];
- float s = (fq + sqr_tab[q] - d[p] - sqr_tab[p])*inv_tab[q - p];
- if( s > z[k] )
- {
- k++;
- v[k] = q;
- z[k] = s;
- z[k+1] = inf;
- break;
- }
- }
- }
-
- for( q = 0, k = 0; q < n; q++ )
- {
- while( z[k+1] < q )
- k++;
- p = v[k];
- d[q] = sqr_tab[abs(q - p)] + f[p];
- }
- }
- }
-
- cvPow( dst, dst, 0.5 );
-
- __END__;
-
- cvReleaseMat( &buffer );
-}
-
-
-/*********************************** IPP functions *********************************/
-
-typedef CvStatus (CV_STDCALL * CvIPPDistTransFunc)( const uchar* src, int srcstep,
- void* dst, int dststep,
- CvSize size, const void* metrics );
-
-typedef CvStatus (CV_STDCALL * CvIPPDistTransFunc2)( uchar* src, int srcstep,
- CvSize size, const int* metrics );
-
-/***********************************************************************************/
-
-typedef CvStatus (CV_STDCALL * CvDistTransFunc)( const uchar* src, int srcstep,
- int* temp, int tempstep,
- float* dst, int dststep,
- CvSize size, const float* metrics );
-
-
-/****************************************************************************************\
- Non-inplace and Inplace 8u->8u Distance Transform for CityBlock (a.k.a. L1) metric
- (C) 2006 by Jay Stavinzky.
-\****************************************************************************************/
-
-//BEGIN ATS ADDITION
-/* 8-bit grayscale distance transform function */
-static void
-icvDistanceATS_L1_8u( const CvMat* src, CvMat* dst )
-{
- CV_FUNCNAME( "cvDistanceATS" );
-
- __BEGIN__;
-
- int width = src->cols, height = src->rows;
-
- int a;
- uchar lut[256];
- int x, y;
-
- const uchar *sbase = src->data.ptr;
- uchar *dbase = dst->data.ptr;
- int srcstep = src->step;
- int dststep = dst->step;
-
- CV_ASSERT( CV_IS_MASK_ARR( src ) && CV_MAT_TYPE( dst->type ) == CV_8UC1 );
- CV_ASSERT( CV_ARE_SIZES_EQ( src, dst ));
-
- ////////////////////// forward scan ////////////////////////
- for( x = 0; x < 256; x++ )
- lut[x] = CV_CAST_8U(x+1);
-
- //init first pixel to max (we're going to be skipping it)
- dbase[0] = (uchar)(sbase[0] == 0 ? 0 : 255);
-
- //first row (scan west only, skip first pixel)
- for( x = 1; x < width; x++ )
- dbase[x] = (uchar)(sbase[x] == 0 ? 0 : lut[dbase[x-1]]);
-
- for( y = 1; y < height; y++ )
- {
- sbase += srcstep;
- dbase += dststep;
-
- //for left edge, scan north only
- a = sbase[0] == 0 ? 0 : lut[dbase[-dststep]];
- dbase[0] = (uchar)a;
-
- for( x = 1; x < width; x++ )
- {
- a = sbase[x] == 0 ? 0 : lut[MIN(a, dbase[x - dststep])];
- dbase[x] = (uchar)a;
- }
- }
-
- ////////////////////// backward scan ///////////////////////
-
- a = dbase[width-1];
-
- // do last row east pixel scan here (skip bottom right pixel)
- for( x = width - 2; x >= 0; x-- )
- {
- a = lut[a];
- dbase[x] = (uchar)(CV_CALC_MIN_8U(a, dbase[x]));
- }
-
- // right edge is the only error case
- for( y = height - 2; y >= 0; y-- )
- {
- dbase -= dststep;
-
- // do right edge
- a = lut[dbase[width-1+dststep]];
- dbase[width-1] = (uchar)(MIN(a, dbase[width-1]));
-
- for( x = width - 2; x >= 0; x-- )
- {
- int b = dbase[x+dststep];
- a = lut[MIN(a, b)];
- dbase[x] = (uchar)(MIN(a, dbase[x]));
- }
- }
-
- __END__;
-}
-//END ATS ADDITION
-
-
-/* Wrapper function for distance transform group */
-CV_IMPL void
-cvDistTransform( const void* srcarr, void* dstarr,
- int distType, int maskSize,
- const float *mask,
- void* labelsarr )
-{
- CvMat* temp = 0;
- CvMat* src_copy = 0;
- CvMemStorage* st = 0;
-
- CV_FUNCNAME( "cvDistTransform" );
-
- __BEGIN__;
-
- float _mask[5] = {0};
- CvMat srcstub, *src = (CvMat*)srcarr;
- CvMat dststub, *dst = (CvMat*)dstarr;
- CvMat lstub, *labels = (CvMat*)labelsarr;
- CvSize size;
- //CvIPPDistTransFunc ipp_func = 0;
- //CvIPPDistTransFunc2 ipp_inp_func = 0;
-
- CV_CALL( src = cvGetMat( src, &srcstub ));
- CV_CALL( dst = cvGetMat( dst, &dststub ));
-
- if( !CV_IS_MASK_ARR( src ) || (CV_MAT_TYPE( dst->type ) != CV_32FC1 &&
- (CV_MAT_TYPE(dst->type) != CV_8UC1 || distType != CV_DIST_L1 || labels)) )
- CV_ERROR( CV_StsUnsupportedFormat,
- "source image must be 8uC1 and the distance map must be 32fC1 "
- "(or 8uC1 in case of simple L1 distance transform)" );
-
- if( !CV_ARE_SIZES_EQ( src, dst ))
- CV_ERROR( CV_StsUnmatchedSizes, "the source and the destination images must be of the same size" );
-
- if( maskSize != CV_DIST_MASK_3 && maskSize != CV_DIST_MASK_5 && maskSize != CV_DIST_MASK_PRECISE )
- CV_ERROR( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (presize)" );
-
- if( distType == CV_DIST_C || distType == CV_DIST_L1 )
- maskSize = !labels ? CV_DIST_MASK_3 : CV_DIST_MASK_5;
- else if( distType == CV_DIST_L2 && labels )
- maskSize = CV_DIST_MASK_5;
-
- if( maskSize == CV_DIST_MASK_PRECISE )
- {
- CV_CALL( icvTrueDistTrans( src, dst ));
- EXIT;
- }
-
- if( labels )
- {
- CV_CALL( labels = cvGetMat( labels, &lstub ));
- if( CV_MAT_TYPE( labels->type ) != CV_32SC1 )
- CV_ERROR( CV_StsUnsupportedFormat, "the output array of labels must be 32sC1" );
-
- if( !CV_ARE_SIZES_EQ( labels, dst ))
- CV_ERROR( CV_StsUnmatchedSizes, "the array of labels has a different size" );
-
- if( maskSize == CV_DIST_MASK_3 )
- CV_ERROR( CV_StsNotImplemented,
- "3x3 mask can not be used for \"labeled\" distance transform. Use 5x5 mask" );
- }
-
- if( distType == CV_DIST_C || distType == CV_DIST_L1 || distType == CV_DIST_L2 )
- {
- icvGetDistanceTransformMask( (distType == CV_DIST_C ? 0 :
- distType == CV_DIST_L1 ? 1 : 2) + maskSize*10, _mask );
- }
- else if( distType == CV_DIST_USER )
- {
- if( !mask )
- CV_ERROR( CV_StsNullPtr, "" );
-
- memcpy( _mask, mask, (maskSize/2 + 1)*sizeof(float));
- }
-
- /*if( !labels )
- {
- if( CV_MAT_TYPE(dst->type) == CV_32FC1 )
- ipp_func = (CvIPPDistTransFunc)(maskSize == CV_DIST_MASK_3 ?
- icvDistanceTransform_3x3_8u32f_C1R_p : icvDistanceTransform_5x5_8u32f_C1R_p);
- else if( src->data.ptr != dst->data.ptr )
- ipp_func = (CvIPPDistTransFunc)icvDistanceTransform_3x3_8u_C1R_p;
- else
- ipp_inp_func = icvDistanceTransform_3x3_8u_C1IR_p;
- }*/
-
- size = cvGetMatSize(src);
-
- /*if( (ipp_func || ipp_inp_func) && src->cols >= 4 && src->rows >= 2 )
- {
- int _imask[3];
- _imask[0] = cvRound(_mask[0]);
- _imask[1] = cvRound(_mask[1]);
- _imask[2] = cvRound(_mask[2]);
-
- if( ipp_func )
- {
- IPPI_CALL( ipp_func( src->data.ptr, src->step,
- dst->data.fl, dst->step, size,
- CV_MAT_TYPE(dst->type) == CV_8UC1 ?
- (void*)_imask : (void*)_mask ));
- }
- else
- {
- IPPI_CALL( ipp_inp_func( src->data.ptr, src->step, size, _imask ));
- }
- }
- else*/ if( CV_MAT_TYPE(dst->type) == CV_8UC1 )
- {
- CV_CALL( icvDistanceATS_L1_8u( src, dst ));
- }
- else
- {
- int border = maskSize == CV_DIST_MASK_3 ? 1 : 2;
- CV_CALL( temp = cvCreateMat( size.height + border*2, size.width + border*2, CV_32SC1 ));
-
- if( !labels )
- {
- CvDistTransFunc func = maskSize == CV_DIST_MASK_3 ?
- icvDistanceTransform_3x3_C1R :
- icvDistanceTransform_5x5_C1R;
-
- func( src->data.ptr, src->step, temp->data.i, temp->step,
- dst->data.fl, dst->step, size, _mask );
- }
- else
- {
- CvSeq *contours = 0;
- CvPoint top_left = {0,0}, bottom_right = {size.width-1,size.height-1};
- int label;
-
- CV_CALL( st = cvCreateMemStorage() );
- CV_CALL( src_copy = cvCreateMat( size.height, size.width, src->type ));
- cvCmpS( src, 0, src_copy, CV_CMP_EQ );
- cvFindContours( src_copy, st, &contours, sizeof(CvContour),
- CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
- cvZero( labels );
- for( label = 1; contours != 0; contours = contours->h_next, label++ )
- {
- CvScalar area_color = cvScalarAll(label);
- cvDrawContours( labels, contours, area_color, area_color, -255, -1, 8 );
- }
-
- cvCopy( src, src_copy );
- cvRectangle( src_copy, top_left, bottom_right, cvScalarAll(255), 1, 8 );
-
- icvDistanceTransformEx_5x5_C1R( src_copy->data.ptr, src_copy->step, temp->data.i, temp->step,
- dst->data.fl, dst->step, labels->data.i, labels->step, size, _mask );
- }
- }
-
- __END__;
-
- cvReleaseMat( &temp );
- cvReleaseMat( &src_copy );
- cvReleaseMemStorage( &st );
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+#include "_cv.h"\r
+\r
+#define ICV_DIST_SHIFT 16\r
+#define ICV_INIT_DIST0 (INT_MAX >> 2)\r
+\r
+static CvStatus\r
+icvInitTopBottom( int* temp, int tempstep, CvSize size, int border )\r
+{\r
+ int i, j;\r
+ for( i = 0; i < border; i++ )\r
+ {\r
+ int* ttop = (int*)(temp + i*tempstep);\r
+ int* tbottom = (int*)(temp + (size.height + border*2 - i - 1)*tempstep);\r
+ \r
+ for( j = 0; j < size.width + border*2; j++ )\r
+ {\r
+ ttop[j] = ICV_INIT_DIST0;\r
+ tbottom[j] = ICV_INIT_DIST0;\r
+ }\r
+ }\r
+\r
+ return CV_OK;\r
+}\r
+\r
+\r
+static CvStatus CV_STDCALL\r
+icvDistanceTransform_3x3_C1R( const uchar* src, int srcstep, int* temp,\r
+ int step, float* dist, int dststep, CvSize size, const float* metrics )\r
+{\r
+ const int BORDER = 1;\r
+ int i, j;\r
+ const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT );\r
+ const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT );\r
+ const float scale = 1.f/(1 << ICV_DIST_SHIFT);\r
+\r
+ srcstep /= sizeof(src[0]);\r
+ step /= sizeof(temp[0]);\r
+ dststep /= sizeof(dist[0]);\r
+\r
+ icvInitTopBottom( temp, step, size, BORDER );\r
+\r
+ // forward pass\r
+ for( i = 0; i < size.height; i++ )\r
+ {\r
+ const uchar* s = src + i*srcstep;\r
+ int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;\r
+\r
+ for( j = 0; j < BORDER; j++ )\r
+ tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0;\r
+ \r
+ for( j = 0; j < size.width; j++ )\r
+ {\r
+ if( !s[j] )\r
+ tmp[j] = 0;\r
+ else\r
+ {\r
+ int t0 = tmp[j-step-1] + DIAG_DIST;\r
+ int t = tmp[j-step] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-step+1] + DIAG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-1] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ tmp[j] = t0;\r
+ }\r
+ }\r
+ }\r
+\r
+ // backward pass\r
+ for( i = size.height - 1; i >= 0; i-- )\r
+ {\r
+ float* d = (float*)(dist + i*dststep);\r
+ int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;\r
+ \r
+ for( j = size.width - 1; j >= 0; j-- )\r
+ {\r
+ int t0 = tmp[j];\r
+ if( t0 > HV_DIST )\r
+ {\r
+ int t = tmp[j+step+1] + DIAG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step-1] + DIAG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+1] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ tmp[j] = t0;\r
+ }\r
+ d[j] = (float)(t0 * scale);\r
+ }\r
+ }\r
+\r
+ return CV_OK;\r
+}\r
+\r
+\r
+static CvStatus CV_STDCALL\r
+icvDistanceTransform_5x5_C1R( const uchar* src, int srcstep, int* temp,\r
+ int step, float* dist, int dststep, CvSize size, const float* metrics )\r
+{\r
+ const int BORDER = 2;\r
+ int i, j;\r
+ const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT );\r
+ const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT );\r
+ const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], ICV_DIST_SHIFT );\r
+ const float scale = 1.f/(1 << ICV_DIST_SHIFT);\r
+\r
+ srcstep /= sizeof(src[0]);\r
+ step /= sizeof(temp[0]);\r
+ dststep /= sizeof(dist[0]);\r
+\r
+ icvInitTopBottom( temp, step, size, BORDER );\r
+\r
+ // forward pass\r
+ for( i = 0; i < size.height; i++ )\r
+ {\r
+ const uchar* s = src + i*srcstep;\r
+ int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;\r
+\r
+ for( j = 0; j < BORDER; j++ )\r
+ tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0;\r
+ \r
+ for( j = 0; j < size.width; j++ )\r
+ {\r
+ if( !s[j] )\r
+ tmp[j] = 0;\r
+ else\r
+ {\r
+ int t0 = tmp[j-step*2-1] + LONG_DIST;\r
+ int t = tmp[j-step*2+1] + LONG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-step-2] + LONG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-step-1] + DIAG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-step] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-step+1] + DIAG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-step+2] + LONG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j-1] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ tmp[j] = t0;\r
+ }\r
+ }\r
+ }\r
+\r
+ // backward pass\r
+ for( i = size.height - 1; i >= 0; i-- )\r
+ {\r
+ float* d = (float*)(dist + i*dststep);\r
+ int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;\r
+ \r
+ for( j = size.width - 1; j >= 0; j-- )\r
+ {\r
+ int t0 = tmp[j];\r
+ if( t0 > HV_DIST )\r
+ {\r
+ int t = tmp[j+step*2+1] + LONG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step*2-1] + LONG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step+2] + LONG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step+1] + DIAG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step-1] + DIAG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+step-2] + LONG_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ t = tmp[j+1] + HV_DIST;\r
+ if( t0 > t ) t0 = t;\r
+ tmp[j] = t0;\r
+ }\r
+ d[j] = (float)(t0 * scale);\r
+ }\r
+ }\r
+\r
+ return CV_OK;\r
+}\r
+\r
+\r
+static CvStatus CV_STDCALL\r
+icvDistanceTransformEx_5x5_C1R( const uchar* src, int srcstep, int* temp,\r
+ int step, float* dist, int dststep, int* labels, int lstep,\r
+ CvSize size, const float* metrics )\r
+{\r
+ const int BORDER = 2;\r
+ \r
+ int i, j;\r
+ const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT );\r
+ const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT );\r
+ const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], ICV_DIST_SHIFT );\r
+ const float scale = 1.f/(1 << ICV_DIST_SHIFT);\r
+\r
+ srcstep /= sizeof(src[0]);\r
+ step /= sizeof(temp[0]);\r
+ dststep /= sizeof(dist[0]);\r
+ lstep /= sizeof(labels[0]);\r
+\r
+ icvInitTopBottom( temp, step, size, BORDER );\r
+\r
+ // forward pass\r
+ for( i = 0; i < size.height; i++ )\r
+ {\r
+ const uchar* s = src + i*srcstep;\r
+ int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;\r
+ int* lls = (int*)(labels + i*lstep);\r
+\r
+ for( j = 0; j < BORDER; j++ )\r
+ tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0;\r
+ \r
+ for( j = 0; j < size.width; j++ )\r
+ {\r
+ if( !s[j] )\r
+ {\r
+ tmp[j] = 0;\r
+ //assert( lls[j] != 0 );\r
+ }\r
+ else\r
+ {\r
+ int t0 = ICV_INIT_DIST0, t;\r
+ int l0 = 0;\r
+\r
+ t = tmp[j-step*2-1] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-lstep*2-1];\r
+ }\r
+ t = tmp[j-step*2+1] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-lstep*2+1];\r
+ }\r
+ t = tmp[j-step-2] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-lstep-2];\r
+ }\r
+ t = tmp[j-step-1] + DIAG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-lstep-1];\r
+ }\r
+ t = tmp[j-step] + HV_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-lstep];\r
+ }\r
+ t = tmp[j-step+1] + DIAG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-lstep+1];\r
+ }\r
+ t = tmp[j-step+2] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-lstep+2];\r
+ }\r
+ t = tmp[j-1] + HV_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j-1];\r
+ }\r
+\r
+ tmp[j] = t0;\r
+ lls[j] = l0;\r
+ }\r
+ }\r
+ }\r
+\r
+ // backward pass\r
+ for( i = size.height - 1; i >= 0; i-- )\r
+ {\r
+ float* d = (float*)(dist + i*dststep);\r
+ int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER;\r
+ int* lls = (int*)(labels + i*lstep);\r
+ \r
+ for( j = size.width - 1; j >= 0; j-- )\r
+ {\r
+ int t0 = tmp[j];\r
+ int l0 = lls[j];\r
+ if( t0 > HV_DIST )\r
+ {\r
+ int t = tmp[j+step*2+1] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+lstep*2+1];\r
+ }\r
+ t = tmp[j+step*2-1] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+lstep*2-1];\r
+ }\r
+ t = tmp[j+step+2] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+lstep+2];\r
+ }\r
+ t = tmp[j+step+1] + DIAG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+lstep+1];\r
+ }\r
+ t = tmp[j+step] + HV_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+lstep];\r
+ }\r
+ t = tmp[j+step-1] + DIAG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+lstep-1];\r
+ }\r
+ t = tmp[j+step-2] + LONG_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+lstep-2];\r
+ }\r
+ t = tmp[j+1] + HV_DIST;\r
+ if( t0 > t )\r
+ {\r
+ t0 = t;\r
+ l0 = lls[j+1];\r
+ }\r
+ tmp[j] = t0;\r
+ lls[j] = l0;\r
+ }\r
+ d[j] = (float)(t0 * scale);\r
+ }\r
+ }\r
+\r
+ return CV_OK;\r
+}\r
+\r
+\r
+static CvStatus\r
+icvGetDistanceTransformMask( int maskType, float *metrics )\r
+{\r
+ if( !metrics )\r
+ return CV_NULLPTR_ERR;\r
+\r
+ switch (maskType)\r
+ {\r
+ case 30:\r
+ metrics[0] = 1.0f;\r
+ metrics[1] = 1.0f;\r
+ break;\r
+\r
+ case 31:\r
+ metrics[0] = 1.0f;\r
+ metrics[1] = 2.0f;\r
+ break;\r
+\r
+ case 32:\r
+ metrics[0] = 0.955f;\r
+ metrics[1] = 1.3693f;\r
+ break;\r
+\r
+ case 50:\r
+ metrics[0] = 1.0f;\r
+ metrics[1] = 1.0f;\r
+ metrics[2] = 2.0f;\r
+ break;\r
+\r
+ case 51:\r
+ metrics[0] = 1.0f;\r
+ metrics[1] = 2.0f;\r
+ metrics[2] = 3.0f;\r
+ break;\r
+\r
+ case 52:\r
+ metrics[0] = 1.0f;\r
+ metrics[1] = 1.4f;\r
+ metrics[2] = 2.1969f;\r
+ break;\r
+ default:\r
+ return CV_BADRANGE_ERR;\r
+ }\r
+\r
+ return CV_OK;\r
+}\r
+\r
+\r
+static void\r
+icvTrueDistTrans( const CvMat* src, CvMat* dst )\r
+{\r
+ CvMat* buffer = 0;\r
+\r
+ CV_FUNCNAME( "cvDistTransform2" );\r
+\r
+ __BEGIN__;\r
+\r
+ int i, m, n;\r
+ int sstep, dstep;\r
+ const float inf = 1e6f;\r
+ int thread_count = cvGetNumThreads();\r
+ int pass1_sz, pass2_sz;\r
+\r
+ if( !CV_ARE_SIZES_EQ( src, dst ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "" );\r
+\r
+ if( CV_MAT_TYPE(src->type) != CV_8UC1 ||\r
+ CV_MAT_TYPE(dst->type) != CV_32FC1 )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "The input image must have 8uC1 type and the output one must have 32fC1 type" );\r
+\r
+ m = src->rows;\r
+ n = src->cols;\r
+\r
+ // (see stage 1 below):\r
+ // sqr_tab: 2*m, sat_tab: 3*m + 1, d: m*thread_count,\r
+ pass1_sz = src->rows*(5 + thread_count) + 1;\r
+ // (see stage 2):\r
+ // sqr_tab & inv_tab: n each; f & v: n*thread_count each; z: (n+1)*thread_count\r
+ pass2_sz = src->cols*(2 + thread_count*3) + thread_count;\r
+ CV_CALL( buffer = cvCreateMat( 1, MAX(pass1_sz, pass2_sz), CV_32FC1 ));\r
+\r
+ sstep = src->step;\r
+ dstep = dst->step / sizeof(float);\r
+\r
+ // stage 1: compute 1d distance transform of each column\r
+ {\r
+ float* sqr_tab = buffer->data.fl;\r
+ int* sat_tab = (int*)(sqr_tab + m*2);\r
+ const int shift = m*2;\r
+\r
+ for( i = 0; i < m; i++ )\r
+ sqr_tab[i] = (float)(i*i);\r
+ for( i = m; i < m*2; i++ )\r
+ sqr_tab[i] = inf;\r
+ for( i = 0; i < shift; i++ )\r
+ sat_tab[i] = 0;\r
+ for( ; i <= m*3; i++ )\r
+ sat_tab[i] = i - shift;\r
+\r
+#ifdef _OPENMP\r
+ #pragma omp parallel for num_threads(thread_count)\r
+#endif\r
+ for( i = 0; i < n; i++ )\r
+ {\r
+ const uchar* sptr = src->data.ptr + i + (m-1)*sstep;\r
+ float* dptr = dst->data.fl + i;\r
+ int* d = (int*)(sat_tab + m*3+1+m*cvGetThreadNum());\r
+ int j, dist = m-1;\r
+\r
+ for( j = m-1; j >= 0; j--, sptr -= sstep )\r
+ {\r
+ dist = (dist + 1) & (sptr[0] == 0 ? 0 : -1);\r
+ d[j] = dist;\r
+ }\r
+\r
+ dist = m-1;\r
+ for( j = 0; j < m; j++, dptr += dstep )\r
+ {\r
+ dist = dist + 1 - sat_tab[dist + 1 - d[j] + shift];\r
+ d[j] = dist;\r
+ dptr[0] = sqr_tab[dist];\r
+ }\r
+ }\r
+ }\r
+\r
+ // stage 2: compute modified distance transform for each row\r
+ {\r
+ float* inv_tab = buffer->data.fl;\r
+ float* sqr_tab = inv_tab + n;\r
+\r
+ inv_tab[0] = sqr_tab[0] = 0.f;\r
+ for( i = 1; i < n; i++ )\r
+ {\r
+ inv_tab[i] = (float)(0.5/i);\r
+ sqr_tab[i] = (float)(i*i);\r
+ }\r
+\r
+#ifdef _OPENMP\r
+ #pragma omp parallel for num_threads(thread_count) schedule(dynamic)\r
+#endif\r
+ for( i = 0; i < m; i++ )\r
+ {\r
+ float* d = (float*)(dst->data.ptr + i*dst->step);\r
+ float* f = sqr_tab + n + (n*3+1)*cvGetThreadNum();\r
+ float* z = f + n;\r
+ int* v = (int*)(z + n + 1);\r
+ int p, q, k;\r
+\r
+ v[0] = 0;\r
+ z[0] = -inf;\r
+ z[1] = inf;\r
+ f[0] = d[0];\r
+\r
+ for( q = 1, k = 0; q < n; q++ )\r
+ {\r
+ float fq = d[q];\r
+ f[q] = fq;\r
+\r
+ for(;;k--)\r
+ {\r
+ p = v[k];\r
+ float s = (fq + sqr_tab[q] - d[p] - sqr_tab[p])*inv_tab[q - p];\r
+ if( s > z[k] )\r
+ {\r
+ k++;\r
+ v[k] = q;\r
+ z[k] = s;\r
+ z[k+1] = inf;\r
+ break;\r
+ }\r
+ }\r
+ }\r
+\r
+ for( q = 0, k = 0; q < n; q++ )\r
+ {\r
+ while( z[k+1] < q )\r
+ k++;\r
+ p = v[k];\r
+ d[q] = sqr_tab[abs(q - p)] + f[p];\r
+ }\r
+ }\r
+ }\r
+\r
+ cvPow( dst, dst, 0.5 );\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &buffer );\r
+}\r
+\r
+\r
+/*********************************** IPP functions *********************************/\r
+\r
+typedef CvStatus (CV_STDCALL * CvIPPDistTransFunc)( const uchar* src, int srcstep,\r
+ void* dst, int dststep,\r
+ CvSize size, const void* metrics );\r
+\r
+typedef CvStatus (CV_STDCALL * CvIPPDistTransFunc2)( uchar* src, int srcstep,\r
+ CvSize size, const int* metrics );\r
+\r
+/***********************************************************************************/\r
+\r
+typedef CvStatus (CV_STDCALL * CvDistTransFunc)( const uchar* src, int srcstep,\r
+ int* temp, int tempstep,\r
+ float* dst, int dststep,\r
+ CvSize size, const float* metrics );\r
+\r
+\r
+/****************************************************************************************\\r
+ Non-inplace and Inplace 8u->8u Distance Transform for CityBlock (a.k.a. L1) metric\r
+ (C) 2006 by Jay Stavinzky.\r
+\****************************************************************************************/\r
+\r
+//BEGIN ATS ADDITION\r
+/* 8-bit grayscale distance transform function */\r
+static void\r
+icvDistanceATS_L1_8u( const CvMat* src, CvMat* dst )\r
+{\r
+ CV_FUNCNAME( "cvDistanceATS" );\r
+\r
+ __BEGIN__;\r
+\r
+ int width = src->cols, height = src->rows;\r
+\r
+ int a;\r
+ uchar lut[256];\r
+ int x, y;\r
+ \r
+ const uchar *sbase = src->data.ptr;\r
+ uchar *dbase = dst->data.ptr;\r
+ int srcstep = src->step;\r
+ int dststep = dst->step;\r
+\r
+ CV_ASSERT( CV_IS_MASK_ARR( src ) && CV_MAT_TYPE( dst->type ) == CV_8UC1 );\r
+ CV_ASSERT( CV_ARE_SIZES_EQ( src, dst ));\r
+\r
+ ////////////////////// forward scan ////////////////////////\r
+ for( x = 0; x < 256; x++ )\r
+ lut[x] = CV_CAST_8U(x+1);\r
+\r
+ //init first pixel to max (we're going to be skipping it)\r
+ dbase[0] = (uchar)(sbase[0] == 0 ? 0 : 255);\r
+\r
+ //first row (scan west only, skip first pixel)\r
+ for( x = 1; x < width; x++ )\r
+ dbase[x] = (uchar)(sbase[x] == 0 ? 0 : lut[dbase[x-1]]);\r
+\r
+ for( y = 1; y < height; y++ )\r
+ {\r
+ sbase += srcstep;\r
+ dbase += dststep;\r
+\r
+ //for left edge, scan north only\r
+ a = sbase[0] == 0 ? 0 : lut[dbase[-dststep]];\r
+ dbase[0] = (uchar)a;\r
+\r
+ for( x = 1; x < width; x++ )\r
+ {\r
+ a = sbase[x] == 0 ? 0 : lut[MIN(a, dbase[x - dststep])];\r
+ dbase[x] = (uchar)a;\r
+ }\r
+ }\r
+\r
+ ////////////////////// backward scan ///////////////////////\r
+\r
+ a = dbase[width-1];\r
+\r
+ // do last row east pixel scan here (skip bottom right pixel)\r
+ for( x = width - 2; x >= 0; x-- )\r
+ {\r
+ a = lut[a];\r
+ dbase[x] = (uchar)(CV_CALC_MIN_8U(a, dbase[x]));\r
+ }\r
+\r
+ // right edge is the only error case\r
+ for( y = height - 2; y >= 0; y-- )\r
+ {\r
+ dbase -= dststep;\r
+\r
+ // do right edge\r
+ a = lut[dbase[width-1+dststep]];\r
+ dbase[width-1] = (uchar)(MIN(a, dbase[width-1]));\r
+\r
+ for( x = width - 2; x >= 0; x-- )\r
+ {\r
+ int b = dbase[x+dststep];\r
+ a = lut[MIN(a, b)];\r
+ dbase[x] = (uchar)(MIN(a, dbase[x]));\r
+ }\r
+ }\r
+\r
+ __END__;\r
+}\r
+//END ATS ADDITION\r
+\r
+\r
+/* Wrapper function for distance transform group */\r
+CV_IMPL void\r
+cvDistTransform( const void* srcarr, void* dstarr,\r
+ int distType, int maskSize,\r
+ const float *mask,\r
+ void* labelsarr )\r
+{\r
+ CvMat* temp = 0;\r
+ CvMat* src_copy = 0;\r
+ CvMemStorage* st = 0;\r
+ \r
+ CV_FUNCNAME( "cvDistTransform" );\r
+\r
+ __BEGIN__;\r
+\r
+ float _mask[5] = {0};\r
+ CvMat srcstub, *src = (CvMat*)srcarr;\r
+ CvMat dststub, *dst = (CvMat*)dstarr;\r
+ CvMat lstub, *labels = (CvMat*)labelsarr;\r
+ CvSize size;\r
+ //CvIPPDistTransFunc ipp_func = 0;\r
+ //CvIPPDistTransFunc2 ipp_inp_func = 0;\r
+\r
+ CV_CALL( src = cvGetMat( src, &srcstub ));\r
+ CV_CALL( dst = cvGetMat( dst, &dststub ));\r
+\r
+ if( !CV_IS_MASK_ARR( src ) || (CV_MAT_TYPE( dst->type ) != CV_32FC1 &&\r
+ (CV_MAT_TYPE(dst->type) != CV_8UC1 || distType != CV_DIST_L1 || labels)) )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "source image must be 8uC1 and the distance map must be 32fC1 "\r
+ "(or 8uC1 in case of simple L1 distance transform)" );\r
+\r
+ if( !CV_ARE_SIZES_EQ( src, dst ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "the source and the destination images must be of the same size" );\r
+\r
+ if( maskSize != CV_DIST_MASK_3 && maskSize != CV_DIST_MASK_5 && maskSize != CV_DIST_MASK_PRECISE )\r
+ CV_ERROR( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (presize)" );\r
+\r
+ if( distType == CV_DIST_C || distType == CV_DIST_L1 )\r
+ maskSize = !labels ? CV_DIST_MASK_3 : CV_DIST_MASK_5;\r
+ else if( distType == CV_DIST_L2 && labels )\r
+ maskSize = CV_DIST_MASK_5;\r
+\r
+ if( maskSize == CV_DIST_MASK_PRECISE )\r
+ {\r
+ CV_CALL( icvTrueDistTrans( src, dst ));\r
+ EXIT;\r
+ }\r
+ \r
+ if( labels )\r
+ {\r
+ CV_CALL( labels = cvGetMat( labels, &lstub ));\r
+ if( CV_MAT_TYPE( labels->type ) != CV_32SC1 )\r
+ CV_ERROR( CV_StsUnsupportedFormat, "the output array of labels must be 32sC1" );\r
+\r
+ if( !CV_ARE_SIZES_EQ( labels, dst ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "the array of labels has a different size" );\r
+\r
+ if( maskSize == CV_DIST_MASK_3 )\r
+ CV_ERROR( CV_StsNotImplemented,\r
+ "3x3 mask can not be used for \"labeled\" distance transform. Use 5x5 mask" );\r
+ }\r
+\r
+ if( distType == CV_DIST_C || distType == CV_DIST_L1 || distType == CV_DIST_L2 )\r
+ {\r
+ icvGetDistanceTransformMask( (distType == CV_DIST_C ? 0 :\r
+ distType == CV_DIST_L1 ? 1 : 2) + maskSize*10, _mask );\r
+ }\r
+ else if( distType == CV_DIST_USER )\r
+ {\r
+ if( !mask )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+\r
+ memcpy( _mask, mask, (maskSize/2 + 1)*sizeof(float));\r
+ }\r
+\r
+ /*if( !labels )\r
+ {\r
+ if( CV_MAT_TYPE(dst->type) == CV_32FC1 )\r
+ ipp_func = (CvIPPDistTransFunc)(maskSize == CV_DIST_MASK_3 ?\r
+ icvDistanceTransform_3x3_8u32f_C1R_p : icvDistanceTransform_5x5_8u32f_C1R_p);\r
+ else if( src->data.ptr != dst->data.ptr )\r
+ ipp_func = (CvIPPDistTransFunc)icvDistanceTransform_3x3_8u_C1R_p;\r
+ else\r
+ ipp_inp_func = icvDistanceTransform_3x3_8u_C1IR_p;\r
+ }*/\r
+\r
+ size = cvGetMatSize(src);\r
+\r
+ /*if( (ipp_func || ipp_inp_func) && src->cols >= 4 && src->rows >= 2 )\r
+ {\r
+ int _imask[3];\r
+ _imask[0] = cvRound(_mask[0]);\r
+ _imask[1] = cvRound(_mask[1]);\r
+ _imask[2] = cvRound(_mask[2]);\r
+\r
+ if( ipp_func )\r
+ {\r
+ IPPI_CALL( ipp_func( src->data.ptr, src->step,\r
+ dst->data.fl, dst->step, size,\r
+ CV_MAT_TYPE(dst->type) == CV_8UC1 ?\r
+ (void*)_imask : (void*)_mask ));\r
+ }\r
+ else\r
+ {\r
+ IPPI_CALL( ipp_inp_func( src->data.ptr, src->step, size, _imask ));\r
+ }\r
+ }\r
+ else*/ if( CV_MAT_TYPE(dst->type) == CV_8UC1 )\r
+ {\r
+ CV_CALL( icvDistanceATS_L1_8u( src, dst ));\r
+ }\r
+ else\r
+ {\r
+ int border = maskSize == CV_DIST_MASK_3 ? 1 : 2;\r
+ CV_CALL( temp = cvCreateMat( size.height + border*2, size.width + border*2, CV_32SC1 ));\r
+\r
+ if( !labels )\r
+ {\r
+ CvDistTransFunc func = maskSize == CV_DIST_MASK_3 ?\r
+ icvDistanceTransform_3x3_C1R :\r
+ icvDistanceTransform_5x5_C1R;\r
+\r
+ func( src->data.ptr, src->step, temp->data.i, temp->step,\r
+ dst->data.fl, dst->step, size, _mask );\r
+ }\r
+ else\r
+ {\r
+ CvSeq *contours = 0;\r
+ CvPoint top_left = {0,0}, bottom_right = {size.width-1,size.height-1};\r
+ int label;\r
+\r
+ CV_CALL( st = cvCreateMemStorage() );\r
+ CV_CALL( src_copy = cvCreateMat( size.height, size.width, src->type ));\r
+ cvCmpS( src, 0, src_copy, CV_CMP_EQ );\r
+ cvFindContours( src_copy, st, &contours, sizeof(CvContour),\r
+ CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );\r
+ cvZero( labels );\r
+ for( label = 1; contours != 0; contours = contours->h_next, label++ )\r
+ {\r
+ CvScalar area_color = cvScalarAll(label);\r
+ cvDrawContours( labels, contours, area_color, area_color, -255, -1, 8 );\r
+ }\r
+\r
+ cvCopy( src, src_copy );\r
+ cvRectangle( src_copy, top_left, bottom_right, cvScalarAll(255), 1, 8 );\r
+\r
+ icvDistanceTransformEx_5x5_C1R( src_copy->data.ptr, src_copy->step, temp->data.i, temp->step,\r
+ dst->data.fl, dst->step, labels->data.i, labels->step, size, _mask );\r
+ }\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &temp );\r
+ cvReleaseMat( &src_copy );\r
+ cvReleaseMemStorage( &st );\r
+}\r
+\r
+void cv::distanceTransform( const Mat& src, Mat& dst, Mat& labels,
+                            int distanceType, int maskSize )
+{
+    // Distance map is single-channel float; label map is single-channel 32-bit int,
+    // as required by cvDistTransform's format checks.
+    dst.create(src.size(), CV_32F);
+    labels.create(src.size(), CV_32S);
+    CvMat _src = src, _dst = dst, _labels = labels;
+    cvDistTransform(&_src, &_dst, distanceType, maskSize, 0, &_labels);
+}
+\r
+void cv::distanceTransform( const Mat& src, Mat& dst,\r
+ int distanceType, int maskSize )\r
+{\r
+ dst.create(src.size(), CV_32F);\r
+ CvMat _src = src, _dst = dst;\r
+ cvDistTransform(&_src, &_dst, distanceType, maskSize, 0, 0);\r
+}\r
+\r
+/* End of file. */\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include "_cv.h"
-
-#define cmp_features( f1, f2 ) (*(f1) > *(f2))
-
-static CV_IMPLEMENT_QSORT( icvSortFeatures, int *, cmp_features )
-
-CV_IMPL void
-cvGoodFeaturesToTrack( const void* image, void* eigImage, void* tempImage,
- CvPoint2D32f* corners, int *corner_count,
- double quality_level, double min_distance,
- const void* maskImage, int block_size,
- int use_harris, double harris_k )
-{
- CvMat* _eigImg = 0;
- CvMat* _tmpImg = 0;
-
- CV_FUNCNAME( "cvGoodFeaturesToTrack" );
-
- __BEGIN__;
-
- double max_val = 0;
- int max_count = 0;
- int count = 0;
- int x, y, i, k = 0;
- int min_dist;
- int eig_step, tmp_step;
-
- /* when selecting points, use integer coordinates */
- CvPoint *ptr = (CvPoint *) corners;
-
- /* process floating-point images using integer arithmetics */
- int *eig_data = 0;
- int *tmp_data = 0;
- int **ptr_data = 0;
- uchar *mask_data = 0;
- int mask_step = 0;
- CvSize size;
-
- int coi1 = 0, coi2 = 0, coi3 = 0;
- CvMat stub, *img = (CvMat*)image;
- CvMat eig_stub, *eig = (CvMat*)eigImage;
- CvMat tmp_stub, *tmp = (CvMat*)tempImage;
- CvMat mask_stub, *mask = (CvMat*)maskImage;
-
- if( corner_count )
- {
- max_count = *corner_count;
- *corner_count = 0;
- }
-
- CV_CALL( img = cvGetMat( img, &stub, &coi1 ));
- if( eig )
- {
- CV_CALL( eig = cvGetMat( eig, &eig_stub, &coi2 ));
- }
- else
- {
- CV_CALL( _eigImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));
- eig = _eigImg;
- }
-
- if( tmp )
- {
- CV_CALL( tmp = cvGetMat( tmp, &tmp_stub, &coi3 ));
- }
- else
- {
- CV_CALL( _tmpImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));
- tmp = _tmpImg;
- }
-
- if( mask )
- {
- CV_CALL( mask = cvGetMat( mask, &mask_stub ));
- if( !CV_IS_MASK_ARR( mask ))
- {
- CV_ERROR( CV_StsBadMask, "" );
- }
- }
-
- if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
- CV_ERROR( CV_BadCOI, "" );
-
- if( CV_MAT_CN(img->type) != 1 ||
- CV_MAT_CN(eig->type) != 1 ||
- CV_MAT_CN(tmp->type) != 1 )
- CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );
-
- if( CV_MAT_DEPTH(tmp->type) != CV_32F ||
- CV_MAT_DEPTH(eig->type) != CV_32F )
- CV_ERROR( CV_BadDepth, cvUnsupportedFormat );
-
- if( !corners || !corner_count )
- CV_ERROR( CV_StsNullPtr, "" );
-
- if( max_count <= 0 )
- CV_ERROR( CV_StsBadArg, "maximal corners number is non positive" );
-
- if( quality_level <= 0 || min_distance < 0 )
- CV_ERROR( CV_StsBadArg, "quality level or min distance are non positive" );
-
- if( use_harris )
- {
- CV_CALL( cvCornerHarris( img, eig, block_size, 3, harris_k ));
- }
- else
- {
- CV_CALL( cvCornerMinEigenVal( img, eig, block_size, 3 ));
- }
- CV_CALL( cvMinMaxLoc( eig, 0, &max_val, 0, 0, mask ));
- CV_CALL( cvThreshold( eig, eig, max_val * quality_level,
- 0, CV_THRESH_TOZERO ));
- CV_CALL( cvDilate( eig, tmp ));
-
- min_dist = cvRound( min_distance * min_distance );
-
- size = cvGetMatSize( img );
- ptr_data = (int**)(tmp->data.ptr);
- eig_data = (int*)(eig->data.ptr);
- tmp_data = (int*)(tmp->data.ptr);
- if( mask )
- {
- mask_data = (uchar*)(mask->data.ptr);
- mask_step = mask->step;
- }
-
- eig_step = eig->step / sizeof(eig_data[0]);
- tmp_step = tmp->step / sizeof(tmp_data[0]);
-
- /* collect list of pointers to features - put them into temporary image */
- for( y = 1, k = 0; y < size.height - 1; y++ )
- {
- eig_data += eig_step;
- tmp_data += tmp_step;
- mask_data += mask_step;
-
- for( x = 1; x < size.width - 1; x++ )
- {
- int val = eig_data[x];
- if( val != 0 && val == tmp_data[x] && (!mask || mask_data[x]) )
- ptr_data[k++] = eig_data + x;
- }
- }
-
- icvSortFeatures( ptr_data, k, 0 );
-
- /* select the strongest features */
- for( i = 0; i < k; i++ )
- {
- int j = count, ofs = (int)((uchar*)(ptr_data[i]) - eig->data.ptr);
- y = ofs / eig->step;
- x = (ofs - y * eig->step)/sizeof(float);
-
- if( min_dist != 0 )
- {
- for( j = 0; j < count; j++ )
- {
- int dx = x - ptr[j].x;
- int dy = y - ptr[j].y;
- int dist = dx * dx + dy * dy;
-
- if( dist < min_dist )
- break;
- }
- }
-
- if( j == count )
- {
- ptr[count].x = x;
- ptr[count].y = y;
- if( ++count >= max_count )
- break;
- }
- }
-
- /* convert points to floating-point format */
- for( i = 0; i < count; i++ )
- {
- assert( (unsigned)ptr[i].x < (unsigned)size.width &&
- (unsigned)ptr[i].y < (unsigned)size.height );
-
- corners[i].x = (float)ptr[i].x;
- corners[i].y = (float)ptr[i].y;
- }
-
- *corner_count = count;
-
- __END__;
-
- cvReleaseMat( &_eigImg );
- cvReleaseMat( &_tmpImg );
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+#include "_cv.h"\r
+\r
+namespace cv\r
+{\r
+\r
+// Dereferencing comparator: orders pointers by the values they point at,\r
+// in ascending order (used to sort feature pointers by corner strength).\r
+template<typename T> struct lessThanPtr\r
+{\r
+    bool operator()(const T* a, const T* b) const { return *a < *b; }\r
+};\r
+\r
+// Finds the strongest corners in the image (Shi-Tomasi "Good Features to\r
+// Track", optionally the Harris response instead of the minimal eigenvalue).\r
+// corners receives at most maxCorners points (all of them if maxCorners <= 0),\r
+// strongest first, each at least minDistance pixels from the ones before it.\r
+void goodFeaturesToTrack( const Mat& image, Vector<Point2f>& corners,\r
+                          int maxCorners, double qualityLevel, double minDistance,\r
+                          const Mat& mask, int blockSize,\r
+                          bool useHarrisDetector, double harrisK )\r
+{\r
+    CV_Assert( qualityLevel > 0 && minDistance >= 0 && maxCorners >= 0 );\r
+\r
+    if( mask.data )\r
+        CV_Assert( mask.type() == CV_8UC1 && mask.size() == image.size() );\r
+\r
+    // Per-pixel corner strength: Harris response or minimal eigenvalue.\r
+    Mat eig, tmp;\r
+    if( useHarrisDetector )\r
+        cornerHarris( image, eig, blockSize, 3, harrisK );\r
+    else\r
+        cornerMinEigenVal( image, eig, blockSize, 3 );\r
+\r
+    // Zero out responses below qualityLevel*max, then build a 3x3 local-maximum\r
+    // map via dilation: a pixel is a local maximum iff eig == dilate(eig) there.\r
+    double maxVal = 0;\r
+    minMaxLoc( eig, 0, &maxVal, 0, 0, mask );\r
+    threshold( eig, eig, maxVal*qualityLevel, 0, THRESH_TOZERO );\r
+    dilate( eig, tmp, Mat());\r
+\r
+    Size imgsize = image.size();\r
+\r
+    Vector<const float*> tmpCorners;\r
+\r
+    // collect list of pointers to features - put them into temporary image\r
+    for( int y = 1; y < imgsize.height - 1; y++ )\r
+    {\r
+        const float* eig_data = (const float*)eig.ptr(y);\r
+        const float* tmp_data = (const float*)tmp.ptr(y);\r
+        const uchar* mask_data = mask.data ? mask.ptr(y) : 0;\r
+\r
+        for( int x = 1; x < imgsize.width - 1; x++ )\r
+        {\r
+            float val = eig_data[x];\r
+            // keep surviving local maxima that are not masked out\r
+            if( val != 0 && val == tmp_data[x] && (!mask_data || mask_data[x]) )\r
+                tmpCorners.push_back(eig_data + x);\r
+        }\r
+    }\r
+\r
+    // lessThanPtr sorts ASCENDING by strength, so the strongest features sit\r
+    // at the END of tmpCorners; the selection loop below therefore walks the\r
+    // array back to front. (Walking it front to back, as before, returned the\r
+    // WEAKEST maxCorners features.)\r
+    sort( tmpCorners, lessThanPtr<float>() );\r
+    corners.clear();\r
+    size_t i, j, total = tmpCorners.size(), ncorners = 0;\r
+\r
+    // compare squared distances below to avoid taking square roots\r
+    minDistance *= minDistance;\r
+\r
+    // select the strongest features\r
+    for( i = 0; i < total; i++ )\r
+    {\r
+        // recover (x,y) of the i-th strongest feature from its pointer offset\r
+        int ofs = (const uchar*)tmpCorners[total - 1 - i] - eig.data;\r
+        int y = ofs / eig.step;\r
+        int x = (ofs - y*eig.step)/sizeof(float);\r
+\r
+        if( minDistance > 0 )\r
+        {\r
+            // reject the feature if it is too close to an already\r
+            // accepted (stronger) one\r
+            for( j = 0; j < ncorners; j++ )\r
+            {\r
+                float dx = x - corners[j].x;\r
+                float dy = y - corners[j].y;\r
+                if( dx*dx + dy*dy < minDistance )\r
+                    break;\r
+            }\r
+            if( j < ncorners )\r
+                continue;\r
+        }\r
+\r
+        corners.push_back(Point2f((float)x, (float)y));\r
+        ++ncorners;\r
+        if( maxCorners > 0 && (int)ncorners == maxCorners )\r
+            break;\r
+    }\r
+}\r
+\r
+}\r
+\r
+/* Obsolete C API wrapper: converts the CvArr* arguments to cv::Mat and\r
+   delegates to cv::goodFeaturesToTrack. The unnamed second and third\r
+   parameters (formerly the eigen-value and temporary work images) are\r
+   ignored and kept only for source compatibility with old callers. */\r
+CV_IMPL void\r
+cvGoodFeaturesToTrack( const void* _image, void*, void*,\r
+                       CvPoint2D32f* _corners, int *_corner_count,\r
+                       double quality_level, double min_distance,\r
+                       const void* _maskImage, int block_size,\r
+                       int use_harris, double harris_k )\r
+{\r
+    cv::Mat image = cv::cvarrToMat(_image), mask;\r
+    cv::Vector<cv::Point2f> corners;\r
+\r
+    if( _maskImage )\r
+        mask = cv::cvarrToMat(_maskImage);\r
+\r
+    // On input *_corner_count holds the capacity of the _corners array;\r
+    // it is overwritten below with the number of corners actually found.\r
+    CV_Assert( _corners && _corner_count );\r
+    cv::goodFeaturesToTrack( image, corners, *_corner_count, quality_level,\r
+        min_distance, mask, block_size, use_harris != 0, harris_k );\r
+\r
+    // copy the result back into the caller-provided buffer\r
+    size_t i, ncorners = corners.size();\r
+    for( i = 0; i < ncorners; i++ )\r
+        _corners[i] = corners[i];\r
+    *_corner_count = (int)ncorners;\r
+}\r
+\r
+/* End of file. */\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-
-typedef struct CvFFillSegment
-{
- ushort y;
- ushort l;
- ushort r;
- ushort prevl;
- ushort prevr;
- short dir;
-}
-CvFFillSegment;
-
-#define UP 1
-#define DOWN -1
-
-#define ICV_PUSH( Y, L, R, PREV_L, PREV_R, DIR )\
-{ \
- tail->y = (ushort)(Y); \
- tail->l = (ushort)(L); \
- tail->r = (ushort)(R); \
- tail->prevl = (ushort)(PREV_L); \
- tail->prevr = (ushort)(PREV_R); \
- tail->dir = (short)(DIR); \
- if( ++tail >= buffer_end ) \
- tail = buffer; \
-}
-
-
-#define ICV_POP( Y, L, R, PREV_L, PREV_R, DIR ) \
-{ \
- Y = head->y; \
- L = head->l; \
- R = head->r; \
- PREV_L = head->prevl; \
- PREV_R = head->prevr; \
- DIR = head->dir; \
- if( ++head >= buffer_end ) \
- head = buffer; \
-}
-
-
-#define ICV_EQ_C3( p1, p2 ) \
- ((p1)[0] == (p2)[0] && (p1)[1] == (p2)[1] && (p1)[2] == (p2)[2])
-
-#define ICV_SET_C3( p, q ) \
- ((p)[0] = (q)[0], (p)[1] = (q)[1], (p)[2] = (q)[2])
-
-/****************************************************************************************\
-* Simple Floodfill (repainting single-color connected component) *
-\****************************************************************************************/
-
-static CvStatus
-icvFloodFill_8u_CnIR( uchar* pImage, int step, CvSize roi, CvPoint seed,
- uchar* _newVal, CvConnectedComp* region, int flags,
- CvFFillSegment* buffer, int buffer_size, int cn )
-{
- uchar* img = pImage + step * seed.y;
- int i, L, R;
- int area = 0;
- int val0[] = {0,0,0};
- uchar newVal[] = {0,0,0};
- int XMin, XMax, YMin = seed.y, YMax = seed.y;
- int _8_connectivity = (flags & 255) == 8;
- CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;
-
- L = R = XMin = XMax = seed.x;
-
- if( cn == 1 )
- {
- val0[0] = img[L];
- newVal[0] = _newVal[0];
-
- img[L] = newVal[0];
-
- while( ++R < roi.width && img[R] == val0[0] )
- img[R] = newVal[0];
-
- while( --L >= 0 && img[L] == val0[0] )
- img[L] = newVal[0];
- }
- else
- {
- assert( cn == 3 );
- ICV_SET_C3( val0, img + L*3 );
- ICV_SET_C3( newVal, _newVal );
-
- ICV_SET_C3( img + L*3, newVal );
-
- while( --L >= 0 && ICV_EQ_C3( img + L*3, val0 ))
- ICV_SET_C3( img + L*3, newVal );
-
- while( ++R < roi.width && ICV_EQ_C3( img + R*3, val0 ))
- ICV_SET_C3( img + R*3, newVal );
- }
-
- XMax = --R;
- XMin = ++L;
- ICV_PUSH( seed.y, L, R, R + 1, R, UP );
-
- while( head != tail )
- {
- int k, YC, PL, PR, dir;
- ICV_POP( YC, L, R, PL, PR, dir );
-
- int data[][3] =
- {
- {-dir, L - _8_connectivity, R + _8_connectivity},
- {dir, L - _8_connectivity, PL - 1},
- {dir, PR + 1, R + _8_connectivity}
- };
-
- if( region )
- {
- area += R - L + 1;
-
- if( XMax < R ) XMax = R;
- if( XMin > L ) XMin = L;
- if( YMax < YC ) YMax = YC;
- if( YMin > YC ) YMin = YC;
- }
-
- for( k = 0/*(unsigned)(YC - dir) >= (unsigned)roi.height*/; k < 3; k++ )
- {
- dir = data[k][0];
- img = pImage + (YC + dir) * step;
- int left = data[k][1];
- int right = data[k][2];
-
- if( (unsigned)(YC + dir) >= (unsigned)roi.height )
- continue;
-
- if( cn == 1 )
- for( i = left; i <= right; i++ )
- {
- if( (unsigned)i < (unsigned)roi.width && img[i] == val0[0] )
- {
- int j = i;
- img[i] = newVal[0];
- while( --j >= 0 && img[j] == val0[0] )
- img[j] = newVal[0];
-
- while( ++i < roi.width && img[i] == val0[0] )
- img[i] = newVal[0];
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else
- for( i = left; i <= right; i++ )
- {
- if( (unsigned)i < (unsigned)roi.width && ICV_EQ_C3( img + i*3, val0 ))
- {
- int j = i;
- ICV_SET_C3( img + i*3, newVal );
- while( --j >= 0 && ICV_EQ_C3( img + j*3, val0 ))
- ICV_SET_C3( img + j*3, newVal );
-
- while( ++i < roi.width && ICV_EQ_C3( img + i*3, val0 ))
- ICV_SET_C3( img + i*3, newVal );
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- }
- }
-
- if( region )
- {
- region->area = area;
- region->rect.x = XMin;
- region->rect.y = YMin;
- region->rect.width = XMax - XMin + 1;
- region->rect.height = YMax - YMin + 1;
- region->value = cvScalar(newVal[0], newVal[1], newVal[2], 0);
- }
-
- return CV_NO_ERR;
-}
-
-
-/* because all the operations on floats that are done during non-gradient floodfill
- are just copying and comparison on equality,
- we can do the whole op on 32-bit integers instead */
-static CvStatus
-icvFloodFill_32f_CnIR( int* pImage, int step, CvSize roi, CvPoint seed,
- int* _newVal, CvConnectedComp* region, int flags,
- CvFFillSegment* buffer, int buffer_size, int cn )
-{
- int* img = pImage + (step /= sizeof(pImage[0])) * seed.y;
- int i, L, R;
- int area = 0;
- int val0[] = {0,0,0};
- int newVal[] = {0,0,0};
- int XMin, XMax, YMin = seed.y, YMax = seed.y;
- int _8_connectivity = (flags & 255) == 8;
- CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;
-
- L = R = XMin = XMax = seed.x;
-
- if( cn == 1 )
- {
- val0[0] = img[L];
- newVal[0] = _newVal[0];
-
- img[L] = newVal[0];
-
- while( ++R < roi.width && img[R] == val0[0] )
- img[R] = newVal[0];
-
- while( --L >= 0 && img[L] == val0[0] )
- img[L] = newVal[0];
- }
- else
- {
- assert( cn == 3 );
- ICV_SET_C3( val0, img + L*3 );
- ICV_SET_C3( newVal, _newVal );
-
- ICV_SET_C3( img + L*3, newVal );
-
- while( --L >= 0 && ICV_EQ_C3( img + L*3, val0 ))
- ICV_SET_C3( img + L*3, newVal );
-
- while( ++R < roi.width && ICV_EQ_C3( img + R*3, val0 ))
- ICV_SET_C3( img + R*3, newVal );
- }
-
- XMax = --R;
- XMin = ++L;
- ICV_PUSH( seed.y, L, R, R + 1, R, UP );
-
- while( head != tail )
- {
- int k, YC, PL, PR, dir;
- ICV_POP( YC, L, R, PL, PR, dir );
-
- int data[][3] =
- {
- {-dir, L - _8_connectivity, R + _8_connectivity},
- {dir, L - _8_connectivity, PL - 1},
- {dir, PR + 1, R + _8_connectivity}
- };
-
- if( region )
- {
- area += R - L + 1;
-
- if( XMax < R ) XMax = R;
- if( XMin > L ) XMin = L;
- if( YMax < YC ) YMax = YC;
- if( YMin > YC ) YMin = YC;
- }
-
- for( k = 0/*(unsigned)(YC - dir) >= (unsigned)roi.height*/; k < 3; k++ )
- {
- dir = data[k][0];
- img = pImage + (YC + dir) * step;
- int left = data[k][1];
- int right = data[k][2];
-
- if( (unsigned)(YC + dir) >= (unsigned)roi.height )
- continue;
-
- if( cn == 1 )
- for( i = left; i <= right; i++ )
- {
- if( (unsigned)i < (unsigned)roi.width && img[i] == val0[0] )
- {
- int j = i;
- img[i] = newVal[0];
- while( --j >= 0 && img[j] == val0[0] )
- img[j] = newVal[0];
-
- while( ++i < roi.width && img[i] == val0[0] )
- img[i] = newVal[0];
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else
- for( i = left; i <= right; i++ )
- {
- if( (unsigned)i < (unsigned)roi.width && ICV_EQ_C3( img + i*3, val0 ))
- {
- int j = i;
- ICV_SET_C3( img + i*3, newVal );
- while( --j >= 0 && ICV_EQ_C3( img + j*3, val0 ))
- ICV_SET_C3( img + j*3, newVal );
-
- while( ++i < roi.width && ICV_EQ_C3( img + i*3, val0 ))
- ICV_SET_C3( img + i*3, newVal );
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- }
- }
-
- if( region )
- {
- Cv32suf v0, v1, v2;
- region->area = area;
- region->rect.x = XMin;
- region->rect.y = YMin;
- region->rect.width = XMax - XMin + 1;
- region->rect.height = YMax - YMin + 1;
- v0.i = newVal[0]; v1.i = newVal[1]; v2.i = newVal[2];
- region->value = cvScalar( v0.f, v1.f, v2.f );
- }
-
- return CV_NO_ERR;
-}
-
-/****************************************************************************************\
-* Gradient Floodfill *
-\****************************************************************************************/
-
-#define DIFF_INT_C1(p1,p2) ((unsigned)((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0])
-
-#define DIFF_INT_C3(p1,p2) ((unsigned)((p1)[0] - (p2)[0] + d_lw[0])<= interval[0] && \
- (unsigned)((p1)[1] - (p2)[1] + d_lw[1])<= interval[1] && \
- (unsigned)((p1)[2] - (p2)[2] + d_lw[2])<= interval[2])
-
-#define DIFF_FLT_C1(p1,p2) (fabs((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0])
-
-#define DIFF_FLT_C3(p1,p2) (fabs((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0] && \
- fabs((p1)[1] - (p2)[1] + d_lw[1]) <= interval[1] && \
- fabs((p1)[2] - (p2)[2] + d_lw[2]) <= interval[2])
-
-static CvStatus
-icvFloodFill_Grad_8u_CnIR( uchar* pImage, int step, uchar* pMask, int maskStep,
- CvSize /*roi*/, CvPoint seed, uchar* _newVal, uchar* _d_lw,
- uchar* _d_up, CvConnectedComp* region, int flags,
- CvFFillSegment* buffer, int buffer_size, int cn )
-{
- uchar* img = pImage + step*seed.y;
- uchar* mask = (pMask += maskStep + 1) + maskStep*seed.y;
- int i, L, R;
- int area = 0;
- int sum[] = {0,0,0}, val0[] = {0,0,0};
- uchar newVal[] = {0,0,0};
- int d_lw[] = {0,0,0};
- unsigned interval[] = {0,0,0};
- int XMin, XMax, YMin = seed.y, YMax = seed.y;
- int _8_connectivity = (flags & 255) == 8;
- int fixedRange = flags & CV_FLOODFILL_FIXED_RANGE;
- int fillImage = (flags & CV_FLOODFILL_MASK_ONLY) == 0;
- uchar newMaskVal = (uchar)(flags & 0xff00 ? flags >> 8 : 1);
- CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;
-
- L = R = seed.x;
- if( mask[L] )
- return CV_OK;
-
- mask[L] = newMaskVal;
-
- for( i = 0; i < cn; i++ )
- {
- newVal[i] = _newVal[i];
- d_lw[i] = _d_lw[i];
- interval[i] = (unsigned)(_d_up[i] + _d_lw[i]);
- if( fixedRange )
- val0[i] = img[L*cn+i];
- }
-
- if( cn == 1 )
- {
- if( fixedRange )
- {
- while( !mask[R + 1] && DIFF_INT_C1( img + (R+1), val0 ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_INT_C1( img + (L-1), val0 ))
- mask[--L] = newMaskVal;
- }
- else
- {
- while( !mask[R + 1] && DIFF_INT_C1( img + (R+1), img + R ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_INT_C1( img + (L-1), img + L ))
- mask[--L] = newMaskVal;
- }
- }
- else
- {
- if( fixedRange )
- {
- while( !mask[R + 1] && DIFF_INT_C3( img + (R+1)*3, val0 ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_INT_C3( img + (L-1)*3, val0 ))
- mask[--L] = newMaskVal;
- }
- else
- {
- while( !mask[R + 1] && DIFF_INT_C3( img + (R+1)*3, img + R*3 ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_INT_C3( img + (L-1)*3, img + L*3 ))
- mask[--L] = newMaskVal;
- }
- }
-
- XMax = R;
- XMin = L;
- ICV_PUSH( seed.y, L, R, R + 1, R, UP );
-
- while( head != tail )
- {
- int k, YC, PL, PR, dir, curstep;
- ICV_POP( YC, L, R, PL, PR, dir );
-
- int data[][3] =
- {
- {-dir, L - _8_connectivity, R + _8_connectivity},
- {dir, L - _8_connectivity, PL - 1},
- {dir, PR + 1, R + _8_connectivity}
- };
-
- unsigned length = (unsigned)(R-L);
-
- if( region )
- {
- area += (int)length + 1;
-
- if( XMax < R ) XMax = R;
- if( XMin > L ) XMin = L;
- if( YMax < YC ) YMax = YC;
- if( YMin > YC ) YMin = YC;
- }
-
- if( cn == 1 )
- {
- for( k = 0; k < 3; k++ )
- {
- dir = data[k][0];
- curstep = dir * step;
- img = pImage + (YC + dir) * step;
- mask = pMask + (YC + dir) * maskStep;
- int left = data[k][1];
- int right = data[k][2];
-
- if( fixedRange )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_INT_C1( img + i, val0 ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_INT_C1( img + j, val0 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] && DIFF_INT_C1( img + i, val0 ))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else if( !_8_connectivity )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_INT_C1( img + i, img - curstep + i ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_INT_C1( img + j, img + (j+1) ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- (DIFF_INT_C1( img + i, img + (i-1) ) ||
- (DIFF_INT_C1( img + i, img + i - curstep) && i <= R)))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else
- for( i = left; i <= right; i++ )
- {
- int idx, val[1];
-
- if( !mask[i] &&
- (((val[0] = img[i],
- (unsigned)(idx = i-L-1) <= length) &&
- DIFF_INT_C1( val, img - curstep + (i-1))) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C1( val, img - curstep + i )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C1( val, img - curstep + (i+1) ))))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_INT_C1( img + j, img + (j+1) ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- ((val[0] = img[i],
- DIFF_INT_C1( val, img + (i-1) )) ||
- (((unsigned)(idx = i-L-1) <= length &&
- DIFF_INT_C1( val, img - curstep + (i-1) ))) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C1( val, img - curstep + i )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C1( val, img - curstep + (i+1) ))))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- }
-
- img = pImage + YC * step;
- if( fillImage )
- for( i = L; i <= R; i++ )
- img[i] = newVal[0];
- else if( region )
- for( i = L; i <= R; i++ )
- sum[0] += img[i];
- }
- else
- {
- for( k = 0; k < 3; k++ )
- {
- dir = data[k][0];
- curstep = dir * step;
- img = pImage + (YC + dir) * step;
- mask = pMask + (YC + dir) * maskStep;
- int left = data[k][1];
- int right = data[k][2];
-
- if( fixedRange )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_INT_C3( img + i*3, val0 ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_INT_C3( img + j*3, val0 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] && DIFF_INT_C3( img + i*3, val0 ))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else if( !_8_connectivity )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_INT_C3( img + i*3, img - curstep + i*3 ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_INT_C3( img + j*3, img + (j+1)*3 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- (DIFF_INT_C3( img + i*3, img + (i-1)*3 ) ||
- (DIFF_INT_C3( img + i*3, img + i*3 - curstep) && i <= R)))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else
- for( i = left; i <= right; i++ )
- {
- int idx, val[3];
-
- if( !mask[i] &&
- (((ICV_SET_C3( val, img+i*3 ),
- (unsigned)(idx = i-L-1) <= length) &&
- DIFF_INT_C3( val, img - curstep + (i-1)*3 )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C3( val, img - curstep + i*3 )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C3( val, img - curstep + (i+1)*3 ))))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_INT_C3( img + j*3, img + (j+1)*3 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- ((ICV_SET_C3( val, img + i*3 ),
- DIFF_INT_C3( val, img + (i-1)*3 )) ||
- (((unsigned)(idx = i-L-1) <= length &&
- DIFF_INT_C3( val, img - curstep + (i-1)*3 ))) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C3( val, img - curstep + i*3 )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_INT_C3( val, img - curstep + (i+1)*3 ))))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- }
-
- img = pImage + YC * step;
- if( fillImage )
- for( i = L; i <= R; i++ )
- ICV_SET_C3( img + i*3, newVal );
- else if( region )
- for( i = L; i <= R; i++ )
- {
- sum[0] += img[i*3];
- sum[1] += img[i*3+1];
- sum[2] += img[i*3+2];
- }
- }
- }
-
- if( region )
- {
- region->area = area;
- region->rect.x = XMin;
- region->rect.y = YMin;
- region->rect.width = XMax - XMin + 1;
- region->rect.height = YMax - YMin + 1;
-
- if( fillImage )
- region->value = cvScalar(newVal[0], newVal[1], newVal[2]);
- else
- {
- double iarea = area ? 1./area : 0;
- region->value = cvScalar(sum[0]*iarea, sum[1]*iarea, sum[2]*iarea);
- }
- }
-
- return CV_NO_ERR;
-}
-
-
-static CvStatus
-icvFloodFill_Grad_32f_CnIR( float* pImage, int step, uchar* pMask, int maskStep,
- CvSize /*roi*/, CvPoint seed, float* _newVal, float* _d_lw,
- float* _d_up, CvConnectedComp* region, int flags,
- CvFFillSegment* buffer, int buffer_size, int cn )
-{
- float* img = pImage + (step /= sizeof(float))*seed.y;
- uchar* mask = (pMask += maskStep + 1) + maskStep*seed.y;
- int i, L, R;
- int area = 0;
- double sum[] = {0,0,0}, val0[] = {0,0,0};
- float newVal[] = {0,0,0};
- float d_lw[] = {0,0,0};
- float interval[] = {0,0,0};
- int XMin, XMax, YMin = seed.y, YMax = seed.y;
- int _8_connectivity = (flags & 255) == 8;
- int fixedRange = flags & CV_FLOODFILL_FIXED_RANGE;
- int fillImage = (flags & CV_FLOODFILL_MASK_ONLY) == 0;
- uchar newMaskVal = (uchar)(flags & 0xff00 ? flags >> 8 : 1);
- CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;
-
- L = R = seed.x;
- if( mask[L] )
- return CV_OK;
-
- mask[L] = newMaskVal;
-
- for( i = 0; i < cn; i++ )
- {
- newVal[i] = _newVal[i];
- d_lw[i] = 0.5f*(_d_lw[i] - _d_up[i]);
- interval[i] = 0.5f*(_d_lw[i] + _d_up[i]);
- if( fixedRange )
- val0[i] = img[L*cn+i];
- }
-
- if( cn == 1 )
- {
- if( fixedRange )
- {
- while( !mask[R + 1] && DIFF_FLT_C1( img + (R+1), val0 ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_FLT_C1( img + (L-1), val0 ))
- mask[--L] = newMaskVal;
- }
- else
- {
- while( !mask[R + 1] && DIFF_FLT_C1( img + (R+1), img + R ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_FLT_C1( img + (L-1), img + L ))
- mask[--L] = newMaskVal;
- }
- }
- else
- {
- if( fixedRange )
- {
- while( !mask[R + 1] && DIFF_FLT_C3( img + (R+1)*3, val0 ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_FLT_C3( img + (L-1)*3, val0 ))
- mask[--L] = newMaskVal;
- }
- else
- {
- while( !mask[R + 1] && DIFF_FLT_C3( img + (R+1)*3, img + R*3 ))
- mask[++R] = newMaskVal;
-
- while( !mask[L - 1] && DIFF_FLT_C3( img + (L-1)*3, img + L*3 ))
- mask[--L] = newMaskVal;
- }
- }
-
- XMax = R;
- XMin = L;
- ICV_PUSH( seed.y, L, R, R + 1, R, UP );
-
- while( head != tail )
- {
- int k, YC, PL, PR, dir, curstep;
- ICV_POP( YC, L, R, PL, PR, dir );
-
- int data[][3] =
- {
- {-dir, L - _8_connectivity, R + _8_connectivity},
- {dir, L - _8_connectivity, PL - 1},
- {dir, PR + 1, R + _8_connectivity}
- };
-
- unsigned length = (unsigned)(R-L);
-
- if( region )
- {
- area += (int)length + 1;
-
- if( XMax < R ) XMax = R;
- if( XMin > L ) XMin = L;
- if( YMax < YC ) YMax = YC;
- if( YMin > YC ) YMin = YC;
- }
-
- if( cn == 1 )
- {
- for( k = 0; k < 3; k++ )
- {
- dir = data[k][0];
- curstep = dir * step;
- img = pImage + (YC + dir) * step;
- mask = pMask + (YC + dir) * maskStep;
- int left = data[k][1];
- int right = data[k][2];
-
- if( fixedRange )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_FLT_C1( img + i, val0 ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_FLT_C1( img + j, val0 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] && DIFF_FLT_C1( img + i, val0 ))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else if( !_8_connectivity )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_FLT_C1( img + i, img - curstep + i ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_FLT_C1( img + j, img + (j+1) ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- (DIFF_FLT_C1( img + i, img + (i-1) ) ||
- (DIFF_FLT_C1( img + i, img + i - curstep) && i <= R)))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else
- for( i = left; i <= right; i++ )
- {
- int idx;
- float val[1];
-
- if( !mask[i] &&
- (((val[0] = img[i],
- (unsigned)(idx = i-L-1) <= length) &&
- DIFF_FLT_C1( val, img - curstep + (i-1) )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C1( val, img - curstep + i )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C1( val, img - curstep + (i+1) ))))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_FLT_C1( img + j, img + (j+1) ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- ((val[0] = img[i],
- DIFF_FLT_C1( val, img + (i-1) )) ||
- (((unsigned)(idx = i-L-1) <= length &&
- DIFF_FLT_C1( val, img - curstep + (i-1) ))) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C1( val, img - curstep + i )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C1( val, img - curstep + (i+1) ))))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- }
-
- img = pImage + YC * step;
- if( fillImage )
- for( i = L; i <= R; i++ )
- img[i] = newVal[0];
- else if( region )
- for( i = L; i <= R; i++ )
- sum[0] += img[i];
- }
- else
- {
- for( k = 0; k < 3; k++ )
- {
- dir = data[k][0];
- curstep = dir * step;
- img = pImage + (YC + dir) * step;
- mask = pMask + (YC + dir) * maskStep;
- int left = data[k][1];
- int right = data[k][2];
-
- if( fixedRange )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_FLT_C3( img + i*3, val0 ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_FLT_C3( img + j*3, val0 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] && DIFF_FLT_C3( img + i*3, val0 ))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else if( !_8_connectivity )
- for( i = left; i <= right; i++ )
- {
- if( !mask[i] && DIFF_FLT_C3( img + i*3, img - curstep + i*3 ))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_FLT_C3( img + j*3, img + (j+1)*3 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- (DIFF_FLT_C3( img + i*3, img + (i-1)*3 ) ||
- (DIFF_FLT_C3( img + i*3, img + i*3 - curstep) && i <= R)))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- else
- for( i = left; i <= right; i++ )
- {
- int idx;
- float val[3];
-
- if( !mask[i] &&
- (((ICV_SET_C3( val, img+i*3 ),
- (unsigned)(idx = i-L-1) <= length) &&
- DIFF_FLT_C3( val, img - curstep + (i-1)*3 )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C3( val, img - curstep + i*3 )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C3( val, img - curstep + (i+1)*3 ))))
- {
- int j = i;
- mask[i] = newMaskVal;
- while( !mask[--j] && DIFF_FLT_C3( img + j*3, img + (j+1)*3 ))
- mask[j] = newMaskVal;
-
- while( !mask[++i] &&
- ((ICV_SET_C3( val, img + i*3 ),
- DIFF_FLT_C3( val, img + (i-1)*3 )) ||
- (((unsigned)(idx = i-L-1) <= length &&
- DIFF_FLT_C3( val, img - curstep + (i-1)*3 ))) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C3( val, img - curstep + i*3 )) ||
- ((unsigned)(++idx) <= length &&
- DIFF_FLT_C3( val, img - curstep + (i+1)*3 ))))
- mask[i] = newMaskVal;
-
- ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
- }
- }
- }
-
- img = pImage + YC * step;
- if( fillImage )
- for( i = L; i <= R; i++ )
- ICV_SET_C3( img + i*3, newVal );
- else if( region )
- for( i = L; i <= R; i++ )
- {
- sum[0] += img[i*3];
- sum[1] += img[i*3+1];
- sum[2] += img[i*3+2];
- }
- }
- }
-
- if( region )
- {
- region->area = area;
- region->rect.x = XMin;
- region->rect.y = YMin;
- region->rect.width = XMax - XMin + 1;
- region->rect.height = YMax - YMin + 1;
-
- if( fillImage )
- region->value = cvScalar(newVal[0], newVal[1], newVal[2]);
- else
- {
- double iarea = area ? 1./area : 0;
- region->value = cvScalar(sum[0]*iarea, sum[1]*iarea, sum[2]*iarea);
- }
- }
-
- return CV_NO_ERR;
-}
-
-
-/****************************************************************************************\
-* External Functions *
-\****************************************************************************************/
-
-typedef CvStatus (CV_CDECL* CvFloodFillFunc)(
- void* img, int step, CvSize size, CvPoint seed, void* newval,
- CvConnectedComp* comp, int flags, void* buffer, int buffer_size, int cn );
-
-typedef CvStatus (CV_CDECL* CvFloodFillGradFunc)(
- void* img, int step, uchar* mask, int maskStep, CvSize size,
- CvPoint seed, void* newval, void* d_lw, void* d_up, void* ccomp,
- int flags, void* buffer, int buffer_size, int cn );
-
-static void icvInitFloodFill( void** ffill_tab,
- void** ffillgrad_tab )
-{
- ffill_tab[0] = (void*)icvFloodFill_8u_CnIR;
- ffill_tab[1] = (void*)icvFloodFill_32f_CnIR;
-
- ffillgrad_tab[0] = (void*)icvFloodFill_Grad_8u_CnIR;
- ffillgrad_tab[1] = (void*)icvFloodFill_Grad_32f_CnIR;
-}
-
-
-CV_IMPL void
-cvFloodFill( CvArr* arr, CvPoint seed_point,
- CvScalar newVal, CvScalar lo_diff, CvScalar up_diff,
- CvConnectedComp* comp, int flags, CvArr* maskarr )
-{
- static void* ffill_tab[4];
- static void* ffillgrad_tab[4];
- static int inittab = 0;
-
- CvMat* tempMask = 0;
- CvFFillSegment* buffer = 0;
- CV_FUNCNAME( "cvFloodFill" );
-
- if( comp )
- memset( comp, 0, sizeof(*comp) );
-
- __BEGIN__;
-
- int i, type, depth, cn, is_simple, idx;
- int buffer_size, connectivity = flags & 255;
- double nv_buf[4] = {0,0,0,0};
- union { uchar b[4]; float f[4]; } ld_buf, ud_buf;
- CvMat stub, *img = (CvMat*)arr;
- CvMat maskstub, *mask = (CvMat*)maskarr;
- CvSize size;
-
- if( !inittab )
- {
- icvInitFloodFill( ffill_tab, ffillgrad_tab );
- inittab = 1;
- }
-
- CV_CALL( img = cvGetMat( img, &stub ));
- type = CV_MAT_TYPE( img->type );
- depth = CV_MAT_DEPTH(type);
- cn = CV_MAT_CN(type);
-
- idx = type == CV_8UC1 || type == CV_8UC3 ? 0 :
- type == CV_32FC1 || type == CV_32FC3 ? 1 : -1;
-
- if( idx < 0 )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
-
- if( connectivity == 0 )
- connectivity = 4;
- else if( connectivity != 4 && connectivity != 8 )
- CV_ERROR( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" );
-
- is_simple = mask == 0 && (flags & CV_FLOODFILL_MASK_ONLY) == 0;
-
- for( i = 0; i < cn; i++ )
- {
- if( lo_diff.val[i] < 0 || up_diff.val[i] < 0 )
- CV_ERROR( CV_StsBadArg, "lo_diff and up_diff must be non-negative" );
- is_simple &= fabs(lo_diff.val[i]) < DBL_EPSILON && fabs(up_diff.val[i]) < DBL_EPSILON;
- }
-
- size = cvGetMatSize( img );
-
- if( (unsigned)seed_point.x >= (unsigned)size.width ||
- (unsigned)seed_point.y >= (unsigned)size.height )
- CV_ERROR( CV_StsOutOfRange, "Seed point is outside of image" );
-
- cvScalarToRawData( &newVal, &nv_buf, type, 0 );
- buffer_size = MAX( size.width, size.height )*2;
- CV_CALL( buffer = (CvFFillSegment*)cvAlloc( buffer_size*sizeof(buffer[0])));
-
- if( is_simple )
- {
- int elem_size = CV_ELEM_SIZE(type);
- const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x;
- CvFloodFillFunc func = (CvFloodFillFunc)ffill_tab[idx];
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
- // check if the new value is different from the current value at the seed point.
- // if they are exactly the same, use the generic version with mask to avoid infinite loops.
- for( i = 0; i < elem_size; i++ )
- if( seed_ptr[i] != ((uchar*)nv_buf)[i] )
- break;
- if( i < elem_size )
- {
- IPPI_CALL( func( img->data.ptr, img->step, size,
- seed_point, &nv_buf, comp, flags,
- buffer, buffer_size, cn ));
- EXIT;
- }
- }
-
- {
- CvFloodFillGradFunc func = (CvFloodFillGradFunc)ffillgrad_tab[idx];
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
-
- if( !mask )
- {
- /* created mask will be 8-byte aligned */
- tempMask = cvCreateMat( size.height + 2, (size.width + 9) & -8, CV_8UC1 );
- mask = tempMask;
- }
- else
- {
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
- if( !CV_IS_MASK_ARR( mask ))
- CV_ERROR( CV_StsBadMask, "" );
-
- if( mask->width != size.width + 2 || mask->height != size.height + 2 )
- CV_ERROR( CV_StsUnmatchedSizes, "mask must be 2 pixel wider "
- "and 2 pixel taller than filled image" );
- }
-
- {
- int width = tempMask ? mask->step : size.width + 2;
- uchar* mask_row = mask->data.ptr + mask->step;
- memset( mask_row - mask->step, 1, width );
-
- for( i = 1; i <= size.height; i++, mask_row += mask->step )
- {
- if( tempMask )
- memset( mask_row, 0, width );
- mask_row[0] = mask_row[size.width+1] = (uchar)1;
- }
- memset( mask_row, 1, width );
- }
-
- if( depth == CV_8U )
- for( i = 0; i < cn; i++ )
- {
- int t = cvFloor(lo_diff.val[i]);
- ld_buf.b[i] = CV_CAST_8U(t);
- t = cvFloor(up_diff.val[i]);
- ud_buf.b[i] = CV_CAST_8U(t);
- }
- else
- for( i = 0; i < cn; i++ )
- {
- ld_buf.f[i] = (float)lo_diff.val[i];
- ud_buf.f[i] = (float)up_diff.val[i];
- }
-
- IPPI_CALL( func( img->data.ptr, img->step, mask->data.ptr, mask->step,
- size, seed_point, &nv_buf, ld_buf.f, ud_buf.f,
- comp, flags, buffer, buffer_size, cn ));
- }
-
- __END__;
-
- cvFree( &buffer );
- cvReleaseMat( &tempMask );
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cv.h"\r
+\r
/* One horizontal span stored in the flood-fill work queue.
   The queue is a fixed-size circular buffer of these records. */
typedef struct CvFFillSegment
{
    ushort y;       /* row of the span                                   */
    ushort l;       /* leftmost column of the span (inclusive)           */
    ushort r;       /* rightmost column of the span (inclusive)          */
    ushort prevl;   /* parent span's left bound (limits re-scanning)     */
    ushort prevr;   /* parent span's right bound                         */
    short dir;      /* UP or DOWN: direction we came from the parent row */
}
CvFFillSegment;
+\r
/* Scan directions relative to the parent span. */
#define UP 1
#define DOWN -1

/* Append a span to the circular FIFO; wraps `tail` at `buffer_end`.
   Relies on buffer/buffer_end/tail being in scope at the expansion site. */
#define ICV_PUSH( Y, L, R, PREV_L, PREV_R, DIR )\
{                                               \
    tail->y = (ushort)(Y);                      \
    tail->l = (ushort)(L);                      \
    tail->r = (ushort)(R);                      \
    tail->prevl = (ushort)(PREV_L);             \
    tail->prevr = (ushort)(PREV_R);             \
    tail->dir = (short)(DIR);                   \
    if( ++tail >= buffer_end )                  \
        tail = buffer;                          \
}

/* Remove the oldest span from the circular FIFO; wraps `head`. */
#define ICV_POP( Y, L, R, PREV_L, PREV_R, DIR ) \
{                                               \
    Y = head->y;                                \
    L = head->l;                                \
    R = head->r;                                \
    PREV_L = head->prevl;                       \
    PREV_R = head->prevr;                       \
    DIR = head->dir;                            \
    if( ++head >= buffer_end )                  \
        head = buffer;                          \
}

/* Compare / copy one 3-channel pixel (element type is whatever p/q point to). */
#define ICV_EQ_C3( p1, p2 ) \
    ((p1)[0] == (p2)[0] && (p1)[1] == (p2)[1] && (p1)[2] == (p2)[2])

#define ICV_SET_C3( p, q ) \
    ((p)[0] = (q)[0], (p)[1] = (q)[1], (p)[2] = (q)[2])
+\r
+/****************************************************************************************\\r
+* Simple Floodfill (repainting single-color connected component) *\r
+\****************************************************************************************/\r
+\r
/* Simple (exact-match) flood fill for 8-bit, 1- or 3-channel images.
   Repaints the connected component of pixels equal to the seed's color with
   `_newVal`, using a scanline algorithm driven by a circular span FIFO
   (`buffer` of `buffer_size` entries).  `flags & 255` selects 4- vs
   8-connectivity.  On return, `region` (if non-NULL) receives the filled
   area, bounding rectangle and fill value.
   NOTE(review): caller must guarantee newVal != seed color, otherwise the
   scan loops never terminate on the repainted pixels (cvFloodFill checks). */
static CvStatus
icvFloodFill_8u_CnIR( uchar* pImage, int step, CvSize roi, CvPoint seed,
                      uchar* _newVal, CvConnectedComp* region, int flags,
                      CvFFillSegment* buffer, int buffer_size, int cn )
{
    uchar* img = pImage + step * seed.y;
    int i, L, R;
    int area = 0;
    int val0[] = {0,0,0};       /* color being replaced (the seed's color) */
    uchar newVal[] = {0,0,0};
    int XMin, XMax, YMin = seed.y, YMax = seed.y;  /* bounding-box accumulators */
    int _8_connectivity = (flags & 255) == 8;
    CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;

    L = R = XMin = XMax = seed.x;

    /* grow the initial span left and right from the seed, repainting as we go */
    if( cn == 1 )
    {
        val0[0] = img[L];
        newVal[0] = _newVal[0];

        img[L] = newVal[0];

        while( ++R < roi.width && img[R] == val0[0] )
            img[R] = newVal[0];

        while( --L >= 0 && img[L] == val0[0] )
            img[L] = newVal[0];
    }
    else
    {
        assert( cn == 3 );
        ICV_SET_C3( val0, img + L*3 );
        ICV_SET_C3( newVal, _newVal );

        ICV_SET_C3( img + L*3, newVal );

        while( --L >= 0 && ICV_EQ_C3( img + L*3, val0 ))
            ICV_SET_C3( img + L*3, newVal );

        while( ++R < roi.width && ICV_EQ_C3( img + R*3, val0 ))
            ICV_SET_C3( img + R*3, newVal );
    }

    /* the loops above overshoot by one on each side */
    XMax = --R;
    XMin = ++L;
    ICV_PUSH( seed.y, L, R, R + 1, R, UP );

    while( head != tail )
    {
        int k, YC, PL, PR, dir;
        ICV_POP( YC, L, R, PL, PR, dir );

        /* three ranges to probe: the whole span in the direction we have not
           come from, and (in the parent direction) only the parts that stick
           out beyond the parent span.  8-connectivity widens each by 1. */
        int data[][3] =
        {
            {-dir, L - _8_connectivity, R + _8_connectivity},
            {dir, L - _8_connectivity, PL - 1},
            {dir, PR + 1, R + _8_connectivity}
        };

        if( region )
        {
            area += R - L + 1;

            if( XMax < R ) XMax = R;
            if( XMin > L ) XMin = L;
            if( YMax < YC ) YMax = YC;
            if( YMin > YC ) YMin = YC;
        }

        for( k = 0/*(unsigned)(YC - dir) >= (unsigned)roi.height*/; k < 3; k++ )
        {
            dir = data[k][0];
            img = pImage + (YC + dir) * step;
            int left = data[k][1];
            int right = data[k][2];

            /* skip probes that would leave the image vertically */
            if( (unsigned)(YC + dir) >= (unsigned)roi.height )
                continue;

            if( cn == 1 )
                for( i = left; i <= right; i++ )
                {
                    if( (unsigned)i < (unsigned)roi.width && img[i] == val0[0] )
                    {
                        int j = i;
                        img[i] = newVal[0];
                        /* expand the hit into a full span, repaint it, queue it */
                        while( --j >= 0 && img[j] == val0[0] )
                            img[j] = newVal[0];

                        while( ++i < roi.width && img[i] == val0[0] )
                            img[i] = newVal[0];

                        ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                    }
                }
            else
                for( i = left; i <= right; i++ )
                {
                    if( (unsigned)i < (unsigned)roi.width && ICV_EQ_C3( img + i*3, val0 ))
                    {
                        int j = i;
                        ICV_SET_C3( img + i*3, newVal );
                        while( --j >= 0 && ICV_EQ_C3( img + j*3, val0 ))
                            ICV_SET_C3( img + j*3, newVal );

                        while( ++i < roi.width && ICV_EQ_C3( img + i*3, val0 ))
                            ICV_SET_C3( img + i*3, newVal );

                        ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                    }
                }
        }
    }

    if( region )
    {
        region->area = area;
        region->rect.x = XMin;
        region->rect.y = YMin;
        region->rect.width = XMax - XMin + 1;
        region->rect.height = YMax - YMin + 1;
        region->value = cvScalar(newVal[0], newVal[1], newVal[2], 0);
    }

    return CV_NO_ERR;
}
+\r
+\r
/* Simple (exact-match) flood fill for 32-bit floating-point images.
   Because all the operations on floats that are done during non-gradient
   floodfill are just copying and comparison on equality, the whole op is
   performed on the 32-bit integer bit patterns instead; only at the end are
   the patterns reinterpreted as floats (via Cv32suf) for the reported value.
   Algorithm and parameters are identical to icvFloodFill_8u_CnIR. */
static CvStatus
icvFloodFill_32f_CnIR( int* pImage, int step, CvSize roi, CvPoint seed,
                       int* _newVal, CvConnectedComp* region, int flags,
                       CvFFillSegment* buffer, int buffer_size, int cn )
{
    /* convert the byte step to an element step once, up front */
    int* img = pImage + (step /= sizeof(pImage[0])) * seed.y;
    int i, L, R;
    int area = 0;
    int val0[] = {0,0,0};       /* bit pattern of the color being replaced */
    int newVal[] = {0,0,0};
    int XMin, XMax, YMin = seed.y, YMax = seed.y;  /* bounding-box accumulators */
    int _8_connectivity = (flags & 255) == 8;
    CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;

    L = R = XMin = XMax = seed.x;

    /* grow the initial span left and right from the seed */
    if( cn == 1 )
    {
        val0[0] = img[L];
        newVal[0] = _newVal[0];

        img[L] = newVal[0];

        while( ++R < roi.width && img[R] == val0[0] )
            img[R] = newVal[0];

        while( --L >= 0 && img[L] == val0[0] )
            img[L] = newVal[0];
    }
    else
    {
        assert( cn == 3 );
        ICV_SET_C3( val0, img + L*3 );
        ICV_SET_C3( newVal, _newVal );

        ICV_SET_C3( img + L*3, newVal );

        while( --L >= 0 && ICV_EQ_C3( img + L*3, val0 ))
            ICV_SET_C3( img + L*3, newVal );

        while( ++R < roi.width && ICV_EQ_C3( img + R*3, val0 ))
            ICV_SET_C3( img + R*3, newVal );
    }

    /* undo the one-pixel overshoot of the scans above */
    XMax = --R;
    XMin = ++L;
    ICV_PUSH( seed.y, L, R, R + 1, R, UP );

    while( head != tail )
    {
        int k, YC, PL, PR, dir;
        ICV_POP( YC, L, R, PL, PR, dir );

        /* probe the opposite row fully, and the parent row only outside
           the parent span; 8-connectivity widens each range by 1 */
        int data[][3] =
        {
            {-dir, L - _8_connectivity, R + _8_connectivity},
            {dir, L - _8_connectivity, PL - 1},
            {dir, PR + 1, R + _8_connectivity}
        };

        if( region )
        {
            area += R - L + 1;

            if( XMax < R ) XMax = R;
            if( XMin > L ) XMin = L;
            if( YMax < YC ) YMax = YC;
            if( YMin > YC ) YMin = YC;
        }

        for( k = 0/*(unsigned)(YC - dir) >= (unsigned)roi.height*/; k < 3; k++ )
        {
            dir = data[k][0];
            img = pImage + (YC + dir) * step;
            int left = data[k][1];
            int right = data[k][2];

            if( (unsigned)(YC + dir) >= (unsigned)roi.height )
                continue;

            if( cn == 1 )
                for( i = left; i <= right; i++ )
                {
                    if( (unsigned)i < (unsigned)roi.width && img[i] == val0[0] )
                    {
                        int j = i;
                        img[i] = newVal[0];
                        while( --j >= 0 && img[j] == val0[0] )
                            img[j] = newVal[0];

                        while( ++i < roi.width && img[i] == val0[0] )
                            img[i] = newVal[0];

                        ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                    }
                }
            else
                for( i = left; i <= right; i++ )
                {
                    if( (unsigned)i < (unsigned)roi.width && ICV_EQ_C3( img + i*3, val0 ))
                    {
                        int j = i;
                        ICV_SET_C3( img + i*3, newVal );
                        while( --j >= 0 && ICV_EQ_C3( img + j*3, val0 ))
                            ICV_SET_C3( img + j*3, newVal );

                        while( ++i < roi.width && ICV_EQ_C3( img + i*3, val0 ))
                            ICV_SET_C3( img + i*3, newVal );

                        ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                    }
                }
        }
    }

    if( region )
    {
        Cv32suf v0, v1, v2;
        region->area = area;
        region->rect.x = XMin;
        region->rect.y = YMin;
        region->rect.width = XMax - XMin + 1;
        region->rect.height = YMax - YMin + 1;
        /* reinterpret the stored bit patterns back as floats */
        v0.i = newVal[0]; v1.i = newVal[1]; v2.i = newVal[2];
        region->value = cvScalar( v0.f, v1.f, v2.f );
    }

    return CV_NO_ERR;
}
+\r
+/****************************************************************************************\\r
+* Gradient Floodfill *\r
+\****************************************************************************************/\r
+\r
/* Per-channel "is the difference within [-lo, up]?" predicates.
   Integer variants use a shifted-range trick: the caller precomputes
   d_lw = lo and interval = lo + up, so  -lo <= p1-p2 <= up  becomes the
   single unsigned comparison  (unsigned)(p1-p2+lo) <= lo+up.
   Float variants use d_lw = (lo-up)/2, interval = (lo+up)/2 and a fabs().
   All expect `d_lw` and `interval` arrays to be in scope at expansion. */
#define DIFF_INT_C1(p1,p2) ((unsigned)((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0])

#define DIFF_INT_C3(p1,p2) ((unsigned)((p1)[0] - (p2)[0] + d_lw[0])<= interval[0] && \
                            (unsigned)((p1)[1] - (p2)[1] + d_lw[1])<= interval[1] && \
                            (unsigned)((p1)[2] - (p2)[2] + d_lw[2])<= interval[2])

#define DIFF_FLT_C1(p1,p2) (fabs((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0])

#define DIFF_FLT_C3(p1,p2) (fabs((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0] && \
                            fabs((p1)[1] - (p2)[1] + d_lw[1]) <= interval[1] && \
                            fabs((p1)[2] - (p2)[2] + d_lw[2]) <= interval[2])
+\r
/* Gradient (tolerance-based) flood fill for 8-bit, 1- or 3-channel images.
   A pixel joins the component when its per-channel difference from a
   reference pixel lies within [-_d_lw, _d_up]; the reference is the seed
   (CV_FLOODFILL_FIXED_RANGE) or the already-accepted neighbour (floating
   range).  Progress is tracked in a separate 1-byte mask with a 1-pixel
   border; accepted pixels get `newMaskVal` (bits 8..15 of `flags`, or 1).
   If CV_FLOODFILL_MASK_ONLY is set the image itself is not modified and
   `region->value` reports the mean of the component instead.
   NOTE(review): the mask border must be pre-set to non-zero by the caller
   (cvFloodFill does this) — the scans below rely on it to stop at edges. */
static CvStatus
icvFloodFill_Grad_8u_CnIR( uchar* pImage, int step, uchar* pMask, int maskStep,
                           CvSize /*roi*/, CvPoint seed, uchar* _newVal, uchar* _d_lw,
                           uchar* _d_up, CvConnectedComp* region, int flags,
                           CvFFillSegment* buffer, int buffer_size, int cn )
{
    uchar* img = pImage + step*seed.y;
    /* skip the mask's top border row and left border column */
    uchar* mask = (pMask += maskStep + 1) + maskStep*seed.y;
    int i, L, R;
    int area = 0;
    int sum[] = {0,0,0}, val0[] = {0,0,0};
    uchar newVal[] = {0,0,0};
    int d_lw[] = {0,0,0};
    unsigned interval[] = {0,0,0};  /* lo+up per channel; see DIFF_INT_* */
    int XMin, XMax, YMin = seed.y, YMax = seed.y;
    int _8_connectivity = (flags & 255) == 8;
    int fixedRange = flags & CV_FLOODFILL_FIXED_RANGE;
    int fillImage = (flags & CV_FLOODFILL_MASK_ONLY) == 0;
    uchar newMaskVal = (uchar)(flags & 0xff00 ? flags >> 8 : 1);
    CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;

    L = R = seed.x;
    if( mask[L] )           /* seed already filled / outside allowed region */
        return CV_OK;

    mask[L] = newMaskVal;

    for( i = 0; i < cn; i++ )
    {
        newVal[i] = _newVal[i];
        d_lw[i] = _d_lw[i];
        interval[i] = (unsigned)(_d_up[i] + _d_lw[i]);
        if( fixedRange )
            val0[i] = img[L*cn+i];  /* remember the seed color */
    }

    /* grow the initial span; the mask border stops the scans at the edges */
    if( cn == 1 )
    {
        if( fixedRange )
        {
            while( !mask[R + 1] && DIFF_INT_C1( img + (R+1), val0 ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_INT_C1( img + (L-1), val0 ))
                mask[--L] = newMaskVal;
        }
        else
        {
            while( !mask[R + 1] && DIFF_INT_C1( img + (R+1), img + R ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_INT_C1( img + (L-1), img + L ))
                mask[--L] = newMaskVal;
        }
    }
    else
    {
        if( fixedRange )
        {
            while( !mask[R + 1] && DIFF_INT_C3( img + (R+1)*3, val0 ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_INT_C3( img + (L-1)*3, val0 ))
                mask[--L] = newMaskVal;
        }
        else
        {
            while( !mask[R + 1] && DIFF_INT_C3( img + (R+1)*3, img + R*3 ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_INT_C3( img + (L-1)*3, img + L*3 ))
                mask[--L] = newMaskVal;
        }
    }

    XMax = R;
    XMin = L;
    ICV_PUSH( seed.y, L, R, R + 1, R, UP );

    while( head != tail )
    {
        int k, YC, PL, PR, dir, curstep;
        ICV_POP( YC, L, R, PL, PR, dir );

        /* probe the opposite row fully and the parent row outside the parent
           span; 8-connectivity widens each range by one */
        int data[][3] =
        {
            {-dir, L - _8_connectivity, R + _8_connectivity},
            {dir, L - _8_connectivity, PL - 1},
            {dir, PR + 1, R + _8_connectivity}
        };

        unsigned length = (unsigned)(R-L);

        if( region )
        {
            area += (int)length + 1;

            if( XMax < R ) XMax = R;
            if( XMin > L ) XMin = L;
            if( YMax < YC ) YMax = YC;
            if( YMin > YC ) YMin = YC;
        }

        if( cn == 1 )
        {
            for( k = 0; k < 3; k++ )
            {
                dir = data[k][0];
                curstep = dir * step;  /* byte offset back to the parent row */
                img = pImage + (YC + dir) * step;
                mask = pMask + (YC + dir) * maskStep;
                int left = data[k][1];
                int right = data[k][2];

                if( fixedRange )
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_INT_C1( img + i, val0 ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_INT_C1( img + j, val0 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] && DIFF_INT_C1( img + i, val0 ))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else if( !_8_connectivity )
                    /* floating range, 4-connectivity: compare with the
                       vertical neighbour to enter a span, then with the
                       horizontal neighbour while extending it */
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_INT_C1( img + i, img - curstep + i ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_INT_C1( img + j, img + (j+1) ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   (DIFF_INT_C1( img + i, img + (i-1) ) ||
                                   (DIFF_INT_C1( img + i, img + i - curstep) && i <= R)))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else
                    /* floating range, 8-connectivity: a pixel may be admitted
                       by any of its three parent-row neighbours; `idx` tests
                       (via one unsigned compare) that the neighbour column
                       lies within the parent span [L, R] */
                    for( i = left; i <= right; i++ )
                    {
                        int idx, val[1];

                        if( !mask[i] &&
                            (((val[0] = img[i],
                            (unsigned)(idx = i-L-1) <= length) &&
                            DIFF_INT_C1( val, img - curstep + (i-1))) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_INT_C1( val, img - curstep + i )) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_INT_C1( val, img - curstep + (i+1) ))))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_INT_C1( img + j, img + (j+1) ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   ((val[0] = img[i],
                                   DIFF_INT_C1( val, img + (i-1) )) ||
                                   (((unsigned)(idx = i-L-1) <= length &&
                                   DIFF_INT_C1( val, img - curstep + (i-1) ))) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_INT_C1( val, img - curstep + i )) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_INT_C1( val, img - curstep + (i+1) ))))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
            }

            /* finalize the popped span: repaint it or accumulate its sum */
            img = pImage + YC * step;
            if( fillImage )
                for( i = L; i <= R; i++ )
                    img[i] = newVal[0];
            else if( region )
                for( i = L; i <= R; i++ )
                    sum[0] += img[i];
        }
        else
        {
            for( k = 0; k < 3; k++ )
            {
                dir = data[k][0];
                curstep = dir * step;
                img = pImage + (YC + dir) * step;
                mask = pMask + (YC + dir) * maskStep;
                int left = data[k][1];
                int right = data[k][2];

                if( fixedRange )
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_INT_C3( img + i*3, val0 ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_INT_C3( img + j*3, val0 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] && DIFF_INT_C3( img + i*3, val0 ))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else if( !_8_connectivity )
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_INT_C3( img + i*3, img - curstep + i*3 ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_INT_C3( img + j*3, img + (j+1)*3 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   (DIFF_INT_C3( img + i*3, img + (i-1)*3 ) ||
                                   (DIFF_INT_C3( img + i*3, img + i*3 - curstep) && i <= R)))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else
                    /* floating range, 8-connectivity (see C1 case above) */
                    for( i = left; i <= right; i++ )
                    {
                        int idx, val[3];

                        if( !mask[i] &&
                            (((ICV_SET_C3( val, img+i*3 ),
                            (unsigned)(idx = i-L-1) <= length) &&
                            DIFF_INT_C3( val, img - curstep + (i-1)*3 )) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_INT_C3( val, img - curstep + i*3 )) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_INT_C3( val, img - curstep + (i+1)*3 ))))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_INT_C3( img + j*3, img + (j+1)*3 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   ((ICV_SET_C3( val, img + i*3 ),
                                   DIFF_INT_C3( val, img + (i-1)*3 )) ||
                                   (((unsigned)(idx = i-L-1) <= length &&
                                   DIFF_INT_C3( val, img - curstep + (i-1)*3 ))) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_INT_C3( val, img - curstep + i*3 )) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_INT_C3( val, img - curstep + (i+1)*3 ))))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
            }

            img = pImage + YC * step;
            if( fillImage )
                for( i = L; i <= R; i++ )
                    ICV_SET_C3( img + i*3, newVal );
            else if( region )
                for( i = L; i <= R; i++ )
                {
                    sum[0] += img[i*3];
                    sum[1] += img[i*3+1];
                    sum[2] += img[i*3+2];
                }
        }
    }

    if( region )
    {
        region->area = area;
        region->rect.x = XMin;
        region->rect.y = YMin;
        region->rect.width = XMax - XMin + 1;
        region->rect.height = YMax - YMin + 1;

        if( fillImage )
            region->value = cvScalar(newVal[0], newVal[1], newVal[2]);
        else
        {
            /* mask-only mode: report the component's mean color */
            double iarea = area ? 1./area : 0;
            region->value = cvScalar(sum[0]*iarea, sum[1]*iarea, sum[2]*iarea);
        }
    }

    return CV_NO_ERR;
}
+\r
+\r
/* Gradient (tolerance-based) flood fill for 32-bit float images; mirrors
   icvFloodFill_Grad_8u_CnIR, but the tolerance test uses
   fabs(diff + (lo-up)/2) <= (lo+up)/2  (see DIFF_FLT_*), and the mean in
   mask-only mode is accumulated in doubles. */
static CvStatus
icvFloodFill_Grad_32f_CnIR( float* pImage, int step, uchar* pMask, int maskStep,
                            CvSize /*roi*/, CvPoint seed, float* _newVal, float* _d_lw,
                            float* _d_up, CvConnectedComp* region, int flags,
                            CvFFillSegment* buffer, int buffer_size, int cn )
{
    /* convert the byte step to a float-element step once */
    float* img = pImage + (step /= sizeof(float))*seed.y;
    /* skip the mask's top border row and left border column */
    uchar* mask = (pMask += maskStep + 1) + maskStep*seed.y;
    int i, L, R;
    int area = 0;
    double sum[] = {0,0,0}, val0[] = {0,0,0};
    float newVal[] = {0,0,0};
    float d_lw[] = {0,0,0};
    float interval[] = {0,0,0};
    int XMin, XMax, YMin = seed.y, YMax = seed.y;
    int _8_connectivity = (flags & 255) == 8;
    int fixedRange = flags & CV_FLOODFILL_FIXED_RANGE;
    int fillImage = (flags & CV_FLOODFILL_MASK_ONLY) == 0;
    uchar newMaskVal = (uchar)(flags & 0xff00 ? flags >> 8 : 1);
    CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer;

    L = R = seed.x;
    if( mask[L] )           /* seed already filled / excluded by the mask */
        return CV_OK;

    mask[L] = newMaskVal;

    for( i = 0; i < cn; i++ )
    {
        newVal[i] = _newVal[i];
        /* recenter the asymmetric interval for the fabs() test */
        d_lw[i] = 0.5f*(_d_lw[i] - _d_up[i]);
        interval[i] = 0.5f*(_d_lw[i] + _d_up[i]);
        if( fixedRange )
            val0[i] = img[L*cn+i];  /* remember the seed color */
    }

    /* grow the initial span; the mask border stops the scans at the edges */
    if( cn == 1 )
    {
        if( fixedRange )
        {
            while( !mask[R + 1] && DIFF_FLT_C1( img + (R+1), val0 ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_FLT_C1( img + (L-1), val0 ))
                mask[--L] = newMaskVal;
        }
        else
        {
            while( !mask[R + 1] && DIFF_FLT_C1( img + (R+1), img + R ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_FLT_C1( img + (L-1), img + L ))
                mask[--L] = newMaskVal;
        }
    }
    else
    {
        if( fixedRange )
        {
            while( !mask[R + 1] && DIFF_FLT_C3( img + (R+1)*3, val0 ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_FLT_C3( img + (L-1)*3, val0 ))
                mask[--L] = newMaskVal;
        }
        else
        {
            while( !mask[R + 1] && DIFF_FLT_C3( img + (R+1)*3, img + R*3 ))
                mask[++R] = newMaskVal;

            while( !mask[L - 1] && DIFF_FLT_C3( img + (L-1)*3, img + L*3 ))
                mask[--L] = newMaskVal;
        }
    }

    XMax = R;
    XMin = L;
    ICV_PUSH( seed.y, L, R, R + 1, R, UP );

    while( head != tail )
    {
        int k, YC, PL, PR, dir, curstep;
        ICV_POP( YC, L, R, PL, PR, dir );

        /* probe the opposite row fully and the parent row outside the parent
           span; 8-connectivity widens each range by one */
        int data[][3] =
        {
            {-dir, L - _8_connectivity, R + _8_connectivity},
            {dir, L - _8_connectivity, PL - 1},
            {dir, PR + 1, R + _8_connectivity}
        };

        unsigned length = (unsigned)(R-L);

        if( region )
        {
            area += (int)length + 1;

            if( XMax < R ) XMax = R;
            if( XMin > L ) XMin = L;
            if( YMax < YC ) YMax = YC;
            if( YMin > YC ) YMin = YC;
        }

        if( cn == 1 )
        {
            for( k = 0; k < 3; k++ )
            {
                dir = data[k][0];
                curstep = dir * step;  /* element offset back to the parent row */
                img = pImage + (YC + dir) * step;
                mask = pMask + (YC + dir) * maskStep;
                int left = data[k][1];
                int right = data[k][2];

                if( fixedRange )
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_FLT_C1( img + i, val0 ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_FLT_C1( img + j, val0 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] && DIFF_FLT_C1( img + i, val0 ))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else if( !_8_connectivity )
                    /* floating range, 4-connectivity: vertical neighbour to
                       enter a span, horizontal neighbour to extend it */
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_FLT_C1( img + i, img - curstep + i ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_FLT_C1( img + j, img + (j+1) ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   (DIFF_FLT_C1( img + i, img + (i-1) ) ||
                                   (DIFF_FLT_C1( img + i, img + i - curstep) && i <= R)))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else
                    /* floating range, 8-connectivity: any of the three
                       parent-row neighbours within [L, R] may admit pixel i */
                    for( i = left; i <= right; i++ )
                    {
                        int idx;
                        float val[1];

                        if( !mask[i] &&
                            (((val[0] = img[i],
                            (unsigned)(idx = i-L-1) <= length) &&
                            DIFF_FLT_C1( val, img - curstep + (i-1) )) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_FLT_C1( val, img - curstep + i )) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_FLT_C1( val, img - curstep + (i+1) ))))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_FLT_C1( img + j, img + (j+1) ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   ((val[0] = img[i],
                                   DIFF_FLT_C1( val, img + (i-1) )) ||
                                   (((unsigned)(idx = i-L-1) <= length &&
                                   DIFF_FLT_C1( val, img - curstep + (i-1) ))) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_FLT_C1( val, img - curstep + i )) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_FLT_C1( val, img - curstep + (i+1) ))))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
            }

            /* finalize the popped span: repaint or accumulate */
            img = pImage + YC * step;
            if( fillImage )
                for( i = L; i <= R; i++ )
                    img[i] = newVal[0];
            else if( region )
                for( i = L; i <= R; i++ )
                    sum[0] += img[i];
        }
        else
        {
            for( k = 0; k < 3; k++ )
            {
                dir = data[k][0];
                curstep = dir * step;
                img = pImage + (YC + dir) * step;
                mask = pMask + (YC + dir) * maskStep;
                int left = data[k][1];
                int right = data[k][2];

                if( fixedRange )
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_FLT_C3( img + i*3, val0 ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_FLT_C3( img + j*3, val0 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] && DIFF_FLT_C3( img + i*3, val0 ))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else if( !_8_connectivity )
                    for( i = left; i <= right; i++ )
                    {
                        if( !mask[i] && DIFF_FLT_C3( img + i*3, img - curstep + i*3 ))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_FLT_C3( img + j*3, img + (j+1)*3 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   (DIFF_FLT_C3( img + i*3, img + (i-1)*3 ) ||
                                   (DIFF_FLT_C3( img + i*3, img + i*3 - curstep) && i <= R)))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
                else
                    /* floating range, 8-connectivity (see C1 case above) */
                    for( i = left; i <= right; i++ )
                    {
                        int idx;
                        float val[3];

                        if( !mask[i] &&
                            (((ICV_SET_C3( val, img+i*3 ),
                            (unsigned)(idx = i-L-1) <= length) &&
                            DIFF_FLT_C3( val, img - curstep + (i-1)*3 )) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_FLT_C3( val, img - curstep + i*3 )) ||
                            ((unsigned)(++idx) <= length &&
                            DIFF_FLT_C3( val, img - curstep + (i+1)*3 ))))
                        {
                            int j = i;
                            mask[i] = newMaskVal;
                            while( !mask[--j] && DIFF_FLT_C3( img + j*3, img + (j+1)*3 ))
                                mask[j] = newMaskVal;

                            while( !mask[++i] &&
                                   ((ICV_SET_C3( val, img + i*3 ),
                                   DIFF_FLT_C3( val, img + (i-1)*3 )) ||
                                   (((unsigned)(idx = i-L-1) <= length &&
                                   DIFF_FLT_C3( val, img - curstep + (i-1)*3 ))) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_FLT_C3( val, img - curstep + i*3 )) ||
                                   ((unsigned)(++idx) <= length &&
                                   DIFF_FLT_C3( val, img - curstep + (i+1)*3 ))))
                                mask[i] = newMaskVal;

                            ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir );
                        }
                    }
            }

            img = pImage + YC * step;
            if( fillImage )
                for( i = L; i <= R; i++ )
                    ICV_SET_C3( img + i*3, newVal );
            else if( region )
                for( i = L; i <= R; i++ )
                {
                    sum[0] += img[i*3];
                    sum[1] += img[i*3+1];
                    sum[2] += img[i*3+2];
                }
        }
    }

    if( region )
    {
        region->area = area;
        region->rect.x = XMin;
        region->rect.y = YMin;
        region->rect.width = XMax - XMin + 1;
        region->rect.height = YMax - YMin + 1;

        if( fillImage )
            region->value = cvScalar(newVal[0], newVal[1], newVal[2]);
        else
        {
            /* mask-only mode: report the component's mean color */
            double iarea = area ? 1./area : 0;
            region->value = cvScalar(sum[0]*iarea, sum[1]*iarea, sum[2]*iarea);
        }
    }

    return CV_NO_ERR;
}
+\r
+\r
+/****************************************************************************************\\r
+* External Functions *\r
+\****************************************************************************************/\r
+\r
/* Dispatch signature of the simple (exact-match) fill implementations. */
typedef CvStatus (CV_CDECL* CvFloodFillFunc)(
               void* img, int step, CvSize size, CvPoint seed, void* newval,
               CvConnectedComp* comp, int flags, void* buffer, int buffer_size, int cn );

/* Dispatch signature of the gradient (tolerance-based) fill implementations. */
typedef CvStatus (CV_CDECL* CvFloodFillGradFunc)(
               void* img, int step, uchar* mask, int maskStep, CvSize size,
               CvPoint seed, void* newval, void* d_lw, void* d_up, void* ccomp,
               int flags, void* buffer, int buffer_size, int cn );
+\r
/* Populates the depth-indexed dispatch tables used by cvFloodFill:
   slot 0 handles 8-bit images, slot 1 handles 32-bit float images. */
static void icvInitFloodFill( void** ffill_tab,
                              void** ffillgrad_tab )
{
    ffill_tab[0] = (void*)icvFloodFill_8u_CnIR;
    ffill_tab[1] = (void*)icvFloodFill_32f_CnIR;

    ffillgrad_tab[0] = (void*)icvFloodFill_Grad_8u_CnIR;
    ffillgrad_tab[1] = (void*)icvFloodFill_Grad_32f_CnIR;
}
+\r
+\r
/* Public C entry point: validates arguments, prepares the span buffer and
   (if needed) a bordered mask, then dispatches to the simple or gradient
   fill implementation.
   arr        - input/output image (8u or 32f, 1 or 3 channels)
   seed_point - starting pixel, must be inside the image
   newVal     - color to paint (ignored for CV_FLOODFILL_MASK_ONLY)
   lo_diff/up_diff - non-negative per-channel tolerances; all-zero selects
                the fast exact-match path (when no mask is involved)
   comp       - optional output: area, bbox, value of the filled component
   flags      - connectivity (low byte), mask fill value (second byte),
                CV_FLOODFILL_FIXED_RANGE, CV_FLOODFILL_MASK_ONLY
   maskarr    - optional 8u mask, 2 pixels wider/taller than the image */
CV_IMPL void
cvFloodFill( CvArr* arr, CvPoint seed_point,
             CvScalar newVal, CvScalar lo_diff, CvScalar up_diff,
             CvConnectedComp* comp, int flags, CvArr* maskarr )
{
    /* lazily-initialized dispatch tables, indexed by depth (8u=0, 32f=1) */
    static void* ffill_tab[4];
    static void* ffillgrad_tab[4];
    static int inittab = 0;

    CvMat* tempMask = 0;
    CvFFillSegment* buffer = 0;
    CV_FUNCNAME( "cvFloodFill" );

    if( comp )
        memset( comp, 0, sizeof(*comp) );

    __BEGIN__;

    int i, type, depth, cn, is_simple, idx;
    int buffer_size, connectivity = flags & 255;
    double nv_buf[4] = {0,0,0,0};
    union { uchar b[4]; float f[4]; } ld_buf, ud_buf;
    CvMat stub, *img = (CvMat*)arr;
    CvMat maskstub, *mask = (CvMat*)maskarr;
    CvSize size;

    if( !inittab )
    {
        icvInitFloodFill( ffill_tab, ffillgrad_tab );
        inittab = 1;
    }

    CV_CALL( img = cvGetMat( img, &stub ));
    type = CV_MAT_TYPE( img->type );
    depth = CV_MAT_DEPTH(type);
    cn = CV_MAT_CN(type);

    idx = type == CV_8UC1 || type == CV_8UC3 ? 0 :
        type == CV_32FC1 || type == CV_32FC3 ? 1 : -1;

    if( idx < 0 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( connectivity == 0 )
        connectivity = 4;
    else if( connectivity != 4 && connectivity != 8 )
        CV_ERROR( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" );

    is_simple = mask == 0 && (flags & CV_FLOODFILL_MASK_ONLY) == 0;

    for( i = 0; i < cn; i++ )
    {
        if( lo_diff.val[i] < 0 || up_diff.val[i] < 0 )
            CV_ERROR( CV_StsBadArg, "lo_diff and up_diff must be non-negative" );
        /* any non-zero tolerance forces the gradient path */
        is_simple &= fabs(lo_diff.val[i]) < DBL_EPSILON && fabs(up_diff.val[i]) < DBL_EPSILON;
    }

    size = cvGetMatSize( img );

    if( (unsigned)seed_point.x >= (unsigned)size.width ||
        (unsigned)seed_point.y >= (unsigned)size.height )
        CV_ERROR( CV_StsOutOfRange, "Seed point is outside of image" );

    cvScalarToRawData( &newVal, &nv_buf, type, 0 );
    /* worst-case number of pending spans for the scanline algorithm */
    buffer_size = MAX( size.width, size.height )*2;
    CV_CALL( buffer = (CvFFillSegment*)cvAlloc( buffer_size*sizeof(buffer[0])));

    if( is_simple )
    {
        int elem_size = CV_ELEM_SIZE(type);
        const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x;
        CvFloodFillFunc func = (CvFloodFillFunc)ffill_tab[idx];
        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );
        // check if the new value is different from the current value at the seed point.
        // if they are exactly the same, use the generic version with mask to avoid infinite loops.
        for( i = 0; i < elem_size; i++ )
            if( seed_ptr[i] != ((uchar*)nv_buf)[i] )
                break;
        if( i < elem_size )
        {
            IPPI_CALL( func( img->data.ptr, img->step, size,
                             seed_point, &nv_buf, comp, flags,
                             buffer, buffer_size, cn ));
            EXIT;
        }
    }

    /* gradient / mask path */
    {
        CvFloodFillGradFunc func = (CvFloodFillGradFunc)ffillgrad_tab[idx];
        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        if( !mask )
        {
            /* created mask will be 8-byte aligned */
            tempMask = cvCreateMat( size.height + 2, (size.width + 9) & -8, CV_8UC1 );
            mask = tempMask;
        }
        else
        {
            CV_CALL( mask = cvGetMat( mask, &maskstub ));
            if( !CV_IS_MASK_ARR( mask ))
                CV_ERROR( CV_StsBadMask, "" );

            if( mask->width != size.width + 2 || mask->height != size.height + 2 )
                CV_ERROR( CV_StsUnmatchedSizes, "mask must be 2 pixel wider "
                                       "and 2 pixel taller than filled image" );
        }

        /* set the 1-pixel mask border to 1 so the fill scans stop at edges;
           a freshly created mask interior is also cleared to 0 */
        {
            int width = tempMask ? mask->step : size.width + 2;
            uchar* mask_row = mask->data.ptr + mask->step;
            memset( mask_row - mask->step, 1, width );

            for( i = 1; i <= size.height; i++, mask_row += mask->step )
            {
                if( tempMask )
                    memset( mask_row, 0, width );
                mask_row[0] = mask_row[size.width+1] = (uchar)1;
            }
            memset( mask_row, 1, width );
        }

        /* pack the per-channel tolerances in the element type the
           implementation expects (uchar for 8u, float for 32f) */
        if( depth == CV_8U )
            for( i = 0; i < cn; i++ )
            {
                int t = cvFloor(lo_diff.val[i]);
                ld_buf.b[i] = CV_CAST_8U(t);
                t = cvFloor(up_diff.val[i]);
                ud_buf.b[i] = CV_CAST_8U(t);
            }
        else
            for( i = 0; i < cn; i++ )
            {
                ld_buf.f[i] = (float)lo_diff.val[i];
                ud_buf.f[i] = (float)up_diff.val[i];
            }

        IPPI_CALL( func( img->data.ptr, img->step, mask->data.ptr, mask->step,
                         size, seed_point, &nv_buf, ld_buf.f, ud_buf.f,
                         comp, flags, buffer, buffer_size, cn ));
    }

    __END__;

    cvFree( &buffer );
    cvReleaseMat( &tempMask );
}
+\r
+\r
+int cv::floodFill( Mat& image, Point seedPoint,\r
+ Scalar newVal, Rect* rect,\r
+ Scalar loDiff, Scalar upDiff, int flags )\r
+{\r
+ CvConnectedComp ccomp;\r
+ CvMat _image = image;\r
+ cvFloodFill(&_image, seedPoint, newVal, loDiff, upDiff, &ccomp, flags, 0);\r
+ if( rect )\r
+ *rect = ccomp.rect;\r
+ return cvRound(ccomp.area);\r
+}\r
+\r
+int cv::floodFill( Mat& image, Mat& mask,\r
+ Point seedPoint, Scalar newVal, Rect* rect, \r
+ Scalar loDiff, Scalar upDiff, int flags )\r
+{\r
+ CvConnectedComp ccomp;\r
+ CvMat _image = image, _mask = mask;\r
+ cvFloodFill(&_image, seedPoint, newVal, loDiff, upDiff, &ccomp, flags, &_mask);\r
+ if( rect )\r
+ *rect = ccomp.rect;\r
+ return cvRound(ccomp.area);\r
+}\r
+\r
+/* End of file. */\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-#include "_cvmodelest.h"
-
-template<typename T> int icvCompressPoints( T* ptr, const uchar* mask, int mstep, int count )
-{
- int i, j;
- for( i = j = 0; i < count; i++ )
- if( mask[i*mstep] )
- {
- if( i > j )
- ptr[j] = ptr[i];
- j++;
- }
- return j;
-}
-
-class CvHomographyEstimator : public CvModelEstimator2
-{
-public:
- CvHomographyEstimator( int modelPoints );
-
- virtual int runKernel( const CvMat* m1, const CvMat* m2, CvMat* model );
- virtual bool refine( const CvMat* m1, const CvMat* m2,
- CvMat* model, int maxIters );
-protected:
- virtual void computeReprojError( const CvMat* m1, const CvMat* m2,
- const CvMat* model, CvMat* error );
-};
-
-
-CvHomographyEstimator::CvHomographyEstimator(int _modelPoints)
- : CvModelEstimator2(_modelPoints, cvSize(3,3), 1)
-{
- assert( _modelPoints == 4 || _modelPoints == 5 );
-}
-
-int CvHomographyEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* H )
-{
- int i, count = m1->rows*m1->cols;
- const CvPoint2D64f* M = (const CvPoint2D64f*)m1->data.ptr;
- const CvPoint2D64f* m = (const CvPoint2D64f*)m2->data.ptr;
-
- double LtL[9][9], W[9][9], V[9][9];
- CvMat _LtL = cvMat( 9, 9, CV_64F, LtL );
- CvMat _W = cvMat( 9, 9, CV_64F, W );
- CvMat _V = cvMat( 9, 9, CV_64F, V );
- CvMat _H0 = cvMat( 3, 3, CV_64F, V[8] );
- CvMat _Htemp = cvMat( 3, 3, CV_64F, V[7] );
- CvPoint2D64f cM={0,0}, cm={0,0}, sM={0,0}, sm={0,0};
-
- for( i = 0; i < count; i++ )
- {
- cm.x += m[i].x; cm.y += m[i].y;
- cM.x += M[i].x; cM.y += M[i].y;
- }
-
- cm.x /= count; cm.y /= count;
- cM.x /= count; cM.y /= count;
-
- for( i = 0; i < count; i++ )
- {
- sm.x += fabs(m[i].x - cm.x);
- sm.y += fabs(m[i].y - cm.y);
- sM.x += fabs(M[i].x - cM.x);
- sM.y += fabs(M[i].y - cM.y);
- }
-
- sm.x = count/sm.x; sm.y = count/sm.y;
- sM.x = count/sM.x; sM.y = count/sM.y;
-
- double invHnorm[9] = { 1./sm.x, 0, cm.x, 0, 1./sm.y, cm.y, 0, 0, 1 };
- double Hnorm2[9] = { sM.x, 0, -cM.x*sM.x, 0, sM.y, -cM.y*sM.y, 0, 0, 1 };
- CvMat _invHnorm = cvMat( 3, 3, CV_64FC1, invHnorm );
- CvMat _Hnorm2 = cvMat( 3, 3, CV_64FC1, Hnorm2 );
-
- cvZero( &_LtL );
- for( i = 0; i < count; i++ )
- {
- double x = (m[i].x - cm.x)*sm.x, y = (m[i].y - cm.y)*sm.y;
- double X = (M[i].x - cM.x)*sM.x, Y = (M[i].y - cM.y)*sM.y;
- double Lx[] = { X, Y, 1, 0, 0, 0, -x*X, -x*Y, -x };
- double Ly[] = { 0, 0, 0, X, Y, 1, -y*X, -y*Y, -y };
- int j, k;
- for( j = 0; j < 9; j++ )
- for( k = j; k < 9; k++ )
- LtL[j][k] += Lx[j]*Lx[k] + Ly[j]*Ly[k];
- }
- cvCompleteSymm( &_LtL );
-
- cvSVD( &_LtL, &_W, 0, &_V, CV_SVD_MODIFY_A + CV_SVD_V_T );
- cvMatMul( &_invHnorm, &_H0, &_Htemp );
- cvMatMul( &_Htemp, &_Hnorm2, &_H0 );
- cvConvertScale( &_H0, H, 1./_H0.data.db[8] );
-
- return 1;
-}
-
-
-void CvHomographyEstimator::computeReprojError( const CvMat* m1, const CvMat* m2,
- const CvMat* model, CvMat* _err )
-{
- int i, count = m1->rows*m1->cols;
- const CvPoint2D64f* M = (const CvPoint2D64f*)m1->data.ptr;
- const CvPoint2D64f* m = (const CvPoint2D64f*)m2->data.ptr;
- const double* H = model->data.db;
- float* err = _err->data.fl;
-
- for( i = 0; i < count; i++ )
- {
- double ww = 1./(H[6]*M[i].x + H[7]*M[i].y + 1.);
- double dx = (H[0]*M[i].x + H[1]*M[i].y + H[2])*ww - m[i].x;
- double dy = (H[3]*M[i].x + H[4]*M[i].y + H[5])*ww - m[i].y;
- err[i] = (float)(dx*dx + dy*dy);
- }
-}
-
-bool CvHomographyEstimator::refine( const CvMat* m1, const CvMat* m2, CvMat* model, int maxIters )
-{
- CvLevMarq solver(8, 0, cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, maxIters, DBL_EPSILON));
- int i, j, k, count = m1->rows*m1->cols;
- const CvPoint2D64f* M = (const CvPoint2D64f*)m1->data.ptr;
- const CvPoint2D64f* m = (const CvPoint2D64f*)m2->data.ptr;
- CvMat modelPart = cvMat( solver.param->rows, solver.param->cols, model->type, model->data.ptr );
- cvCopy( &modelPart, solver.param );
-
- for(;;)
- {
- const CvMat* _param = 0;
- CvMat *_JtJ = 0, *_JtErr = 0;
- double* _errNorm = 0;
-
- if( !solver.updateAlt( _param, _JtJ, _JtErr, _errNorm ))
- break;
-
- for( i = 0; i < count; i++ )
- {
- const double* h = _param->data.db;
- double Mx = M[i].x, My = M[i].y;
- double ww = 1./(h[6]*Mx + h[7]*My + 1.);
- double _xi = (h[0]*Mx + h[1]*My + h[2])*ww;
- double _yi = (h[3]*Mx + h[4]*My + h[5])*ww;
- double err[] = { _xi - m[i].x, _yi - m[i].y };
- if( _JtJ || _JtErr )
- {
- double J[][8] =
- {
- { Mx*ww, My*ww, ww, 0, 0, 0, -Mx*ww*_xi, -My*ww*_xi },
- { 0, 0, 0, Mx*ww, My*ww, ww, -Mx*ww*_yi, -My*ww*_yi }
- };
-
- for( j = 0; j < 8; j++ )
- {
- for( k = j; k < 8; k++ )
- _JtJ->data.db[j*8+k] += J[0][j]*J[0][k] + J[1][j]*J[1][k];
- _JtErr->data.db[j] += J[0][j]*err[0] + J[1][j]*err[1];
- }
- }
- if( _errNorm )
- *_errNorm += err[0]*err[0] + err[1]*err[1];
- }
- }
-
- cvCopy( solver.param, &modelPart );
- return true;
-}
-
-
-CV_IMPL int
-cvFindHomography( const CvMat* objectPoints, const CvMat* imagePoints,
- CvMat* __H, int method, double ransacReprojThreshold,
- CvMat* mask )
-{
- const double confidence = 0.99;
- bool result = false;
- CvMat *m = 0, *M = 0, *tempMask = 0;
-
- CV_FUNCNAME( "cvFindHomography" );
-
- __BEGIN__;
-
- double H[9];
- CvMat _H = cvMat( 3, 3, CV_64FC1, H );
- int count;
-
- CV_ASSERT( CV_IS_MAT(imagePoints) && CV_IS_MAT(objectPoints) );
-
- count = MAX(imagePoints->cols, imagePoints->rows);
- CV_ASSERT( count >= 4 );
-
- m = cvCreateMat( 1, count, CV_64FC2 );
- cvConvertPointsHomogeneous( imagePoints, m );
-
- M = cvCreateMat( 1, count, CV_64FC2 );
- cvConvertPointsHomogeneous( objectPoints, M );
-
- if( mask )
- {
- CV_ASSERT( CV_IS_MASK_ARR(mask) && CV_IS_MAT_CONT(mask->type) &&
- (mask->rows == 1 || mask->cols == 1) &&
- mask->rows*mask->cols == count );
- tempMask = mask;
- }
- else if( count > 4 )
- tempMask = cvCreateMat( 1, count, CV_8U );
- if( tempMask )
- cvSet( tempMask, cvScalarAll(1.) );
-
- {
- CvHomographyEstimator estimator( MIN(count, 5) );
- if( count == 4 )
- method = 0;
- if( method == CV_LMEDS )
- result = estimator.runLMeDS( M, m, &_H, tempMask, confidence );
- else if( method == CV_RANSAC )
- result = estimator.runRANSAC( M, m, &_H, tempMask, ransacReprojThreshold, confidence );
- else
- result = estimator.runKernel( M, m, &_H ) > 0;
-
- if( result && count > 4 )
- {
- icvCompressPoints( (CvPoint2D64f*)M->data.ptr, tempMask->data.ptr, 1, count );
- count = icvCompressPoints( (CvPoint2D64f*)m->data.ptr, tempMask->data.ptr, 1, count );
- M->cols = m->cols = count;
- estimator.refine( M, m, &_H, 10 );
- }
- }
-
- if( result )
- cvConvert( &_H, __H );
-
- __END__;
-
- cvReleaseMat( &m );
- cvReleaseMat( &M );
- if( tempMask != mask )
- cvReleaseMat( &tempMask );
-
- return (int)result;
-}
-
-
-/* Evaluation of Fundamental Matrix from point correspondences.
- The original code has been written by Valery Mosyagin */
-
-/* The algorithms (except for RANSAC) and the notation have been taken from
- Zhengyou Zhang's research report
- "Determining the Epipolar Geometry and its Uncertainty: A Review"
- that can be found at http://www-sop.inria.fr/robotvis/personnel/zzhang/zzhang-eng.html */
-
-/************************************** 7-point algorithm *******************************/
-class CvFMEstimator : public CvModelEstimator2
-{
-public:
- CvFMEstimator( int _modelPoints );
-
- virtual int runKernel( const CvMat* m1, const CvMat* m2, CvMat* model );
- virtual int run7Point( const CvMat* m1, const CvMat* m2, CvMat* model );
- virtual int run8Point( const CvMat* m1, const CvMat* m2, CvMat* model );
-protected:
- virtual void computeReprojError( const CvMat* m1, const CvMat* m2,
- const CvMat* model, CvMat* error );
-};
-
-CvFMEstimator::CvFMEstimator( int _modelPoints )
-: CvModelEstimator2( _modelPoints, cvSize(3,3), _modelPoints == 7 ? 3 : 1 )
-{
- assert( _modelPoints == 7 || _modelPoints == 8 );
-}
-
-
-int CvFMEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* model )
-{
- return modelPoints == 7 ? run7Point( m1, m2, model ) : run8Point( m1, m2, model );
-}
-
-int CvFMEstimator::run7Point( const CvMat* _m1, const CvMat* _m2, CvMat* _fmatrix )
-{
- double a[7*9], w[7], v[9*9], c[4], r[3];
- double* f1, *f2;
- double t0, t1, t2;
- CvMat A = cvMat( 7, 9, CV_64F, a );
- CvMat V = cvMat( 9, 9, CV_64F, v );
- CvMat W = cvMat( 7, 1, CV_64F, w );
- CvMat coeffs = cvMat( 1, 4, CV_64F, c );
- CvMat roots = cvMat( 1, 3, CV_64F, r );
- const CvPoint2D64f* m1 = (const CvPoint2D64f*)_m1->data.ptr;
- const CvPoint2D64f* m2 = (const CvPoint2D64f*)_m2->data.ptr;
- double* fmatrix = _fmatrix->data.db;
- int i, k, n;
-
- // form a linear system: i-th row of A(=a) represents
- // the equation: (m2[i], 1)'*F*(m1[i], 1) = 0
- for( i = 0; i < 7; i++ )
- {
- double x0 = m1[i].x, y0 = m1[i].y;
- double x1 = m2[i].x, y1 = m2[i].y;
-
- a[i*9+0] = x1*x0;
- a[i*9+1] = x1*y0;
- a[i*9+2] = x1;
- a[i*9+3] = y1*x0;
- a[i*9+4] = y1*y0;
- a[i*9+5] = y1;
- a[i*9+6] = x0;
- a[i*9+7] = y0;
- a[i*9+8] = 1;
- }
-
- // A*(f11 f12 ... f33)' = 0 is singular (7 equations for 9 variables), so
- // the solution is linear subspace of dimensionality 2.
- // => use the last two singular vectors as a basis of the space
- // (according to SVD properties)
- cvSVD( &A, &W, 0, &V, CV_SVD_MODIFY_A + CV_SVD_V_T );
- f1 = v + 7*9;
- f2 = v + 8*9;
-
- // f1, f2 is a basis => lambda*f1 + mu*f2 is an arbitrary f. matrix.
- // as it is determined up to a scale, normalize lambda & mu (lambda + mu = 1),
- // so f ~ lambda*f1 + (1 - lambda)*f2.
- // use the additional constraint det(f) = det(lambda*f1 + (1-lambda)*f2) to find lambda.
- // it will be a cubic equation.
- // find c - polynomial coefficients.
- for( i = 0; i < 9; i++ )
- f1[i] -= f2[i];
-
- t0 = f2[4]*f2[8] - f2[5]*f2[7];
- t1 = f2[3]*f2[8] - f2[5]*f2[6];
- t2 = f2[3]*f2[7] - f2[4]*f2[6];
-
- c[3] = f2[0]*t0 - f2[1]*t1 + f2[2]*t2;
-
- c[2] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2 -
- f1[3]*(f2[1]*f2[8] - f2[2]*f2[7]) +
- f1[4]*(f2[0]*f2[8] - f2[2]*f2[6]) -
- f1[5]*(f2[0]*f2[7] - f2[1]*f2[6]) +
- f1[6]*(f2[1]*f2[5] - f2[2]*f2[4]) -
- f1[7]*(f2[0]*f2[5] - f2[2]*f2[3]) +
- f1[8]*(f2[0]*f2[4] - f2[1]*f2[3]);
-
- t0 = f1[4]*f1[8] - f1[5]*f1[7];
- t1 = f1[3]*f1[8] - f1[5]*f1[6];
- t2 = f1[3]*f1[7] - f1[4]*f1[6];
-
- c[1] = f2[0]*t0 - f2[1]*t1 + f2[2]*t2 -
- f2[3]*(f1[1]*f1[8] - f1[2]*f1[7]) +
- f2[4]*(f1[0]*f1[8] - f1[2]*f1[6]) -
- f2[5]*(f1[0]*f1[7] - f1[1]*f1[6]) +
- f2[6]*(f1[1]*f1[5] - f1[2]*f1[4]) -
- f2[7]*(f1[0]*f1[5] - f1[2]*f1[3]) +
- f2[8]*(f1[0]*f1[4] - f1[1]*f1[3]);
-
- c[0] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2;
-
- // solve the cubic equation; there can be 1 to 3 roots ...
- n = cvSolveCubic( &coeffs, &roots );
-
- if( n < 1 || n > 3 )
- return n;
-
- for( k = 0; k < n; k++, fmatrix += 9 )
- {
- // for each root form the fundamental matrix
- double lambda = r[k], mu = 1.;
- double s = f1[8]*r[k] + f2[8];
-
- // normalize each matrix, so that F(3,3) (~fmatrix[8]) == 1
- if( fabs(s) > DBL_EPSILON )
- {
- mu = 1./s;
- lambda *= mu;
- fmatrix[8] = 1.;
- }
- else
- fmatrix[8] = 0.;
-
- for( i = 0; i < 8; i++ )
- fmatrix[i] = f1[i]*lambda + f2[i]*mu;
- }
-
- return n;
-}
-
-
-int CvFMEstimator::run8Point( const CvMat* _m1, const CvMat* _m2, CvMat* _fmatrix )
-{
- double a[9*9], w[9], v[9*9];
- CvMat W = cvMat( 1, 9, CV_64F, w );
- CvMat V = cvMat( 9, 9, CV_64F, v );
- CvMat A = cvMat( 9, 9, CV_64F, a );
- CvMat U, F0, TF;
-
- CvPoint2D64f m0c = {0,0}, m1c = {0,0};
- double t, scale0 = 0, scale1 = 0;
-
- const CvPoint2D64f* m1 = (const CvPoint2D64f*)_m1->data.ptr;
- const CvPoint2D64f* m2 = (const CvPoint2D64f*)_m2->data.ptr;
- double* fmatrix = _fmatrix->data.db;
- int i, j, k, count = _m1->cols*_m1->rows;
-
- // compute centers and average distances for each of the two point sets
- for( i = 0; i < count; i++ )
- {
- double x = m1[i].x, y = m1[i].y;
- m0c.x += x; m0c.y += y;
-
- x = m2[i].x, y = m2[i].y;
- m1c.x += x; m1c.y += y;
- }
-
- // calculate the normalizing transformations for each of the point sets:
- // after the transformation each set will have the mass center at the coordinate origin
- // and the average distance from the origin will be ~sqrt(2).
- t = 1./count;
- m0c.x *= t; m0c.y *= t;
- m1c.x *= t; m1c.y *= t;
-
- for( i = 0; i < count; i++ )
- {
- double x = m1[i].x - m0c.x, y = m1[i].y - m0c.y;
- scale0 += sqrt(x*x + y*y);
-
- x = fabs(m2[i].x - m1c.x), y = fabs(m2[i].y - m1c.y);
- scale1 += sqrt(x*x + y*y);
- }
-
- scale0 *= t;
- scale1 *= t;
-
- if( scale0 < FLT_EPSILON || scale1 < FLT_EPSILON )
- return 0;
-
- scale0 = sqrt(2.)/scale0;
- scale1 = sqrt(2.)/scale1;
-
- cvZero( &A );
-
- // form a linear system Ax=0: for each selected pair of points m1 & m2,
- // the row of A(=a) represents the coefficients of equation: (m2, 1)'*F*(m1, 1) = 0
- // to save computation time, we compute (At*A) instead of A and then solve (At*A)x=0.
- for( i = 0; i < count; i++ )
- {
- double x0 = (m1[i].x - m0c.x)*scale0;
- double y0 = (m1[i].y - m0c.y)*scale0;
- double x1 = (m2[i].x - m1c.x)*scale1;
- double y1 = (m2[i].y - m1c.y)*scale1;
- double r[9] = { x1*x0, x1*y0, x1, y1*x0, y1*y0, y1, x0, y0, 1 };
- for( j = 0; j < 9; j++ )
- for( k = 0; k < 9; k++ )
- a[j*9+k] += r[j]*r[k];
- }
-
- cvSVD( &A, &W, 0, &V, CV_SVD_MODIFY_A + CV_SVD_V_T );
-
- for( i = 0; i < 8; i++ )
- {
- if( fabs(w[i]) < DBL_EPSILON )
- break;
- }
-
- if( i < 7 )
- return 0;
-
- F0 = cvMat( 3, 3, CV_64F, v + 9*8 ); // take the last column of v as a solution of Af = 0
-
- // make F0 singular (of rank 2) by decomposing it with SVD,
- // zeroing the last diagonal element of W and then composing the matrices back.
-
- // use v as a temporary storage for different 3x3 matrices
- W = U = V = TF = F0;
- W.data.db = v;
- U.data.db = v + 9;
- V.data.db = v + 18;
- TF.data.db = v + 27;
-
- cvSVD( &F0, &W, &U, &V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
- W.data.db[8] = 0.;
-
- // F0 <- U*diag([W(1), W(2), 0])*V'
- cvGEMM( &U, &W, 1., 0, 0., &TF, CV_GEMM_A_T );
- cvGEMM( &TF, &V, 1., 0, 0., &F0, 0/*CV_GEMM_B_T*/ );
-
- // apply the transformation that is inverse
- // to what we used to normalize the point coordinates
- {
- double tt0[] = { scale0, 0, -scale0*m0c.x, 0, scale0, -scale0*m0c.y, 0, 0, 1 };
- double tt1[] = { scale1, 0, -scale1*m1c.x, 0, scale1, -scale1*m1c.y, 0, 0, 1 };
- CvMat T0, T1;
- T0 = T1 = F0;
- T0.data.db = tt0;
- T1.data.db = tt1;
-
- // F0 <- T1'*F0*T0
- cvGEMM( &T1, &F0, 1., 0, 0., &TF, CV_GEMM_A_T );
- F0.data.db = fmatrix;
- cvGEMM( &TF, &T0, 1., 0, 0., &F0, 0 );
-
- // make F(3,3) = 1
- if( fabs(F0.data.db[8]) > FLT_EPSILON )
- cvScale( &F0, &F0, 1./F0.data.db[8] );
- }
-
- return 1;
-}
-
-
-void CvFMEstimator::computeReprojError( const CvMat* _m1, const CvMat* _m2,
- const CvMat* model, CvMat* _err )
-{
- int i, count = _m1->rows*_m1->cols;
- const CvPoint2D64f* m1 = (const CvPoint2D64f*)_m1->data.ptr;
- const CvPoint2D64f* m2 = (const CvPoint2D64f*)_m2->data.ptr;
- const double* F = model->data.db;
- float* err = _err->data.fl;
-
- for( i = 0; i < count; i++ )
- {
- double a, b, c, d1, d2, s1, s2;
-
- a = F[0]*m1[i].x + F[1]*m1[i].y + F[2];
- b = F[3]*m1[i].x + F[4]*m1[i].y + F[5];
- c = F[6]*m1[i].x + F[7]*m1[i].y + F[8];
-
- s2 = 1./(a*a + b*b);
- d2 = m2[i].x*a + m2[i].y*b + c;
-
- a = F[0]*m2[i].x + F[3]*m2[i].y + F[6];
- b = F[1]*m2[i].x + F[4]*m2[i].y + F[7];
- c = F[2]*m2[i].x + F[5]*m2[i].y + F[8];
-
- s1 = 1./(a*a + b*b);
- d1 = m1[i].x*a + m1[i].y*b + c;
-
- err[i] = (float)std::max(d1*d1*s1, d2*d2*s2);
- }
-}
-
-
-CV_IMPL int
-cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
- CvMat* fmatrix, int method,
- double param1, double param2, CvMat* mask )
-{
- int result = 0;
- CvMat *m1 = 0, *m2 = 0, *tempMask = 0;
-
- CV_FUNCNAME( "cvFindFundamentalMat" );
-
- __BEGIN__;
-
- double F[3*9];
- CvMat _F3x3 = cvMat( 3, 3, CV_64FC1, F ), _F9x3 = cvMat( 9, 3, CV_64FC1, F );
- int count;
-
- CV_ASSERT( CV_IS_MAT(points1) && CV_IS_MAT(points2) && CV_ARE_SIZES_EQ(points1, points2) );
- CV_ASSERT( CV_IS_MAT(fmatrix) && fmatrix->cols == 3 &&
- (fmatrix->rows == 3 || (fmatrix->rows == 9 && method == CV_FM_7POINT)) );
-
- count = MAX(points1->cols, points1->rows);
- if( count < 7 )
- EXIT;
-
- m1 = cvCreateMat( 1, count, CV_64FC2 );
- cvConvertPointsHomogeneous( points1, m1 );
-
- m2 = cvCreateMat( 1, count, CV_64FC2 );
- cvConvertPointsHomogeneous( points2, m2 );
-
- if( mask )
- {
- CV_ASSERT( CV_IS_MASK_ARR(mask) && CV_IS_MAT_CONT(mask->type) &&
- (mask->rows == 1 || mask->cols == 1) &&
- mask->rows*mask->cols == count );
- tempMask = cvCreateMatHeader(1, count, CV_8U);
- cvSetData(tempMask, mask->data.ptr, 0);
- }
- else if( count > 8 )
- tempMask = cvCreateMat( 1, count, CV_8U );
- if( tempMask )
- cvSet( tempMask, cvScalarAll(1.) );
-
- {
- CvFMEstimator estimator( MIN(count, (method & 3) == CV_FM_7POINT ? 7 : 8) );
- if( count == 7 )
- result = estimator.run7Point(m1, m2, &_F9x3);
- else if( count == 8 || method == CV_FM_8POINT )
- result = estimator.run8Point(m1, m2, &_F3x3);
- else if( count > 8 )
- {
- if( param1 <= 0 )
- param1 = 3;
- if( param2 < DBL_EPSILON || param2 > 1 - DBL_EPSILON )
- param2 = 0.99;
-
- if( (method & ~3) == CV_RANSAC )
- result = estimator.runRANSAC(m1, m2, &_F3x3, tempMask, param1, param2 );
- else
- result = estimator.runLMeDS(m1, m2, &_F3x3, tempMask, param2 );
- if( result <= 0 )
- EXIT;
- /*icvCompressPoints( (CvPoint2D64f*)m1->data.ptr, tempMask->data.ptr, 1, count );
- count = icvCompressPoints( (CvPoint2D64f*)m2->data.ptr, tempMask->data.ptr, 1, count );
- assert( count >= 8 );
- m1->cols = m2->cols = count;
- estimator.run8Point(m1, m2, &_F3x3);*/
- }
- }
-
- if( result )
- cvConvert( fmatrix->rows == 3 ? &_F3x3 : &_F9x3, fmatrix );
-
- __END__;
-
- cvReleaseMat( &m1 );
- cvReleaseMat( &m2 );
- if( tempMask != mask )
- cvReleaseMat( &tempMask );
-
- return result;
-}
-
-
-CV_IMPL void
-cvComputeCorrespondEpilines( const CvMat* points, int pointImageID,
- const CvMat* fmatrix, CvMat* lines )
-{
- CV_FUNCNAME( "cvComputeCorrespondEpilines" );
-
- __BEGIN__;
-
- int abc_stride, abc_plane_stride, abc_elem_size;
- int plane_stride, stride, elem_size;
- int i, dims, count, depth, cn, abc_dims, abc_count, abc_depth, abc_cn;
- uchar *ap, *bp, *cp;
- const uchar *xp, *yp, *zp;
- double f[9];
- CvMat F = cvMat( 3, 3, CV_64F, f );
-
- if( !CV_IS_MAT(points) )
- CV_ERROR( !points ? CV_StsNullPtr : CV_StsBadArg, "points parameter is not a valid matrix" );
-
- depth = CV_MAT_DEPTH(points->type);
- cn = CV_MAT_CN(points->type);
- if( (depth != CV_32F && depth != CV_64F) || (cn != 1 && cn != 2 && cn != 3) )
- CV_ERROR( CV_StsUnsupportedFormat, "The format of point matrix is unsupported" );
-
- if( points->rows > points->cols )
- {
- dims = cn*points->cols;
- count = points->rows;
- }
- else
- {
- if( (points->rows > 1 && cn > 1) || (points->rows == 1 && cn == 1) )
- CV_ERROR( CV_StsBadSize, "The point matrix does not have a proper layout (2xn, 3xn, nx2 or nx3)" );
- dims = cn * points->rows;
- count = points->cols;
- }
-
- if( dims != 2 && dims != 3 )
- CV_ERROR( CV_StsOutOfRange, "The dimensionality of points must be 2 or 3" );
-
- if( !CV_IS_MAT(fmatrix) )
- CV_ERROR( !fmatrix ? CV_StsNullPtr : CV_StsBadArg, "fmatrix is not a valid matrix" );
-
- if( CV_MAT_TYPE(fmatrix->type) != CV_32FC1 && CV_MAT_TYPE(fmatrix->type) != CV_64FC1 )
- CV_ERROR( CV_StsUnsupportedFormat, "fundamental matrix must have 32fC1 or 64fC1 type" );
-
- if( fmatrix->cols != 3 || fmatrix->rows != 3 )
- CV_ERROR( CV_StsBadSize, "fundamental matrix must be 3x3" );
-
- if( !CV_IS_MAT(lines) )
- CV_ERROR( !lines ? CV_StsNullPtr : CV_StsBadArg, "lines parameter is not a valid matrix" );
-
- abc_depth = CV_MAT_DEPTH(lines->type);
- abc_cn = CV_MAT_CN(lines->type);
- if( (abc_depth != CV_32F && abc_depth != CV_64F) || (abc_cn != 1 && abc_cn != 3) )
- CV_ERROR( CV_StsUnsupportedFormat, "The format of the matrix of lines is unsupported" );
-
- if( lines->rows > lines->cols )
- {
- abc_dims = abc_cn*lines->cols;
- abc_count = lines->rows;
- }
- else
- {
- if( (lines->rows > 1 && abc_cn > 1) || (lines->rows == 1 && abc_cn == 1) )
- CV_ERROR( CV_StsBadSize, "The lines matrix does not have a proper layout (3xn or nx3)" );
- abc_dims = abc_cn * lines->rows;
- abc_count = lines->cols;
- }
-
- if( abc_dims != 3 )
- CV_ERROR( CV_StsOutOfRange, "The lines matrix does not have a proper layout (3xn or nx3)" );
-
- if( abc_count != count )
- CV_ERROR( CV_StsUnmatchedSizes, "The numbers of points and lines are different" );
-
- elem_size = CV_ELEM_SIZE(depth);
- abc_elem_size = CV_ELEM_SIZE(abc_depth);
-
- if( points->rows == dims )
- {
- plane_stride = points->step;
- stride = elem_size;
- }
- else
- {
- plane_stride = elem_size;
- stride = points->rows == 1 ? dims*elem_size : points->step;
- }
-
- if( lines->rows == 3 )
- {
- abc_plane_stride = lines->step;
- abc_stride = abc_elem_size;
- }
- else
- {
- abc_plane_stride = abc_elem_size;
- abc_stride = lines->rows == 1 ? 3*abc_elem_size : lines->step;
- }
-
- CV_CALL( cvConvert( fmatrix, &F ));
- if( pointImageID == 2 )
- cvTranspose( &F, &F );
-
- xp = points->data.ptr;
- yp = xp + plane_stride;
- zp = dims == 3 ? yp + plane_stride : 0;
-
- ap = lines->data.ptr;
- bp = ap + abc_plane_stride;
- cp = bp + abc_plane_stride;
-
- for( i = 0; i < count; i++ )
- {
- double x, y, z = 1.;
- double a, b, c, nu;
-
- if( depth == CV_32F )
- {
- x = *(float*)xp; y = *(float*)yp;
- if( zp )
- z = *(float*)zp, zp += stride;
- }
- else
- {
- x = *(double*)xp; y = *(double*)yp;
- if( zp )
- z = *(double*)zp, zp += stride;
- }
-
- xp += stride; yp += stride;
-
- a = f[0]*x + f[1]*y + f[2]*z;
- b = f[3]*x + f[4]*y + f[5]*z;
- c = f[6]*x + f[7]*y + f[8]*z;
- nu = a*a + b*b;
- nu = nu ? 1./sqrt(nu) : 1.;
- a *= nu; b *= nu; c *= nu;
-
- if( abc_depth == CV_32F )
- {
- *(float*)ap = (float)a;
- *(float*)bp = (float)b;
- *(float*)cp = (float)c;
- }
- else
- {
- *(double*)ap = a;
- *(double*)bp = b;
- *(double*)cp = c;
- }
-
- ap += abc_stride;
- bp += abc_stride;
- cp += abc_stride;
- }
-
- __END__;
-}
-
-
-CV_IMPL void
-cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
-{
- CvMat* temp = 0;
- CvMat* denom = 0;
-
- CV_FUNCNAME( "cvConvertPointsHomogeneous" );
-
- __BEGIN__;
-
- int i, s_count, s_dims, d_count, d_dims;
- CvMat _src, _dst, _ones;
- CvMat* ones = 0;
-
- if( !CV_IS_MAT(src) )
- CV_ERROR( !src ? CV_StsNullPtr : CV_StsBadArg,
- "The input parameter is not a valid matrix" );
-
- if( !CV_IS_MAT(dst) )
- CV_ERROR( !dst ? CV_StsNullPtr : CV_StsBadArg,
- "The output parameter is not a valid matrix" );
-
- if( src == dst || src->data.ptr == dst->data.ptr )
- {
- if( src != dst && (!CV_ARE_TYPES_EQ(src, dst) || !CV_ARE_SIZES_EQ(src,dst)) )
- CV_ERROR( CV_StsBadArg, "Invalid inplace operation" );
- EXIT;
- }
-
- if( src->rows > src->cols )
- {
- if( !((src->cols > 1) ^ (CV_MAT_CN(src->type) > 1)) )
- CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or rows must be =1" );
-
- s_dims = CV_MAT_CN(src->type)*src->cols;
- s_count = src->rows;
- }
- else
- {
- if( !((src->rows > 1) ^ (CV_MAT_CN(src->type) > 1)) )
- CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or rows must be =1" );
-
- s_dims = CV_MAT_CN(src->type)*src->rows;
- s_count = src->cols;
- }
-
- if( src->rows == 1 || src->cols == 1 )
- src = cvReshape( src, &_src, 1, s_count );
-
- if( dst->rows > dst->cols )
- {
- if( !((dst->cols > 1) ^ (CV_MAT_CN(dst->type) > 1)) )
- CV_ERROR( CV_StsBadSize,
- "Either the number of channels or columns or rows in the input matrix must be =1" );
-
- d_dims = CV_MAT_CN(dst->type)*dst->cols;
- d_count = dst->rows;
- }
- else
- {
- if( !((dst->rows > 1) ^ (CV_MAT_CN(dst->type) > 1)) )
- CV_ERROR( CV_StsBadSize,
- "Either the number of channels or columns or rows in the output matrix must be =1" );
-
- d_dims = CV_MAT_CN(dst->type)*dst->rows;
- d_count = dst->cols;
- }
-
- if( dst->rows == 1 || dst->cols == 1 )
- dst = cvReshape( dst, &_dst, 1, d_count );
-
- if( s_count != d_count )
- CV_ERROR( CV_StsUnmatchedSizes, "Both matrices must have the same number of points" );
-
- if( CV_MAT_DEPTH(src->type) < CV_32F || CV_MAT_DEPTH(dst->type) < CV_32F )
- CV_ERROR( CV_StsUnsupportedFormat,
- "Both matrices must be floating-point (single or double precision)" );
-
- if( s_dims < 2 || s_dims > 4 || d_dims < 2 || d_dims > 4 )
- CV_ERROR( CV_StsOutOfRange,
- "Both input and output point dimensionality must be 2, 3 or 4" );
-
- if( s_dims < d_dims - 1 || s_dims > d_dims + 1 )
- CV_ERROR( CV_StsUnmatchedSizes,
- "The dimensionalities of input and output point sets differ too much" );
-
- if( s_dims == d_dims - 1 )
- {
- if( d_count == dst->rows )
- {
- ones = cvGetSubRect( dst, &_ones, cvRect( s_dims, 0, 1, d_count ));
- dst = cvGetSubRect( dst, &_dst, cvRect( 0, 0, s_dims, d_count ));
- }
- else
- {
- ones = cvGetSubRect( dst, &_ones, cvRect( 0, s_dims, d_count, 1 ));
- dst = cvGetSubRect( dst, &_dst, cvRect( 0, 0, d_count, s_dims ));
- }
- }
-
- if( s_dims <= d_dims )
- {
- if( src->rows == dst->rows && src->cols == dst->cols )
- {
- if( CV_ARE_TYPES_EQ( src, dst ) )
- cvCopy( src, dst );
- else
- cvConvert( src, dst );
- }
- else
- {
- if( !CV_ARE_TYPES_EQ( src, dst ))
- {
- CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type ));
- cvConvert( src, temp );
- src = temp;
- }
- cvTranspose( src, dst );
- }
-
- if( ones )
- cvSet( ones, cvRealScalar(1.) );
- }
- else
- {
- int s_plane_stride, s_stride, d_plane_stride, d_stride, elem_size;
-
- if( !CV_ARE_TYPES_EQ( src, dst ))
- {
- CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type ));
- cvConvert( src, temp );
- src = temp;
- }
-
- elem_size = CV_ELEM_SIZE(src->type);
-
- if( s_count == src->cols )
- s_plane_stride = src->step / elem_size, s_stride = 1;
- else
- s_stride = src->step / elem_size, s_plane_stride = 1;
-
- if( d_count == dst->cols )
- d_plane_stride = dst->step / elem_size, d_stride = 1;
- else
- d_stride = dst->step / elem_size, d_plane_stride = 1;
-
- CV_CALL( denom = cvCreateMat( 1, d_count, dst->type ));
-
- if( CV_MAT_DEPTH(dst->type) == CV_32F )
- {
- const float* xs = src->data.fl;
- const float* ys = xs + s_plane_stride;
- const float* zs = 0;
- const float* ws = xs + (s_dims - 1)*s_plane_stride;
-
- float* iw = denom->data.fl;
-
- float* xd = dst->data.fl;
- float* yd = xd + d_plane_stride;
- float* zd = 0;
-
- if( d_dims == 3 )
- {
- zs = ys + s_plane_stride;
- zd = yd + d_plane_stride;
- }
-
- for( i = 0; i < d_count; i++, ws += s_stride )
- {
- float t = *ws;
- iw[i] = fabs((double)t) > FLT_EPSILON ? t : 1.f;
- }
-
- cvDiv( 0, denom, denom );
-
- if( d_dims == 3 )
- for( i = 0; i < d_count; i++ )
- {
- float w = iw[i];
- float x = *xs * w, y = *ys * w, z = *zs * w;
- xs += s_stride; ys += s_stride; zs += s_stride;
- *xd = x; *yd = y; *zd = z;
- xd += d_stride; yd += d_stride; zd += d_stride;
- }
- else
- for( i = 0; i < d_count; i++ )
- {
- float w = iw[i];
- float x = *xs * w, y = *ys * w;
- xs += s_stride; ys += s_stride;
- *xd = x; *yd = y;
- xd += d_stride; yd += d_stride;
- }
- }
- else
- {
- const double* xs = src->data.db;
- const double* ys = xs + s_plane_stride;
- const double* zs = 0;
- const double* ws = xs + (s_dims - 1)*s_plane_stride;
-
- double* iw = denom->data.db;
-
- double* xd = dst->data.db;
- double* yd = xd + d_plane_stride;
- double* zd = 0;
-
- if( d_dims == 3 )
- {
- zs = ys + s_plane_stride;
- zd = yd + d_plane_stride;
- }
-
- for( i = 0; i < d_count; i++, ws += s_stride )
- {
- double t = *ws;
- iw[i] = fabs(t) > DBL_EPSILON ? t : 1.;
- }
-
- cvDiv( 0, denom, denom );
-
- if( d_dims == 3 )
- for( i = 0; i < d_count; i++ )
- {
- double w = iw[i];
- double x = *xs * w, y = *ys * w, z = *zs * w;
- xs += s_stride; ys += s_stride; zs += s_stride;
- *xd = x; *yd = y; *zd = z;
- xd += d_stride; yd += d_stride; zd += d_stride;
- }
- else
- for( i = 0; i < d_count; i++ )
- {
- double w = iw[i];
- double x = *xs * w, y = *ys * w;
- xs += s_stride; ys += s_stride;
- *xd = x; *yd = y;
- xd += d_stride; yd += d_stride;
- }
- }
- }
-
- __END__;
-
- cvReleaseMat( &denom );
- cvReleaseMat( &temp );
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cv.h"\r
+#include "_cvmodelest.h"\r
+\r
+template<typename T> int icvCompressPoints( T* ptr, const uchar* mask, int mstep, int count )\r
+{\r
+ int i, j;\r
+ for( i = j = 0; i < count; i++ )\r
+ if( mask[i*mstep] )\r
+ {\r
+ if( i > j )\r
+ ptr[j] = ptr[i];\r
+ j++;\r
+ }\r
+ return j;\r
+}\r
+\r
/* Homography estimator: supplies the DLT kernel, the reprojection-error
   metric and an LM-based refinement step to the generic robust-estimation
   framework (CvModelEstimator2, declared in _cvmodelest.h). */
class CvHomographyEstimator : public CvModelEstimator2
{
public:
    CvHomographyEstimator( int modelPoints );

    // Estimates a single 3x3 homography (m1 -> m2) from all point pairs;
    // returns the number of models produced (always 1 here).
    virtual int runKernel( const CvMat* m1, const CvMat* m2, CvMat* model );
    // Polishes the model by Levenberg-Marquardt minimization of the
    // reprojection error over the given pairs (at most maxIters iterations).
    virtual bool refine( const CvMat* m1, const CvMat* m2,
                         CvMat* model, int maxIters );
protected:
    // Writes each pair's squared reprojection error into "error" (32f).
    virtual void computeReprojError( const CvMat* m1, const CvMat* m2,
                                     const CvMat* model, CvMat* error );
};
+\r
+\r
/* _modelPoints is the RANSAC/LMeDS sample size (4 minimal, 5 allowed);
   the model is one 3x3 matrix (last ctor argument: 1 basic solution). */
CvHomographyEstimator::CvHomographyEstimator(int _modelPoints)
    : CvModelEstimator2(_modelPoints, cvSize(3,3), 1)
{
    assert( _modelPoints == 4 || _modelPoints == 5 );
}
+\r
/* Estimates the homography H (m1 -> m2) from all the point pairs with a
   normalized DLT:
     1. shift each set to its centroid, scale by the average absolute
        deviation per axis (a cheap variant of Hartley normalization);
     2. accumulate the 9x9 normal matrix LtL of the DLT equations;
     3. take the eigenvector of the smallest eigenvalue (last row of V');
     4. de-normalize and rescale so that H(3,3) == 1.
   Always returns 1 (one model). */
int CvHomographyEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* H )
{
    int i, count = m1->rows*m1->cols;
    const CvPoint2D64f* M = (const CvPoint2D64f*)m1->data.ptr;
    const CvPoint2D64f* m = (const CvPoint2D64f*)m2->data.ptr;

    double LtL[9][9], W[9][9], V[9][9];
    CvMat _LtL = cvMat( 9, 9, CV_64F, LtL );
    CvMat _W = cvMat( 9, 9, CV_64F, W );
    CvMat _V = cvMat( 9, 9, CV_64F, V );
    CvMat _H0 = cvMat( 3, 3, CV_64F, V[8] );    // aliases the last row of V' (the solution)
    CvMat _Htemp = cvMat( 3, 3, CV_64F, V[7] ); // scratch 3x3; V[7] is no longer needed
    CvPoint2D64f cM={0,0}, cm={0,0}, sM={0,0}, sm={0,0};

    // centroids of both point sets
    for( i = 0; i < count; i++ )
    {
        cm.x += m[i].x; cm.y += m[i].y;
        cM.x += M[i].x; cM.y += M[i].y;
    }

    cm.x /= count; cm.y /= count;
    cM.x /= count; cM.y /= count;

    // average absolute deviation per axis ...
    for( i = 0; i < count; i++ )
    {
        sm.x += fabs(m[i].x - cm.x);
        sm.y += fabs(m[i].y - cm.y);
        sM.x += fabs(M[i].x - cM.x);
        sM.y += fabs(M[i].y - cM.y);
    }

    // ... inverted to become the normalization scales
    sm.x = count/sm.x; sm.y = count/sm.y;
    sM.x = count/sM.x; sM.y = count/sM.y;

    // invHnorm undoes the normalization of m; Hnorm2 applies it to M
    double invHnorm[9] = { 1./sm.x, 0, cm.x, 0, 1./sm.y, cm.y, 0, 0, 1 };
    double Hnorm2[9] = { sM.x, 0, -cM.x*sM.x, 0, sM.y, -cM.y*sM.y, 0, 0, 1 };
    CvMat _invHnorm = cvMat( 3, 3, CV_64FC1, invHnorm );
    CvMat _Hnorm2 = cvMat( 3, 3, CV_64FC1, Hnorm2 );

    // accumulate the upper triangle of LtL = sum_i (Lx*Lx' + Ly*Ly'),
    // where Lx, Ly are the two DLT equation rows of point pair i
    cvZero( &_LtL );
    for( i = 0; i < count; i++ )
    {
        double x = (m[i].x - cm.x)*sm.x, y = (m[i].y - cm.y)*sm.y;
        double X = (M[i].x - cM.x)*sM.x, Y = (M[i].y - cM.y)*sM.y;
        double Lx[] = { X, Y, 1, 0, 0, 0, -x*X, -x*Y, -x };
        double Ly[] = { 0, 0, 0, X, Y, 1, -y*X, -y*Y, -y };
        int j, k;
        for( j = 0; j < 9; j++ )
            for( k = j; k < 9; k++ )
                LtL[j][k] += Lx[j]*Lx[k] + Ly[j]*Ly[k];
    }
    cvCompleteSymm( &_LtL );   // mirror the upper triangle into the lower

    // smallest-eigenvalue eigenvector = last row of V' (already aliased by _H0)
    cvSVD( &_LtL, &_W, 0, &_V, CV_SVD_MODIFY_A + CV_SVD_V_T );
    // H = invHnorm * H0 * Hnorm2, then scale so that H(3,3) == 1
    cvMatMul( &_invHnorm, &_H0, &_Htemp );
    cvMatMul( &_Htemp, &_Hnorm2, &_H0 );
    cvConvertScale( &_H0, H, 1./_H0.data.db[8] );

    return 1;
}
+\r
+\r
+void CvHomographyEstimator::computeReprojError( const CvMat* m1, const CvMat* m2,\r
+ const CvMat* model, CvMat* _err )\r
+{\r
+ int i, count = m1->rows*m1->cols;\r
+ const CvPoint2D64f* M = (const CvPoint2D64f*)m1->data.ptr;\r
+ const CvPoint2D64f* m = (const CvPoint2D64f*)m2->data.ptr;\r
+ const double* H = model->data.db;\r
+ float* err = _err->data.fl;\r
+\r
+ for( i = 0; i < count; i++ )\r
+ {\r
+ double ww = 1./(H[6]*M[i].x + H[7]*M[i].y + 1.);\r
+ double dx = (H[0]*M[i].x + H[1]*M[i].y + H[2])*ww - m[i].x;\r
+ double dy = (H[3]*M[i].x + H[4]*M[i].y + H[5])*ww - m[i].y;\r
+ err[i] = (float)(dx*dx + dy*dy);\r
+ }\r
+}\r
+\r
/* Refines the homography with Levenberg-Marquardt over its 8 free
   parameters (H(3,3) stays fixed at 1), minimizing the total squared
   reprojection error over the passed (inlier) pairs. The result is
   written back into the first 8 doubles of "model". Always returns true. */
bool CvHomographyEstimator::refine( const CvMat* m1, const CvMat* m2, CvMat* model, int maxIters )
{
    CvLevMarq solver(8, 0, cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, maxIters, DBL_EPSILON));
    int i, j, k, count = m1->rows*m1->cols;
    const CvPoint2D64f* M = (const CvPoint2D64f*)m1->data.ptr;
    const CvPoint2D64f* m = (const CvPoint2D64f*)m2->data.ptr;
    // view the first 8 doubles of the 3x3 model as the parameter vector
    CvMat modelPart = cvMat( solver.param->rows, solver.param->cols, model->type, model->data.ptr );
    cvCopy( &modelPart, solver.param );

    for(;;)
    {
        const CvMat* _param = 0;
        CvMat *_JtJ = 0, *_JtErr = 0;
        double* _errNorm = 0;

        // the solver tells us what it needs this iteration (J'J and J'err,
        // or just the error norm); returns false when converged/finished
        if( !solver.updateAlt( _param, _JtJ, _JtErr, _errNorm ))
            break;

        for( i = 0; i < count; i++ )
        {
            const double* h = _param->data.db;
            double Mx = M[i].x, My = M[i].y;
            double ww = 1./(h[6]*Mx + h[7]*My + 1.);
            double _xi = (h[0]*Mx + h[1]*My + h[2])*ww;  // projected point
            double _yi = (h[3]*Mx + h[4]*My + h[5])*ww;
            double err[] = { _xi - m[i].x, _yi - m[i].y };
            if( _JtJ || _JtErr )
            {
                // 2x8 Jacobian of the projection w.r.t. the parameters
                double J[][8] =
                {
                    { Mx*ww, My*ww, ww, 0, 0, 0, -Mx*ww*_xi, -My*ww*_xi },
                    { 0, 0, 0, Mx*ww, My*ww, ww, -Mx*ww*_yi, -My*ww*_yi }
                };

                // accumulate the upper triangle of J'J and the full J'err
                for( j = 0; j < 8; j++ )
                {
                    for( k = j; k < 8; k++ )
                        _JtJ->data.db[j*8+k] += J[0][j]*J[0][k] + J[1][j]*J[1][k];
                    _JtErr->data.db[j] += J[0][j]*err[0] + J[1][j]*err[1];
                }
            }
            if( _errNorm )
                *_errNorm += err[0]*err[0] + err[1]*err[1];
        }
    }

    // copy the refined parameters back into the caller's model
    cvCopy( solver.param, &modelPart );
    return true;
}
+\r
+\r
/* Public C API: finds the perspective transform H mapping objectPoints
   to imagePoints (>= 4 correspondences).
   method: 0 - DLT on all points; CV_RANSAC / CV_LMEDS - robust variants
   followed by LM refinement on the inliers.
   mask (optional, 8u, 1 x count or count x 1) receives the inlier flags.
   Returns 1 on success, 0 on failure. */
CV_IMPL int
cvFindHomography( const CvMat* objectPoints, const CvMat* imagePoints,
                  CvMat* __H, int method, double ransacReprojThreshold,
                  CvMat* mask )
{
    const double confidence = 0.99;   // RANSAC/LMeDS confidence level
    bool result = false;
    CvMat *m = 0, *M = 0, *tempMask = 0;

    CV_FUNCNAME( "cvFindHomography" );

    __BEGIN__;

    double H[9];
    CvMat _H = cvMat( 3, 3, CV_64FC1, H );
    int count;

    CV_ASSERT( CV_IS_MAT(imagePoints) && CV_IS_MAT(objectPoints) );

    count = MAX(imagePoints->cols, imagePoints->rows);
    CV_ASSERT( count >= 4 );

    // bring both point sets to a canonical 1 x count 64f 2-channel layout
    m = cvCreateMat( 1, count, CV_64FC2 );
    cvConvertPointsHomogeneous( imagePoints, m );

    M = cvCreateMat( 1, count, CV_64FC2 );
    cvConvertPointsHomogeneous( objectPoints, M );

    if( mask )
    {
        CV_ASSERT( CV_IS_MASK_ARR(mask) && CV_IS_MAT_CONT(mask->type) &&
            (mask->rows == 1 || mask->cols == 1) &&
            mask->rows*mask->cols == count );
        tempMask = mask;   // use the caller's mask in place
    }
    else if( count > 4 )
        tempMask = cvCreateMat( 1, count, CV_8U );
    if( tempMask )
        cvSet( tempMask, cvScalarAll(1.) );

    {
        CvHomographyEstimator estimator( MIN(count, 5) );
        if( count == 4 )
            method = 0;   // minimal sample: robust methods degenerate to DLT
        if( method == CV_LMEDS )
            result = estimator.runLMeDS( M, m, &_H, tempMask, confidence );
        else if( method == CV_RANSAC )
            result = estimator.runRANSAC( M, m, &_H, tempMask, ransacReprojThreshold, confidence );
        else
            result = estimator.runKernel( M, m, &_H ) > 0;

        if( result && count > 4 )
        {
            // keep only the inliers and polish the model on them
            icvCompressPoints( (CvPoint2D64f*)M->data.ptr, tempMask->data.ptr, 1, count );
            count = icvCompressPoints( (CvPoint2D64f*)m->data.ptr, tempMask->data.ptr, 1, count );
            M->cols = m->cols = count;
            estimator.refine( M, m, &_H, 10 );
        }
    }

    if( result )
        cvConvert( &_H, __H );

    __END__;

    cvReleaseMat( &m );
    cvReleaseMat( &M );
    if( tempMask != mask )   // only release a mask we allocated ourselves
        cvReleaseMat( &tempMask );

    return (int)result;
}
+\r
+\r
+/* Evaluation of Fundamental Matrix from point correspondences.\r
+ The original code has been written by Valery Mosyagin */\r
+\r
+/* The algorithms (except for RANSAC) and the notation have been taken from\r
+ Zhengyou Zhang's research report\r
+ "Determining the Epipolar Geometry and its Uncertainty: A Review"\r
+ that can be found at http://www-sop.inria.fr/robotvis/personnel/zzhang/zzhang-eng.html */\r
+\r
+/************************************** 7-point algorithm *******************************/\r
/* Fundamental-matrix estimator: plugs the 7- and 8-point kernels and the
   symmetric epipolar-distance metric into the generic robust-estimation
   framework (CvModelEstimator2). */
class CvFMEstimator : public CvModelEstimator2
{
public:
    CvFMEstimator( int _modelPoints );

    // Dispatches to run7Point or run8Point depending on modelPoints.
    virtual int runKernel( const CvMat* m1, const CvMat* m2, CvMat* model );
    // 7-point algorithm: up to 3 solutions stored consecutively in model.
    virtual int run7Point( const CvMat* m1, const CvMat* m2, CvMat* model );
    // Normalized 8-point algorithm: a single rank-2 solution.
    virtual int run8Point( const CvMat* m1, const CvMat* m2, CvMat* model );
protected:
    // Writes each pair's epipolar error (see implementation) into "error".
    virtual void computeReprojError( const CvMat* m1, const CvMat* m2,
                                     const CvMat* model, CvMat* error );
};
+\r
/* A 7-point sample can yield up to 3 fundamental matrices, an 8-point
   sample exactly one; the third base-class argument records that. */
CvFMEstimator::CvFMEstimator( int _modelPoints )
: CvModelEstimator2( _modelPoints, cvSize(3,3), _modelPoints == 7 ? 3 : 1 )
{
    assert( _modelPoints == 7 || _modelPoints == 8 );
}
+\r
+\r
+int CvFMEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* model )\r
+{\r
+ return modelPoints == 7 ? run7Point( m1, m2, model ) : run8Point( m1, m2, model );\r
+}\r
+\r
/* 7-point algorithm: builds the 7x9 epipolar-constraint system, takes
   its 2-dimensional null space {f1, f2}, and solves the cubic
   det(lambda*f1 + (1-lambda)*f2) = 0 for up to 3 fundamental matrices,
   stored consecutively in _fmatrix (9 doubles each).
   Returns the number of real roots / solutions (1..3), or the raw
   cvSolveCubic result if it falls outside that range. */
int CvFMEstimator::run7Point( const CvMat* _m1, const CvMat* _m2, CvMat* _fmatrix )
{
    double a[7*9], w[7], v[9*9], c[4], r[3];
    double* f1, *f2;
    double t0, t1, t2;
    CvMat A = cvMat( 7, 9, CV_64F, a );
    CvMat V = cvMat( 9, 9, CV_64F, v );
    CvMat W = cvMat( 7, 1, CV_64F, w );
    CvMat coeffs = cvMat( 1, 4, CV_64F, c );
    CvMat roots = cvMat( 1, 3, CV_64F, r );
    const CvPoint2D64f* m1 = (const CvPoint2D64f*)_m1->data.ptr;
    const CvPoint2D64f* m2 = (const CvPoint2D64f*)_m2->data.ptr;
    double* fmatrix = _fmatrix->data.db;
    int i, k, n;

    // form a linear system: i-th row of A(=a) represents
    // the equation: (m2[i], 1)'*F*(m1[i], 1) = 0
    for( i = 0; i < 7; i++ )
    {
        double x0 = m1[i].x, y0 = m1[i].y;
        double x1 = m2[i].x, y1 = m2[i].y;

        a[i*9+0] = x1*x0;
        a[i*9+1] = x1*y0;
        a[i*9+2] = x1;
        a[i*9+3] = y1*x0;
        a[i*9+4] = y1*y0;
        a[i*9+5] = y1;
        a[i*9+6] = x0;
        a[i*9+7] = y0;
        a[i*9+8] = 1;
    }

    // A*(f11 f12 ... f33)' = 0 is singular (7 equations for 9 variables), so
    // the solution is linear subspace of dimensionality 2.
    // => use the last two singular vectors as a basis of the space
    // (according to SVD properties)
    cvSVD( &A, &W, 0, &V, CV_SVD_MODIFY_A + CV_SVD_V_T );
    f1 = v + 7*9;   // last two rows of V'
    f2 = v + 8*9;

    // f1, f2 is a basis => lambda*f1 + mu*f2 is an arbitrary f. matrix.
    // as it is determined up to a scale, normalize lambda & mu (lambda + mu = 1),
    // so f ~ lambda*f1 + (1 - lambda)*f2.
    // use the additional constraint det(f) = det(lambda*f1 + (1-lambda)*f2) to find lambda.
    // it will be a cubic equation.
    // find c - polynomial coefficients.
    for( i = 0; i < 9; i++ )
        f1[i] -= f2[i];   // from here on: f = lambda*f1 + f2

    t0 = f2[4]*f2[8] - f2[5]*f2[7];
    t1 = f2[3]*f2[8] - f2[5]*f2[6];
    t2 = f2[3]*f2[7] - f2[4]*f2[6];

    c[3] = f2[0]*t0 - f2[1]*t1 + f2[2]*t2;   // det(f2): the lambda^0 term

    // lambda^1 term: mixed cofactor expansion of f1 against f2
    c[2] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2 -
           f1[3]*(f2[1]*f2[8] - f2[2]*f2[7]) +
           f1[4]*(f2[0]*f2[8] - f2[2]*f2[6]) -
           f1[5]*(f2[0]*f2[7] - f2[1]*f2[6]) +
           f1[6]*(f2[1]*f2[5] - f2[2]*f2[4]) -
           f1[7]*(f2[0]*f2[5] - f2[2]*f2[3]) +
           f1[8]*(f2[0]*f2[4] - f2[1]*f2[3]);

    t0 = f1[4]*f1[8] - f1[5]*f1[7];
    t1 = f1[3]*f1[8] - f1[5]*f1[6];
    t2 = f1[3]*f1[7] - f1[4]*f1[6];

    // lambda^2 term: mixed cofactor expansion of f2 against f1
    c[1] = f2[0]*t0 - f2[1]*t1 + f2[2]*t2 -
           f2[3]*(f1[1]*f1[8] - f1[2]*f1[7]) +
           f2[4]*(f1[0]*f1[8] - f1[2]*f1[6]) -
           f2[5]*(f1[0]*f1[7] - f1[1]*f1[6]) +
           f2[6]*(f1[1]*f1[5] - f1[2]*f1[4]) -
           f2[7]*(f1[0]*f1[5] - f1[2]*f1[3]) +
           f2[8]*(f1[0]*f1[4] - f1[1]*f1[3]);

    c[0] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2;   // det(f1): the lambda^3 term

    // solve the cubic equation; there can be 1 to 3 roots ...
    n = cvSolveCubic( &coeffs, &roots );

    if( n < 1 || n > 3 )
        return n;

    for( k = 0; k < n; k++, fmatrix += 9 )
    {
        // for each root form the fundamental matrix
        double lambda = r[k], mu = 1.;
        double s = f1[8]*r[k] + f2[8];

        // normalize each matrix, so that F(3,3) (~fmatrix[8]) == 1
        if( fabs(s) > DBL_EPSILON )
        {
            mu = 1./s;
            lambda *= mu;
            fmatrix[8] = 1.;
        }
        else
            fmatrix[8] = 0.;

        for( i = 0; i < 8; i++ )
            fmatrix[i] = f1[i]*lambda + f2[i]*mu;
    }

    return n;
}
+\r
+\r
/* Normalized 8-point algorithm: normalizes both point sets (centroid at
   the origin, average distance ~sqrt(2)), solves the least-squares
   epipolar system via the 9x9 normal matrix, enforces rank 2 with an
   SVD, and de-normalizes. Writes one 3x3 matrix into _fmatrix (scaled so
   that F(3,3) == 1 when possible) and returns 1, or 0 on degeneracy. */
int CvFMEstimator::run8Point( const CvMat* _m1, const CvMat* _m2, CvMat* _fmatrix )
{
    double a[9*9], w[9], v[9*9];
    CvMat W = cvMat( 1, 9, CV_64F, w );
    CvMat V = cvMat( 9, 9, CV_64F, v );
    CvMat A = cvMat( 9, 9, CV_64F, a );
    CvMat U, F0, TF;

    CvPoint2D64f m0c = {0,0}, m1c = {0,0};
    double t, scale0 = 0, scale1 = 0;

    const CvPoint2D64f* m1 = (const CvPoint2D64f*)_m1->data.ptr;
    const CvPoint2D64f* m2 = (const CvPoint2D64f*)_m2->data.ptr;
    double* fmatrix = _fmatrix->data.db;
    int i, j, k, count = _m1->cols*_m1->rows;

    // compute centers and average distances for each of the two point sets
    for( i = 0; i < count; i++ )
    {
        double x = m1[i].x, y = m1[i].y;
        m0c.x += x; m0c.y += y;

        x = m2[i].x, y = m2[i].y;
        m1c.x += x; m1c.y += y;
    }

    // calculate the normalizing transformations for each of the point sets:
    // after the transformation each set will have the mass center at the coordinate origin
    // and the average distance from the origin will be ~sqrt(2).
    t = 1./count;
    m0c.x *= t; m0c.y *= t;
    m1c.x *= t; m1c.y *= t;

    for( i = 0; i < count; i++ )
    {
        double x = m1[i].x - m0c.x, y = m1[i].y - m0c.y;
        scale0 += sqrt(x*x + y*y);

        // NOTE: the fabs here is redundant (the values are squared) but harmless
        x = fabs(m2[i].x - m1c.x), y = fabs(m2[i].y - m1c.y);
        scale1 += sqrt(x*x + y*y);
    }

    scale0 *= t;
    scale1 *= t;

    if( scale0 < FLT_EPSILON || scale1 < FLT_EPSILON )
        return 0;   // degenerate configuration: all points coincide

    scale0 = sqrt(2.)/scale0;
    scale1 = sqrt(2.)/scale1;

    cvZero( &A );

    // form a linear system Ax=0: for each selected pair of points m1 & m2,
    // the row of A(=a) represents the coefficients of equation: (m2, 1)'*F*(m1, 1) = 0
    // to save computation time, we compute (At*A) instead of A and then solve (At*A)x=0.
    for( i = 0; i < count; i++ )
    {
        double x0 = (m1[i].x - m0c.x)*scale0;
        double y0 = (m1[i].y - m0c.y)*scale0;
        double x1 = (m2[i].x - m1c.x)*scale1;
        double y1 = (m2[i].y - m1c.y)*scale1;
        double r[9] = { x1*x0, x1*y0, x1, y1*x0, y1*y0, y1, x0, y0, 1 };
        for( j = 0; j < 9; j++ )
            for( k = 0; k < 9; k++ )
                a[j*9+k] += r[j]*r[k];
    }

    cvSVD( &A, &W, 0, &V, CV_SVD_MODIFY_A + CV_SVD_V_T );

    // require at least 8 non-negligible singular values, i.e. a
    // (numerically) one-dimensional null space
    for( i = 0; i < 8; i++ )
    {
        if( fabs(w[i]) < DBL_EPSILON )
            break;
    }

    if( i < 7 )
        return 0;

    F0 = cvMat( 3, 3, CV_64F, v + 9*8 ); // take the last column of v as a solution of Af = 0

    // make F0 singular (of rank 2) by decomposing it with SVD,
    // zeroing the last diagonal element of W and then composing the matrices back.

    // use v as a temporary storage for different 3x3 matrices
    W = U = V = TF = F0;
    W.data.db = v;
    U.data.db = v + 9;
    V.data.db = v + 18;
    TF.data.db = v + 27;

    cvSVD( &F0, &W, &U, &V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
    W.data.db[8] = 0.;   // kill the smallest singular value

    // F0 <- U*diag([W(1), W(2), 0])*V'
    cvGEMM( &U, &W, 1., 0, 0., &TF, CV_GEMM_A_T );
    cvGEMM( &TF, &V, 1., 0, 0., &F0, 0/*CV_GEMM_B_T*/ );

    // apply the transformation that is inverse
    // to what we used to normalize the point coordinates
    {
        double tt0[] = { scale0, 0, -scale0*m0c.x, 0, scale0, -scale0*m0c.y, 0, 0, 1 };
        double tt1[] = { scale1, 0, -scale1*m1c.x, 0, scale1, -scale1*m1c.y, 0, 0, 1 };
        CvMat T0, T1;
        T0 = T1 = F0;
        T0.data.db = tt0;
        T1.data.db = tt1;

        // F0 <- T1'*F0*T0
        cvGEMM( &T1, &F0, 1., 0, 0., &TF, CV_GEMM_A_T );
        F0.data.db = fmatrix;
        cvGEMM( &TF, &T0, 1., 0, 0., &F0, 0 );

        // make F(3,3) = 1
        if( fabs(F0.data.db[8]) > FLT_EPSILON )
            cvScale( &F0, &F0, 1./F0.data.db[8] );
    }

    return 1;
}
+\r
+\r
/* Error metric for RANSAC/LMeDS: for each correspondence, computes the
   squared distance from m2[i] to its epiline F*(m1[i],1) and from m1[i]
   to F'*(m2[i],1), and stores the larger of the two in _err (32f). */
void CvFMEstimator::computeReprojError( const CvMat* _m1, const CvMat* _m2,
                                        const CvMat* model, CvMat* _err )
{
    int i, count = _m1->rows*_m1->cols;
    const CvPoint2D64f* m1 = (const CvPoint2D64f*)_m1->data.ptr;
    const CvPoint2D64f* m2 = (const CvPoint2D64f*)_m2->data.ptr;
    const double* F = model->data.db;
    float* err = _err->data.fl;

    for( i = 0; i < count; i++ )
    {
        double a, b, c, d1, d2, s1, s2;

        // epiline of m1[i] in image 2: (a, b, c) = F*(m1[i], 1)
        a = F[0]*m1[i].x + F[1]*m1[i].y + F[2];
        b = F[3]*m1[i].x + F[4]*m1[i].y + F[5];
        c = F[6]*m1[i].x + F[7]*m1[i].y + F[8];

        s2 = 1./(a*a + b*b);                 // line-normalization factor
        d2 = m2[i].x*a + m2[i].y*b + c;      // signed (unnormalized) distance

        // epiline of m2[i] in image 1: (a, b, c) = F'*(m2[i], 1)
        a = F[0]*m2[i].x + F[3]*m2[i].y + F[6];
        b = F[1]*m2[i].x + F[4]*m2[i].y + F[7];
        c = F[2]*m2[i].x + F[5]*m2[i].y + F[8];

        s1 = 1./(a*a + b*b);
        d1 = m1[i].x*a + m1[i].y*b + c;

        // worst of the two squared point-to-epiline distances
        err[i] = (float)std::max(d1*d1*s1, d2*d2*s2);
    }
}
+\r
+\r
/* Public C API: estimates the fundamental matrix from >= 7 point
   correspondences.
   method: CV_FM_7POINT (exactly 7 points, up to 3 solutions stacked as
   9x3 in fmatrix), CV_FM_8POINT, or CV_FM_RANSAC / CV_FM_LMEDS for > 8
   points (param1 = RANSAC distance threshold, param2 = confidence).
   mask (optional) receives the inlier flags.
   Returns the number of matrices found (0 on failure). */
CV_IMPL int
cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
                      CvMat* fmatrix, int method,
                      double param1, double param2, CvMat* mask )
{
    int result = 0;
    CvMat *m1 = 0, *m2 = 0, *tempMask = 0;

    CV_FUNCNAME( "cvFindFundamentalMat" );

    __BEGIN__;

    double F[3*9];
    CvMat _F3x3 = cvMat( 3, 3, CV_64FC1, F ), _F9x3 = cvMat( 9, 3, CV_64FC1, F );
    int count;

    CV_ASSERT( CV_IS_MAT(points1) && CV_IS_MAT(points2) && CV_ARE_SIZES_EQ(points1, points2) );
    CV_ASSERT( CV_IS_MAT(fmatrix) && fmatrix->cols == 3 &&
        (fmatrix->rows == 3 || (fmatrix->rows == 9 && method == CV_FM_7POINT)) );

    count = MAX(points1->cols, points1->rows);
    if( count < 7 )
        EXIT;   // not enough points: result stays 0

    // bring both point sets to a canonical 1 x count 64f 2-channel layout
    m1 = cvCreateMat( 1, count, CV_64FC2 );
    cvConvertPointsHomogeneous( points1, m1 );

    m2 = cvCreateMat( 1, count, CV_64FC2 );
    cvConvertPointsHomogeneous( points2, m2 );

    if( mask )
    {
        CV_ASSERT( CV_IS_MASK_ARR(mask) && CV_IS_MAT_CONT(mask->type) &&
            (mask->rows == 1 || mask->cols == 1) &&
            mask->rows*mask->cols == count );
        // wrap the caller's data in a 1 x count header (header-only; the
        // data remains owned by the caller)
        tempMask = cvCreateMatHeader(1, count, CV_8U);
        cvSetData(tempMask, mask->data.ptr, 0);
    }
    else if( count > 8 )
        tempMask = cvCreateMat( 1, count, CV_8U );
    if( tempMask )
        cvSet( tempMask, cvScalarAll(1.) );

    {
        CvFMEstimator estimator( MIN(count, (method & 3) == CV_FM_7POINT ? 7 : 8) );
        if( count == 7 )
            result = estimator.run7Point(m1, m2, &_F9x3);
        else if( count == 8 || method == CV_FM_8POINT )
            result = estimator.run8Point(m1, m2, &_F3x3);
        else if( count > 8 )
        {
            // clamp the robust-method parameters to sane defaults
            if( param1 <= 0 )
                param1 = 3;
            if( param2 < DBL_EPSILON || param2 > 1 - DBL_EPSILON )
                param2 = 0.99;

            if( (method & ~3) == CV_RANSAC )
                result = estimator.runRANSAC(m1, m2, &_F3x3, tempMask, param1, param2 );
            else
                result = estimator.runLMeDS(m1, m2, &_F3x3, tempMask, param2 );
            if( result <= 0 )
                EXIT;
            /*icvCompressPoints( (CvPoint2D64f*)m1->data.ptr, tempMask->data.ptr, 1, count );
            count = icvCompressPoints( (CvPoint2D64f*)m2->data.ptr, tempMask->data.ptr, 1, count );
            assert( count >= 8 );
            m1->cols = m2->cols = count;
            estimator.run8Point(m1, m2, &_F3x3);*/
        }
    }

    if( result )
        cvConvert( fmatrix->rows == 3 ? &_F3x3 : &_F9x3, fmatrix );

    __END__;

    cvReleaseMat( &m1 );
    cvReleaseMat( &m2 );
    if( tempMask != mask )   // tempMask is either ours or a header over mask
        cvReleaseMat( &tempMask );

    return result;
}
+\r
+\r
/* Public C API: for every input point computes the corresponding epiline
   in the other image: l = F*(p,1) when pointImageID == 1, l = F'*(p,1)
   when pointImageID == 2. Each output line (a,b,c) is normalized so that
   a^2 + b^2 == 1 (unless both are zero).
   points: 2xN/3xN/Nx2/Nx3 layout, 32f or 64f, 1..3 channels;
   lines: 3xN or Nx3 of matching count. */
CV_IMPL void
cvComputeCorrespondEpilines( const CvMat* points, int pointImageID,
                             const CvMat* fmatrix, CvMat* lines )
{
    CV_FUNCNAME( "cvComputeCorrespondEpilines" );

    __BEGIN__;

    int abc_stride, abc_plane_stride, abc_elem_size;
    int plane_stride, stride, elem_size;
    int i, dims, count, depth, cn, abc_dims, abc_count, abc_depth, abc_cn;
    uchar *ap, *bp, *cp;
    const uchar *xp, *yp, *zp;
    double f[9];
    CvMat F = cvMat( 3, 3, CV_64F, f );

    // ---- validate the input points and deduce their layout ----
    if( !CV_IS_MAT(points) )
        CV_ERROR( !points ? CV_StsNullPtr : CV_StsBadArg, "points parameter is not a valid matrix" );

    depth = CV_MAT_DEPTH(points->type);
    cn = CV_MAT_CN(points->type);
    if( (depth != CV_32F && depth != CV_64F) || (cn != 1 && cn != 2 && cn != 3) )
        CV_ERROR( CV_StsUnsupportedFormat, "The format of point matrix is unsupported" );

    if( points->rows > points->cols )
    {
        // tall matrix: one point per row
        dims = cn*points->cols;
        count = points->rows;
    }
    else
    {
        // wide matrix: one point per column (or a single multi-channel row)
        if( (points->rows > 1 && cn > 1) || (points->rows == 1 && cn == 1) )
            CV_ERROR( CV_StsBadSize, "The point matrix does not have a proper layout (2xn, 3xn, nx2 or nx3)" );
        dims = cn * points->rows;
        count = points->cols;
    }

    if( dims != 2 && dims != 3 )
        CV_ERROR( CV_StsOutOfRange, "The dimensionality of points must be 2 or 3" );

    // ---- validate the fundamental matrix ----
    if( !CV_IS_MAT(fmatrix) )
        CV_ERROR( !fmatrix ? CV_StsNullPtr : CV_StsBadArg, "fmatrix is not a valid matrix" );

    if( CV_MAT_TYPE(fmatrix->type) != CV_32FC1 && CV_MAT_TYPE(fmatrix->type) != CV_64FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "fundamental matrix must have 32fC1 or 64fC1 type" );

    if( fmatrix->cols != 3 || fmatrix->rows != 3 )
        CV_ERROR( CV_StsBadSize, "fundamental matrix must be 3x3" );

    // ---- validate the output lines and deduce their layout ----
    if( !CV_IS_MAT(lines) )
        CV_ERROR( !lines ? CV_StsNullPtr : CV_StsBadArg, "lines parameter is not a valid matrix" );

    abc_depth = CV_MAT_DEPTH(lines->type);
    abc_cn = CV_MAT_CN(lines->type);
    if( (abc_depth != CV_32F && abc_depth != CV_64F) || (abc_cn != 1 && abc_cn != 3) )
        CV_ERROR( CV_StsUnsupportedFormat, "The format of the matrix of lines is unsupported" );

    if( lines->rows > lines->cols )
    {
        abc_dims = abc_cn*lines->cols;
        abc_count = lines->rows;
    }
    else
    {
        if( (lines->rows > 1 && abc_cn > 1) || (lines->rows == 1 && abc_cn == 1) )
            CV_ERROR( CV_StsBadSize, "The lines matrix does not have a proper layout (3xn or nx3)" );
        abc_dims = abc_cn * lines->rows;
        abc_count = lines->cols;
    }

    if( abc_dims != 3 )
        CV_ERROR( CV_StsOutOfRange, "The lines matrix does not have a proper layout (3xn or nx3)" );

    if( abc_count != count )
        CV_ERROR( CV_StsUnmatchedSizes, "The numbers of points and lines are different" );

    // ---- set up the byte strides for both layouts ----
    elem_size = CV_ELEM_SIZE(depth);
    abc_elem_size = CV_ELEM_SIZE(abc_depth);

    if( points->rows == dims )
    {
        // coordinate planes are rows: step between planes is the row step
        plane_stride = points->step;
        stride = elem_size;
    }
    else
    {
        plane_stride = elem_size;
        stride = points->rows == 1 ? dims*elem_size : points->step;
    }

    if( lines->rows == 3 )
    {
        abc_plane_stride = lines->step;
        abc_stride = abc_elem_size;
    }
    else
    {
        abc_plane_stride = abc_elem_size;
        abc_stride = lines->rows == 1 ? 3*abc_elem_size : lines->step;
    }

    CV_CALL( cvConvert( fmatrix, &F ));
    if( pointImageID == 2 )
        cvTranspose( &F, &F );   // points live in image 2: use F'

    xp = points->data.ptr;
    yp = xp + plane_stride;
    zp = dims == 3 ? yp + plane_stride : 0;

    ap = lines->data.ptr;
    bp = ap + abc_plane_stride;
    cp = bp + abc_plane_stride;

    for( i = 0; i < count; i++ )
    {
        double x, y, z = 1.;   // z defaults to 1 for 2D points
        double a, b, c, nu;

        if( depth == CV_32F )
        {
            x = *(float*)xp; y = *(float*)yp;
            if( zp )
                z = *(float*)zp, zp += stride;
        }
        else
        {
            x = *(double*)xp; y = *(double*)yp;
            if( zp )
                z = *(double*)zp, zp += stride;
        }

        xp += stride; yp += stride;

        // the epiline (a, b, c) = F * (x, y, z)'
        a = f[0]*x + f[1]*y + f[2]*z;
        b = f[3]*x + f[4]*y + f[5]*z;
        c = f[6]*x + f[7]*y + f[8]*z;
        // normalize so that (a, b) is a unit normal (skip if degenerate)
        nu = a*a + b*b;
        nu = nu ? 1./sqrt(nu) : 1.;
        a *= nu; b *= nu; c *= nu;

        if( abc_depth == CV_32F )
        {
            *(float*)ap = (float)a;
            *(float*)bp = (float)b;
            *(float*)cp = (float)c;
        }
        else
        {
            *(double*)ap = a;
            *(double*)bp = b;
            *(double*)cp = c;
        }

        ap += abc_stride;
        bp += abc_stride;
        cp += abc_stride;
    }

    __END__;
}
+\r
+\r
/* Public C API: converts a point set between dimensionalities 2..4.
   If the output dimensionality is larger by one, a coordinate w == 1 is
   appended; if smaller by one, perspective division by the last
   coordinate is performed (coordinates whose |w| is below epsilon are
   divided by 1 instead, i.e. kept as is); if equal, the data is simply
   copied (and/or transposed and converted).
   Both matrices may be laid out as d x n, n x d, 1 x n multi-channel or
   n x 1 multi-channel, 32f or 64f. In-place calls are no-ops. */
CV_IMPL void
cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
{
    CvMat* temp = 0;    // conversion buffer when src/dst types differ
    CvMat* denom = 0;   // per-point divisors for the perspective division

    CV_FUNCNAME( "cvConvertPointsHomogeneous" );

    __BEGIN__;

    int i, s_count, s_dims, d_count, d_dims;
    CvMat _src, _dst, _ones;
    CvMat* ones = 0;

    if( !CV_IS_MAT(src) )
        CV_ERROR( !src ? CV_StsNullPtr : CV_StsBadArg,
        "The input parameter is not a valid matrix" );

    if( !CV_IS_MAT(dst) )
        CV_ERROR( !dst ? CV_StsNullPtr : CV_StsBadArg,
        "The output parameter is not a valid matrix" );

    if( src == dst || src->data.ptr == dst->data.ptr )
    {
        // in-place call: only validate; no conversion is possible/needed
        if( src != dst && (!CV_ARE_TYPES_EQ(src, dst) || !CV_ARE_SIZES_EQ(src,dst)) )
            CV_ERROR( CV_StsBadArg, "Invalid inplace operation" );
        EXIT;
    }

    // ---- deduce the input layout: dimensionality and point count ----
    if( src->rows > src->cols )
    {
        if( !((src->cols > 1) ^ (CV_MAT_CN(src->type) > 1)) )
            CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or rows must be =1" );

        s_dims = CV_MAT_CN(src->type)*src->cols;
        s_count = src->rows;
    }
    else
    {
        if( !((src->rows > 1) ^ (CV_MAT_CN(src->type) > 1)) )
            CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or rows must be =1" );

        s_dims = CV_MAT_CN(src->type)*src->rows;
        s_count = src->cols;
    }

    // reshape a multi-channel vector into a single-channel count x dims matrix
    if( src->rows == 1 || src->cols == 1 )
        src = cvReshape( src, &_src, 1, s_count );

    // ---- deduce the output layout the same way ----
    if( dst->rows > dst->cols )
    {
        if( !((dst->cols > 1) ^ (CV_MAT_CN(dst->type) > 1)) )
            CV_ERROR( CV_StsBadSize,
            "Either the number of channels or columns or rows in the input matrix must be =1" );

        d_dims = CV_MAT_CN(dst->type)*dst->cols;
        d_count = dst->rows;
    }
    else
    {
        if( !((dst->rows > 1) ^ (CV_MAT_CN(dst->type) > 1)) )
            CV_ERROR( CV_StsBadSize,
            "Either the number of channels or columns or rows in the output matrix must be =1" );

        d_dims = CV_MAT_CN(dst->type)*dst->rows;
        d_count = dst->cols;
    }

    if( dst->rows == 1 || dst->cols == 1 )
        dst = cvReshape( dst, &_dst, 1, d_count );

    if( s_count != d_count )
        CV_ERROR( CV_StsUnmatchedSizes, "Both matrices must have the same number of points" );

    if( CV_MAT_DEPTH(src->type) < CV_32F || CV_MAT_DEPTH(dst->type) < CV_32F )
        CV_ERROR( CV_StsUnsupportedFormat,
        "Both matrices must be floating-point (single or double precision)" );

    if( s_dims < 2 || s_dims > 4 || d_dims < 2 || d_dims > 4 )
        CV_ERROR( CV_StsOutOfRange,
        "Both input and output point dimensionality must be 2, 3 or 4" );

    if( s_dims < d_dims - 1 || s_dims > d_dims + 1 )
        CV_ERROR( CV_StsUnmatchedSizes,
        "The dimensionalities of input and output point sets differ too much" );

    if( s_dims == d_dims - 1 )
    {
        // output has one extra coordinate: split dst into the part that
        // receives the src data and a slice ("ones") filled with 1's
        if( d_count == dst->rows )
        {
            ones = cvGetSubRect( dst, &_ones, cvRect( s_dims, 0, 1, d_count ));
            dst = cvGetSubRect( dst, &_dst, cvRect( 0, 0, s_dims, d_count ));
        }
        else
        {
            ones = cvGetSubRect( dst, &_ones, cvRect( 0, s_dims, d_count, 1 ));
            dst = cvGetSubRect( dst, &_dst, cvRect( 0, 0, d_count, s_dims ));
        }
    }

    if( s_dims <= d_dims )
    {
        // same (effective) dimensionality: copy/convert, transposing if
        // the two matrices use opposite layouts
        if( src->rows == dst->rows && src->cols == dst->cols )
        {
            if( CV_ARE_TYPES_EQ( src, dst ) )
                cvCopy( src, dst );
            else
                cvConvert( src, dst );
        }
        else
        {
            if( !CV_ARE_TYPES_EQ( src, dst ))
            {
                CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type ));
                cvConvert( src, temp );
                src = temp;
            }
            cvTranspose( src, dst );
        }

        if( ones )
            cvSet( ones, cvRealScalar(1.) );
    }
    else
    {
        // d_dims == s_dims - 1: perspective division by the last coordinate
        int s_plane_stride, s_stride, d_plane_stride, d_stride, elem_size;

        if( !CV_ARE_TYPES_EQ( src, dst ))
        {
            CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type ));
            cvConvert( src, temp );
            src = temp;
        }

        elem_size = CV_ELEM_SIZE(src->type);

        // element strides: plane stride jumps between coordinate planes,
        // stride jumps between consecutive points (both in elements)
        if( s_count == src->cols )
            s_plane_stride = src->step / elem_size, s_stride = 1;
        else
            s_stride = src->step / elem_size, s_plane_stride = 1;

        if( d_count == dst->cols )
            d_plane_stride = dst->step / elem_size, d_stride = 1;
        else
            d_stride = dst->step / elem_size, d_plane_stride = 1;

        CV_CALL( denom = cvCreateMat( 1, d_count, dst->type ));

        if( CV_MAT_DEPTH(dst->type) == CV_32F )
        {
            const float* xs = src->data.fl;
            const float* ys = xs + s_plane_stride;
            const float* zs = 0;
            const float* ws = xs + (s_dims - 1)*s_plane_stride;  // the w plane

            float* iw = denom->data.fl;

            float* xd = dst->data.fl;
            float* yd = xd + d_plane_stride;
            float* zd = 0;

            if( d_dims == 3 )
            {
                zs = ys + s_plane_stride;
                zd = yd + d_plane_stride;
            }

            // gather the divisors, replacing near-zero w's with 1
            for( i = 0; i < d_count; i++, ws += s_stride )
            {
                float t = *ws;
                iw[i] = fabs((double)t) > FLT_EPSILON ? t : 1.f;
            }

            cvDiv( 0, denom, denom );   // denom <- 1/denom (reciprocals)

            if( d_dims == 3 )
                for( i = 0; i < d_count; i++ )
                {
                    float w = iw[i];
                    float x = *xs * w, y = *ys * w, z = *zs * w;
                    xs += s_stride; ys += s_stride; zs += s_stride;
                    *xd = x; *yd = y; *zd = z;
                    xd += d_stride; yd += d_stride; zd += d_stride;
                }
            else
                for( i = 0; i < d_count; i++ )
                {
                    float w = iw[i];
                    float x = *xs * w, y = *ys * w;
                    xs += s_stride; ys += s_stride;
                    *xd = x; *yd = y;
                    xd += d_stride; yd += d_stride;
                }
        }
        else
        {
            // identical logic in double precision
            const double* xs = src->data.db;
            const double* ys = xs + s_plane_stride;
            const double* zs = 0;
            const double* ws = xs + (s_dims - 1)*s_plane_stride;

            double* iw = denom->data.db;

            double* xd = dst->data.db;
            double* yd = xd + d_plane_stride;
            double* zd = 0;

            if( d_dims == 3 )
            {
                zs = ys + s_plane_stride;
                zd = yd + d_plane_stride;
            }

            for( i = 0; i < d_count; i++, ws += s_stride )
            {
                double t = *ws;
                iw[i] = fabs(t) > DBL_EPSILON ? t : 1.;
            }

            cvDiv( 0, denom, denom );

            if( d_dims == 3 )
                for( i = 0; i < d_count; i++ )
                {
                    double w = iw[i];
                    double x = *xs * w, y = *ys * w, z = *zs * w;
                    xs += s_stride; ys += s_stride; zs += s_stride;
                    *xd = x; *yd = y; *zd = z;
                    xd += d_stride; yd += d_stride; zd += d_stride;
                }
            else
                for( i = 0; i < d_count; i++ )
                {
                    double w = iw[i];
                    double x = *xs * w, y = *ys * w;
                    xs += s_stride; ys += s_stride;
                    *xd = x; *yd = y;
                    xd += d_stride; yd += d_stride;
                }
        }
    }

    __END__;

    cvReleaseMat( &denom );
    cvReleaseMat( &temp );
}
+\r
+namespace cv\r
+{\r
+\r
/* Shared implementation of the two findHomography() overloads: wraps
   the argument vectors into CvMat headers, calls the C function, and
   returns a 3x3 64f matrix (all zeros on failure). */
static Mat _findHomography( const Vector<Point2f>& srcPoints,
                            const Vector<Point2f>& dstPoints,
                            int method, double ransacReprojThreshold,
                            Vector<bool>* mask )
{
    Mat H(3, 3, CV_64F);
    CvMat _pt1 = srcPoints, _pt2 = dstPoints;
    CvMat _H = H, _mask, *pmask = 0;
    if( mask )
    {
        mask->resize(srcPoints.size());
        // NOTE(review): wraps the mask storage as a contiguous 8u row;
        // assumes cv::Vector<bool> stores one byte per element (unlike
        // std::vector<bool>) - confirm against the core headers.
        pmask = &(_mask = cvMat(1, mask->size(), CV_8U, (void*)&(*mask)[0]));
    }
    bool ok = cvFindHomography( &_pt1, &_pt2, &_H, method, ransacReprojThreshold, pmask ) > 0;
    if( !ok )
        H = Scalar(0);
    return H;
}
+\r
+Mat findHomography( const Vector<Point2f>& srcPoints,\r
+ const Vector<Point2f>& dstPoints,\r
+ Vector<bool>& mask, int method,\r
+ double ransacReprojThreshold )\r
+{\r
+ return _findHomography(srcPoints, dstPoints, method, ransacReprojThreshold, &mask);\r
+}\r
+\r
+Mat findHomography( const Vector<Point2f>& srcPoints,\r
+ const Vector<Point2f>& dstPoints,\r
+ int method, double ransacReprojThreshold )\r
+{\r
+ return _findHomography(srcPoints, dstPoints, method, ransacReprojThreshold, 0);\r
+}\r
+\r
+static Mat _findFundamentalMat( const Vector<Point2f>& points1,\r
+ const Vector<Point2f>& points2,\r
+ int method, double param1, double param2,\r
+ Vector<bool>* mask )\r
+{\r
+ Mat F(3, 3, CV_64F);\r
+ CvMat _pt1 = points1, _pt2 = points2;\r
+ CvMat _F = F, _mask, *pmask = 0;\r
+ if( mask )\r
+ {\r
+ mask->resize(points1.size());\r
+ pmask = &(_mask = cvMat(1, mask->size(), CV_8U, (void*)&(*mask)[0]));\r
+ }\r
+ int n = cvFindFundamentalMat( &_pt1, &_pt2, &_F, method, param1, param2, pmask );\r
+ if( n <= 0 )\r
+ F = Scalar(0);\r
+ return F;\r
+}\r
+\r
+Mat findFundamentalMat( const Vector<Point2f>& points1,\r
+ const Vector<Point2f>& points2,\r
+ Vector<bool>& mask,\r
+ int method, double param1, double param2 )\r
+{\r
+ return _findFundamentalMat( points1, points2, method, param1, param2, &mask );\r
+}\r
+\r
+Mat findFundamentalMat( const Vector<Point2f>& points1,\r
+ const Vector<Point2f>& points2,\r
+ int method, double param1, double param2 )\r
+{\r
+ return _findFundamentalMat( points1, points2, method, param1, param2, 0 );\r
+}\r
+\r
+void computeCorrespondEpilines( const Vector<Point2f>& points,\r
+ int whichImage, const Mat& F,\r
+ Vector<Vec3f>& lines )\r
+{\r
+ lines.resize(points.size());\r
+ CvMat _points = points, _lines = lines, _F = F;\r
+ cvComputeCorrespondEpilines(&_points, whichImage, &_F, &_lines);\r
+}\r
+\r
+void convertPointsHomogeneous( const Vector<Point2f>& src,\r
+ Vector<Point3f>& dst )\r
+{\r
+ dst.resize(src.size());\r
+ CvMat _src = src, _dst = dst;\r
+ cvConvertPointsHomogeneous(&_src, &_dst);\r
+}\r
+\r
+void convertPointsHomogeneous( const Vector<Point3f>& src,\r
+ Vector<Point2f>& dst )\r
+{\r
+ dst.resize(src.size());\r
+ CvMat _src = src, _dst = dst;\r
+ cvConvertPointsHomogeneous(&_src, &_dst);\r
+}\r
+\r
+}\r
+\r
+/* End of file. */\r
cvReleaseMat(&tmpMatrixM);
}
+namespace cv
+{
+\r
+void RQDecomp3x3( const Mat& M, Mat& R, Mat& Q )\r
+{\r
+ R.create(3, 3, M.type());\r
+ Q.create(3, 3, M.type());\r
+\r
+ CvMat _M = M, _R = R, _Q = Q;\r
+ cvRQDecomp3x3(&_M, &_R, &_Q, 0, 0, 0, 0);\r
+}\r
+\r
+Vec3d RQDecomp3x3( const Mat& M, Mat& R, Mat& Q,\r
+ Mat& Qx, Mat& Qy, Mat& Qz )\r
+{\r
+ R.create(3, 3, M.type());\r
+ Q.create(3, 3, M.type());\r
+ Vec3d eulerAngles;\r
+\r
+ CvMat _M = M, _R = R, _Q = Q, _Qx = Qx, _Qy = Qy, _Qz = Qz;\r
+ cvRQDecomp3x3(&_M, &_R, &_Q, &_Qx, &_Qy, &_Qz, (CvPoint3D64f*)&eulerAngles[0]);\r
+ return eulerAngles;\r
+}\r
+
+void decomposeProjectionMatrix( const Mat& projMatrix, Mat& cameraMatrix,\r
+ Mat& rotMatrix, Mat& transVect )\r
+{\r
+ int type = projMatrix.type();\r
+ cameraMatrix.create(3, 3, type);\r
+ rotMatrix.create(3, 3, type);\r
+    transVect.create(4, 1, type);\r
+ CvMat _projMatrix = projMatrix, _cameraMatrix = cameraMatrix;\r
+ CvMat _rotMatrix = rotMatrix, _transVect = transVect;\r
+ cvDecomposeProjectionMatrix(&_projMatrix, &_cameraMatrix, &_rotMatrix,\r
+ &_transVect, 0, 0, 0, 0);\r
+}\r
+\r
+void decomposeProjectionMatrix( const Mat& projMatrix, Mat& cameraMatrix,\r
+ Mat& rotMatrix, Mat& transVect,\r
+ Mat& rotMatrixX, Mat& rotMatrixY,\r
+ Mat& rotMatrixZ, Vec3d& eulerAngles )\r
+{\r
+ int type = projMatrix.type();\r
+ cameraMatrix.create(3, 3, type);\r
+ rotMatrix.create(3, 3, type);\r
+    transVect.create(4, 1, type);\r
+ rotMatrixX.create(3, 3, type);\r
+ rotMatrixY.create(3, 3, type);\r
+ rotMatrixZ.create(3, 3, type);\r
+ CvMat _projMatrix = projMatrix, _cameraMatrix = cameraMatrix;\r
+ CvMat _rotMatrix = rotMatrix, _transVect = transVect;\r
+ CvMat _rotMatrixX = rotMatrixX, _rotMatrixY = rotMatrixY;\r
+ CvMat _rotMatrixZ = rotMatrixZ;\r
+ cvDecomposeProjectionMatrix(&_projMatrix, &_cameraMatrix, &_rotMatrix,\r
+ &_transVect, &_rotMatrixX, &_rotMatrixY,\r
+ &_rotMatrixZ, (CvPoint3D64f*)&eulerAngles[0]);\r
+}\r
+
+}
+
/* End of file. */
CV_IMPL int
-cvRunHaarClassifierCascade( CvHaarClassifierCascade* _cascade,
+cvRunHaarClassifierCascade( const CvHaarClassifierCascade* _cascade,
CvPoint pt, int start_stage )
{
int result = -1;
icvReadHaarClassifier, icvWriteHaarClassifier,
icvCloneHaarClassifier );
+namespace cv
+{
+
+HaarClassifierCascade::HaarClassifierCascade() {}\r
+HaarClassifierCascade::HaarClassifierCascade(const String& filename)\r
+{ load(filename); }\r
+ \r
+bool HaarClassifierCascade::load(const String& filename)\r
+{\r
+ cascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));\r
+ return cascade.obj != 0;\r
+}\r
+\r
+void HaarClassifierCascade::detectMultiScale( const Mat& image,\r
+ Vector<Rect>& objects, double scaleFactor,\r
+ int minNeighbors, int flags,\r
+ Size minSize )\r
+{\r
+ MemStorage storage(cvCreateMemStorage(0));\r
+ CvMat _image = image;\r
+ CvSeq* _objects = cvHaarDetectObjects( &_image, cascade, storage, scaleFactor,\r
+ minNeighbors, flags, minSize );\r
+ Seq<Rect>(_objects).copyTo(objects);\r
+}\r
+\r
+int HaarClassifierCascade::runAt(Point pt, int startStage, int) const\r
+{\r
+ return cvRunHaarClassifierCascade(cascade, pt, startStage);\r
+}\r
+\r
+void HaarClassifierCascade::setImages( const Mat& sum, const Mat& sqsum,\r
+ const Mat& tilted, double scale )\r
+{\r
+ CvMat _sum = sum, _sqsum = sqsum, _tilted = tilted;\r
+ cvSetImagesForHaarClassifierCascade( cascade, &_sum, &_sqsum, &_tilted, scale );\r
+}\r
+
+}
+
/* End of file. */
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-#include "_cvlist.h"
-
-#define halfPi ((float)(CV_PI*0.5))
-#define Pi ((float)CV_PI)
-#define a0 0 /*-4.172325e-7f*/ /*(-(float)0x7)/((float)0x1000000); */
-#define a1 1.000025f /*((float)0x1922253)/((float)0x1000000)*2/Pi; */
-#define a2 -2.652905e-4f /*(-(float)0x2ae6)/((float)0x1000000)*4/(Pi*Pi); */
-#define a3 -0.165624f /*(-(float)0xa45511)/((float)0x1000000)*8/(Pi*Pi*Pi); */
-#define a4 -1.964532e-3f /*(-(float)0x30fd3)/((float)0x1000000)*16/(Pi*Pi*Pi*Pi); */
-#define a5 1.02575e-2f /*((float)0x191cac)/((float)0x1000000)*32/(Pi*Pi*Pi*Pi*Pi); */
-#define a6 -9.580378e-4f /*(-(float)0x3af27)/((float)0x1000000)*64/(Pi*Pi*Pi*Pi*Pi*Pi); */
-
-#define _sin(x) ((((((a6*(x) + a5)*(x) + a4)*(x) + a3)*(x) + a2)*(x) + a1)*(x) + a0)
-#define _cos(x) _sin(halfPi - (x))
-
-/****************************************************************************************\
-* Classical Hough Transform *
-\****************************************************************************************/
-
-typedef struct CvLinePolar
-{
- float rho;
- float angle;
-}
-CvLinePolar;
-
-/*=====================================================================================*/
-
-#define hough_cmp_gt(l1,l2) (aux[l1] > aux[l2])
-
-static CV_IMPLEMENT_QSORT_EX( icvHoughSortDescent32s, int, hough_cmp_gt, const int* )
-
-/*
-Here image is an input raster;
-step is it's step; size characterizes it's ROI;
-rho and theta are discretization steps (in pixels and radians correspondingly).
-threshold is the minimum number of pixels in the feature for it
-to be a candidate for line. lines is the output
-array of (rho, theta) pairs. linesMax is the buffer size (number of pairs).
-Functions return the actual number of found lines.
-*/
-static void
-icvHoughLinesStandard( const CvMat* img, float rho, float theta,
- int threshold, CvSeq *lines, int linesMax )
-{
- int *accum = 0;
- int *sort_buf=0;
- float *tabSin = 0;
- float *tabCos = 0;
-
- CV_FUNCNAME( "icvHoughLinesStandard" );
-
- __BEGIN__;
-
- const uchar* image;
- int step, width, height;
- int numangle, numrho;
- int total = 0;
- float ang;
- int r, n;
- int i, j;
- float irho = 1 / rho;
- double scale;
-
- CV_ASSERT( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
-
- image = img->data.ptr;
- step = img->step;
- width = img->cols;
- height = img->rows;
-
- numangle = cvRound(CV_PI / theta);
- numrho = cvRound(((width + height) * 2 + 1) / rho);
-
- CV_CALL( accum = (int*)cvAlloc( sizeof(accum[0]) * (numangle+2) * (numrho+2) ));
- CV_CALL( sort_buf = (int*)cvAlloc( sizeof(accum[0]) * numangle * numrho ));
- CV_CALL( tabSin = (float*)cvAlloc( sizeof(tabSin[0]) * numangle ));
- CV_CALL( tabCos = (float*)cvAlloc( sizeof(tabCos[0]) * numangle ));
- memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
-
- for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
- {
- tabSin[n] = (float)(sin(ang) * irho);
- tabCos[n] = (float)(cos(ang) * irho);
- }
-
- // stage 1. fill accumulator
- for( i = 0; i < height; i++ )
- for( j = 0; j < width; j++ )
- {
- if( image[i * step + j] != 0 )
- for( n = 0; n < numangle; n++ )
- {
- r = cvRound( j * tabCos[n] + i * tabSin[n] );
- r += (numrho - 1) / 2;
- accum[(n+1) * (numrho+2) + r+1]++;
- }
- }
-
- // stage 2. find local maximums
- for( r = 0; r < numrho; r++ )
- for( n = 0; n < numangle; n++ )
- {
- int base = (n+1) * (numrho+2) + r+1;
- if( accum[base] > threshold &&
- accum[base] > accum[base - 1] && accum[base] >= accum[base + 1] &&
- accum[base] > accum[base - numrho - 2] && accum[base] >= accum[base + numrho + 2] )
- sort_buf[total++] = base;
- }
-
- // stage 3. sort the detected lines by accumulator value
- icvHoughSortDescent32s( sort_buf, total, accum );
-
- // stage 4. store the first min(total,linesMax) lines to the output buffer
- linesMax = MIN(linesMax, total);
- scale = 1./(numrho+2);
- for( i = 0; i < linesMax; i++ )
- {
- CvLinePolar line;
- int idx = sort_buf[i];
- int n = cvFloor(idx*scale) - 1;
- int r = idx - (n+1)*(numrho+2) - 1;
- line.rho = (r - (numrho - 1)*0.5f) * rho;
- line.angle = n * theta;
- cvSeqPush( lines, &line );
- }
-
- __END__;
-
- cvFree( &sort_buf );
- cvFree( &tabSin );
- cvFree( &tabCos );
- cvFree( &accum );
-}
-
-
-/****************************************************************************************\
-* Multi-Scale variant of Classical Hough Transform *
-\****************************************************************************************/
-
-#if defined _MSC_VER && _MSC_VER >= 1200
-#pragma warning( disable: 4714 )
-#endif
-
-//DECLARE_AND_IMPLEMENT_LIST( _index, h_ );
-IMPLEMENT_LIST( _index, h_ )
-
-static void
-icvHoughLinesSDiv( const CvMat* img,
- float rho, float theta, int threshold,
- int srn, int stn,
- CvSeq* lines, int linesMax )
-{
- uchar *caccum = 0;
- uchar *buffer = 0;
- float *sinTable = 0;
- int *x = 0;
- int *y = 0;
- _CVLIST *list = 0;
-
- CV_FUNCNAME( "icvHoughLinesSDiv" );
-
- __BEGIN__;
-
-#define _POINT(row, column)\
- (image_src[(row)*step+(column)])
-
- uchar *mcaccum = 0;
- int rn, tn; /* number of rho and theta discrete values */
- int index, i;
- int ri, ti, ti1, ti0;
- int row, col;
- float r, t; /* Current rho and theta */
- float rv; /* Some temporary rho value */
- float irho;
- float itheta;
- float srho, stheta;
- float isrho, istheta;
-
- const uchar* image_src;
- int w, h, step;
- int fn = 0;
- float xc, yc;
-
- const float d2r = (float)(Pi / 180);
- int sfn = srn * stn;
- int fi;
- int count;
- int cmax = 0;
-
- CVPOS pos;
- _index *pindex;
- _index vi;
-
- CV_ASSERT( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
- CV_ASSERT( linesMax > 0 && rho > 0 && theta > 0 );
-
- threshold = MIN( threshold, 255 );
-
- image_src = img->data.ptr;
- step = img->step;
- w = img->cols;
- h = img->rows;
-
- irho = 1 / rho;
- itheta = 1 / theta;
- srho = rho / srn;
- stheta = theta / stn;
- isrho = 1 / srho;
- istheta = 1 / stheta;
-
- rn = cvFloor( sqrt( (double)w * w + (double)h * h ) * irho );
- tn = cvFloor( 2 * Pi * itheta );
-
- list = h_create_list__index( linesMax < 1000 ? linesMax : 1000 );
- vi.value = threshold;
- vi.rho = -1;
- h_add_head__index( list, &vi );
-
- /* Precalculating sin */
- CV_CALL( sinTable = (float*)cvAlloc( 5 * tn * stn * sizeof( float )));
-
- for( index = 0; index < 5 * tn * stn; index++ )
- {
- sinTable[index] = (float)cos( stheta * index * 0.2f );
- }
-
- CV_CALL( caccum = (uchar*)cvAlloc( rn * tn * sizeof( caccum[0] )));
- memset( caccum, 0, rn * tn * sizeof( caccum[0] ));
-
- /* Counting all feature pixels */
- for( row = 0; row < h; row++ )
- for( col = 0; col < w; col++ )
- fn += _POINT( row, col ) != 0;
-
- CV_CALL( x = (int*)cvAlloc( fn * sizeof(x[0])));
- CV_CALL( y = (int*)cvAlloc( fn * sizeof(y[0])));
-
- /* Full Hough Transform (it's accumulator update part) */
- fi = 0;
- for( row = 0; row < h; row++ )
- {
- for( col = 0; col < w; col++ )
- {
- if( _POINT( row, col ))
- {
- int halftn;
- float r0;
- float scale_factor;
- int iprev = -1;
- float phi, phi1;
- float theta_it; /* Value of theta for iterating */
-
- /* Remember the feature point */
- x[fi] = col;
- y[fi] = row;
- fi++;
-
- yc = (float) row + 0.5f;
- xc = (float) col + 0.5f;
-
- /* Update the accumulator */
- t = (float) fabs( cvFastArctan( yc, xc ) * d2r );
- r = (float) sqrt( (double)xc * xc + (double)yc * yc );
- r0 = r * irho;
- ti0 = cvFloor( (t + Pi / 2) * itheta );
-
- caccum[ti0]++;
-
- theta_it = rho / r;
- theta_it = theta_it < theta ? theta_it : theta;
- scale_factor = theta_it * itheta;
- halftn = cvFloor( Pi / theta_it );
- for( ti1 = 1, phi = theta_it - halfPi, phi1 = (theta_it + t) * itheta;
- ti1 < halftn; ti1++, phi += theta_it, phi1 += scale_factor )
- {
- rv = r0 * _cos( phi );
- i = cvFloor( rv ) * tn;
- i += cvFloor( phi1 );
- assert( i >= 0 );
- assert( i < rn * tn );
- caccum[i] = (uchar) (caccum[i] + ((i ^ iprev) != 0));
- iprev = i;
- if( cmax < caccum[i] )
- cmax = caccum[i];
- }
- }
- }
- }
-
- /* Starting additional analysis */
- count = 0;
- for( ri = 0; ri < rn; ri++ )
- {
- for( ti = 0; ti < tn; ti++ )
- {
- if( caccum[ri * tn + ti > threshold] )
- {
- count++;
- }
- }
- }
-
- if( count * 100 > rn * tn )
- {
- icvHoughLinesStandard( img, rho, theta, threshold, lines, linesMax );
- EXIT;
- }
-
- CV_CALL( buffer = (uchar *) cvAlloc(srn * stn + 2));
- mcaccum = buffer + 1;
-
- count = 0;
- for( ri = 0; ri < rn; ri++ )
- {
- for( ti = 0; ti < tn; ti++ )
- {
- if( caccum[ri * tn + ti] > threshold )
- {
- count++;
- memset( mcaccum, 0, sfn * sizeof( uchar ));
-
- for( index = 0; index < fn; index++ )
- {
- int ti2;
- float r0;
-
- yc = (float) y[index] + 0.5f;
- xc = (float) x[index] + 0.5f;
-
- /* Update the accumulator */
- t = (float) fabs( cvFastArctan( yc, xc ) * d2r );
- r = (float) sqrt( (double)xc * xc + (double)yc * yc ) * isrho;
- ti0 = cvFloor( (t + Pi * 0.5f) * istheta );
- ti2 = (ti * stn - ti0) * 5;
- r0 = (float) ri *srn;
-
- for( ti1 = 0 /*, phi = ti*theta - Pi/2 - t */ ; ti1 < stn; ti1++, ti2 += 5
- /*phi += stheta */ )
- {
- /*rv = r*_cos(phi) - r0; */
- rv = r * sinTable[(int) (abs( ti2 ))] - r0;
- i = cvFloor( rv ) * stn + ti1;
-
- i = CV_IMAX( i, -1 );
- i = CV_IMIN( i, sfn );
- mcaccum[i]++;
- assert( i >= -1 );
- assert( i <= sfn );
- }
- }
-
- /* Find peaks in maccum... */
- for( index = 0; index < sfn; index++ )
- {
- i = 0;
- pos = h_get_tail_pos__index( list );
- if( h_get_prev__index( &pos )->value < mcaccum[index] )
- {
- vi.value = mcaccum[index];
- vi.rho = index / stn * srho + ri * rho;
- vi.theta = index % stn * stheta + ti * theta - halfPi;
- while( h_is_pos__index( pos ))
- {
- if( h_get__index( pos )->value > mcaccum[index] )
- {
- h_insert_after__index( list, pos, &vi );
- if( h_get_count__index( list ) > linesMax )
- {
- h_remove_tail__index( list );
- }
- break;
- }
- h_get_prev__index( &pos );
- }
- if( !h_is_pos__index( pos ))
- {
- h_add_head__index( list, &vi );
- if( h_get_count__index( list ) > linesMax )
- {
- h_remove_tail__index( list );
- }
- }
- }
- }
- }
- }
- }
-
- pos = h_get_head_pos__index( list );
- if( h_get_count__index( list ) == 1 )
- {
- if( h_get__index( pos )->rho < 0 )
- {
- h_clear_list__index( list );
- }
- }
- else
- {
- while( h_is_pos__index( pos ))
- {
- CvLinePolar line;
- pindex = h_get__index( pos );
- if( pindex->rho < 0 )
- {
- /* This should be the last element... */
- h_get_next__index( &pos );
- assert( !h_is_pos__index( pos ));
- break;
- }
- line.rho = pindex->rho;
- line.angle = pindex->theta;
- cvSeqPush( lines, &line );
-
- if( lines->total >= linesMax )
- EXIT;
- h_get_next__index( &pos );
- }
- }
-
- __END__;
-
- h_destroy_list__index( list );
- cvFree( &sinTable );
- cvFree( &x );
- cvFree( &y );
- cvFree( &caccum );
- cvFree( &buffer );
-}
-
-
-/****************************************************************************************\
-* Probabilistic Hough Transform *
-\****************************************************************************************/
-
-#if defined WIN64 && defined EM64T && _MSC_VER == 1400 && !defined CV_ICC
-#pragma optimize("",off)
-#endif
-
-static void
-icvHoughLinesProbabalistic( CvMat* image,
- float rho, float theta, int threshold,
- int lineLength, int lineGap,
- CvSeq *lines, int linesMax )
-{
- CvMat* accum = 0;
- CvMat* mask = 0;
- CvMat* trigtab = 0;
- CvMemStorage* storage = 0;
-
- CV_FUNCNAME( "icvHoughLinesProbalistic" );
-
- __BEGIN__;
-
- CvSeq* seq;
- CvSeqWriter writer;
- int width, height;
- int numangle, numrho;
- float ang;
- int r, n, count;
- CvPoint pt;
- float irho = 1 / rho;
- CvRNG rng = cvRNG(-1);
- const float* ttab;
- uchar* mdata0;
-
- CV_ASSERT( CV_IS_MAT(image) && CV_MAT_TYPE(image->type) == CV_8UC1 );
-
- width = image->cols;
- height = image->rows;
-
- numangle = cvRound(CV_PI / theta);
- numrho = cvRound(((width + height) * 2 + 1) / rho);
-
- CV_CALL( accum = cvCreateMat( numangle, numrho, CV_32SC1 ));
- CV_CALL( mask = cvCreateMat( height, width, CV_8UC1 ));
- CV_CALL( trigtab = cvCreateMat( 1, numangle, CV_32FC2 ));
- cvZero( accum );
-
- CV_CALL( storage = cvCreateMemStorage(0) );
-
- for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
- {
- trigtab->data.fl[n*2] = (float)(cos(ang) * irho);
- trigtab->data.fl[n*2+1] = (float)(sin(ang) * irho);
- }
- ttab = trigtab->data.fl;
- mdata0 = mask->data.ptr;
-
- CV_CALL( cvStartWriteSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage, &writer ));
-
- // stage 1. collect non-zero image points
- for( pt.y = 0, count = 0; pt.y < height; pt.y++ )
- {
- const uchar* data = image->data.ptr + pt.y*image->step;
- uchar* mdata = mdata0 + pt.y*width;
- for( pt.x = 0; pt.x < width; pt.x++ )
- {
- if( data[pt.x] )
- {
- mdata[pt.x] = (uchar)1;
- CV_WRITE_SEQ_ELEM( pt, writer );
- }
- else
- mdata[pt.x] = 0;
- }
- }
-
- seq = cvEndWriteSeq( &writer );
- count = seq->total;
-
- // stage 2. process all the points in random order
- for( ; count > 0; count-- )
- {
- // choose random point out of the remaining ones
- int idx = cvRandInt(&rng) % count;
- int max_val = threshold-1, max_n = 0;
- CvPoint* pt = (CvPoint*)cvGetSeqElem( seq, idx );
- CvPoint line_end[2] = {{0,0}, {0,0}};
- float a, b;
- int* adata = accum->data.i;
- int i, j, k, x0, y0, dx0, dy0, xflag;
- int good_line;
- const int shift = 16;
-
- i = pt->y;
- j = pt->x;
-
- // "remove" it by overriding it with the last element
- *pt = *(CvPoint*)cvGetSeqElem( seq, count-1 );
-
- // check if it has been excluded already (i.e. belongs to some other line)
- if( !mdata0[i*width + j] )
- continue;
-
- // update accumulator, find the most probable line
- for( n = 0; n < numangle; n++, adata += numrho )
- {
- r = cvRound( j * ttab[n*2] + i * ttab[n*2+1] );
- r += (numrho - 1) / 2;
- int val = ++adata[r];
- if( max_val < val )
- {
- max_val = val;
- max_n = n;
- }
- }
-
- // if it is too "weak" candidate, continue with another point
- if( max_val < threshold )
- continue;
-
- // from the current point walk in each direction
- // along the found line and extract the line segment
- a = -ttab[max_n*2+1];
- b = ttab[max_n*2];
- x0 = j;
- y0 = i;
- if( fabs(a) > fabs(b) )
- {
- xflag = 1;
- dx0 = a > 0 ? 1 : -1;
- dy0 = cvRound( b*(1 << shift)/fabs(a) );
- y0 = (y0 << shift) + (1 << (shift-1));
- }
- else
- {
- xflag = 0;
- dy0 = b > 0 ? 1 : -1;
- dx0 = cvRound( a*(1 << shift)/fabs(b) );
- x0 = (x0 << shift) + (1 << (shift-1));
- }
-
- for( k = 0; k < 2; k++ )
- {
- int gap = 0, x = x0, y = y0, dx = dx0, dy = dy0;
-
- if( k > 0 )
- dx = -dx, dy = -dy;
-
- // walk along the line using fixed-point arithmetics,
- // stop at the image border or in case of too big gap
- for( ;; x += dx, y += dy )
- {
- uchar* mdata;
- int i1, j1;
-
- if( xflag )
- {
- j1 = x;
- i1 = y >> shift;
- }
- else
- {
- j1 = x >> shift;
- i1 = y;
- }
-
- if( j1 < 0 || j1 >= width || i1 < 0 || i1 >= height )
- break;
-
- mdata = mdata0 + i1*width + j1;
-
- // for each non-zero point:
- // update line end,
- // clear the mask element
- // reset the gap
- if( *mdata )
- {
- gap = 0;
- line_end[k].y = i1;
- line_end[k].x = j1;
- }
- else if( ++gap > lineGap )
- break;
- }
- }
-
- good_line = abs(line_end[1].x - line_end[0].x) >= lineLength ||
- abs(line_end[1].y - line_end[0].y) >= lineLength;
-
- for( k = 0; k < 2; k++ )
- {
- int x = x0, y = y0, dx = dx0, dy = dy0;
-
- if( k > 0 )
- dx = -dx, dy = -dy;
-
- // walk along the line using fixed-point arithmetics,
- // stop at the image border or in case of too big gap
- for( ;; x += dx, y += dy )
- {
- uchar* mdata;
- int i1, j1;
-
- if( xflag )
- {
- j1 = x;
- i1 = y >> shift;
- }
- else
- {
- j1 = x >> shift;
- i1 = y;
- }
-
- mdata = mdata0 + i1*width + j1;
-
- // for each non-zero point:
- // update line end,
- // clear the mask element
- // reset the gap
- if( *mdata )
- {
- if( good_line )
- {
- adata = accum->data.i;
- for( n = 0; n < numangle; n++, adata += numrho )
- {
- r = cvRound( j1 * ttab[n*2] + i1 * ttab[n*2+1] );
- r += (numrho - 1) / 2;
- adata[r]--;
- }
- }
- *mdata = 0;
- }
-
- if( i1 == line_end[k].y && j1 == line_end[k].x )
- break;
- }
- }
-
- if( good_line )
- {
- CvRect lr = { line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y };
- cvSeqPush( lines, &lr );
- if( lines->total >= linesMax )
- EXIT;
- }
- }
-
- __END__;
-
- cvReleaseMat( &accum );
- cvReleaseMat( &mask );
- cvReleaseMat( &trigtab );
- cvReleaseMemStorage( &storage );
-}
-
-
-#if defined WIN64 && defined EM64T && _MSC_VER == 1400 && !defined CV_ICC
-#pragma optimize("",on)
-#endif
-
-
-/* Wrapper function for standard hough transform */
-CV_IMPL CvSeq*
-cvHoughLines2( CvArr* src_image, void* lineStorage, int method,
- double rho, double theta, int threshold,
- double param1, double param2 )
-{
- CvSeq* result = 0;
-
- CV_FUNCNAME( "cvHoughLines" );
-
- __BEGIN__;
-
- CvMat stub, *img = (CvMat*)src_image;
- CvMat* mat = 0;
- CvSeq* lines = 0;
- CvSeq lines_header;
- CvSeqBlock lines_block;
- int lineType, elemSize;
- int linesMax = INT_MAX;
- int iparam1, iparam2;
-
- CV_CALL( img = cvGetMat( img, &stub ));
-
- if( !CV_IS_MASK_ARR(img))
- CV_ERROR( CV_StsBadArg, "The source image must be 8-bit, single-channel" );
-
- if( !lineStorage )
- CV_ERROR( CV_StsNullPtr, "NULL destination" );
-
- if( rho <= 0 || theta <= 0 || threshold <= 0 )
- CV_ERROR( CV_StsOutOfRange, "rho, theta and threshold must be positive" );
-
- if( method != CV_HOUGH_PROBABILISTIC )
- {
- lineType = CV_32FC2;
- elemSize = sizeof(float)*2;
- }
- else
- {
- lineType = CV_32SC4;
- elemSize = sizeof(int)*4;
- }
-
- if( CV_IS_STORAGE( lineStorage ))
- {
- CV_CALL( lines = cvCreateSeq( lineType, sizeof(CvSeq), elemSize, (CvMemStorage*)lineStorage ));
- }
- else if( CV_IS_MAT( lineStorage ))
- {
- mat = (CvMat*)lineStorage;
-
- if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) )
- CV_ERROR( CV_StsBadArg,
- "The destination matrix should be continuous and have a single row or a single column" );
-
- if( CV_MAT_TYPE( mat->type ) != lineType )
- CV_ERROR( CV_StsBadArg,
- "The destination matrix data type is inappropriate, see the manual" );
-
- CV_CALL( lines = cvMakeSeqHeaderForArray( lineType, sizeof(CvSeq), elemSize, mat->data.ptr,
- mat->rows + mat->cols - 1, &lines_header, &lines_block ));
- linesMax = lines->total;
- CV_CALL( cvClearSeq( lines ));
- }
- else
- {
- CV_ERROR( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
- }
-
- iparam1 = cvRound(param1);
- iparam2 = cvRound(param2);
-
- switch( method )
- {
- case CV_HOUGH_STANDARD:
- CV_CALL( icvHoughLinesStandard( img, (float)rho,
- (float)theta, threshold, lines, linesMax ));
- break;
- case CV_HOUGH_MULTI_SCALE:
- CV_CALL( icvHoughLinesSDiv( img, (float)rho, (float)theta,
- threshold, iparam1, iparam2, lines, linesMax ));
- break;
- case CV_HOUGH_PROBABILISTIC:
- CV_CALL( icvHoughLinesProbabalistic( img, (float)rho, (float)theta,
- threshold, iparam1, iparam2, lines, linesMax ));
- break;
- default:
- CV_ERROR( CV_StsBadArg, "Unrecognized method id" );
- }
-
- if( mat )
- {
- if( mat->cols > mat->rows )
- mat->cols = lines->total;
- else
- mat->rows = lines->total;
- }
- else
- {
- result = lines;
- }
-
- __END__;
-
- return result;
-}
-
-
-/****************************************************************************************\
-* Circle Detection *
-\****************************************************************************************/
-
-static void
-icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
- int min_radius, int max_radius,
- int canny_threshold, int acc_threshold,
- CvSeq* circles, int circles_max )
-{
- const int SHIFT = 10, ONE = 1 << SHIFT, R_THRESH = 30;
- CvMat *dx = 0, *dy = 0;
- CvMat *edges = 0;
- CvMat *accum = 0;
- int* sort_buf = 0;
- CvMat* dist_buf = 0;
- CvMemStorage* storage = 0;
-
- CV_FUNCNAME( "icvHoughCirclesGradient" );
-
- __BEGIN__;
-
- int x, y, i, j, center_count, nz_count;
- int rows, cols, arows, acols;
- int astep, *adata;
- float* ddata;
- CvSeq *nz, *centers;
- float idp, dr;
- CvSeqReader reader;
-
- CV_CALL( edges = cvCreateMat( img->rows, img->cols, CV_8UC1 ));
- CV_CALL( cvCanny( img, edges, MAX(canny_threshold/2,1), canny_threshold, 3 ));
-
- CV_CALL( dx = cvCreateMat( img->rows, img->cols, CV_16SC1 ));
- CV_CALL( dy = cvCreateMat( img->rows, img->cols, CV_16SC1 ));
- CV_CALL( cvSobel( img, dx, 1, 0, 3 ));
- CV_CALL( cvSobel( img, dy, 0, 1, 3 ));
-
- if( dp < 1.f )
- dp = 1.f;
- idp = 1.f/dp;
- CV_CALL( accum = cvCreateMat( cvCeil(img->rows*idp)+2, cvCeil(img->cols*idp)+2, CV_32SC1 ));
- CV_CALL( cvZero(accum));
-
- CV_CALL( storage = cvCreateMemStorage() );
- CV_CALL( nz = cvCreateSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage ));
- CV_CALL( centers = cvCreateSeq( CV_32SC1, sizeof(CvSeq), sizeof(int), storage ));
-
- rows = img->rows;
- cols = img->cols;
- arows = accum->rows - 2;
- acols = accum->cols - 2;
- adata = accum->data.i;
- astep = accum->step/sizeof(adata[0]);
-
- for( y = 0; y < rows; y++ )
- {
- const uchar* edges_row = edges->data.ptr + y*edges->step;
- const short* dx_row = (const short*)(dx->data.ptr + y*dx->step);
- const short* dy_row = (const short*)(dy->data.ptr + y*dy->step);
-
- for( x = 0; x < cols; x++ )
- {
- float vx, vy;
- int sx, sy, x0, y0, x1, y1, r, k;
- CvPoint pt;
-
- vx = dx_row[x];
- vy = dy_row[x];
-
- if( !edges_row[x] || (vx == 0 && vy == 0) )
- continue;
-
- if( fabs(vx) < fabs(vy) )
- {
- sx = cvRound(vx*ONE/fabs(vy));
- sy = vy < 0 ? -ONE : ONE;
- }
- else
- {
- assert( vx != 0 );
- sy = cvRound(vy*ONE/fabs(vx));
- sx = vx < 0 ? -ONE : ONE;
- }
-
- x0 = cvRound((x*idp)*ONE) + ONE + (ONE/2);
- y0 = cvRound((y*idp)*ONE) + ONE + (ONE/2);
-
- for( k = 0; k < 2; k++ )
- {
- x0 += min_radius * sx;
- y0 += min_radius * sy;
-
- for( x1 = x0, y1 = y0, r = min_radius; r <= max_radius; x1 += sx, y1 += sy, r++ )
- {
- int x2 = x1 >> SHIFT, y2 = y1 >> SHIFT;
- if( (unsigned)x2 >= (unsigned)acols ||
- (unsigned)y2 >= (unsigned)arows )
- break;
- adata[y2*astep + x2]++;
- }
-
- x0 -= min_radius * sx;
- y0 -= min_radius * sy;
- sx = -sx; sy = -sy;
- }
-
- pt.x = x; pt.y = y;
- cvSeqPush( nz, &pt );
- }
- }
-
- nz_count = nz->total;
- if( !nz_count )
- EXIT;
-
- for( y = 1; y < arows - 1; y++ )
- {
- for( x = 1; x < acols - 1; x++ )
- {
- int base = y*(acols+2) + x;
- if( adata[base] > acc_threshold &&
- adata[base] > adata[base-1] && adata[base] > adata[base+1] &&
- adata[base] > adata[base-acols-2] && adata[base] > adata[base+acols+2] )
- cvSeqPush(centers, &base);
- }
- }
-
- center_count = centers->total;
- if( !center_count )
- EXIT;
-
- CV_CALL( sort_buf = (int*)cvAlloc( MAX(center_count,nz_count)*sizeof(sort_buf[0]) ));
- cvCvtSeqToArray( centers, sort_buf );
-
- icvHoughSortDescent32s( sort_buf, center_count, adata );
- cvClearSeq( centers );
- cvSeqPushMulti( centers, sort_buf, center_count );
-
- CV_CALL( dist_buf = cvCreateMat( 1, nz_count, CV_32FC1 ));
- ddata = dist_buf->data.fl;
-
- dr = dp;
- min_dist = MAX( min_dist, dp );
- min_dist *= min_dist;
-
- for( i = 0; i < centers->total; i++ )
- {
- int ofs = *(int*)cvGetSeqElem( centers, i );
- y = ofs/(acols+2) - 1;
- x = ofs - (y+1)*(acols+2) - 1;
- float cx = (float)(x*dp), cy = (float)(y*dp);
- int start_idx = nz_count - 1;
- float start_dist, dist_sum;
- float r_best = 0, c[3];
- int max_count = R_THRESH;
-
- for( j = 0; j < circles->total; j++ )
- {
- float* c = (float*)cvGetSeqElem( circles, j );
- if( (c[0] - cx)*(c[0] - cx) + (c[1] - cy)*(c[1] - cy) < min_dist )
- break;
- }
-
- if( j < circles->total )
- continue;
-
- cvStartReadSeq( nz, &reader );
- for( j = 0; j < nz_count; j++ )
- {
- CvPoint pt;
- float _dx, _dy;
- CV_READ_SEQ_ELEM( pt, reader );
- _dx = cx - pt.x; _dy = cy - pt.y;
- ddata[j] = _dx*_dx + _dy*_dy;
- sort_buf[j] = j;
- }
-
- cvPow( dist_buf, dist_buf, 0.5 );
- icvHoughSortDescent32s( sort_buf, nz_count, (int*)ddata );
-
- dist_sum = start_dist = ddata[sort_buf[nz_count-1]];
- for( j = nz_count - 2; j >= 0; j-- )
- {
- float d = ddata[sort_buf[j]];
-
- if( d > max_radius )
- break;
-
- if( d - start_dist > dr )
- {
- float r_cur = ddata[sort_buf[(j + start_idx)/2]];
- if( (start_idx - j)*r_best >= max_count*r_cur ||
- (r_best < FLT_EPSILON && start_idx - j >= max_count) )
- {
- r_best = r_cur;
- max_count = start_idx - j;
- }
- start_dist = d;
- start_idx = j;
- dist_sum = 0;
- }
- dist_sum += d;
- }
-
- if( max_count > R_THRESH )
- {
- c[0] = cx;
- c[1] = cy;
- c[2] = (float)r_best;
- cvSeqPush( circles, c );
- if( circles->total > circles_max )
- EXIT;
- }
- }
-
- __END__;
-
- cvReleaseMat( &dist_buf );
- cvFree( &sort_buf );
- cvReleaseMemStorage( &storage );
- cvReleaseMat( &edges );
- cvReleaseMat( &dx );
- cvReleaseMat( &dy );
- cvReleaseMat( &accum );
-}
-
-CV_IMPL CvSeq*
-cvHoughCircles( CvArr* src_image, void* circle_storage,
- int method, double dp, double min_dist,
- double param1, double param2,
- int min_radius, int max_radius )
-{
- CvSeq* result = 0;
-
- CV_FUNCNAME( "cvHoughCircles" );
-
- __BEGIN__;
-
- CvMat stub, *img = (CvMat*)src_image;
- CvMat* mat = 0;
- CvSeq* circles = 0;
- CvSeq circles_header;
- CvSeqBlock circles_block;
- int circles_max = INT_MAX;
- int canny_threshold = cvRound(param1);
- int acc_threshold = cvRound(param2);
-
- CV_CALL( img = cvGetMat( img, &stub ));
-
- if( !CV_IS_MASK_ARR(img))
- CV_ERROR( CV_StsBadArg, "The source image must be 8-bit, single-channel" );
-
- if( !circle_storage )
- CV_ERROR( CV_StsNullPtr, "NULL destination" );
-
- if( dp <= 0 || min_dist <= 0 || canny_threshold <= 0 || acc_threshold <= 0 )
- CV_ERROR( CV_StsOutOfRange, "dp, min_dist, canny_threshold and acc_threshold must be all positive numbers" );
-
- min_radius = MAX( min_radius, 0 );
- if( max_radius <= 0 )
- max_radius = MAX( img->rows, img->cols );
- else if( max_radius <= min_radius )
- max_radius = min_radius + 2;
-
- if( CV_IS_STORAGE( circle_storage ))
- {
- CV_CALL( circles = cvCreateSeq( CV_32FC3, sizeof(CvSeq),
- sizeof(float)*3, (CvMemStorage*)circle_storage ));
- }
- else if( CV_IS_MAT( circle_storage ))
- {
- mat = (CvMat*)circle_storage;
-
- if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) ||
- CV_MAT_TYPE(mat->type) != CV_32FC3 )
- CV_ERROR( CV_StsBadArg,
- "The destination matrix should be continuous and have a single row or a single column" );
-
- CV_CALL( circles = cvMakeSeqHeaderForArray( CV_32FC3, sizeof(CvSeq), sizeof(float)*3,
- mat->data.ptr, mat->rows + mat->cols - 1, &circles_header, &circles_block ));
- circles_max = circles->total;
- CV_CALL( cvClearSeq( circles ));
- }
- else
- {
- CV_ERROR( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
- }
-
- switch( method )
- {
- case CV_HOUGH_GRADIENT:
- CV_CALL( icvHoughCirclesGradient( img, (float)dp, (float)min_dist,
- min_radius, max_radius, canny_threshold,
- acc_threshold, circles, circles_max ));
- break;
- default:
- CV_ERROR( CV_StsBadArg, "Unrecognized method id" );
- }
-
- if( mat )
- {
- if( mat->cols > mat->rows )
- mat->cols = circles->total;
- else
- mat->rows = circles->total;
- }
- else
- result = circles;
-
- __END__;
-
- return result;
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cv.h"\r
+#include "_cvlist.h"\r
+\r
/* Polynomial (degree-6) approximation of sin(x) on [-Pi/2, Pi/2]; the
   commented-out hex forms next to each coefficient are the original
   fixed-point values they were derived from. _cos() is obtained from the
   identity cos(x) = sin(Pi/2 - x). Used by the multi-scale transform to
   avoid libm calls in the inner accumulation loop. */
#define halfPi ((float)(CV_PI*0.5))
#define Pi ((float)CV_PI)
#define a0 0 /*-4.172325e-7f*/ /*(-(float)0x7)/((float)0x1000000); */
#define a1 1.000025f /*((float)0x1922253)/((float)0x1000000)*2/Pi; */
#define a2 -2.652905e-4f /*(-(float)0x2ae6)/((float)0x1000000)*4/(Pi*Pi); */
#define a3 -0.165624f /*(-(float)0xa45511)/((float)0x1000000)*8/(Pi*Pi*Pi); */
#define a4 -1.964532e-3f /*(-(float)0x30fd3)/((float)0x1000000)*16/(Pi*Pi*Pi*Pi); */
#define a5 1.02575e-2f /*((float)0x191cac)/((float)0x1000000)*32/(Pi*Pi*Pi*Pi*Pi); */
#define a6 -9.580378e-4f /*(-(float)0x3af27)/((float)0x1000000)*64/(Pi*Pi*Pi*Pi*Pi*Pi); */

/* Horner evaluation of the polynomial above. */
#define _sin(x) ((((((a6*(x) + a5)*(x) + a4)*(x) + a3)*(x) + a2)*(x) + a1)*(x) + a0)
#define _cos(x) _sin(halfPi - (x))

/****************************************************************************************\
*                               Classical Hough Transform                                *
\****************************************************************************************/

/* One detected line in polar form: rho = distance from origin (pixels),
   angle = line normal orientation (radians). Matches the CV_32FC2 element
   layout written into the output sequence/matrix. */
typedef struct CvLinePolar
{
    float rho;
    float angle;
}
CvLinePolar;

/*=====================================================================================*/

/* Comparator for the index-sort below: sorts indices so that the referenced
   accumulator values come out in descending order. */
#define hough_cmp_gt(l1,l2) (aux[l1] > aux[l2])

/* Instantiates icvHoughSortDescent32s( int* indices, int count, const int* aux ):
   sorts "indices" by descending aux[index]. */
static CV_IMPLEMENT_QSORT_EX( icvHoughSortDescent32s, int, hough_cmp_gt, const int* )
+\r
/*
Here image is an input raster;
step is its step; size characterizes its ROI;
rho and theta are discretization steps (in pixels and radians respectively).
threshold is the minimum number of pixels in the feature for it
to be a candidate for a line. lines is the output
array of (rho, theta) pairs. linesMax is the buffer size (number of pairs).
The functions return the actual number of lines found.
*/
+static void\r
+icvHoughLinesStandard( const CvMat* img, float rho, float theta,\r
+ int threshold, CvSeq *lines, int linesMax )\r
+{\r
+ int *accum = 0;\r
+ int *sort_buf=0;\r
+ float *tabSin = 0;\r
+ float *tabCos = 0;\r
+\r
+ CV_FUNCNAME( "icvHoughLinesStandard" );\r
+\r
+ __BEGIN__;\r
+ \r
+ const uchar* image;\r
+ int step, width, height;\r
+ int numangle, numrho;\r
+ int total = 0;\r
+ float ang;\r
+ int r, n;\r
+ int i, j;\r
+ float irho = 1 / rho;\r
+ double scale;\r
+\r
+ CV_ASSERT( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );\r
+\r
+ image = img->data.ptr;\r
+ step = img->step;\r
+ width = img->cols;\r
+ height = img->rows;\r
+\r
+ numangle = cvRound(CV_PI / theta);\r
+ numrho = cvRound(((width + height) * 2 + 1) / rho);\r
+\r
+ CV_CALL( accum = (int*)cvAlloc( sizeof(accum[0]) * (numangle+2) * (numrho+2) ));\r
+ CV_CALL( sort_buf = (int*)cvAlloc( sizeof(accum[0]) * numangle * numrho ));\r
+ CV_CALL( tabSin = (float*)cvAlloc( sizeof(tabSin[0]) * numangle ));\r
+ CV_CALL( tabCos = (float*)cvAlloc( sizeof(tabCos[0]) * numangle ));\r
+ memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );\r
+\r
+ for( ang = 0, n = 0; n < numangle; ang += theta, n++ )\r
+ {\r
+ tabSin[n] = (float)(sin(ang) * irho);\r
+ tabCos[n] = (float)(cos(ang) * irho);\r
+ }\r
+\r
+ // stage 1. fill accumulator\r
+ for( i = 0; i < height; i++ )\r
+ for( j = 0; j < width; j++ )\r
+ {\r
+ if( image[i * step + j] != 0 )\r
+ for( n = 0; n < numangle; n++ )\r
+ {\r
+ r = cvRound( j * tabCos[n] + i * tabSin[n] );\r
+ r += (numrho - 1) / 2;\r
+ accum[(n+1) * (numrho+2) + r+1]++;\r
+ }\r
+ }\r
+\r
+ // stage 2. find local maximums \r
+ for( r = 0; r < numrho; r++ )\r
+ for( n = 0; n < numangle; n++ )\r
+ {\r
+ int base = (n+1) * (numrho+2) + r+1;\r
+ if( accum[base] > threshold &&\r
+ accum[base] > accum[base - 1] && accum[base] >= accum[base + 1] &&\r
+ accum[base] > accum[base - numrho - 2] && accum[base] >= accum[base + numrho + 2] )\r
+ sort_buf[total++] = base;\r
+ }\r
+\r
+ // stage 3. sort the detected lines by accumulator value\r
+ icvHoughSortDescent32s( sort_buf, total, accum );\r
+ \r
+ // stage 4. store the first min(total,linesMax) lines to the output buffer\r
+ linesMax = MIN(linesMax, total);\r
+ scale = 1./(numrho+2);\r
+ for( i = 0; i < linesMax; i++ )\r
+ {\r
+ CvLinePolar line;\r
+ int idx = sort_buf[i];\r
+ int n = cvFloor(idx*scale) - 1;\r
+ int r = idx - (n+1)*(numrho+2) - 1;\r
+ line.rho = (r - (numrho - 1)*0.5f) * rho;\r
+ line.angle = n * theta;\r
+ cvSeqPush( lines, &line );\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvFree( &sort_buf );\r
+ cvFree( &tabSin );\r
+ cvFree( &tabCos );\r
+ cvFree( &accum );\r
+}\r
+\r
+\r
+/****************************************************************************************\\r
+* Multi-Scale variant of Classical Hough Transform *\r
+\****************************************************************************************/\r
+\r
+#if defined _MSC_VER && _MSC_VER >= 1200\r
+#pragma warning( disable: 4714 )\r
+#endif\r
+\r
+//DECLARE_AND_IMPLEMENT_LIST( _index, h_ );\r
+IMPLEMENT_LIST( _index, h_ )\r
+\r
+static void\r
+icvHoughLinesSDiv( const CvMat* img,\r
+ float rho, float theta, int threshold,\r
+ int srn, int stn,\r
+ CvSeq* lines, int linesMax )\r
+{\r
+ uchar *caccum = 0;\r
+ uchar *buffer = 0;\r
+ float *sinTable = 0;\r
+ int *x = 0;\r
+ int *y = 0;\r
+ _CVLIST *list = 0;\r
+\r
+ CV_FUNCNAME( "icvHoughLinesSDiv" );\r
+\r
+ __BEGIN__;\r
+\r
+#define _POINT(row, column)\\r
+ (image_src[(row)*step+(column)])\r
+\r
+ uchar *mcaccum = 0;\r
+ int rn, tn; /* number of rho and theta discrete values */\r
+ int index, i;\r
+ int ri, ti, ti1, ti0;\r
+ int row, col;\r
+ float r, t; /* Current rho and theta */\r
+ float rv; /* Some temporary rho value */\r
+ float irho;\r
+ float itheta;\r
+ float srho, stheta;\r
+ float isrho, istheta;\r
+\r
+ const uchar* image_src;\r
+ int w, h, step;\r
+ int fn = 0;\r
+ float xc, yc;\r
+\r
+ const float d2r = (float)(Pi / 180);\r
+ int sfn = srn * stn;\r
+ int fi;\r
+ int count;\r
+ int cmax = 0;\r
+ \r
+ CVPOS pos;\r
+ _index *pindex;\r
+ _index vi;\r
+\r
+ CV_ASSERT( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );\r
+ CV_ASSERT( linesMax > 0 && rho > 0 && theta > 0 );\r
+ \r
+ threshold = MIN( threshold, 255 );\r
+\r
+ image_src = img->data.ptr;\r
+ step = img->step;\r
+ w = img->cols;\r
+ h = img->rows;\r
+\r
+ irho = 1 / rho;\r
+ itheta = 1 / theta;\r
+ srho = rho / srn;\r
+ stheta = theta / stn;\r
+ isrho = 1 / srho;\r
+ istheta = 1 / stheta;\r
+\r
+ rn = cvFloor( sqrt( (double)w * w + (double)h * h ) * irho );\r
+ tn = cvFloor( 2 * Pi * itheta );\r
+\r
+ list = h_create_list__index( linesMax < 1000 ? linesMax : 1000 );\r
+ vi.value = threshold;\r
+ vi.rho = -1;\r
+ h_add_head__index( list, &vi );\r
+\r
+ /* Precalculating sin */\r
+ CV_CALL( sinTable = (float*)cvAlloc( 5 * tn * stn * sizeof( float )));\r
+\r
+ for( index = 0; index < 5 * tn * stn; index++ )\r
+ {\r
+ sinTable[index] = (float)cos( stheta * index * 0.2f );\r
+ }\r
+\r
+ CV_CALL( caccum = (uchar*)cvAlloc( rn * tn * sizeof( caccum[0] )));\r
+ memset( caccum, 0, rn * tn * sizeof( caccum[0] ));\r
+\r
+ /* Counting all feature pixels */\r
+ for( row = 0; row < h; row++ )\r
+ for( col = 0; col < w; col++ )\r
+ fn += _POINT( row, col ) != 0;\r
+\r
+ CV_CALL( x = (int*)cvAlloc( fn * sizeof(x[0])));\r
+ CV_CALL( y = (int*)cvAlloc( fn * sizeof(y[0])));\r
+\r
+ /* Full Hough Transform (it's accumulator update part) */\r
+ fi = 0;\r
+ for( row = 0; row < h; row++ )\r
+ {\r
+ for( col = 0; col < w; col++ )\r
+ {\r
+ if( _POINT( row, col ))\r
+ {\r
+ int halftn;\r
+ float r0;\r
+ float scale_factor;\r
+ int iprev = -1;\r
+ float phi, phi1;\r
+ float theta_it; /* Value of theta for iterating */\r
+\r
+ /* Remember the feature point */\r
+ x[fi] = col;\r
+ y[fi] = row;\r
+ fi++;\r
+\r
+ yc = (float) row + 0.5f;\r
+ xc = (float) col + 0.5f;\r
+\r
+ /* Update the accumulator */\r
+ t = (float) fabs( cvFastArctan( yc, xc ) * d2r );\r
+ r = (float) sqrt( (double)xc * xc + (double)yc * yc );\r
+ r0 = r * irho;\r
+ ti0 = cvFloor( (t + Pi / 2) * itheta );\r
+\r
+ caccum[ti0]++;\r
+\r
+ theta_it = rho / r;\r
+ theta_it = theta_it < theta ? theta_it : theta;\r
+ scale_factor = theta_it * itheta;\r
+ halftn = cvFloor( Pi / theta_it );\r
+ for( ti1 = 1, phi = theta_it - halfPi, phi1 = (theta_it + t) * itheta;\r
+ ti1 < halftn; ti1++, phi += theta_it, phi1 += scale_factor )\r
+ {\r
+ rv = r0 * _cos( phi );\r
+ i = cvFloor( rv ) * tn;\r
+ i += cvFloor( phi1 );\r
+ assert( i >= 0 );\r
+ assert( i < rn * tn );\r
+ caccum[i] = (uchar) (caccum[i] + ((i ^ iprev) != 0));\r
+ iprev = i;\r
+ if( cmax < caccum[i] )\r
+ cmax = caccum[i];\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+ /* Starting additional analysis */\r
+ count = 0;\r
+ for( ri = 0; ri < rn; ri++ )\r
+ {\r
+ for( ti = 0; ti < tn; ti++ )\r
+ {\r
+ if( caccum[ri * tn + ti > threshold] )\r
+ {\r
+ count++;\r
+ }\r
+ }\r
+ }\r
+\r
+ if( count * 100 > rn * tn )\r
+ {\r
+ icvHoughLinesStandard( img, rho, theta, threshold, lines, linesMax );\r
+ EXIT;\r
+ }\r
+\r
+ CV_CALL( buffer = (uchar *) cvAlloc(srn * stn + 2));\r
+ mcaccum = buffer + 1;\r
+\r
+ count = 0;\r
+ for( ri = 0; ri < rn; ri++ )\r
+ {\r
+ for( ti = 0; ti < tn; ti++ )\r
+ {\r
+ if( caccum[ri * tn + ti] > threshold )\r
+ {\r
+ count++;\r
+ memset( mcaccum, 0, sfn * sizeof( uchar ));\r
+\r
+ for( index = 0; index < fn; index++ )\r
+ {\r
+ int ti2;\r
+ float r0;\r
+\r
+ yc = (float) y[index] + 0.5f;\r
+ xc = (float) x[index] + 0.5f;\r
+\r
+ /* Update the accumulator */\r
+ t = (float) fabs( cvFastArctan( yc, xc ) * d2r );\r
+ r = (float) sqrt( (double)xc * xc + (double)yc * yc ) * isrho;\r
+ ti0 = cvFloor( (t + Pi * 0.5f) * istheta );\r
+ ti2 = (ti * stn - ti0) * 5;\r
+ r0 = (float) ri *srn;\r
+\r
+ for( ti1 = 0 /*, phi = ti*theta - Pi/2 - t */ ; ti1 < stn; ti1++, ti2 += 5\r
+ /*phi += stheta */ )\r
+ {\r
+ /*rv = r*_cos(phi) - r0; */\r
+ rv = r * sinTable[(int) (abs( ti2 ))] - r0;\r
+ i = cvFloor( rv ) * stn + ti1;\r
+\r
+ i = CV_IMAX( i, -1 );\r
+ i = CV_IMIN( i, sfn );\r
+ mcaccum[i]++;\r
+ assert( i >= -1 );\r
+ assert( i <= sfn );\r
+ }\r
+ }\r
+\r
+ /* Find peaks in maccum... */\r
+ for( index = 0; index < sfn; index++ )\r
+ {\r
+ i = 0;\r
+ pos = h_get_tail_pos__index( list );\r
+ if( h_get_prev__index( &pos )->value < mcaccum[index] )\r
+ {\r
+ vi.value = mcaccum[index];\r
+ vi.rho = index / stn * srho + ri * rho;\r
+ vi.theta = index % stn * stheta + ti * theta - halfPi;\r
+ while( h_is_pos__index( pos ))\r
+ {\r
+ if( h_get__index( pos )->value > mcaccum[index] )\r
+ {\r
+ h_insert_after__index( list, pos, &vi );\r
+ if( h_get_count__index( list ) > linesMax )\r
+ {\r
+ h_remove_tail__index( list );\r
+ }\r
+ break;\r
+ }\r
+ h_get_prev__index( &pos );\r
+ }\r
+ if( !h_is_pos__index( pos ))\r
+ {\r
+ h_add_head__index( list, &vi );\r
+ if( h_get_count__index( list ) > linesMax )\r
+ {\r
+ h_remove_tail__index( list );\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+ pos = h_get_head_pos__index( list );\r
+ if( h_get_count__index( list ) == 1 )\r
+ {\r
+ if( h_get__index( pos )->rho < 0 )\r
+ {\r
+ h_clear_list__index( list );\r
+ }\r
+ }\r
+ else\r
+ {\r
+ while( h_is_pos__index( pos ))\r
+ {\r
+ CvLinePolar line;\r
+ pindex = h_get__index( pos );\r
+ if( pindex->rho < 0 )\r
+ {\r
+ /* This should be the last element... */\r
+ h_get_next__index( &pos );\r
+ assert( !h_is_pos__index( pos ));\r
+ break;\r
+ }\r
+ line.rho = pindex->rho;\r
+ line.angle = pindex->theta;\r
+ cvSeqPush( lines, &line );\r
+\r
+ if( lines->total >= linesMax )\r
+ EXIT;\r
+ h_get_next__index( &pos );\r
+ }\r
+ }\r
+\r
+ __END__;\r
+\r
+ h_destroy_list__index( list );\r
+ cvFree( &sinTable );\r
+ cvFree( &x );\r
+ cvFree( &y );\r
+ cvFree( &caccum );\r
+ cvFree( &buffer );\r
+}\r
+\r
+\r
+/****************************************************************************************\\r
+* Probabilistic Hough Transform *\r
+\****************************************************************************************/\r
+\r
+#if defined WIN64 && defined EM64T && _MSC_VER == 1400 && !defined CV_ICC\r
+#pragma optimize("",off)\r
+#endif\r
+\r
/* Progressive probabilistic Hough transform (kept with its historical
   misspelled name -- callers reference it as icvHoughLinesProbabalistic).
   Picks edge points in random order, votes each into a (theta x rho)
   accumulator, and as soon as some cell reaches "threshold", walks the image
   along that line in both directions (fixed-point stepping) to extract a
   segment, allowing runs of up to lineGap empty pixels. Segments at least
   lineLength long are stored; pixels of an extracted segment are erased from
   the mask and their votes removed so they cannot support further lines.
   Output elements are CvRect-packed endpoint pairs (x1,y1,x2,y2). */
static void
icvHoughLinesProbabalistic( CvMat* image,
                            float rho, float theta, int threshold,
                            int lineLength, int lineGap,
                            CvSeq *lines, int linesMax )
{
    CvMat* accum = 0;
    CvMat* mask = 0;
    CvMat* trigtab = 0;
    CvMemStorage* storage = 0;

    CV_FUNCNAME( "icvHoughLinesProbalistic" );

    __BEGIN__;
    
    CvSeq* seq;
    CvSeqWriter writer;
    int width, height;
    int numangle, numrho;
    float ang;
    int r, n, count;
    CvPoint pt;
    float irho = 1 / rho;
    CvRNG rng = cvRNG(-1);
    const float* ttab;
    uchar* mdata0;

    CV_ASSERT( CV_IS_MAT(image) && CV_MAT_TYPE(image->type) == CV_8UC1 );

    width = image->cols;
    height = image->rows;

    numangle = cvRound(CV_PI / theta);
    numrho = cvRound(((width + height) * 2 + 1) / rho);

    CV_CALL( accum = cvCreateMat( numangle, numrho, CV_32SC1 ));
    CV_CALL( mask = cvCreateMat( height, width, CV_8UC1 ));
    /* interleaved (cos, sin) pairs, pre-scaled by 1/rho */
    CV_CALL( trigtab = cvCreateMat( 1, numangle, CV_32FC2 ));
    cvZero( accum );
    
    CV_CALL( storage = cvCreateMemStorage(0) );
    
    for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
    {
        trigtab->data.fl[n*2] = (float)(cos(ang) * irho);
        trigtab->data.fl[n*2+1] = (float)(sin(ang) * irho);
    }
    ttab = trigtab->data.fl;
    mdata0 = mask->data.ptr;

    CV_CALL( cvStartWriteSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage, &writer )); 

    // stage 1. collect non-zero image points
    for( pt.y = 0, count = 0; pt.y < height; pt.y++ )
    {
        const uchar* data = image->data.ptr + pt.y*image->step;
        uchar* mdata = mdata0 + pt.y*width;
        for( pt.x = 0; pt.x < width; pt.x++ )
        {
            if( data[pt.x] )
            {
                mdata[pt.x] = (uchar)1;
                CV_WRITE_SEQ_ELEM( pt, writer );
            }
            else
                mdata[pt.x] = 0;
        }
    }

    seq = cvEndWriteSeq( &writer );
    count = seq->total;

    // stage 2. process all the points in random order
    for( ; count > 0; count-- )
    {
        // choose random point out of the remaining ones
        int idx = cvRandInt(&rng) % count;
        int max_val = threshold-1, max_n = 0;
        CvPoint* pt = (CvPoint*)cvGetSeqElem( seq, idx );
        CvPoint line_end[2] = {{0,0}, {0,0}};
        float a, b;
        int* adata = accum->data.i;
        int i, j, k, x0, y0, dx0, dy0, xflag;
        int good_line;
        const int shift = 16;   // fixed-point fraction bits for line walking

        i = pt->y;
        j = pt->x;

        // "remove" it by overriding it with the last element
        *pt = *(CvPoint*)cvGetSeqElem( seq, count-1 );

        // check if it has been excluded already (i.e. belongs to some other line)
        if( !mdata0[i*width + j] )
            continue;

        // update accumulator, find the most probable line
        for( n = 0; n < numangle; n++, adata += numrho )
        {
            r = cvRound( j * ttab[n*2] + i * ttab[n*2+1] );
            r += (numrho - 1) / 2;
            int val = ++adata[r];
            if( max_val < val )
            {
                max_val = val;
                max_n = n;
            }
        }

        // if it is too "weak" candidate, continue with another point
        if( max_val < threshold )
            continue;

        // from the current point walk in each direction
        // along the found line and extract the line segment
        a = -ttab[max_n*2+1];
        b = ttab[max_n*2];
        x0 = j;
        y0 = i;
        // step 1 along the dominant axis, fixed-point along the other
        if( fabs(a) > fabs(b) )
        {
            xflag = 1;
            dx0 = a > 0 ? 1 : -1;
            dy0 = cvRound( b*(1 << shift)/fabs(a) );
            y0 = (y0 << shift) + (1 << (shift-1));
        }
        else
        {
            xflag = 0;
            dy0 = b > 0 ? 1 : -1;
            dx0 = cvRound( a*(1 << shift)/fabs(b) );
            x0 = (x0 << shift) + (1 << (shift-1));
        }

        for( k = 0; k < 2; k++ )
        {
            int gap = 0, x = x0, y = y0, dx = dx0, dy = dy0;
            
            if( k > 0 )
                dx = -dx, dy = -dy;

            // walk along the line using fixed-point arithmetics,
            // stop at the image border or in case of too big gap
            for( ;; x += dx, y += dy )
            {
                uchar* mdata;
                int i1, j1;

                if( xflag )
                {
                    j1 = x;
                    i1 = y >> shift;
                }
                else
                {
                    j1 = x >> shift;
                    i1 = y;
                }

                if( j1 < 0 || j1 >= width || i1 < 0 || i1 >= height )
                    break;

                mdata = mdata0 + i1*width + j1;

                // for each non-zero point:
                //    update line end,
                //    clear the mask element
                //    reset the gap
                if( *mdata )
                {
                    gap = 0;
                    line_end[k].y = i1;
                    line_end[k].x = j1;
                }
                else if( ++gap > lineGap )
                    break;
            }
        }

        good_line = abs(line_end[1].x - line_end[0].x) >= lineLength ||
                    abs(line_end[1].y - line_end[0].y) >= lineLength;

        // second pass: retrace the same path to erase the segment's pixels
        // from the mask and (for accepted lines) cancel their votes
        for( k = 0; k < 2; k++ )
        {
            int x = x0, y = y0, dx = dx0, dy = dy0;
            
            if( k > 0 )
                dx = -dx, dy = -dy;

            // walk along the line using fixed-point arithmetics,
            // stop at the image border or in case of too big gap
            for( ;; x += dx, y += dy )
            {
                uchar* mdata;
                int i1, j1;

                if( xflag )
                {
                    j1 = x;
                    i1 = y >> shift;
                }
                else
                {
                    j1 = x >> shift;
                    i1 = y;
                }

                // no bounds check needed: this retraces the first walk's
                // in-bounds path and stops exactly at line_end[k]
                mdata = mdata0 + i1*width + j1;

                // for each non-zero point:
                //    update line end,
                //    clear the mask element
                //    reset the gap
                if( *mdata )
                {
                    if( good_line )
                    {
                        adata = accum->data.i;
                        for( n = 0; n < numangle; n++, adata += numrho )
                        {
                            r = cvRound( j1 * ttab[n*2] + i1 * ttab[n*2+1] );
                            r += (numrho - 1) / 2;
                            adata[r]--;
                        }
                    }
                    *mdata = 0;
                }

                if( i1 == line_end[k].y && j1 == line_end[k].x )
                    break;
            }
        }

        if( good_line )
        {
            // pack the two endpoints into a CvRect-shaped 4-int record
            CvRect lr = { line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y };
            cvSeqPush( lines, &lr );
            if( lines->total >= linesMax )
                EXIT;
        }
    }

    __END__;

    cvReleaseMat( &accum );
    cvReleaseMat( &mask );
    cvReleaseMat( &trigtab );
    cvReleaseMemStorage( &storage );
}
+\r
+\r
+#if defined WIN64 && defined EM64T && _MSC_VER == 1400 && !defined CV_ICC\r
+#pragma optimize("",on)\r
+#endif\r
+\r
+\r
/* Public dispatcher for the line Hough transforms.
   src_image  - 8-bit single-channel binary image (non-zero = feature pixel).
   lineStorage - either a CvMemStorage* (result returned as a CvSeq*) or a
                 continuous single-row/column CvMat* whose size caps the
                 number of lines and is shrunk to the number found.
   method     - CV_HOUGH_STANDARD / CV_HOUGH_PROBABILISTIC / CV_HOUGH_MULTI_SCALE.
   rho, theta - discretization steps; threshold - accumulator threshold.
   param1/param2 - method-specific: srn/stn for multi-scale,
                 lineLength/lineGap for probabilistic (rounded to int here).
   Returns the CvSeq* of lines when a memory storage was given, else 0
   (results are then in the caller's matrix). Element type is CV_32FC2
   (rho, theta) for standard/multi-scale, CV_32SC4 endpoints for
   probabilistic. */
CV_IMPL CvSeq*
cvHoughLines2( CvArr* src_image, void* lineStorage, int method,
               double rho, double theta, int threshold,
               double param1, double param2 )
{
    CvSeq* result = 0;

    CV_FUNCNAME( "cvHoughLines" );

    __BEGIN__;
    
    CvMat stub, *img = (CvMat*)src_image;
    CvMat* mat = 0;
    CvSeq* lines = 0;
    CvSeq lines_header;
    CvSeqBlock lines_block;
    int lineType, elemSize;
    int linesMax = INT_MAX;
    int iparam1, iparam2;

    CV_CALL( img = cvGetMat( img, &stub ));

    if( !CV_IS_MASK_ARR(img))
        CV_ERROR( CV_StsBadArg, "The source image must be 8-bit, single-channel" );

    if( !lineStorage )
        CV_ERROR( CV_StsNullPtr, "NULL destination" );

    if( rho <= 0 || theta <= 0 || threshold <= 0 )
        CV_ERROR( CV_StsOutOfRange, "rho, theta and threshold must be positive" );

    /* probabilistic method emits 4-int segments, the others (rho,theta) pairs */
    if( method != CV_HOUGH_PROBABILISTIC )
    {
        lineType = CV_32FC2;
        elemSize = sizeof(float)*2;
    }
    else
    {
        lineType = CV_32SC4;
        elemSize = sizeof(int)*4;
    }

    if( CV_IS_STORAGE( lineStorage ))
    {
        CV_CALL( lines = cvCreateSeq( lineType, sizeof(CvSeq), elemSize, (CvMemStorage*)lineStorage ));
    }
    else if( CV_IS_MAT( lineStorage ))
    {
        mat = (CvMat*)lineStorage;

        if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) )
            CV_ERROR( CV_StsBadArg,
            "The destination matrix should be continuous and have a single row or a single column" );

        if( CV_MAT_TYPE( mat->type ) != lineType )
            CV_ERROR( CV_StsBadArg,
            "The destination matrix data type is inappropriate, see the manual" );

        /* wrap the caller's matrix in a sequence header so the workers can
           cvSeqPush into it; its capacity caps linesMax */
        CV_CALL( lines = cvMakeSeqHeaderForArray( lineType, sizeof(CvSeq), elemSize, mat->data.ptr,
                                                  mat->rows + mat->cols - 1, &lines_header, &lines_block ));
        linesMax = lines->total;
        CV_CALL( cvClearSeq( lines ));
    }
    else
    {
        CV_ERROR( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
    }

    iparam1 = cvRound(param1);
    iparam2 = cvRound(param2);

    switch( method )
    {
    case CV_HOUGH_STANDARD:
          CV_CALL( icvHoughLinesStandard( img, (float)rho,
                (float)theta, threshold, lines, linesMax ));
          break;
    case CV_HOUGH_MULTI_SCALE:
          CV_CALL( icvHoughLinesSDiv( img, (float)rho, (float)theta,
                threshold, iparam1, iparam2, lines, linesMax ));
          break;
    case CV_HOUGH_PROBABILISTIC:
          CV_CALL( icvHoughLinesProbabalistic( img, (float)rho, (float)theta,
                threshold, iparam1, iparam2, lines, linesMax ));
          break;
    default:
        CV_ERROR( CV_StsBadArg, "Unrecognized method id" );
    }

    /* matrix destination: report the actual count via rows/cols */
    if( mat )
    {
        if( mat->cols > mat->rows )
            mat->cols = lines->total;
        else
            mat->rows = lines->total;
    }
    else
    {
        result = lines;
    }

    __END__;
    
    return result;    
}
+\r
+\r
+/****************************************************************************************\\r
+* Circle Detection *\r
+\****************************************************************************************/\r
+\r
/* Gradient-based (21HT / Hough gradient) circle detection.
   Stage A: runs Canny on "img", then for every edge pixel casts votes into a
   dp-downscaled center accumulator along the Sobel gradient direction (both
   ways), for radii in [min_radius, max_radius], using SHIFT-bit fixed point.
   Stage B: takes local maxima of the accumulator above acc_threshold as
   candidate centers (strongest first), and for each one estimates the best
   radius from the sorted distances of all edge pixels to that center; a
   candidate must collect more than R_THRESH supporting pixels and lie at
   least min_dist from every previously accepted center. Results are pushed
   to "circles" as (x, y, radius) float triplets, at most circles_max. */
static void
icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
                         int min_radius, int max_radius,
                         int canny_threshold, int acc_threshold,
                         CvSeq* circles, int circles_max )
{
    /* SHIFT: fixed-point fraction bits; R_THRESH: min supporting pixels */
    const int SHIFT = 10, ONE = 1 << SHIFT, R_THRESH = 30;
    CvMat *dx = 0, *dy = 0;
    CvMat *edges = 0;
    CvMat *accum = 0;
    int* sort_buf = 0;
    CvMat* dist_buf = 0;
    CvMemStorage* storage = 0;
    
    CV_FUNCNAME( "icvHoughCirclesGradient" );

    __BEGIN__;

    int x, y, i, j, center_count, nz_count;
    int rows, cols, arows, acols;
    int astep, *adata;
    float* ddata;
    CvSeq *nz, *centers;
    float idp, dr;
    CvSeqReader reader;

    CV_CALL( edges = cvCreateMat( img->rows, img->cols, CV_8UC1 ));
    CV_CALL( cvCanny( img, edges, MAX(canny_threshold/2,1), canny_threshold, 3 ));

    CV_CALL( dx = cvCreateMat( img->rows, img->cols, CV_16SC1 ));
    CV_CALL( dy = cvCreateMat( img->rows, img->cols, CV_16SC1 ));
    CV_CALL( cvSobel( img, dx, 1, 0, 3 ));
    CV_CALL( cvSobel( img, dy, 0, 1, 3 ));

    if( dp < 1.f )
        dp = 1.f;
    idp = 1.f/dp;
    /* +2: one-cell guard border for the local-maximum test */
    CV_CALL( accum = cvCreateMat( cvCeil(img->rows*idp)+2, cvCeil(img->cols*idp)+2, CV_32SC1 ));
    CV_CALL( cvZero(accum));

    CV_CALL( storage = cvCreateMemStorage() );
    CV_CALL( nz = cvCreateSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage ));
    CV_CALL( centers = cvCreateSeq( CV_32SC1, sizeof(CvSeq), sizeof(int), storage ));

    rows = img->rows;
    cols = img->cols;
    arows = accum->rows - 2;
    acols = accum->cols - 2;
    adata = accum->data.i;
    astep = accum->step/sizeof(adata[0]);

    /* Stage A: vote for centers along the gradient at each edge pixel */
    for( y = 0; y < rows; y++ )
    {
        const uchar* edges_row = edges->data.ptr + y*edges->step;
        const short* dx_row = (const short*)(dx->data.ptr + y*dx->step);
        const short* dy_row = (const short*)(dy->data.ptr + y*dy->step);

        for( x = 0; x < cols; x++ )
        {
            float vx, vy;
            int sx, sy, x0, y0, x1, y1, r, k;
            CvPoint pt;

            vx = dx_row[x];
            vy = dy_row[x];

            if( !edges_row[x] || (vx == 0 && vy == 0) )
                continue;

            /* unit step along the gradient, in SHIFT-bit fixed point */
            if( fabs(vx) < fabs(vy) )
            {
                sx = cvRound(vx*ONE/fabs(vy));
                sy = vy < 0 ? -ONE : ONE;
            }
            else
            {
                assert( vx != 0 );
                sy = cvRound(vy*ONE/fabs(vx));
                sx = vx < 0 ? -ONE : ONE;
            }

            x0 = cvRound((x*idp)*ONE) + ONE + (ONE/2);
            y0 = cvRound((y*idp)*ONE) + ONE + (ONE/2);

            /* two passes: along the gradient and against it */
            for( k = 0; k < 2; k++ )
            {
                x0 += min_radius * sx;
                y0 += min_radius * sy;

                for( x1 = x0, y1 = y0, r = min_radius; r <= max_radius; x1 += sx, y1 += sy, r++ )
                {
                    int x2 = x1 >> SHIFT, y2 = y1 >> SHIFT;
                    /* unsigned trick tests 0 <= x2 < acols in one compare */
                    if( (unsigned)x2 >= (unsigned)acols ||
                        (unsigned)y2 >= (unsigned)arows )
                        break;
                    adata[y2*astep + x2]++;
                }

                x0 -= min_radius * sx;
                y0 -= min_radius * sy;
                sx = -sx; sy = -sy;
            }

            pt.x = x; pt.y = y;
            cvSeqPush( nz, &pt );
        }
    }

    nz_count = nz->total;
    if( !nz_count )
        EXIT;

    /* candidate centers: strict local maxima above acc_threshold */
    for( y = 1; y < arows - 1; y++ )
    {
        for( x = 1; x < acols - 1; x++ )
        {
            int base = y*(acols+2) + x;
            if( adata[base] > acc_threshold &&
                adata[base] > adata[base-1] && adata[base] > adata[base+1] &&
                adata[base] > adata[base-acols-2] && adata[base] > adata[base+acols+2] )
                cvSeqPush(centers, &base);
        }
    }

    center_count = centers->total;
    if( !center_count )
        EXIT;

    CV_CALL( sort_buf = (int*)cvAlloc( MAX(center_count,nz_count)*sizeof(sort_buf[0]) ));
    cvCvtSeqToArray( centers, sort_buf );

    /* strongest centers first */
    icvHoughSortDescent32s( sort_buf, center_count, adata );
    cvClearSeq( centers );
    cvSeqPushMulti( centers, sort_buf, center_count );

    CV_CALL( dist_buf = cvCreateMat( 1, nz_count, CV_32FC1 ));
    ddata = dist_buf->data.fl;

    dr = dp;
    min_dist = MAX( min_dist, dp );
    min_dist *= min_dist;

    /* Stage B: per candidate center, pick the best-supported radius */
    for( i = 0; i < centers->total; i++ )
    {
        int ofs = *(int*)cvGetSeqElem( centers, i );
        y = ofs/(acols+2) - 1;
        x = ofs - (y+1)*(acols+2) - 1;
        float cx = (float)(x*dp), cy = (float)(y*dp);
        int start_idx = nz_count - 1;
        float start_dist, dist_sum;
        float r_best = 0, c[3];
        int max_count = R_THRESH;

        /* reject centers too close to an already accepted circle */
        for( j = 0; j < circles->total; j++ )
        {
            float* c = (float*)cvGetSeqElem( circles, j );
            if( (c[0] - cx)*(c[0] - cx) + (c[1] - cy)*(c[1] - cy) < min_dist )
                break;
        }

        if( j < circles->total )
            continue;

        /* squared distance from every edge pixel to this center */
        cvStartReadSeq( nz, &reader );
        for( j = 0; j < nz_count; j++ )
        {
            CvPoint pt;
            float _dx, _dy;
            CV_READ_SEQ_ELEM( pt, reader );
            _dx = cx - pt.x; _dy = cy - pt.y;
            ddata[j] = _dx*_dx + _dy*_dy;
            sort_buf[j] = j;
        }

        cvPow( dist_buf, dist_buf, 0.5 );
        /* sorting non-negative IEEE-754 floats via their bit patterns as
           ints preserves order, so the int index-sort is reused here */
        icvHoughSortDescent32s( sort_buf, nz_count, (int*)ddata );
        
        /* scan distances (ascending from the tail) and pick the densest
           run of width dr as the radius */
        dist_sum = start_dist = ddata[sort_buf[nz_count-1]];
        for( j = nz_count - 2; j >= 0; j-- )
        {
            float d = ddata[sort_buf[j]];

            if( d > max_radius )
                break;

            if( d - start_dist > dr )
            {
                float r_cur = ddata[sort_buf[(j + start_idx)/2]];
                if( (start_idx - j)*r_best >= max_count*r_cur ||
                    (r_best < FLT_EPSILON && start_idx - j >= max_count) )
                {
                    r_best = r_cur;
                    max_count = start_idx - j;
                }
                start_dist = d;
                start_idx = j;
                dist_sum = 0;
            }
            dist_sum += d;
        }

        if( max_count > R_THRESH )
        {
            c[0] = cx;
            c[1] = cy;
            c[2] = (float)r_best;
            cvSeqPush( circles, c );
            if( circles->total > circles_max )
                EXIT;
        }
    }

    __END__;

    cvReleaseMat( &dist_buf );
    cvFree( &sort_buf );
    cvReleaseMemStorage( &storage );
    cvReleaseMat( &edges );
    cvReleaseMat( &dx );
    cvReleaseMat( &dy );
    cvReleaseMat( &accum );
}
+\r
/* Finds circles in a grayscale image using the Hough transform.
   src_image:      8-bit, single-channel input image (checked via CV_IS_MASK_ARR).
   circle_storage: either a CvMemStorage* (a new CV_32FC3 sequence is created
                   and returned) or a continuous 1xN / Nx1 CV_32FC3 CvMat
                   (circles are written in place and the header is truncated
                   to the number found; 0 is returned).
   method:         only CV_HOUGH_GRADIENT is recognized.
   dp, min_dist:   inverse accumulator resolution and minimal center distance.
   param1, param2: Canny edge threshold and accumulator threshold
                   (both rounded to int and required to be positive).
   min_radius, max_radius: radius search range; max_radius <= 0 means
                   "image size", max_radius <= min_radius is widened to min+2. */
CV_IMPL CvSeq*
cvHoughCircles( CvArr* src_image, void* circle_storage,
                int method, double dp, double min_dist,
                double param1, double param2,
                int min_radius, int max_radius )
{
    CvSeq* result = 0;

    CV_FUNCNAME( "cvHoughCircles" );

    __BEGIN__;

    CvMat stub, *img = (CvMat*)src_image;
    CvMat* mat = 0;
    CvSeq* circles = 0;
    CvSeq circles_header;
    CvSeqBlock circles_block;
    int circles_max = INT_MAX;
    int canny_threshold = cvRound(param1);
    int acc_threshold = cvRound(param2);

    CV_CALL( img = cvGetMat( img, &stub ));

    if( !CV_IS_MASK_ARR(img))
        CV_ERROR( CV_StsBadArg, "The source image must be 8-bit, single-channel" );

    if( !circle_storage )
        CV_ERROR( CV_StsNullPtr, "NULL destination" );

    if( dp <= 0 || min_dist <= 0 || canny_threshold <= 0 || acc_threshold <= 0 )
        CV_ERROR( CV_StsOutOfRange, "dp, min_dist, canny_threshold and acc_threshold must be all positive numbers" );

    /* normalize the radius search range */
    min_radius = MAX( min_radius, 0 );
    if( max_radius <= 0 )
        max_radius = MAX( img->rows, img->cols );
    else if( max_radius <= min_radius )
        max_radius = min_radius + 2;

    if( CV_IS_STORAGE( circle_storage ))
    {
        /* growable destination: unlimited number of circles */
        CV_CALL( circles = cvCreateSeq( CV_32FC3, sizeof(CvSeq),
            sizeof(float)*3, (CvMemStorage*)circle_storage ));
    }
    else if( CV_IS_MAT( circle_storage ))
    {
        mat = (CvMat*)circle_storage;

        if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) ||
            CV_MAT_TYPE(mat->type) != CV_32FC3 )
            CV_ERROR( CV_StsBadArg,
            "The destination matrix should be continuous and have a single row or a single column" );

        /* wrap the user matrix as a sequence; its capacity caps the result count */
        CV_CALL( circles = cvMakeSeqHeaderForArray( CV_32FC3, sizeof(CvSeq), sizeof(float)*3,
                mat->data.ptr, mat->rows + mat->cols - 1, &circles_header, &circles_block ));
        circles_max = circles->total;
        CV_CALL( cvClearSeq( circles ));
    }
    else
    {
        CV_ERROR( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
    }

    switch( method )
    {
    case CV_HOUGH_GRADIENT:
        CV_CALL( icvHoughCirclesGradient( img, (float)dp, (float)min_dist,
                    min_radius, max_radius, canny_threshold,
                    acc_threshold, circles, circles_max ));
          break;
    default:
        CV_ERROR( CV_StsBadArg, "Unrecognized method id" );
    }

    if( mat )
    {
        /* shrink the destination header to the number of circles found */
        if( mat->cols > mat->rows )
            mat->cols = circles->total;
        else
            mat->rows = circles->total;
    }
    else
        result = circles;

    __END__;

    return result;
}
+\r
+\r
+namespace cv\r
+{\r
+\r
+const int STORAGE_SIZE = 1 << 12;\r
+\r
+void HoughLines( Mat& image, Vector<Vec2f>& lines,\r
+ double rho, double theta, int threshold,\r
+ double srn, double stn )\r
+{\r
+ CvMemStorage* storage = cvCreateMemStorage(STORAGE_SIZE);\r
+ CvMat _image = image;\r
+ CvSeq* seq = cvHoughLines2( &_image, storage, srn == 0 && stn == 0 ?\r
+ CV_HOUGH_STANDARD : CV_HOUGH_MULTI_SCALE,\r
+ rho, theta, threshold, srn, stn );\r
+ Seq<Vec2f>(seq).copyTo(lines);\r
+}\r
+\r
+void HoughLinesP( Mat& image, Vector<Vec4i>& lines,\r
+ double rho, double theta, int threshold,\r
+ double minLineLength, double maxGap )\r
+{\r
+ CvMemStorage* storage = cvCreateMemStorage(STORAGE_SIZE);\r
+ CvMat _image = image;\r
+ CvSeq* seq = cvHoughLines2( &_image, storage, CV_HOUGH_PROBABILISTIC,\r
+ rho, theta, threshold, minLineLength, maxGap );\r
+ Seq<Vec4i>(seq).copyTo(lines);\r
+}\r
+\r
+void HoughCircles( Mat& image, Vector<Vec3f>& circles,\r
+ int method, double dp, double min_dist,\r
+ double param1, double param2,\r
+ int minRadius, int maxRadius )\r
+{\r
+ CvMemStorage* storage = cvCreateMemStorage(STORAGE_SIZE);\r
+ CvMat _image = image;\r
+ CvSeq* seq = cvHoughCircles( &_image, storage, method,\r
+ dp, min_dist, param1, param2, minRadius, maxRadius );\r
+ Seq<Vec3f>(seq).copyTo(circles);\r
+}\r
+\r
+}\r
+\r
+/* End of file. */\r
cvReleaseMat(&t);
cvReleaseMat(&f);
}
+
+void cv::inpaint( const Mat& src, const Mat& mask, Mat& dst,
+ double inpaintRange, int flags )
+{
+ dst.create( src.size(), src.type() );
+ CvMat _src = src, _mask = mask, _dst = dst;
+ cvInpaint( &_src, &_mask, &_dst, inpaintRange, flags );
+}
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include "_cv.h"
-
-
-CV_IMPL CvKalman*
-cvCreateKalman( int DP, int MP, int CP )
-{
- CvKalman *kalman = 0;
-
- CV_FUNCNAME( "cvCreateKalman" );
-
- __BEGIN__;
-
- if( DP <= 0 || MP <= 0 )
- CV_ERROR( CV_StsOutOfRange,
- "state and measurement vectors must have positive number of dimensions" );
-
- if( CP < 0 )
- CP = DP;
-
- /* allocating memory for the structure */
- CV_CALL( kalman = (CvKalman *)cvAlloc( sizeof( CvKalman )));
- memset( kalman, 0, sizeof(*kalman));
-
- kalman->DP = DP;
- kalman->MP = MP;
- kalman->CP = CP;
-
- CV_CALL( kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 ));
- cvZero( kalman->state_pre );
-
- CV_CALL( kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 ));
- cvZero( kalman->state_post );
-
- CV_CALL( kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 ));
- cvSetIdentity( kalman->transition_matrix );
-
- CV_CALL( kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 ));
- cvSetIdentity( kalman->process_noise_cov );
-
- CV_CALL( kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 ));
- cvZero( kalman->measurement_matrix );
-
- CV_CALL( kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 ));
- cvSetIdentity( kalman->measurement_noise_cov );
-
- CV_CALL( kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 ));
-
- CV_CALL( kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 ));
- cvZero( kalman->error_cov_post );
-
- CV_CALL( kalman->gain = cvCreateMat( DP, MP, CV_32FC1 ));
-
- if( CP > 0 )
- {
- CV_CALL( kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 ));
- cvZero( kalman->control_matrix );
- }
-
- CV_CALL( kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 ));
- CV_CALL( kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 ));
- CV_CALL( kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 ));
- CV_CALL( kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 ));
- CV_CALL( kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 ));
-
-#if 1
- kalman->PosterState = kalman->state_pre->data.fl;
- kalman->PriorState = kalman->state_post->data.fl;
- kalman->DynamMatr = kalman->transition_matrix->data.fl;
- kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
- kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
- kalman->PNCovariance = kalman->process_noise_cov->data.fl;
- kalman->KalmGainMatr = kalman->gain->data.fl;
- kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
- kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
-#endif
-
- __END__;
-
- if( cvGetErrStatus() < 0 )
- cvReleaseKalman( &kalman );
-
- return kalman;
-}
-
-
-CV_IMPL void
-cvReleaseKalman( CvKalman** _kalman )
-{
- CvKalman *kalman;
-
- CV_FUNCNAME( "cvReleaseKalman" );
- __BEGIN__;
-
- if( !_kalman )
- CV_ERROR( CV_StsNullPtr, "" );
-
- kalman = *_kalman;
-
- /* freeing the memory */
- cvReleaseMat( &kalman->state_pre );
- cvReleaseMat( &kalman->state_post );
- cvReleaseMat( &kalman->transition_matrix );
- cvReleaseMat( &kalman->control_matrix );
- cvReleaseMat( &kalman->measurement_matrix );
- cvReleaseMat( &kalman->process_noise_cov );
- cvReleaseMat( &kalman->measurement_noise_cov );
- cvReleaseMat( &kalman->error_cov_pre );
- cvReleaseMat( &kalman->gain );
- cvReleaseMat( &kalman->error_cov_post );
- cvReleaseMat( &kalman->temp1 );
- cvReleaseMat( &kalman->temp2 );
- cvReleaseMat( &kalman->temp3 );
- cvReleaseMat( &kalman->temp4 );
- cvReleaseMat( &kalman->temp5 );
-
- memset( kalman, 0, sizeof(*kalman));
-
- /* deallocating the structure */
- cvFree( _kalman );
-
- __END__;
-}
-
-
-CV_IMPL const CvMat*
-cvKalmanPredict( CvKalman* kalman, const CvMat* control )
-{
- CvMat* result = 0;
-
- CV_FUNCNAME( "cvKalmanPredict" );
-
- __BEGIN__;
-
- if( !kalman )
- CV_ERROR( CV_StsNullPtr, "" );
-
- /* update the state */
- /* x'(k) = A*x(k) */
- CV_CALL( cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre ));
-
- if( control && kalman->CP > 0 )
- /* x'(k) = x'(k) + B*u(k) */
- CV_CALL( cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre ));
-
- /* update error covariance matrices */
- /* temp1 = A*P(k) */
- CV_CALL( cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 ));
-
- /* P'(k) = temp1*At + Q */
- CV_CALL( cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
- kalman->error_cov_pre, CV_GEMM_B_T ));
-
- result = kalman->state_pre;
-
- __END__;
-
- return result;
-}
-
-
-CV_IMPL const CvMat*
-cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
-{
- CvMat* result = 0;
-
- CV_FUNCNAME( "cvKalmanCorrect" );
-
- __BEGIN__;
-
- if( !kalman || !measurement )
- CV_ERROR( CV_StsNullPtr, "" );
-
- /* temp2 = H*P'(k) */
- CV_CALL( cvMatMulAdd( kalman->measurement_matrix,
- kalman->error_cov_pre, 0, kalman->temp2 ));
- /* temp3 = temp2*Ht + R */
- CV_CALL( cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
- kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T ));
-
- /* temp4 = inv(temp3)*temp2 = Kt(k) */
- CV_CALL( cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD ));
-
- /* K(k) */
- CV_CALL( cvTranspose( kalman->temp4, kalman->gain ));
-
- /* temp5 = z(k) - H*x'(k) */
- CV_CALL( cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1, measurement, 1, kalman->temp5 ));
-
- /* x(k) = x'(k) + K(k)*temp5 */
- CV_CALL( cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post ));
-
- /* P(k) = P'(k) - K(k)*temp2 */
- CV_CALL( cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
- kalman->error_cov_post, 0 ));
-
- result = kalman->state_post;
-
- __END__;
-
- return result;
-}
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+#include "_cv.h"\r
+\r
+\r
/* Allocates and initializes a CvKalman structure.
   DP: dimensionality of the state vector (must be > 0).
   MP: dimensionality of the measurement vector (must be > 0).
   CP: dimensionality of the control vector; CP < 0 means "same as DP",
       CP == 0 means no control (control_matrix left NULL).
   Transition and noise-covariance matrices start as identity, the rest
   are zeroed. On any allocation failure the partially built structure is
   released and NULL is returned. */
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
    CvKalman *kalman = 0;

    CV_FUNCNAME( "cvCreateKalman" );
    
    __BEGIN__;

    if( DP <= 0 || MP <= 0 )
        CV_ERROR( CV_StsOutOfRange,
            "state and measurement vectors must have positive number of dimensions" );

    if( CP < 0 )
        CP = DP;
    
    /* allocating memory for the structure */
    CV_CALL( kalman = (CvKalman *)cvAlloc( sizeof( CvKalman )));
    memset( kalman, 0, sizeof(*kalman));
    
    kalman->DP = DP;
    kalman->MP = MP;
    kalman->CP = CP;

    CV_CALL( kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 ));
    cvZero( kalman->state_pre );
    
    CV_CALL( kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 ));
    cvZero( kalman->state_post );
    
    /* A: defaults to identity so predict() is a no-op until configured */
    CV_CALL( kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 ));
    cvSetIdentity( kalman->transition_matrix );

    /* Q: process noise covariance, identity by default */
    CV_CALL( kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 ));
    cvSetIdentity( kalman->process_noise_cov );
    
    /* H: measurement matrix, zeroed — must be set by the caller */
    CV_CALL( kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 ));
    cvZero( kalman->measurement_matrix );

    /* R: measurement noise covariance, identity by default */
    CV_CALL( kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 ));
    cvSetIdentity( kalman->measurement_noise_cov );

    CV_CALL( kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 ));
    
    CV_CALL( kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 ));
    cvZero( kalman->error_cov_post );

    CV_CALL( kalman->gain = cvCreateMat( DP, MP, CV_32FC1 ));

    if( CP > 0 )
    {
        /* B: control matrix, only allocated when a control input exists */
        CV_CALL( kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 ));
        cvZero( kalman->control_matrix );
    }

    /* scratch matrices reused by cvKalmanPredict/cvKalmanCorrect */
    CV_CALL( kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 ));
    CV_CALL( kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 ));
    CV_CALL( kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 ));
    CV_CALL( kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 ));
    CV_CALL( kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 ));

#if 1
    /* legacy aliases kept for the deprecated CvKalman-by-field API.
       NOTE(review): PosterState aliases state_pre while PriorState aliases
       state_post — the names look swapped; confirm against the legacy API
       before relying on these fields. */
    kalman->PosterState = kalman->state_pre->data.fl;
    kalman->PriorState = kalman->state_post->data.fl;
    kalman->DynamMatr = kalman->transition_matrix->data.fl;
    kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
    kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
    kalman->PNCovariance = kalman->process_noise_cov->data.fl;
    kalman->KalmGainMatr = kalman->gain->data.fl;
    kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
    kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif

    __END__;

    /* roll back the partially constructed filter on any error above */
    if( cvGetErrStatus() < 0 )
        cvReleaseKalman( &kalman );

    return kalman;
}
+\r
+\r
+CV_IMPL void\r
+cvReleaseKalman( CvKalman** _kalman )\r
+{\r
+ CvKalman *kalman;\r
+\r
+ CV_FUNCNAME( "cvReleaseKalman" );\r
+ __BEGIN__;\r
+ \r
+ if( !_kalman )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+ \r
+ kalman = *_kalman;\r
+ \r
+ /* freeing the memory */\r
+ cvReleaseMat( &kalman->state_pre );\r
+ cvReleaseMat( &kalman->state_post );\r
+ cvReleaseMat( &kalman->transition_matrix );\r
+ cvReleaseMat( &kalman->control_matrix );\r
+ cvReleaseMat( &kalman->measurement_matrix );\r
+ cvReleaseMat( &kalman->process_noise_cov );\r
+ cvReleaseMat( &kalman->measurement_noise_cov );\r
+ cvReleaseMat( &kalman->error_cov_pre );\r
+ cvReleaseMat( &kalman->gain );\r
+ cvReleaseMat( &kalman->error_cov_post );\r
+ cvReleaseMat( &kalman->temp1 );\r
+ cvReleaseMat( &kalman->temp2 );\r
+ cvReleaseMat( &kalman->temp3 );\r
+ cvReleaseMat( &kalman->temp4 );\r
+ cvReleaseMat( &kalman->temp5 );\r
+\r
+ memset( kalman, 0, sizeof(*kalman));\r
+\r
+ /* deallocating the structure */\r
+ cvFree( _kalman );\r
+\r
+ __END__;\r
+}\r
+\r
+\r
/* Computes the predicted state x'(k) = A*x(k) [+ B*u(k)] and the a-priori
   error covariance P'(k) = A*P(k)*At + Q.
   kalman:  filter state created by cvCreateKalman (must be non-NULL).
   control: optional control vector u(k); used only when the filter was
            created with CP > 0.
   Returns kalman->state_pre on success, NULL on error. */
CV_IMPL const CvMat*
cvKalmanPredict( CvKalman* kalman, const CvMat* control )
{
    CvMat* result = 0;
    
    CV_FUNCNAME( "cvKalmanPredict" );

    __BEGIN__;
    
    if( !kalman )
        CV_ERROR( CV_StsNullPtr, "" );

    /* update the state */
    /* x'(k) = A*x(k) */
    CV_CALL( cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre ));

    if( control && kalman->CP > 0 )
        /* x'(k) = x'(k) + B*u(k) */
        CV_CALL( cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre ));
    
    /* update error covariance matrices */
    /* temp1 = A*P(k) */
    CV_CALL( cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 ));
    
    /* P'(k) = temp1*At + Q */
    CV_CALL( cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
                     kalman->error_cov_pre, CV_GEMM_B_T ));

    result = kalman->state_pre;

    __END__;

    return result;
}
+\r
+\r
/* Updates the predicted state with a measurement z(k):
   computes the Kalman gain K(k), the corrected state
   x(k) = x'(k) + K(k)*(z(k) - H*x'(k)) and the a-posteriori covariance
   P(k) = P'(k) - K(k)*H*P'(k).
   kalman, measurement: must both be non-NULL; cvKalmanPredict is expected
   to have been called first so state_pre/error_cov_pre are valid.
   Returns kalman->state_post on success, NULL on error. */
CV_IMPL const CvMat*
cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
{
    CvMat* result = 0;

    CV_FUNCNAME( "cvKalmanCorrect" );

    __BEGIN__;
    
    if( !kalman || !measurement )
        CV_ERROR( CV_StsNullPtr, "" );

    /* temp2 = H*P'(k) */
    CV_CALL( cvMatMulAdd( kalman->measurement_matrix,
                          kalman->error_cov_pre, 0, kalman->temp2 ));
    /* temp3 = temp2*Ht + R */
    CV_CALL( cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
                     kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T ));

    /* temp4 = inv(temp3)*temp2 = Kt(k), solved via SVD for numerical
       robustness when temp3 is near-singular */
    CV_CALL( cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD ));

    /* K(k) */
    CV_CALL( cvTranspose( kalman->temp4, kalman->gain ));
    
    /* temp5 = z(k) - H*x'(k), the innovation */
    CV_CALL( cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1, measurement, 1, kalman->temp5 ));

    /* x(k) = x'(k) + K(k)*temp5 */
    CV_CALL( cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post ));

    /* P(k) = P'(k) - K(k)*temp2 */
    CV_CALL( cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
                     kalman->error_cov_post, 0 ));

    result = kalman->state_post;

    __END__;

    return result;
}
+\r
namespace cv
{

/* C++ counterpart of the CvKalman C API above: a standard linear Kalman
   filter over CV_32F matrices. Default-constructed filters must be init()'d
   before use. */
KalmanFilter::KalmanFilter() {}
KalmanFilter::KalmanFilter(int dynamParams, int measureParams, int controlParams)
{
    init(dynamParams, measureParams, controlParams);
}

/* (Re)allocates all filter matrices for DP-dimensional state, MP-dimensional
   measurements and CP-dimensional control (CP <= 0 disables control).
   Mirrors cvCreateKalman: A, Q, R start as identity; H and the covariances
   start zeroed. */
void KalmanFilter::init(int DP, int MP, int CP)
{
    CV_Assert( DP > 0 && MP > 0 );
    CP = std::max(CP, 0);

    statePre = Mat::zeros(DP, 1, CV_32F);
    statePost = Mat::zeros(DP, 1, CV_32F);
    transitionMatrix = Mat::eye(DP, DP, CV_32F);

    processNoiseCov = Mat::eye(DP, DP, CV_32F);
    measurementMatrix = Mat::zeros(MP, DP, CV_32F);
    measurementNoiseCov = Mat::eye(MP, MP, CV_32F);

    errorCovPre = Mat::zeros(DP, DP, CV_32F);
    errorCovPost = Mat::zeros(DP, DP, CV_32F);
    gain = Mat::zeros(DP, MP, CV_32F);

    if( CP > 0 )
        controlMatrix = Mat::zeros(DP, CP, CV_32F);
    else
        controlMatrix.release();

    /* scratch matrices reused by predict()/correct() */
    temp1.create(DP, DP, CV_32F);
    temp2.create(MP, DP, CV_32F);
    temp3.create(MP, MP, CV_32F);
    temp4.create(MP, DP, CV_32F);
    temp5.create(MP, 1, CV_32F);
}

/* Prediction step; `control` is optional (empty Mat means no control term).
   Returns a reference to statePre. */
const Mat& KalmanFilter::predict(const Mat& control)
{
    // update the state: x'(k) = A*x(k)
    statePre = transitionMatrix*statePost;

    if( control.data )
        // x'(k) = x'(k) + B*u(k)
        statePre += controlMatrix*control;

    // update error covariance matrices: temp1 = A*P(k)
    temp1 = transitionMatrix*errorCovPost;

    // P'(k) = temp1*At + Q
    errorCovPre = temp1*transitionMatrix.t() + processNoiseCov;

    return statePre;
}

/* Correction step: folds measurement z(k) into the prediction.
   Returns a reference to statePost. */
const Mat& KalmanFilter::correct(const Mat& measurement)
{
    // temp2 = H*P'(k)
    temp2 = measurementMatrix * errorCovPre;

    // temp3 = temp2*Ht + R
    temp3 = temp2*measurementMatrix.t() + measurementNoiseCov;

    // temp4 = inv(temp3)*temp2 = Kt(k); SVD for near-singular temp3
    solve(temp3, temp2, temp4, DECOMP_SVD);

    // K(k)
    gain = temp4.t();
    
    // temp5 = z(k) - H*x'(k)
    temp5 = measurement - measurementMatrix*statePre;

    // x(k) = x'(k) + K(k)*temp5
    statePost = statePre + gain*temp5;

    // P(k) = P'(k) - K(k)*temp2
    errorCovPost = errorCovPre - gain*temp2;

    return statePost;
}
    
};
return result;
}
namespace cv
{
/* Computes an optimal 2x3 affine transformation between two 2D point sets
   by delegating to the C implementation cvEstimateRigidTransform.
   A, B:       matching point sets of equal size (src and dst).
   fullAffine: true  -> full 6-DOF affine transform,
               false -> restricted rigid/similarity transform.
   Returns a 2x3 CV_64F matrix M such that B ~ M*[A;1]. */
Mat estimateRigidTransform( const Vector<Point2f>& A,
                            const Vector<Point2f>& B,
                            bool fullAffine )
{
    Mat M(2, 3, CV_64F);
    /* wrap inputs/output in legacy C headers (no data copied) */
    CvMat _A = A, _B = B, _M = M;
    cvEstimateRigidTransform(&_A, &_B, &_M, fullAffine);
    return M;
}
}
/* End of file. */
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include "_cv.h"
-
-/* The function calculates center of gravity and central second order moments */
-static void
-icvCompleteMomentState( CvMoments* moments )
-{
- double cx = 0, cy = 0;
- double mu20, mu11, mu02;
-
- assert( moments != 0 );
- moments->inv_sqrt_m00 = 0;
-
- if( fabs(moments->m00) > DBL_EPSILON )
- {
- double inv_m00 = 1. / moments->m00;
- cx = moments->m10 * inv_m00;
- cy = moments->m01 * inv_m00;
- moments->inv_sqrt_m00 = std::sqrt( fabs(inv_m00) );
- }
-
- /* mu20 = m20 - m10*cx */
- mu20 = moments->m20 - moments->m10 * cx;
- /* mu11 = m11 - m10*cy */
- mu11 = moments->m11 - moments->m10 * cy;
- /* mu02 = m02 - m01*cy */
- mu02 = moments->m02 - moments->m01 * cy;
-
- moments->mu20 = mu20;
- moments->mu11 = mu11;
- moments->mu02 = mu02;
-
- /* mu30 = m30 - cx*(3*mu20 + cx*m10) */
- moments->mu30 = moments->m30 - cx * (3 * mu20 + cx * moments->m10);
- mu11 += mu11;
- /* mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20 */
- moments->mu21 = moments->m21 - cx * (mu11 + cx * moments->m01) - cy * mu20;
- /* mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02 */
- moments->mu12 = moments->m12 - cy * (mu11 + cy * moments->m10) - cx * mu02;
- /* mu03 = m03 - cy*(3*mu02 + cy*m01) */
- moments->mu03 = moments->m03 - cy * (3 * mu02 + cy * moments->m01);
-}
-
-
-static void
-icvContourMoments( CvSeq* contour, CvMoments* moments )
-{
- int is_float = CV_SEQ_ELTYPE(contour) == CV_32FC2;
-
- if( contour->total )
- {
- CvSeqReader reader;
- double a00, a10, a01, a20, a11, a02, a30, a21, a12, a03;
- double xi, yi, xi2, yi2, xi_1, yi_1, xi_12, yi_12, dxy, xii_1, yii_1;
- int lpt = contour->total;
-
- a00 = a10 = a01 = a20 = a11 = a02 = a30 = a21 = a12 = a03 = 0;
-
- cvStartReadSeq( contour, &reader, 0 );
-
- if( !is_float )
- {
- xi_1 = ((CvPoint*)(reader.ptr))->x;
- yi_1 = ((CvPoint*)(reader.ptr))->y;
- }
- else
- {
- xi_1 = ((CvPoint2D32f*)(reader.ptr))->x;
- yi_1 = ((CvPoint2D32f*)(reader.ptr))->y;
- }
- CV_NEXT_SEQ_ELEM( contour->elem_size, reader );
-
- xi_12 = xi_1 * xi_1;
- yi_12 = yi_1 * yi_1;
-
- while( lpt-- > 0 )
- {
- if( !is_float )
- {
- xi = ((CvPoint*)(reader.ptr))->x;
- yi = ((CvPoint*)(reader.ptr))->y;
- }
- else
- {
- xi = ((CvPoint2D32f*)(reader.ptr))->x;
- yi = ((CvPoint2D32f*)(reader.ptr))->y;
- }
- CV_NEXT_SEQ_ELEM( contour->elem_size, reader );
-
- xi2 = xi * xi;
- yi2 = yi * yi;
- dxy = xi_1 * yi - xi * yi_1;
- xii_1 = xi_1 + xi;
- yii_1 = yi_1 + yi;
-
- a00 += dxy;
- a10 += dxy * xii_1;
- a01 += dxy * yii_1;
- a20 += dxy * (xi_1 * xii_1 + xi2);
- a11 += dxy * (xi_1 * (yii_1 + yi_1) + xi * (yii_1 + yi));
- a02 += dxy * (yi_1 * yii_1 + yi2);
- a30 += dxy * xii_1 * (xi_12 + xi2);
- a03 += dxy * yii_1 * (yi_12 + yi2);
- a21 +=
- dxy * (xi_12 * (3 * yi_1 + yi) + 2 * xi * xi_1 * yii_1 +
- xi2 * (yi_1 + 3 * yi));
- a12 +=
- dxy * (yi_12 * (3 * xi_1 + xi) + 2 * yi * yi_1 * xii_1 +
- yi2 * (xi_1 + 3 * xi));
-
- xi_1 = xi;
- yi_1 = yi;
- xi_12 = xi2;
- yi_12 = yi2;
- }
-
- double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60;
-
- if( fabs(a00) > FLT_EPSILON )
- {
- if( a00 > 0 )
- {
- db1_2 = 0.5;
- db1_6 = 0.16666666666666666666666666666667;
- db1_12 = 0.083333333333333333333333333333333;
- db1_24 = 0.041666666666666666666666666666667;
- db1_20 = 0.05;
- db1_60 = 0.016666666666666666666666666666667;
- }
- else
- {
- db1_2 = -0.5;
- db1_6 = -0.16666666666666666666666666666667;
- db1_12 = -0.083333333333333333333333333333333;
- db1_24 = -0.041666666666666666666666666666667;
- db1_20 = -0.05;
- db1_60 = -0.016666666666666666666666666666667;
- }
-
- /* spatial moments */
- moments->m00 = a00 * db1_2;
- moments->m10 = a10 * db1_6;
- moments->m01 = a01 * db1_6;
- moments->m20 = a20 * db1_12;
- moments->m11 = a11 * db1_24;
- moments->m02 = a02 * db1_12;
- moments->m30 = a30 * db1_20;
- moments->m21 = a21 * db1_60;
- moments->m12 = a12 * db1_60;
- moments->m03 = a03 * db1_20;
-
- icvCompleteMomentState( moments );
- }
- }
-}
-
-
-/* summarizes moment values for all tiles */
-static void
-icvAccumulateMoments( double *tiles, CvSize size, CvSize tile_size, CvMoments * moments )
-{
- int x, y;
-
- for( y = 0; y < size.height; y += tile_size.height )
- {
- for( x = 0; x < size.width; x += tile_size.width, tiles += 10 )
- {
- double dx = x, dy = y;
- double dxm = dx * tiles[0], dym = dy * tiles[0];
-
- /* + m00 ( = m00' ) */
- moments->m00 += tiles[0];
-
- /* + m10 ( = m10' + dx*m00' ) */
- moments->m10 += tiles[1] + dxm;
-
- /* + m01 ( = m01' + dy*m00' ) */
- moments->m01 += tiles[2] + dym;
-
- /* + m20 ( = m20' + 2*dx*m10' + dx*dx*m00' ) */
- moments->m20 += tiles[3] + dx * (tiles[1] * 2 + dxm);
-
- /* + m11 ( = m11' + dx*m01' + dy*m10' + dx*dy*m00' ) */
- moments->m11 += tiles[4] + dx * (tiles[2] + dym) + dy * tiles[1];
-
- /* + m02 ( = m02' + 2*dy*m01' + dy*dy*m00' ) */
- moments->m02 += tiles[5] + dy * (tiles[2] * 2 + dym);
-
- /* + m30 ( = m30' + 3*dx*m20' + 3*dx*dx*m10' + dx*dx*dx*m00' ) */
- moments->m30 += tiles[6] + dx * (3. * tiles[3] + dx * (3. * tiles[1] + dxm));
-
- /* + m21 (= m21' + dx*(2*m11' + 2*dy*m10' + dx*m01' + dx*dy*m00') + dy*m20') */
- moments->m21 += tiles[7] + dx * (2 * (tiles[4] + dy * tiles[1]) +
- dx * (tiles[2] + dym)) + dy * tiles[3];
-
- /* + m12 (= m12' + dy*(2*m11' + 2*dx*m01' + dy*m10' + dx*dy*m00') + dx*m02') */
- moments->m12 += tiles[8] + dy * (2 * (tiles[4] + dx * tiles[2]) +
- dy * (tiles[1] + dxm)) + dx * tiles[5];
-
- /* + m03 ( = m03' + 3*dy*m02' + 3*dy*dy*m01' + dy*dy*dy*m00' ) */
- moments->m03 += tiles[9] + dy * (3. * tiles[5] + dy * (3. * tiles[2] + dym));
- }
- }
-
- icvCompleteMomentState( moments );
-}
-
-
-/****************************************************************************************\
-* Spatial Moments *
-\****************************************************************************************/
-
-#define ICV_DEF_CALC_MOMENTS_IN_TILE( __op__, name, flavor, srctype, temptype, momtype ) \
-static CvStatus CV_STDCALL icv##name##_##flavor##_CnCR \
-( const srctype* img, int step, CvSize size, int cn, int coi, double *moments ) \
-{ \
- int x, y, sx_init = (size.width & -4) * (size.width & -4), sy = 0; \
- momtype mom[10]; \
- \
- assert( img && size.width && (size.width | size.height) >= 0 ); \
- memset( mom, 0, 10 * sizeof( mom[0] )); \
- \
- if( coi ) \
- img += coi - 1; \
- step /= sizeof(img[0]); \
- \
- for( y = 0; y < size.height; sy += 2 * y + 1, y++, img += step ) \
- { \
- temptype x0 = 0; \
- temptype x1 = 0; \
- temptype x2 = 0; \
- momtype x3 = 0; \
- int sx = sx_init; \
- const srctype* ptr = img; \
- \
- for( x = 0; x < size.width - 3; x += 4, ptr += cn*4 ) \
- { \
- temptype p0 = __op__(ptr[0]), p1 = __op__(ptr[cn]), \
- p2 = __op__(ptr[2*cn]), p3 = __op__(ptr[3*cn]); \
- temptype t = p1; \
- temptype a, b, c; \
- \
- p0 += p1 + p2 + p3; /* p0 + p1 + p2 + p3 */ \
- p1 += 2 * p2 + 3 * p3; /* p1 + p2*2 + p3*3 */ \
- p2 = p1 + 2 * p2 + 6 * p3; /* p1 + p2*4 + p3*9 */ \
- p3 = 2 * p2 - t + 9 * p3; /* p1 + p2*8 + p3*27 */ \
- \
- a = x * p0 + p1; /* x*p0 + (x+1)*p1 + (x+2)*p2 + (x+3)*p3 */ \
- b = x * p1 + p2; /* (x+1)*p1 + 2*(x+2)*p2 + 3*(x+3)*p3 */ \
- c = x * p2 + p3; /* (x+1)*p1 + 4*(x+2)*p2 + 9*(x+3)*p3 */ \
- \
- x0 += p0; \
- x1 += a; \
- a = a * x + b; /*(x^2)*p0+((x+1)^2)*p1+((x+2)^2)*p2+((x+3)^2)*p3 */ \
- x2 += a; \
- x3 += ((momtype)(a + b)) * x + c; /*x3 += (x^3)*p0+((x+1)^3)*p1 + */ \
- /* ((x+2)^3)*p2+((x+3)^3)*p3 */ \
- } \
- \
- /* process the rest */ \
- for( ; x < size.width; sx += 2 * x + 1, x++, ptr += cn ) \
- { \
- temptype p = __op__(ptr[0]); \
- temptype xp = x * p; \
- \
- x0 += p; \
- x1 += xp; \
- x2 += sx * p; \
- x3 += ((momtype)sx) * xp; \
- } \
- \
- { \
- temptype py = y * x0; \
- \
- mom[9] += ((momtype)py) * sy; /* m03 */ \
- mom[8] += ((momtype)x1) * sy; /* m12 */ \
- mom[7] += ((momtype)x2) * y; /* m21 */ \
- mom[6] += x3; /* m30 */ \
- mom[5] += x0 * sy; /* m02 */ \
- mom[4] += x1 * y; /* m11 */ \
- mom[3] += x2; /* m20 */ \
- mom[2] += py; /* m01 */ \
- mom[1] += x1; /* m10 */ \
- mom[0] += x0; /* m00 */ \
- } \
- } \
- \
- for( x = 0; x < 10; x++ ) \
- moments[x] = (double)mom[x]; \
- \
- return CV_OK; \
-}
-
-
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 8u, uchar, int, int )
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 16u, ushort, int, int64 )
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 16s, short, int, int64 )
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 32f, float, double, double )
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 64f, double, double, double )
-
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO, MomentsInTileBin, 8u, uchar, int, int )
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO, MomentsInTileBin, 16s, ushort, int, int )
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO_FLT, MomentsInTileBin, 32f, int, int, int )
-ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO_FLT, MomentsInTileBin, 64f, int64, double, double )
-
-#define icvMomentsInTile_32s_CnCR 0
-#define icvMomentsInTileBin_16u_CnCR icvMomentsInTileBin_16s_CnCR
-#define icvMomentsInTileBin_32s_CnCR 0
-
-typedef CvStatus (CV_STDCALL* CvMomentFunc)
-( const void* img, int step, CvSize size, int cn, int coi, double *moments );
-
-CV_IMPL void
-cvMoments( const void* array, CvMoments* moments, int binary )
-{
- static CvFuncTable mom_tab;
- static CvFuncTable mombin_tab;
- static int inittab = 0;
- double* tiles = 0;
-
- CV_FUNCNAME("cvMoments");
-
- __BEGIN__;
-
- int type = 0, depth, cn, pix_size;
- int coi = 0;
- int x, y, k, tile_num = 1;
- CvSize size, tile_size = { 32, 32 };
- CvMat stub, *mat = (CvMat*)array;
- CvMomentFunc func = 0;
- CvContour contour_header;
- CvSeq* contour = 0;
- CvSeqBlock block;
-
- if( CV_IS_SEQ( array ))
- {
- contour = (CvSeq*)array;
- if( !CV_IS_SEQ_POLYGON( contour ))
- CV_ERROR( CV_StsBadArg, "The passed sequence is not a valid contour" );
- }
-
- if( !inittab )
- {
- CV_INIT_FUNC_TAB( mom_tab, icvMomentsInTile, _CnCR );
- CV_INIT_FUNC_TAB( mombin_tab, icvMomentsInTileBin, _CnCR );
- inittab = 1;
- }
-
- if( !moments )
- CV_ERROR( CV_StsNullPtr, "" );
-
- memset( moments, 0, sizeof(*moments));
-
- if( !contour )
- {
- CV_CALL( mat = cvGetMat( mat, &stub, &coi ));
- type = CV_MAT_TYPE( mat->type );
-
- if( type == CV_32SC2 || type == CV_32FC2 )
- {
- CV_CALL( contour = cvPointSeqFromMat(
- CV_SEQ_KIND_CURVE | CV_SEQ_FLAG_CLOSED,
- mat, &contour_header, &block ));
- }
- }
-
- if( contour )
- {
- icvContourMoments( contour, moments );
- EXIT;
- }
-
- type = CV_MAT_TYPE( mat->type );
- depth = CV_MAT_DEPTH( type );
- cn = CV_MAT_CN( type );
- pix_size = CV_ELEM_SIZE(type);
- size = cvGetMatSize( mat );
-
- if( cn > 1 && coi == 0 )
- CV_ERROR( CV_StsBadArg, "Invalid image type" );
-
- if( size.width <= 0 || size.height <= 0 )
- {
- EXIT;
- }
-
- func = (CvMomentFunc)(!binary ? mom_tab.fn_2d[depth] : mombin_tab.fn_2d[depth]);
-
- if( !func )
- CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );
-
- if( depth >= CV_32S && !binary )
- tile_size = size;
- else
- tile_num = ((size.width + tile_size.width - 1)/tile_size.width)*
- ((size.height + tile_size.height - 1)/tile_size.height);
-
- CV_CALL( tiles = (double*)cvAlloc( tile_num*10*sizeof(double)));
-
- for( y = 0, k = 0; y < size.height; y += tile_size.height )
- {
- CvSize cur_tile_size = tile_size;
- if( y + cur_tile_size.height > size.height )
- cur_tile_size.height = size.height - y;
-
- for( x = 0; x < size.width; x += tile_size.width, k++ )
- {
- if( x + cur_tile_size.width > size.width )
- cur_tile_size.width = size.width - x;
-
- assert( k < tile_num );
-
- IPPI_CALL( func( mat->data.ptr + y*mat->step + x*pix_size,
- mat->step, cur_tile_size, cn, coi, tiles + k*10 ));
- }
- }
-
- icvAccumulateMoments( tiles, size, tile_size, moments );
-
- __END__;
-
- cvFree( &tiles );
-}
-
-/*F///////////////////////////////////////////////////////////////////////////////////////
-// Name: cvGetHuMoments
-// Purpose: Returns Hu moments
-// Context:
-// Parameters:
-// mState - moment structure filled by one of the icvMoments[Binary]*** function
-// HuState - pointer to output structure containing seven Hu moments
-// Returns:
-// CV_NO_ERR if success or error code
-// Notes:
-//F*/
-CV_IMPL void
-cvGetHuMoments( CvMoments * mState, CvHuMoments * HuState )
-{
- CV_FUNCNAME( "cvGetHuMoments" );
-
- __BEGIN__;
-
- if( !mState || !HuState )
- CV_ERROR( CV_StsNullPtr, "" );
-
- {
- double m00s = mState->inv_sqrt_m00, m00 = m00s * m00s, s2 = m00 * m00, s3 = s2 * m00s;
-
- double nu20 = mState->mu20 * s2,
- nu11 = mState->mu11 * s2,
- nu02 = mState->mu02 * s2,
- nu30 = mState->mu30 * s3,
- nu21 = mState->mu21 * s3, nu12 = mState->mu12 * s3, nu03 = mState->mu03 * s3;
-
- double t0 = nu30 + nu12;
- double t1 = nu21 + nu03;
-
- double q0 = t0 * t0, q1 = t1 * t1;
-
- double n4 = 4 * nu11;
- double s = nu20 + nu02;
- double d = nu20 - nu02;
-
- HuState->hu1 = s;
- HuState->hu2 = d * d + n4 * nu11;
- HuState->hu4 = q0 + q1;
- HuState->hu6 = d * (q0 - q1) + n4 * t0 * t1;
-
- t0 *= q0 - 3 * q1;
- t1 *= 3 * q0 - q1;
-
- q0 = nu30 - 3 * nu12;
- q1 = 3 * nu21 - nu03;
-
- HuState->hu3 = q0 * q0 + q1 * q1;
- HuState->hu5 = q0 * t0 + q1 * t1;
- HuState->hu7 = q1 * t0 - q0 * t1;
- }
-
- __END__;
-}
-
-
-/*F///////////////////////////////////////////////////////////////////////////////////////
-// Name: cvGetSpatialMoment
-// Purpose: Returns spatial moment(x_order, y_order) which is determined as:
-// m(x_o,y_o) = sum (x ^ x_o)*(y ^ y_o)*I(x,y)
-// 0 <= x_o, y_o; x_o + y_o <= 3
-// Context:
-// Parameters:
-// mom - moment structure filled by one of the icvMoments[Binary]*** function
-// x_order - x order of the moment
-// y_order - y order of the moment
-// Returns:
-// moment value or large negative number (-DBL_MAX) if error
-// Notes:
-//F*/
-CV_IMPL double
-cvGetSpatialMoment( CvMoments * moments, int x_order, int y_order )
-{
- int order = x_order + y_order;
- double moment = -DBL_MAX;
-
- CV_FUNCNAME( "cvGetSpatialMoment" );
-
- __BEGIN__;
-
- if( !moments )
- CV_ERROR( CV_StsNullPtr, "" );
- if( (x_order | y_order) < 0 || order > 3 )
- CV_ERROR( CV_StsOutOfRange, "" );
-
- moment = (&(moments->m00))[order + (order >> 1) + (order > 2) * 2 + y_order];
-
- __END__;
-
- return moment;
-}
-
-
-/*F///////////////////////////////////////////////////////////////////////////////////////
-// Name: cvGetCentralMoment
-// Purpose: Returns central moment(x_order, y_order) which is determined as:
-// mu(x_o,y_o) = sum ((x - xc)^ x_o)*((y - yc) ^ y_o)*I(x,y)
-// 0 <= x_o, y_o; x_o + y_o <= 3,
-// (xc, yc) = (m10/m00,m01/m00) - center of gravity
-// Context:
-// Parameters:
-// mom - moment structure filled by one of the icvMoments[Binary]*** function
-// x_order - x order of the moment
-// y_order - y order of the moment
-// Returns:
-// moment value or large negative number (-DBL_MAX) if error
-// Notes:
-//F*/
-CV_IMPL double
-cvGetCentralMoment( CvMoments * moments, int x_order, int y_order )
-{
- int order = x_order + y_order;
- double mu = 0;
-
- CV_FUNCNAME( "cvGetCentralMoment" );
-
- __BEGIN__;
-
- if( !moments )
- CV_ERROR( CV_StsNullPtr, "" );
- if( (x_order | y_order) < 0 || order > 3 )
- CV_ERROR( CV_StsOutOfRange, "" );
-
- if( order >= 2 )
- {
- mu = (&(moments->m00))[4 + order * 3 + y_order];
- }
- else if( order == 0 )
- mu = moments->m00;
-
- __END__;
-
- return mu;
-}
-
-
-/*F///////////////////////////////////////////////////////////////////////////////////////
-// Name: cvGetNormalizedCentralMoment
-// Purpose: Returns normalized central moment(x_order,y_order) which is determined as:
-// nu(x_o,y_o) = mu(x_o, y_o)/(m00 ^ (((x_o + y_o)/2) + 1))
-// 0 <= x_o, y_o; x_o + y_o <= 3,
-// (xc, yc) = (m10/m00,m01/m00) - center of gravity
-// Context:
-// Parameters:
-// mom - moment structure filled by one of the icvMoments[Binary]*** function
-// x_order - x order of the moment
-// y_order - y order of the moment
-// Returns:
-// moment value or large negative number (-DBL_MAX) if error
-// Notes:
-//F*/
-CV_IMPL double
-cvGetNormalizedCentralMoment( CvMoments * moments, int x_order, int y_order )
-{
- int order = x_order + y_order;
- double mu = 0;
- double m00s, m00;
-
- CV_FUNCNAME( "cvGetCentralNormalizedMoment" );
-
- __BEGIN__;
-
- mu = cvGetCentralMoment( moments, x_order, y_order );
- CV_CHECK();
-
- m00s = moments->inv_sqrt_m00;
- m00 = m00s * m00s;
-
- while( --order >= 0 )
- m00 *= m00s;
- mu *= m00;
-
- __END__;
-
- return mu;
-}
-
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+#include "_cv.h"\r
+\r
+/* The function calculates center of gravity and central second order moments */\r
+static void\r
+icvCompleteMomentState( CvMoments* moments )\r
+{\r
+ double cx = 0, cy = 0;\r
+ double mu20, mu11, mu02;\r
+\r
+ assert( moments != 0 );\r
+ moments->inv_sqrt_m00 = 0;\r
+\r
+ if( fabs(moments->m00) > DBL_EPSILON )\r
+ {\r
+ double inv_m00 = 1. / moments->m00;\r
+ cx = moments->m10 * inv_m00;\r
+ cy = moments->m01 * inv_m00;\r
+ moments->inv_sqrt_m00 = std::sqrt( fabs(inv_m00) );\r
+ }\r
+\r
+ /* mu20 = m20 - m10*cx */\r
+ mu20 = moments->m20 - moments->m10 * cx;\r
+ /* mu11 = m11 - m10*cy */\r
+ mu11 = moments->m11 - moments->m10 * cy;\r
+ /* mu02 = m02 - m01*cy */\r
+ mu02 = moments->m02 - moments->m01 * cy;\r
+\r
+ moments->mu20 = mu20;\r
+ moments->mu11 = mu11;\r
+ moments->mu02 = mu02;\r
+\r
+ /* mu30 = m30 - cx*(3*mu20 + cx*m10) */\r
+ moments->mu30 = moments->m30 - cx * (3 * mu20 + cx * moments->m10);\r
+ mu11 += mu11;\r
+ /* mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20 */\r
+ moments->mu21 = moments->m21 - cx * (mu11 + cx * moments->m01) - cy * mu20;\r
+ /* mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02 */\r
+ moments->mu12 = moments->m12 - cy * (mu11 + cy * moments->m10) - cx * mu02;\r
+ /* mu03 = m03 - cy*(3*mu02 + cy*m01) */\r
+ moments->mu03 = moments->m03 - cy * (3 * mu02 + cy * moments->m01);\r
+}\r
+\r
+\r
+static void\r
+icvContourMoments( CvSeq* contour, CvMoments* moments )\r
+{\r
+ int is_float = CV_SEQ_ELTYPE(contour) == CV_32FC2;\r
+\r
+ if( contour->total )\r
+ {\r
+ CvSeqReader reader;\r
+ double a00, a10, a01, a20, a11, a02, a30, a21, a12, a03;\r
+ double xi, yi, xi2, yi2, xi_1, yi_1, xi_12, yi_12, dxy, xii_1, yii_1;\r
+ int lpt = contour->total;\r
+\r
+ a00 = a10 = a01 = a20 = a11 = a02 = a30 = a21 = a12 = a03 = 0;\r
+\r
+ cvStartReadSeq( contour, &reader, 0 );\r
+\r
+ if( !is_float )\r
+ {\r
+ xi_1 = ((CvPoint*)(reader.ptr))->x;\r
+ yi_1 = ((CvPoint*)(reader.ptr))->y;\r
+ }\r
+ else\r
+ {\r
+ xi_1 = ((CvPoint2D32f*)(reader.ptr))->x;\r
+ yi_1 = ((CvPoint2D32f*)(reader.ptr))->y;\r
+ }\r
+ CV_NEXT_SEQ_ELEM( contour->elem_size, reader );\r
+ \r
+ xi_12 = xi_1 * xi_1;\r
+ yi_12 = yi_1 * yi_1;\r
+\r
+ while( lpt-- > 0 )\r
+ {\r
+ if( !is_float )\r
+ {\r
+ xi = ((CvPoint*)(reader.ptr))->x;\r
+ yi = ((CvPoint*)(reader.ptr))->y;\r
+ }\r
+ else\r
+ {\r
+ xi = ((CvPoint2D32f*)(reader.ptr))->x;\r
+ yi = ((CvPoint2D32f*)(reader.ptr))->y;\r
+ }\r
+ CV_NEXT_SEQ_ELEM( contour->elem_size, reader );\r
+\r
+ xi2 = xi * xi;\r
+ yi2 = yi * yi;\r
+ dxy = xi_1 * yi - xi * yi_1;\r
+ xii_1 = xi_1 + xi;\r
+ yii_1 = yi_1 + yi;\r
+\r
+ a00 += dxy;\r
+ a10 += dxy * xii_1;\r
+ a01 += dxy * yii_1;\r
+ a20 += dxy * (xi_1 * xii_1 + xi2);\r
+ a11 += dxy * (xi_1 * (yii_1 + yi_1) + xi * (yii_1 + yi));\r
+ a02 += dxy * (yi_1 * yii_1 + yi2);\r
+ a30 += dxy * xii_1 * (xi_12 + xi2);\r
+ a03 += dxy * yii_1 * (yi_12 + yi2);\r
+ a21 +=\r
+ dxy * (xi_12 * (3 * yi_1 + yi) + 2 * xi * xi_1 * yii_1 +\r
+ xi2 * (yi_1 + 3 * yi));\r
+ a12 +=\r
+ dxy * (yi_12 * (3 * xi_1 + xi) + 2 * yi * yi_1 * xii_1 +\r
+ yi2 * (xi_1 + 3 * xi));\r
+\r
+ xi_1 = xi;\r
+ yi_1 = yi;\r
+ xi_12 = xi2;\r
+ yi_12 = yi2;\r
+ }\r
+\r
+ double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60;\r
+\r
+ if( fabs(a00) > FLT_EPSILON )\r
+ {\r
+ if( a00 > 0 )\r
+ {\r
+ db1_2 = 0.5;\r
+ db1_6 = 0.16666666666666666666666666666667;\r
+ db1_12 = 0.083333333333333333333333333333333;\r
+ db1_24 = 0.041666666666666666666666666666667;\r
+ db1_20 = 0.05;\r
+ db1_60 = 0.016666666666666666666666666666667;\r
+ }\r
+ else\r
+ {\r
+ db1_2 = -0.5;\r
+ db1_6 = -0.16666666666666666666666666666667;\r
+ db1_12 = -0.083333333333333333333333333333333;\r
+ db1_24 = -0.041666666666666666666666666666667;\r
+ db1_20 = -0.05;\r
+ db1_60 = -0.016666666666666666666666666666667;\r
+ }\r
+\r
+ /* spatial moments */\r
+ moments->m00 = a00 * db1_2;\r
+ moments->m10 = a10 * db1_6;\r
+ moments->m01 = a01 * db1_6;\r
+ moments->m20 = a20 * db1_12;\r
+ moments->m11 = a11 * db1_24;\r
+ moments->m02 = a02 * db1_12;\r
+ moments->m30 = a30 * db1_20;\r
+ moments->m21 = a21 * db1_60;\r
+ moments->m12 = a12 * db1_60;\r
+ moments->m03 = a03 * db1_20;\r
+\r
+ icvCompleteMomentState( moments );\r
+ }\r
+ }\r
+}\r
+\r
+\r
+/* summarizes moment values for all tiles */\r
+static void\r
+icvAccumulateMoments( double *tiles, CvSize size, CvSize tile_size, CvMoments * moments )\r
+{\r
+ int x, y;\r
+\r
+ for( y = 0; y < size.height; y += tile_size.height )\r
+ {\r
+ for( x = 0; x < size.width; x += tile_size.width, tiles += 10 )\r
+ {\r
+ double dx = x, dy = y;\r
+ double dxm = dx * tiles[0], dym = dy * tiles[0];\r
+\r
+ /* + m00 ( = m00' ) */\r
+ moments->m00 += tiles[0];\r
+\r
+ /* + m10 ( = m10' + dx*m00' ) */\r
+ moments->m10 += tiles[1] + dxm;\r
+\r
+ /* + m01 ( = m01' + dy*m00' ) */\r
+ moments->m01 += tiles[2] + dym;\r
+\r
+ /* + m20 ( = m20' + 2*dx*m10' + dx*dx*m00' ) */\r
+ moments->m20 += tiles[3] + dx * (tiles[1] * 2 + dxm);\r
+\r
+ /* + m11 ( = m11' + dx*m01' + dy*m10' + dx*dy*m00' ) */\r
+ moments->m11 += tiles[4] + dx * (tiles[2] + dym) + dy * tiles[1];\r
+\r
+ /* + m02 ( = m02' + 2*dy*m01' + dy*dy*m00' ) */\r
+ moments->m02 += tiles[5] + dy * (tiles[2] * 2 + dym);\r
+\r
+ /* + m30 ( = m30' + 3*dx*m20' + 3*dx*dx*m10' + dx*dx*dx*m00' ) */\r
+ moments->m30 += tiles[6] + dx * (3. * tiles[3] + dx * (3. * tiles[1] + dxm));\r
+\r
+ /* + m21 (= m21' + dx*(2*m11' + 2*dy*m10' + dx*m01' + dx*dy*m00') + dy*m20') */\r
+ moments->m21 += tiles[7] + dx * (2 * (tiles[4] + dy * tiles[1]) +\r
+ dx * (tiles[2] + dym)) + dy * tiles[3];\r
+\r
+ /* + m12 (= m12' + dy*(2*m11' + 2*dx*m01' + dy*m10' + dx*dy*m00') + dx*m02') */\r
+ moments->m12 += tiles[8] + dy * (2 * (tiles[4] + dx * tiles[2]) +\r
+ dy * (tiles[1] + dxm)) + dx * tiles[5];\r
+\r
+ /* + m03 ( = m03' + 3*dy*m02' + 3*dy*dy*m01' + dy*dy*dy*m00' ) */\r
+ moments->m03 += tiles[9] + dy * (3. * tiles[5] + dy * (3. * tiles[2] + dym));\r
+ }\r
+ }\r
+\r
+ icvCompleteMomentState( moments );\r
+}\r
+\r
+\r
+/****************************************************************************************\\r
+* Spatial Moments *\r
+\****************************************************************************************/\r
+\r
+#define ICV_DEF_CALC_MOMENTS_IN_TILE( __op__, name, flavor, srctype, temptype, momtype ) \\r
+static CvStatus CV_STDCALL icv##name##_##flavor##_CnCR \\r
+( const srctype* img, int step, CvSize size, int cn, int coi, double *moments ) \\r
+{ \\r
+ int x, y, sx_init = (size.width & -4) * (size.width & -4), sy = 0; \\r
+ momtype mom[10]; \\r
+ \\r
+ assert( img && size.width && (size.width | size.height) >= 0 ); \\r
+ memset( mom, 0, 10 * sizeof( mom[0] )); \\r
+ \\r
+ if( coi ) \\r
+ img += coi - 1; \\r
+ step /= sizeof(img[0]); \\r
+ \\r
+ for( y = 0; y < size.height; sy += 2 * y + 1, y++, img += step ) \\r
+ { \\r
+ temptype x0 = 0; \\r
+ temptype x1 = 0; \\r
+ temptype x2 = 0; \\r
+ momtype x3 = 0; \\r
+ int sx = sx_init; \\r
+ const srctype* ptr = img; \\r
+ \\r
+ for( x = 0; x < size.width - 3; x += 4, ptr += cn*4 ) \\r
+ { \\r
+ temptype p0 = __op__(ptr[0]), p1 = __op__(ptr[cn]), \\r
+ p2 = __op__(ptr[2*cn]), p3 = __op__(ptr[3*cn]); \\r
+ temptype t = p1; \\r
+ temptype a, b, c; \\r
+ \\r
+ p0 += p1 + p2 + p3; /* p0 + p1 + p2 + p3 */ \\r
+ p1 += 2 * p2 + 3 * p3; /* p1 + p2*2 + p3*3 */ \\r
+ p2 = p1 + 2 * p2 + 6 * p3; /* p1 + p2*4 + p3*9 */ \\r
+ p3 = 2 * p2 - t + 9 * p3; /* p1 + p2*8 + p3*27 */ \\r
+ \\r
+ a = x * p0 + p1; /* x*p0 + (x+1)*p1 + (x+2)*p2 + (x+3)*p3 */ \\r
+ b = x * p1 + p2; /* (x+1)*p1 + 2*(x+2)*p2 + 3*(x+3)*p3 */ \\r
+ c = x * p2 + p3; /* (x+1)*p1 + 4*(x+2)*p2 + 9*(x+3)*p3 */ \\r
+ \\r
+ x0 += p0; \\r
+ x1 += a; \\r
+ a = a * x + b; /*(x^2)*p0+((x+1)^2)*p1+((x+2)^2)*p2+((x+3)^2)*p3 */ \\r
+ x2 += a; \\r
+ x3 += ((momtype)(a + b)) * x + c; /*x3 += (x^3)*p0+((x+1)^3)*p1 + */ \\r
+ /* ((x+2)^3)*p2+((x+3)^3)*p3 */ \\r
+ } \\r
+ \\r
+ /* process the rest */ \\r
+ for( ; x < size.width; sx += 2 * x + 1, x++, ptr += cn ) \\r
+ { \\r
+ temptype p = __op__(ptr[0]); \\r
+ temptype xp = x * p; \\r
+ \\r
+ x0 += p; \\r
+ x1 += xp; \\r
+ x2 += sx * p; \\r
+ x3 += ((momtype)sx) * xp; \\r
+ } \\r
+ \\r
+ { \\r
+ temptype py = y * x0; \\r
+ \\r
+ mom[9] += ((momtype)py) * sy; /* m03 */ \\r
+ mom[8] += ((momtype)x1) * sy; /* m12 */ \\r
+ mom[7] += ((momtype)x2) * y; /* m21 */ \\r
+ mom[6] += x3; /* m30 */ \\r
+ mom[5] += x0 * sy; /* m02 */ \\r
+ mom[4] += x1 * y; /* m11 */ \\r
+ mom[3] += x2; /* m20 */ \\r
+ mom[2] += py; /* m01 */ \\r
+ mom[1] += x1; /* m10 */ \\r
+ mom[0] += x0; /* m00 */ \\r
+ } \\r
+ } \\r
+ \\r
+ for( x = 0; x < 10; x++ ) \\r
+ moments[x] = (double)mom[x]; \\r
+ \\r
+ return CV_OK; \\r
+}\r
+\r
+\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 8u, uchar, int, int )\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 16u, ushort, int, int64 )\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 16s, short, int, int64 )\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 32f, float, double, double )\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NOP, MomentsInTile, 64f, double, double, double )\r
+\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO, MomentsInTileBin, 8u, uchar, int, int )\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO, MomentsInTileBin, 16s, ushort, int, int )\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO_FLT, MomentsInTileBin, 32f, int, int, int )\r
+ICV_DEF_CALC_MOMENTS_IN_TILE( CV_NONZERO_FLT, MomentsInTileBin, 64f, int64, double, double )\r
+\r
+#define icvMomentsInTile_32s_CnCR 0\r
+#define icvMomentsInTileBin_16u_CnCR icvMomentsInTileBin_16s_CnCR\r
+#define icvMomentsInTileBin_32s_CnCR 0\r
+\r
+typedef CvStatus (CV_STDCALL* CvMomentFunc)\r
+( const void* img, int step, CvSize size, int cn, int coi, double *moments );\r
+\r
+CV_IMPL void\r
+cvMoments( const void* array, CvMoments* moments, int binary )\r
+{\r
+ static CvFuncTable mom_tab;\r
+ static CvFuncTable mombin_tab;\r
+ static int inittab = 0;\r
+ double* tiles = 0;\r
+\r
+ CV_FUNCNAME("cvMoments");\r
+\r
+ __BEGIN__;\r
+\r
+ int type = 0, depth, cn, pix_size;\r
+ int coi = 0;\r
+ int x, y, k, tile_num = 1;\r
+ CvSize size, tile_size = { 32, 32 };\r
+ CvMat stub, *mat = (CvMat*)array;\r
+ CvMomentFunc func = 0;\r
+ CvContour contour_header;\r
+ CvSeq* contour = 0;\r
+ CvSeqBlock block;\r
+\r
+ if( CV_IS_SEQ( array ))\r
+ {\r
+ contour = (CvSeq*)array;\r
+ if( !CV_IS_SEQ_POLYGON( contour ))\r
+ CV_ERROR( CV_StsBadArg, "The passed sequence is not a valid contour" );\r
+ }\r
+\r
+ if( !inittab )\r
+ {\r
+ CV_INIT_FUNC_TAB( mom_tab, icvMomentsInTile, _CnCR );\r
+ CV_INIT_FUNC_TAB( mombin_tab, icvMomentsInTileBin, _CnCR );\r
+ inittab = 1;\r
+ }\r
+ \r
+ if( !moments )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+\r
+ memset( moments, 0, sizeof(*moments));\r
+\r
+ if( !contour )\r
+ {\r
+ CV_CALL( mat = cvGetMat( mat, &stub, &coi ));\r
+ type = CV_MAT_TYPE( mat->type );\r
+\r
+ if( type == CV_32SC2 || type == CV_32FC2 )\r
+ {\r
+ CV_CALL( contour = cvPointSeqFromMat(\r
+ CV_SEQ_KIND_CURVE | CV_SEQ_FLAG_CLOSED,\r
+ mat, &contour_header, &block ));\r
+ }\r
+ }\r
+\r
+ if( contour )\r
+ {\r
+ icvContourMoments( contour, moments );\r
+ EXIT;\r
+ }\r
+\r
+ type = CV_MAT_TYPE( mat->type );\r
+ depth = CV_MAT_DEPTH( type );\r
+ cn = CV_MAT_CN( type );\r
+ pix_size = CV_ELEM_SIZE(type);\r
+ size = cvGetMatSize( mat );\r
+\r
+ if( cn > 1 && coi == 0 )\r
+ CV_ERROR( CV_StsBadArg, "Invalid image type" );\r
+\r
+ if( size.width <= 0 || size.height <= 0 )\r
+ {\r
+ EXIT;\r
+ }\r
+\r
+ func = (CvMomentFunc)(!binary ? mom_tab.fn_2d[depth] : mombin_tab.fn_2d[depth]);\r
+\r
+ if( !func )\r
+ CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );\r
+\r
+ if( depth >= CV_32S && !binary )\r
+ tile_size = size;\r
+ else\r
+ tile_num = ((size.width + tile_size.width - 1)/tile_size.width)*\r
+ ((size.height + tile_size.height - 1)/tile_size.height);\r
+\r
+ CV_CALL( tiles = (double*)cvAlloc( tile_num*10*sizeof(double)));\r
+\r
+ for( y = 0, k = 0; y < size.height; y += tile_size.height )\r
+ {\r
+ CvSize cur_tile_size = tile_size;\r
+ if( y + cur_tile_size.height > size.height )\r
+ cur_tile_size.height = size.height - y;\r
+ \r
+ for( x = 0; x < size.width; x += tile_size.width, k++ )\r
+ {\r
+ if( x + cur_tile_size.width > size.width )\r
+ cur_tile_size.width = size.width - x;\r
+\r
+ assert( k < tile_num );\r
+\r
+ IPPI_CALL( func( mat->data.ptr + y*mat->step + x*pix_size,\r
+ mat->step, cur_tile_size, cn, coi, tiles + k*10 ));\r
+ }\r
+ }\r
+\r
+ icvAccumulateMoments( tiles, size, tile_size, moments );\r
+\r
+ __END__;\r
+\r
+ cvFree( &tiles );\r
+}\r
+\r
+/*F///////////////////////////////////////////////////////////////////////////////////////\r
+// Name: cvGetHuMoments\r
+// Purpose: Returns Hu moments\r
+// Context:\r
+// Parameters:\r
+// mState - moment structure filled by one of the icvMoments[Binary]*** function\r
+// HuState - pointer to output structure containing seven Hu moments\r
+// Returns:\r
+// CV_NO_ERR if success or error code\r
+// Notes:\r
+//F*/\r
+CV_IMPL void\r
+cvGetHuMoments( CvMoments * mState, CvHuMoments * HuState )\r
+{\r
+ CV_FUNCNAME( "cvGetHuMoments" );\r
+\r
+ __BEGIN__;\r
+\r
+ if( !mState || !HuState )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+\r
+ {\r
+ double m00s = mState->inv_sqrt_m00, m00 = m00s * m00s, s2 = m00 * m00, s3 = s2 * m00s;\r
+\r
+ double nu20 = mState->mu20 * s2,\r
+ nu11 = mState->mu11 * s2,\r
+ nu02 = mState->mu02 * s2,\r
+ nu30 = mState->mu30 * s3,\r
+ nu21 = mState->mu21 * s3, nu12 = mState->mu12 * s3, nu03 = mState->mu03 * s3;\r
+\r
+ double t0 = nu30 + nu12;\r
+ double t1 = nu21 + nu03;\r
+\r
+ double q0 = t0 * t0, q1 = t1 * t1;\r
+\r
+ double n4 = 4 * nu11;\r
+ double s = nu20 + nu02;\r
+ double d = nu20 - nu02;\r
+\r
+ HuState->hu1 = s;\r
+ HuState->hu2 = d * d + n4 * nu11;\r
+ HuState->hu4 = q0 + q1;\r
+ HuState->hu6 = d * (q0 - q1) + n4 * t0 * t1;\r
+\r
+ t0 *= q0 - 3 * q1;\r
+ t1 *= 3 * q0 - q1;\r
+\r
+ q0 = nu30 - 3 * nu12;\r
+ q1 = 3 * nu21 - nu03;\r
+\r
+ HuState->hu3 = q0 * q0 + q1 * q1;\r
+ HuState->hu5 = q0 * t0 + q1 * t1;\r
+ HuState->hu7 = q1 * t0 - q0 * t1;\r
+ }\r
+\r
+ __END__;\r
+}\r
+\r
+\r
+/*F///////////////////////////////////////////////////////////////////////////////////////\r
+// Name: cvGetSpatialMoment\r
+// Purpose: Returns spatial moment(x_order, y_order) which is determined as:\r
+// m(x_o,y_o) = sum (x ^ x_o)*(y ^ y_o)*I(x,y)\r
+// 0 <= x_o, y_o; x_o + y_o <= 3\r
+// Context:\r
+// Parameters:\r
+// mom - moment structure filled by one of the icvMoments[Binary]*** function\r
+// x_order - x order of the moment\r
+// y_order - y order of the moment\r
+// Returns:\r
+// moment value or large negative number (-DBL_MAX) if error\r
+// Notes:\r
+//F*/\r
+CV_IMPL double\r
+cvGetSpatialMoment( CvMoments * moments, int x_order, int y_order )\r
+{\r
+ int order = x_order + y_order;\r
+ double moment = -DBL_MAX;\r
+\r
+ CV_FUNCNAME( "cvGetSpatialMoment" );\r
+\r
+ __BEGIN__;\r
+\r
+ if( !moments )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+ if( (x_order | y_order) < 0 || order > 3 )\r
+ CV_ERROR( CV_StsOutOfRange, "" );\r
+\r
+ moment = (&(moments->m00))[order + (order >> 1) + (order > 2) * 2 + y_order];\r
+\r
+ __END__;\r
+\r
+ return moment;\r
+}\r
+\r
+\r
+/*F///////////////////////////////////////////////////////////////////////////////////////\r
+// Name: cvGetCentralMoment\r
+// Purpose: Returns central moment(x_order, y_order) which is determined as:\r
+// mu(x_o,y_o) = sum ((x - xc)^ x_o)*((y - yc) ^ y_o)*I(x,y)\r
+// 0 <= x_o, y_o; x_o + y_o <= 3,\r
+// (xc, yc) = (m10/m00,m01/m00) - center of gravity \r
+// Context:\r
+// Parameters:\r
+// mom - moment structure filled by one of the icvMoments[Binary]*** function\r
+// x_order - x order of the moment\r
+// y_order - y order of the moment\r
+// Returns:\r
+// moment value or large negative number (-DBL_MAX) if error\r
+// Notes:\r
+//F*/\r
+CV_IMPL double\r
+cvGetCentralMoment( CvMoments * moments, int x_order, int y_order )\r
+{\r
+ int order = x_order + y_order;\r
+ double mu = 0;\r
+\r
+ CV_FUNCNAME( "cvGetCentralMoment" );\r
+\r
+ __BEGIN__;\r
+\r
+ if( !moments )\r
+ CV_ERROR( CV_StsNullPtr, "" );\r
+ if( (x_order | y_order) < 0 || order > 3 )\r
+ CV_ERROR( CV_StsOutOfRange, "" );\r
+\r
+ if( order >= 2 )\r
+ {\r
+ mu = (&(moments->m00))[4 + order * 3 + y_order];\r
+ }\r
+ else if( order == 0 )\r
+ mu = moments->m00;\r
+\r
+ __END__;\r
+\r
+ return mu;\r
+}\r
+\r
+\r
+/*F///////////////////////////////////////////////////////////////////////////////////////\r
+// Name: cvGetNormalizedCentralMoment\r
+// Purpose: Returns normalized central moment(x_order,y_order) which is determined as:\r
+// nu(x_o,y_o) = mu(x_o, y_o)/(m00 ^ (((x_o + y_o)/2) + 1))\r
+// 0 <= x_o, y_o; x_o + y_o <= 3,\r
+// (xc, yc) = (m10/m00,m01/m00) - center of gravity \r
+// Context:\r
+// Parameters:\r
+// mom - moment structure filled by one of the icvMoments[Binary]*** function\r
+// x_order - x order of the moment\r
+// y_order - y order of the moment\r
+// Returns:\r
+// moment value or large negative number (-DBL_MAX) if error\r
+// Notes:\r
+//F*/\r
+CV_IMPL double\r
+cvGetNormalizedCentralMoment( CvMoments * moments, int x_order, int y_order )\r
+{\r
+ int order = x_order + y_order;\r
+ double mu = 0;\r
+ double m00s, m00;\r
+\r
+ CV_FUNCNAME( "cvGetCentralNormalizedMoment" );\r
+\r
+ __BEGIN__;\r
+\r
+ mu = cvGetCentralMoment( moments, x_order, y_order );\r
+ CV_CHECK();\r
+\r
+ m00s = moments->inv_sqrt_m00;\r
+ m00 = m00s * m00s;\r
+\r
+ while( --order >= 0 )\r
+ m00 *= m00s;\r
+ mu *= m00;\r
+\r
+ __END__;\r
+\r
+ return mu;\r
+}\r
+\r
+\r
+namespace cv\r
+{\r
+\r
+Moments::Moments()\r
+{\r
+ m00 = m10 = m01 = m20 = m11 = m02 = m30 = m21 = m12 = m03 =\r
+ mu20 = mu11 = mu02 = mu30 = mu21 = mu12 = mu03 =\r
+ nu20 = nu11 = nu02 = nu30 = nu21 = nu12 = nu03 = 0.;\r
+}\r
+\r
+Moments::Moments( double _m00, double _m10, double _m01, double _m20, double _m11,\r
+ double _m02, double _m30, double _m21, double _m12, double _m03 )\r
+{\r
+ m00 = _m00; m10 = _m10; m01 = _m01;\r
+ m20 = _m20; m11 = _m11; m02 = _m02;\r
+ m30 = _m30; m21 = _m21; m12 = _m12; m03 = _m03;\r
+\r
+ double cx = 0, cy = 0, inv_m00 = 0;\r
+ if( std::abs(m00) > DBL_EPSILON )\r
+ {\r
+ inv_m00 = 1./m00;\r
+ cx = m10*inv_m00; cy = m01*inv_m00;\r
+ }\r
+\r
+ mu20 = m20 - m10*cx;\r
+ mu11 = m11 - m10*cy;\r
+ mu02 = m02 - m01*cy;\r
+\r
+ mu30 = m30 - cx*(3*mu20 + cx*m10);\r
+ mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20;\r
+ mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02;\r
+ mu03 = m03 - cy*(3*mu02 + cy*m01);\r
+\r
+ double inv_sqrt_m00 = std::sqrt(std::abs(inv_m00));\r
+ double s2 = inv_m00*inv_m00, s3 = s2*inv_sqrt_m00;\r
+\r
+ nu20 = mu20*s2; nu11 = mu11*s2; nu02 = mu02*s2;\r
+ nu30 = mu30*s3; nu21 = mu21*s3; nu12 = mu12*s3; nu03 = mu03*s3;\r
+}\r
+\r
+Moments::Moments( const CvMoments& m )\r
+{\r
+ *this = Moments(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03);\r
+}\r
+\r
+Moments::operator CvMoments() const\r
+{\r
+ CvMoments m;\r
+ m.m00 = m00; m.m10 = m10; m.m01 = m01;\r
+ m.m20 = m20; m.m11 = m11; m.m02 = m02;\r
+ m.m30 = m30; m.m21 = m21; m.m12 = m12; m.m03 = m03;\r
+ m.mu20 = mu20; m.mu11 = mu11; m.mu02 = mu02;\r
+ m.mu30 = mu30; m.mu21 = mu21; m.mu12 = mu12; m.mu03 = mu03;\r
+ double am00 = std::abs(m00);\r
+ m.inv_sqrt_m00 = am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0;\r
+\r
+ return m;\r
+}\r
+\r
+Moments moments( const Mat& image, bool binaryImage )\r
+{\r
+ CvMoments om;\r
+ CvMat _image = image;\r
+ cvMoments(&_image, &om, binaryImage);\r
+ return om;\r
+}\r
+\r
+Moments moments( const Vector<Point>& points )\r
+{\r
+ CvMoments om;\r
+ CvMat _points = points;\r
+ cvMoments(&_points, &om, 0);\r
+ return om;\r
+}\r
+\r
+Moments moments( const Vector<Point2f>& points )\r
+{\r
+ CvMoments om;\r
+ CvMat _points = points;\r
+ cvMoments(&_points, &om, 0);\r
+ return om;\r
+}\r
+\r
+void HuMoments( const Moments& m, double hu[7] )\r
+{\r
+ double t0 = m.nu30 + m.nu12;\r
+ double t1 = m.nu21 + m.nu03;\r
+\r
+ double q0 = t0 * t0, q1 = t1 * t1;\r
+\r
+ double n4 = 4 * m.nu11;\r
+ double s = m.nu20 + m.nu02;\r
+ double d = m.nu20 - m.nu02;\r
+\r
+ hu[0] = s;\r
+ hu[1] = d * d + n4 * m.nu11;\r
+ hu[3] = q0 + q1;\r
+ hu[5] = d * (q0 - q1) + n4 * t0 * t1;\r
+\r
+ t0 *= q0 - 3 * q1;\r
+ t1 *= 3 * q0 - q1;\r
+\r
+ q0 = m.nu30 - 3 * m.nu12;\r
+ q1 = 3 * m.nu21 - m.nu03;\r
+\r
+ hu[2] = q0 * q0 + q1 * q1;\r
+ hu[4] = q0 * t0 + q1 * t1;\r
+ hu[6] = q1 * t0 - q0 * t1;\r
+}\r
+\r
+}\r
+\r
+/* End of file. */\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-
-static CvStatus CV_STDCALL icvUpdateMotionHistory_8u32f_C1IR
- (const uchar * silIm, int silStep, float *mhiIm, int mhiStep,
- CvSize size, float timestamp, float mhi_duration)
-{
- int x, y;
-
- /* function processes floating-point images using integer arithmetics */
- Cv32suf v;
- int ts, delbound;
- int *mhi = (int *) mhiIm;
-
- v.f = timestamp;
- ts = v.i;
-
- if( !silIm || !mhiIm )
- return CV_NULLPTR_ERR;
-
- if( size.height <= 0 || size.width <= 0 ||
- silStep < size.width || mhiStep < size.width * CV_SIZEOF_FLOAT ||
- (mhiStep & (CV_SIZEOF_FLOAT - 1)) != 0 )
- return CV_BADSIZE_ERR;
-
- if( mhi_duration < 0 )
- return CV_BADFACTOR_ERR;
-
- mhi_duration = timestamp - mhi_duration;
-
- v.f = mhi_duration;
- delbound = CV_TOGGLE_FLT( v.i );
-
- mhiStep /= sizeof(mhi[0]);
-
- if( mhiStep == size.width && silStep == size.width )
- {
- size.width *= size.height;
- size.height = 1;
- }
-
- if( delbound > 0 )
- for( y = 0; y < size.height; y++, silIm += silStep, mhi += mhiStep )
- for( x = 0; x < size.width; x++ )
- {
- int val = mhi[x];
-
- /* val = silIm[x] ? ts : val < delbound ? 0 : val; */
- val &= (val < delbound) - 1;
- val ^= (ts ^ val) & ((silIm[x] == 0) - 1);
- mhi[x] = val;
- }
- else
- for( y = 0; y < size.height; y++, silIm += silStep, mhi += mhiStep )
- for( x = 0; x < size.width; x++ )
- {
- int val = mhi[x];
-
- /* val = silIm[x] ? ts : val < delbound ? 0 : val; */
- val &= (CV_TOGGLE_FLT( val ) < delbound) - 1;
- val ^= (ts ^ val) & ((silIm[x] == 0) - 1);
- mhi[x] = val;
- }
-
- return CV_OK;
-}
-
-
-/* motion templates */
-CV_IMPL void
-cvUpdateMotionHistory( const void* silhouette, void* mhimg,
- double timestamp, double mhi_duration )
-{
- CvSize size;
- CvMat silhstub, *silh = (CvMat*)silhouette;
- CvMat mhistub, *mhi = (CvMat*)mhimg;
- int mhi_step, silh_step;
-
- CV_FUNCNAME( "cvUpdateMHIByTime" );
-
- __BEGIN__;
-
- CV_CALL( silh = cvGetMat( silh, &silhstub ));
- CV_CALL( mhi = cvGetMat( mhi, &mhistub ));
-
- if( !CV_IS_MASK_ARR( silh ))
- CV_ERROR( CV_StsBadMask, "" );
-
- if( CV_MAT_CN( mhi->type ) > 1 )
- CV_ERROR( CV_BadNumChannels, "" );
-
- if( CV_MAT_DEPTH( mhi->type ) != CV_32F )
- CV_ERROR( CV_BadDepth, "" );
-
- if( !CV_ARE_SIZES_EQ( mhi, silh ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- size = cvGetMatSize( mhi );
-
- mhi_step = mhi->step;
- silh_step = silh->step;
-
- if( CV_IS_MAT_CONT( mhi->type & silh->type ))
- {
- size.width *= size.height;
- mhi_step = silh_step = CV_STUB_STEP;
- size.height = 1;
- }
-
- IPPI_CALL( icvUpdateMotionHistory_8u32f_C1IR( (const uchar*)(silh->data.ptr), silh_step,
- mhi->data.fl, mhi_step, size,
- (float)timestamp, (float)mhi_duration ));
- __END__;
-}
-
-
-CV_IMPL void
-cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
- CvArr* orientation,
- double delta1, double delta2,
- int aperture_size )
-{
- CvMat *dX_min = 0, *dY_max = 0;
- IplConvKernel* el = 0;
-
- CV_FUNCNAME( "cvCalcMotionGradient" );
-
- __BEGIN__;
-
- CvMat mhistub, *mhi = (CvMat*)mhiimg;
- CvMat maskstub, *mask = (CvMat*)maskimg;
- CvMat orientstub, *orient = (CvMat*)orientation;
- CvMat dX_min_row, dY_max_row, orient_row, mask_row;
- CvSize size;
- int x, y;
-
- float gradient_epsilon = 1e-4f * aperture_size * aperture_size;
- float min_delta, max_delta;
-
- CV_CALL( mhi = cvGetMat( mhi, &mhistub ));
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
- CV_CALL( orient = cvGetMat( orient, &orientstub ));
-
- if( !CV_IS_MASK_ARR( mask ))
- CV_ERROR( CV_StsBadMask, "" );
-
- if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
- CV_ERROR( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );
-
- if( delta1 <= 0 || delta2 <= 0 )
- CV_ERROR( CV_StsOutOfRange, "both delta's must be positive" );
-
- if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
- CV_ERROR( CV_StsUnsupportedFormat,
- "MHI and orientation must be single-channel floating-point images" );
-
- if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- if( orient->data.ptr == mhi->data.ptr )
- CV_ERROR( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );
-
- if( delta1 > delta2 )
- {
- double t;
- CV_SWAP( delta1, delta2, t );
- }
-
- size = cvGetMatSize( mhi );
- min_delta = (float)delta1;
- max_delta = (float)delta2;
- CV_CALL( dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F ));
- CV_CALL( dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F ));
-
- /* calc Dx and Dy */
- CV_CALL( cvSobel( mhi, dX_min, 1, 0, aperture_size ));
- CV_CALL( cvSobel( mhi, dY_max, 0, 1, aperture_size ));
- cvGetRow( dX_min, &dX_min_row, 0 );
- cvGetRow( dY_max, &dY_max_row, 0 );
- cvGetRow( orient, &orient_row, 0 );
- cvGetRow( mask, &mask_row, 0 );
-
- /* calc gradient */
- for( y = 0; y < size.height; y++ )
- {
- dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
- dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
- orient_row.data.ptr = orient->data.ptr + y*orient->step;
- mask_row.data.ptr = mask->data.ptr + y*mask->step;
- cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );
-
- /* make orientation zero where the gradient is very small */
- for( x = 0; x < size.width; x++ )
- {
- float dY = dY_max_row.data.fl[x];
- float dX = dX_min_row.data.fl[x];
-
- if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon )
- {
- mask_row.data.ptr[x] = 0;
- orient_row.data.i[x] = 0;
- }
- else
- mask_row.data.ptr[x] = 1;
- }
- }
-
- CV_CALL( el = cvCreateStructuringElementEx( aperture_size, aperture_size,
- aperture_size/2, aperture_size/2, CV_SHAPE_RECT ));
- cvErode( mhi, dX_min, el );
- cvDilate( mhi, dY_max, el );
-
- /* mask off pixels which have little motion difference in their neighborhood */
- for( y = 0; y < size.height; y++ )
- {
- dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
- dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
- mask_row.data.ptr = mask->data.ptr + y*mask->step;
- orient_row.data.ptr = orient->data.ptr + y*orient->step;
-
- for( x = 0; x < size.width; x++ )
- {
- float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x];
-
- if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 )
- {
- mask_row.data.ptr[x] = 0;
- orient_row.data.i[x] = 0;
- }
- }
- }
-
- __END__;
-
- cvReleaseMat( &dX_min );
- cvReleaseMat( &dY_max );
- cvReleaseStructuringElement( &el );
-}
-
-
-CV_IMPL double
-cvCalcGlobalOrientation( const void* orientation, const void* maskimg, const void* mhiimg,
- double curr_mhi_timestamp, double mhi_duration )
-{
- double angle = 0;
- int hist_size = 12;
- CvHistogram* hist = 0;
-
- CV_FUNCNAME( "cvCalcGlobalOrientation" );
-
- __BEGIN__;
-
- CvMat mhistub, *mhi = (CvMat*)mhiimg;
- CvMat maskstub, *mask = (CvMat*)maskimg;
- CvMat orientstub, *orient = (CvMat*)orientation;
- void* _orient;
- float _ranges[] = { 0, 360 };
- float* ranges = _ranges;
- int base_orient;
- double shift_orient = 0, shift_weight = 0, fbase_orient;
- double a, b;
- float delbound;
- CvMat mhi_row, mask_row, orient_row;
- int x, y, mhi_rows, mhi_cols;
-
- CV_CALL( mhi = cvGetMat( mhi, &mhistub ));
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
- CV_CALL( orient = cvGetMat( orient, &orientstub ));
-
- if( !CV_IS_MASK_ARR( mask ))
- CV_ERROR( CV_StsBadMask, "" );
-
- if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
- CV_ERROR( CV_StsUnsupportedFormat,
- "MHI and orientation must be single-channel floating-point images" );
-
- if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- if( mhi_duration <= 0 )
- CV_ERROR( CV_StsOutOfRange, "MHI duration must be positive" );
-
- if( orient->data.ptr == mhi->data.ptr )
- CV_ERROR( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );
-
- // calculate histogram of different orientation values
- CV_CALL( hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges ));
- _orient = orient;
- cvCalcArrHist( &_orient, hist, 0, mask );
-
- // find the maximum index (the dominant orientation)
- cvGetMinMaxHistValue( hist, 0, 0, 0, &base_orient );
- base_orient *= 360/hist_size;
-
- // override timestamp with the maximum value in MHI
- cvMinMaxLoc( mhi, 0, &curr_mhi_timestamp, 0, 0, mask );
-
- // find the shift relative to the dominant orientation as weighted sum of relative angles
- a = 254. / 255. / mhi_duration;
- b = 1. - curr_mhi_timestamp * a;
- fbase_orient = base_orient;
- delbound = (float)(curr_mhi_timestamp - mhi_duration);
- mhi_rows = mhi->rows;
- mhi_cols = mhi->cols;
-
- if( CV_IS_MAT_CONT( mhi->type & mask->type & orient->type ))
- {
- mhi_cols *= mhi_rows;
- mhi_rows = 1;
- }
-
- cvGetRow( mhi, &mhi_row, 0 );
- cvGetRow( mask, &mask_row, 0 );
- cvGetRow( orient, &orient_row, 0 );
-
- /*
- a = 254/(255*dt)
- b = 1 - t*a = 1 - 254*t/(255*dur) =
- (255*dt - 254*t)/(255*dt) =
- (dt - (t - dt)*254)/(255*dt);
- --------------------------------------------------------
- ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) =
- (254*x + dt - (t - dt)*254)/(255*dt) =
- ((x - (t - dt))*254 + dt)/(255*dt) =
- (((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255
- */
- for( y = 0; y < mhi_rows; y++ )
- {
- mhi_row.data.ptr = mhi->data.ptr + mhi->step*y;
- mask_row.data.ptr = mask->data.ptr + mask->step*y;
- orient_row.data.ptr = orient->data.ptr + orient->step*y;
-
- for( x = 0; x < mhi_cols; x++ )
- if( mask_row.data.ptr[x] != 0 && mhi_row.data.fl[x] > delbound )
- {
- /*
- orient in 0..360, base_orient in 0..360
- -> (rel_angle = orient - base_orient) in -360..360.
- rel_angle is translated to -180..180
- */
- double weight = mhi_row.data.fl[x] * a + b;
- int rel_angle = cvRound( orient_row.data.fl[x] - fbase_orient );
-
- rel_angle += (rel_angle < -180 ? 360 : 0);
- rel_angle += (rel_angle > 180 ? -360 : 0);
-
- if( abs(rel_angle) < 90 )
- {
- shift_orient += weight * rel_angle;
- shift_weight += weight;
- }
- }
- }
-
- // add the dominant orientation and the relative shift
- if( shift_weight == 0 )
- shift_weight = 0.01;
-
- base_orient = base_orient + cvRound( shift_orient / shift_weight );
- base_orient -= (base_orient < 360 ? 0 : 360);
- base_orient += (base_orient >= 0 ? 0 : 360);
-
- angle = base_orient;
-
- __END__;
-
- cvReleaseHist( &hist );
- return angle;
-}
-
-
-CV_IMPL CvSeq*
-cvSegmentMotion( const CvArr* mhiimg, CvArr* segmask, CvMemStorage* storage,
- double timestamp, double seg_thresh )
-{
- CvSeq* components = 0;
- CvMat* mask8u = 0;
-
- CV_FUNCNAME( "cvSegmentMotion" );
-
- __BEGIN__;
-
- CvMat mhistub, *mhi = (CvMat*)mhiimg;
- CvMat maskstub, *mask = (CvMat*)segmask;
- Cv32suf v, comp_idx;
- int stub_val, ts;
- int x, y;
-
- if( !storage )
- CV_ERROR( CV_StsNullPtr, "NULL memory storage" );
-
- CV_CALL( mhi = cvGetMat( mhi, &mhistub ));
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
-
- if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( mask->type ) != CV_32FC1 )
- CV_ERROR( CV_BadDepth, "Both MHI and the destination mask" );
-
- if( !CV_ARE_SIZES_EQ( mhi, mask ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- CV_CALL( mask8u = cvCreateMat( mhi->rows + 2, mhi->cols + 2, CV_8UC1 ));
- cvZero( mask8u );
- cvZero( mask );
- CV_CALL( components = cvCreateSeq( CV_SEQ_KIND_GENERIC, sizeof(CvSeq),
- sizeof(CvConnectedComp), storage ));
-
- v.f = (float)timestamp; ts = v.i;
- v.f = FLT_MAX*0.1f; stub_val = v.i;
- comp_idx.f = 1;
-
- for( y = 0; y < mhi->rows; y++ )
- {
- int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);
- for( x = 0; x < mhi->cols; x++ )
- {
- if( mhi_row[x] == 0 )
- mhi_row[x] = stub_val;
- }
- }
-
- for( y = 0; y < mhi->rows; y++ )
- {
- int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);
- uchar* mask8u_row = mask8u->data.ptr + (y+1)*mask8u->step + 1;
-
- for( x = 0; x < mhi->cols; x++ )
- {
- if( mhi_row[x] == ts && mask8u_row[x] == 0 )
- {
- CvConnectedComp comp;
- int x1, y1;
- CvScalar _seg_thresh = cvRealScalar(seg_thresh);
- CvPoint seed = cvPoint(x,y);
-
- CV_CALL( cvFloodFill( mhi, seed, cvRealScalar(0), _seg_thresh, _seg_thresh,
- &comp, CV_FLOODFILL_MASK_ONLY + 2*256 + 4, mask8u ));
-
- for( y1 = 0; y1 < comp.rect.height; y1++ )
- {
- int* mask_row1 = (int*)(mask->data.ptr +
- (comp.rect.y + y1)*mask->step) + comp.rect.x;
- uchar* mask8u_row1 = mask8u->data.ptr +
- (comp.rect.y + y1+1)*mask8u->step + comp.rect.x+1;
-
- for( x1 = 0; x1 < comp.rect.width; x1++ )
- {
- if( mask8u_row1[x1] > 1 )
- {
- mask8u_row1[x1] = 1;
- mask_row1[x1] = comp_idx.i;
- }
- }
- }
- comp_idx.f++;
- cvSeqPush( components, &comp );
- }
- }
- }
-
- for( y = 0; y < mhi->rows; y++ )
- {
- int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);
- for( x = 0; x < mhi->cols; x++ )
- {
- if( mhi_row[x] == stub_val )
- mhi_row[x] = 0;
- }
- }
-
- __END__;
-
- cvReleaseMat( &mask8u );
- return components;
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cv.h"\r
+\r
+static CvStatus CV_STDCALL icvUpdateMotionHistory_8u32f_C1IR\r
+ (const uchar * silIm, int silStep, float *mhiIm, int mhiStep,\r
+ CvSize size, float timestamp, float mhi_duration)\r
+{\r
+ int x, y;\r
+\r
+ /* function processes floating-point images using integer arithmetics */\r
+ Cv32suf v;\r
+ int ts, delbound;\r
+ int *mhi = (int *) mhiIm;\r
+\r
+ v.f = timestamp;\r
+ ts = v.i;\r
+\r
+ if( !silIm || !mhiIm )\r
+ return CV_NULLPTR_ERR;\r
+\r
+ if( size.height <= 0 || size.width <= 0 ||\r
+ silStep < size.width || mhiStep < size.width * CV_SIZEOF_FLOAT ||\r
+ (mhiStep & (CV_SIZEOF_FLOAT - 1)) != 0 )\r
+ return CV_BADSIZE_ERR;\r
+\r
+ if( mhi_duration < 0 )\r
+ return CV_BADFACTOR_ERR;\r
+\r
+ mhi_duration = timestamp - mhi_duration;\r
+\r
+ v.f = mhi_duration;\r
+ delbound = CV_TOGGLE_FLT( v.i );\r
+\r
+ mhiStep /= sizeof(mhi[0]);\r
+\r
+ if( mhiStep == size.width && silStep == size.width )\r
+ {\r
+ size.width *= size.height;\r
+ size.height = 1;\r
+ }\r
+\r
+ if( delbound > 0 )\r
+ for( y = 0; y < size.height; y++, silIm += silStep, mhi += mhiStep )\r
+ for( x = 0; x < size.width; x++ )\r
+ {\r
+ int val = mhi[x];\r
+\r
+ /* val = silIm[x] ? ts : val < delbound ? 0 : val; */\r
+ val &= (val < delbound) - 1;\r
+ val ^= (ts ^ val) & ((silIm[x] == 0) - 1);\r
+ mhi[x] = val;\r
+ }\r
+ else\r
+ for( y = 0; y < size.height; y++, silIm += silStep, mhi += mhiStep )\r
+ for( x = 0; x < size.width; x++ )\r
+ {\r
+ int val = mhi[x];\r
+\r
+ /* val = silIm[x] ? ts : val < delbound ? 0 : val; */\r
+ val &= (CV_TOGGLE_FLT( val ) < delbound) - 1;\r
+ val ^= (ts ^ val) & ((silIm[x] == 0) - 1);\r
+ mhi[x] = val;\r
+ }\r
+\r
+ return CV_OK;\r
+}\r
+\r
+\r
+/* motion templates */\r
+CV_IMPL void\r
+cvUpdateMotionHistory( const void* silhouette, void* mhimg,\r
+ double timestamp, double mhi_duration )\r
+{\r
+ CvSize size;\r
+ CvMat silhstub, *silh = (CvMat*)silhouette;\r
+ CvMat mhistub, *mhi = (CvMat*)mhimg;\r
+ int mhi_step, silh_step;\r
+\r
+ CV_FUNCNAME( "cvUpdateMHIByTime" );\r
+\r
+ __BEGIN__;\r
+\r
+ CV_CALL( silh = cvGetMat( silh, &silhstub ));\r
+ CV_CALL( mhi = cvGetMat( mhi, &mhistub ));\r
+\r
+ if( !CV_IS_MASK_ARR( silh ))\r
+ CV_ERROR( CV_StsBadMask, "" );\r
+\r
+ if( CV_MAT_CN( mhi->type ) > 1 )\r
+ CV_ERROR( CV_BadNumChannels, "" );\r
+\r
+ if( CV_MAT_DEPTH( mhi->type ) != CV_32F )\r
+ CV_ERROR( CV_BadDepth, "" );\r
+\r
+ if( !CV_ARE_SIZES_EQ( mhi, silh ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "" );\r
+\r
+ size = cvGetMatSize( mhi );\r
+\r
+ mhi_step = mhi->step;\r
+ silh_step = silh->step;\r
+\r
+ if( CV_IS_MAT_CONT( mhi->type & silh->type ))\r
+ {\r
+ size.width *= size.height;\r
+ mhi_step = silh_step = CV_STUB_STEP;\r
+ size.height = 1;\r
+ }\r
+\r
+ IPPI_CALL( icvUpdateMotionHistory_8u32f_C1IR( (const uchar*)(silh->data.ptr), silh_step,\r
+ mhi->data.fl, mhi_step, size,\r
+ (float)timestamp, (float)mhi_duration ));\r
+ __END__;\r
+}\r
+\r
+\r
+CV_IMPL void\r
+cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,\r
+ CvArr* orientation,\r
+ double delta1, double delta2,\r
+ int aperture_size )\r
+{\r
+ CvMat *dX_min = 0, *dY_max = 0;\r
+ IplConvKernel* el = 0;\r
+\r
+ CV_FUNCNAME( "cvCalcMotionGradient" );\r
+\r
+ __BEGIN__;\r
+\r
+ CvMat mhistub, *mhi = (CvMat*)mhiimg;\r
+ CvMat maskstub, *mask = (CvMat*)maskimg;\r
+ CvMat orientstub, *orient = (CvMat*)orientation;\r
+ CvMat dX_min_row, dY_max_row, orient_row, mask_row;\r
+ CvSize size;\r
+ int x, y;\r
+\r
+ float gradient_epsilon = 1e-4f * aperture_size * aperture_size;\r
+ float min_delta, max_delta;\r
+\r
+ CV_CALL( mhi = cvGetMat( mhi, &mhistub ));\r
+ CV_CALL( mask = cvGetMat( mask, &maskstub ));\r
+ CV_CALL( orient = cvGetMat( orient, &orientstub ));\r
+\r
+ if( !CV_IS_MASK_ARR( mask ))\r
+ CV_ERROR( CV_StsBadMask, "" );\r
+\r
+ if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )\r
+ CV_ERROR( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );\r
+\r
+ if( delta1 <= 0 || delta2 <= 0 )\r
+ CV_ERROR( CV_StsOutOfRange, "both delta's must be positive" );\r
+\r
+ if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "MHI and orientation must be single-channel floating-point images" );\r
+\r
+ if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "" );\r
+\r
+ if( orient->data.ptr == mhi->data.ptr )\r
+ CV_ERROR( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );\r
+\r
+ if( delta1 > delta2 )\r
+ {\r
+ double t;\r
+ CV_SWAP( delta1, delta2, t );\r
+ }\r
+\r
+ size = cvGetMatSize( mhi );\r
+ min_delta = (float)delta1;\r
+ max_delta = (float)delta2;\r
+ CV_CALL( dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F ));\r
+ CV_CALL( dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F ));\r
+\r
+ /* calc Dx and Dy */\r
+ CV_CALL( cvSobel( mhi, dX_min, 1, 0, aperture_size ));\r
+ CV_CALL( cvSobel( mhi, dY_max, 0, 1, aperture_size ));\r
+ cvGetRow( dX_min, &dX_min_row, 0 );\r
+ cvGetRow( dY_max, &dY_max_row, 0 );\r
+ cvGetRow( orient, &orient_row, 0 );\r
+ cvGetRow( mask, &mask_row, 0 );\r
+\r
+ /* calc gradient */\r
+ for( y = 0; y < size.height; y++ )\r
+ {\r
+ dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;\r
+ dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;\r
+ orient_row.data.ptr = orient->data.ptr + y*orient->step;\r
+ mask_row.data.ptr = mask->data.ptr + y*mask->step;\r
+ cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );\r
+\r
+ /* make orientation zero where the gradient is very small */\r
+ for( x = 0; x < size.width; x++ )\r
+ {\r
+ float dY = dY_max_row.data.fl[x];\r
+ float dX = dX_min_row.data.fl[x];\r
+\r
+ if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon )\r
+ {\r
+ mask_row.data.ptr[x] = 0;\r
+ orient_row.data.i[x] = 0;\r
+ }\r
+ else\r
+ mask_row.data.ptr[x] = 1;\r
+ }\r
+ }\r
+\r
+ CV_CALL( el = cvCreateStructuringElementEx( aperture_size, aperture_size,\r
+ aperture_size/2, aperture_size/2, CV_SHAPE_RECT ));\r
+ cvErode( mhi, dX_min, el );\r
+ cvDilate( mhi, dY_max, el );\r
+\r
+ /* mask off pixels which have little motion difference in their neighborhood */\r
+ for( y = 0; y < size.height; y++ )\r
+ {\r
+ dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;\r
+ dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;\r
+ mask_row.data.ptr = mask->data.ptr + y*mask->step;\r
+ orient_row.data.ptr = orient->data.ptr + y*orient->step;\r
+ \r
+ for( x = 0; x < size.width; x++ )\r
+ {\r
+ float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x];\r
+\r
+ if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 )\r
+ {\r
+ mask_row.data.ptr[x] = 0;\r
+ orient_row.data.i[x] = 0;\r
+ }\r
+ }\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &dX_min );\r
+ cvReleaseMat( &dY_max );\r
+ cvReleaseStructuringElement( &el );\r
+}\r
+\r
+\r
+CV_IMPL double\r
+cvCalcGlobalOrientation( const void* orientation, const void* maskimg, const void* mhiimg,\r
+ double curr_mhi_timestamp, double mhi_duration )\r
+{\r
+ double angle = 0;\r
+ int hist_size = 12;\r
+ CvHistogram* hist = 0;\r
+\r
+ CV_FUNCNAME( "cvCalcGlobalOrientation" );\r
+\r
+ __BEGIN__;\r
+\r
+ CvMat mhistub, *mhi = (CvMat*)mhiimg;\r
+ CvMat maskstub, *mask = (CvMat*)maskimg;\r
+ CvMat orientstub, *orient = (CvMat*)orientation;\r
+ void* _orient;\r
+ float _ranges[] = { 0, 360 };\r
+ float* ranges = _ranges;\r
+ int base_orient;\r
+ double shift_orient = 0, shift_weight = 0, fbase_orient;\r
+ double a, b;\r
+ float delbound;\r
+ CvMat mhi_row, mask_row, orient_row;\r
+ int x, y, mhi_rows, mhi_cols;\r
+\r
+ CV_CALL( mhi = cvGetMat( mhi, &mhistub ));\r
+ CV_CALL( mask = cvGetMat( mask, &maskstub ));\r
+ CV_CALL( orient = cvGetMat( orient, &orientstub ));\r
+\r
+ if( !CV_IS_MASK_ARR( mask ))\r
+ CV_ERROR( CV_StsBadMask, "" );\r
+\r
+ if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "MHI and orientation must be single-channel floating-point images" );\r
+\r
+ if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "" );\r
+\r
+ if( mhi_duration <= 0 )\r
+ CV_ERROR( CV_StsOutOfRange, "MHI duration must be positive" );\r
+\r
+ if( orient->data.ptr == mhi->data.ptr )\r
+ CV_ERROR( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );\r
+\r
+ // calculate histogram of different orientation values\r
+ CV_CALL( hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges ));\r
+ _orient = orient;\r
+ cvCalcArrHist( &_orient, hist, 0, mask );\r
+\r
+ // find the maximum index (the dominant orientation)\r
+ cvGetMinMaxHistValue( hist, 0, 0, 0, &base_orient );\r
+ base_orient *= 360/hist_size;\r
+\r
+ // override timestamp with the maximum value in MHI\r
+ cvMinMaxLoc( mhi, 0, &curr_mhi_timestamp, 0, 0, mask );\r
+\r
+ // find the shift relative to the dominant orientation as weighted sum of relative angles\r
+ a = 254. / 255. / mhi_duration;\r
+ b = 1. - curr_mhi_timestamp * a;\r
+ fbase_orient = base_orient;\r
+ delbound = (float)(curr_mhi_timestamp - mhi_duration);\r
+ mhi_rows = mhi->rows;\r
+ mhi_cols = mhi->cols;\r
+\r
+ if( CV_IS_MAT_CONT( mhi->type & mask->type & orient->type ))\r
+ {\r
+ mhi_cols *= mhi_rows;\r
+ mhi_rows = 1;\r
+ }\r
+\r
+ cvGetRow( mhi, &mhi_row, 0 );\r
+ cvGetRow( mask, &mask_row, 0 );\r
+ cvGetRow( orient, &orient_row, 0 );\r
+\r
+ /*\r
+ a = 254/(255*dt)\r
+ b = 1 - t*a = 1 - 254*t/(255*dur) =\r
+ (255*dt - 254*t)/(255*dt) =\r
+ (dt - (t - dt)*254)/(255*dt);\r
+ --------------------------------------------------------\r
+ ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) =\r
+ (254*x + dt - (t - dt)*254)/(255*dt) =\r
+ ((x - (t - dt))*254 + dt)/(255*dt) =\r
+ (((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255\r
+ */\r
+ for( y = 0; y < mhi_rows; y++ )\r
+ {\r
+ mhi_row.data.ptr = mhi->data.ptr + mhi->step*y;\r
+ mask_row.data.ptr = mask->data.ptr + mask->step*y;\r
+ orient_row.data.ptr = orient->data.ptr + orient->step*y;\r
+\r
+ for( x = 0; x < mhi_cols; x++ )\r
+ if( mask_row.data.ptr[x] != 0 && mhi_row.data.fl[x] > delbound )\r
+ {\r
+ /*\r
+ orient in 0..360, base_orient in 0..360\r
+ -> (rel_angle = orient - base_orient) in -360..360.\r
+ rel_angle is translated to -180..180\r
+ */\r
+ double weight = mhi_row.data.fl[x] * a + b;\r
+ int rel_angle = cvRound( orient_row.data.fl[x] - fbase_orient );\r
+\r
+ rel_angle += (rel_angle < -180 ? 360 : 0);\r
+ rel_angle += (rel_angle > 180 ? -360 : 0);\r
+\r
+ if( abs(rel_angle) < 90 )\r
+ {\r
+ shift_orient += weight * rel_angle;\r
+ shift_weight += weight;\r
+ }\r
+ }\r
+ }\r
+\r
+ // add the dominant orientation and the relative shift\r
+ if( shift_weight == 0 )\r
+ shift_weight = 0.01;\r
+\r
+ base_orient = base_orient + cvRound( shift_orient / shift_weight );\r
+ base_orient -= (base_orient < 360 ? 0 : 360);\r
+ base_orient += (base_orient >= 0 ? 0 : 360);\r
+\r
+ angle = base_orient;\r
+\r
+ __END__;\r
+\r
+ cvReleaseHist( &hist );\r
+ return angle;\r
+}\r
+\r
+\r
+CV_IMPL CvSeq*\r
+cvSegmentMotion( const CvArr* mhiimg, CvArr* segmask, CvMemStorage* storage,\r
+ double timestamp, double seg_thresh )\r
+{\r
+ CvSeq* components = 0;\r
+ CvMat* mask8u = 0;\r
+\r
+ CV_FUNCNAME( "cvSegmentMotion" );\r
+\r
+ __BEGIN__;\r
+\r
+ CvMat mhistub, *mhi = (CvMat*)mhiimg;\r
+ CvMat maskstub, *mask = (CvMat*)segmask;\r
+ Cv32suf v, comp_idx;\r
+ int stub_val, ts;\r
+ int x, y;\r
+\r
+ if( !storage )\r
+ CV_ERROR( CV_StsNullPtr, "NULL memory storage" );\r
+\r
+ CV_CALL( mhi = cvGetMat( mhi, &mhistub ));\r
+ CV_CALL( mask = cvGetMat( mask, &maskstub ));\r
+\r
+ if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( mask->type ) != CV_32FC1 )\r
+ CV_ERROR( CV_BadDepth, "Both MHI and the destination mask" );\r
+\r
+ if( !CV_ARE_SIZES_EQ( mhi, mask ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "" );\r
+\r
+ CV_CALL( mask8u = cvCreateMat( mhi->rows + 2, mhi->cols + 2, CV_8UC1 ));\r
+ cvZero( mask8u );\r
+ cvZero( mask );\r
+ CV_CALL( components = cvCreateSeq( CV_SEQ_KIND_GENERIC, sizeof(CvSeq),\r
+ sizeof(CvConnectedComp), storage ));\r
+ \r
+ v.f = (float)timestamp; ts = v.i;\r
+ v.f = FLT_MAX*0.1f; stub_val = v.i;\r
+ comp_idx.f = 1;\r
+\r
+ for( y = 0; y < mhi->rows; y++ )\r
+ {\r
+ int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);\r
+ for( x = 0; x < mhi->cols; x++ )\r
+ {\r
+ if( mhi_row[x] == 0 )\r
+ mhi_row[x] = stub_val;\r
+ }\r
+ }\r
+\r
+ for( y = 0; y < mhi->rows; y++ )\r
+ {\r
+ int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);\r
+ uchar* mask8u_row = mask8u->data.ptr + (y+1)*mask8u->step + 1;\r
+\r
+ for( x = 0; x < mhi->cols; x++ )\r
+ {\r
+ if( mhi_row[x] == ts && mask8u_row[x] == 0 )\r
+ {\r
+ CvConnectedComp comp;\r
+ int x1, y1;\r
+ CvScalar _seg_thresh = cvRealScalar(seg_thresh);\r
+ CvPoint seed = cvPoint(x,y);\r
+\r
+ CV_CALL( cvFloodFill( mhi, seed, cvRealScalar(0), _seg_thresh, _seg_thresh,\r
+ &comp, CV_FLOODFILL_MASK_ONLY + 2*256 + 4, mask8u ));\r
+\r
+ for( y1 = 0; y1 < comp.rect.height; y1++ )\r
+ {\r
+ int* mask_row1 = (int*)(mask->data.ptr +\r
+ (comp.rect.y + y1)*mask->step) + comp.rect.x;\r
+ uchar* mask8u_row1 = mask8u->data.ptr +\r
+ (comp.rect.y + y1+1)*mask8u->step + comp.rect.x+1;\r
+\r
+ for( x1 = 0; x1 < comp.rect.width; x1++ )\r
+ {\r
+ if( mask8u_row1[x1] > 1 )\r
+ {\r
+ mask8u_row1[x1] = 1;\r
+ mask_row1[x1] = comp_idx.i;\r
+ }\r
+ }\r
+ }\r
+ comp_idx.f++;\r
+ cvSeqPush( components, &comp );\r
+ }\r
+ }\r
+ }\r
+\r
+ for( y = 0; y < mhi->rows; y++ )\r
+ {\r
+ int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);\r
+ for( x = 0; x < mhi->cols; x++ )\r
+ {\r
+ if( mhi_row[x] == stub_val )\r
+ mhi_row[x] = 0;\r
+ }\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &mask8u );\r
+ return components;\r
+}\r
+\r
+\r
+void cv::updateMotionHistory( const Mat& silhouette, Mat& mhi,\r
+ double timestamp, double duration )\r
+{\r
+ CvMat _silhouette = silhouette, _mhi = mhi;\r
+ cvUpdateMotionHistory( &_silhouette, &_mhi, timestamp, duration );\r
+}\r
+\r
+void cv::calcMotionGradient( const Mat& mhi, Mat& mask,\r
+ Mat& orientation,\r
+ double delta1, double delta2,\r
+ int aperture_size )\r
+{\r
+ mask.create(mhi.size(), CV_8U);\r
+ orientation.create(mhi.size(), CV_32F);\r
+ CvMat _mhi = mhi, _mask = mask, _orientation = orientation;\r
+ cvCalcMotionGradient(&_mhi, &_mask, &_orientation, delta1, delta2, aperture_size);\r
+}\r
+\r
+double cv::calcGlobalOrientation( const Mat& orientation, const Mat& mask,\r
+ const Mat& mhi, double timestamp,\r
+ double duration )\r
+{\r
+ CvMat _orientation = orientation, _mask = mask, _mhi = mhi;\r
+ return cvCalcGlobalOrientation(&_orientation, &_mask, &_mhi, timestamp, duration);\r
+}\r
+\r
+/* End of file. */\r
__END__;
}
+
+
+namespace cv
+{
+
+MSER::MSER()
+{
+ *(CvMSERParams*)this = cvMSERParams();
+}
+
+MSER::MSER( int _delta, int _min_area, int _max_area,
+ float _max_variation, float _min_diversity,
+ int _max_evolution, double _area_threshold,
+ double _min_margin, int _edge_blur_size )
+{
+ *(CvMSERParams*)this = cvMSERParams(_delta, _min_area, _max_area, _max_variation,
+ _min_diversity, _max_evolution, _area_threshold, _min_margin, _edge_blur_size);
+}
+
+Vector<Vector<Point> > MSER::operator()(Mat& image, const Mat& mask) const
+{
+ CvMat _image = image, _mask, *pmask = 0;
+ if( mask.data )
+ pmask = &(_mask = mask);
+ MemStorage storage(cvCreateMemStorage(0));
+ Seq<CvSeq*> contours;
+ cvExtractMSER( &_image, pmask, &contours.seq, storage, *(const CvMSERParams*)this );
+ SeqIterator<CvSeq*> it = contours.begin();
+ size_t i, ncontours = contours.size();
+ Vector<Vector<Point> > dstcontours;
+ dstcontours.resize(ncontours);
+ for( i = 0; i < ncontours; i++, ++it )
+ Seq<Point>(*it).copyTo(dstcontours[i]);
+ return dstcontours;
+}
+
+}
}
+void cv::getRectSubPix( const Mat& image, Size patchSize, Point2f center,
+ Mat& patch, int patchType )
+{
+ patch.create(patchSize, patchType < 0 ? image.type() :
+ CV_MAKETYPE(CV_MAT_DEPTH(patchType),image.channels()));
+ CvMat _image = image, _patch = patch;
+ cvGetRectSubPix(&_image, &_patch, center);
+}
+
/* End of file. */
}
+void cv::watershed( const Mat& src, Mat& markers )
+{
+ CvMat _src = src, _markers = markers;
+ cvWatershed( &_src, &_markers );
+}
+
+
/****************************************************************************************\
* Meanshift *
\****************************************************************************************/
return border >= 0 ? keypoints : 0;
}
+
+namespace cv
+{
+
+StarDetector::StarDetector()
+{
+ *(CvStarDetectorParams*)this = cvStarDetectorParams();
+}
+
+StarDetector::StarDetector(int _maxSize, int _responseThreshold,
+ int _lineThresholdProjected,
+ int _lineThresholdBinarized,
+ int _suppressNonmaxSize)
+{
+ *(CvStarDetectorParams*)this = cvStarDetectorParams(_maxSize, _responseThreshold,
+ _lineThresholdProjected, _lineThresholdBinarized, _suppressNonmaxSize);
+}
+
+void StarDetector::operator()(const Mat& image, Vector<StarKeypoint>& keypoints) const
+{
+ CvMat _image = image;
+ MemStorage storage(cvCreateMemStorage(0));
+ CvSeq* kp = cvGetStarKeypoints( &_image, storage, *(const CvStarDetectorParams*)this);
+ Seq<StarKeypoint>(kp).copyTo(keypoints);
+}
+
+}
__END__;\r
}\r
\r
+namespace cv\r
+{\r
+\r
+StereoBM::StereoBM()\r
+{ state = cvCreateStereoBMState(); }\r
+\r
+StereoBM::StereoBM(int _preset, int _ndisparities, int _SADWindowSize)\r
+{ init(_preset, _ndisparities, _SADWindowSize); }\r
+\r
+void StereoBM::init(int _preset, int _ndisparities, int _SADWindowSize)\r
+{\r
+ state = cvCreateStereoBMState(_preset, _ndisparities);\r
+ state->SADWindowSize = _SADWindowSize;\r
+}\r
+\r
+void StereoBM::operator()( const Mat& left, const Mat& right, Mat& disparity )\r
+{\r
+ disparity.create(left.size(), CV_16SC1);\r
+ CvMat _left = left, _right = right, _disparity = disparity;\r
+ cvFindStereoCorrespondenceBM(&_left, &_right, &_disparity, state);\r
+}\r
+\r
+}\r
+\r
/* End of file. */\r
-/* Original code has been submitted by Liu Liu. Here is the copyright.
-----------------------------------------------------------------------------------
- * An OpenCV Implementation of SURF
- * Further Information Refer to "SURF: Speed-Up Robust Feature"
- * Author: Liu Liu
- * liuliu.1987+opencv@gmail.com
- *
- * There are still serveral lacks for this experimental implementation:
- * 1.The interpolation of sub-pixel mentioned in article was not implemented yet;
- * 2.A comparision with original libSurf.so shows that the hessian detector is not a 100% match to their implementation;
- * 3.Due to above reasons, I recommanded the original one for study and reuse;
- *
- * However, the speed of this implementation is something comparable to original one.
- *
- * Copyright© 2008, Liu Liu All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- * Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- * The name of Contributor may not be used to endorse or
- * promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
- * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
- */
-
-/*
- The following changes have been made, comparing to the original contribution:
- 1. A lot of small optimizations, less memory allocations, got rid of global buffers
- 2. Reversed order of cvGetQuadrangleSubPix and cvResize calls; probably less accurate, but much faster
- 3. The descriptor computing part (which is most expensive) is threaded using OpenMP
- (subpixel-accurate keypoint localization and scale estimation are still TBD)
-*/
-
-/*
-Keypoint position and scale interpolation has been implemented as described in
-the Brown and Lowe paper cited by the SURF paper.
-
-The sampling step along the x and y axes of the image for the determinant of the
-Hessian is now the same for each layer in an octave. While this increases the
-computation time, it ensures that a true 3x3x3 neighbourhood exists, with
-samples calculated at the same position in the layers above and below. This
-results in improved maxima detection and non-maxima suppression, and I think it
-is consistent with the description in the SURF paper.
-
-The wavelet size sampling interval has also been made consistent. The wavelet
-size at the first layer of the first octave is now 9 instead of 7. Along with
-regular position sampling steps, this makes location and scale interpolation
-easy. I think this is consistent with the SURF paper and original
-implementation.
-
-The scaling of the wavelet parameters has been fixed to ensure that the patterns
-are symmetric around the centre. Previously the truncation caused by integer
-division in the scaling ratio caused a bias towards the top left of the wavelet,
-resulting in inconsistent keypoint positions.
-
-The matrices for the determinant and trace of the Hessian are now reused in each
-octave.
-
-The extraction of the patch of pixels surrounding a keypoint used to build a
-descriptor has been simplified.
-
-Keypoint descriptor normalisation has been changed from normalising each 4x4
-cell (resulting in a descriptor of magnitude 16) to normalising the entire
-descriptor to magnitude 1.
-
-The default number of octaves has been increased from 3 to 4 to match the
-original SURF binary default. The increase in computation time is minimal since
-the higher octaves are sampled sparsely.
-
-The default number of layers per octave has been reduced from 3 to 2, to prevent
-redundant calculation of similar sizes in consecutive octaves. This decreases
-computation time. The number of features extracted may be less, however the
-additional features were mostly redundant.
-
-The radius of the circle of gradient samples used to assign an orientation has
-been increased from 4 to 6 to match the description in the SURF paper. This is
-now defined by ORI_RADIUS, and could be made into a parameter.
-
-The size of the sliding window used in orientation assignment has been reduced
-from 120 to 60 degrees to match the description in the SURF paper. This is now
-defined by ORI_WIN, and could be made into a parameter.
-
-Other options like HAAR_SIZE0, HAAR_SIZE_INC, SAMPLE_STEP0, ORI_SEARCH_INC,
-ORI_SIGMA and DESC_SIGMA have been separated from the code and documented.
-These could also be made into parameters.
-
-Modifications by Ian Mahon
-
-*/
-#include "_cv.h"
-
-CvSURFParams cvSURFParams(double threshold, int extended)
-{
- CvSURFParams params;
- params.hessianThreshold = threshold;
- params.extended = extended;
- params.nOctaves = 4;
- params.nOctaveLayers = 2;
- return params;
-}
-
-struct CvSurfHF
-{
- int p0, p1, p2, p3;
- float w;
-};
-
-CV_INLINE float
-icvCalcHaarPattern( const int* origin, const CvSurfHF* f, int n )
-{
- double d = 0;
- for( int k = 0; k < n; k++ )
- d += (origin[f[k].p0] + origin[f[k].p3] - origin[f[k].p1] - origin[f[k].p2])*f[k].w;
- return (float)d;
-}
-
-static void
-icvResizeHaarPattern( const int src[][5], CvSurfHF* dst, int n, int oldSize, int newSize, int widthStep )
-{
- float ratio = (float)newSize/oldSize;
- for( int k = 0; k < n; k++ )
- {
- int dx1 = cvRound( ratio*src[k][0] );
- int dy1 = cvRound( ratio*src[k][1] );
- int dx2 = cvRound( ratio*src[k][2] );
- int dy2 = cvRound( ratio*src[k][3] );
- dst[k].p0 = dy1*widthStep + dx1;
- dst[k].p1 = dy2*widthStep + dx1;
- dst[k].p2 = dy1*widthStep + dx2;
- dst[k].p3 = dy2*widthStep + dx2;
- dst[k].w = src[k][4]/((float)(dx2-dx1)*(dy2-dy1));
- }
-}
-
-/*
- * Maxima location interpolation as described in "Invariant Features from
- * Interest Point Groups" by Matthew Brown and David Lowe. This is performed by
- * fitting a 3D quadratic to a set of neighbouring samples.
- *
- * The gradient vector and Hessian matrix at the initial keypoint location are
- * approximated using central differences. The linear system Ax = b is then
- * solved, where A is the Hessian, b is the negative gradient, and x is the
- * offset of the interpolated maxima coordinates from the initial estimate.
- * This is equivalent to an iteration of Netwon's optimisation algorithm.
- *
- * N9 contains the samples in the 3x3x3 neighbourhood of the maxima
- * dx is the sampling step in x
- * dy is the sampling step in y
- * ds is the sampling step in size
- * point contains the keypoint coordinates and scale to be modified
- *
- * Return value is 1 if interpolation was successful, 0 on failure.
- */
-CV_INLINE int
-icvInterpolateKeypoint( float N9[3][9], int dx, int dy, int ds, CvSURFPoint *point )
-{
- int solve_ok;
- float A[9], x[3], b[3];
- CvMat _A = cvMat(3, 3, CV_32F, A);
- CvMat _x = cvMat(3, 1, CV_32F, x);
- CvMat _b = cvMat(3, 1, CV_32F, b);
-
- b[0] = -(N9[1][5]-N9[1][3])/2; /* Negative 1st deriv with respect to x */
- b[1] = -(N9[1][7]-N9[1][1])/2; /* Negative 1st deriv with respect to y */
- b[2] = -(N9[2][4]-N9[0][4])/2; /* Negative 1st deriv with respect to s */
-
- A[0] = N9[1][3]-2*N9[1][4]+N9[1][5]; /* 2nd deriv x, x */
- A[1] = (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4; /* 2nd deriv x, y */
- A[2] = (N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4; /* 2nd deriv x, s */
- A[3] = A[1]; /* 2nd deriv y, x */
- A[4] = N9[1][1]-2*N9[1][4]+N9[1][7]; /* 2nd deriv y, y */
- A[5] = (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4; /* 2nd deriv y, s */
- A[6] = A[2]; /* 2nd deriv s, x */
- A[7] = A[5]; /* 2nd deriv s, y */
- A[8] = N9[0][4]-2*N9[1][4]+N9[2][4]; /* 2nd deriv s, s */
-
- solve_ok = cvSolve( &_A, &_b, &_x );
- if( solve_ok )
- {
- point->pt.x += x[0]*dx;
- point->pt.y += x[1]*dy;
- point->size = cvRound( point->size + x[2]*ds );
- }
- return solve_ok;
-}
-
-
-static CvSeq* icvFastHessianDetector( const CvMat* sum, const CvMat* mask_sum,
- CvMemStorage* storage, const CvSURFParams* params )
-{
- CvSeq* points = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvSURFPoint), storage );
-
- /* Wavelet size at first layer of first octave. */
- const int HAAR_SIZE0 = 9;
-
- /* Wavelet size increment between layers. This should be an even number,
- such that the wavelet sizes in an octave are either all even or all odd.
- This ensures that when looking for the neighbours of a sample, the layers
- above and below are aligned correctly. */
- const int HAAR_SIZE_INC = 6;
-
- /* Sampling step along image x and y axes at first octave. This is doubled
- for each additional octave. WARNING: Increasing this improves speed,
- however keypoint extraction becomes unreliable. */
- const int SAMPLE_STEP0 = 1;
-
-
- /* Wavelet Data */
- const int NX=3, NY=3, NXY=4, NM=1;
- const int dx_s[NX][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
- const int dy_s[NY][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
- const int dxy_s[NXY][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
- const int dm[NM][5] = { {0, 0, 9, 9, 1} };
- CvSurfHF Dx[NX], Dy[NY], Dxy[NXY], Dm;
-
- CvMat** dets = (CvMat**)cvStackAlloc((params->nOctaveLayers+2)*sizeof(dets[0]));
- CvMat** traces = (CvMat**)cvStackAlloc((params->nOctaveLayers+2)*sizeof(traces[0]));
- int *sizes = (int*)cvStackAlloc((params->nOctaveLayers+2)*sizeof(sizes[0]));
-
- double dx = 0, dy = 0, dxy = 0;
- int octave, layer, sampleStep, size, margin;
- int rows, cols;
- int i, j, sum_i, sum_j;
- const int* s_ptr;
- float *det_ptr, *trace_ptr;
-
- /* Allocate enough space for hessian determinant and trace matrices at the
- first octave. Clearing these initially or between octaves is not
- required, since all values that are accessed are first calculated */
- for( layer = 0; layer <= params->nOctaveLayers+1; layer++ )
- {
- dets[layer] = cvCreateMat( (sum->rows-1)/SAMPLE_STEP0, (sum->cols-1)/SAMPLE_STEP0, CV_32FC1 );
- traces[layer] = cvCreateMat( (sum->rows-1)/SAMPLE_STEP0, (sum->cols-1)/SAMPLE_STEP0, CV_32FC1 );
- }
-
- for( octave = 0, sampleStep=SAMPLE_STEP0; octave < params->nOctaves; octave++, sampleStep*=2 )
- {
- /* Hessian determinant and trace sample array size in this octave */
- rows = (sum->rows-1)/sampleStep;
- cols = (sum->cols-1)/sampleStep;
-
- /* Calculate the determinant and trace of the hessian */
- for( layer = 0; layer <= params->nOctaveLayers+1; layer++ )
- {
- sizes[layer] = size = (HAAR_SIZE0+HAAR_SIZE_INC*layer)<<octave;
- icvResizeHaarPattern( dx_s, Dx, NX, 9, size, sum->cols );
- icvResizeHaarPattern( dy_s, Dy, NY, 9, size, sum->cols );
- icvResizeHaarPattern( dxy_s, Dxy, NXY, 9, size, sum->cols );
- /*printf( "octave=%d layer=%d size=%d rows=%d cols=%d\n", octave, layer, size, rows, cols );*/
-
- margin = (size/2)/sampleStep;
- for( sum_i=0, i=margin; sum_i<=(sum->rows-1)-size; sum_i+=sampleStep, i++ )
- {
- s_ptr = sum->data.i + sum_i*sum->cols;
- det_ptr = dets[layer]->data.fl + i*dets[layer]->cols + margin;
- trace_ptr = traces[layer]->data.fl + i*traces[layer]->cols + margin;
- for( sum_j=0, j=margin; sum_j<=(sum->cols-1)-size; sum_j+=sampleStep, j++ )
- {
- dx = icvCalcHaarPattern( s_ptr, Dx, 3 );
- dy = icvCalcHaarPattern( s_ptr, Dy, 3 );
- dxy = icvCalcHaarPattern( s_ptr, Dxy, 4 );
- s_ptr+=sampleStep;
- *det_ptr++ = (float)(dx*dy - 0.81*dxy*dxy);
- *trace_ptr++ = (float)(dx + dy);
- }
- }
- }
-
- /* Find maxima in the determinant of the hessian */
- for( layer = 1; layer <= params->nOctaveLayers; layer++ )
- {
- size = sizes[layer];
- icvResizeHaarPattern( dm, &Dm, NM, 9, size, mask_sum ? mask_sum->cols : sum->cols );
-
- /* Ignore pixels without a 3x3 neighbourhood in the layer above */
- margin = (sizes[layer+1]/2)/sampleStep+1;
- for( i = margin; i < rows-margin; i++ )
- {
- det_ptr = dets[layer]->data.fl + i*dets[layer]->cols;
- trace_ptr = traces[layer]->data.fl + i*traces[layer]->cols;
- for( j = margin; j < cols-margin; j++ )
- {
- float val0 = det_ptr[j];
- if( val0 > params->hessianThreshold )
- {
- /* Coordinates for the start of the wavelet in the sum image. There
- is some integer division involved, so don't try to simplify this
- (cancel out sampleStep) without checking the result is the same */
- int sum_i = sampleStep*(i-(size/2)/sampleStep);
- int sum_j = sampleStep*(j-(size/2)/sampleStep);
-
- /* The 3x3x3 neighbouring samples around the maxima.
- The maxima is included at N9[1][4] */
- int c = dets[layer]->cols;
- const float *det1 = dets[layer-1]->data.fl + i*c + j;
- const float *det2 = dets[layer]->data.fl + i*c + j;
- const float *det3 = dets[layer+1]->data.fl + i*c + j;
- float N9[3][9] = { { det1[-c-1], det1[-c], det1[-c+1],
- det1[-1] , det1[0] , det1[1],
- det1[c-1] , det1[c] , det1[c+1] },
- { det2[-c-1], det2[-c], det2[-c+1],
- det2[-1] , det2[0] , det2[1],
- det2[c-1] , det2[c] , det2[c+1 ] },
- { det3[-c-1], det3[-c], det3[-c+1],
- det3[-1 ], det3[0] , det3[1],
- det3[c-1] , det3[c] , det3[c+1 ] } };
-
- /* Check the mask - why not just check the mask at the center of the wavelet? */
- if( mask_sum )
- {
- const int* mask_ptr = mask_sum->data.i + mask_sum->cols*sum_i + sum_j;
- float mval = icvCalcHaarPattern( mask_ptr, &Dm, 1 );
- if( mval < 0.5 )
- continue;
- }
-
- /* Non-maxima suppression. val0 is at N9[1][4]*/
- if( val0 > N9[0][0] && val0 > N9[0][1] && val0 > N9[0][2] &&
- val0 > N9[0][3] && val0 > N9[0][4] && val0 > N9[0][5] &&
- val0 > N9[0][6] && val0 > N9[0][7] && val0 > N9[0][8] &&
- val0 > N9[1][0] && val0 > N9[1][1] && val0 > N9[1][2] &&
- val0 > N9[1][3] && val0 > N9[1][5] &&
- val0 > N9[1][6] && val0 > N9[1][7] && val0 > N9[1][8] &&
- val0 > N9[2][0] && val0 > N9[2][1] && val0 > N9[2][2] &&
- val0 > N9[2][3] && val0 > N9[2][4] && val0 > N9[2][5] &&
- val0 > N9[2][6] && val0 > N9[2][7] && val0 > N9[2][8] )
- {
- /* Calculate the wavelet center coordinates for the maxima */
- double center_i = sum_i + (double)(size-1)/2;
- double center_j = sum_j + (double)(size-1)/2;
-
- CvSURFPoint point = cvSURFPoint( cvPoint2D32f(center_j,center_i),
- CV_SIGN(trace_ptr[j]), sizes[layer], 0, val0 );
-
- /* Interpolate maxima location within the 3x3x3 neighbourhood */
- int ds = sizes[layer]-sizes[layer-1];
- int interp_ok = icvInterpolateKeypoint( N9, sampleStep, sampleStep, ds, &point );
-
- /* Sometimes the interpolation step gives a negative size etc. */
- if( interp_ok && point.size >= 1 &&
- point.pt.x >= 0 && point.pt.x <= (sum->cols-1) &&
- point.pt.y >= 0 && point.pt.y <= (sum->rows-1) )
- {
- /*printf( "Keypoint %f %f %d\n", point.pt.x, point.pt.y, point.size );*/
- cvSeqPush( points, &point );
- }
- }
- }
- }
- }
- }
- }
-
- /* Clean-up */
- for( layer = 0; layer <= params->nOctaveLayers+1; layer++ )
- {
- cvReleaseMat( &dets[layer] );
- cvReleaseMat( &traces[layer] );
- }
-
- return points;
-}
-
-
-CV_IMPL void
-cvExtractSURF( const CvArr* _img, const CvArr* _mask,
- CvSeq** _keypoints, CvSeq** _descriptors,
- CvMemStorage* storage, CvSURFParams params,
- int useProvidedKeyPts)
-{
- CvMat *sum = 0, *mask1 = 0, *mask_sum = 0, **win_bufs = 0;
-
- if( _keypoints && !useProvidedKeyPts ) // If useProvidedKeyPts!=0 we'll use current contents of "*_keypoints"
- *_keypoints = 0;
- if( _descriptors )
- *_descriptors = 0;
-
- /* Radius of the circle in which to sample gradients to assign an
- orientation */
- const int ORI_RADIUS = 6;
-
- /* The size of the sliding window (in degrees) used to assign an
- orientation */
- const int ORI_WIN = 60;
-
- /* Increment used for the orientation sliding window (in degrees) */
- const int ORI_SEARCH_INC = 5;
-
- /* Standard deviation of the Gaussian used to weight the gradient samples
- used to assign an orientation */
- const float ORI_SIGMA = 2.5f;
-
- /* Standard deviation of the Gaussian used to weight the gradient samples
- used to build a keypoint descriptor */
- const float DESC_SIGMA = 3.3f;
-
-
- /* X and Y gradient wavelet data */
- const int NX=2, NY=2;
- int dx_s[NX][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
- int dy_s[NY][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
-
- CvSeq *keypoints, *descriptors = 0;
- CvMat imghdr, *img = cvGetMat(_img, &imghdr);
- CvMat maskhdr, *mask = _mask ? cvGetMat(_mask, &maskhdr) : 0;
-
- const int max_ori_samples = (2*ORI_RADIUS+1)*(2*ORI_RADIUS+1);
- int descriptor_size = params.extended ? 128 : 64;
- const int descriptor_data_type = CV_32F;
- const int PATCH_SZ = 20;
- float DW[PATCH_SZ][PATCH_SZ];
- CvMat _DW = cvMat(PATCH_SZ, PATCH_SZ, CV_32F, DW);
- CvPoint apt[max_ori_samples];
- float apt_w[max_ori_samples];
- int i, j, k, nangle0 = 0, N;
- int nthreads = cvGetNumThreads();
-
- CV_Assert(img != 0);
- CV_Assert(CV_MAT_TYPE(img->type) == CV_8UC1);
- CV_Assert(mask == 0 || (CV_ARE_SIZES_EQ(img,mask) && CV_MAT_TYPE(mask->type) == CV_8UC1));
- CV_Assert(storage != 0);
- CV_Assert(params.hessianThreshold >= 0);
- CV_Assert(params.nOctaves > 0);
- CV_Assert(params.nOctaveLayers > 0);
-
- sum = cvCreateMat( img->height+1, img->width+1, CV_32SC1 );
- cvIntegral( img, sum );
-
- // Compute keypoints only if we are not asked for evaluating the descriptors are some given locations:
- if (!useProvidedKeyPts)
- {
- if( mask )
- {
- mask1 = cvCreateMat( img->height, img->width, CV_8UC1 );
- mask_sum = cvCreateMat( img->height+1, img->width+1, CV_32SC1 );
- cvMinS( mask, 1, mask1 );
- cvIntegral( mask1, mask_sum );
- }
- keypoints = icvFastHessianDetector( sum, mask_sum, storage, ¶ms );
- }
- else
- {
- CV_Assert(useProvidedKeyPts && (_keypoints != 0) && (*_keypoints != 0));
- keypoints = *_keypoints;
- }
-
- N = keypoints->total;
- if( _descriptors )
- {
- descriptors = cvCreateSeq( 0, sizeof(CvSeq),
- descriptor_size*CV_ELEM_SIZE(descriptor_data_type), storage );
- cvSeqPushMulti( descriptors, 0, N );
- }
-
- /* Coordinates and weights of samples used to calculate orientation */
- cv::Mat _G = cv::getGaussianKernel( 2*ORI_RADIUS+1, ORI_SIGMA, CV_32F );
- const float* G = (const float*)_G.data;
-
- for( i = -ORI_RADIUS; i <= ORI_RADIUS; i++ )
- {
- for( j = -ORI_RADIUS; j <= ORI_RADIUS; j++ )
- {
- if( i*i + j*j <= ORI_RADIUS*ORI_RADIUS )
- {
- apt[nangle0] = cvPoint(j,i);
- apt_w[nangle0++] = G[i+ORI_RADIUS]*G[j+ORI_RADIUS];
- }
- }
- }
-
- /* Gaussian used to weight descriptor samples */
- {
- double c2 = 1./(DESC_SIGMA*DESC_SIGMA*2);
- double gs = 0;
- for( i = 0; i < PATCH_SZ; i++ )
- {
- for( j = 0; j < PATCH_SZ; j++ )
- {
- double x = j - (float)(PATCH_SZ-1)/2, y = i - (float)(PATCH_SZ-1)/2;
- double val = exp(-(x*x+y*y)*c2);
- DW[i][j] = (float)val;
- gs += val;
- }
- }
- cvScale( &_DW, &_DW, 1./gs );
- }
-
- win_bufs = (CvMat**)cvAlloc(nthreads*sizeof(win_bufs[0]));
- for( i = 0; i < nthreads; i++ )
- win_bufs[i] = 0;
-
-#ifdef _OPENMP
-#pragma omp parallel for num_threads(nthreads) schedule(dynamic)
-#endif
- for( k = 0; k < N; k++ )
- {
- const int* sum_ptr = sum->data.i;
- int sum_cols = sum->cols;
- int i, j, kk, x, y, nangle;
- float X[max_ori_samples], Y[max_ori_samples], angle[max_ori_samples];
- uchar PATCH[PATCH_SZ+1][PATCH_SZ+1];
- float DX[PATCH_SZ][PATCH_SZ], DY[PATCH_SZ][PATCH_SZ];
- CvMat _X = cvMat(1, max_ori_samples, CV_32F, X);
- CvMat _Y = cvMat(1, max_ori_samples, CV_32F, Y);
- CvMat _angle = cvMat(1, max_ori_samples, CV_32F, angle);
- CvMat _patch = cvMat(PATCH_SZ+1, PATCH_SZ+1, CV_8U, PATCH);
- float* vec;
- CvSurfHF dx_t[NX], dy_t[NY];
- int thread_idx = cvGetThreadNum();
-
- CvSURFPoint* kp = (CvSURFPoint*)cvGetSeqElem( keypoints, k );
- int size = kp->size;
- CvPoint2D32f center = kp->pt;
-
- /* The sampling intervals and wavelet sized for selecting an orientation
- and building the keypoint descriptor are defined relative to 's' */
- float s = (float)size*1.2f/9.0f;
-
- /* To find the dominant orientation, the gradients in x and y are
- sampled in a circle of radius 6s using wavelets of size 4s.
- We ensure the gradient wavelet size is even to ensure the
- wavelet pattern is balanced and symmetric around its center */
- int grad_wav_size = 2*cvRound( 2*s );
- if ( sum->rows < grad_wav_size || sum->cols < grad_wav_size )
- {
- /* when grad_wav_size is too big,
- * the sampling of gradient will be meaningless
- * mark keypoint for deletion. */
- kp->size = -1;
- continue;
- }
- icvResizeHaarPattern( dx_s, dx_t, NX, 4, grad_wav_size, sum->cols );
- icvResizeHaarPattern( dy_s, dy_t, NY, 4, grad_wav_size, sum->cols );
- for( kk = 0, nangle = 0; kk < nangle0; kk++ )
- {
- const int* ptr;
- float vx, vy;
- x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
- y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );
- if( (unsigned)y >= (unsigned)(sum->rows - grad_wav_size) ||
- (unsigned)x >= (unsigned)(sum->cols - grad_wav_size) )
- continue;
- ptr = sum_ptr + x + y*sum_cols;
- vx = icvCalcHaarPattern( ptr, dx_t, 2 );
- vy = icvCalcHaarPattern( ptr, dy_t, 2 );
- X[nangle] = vx*apt_w[kk]; Y[nangle] = vy*apt_w[kk];
- nangle++;
- }
- if ( nangle == 0 )
- {
- /* No gradient could be sampled because the keypoint is too
- * near too one or more of the sides of the image. As we
- * therefore cannot find a dominant direction, we skip this
- * keypoint and mark it for later deletion from the sequence. */
- kp->size = -1;
- continue;
- }
- _X.cols = _Y.cols = _angle.cols = nangle;
- cvCartToPolar( &_X, &_Y, 0, &_angle, 1 );
-
- float bestx = 0, besty = 0, descriptor_mod = 0;
- for( i = 0; i < 360; i += ORI_SEARCH_INC )
- {
- float sumx = 0, sumy = 0, temp_mod;
- for( j = 0; j < nangle; j++ )
- {
- int d = abs(cvRound(angle[j]) - i);
- if( d < ORI_WIN/2 || d > 360-ORI_WIN/2 )
- {
- sumx += X[j];
- sumy += Y[j];
- }
- }
- temp_mod = sumx*sumx + sumy*sumy;
- if( temp_mod > descriptor_mod )
- {
- descriptor_mod = temp_mod;
- bestx = sumx;
- besty = sumy;
- }
- }
-
- float descriptor_dir = cvFastArctan( besty, bestx );
- kp->dir = descriptor_dir;
-
- if( !_descriptors )
- continue;
-
- descriptor_dir *= (float)(CV_PI/180);
-
- /* Extract a window of pixels around the keypoint of size 20s */
- int win_size = (int)((PATCH_SZ+1)*s);
- if( win_bufs[thread_idx] == 0 || win_bufs[thread_idx]->cols < win_size*win_size )
- {
- cvReleaseMat( &win_bufs[thread_idx] );
- win_bufs[thread_idx] = cvCreateMat( 1, win_size*win_size, CV_8U );
- }
-
- CvMat win = cvMat(win_size, win_size, CV_8U, win_bufs[thread_idx]->data.ptr);
- float sin_dir = sin(descriptor_dir);
- float cos_dir = cos(descriptor_dir) ;
-
- /* Subpixel interpolation version (slower). Subpixel not required since
- the pixels will all get averaged when we scale down to 20 pixels */
- /*
- float w[] = { cos_dir, sin_dir, center.x,
- -sin_dir, cos_dir , center.y };
- CvMat W = cvMat(2, 3, CV_32F, w);
- cvGetQuadrangleSubPix( img, &win, &W );
- */
-
- /* Nearest neighbour version (faster) */
- float win_offset = -(float)(win_size-1)/2;
- float start_x = center.x + win_offset*cos_dir + win_offset*sin_dir;
- float start_y = center.y - win_offset*sin_dir + win_offset*cos_dir;
- uchar* WIN = win.data.ptr;
- for( i=0; i<win_size; i++, start_x+=sin_dir, start_y+=cos_dir )
- {
- float pixel_x = start_x;
- float pixel_y = start_y;
- for( j=0; j<win_size; j++, pixel_x+=cos_dir, pixel_y-=sin_dir )
- {
- int x = cvRound( pixel_x );
- int y = cvRound( pixel_y );
- x = MAX( x, 0 );
- y = MAX( y, 0 );
- x = MIN( x, img->cols-1 );
- y = MIN( y, img->rows-1 );
- WIN[i*win_size + j] = img->data.ptr[y*img->step+x];
- }
- }
-
- /* Scale the window to size PATCH_SZ so each pixel's size is s. This
- makes calculating the gradients with wavelets of size 2s easy */
- cvResize( &win, &_patch, CV_INTER_AREA );
-
- /* Calculate gradients in x and y with wavelets of size 2s */
- for( i = 0; i < PATCH_SZ; i++ )
- for( j = 0; j < PATCH_SZ; j++ )
- {
- float dw = DW[i][j];
- float vx = (PATCH[i][j+1] - PATCH[i][j] + PATCH[i+1][j+1] - PATCH[i+1][j])*dw;
- float vy = (PATCH[i+1][j] - PATCH[i][j] + PATCH[i+1][j+1] - PATCH[i][j+1])*dw;
- DX[i][j] = vx;
- DY[i][j] = vy;
- }
-
- /* Construct the descriptor */
- vec = (float*)cvGetSeqElem( descriptors, k );
- for( kk = 0; kk < (int)(descriptors->elem_size/sizeof(vec[0])); kk++ )
- vec[kk] = 0;
- double square_mag = 0;
- if( params.extended )
- {
- /* 128-bin descriptor */
- for( i = 0; i < 4; i++ )
- for( j = 0; j < 4; j++ )
- {
- for( y = i*5; y < i*5+5; y++ )
- {
- for( x = j*5; x < j*5+5; x++ )
- {
- float tx = DX[y][x], ty = DY[y][x];
- if( ty >= 0 )
- {
- vec[0] += tx;
- vec[1] += (float)fabs(tx);
- } else {
- vec[2] += tx;
- vec[3] += (float)fabs(tx);
- }
- if ( tx >= 0 )
- {
- vec[4] += ty;
- vec[5] += (float)fabs(ty);
- } else {
- vec[6] += ty;
- vec[7] += (float)fabs(ty);
- }
- }
- }
- for( kk = 0; kk < 8; kk++ )
- square_mag += vec[kk]*vec[kk];
- vec += 8;
- }
- }
- else
- {
- /* 64-bin descriptor */
- for( i = 0; i < 4; i++ )
- for( j = 0; j < 4; j++ )
- {
- for( y = i*5; y < i*5+5; y++ )
- {
- for( x = j*5; x < j*5+5; x++ )
- {
- float tx = DX[y][x], ty = DY[y][x];
- vec[0] += tx; vec[1] += ty;
- vec[2] += (float)fabs(tx); vec[3] += (float)fabs(ty);
- }
- }
- for( kk = 0; kk < 4; kk++ )
- square_mag += vec[kk]*vec[kk];
- vec+=4;
- }
- }
-
- /* unit vector is essential for contrast invariance */
- vec = (float*)cvGetSeqElem( descriptors, k );
- double scale = 1./(sqrt(square_mag) + DBL_EPSILON);
- for( kk = 0; kk < descriptor_size; kk++ )
- vec[kk] = (float)(vec[kk]*scale);
- }
-
- /* remove keypoints that were marked for deletion */
- for ( i = 0; i < N; i++ )
- {
- CvSURFPoint* kp = (CvSURFPoint*)cvGetSeqElem( keypoints, i );
- if ( kp->size == -1 )
- {
- cvSeqRemove( keypoints, i );
- if ( _descriptors )
- cvSeqRemove( descriptors, i );
- k--;
- N--;
- }
- }
-
- for( i = 0; i < nthreads; i++ )
- cvReleaseMat( &win_bufs[i] );
-
- if( _keypoints && !useProvidedKeyPts )
- *_keypoints = keypoints;
- if( _descriptors )
- *_descriptors = descriptors;
-
- cvReleaseMat( &sum );
- if (mask1) cvReleaseMat( &mask1 );
- if (mask_sum) cvReleaseMat( &mask_sum );
- cvFree( &win_bufs );
-}
-
+/* Original code has been submitted by Liu Liu. Here is the copyright.\r
+----------------------------------------------------------------------------------\r
+ * An OpenCV Implementation of SURF\r
+ * Further Information Refer to "SURF: Speed-Up Robust Feature"\r
+ * Author: Liu Liu\r
+ * liuliu.1987+opencv@gmail.com\r
+ *\r
+ * There are still serveral lacks for this experimental implementation:\r
+ * 1.The interpolation of sub-pixel mentioned in article was not implemented yet;\r
+ * 2.A comparision with original libSurf.so shows that the hessian detector is not a 100% match to their implementation;\r
+ * 3.Due to above reasons, I recommanded the original one for study and reuse;\r
+ *\r
+ * However, the speed of this implementation is something comparable to original one.\r
+ *\r
+ * Copyright© 2008, Liu Liu All rights reserved.\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ * Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ * Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ * The name of Contributor may not be used to endorse or\r
+ * promote products derived from this software without\r
+ * specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND\r
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,\r
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r
+ * DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY\r
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\r
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\r
+ * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\r
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\r
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\r
+ * OF SUCH DAMAGE.\r
+ */\r
+\r
+/* \r
+ The following changes have been made, comparing to the original contribution:\r
+ 1. A lot of small optimizations, less memory allocations, got rid of global buffers\r
+ 2. Reversed order of cvGetQuadrangleSubPix and cvResize calls; probably less accurate, but much faster\r
+ 3. The descriptor computing part (which is most expensive) is threaded using OpenMP\r
+ (subpixel-accurate keypoint localization and scale estimation are still TBD)\r
+*/\r
+\r
+/*\r
+Keypoint position and scale interpolation has been implemented as described in\r
+the Brown and Lowe paper cited by the SURF paper.\r
+\r
+The sampling step along the x and y axes of the image for the determinant of the\r
+Hessian is now the same for each layer in an octave. While this increases the\r
+computation time, it ensures that a true 3x3x3 neighbourhood exists, with\r
+samples calculated at the same position in the layers above and below. This\r
+results in improved maxima detection and non-maxima suppression, and I think it\r
+is consistent with the description in the SURF paper.\r
+\r
+The wavelet size sampling interval has also been made consistent. The wavelet\r
+size at the first layer of the first octave is now 9 instead of 7. Along with\r
+regular position sampling steps, this makes location and scale interpolation\r
+easy. I think this is consistent with the SURF paper and original\r
+implementation.\r
+\r
+The scaling of the wavelet parameters has been fixed to ensure that the patterns\r
+are symmetric around the centre. Previously the truncation caused by integer\r
+division in the scaling ratio caused a bias towards the top left of the wavelet,\r
+resulting in inconsistent keypoint positions.\r
+\r
+The matrices for the determinant and trace of the Hessian are now reused in each\r
+octave.\r
+\r
+The extraction of the patch of pixels surrounding a keypoint used to build a\r
+descriptor has been simplified.\r
+\r
+Keypoint descriptor normalisation has been changed from normalising each 4x4 \r
+cell (resulting in a descriptor of magnitude 16) to normalising the entire \r
+descriptor to magnitude 1.\r
+\r
+The default number of octaves has been increased from 3 to 4 to match the\r
+original SURF binary default. The increase in computation time is minimal since\r
+the higher octaves are sampled sparsely.\r
+\r
+The default number of layers per octave has been reduced from 3 to 2, to prevent\r
+redundant calculation of similar sizes in consecutive octaves. This decreases \r
+computation time. The number of features extracted may be less, however the \r
+additional features were mostly redundant.\r
+\r
+The radius of the circle of gradient samples used to assign an orientation has\r
+been increased from 4 to 6 to match the description in the SURF paper. This is \r
+now defined by ORI_RADIUS, and could be made into a parameter.\r
+\r
+The size of the sliding window used in orientation assignment has been reduced\r
+from 120 to 60 degrees to match the description in the SURF paper. This is now\r
+defined by ORI_WIN, and could be made into a parameter.\r
+\r
+Other options like HAAR_SIZE0, HAAR_SIZE_INC, SAMPLE_STEP0, ORI_SEARCH_INC, \r
+ORI_SIGMA and DESC_SIGMA have been separated from the code and documented. \r
+These could also be made into parameters.\r
+\r
+Modifications by Ian Mahon\r
+\r
+*/\r
+#include "_cv.h"\r
+\r
+/* Constructs a CvSURFParams structure with the given Hessian threshold and\r
+   descriptor flavour (extended != 0 selects the 128-element descriptor,\r
+   otherwise 64 elements).  The pyramid dimensions are set to the library\r
+   defaults: 4 octaves, 2 layers per octave. */\r
+CvSURFParams cvSURFParams(double threshold, int extended)\r
+{\r
+ CvSURFParams params;\r
+ params.hessianThreshold = threshold;\r
+ params.extended = extended;\r
+ params.nOctaves = 4;\r
+ params.nOctaveLayers = 2;\r
+ return params;\r
+}\r
+\r
+/* One weighted box of a Haar-like feature evaluated on an integral image:\r
+   p0..p3 are the element offsets of the four box corners (top-left,\r
+   bottom-left, top-right, bottom-right as computed in icvResizeHaarPattern),\r
+   w is the weight applied to the box sum. */\r
+struct CvSurfHF\r
+{\r
+ int p0, p1, p2, p3;\r
+ float w;\r
+};\r
+\r
+/* Evaluates a Haar-like feature made of n weighted boxes at 'origin' in an\r
+   integral image: each box sum is obtained with the usual four-corner\r
+   lookup and accumulated with its weight. */\r
+CV_INLINE float\r
+icvCalcHaarPattern( const int* origin, const CvSurfHF* f, int n )\r
+{\r
+ double d = 0;\r
+ for( int k = 0; k < n; k++ )\r
+ d += (origin[f[k].p0] + origin[f[k].p3] - origin[f[k].p1] - origin[f[k].p2])*f[k].w;\r
+ return (float)d;\r
+}\r
+\r
+/* Scales an n-box Haar pattern, defined on an oldSize x oldSize grid as\r
+   rows of {x1, y1, x2, y2, weight}, to newSize and converts the box corners\r
+   into element offsets for an image with row stride 'widthStep'.  Each\r
+   weight is normalised by the scaled box area so the response is\r
+   scale-independent.\r
+   NOTE(review): if rounding ever collapses a box (dx2==dx1 or dy2==dy1) the\r
+   weight becomes a division by zero; presumably newSize is always large\r
+   enough here to prevent that -- TODO confirm. */\r
+static void\r
+icvResizeHaarPattern( const int src[][5], CvSurfHF* dst, int n, int oldSize, int newSize, int widthStep )\r
+{\r
+ float ratio = (float)newSize/oldSize;\r
+ for( int k = 0; k < n; k++ )\r
+ {\r
+ int dx1 = cvRound( ratio*src[k][0] );\r
+ int dy1 = cvRound( ratio*src[k][1] );\r
+ int dx2 = cvRound( ratio*src[k][2] );\r
+ int dy2 = cvRound( ratio*src[k][3] );\r
+ dst[k].p0 = dy1*widthStep + dx1;\r
+ dst[k].p1 = dy2*widthStep + dx1;\r
+ dst[k].p2 = dy1*widthStep + dx2;\r
+ dst[k].p3 = dy2*widthStep + dx2;\r
+ dst[k].w = src[k][4]/((float)(dx2-dx1)*(dy2-dy1));\r
+ }\r
+}\r
+\r
+/*\r
+ * Maxima location interpolation as described in "Invariant Features from\r
+ * Interest Point Groups" by Matthew Brown and David Lowe. This is performed by\r
+ * fitting a 3D quadratic to a set of neighbouring samples.\r
+ * \r
+ * The gradient vector and Hessian matrix at the initial keypoint location are \r
+ * approximated using central differences. The linear system Ax = b is then\r
+ * solved, where A is the Hessian, b is the negative gradient, and x is the \r
+ * offset of the interpolated maxima coordinates from the initial estimate.\r
+ * This is equivalent to an iteration of Newton's optimisation algorithm.\r
+ *\r
+ * N9 contains the samples in the 3x3x3 neighbourhood of the maxima\r
+ * dx is the sampling step in x\r
+ * dy is the sampling step in y\r
+ * ds is the sampling step in size\r
+ * point contains the keypoint coordinates and scale to be modified\r
+ *\r
+ * Return value is 1 if interpolation was successful, 0 on failure.\r
+ */ \r
+/* dx/dy are the detector sampling steps (in pixels) and ds the wavelet-size\r
+   step between adjacent layers; the solved offset x is multiplied by them\r
+   to convert the sub-sample solution back into image/scale units. */\r
+CV_INLINE int \r
+icvInterpolateKeypoint( float N9[3][9], int dx, int dy, int ds, CvSURFPoint *point )\r
+{\r
+ int solve_ok;\r
+ float A[9], x[3], b[3];\r
+ CvMat _A = cvMat(3, 3, CV_32F, A);\r
+ CvMat _x = cvMat(3, 1, CV_32F, x); \r
+ CvMat _b = cvMat(3, 1, CV_32F, b);\r
+\r
+ b[0] = -(N9[1][5]-N9[1][3])/2; /* Negative 1st deriv with respect to x */\r
+ b[1] = -(N9[1][7]-N9[1][1])/2; /* Negative 1st deriv with respect to y */\r
+ b[2] = -(N9[2][4]-N9[0][4])/2; /* Negative 1st deriv with respect to s */\r
+\r
+ A[0] = N9[1][3]-2*N9[1][4]+N9[1][5]; /* 2nd deriv x, x */\r
+ A[1] = (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4; /* 2nd deriv x, y */\r
+ A[2] = (N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4; /* 2nd deriv x, s */\r
+ A[3] = A[1]; /* 2nd deriv y, x */\r
+ A[4] = N9[1][1]-2*N9[1][4]+N9[1][7]; /* 2nd deriv y, y */\r
+ A[5] = (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4; /* 2nd deriv y, s */\r
+ A[6] = A[2]; /* 2nd deriv s, x */\r
+ A[7] = A[5]; /* 2nd deriv s, y */\r
+ A[8] = N9[0][4]-2*N9[1][4]+N9[2][4]; /* 2nd deriv s, s */\r
+\r
+ solve_ok = cvSolve( &_A, &_b, &_x );\r
+ if( solve_ok )\r
+ {\r
+ /* Apply the offset only when the 3x3 system had a solution;\r
+    the size is rounded back to an integer wavelet size. */\r
+ point->pt.x += x[0]*dx;\r
+ point->pt.y += x[1]*dy;\r
+ point->size = cvRound( point->size + x[2]*ds ); \r
+ }\r
+ return solve_ok;\r
+}\r
+\r
+\r
+/* Fast-Hessian keypoint detector.\r
+   sum      - integral image of the input image (CV_32SC1, (H+1)x(W+1))\r
+   mask_sum - integral image of the binarised mask, or NULL for no mask\r
+   storage  - memory storage the output sequence is allocated from\r
+   params   - detector parameters (hessianThreshold, nOctaves, nOctaveLayers)\r
+   Returns a CvSeq of CvSURFPoint: local maxima of the approximated Hessian\r
+   determinant over space and scale, refined by icvInterpolateKeypoint. */\r
+static CvSeq* icvFastHessianDetector( const CvMat* sum, const CvMat* mask_sum,\r
+ CvMemStorage* storage, const CvSURFParams* params )\r
+{\r
+ CvSeq* points = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvSURFPoint), storage );\r
+ \r
+ /* Wavelet size at first layer of first octave. */ \r
+ const int HAAR_SIZE0 = 9; \r
+\r
+ /* Wavelet size increment between layers. This should be an even number, \r
+ such that the wavelet sizes in an octave are either all even or all odd.\r
+ This ensures that when looking for the neighbours of a sample, the layers\r
+ above and below are aligned correctly. */\r
+ const int HAAR_SIZE_INC = 6; \r
+\r
+ /* Sampling step along image x and y axes at first octave. This is doubled\r
+ for each additional octave. WARNING: Increasing this improves speed, \r
+ however keypoint extraction becomes unreliable. */\r
+ const int SAMPLE_STEP0 = 1; \r
+\r
+\r
+ /* Wavelet Data */\r
+ const int NX=3, NY=3, NXY=4, NM=1;\r
+ const int dx_s[NX][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };\r
+ const int dy_s[NY][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };\r
+ const int dxy_s[NXY][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };\r
+ const int dm[NM][5] = { {0, 0, 9, 9, 1} };\r
+ CvSurfHF Dx[NX], Dy[NY], Dxy[NXY], Dm;\r
+\r
+ /* One det/trace matrix per layer plus the two border layers used only\r
+    for the 3x3x3 neighbourhood checks. */\r
+ CvMat** dets = (CvMat**)cvStackAlloc((params->nOctaveLayers+2)*sizeof(dets[0]));\r
+ CvMat** traces = (CvMat**)cvStackAlloc((params->nOctaveLayers+2)*sizeof(traces[0]));\r
+ int *sizes = (int*)cvStackAlloc((params->nOctaveLayers+2)*sizeof(sizes[0]));\r
+\r
+ double dx = 0, dy = 0, dxy = 0;\r
+ int octave, layer, sampleStep, size, margin;\r
+ int rows, cols;\r
+ int i, j, sum_i, sum_j;\r
+ const int* s_ptr;\r
+ float *det_ptr, *trace_ptr;\r
+\r
+ /* Allocate enough space for hessian determinant and trace matrices at the \r
+ first octave. Clearing these initially or between octaves is not\r
+ required, since all values that are accessed are first calculated */\r
+ for( layer = 0; layer <= params->nOctaveLayers+1; layer++ )\r
+ {\r
+ dets[layer] = cvCreateMat( (sum->rows-1)/SAMPLE_STEP0, (sum->cols-1)/SAMPLE_STEP0, CV_32FC1 );\r
+ traces[layer] = cvCreateMat( (sum->rows-1)/SAMPLE_STEP0, (sum->cols-1)/SAMPLE_STEP0, CV_32FC1 );\r
+ }\r
+\r
+ for( octave = 0, sampleStep=SAMPLE_STEP0; octave < params->nOctaves; octave++, sampleStep*=2 )\r
+ {\r
+ /* Hessian determinant and trace sample array size in this octave */\r
+ rows = (sum->rows-1)/sampleStep;\r
+ cols = (sum->cols-1)/sampleStep;\r
+\r
+ /* Calculate the determinant and trace of the hessian */\r
+ for( layer = 0; layer <= params->nOctaveLayers+1; layer++ )\r
+ {\r
+ sizes[layer] = size = (HAAR_SIZE0+HAAR_SIZE_INC*layer)<<octave;\r
+ icvResizeHaarPattern( dx_s, Dx, NX, 9, size, sum->cols );\r
+ icvResizeHaarPattern( dy_s, Dy, NY, 9, size, sum->cols );\r
+ icvResizeHaarPattern( dxy_s, Dxy, NXY, 9, size, sum->cols );\r
+ /*printf( "octave=%d layer=%d size=%d rows=%d cols=%d\n", octave, layer, size, rows, cols );*/\r
+ \r
+ margin = (size/2)/sampleStep;\r
+ for( sum_i=0, i=margin; sum_i<=(sum->rows-1)-size; sum_i+=sampleStep, i++ )\r
+ {\r
+ s_ptr = sum->data.i + sum_i*sum->cols;\r
+ det_ptr = dets[layer]->data.fl + i*dets[layer]->cols + margin;\r
+ trace_ptr = traces[layer]->data.fl + i*traces[layer]->cols + margin;\r
+ for( sum_j=0, j=margin; sum_j<=(sum->cols-1)-size; sum_j+=sampleStep, j++ )\r
+ {\r
+ dx = icvCalcHaarPattern( s_ptr, Dx, 3 );\r
+ dy = icvCalcHaarPattern( s_ptr, Dy, 3 );\r
+ dxy = icvCalcHaarPattern( s_ptr, Dxy, 4 );\r
+ s_ptr+=sampleStep;\r
+ /* 0.81 = 0.9^2 compensates the approximation of the mixed derivative */\r
+ *det_ptr++ = (float)(dx*dy - 0.81*dxy*dxy);\r
+ *trace_ptr++ = (float)(dx + dy);\r
+ }\r
+ }\r
+ }\r
+\r
+ /* Find maxima in the determinant of the hessian */\r
+ for( layer = 1; layer <= params->nOctaveLayers; layer++ )\r
+ {\r
+ size = sizes[layer];\r
+ icvResizeHaarPattern( dm, &Dm, NM, 9, size, mask_sum ? mask_sum->cols : sum->cols );\r
+ \r
+ /* Ignore pixels without a 3x3 neighbourhood in the layer above */\r
+ margin = (sizes[layer+1]/2)/sampleStep+1; \r
+ for( i = margin; i < rows-margin; i++ )\r
+ {\r
+ det_ptr = dets[layer]->data.fl + i*dets[layer]->cols;\r
+ trace_ptr = traces[layer]->data.fl + i*traces[layer]->cols;\r
+ for( j = margin; j < cols-margin; j++ )\r
+ {\r
+ float val0 = det_ptr[j];\r
+ if( val0 > params->hessianThreshold )\r
+ {\r
+ /* Coordinates for the start of the wavelet in the sum image. There \r
+ is some integer division involved, so don't try to simplify this\r
+ (cancel out sampleStep) without checking the result is the same */\r
+ int sum_i = sampleStep*(i-(size/2)/sampleStep);\r
+ int sum_j = sampleStep*(j-(size/2)/sampleStep);\r
+\r
+ /* The 3x3x3 neighbouring samples around the maxima. \r
+ The maxima is included at N9[1][4] */\r
+ int c = dets[layer]->cols;\r
+ const float *det1 = dets[layer-1]->data.fl + i*c + j;\r
+ const float *det2 = dets[layer]->data.fl + i*c + j;\r
+ const float *det3 = dets[layer+1]->data.fl + i*c + j;\r
+ float N9[3][9] = { { det1[-c-1], det1[-c], det1[-c+1], \r
+ det1[-1] , det1[0] , det1[1],\r
+ det1[c-1] , det1[c] , det1[c+1] },\r
+ { det2[-c-1], det2[-c], det2[-c+1], \r
+ det2[-1] , det2[0] , det2[1],\r
+ det2[c-1] , det2[c] , det2[c+1 ] },\r
+ { det3[-c-1], det3[-c], det3[-c+1], \r
+ det3[-1 ], det3[0] , det3[1],\r
+ det3[c-1] , det3[c] , det3[c+1 ] } };\r
+\r
+ /* Check the mask - why not just check the mask at the center of the wavelet? */\r
+ if( mask_sum )\r
+ {\r
+ const int* mask_ptr = mask_sum->data.i + mask_sum->cols*sum_i + sum_j;\r
+ float mval = icvCalcHaarPattern( mask_ptr, &Dm, 1 );\r
+ if( mval < 0.5 )\r
+ continue;\r
+ }\r
+\r
+ /* Non-maxima suppression. val0 is at N9[1][4]*/\r
+ if( val0 > N9[0][0] && val0 > N9[0][1] && val0 > N9[0][2] &&\r
+ val0 > N9[0][3] && val0 > N9[0][4] && val0 > N9[0][5] &&\r
+ val0 > N9[0][6] && val0 > N9[0][7] && val0 > N9[0][8] &&\r
+ val0 > N9[1][0] && val0 > N9[1][1] && val0 > N9[1][2] &&\r
+ val0 > N9[1][3] && val0 > N9[1][5] &&\r
+ val0 > N9[1][6] && val0 > N9[1][7] && val0 > N9[1][8] &&\r
+ val0 > N9[2][0] && val0 > N9[2][1] && val0 > N9[2][2] &&\r
+ val0 > N9[2][3] && val0 > N9[2][4] && val0 > N9[2][5] &&\r
+ val0 > N9[2][6] && val0 > N9[2][7] && val0 > N9[2][8] )\r
+ {\r
+ /* Calculate the wavelet center coordinates for the maxima */\r
+ double center_i = sum_i + (double)(size-1)/2;\r
+ double center_j = sum_j + (double)(size-1)/2;\r
+\r
+ CvSURFPoint point = cvSURFPoint( cvPoint2D32f(center_j,center_i), \r
+ CV_SIGN(trace_ptr[j]), sizes[layer], 0, val0 );\r
+ \r
+ /* Interpolate maxima location within the 3x3x3 neighbourhood */\r
+ int ds = sizes[layer]-sizes[layer-1];\r
+ int interp_ok = icvInterpolateKeypoint( N9, sampleStep, sampleStep, ds, &point );\r
+\r
+ /* Sometimes the interpolation step gives a negative size etc. */\r
+ if( interp_ok && point.size >= 1 &&\r
+ point.pt.x >= 0 && point.pt.x <= (sum->cols-1) &&\r
+ point.pt.y >= 0 && point.pt.y <= (sum->rows-1) )\r
+ { \r
+ /*printf( "Keypoint %f %f %d\n", point.pt.x, point.pt.y, point.size );*/\r
+ cvSeqPush( points, &point );\r
+ } \r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+ /* Clean-up */\r
+ for( layer = 0; layer <= params->nOctaveLayers+1; layer++ )\r
+ {\r
+ cvReleaseMat( &dets[layer] );\r
+ cvReleaseMat( &traces[layer] );\r
+ }\r
+\r
+ return points;\r
+}\r
+\r
+\r
+CV_IMPL void\r
+cvExtractSURF( const CvArr* _img, const CvArr* _mask,\r
+ CvSeq** _keypoints, CvSeq** _descriptors,\r
+ CvMemStorage* storage, CvSURFParams params,\r
+ int useProvidedKeyPts)\r
+{\r
+ CvMat *sum = 0, *mask1 = 0, *mask_sum = 0, **win_bufs = 0;\r
+\r
+ if( _keypoints && !useProvidedKeyPts ) // If useProvidedKeyPts!=0 we'll use current contents of "*_keypoints"\r
+ *_keypoints = 0;\r
+ if( _descriptors )\r
+ *_descriptors = 0;\r
+\r
+ /* Radius of the circle in which to sample gradients to assign an \r
+ orientation */\r
+ const int ORI_RADIUS = 6; \r
+\r
+ /* The size of the sliding window (in degrees) used to assign an \r
+ orientation */\r
+ const int ORI_WIN = 60; \r
+\r
+ /* Increment used for the orientation sliding window (in degrees) */\r
+ const int ORI_SEARCH_INC = 5; \r
+\r
+ /* Standard deviation of the Gaussian used to weight the gradient samples\r
+ used to assign an orientation */ \r
+ const float ORI_SIGMA = 2.5f;\r
+\r
+ /* Standard deviation of the Gaussian used to weight the gradient samples\r
+ used to build a keypoint descriptor */\r
+ const float DESC_SIGMA = 3.3f;\r
+\r
+\r
+ /* X and Y gradient wavelet data */\r
+ const int NX=2, NY=2;\r
+ int dx_s[NX][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};\r
+ int dy_s[NY][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};\r
+\r
+ CvSeq *keypoints, *descriptors = 0;\r
+ CvMat imghdr, *img = cvGetMat(_img, &imghdr);\r
+ CvMat maskhdr, *mask = _mask ? cvGetMat(_mask, &maskhdr) : 0;\r
+ \r
+ const int max_ori_samples = (2*ORI_RADIUS+1)*(2*ORI_RADIUS+1);\r
+ int descriptor_size = params.extended ? 128 : 64;\r
+ const int descriptor_data_type = CV_32F;\r
+ const int PATCH_SZ = 20;\r
+ float DW[PATCH_SZ][PATCH_SZ];\r
+ CvMat _DW = cvMat(PATCH_SZ, PATCH_SZ, CV_32F, DW);\r
+ CvPoint apt[max_ori_samples];\r
+ float apt_w[max_ori_samples];\r
+ int i, j, k, nangle0 = 0, N;\r
+ int nthreads = cvGetNumThreads();\r
+\r
+ CV_Assert(img != 0);\r
+ CV_Assert(CV_MAT_TYPE(img->type) == CV_8UC1);\r
+ CV_Assert(mask == 0 || (CV_ARE_SIZES_EQ(img,mask) && CV_MAT_TYPE(mask->type) == CV_8UC1));\r
+ CV_Assert(storage != 0);\r
+ CV_Assert(params.hessianThreshold >= 0);\r
+ CV_Assert(params.nOctaves > 0);\r
+ CV_Assert(params.nOctaveLayers > 0);\r
+\r
+ sum = cvCreateMat( img->height+1, img->width+1, CV_32SC1 );\r
+ cvIntegral( img, sum );\r
+ \r
+ // Compute keypoints only if we are not asked for evaluating the descriptors are some given locations:\r
+ if (!useProvidedKeyPts)\r
+ {\r
+ if( mask )\r
+ {\r
+ mask1 = cvCreateMat( img->height, img->width, CV_8UC1 );\r
+ mask_sum = cvCreateMat( img->height+1, img->width+1, CV_32SC1 );\r
+ cvMinS( mask, 1, mask1 );\r
+ cvIntegral( mask1, mask_sum );\r
+ }\r
+ keypoints = icvFastHessianDetector( sum, mask_sum, storage, ¶ms );\r
+ }\r
+ else\r
+ {\r
+ CV_Assert(useProvidedKeyPts && (_keypoints != 0) && (*_keypoints != 0));\r
+ keypoints = *_keypoints;\r
+ }\r
+\r
+ N = keypoints->total;\r
+ if( _descriptors )\r
+ {\r
+ descriptors = cvCreateSeq( 0, sizeof(CvSeq),\r
+ descriptor_size*CV_ELEM_SIZE(descriptor_data_type), storage );\r
+ cvSeqPushMulti( descriptors, 0, N );\r
+ }\r
+\r
+ /* Coordinates and weights of samples used to calculate orientation */\r
+ cv::Mat _G = cv::getGaussianKernel( 2*ORI_RADIUS+1, ORI_SIGMA, CV_32F );\r
+ const float* G = (const float*)_G.data;\r
+ \r
+ for( i = -ORI_RADIUS; i <= ORI_RADIUS; i++ )\r
+ {\r
+ for( j = -ORI_RADIUS; j <= ORI_RADIUS; j++ )\r
+ {\r
+ if( i*i + j*j <= ORI_RADIUS*ORI_RADIUS )\r
+ {\r
+ apt[nangle0] = cvPoint(j,i);\r
+ apt_w[nangle0++] = G[i+ORI_RADIUS]*G[j+ORI_RADIUS];\r
+ }\r
+ }\r
+ }\r
+\r
+ /* Gaussian used to weight descriptor samples */\r
+ {\r
+ double c2 = 1./(DESC_SIGMA*DESC_SIGMA*2);\r
+ double gs = 0;\r
+ for( i = 0; i < PATCH_SZ; i++ )\r
+ {\r
+ for( j = 0; j < PATCH_SZ; j++ )\r
+ {\r
+ double x = j - (float)(PATCH_SZ-1)/2, y = i - (float)(PATCH_SZ-1)/2;\r
+ double val = exp(-(x*x+y*y)*c2);\r
+ DW[i][j] = (float)val;\r
+ gs += val;\r
+ }\r
+ }\r
+ cvScale( &_DW, &_DW, 1./gs );\r
+ }\r
+\r
+ win_bufs = (CvMat**)cvAlloc(nthreads*sizeof(win_bufs[0]));\r
+ for( i = 0; i < nthreads; i++ )\r
+ win_bufs[i] = 0;\r
+\r
+#ifdef _OPENMP\r
+#pragma omp parallel for num_threads(nthreads) schedule(dynamic)\r
+#endif\r
+ for( k = 0; k < N; k++ )\r
+ {\r
+ const int* sum_ptr = sum->data.i;\r
+ int sum_cols = sum->cols;\r
+ int i, j, kk, x, y, nangle;\r
+ float X[max_ori_samples], Y[max_ori_samples], angle[max_ori_samples];\r
+ uchar PATCH[PATCH_SZ+1][PATCH_SZ+1];\r
+ float DX[PATCH_SZ][PATCH_SZ], DY[PATCH_SZ][PATCH_SZ];\r
+ CvMat _X = cvMat(1, max_ori_samples, CV_32F, X);\r
+ CvMat _Y = cvMat(1, max_ori_samples, CV_32F, Y);\r
+ CvMat _angle = cvMat(1, max_ori_samples, CV_32F, angle);\r
+ CvMat _patch = cvMat(PATCH_SZ+1, PATCH_SZ+1, CV_8U, PATCH);\r
+ float* vec;\r
+ CvSurfHF dx_t[NX], dy_t[NY];\r
+ int thread_idx = cvGetThreadNum();\r
+ \r
+ CvSURFPoint* kp = (CvSURFPoint*)cvGetSeqElem( keypoints, k );\r
+ int size = kp->size;\r
+ CvPoint2D32f center = kp->pt;\r
+\r
+ /* The sampling intervals and wavelet sized for selecting an orientation\r
+ and building the keypoint descriptor are defined relative to 's' */\r
+ float s = (float)size*1.2f/9.0f;\r
+\r
+ /* To find the dominant orientation, the gradients in x and y are\r
+ sampled in a circle of radius 6s using wavelets of size 4s.\r
+ We ensure the gradient wavelet size is even to ensure the \r
+ wavelet pattern is balanced and symmetric around its center */\r
+ int grad_wav_size = 2*cvRound( 2*s );\r
+ if ( sum->rows < grad_wav_size || sum->cols < grad_wav_size )\r
+ {\r
+ /* when grad_wav_size is too big,\r
+ * the sampling of gradient will be meaningless\r
+ * mark keypoint for deletion. */\r
+ kp->size = -1;\r
+ continue;\r
+ }\r
+ icvResizeHaarPattern( dx_s, dx_t, NX, 4, grad_wav_size, sum->cols );\r
+ icvResizeHaarPattern( dy_s, dy_t, NY, 4, grad_wav_size, sum->cols );\r
+ for( kk = 0, nangle = 0; kk < nangle0; kk++ )\r
+ {\r
+ const int* ptr;\r
+ float vx, vy;\r
+ x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );\r
+ y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );\r
+ if( (unsigned)y >= (unsigned)(sum->rows - grad_wav_size) ||\r
+ (unsigned)x >= (unsigned)(sum->cols - grad_wav_size) )\r
+ continue;\r
+ ptr = sum_ptr + x + y*sum_cols;\r
+ vx = icvCalcHaarPattern( ptr, dx_t, 2 );\r
+ vy = icvCalcHaarPattern( ptr, dy_t, 2 );\r
+ X[nangle] = vx*apt_w[kk]; Y[nangle] = vy*apt_w[kk];\r
+ nangle++;\r
+ }\r
+ if ( nangle == 0 )\r
+ {\r
+ /* No gradient could be sampled because the keypoint is too\r
+ * near too one or more of the sides of the image. As we\r
+ * therefore cannot find a dominant direction, we skip this\r
+ * keypoint and mark it for later deletion from the sequence. */\r
+ kp->size = -1;\r
+ continue;\r
+ }\r
+ _X.cols = _Y.cols = _angle.cols = nangle;\r
+ cvCartToPolar( &_X, &_Y, 0, &_angle, 1 );\r
+\r
+ float bestx = 0, besty = 0, descriptor_mod = 0;\r
+ for( i = 0; i < 360; i += ORI_SEARCH_INC )\r
+ {\r
+ float sumx = 0, sumy = 0, temp_mod;\r
+ for( j = 0; j < nangle; j++ )\r
+ {\r
+ int d = abs(cvRound(angle[j]) - i);\r
+ if( d < ORI_WIN/2 || d > 360-ORI_WIN/2 )\r
+ {\r
+ sumx += X[j];\r
+ sumy += Y[j];\r
+ }\r
+ }\r
+ temp_mod = sumx*sumx + sumy*sumy;\r
+ if( temp_mod > descriptor_mod )\r
+ {\r
+ descriptor_mod = temp_mod;\r
+ bestx = sumx;\r
+ besty = sumy;\r
+ }\r
+ }\r
+ \r
+ float descriptor_dir = cvFastArctan( besty, bestx );\r
+ kp->dir = descriptor_dir;\r
+\r
+ if( !_descriptors )\r
+ continue;\r
+\r
+ descriptor_dir *= (float)(CV_PI/180);\r
+ \r
+ /* Extract a window of pixels around the keypoint of size 20s */\r
+ int win_size = (int)((PATCH_SZ+1)*s);\r
+ if( win_bufs[thread_idx] == 0 || win_bufs[thread_idx]->cols < win_size*win_size )\r
+ {\r
+ cvReleaseMat( &win_bufs[thread_idx] );\r
+ win_bufs[thread_idx] = cvCreateMat( 1, win_size*win_size, CV_8U );\r
+ }\r
+ \r
+ CvMat win = cvMat(win_size, win_size, CV_8U, win_bufs[thread_idx]->data.ptr);\r
+ float sin_dir = sin(descriptor_dir);\r
+ float cos_dir = cos(descriptor_dir) ;\r
+\r
+ /* Subpixel interpolation version (slower). Subpixel not required since\r
+ the pixels will all get averaged when we scale down to 20 pixels */\r
+ /* \r
+ float w[] = { cos_dir, sin_dir, center.x,\r
+ -sin_dir, cos_dir , center.y };\r
+ CvMat W = cvMat(2, 3, CV_32F, w);\r
+ cvGetQuadrangleSubPix( img, &win, &W );\r
+ */\r
+\r
+ /* Nearest neighbour version (faster) */\r
+ float win_offset = -(float)(win_size-1)/2;\r
+ float start_x = center.x + win_offset*cos_dir + win_offset*sin_dir;\r
+ float start_y = center.y - win_offset*sin_dir + win_offset*cos_dir;\r
+ uchar* WIN = win.data.ptr;\r
+ for( i=0; i<win_size; i++, start_x+=sin_dir, start_y+=cos_dir )\r
+ {\r
+ float pixel_x = start_x;\r
+ float pixel_y = start_y;\r
+ for( j=0; j<win_size; j++, pixel_x+=cos_dir, pixel_y-=sin_dir )\r
+ {\r
+ int x = cvRound( pixel_x );\r
+ int y = cvRound( pixel_y );\r
+ x = MAX( x, 0 );\r
+ y = MAX( y, 0 );\r
+ x = MIN( x, img->cols-1 );\r
+ y = MIN( y, img->rows-1 );\r
+ WIN[i*win_size + j] = img->data.ptr[y*img->step+x];\r
+ }\r
+ }\r
+\r
+ /* Scale the window to size PATCH_SZ so each pixel's size is s. This\r
+ makes calculating the gradients with wavelets of size 2s easy */\r
+ cvResize( &win, &_patch, CV_INTER_AREA );\r
+\r
+ /* Calculate gradients in x and y with wavelets of size 2s */\r
+ for( i = 0; i < PATCH_SZ; i++ )\r
+ for( j = 0; j < PATCH_SZ; j++ )\r
+ {\r
+ float dw = DW[i][j];\r
+ float vx = (PATCH[i][j+1] - PATCH[i][j] + PATCH[i+1][j+1] - PATCH[i+1][j])*dw;\r
+ float vy = (PATCH[i+1][j] - PATCH[i][j] + PATCH[i+1][j+1] - PATCH[i][j+1])*dw;\r
+ DX[i][j] = vx;\r
+ DY[i][j] = vy;\r
+ }\r
+\r
+ /* Construct the descriptor */\r
+ vec = (float*)cvGetSeqElem( descriptors, k );\r
+ for( kk = 0; kk < (int)(descriptors->elem_size/sizeof(vec[0])); kk++ )\r
+ vec[kk] = 0;\r
+ double square_mag = 0; \r
+ if( params.extended )\r
+ {\r
+ /* 128-bin descriptor */\r
+ for( i = 0; i < 4; i++ )\r
+ for( j = 0; j < 4; j++ )\r
+ {\r
+ for( y = i*5; y < i*5+5; y++ )\r
+ {\r
+ for( x = j*5; x < j*5+5; x++ )\r
+ {\r
+ float tx = DX[y][x], ty = DY[y][x];\r
+ if( ty >= 0 )\r
+ {\r
+ vec[0] += tx;\r
+ vec[1] += (float)fabs(tx);\r
+ } else {\r
+ vec[2] += tx;\r
+ vec[3] += (float)fabs(tx);\r
+ }\r
+ if ( tx >= 0 )\r
+ {\r
+ vec[4] += ty;\r
+ vec[5] += (float)fabs(ty);\r
+ } else {\r
+ vec[6] += ty;\r
+ vec[7] += (float)fabs(ty);\r
+ }\r
+ }\r
+ }\r
+ for( kk = 0; kk < 8; kk++ )\r
+ square_mag += vec[kk]*vec[kk];\r
+ vec += 8;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ /* 64-bin descriptor */\r
+ for( i = 0; i < 4; i++ )\r
+ for( j = 0; j < 4; j++ )\r
+ {\r
+ for( y = i*5; y < i*5+5; y++ )\r
+ {\r
+ for( x = j*5; x < j*5+5; x++ )\r
+ {\r
+ float tx = DX[y][x], ty = DY[y][x];\r
+ vec[0] += tx; vec[1] += ty;\r
+ vec[2] += (float)fabs(tx); vec[3] += (float)fabs(ty);\r
+ }\r
+ }\r
+ for( kk = 0; kk < 4; kk++ )\r
+ square_mag += vec[kk]*vec[kk];\r
+ vec+=4;\r
+ }\r
+ }\r
+\r
+ /* unit vector is essential for contrast invariance */\r
+ vec = (float*)cvGetSeqElem( descriptors, k );\r
+ double scale = 1./(sqrt(square_mag) + DBL_EPSILON);\r
+ for( kk = 0; kk < descriptor_size; kk++ )\r
+ vec[kk] = (float)(vec[kk]*scale);\r
+ }\r
+ \r
+ /* remove keypoints that were marked for deletion */\r
+ for ( i = 0; i < N; i++ )\r
+ {\r
+ CvSURFPoint* kp = (CvSURFPoint*)cvGetSeqElem( keypoints, i );\r
+ if ( kp->size == -1 )\r
+ {\r
+ cvSeqRemove( keypoints, i );\r
+ if ( _descriptors )\r
+ cvSeqRemove( descriptors, i );\r
+ k--;\r
+ N--;\r
+ }\r
+ }\r
+\r
+ for( i = 0; i < nthreads; i++ )\r
+ cvReleaseMat( &win_bufs[i] );\r
+\r
+ if( _keypoints && !useProvidedKeyPts )\r
+ *_keypoints = keypoints;\r
+ if( _descriptors )\r
+ *_descriptors = descriptors;\r
+\r
+ cvReleaseMat( &sum );\r
+ if (mask1) cvReleaseMat( &mask1 );\r
+ if (mask_sum) cvReleaseMat( &mask_sum );\r
+ cvFree( &win_bufs );\r
+}\r
+\r
+\r
+namespace cv\r
+{\r
+\r
+/* Default parameters: threshold 100, extended (128-float) descriptors,\r
+   4 octaves, 2 layers per octave. */\r
+SURF::SURF()\r
+{\r
+ hessianThreshold = 100;\r
+ extended = 1;\r
+ nOctaves = 4;\r
+ nOctaveLayers = 2;\r
+}\r
+\r
+/* Same defaults as SURF::SURF() but with caller-supplied Hessian threshold\r
+   and descriptor flavour. */\r
+SURF::SURF(double _threshold, bool _extended)\r
+{\r
+ hessianThreshold = _threshold;\r
+ extended = _extended;\r
+ nOctaves = 4;\r
+ nOctaveLayers = 2;\r
+}\r
+\r
+int SURF::descriptorSize() const { return extended ? 128 : 64; }\r
+\r
+/* Detection-only overload: wraps cvExtractSURF with no descriptor output. */\r
+void SURF::operator()(const Mat& img, const Mat& mask,\r
+ Vector<SURFKeypoint>& keypoints) const\r
+{\r
+ CvMat _img = img, _mask, *pmask = 0;\r
+ if( mask.data )\r
+ pmask = &(_mask = mask);\r
+ MemStorage storage(cvCreateMemStorage(0));\r
+ CvSeq* kp = 0;\r
+ cvExtractSURF(&_img, pmask, &kp, 0, storage, *(const CvSURFParams*)this, 0);\r
+ Seq<SURFKeypoint>(kp).copyTo(keypoints);\r
+}\r
+\r
+/* Detection + description overload.  When useProvidedKeypoints is true the\r
+   given keypoints are wrapped in a temporary CvSeq header (no copy) and only\r
+   descriptors are computed. */\r
+void SURF::operator()(const Mat& img, const Mat& mask,\r
+ Vector<SURFKeypoint>& keypoints,\r
+ Vector<float>& descriptors,\r
+ bool useProvidedKeypoints) const\r
+{\r
+ CvMat _img = img, _mask, *pmask = 0;\r
+ if( mask.data )\r
+ pmask = &(_mask = mask);\r
+ MemStorage storage(cvCreateMemStorage(0));\r
+ CvSeq hdr, *kp = 0, *d = 0;\r
+ CvSeqBlock block;\r
+ if( useProvidedKeypoints )\r
+ kp = cvMakeSeqHeaderForArray(0, sizeof(*kp), sizeof(SURFKeypoint),\r
+ &keypoints[0], keypoints.size(), &hdr, &block);\r
+ cvExtractSURF(&_img, pmask, &kp, &d, storage,\r
+ *(const CvSURFParams*)this, useProvidedKeypoints);\r
+ if( !useProvidedKeypoints )\r
+ Seq<SURFKeypoint>(kp).copyTo(keypoints);\r
+ Seq<float>(d).copyTo(descriptors);\r
+}\r
+\r
+}\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-
-void
-icvCrossCorr( const CvArr* _img, const CvArr* _templ, CvArr* _corr,
- CvPoint anchor, double delta, int borderType )
-{
- const double block_scale = 4.5;
- const int min_block_size = 256;
- CvMat* dft_img[CV_MAX_THREADS] = {0};
- CvMat* dft_templ = 0;
- void* buf[CV_MAX_THREADS] = {0};
- int k, num_threads = 0;
-
- CV_FUNCNAME( "icvCrossCorr" );
-
- __BEGIN__;
-
- CvMat istub, *img = (CvMat*)_img;
- CvMat tstub, *templ = (CvMat*)_templ;
- CvMat cstub, *corr = (CvMat*)_corr;
- CvSize dftsize, blocksize;
- int depth, templ_depth, corr_depth, max_depth = CV_32F,
- cn, templ_cn, corr_cn, buf_size = 0,
- tile_count_x, tile_count_y, tile_count;
-
- CV_CALL( img = cvGetMat( img, &istub ));
- CV_CALL( templ = cvGetMat( templ, &tstub ));
- CV_CALL( corr = cvGetMat( corr, &cstub ));
-
- if( CV_MAT_DEPTH( img->type ) != CV_8U &&
- CV_MAT_DEPTH( img->type ) != CV_16U &&
- CV_MAT_DEPTH( img->type ) != CV_32F )
- CV_ERROR( CV_StsUnsupportedFormat,
- "The function supports only 8u, 16u and 32f data types" );
-
- if( !CV_ARE_DEPTHS_EQ( img, templ ) && CV_MAT_DEPTH( templ->type ) != CV_32F )
- CV_ERROR( CV_StsUnsupportedFormat,
- "Template (kernel) must be of the same depth as the input image, or be 32f" );
-
- if( !CV_ARE_DEPTHS_EQ( img, corr ) && CV_MAT_DEPTH( corr->type ) != CV_32F &&
- CV_MAT_DEPTH( corr->type ) != CV_64F )
- CV_ERROR( CV_StsUnsupportedFormat,
- "The output image must have the same depth as the input image, or be 32f/64f" );
-
- if( (!CV_ARE_CNS_EQ( img, corr ) || CV_MAT_CN(templ->type) > 1) &&
- (CV_MAT_CN( corr->type ) > 1 || !CV_ARE_CNS_EQ( img, templ)) )
- CV_ERROR( CV_StsUnsupportedFormat,
- "The output must have the same number of channels as the input (when the template has 1 channel), "
- "or the output must have 1 channel when the input and the template have the same number of channels" );
-
- depth = CV_MAT_DEPTH(img->type);
- cn = CV_MAT_CN(img->type);
- templ_depth = CV_MAT_DEPTH(templ->type);
- templ_cn = CV_MAT_CN(templ->type);
- corr_depth = CV_MAT_DEPTH(corr->type);
- corr_cn = CV_MAT_CN(corr->type);
-
- CV_Assert( corr_cn == 1 || delta == 0 );
-
- max_depth = MAX( max_depth, templ_depth );
- max_depth = MAX( max_depth, depth );
- max_depth = MAX( max_depth, corr_depth );
- if( depth > CV_8U )
- max_depth = CV_64F;
-
- if( img->cols < templ->cols || img->rows < templ->rows )
- CV_ERROR( CV_StsUnmatchedSizes,
- "Such a combination of image and template/filter size is not supported" );
-
- if( corr->rows > img->rows + templ->rows - 1 ||
- corr->cols > img->cols + templ->cols - 1 )
- CV_ERROR( CV_StsUnmatchedSizes,
- "output image should not be greater than (W + w - 1)x(H + h - 1)" );
-
- blocksize.width = cvRound(templ->cols*block_scale);
- blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 );
- blocksize.width = MIN( blocksize.width, corr->cols );
- blocksize.height = cvRound(templ->rows*block_scale);
- blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 );
- blocksize.height = MIN( blocksize.height, corr->rows );
-
- dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1);
- if( dftsize.width == 1 )
- dftsize.width = 2;
- dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1);
- if( dftsize.width <= 0 || dftsize.height <= 0 )
- CV_ERROR( CV_StsOutOfRange, "the input arrays are too big" );
-
- // recompute block size
- blocksize.width = dftsize.width - templ->cols + 1;
- blocksize.width = MIN( blocksize.width, corr->cols );
- blocksize.height = dftsize.height - templ->rows + 1;
- blocksize.height = MIN( blocksize.height, corr->rows );
-
- CV_CALL( dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth ));
-
- num_threads = cvGetNumThreads();
-
- for( k = 0; k < num_threads; k++ )
- CV_CALL( dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth ));
-
- if( templ_cn > 1 && templ_depth != max_depth )
- buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth);
-
- if( cn > 1 && depth != max_depth )
- buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)*
- (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth));
-
- if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth )
- buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth));
-
- if( buf_size > 0 )
- {
- for( k = 0; k < num_threads; k++ )
- CV_CALL( buf[k] = cvAlloc(buf_size) );
- }
-
- // compute DFT of each template plane
- for( k = 0; k < templ_cn; k++ )
- {
- CvMat dstub, *src, *dst, temp;
- CvMat* planes[] = { 0, 0, 0, 0 };
- int yofs = k*dftsize.height;
-
- src = templ;
- dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows));
-
- if( templ_cn > 1 )
- {
- planes[k] = templ_depth == max_depth ? dst :
- cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, buf[0] );
- cvSplit( templ, planes[0], planes[1], planes[2], planes[3] );
- src = planes[k];
- planes[k] = 0;
- }
-
- if( dst != src )
- cvConvert( src, dst );
-
- if( dft_templ->cols > templ->cols )
- {
- cvGetSubRect( dft_templ, dst, cvRect(templ->cols, yofs,
- dft_templ->cols - templ->cols, templ->rows) );
- cvZero( dst );
- }
- cvGetSubRect( dft_templ, dst, cvRect(0,yofs,dftsize.width,dftsize.height) );
- cvDFT( dst, dst, CV_DXT_FORWARD + CV_DXT_SCALE, templ->rows );
- }
-
- tile_count_x = (corr->cols + blocksize.width - 1)/blocksize.width;
- tile_count_y = (corr->rows + blocksize.height - 1)/blocksize.height;
- tile_count = tile_count_x*tile_count_y;
-
- {
-#ifdef _OPENMP
- #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
-#endif
- // calculate correlation by blocks
- for( k = 0; k < tile_count; k++ )
- {
- int thread_idx = cvGetThreadNum();
- int x = (k%tile_count_x)*blocksize.width;
- int y = (k/tile_count_x)*blocksize.height;
- int i, yofs;
- CvMat sstub, dstub, *src, *dst, temp;
- CvMat* planes[] = { 0, 0, 0, 0 };
- CvMat* _dft_img = dft_img[thread_idx];
- void* _buf = buf[thread_idx];
- CvSize csz = { blocksize.width, blocksize.height }, isz;
- int x0 = x - anchor.x, y0 = y - anchor.y;
- int x1 = MAX( 0, x0 ), y1 = MAX( 0, y0 ), x2, y2;
- csz.width = MIN( csz.width, corr->cols - x );
- csz.height = MIN( csz.height, corr->rows - y );
- isz.width = csz.width + templ->cols - 1;
- isz.height = csz.height + templ->rows - 1;
- x2 = MIN( img->cols, x0 + isz.width );
- y2 = MIN( img->rows, y0 + isz.height );
-
- for( i = 0; i < cn; i++ )
- {
- CvMat dstub1, *dst1;
- yofs = i*dftsize.height;
-
- src = cvGetSubRect( img, &sstub, cvRect(x1,y1,x2-x1,y2-y1) );
- dst = cvGetSubRect( _dft_img, &dstub,
- cvRect(0,0,isz.width,isz.height) );
- dst1 = dst;
-
- if( x2 - x1 < isz.width || y2 - y1 < isz.height )
- dst1 = cvGetSubRect( _dft_img, &dstub1,
- cvRect( x1 - x0, y1 - y0, x2 - x1, y2 - y1 ));
-
- if( cn > 1 )
- {
- planes[i] = dst1;
- if( depth != max_depth )
- planes[i] = cvInitMatHeader( &temp, y2 - y1, x2 - x1, depth, _buf );
- cvSplit( src, planes[0], planes[1], planes[2], planes[3] );
- src = planes[i];
- planes[i] = 0;
- }
-
- if( dst1 != src )
- cvConvert( src, dst1 );
-
- if( dst != dst1 )
- cvCopyMakeBorder( dst1, dst, cvPoint(x1 - x0, y1 - y0), borderType );
-
- if( dftsize.width > isz.width )
- {
- cvGetSubRect( _dft_img, dst, cvRect(isz.width, 0,
- dftsize.width - isz.width,dftsize.height) );
- cvZero( dst );
- }
-
- cvDFT( _dft_img, _dft_img, CV_DXT_FORWARD, isz.height );
- cvGetSubRect( dft_templ, dst,
- cvRect(0,(templ_cn>1?yofs:0),dftsize.width,dftsize.height) );
-
- cvMulSpectrums( _dft_img, dst, _dft_img, CV_DXT_MUL_CONJ );
- cvDFT( _dft_img, _dft_img, CV_DXT_INVERSE, csz.height );
-
- src = cvGetSubRect( _dft_img, &sstub, cvRect(0,0,csz.width,csz.height) );
- dst = cvGetSubRect( corr, &dstub, cvRect(x,y,csz.width,csz.height) );
-
- if( corr_cn > 1 )
- {
- planes[i] = src;
- if( corr_depth != max_depth )
- {
- planes[i] = cvInitMatHeader( &temp, csz.height, csz.width,
- corr_depth, _buf );
- cvConvertScale( src, planes[i], 1, delta );
- }
- cvMerge( planes[0], planes[1], planes[2], planes[3], dst );
- planes[i] = 0;
- }
- else
- {
- if( i == 0 )
- cvConvertScale( src, dst, 1, delta );
- else
- {
- if( max_depth > corr_depth )
- {
- cvInitMatHeader( &temp, csz.height, csz.width,
- corr_depth, _buf );
- cvConvert( src, &temp );
- src = &temp;
- }
- cvAcc( src, dst );
- }
- }
- }
- }
- }
-
- __END__;
-
- cvReleaseMat( &dft_templ );
-
- for( k = 0; k < num_threads; k++ )
- {
- cvReleaseMat( &dft_img[k] );
- cvFree( &buf[k] );
- }
-}
-
-void
-cv::crossCorr( const Mat& img, const Mat& templ, Mat& corr,
- Point anchor, double delta, int borderType )
-{
- CvMat _img = img, _templ = templ, _corr = corr;
- icvCrossCorr( &_img, &_templ, &_corr, anchor, delta, borderType );
-}
-
-
-/*****************************************************************************************/
-
-CV_IMPL void
-cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method )
-{
- CvMat* sum = 0;
- CvMat* sqsum = 0;
-
- CV_FUNCNAME( "cvMatchTemplate" );
-
- __BEGIN__;
-
- int coi1 = 0, coi2 = 0;
- int depth, cn;
- int i, j, k;
- CvMat stub, *img = (CvMat*)_img;
- CvMat tstub, *templ = (CvMat*)_templ;
- CvMat rstub, *result = (CvMat*)_result;
- CvScalar templ_mean = cvScalarAll(0);
- double templ_norm = 0, templ_sum2 = 0;
-
- int idx = 0, idx2 = 0;
- double *p0, *p1, *p2, *p3;
- double *q0, *q1, *q2, *q3;
- double inv_area;
- int sum_step, sqsum_step;
- int num_type = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
- method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
- int is_normed = method == CV_TM_CCORR_NORMED ||
- method == CV_TM_SQDIFF_NORMED ||
- method == CV_TM_CCOEFF_NORMED;
-
- CV_CALL( img = cvGetMat( img, &stub, &coi1 ));
- CV_CALL( templ = cvGetMat( templ, &tstub, &coi2 ));
- CV_CALL( result = cvGetMat( result, &rstub ));
-
- if( CV_MAT_DEPTH( img->type ) != CV_8U &&
- CV_MAT_DEPTH( img->type ) != CV_32F )
- CV_ERROR( CV_StsUnsupportedFormat,
- "The function supports only 8u and 32f data types" );
-
- if( !CV_ARE_TYPES_EQ( img, templ ))
- CV_ERROR( CV_StsUnmatchedSizes, "image and template should have the same type" );
-
- if( CV_MAT_TYPE( result->type ) != CV_32FC1 )
- CV_ERROR( CV_StsUnsupportedFormat, "output image should have 32f type" );
-
- if( img->rows < templ->rows || img->cols < templ->cols )
- {
- CvMat* t;
- CV_SWAP( img, templ, t );
- }
-
- if( result->rows != img->rows - templ->rows + 1 ||
- result->cols != img->cols - templ->cols + 1 )
- CV_ERROR( CV_StsUnmatchedSizes, "output image should be (W - w + 1)x(H - h + 1)" );
-
- if( method < CV_TM_SQDIFF || method > CV_TM_CCOEFF_NORMED )
- CV_ERROR( CV_StsBadArg, "unknown comparison method" );
-
- depth = CV_MAT_DEPTH(img->type);
- cn = CV_MAT_CN(img->type);
-
- /*if( is_normed && cn == 1 && templ->rows > 8 && templ->cols > 8 &&
- img->rows > templ->cols && img->cols > templ->cols )
- {
- CvTemplMatchIPPFunc ipp_func =
- depth == CV_8U ?
- (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_8u32f_C1R_p :
- method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_8u32f_C1R_p :
- (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_8u32f_C1R_p) :
- (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_32f_C1R_p :
- method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_32f_C1R_p :
- (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_32f_C1R_p);
-
- if( ipp_func )
- {
- CvSize img_size = cvGetMatSize(img), templ_size = cvGetMatSize(templ);
-
- IPPI_CALL( ipp_func( img->data.ptr, img->step ? img->step : CV_STUB_STEP,
- img_size, templ->data.ptr,
- templ->step ? templ->step : CV_STUB_STEP,
- templ_size, result->data.ptr,
- result->step ? result->step : CV_STUB_STEP ));
- for( i = 0; i < result->rows; i++ )
- {
- float* rrow = (float*)(result->data.ptr + i*result->step);
- for( j = 0; j < result->cols; j++ )
- {
- if( fabs(rrow[j]) > 1. )
- rrow[j] = rrow[j] < 0 ? -1.f : 1.f;
- }
- }
- EXIT;
- }
- }*/
-
- CV_CALL( icvCrossCorr( img, templ, result ));
-
- if( method == CV_TM_CCORR )
- EXIT;
-
- inv_area = 1./((double)templ->rows * templ->cols);
-
- CV_CALL( sum = cvCreateMat( img->rows + 1, img->cols + 1,
- CV_MAKETYPE( CV_64F, cn )));
- if( method == CV_TM_CCOEFF )
- {
- CV_CALL( cvIntegral( img, sum, 0, 0 ));
- CV_CALL( templ_mean = cvAvg( templ ));
- q0 = q1 = q2 = q3 = 0;
- }
- else
- {
- CvScalar _templ_sdv = cvScalarAll(0);
- CV_CALL( sqsum = cvCreateMat( img->rows + 1, img->cols + 1,
- CV_MAKETYPE( CV_64F, cn )));
- CV_CALL( cvIntegral( img, sum, sqsum, 0 ));
- CV_CALL( cvAvgSdv( templ, &templ_mean, &_templ_sdv ));
-
- templ_norm = CV_SQR(_templ_sdv.val[0]) + CV_SQR(_templ_sdv.val[1]) +
- CV_SQR(_templ_sdv.val[2]) + CV_SQR(_templ_sdv.val[3]);
-
- if( templ_norm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
- {
- cvSet( result, cvScalarAll(1.) );
- EXIT;
- }
-
- templ_sum2 = templ_norm +
- CV_SQR(templ_mean.val[0]) + CV_SQR(templ_mean.val[1]) +
- CV_SQR(templ_mean.val[2]) + CV_SQR(templ_mean.val[3]);
-
- if( num_type != 1 )
- {
- templ_mean = cvScalarAll(0);
- templ_norm = templ_sum2;
- }
-
- templ_sum2 /= inv_area;
- templ_norm = sqrt(templ_norm);
- templ_norm /= sqrt(inv_area); // care of accuracy here
-
- q0 = (double*)sqsum->data.ptr;
- q1 = q0 + templ->cols*cn;
- q2 = (double*)(sqsum->data.ptr + templ->rows*sqsum->step);
- q3 = q2 + templ->cols*cn;
- }
-
- p0 = (double*)sum->data.ptr;
- p1 = p0 + templ->cols*cn;
- p2 = (double*)(sum->data.ptr + templ->rows*sum->step);
- p3 = p2 + templ->cols*cn;
-
- sum_step = sum ? sum->step / sizeof(double) : 0;
- sqsum_step = sqsum ? sqsum->step / sizeof(double) : 0;
-
- for( i = 0; i < result->rows; i++ )
- {
- float* rrow = (float*)(result->data.ptr + i*result->step);
- idx = i * sum_step;
- idx2 = i * sqsum_step;
-
- for( j = 0; j < result->cols; j++, idx += cn, idx2 += cn )
- {
- double num = rrow[j], t;
- double wnd_mean2 = 0, wnd_sum2 = 0;
-
- if( num_type == 1 )
- {
- for( k = 0; k < cn; k++ )
- {
- t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
- wnd_mean2 += CV_SQR(t);
- num -= t*templ_mean.val[k];
- }
-
- wnd_mean2 *= inv_area;
- }
-
- if( is_normed || num_type == 2 )
- {
- for( k = 0; k < cn; k++ )
- {
- t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
- wnd_sum2 += t;
- }
-
- if( num_type == 2 )
- num = wnd_sum2 - 2*num + templ_sum2;
- }
-
- if( is_normed )
- {
- t = sqrt(MAX(wnd_sum2 - wnd_mean2,0))*templ_norm;
- if( t > DBL_EPSILON )
- {
- num /= t;
- if( fabs(num) > 1. )
- num = num > 0 ? 1 : -1;
- }
- else
- num = method != CV_TM_SQDIFF_NORMED || num < DBL_EPSILON ? 0 : 1;
- }
-
- rrow[j] = (float)num;
- }
- }
-
- __END__;
-
- cvReleaseMat( &sum );
- cvReleaseMat( &sqsum );
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cv.h"\r
+\r
+void\r
+icvCrossCorr( const CvArr* _img, const CvArr* _templ, CvArr* _corr,\r
+ CvPoint anchor, double delta, int borderType )\r
+{\r
+ const double block_scale = 4.5;\r
+ const int min_block_size = 256;\r
+ CvMat* dft_img[CV_MAX_THREADS] = {0};\r
+ CvMat* dft_templ = 0;\r
+ void* buf[CV_MAX_THREADS] = {0};\r
+ int k, num_threads = 0;\r
+ \r
+ CV_FUNCNAME( "icvCrossCorr" );\r
+ \r
+ __BEGIN__;\r
+\r
+ CvMat istub, *img = (CvMat*)_img;\r
+ CvMat tstub, *templ = (CvMat*)_templ;\r
+ CvMat cstub, *corr = (CvMat*)_corr;\r
+ CvSize dftsize, blocksize;\r
+ int depth, templ_depth, corr_depth, max_depth = CV_32F,\r
+ cn, templ_cn, corr_cn, buf_size = 0,\r
+ tile_count_x, tile_count_y, tile_count;\r
+\r
+ CV_CALL( img = cvGetMat( img, &istub ));\r
+ CV_CALL( templ = cvGetMat( templ, &tstub ));\r
+ CV_CALL( corr = cvGetMat( corr, &cstub ));\r
+\r
+ if( CV_MAT_DEPTH( img->type ) != CV_8U &&\r
+ CV_MAT_DEPTH( img->type ) != CV_16U &&\r
+ CV_MAT_DEPTH( img->type ) != CV_32F )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "The function supports only 8u, 16u and 32f data types" );\r
+\r
+ if( !CV_ARE_DEPTHS_EQ( img, templ ) && CV_MAT_DEPTH( templ->type ) != CV_32F )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "Template (kernel) must be of the same depth as the input image, or be 32f" );\r
+ \r
+ if( !CV_ARE_DEPTHS_EQ( img, corr ) && CV_MAT_DEPTH( corr->type ) != CV_32F &&\r
+ CV_MAT_DEPTH( corr->type ) != CV_64F )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "The output image must have the same depth as the input image, or be 32f/64f" );\r
+\r
+ if( (!CV_ARE_CNS_EQ( img, corr ) || CV_MAT_CN(templ->type) > 1) &&\r
+ (CV_MAT_CN( corr->type ) > 1 || !CV_ARE_CNS_EQ( img, templ)) )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "The output must have the same number of channels as the input (when the template has 1 channel), "\r
+ "or the output must have 1 channel when the input and the template have the same number of channels" );\r
+\r
+ depth = CV_MAT_DEPTH(img->type);\r
+ cn = CV_MAT_CN(img->type);\r
+ templ_depth = CV_MAT_DEPTH(templ->type);\r
+ templ_cn = CV_MAT_CN(templ->type);\r
+ corr_depth = CV_MAT_DEPTH(corr->type);\r
+ corr_cn = CV_MAT_CN(corr->type);\r
+\r
+ CV_Assert( corr_cn == 1 || delta == 0 );\r
+\r
+ max_depth = MAX( max_depth, templ_depth );\r
+ max_depth = MAX( max_depth, depth );\r
+ max_depth = MAX( max_depth, corr_depth );\r
+ if( depth > CV_8U )\r
+ max_depth = CV_64F;\r
+\r
+ if( img->cols < templ->cols || img->rows < templ->rows )\r
+ CV_ERROR( CV_StsUnmatchedSizes,\r
+ "Such a combination of image and template/filter size is not supported" );\r
+\r
+ if( corr->rows > img->rows + templ->rows - 1 ||\r
+ corr->cols > img->cols + templ->cols - 1 )\r
+ CV_ERROR( CV_StsUnmatchedSizes,\r
+ "output image should not be greater than (W + w - 1)x(H + h - 1)" );\r
+\r
+ blocksize.width = cvRound(templ->cols*block_scale);\r
+ blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 );\r
+ blocksize.width = MIN( blocksize.width, corr->cols );\r
+ blocksize.height = cvRound(templ->rows*block_scale);\r
+ blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 );\r
+ blocksize.height = MIN( blocksize.height, corr->rows );\r
+\r
+ dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1);\r
+ if( dftsize.width == 1 )\r
+ dftsize.width = 2;\r
+ dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1);\r
+ if( dftsize.width <= 0 || dftsize.height <= 0 )\r
+ CV_ERROR( CV_StsOutOfRange, "the input arrays are too big" );\r
+\r
+ // recompute block size\r
+ blocksize.width = dftsize.width - templ->cols + 1;\r
+ blocksize.width = MIN( blocksize.width, corr->cols );\r
+ blocksize.height = dftsize.height - templ->rows + 1;\r
+ blocksize.height = MIN( blocksize.height, corr->rows );\r
+\r
+ CV_CALL( dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth ));\r
+\r
+ num_threads = cvGetNumThreads();\r
+\r
+ for( k = 0; k < num_threads; k++ )\r
+ CV_CALL( dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth ));\r
+\r
+ if( templ_cn > 1 && templ_depth != max_depth )\r
+ buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth);\r
+\r
+ if( cn > 1 && depth != max_depth )\r
+ buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)*\r
+ (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth));\r
+\r
+ if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth )\r
+ buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth));\r
+\r
+ if( buf_size > 0 )\r
+ {\r
+ for( k = 0; k < num_threads; k++ )\r
+ CV_CALL( buf[k] = cvAlloc(buf_size) );\r
+ }\r
+\r
+ // compute DFT of each template plane\r
+ for( k = 0; k < templ_cn; k++ )\r
+ {\r
+ CvMat dstub, *src, *dst, temp;\r
+ CvMat* planes[] = { 0, 0, 0, 0 };\r
+ int yofs = k*dftsize.height;\r
+\r
+ src = templ;\r
+ dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows));\r
+ \r
+ if( templ_cn > 1 )\r
+ {\r
+ planes[k] = templ_depth == max_depth ? dst :\r
+ cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, buf[0] );\r
+ cvSplit( templ, planes[0], planes[1], planes[2], planes[3] );\r
+ src = planes[k];\r
+ planes[k] = 0;\r
+ }\r
+\r
+ if( dst != src )\r
+ cvConvert( src, dst );\r
+\r
+ if( dft_templ->cols > templ->cols )\r
+ {\r
+ cvGetSubRect( dft_templ, dst, cvRect(templ->cols, yofs,\r
+ dft_templ->cols - templ->cols, templ->rows) );\r
+ cvZero( dst );\r
+ }\r
+ cvGetSubRect( dft_templ, dst, cvRect(0,yofs,dftsize.width,dftsize.height) );\r
+ cvDFT( dst, dst, CV_DXT_FORWARD + CV_DXT_SCALE, templ->rows );\r
+ }\r
+\r
+ tile_count_x = (corr->cols + blocksize.width - 1)/blocksize.width;\r
+ tile_count_y = (corr->rows + blocksize.height - 1)/blocksize.height;\r
+ tile_count = tile_count_x*tile_count_y;\r
+\r
+ {\r
+#ifdef _OPENMP\r
+ #pragma omp parallel for num_threads(num_threads) schedule(dynamic)\r
+#endif\r
+ // calculate correlation by blocks\r
+ for( k = 0; k < tile_count; k++ )\r
+ {\r
+ int thread_idx = cvGetThreadNum();\r
+ int x = (k%tile_count_x)*blocksize.width;\r
+ int y = (k/tile_count_x)*blocksize.height;\r
+ int i, yofs;\r
+ CvMat sstub, dstub, *src, *dst, temp;\r
+ CvMat* planes[] = { 0, 0, 0, 0 };\r
+ CvMat* _dft_img = dft_img[thread_idx];\r
+ void* _buf = buf[thread_idx];\r
+ CvSize csz = { blocksize.width, blocksize.height }, isz;\r
+ int x0 = x - anchor.x, y0 = y - anchor.y;\r
+ int x1 = MAX( 0, x0 ), y1 = MAX( 0, y0 ), x2, y2;\r
+ csz.width = MIN( csz.width, corr->cols - x );\r
+ csz.height = MIN( csz.height, corr->rows - y );\r
+ isz.width = csz.width + templ->cols - 1;\r
+ isz.height = csz.height + templ->rows - 1;\r
+ x2 = MIN( img->cols, x0 + isz.width );\r
+ y2 = MIN( img->rows, y0 + isz.height );\r
+ \r
+ for( i = 0; i < cn; i++ )\r
+ {\r
+ CvMat dstub1, *dst1;\r
+ yofs = i*dftsize.height;\r
+\r
+ src = cvGetSubRect( img, &sstub, cvRect(x1,y1,x2-x1,y2-y1) );\r
+ dst = cvGetSubRect( _dft_img, &dstub,\r
+ cvRect(0,0,isz.width,isz.height) );\r
+ dst1 = dst;\r
+ \r
+ if( x2 - x1 < isz.width || y2 - y1 < isz.height )\r
+ dst1 = cvGetSubRect( _dft_img, &dstub1,\r
+ cvRect( x1 - x0, y1 - y0, x2 - x1, y2 - y1 ));\r
+\r
+ if( cn > 1 )\r
+ {\r
+ planes[i] = dst1;\r
+ if( depth != max_depth )\r
+ planes[i] = cvInitMatHeader( &temp, y2 - y1, x2 - x1, depth, _buf );\r
+ cvSplit( src, planes[0], planes[1], planes[2], planes[3] );\r
+ src = planes[i];\r
+ planes[i] = 0;\r
+ }\r
+\r
+ if( dst1 != src )\r
+ cvConvert( src, dst1 );\r
+\r
+ if( dst != dst1 )\r
+ cvCopyMakeBorder( dst1, dst, cvPoint(x1 - x0, y1 - y0), borderType );\r
+\r
+ if( dftsize.width > isz.width )\r
+ {\r
+ cvGetSubRect( _dft_img, dst, cvRect(isz.width, 0,\r
+ dftsize.width - isz.width,dftsize.height) );\r
+ cvZero( dst );\r
+ }\r
+\r
+ cvDFT( _dft_img, _dft_img, CV_DXT_FORWARD, isz.height );\r
+ cvGetSubRect( dft_templ, dst,\r
+ cvRect(0,(templ_cn>1?yofs:0),dftsize.width,dftsize.height) );\r
+\r
+ cvMulSpectrums( _dft_img, dst, _dft_img, CV_DXT_MUL_CONJ );\r
+ cvDFT( _dft_img, _dft_img, CV_DXT_INVERSE, csz.height );\r
+\r
+ src = cvGetSubRect( _dft_img, &sstub, cvRect(0,0,csz.width,csz.height) );\r
+ dst = cvGetSubRect( corr, &dstub, cvRect(x,y,csz.width,csz.height) );\r
+\r
+ if( corr_cn > 1 )\r
+ {\r
+ planes[i] = src;\r
+ if( corr_depth != max_depth )\r
+ {\r
+ planes[i] = cvInitMatHeader( &temp, csz.height, csz.width,\r
+ corr_depth, _buf );\r
+ cvConvertScale( src, planes[i], 1, delta );\r
+ }\r
+ cvMerge( planes[0], planes[1], planes[2], planes[3], dst );\r
+ planes[i] = 0; \r
+ }\r
+ else\r
+ {\r
+ if( i == 0 )\r
+ cvConvertScale( src, dst, 1, delta );\r
+ else\r
+ {\r
+ if( max_depth > corr_depth )\r
+ {\r
+ cvInitMatHeader( &temp, csz.height, csz.width,\r
+ corr_depth, _buf );\r
+ cvConvert( src, &temp );\r
+ src = &temp;\r
+ }\r
+ cvAcc( src, dst );\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+ __END__;\r
+\r
+ cvReleaseMat( &dft_templ );\r
+\r
+ for( k = 0; k < num_threads; k++ )\r
+ {\r
+ cvReleaseMat( &dft_img[k] );\r
+ cvFree( &buf[k] );\r
+ }\r
+}\r
+\r
+void\r
+cv::crossCorr( const Mat& img, const Mat& templ, Mat& corr,\r
+ Point anchor, double delta, int borderType )\r
+{\r
+ CvMat _img = img, _templ = templ, _corr = corr;\r
+ icvCrossCorr( &_img, &_templ, &_corr, anchor, delta, borderType );\r
+}\r
+\r
+\r
+/*****************************************************************************************/\r
+\r
+CV_IMPL void\r
+cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method )\r
+{\r
+ CvMat* sum = 0;\r
+ CvMat* sqsum = 0;\r
+ \r
+ CV_FUNCNAME( "cvMatchTemplate" );\r
+\r
+ __BEGIN__;\r
+\r
+ int coi1 = 0, coi2 = 0;\r
+ int depth, cn;\r
+ int i, j, k;\r
+ CvMat stub, *img = (CvMat*)_img;\r
+ CvMat tstub, *templ = (CvMat*)_templ;\r
+ CvMat rstub, *result = (CvMat*)_result;\r
+ CvScalar templ_mean = cvScalarAll(0);\r
+ double templ_norm = 0, templ_sum2 = 0;\r
+ \r
+ int idx = 0, idx2 = 0;\r
+ double *p0, *p1, *p2, *p3;\r
+ double *q0, *q1, *q2, *q3;\r
+ double inv_area;\r
+ int sum_step, sqsum_step;\r
+ int num_type = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :\r
+ method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;\r
+ int is_normed = method == CV_TM_CCORR_NORMED ||\r
+ method == CV_TM_SQDIFF_NORMED ||\r
+ method == CV_TM_CCOEFF_NORMED;\r
+\r
+ CV_CALL( img = cvGetMat( img, &stub, &coi1 ));\r
+ CV_CALL( templ = cvGetMat( templ, &tstub, &coi2 ));\r
+ CV_CALL( result = cvGetMat( result, &rstub ));\r
+\r
+ if( CV_MAT_DEPTH( img->type ) != CV_8U &&\r
+ CV_MAT_DEPTH( img->type ) != CV_32F )\r
+ CV_ERROR( CV_StsUnsupportedFormat,\r
+ "The function supports only 8u and 32f data types" );\r
+\r
+ if( !CV_ARE_TYPES_EQ( img, templ ))\r
+ CV_ERROR( CV_StsUnmatchedSizes, "image and template should have the same type" );\r
+\r
+ if( CV_MAT_TYPE( result->type ) != CV_32FC1 )\r
+ CV_ERROR( CV_StsUnsupportedFormat, "output image should have 32f type" );\r
+\r
+ if( img->rows < templ->rows || img->cols < templ->cols )\r
+ {\r
+ CvMat* t;\r
+ CV_SWAP( img, templ, t );\r
+ }\r
+\r
+ if( result->rows != img->rows - templ->rows + 1 ||\r
+ result->cols != img->cols - templ->cols + 1 )\r
+ CV_ERROR( CV_StsUnmatchedSizes, "output image should be (W - w + 1)x(H - h + 1)" );\r
+\r
+ if( method < CV_TM_SQDIFF || method > CV_TM_CCOEFF_NORMED )\r
+ CV_ERROR( CV_StsBadArg, "unknown comparison method" );\r
+\r
+ depth = CV_MAT_DEPTH(img->type);\r
+ cn = CV_MAT_CN(img->type);\r
+\r
+ /*if( is_normed && cn == 1 && templ->rows > 8 && templ->cols > 8 &&\r
+ img->rows > templ->cols && img->cols > templ->cols )\r
+ {\r
+ CvTemplMatchIPPFunc ipp_func =\r
+ depth == CV_8U ?\r
+ (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_8u32f_C1R_p :\r
+ method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_8u32f_C1R_p :\r
+ (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_8u32f_C1R_p) :\r
+ (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_32f_C1R_p :\r
+ method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_32f_C1R_p :\r
+ (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_32f_C1R_p);\r
+\r
+ if( ipp_func )\r
+ {\r
+ CvSize img_size = cvGetMatSize(img), templ_size = cvGetMatSize(templ);\r
+\r
+ IPPI_CALL( ipp_func( img->data.ptr, img->step ? img->step : CV_STUB_STEP,\r
+ img_size, templ->data.ptr,\r
+ templ->step ? templ->step : CV_STUB_STEP,\r
+ templ_size, result->data.ptr,\r
+ result->step ? result->step : CV_STUB_STEP ));\r
+ for( i = 0; i < result->rows; i++ )\r
+ {\r
+ float* rrow = (float*)(result->data.ptr + i*result->step);\r
+ for( j = 0; j < result->cols; j++ )\r
+ {\r
+ if( fabs(rrow[j]) > 1. )\r
+ rrow[j] = rrow[j] < 0 ? -1.f : 1.f;\r
+ }\r
+ }\r
+ EXIT;\r
+ }\r
+ }*/\r
+\r
+ CV_CALL( icvCrossCorr( img, templ, result ));\r
+\r
+ if( method == CV_TM_CCORR )\r
+ EXIT;\r
+\r
+ inv_area = 1./((double)templ->rows * templ->cols);\r
+\r
+ CV_CALL( sum = cvCreateMat( img->rows + 1, img->cols + 1,\r
+ CV_MAKETYPE( CV_64F, cn )));\r
+ if( method == CV_TM_CCOEFF )\r
+ {\r
+ CV_CALL( cvIntegral( img, sum, 0, 0 ));\r
+ CV_CALL( templ_mean = cvAvg( templ ));\r
+ q0 = q1 = q2 = q3 = 0;\r
+ }\r
+ else\r
+ {\r
+ CvScalar _templ_sdv = cvScalarAll(0);\r
+ CV_CALL( sqsum = cvCreateMat( img->rows + 1, img->cols + 1,\r
+ CV_MAKETYPE( CV_64F, cn )));\r
+ CV_CALL( cvIntegral( img, sum, sqsum, 0 ));\r
+ CV_CALL( cvAvgSdv( templ, &templ_mean, &_templ_sdv ));\r
+\r
+ templ_norm = CV_SQR(_templ_sdv.val[0]) + CV_SQR(_templ_sdv.val[1]) +\r
+ CV_SQR(_templ_sdv.val[2]) + CV_SQR(_templ_sdv.val[3]);\r
+\r
+ if( templ_norm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )\r
+ {\r
+ cvSet( result, cvScalarAll(1.) );\r
+ EXIT;\r
+ }\r
+ \r
+ templ_sum2 = templ_norm +\r
+ CV_SQR(templ_mean.val[0]) + CV_SQR(templ_mean.val[1]) +\r
+ CV_SQR(templ_mean.val[2]) + CV_SQR(templ_mean.val[3]);\r
+\r
+ if( num_type != 1 )\r
+ {\r
+ templ_mean = cvScalarAll(0);\r
+ templ_norm = templ_sum2;\r
+ }\r
+ \r
+ templ_sum2 /= inv_area;\r
+ templ_norm = sqrt(templ_norm);\r
+ templ_norm /= sqrt(inv_area); // care of accuracy here\r
+\r
+ q0 = (double*)sqsum->data.ptr;\r
+ q1 = q0 + templ->cols*cn;\r
+ q2 = (double*)(sqsum->data.ptr + templ->rows*sqsum->step);\r
+ q3 = q2 + templ->cols*cn;\r
+ }\r
+\r
+ p0 = (double*)sum->data.ptr;\r
+ p1 = p0 + templ->cols*cn;\r
+ p2 = (double*)(sum->data.ptr + templ->rows*sum->step);\r
+ p3 = p2 + templ->cols*cn;\r
+\r
+ sum_step = sum ? sum->step / sizeof(double) : 0;\r
+ sqsum_step = sqsum ? sqsum->step / sizeof(double) : 0;\r
+\r
+ for( i = 0; i < result->rows; i++ )\r
+ {\r
+ float* rrow = (float*)(result->data.ptr + i*result->step);\r
+ idx = i * sum_step;\r
+ idx2 = i * sqsum_step;\r
+\r
+ for( j = 0; j < result->cols; j++, idx += cn, idx2 += cn )\r
+ {\r
+ double num = rrow[j], t;\r
+ double wnd_mean2 = 0, wnd_sum2 = 0;\r
+ \r
+ if( num_type == 1 )\r
+ {\r
+ for( k = 0; k < cn; k++ )\r
+ {\r
+ t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];\r
+ wnd_mean2 += CV_SQR(t);\r
+ num -= t*templ_mean.val[k];\r
+ }\r
+\r
+ wnd_mean2 *= inv_area;\r
+ }\r
+\r
+ if( is_normed || num_type == 2 )\r
+ {\r
+ for( k = 0; k < cn; k++ )\r
+ {\r
+ t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];\r
+ wnd_sum2 += t;\r
+ }\r
+\r
+ if( num_type == 2 )\r
+ num = wnd_sum2 - 2*num + templ_sum2;\r
+ }\r
+\r
+ if( is_normed )\r
+ {\r
+ t = sqrt(MAX(wnd_sum2 - wnd_mean2,0))*templ_norm;\r
+ if( t > DBL_EPSILON )\r
+ {\r
+ num /= t;\r
+ if( fabs(num) > 1. )\r
+ num = num > 0 ? 1 : -1;\r
+ }\r
+ else\r
+ num = method != CV_TM_SQDIFF_NORMED || num < DBL_EPSILON ? 0 : 1;\r
+ }\r
+\r
+ rrow[j] = (float)num;\r
+ }\r
+ }\r
+ \r
+ __END__;\r
+\r
+ cvReleaseMat( &sum );\r
+ cvReleaseMat( &sqsum );\r
+}\r
+\r
+void cv::matchTemplate( const Mat& image, const Mat& templ, Mat& result, int method )\r
+{\r
+ result.create( std::abs(image.rows - templ.rows) + 1,\r
+ std::abs(image.cols - templ.cols) + 1, CV_32F );\r
+ CvMat _image = image, _templ = templ, _result = result;\r
+ cvMatchTemplate( &_image, &_templ, &_result, method ); \r
+}\r
+\r
+/* End of file. */\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-
-namespace cv
-{
-
-Mat_<double> getDefaultNewCameraMatrix( const Mat_<double>& A, Size imgsize,
- bool centerPrincipalPoint )
-{
- Mat_<double> Ar(3, 3);
- if( centerPrincipalPoint )
- Ar << A(0,0), 0., (imgsize.width-1)*0.5,
- 0., A(1,1), (imgsize.height-1)*0.5,
- 0., 0., 1.;
- else
- Ar << A(0,0), 0., A(0,2),
- 0., A(1,1), A(1,2),
- 0., 0., 1.;
- return Ar;
-}
-
-void initUndistortRectifyMap( const Mat& _cameraMatrix, const Mat& _distCoeffs,
- const Mat& _R, const Mat& _newCameraMatrix,
- Size size, int m1type, Mat& map1, Mat& map2 )
-{
- if( m1type <= 0 )
- m1type = CV_16SC2;
- CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );
- map1.create( size, m1type );
- if( m1type != CV_32FC2 )
- map2.create( size, m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
- else
- map2.release();
-
- Mat_<double> R = Mat_<double>::eye(3, 3), distCoeffs;
- Mat_<double> A = Mat_<double>(_cameraMatrix), Ar;
-
- if( _newCameraMatrix.data )
- Ar = Mat_<double>(_newCameraMatrix);
- else
- Ar = getDefaultNewCameraMatrix( A, size, true );
-
- if( _R.data )
- R = Mat_<double>(_R);
-
- if( _distCoeffs.data )
- distCoeffs = Mat_<double>(_distCoeffs);
- else
- {
- distCoeffs.create(5, 1);
- distCoeffs = 0.;
- }
-
- CV_Assert( A.size() == Size(3,3) && A.size() == Ar.size() && A.size() == R.size() );
- Mat_<double> iR = (Ar*R).inv(DECOMP_LU);
- const double* ir = &iR(0,0);
-
- double u0 = A(0, 2), v0 = A(1, 2);
- double fx = A(0, 0), fy = A(1, 1);
-
- CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(1, 5) ||
- distCoeffs.size() == Size(4, 1) || distCoeffs.size() == Size(5, 1));
-
- if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
- distCoeffs = distCoeffs.t();
-
- double k1 = ((double*)distCoeffs.data)[0];
- double k2 = ((double*)distCoeffs.data)[1];
- double p1 = ((double*)distCoeffs.data)[2];
- double p2 = ((double*)distCoeffs.data)[3];
- double k3 = distCoeffs.cols + distCoeffs.rows - 1 == 5 ? ((double*)distCoeffs.data)[4] : 0.;
-
- for( int i = 0; i < size.height; i++ )
- {
- float* m1f = (float*)(map1.data + map1.step*i);
- float* m2f = (float*)(map2.data + map2.step*i);
- short* m1 = (short*)m1f;
- ushort* m2 = (ushort*)m2f;
- double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];
-
- for( int j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )
- {
- double w = 1./_w, x = _x*w, y = _y*w;
- double x2 = x*x, y2 = y*y;
- double r2 = x2 + y2, _2xy = 2*x*y;
- double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2;
- double u = fx*(x*kr + p1*_2xy + p2*(r2 + 2*x2)) + u0;
- double v = fy*(y*kr + p1*(r2 + 2*y2) + p2*_2xy) + v0;
- if( m1type == CV_16SC2 )
- {
- int iu = saturate_cast<int>(u*INTER_TAB_SIZE);
- int iv = saturate_cast<int>(v*INTER_TAB_SIZE);
- m1[j*2] = (short)(iu >> INTER_BITS);
- m1[j*2+1] = (short)(iv >> INTER_BITS);
- m2[j] = (ushort)((iv & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (iu & (INTER_TAB_SIZE-1)));
- }
- else if( m1type == CV_32FC1 )
- {
- m1f[j] = (float)u;
- m2f[j] = (float)v;
- }
- else
- {
- m1f[j*2] = (float)u;
- m1f[j*2+1] = (float)v;
- }
- }
- }
-}
-
-
-void undistort( const Mat& src, Mat& dst, const Mat& _cameraMatrix,
- const Mat& _distCoeffs, const Mat& _newCameraMatrix )
-{
- dst.create( src.size(), src.type() );
-
- int stripe_size0 = std::min((1 << 12)/std::max(src.cols, 1), src.rows);
- Mat map1(stripe_size0, src.cols, CV_16SC2), map2(stripe_size0, src.cols, CV_16UC1);
-
- Mat_<double> A, distCoeffs, Ar, I = Mat_<double>::eye(3,3);
-
- _cameraMatrix.convertTo(A, CV_64F);
- if( _distCoeffs.data )
- distCoeffs = Mat_<double>(_distCoeffs);
- else
- {
- distCoeffs.create(5, 1);
- distCoeffs = 0.;
- }
-
- if( _newCameraMatrix.data )
- Ar = Mat_<double>(_newCameraMatrix);
- else
- Ar = getDefaultNewCameraMatrix(A, src.size() );
-
- double v0 = Ar(1, 2);
- for( int y = 0; y < src.rows; y += stripe_size0 )
- {
- int stripe_size = std::min( stripe_size0, src.rows - y );
- Ar(1, 2) = v0 - y;
- Mat map1_part = map1.rowRange(0, stripe_size),
- map2_part = map2.rowRange(0, stripe_size),
- dst_part = dst.rowRange(y, y + stripe_size);
-
- initUndistortRectifyMap( A, distCoeffs, I, Ar, Size(src.cols, stripe_size),
- map1_part.type(), map1_part, map2_part );
- remap( src, dst_part, map1_part, map2_part, INTER_LINEAR, BORDER_REPLICATE );
- }
-}
-
-}
-
-
-CV_IMPL void
-cvUndistort2( const CvArr* srcarr, CvArr* dstarr, const CvMat* Aarr, const CvMat* dist_coeffs )
-{
- cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
- cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs);
-
- CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
- cv::undistort( src, dst, A, distCoeffs, cv::Mat() );
-}
-
-
-CV_IMPL void cvInitUndistortMap( const CvMat* Aarr, const CvMat* dist_coeffs,
- CvArr* mapxarr, CvArr* mapyarr )
-{
- cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs);
- cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;
-
- if( mapyarr )
- mapy0 = mapy = cv::cvarrToMat(mapyarr);
-
- cv::initUndistortRectifyMap( A, distCoeffs, cv::Mat(), A,
- mapx.size(), mapx.type(), mapx, mapy );
- CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
-}
-
-void
-cvInitUndistortRectifyMap( const CvMat* Aarr, const CvMat* dist_coeffs,
- const CvMat *Rarr, const CvMat* ArArr, CvArr* mapxarr, CvArr* mapyarr )
-{
- cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs, R, Ar;
- cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;
-
- if( mapyarr )
- mapy0 = mapy = cv::cvarrToMat(mapyarr);
-
- if( dist_coeffs )
- distCoeffs = cv::cvarrToMat(dist_coeffs);
- if( Rarr )
- R = cv::cvarrToMat(Rarr);
- if( ArArr )
- Ar = cv::cvarrToMat(ArArr);
-
- cv::initUndistortRectifyMap( A, distCoeffs, R, Ar, mapx.size(), mapx.type(), mapx, mapy );
- CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
-}
-
-
-void
-cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
- const CvMat* _distCoeffs,
- const CvMat* _R, const CvMat* _P )
-{
- CV_FUNCNAME( "cvUndistortPoints" );
-
- __BEGIN__;
-
- double A[3][3], RR[3][3], k[5]={0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
- CvMat _A=cvMat(3, 3, CV_64F, A), _Dk;
- CvMat _RR=cvMat(3, 3, CV_64F, RR);
- const CvPoint2D32f* srcf;
- const CvPoint2D64f* srcd;
- CvPoint2D32f* dstf;
- CvPoint2D64f* dstd;
- int stype, dtype;
- int sstep, dstep;
- int i, j, n;
-
- CV_ASSERT( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
- (_src->rows == 1 || _src->cols == 1) &&
- (_dst->rows == 1 || _dst->cols == 1) &&
- CV_ARE_SIZES_EQ(_src, _dst) &&
- (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
- (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));
-
- CV_ASSERT( CV_IS_MAT(_cameraMatrix) && CV_IS_MAT(_distCoeffs) &&
- _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 &&
- (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
- (_distCoeffs->rows*_distCoeffs->cols == 4 ||
- _distCoeffs->rows*_distCoeffs->cols == 5) );
- _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
- CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);
- cvConvert( _cameraMatrix, &_A );
- cvConvert( _distCoeffs, &_Dk );
-
- if( _R )
- {
- CV_ASSERT( CV_IS_MAT(_R) && _R->rows == 3 && _R->cols == 3 );
- cvConvert( _R, &_RR );
- }
- else
- cvSetIdentity(&_RR);
-
- if( _P )
- {
- double PP[3][3];
- CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);
- CV_ASSERT( CV_IS_MAT(_P) && _P->rows == 3 && (_P->cols == 3 || _P->cols == 4));
- cvConvert( cvGetCols(_P, &_P3x3, 0, 3), &_PP );
- cvMatMul( &_PP, &_RR, &_RR );
- }
-
- srcf = (const CvPoint2D32f*)_src->data.ptr;
- srcd = (const CvPoint2D64f*)_src->data.ptr;
- dstf = (CvPoint2D32f*)_dst->data.ptr;
- dstd = (CvPoint2D64f*)_dst->data.ptr;
- stype = CV_MAT_TYPE(_src->type);
- dtype = CV_MAT_TYPE(_dst->type);
- sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
- dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);
-
- n = _src->rows + _src->cols - 1;
-
- fx = A[0][0];
- fy = A[1][1];
- ifx = 1./fx;
- ify = 1./fy;
- cx = A[0][2];
- cy = A[1][2];
-
- for( i = 0; i < n; i++ )
- {
- double x, y, x0, y0;
- if( stype == CV_32FC2 )
- {
- x = srcf[i*sstep].x;
- y = srcf[i*sstep].y;
- }
- else
- {
- x = srcd[i*sstep].x;
- y = srcd[i*sstep].y;
- }
-
- x0 = x = (x - cx)*ifx;
- y0 = y = (y - cy)*ify;
-
- // compensate distortion iteratively
- for( j = 0; j < 5; j++ )
- {
- double r2 = x*x + y*y;
- double icdist = 1./(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
- double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
- double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
- x = (x0 - deltaX)*icdist;
- y = (y0 - deltaY)*icdist;
- }
-
- double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];
- double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];
- double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);
- x = xx*ww;
- y = yy*ww;
-
- if( dtype == CV_32FC2 )
- {
- dstf[i*dstep].x = (float)x;
- dstf[i*dstep].y = (float)y;
- }
- else
- {
- dstd[i*dstep].x = x;
- dstd[i*dstep].y = y;
- }
- }
-
- __END__;
-}
-
-/* End of file */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cv.h"\r
+\r
+namespace cv\r
+{\r
+\r
+Mat getDefaultNewCameraMatrix( const Mat& cameraMatrix, Size imgsize,\r
+ bool centerPrincipalPoint )\r
+{\r
+ if( !centerPrincipalPoint && cameraMatrix.type() == CV_64F )\r
+ return cameraMatrix;\r
+ \r
+ Mat newCameraMatrix;\r
+ cameraMatrix.convertTo(newCameraMatrix, CV_64F);\r
+ if( centerPrincipalPoint )\r
+ {\r
+ ((double*)newCameraMatrix.data)[2] = (imgsize.width-1)*0.5;\r
+ ((double*)newCameraMatrix.data)[5] = (imgsize.height-1)*0.5;\r
+ }\r
+ return newCameraMatrix;\r
+}\r
+\r
+void initUndistortRectifyMap( const Mat& _cameraMatrix, const Mat& _distCoeffs,\r
+ const Mat& _R, const Mat& _newCameraMatrix,\r
+ Size size, int m1type, Mat& map1, Mat& map2 )\r
+{\r
+ if( m1type <= 0 )\r
+ m1type = CV_16SC2;\r
+ CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );\r
+ map1.create( size, m1type );\r
+ if( m1type != CV_32FC2 )\r
+ map2.create( size, m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );\r
+ else\r
+ map2.release();\r
+\r
+ Mat_<double> R = Mat_<double>::eye(3, 3), distCoeffs;\r
+ Mat_<double> A = Mat_<double>(_cameraMatrix), Ar;\r
+\r
+ if( _newCameraMatrix.data )\r
+ Ar = Mat_<double>(_newCameraMatrix);\r
+ else\r
+ Ar = getDefaultNewCameraMatrix( A, size, true );\r
+\r
+ if( _R.data )\r
+ R = Mat_<double>(_R);\r
+\r
+ if( _distCoeffs.data )\r
+ distCoeffs = Mat_<double>(_distCoeffs);\r
+ else\r
+ {\r
+ distCoeffs.create(5, 1);\r
+ distCoeffs = 0.;\r
+ }\r
+\r
+ CV_Assert( A.size() == Size(3,3) && A.size() == Ar.size() && A.size() == R.size() );\r
+ Mat_<double> iR = (Ar*R).inv(DECOMP_LU);\r
+ const double* ir = &iR(0,0);\r
+\r
+ double u0 = A(0, 2), v0 = A(1, 2);\r
+ double fx = A(0, 0), fy = A(1, 1);\r
+\r
+ CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(1, 5) ||\r
+ distCoeffs.size() == Size(4, 1) || distCoeffs.size() == Size(5, 1));\r
+\r
+ if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )\r
+ distCoeffs = distCoeffs.t();\r
+\r
+ double k1 = ((double*)distCoeffs.data)[0];\r
+ double k2 = ((double*)distCoeffs.data)[1];\r
+ double p1 = ((double*)distCoeffs.data)[2];\r
+ double p2 = ((double*)distCoeffs.data)[3];\r
+ double k3 = distCoeffs.cols + distCoeffs.rows - 1 == 5 ? ((double*)distCoeffs.data)[4] : 0.;\r
+\r
+ for( int i = 0; i < size.height; i++ )\r
+ {\r
+ float* m1f = (float*)(map1.data + map1.step*i);\r
+ float* m2f = (float*)(map2.data + map2.step*i);\r
+ short* m1 = (short*)m1f;\r
+ ushort* m2 = (ushort*)m2f;\r
+ double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];\r
+\r
+ for( int j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )\r
+ {\r
+ double w = 1./_w, x = _x*w, y = _y*w;\r
+ double x2 = x*x, y2 = y*y;\r
+ double r2 = x2 + y2, _2xy = 2*x*y;\r
+ double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2;\r
+ double u = fx*(x*kr + p1*_2xy + p2*(r2 + 2*x2)) + u0;\r
+ double v = fy*(y*kr + p1*(r2 + 2*y2) + p2*_2xy) + v0;\r
+ if( m1type == CV_16SC2 )\r
+ {\r
+ int iu = saturate_cast<int>(u*INTER_TAB_SIZE);\r
+ int iv = saturate_cast<int>(v*INTER_TAB_SIZE);\r
+ m1[j*2] = (short)(iu >> INTER_BITS);\r
+ m1[j*2+1] = (short)(iv >> INTER_BITS);\r
+ m2[j] = (ushort)((iv & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (iu & (INTER_TAB_SIZE-1)));\r
+ }\r
+ else if( m1type == CV_32FC1 )\r
+ {\r
+ m1f[j] = (float)u;\r
+ m2f[j] = (float)v;\r
+ }\r
+ else\r
+ {\r
+ m1f[j*2] = (float)u;\r
+ m1f[j*2+1] = (float)v;\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
+\r
+void undistort( const Mat& src, Mat& dst, const Mat& _cameraMatrix,\r
+ const Mat& _distCoeffs, const Mat& _newCameraMatrix )\r
+{\r
+ dst.create( src.size(), src.type() );\r
+\r
+ int stripe_size0 = std::min((1 << 12)/std::max(src.cols, 1), src.rows);\r
+ Mat map1(stripe_size0, src.cols, CV_16SC2), map2(stripe_size0, src.cols, CV_16UC1);\r
+\r
+ Mat_<double> A, distCoeffs, Ar, I = Mat_<double>::eye(3,3);\r
+\r
+ _cameraMatrix.convertTo(A, CV_64F);\r
+ if( _distCoeffs.data )\r
+ distCoeffs = Mat_<double>(_distCoeffs);\r
+ else\r
+ {\r
+ distCoeffs.create(5, 1);\r
+ distCoeffs = 0.;\r
+ }\r
+\r
+ if( _newCameraMatrix.data )\r
+ _newCameraMatrix.convertTo(Ar, CV_64F);\r
+ else\r
+ A.copyTo(Ar);\r
+\r
+ double v0 = Ar(1, 2);\r
+ for( int y = 0; y < src.rows; y += stripe_size0 )\r
+ {\r
+ int stripe_size = std::min( stripe_size0, src.rows - y );\r
+ Ar(1, 2) = v0 - y;\r
+ Mat map1_part = map1.rowRange(0, stripe_size),\r
+ map2_part = map2.rowRange(0, stripe_size),\r
+ dst_part = dst.rowRange(y, y + stripe_size);\r
+\r
+ initUndistortRectifyMap( A, distCoeffs, I, Ar, Size(src.cols, stripe_size),\r
+ map1_part.type(), map1_part, map2_part );\r
+ remap( src, dst_part, map1_part, map2_part, INTER_LINEAR, BORDER_REPLICATE );\r
+ }\r
+}\r
+\r
+}\r
+\r
+\r
+CV_IMPL void\r
+cvUndistort2( const CvArr* srcarr, CvArr* dstarr, const CvMat* Aarr, const CvMat* dist_coeffs )\r
+{\r
+ cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;\r
+ cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs);\r
+\r
+ CV_Assert( src.size() == dst.size() && src.type() == dst.type() );\r
+ cv::undistort( src, dst, A, distCoeffs, cv::Mat() );\r
+}\r
+\r
+\r
+CV_IMPL void cvInitUndistortMap( const CvMat* Aarr, const CvMat* dist_coeffs,\r
+ CvArr* mapxarr, CvArr* mapyarr )\r
+{\r
+ cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs);\r
+ cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;\r
+\r
+ if( mapyarr )\r
+ mapy0 = mapy = cv::cvarrToMat(mapyarr);\r
+\r
+ cv::initUndistortRectifyMap( A, distCoeffs, cv::Mat(), A,\r
+ mapx.size(), mapx.type(), mapx, mapy );\r
+ CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );\r
+}\r
+\r
+void\r
+cvInitUndistortRectifyMap( const CvMat* Aarr, const CvMat* dist_coeffs,\r
+ const CvMat *Rarr, const CvMat* ArArr, CvArr* mapxarr, CvArr* mapyarr )\r
+{\r
+ cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs, R, Ar;\r
+ cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;\r
+\r
+ if( mapyarr )\r
+ mapy0 = mapy = cv::cvarrToMat(mapyarr);\r
+\r
+ if( dist_coeffs )\r
+ distCoeffs = cv::cvarrToMat(dist_coeffs);\r
+ if( Rarr )\r
+ R = cv::cvarrToMat(Rarr);\r
+ if( ArArr )\r
+ Ar = cv::cvarrToMat(ArArr);\r
+\r
+ cv::initUndistortRectifyMap( A, distCoeffs, R, Ar, mapx.size(), mapx.type(), mapx, mapy );\r
+ CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );\r
+}\r
+\r
+\r
+void\r
+cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,\r
+ const CvMat* _distCoeffs,\r
+ const CvMat* _R, const CvMat* _P )\r
+{\r
+ CV_FUNCNAME( "cvUndistortPoints" );\r
+\r
+ __BEGIN__;\r
+\r
+ double A[3][3], RR[3][3], k[5]={0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;\r
+ CvMat _A=cvMat(3, 3, CV_64F, A), _Dk;\r
+ CvMat _RR=cvMat(3, 3, CV_64F, RR);\r
+ const CvPoint2D32f* srcf;\r
+ const CvPoint2D64f* srcd;\r
+ CvPoint2D32f* dstf;\r
+ CvPoint2D64f* dstd;\r
+ int stype, dtype;\r
+ int sstep, dstep;\r
+ int i, j, n;\r
+\r
+ CV_ASSERT( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&\r
+ (_src->rows == 1 || _src->cols == 1) &&\r
+ (_dst->rows == 1 || _dst->cols == 1) &&\r
+ CV_ARE_SIZES_EQ(_src, _dst) &&\r
+ (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&\r
+ (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));\r
+\r
+ CV_ASSERT( CV_IS_MAT(_cameraMatrix) && CV_IS_MAT(_distCoeffs) &&\r
+ _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 &&\r
+ (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&\r
+ (_distCoeffs->rows*_distCoeffs->cols == 4 ||\r
+ _distCoeffs->rows*_distCoeffs->cols == 5) );\r
+ _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,\r
+ CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);\r
+ cvConvert( _cameraMatrix, &_A );\r
+ cvConvert( _distCoeffs, &_Dk );\r
+\r
+ if( _R )\r
+ {\r
+ CV_ASSERT( CV_IS_MAT(_R) && _R->rows == 3 && _R->cols == 3 );\r
+ cvConvert( _R, &_RR );\r
+ }\r
+ else\r
+ cvSetIdentity(&_RR);\r
+\r
+ if( _P )\r
+ {\r
+ double PP[3][3];\r
+ CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);\r
+ CV_ASSERT( CV_IS_MAT(_P) && _P->rows == 3 && (_P->cols == 3 || _P->cols == 4));\r
+ cvConvert( cvGetCols(_P, &_P3x3, 0, 3), &_PP );\r
+ cvMatMul( &_PP, &_RR, &_RR );\r
+ }\r
+\r
+ srcf = (const CvPoint2D32f*)_src->data.ptr;\r
+ srcd = (const CvPoint2D64f*)_src->data.ptr;\r
+ dstf = (CvPoint2D32f*)_dst->data.ptr;\r
+ dstd = (CvPoint2D64f*)_dst->data.ptr;\r
+ stype = CV_MAT_TYPE(_src->type);\r
+ dtype = CV_MAT_TYPE(_dst->type);\r
+ sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);\r
+ dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);\r
+\r
+ n = _src->rows + _src->cols - 1;\r
+\r
+ fx = A[0][0];\r
+ fy = A[1][1];\r
+ ifx = 1./fx;\r
+ ify = 1./fy;\r
+ cx = A[0][2];\r
+ cy = A[1][2];\r
+\r
+ for( i = 0; i < n; i++ )\r
+ {\r
+ double x, y, x0, y0;\r
+ if( stype == CV_32FC2 )\r
+ {\r
+ x = srcf[i*sstep].x;\r
+ y = srcf[i*sstep].y;\r
+ }\r
+ else\r
+ {\r
+ x = srcd[i*sstep].x;\r
+ y = srcd[i*sstep].y;\r
+ }\r
+\r
+ x0 = x = (x - cx)*ifx;\r
+ y0 = y = (y - cy)*ify;\r
+\r
+ // compensate distortion iteratively\r
+ for( j = 0; j < 5; j++ )\r
+ {\r
+ double r2 = x*x + y*y;\r
+ double icdist = 1./(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);\r
+ double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);\r
+ double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;\r
+ x = (x0 - deltaX)*icdist;\r
+ y = (y0 - deltaY)*icdist;\r
+ }\r
+\r
+ double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];\r
+ double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];\r
+ double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);\r
+ x = xx*ww;\r
+ y = yy*ww;\r
+\r
+ if( dtype == CV_32FC2 )\r
+ {\r
+ dstf[i*dstep].x = (float)x;\r
+ dstf[i*dstep].y = (float)y;\r
+ }\r
+ else\r
+ {\r
+ dstd[i*dstep].x = x;\r
+ dstd[i*dstep].y = y;\r
+ }\r
+ }\r
+\r
+ __END__;\r
+}\r
+\r
+\r
+void cv::undistortPoints( const Vector<Point2f>& src, Vector<Point2f>& dst,\r
+ const Mat& cameraMatrix, const Mat& distCoeffs,\r
+ const Mat& R, const Mat& P)\r
+{\r
+ dst.resize(src.size());\r
+ CvMat _src = src, _dst = dst;\r
+ CvMat _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;\r
+ CvMat _R, _P, *pR=0, *pP=0;\r
+ if( R.data )\r
+ pR = &(_R = R);\r
+ if( P.data )\r
+ pP = &(_P = P);\r
+ cvUndistortPoints(&_src, &_dst, &_cameraMatrix, &_distCoeffs, pR, pP);\r
+}\r
+\r
+/* End of file */\r