-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef _CV_HPP_
-#define _CV_HPP_
-
-#ifdef __cplusplus
-
-namespace cv
-{
-
-enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT,
- BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_REFLECT_101=IPL_BORDER_REFLECT_101,
- BORDER_REFLECT101=BORDER_REFLECT_101, BORDER_WRAP=IPL_BORDER_WRAP,
- BORDER_TRANSPARENT, BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 };
-
-CV_EXPORTS int borderInterpolate( int p, int len, int borderType );
-
-struct CV_EXPORTS BaseRowFilter
-{
- BaseRowFilter();
- virtual ~BaseRowFilter();
- virtual void operator()(const uchar* src, uchar* dst,
- int width, int cn) = 0;
- int ksize, anchor;
-};
-
-
-struct CV_EXPORTS BaseColumnFilter
-{
- BaseColumnFilter();
- virtual ~BaseColumnFilter();
- virtual void operator()(const uchar** src, uchar* dst, int dststep,
- int dstcount, int width) = 0;
- virtual void reset();
- int ksize, anchor;
-};
-
-
-struct CV_EXPORTS BaseFilter
-{
- BaseFilter();
- virtual ~BaseFilter();
- virtual void operator()(const uchar** src, uchar* dst, int dststep,
- int dstcount, int width, int cn) = 0;
- virtual void reset();
- Size ksize;
- Point anchor;
-};
-
-
-struct CV_EXPORTS FilterEngine
-{
- FilterEngine();
- FilterEngine(const Ptr<BaseFilter>& _filter2D,
- const Ptr<BaseRowFilter>& _rowFilter,
- const Ptr<BaseColumnFilter>& _columnFilter,
- int srcType, int dstType, int bufType,
- int _rowBorderType=BORDER_REPLICATE,
- int _columnBorderType=-1,
- const Scalar& _borderValue=Scalar());
- virtual ~FilterEngine();
- void init(const Ptr<BaseFilter>& _filter2D,
- const Ptr<BaseRowFilter>& _rowFilter,
- const Ptr<BaseColumnFilter>& _columnFilter,
- int srcType, int dstType, int bufType,
- int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,
- const Scalar& _borderValue=Scalar());
- virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);
- virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),
- bool isolated=false, int maxBufRows=-1);
- virtual int proceed(const uchar* src, int srcStep, int srcCount,
- uchar* dst, int dstStep);
- virtual void apply( const Mat& src, Mat& dst,
- const Rect& srcRoi=Rect(0,0,-1,-1),
- Point dstOfs=Point(0,0),
- bool isolated=false);
- bool isSeparable() const { return filter2D.obj == 0; }
- int remainingInputRows() const;
- int remainingOutputRows() const;
-
- int srcType, dstType, bufType;
- Size ksize;
- Point anchor;
- int maxWidth;
- Size wholeSize;
- Rect roi;
- int dx1, dx2;
- int rowBorderType, columnBorderType;
- Vector<int> borderTab;
- int borderElemSize;
- Vector<uchar> ringBuf;
- Vector<uchar> srcRow;
- Vector<uchar> constBorderValue;
- Vector<uchar> constBorderRow;
- int bufStep, startY, startY0, endY, rowCount, dstY;
- Vector<uchar*> rows;
-
- Ptr<BaseFilter> filter2D;
- Ptr<BaseRowFilter> rowFilter;
- Ptr<BaseColumnFilter> columnFilter;
-};
-
-enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2,
- KERNEL_SMOOTH=4, KERNEL_INTEGER=8 };
-
-CV_EXPORTS int getKernelType(const Mat& kernel, Point anchor);
-
-CV_EXPORTS Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType,
- const Mat& kernel, int anchor,
- int symmetryType);
-
-CV_EXPORTS Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType,
- const Mat& kernel, int anchor,
- int symmetryType, double delta=0,
- int bits=0);
-
-CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,
- const Mat& kernel,
- Point anchor=Point(-1,-1),
- double delta=0, int bits=0);
-
-CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,
- const Mat& rowKernel, const Mat& columnKernel,
- Point _anchor=Point(-1,-1), double delta=0,
- int _rowBorderType=BORDER_DEFAULT,
- int _columnBorderType=-1,
- const Scalar& _borderValue=Scalar());
-
-CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,
- const Mat& kernel, Point _anchor=Point(-1,-1),
- double delta=0, int _rowBorderType=BORDER_DEFAULT,
- int _columnBorderType=-1, const Scalar& _borderValue=Scalar());
-
-CV_EXPORTS Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );
-
-CV_EXPORTS Ptr<FilterEngine> createGaussianFilter( int type, Size ksize,
- double sigma1, double sigma2=0,
- int borderType=BORDER_DEFAULT);
-
-CV_EXPORTS void getDerivKernels( Mat& kx, Mat& ky, int dx, int dy, int ksize,
- bool normalize=false, int ktype=CV_32F );
-
-CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,
- int dx, int dy, int ksize,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,
- int ksize, int anchor=-1);
-CV_EXPORTS Ptr<BaseColumnFilter> getColumnSumFilter(int sumType, int dstType,
- int ksize, int anchor=-1,
- double scale=1);
-CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksize,
- Point anchor=Point(-1,-1),
- bool normalize=true,
- int borderType=BORDER_DEFAULT);
-
-enum { MORPH_ERODE=0, MORPH_DILATE=1, MORPH_OPEN=2, MORPH_CLOSE=3,
- MORPH_GRADIENT=4, MORPH_TOPHAT=5, MORPH_BLACKHAT=6 };
-
-CV_EXPORTS Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1);
-CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1);
-CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel,
- Point anchor=Point(-1,-1));
-
-static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }
-
-CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, const Mat& kernel,
- Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT,
- int _columnBorderType=-1,
- const Scalar& _borderValue=morphologyDefaultBorderValue());
-
-enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };
-CV_EXPORTS Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1));
-
-CV_EXPORTS void copyMakeBorder( const Mat& src, Mat& dst,
- int top, int bottom, int left, int right,
- int borderType );
-
-CV_EXPORTS void medianBlur( const Mat& src, Mat& dst, int ksize );
-CV_EXPORTS void GaussianBlur( const Mat& src, Mat& dst, Size ksize,
- double sigma1, double sigma2=0,
- int borderType=BORDER_DEFAULT );
-CV_EXPORTS void bilateralFilter( const Mat& src, Mat& dst, int d,
- double sigmaColor, double sigmaSpace,
- int borderType=BORDER_DEFAULT );
-CV_EXPORTS void boxFilter( const Mat& src, Mat& dst, int ddepth,
- Size ksize, Point anchor=Point(-1,-1),
- bool normalize=true,
- int borderType=BORDER_DEFAULT );
-static inline void blur( const Mat& src, Mat& dst,
- Size ksize, Point anchor=Point(-1,-1),
- int borderType=BORDER_DEFAULT )
-{
- boxFilter( src, dst, -1, ksize, anchor, true, borderType );
-}
-
-CV_EXPORTS void filter2D( const Mat& src, Mat& dst, int ddepth,
- const Mat& kernel, Point anchor=Point(-1,-1),
- double delta=0, int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void sepFilter2D( const Mat& src, Mat& dst, int ddepth,
- const Mat& kernelX, const Mat& kernelY,
- Point anchor=Point(-1,-1),
- double delta=0, int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void Sobel( const Mat& src, Mat& dst, int ddepth,
- int dx, int dy, int ksize=3,
- double scale=1, double delta=0,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void Scharr( const Mat& src, Mat& dst, int ddepth,
- int dx, int dy, double scale=1, double delta=0,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void Laplacian( const Mat& src, Mat& dst, int ddepth,
- int ksize=1, double scale=1, double delta=0,
- int borderType=BORDER_DEFAULT );
-
-CV_EXPORTS void erode( const Mat& src, Mat& dst, const Mat& kernel,
- Point anchor=Point(-1,-1), int iterations=1,
- int borderType=BORDER_CONSTANT,
- const Scalar& borderValue=morphologyDefaultBorderValue() );
-CV_EXPORTS void dilate( const Mat& src, Mat& dst, const Mat& kernel,
- Point anchor=Point(-1,-1), int iterations=1,
- int borderType=BORDER_CONSTANT,
- const Scalar& borderValue=morphologyDefaultBorderValue() );
-CV_EXPORTS void morphologyEx( const Mat& src, Mat& dst, int op, const Mat& kernel,
- Point anchor=Point(-1,-1), int iterations=1,
- int borderType=BORDER_CONSTANT,
- const Scalar& borderValue=morphologyDefaultBorderValue() );
-
-enum { INTER_NEAREST=0, INTER_LINEAR=1, INTER_CUBIC=2, INTER_AREA=3,
- INTER_LANCZOS4=4, INTER_MAX=7, WARP_INVERSE_MAP=16 };
-
-CV_EXPORTS void resize( const Mat& src, Mat& dst,
- Size dsize=Size(), double fx=0, double fy=0,
- int interpolation=INTER_LINEAR );
-
-CV_EXPORTS void warpAffine( const Mat& src, Mat& dst,
- const Mat& M, Size dsize,
- int flags=INTER_LINEAR,
- int borderMode=BORDER_CONSTANT,
- const Scalar& borderValue=Scalar());
-CV_EXPORTS void warpPerspective( const Mat& src, Mat& dst,
- const Mat& M, Size dsize,
- int flags=INTER_LINEAR,
- int borderMode=BORDER_CONSTANT,
- const Scalar& borderValue=Scalar());
-
-CV_EXPORTS void remap( const Mat& src, Mat& dst, const Mat& map1, const Mat& map2,
- int interpolation, int borderMode=BORDER_CONSTANT,
- const Scalar& borderValue=Scalar());
-
-CV_EXPORTS void convertMaps( const Mat& map1, const Mat& map2, Mat& dstmap1, Mat& dstmap2,
- int dstmap1type, bool nninterpolation=false );
-
-CV_EXPORTS Mat getRotationMatrix2D( Point2f center, double angle, double scale );
-CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] );
-CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] );
-
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////
-
-struct CV_EXPORTS CvLevMarq
-{
- CvLevMarq();
- CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- ~CvLevMarq();
- void init( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
- bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
-
- void clear();
- void step();
- enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
-
- CvMat* mask;
- CvMat* prevParam;
- CvMat* param;
- CvMat* J;
- CvMat* err;
- CvMat* JtJ;
- CvMat* JtJN;
- CvMat* JtErr;
- CvMat* JtJV;
- CvMat* JtJW;
- double prevErrNorm, errNorm;
- int lambdaLg10;
- CvTermCriteria criteria;
- int state;
- int iters;
- bool completeSymmFlag;
-};
-
-
-// 2009-01-12, Xavier Delacour <xavier.delacour@gmail.com>
-
-struct lsh_hash {
- int h1, h2;
-};
-
-struct CvLSHOperations {
- virtual ~CvLSHOperations() {}
-
- virtual int vector_add(const void* data) = 0;
- virtual void vector_remove(int i) = 0;
- virtual const void* vector_lookup(int i) = 0;
- virtual void vector_reserve(int n) = 0;
- virtual unsigned int vector_count() = 0;
-
- virtual void hash_insert(lsh_hash h, int l, int i) = 0;
- virtual void hash_remove(lsh_hash h, int l, int i) = 0;
- virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0;
-};
-
-
-#endif /* __cplusplus */
-
-#endif /* _CV_HPP_ */
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#ifndef _CV_HPP_\r
+#define _CV_HPP_\r
+\r
+#ifdef __cplusplus\r
+\r
+namespace cv\r
+{\r
+\r
+enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT,\r
+ BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_REFLECT_101=IPL_BORDER_REFLECT_101,\r
+ BORDER_REFLECT101=BORDER_REFLECT_101, BORDER_WRAP=IPL_BORDER_WRAP,\r
+ BORDER_TRANSPARENT, BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 };\r
+\r
+CV_EXPORTS int borderInterpolate( int p, int len, int borderType );\r
+\r
+struct CV_EXPORTS BaseRowFilter\r
+{\r
+ BaseRowFilter();\r
+ virtual ~BaseRowFilter();\r
+ virtual void operator()(const uchar* src, uchar* dst,\r
+ int width, int cn) = 0;\r
+ int ksize, anchor;\r
+};\r
+\r
+\r
+struct CV_EXPORTS BaseColumnFilter\r
+{\r
+ BaseColumnFilter();\r
+ virtual ~BaseColumnFilter();\r
+ virtual void operator()(const uchar** src, uchar* dst, int dststep,\r
+ int dstcount, int width) = 0;\r
+ virtual void reset();\r
+ int ksize, anchor;\r
+};\r
+\r
+\r
+struct CV_EXPORTS BaseFilter\r
+{\r
+ BaseFilter();\r
+ virtual ~BaseFilter();\r
+ virtual void operator()(const uchar** src, uchar* dst, int dststep,\r
+ int dstcount, int width, int cn) = 0;\r
+ virtual void reset();\r
+ Size ksize;\r
+ Point anchor;\r
+};\r
+\r
+\r
+struct CV_EXPORTS FilterEngine\r
+{\r
+ FilterEngine();\r
+ FilterEngine(const Ptr<BaseFilter>& _filter2D,\r
+ const Ptr<BaseRowFilter>& _rowFilter,\r
+ const Ptr<BaseColumnFilter>& _columnFilter,\r
+ int srcType, int dstType, int bufType,\r
+ int _rowBorderType=BORDER_REPLICATE,\r
+ int _columnBorderType=-1,\r
+ const Scalar& _borderValue=Scalar());\r
+ virtual ~FilterEngine();\r
+ void init(const Ptr<BaseFilter>& _filter2D,\r
+ const Ptr<BaseRowFilter>& _rowFilter,\r
+ const Ptr<BaseColumnFilter>& _columnFilter,\r
+ int srcType, int dstType, int bufType,\r
+ int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,\r
+ const Scalar& _borderValue=Scalar());\r
+ virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);\r
+ virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),\r
+ bool isolated=false, int maxBufRows=-1);\r
+ virtual int proceed(const uchar* src, int srcStep, int srcCount,\r
+ uchar* dst, int dstStep);\r
+ virtual void apply( const Mat& src, Mat& dst,\r
+ const Rect& srcRoi=Rect(0,0,-1,-1),\r
+ Point dstOfs=Point(0,0),\r
+ bool isolated=false);\r
+ bool isSeparable() const { return filter2D.obj == 0; }\r
+ int remainingInputRows() const;\r
+ int remainingOutputRows() const;\r
+ \r
+ int srcType, dstType, bufType;\r
+ Size ksize;\r
+ Point anchor;\r
+ int maxWidth;\r
+ Size wholeSize;\r
+ Rect roi;\r
+ int dx1, dx2;\r
+ int rowBorderType, columnBorderType;\r
+ Vector<int> borderTab;\r
+ int borderElemSize;\r
+ Vector<uchar> ringBuf;\r
+ Vector<uchar> srcRow;\r
+ Vector<uchar> constBorderValue;\r
+ Vector<uchar> constBorderRow;\r
+ int bufStep, startY, startY0, endY, rowCount, dstY;\r
+ Vector<uchar*> rows;\r
+ \r
+ Ptr<BaseFilter> filter2D;\r
+ Ptr<BaseRowFilter> rowFilter;\r
+ Ptr<BaseColumnFilter> columnFilter;\r
+};\r
+\r
+enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2,\r
+ KERNEL_SMOOTH=4, KERNEL_INTEGER=8 };\r
+\r
+CV_EXPORTS int getKernelType(const Mat& kernel, Point anchor);\r
+\r
+CV_EXPORTS Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType,\r
+ const Mat& kernel, int anchor,\r
+ int symmetryType);\r
+\r
+CV_EXPORTS Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType,\r
+ const Mat& kernel, int anchor,\r
+ int symmetryType, double delta=0,\r
+ int bits=0);\r
+\r
+CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,\r
+ const Mat& kernel,\r
+ Point anchor=Point(-1,-1),\r
+ double delta=0, int bits=0);\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,\r
+ const Mat& rowKernel, const Mat& columnKernel,\r
+ Point _anchor=Point(-1,-1), double delta=0,\r
+ int _rowBorderType=BORDER_DEFAULT,\r
+ int _columnBorderType=-1,\r
+ const Scalar& _borderValue=Scalar());\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,\r
+ const Mat& kernel, Point _anchor=Point(-1,-1),\r
+ double delta=0, int _rowBorderType=BORDER_DEFAULT,\r
+ int _columnBorderType=-1, const Scalar& _borderValue=Scalar());\r
+\r
+CV_EXPORTS Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createGaussianFilter( int type, Size ksize,\r
+ double sigma1, double sigma2=0,\r
+ int borderType=BORDER_DEFAULT);\r
+\r
+CV_EXPORTS void getDerivKernels( Mat& kx, Mat& ky, int dx, int dy, int ksize,\r
+ bool normalize=false, int ktype=CV_32F );\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,\r
+ int dx, int dy, int ksize,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,\r
+ int ksize, int anchor=-1);\r
+CV_EXPORTS Ptr<BaseColumnFilter> getColumnSumFilter(int sumType, int dstType,\r
+ int ksize, int anchor=-1,\r
+ double scale=1);\r
+CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksize,\r
+ Point anchor=Point(-1,-1),\r
+ bool normalize=true,\r
+ int borderType=BORDER_DEFAULT);\r
+\r
+enum { MORPH_ERODE=0, MORPH_DILATE=1, MORPH_OPEN=2, MORPH_CLOSE=3,\r
+ MORPH_GRADIENT=4, MORPH_TOPHAT=5, MORPH_BLACKHAT=6 };\r
+\r
+CV_EXPORTS Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1);\r
+CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1);\r
+CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel,\r
+ Point anchor=Point(-1,-1));\r
+\r
+static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }\r
+\r
+CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT,\r
+ int _columnBorderType=-1,\r
+ const Scalar& _borderValue=morphologyDefaultBorderValue());\r
+\r
+enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };\r
+CV_EXPORTS Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1));\r
+\r
+CV_EXPORTS void copyMakeBorder( const Mat& src, Mat& dst,\r
+ int top, int bottom, int left, int right,\r
+ int borderType );\r
+\r
+CV_EXPORTS void medianBlur( const Mat& src, Mat& dst, int ksize );\r
+CV_EXPORTS void GaussianBlur( const Mat& src, Mat& dst, Size ksize,\r
+ double sigma1, double sigma2=0,\r
+ int borderType=BORDER_DEFAULT );\r
+CV_EXPORTS void bilateralFilter( const Mat& src, Mat& dst, int d,\r
+ double sigmaColor, double sigmaSpace,\r
+ int borderType=BORDER_DEFAULT );\r
+CV_EXPORTS void boxFilter( const Mat& src, Mat& dst, int ddepth,\r
+ Size ksize, Point anchor=Point(-1,-1),\r
+ bool normalize=true,\r
+ int borderType=BORDER_DEFAULT );\r
+static inline void blur( const Mat& src, Mat& dst,\r
+ Size ksize, Point anchor=Point(-1,-1),\r
+ int borderType=BORDER_DEFAULT )\r
+{\r
+ boxFilter( src, dst, -1, ksize, anchor, true, borderType );\r
+}\r
+\r
+CV_EXPORTS void filter2D( const Mat& src, Mat& dst, int ddepth,\r
+ const Mat& kernel, Point anchor=Point(-1,-1),\r
+ double delta=0, int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void sepFilter2D( const Mat& src, Mat& dst, int ddepth,\r
+ const Mat& kernelX, const Mat& kernelY,\r
+ Point anchor=Point(-1,-1),\r
+ double delta=0, int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void Sobel( const Mat& src, Mat& dst, int ddepth,\r
+ int dx, int dy, int ksize=3,\r
+ double scale=1, double delta=0,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void Scharr( const Mat& src, Mat& dst, int ddepth,\r
+ int dx, int dy, double scale=1, double delta=0,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void Laplacian( const Mat& src, Mat& dst, int ddepth,\r
+ int ksize=1, double scale=1, double delta=0,\r
+ int borderType=BORDER_DEFAULT );\r
+\r
+CV_EXPORTS void erode( const Mat& src, Mat& dst, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int iterations=1,\r
+ int borderType=BORDER_CONSTANT,\r
+ const Scalar& borderValue=morphologyDefaultBorderValue() );\r
+CV_EXPORTS void dilate( const Mat& src, Mat& dst, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int iterations=1,\r
+ int borderType=BORDER_CONSTANT,\r
+ const Scalar& borderValue=morphologyDefaultBorderValue() );\r
+CV_EXPORTS void morphologyEx( const Mat& src, Mat& dst, int op, const Mat& kernel,\r
+ Point anchor=Point(-1,-1), int iterations=1,\r
+ int borderType=BORDER_CONSTANT,\r
+ const Scalar& borderValue=morphologyDefaultBorderValue() );\r
+\r
+enum { INTER_NEAREST=0, INTER_LINEAR=1, INTER_CUBIC=2, INTER_AREA=3,\r
+ INTER_LANCZOS4=4, INTER_MAX=7, WARP_INVERSE_MAP=16 };\r
+\r
+CV_EXPORTS void resize( const Mat& src, Mat& dst,\r
+ Size dsize=Size(), double fx=0, double fy=0,\r
+ int interpolation=INTER_LINEAR );\r
+\r
+CV_EXPORTS void warpAffine( const Mat& src, Mat& dst,\r
+ const Mat& M, Size dsize,\r
+ int flags=INTER_LINEAR,\r
+ int borderMode=BORDER_CONSTANT,\r
+ const Scalar& borderValue=Scalar());\r
+CV_EXPORTS void warpPerspective( const Mat& src, Mat& dst,\r
+ const Mat& M, Size dsize,\r
+ int flags=INTER_LINEAR,\r
+ int borderMode=BORDER_CONSTANT,\r
+ const Scalar& borderValue=Scalar());\r
+\r
+CV_EXPORTS void remap( const Mat& src, Mat& dst, const Mat& map1, const Mat& map2,\r
+ int interpolation, int borderMode=BORDER_CONSTANT,\r
+ const Scalar& borderValue=Scalar());\r
+\r
+CV_EXPORTS void convertMaps( const Mat& map1, const Mat& map2, Mat& dstmap1, Mat& dstmap2,\r
+ int dstmap1type, bool nninterpolation=false );\r
+\r
+CV_EXPORTS Mat getRotationMatrix2D( Point2f center, double angle, double scale );\r
+CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] );\r
+CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] );\r
+\r
+CV_EXPORTS void integral( const Mat& src, Mat& sum, int sdepth=-1 );\r
+CV_EXPORTS void integral( const Mat& src, Mat& sum, Mat& sqsum, int sdepth=-1 );\r
+CV_EXPORTS void integral( const Mat& src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth=-1 );\r
+\r
+CV_EXPORTS void accumulate( const Mat& src, Mat& dst, const Mat& mask=Mat() );\r
+CV_EXPORTS void accumulateSquare( const Mat& src, Mat& dst, const Mat& mask=Mat() );\r
+CV_EXPORTS void accumulateProduct( const Mat& src1, const Mat& src2,\r
+ Mat& dst, const Mat& mask=Mat() );\r
+CV_EXPORTS void accumulateWeighted( const Mat& src, Mat& dst,\r
+ double alpha, const Mat& mask=Mat() );\r
+\r
+enum { THRESH_BINARY=0, THRESH_BINARY_INV=1, THRESH_TRUNC=2, THRESH_TOZERO=3,\r
+ THRESH_TOZERO_INV=4, THRESH_MASK=7, THRESH_OTSU=8 };\r
+\r
+CV_EXPORTS double threshold( const Mat& src, Mat& dst, double thresh, double maxval, int type );\r
+\r
+enum { ADAPTIVE_THRESH_MEAN_C=0, ADAPTIVE_THRESH_GAUSSIAN_C=1 };\r
+\r
+CV_EXPORTS void adaptiveThreshold( const Mat& src, Mat& dst, double maxValue,\r
+ int adaptiveMethod, int thresholdType,\r
+ int blockSize, double C );\r
+\r
+}\r
+\r
+//////////////////////////////////////////////////////////////////////////////////////////\r
+\r
+struct CV_EXPORTS CvLevMarq\r
+{\r
+ CvLevMarq();\r
+ CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=\r
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),\r
+ bool completeSymmFlag=false );\r
+ ~CvLevMarq();\r
+ void init( int nparams, int nerrs, CvTermCriteria criteria=\r
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),\r
+ bool completeSymmFlag=false );\r
+ bool update( const CvMat*& param, CvMat*& J, CvMat*& err );\r
+ bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );\r
+\r
+ void clear();\r
+ void step();\r
+ enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };\r
+\r
+ CvMat* mask;\r
+ CvMat* prevParam;\r
+ CvMat* param;\r
+ CvMat* J;\r
+ CvMat* err;\r
+ CvMat* JtJ;\r
+ CvMat* JtJN;\r
+ CvMat* JtErr;\r
+ CvMat* JtJV;\r
+ CvMat* JtJW;\r
+ double prevErrNorm, errNorm;\r
+ int lambdaLg10;\r
+ CvTermCriteria criteria;\r
+ int state;\r
+ int iters;\r
+ bool completeSymmFlag;\r
+};\r
+\r
+\r
+// 2009-01-12, Xavier Delacour <xavier.delacour@gmail.com>\r
+\r
+struct lsh_hash {\r
+ int h1, h2;\r
+};\r
+\r
+struct CvLSHOperations {\r
+ virtual ~CvLSHOperations() {}\r
+\r
+ virtual int vector_add(const void* data) = 0;\r
+ virtual void vector_remove(int i) = 0;\r
+ virtual const void* vector_lookup(int i) = 0;\r
+ virtual void vector_reserve(int n) = 0;\r
+ virtual unsigned int vector_count() = 0;\r
+\r
+ virtual void hash_insert(lsh_hash h, int l, int i) = 0;\r
+ virtual void hash_remove(lsh_hash h, int l, int i) = 0;\r
+ virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0;\r
+};\r
+\r
+\r
+#endif /* __cplusplus */\r
+\r
+#endif /* _CV_HPP_ */\r
+\r
+/* End of file. */\r
return c -= b;
}
+template<typename T> static inline
+Vec_<T, 2>& operator *= (Vec_<T, 2>& a, T alpha)
+{
+ a[0] *= alpha; a[1] *= alpha;
+ return a;
+}
+
+template<typename T> static inline
+Vec_<T, 3>& operator *= (Vec_<T, 3>& a, T alpha)
+{
+ a[0] *= alpha; a[1] *= alpha; a[2] *= alpha;
+ return a;
+}
+
+template<typename T> static inline
+Vec_<T, 4>& operator *= (Vec_<T, 4>& a, T alpha)
+{
+ a[0] *= alpha; a[1] *= alpha; a[2] *= alpha; a[3] *= alpha;
+ return a;
+}
+
template<typename T, int cn> static inline Vec_<T, cn>
operator * (const Vec_<T, cn>& a, T alpha)
{
# convenience library for cv
lib_cv_la_SOURCES = \
cv/cvaccum.cpp \
- cv/cvadapthresh.cpp \
cv/cvapprox.cpp \
cv/cvcalccontrasthistogram.cpp \
cv/cvcalcimagehomography.cpp \
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
#include "_cv.h"
-#define ICV_DEF_ACC_FUNC( name, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL \
-name( const srctype *src, int srcstep, dsttype *dst, \
- int dststep, CvSize size ) \
- \
-{ \
- srcstep /= sizeof(src[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src += srcstep, dst += dststep ) \
- { \
- int x; \
- for( x = 0; x <= size.width - 4; x += 4 ) \
- { \
- dsttype t0 = dst[x] + cvtmacro(src[x]); \
- dsttype t1 = dst[x + 1] + cvtmacro(src[x + 1]); \
- dst[x] = t0; dst[x + 1] = t1; \
- \
- t0 = dst[x + 2] + cvtmacro(src[x + 2]); \
- t1 = dst[x + 3] + cvtmacro(src[x + 3]); \
- dst[x + 2] = t0; dst[x + 3] = t1; \
- } \
- \
- for( ; x < size.width; x++ ) \
- dst[x] += cvtmacro(src[x]); \
- } \
- \
- return CV_OK; \
-}
-
-
-ICV_DEF_ACC_FUNC( icvAdd_8u32f_C1IR, uchar, float, CV_8TO32F )
-ICV_DEF_ACC_FUNC( icvAdd_32f_C1IR, float, float, CV_NOP )
-ICV_DEF_ACC_FUNC( icvAddSquare_8u32f_C1IR, uchar, float, CV_8TO32F_SQR )
-ICV_DEF_ACC_FUNC( icvAddSquare_32f_C1IR, float, float, CV_SQR )
-
-
-#define ICV_DEF_ACCPROD_FUNC( flavor, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL icvAddProduct_##flavor##_C1IR \
-( const srctype *src1, int step1, const srctype *src2, int step2, \
- dsttype *dst, int dststep, CvSize size ) \
-{ \
- step1 /= sizeof(src1[0]); \
- step2 /= sizeof(src2[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src1 += step1, src2 += step2, dst += dststep ) \
- { \
- int x; \
- for( x = 0; x <= size.width - 4; x += 4 ) \
- { \
- dsttype t0 = dst[x] + cvtmacro(src1[x])*cvtmacro(src2[x]); \
- dsttype t1 = dst[x+1] + cvtmacro(src1[x+1])*cvtmacro(src2[x+1]);\
- dst[x] = t0; dst[x + 1] = t1; \
- \
- t0 = dst[x + 2] + cvtmacro(src1[x + 2])*cvtmacro(src2[x + 2]); \
- t1 = dst[x + 3] + cvtmacro(src1[x + 3])*cvtmacro(src2[x + 3]); \
- dst[x + 2] = t0; dst[x + 3] = t1; \
- } \
- \
- for( ; x < size.width; x++ ) \
- dst[x] += cvtmacro(src1[x])*cvtmacro(src2[x]); \
- } \
- \
- return CV_OK; \
-}
-
+namespace cv
+{
-ICV_DEF_ACCPROD_FUNC( 8u32f, uchar, float, CV_8TO32F )
-ICV_DEF_ACCPROD_FUNC( 32f, float, float, CV_NOP )
-
-
-#define ICV_DEF_ACCWEIGHT_FUNC( flavor, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL icvAddWeighted_##flavor##_C1IR \
-( const srctype *src, int srcstep, dsttype *dst, int dststep, \
- CvSize size, dsttype alpha ) \
-{ \
- dsttype beta = (dsttype)(1 - alpha); \
- srcstep /= sizeof(src[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src += srcstep, dst += dststep ) \
- { \
- int x; \
- for( x = 0; x <= size.width - 4; x += 4 ) \
- { \
- dsttype t0 = dst[x]*beta + cvtmacro(src[x])*alpha; \
- dsttype t1 = dst[x+1]*beta + cvtmacro(src[x+1])*alpha; \
- dst[x] = t0; dst[x + 1] = t1; \
- \
- t0 = dst[x + 2]*beta + cvtmacro(src[x + 2])*alpha; \
- t1 = dst[x + 3]*beta + cvtmacro(src[x + 3])*alpha; \
- dst[x + 2] = t0; dst[x + 3] = t1; \
- } \
- \
- for( ; x < size.width; x++ ) \
- dst[x] = dst[x]*beta + cvtmacro(src[x])*alpha; \
- } \
- \
- return CV_OK; \
+inline float sqr(uchar a) { return CV_8TO32F_SQR(a); }
+inline float sqr(float a) { return a*a; }
+inline Vec_<float, 3> sqr(const Vec_<uchar, 3>& a)
+{
+ return Vec_<float, 3>(CV_8TO32F_SQR(a[0]), CV_8TO32F_SQR(a[1]), CV_8TO32F_SQR(a[2]));
}
-
-
-ICV_DEF_ACCWEIGHT_FUNC( 8u32f, uchar, float, CV_8TO32F )
-ICV_DEF_ACCWEIGHT_FUNC( 32f, float, float, CV_NOP )
-
-
-#define ICV_DEF_ACCMASK_FUNC_C1( name, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL \
-name( const srctype *src, int srcstep, const uchar* mask, int maskstep, \
- dsttype *dst, int dststep, CvSize size ) \
-{ \
- srcstep /= sizeof(src[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src += srcstep, \
- dst += dststep, mask += maskstep ) \
- { \
- int x; \
- for( x = 0; x <= size.width - 2; x += 2 ) \
- { \
- if( mask[x] ) \
- dst[x] += cvtmacro(src[x]); \
- if( mask[x+1] ) \
- dst[x+1] += cvtmacro(src[x+1]); \
- } \
- \
- for( ; x < size.width; x++ ) \
- if( mask[x] ) \
- dst[x] += cvtmacro(src[x]); \
- } \
- \
- return CV_OK; \
+inline Vec_<float, 3> sqr(const Vec_<float, 3>& a)
+{
+ return Vec_<float, 3>(a[0]*a[0], a[1]*a[1], a[2]*a[2]);
}
-
-
-ICV_DEF_ACCMASK_FUNC_C1( icvAdd_8u32f_C1IMR, uchar, float, CV_8TO32F )
-ICV_DEF_ACCMASK_FUNC_C1( icvAdd_32f_C1IMR, float, float, CV_NOP )
-ICV_DEF_ACCMASK_FUNC_C1( icvAddSquare_8u32f_C1IMR, uchar, float, CV_8TO32F_SQR )
-ICV_DEF_ACCMASK_FUNC_C1( icvAddSquare_32f_C1IMR, float, float, CV_SQR )
-
-
-#define ICV_DEF_ACCPRODUCTMASK_FUNC_C1( flavor, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL icvAddProduct_##flavor##_C1IMR \
-( const srctype *src1, int step1, const srctype* src2, int step2, \
- const uchar* mask, int maskstep, dsttype *dst, int dststep, CvSize size )\
-{ \
- step1 /= sizeof(src1[0]); \
- step2 /= sizeof(src2[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src1 += step1, src2 += step2, \
- dst += dststep, mask += maskstep ) \
- { \
- int x; \
- for( x = 0; x <= size.width - 2; x += 2 ) \
- { \
- if( mask[x] ) \
- dst[x] += cvtmacro(src1[x])*cvtmacro(src2[x]); \
- if( mask[x+1] ) \
- dst[x+1] += cvtmacro(src1[x+1])*cvtmacro(src2[x+1]); \
- } \
- \
- for( ; x < size.width; x++ ) \
- if( mask[x] ) \
- dst[x] += cvtmacro(src1[x])*cvtmacro(src2[x]); \
- } \
- \
- return CV_OK; \
+inline float multiply(uchar a, uchar b) { return CV_8TO32F(a)*CV_8TO32F(b); }
+inline float multiply(float a, float b) { return a*b; }
+inline Vec_<float, 3> multiply(const Vec_<uchar, 3>& a, const Vec_<uchar, 3>& b)
+{
+ return Vec_<float, 3>(
+ CV_8TO32F(a[0])*CV_8TO32F(b[0]),
+ CV_8TO32F(a[1])*CV_8TO32F(b[1]),
+ CV_8TO32F(a[2])*CV_8TO32F(b[2]));
}
-
-
-ICV_DEF_ACCPRODUCTMASK_FUNC_C1( 8u32f, uchar, float, CV_8TO32F )
-ICV_DEF_ACCPRODUCTMASK_FUNC_C1( 32f, float, float, CV_NOP )
-
-#define ICV_DEF_ACCWEIGHTMASK_FUNC_C1( flavor, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL icvAddWeighted_##flavor##_C1IMR \
-( const srctype *src, int srcstep, const uchar* mask, int maskstep, \
- dsttype *dst, int dststep, CvSize size, dsttype alpha ) \
-{ \
- dsttype beta = (dsttype)(1 - alpha); \
- srcstep /= sizeof(src[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src += srcstep, \
- dst += dststep, mask += maskstep ) \
- { \
- int x; \
- for( x = 0; x <= size.width - 2; x += 2 ) \
- { \
- if( mask[x] ) \
- dst[x] = dst[x]*beta + cvtmacro(src[x])*alpha; \
- if( mask[x+1] ) \
- dst[x+1] = dst[x+1]*beta + cvtmacro(src[x+1])*alpha; \
- } \
- \
- for( ; x < size.width; x++ ) \
- if( mask[x] ) \
- dst[x] = dst[x]*beta + cvtmacro(src[x])*alpha; \
- } \
- \
- return CV_OK; \
+inline Vec_<float, 3> multiply(const Vec_<float, 3>& a, const Vec_<float, 3>& b)
+{
+ return Vec_<float, 3>(a[0]*b[0], a[1]*b[1], a[2]*b[2]);
}
-
-ICV_DEF_ACCWEIGHTMASK_FUNC_C1( 8u32f, uchar, float, CV_8TO32F )
-ICV_DEF_ACCWEIGHTMASK_FUNC_C1( 32f, float, float, CV_NOP )
-
-
-#define ICV_DEF_ACCMASK_FUNC_C3( name, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL \
-name( const srctype *src, int srcstep, const uchar* mask, int maskstep, \
- dsttype *dst, int dststep, CvSize size ) \
-{ \
- srcstep /= sizeof(src[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src += srcstep, \
- dst += dststep, mask += maskstep ) \
- { \
- int x; \
- for( x = 0; x < size.width; x++ ) \
- if( mask[x] ) \
- { \
- dsttype t0, t1, t2; \
- t0 = dst[x*3] + cvtmacro(src[x*3]); \
- t1 = dst[x*3+1] + cvtmacro(src[x*3+1]); \
- t2 = dst[x*3+2] + cvtmacro(src[x*3+2]); \
- dst[x*3] = t0; \
- dst[x*3+1] = t1; \
- dst[x*3+2] = t2; \
- } \
- } \
- \
- return CV_OK; \
+inline float addw(uchar a, float alpha, float b, float beta)
+{
+ return b*beta + CV_8TO32F(a)*alpha;
}
-
-
-ICV_DEF_ACCMASK_FUNC_C3( icvAdd_8u32f_C3IMR, uchar, float, CV_8TO32F )
-ICV_DEF_ACCMASK_FUNC_C3( icvAdd_32f_C3IMR, float, float, CV_NOP )
-ICV_DEF_ACCMASK_FUNC_C3( icvAddSquare_8u32f_C3IMR, uchar, float, CV_8TO32F_SQR )
-ICV_DEF_ACCMASK_FUNC_C3( icvAddSquare_32f_C3IMR, float, float, CV_SQR )
-
-
-#define ICV_DEF_ACCPRODUCTMASK_FUNC_C3( flavor, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL icvAddProduct_##flavor##_C3IMR \
-( const srctype *src1, int step1, const srctype* src2, int step2, \
- const uchar* mask, int maskstep, dsttype *dst, int dststep, CvSize size ) \
-{ \
- step1 /= sizeof(src1[0]); \
- step2 /= sizeof(src2[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src1 += step1, src2 += step2, \
- dst += dststep, mask += maskstep ) \
- { \
- int x; \
- for( x = 0; x < size.width; x++ ) \
- if( mask[x] ) \
- { \
- dsttype t0, t1, t2; \
- t0 = dst[x*3]+cvtmacro(src1[x*3])*cvtmacro(src2[x*3]); \
- t1 = dst[x*3+1]+cvtmacro(src1[x*3+1])*cvtmacro(src2[x*3+1]);\
- t2 = dst[x*3+2]+cvtmacro(src1[x*3+2])*cvtmacro(src2[x*3+2]);\
- dst[x*3] = t0; \
- dst[x*3+1] = t1; \
- dst[x*3+2] = t2; \
- } \
- } \
- \
- return CV_OK; \
+inline float addw(float a, float alpha, float b, float beta)
+{
+ return b*beta + a*alpha;
}
-
-
-ICV_DEF_ACCPRODUCTMASK_FUNC_C3( 8u32f, uchar, float, CV_8TO32F )
-ICV_DEF_ACCPRODUCTMASK_FUNC_C3( 32f, float, float, CV_NOP )
-
-
-#define ICV_DEF_ACCWEIGHTMASK_FUNC_C3( flavor, srctype, dsttype, cvtmacro ) \
-static CvStatus CV_STDCALL icvAddWeighted_##flavor##_C3IMR \
-( const srctype *src, int srcstep, const uchar* mask, int maskstep, \
- dsttype *dst, int dststep, CvSize size, dsttype alpha ) \
-{ \
- dsttype beta = (dsttype)(1 - alpha); \
- srcstep /= sizeof(src[0]); \
- dststep /= sizeof(dst[0]); \
- \
- for( ; size.height--; src += srcstep, \
- dst += dststep, mask += maskstep ) \
- { \
- int x; \
- for( x = 0; x < size.width; x++ ) \
- if( mask[x] ) \
- { \
- dsttype t0, t1, t2; \
- t0 = dst[x*3]*beta + cvtmacro(src[x*3])*alpha; \
- t1 = dst[x*3+1]*beta + cvtmacro(src[x*3+1])*alpha; \
- t2 = dst[x*3+2]*beta + cvtmacro(src[x*3+2])*alpha; \
- dst[x*3] = t0; \
- dst[x*3+1] = t1; \
- dst[x*3+2] = t2; \
- } \
- } \
- \
- return CV_OK; \
+inline Vec_<float, 3> addw(const Vec_<uchar, 3>& a, float alpha, const Vec_<float, 3>& b, float beta)
+{
+ return Vec_<float, 3>(b[0]*beta + CV_8TO32F(a[0])*alpha,
+ b[1]*beta + CV_8TO32F(a[1])*alpha,
+ b[2]*beta + CV_8TO32F(a[2])*alpha);
}
-
-ICV_DEF_ACCWEIGHTMASK_FUNC_C3( 8u32f, uchar, float, CV_8TO32F )
-ICV_DEF_ACCWEIGHTMASK_FUNC_C3( 32f, float, float, CV_NOP )
-
-#define ICV_DEF_INIT_ACC_TAB( FUNCNAME ) \
-static void icvInit##FUNCNAME##Table( CvFuncTable* tab, CvBigFuncTable* masktab ) \
-{ \
- tab->fn_2d[CV_8U] = (void*)icv##FUNCNAME##_8u32f_C1IR; \
- tab->fn_2d[CV_32F] = (void*)icv##FUNCNAME##_32f_C1IR; \
- \
- masktab->fn_2d[CV_8UC1] = (void*)icv##FUNCNAME##_8u32f_C1IMR; \
- masktab->fn_2d[CV_32FC1] = (void*)icv##FUNCNAME##_32f_C1IMR; \
- \
- masktab->fn_2d[CV_8UC3] = (void*)icv##FUNCNAME##_8u32f_C3IMR; \
- masktab->fn_2d[CV_32FC3] = (void*)icv##FUNCNAME##_32f_C3IMR; \
+inline Vec_<float, 3> addw(const Vec_<float, 3>& a, float alpha, const Vec_<float, 3>& b, float beta)
+{
+ return Vec_<float, 3>(b[0]*beta + a[0]*alpha,
+ b[1]*beta + a[1]*alpha,
+ b[2]*beta + a[2]*alpha);
}
-ICV_DEF_INIT_ACC_TAB( Add )
-ICV_DEF_INIT_ACC_TAB( AddSquare )
-ICV_DEF_INIT_ACC_TAB( AddProduct )
-ICV_DEF_INIT_ACC_TAB( AddWeighted )
-
-typedef CvStatus (CV_STDCALL * CvAccFunc)( const void* src, int srcstep,
- void* dst, int dststep, CvSize size );
-typedef CvStatus (CV_STDCALL * CvAccMaskFunc)( const void* src, int srcstep,
- const void* mask, int maskstep,
- void* dst, int dststep, CvSize size );
-
-CV_IMPL void
-cvAcc( const void* arr, void* sumarr, const void* maskarr )
+template<typename T, typename AT> void
+acc_( const Mat& _src, Mat& _dst )
{
- static CvFuncTable acc_tab;
- static CvBigFuncTable accmask_tab;
- static int inittab = 0;
-
- CV_FUNCNAME( "cvAcc" );
+ Size size = _src.size();
+ size.width *= _src.channels();
- __BEGIN__;
-
- int type, sumdepth;
- int mat_step, sum_step, mask_step = 0;
- CvSize size;
- CvMat stub, *mat = (CvMat*)arr;
- CvMat sumstub, *sum = (CvMat*)sumarr;
- CvMat maskstub, *mask = (CvMat*)maskarr;
-
- if( !inittab )
+ if( _src.isContinuous() && _dst.isContinuous() )
{
- icvInitAddTable( &acc_tab, &accmask_tab );
- inittab = 1;
+ size.width *= size.height;
+ size.height = 1;
}
- if( !CV_IS_MAT( mat ) || !CV_IS_MAT( sum ))
+ int i, j;
+ for( i = 0; i < size.height; i++ )
{
- int coi1 = 0, coi2 = 0;
- CV_CALL( mat = cvGetMat( mat, &stub, &coi1 ));
- CV_CALL( sum = cvGetMat( sum, &sumstub, &coi2 ));
- if( coi1 + coi2 != 0 )
- CV_ERROR( CV_BadCOI, "" );
- }
-
- if( CV_MAT_DEPTH( sum->type ) != CV_32F )
- CV_ERROR( CV_BadDepth, "" );
-
- if( !CV_ARE_CNS_EQ( mat, sum ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
+ const T* src = (const T*)(_src.data + _src.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
- sumdepth = CV_MAT_DEPTH( sum->type );
- if( sumdepth != CV_32F && (maskarr != 0 || sumdepth != CV_64F))
- CV_ERROR( CV_BadDepth, "Bad accumulator type" );
-
- if( !CV_ARE_SIZES_EQ( mat, sum ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- size = cvGetMatSize( mat );
- type = CV_MAT_TYPE( mat->type );
-
- mat_step = mat->step;
- sum_step = sum->step;
-
- if( !mask )
- {
- CvAccFunc func=(CvAccFunc)acc_tab.fn_2d[CV_MAT_DEPTH(type)];
-
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "Unsupported type combination" );
-
- size.width *= CV_MAT_CN(type);
- if( CV_IS_MAT_CONT( mat->type & sum->type ))
+ for( j = 0; j <= size.width - 4; j += 4 )
{
- size.width *= size.height;
- mat_step = sum_step = CV_STUB_STEP;
- size.height = 1;
+ AT t0 = dst[j] + src[j], t1 = dst[j+1] + src[j+1];
+ dst[j] = t0; dst[j+1] = t1;
+ t0 = dst[j+2] + src[j+2]; t1 = dst[j+3] + src[j+3];
+ dst[j+2] = t0; dst[j+3] = t1;
}
- IPPI_CALL( func( mat->data.ptr, mat_step, sum->data.ptr, sum_step, size ));
+ for( ; j < size.width; j++ )
+ dst[j] += src[j];
}
- else
- {
- CvAccMaskFunc func = (CvAccMaskFunc)accmask_tab.fn_2d[type];
-
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
+}
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
- if( !CV_IS_MASK_ARR( mask ))
- CV_ERROR( CV_StsBadMask, "" );
+template<typename T, typename AT> void
+accSqr_( const Mat& _src, Mat& _dst )
+{
+ Size size = _src.size();
+ size.width *= _src.channels();
- if( !CV_ARE_SIZES_EQ( mat, mask ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
+ if( _src.isContinuous() && _dst.isContinuous() )
+ {
+ size.width *= size.height;
+ size.height = 1;
+ }
- mask_step = mask->step;
+ int i, j;
+ for( i = 0; i < size.height; i++ )
+ {
+ const T* src = (const T*)(_src.data + _src.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
- if( CV_IS_MAT_CONT( mat->type & sum->type & mask->type ))
+ for( j = 0; j <= size.width - 4; j += 4 )
{
- size.width *= size.height;
- mat_step = sum_step = mask_step = CV_STUB_STEP;
- size.height = 1;
+ AT t0 = dst[j] + sqr(src[j]), t1 = dst[j+1] + sqr(src[j+1]);
+ dst[j] = t0; dst[j+1] = t1;
+ t0 = dst[j+2] + sqr(src[j+2]); t1 = dst[j+3] + sqr(src[j+3]);
+ dst[j+2] = t0; dst[j+3] = t1;
}
- IPPI_CALL( func( mat->data.ptr, mat_step, mask->data.ptr, mask_step,
- sum->data.ptr, sum_step, size ));
+ for( ; j < size.width; j++ )
+ dst[j] += sqr(src[j]);
}
-
- __END__;
}
-CV_IMPL void
-cvSquareAcc( const void* arr, void* sq_sum, const void* maskarr )
+template<typename T, typename AT> void
+accProd_( const Mat& _src1, const Mat& _src2, Mat& _dst )
{
- static CvFuncTable acc_tab;
- static CvBigFuncTable accmask_tab;
- static int inittab = 0;
-
- CV_FUNCNAME( "cvSquareAcc" );
-
- __BEGIN__;
-
- int coi1, coi2;
- int type;
- int mat_step, sum_step, mask_step = 0;
- CvSize size;
- CvMat stub, *mat = (CvMat*)arr;
- CvMat sumstub, *sum = (CvMat*)sq_sum;
- CvMat maskstub, *mask = (CvMat*)maskarr;
+ Size size = _src1.size();
+ size.width *= _src1.channels();
- if( !inittab )
+ if( _src1.isContinuous() && _src2.isContinuous() && _dst.isContinuous() )
{
- icvInitAddSquareTable( &acc_tab, &accmask_tab );
- inittab = 1;
+ size.width *= size.height;
+ size.height = 1;
}
- CV_CALL( mat = cvGetMat( mat, &stub, &coi1 ));
- CV_CALL( sum = cvGetMat( sum, &sumstub, &coi2 ));
-
- if( coi1 != 0 || coi2 != 0 )
- CV_ERROR( CV_BadCOI, "" );
-
- if( !CV_ARE_CNS_EQ( mat, sum ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( CV_MAT_DEPTH( sum->type ) != CV_32F )
- CV_ERROR( CV_BadDepth, "" );
-
- if( !CV_ARE_SIZES_EQ( mat, sum ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- size = cvGetMatSize( mat );
- type = CV_MAT_TYPE( mat->type );
-
- mat_step = mat->step;
- sum_step = sum->step;
-
- if( !mask )
+ int i, j;
+ for( i = 0; i < size.height; i++ )
{
- CvAccFunc func = (CvAccFunc)acc_tab.fn_2d[CV_MAT_DEPTH(type)];
-
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
+ const T* src1 = (const T*)(_src1.data + _src1.step*i);
+ const T* src2 = (const T*)(_src2.data + _src2.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
- size.width *= CV_MAT_CN(type);
-
- if( CV_IS_MAT_CONT( mat->type & sum->type ))
+ for( j = 0; j <= size.width - 4; j += 4 )
{
- size.width *= size.height;
- mat_step = sum_step = CV_STUB_STEP;;
- size.height = 1;
+ AT t0, t1;
+ t0 = dst[j] + multiply(src1[j], src2[j]);
+ t1 = dst[j+1] + multiply(src1[j+1], src2[j+1]);
+ dst[j] = t0; dst[j+1] = t1;
+ t0 = dst[j+2] + multiply(src1[j+2], src2[j+2]);
+ t1 = dst[j+3] + multiply(src1[j+3], src2[j+3]);
+ dst[j+2] = t0; dst[j+3] = t1;
}
- IPPI_CALL( func( mat->data.ptr, mat_step, sum->data.ptr, sum_step, size ));
+ for( ; j < size.width; j++ )
+ dst[j] += multiply(src1[j], src2[j]);
}
- else
- {
- CvAccMaskFunc func = (CvAccMaskFunc)accmask_tab.fn_2d[type];
-
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
+}
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
- if( !CV_IS_MASK_ARR( mask ))
- CV_ERROR( CV_StsBadMask, "" );
+template<typename T, typename AT> void
+accW_( const Mat& _src, Mat& _dst, double _alpha )
+{
+ AT alpha = (AT)_alpha, beta = (AT)(1 - _alpha);
+ Size size = _src.size();
+ size.width *= _src.channels();
- if( !CV_ARE_SIZES_EQ( mat, mask ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
+ if( _src.isContinuous() && _dst.isContinuous() )
+ {
+ size.width *= size.height;
+ size.height = 1;
+ }
- mask_step = mask->step;
+ int i, j;
+ for( i = 0; i < size.height; i++ )
+ {
+ const T* src = (const T*)(_src.data + _src.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
- if( CV_IS_MAT_CONT( mat->type & sum->type & mask->type ))
+ for( j = 0; j <= size.width - 4; j += 4 )
{
- size.width *= size.height;
- mat_step = sum_step = mask_step = CV_STUB_STEP;
- size.height = 1;
+ AT t0, t1;
+ t0 = addw(src[j], alpha, dst[j], beta);
+ t1 = addw(src[j+1], alpha, dst[j+1], beta);
+ dst[j] = t0; dst[j+1] = t1;
+ t0 = addw(src[j+2], alpha, dst[j+2], beta);
+ t1 = addw(src[j+3], alpha, dst[j+3], beta);
+ dst[j+2] = t0; dst[j+3] = t1;
}
- IPPI_CALL( func( mat->data.ptr, mat_step, mask->data.ptr, mask_step,
- sum->data.ptr, sum_step, size ));
+ for( ; j < size.width; j++ )
+ dst[j] = addw(src[j], alpha, dst[j], beta);
}
-
- __END__;
}
-typedef CvStatus (CV_STDCALL * CvMultAccFunc)( const void* src1, int srcstep1,
- const void* src2, int srcstep2,
- void* dst, int dststep, CvSize size );
-typedef CvStatus (CV_STDCALL * CvMultAccMaskFunc)( const void* src1, int srcstep1,
- const void* src2, int srcstep2,
- const void* mask, int maskstep,
- void* dst, int dststep, CvSize size );
-
-CV_IMPL void
-cvMultiplyAcc( const void* arrA, const void* arrB,
- void* acc, const void* maskarr )
+template<typename T, typename AT> void
+accMask_( const Mat& _src, Mat& _dst, const Mat& _mask )
{
- static CvFuncTable acc_tab;
- static CvBigFuncTable accmask_tab;
- static int inittab = 0;
-
- CV_FUNCNAME( "cvMultiplyAcc" );
-
- __BEGIN__;
-
- int coi1, coi2, coi3;
- int type;
- int mat1_step, mat2_step, sum_step, mask_step = 0;
- CvSize size;
- CvMat stub1, *mat1 = (CvMat*)arrA;
- CvMat stub2, *mat2 = (CvMat*)arrB;
- CvMat sumstub, *sum = (CvMat*)acc;
- CvMat maskstub, *mask = (CvMat*)maskarr;
+ Size size = _src.size();
- if( !inittab )
+ if( _src.isContinuous() && _dst.isContinuous() && _mask.isContinuous() )
{
- icvInitAddProductTable( &acc_tab, &accmask_tab );
- inittab = 1;
+ size.width *= size.height;
+ size.height = 1;
}
- CV_CALL( mat1 = cvGetMat( mat1, &stub1, &coi1 ));
- CV_CALL( mat2 = cvGetMat( mat2, &stub2, &coi2 ));
- CV_CALL( sum = cvGetMat( sum, &sumstub, &coi3 ));
-
- if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
- CV_ERROR( CV_BadCOI, "" );
-
- if( !CV_ARE_CNS_EQ( mat1, mat2 ) || !CV_ARE_CNS_EQ( mat1, sum ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
+ int i, j;
+ for( i = 0; i < size.height; i++ )
+ {
+ const T* src = (const T*)(_src.data + _src.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
+ const uchar* mask = _mask.data + _mask.step*i;
- if( CV_MAT_DEPTH( sum->type ) != CV_32F )
- CV_ERROR( CV_BadDepth, "" );
+ for( j = 0; j < size.width; j++ )
+ if( mask[j] )
+ dst[j] += src[j];
+ }
+}
- if( !CV_ARE_SIZES_EQ( mat1, sum ) || !CV_ARE_SIZES_EQ( mat2, sum ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
- size = cvGetMatSize( mat1 );
- type = CV_MAT_TYPE( mat1->type );
+template<typename T, typename AT> void
+accSqrMask_( const Mat& _src, Mat& _dst, const Mat& _mask )
+{
+ Size size = _src.size();
- mat1_step = mat1->step;
- mat2_step = mat2->step;
- sum_step = sum->step;
+ if( _src.isContinuous() && _dst.isContinuous() && _mask.isContinuous() )
+ {
+ size.width *= size.height;
+ size.height = 1;
+ }
- if( !mask )
+ int i, j;
+ for( i = 0; i < size.height; i++ )
{
- CvMultAccFunc func = (CvMultAccFunc)acc_tab.fn_2d[CV_MAT_DEPTH(type)];
+ const T* src = (const T*)(_src.data + _src.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
+ const uchar* mask = _mask.data + _mask.step*i;
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
+ for( j = 0; j < size.width; j++ )
+ if( mask[j] )
+ dst[j] += sqr(src[j]);
+ }
+}
- size.width *= CV_MAT_CN(type);
- if( CV_IS_MAT_CONT( mat1->type & mat2->type & sum->type ))
- {
- size.width *= size.height;
- mat1_step = mat2_step = sum_step = CV_STUB_STEP;
- size.height = 1;
- }
+template<typename T, typename AT> void
+accProdMask_( const Mat& _src1, const Mat& _src2, Mat& _dst, const Mat& _mask )
+{
+ Size size = _src1.size();
- IPPI_CALL( func( mat1->data.ptr, mat1_step, mat2->data.ptr, mat2_step,
- sum->data.ptr, sum_step, size ));
- }
- else
+ if( _src1.isContinuous() && _src2.isContinuous() &&
+ _dst.isContinuous() && _mask.isContinuous() )
{
- CvMultAccMaskFunc func = (CvMultAccMaskFunc)accmask_tab.fn_2d[type];
-
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
+ size.width *= size.height;
+ size.height = 1;
+ }
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
+ int i, j;
+ for( i = 0; i < size.height; i++ )
+ {
+ const T* src1 = (const T*)(_src1.data + _src1.step*i);
+ const T* src2 = (const T*)(_src2.data + _src2.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
+ const uchar* mask = _mask.data + _mask.step*i;
+
+ for( j = 0; j < size.width; j++ )
+ if( mask[j] )
+ dst[j] += multiply(src1[j], src2[j]);
+ }
+}
- if( !CV_IS_MASK_ARR( mask ))
- CV_ERROR( CV_StsBadMask, "" );
- if( !CV_ARE_SIZES_EQ( mat1, mask ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
+template<typename T, typename AT> void
+accWMask_( const Mat& _src, Mat& _dst, double _alpha, const Mat& _mask )
+{
+ typedef typename DataType<AT>::channel_type AT1;
+ AT1 alpha = (AT1)_alpha, beta = (AT1)(1 - _alpha);
+ Size size = _src.size();
- mask_step = mask->step;
+ if( _src.isContinuous() && _dst.isContinuous() && _mask.isContinuous() )
+ {
+ size.width *= size.height;
+ size.height = 1;
+ }
- if( CV_IS_MAT_CONT( mat1->type & mat2->type & sum->type & mask->type ))
- {
- size.width *= size.height;
- mat1_step = mat2_step = sum_step = mask_step = CV_STUB_STEP;
- size.height = 1;
- }
+ int i, j;
+ for( i = 0; i < size.height; i++ )
+ {
+ const T* src = (const T*)(_src.data + _src.step*i);
+ AT* dst = (AT*)(_dst.data + _dst.step*i);
+ const uchar* mask = _mask.data + _mask.step*i;
- IPPI_CALL( func( mat1->data.ptr, mat1_step, mat2->data.ptr, mat2_step,
- mask->data.ptr, mask_step,
- sum->data.ptr, sum_step, size ));
+ for( j = 0; j < size.width; j++ )
+ if( mask[j] )
+ dst[j] = addw(src[j], alpha, dst[j], beta);
}
-
- __END__;
}
-typedef CvStatus (CV_STDCALL *CvAddWeightedFunc)( const void* src, int srcstep,
- void* dst, int dststep,
- CvSize size, float alpha );
+typedef void (*AccFunc)(const Mat&, Mat&);
+typedef void (*AccMaskFunc)(const Mat&, Mat&, const Mat&);
+typedef void (*AccProdFunc)(const Mat&, const Mat&, Mat&);
+typedef void (*AccProdMaskFunc)(const Mat&, const Mat&, Mat&, const Mat&);
+typedef void (*AccWFunc)(const Mat&, Mat&, double);
+typedef void (*AccWMaskFunc)(const Mat&, Mat&, double, const Mat&);
-typedef CvStatus (CV_STDCALL *CvAddWeightedMaskFunc)( const void* src, int srcstep,
- void* dst, int dststep,
- const void* mask, int maskstep,
- CvSize size, float alpha );
-
-CV_IMPL void
-cvRunningAvg( const void* arrY, void* arrU,
- double alpha, const void* maskarr )
+void accumulate( const Mat& src, Mat& dst, const Mat& mask )
{
- static CvFuncTable acc_tab;
- static CvBigFuncTable accmask_tab;
- static int inittab = 0;
+ CV_Assert( dst.size() == src.size() && dst.channels() == src.channels() );
- CV_FUNCNAME( "cvRunningAvg" );
-
- __BEGIN__;
-
- int coi1, coi2;
- int type;
- int mat_step, sum_step, mask_step = 0;
- CvSize size;
- CvMat stub, *mat = (CvMat*)arrY;
- CvMat sumstub, *sum = (CvMat*)arrU;
- CvMat maskstub, *mask = (CvMat*)maskarr;
-
- if( !inittab )
+ if( !mask.data )
{
- icvInitAddWeightedTable( &acc_tab, &accmask_tab );
- inittab = 1;
+ AccFunc func = 0;
+ if( src.depth() == CV_8U && dst.depth() == CV_32F )
+ func = acc_<uchar, float>;
+ else if( src.depth() == CV_32F && dst.depth() == CV_32F )
+ func = acc_<float, float>;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src, dst );
}
+ else
+ {
+ CV_Assert( mask.size() == src.size() && mask.type() == CV_8UC1 );
+
+ AccMaskFunc func = 0;
+ if( src.type() == CV_8UC1 && dst.type() == CV_32FC1 )
+ func = accMask_<uchar, float>;
+ else if( src.type() == CV_8UC3 && dst.type() == CV_32FC3 )
+ func = accMask_<Vec_<uchar, 3>, Vec_<float, 3> >;
+ else if( src.type() == CV_32FC1 && dst.type() == CV_32FC1 )
+ func = accMask_<float, float>;
+ else if( src.type() == CV_32FC3 && dst.type() == CV_32FC3 )
+ func = accMask_<Vec_<float, 3>, Vec_<float, 3> >;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src, dst, mask );
+ }
+}
- CV_CALL( mat = cvGetMat( mat, &stub, &coi1 ));
- CV_CALL( sum = cvGetMat( sum, &sumstub, &coi2 ));
-
- if( coi1 != 0 || coi2 != 0 )
- CV_ERROR( CV_BadCOI, "" );
-
- if( !CV_ARE_CNS_EQ( mat, sum ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( CV_MAT_DEPTH( sum->type ) != CV_32F )
- CV_ERROR( CV_BadDepth, "" );
-
- if( !CV_ARE_SIZES_EQ( mat, sum ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- size = cvGetMatSize( mat );
- type = CV_MAT_TYPE( mat->type );
-
- mat_step = mat->step;
- sum_step = sum->step;
- if( !mask )
+void accumulateSquare( const Mat& src, Mat& dst, const Mat& mask )
+{
+ CV_Assert( dst.size() == src.size() && dst.channels() == src.channels() );
+
+ if( !mask.data )
{
- CvAddWeightedFunc func = (CvAddWeightedFunc)acc_tab.fn_2d[CV_MAT_DEPTH(type)];
-
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
+ AccFunc func = 0;
+ if( src.depth() == CV_8U && dst.depth() == CV_32F )
+ func = accSqr_<uchar, float>;
+ else if( src.depth() == CV_32F && dst.depth() == CV_32F )
+ func = accSqr_<float, float>;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src, dst );
+ }
+ else
+ {
+ CV_Assert( mask.size() == src.size() && mask.type() == CV_8UC1 );
+
+ AccMaskFunc func = 0;
+ if( src.type() == CV_8UC1 && dst.type() == CV_32FC1 )
+ func = accSqrMask_<uchar, float>;
+ else if( src.type() == CV_8UC3 && dst.type() == CV_32FC3 )
+ func = accSqrMask_<Vec_<uchar, 3>, Vec_<float, 3> >;
+ else if( src.type() == CV_32FC1 && dst.type() == CV_32FC1 )
+ func = accSqrMask_<float, float>;
+ else if( src.type() == CV_32FC3 && dst.type() == CV_32FC3 )
+ func = accSqrMask_<Vec_<float, 3>, Vec_<float, 3> >;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src, dst, mask );
+ }
+}
- size.width *= CV_MAT_CN(type);
- if( CV_IS_MAT_CONT( mat->type & sum->type ))
- {
- size.width *= size.height;
- mat_step = sum_step = CV_STUB_STEP;
- size.height = 1;
- }
- IPPI_CALL( func( mat->data.ptr, mat_step,
- sum->data.ptr, sum_step, size, (float)alpha ));
+void accumulateProduct( const Mat& src1, const Mat& src2, Mat& dst, const Mat& mask )
+{
+ CV_Assert( dst.size() == src1.size() && dst.channels() == src1.channels() &&
+ src1.size() == src2.size() && src1.type() == src2.type() );
+
+ if( !mask.data )
+ {
+ AccProdFunc func = 0;
+ if( src1.depth() == CV_8U && dst.depth() == CV_32F )
+ func = accProd_<uchar, float>;
+ else if( src1.depth() == CV_32F && dst.depth() == CV_32F )
+ func = accProd_<float, float>;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src1, src2, dst );
}
else
{
- CvAddWeightedMaskFunc func = (CvAddWeightedMaskFunc)accmask_tab.fn_2d[type];
-
- if( !func )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
+ CV_Assert( mask.size() == src1.size() && mask.type() == CV_8UC1 );
+
+ AccProdMaskFunc func = 0;
+ if( src1.type() == CV_8UC1 && dst.type() == CV_32FC1 )
+ func = accProdMask_<uchar, float>;
+ else if( src1.type() == CV_8UC3 && dst.type() == CV_32FC3 )
+ func = accProdMask_<Vec_<uchar, 3>, Vec_<float, 3> >;
+ else if( src1.type() == CV_32FC1 && dst.type() == CV_32FC1 )
+ func = accProdMask_<float, float>;
+ else if( src1.type() == CV_32FC3 && dst.type() == CV_32FC3 )
+ func = accProdMask_<Vec_<float, 3>, Vec_<float, 3> >;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src1, src2, dst, mask );
+ }
+}
- CV_CALL( mask = cvGetMat( mask, &maskstub ));
- if( !CV_IS_MASK_ARR( mask ))
- CV_ERROR( CV_StsBadMask, "" );
+void accumulateWeighted( const Mat& src, Mat& dst, double alpha, const Mat& mask )
+{
+ CV_Assert( dst.size() == src.size() && dst.channels() == src.channels() );
+
+ if( !mask.data )
+ {
+ AccWFunc func = 0;
+ if( src.depth() == CV_8U && dst.depth() == CV_32F )
+ func = accW_<uchar, float>;
+ else if( src.depth() == CV_32F && dst.depth() == CV_32F )
+ func = accW_<float, float>;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src, dst, alpha );
+ }
+ else
+ {
+ CV_Assert( mask.size() == src.size() && mask.type() == CV_8UC1 );
+
+ AccWMaskFunc func = 0;
+ if( src.type() == CV_8UC1 && dst.type() == CV_32FC1 )
+ func = accWMask_<uchar, float>;
+ else if( src.type() == CV_8UC3 && dst.type() == CV_32FC3 )
+ func = accWMask_<Vec_<uchar, 3>, Vec_<float, 3> >;
+ else if( src.type() == CV_32FC1 && dst.type() == CV_32FC1 )
+ func = accWMask_<float, float>;
+ else if( src.type() == CV_32FC3 && dst.type() == CV_32FC3 )
+ func = accWMask_<Vec_<float, 3>, Vec_<float, 3> >;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src, dst, alpha, mask );
+ }
+}
- if( !CV_ARE_SIZES_EQ( mat, mask ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
+}
- mask_step = mask->step;
- if( CV_IS_MAT_CONT( mat->type & sum->type & mask->type ))
- {
- size.width *= size.height;
- mat_step = sum_step = mask_step = CV_STUB_STEP;
- size.height = 1;
- }
+CV_IMPL void
+cvAcc( const void* arr, void* sumarr, const void* maskarr )
+{
+ cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask;
+ if( maskarr )
+ mask = cv::cvarrToMat(maskarr);
+ cv::accumulate( src, dst, mask );
+}
- IPPI_CALL( func( mat->data.ptr, mat_step, mask->data.ptr, mask_step,
- sum->data.ptr, sum_step, size, (float)alpha ));
- }
+CV_IMPL void
+cvSquareAcc( const void* arr, void* sumarr, const void* maskarr )
+{
+ cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask;
+ if( maskarr )
+ mask = cv::cvarrToMat(maskarr);
+ cv::accumulateSquare( src, dst, mask );
+}
- __END__;
+CV_IMPL void
+cvMultiplyAcc( const void* arr1, const void* arr2,
+ void* sumarr, const void* maskarr )
+{
+ cv::Mat src1 = cv::cvarrToMat(arr1), src2 = cv::cvarrToMat(arr2);
+ cv::Mat dst = cv::cvarrToMat(sumarr), mask;
+ if( maskarr )
+ mask = cv::cvarrToMat(maskarr);
+ cv::accumulateProduct( src1, src2, dst, mask );
}
+CV_IMPL void
+cvRunningAvg( const void* arr, void* sumarr, double alpha, const void* maskarr )
+{
+ cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask;
+ if( maskarr )
+ mask = cv::cvarrToMat(maskarr);
+ cv::accumulateWeighted( src, dst, alpha, mask );
+}
/* End of file. */
+++ /dev/null
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-
-static void
-icvAdaptiveThreshold_MeanC( const CvMat* src, CvMat* dst, int method,
- int maxValue, int type, int size, double delta )
-{
- CvMat* mean = 0;
- CV_FUNCNAME( "icvAdaptiveThreshold_MeanC" );
-
- __BEGIN__;
-
- int i, j, rows, cols;
- int idelta = type == CV_THRESH_BINARY ? cvCeil(delta) : cvFloor(delta);
- uchar tab[768];
-
- if( size <= 1 || (size&1) == 0 )
- CV_ERROR( CV_StsOutOfRange, "Neighborhood size must be >=3 and odd (3, 5, 7, ...)" );
-
- if( maxValue < 0 )
- {
- CV_CALL( cvSetZero( dst ));
- EXIT;
- }
-
- rows = src->rows;
- cols = src->cols;
-
- if( src->data.ptr != dst->data.ptr )
- mean = dst;
- else
- CV_CALL( mean = cvCreateMat( rows, cols, CV_8UC1 ));
-
- CV_CALL( cvSmooth( src, mean, method == CV_ADAPTIVE_THRESH_MEAN_C ?
- CV_BLUR : CV_GAUSSIAN, size, size ));
- if( maxValue > 255 )
- maxValue = 255;
-
- if( type == CV_THRESH_BINARY )
- for( i = 0; i < 768; i++ )
- tab[i] = (uchar)(i - 255 > -idelta ? maxValue : 0);
- else
- for( i = 0; i < 768; i++ )
- tab[i] = (uchar)(i - 255 <= -idelta ? maxValue : 0);
-
- for( i = 0; i < rows; i++ )
- {
- const uchar* s = src->data.ptr + i*src->step;
- const uchar* m = mean->data.ptr + i*mean->step;
- uchar* d = dst->data.ptr + i*dst->step;
-
- for( j = 0; j < cols; j++ )
- d[j] = tab[s[j] - m[j] + 255];
- }
-
- __END__;
-
- if( mean != dst )
- cvReleaseMat( &mean );
-}
-
-
-CV_IMPL void
-cvAdaptiveThreshold( const void *srcIm, void *dstIm, double maxValue,
- int method, int type, int blockSize, double param1 )
-{
- CvMat src_stub, dst_stub;
- CvMat *src = 0, *dst = 0;
-
- CV_FUNCNAME( "cvAdaptiveThreshold" );
-
- __BEGIN__;
-
- if( type != CV_THRESH_BINARY && type != CV_THRESH_BINARY_INV )
- CV_ERROR( CV_StsBadArg, "Only CV_TRESH_BINARY and CV_THRESH_BINARY_INV "
- "threshold types are acceptable" );
-
- CV_CALL( src = cvGetMat( srcIm, &src_stub ));
- CV_CALL( dst = cvGetMat( dstIm, &dst_stub ));
-
- if( !CV_ARE_CNS_EQ( src, dst ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( CV_MAT_TYPE(dst->type) != CV_8UC1 )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
-
- if( !CV_ARE_SIZES_EQ( src, dst ) )
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- switch( method )
- {
- case CV_ADAPTIVE_THRESH_MEAN_C:
- case CV_ADAPTIVE_THRESH_GAUSSIAN_C:
- CV_CALL( icvAdaptiveThreshold_MeanC( src, dst, method, cvRound(maxValue),type,
- blockSize, param1 ));
- break;
- default:
- CV_ERROR( CV_BADCOEF_ERR, "" );
- }
-
- __END__;
-}
-
-/* End of file. */
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
double MM[9], U[9], V[9], W[3];
CvScalar Mc;
- double JtJ[6*6], JtErr[6], JtJW[6], JtJV[6*6], delta[6], param[6];
+ double param[6];
CvMat _A = cvMat( 3, 3, CV_64F, a );
CvMat _Ar = cvMat( 3, 3, CV_64F, ar );
CvMat _R = cvMat( 3, 3, CV_64F, R );
CvMat _U = cvMat( 3, 3, CV_64F, U );
CvMat _V = cvMat( 3, 3, CV_64F, V );
CvMat _W = cvMat( 3, 1, CV_64F, W );
- CvMat _JtJ = cvMat( 6, 6, CV_64F, JtJ );
- CvMat _JtErr = cvMat( 6, 1, CV_64F, JtErr );
- CvMat _JtJW = cvMat( 6, 1, CV_64F, JtJW );
- CvMat _JtJV = cvMat( 6, 6, CV_64F, JtJV );
- CvMat _delta = cvMat( 6, 1, CV_64F, delta );
CvMat _param = cvMat( 6, 1, CV_64F, param );
CvMat _dpdr, _dpdt;
cvReshape( _mn, _mn, 2, 1 );
// refine extrinsic parameters using iterative algorithm
-#if 0
- CV_CALL( _J = cvCreateMat( 2*count, 6, CV_64FC1 ));
- cvGetCols( _J, &_dpdr, 0, 3 );
- cvGetCols( _J, &_dpdt, 3, 6 );
-
- for( i = 0; i < max_iter; i++ )
- {
- double n1, n2;
- cvReshape( _mn, _mn, 2, 1 );
- cvProjectPoints2( _M, &_r, &_t, &_A, distCoeffs,
- _mn, &_dpdr, &_dpdt, 0, 0, 0 );
- cvSub( _m, _mn, _mn );
- cvReshape( _mn, _mn, 1, 2*count );
- //printf("reproj err=%g\n", cvNorm(_mn, 0, CV_C));
-
- cvMulTransposed( _J, &_JtJ, 1 );
- cvGEMM( _J, _mn, 1, 0, 0, &_JtErr, CV_GEMM_A_T );
- cvSVD( &_JtJ, &_JtJW, 0, &_JtJV, CV_SVD_MODIFY_A + CV_SVD_V_T );
- if( JtJW[5]/JtJW[0] < 1e-12 )
- break;
- cvSVBkSb( &_JtJW, &_JtJV, &_JtJV, &_JtErr,
- &_delta, CV_SVD_U_T + CV_SVD_V_T );
- cvAdd( &_param, &_delta, &_param );
- n1 = cvNorm( &_delta );
- n2 = cvNorm( &_param );
- if( n1/n2 < 1e-10 )
- break;
- }
- printf("max reproj err=%g\n", cvNorm(_mn, 0, CV_C));
-#else
{
CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true);
cvCopy( &_param, solver.param );
}
cvCopy( solver.param, &_param );
}
-#endif
_r = cvMat( rvec->rows, rvec->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
+++ /dev/null
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cv.h"
-
-/* End of file. */
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// copy or use the software.\r
//\r
//\r
-// Intel License Agreement\r
+// License Agreement\r
// For Open Source Computer Vision Library\r
//\r
-// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
// Third party copyrights are property of their respective owners.\r
//\r
// Redistribution and use in source and binary forms, with or without modification,\r
// this list of conditions and the following disclaimer in the documentation\r
// and/or other materials provided with the distribution.\r
//\r
-// * The name of Intel Corporation may not be used to endorse or promote products\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
// derived from this software without specific prior written permission.\r
//\r
// This software is provided by the copyright holders and contributors "as is" and\r
// copy or use the software.\r
//\r
//\r
-// Intel License Agreement\r
+// License Agreement\r
// For Open Source Computer Vision Library\r
//\r
-// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
// Third party copyrights are property of their respective owners.\r
//\r
// Redistribution and use in source and binary forms, with or without modification,\r
// this list of conditions and the following disclaimer in the documentation\r
// and/or other materials provided with the distribution.\r
//\r
-// * The name of Intel Corporation may not be used to endorse or promote products\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
// derived from this software without specific prior written permission.\r
//\r
// This software is provided by the copyright holders and contributors "as is" and\r
// copy or use the software.\r
//\r
//\r
-// Intel License Agreement\r
+// License Agreement\r
// For Open Source Computer Vision Library\r
//\r
-// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
// Third party copyrights are property of their respective owners.\r
//\r
// Redistribution and use in source and binary forms, with or without modification,\r
// this list of conditions and the following disclaimer in the documentation\r
// and/or other materials provided with the distribution.\r
//\r
-// * The name of Intel Corporation may not be used to endorse or promote products\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
// derived from this software without specific prior written permission.\r
//\r
// This software is provided by the copyright holders and contributors "as is" and\r
Histogram CV_DECL_ALIGNED(16) H[4];\r
HT luc[4][16];\r
\r
- const int STRIPE_SIZE = 256/cn;\r
+ int STRIPE_SIZE = std::min( _dst.cols, 512/cn );\r
\r
Vector<HT> _h_coarse(1 * 16 * (STRIPE_SIZE + 2*r) * cn);\r
Vector<HT> _h_fine(16 * 16 * (STRIPE_SIZE + 2*r) * cn);\r
CV_Assert( src.depth() == CV_8U && (cn == 1 || cn == 3 || cn == 4) );\r
\r
double img_size_mp = (double)(size.width*size.height)/(1 << 20);\r
- if( size.width < ksize*2 || size.height < ksize*2 ||\r
- ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*(MEDIAN_HAVE_SIMD ? 1 : 3))\r
+ if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*(MEDIAN_HAVE_SIMD ? 1 : 3))\r
medianBlur_8u_Om( src, dst, ksize );\r
else\r
medianBlur_8u_O1( src, dst, ksize );\r
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
#include "_cv.h"
-#define ICV_DEF_INTEGRAL_OP_C1( flavor, arrtype, sumtype, sqsumtype, worktype, \
- cast_macro, cast_sqr_macro ) \
-static CvStatus CV_STDCALL \
-icvIntegralImage_##flavor##_C1R( const arrtype* src, int srcstep,\
- sumtype* sum, int sumstep, \
- sqsumtype* sqsum, int sqsumstep,\
- sumtype* tilted, int tiltedstep,\
- CvSize size ) \
-{ \
- int x, y; \
- sumtype s; \
- sqsumtype sq; \
- sumtype* buf = 0; \
- \
- srcstep /= sizeof(src[0]); \
- \
- memset( sum, 0, (size.width+1)*sizeof(sum[0])); \
- sumstep /= sizeof(sum[0]); \
- sum += sumstep + 1; \
- \
- if( sqsum ) \
- { \
- memset( sqsum, 0, (size.width+1)*sizeof(sqsum[0])); \
- sqsumstep /= sizeof(sqsum[0]); \
- sqsum += sqsumstep + 1; \
- } \
- \
- if( tilted ) \
- { \
- memset( tilted, 0, (size.width+1)*sizeof(tilted[0])); \
- tiltedstep /= sizeof(tilted[0]); \
- tilted += tiltedstep + 1; \
- } \
- \
- if( sqsum == 0 && tilted == 0 ) \
- { \
- for( y = 0; y < size.height; y++, src += srcstep, \
- sum += sumstep ) \
- { \
- sum[-1] = 0; \
- for( x = 0, s = 0; x < size.width; x++ ) \
- { \
- sumtype t = cast_macro(src[x]); \
- s += t; \
- sum[x] = sum[x - sumstep] + s; \
- } \
- } \
- } \
- else if( tilted == 0 ) \
- { \
- for( y = 0; y < size.height; y++, src += srcstep, \
- sum += sumstep, sqsum += sqsumstep ) \
- { \
- sum[-1] = 0; \
- sqsum[-1] = 0; \
- \
- for( x = 0, s = 0, sq = 0; x < size.width; x++ ) \
- { \
- worktype it = src[x]; \
- sumtype t = cast_macro(it); \
- sqsumtype tq = cast_sqr_macro(it); \
- s += t; \
- sq += tq; \
- t = sum[x - sumstep] + s; \
- tq = sqsum[x - sqsumstep] + sq; \
- sum[x] = t; \
- sqsum[x] = tq; \
- } \
- } \
- } \
- else \
- { \
- if( sqsum == 0 ) \
- { \
- assert(0); \
- return CV_NULLPTR_ERR; \
- } \
- \
- buf = (sumtype*)cvStackAlloc((size.width + 1 )* sizeof(buf[0]));\
- sum[-1] = tilted[-1] = 0; \
- sqsum[-1] = 0; \
- \
- for( x = 0, s = 0, sq = 0; x < size.width; x++ ) \
- { \
- worktype it = src[x]; \
- sumtype t = cast_macro(it); \
- sqsumtype tq = cast_sqr_macro(it); \
- buf[x] = tilted[x] = t; \
- s += t; \
- sq += tq; \
- sum[x] = s; \
- sqsum[x] = sq; \
- } \
- \
- if( size.width == 1 ) \
- buf[1] = 0; \
- \
- for( y = 1; y < size.height; y++ ) \
- { \
- worktype it; \
- sumtype t0; \
- sqsumtype tq0; \
- \
- src += srcstep; \
- sum += sumstep; \
- sqsum += sqsumstep; \
- tilted += tiltedstep; \
- \
- it = src[0/*x*/]; \
- s = t0 = cast_macro(it); \
- sq = tq0 = cast_sqr_macro(it); \
- \
- sum[-1] = 0; \
- sqsum[-1] = 0; \
- /*tilted[-1] = buf[0];*/ \
- tilted[-1] = tilted[-tiltedstep]; \
- \
- sum[0] = sum[-sumstep] + t0; \
- sqsum[0] = sqsum[-sqsumstep] + tq0; \
- tilted[0] = tilted[-tiltedstep] + t0 + buf[1]; \
- \
- for( x = 1; x < size.width - 1; x++ ) \
- { \
- sumtype t1 = buf[x]; \
- buf[x-1] = t1 + t0; \
- it = src[x]; \
- t0 = cast_macro(it); \
- tq0 = cast_sqr_macro(it); \
- s += t0; \
- sq += tq0; \
- sum[x] = sum[x - sumstep] + s; \
- sqsum[x] = sqsum[x - sqsumstep] + sq; \
- t1 += buf[x+1] + t0 + tilted[x - tiltedstep - 1];\
- tilted[x] = t1; \
- } \
- \
- if( size.width > 1 ) \
- { \
- sumtype t1 = buf[x]; \
- buf[x-1] = t1 + t0; \
- it = src[x]; /*+*/ \
- t0 = cast_macro(it); \
- tq0 = cast_sqr_macro(it); \
- s += t0; \
- sq += tq0; \
- sum[x] = sum[x - sumstep] + s; \
- sqsum[x] = sqsum[x - sqsumstep] + sq; \
- tilted[x] = t0 + t1 + tilted[x - tiltedstep - 1];\
- buf[x] = t0; \
- } \
- } \
- } \
- \
- return CV_OK; \
-}
-
-
-ICV_DEF_INTEGRAL_OP_C1( 8u32s, uchar, int, double, int, CV_NOP, CV_8TO32F_SQR )
-ICV_DEF_INTEGRAL_OP_C1( 8u64f, uchar, double, double, int, CV_8TO32F, CV_8TO32F_SQR )
-ICV_DEF_INTEGRAL_OP_C1( 32f64f, float, double, double, double, CV_NOP, CV_SQR )
-ICV_DEF_INTEGRAL_OP_C1( 64f, double, double, double, double, CV_NOP, CV_SQR )
-
-
-#define ICV_DEF_INTEGRAL_OP_CN( flavor, arrtype, sumtype, sqsumtype, \
- worktype, cast_macro, cast_sqr_macro ) \
-static CvStatus CV_STDCALL \
-icvIntegralImage_##flavor##_CnR( const arrtype* src, int srcstep,\
- sumtype* sum, int sumstep, \
- sqsumtype* sqsum, int sqsumstep,\
- CvSize size, int cn ) \
-{ \
- int x, y; \
- srcstep /= sizeof(src[0]); \
- \
- memset( sum, 0, (size.width+1)*cn*sizeof(sum[0])); \
- sumstep /= sizeof(sum[0]); \
- sum += sumstep + cn; \
- \
- if( sqsum ) \
- { \
- memset( sqsum, 0, (size.width+1)*cn*sizeof(sqsum[0])); \
- sqsumstep /= sizeof(sqsum[0]); \
- sqsum += sqsumstep + cn; \
- } \
- \
- size.width *= cn; \
- \
- if( sqsum == 0 ) \
- { \
- for( y = 0; y < size.height; y++, src += srcstep, \
- sum += sumstep ) \
- { \
- for( x = -cn; x < 0; x++ ) \
- sum[x] = 0; \
- \
- for( x = 0; x < size.width; x++ ) \
- sum[x] = cast_macro(src[x]) + sum[x - cn]; \
- \
- for( x = 0; x < size.width; x++ ) \
- sum[x] = sum[x] + sum[x - sumstep]; \
- } \
- } \
- else \
- { \
- for( y = 0; y < size.height; y++, src += srcstep, \
- sum += sumstep, sqsum += sqsumstep ) \
- { \
- for( x = -cn; x < 0; x++ ) \
- { \
- sum[x] = 0; \
- sqsum[x] = 0; \
- } \
- \
- for( x = 0; x < size.width; x++ ) \
- { \
- worktype it = src[x]; \
- sumtype t = cast_macro(it) + sum[x-cn]; \
- sqsumtype tq = cast_sqr_macro(it) + sqsum[x-cn];\
- sum[x] = t; \
- sqsum[x] = tq; \
- } \
- \
- for( x = 0; x < size.width; x++ ) \
- { \
- sumtype t = sum[x] + sum[x - sumstep]; \
- sqsumtype tq = sqsum[x] + sqsum[x - sqsumstep]; \
- sum[x] = t; \
- sqsum[x] = tq; \
- } \
- } \
- } \
- \
- return CV_OK; \
-}
-
+namespace cv
+{
-ICV_DEF_INTEGRAL_OP_CN( 8u32s, uchar, int, double, int, CV_NOP, CV_8TO32F_SQR )
-ICV_DEF_INTEGRAL_OP_CN( 8u64f, uchar, double, double, int, CV_8TO32F, CV_8TO32F_SQR )
-ICV_DEF_INTEGRAL_OP_CN( 32f64f, float, double, double, double, CV_NOP, CV_SQR )
-ICV_DEF_INTEGRAL_OP_CN( 64f, double, double, double, double, CV_NOP, CV_SQR )
+template<typename QT> inline QT sqr(uchar a) { return a*a; }
+template<typename QT> inline QT sqr(float a) { return a*a; }
+template<typename QT> inline QT sqr(double a) { return a*a; }
+template<> inline double sqr(uchar a) { return CV_8TO32F_SQR(a); }
-static void icvInitIntegralImageTable( CvFuncTable* table_c1, CvFuncTable* table_cn )
+template<typename T, typename ST, typename QT>
+void integral_( const Mat& _src, Mat& _sum, Mat& _sqsum, Mat& _tilted )
{
- table_c1->fn_2d[CV_8U] = (void*)icvIntegralImage_8u64f_C1R;
- table_c1->fn_2d[CV_32F] = (void*)icvIntegralImage_32f64f_C1R;
- table_c1->fn_2d[CV_64F] = (void*)icvIntegralImage_64f_C1R;
-
- table_cn->fn_2d[CV_8U] = (void*)icvIntegralImage_8u64f_CnR;
- table_cn->fn_2d[CV_32F] = (void*)icvIntegralImage_32f64f_CnR;
- table_cn->fn_2d[CV_64F] = (void*)icvIntegralImage_64f_CnR;
-}
+ int cn = _src.channels();
+ Size size = _src.size();
+ int x, y, k;
+ const T* src = (const T*)_src.data;
+ ST* sum = (ST*)_sum.data;
+ ST* tilted = (ST*)_tilted.data;
+ QT* sqsum = (QT*)_sqsum.data;
-typedef CvStatus (CV_STDCALL * CvIntegralImageFuncC1)(
- const void* src, int srcstep, void* sum, int sumstep,
- void* sqsum, int sqsumstep, void* tilted, int tiltedstep,
- CvSize size );
+ int srcstep = _src.step/sizeof(T);
+ int sumstep = _sum.step/sizeof(ST);
+ int tiltedstep = _tilted.step/sizeof(ST);
+ int sqsumstep = _sqsum.step/sizeof(QT);
-typedef CvStatus (CV_STDCALL * CvIntegralImageFuncCn)(
- const void* src, int srcstep, void* sum, int sumstep,
- void* sqsum, int sqsumstep, CvSize size, int cn );
+ size.width *= cn;
-CV_IMPL void
-cvIntegral( const CvArr* image, CvArr* sumImage,
- CvArr* sumSqImage, CvArr* tiltedSumImage )
-{
- static CvFuncTable tab_c1, tab_cn;
- static int inittab = 0;
-
- CV_FUNCNAME( "cvIntegralImage" );
-
- __BEGIN__;
-
- CvMat src_stub, *src = (CvMat*)image;
- CvMat sum_stub, *sum = (CvMat*)sumImage;
- CvMat sqsum_stub, *sqsum = (CvMat*)sumSqImage;
- CvMat tilted_stub, *tilted = (CvMat*)tiltedSumImage;
- int coi0 = 0, coi1 = 0, coi2 = 0, coi3 = 0;
- int depth, cn;
- int src_step, sum_step, sqsum_step, tilted_step;
- CvIntegralImageFuncC1 func_c1 = 0;
- CvIntegralImageFuncCn func_cn = 0;
- CvSize size;
-
- if( !inittab )
- {
- icvInitIntegralImageTable( &tab_c1, &tab_cn );
- inittab = 1;
- }
-
- CV_CALL( src = cvGetMat( src, &src_stub, &coi0 ));
- CV_CALL( sum = cvGetMat( sum, &sum_stub, &coi1 ));
-
- if( sum->width != src->width + 1 ||
- sum->height != src->height + 1 )
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- if( (CV_MAT_DEPTH( sum->type ) != CV_64F &&
- (CV_MAT_DEPTH( src->type ) != CV_8U ||
- CV_MAT_DEPTH( sum->type ) != CV_32S )) ||
- !CV_ARE_CNS_EQ( src, sum ))
- CV_ERROR( CV_StsUnsupportedFormat,
- "Sum array must have 64f type (or 32s type in case of 8u source array) "
- "and the same number of channels as the source array" );
+ memset( sum, 0, (size.width+cn)*sizeof(sum[0]));
+ sum += sumstep + cn;
if( sqsum )
{
- CV_CALL( sqsum = cvGetMat( sqsum, &sqsum_stub, &coi2 ));
- if( !CV_ARE_SIZES_EQ( sum, sqsum ) )
- CV_ERROR( CV_StsUnmatchedSizes, "" );
- if( CV_MAT_DEPTH( sqsum->type ) != CV_64F || !CV_ARE_CNS_EQ( src, sqsum ))
- CV_ERROR( CV_StsUnsupportedFormat,
- "Squares sum array must be 64f "
- "and the same number of channels as the source array" );
+ memset( sqsum, 0, (size.width+cn)*sizeof(sqsum[0]));
+ sqsum += sqsumstep + cn;
}
if( tilted )
{
- if( !sqsum )
- CV_ERROR( CV_StsNullPtr,
- "Squared sum array must be passed if tilted sum array is passed" );
-
- CV_CALL( tilted = cvGetMat( tilted, &tilted_stub, &coi3 ));
- if( !CV_ARE_SIZES_EQ( sum, tilted ) )
- CV_ERROR( CV_StsUnmatchedSizes, "" );
- if( !CV_ARE_TYPES_EQ( sum, tilted ) )
- CV_ERROR( CV_StsUnmatchedFormats,
- "Sum and tilted sum must have the same types" );
- if( CV_MAT_CN(tilted->type) != 1 )
- CV_ERROR( CV_StsNotImplemented,
- "Tilted sum can not be computed for multi-channel arrays" );
+ memset( tilted, 0, (size.width+cn)*sizeof(tilted[0]));
+ tilted += tiltedstep + cn;
}
- if( coi0 || coi1 || coi2 || coi3 )
- CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
-
- depth = CV_MAT_DEPTH(src->type);
- cn = CV_MAT_CN(src->type);
-
- if( CV_MAT_DEPTH( sum->type ) == CV_32S )
+ if( sqsum == 0 && tilted == 0 )
+ {
+ for( y = 0; y < size.height; y++, src += srcstep - cn, sum += sumstep - cn )
+ {
+ for( k = 0; k < cn; k++, src++, sum++ )
+ {
+ ST s = sum[-cn] = 0;
+ for( x = 0; x < size.width; x += cn )
+ {
+ s += src[x];
+ sum[x] = sum[x - sumstep] + s;
+ }
+ }
+ }
+ }
+ else if( tilted == 0 )
{
- func_c1 = (CvIntegralImageFuncC1)icvIntegralImage_8u32s_C1R;
- func_cn = (CvIntegralImageFuncCn)icvIntegralImage_8u32s_CnR;
+ for( y = 0; y < size.height; y++, src += srcstep - cn,
+ sum += sumstep - cn, sqsum += sqsumstep - cn )
+ {
+ for( k = 0; k < cn; k++, src++, sum++, sqsum++ )
+ {
+ ST s = sum[-cn] = 0;
+ QT sq = sqsum[-cn] = 0;
+ for( x = 0; x < size.width; x += cn )
+ {
+ T it = src[x];
+ s += it;
+ sq += sqr<QT>(it);
+ ST t = sum[x - sumstep] + s;
+ QT tq = sqsum[x - sqsumstep] + sq;
+ sum[x] = t;
+ sqsum[x] = tq;
+ }
+ }
+ }
}
else
{
- func_c1 = (CvIntegralImageFuncC1)tab_c1.fn_2d[depth];
- func_cn = (CvIntegralImageFuncCn)tab_cn.fn_2d[depth];
- if( !func_c1 && !func_cn )
- CV_ERROR( CV_StsUnsupportedFormat, "This source image format is unsupported" );
+ AutoBuffer<ST> _buf(size.width+cn);
+ ST* buf = _buf;
+ ST s;
+ QT sq;
+ for( k = 0; k < cn; k++, src++, sum++, tilted++, sqsum++, buf++ )
+ {
+ sum[-cn] = tilted[-cn] = 0;
+ sqsum[-cn] = 0;
+
+ for( x = 0, s = 0, sq = 0; x < size.width; x += cn )
+ {
+ T it = src[x];
+ buf[x] = tilted[x] = it;
+ s += it;
+ sq += sqr<QT>(it);
+ sum[x] = s;
+ sqsum[x] = sq;
+ }
+
+ if( size.width == cn )
+ buf[cn] = 0;
+ }
+
+ for( y = 1; y < size.height; y++ )
+ {
+ src += srcstep - cn;
+ sum += sumstep - cn;
+ sqsum += sqsumstep - cn;
+ tilted += tiltedstep - cn;
+ buf += -cn;
+
+ for( k = 0; k < cn; k++, src++, sum++, sqsum++, tilted++, buf++ )
+ {
+ T it = src[0];
+ ST t0 = s = it;
+ QT tq0 = sq = sqr<QT>(it);
+
+ sum[-cn] = 0;
+ sqsum[-cn] = 0;
+ tilted[-cn] = tilted[-tiltedstep];
+
+ sum[0] = sum[-sumstep] + t0;
+ sqsum[0] = sqsum[-sqsumstep] + tq0;
+ tilted[0] = tilted[-tiltedstep] + t0 + buf[cn];
+
+ for( x = cn; x < size.width - cn; x += cn )
+ {
+ ST t1 = buf[x];
+ buf[x - cn] = t1 + t0;
+ t0 = it = src[x];
+ tq0 = sqr<QT>(it);
+ s += t0;
+ sq += tq0;
+ sum[x] = sum[x - sumstep] + s;
+ sqsum[x] = sqsum[x - sqsumstep] + sq;
+ t1 += buf[x + cn] + t0 + tilted[x - tiltedstep - cn];
+ tilted[x] = t1;
+ }
+
+ if( size.width > cn )
+ {
+ ST t1 = buf[x];
+ buf[x - cn] = t1 + t0;
+ t0 = it = src[x];
+ tq0 = sqr<QT>(it);
+ s += t0;
+ sq += tq0;
+ sum[x] = sum[x - sumstep] + s;
+ sqsum[x] = sqsum[x - sqsumstep] + sq;
+ tilted[x] = t0 + t1 + tilted[x - tiltedstep - cn];
+ buf[x] = t0;
+ }
+ }
+ }
}
+}
+
+typedef void (*IntegralFunc)(const Mat& _src, Mat& _sum, Mat& _sqsum, Mat& _tilted );
- size = cvGetMatSize(src);
- src_step = src->step ? src->step : CV_STUB_STEP;
- sum_step = sum->step ? sum->step : CV_STUB_STEP;
- sqsum_step = !sqsum ? 0 : sqsum->step ? sqsum->step : CV_STUB_STEP;
- tilted_step = !tilted ? 0 : tilted->step ? tilted->step : CV_STUB_STEP;
+static void
+integral( const Mat& src, Mat& sum, Mat* _sqsum, Mat* _tilted, int sdepth )
+{
+ int depth = src.depth(), cn = src.channels();
+ Size isize(src.cols + 1, src.rows+1);
+ Mat sqsum, tilted;
+
+ if( sdepth <= 0 )
+ sdepth = depth == CV_8U ? CV_32S : CV_64F;
+ sdepth = CV_MAT_DEPTH(sdepth);
+ sum.create( isize, CV_MAKETYPE(sdepth, cn) );
+
+ if( _tilted )
+ _tilted->create( isize, CV_MAKETYPE(sdepth, cn) );
+ else
+ _tilted = &tilted;
+
+ if( !_sqsum )
+ _sqsum = &sqsum;
+
+ if( _sqsum != &sqsum || _tilted->data )
+ _sqsum->create( isize, CV_MAKETYPE(CV_64F, cn) );
+
+ IntegralFunc func = 0;
+
+ if( depth == CV_8U && sdepth == CV_32S )
+ func = integral_<uchar, int, double>;
+ else if( depth == CV_8U && sdepth == CV_32F )
+ func = integral_<uchar, float, double>;
+ else if( depth == CV_8U && sdepth == CV_64F )
+ func = integral_<uchar, double, double>;
+ else if( depth == CV_32F && sdepth == CV_64F )
+ func = integral_<float, double, double>;
+ else if( depth == CV_64F && sdepth == CV_64F )
+ func = integral_<double, double, double>;
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ func( src, sum, *_sqsum, *_tilted );
+}
+
+void integral( const Mat& src, Mat& sum, int sdepth )
+{
+ integral( src, sum, 0, 0, sdepth );
+}
- if( cn == 1 )
+void integral( const Mat& src, Mat& sum, Mat& sqsum, int sdepth )
+{
+ integral( src, sum, &sqsum, 0, sdepth );
+}
+
+void integral( const Mat& src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth )
+{
+ integral( src, sum, &sqsum, &tilted, sdepth );
+}
+
+}
+
+
+CV_IMPL void
+cvIntegral( const CvArr* image, CvArr* sumImage,
+ CvArr* sumSqImage, CvArr* tiltedSumImage )
+{
+ cv::Mat src = cv::cvarrToMat(image), sum = cv::cvarrToMat(sumImage), sum0 = sum;
+ cv::Mat sqsum0, sqsum, tilted0, tilted;
+ cv::Mat *psqsum = 0, *ptilted = 0;
+
+ if( sumSqImage )
{
- func_c1( src->data.ptr, src_step, sum->data.ptr, sum_step,
- sqsum ? sqsum->data.ptr : 0, sqsum_step,
- tilted ? tilted->data.ptr : 0, tilted_step, size );
+ sqsum0 = sqsum = cv::cvarrToMat(sumSqImage);
+ psqsum = &sqsum;
}
- else
+
+ if( tiltedSumImage )
{
- func_cn( src->data.ptr, src_step, sum->data.ptr, sum_step,
- sqsum ? sqsum->data.ptr : 0, sqsum_step, size, cn );
+ tilted0 = tilted = cv::cvarrToMat(tiltedSumImage);
+ ptilted = &tilted;
}
+ cv::integral( src, sum, psqsum, ptilted, sum.depth() );
- __END__;
+ CV_Assert( sum.data == sum0.data && sqsum.data == sqsum0.data && tilted.data == tilted0.data );
}
-
/* End of file. */
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
#include "_cv.h"
-static CvStatus CV_STDCALL
-icvThresh_8u_C1R( const uchar* src, int src_step, uchar* dst, int dst_step,
- CvSize roi, uchar thresh, uchar maxval, int type )
+namespace cv
+{
+
+static void
+thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
{
int i, j;
uchar tab[256];
+ Size roi = _src.size();
+ roi.width *= _src.channels();
+#if CV_SSE2
+ __m128i _x80 = _mm_set1_epi8('\x80');
+ __m128i thresh_u = _mm_set1_epi8(thresh);
+ __m128i thresh_s = _mm_set1_epi8(thresh ^ 0x80);
+ __m128i maxval_ = _mm_set1_epi8(maxval);
+#endif
+
+ if( _src.isContinuous() && _dst.isContinuous() )
+ {
+ roi.width *= roi.height;
+ roi.height = 1;
+ }
switch( type )
{
- case CV_THRESH_BINARY:
+ case THRESH_BINARY:
for( i = 0; i <= thresh; i++ )
tab[i] = 0;
for( ; i < 256; i++ )
tab[i] = maxval;
break;
- case CV_THRESH_BINARY_INV:
+ case THRESH_BINARY_INV:
for( i = 0; i <= thresh; i++ )
tab[i] = maxval;
for( ; i < 256; i++ )
tab[i] = 0;
break;
- case CV_THRESH_TRUNC:
+ case THRESH_TRUNC:
for( i = 0; i <= thresh; i++ )
tab[i] = (uchar)i;
for( ; i < 256; i++ )
tab[i] = thresh;
break;
- case CV_THRESH_TOZERO:
+ case THRESH_TOZERO:
for( i = 0; i <= thresh; i++ )
tab[i] = 0;
for( ; i < 256; i++ )
tab[i] = (uchar)i;
break;
- case CV_THRESH_TOZERO_INV:
+ case THRESH_TOZERO_INV:
for( i = 0; i <= thresh; i++ )
tab[i] = (uchar)i;
for( ; i < 256; i++ )
tab[i] = 0;
break;
default:
- return CV_BADFLAG_ERR;
+ CV_Error( CV_StsBadArg, "Unknown threshold type" );
}
- for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
+ for( i = 0; i < roi.height; i++ )
{
- for( j = 0; j <= roi.width - 4; j += 4 )
+ const uchar* src = (const uchar*)(_src.data + _src.step*i);
+ uchar* dst = (uchar*)(_dst.data + _dst.step*i);
+ j = 0;
+
+ #if CV_SSE2
+ switch( type )
+ {
+ case THRESH_BINARY:
+ for( ; j <= roi.width - 32; j += 32 )
+ {
+ __m128i v0, v1;
+ v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
+ v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
+ v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
+ v1 = _mm_cmpgt_epi8( _mm_xor_si128(v1, _x80), thresh_s );
+ v0 = _mm_and_si128( v0, maxval_ );
+ v1 = _mm_and_si128( v1, maxval_ );
+ _mm_storeu_si128( (__m128i*)(dst + j), v0 );
+ _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
+ }
+
+ for( ; j <= roi.width - 8; j += 8 )
+ {
+ __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
+ v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
+ v0 = _mm_and_si128( v0, maxval_ );
+ _mm_storel_epi64( (__m128i*)(dst + j), v0 );
+ }
+ break;
+
+ case THRESH_BINARY_INV:
+ for( ; j <= roi.width - 32; j += 32 )
+ {
+ __m128i v0, v1;
+ v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
+ v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
+ v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
+ v1 = _mm_cmpgt_epi8( _mm_xor_si128(v1, _x80), thresh_s );
+ v0 = _mm_andnot_si128( v0, maxval_ );
+ v1 = _mm_andnot_si128( v1, maxval_ );
+ _mm_storeu_si128( (__m128i*)(dst + j), v0 );
+ _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
+ }
+
+ for( ; j <= roi.width - 8; j += 8 )
+ {
+ __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
+ v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
+ v0 = _mm_andnot_si128( v0, maxval_ );
+ _mm_storel_epi64( (__m128i*)(dst + j), v0 );
+ }
+ break;
+
+ case THRESH_TRUNC:
+ for( ; j <= roi.width - 32; j += 32 )
+ {
+ __m128i v0, v1;
+ v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
+ v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
+ v0 = _mm_subs_epu8( v0, _mm_subs_epu8( v0, thresh_u ));
+ v1 = _mm_subs_epu8( v1, _mm_subs_epu8( v1, thresh_u ));
+ _mm_storeu_si128( (__m128i*)(dst + j), v0 );
+ _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
+ }
+
+ for( ; j <= roi.width - 8; j += 8 )
+ {
+ __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
+ v0 = _mm_subs_epu8( v0, _mm_subs_epu8( v0, thresh_u ));
+ _mm_storel_epi64( (__m128i*)(dst + j), v0 );
+ }
+ break;
+
+ case THRESH_TOZERO:
+ for( ; j <= roi.width - 32; j += 32 )
+ {
+ __m128i v0, v1;
+ v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
+ v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
+ v0 = _mm_and_si128( v0, _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ));
+ v1 = _mm_and_si128( v1, _mm_cmpgt_epi8(_mm_xor_si128(v1, _x80), thresh_s ));
+ _mm_storeu_si128( (__m128i*)(dst + j), v0 );
+ _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
+ }
+
+ for( ; j <= roi.width - 8; j += 8 )
+ {
+ __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
+ v0 = _mm_and_si128( v0, _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ));
+ _mm_storel_epi64( (__m128i*)(dst + j), v0 );
+ }
+ break;
+
+ case THRESH_TOZERO_INV:
+ for( ; j <= roi.width - 32; j += 32 )
+ {
+ __m128i v0, v1;
+ v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
+ v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
+ v0 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ), v0 );
+ v1 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v1, _x80), thresh_s ), v1 );
+ _mm_storeu_si128( (__m128i*)(dst + j), v0 );
+ _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
+ }
+
+ for( ; j <= roi.width - 8; j += 8 )
+ {
+ __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
+ v0 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ), v0 );
+ _mm_storel_epi64( (__m128i*)(dst + j), v0 );
+ }
+ break;
+ }
+ #endif
+
+        for( ; j <= roi.width - 4; j += 4 )
        {
            uchar t0 = tab[src[j]];
            uchar t1 = tab[src[j+1]];

            dst[j] = t0;
            dst[j+1] = t1;

            t0 = tab[src[j+2]];
            t1 = tab[src[j+3]];

            dst[j+2] = t0;
            dst[j+3] = t1;
        }

        for( ; j < roi.width; j++ )
            dst[j] = tab[src[j]];
    }
-
- return CV_NO_ERR;
}
-static CvStatus CV_STDCALL
-icvThresh_32f_C1R( const float *src, int src_step, float *dst, int dst_step,
- CvSize roi, float thresh, float maxval, int type )
+static void
+thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
{
int i, j;
- const int* isrc = (const int*)src;
- int* idst = (int*)dst;
- Cv32suf v;
- int iThresh, iMax;
-
- v.f = thresh; iThresh = CV_TOGGLE_FLT(v.i);
- v.f = maxval; iMax = v.i;
-
- src_step /= sizeof(src[0]);
- dst_step /= sizeof(dst[0]);
+ Size roi = _src.size();
+ roi.width *= _src.channels();
+ const float* src = (const float*)_src.data;
+ float* dst = (float*)_dst.data;
+ int src_step = _src.step/sizeof(src[0]);
+ int dst_step = _dst.step/sizeof(dst[0]);
+#if CV_SSE2
+ __m128 thresh4 = _mm_set1_ps(thresh), maxval4 = _mm_set1_ps(maxval);
+#endif
+
+ if( _src.isContinuous() && _dst.isContinuous() )
+ {
+ roi.width *= roi.height;
+ roi.height = 1;
+ }
switch( type )
{
- case CV_THRESH_BINARY:
- for( i = 0; i < roi.height; i++, isrc += src_step, idst += dst_step )
+ case THRESH_BINARY:
+ for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
- for( j = 0; j < roi.width; j++ )
+ j = 0;
+ #if CV_SSE2
+ for( ; j <= roi.width - 8; j += 8 )
{
- int temp = isrc[j];
- idst[j] = ((CV_TOGGLE_FLT(temp) <= iThresh) - 1) & iMax;
+ __m128 v0, v1;
+ v0 = _mm_loadu_ps( src + j );
+ v1 = _mm_loadu_ps( src + j + 4 );
+ v0 = _mm_cmpgt_ps( v0, thresh4 );
+ v1 = _mm_cmpgt_ps( v1, thresh4 );
+ v0 = _mm_and_ps( v0, maxval4 );
+ v1 = _mm_and_ps( v1, maxval4 );
+ _mm_storeu_ps( dst + j, v0 );
+ _mm_storeu_ps( dst + j + 4, v1 );
}
+ #endif
+
+ for( ; j < roi.width; j++ )
+ dst[j] = src[j] > thresh ? maxval : 0;
}
break;
- case CV_THRESH_BINARY_INV:
- for( i = 0; i < roi.height; i++, isrc += src_step, idst += dst_step )
+ case THRESH_BINARY_INV:
+ for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
- for( j = 0; j < roi.width; j++ )
+ j = 0;
+ #if CV_SSE2
+ for( ; j <= roi.width - 8; j += 8 )
{
- int temp = isrc[j];
- idst[j] = ((CV_TOGGLE_FLT(temp) > iThresh) - 1) & iMax;
+ __m128 v0, v1;
+ v0 = _mm_loadu_ps( src + j );
+ v1 = _mm_loadu_ps( src + j + 4 );
+ v0 = _mm_cmple_ps( v0, thresh4 );
+ v1 = _mm_cmple_ps( v1, thresh4 );
+ v0 = _mm_and_ps( v0, maxval4 );
+ v1 = _mm_and_ps( v1, maxval4 );
+ _mm_storeu_ps( dst + j, v0 );
+ _mm_storeu_ps( dst + j + 4, v1 );
}
+ #endif
+
+ for( ; j < roi.width; j++ )
+ dst[j] = src[j] <= thresh ? maxval : 0;
}
break;
- case CV_THRESH_TRUNC:
+ case THRESH_TRUNC:
for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
- for( j = 0; j < roi.width; j++ )
+ j = 0;
+ #if CV_SSE2
+ for( ; j <= roi.width - 8; j += 8 )
{
- float temp = src[j];
-
- if( temp > thresh )
- temp = thresh;
- dst[j] = temp;
+ __m128 v0, v1;
+ v0 = _mm_loadu_ps( src + j );
+ v1 = _mm_loadu_ps( src + j + 4 );
+ v0 = _mm_min_ps( v0, thresh4 );
+ v1 = _mm_min_ps( v1, thresh4 );
+ _mm_storeu_ps( dst + j, v0 );
+ _mm_storeu_ps( dst + j + 4, v1 );
}
+ #endif
+
+ for( ; j < roi.width; j++ )
+ dst[j] = std::min(src[j], thresh);
}
break;
- case CV_THRESH_TOZERO:
- for( i = 0; i < roi.height; i++, isrc += src_step, idst += dst_step )
+ case THRESH_TOZERO:
+ for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
- for( j = 0; j < roi.width; j++ )
+ j = 0;
+ #if CV_SSE2
+ for( ; j <= roi.width - 8; j += 8 )
{
- int temp = isrc[j];
- idst[j] = ((CV_TOGGLE_FLT( temp ) <= iThresh) - 1) & temp;
+ __m128 v0, v1;
+ v0 = _mm_loadu_ps( src + j );
+ v1 = _mm_loadu_ps( src + j + 4 );
+ v0 = _mm_and_ps(v0, _mm_cmpgt_ps(v0, thresh4));
+ v1 = _mm_and_ps(v1, _mm_cmpgt_ps(v1, thresh4));
+ _mm_storeu_ps( dst + j, v0 );
+ _mm_storeu_ps( dst + j + 4, v1 );
+ }
+ #endif
+
+ for( ; j < roi.width; j++ )
+ {
+ float v = src[j];
+ dst[j] = v > thresh ? v : 0;
}
}
break;
- case CV_THRESH_TOZERO_INV:
- for( i = 0; i < roi.height; i++, isrc += src_step, idst += dst_step )
+ case THRESH_TOZERO_INV:
+ for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
- for( j = 0; j < roi.width; j++ )
+ j = 0;
+ #if CV_SSE2
+ for( ; j <= roi.width - 8; j += 8 )
{
- int temp = isrc[j];
- idst[j] = ((CV_TOGGLE_FLT( temp ) > iThresh) - 1) & temp;
+ __m128 v0, v1;
+ v0 = _mm_loadu_ps( src + j );
+ v1 = _mm_loadu_ps( src + j + 4 );
+ v0 = _mm_and_ps(v0, _mm_cmple_ps(v0, thresh4));
+ v1 = _mm_and_ps(v1, _mm_cmple_ps(v1, thresh4));
+ _mm_storeu_ps( dst + j, v0 );
+ _mm_storeu_ps( dst + j + 4, v1 );
+ }
+ #endif
+ for( ; j < roi.width; j++ )
+ {
+ float v = src[j];
+ dst[j] = v <= thresh ? v : 0;
}
}
break;
-
default:
- return CV_BADFLAG_ERR;
+ return CV_Error( CV_StsBadArg, "" );
}
-
- return CV_OK;
}
static double
-icvGetThreshVal_Otsu( const CvHistogram* hist )
+getThreshVal_Otsu_8u( const Mat& _src )
{
- double max_val = 0;
-
- CV_FUNCNAME( "icvGetThreshVal_Otsu" );
-
- __BEGIN__;
-
- int i, count;
- const float* h;
- double sum = 0, mu = 0;
- bool uniform = false;
- double low = 0, high = 0, delta = 0;
- float* nu_thresh = 0;
- double mu1 = 0, q1 = 0;
- double max_sigma = 0;
-
- if( !CV_IS_HIST(hist) || CV_IS_SPARSE_HIST(hist) || hist->mat.dims != 1 )
- CV_ERROR( CV_StsBadArg,
- "The histogram in Otsu method must be a valid dense 1D histogram" );
-
- count = hist->mat.dim[0].size;
- h = (float*)cvPtr1D( hist->bins, 0 );
-
- if( !CV_HIST_HAS_RANGES(hist) || CV_IS_UNIFORM_HIST(hist) )
+ Size size = _src.size();
+ if( _src.isContinuous() )
{
- if( CV_HIST_HAS_RANGES(hist) )
- {
- low = hist->thresh[0][0];
- high = hist->thresh[0][1];
- }
- else
+ size.width *= size.height;
+ size.height = 1;
+ }
+ const int N = 256;
+ int i, j, h[N] = {0};
+ for( i = 0; i < size.height; i++ )
+ {
+ const uchar* src = _src.data + _src.step*i;
+ for( j = 0; j <= size.width - 4; j += 4 )
{
- low = 0;
- high = count;
+ int v0 = src[j], v1 = src[j+1];
+ h[v0]++; h[v1]++;
+ v0 = src[j+2]; v1 = src[j+3];
+ h[v0]++; h[v1]++;
}
-
- delta = (high-low)/count;
- low += delta*0.5;
- uniform = true;
+ for( ; j < size.width; j++ )
+ h[src[j]]++;
}
- else
- nu_thresh = hist->thresh2[0];
- for( i = 0; i < count; i++ )
- {
- sum += h[i];
- if( uniform )
- mu += (i*delta + low)*h[i];
- else
- mu += (nu_thresh[i*2] + nu_thresh[i*2+1])*0.5*h[i];
- }
+ double mu = 0, scale = 1./(size.width*size.height);
+ for( i = 0; i < N; i++ )
+ mu += i*h[i];
- sum = fabs(sum) > FLT_EPSILON ? 1./sum : 0;
- mu *= sum;
-
- mu1 = 0;
- q1 = 0;
+ mu *= scale;
+ double mu1 = 0, q1 = 0;
+ double max_sigma = 0, max_val = 0;
- for( i = 0; i < count; i++ )
+ for( i = 0; i < N; i++ )
{
- double p_i, q2, mu2, val_i, sigma;
+ double p_i, q2, mu2, sigma;
- p_i = h[i]*sum;
+ p_i = h[i]*scale;
mu1 *= q1;
q1 += p_i;
q2 = 1. - q1;
- if( MIN(q1,q2) < FLT_EPSILON || MAX(q1,q2) > 1. - FLT_EPSILON )
+ if( std::min(q1,q2) < FLT_EPSILON || std::max(q1,q2) > 1. - FLT_EPSILON )
continue;
- if( uniform )
- val_i = i*delta + low;
- else
- val_i = (nu_thresh[i*2] + nu_thresh[i*2+1])*0.5;
-
- mu1 = (mu1 + val_i*p_i)/q1;
+ mu1 = (mu1 + i*p_i)/q1;
mu2 = (mu - q1*mu1)/q2;
sigma = q1*q2*(mu1 - mu2)*(mu1 - mu2);
if( sigma > max_sigma )
{
max_sigma = sigma;
- max_val = val_i;
+ max_val = i;
}
}
- __END__;
-
return max_val;
}
-CV_IMPL double
-cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type )
+double threshold( const Mat& _src, Mat& _dst, double thresh, double maxval, int type )
{
- CvHistogram* hist = 0;
-
- CV_FUNCNAME( "cvThreshold" );
-
- __BEGIN__;
-
- CvSize roi;
- int src_step, dst_step;
- CvMat src_stub, *src = (CvMat*)srcarr;
- CvMat dst_stub, *dst = (CvMat*)dstarr;
- CvMat src0, dst0;
- int coi1 = 0, coi2 = 0;
- int ithresh, imaxval, cn;
- bool use_otsu;
-
- CV_CALL( src = cvGetMat( src, &src_stub, &coi1 ));
- CV_CALL( dst = cvGetMat( dst, &dst_stub, &coi2 ));
-
- if( coi1 + coi2 )
- CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
-
- if( !CV_ARE_CNS_EQ( src, dst ) )
- CV_ERROR( CV_StsUnmatchedFormats, "Both arrays must have equal number of channels" );
-
- cn = CV_MAT_CN(src->type);
- if( cn > 1 )
- {
- src = cvReshape( src, &src0, 1 );
- dst = cvReshape( dst, &dst0, 1 );
- }
-
- use_otsu = (type & ~CV_THRESH_MASK) == CV_THRESH_OTSU;
- type &= CV_THRESH_MASK;
+ bool use_otsu = (type & THRESH_OTSU) != 0;
+ type &= THRESH_MASK;
if( use_otsu )
{
- float _ranges[] = { 0, 256 };
- float* ranges = _ranges;
- int hist_size = 256;
- void* srcarr0 = src;
-
- if( CV_MAT_TYPE(src->type) != CV_8UC1 )
- CV_ERROR( CV_StsNotImplemented, "Otsu method can only be used with 8uC1 images" );
-
- CV_CALL( hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges ));
- cvCalcArrHist( &srcarr0, hist );
- thresh = cvFloor(icvGetThreshVal_Otsu( hist ));
+ CV_Assert( _src.type() == CV_8UC1 );
+ thresh = getThreshVal_Otsu_8u(_src);
}
-
- if( !CV_ARE_DEPTHS_EQ( src, dst ) )
+
+ _dst.create( _src.size(), _src.type() );
+ if( _src.depth() == CV_8U )
{
- if( CV_MAT_TYPE(dst->type) != CV_8UC1 )
- CV_ERROR( CV_StsUnsupportedFormat, "In case of different types destination should be 8uC1" );
-
- if( type != CV_THRESH_BINARY && type != CV_THRESH_BINARY_INV )
- CV_ERROR( CV_StsBadArg,
- "In case of different types only CV_THRESH_BINARY "
- "and CV_THRESH_BINARY_INV thresholding types are supported" );
+ int ithresh = cvFloor(thresh);
+ thresh = ithresh;
+ int imaxval = cvRound(maxval);
+ if( type == THRESH_TRUNC )
+ imaxval = ithresh;
+ imaxval = saturate_cast<uchar>(imaxval);
- if( maxval < 0 )
+ if( ithresh < 0 || ithresh >= 255 )
{
- CV_CALL( cvSetZero( dst ));
+ if( type == THRESH_BINARY || type == THRESH_BINARY_INV ||
+ ((type == THRESH_TRUNC || type == THRESH_TOZERO_INV) && ithresh < 0) ||
+ (type == THRESH_TOZERO && ithresh >= 255) )
+ {
+ int v = type == THRESH_BINARY ? (ithresh >= 255 ? 0 : imaxval) :
+ type == THRESH_BINARY_INV ? (ithresh >= 255 ? imaxval : 0) :
+ type == THRESH_TRUNC ? imaxval : 0;
+ _dst = Scalar::all(v);
+ }
+ else
+ _src.copyTo(_dst);
}
else
- {
- CV_CALL( cvCmpS( src, thresh, dst, type == CV_THRESH_BINARY ? CV_CMP_GT : CV_CMP_LE ));
- if( maxval < 255 )
- CV_CALL( cvAndS( dst, cvScalarAll( maxval ), dst ));
- }
- EXIT;
+ thresh_8u( _src, _dst, (uchar)ithresh, (uchar)imaxval, type );
}
+ else if( _src.depth() == CV_32F )
+ thresh_32f( _src, _dst, (float)thresh, (float)maxval, type );
+ else
+ CV_Error( CV_StsUnsupportedFormat, "" );
+
+ return thresh;
+}
+
- if( !CV_ARE_SIZES_EQ( src, dst ) )
- CV_ERROR( CV_StsUnmatchedSizes, "" );
+void adaptiveThreshold( const Mat& _src, Mat& _dst, double maxValue,
+ int method, int type, int blockSize, double delta )
+{
+ CV_Assert( _src.type() == CV_8UC1 );
+ CV_Assert( blockSize % 2 == 1 && blockSize > 1 );
+ Size size = _src.size();
+
+ _dst.create( size, _src.type() );
- roi = cvGetMatSize( src );
- if( CV_IS_MAT_CONT( src->type & dst->type ))
+ if( maxValue < 0 )
{
- roi.width *= roi.height;
- roi.height = 1;
- src_step = dst_step = CV_STUB_STEP;
+ _dst = Scalar(0);
+ return;
}
+
+ Mat _mean;
+
+ if( _src.data != _dst.data )
+ _mean = _dst;
+
+ if( method == ADAPTIVE_THRESH_MEAN_C )
+ boxFilter( _src, _mean, _src.type(), Size(blockSize, blockSize),
+ Point(-1,-1), true, BORDER_REPLICATE );
+ else if( method == ADAPTIVE_THRESH_GAUSSIAN_C )
+ GaussianBlur( _src, _mean, Size(blockSize, blockSize), 0, 0, BORDER_REPLICATE );
+ else
+ CV_Error( CV_StsBadFlag, "Unknown/unsupported adaptive threshold method" );
+
+ int i, j;
+ uchar imaxval = saturate_cast<uchar>(maxValue);
+ int idelta = type == THRESH_BINARY ? cvCeil(delta) : cvFloor(delta);
+ uchar tab[768];
+
+ if( type == CV_THRESH_BINARY )
+ for( i = 0; i < 768; i++ )
+ tab[i] = (uchar)(i - 255 > -idelta ? imaxval : 0);
+ else if( type == CV_THRESH_BINARY_INV )
+ for( i = 0; i < 768; i++ )
+ tab[i] = (uchar)(i - 255 <= -idelta ? imaxval : 0);
else
+ CV_Error( CV_StsBadFlag, "Unknown/unsupported threshold type" );
+
+ if( _src.isContinuous() && _mean.isContinuous() && _dst.isContinuous() )
{
- src_step = src->step;
- dst_step = dst->step;
+ size.width *= size.height;
+ size.height = 1;
}
- switch( CV_MAT_DEPTH(src->type) )
+ for( i = 0; i < size.height; i++ )
{
- case CV_8U:
-
- ithresh = cvFloor(thresh);
- imaxval = cvRound(maxval);
- if( type == CV_THRESH_TRUNC )
- imaxval = ithresh;
- imaxval = CV_CAST_8U(imaxval);
+ const uchar* src = _src.data + _src.step*i;
+ const uchar* mean = _mean.data + _mean.step*i;
+ uchar* dst = _dst.data + _dst.step*i;
- if( ithresh < 0 || ithresh >= 255 )
- {
- if( type == CV_THRESH_BINARY || type == CV_THRESH_BINARY_INV ||
- ((type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV) && ithresh < 0) ||
- (type == CV_THRESH_TOZERO && ithresh >= 255) )
- {
- int v = type == CV_THRESH_BINARY ? (ithresh >= 255 ? 0 : imaxval) :
- type == CV_THRESH_BINARY_INV ? (ithresh >= 255 ? imaxval : 0) :
- type == CV_THRESH_TRUNC ? imaxval : 0;
-
- cvSet( dst, cvScalarAll(v) );
- EXIT;
- }
- else
- {
- cvCopy( src, dst );
- EXIT;
- }
- }
-
- /*if( type == CV_THRESH_BINARY || type == CV_THRESH_BINARY_INV )
- {
- if( icvCompareC_8u_C1R_cv_p && icvAndC_8u_C1R_p )
- {
- IPPI_CALL( icvCompareC_8u_C1R_cv_p( src->data.ptr, src_step,
- (uchar)ithresh, dst->data.ptr, dst_step, roi,
- type == CV_THRESH_BINARY ? cvCmpGreater : cvCmpLessEq ));
-
- if( imaxval < 255 )
- IPPI_CALL( icvAndC_8u_C1R_p( dst->data.ptr, dst_step,
- (uchar)imaxval, dst->data.ptr, dst_step, roi ));
- EXIT;
- }
- }
- else if( type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV )
- {
- if( icvThreshold_GTVal_8u_C1R_p )
- {
- IPPI_CALL( icvThreshold_GTVal_8u_C1R_p( src->data.ptr, src_step,
- dst->data.ptr, dst_step, roi, (uchar)ithresh,
- (uchar)(type == CV_THRESH_TRUNC ? ithresh : 0) ));
- EXIT;
- }
- }
- else
- {
- assert( type == CV_THRESH_TOZERO );
- if( icvThreshold_LTVal_8u_C1R_p )
- {
- ithresh = cvFloor(thresh+1.);
- ithresh = CV_CAST_8U(ithresh);
- IPPI_CALL( icvThreshold_LTVal_8u_C1R_p( src->data.ptr, src_step,
- dst->data.ptr, dst_step, roi, (uchar)ithresh, 0 ));
- EXIT;
- }
- }*/
+ for( j = 0; j < size.width; j++ )
+ dst[j] = tab[src[j] - mean[j] + 255];
+ }
+}
- icvThresh_8u_C1R( src->data.ptr, src_step,
- dst->data.ptr, dst_step, roi,
- (uchar)ithresh, (uchar)imaxval, type );
- break;
- case CV_32F:
+}
- /*if( type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV )
- {
- if( icvThreshold_GTVal_32f_C1R_p )
- {
- IPPI_CALL( icvThreshold_GTVal_32f_C1R_p( src->data.fl, src_step,
- dst->data.fl, dst_step, roi, (float)thresh,
- type == CV_THRESH_TRUNC ? (float)thresh : 0 ));
- EXIT;
- }
- }
- else if( type == CV_THRESH_TOZERO )
- {
- if( icvThreshold_LTVal_32f_C1R_p )
- {
- IPPI_CALL( icvThreshold_LTVal_32f_C1R_p( src->data.fl, src_step,
- dst->data.fl, dst_step, roi, (float)(thresh*(1 + FLT_EPSILON)), 0 ));
- EXIT;
- }
- }*/
+CV_IMPL double
+cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type )
+{
+ cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
- icvThresh_32f_C1R( src->data.fl, src_step, dst->data.fl, dst_step, roi,
- (float)thresh, (float)maxval, type );
- break;
- default:
- CV_ERROR( CV_BadDepth, cvUnsupportedFormat );
- }
+ CV_Assert( src.size() == dst.size() && src.channels() == dst.channels() &&
+ (src.depth() == dst.depth() || dst.depth() == CV_8U));
- __END__;
+ thresh = cv::threshold( src, dst, thresh, maxval, type );
+ if( dst0.data != dst.data )
+ dst.convertTo( dst0, dst0.depth() );
+ return thresh;
+}
- if( hist )
- cvReleaseHist( &hist );
- return thresh;
+CV_IMPL void
+cvAdaptiveThreshold( const void *srcIm, void *dstIm, double maxValue,
+ int method, int type, int blockSize, double delta )
+{
+ cv::Mat src = cv::cvarrToMat(srcIm), dst = cv::cvarrToMat(dstIm);
+ CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
+ cv::adaptiveThreshold( src, dst, maxValue, method, type, blockSize, delta );
}
/* End of file. */
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
for( i = 0; i < dstroi.height; i++, idst += dststep )
{
if( idst + left != isrc )
- for( j = 0; j < srcroi.width; j++ )
- idst[j + left] = isrc[j];
+ memcpy( idst + left, isrc, srcroi.width*sizeof(idst[0]) );
for( j = left - 1; j >= 0; j-- )
idst[j] = idst[j + cn];
for( j = left+srcroi.width; j < dstroi.width; j++ )
for( i = 0; i < dstroi.height; i++, dst += dststep )
{
if( dst + left != src )
- for( j = 0; j < srcroi.width; j++ )
- dst[j + left] = src[j];
+ memcpy( dst + left, src, srcroi.width );
for( j = left - 1; j >= 0; j-- )
dst[j] = dst[j + cn];
for( j = left+srcroi.width; j < dstroi.width; j++ )
for( i = 0; i < srcroi.height; i++, isrc += srcstep, idst += dststep )
{
if( idst + left != isrc )
- for( j = 0; j < srcroi.width; j++ )
- idst[j + left] = isrc[j];
+ memcpy( idst + left, isrc, srcroi.width*sizeof(idst[0]) );
for( j = 0; j < left; j++ )
{
k = tab[j];
for( i = 0; i < srcroi.height; i++, src += srcstep, dst += dststep )
{
if( dst + left != src )
- for( j = 0; j < srcroi.width; j++ )
- dst[j + left] = src[j];
+ memcpy( dst + left, src, srcroi.width );
for( j = 0; j < left; j++ )
{
k = tab[j];
if( cvTsRandInt(rng) % 3 > 0 )
{
sizes[OUTPUT][1] = sizes[REF_OUTPUT][1] = sum_size;
- if( cvTsRandInt(rng) % 2 > 0 && cn == 1 )
+ if( cvTsRandInt(rng) % 2 > 0 )
sizes[REF_OUTPUT][2] = sizes[OUTPUT][2] = sum_size;
}