extern "C" {
#endif
+/* Base-10 logarithm of a single-precision value: returns log10(*x).
+   0.43429448190325182765 = log10(e) = 1/ln(10). */
+static __inline double r_lg10(real *x)
+{
+    return 0.43429448190325182765*log(*x);
+}
+
static __inline double d_lg10(doublereal *x)
{
return 0.43429448190325182765*log(*x);
--- /dev/null
+#include "clapack.h"
+
+/* Table of constant values */
+
+/* f2c convention: the Fortran-derived routines below take every scalar
+   argument by pointer, so constants used in calls are materialized as
+   file-scope variables and passed by address (e.g. &c__1, &c_b18). */
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static real c_b18 = 1.f;
+static real c_b22 = -1.f;
+
+/* Subroutine */ int strtri_(char *uplo, char *diag, integer *n, real *a,
+        integer *lda, integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, i__1, i__2[2], i__3, i__4, i__5;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    integer j, jb, nb, nn;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+            integer *, integer *, real *, real *, integer *, real *,
+            integer *), strsm_(char *, char *, char *, char *, integer *,
+            integer *, real *, real *, integer *, real *, integer *),
+            strti2_(char *, char *, integer *, real *, integer *, integer *),
+            xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+            integer *, integer *);
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRTRI computes the inverse of a real upper or lower triangular */
+/* matrix A. */
+
+/* This is the Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift 'a' so that 1-based Fortran indexing a[i + j*a_dim1] works. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+        *info = -1;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+        *info = -2;
+    } else if (*n < 0) {
+        *info = -3;
+    } else if (*lda < max(1,*n)) {
+        *info = -5;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("STRTRI", &i__1);
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+        return 0;
+    }
+
+/* Check for singularity if non-unit. */
+/* (*info itself is the loop counter, so an early return leaves it holding */
+/* the 1-based index of the first zero diagonal element, per the contract */
+/* above.) */
+
+    if (nounit) {
+        i__1 = *n;
+        for (*info = 1; *info <= i__1; ++(*info)) {
+            if (a[*info + *info * a_dim1] == 0.f) {
+                return 0;
+            }
+/* L10: */
+        }
+        *info = 0;
+    }
+
+/* Determine the block size for this environment. */
+
+/* Writing concatenation */
+/* ch__1 = UPLO // DIAG, the 2-character option string expected by ILAENV. */
+    i__2[0] = 1, a__1[0] = uplo;
+    i__2[1] = 1, a__1[1] = diag;
+    s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2);
+    nb = ilaenv_(&c__1, "STRTRI", ch__1, n, &c_n1, &c_n1, &c_n1);
+    if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+        strti2_(uplo, diag, n, &a[a_offset], lda, info);
+    } else {
+
+/* Use blocked code */
+
+        if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+            i__1 = *n;
+            i__3 = nb;
+            for (j = 1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+/* Computing MIN */
+                i__4 = nb, i__5 = *n - j + 1;
+                jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+                i__4 = j - 1;
+                strmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+                        c_b18, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+                i__4 = j - 1;
+                strsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+                        c_b22, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+                        lda);
+
+/* Compute inverse of current diagonal block */
+
+                strti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+            }
+        } else {
+
+/* Compute inverse of lower triangular matrix */
+
+            nn = (*n - 1) / nb * nb + 1;
+            i__3 = -nb;
+            for (j = nn; i__3 < 0 ? j >= 1 : j <= 1; j += i__3) {
+/* Computing MIN */
+                i__1 = nb, i__4 = *n - j + 1;
+                jb = min(i__1,i__4);
+                if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+                    i__1 = *n - j - jb + 1;
+                    strmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+                            &c_b18, &a[j + jb + (j + jb) * a_dim1], lda, &a[j
+                            + jb + j * a_dim1], lda);
+                    i__1 = *n - j - jb + 1;
+                    strsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+                            &c_b22, &a[j + j * a_dim1], lda, &a[j + jb + j *
+                            a_dim1], lda);
+                }
+
+/* Compute inverse of current diagonal block */
+
+                strti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+            }
+        }
+    }
+
+    return 0;
+
+/* End of STRTRI */
+
+} /* strtri_ */
#define CV_LU 0
#define CV_SVD 1
#define CV_SVD_SYM 2
-#define CV_LSQ 8
+#define CV_CHOLESKY 3
+#define CV_QR 4
+#define CV_NORMAL 16
/* Inverts matrix */
CVAPI(double) cvInvert( const CvArr* src, CvArr* dst,
file(GLOB lib_srcs "*.cpp")
source_group("Src" FILES ${lib_srcs})
+include_directories("${CMAKE_SOURCE_DIR}/3rdparty/include")
+
set(lib_hdr_names cxcore.h cxcore.hpp cxerror.h cxmisc.h cxtypes.h cvver.h cvwimage.h)
set(lib_hdrs)
foreach(h ${lib_hdr_names})
)
# Add the required libraries for linking:
-target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS})
+target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} opencv_lapack)
+
+add_dependencies(${the_target} opencv_lapack)
install(TARGETS ${the_target}
RUNTIME DESTINATION bin COMPONENT dev
install(FILES ${lib_hdrs}
DESTINATION include/opencv
COMPONENT dev)
-
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "_cxcore.h"
+#include "clapack.h"
+
+/****************************************************************************************\
+* LU decomposition/back substitution *
+\****************************************************************************************/
+
+/* NOTE(review): these two macros appear unused in the visible portion of the
+   file -- 'arrtype' below always refers to the like-named macro *parameter*
+   of the ICV_DEF_LU_* templates, which takes precedence inside their
+   replacement lists. Presumably leftovers from an earlier revision; confirm
+   before removing. */
+#define arrtype float
+#define temptype double
+
+/* Worker that LU-factorizes A (square, double precision, step in bytes) in
+   place, applies the same row operations to the optional right-hand side B,
+   and stores the determinant in *det (see ICV_DEF_LU_DECOMP_FUNC below). */
+typedef CvStatus (CV_STDCALL * CvLUDecompFunc)( double* A, int stepA, CvSize sizeA,
+                                                void* B, int stepB, CvSize sizeB,
+                                                double* det );
+
+/* Matching back-substitution worker (see ICV_DEF_LU_BACK_FUNC below). */
+typedef CvStatus (CV_STDCALL * CvLUBackFunc)( double* A, int stepA, CvSize sizeA,
+                                              void* B, int stepB, CvSize sizeB );
+
+
+/* Generates icvLUDecomp_<flavor>: in-place Gaussian elimination with partial
+   pivoting. A is n x n double precision (stepA in bytes on entry); the same
+   row swaps and eliminations are applied to the optional right-hand side B
+   (m columns of 'arrtype'). Each pivot A[i] is overwritten with its
+   reciprocal so the back-substitution step can multiply instead of divide.
+   *_det receives the determinant: the product of the pivots, negated once
+   per row swap, or 0 if a zero pivot column is found (singular matrix). */
+#define ICV_DEF_LU_DECOMP_FUNC( flavor, arrtype )                       \
+static CvStatus CV_STDCALL                                              \
+icvLUDecomp_##flavor( double* A, int stepA, CvSize sizeA,               \
+                      arrtype* B, int stepB, CvSize sizeB, double* _det ) \
+{                                                                       \
+    int n = sizeA.width;                                                \
+    int m = 0, i;                                                       \
+    double det = 1;                                                     \
+                                                                        \
+    assert( sizeA.width == sizeA.height );                              \
+                                                                        \
+    if( B )                                                             \
+    {                                                                   \
+        assert( sizeA.height == sizeB.height );                         \
+        m = sizeB.width;                                                \
+    }                                                                   \
+    /* convert the byte steps to element steps */                       \
+    stepA /= sizeof(A[0]);                                              \
+    stepB /= sizeof(B[0]);                                              \
+                                                                        \
+    for( i = 0; i < n; i++, A += stepA, B += stepB )                    \
+    {                                                                   \
+        int j, k = i;                                                   \
+        double* tA = A;                                                 \
+        arrtype* tB = 0;                                                \
+        double kval = fabs(A[i]), tval;                                 \
+                                                                        \
+        /* find the pivot element */                                    \
+        for( j = i + 1; j < n; j++ )                                    \
+        {                                                               \
+            tA += stepA;                                                \
+            tval = fabs(tA[i]);                                         \
+                                                                        \
+            if( tval > kval )                                           \
+            {                                                           \
+                kval = tval;                                            \
+                k = j;                                                  \
+            }                                                           \
+        }                                                               \
+                                                                        \
+        if( kval == 0 )                                                 \
+        {                                                               \
+            det = 0;                                                    \
+            break;                                                      \
+        }                                                               \
+                                                                        \
+        /* swap rows */                                                 \
+        if( k != i )                                                    \
+        {                                                               \
+            tA = A + stepA*(k - i);                                     \
+            det = -det;                                                 \
+                                                                        \
+            for( j = i; j < n; j++ )                                    \
+            {                                                           \
+                double t;                                               \
+                CV_SWAP( A[j], tA[j], t );                              \
+            }                                                           \
+                                                                        \
+            if( m > 0 )                                                 \
+            {                                                           \
+                tB = B + stepB*(k - i);                                 \
+                                                                        \
+                for( j = 0; j < m; j++ )                                \
+                {                                                       \
+                    arrtype t = B[j];                                   \
+                    CV_SWAP( B[j], tB[j], t );                          \
+                }                                                       \
+            }                                                           \
+        }                                                               \
+                                                                        \
+        tval = 1./A[i];                                                 \
+        det *= A[i];                                                    \
+        tA = A;                                                         \
+        tB = B;                                                         \
+        A[i] = tval; /* to replace division with multiplication in LUBack */ \
+                                                                        \
+        /* update matrix and the right side of the system */            \
+        for( j = i + 1; j < n; j++ )                                    \
+        {                                                               \
+            tA += stepA;                                                \
+            tB += stepB;                                                \
+            double alpha = -tA[i]*tval;                                 \
+                                                                        \
+            for( k = i + 1; k < n; k++ )                                \
+                tA[k] = tA[k] + alpha*A[k];                             \
+                                                                        \
+            if( m > 0 )                                                 \
+                for( k = 0; k < m; k++ )                                \
+                    tB[k] = (arrtype)(tB[k] + alpha*B[k]);              \
+        }                                                               \
+    }                                                                   \
+                                                                        \
+    if( _det )                                                          \
+        *_det = det;                                                    \
+                                                                        \
+    return CV_OK;                                                       \
+}
+
+
+/* Instantiate the decomposition for both supported depths:
+   icvLUDecomp_32f (float RHS) and icvLUDecomp_64f (double RHS). */
+ICV_DEF_LU_DECOMP_FUNC( 32f, float )
+ICV_DEF_LU_DECOMP_FUNC( 64f, double )
+
+
+/* Generates icvLUBack_<flavor>: back substitution for the triangular system
+   left by icvLUDecomp_<flavor>, solving each of the m columns of B in place.
+   Relies on the diagonal of A holding the *reciprocals* of the pivots, so
+   the final step multiplies instead of divides. Steps are in bytes on
+   entry. */
+#define ICV_DEF_LU_BACK_FUNC( flavor, arrtype )                         \
+static CvStatus CV_STDCALL                                              \
+icvLUBack_##flavor( double* A, int stepA, CvSize sizeA,                 \
+                    arrtype* B, int stepB, CvSize sizeB )               \
+{                                                                       \
+    int n = sizeA.width;                                                \
+    int m = sizeB.width, i;                                             \
+                                                                        \
+    assert( m > 0 && sizeA.width == sizeA.height &&                     \
+            sizeA.height == sizeB.height );                             \
+    stepA /= sizeof(A[0]);                                              \
+    stepB /= sizeof(B[0]);                                              \
+                                                                        \
+    /* start from the last row and walk upwards */                      \
+    A += stepA*(n - 1);                                                 \
+    B += stepB*(n - 1);                                                 \
+                                                                        \
+    for( i = n - 1; i >= 0; i--, A -= stepA )                           \
+    {                                                                   \
+        int j, k;                                                       \
+        for( j = 0; j < m; j++ )                                        \
+        {                                                               \
+            arrtype* tB = B + j;                                        \
+            double x = 0;                                               \
+                                                                        \
+            for( k = n - 1; k > i; k--, tB -= stepB )                   \
+                x += A[k]*tB[0];                                        \
+                                                                        \
+            /* A[i] is 1/pivot, stored by the decomposition step */     \
+            tB[0] = (arrtype)((tB[0] - x)*A[i]);                        \
+        }                                                               \
+    }                                                                   \
+                                                                        \
+    return CV_OK;                                                       \
+}
+
+
+/* Instantiate back substitution for both depths. */
+ICV_DEF_LU_BACK_FUNC( 32f, float )
+ICV_DEF_LU_BACK_FUNC( 64f, double )
+
+/* Function tables (slot 0 = 32f, slot 1 = 64f) filled lazily by
+   icvInitLUTable on first use -- see cvDet. */
+static CvFuncTable lu_decomp_tab, lu_back_tab;
+static int lu_inittab = 0;
+
+/* Fills the LU worker tables: slot 0 = single precision, slot 1 = double
+   precision, matching the CV_MAT_DEPTH(type) - CV_32F indexing used at the
+   call sites. */
+static void icvInitLUTable( CvFuncTable* decomp_tab,
+                            CvFuncTable* back_tab )
+{
+    decomp_tab->fn_2d[0] = (void*)icvLUDecomp_32f;
+    decomp_tab->fn_2d[1] = (void*)icvLUDecomp_64f;
+    back_tab->fn_2d[0] = (void*)icvLUBack_32f;
+    back_tab->fn_2d[1] = (void*)icvLUBack_64f;
+}
+
+
+
+/****************************************************************************************\
+* Determinant of the matrix *
+\****************************************************************************************/
+
+/* Closed-form 2x2 and 3x3 determinants; 'm' is an element accessor macro of
+   the form m(row,col) -- e.g. Mf/Md in cvDet or Sf/Sd below. */
+#define det2(m) (m(0,0)*m(1,1) - m(0,1)*m(1,0))
+#define det3(m) (m(0,0)*(m(1,1)*m(2,2) - m(1,2)*m(2,1)) - \
+                 m(0,1)*(m(1,0)*m(2,2) - m(1,2)*m(2,0)) + \
+                 m(0,2)*(m(1,0)*m(2,1) - m(1,1)*m(2,0)))
+
+/* Computes the determinant of a square 32fC1 or 64fC1 matrix.
+   Orders 1..3 use closed-form expansions directly on the input data; larger
+   matrices are copied/converted into a double-precision working buffer and
+   LU-decomposed (icvLUDecomp_64f accumulates the determinant as the signed
+   product of the pivots). Returns 0 for a singular matrix. */
+CV_IMPL double
+cvDet( const CvArr* arr )
+{
+    double result = 0;
+    uchar* buffer = 0;
+    int local_alloc = 0;   /* set when 'buffer' lives on the stack */
+
+    CV_FUNCNAME( "cvDet" );
+
+    __BEGIN__;
+
+    CvMat stub, *mat = (CvMat*)arr;
+    int type;
+
+    if( !CV_IS_MAT( mat ))
+    {
+        CV_CALL( mat = cvGetMat( mat, &stub ));
+    }
+
+    type = CV_MAT_TYPE( mat->type );
+
+    if( mat->width != mat->height )
+        CV_ERROR( CV_StsBadSize, "The matrix must be square" );
+
+    /* element accessors for the small-matrix cases */
+    #define Mf( y, x ) ((float*)(m + y*step))[x]
+    #define Md( y, x ) ((double*)(m + y*step))[x]
+
+    if( mat->width == 2 )
+    {
+        uchar* m = mat->data.ptr;
+        int step = mat->step;
+
+        if( type == CV_32FC1 )
+        {
+            result = det2(Mf);
+        }
+        else if( type == CV_64FC1 )
+        {
+            result = det2(Md);
+        }
+        else
+        {
+            CV_ERROR( CV_StsUnsupportedFormat, "" );
+        }
+    }
+    else if( mat->width == 3 )
+    {
+        uchar* m = mat->data.ptr;
+        int step = mat->step;
+
+        if( type == CV_32FC1 )
+        {
+            result = det3(Mf);
+        }
+        else if( type == CV_64FC1 )
+        {
+            result = det3(Md);
+        }
+        else
+        {
+            CV_ERROR( CV_StsUnsupportedFormat, "" );
+        }
+    }
+    else if( mat->width == 1 )
+    {
+        if( type == CV_32FC1 )
+        {
+            result = mat->data.fl[0];
+        }
+        else if( type == CV_64FC1 )
+        {
+            result = mat->data.db[0];
+        }
+        else
+        {
+            CV_ERROR( CV_StsUnsupportedFormat, "" );
+        }
+    }
+    else
+    {
+        CvLUDecompFunc decomp_func;
+        CvSize size = cvGetMatSize( mat );
+        /* the factorization always runs in double precision, whatever the
+           input depth */
+        const int worktype = CV_64FC1;
+        int buf_size = size.width*size.height*CV_ELEM_SIZE(worktype);
+        CvMat tmat;
+
+        if( !lu_inittab )
+        {
+            icvInitLUTable( &lu_decomp_tab, &lu_back_tab );
+            lu_inittab = 1;
+        }
+
+        if( CV_MAT_CN( type ) != 1 || CV_MAT_DEPTH( type ) < CV_32F )
+            CV_ERROR( CV_StsUnsupportedFormat, "" );
+
+        /* small workspaces go on the stack, big ones on the heap */
+        if( buf_size <= CV_MAX_LOCAL_SIZE )
+        {
+            buffer = (uchar*)cvStackAlloc( buf_size );
+            local_alloc = 1;
+        }
+        else
+        {
+            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
+        }
+
+        CV_CALL( cvInitMatHeader( &tmat, size.height, size.width, worktype, buffer ));
+        if( type == worktype )
+        {
+            CV_CALL( cvCopy( mat, &tmat ));
+        }
+        else
+            CV_CALL( cvConvert( mat, &tmat ));
+
+        decomp_func = (CvLUDecompFunc)(lu_decomp_tab.fn_2d[CV_MAT_DEPTH(worktype)-CV_32F]);
+        assert( decomp_func );
+
+        /* B = 0: only the determinant is wanted, no system to solve */
+        IPPI_CALL( decomp_func( tmat.data.db, tmat.step, size, 0, 0, size, &result ));
+    }
+
+    #undef Mf
+    #undef Md
+
+    /*icvCheckVector_64f( &result, 1 );*/
+
+    __END__;
+
+    if( buffer && !local_alloc )
+        cvFree( &buffer );
+
+    return result;
+}
+
+
+
+/****************************************************************************************\
+* Inverse (or pseudo-inverse) of the matrix *
+\****************************************************************************************/
+
+/* Element accessors for the small-matrix fast paths in cvInvert/cvSolve:
+   S* read the source matrix, D* write the destination (f = float,
+   d = double). They rely on srcdata/srcstep and dstdata/dststep locals
+   being in scope at the use site. */
+#define Sf( y, x ) ((float*)(srcdata + y*srcstep))[x]
+#define Sd( y, x ) ((double*)(srcdata + y*srcstep))[x]
+#define Df( y, x ) ((float*)(dstdata + y*dststep))[x]
+#define Dd( y, x ) ((double*)(dstdata + y*dststep))[x]
+
+/* Computes the inverse (or, with CV_SVD/CV_SVD_SYM, the pseudo-inverse) of a
+   32fC1/64fC1 matrix. The meaning of the return value depends on the path
+   taken, as the code below shows:
+     - CV_SVD / CV_SVD_SYM: ratio of the smallest to the largest singular
+       value (0 when the largest is below FLT_EPSILON);
+     - CV_LU / CV_CHOLESKY with size <= 3: the determinant (0 => singular);
+     - CV_LU / CV_CHOLESKY, larger sizes: 1 on LAPACK success, 0 on failure.
+   NOTE(review): the SVD and small-matrix branches leave via EXIT, which
+   jumps past the 'if( !result ) cvSetZero(dst)' below -- so on a singular
+   small matrix dst is left unmodified, and only the LAPACK path zeroes dst
+   on failure. Confirm this asymmetry is intended. */
+CV_IMPL double
+cvInvert( const CvArr* srcarr, CvArr* dstarr, int method )
+{
+    CvMat* u = 0;
+    CvMat* v = 0;
+    CvMat* w = 0;
+
+    uchar* buffer = 0;
+    int local_alloc = 0;
+    double result = 0;
+
+    CV_FUNCNAME( "cvInvert" );
+
+    __BEGIN__;
+
+    CvMat sstub, *src = (CvMat*)srcarr;
+    CvMat dstub, *dst = (CvMat*)dstarr;
+    int type;
+
+    if( !CV_IS_MAT( src ))
+        CV_CALL( src = cvGetMat( src, &sstub ));
+
+    if( !CV_IS_MAT( dst ))
+        CV_CALL( dst = cvGetMat( dst, &dstub ));
+
+    type = CV_MAT_TYPE( src->type );
+
+    if( method == CV_SVD || method == CV_SVD_SYM )
+    {
+        int n = MIN(src->rows,src->cols);
+        if( method == CV_SVD_SYM && src->rows != src->cols )
+            CV_ERROR( CV_StsBadSize, "CV_SVD_SYM method is used for non-square matrix" );
+
+        CV_CALL( u = cvCreateMat( n, src->rows, src->type ));
+        if( method != CV_SVD_SYM )
+            CV_CALL( v = cvCreateMat( n, src->cols, src->type ));
+        CV_CALL( w = cvCreateMat( n, 1, src->type ));
+        CV_CALL( cvSVD( src, w, u, v, CV_SVD_U_T + CV_SVD_V_T ));
+
+        /* w[0] / w[n-1]: presumably largest / smallest singular value --
+           the ratio is a reciprocal-condition-style measure */
+        if( type == CV_32FC1 )
+            result = w->data.fl[0] >= FLT_EPSILON ?
+                w->data.fl[w->rows-1]/w->data.fl[0] : 0;
+        else
+            result = w->data.db[0] >= FLT_EPSILON ?
+                w->data.db[w->rows-1]/w->data.db[0] : 0;
+
+        /* for CV_SVD_SYM only u was computed; reuse it as v */
+        CV_CALL( cvSVBkSb( w, u, v ? v : u, 0, dst, CV_SVD_U_T + CV_SVD_V_T ));
+        EXIT;
+    }
+    else if( method != CV_LU && method != CV_CHOLESKY )
+        CV_ERROR( CV_StsBadArg, "Unknown inversion method" );
+
+    if( !CV_ARE_TYPES_EQ( src, dst ))
+        CV_ERROR( CV_StsUnmatchedFormats, "" );
+
+    if( src->width != src->height )
+        CV_ERROR( CV_StsBadSize, "The matrix must be square" );
+
+    if( !CV_ARE_SIZES_EQ( src, dst ))
+        CV_ERROR( CV_StsUnmatchedSizes, "" );
+
+    if( type != CV_32FC1 && type != CV_64FC1 )
+        CV_ERROR( CV_StsUnsupportedFormat, "" );
+
+    if( method == CV_LU || method == CV_CHOLESKY )
+    {
+        /* sizes 1..3: invert via the explicit adjugate / determinant */
+        if( src->width <= 3 )
+        {
+            uchar* srcdata = src->data.ptr;
+            uchar* dstdata = dst->data.ptr;
+            int srcstep = src->step;
+            int dststep = dst->step;
+
+            if( src->width == 2 )
+            {
+                if( type == CV_32FC1 )
+                {
+                    double d = det2(Sf);
+                    if( d != 0. )
+                    {
+                        double t0, t1;
+                        result = d;
+                        d = 1./d;
+                        t0 = Sf(0,0)*d;
+                        t1 = Sf(1,1)*d;
+                        Df(1,1) = (float)t0;
+                        Df(0,0) = (float)t1;
+                        t0 = -Sf(0,1)*d;
+                        t1 = -Sf(1,0)*d;
+                        Df(0,1) = (float)t0;
+                        Df(1,0) = (float)t1;
+                    }
+                }
+                else
+                {
+                    double d = det2(Sd);
+                    if( d != 0. )
+                    {
+                        double t0, t1;
+                        result = d;
+                        d = 1./d;
+                        t0 = Sd(0,0)*d;
+                        t1 = Sd(1,1)*d;
+                        Dd(1,1) = t0;
+                        Dd(0,0) = t1;
+                        t0 = -Sd(0,1)*d;
+                        t1 = -Sd(1,0)*d;
+                        Dd(0,1) = t0;
+                        Dd(1,0) = t1;
+                    }
+                }
+            }
+            else if( src->width == 3 )
+            {
+                if( type == CV_32FC1 )
+                {
+                    double d = det3(Sf);
+                    if( d != 0. )
+                    {
+                        /* cofactor matrix scaled by 1/det; buffered in t[]
+                           so src and dst may alias */
+                        float t[9];
+                        result = d;
+                        d = 1./d;
+
+                        t[0] = (float)((Sf(1,1) * Sf(2,2) - Sf(1,2) * Sf(2,1)) * d);
+                        t[1] = (float)((Sf(0,2) * Sf(2,1) - Sf(0,1) * Sf(2,2)) * d);
+                        t[2] = (float)((Sf(0,1) * Sf(1,2) - Sf(0,2) * Sf(1,1)) * d);
+
+                        t[3] = (float)((Sf(1,2) * Sf(2,0) - Sf(1,0) * Sf(2,2)) * d);
+                        t[4] = (float)((Sf(0,0) * Sf(2,2) - Sf(0,2) * Sf(2,0)) * d);
+                        t[5] = (float)((Sf(0,2) * Sf(1,0) - Sf(0,0) * Sf(1,2)) * d);
+
+                        t[6] = (float)((Sf(1,0) * Sf(2,1) - Sf(1,1) * Sf(2,0)) * d);
+                        t[7] = (float)((Sf(0,1) * Sf(2,0) - Sf(0,0) * Sf(2,1)) * d);
+                        t[8] = (float)((Sf(0,0) * Sf(1,1) - Sf(0,1) * Sf(1,0)) * d);
+
+                        Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2];
+                        Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5];
+                        Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8];
+                    }
+                }
+                else
+                {
+                    double d = det3(Sd);
+                    if( d != 0. )
+                    {
+                        double t[9];
+                        result = d;
+                        d = 1./d;
+
+                        t[0] = (Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1)) * d;
+                        t[1] = (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2)) * d;
+                        t[2] = (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1)) * d;
+
+                        t[3] = (Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2)) * d;
+                        t[4] = (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0)) * d;
+                        t[5] = (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2)) * d;
+
+                        t[6] = (Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0)) * d;
+                        t[7] = (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1)) * d;
+                        t[8] = (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0)) * d;
+
+                        Dd(0,0) = t[0]; Dd(0,1) = t[1]; Dd(0,2) = t[2];
+                        Dd(1,0) = t[3]; Dd(1,1) = t[4]; Dd(1,2) = t[5];
+                        Dd(2,0) = t[6]; Dd(2,1) = t[7]; Dd(2,2) = t[8];
+                    }
+                }
+            }
+            else
+            {
+                assert( src->width == 1 );
+
+                if( type == CV_32FC1 )
+                {
+                    double d = Sf(0,0);
+                    if( d != 0. )
+                    {
+                        result = d;
+                        Df(0,0) = (float)(1./d);
+                    }
+                }
+                else
+                {
+                    double d = Sd(0,0);
+                    if( d != 0. )
+                    {
+                        result = d;
+                        Dd(0,0) = 1./d;
+                    }
+                }
+            }
+            EXIT;
+        }
+
+    {
+        /* general case: in-place LAPACK inversion on a copy in dst.
+           NOTE: this 'type' shadows the function-level 'type' (src and dst
+           types were checked equal above). */
+        integer n = dst->cols, type = CV_MAT_TYPE(dst->type), lwork=-1,
+            elem_size = CV_ELEM_SIZE(type), lda = dst->step/elem_size, piv1=0, info=0;
+        int buf_size = (int)(n*sizeof(integer));   /* pivot index array */
+
+        cvCopy( src, dst );
+        if( method == CV_LU )
+        {
+            /* workspace-size query (lwork = -1 convention) */
+            if( type == CV_32F )
+            {
+                real work1 = 0;
+                sgetri_(&n, dst->data.fl, &lda, &piv1, &work1, &lwork, &info);
+                lwork = cvRound(work1);
+            }
+            else
+            {
+                double work1 = 0;
+                dgetri_(&n, dst->data.db, &lda, &piv1, &work1, &lwork, &info);
+                lwork = cvRound(work1);
+            }
+
+            buf_size += (int)((lwork + 1)*elem_size);
+            if( buf_size <= CV_MAX_LOCAL_SIZE )
+            {
+                buffer = (uchar*)cvStackAlloc( buf_size );
+                local_alloc = 1;
+            }
+            else
+            {
+                CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
+            }
+
+            /* LU factorization, then inversion from the factors */
+            if( type == CV_32F )
+            {
+                sgetrf_(&n, &n, dst->data.fl, &lda, (integer*)buffer, &info);
+                sgetri_(&n, dst->data.fl, &lda, (integer*)buffer,
+                        (float*)(buffer + n*sizeof(integer)), &lwork, &info);
+            }
+            else
+            {
+                dgetrf_(&n, &n, dst->data.db, &lda, (integer*)buffer, &info);
+                dgetri_(&n, dst->data.db, &lda, (integer*)buffer,
+                        (double*)cvAlignPtr(buffer + n*sizeof(integer), elem_size), &lwork, &info);
+            }
+        }
+        else if( method == CV_CHOLESKY )
+        {
+            /* Cholesky factorization + inversion of the lower triangle;
+               cvCompleteSymm mirrors it into the upper triangle */
+            if( type == CV_32F )
+            {
+                spotrf_("L", &n, dst->data.fl, &lda, &info);
+                spotri_("L", &n, dst->data.fl, &lda, &info);
+            }
+            else
+            {
+                dpotrf_("L", &n, dst->data.db, &lda, &info);
+                dpotri_("L", &n, dst->data.db, &lda, &info);
+            }
+            cvCompleteSymm(dst);
+        }
+        result = info == 0;
+    }
+    }
+
+    if( !result )
+        CV_CALL( cvSetZero( dst ));
+
+    __END__;
+
+    if( buffer && !local_alloc )
+        cvFree( &buffer );
+
+    if( u || v || w )
+    {
+        cvReleaseMat( &u );
+        cvReleaseMat( &v );
+        cvReleaseMat( &w );
+    }
+
+    return result;
+}
+
+
+/****************************************************************************************\
+* Linear system [least-squares] solution *
+\****************************************************************************************/
+
+/* Solves the linear system A*x = b, or the least-squares problem when A is
+   rectangular. Methods: CV_LU, CV_CHOLESKY, CV_QR, CV_SVD/CV_SVD_SYM;
+   or-ing in CV_NORMAL routes an over-determined system (m > n) through the
+   normal equations A'A x = A'b. Returns 1 on success, 0 on failure
+   (singular system / LAPACK error), in which case x is zeroed by the
+   general path. */
+CV_IMPL int
+cvSolve( const CvArr* A, const CvArr* b, CvArr* x, int method )
+{
+    uchar *buffer = 0, *ptr;
+    int local_alloc = 0;
+    int result = 1;
+
+    CV_FUNCNAME( "cvSolve" );
+
+    __BEGIN__;
+
+    CvMat sstub, *src = (CvMat*)A, at;
+    CvMat dstub, *dst = (CvMat*)x, xt;
+    CvMat bstub, *src2 = (CvMat*)b;
+    /* double/float twins of the LAPACK scratch scalars and arrays */
+    double rcond=-1, s1=0, work1=0, *work=0, *s=0;
+    float frcond=-1, fs1=0, fwork1=0, *fwork=0, *fs=0;
+    integer m, m_, n, mn, nm, nb, lwork=-1, liwork=0, iwork1=0,
+        lda, ldx, info=0, rank=0, *iwork=0;
+    int type, elem_size, buf_size=0;
+    bool copy_rhs=false, is_normal=(method & CV_NORMAL)!=0;
+
+    if( !CV_IS_MAT( src ))
+        CV_CALL( src = cvGetMat( src, &sstub ));
+
+    if( !CV_IS_MAT( src2 ))
+        CV_CALL( src2 = cvGetMat( src2, &bstub ));
+
+    if( !CV_IS_MAT( dst ))
+        CV_CALL( dst = cvGetMat( dst, &dstub ));
+
+    type = CV_MAT_TYPE(src->type);
+
+    if( !CV_ARE_TYPES_EQ(src, src2) || !CV_ARE_TYPES_EQ(src, dst) )
+        CV_ERROR( CV_StsUnmatchedFormats,
+        "All the input and output matrices must have the same type" );
+
+    if( type != CV_32FC1 && type != CV_64FC1 )
+        CV_ERROR( CV_StsUnsupportedFormat,
+        "All the input and output matrices must be 32fC1 or 64fC1" );
+
+    /* strip the CV_NORMAL flag; only 'is_normal' remembers it */
+    method &= ~CV_NORMAL;
+    if( (method == CV_LU || method == CV_CHOLESKY) && !is_normal && src->rows != src->cols )
+        CV_ERROR( CV_StsBadSize, "LU and Cholesky methods work only with square matrices" );
+
+    // check case of a single equation and small matrix
+    if( (method == CV_LU || method == CV_CHOLESKY) &&
+        src->rows <= 3 && src->rows == src->cols && src2->cols == 1 )
+    {
+        /* right-hand side accessors (single column) */
+        #define bf(y) ((float*)(bdata + y*src2step))[0]
+        #define bd(y) ((double*)(bdata + y*src2step))[0]
+
+        uchar* srcdata = src->data.ptr;
+        uchar* bdata = src2->data.ptr;
+        uchar* dstdata = dst->data.ptr;
+        int srcstep = src->step;
+        int src2step = src2->step;
+        int dststep = dst->step;
+
+        if( src->width == 2 )
+        {
+            /* Cramer's rule for 2x2 */
+            if( type == CV_32FC1 )
+            {
+                double d = det2(Sf);
+                if( d != 0. )
+                {
+                    float t;
+                    d = 1./d;
+                    t = (float)((bf(0)*Sf(1,1) - bf(1)*Sf(0,1))*d);
+                    Df(1,0) = (float)((bf(1)*Sf(0,0) - bf(0)*Sf(1,0))*d);
+                    Df(0,0) = t;
+                }
+                else
+                    result = 0;
+            }
+            else
+            {
+                double d = det2(Sd);
+                if( d != 0. )
+                {
+                    double t;
+                    d = 1./d;
+                    t = (bd(0)*Sd(1,1) - bd(1)*Sd(0,1))*d;
+                    Dd(1,0) = (bd(1)*Sd(0,0) - bd(0)*Sd(1,0))*d;
+                    Dd(0,0) = t;
+                }
+                else
+                    result = 0;
+            }
+        }
+        else if( src->width == 3 )
+        {
+            /* Cramer's rule for 3x3 */
+            if( type == CV_32FC1 )
+            {
+                double d = det3(Sf);
+                if( d != 0. )
+                {
+                    float t[3];
+                    d = 1./d;
+
+                    t[0] = (float)(d*
+                           (bf(0)*(Sf(1,1)*Sf(2,2) - Sf(1,2)*Sf(2,1)) -
+                            Sf(0,1)*(bf(1)*Sf(2,2) - Sf(1,2)*bf(2)) +
+                            Sf(0,2)*(bf(1)*Sf(2,1) - Sf(1,1)*bf(2))));
+
+                    t[1] = (float)(d*
+                           (Sf(0,0)*(bf(1)*Sf(2,2) - Sf(1,2)*bf(2)) -
+                            bf(0)*(Sf(1,0)*Sf(2,2) - Sf(1,2)*Sf(2,0)) +
+                            Sf(0,2)*(Sf(1,0)*bf(2) - bf(1)*Sf(2,0))));
+
+                    t[2] = (float)(d*
+                           (Sf(0,0)*(Sf(1,1)*bf(2) - bf(1)*Sf(2,1)) -
+                            Sf(0,1)*(Sf(1,0)*bf(2) - bf(1)*Sf(2,0)) +
+                            bf(0)*(Sf(1,0)*Sf(2,1) - Sf(1,1)*Sf(2,0))));
+
+                    Df(0,0) = t[0];
+                    Df(1,0) = t[1];
+                    Df(2,0) = t[2];
+                }
+                else
+                    result = 0;
+            }
+            else
+            {
+                double d = det3(Sd);
+                if( d != 0. )
+                {
+                    double t[9];
+
+                    d = 1./d;
+
+                    t[0] = ((Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1))*bd(0) +
+                            (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2))*bd(1) +
+                            (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1))*bd(2))*d;
+
+                    t[1] = ((Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2))*bd(0) +
+                            (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0))*bd(1) +
+                            (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2))*bd(2))*d;
+
+                    t[2] = ((Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0))*bd(0) +
+                            (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1))*bd(1) +
+                            (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0))*bd(2))*d;
+
+                    Dd(0,0) = t[0];
+                    Dd(1,0) = t[1];
+                    Dd(2,0) = t[2];
+                }
+                else
+                    result = 0;
+            }
+        }
+        else
+        {
+            assert( src->width == 1 );
+
+            if( type == CV_32FC1 )
+            {
+                double d = Sf(0,0);
+                if( d != 0. )
+                    Df(0,0) = (float)(bf(0)/d);
+                else
+                    result = 0;
+            }
+            else
+            {
+                double d = Sd(0,0);
+                if( d != 0. )
+                    Dd(0,0) = (bd(0)/d);
+                else
+                    result = 0;
+            }
+        }
+    }
+
+    /* NOTE(review): unlike the analogous small-matrix branch in cvInvert
+       (which ends with EXIT), control falls through here and the system is
+       solved again by the general LAPACK path below, discarding the fast
+       path's result. Presumably an EXIT is missing above; confirm against
+       upstream. */
+
+    elem_size = CV_ELEM_SIZE(src->type);
+    lda = m = m_ = src->rows;
+    n = src->cols;
+    nb = src2->cols;
+    ldx = mn = MAX(m, n);
+    nm = MIN(m, n);
+
+    /* normal equations only help when the system is over-determined */
+    if( m <= n )
+        is_normal = false;
+    else
+        m_ = n;
+
+    /* room for A' (or A'A when using normal equations) */
+    buf_size += (is_normal ? n*n : m*n)*elem_size;
+
+    if( m_ != n || nb > 1 || !CV_IS_MAT_CONT(dst->type) )
+    {
+        copy_rhs = true;
+        if( is_normal )
+            buf_size += n*nb*elem_size;
+        else
+            buf_size += mn*nb*elem_size;
+    }
+
+    /* per-method workspace-size queries (lwork = -1 convention) */
+    if( method == CV_SVD || method == CV_SVD_SYM )
+    {
+        int nlvl = cvRound(log(MAX(MIN(m_,n)/25., 1.))/CV_LOG2) + 1;
+        liwork = MIN(m_,n)*(3*MAX(nlvl,0) + 11);
+
+        if( type == CV_32F )
+            sgelsd_(&m_, &n, &nb, src->data.fl, &lda, dst->data.fl, &ldx,
+                &fs1, &frcond, &rank, &fwork1, &lwork, &iwork1, &info);
+        else
+            dgelsd_(&m_, &n, &nb, src->data.db, &lda, dst->data.db, &ldx,
+                &s1, &rcond, &rank, &work1, &lwork, &iwork1, &info );
+        buf_size += nm*elem_size + (liwork + 1)*sizeof(integer);
+    }
+    else if( method == CV_QR )
+    {
+        if( type == CV_32F )
+            sgels_("N", &m_, &n, &nb, src->data.fl, &lda, dst->data.fl, &ldx, &fwork1, &lwork, &info );
+        else
+            dgels_("N", &m_, &n, &nb, src->data.db, &lda, dst->data.db, &ldx, &work1, &lwork, &info );
+    }
+    else if( method == CV_LU )
+    {
+        buf_size += (n+1)*sizeof(integer);   /* pivot indices */
+    }
+    else if( method == CV_CHOLESKY )
+        ;
+    else
+        CV_ERROR( CV_StsBadArg, "Unknown method" );
+
+    lwork = cvRound(type == CV_32F ? (double)fwork1 : work1);
+    buf_size += lwork*elem_size;
+
+    if( buf_size <= CV_MAX_LOCAL_SIZE )
+    {
+        buffer = (uchar*)cvStackAlloc( buf_size );
+        local_alloc = 1;
+    }
+    else
+        CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
+
+    ptr = buffer;
+
+    /* 'at' holds either a plain copy (symmetric/Cholesky), A'
+       (presumably so the Fortran-derived routines see column-major data),
+       or A'A for the normal equations */
+    at = cvMat(n, m_, type, ptr);
+    ptr += n*m_*elem_size;
+
+    if( method == CV_SVD_SYM || method == CV_CHOLESKY )
+        cvCopy( src, &at );
+    else if( !is_normal )
+        cvTranspose( src, &at );
+    else
+        cvMulTransposed( src, &at, 1 );
+
+    /* set up xt: the (transposed) right-hand side, solved in place */
+    if( !is_normal )
+    {
+        if( copy_rhs )
+        {
+            CvMat temp = cvMat(nb, mn, type, ptr), bt;
+            ptr += nb*mn*elem_size;
+            cvGetCols(&temp, &bt, 0, m);
+            cvGetCols(&temp, &xt, 0, n);
+            cvTranspose( src2, &bt );
+        }
+        else
+        {
+            cvCopy( src2, dst );
+            xt = cvMat(1, n, type, dst->data.ptr);
+        }
+    }
+    else
+    {
+        if( copy_rhs )
+        {
+            xt = cvMat(nb, n, type, ptr);
+            ptr += nb*n*elem_size;
+        }
+        else
+            xt = cvMat(1, n, type, dst->data.ptr);
+        // (a'*b)' = b'*a
+        cvGEMM( src2, src, 1, 0, 0, &xt, CV_GEMM_A_T );
+    }
+
+    lda = at.step/elem_size;
+    ldx = xt.rows > 1 ? xt.step/elem_size : (!is_normal && copy_rhs ? mn : n);
+
+    if( method == CV_SVD || method == CV_SVD_SYM )
+    {
+        /* divide-and-conquer SVD least squares; 's' receives the
+           singular values, 'iwork' the integer scratch */
+        if( type == CV_32F )
+        {
+            fs = (float*)ptr;
+            ptr += nm*elem_size;
+            fwork = (float*)ptr;
+            ptr += lwork*elem_size;
+            iwork = (integer*)cvAlignPtr(ptr, sizeof(integer));
+
+            sgelsd_(&m_, &n, &nb, at.data.fl, &lda, xt.data.fl, &ldx,
+                fs, &frcond, &rank, fwork, &lwork, iwork, &info);
+        }
+        else
+        {
+            s = (double*)ptr;
+            ptr += nm*elem_size;
+            work = (double*)ptr;
+            ptr += lwork*elem_size;
+            iwork = (integer*)cvAlignPtr(ptr, sizeof(integer));
+
+            dgelsd_(&m_, &n, &nb, at.data.db, &lda, xt.data.db, &ldx,
+                s, &rcond, &rank, work, &lwork, iwork, &info);
+        }
+    }
+    else if( method == CV_QR )
+    {
+        if( type == CV_32F )
+        {
+            fwork = (float*)ptr;
+            sgels_("N", &m_, &n, &nb, at.data.fl, &lda, xt.data.fl, &ldx, fwork, &lwork, &info);
+        }
+        else
+        {
+            work = (double*)ptr;
+            dgels_("N", &m_, &n, &nb, at.data.db, &lda, xt.data.db, &ldx, work, &lwork, &info);
+        }
+    }
+    else if( method == CV_CHOLESKY || (method == CV_LU && is_normal) )
+    {
+        /* A (or A'A) is symmetric positive definite here: factor + solve */
+        if( type == CV_32F )
+        {
+            spotrf_("L", &n, at.data.fl, &lda, &info);
+            spotrs_("L", &n, &nb, at.data.fl, &lda, xt.data.fl, &ldx, &info);
+        }
+        else
+        {
+            dpotrf_("L", &n, at.data.db, &lda, &info);
+            dpotrs_("L", &n, &nb, at.data.db, &lda, xt.data.db, &ldx, &info);
+        }
+    }
+    else if( method == CV_LU )
+    {
+        iwork = (integer*)cvAlignPtr(ptr, sizeof(integer));
+        if( type == CV_32F )
+            sgesv_(&n, &nb, at.data.fl, &lda, iwork, xt.data.fl, &ldx, &info );
+        else
+            dgesv_(&n, &nb, at.data.db, &lda, iwork, xt.data.db, &ldx, &info );
+    }
+    else
+        assert(0);
+
+    result = info == 0;
+
+    if( !result )
+        cvSetZero( dst );
+    else if( xt.data.ptr != dst->data.ptr )
+        cvTranspose( &xt, dst );
+
+    __END__;
+
+    if( buffer && !local_alloc )
+        cvFree( &buffer );
+
+    return result;
+}
+
+
+/////////////////// finding eigenvalues and eigenvectors of a symmetric matrix ///////////////
+
+CV_IMPL void
+cvEigenVV( CvArr* srcarr, CvArr* evectsarr, CvArr* evalsarr, double )
+{
+ uchar* work = 0;
+
+ CV_FUNCNAME( "cvEigenVV" );
+
+ __BEGIN__;
+
+ CvMat sstub, *src = (CvMat*)srcarr;
+ CvMat estub1, *evects = (CvMat*)evectsarr;
+ CvMat estub2, *evals = (CvMat*)evalsarr;
+ integer n, m=0, lda, ldv, lwork=-1, iwork1=0, liwork=-1, idummy=0, info=0, type, elem_size;
+ integer *isupport, *iwork;
+ bool copy_evals;
+ char job[] = { evects ? 'V' : 'N', '\0' };
+
+ if( !CV_IS_MAT( src ))
+ CV_CALL( src = cvGetMat( src, &sstub ));
+
+ if( !CV_IS_MAT( evals ))
+ CV_CALL( evals = cvGetMat( evals, &estub2 ));
+
+ type = CV_MAT_TYPE(src->type);
+ elem_size = CV_ELEM_SIZE(src->type);
+ lda = src->step/elem_size;
+ n = ldv = src->cols;
+
+ if( src->cols != src->rows )
+ CV_ERROR( CV_StsUnmatchedSizes, "source is not quadratic matrix" );
+
+ if( (evals->rows != src->rows || evals->cols != 1) &&
+ (evals->cols != src->rows || evals->rows != 1))
+ CV_ERROR( CV_StsBadSize, "eigenvalues vector has inappropriate size" );
+
+ if( !CV_ARE_TYPES_EQ( src, evals ))
+ CV_ERROR( CV_StsUnmatchedFormats,
+ "input matrix, eigenvalues and eigenvectors must have the same type" );
+
+ if( evects )
+ {
+ if( !CV_IS_MAT( evects ))
+ CV_CALL( evects = cvGetMat( evects, &estub1 ));
+ if( !CV_ARE_SIZES_EQ( src, evects) )
+ CV_ERROR( CV_StsUnmatchedSizes,
+ "eigenvectors matrix has inappropriate size" );
+ if( !CV_ARE_TYPES_EQ( src, evects ))
+ CV_ERROR( CV_StsUnmatchedFormats,
+ "input matrix, eigenvalues and eigenvectors must have the same type" );
+ ldv = evects->step/elem_size;
+ }
+
+ copy_evals = !CV_IS_MAT_CONT(evals->type);
+
+ if( type == CV_32FC1 )
+ {
+ float work1 = 0, dummy = 0, abstol = 0, *s;
+
+ ssyevr_(job, "A", "L", &n, src->data.fl, &lda, &dummy, &dummy, &idummy, &idummy,
+ &abstol, &m, evals->data.fl, evects ? evects->data.fl : 0, &ldv,
+ &idummy, &work1, &lwork, &iwork1, &liwork, &info );
+ assert( info == 0 );
+
+ lwork = cvRound(work1);
+ liwork = iwork1;
+ work = (uchar*)cvAlloc((lwork + (copy_evals ? n : 0))*elem_size +
+ (liwork+2*n+1)*sizeof(integer));
+ if( copy_evals )
+ s = (float*)(work + lwork*elem_size);
+ else
+ s = evals->data.fl;
+
+ iwork = (integer*)cvAlignPtr(work + (lwork + (copy_evals ? n : 0))*elem_size, sizeof(integer));
+ isupport = iwork + liwork;
+
+ ssyevr_(job, "A", "L", &n, src->data.fl, &lda, &dummy, &dummy,
+ &idummy, &idummy, &abstol, &m, s, evects ? evects->data.fl : 0,
+ &ldv, isupport, (float*)work, &lwork, iwork, &liwork, &info );
+ assert( info == 0 );
+ }
+ else if( type == CV_64FC1 )
+ {
+ double work1 = 0, dummy = 0, abstol = 0, *s;
+
+ dsyevr_(job, "A", "L", &n, src->data.db, &lda, &dummy, &dummy, &idummy, &idummy,
+ &abstol, &m, evals->data.db, evects ? evects->data.db : 0, &ldv,
+ &idummy, &work1, &lwork, &iwork1, &liwork, &info );
+ assert( info == 0 );
+
+ lwork = cvRound(work1);
+ liwork = iwork1;
+ work = (uchar*)cvAlloc((lwork + (copy_evals ? n : 0))*elem_size +
+ (liwork+2*n+1)*sizeof(integer));
+ if( copy_evals )
+ s = (double*)(work + lwork*elem_size);
+ else
+ s = evals->data.db;
+
+ iwork = (integer*)cvAlignPtr(work + (lwork + (copy_evals ? n : 0))*elem_size, sizeof(integer));
+ isupport = iwork + liwork;
+
+ dsyevr_(job, "A", "L", &n, src->data.db, &lda, &dummy, &dummy,
+ &idummy, &idummy, &abstol, &m, s, evects ? evects->data.db : 0,
+ &ldv, isupport, (double*)work, &lwork, iwork, &liwork, &info );
+ assert( info == 0 );
+ }
+ else
+ {
+ CV_ERROR( CV_StsUnsupportedFormat, "Only 32fC1 and 64fC1 types are supported" );
+ }
+
+ if( copy_evals )
+ {
+ CvMat s = cvMat( evals->rows, evals->cols, type, work + lwork*elem_size );
+ cvCopy( &s, evals );
+ }
+
+ __END__;
+
+ cvFree( &work );
+}
__END__;
}
-
-/****************************************************************************************\
-* LU decomposition/back substitution *
-\****************************************************************************************/
-
-#define arrtype float
-#define temptype double
-
-typedef CvStatus (CV_STDCALL * CvLUDecompFunc)( double* A, int stepA, CvSize sizeA,
- void* B, int stepB, CvSize sizeB,
- double* det );
-
-typedef CvStatus (CV_STDCALL * CvLUBackFunc)( double* A, int stepA, CvSize sizeA,
- void* B, int stepB, CvSize sizeB );
-
-
-#define ICV_DEF_LU_DECOMP_FUNC( flavor, arrtype ) \
-static CvStatus CV_STDCALL \
-icvLUDecomp_##flavor( double* A, int stepA, CvSize sizeA, \
- arrtype* B, int stepB, CvSize sizeB, double* _det ) \
-{ \
- int n = sizeA.width; \
- int m = 0, i; \
- double det = 1; \
- \
- assert( sizeA.width == sizeA.height ); \
- \
- if( B ) \
- { \
- assert( sizeA.height == sizeB.height ); \
- m = sizeB.width; \
- } \
- stepA /= sizeof(A[0]); \
- stepB /= sizeof(B[0]); \
- \
- for( i = 0; i < n; i++, A += stepA, B += stepB ) \
- { \
- int j, k = i; \
- double* tA = A; \
- arrtype* tB = 0; \
- double kval = fabs(A[i]), tval; \
- \
- /* find the pivot element */ \
- for( j = i + 1; j < n; j++ ) \
- { \
- tA += stepA; \
- tval = fabs(tA[i]); \
- \
- if( tval > kval ) \
- { \
- kval = tval; \
- k = j; \
- } \
- } \
- \
- if( kval == 0 ) \
- { \
- det = 0; \
- break; \
- } \
- \
- /* swap rows */ \
- if( k != i ) \
- { \
- tA = A + stepA*(k - i); \
- det = -det; \
- \
- for( j = i; j < n; j++ ) \
- { \
- double t; \
- CV_SWAP( A[j], tA[j], t ); \
- } \
- \
- if( m > 0 ) \
- { \
- tB = B + stepB*(k - i); \
- \
- for( j = 0; j < m; j++ ) \
- { \
- arrtype t = B[j]; \
- CV_SWAP( B[j], tB[j], t ); \
- } \
- } \
- } \
- \
- tval = 1./A[i]; \
- det *= A[i]; \
- tA = A; \
- tB = B; \
- A[i] = tval; /* to replace division with multiplication in LUBack */ \
- \
- /* update matrix and the right side of the system */ \
- for( j = i + 1; j < n; j++ ) \
- { \
- tA += stepA; \
- tB += stepB; \
- double alpha = -tA[i]*tval; \
- \
- for( k = i + 1; k < n; k++ ) \
- tA[k] = tA[k] + alpha*A[k]; \
- \
- if( m > 0 ) \
- for( k = 0; k < m; k++ ) \
- tB[k] = (arrtype)(tB[k] + alpha*B[k]); \
- } \
- } \
- \
- if( _det ) \
- *_det = det; \
- \
- return CV_OK; \
-}
-
-
-ICV_DEF_LU_DECOMP_FUNC( 32f, float )
-ICV_DEF_LU_DECOMP_FUNC( 64f, double )
-
-
-#define ICV_DEF_LU_BACK_FUNC( flavor, arrtype ) \
-static CvStatus CV_STDCALL \
-icvLUBack_##flavor( double* A, int stepA, CvSize sizeA, \
- arrtype* B, int stepB, CvSize sizeB ) \
-{ \
- int n = sizeA.width; \
- int m = sizeB.width, i; \
- \
- assert( m > 0 && sizeA.width == sizeA.height && \
- sizeA.height == sizeB.height ); \
- stepA /= sizeof(A[0]); \
- stepB /= sizeof(B[0]); \
- \
- A += stepA*(n - 1); \
- B += stepB*(n - 1); \
- \
- for( i = n - 1; i >= 0; i--, A -= stepA ) \
- { \
- int j, k; \
- for( j = 0; j < m; j++ ) \
- { \
- arrtype* tB = B + j; \
- double x = 0; \
- \
- for( k = n - 1; k > i; k--, tB -= stepB ) \
- x += A[k]*tB[0]; \
- \
- tB[0] = (arrtype)((tB[0] - x)*A[i]); \
- } \
- } \
- \
- return CV_OK; \
-}
-
-
-ICV_DEF_LU_BACK_FUNC( 32f, float )
-ICV_DEF_LU_BACK_FUNC( 64f, double )
-
-static CvFuncTable lu_decomp_tab, lu_back_tab;
-static int lu_inittab = 0;
-
-static void icvInitLUTable( CvFuncTable* decomp_tab,
- CvFuncTable* back_tab )
-{
- decomp_tab->fn_2d[0] = (void*)icvLUDecomp_32f;
- decomp_tab->fn_2d[1] = (void*)icvLUDecomp_64f;
- back_tab->fn_2d[0] = (void*)icvLUBack_32f;
- back_tab->fn_2d[1] = (void*)icvLUBack_64f;
-}
-
-
-
-/****************************************************************************************\
-* Determinant of the matrix *
-\****************************************************************************************/
-
-#define det2(m) (m(0,0)*m(1,1) - m(0,1)*m(1,0))
-#define det3(m) (m(0,0)*(m(1,1)*m(2,2) - m(1,2)*m(2,1)) - \
- m(0,1)*(m(1,0)*m(2,2) - m(1,2)*m(2,0)) + \
- m(0,2)*(m(1,0)*m(2,1) - m(1,1)*m(2,0)))
-
-CV_IMPL double
-cvDet( const CvArr* arr )
-{
- double result = 0;
- uchar* buffer = 0;
- int local_alloc = 0;
-
- CV_FUNCNAME( "cvDet" );
-
- __BEGIN__;
-
- CvMat stub, *mat = (CvMat*)arr;
- int type;
-
- if( !CV_IS_MAT( mat ))
- {
- CV_CALL( mat = cvGetMat( mat, &stub ));
- }
-
- type = CV_MAT_TYPE( mat->type );
-
- if( mat->width != mat->height )
- CV_ERROR( CV_StsBadSize, "The matrix must be square" );
-
- #define Mf( y, x ) ((float*)(m + y*step))[x]
- #define Md( y, x ) ((double*)(m + y*step))[x]
-
- if( mat->width == 2 )
- {
- uchar* m = mat->data.ptr;
- int step = mat->step;
-
- if( type == CV_32FC1 )
- {
- result = det2(Mf);
- }
- else if( type == CV_64FC1 )
- {
- result = det2(Md);
- }
- else
- {
- CV_ERROR( CV_StsUnsupportedFormat, "" );
- }
- }
- else if( mat->width == 3 )
- {
- uchar* m = mat->data.ptr;
- int step = mat->step;
-
- if( type == CV_32FC1 )
- {
- result = det3(Mf);
- }
- else if( type == CV_64FC1 )
- {
- result = det3(Md);
- }
- else
- {
- CV_ERROR( CV_StsUnsupportedFormat, "" );
- }
- }
- else if( mat->width == 1 )
- {
- if( type == CV_32FC1 )
- {
- result = mat->data.fl[0];
- }
- else if( type == CV_64FC1 )
- {
- result = mat->data.db[0];
- }
- else
- {
- CV_ERROR( CV_StsUnsupportedFormat, "" );
- }
- }
- else
- {
- CvLUDecompFunc decomp_func;
- CvSize size = cvGetMatSize( mat );
- const int worktype = CV_64FC1;
- int buf_size = size.width*size.height*CV_ELEM_SIZE(worktype);
- CvMat tmat;
-
- if( !lu_inittab )
- {
- icvInitLUTable( &lu_decomp_tab, &lu_back_tab );
- lu_inittab = 1;
- }
-
- if( CV_MAT_CN( type ) != 1 || CV_MAT_DEPTH( type ) < CV_32F )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
-
- if( size.width <= CV_MAX_LOCAL_MAT_SIZE )
- {
- buffer = (uchar*)cvStackAlloc( buf_size );
- local_alloc = 1;
- }
- else
- {
- CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
- }
-
- CV_CALL( cvInitMatHeader( &tmat, size.height, size.width, worktype, buffer ));
- if( type == worktype )
- {
- CV_CALL( cvCopy( mat, &tmat ));
- }
- else
- CV_CALL( cvConvert( mat, &tmat ));
-
- decomp_func = (CvLUDecompFunc)(lu_decomp_tab.fn_2d[CV_MAT_DEPTH(worktype)-CV_32F]);
- assert( decomp_func );
-
- IPPI_CALL( decomp_func( tmat.data.db, tmat.step, size, 0, 0, size, &result ));
- }
-
- #undef Mf
- #undef Md
-
- /*icvCheckVector_64f( &result, 1 );*/
-
- __END__;
-
- if( buffer && !local_alloc )
- cvFree( &buffer );
-
- return result;
-}
-
-
-
-/****************************************************************************************\
-* Inverse (or pseudo-inverse) of the matrix *
-\****************************************************************************************/
-
-#define Sf( y, x ) ((float*)(srcdata + y*srcstep))[x]
-#define Sd( y, x ) ((double*)(srcdata + y*srcstep))[x]
-#define Df( y, x ) ((float*)(dstdata + y*dststep))[x]
-#define Dd( y, x ) ((double*)(dstdata + y*dststep))[x]
-
-CV_IMPL double
-cvInvert( const CvArr* srcarr, CvArr* dstarr, int method )
-{
- CvMat* u = 0;
- CvMat* v = 0;
- CvMat* w = 0;
-
- uchar* buffer = 0;
- int local_alloc = 0;
- double result = 0;
-
- CV_FUNCNAME( "cvInvert" );
-
- __BEGIN__;
-
- CvMat sstub, *src = (CvMat*)srcarr;
- CvMat dstub, *dst = (CvMat*)dstarr;
- int type;
-
- if( !CV_IS_MAT( src ))
- CV_CALL( src = cvGetMat( src, &sstub ));
-
- if( !CV_IS_MAT( dst ))
- CV_CALL( dst = cvGetMat( dst, &dstub ));
-
- type = CV_MAT_TYPE( src->type );
-
- if( method == CV_SVD || method == CV_SVD_SYM )
- {
- int n = MIN(src->rows,src->cols);
- if( method == CV_SVD_SYM && src->rows != src->cols )
- CV_ERROR( CV_StsBadSize, "CV_SVD_SYM method is used for non-square matrix" );
-
- CV_CALL( u = cvCreateMat( n, src->rows, src->type ));
- if( method != CV_SVD_SYM )
- CV_CALL( v = cvCreateMat( n, src->cols, src->type ));
- CV_CALL( w = cvCreateMat( n, 1, src->type ));
- CV_CALL( cvSVD( src, w, u, v, CV_SVD_U_T + CV_SVD_V_T ));
-
- if( type == CV_32FC1 )
- result = w->data.fl[0] >= FLT_EPSILON ?
- w->data.fl[w->rows-1]/w->data.fl[0] : 0;
- else
- result = w->data.db[0] >= FLT_EPSILON ?
- w->data.db[w->rows-1]/w->data.db[0] : 0;
-
- CV_CALL( cvSVBkSb( w, u, v ? v : u, 0, dst, CV_SVD_U_T + CV_SVD_V_T ));
- EXIT;
- }
- else if( method != CV_LU )
- CV_ERROR( CV_StsBadArg, "Unknown inversion method" );
-
- if( !CV_ARE_TYPES_EQ( src, dst ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( src->width != src->height )
- CV_ERROR( CV_StsBadSize, "The matrix must be square" );
-
- if( !CV_ARE_SIZES_EQ( src, dst ))
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- if( type != CV_32FC1 && type != CV_64FC1 )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
-
- if( src->width <= 3 )
- {
- uchar* srcdata = src->data.ptr;
- uchar* dstdata = dst->data.ptr;
- int srcstep = src->step;
- int dststep = dst->step;
-
- if( src->width == 2 )
- {
- if( type == CV_32FC1 )
- {
- double d = det2(Sf);
- if( d != 0. )
- {
- double t0, t1;
- result = d;
- d = 1./d;
- t0 = Sf(0,0)*d;
- t1 = Sf(1,1)*d;
- Df(1,1) = (float)t0;
- Df(0,0) = (float)t1;
- t0 = -Sf(0,1)*d;
- t1 = -Sf(1,0)*d;
- Df(0,1) = (float)t0;
- Df(1,0) = (float)t1;
- }
- }
- else
- {
- double d = det2(Sd);
- if( d != 0. )
- {
- double t0, t1;
- result = d;
- d = 1./d;
- t0 = Sd(0,0)*d;
- t1 = Sd(1,1)*d;
- Dd(1,1) = t0;
- Dd(0,0) = t1;
- t0 = -Sd(0,1)*d;
- t1 = -Sd(1,0)*d;
- Dd(0,1) = t0;
- Dd(1,0) = t1;
- }
- }
- }
- else if( src->width == 3 )
- {
- if( type == CV_32FC1 )
- {
- double d = det3(Sf);
- if( d != 0. )
- {
- float t[9];
- result = d;
- d = 1./d;
-
- t[0] = (float)((Sf(1,1) * Sf(2,2) - Sf(1,2) * Sf(2,1)) * d);
- t[1] = (float)((Sf(0,2) * Sf(2,1) - Sf(0,1) * Sf(2,2)) * d);
- t[2] = (float)((Sf(0,1) * Sf(1,2) - Sf(0,2) * Sf(1,1)) * d);
-
- t[3] = (float)((Sf(1,2) * Sf(2,0) - Sf(1,0) * Sf(2,2)) * d);
- t[4] = (float)((Sf(0,0) * Sf(2,2) - Sf(0,2) * Sf(2,0)) * d);
- t[5] = (float)((Sf(0,2) * Sf(1,0) - Sf(0,0) * Sf(1,2)) * d);
-
- t[6] = (float)((Sf(1,0) * Sf(2,1) - Sf(1,1) * Sf(2,0)) * d);
- t[7] = (float)((Sf(0,1) * Sf(2,0) - Sf(0,0) * Sf(2,1)) * d);
- t[8] = (float)((Sf(0,0) * Sf(1,1) - Sf(0,1) * Sf(1,0)) * d);
-
- Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2];
- Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5];
- Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8];
- }
- }
- else
- {
- double d = det3(Sd);
- if( d != 0. )
- {
- double t[9];
- result = d;
- d = 1./d;
-
- t[0] = (Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1)) * d;
- t[1] = (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2)) * d;
- t[2] = (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1)) * d;
-
- t[3] = (Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2)) * d;
- t[4] = (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0)) * d;
- t[5] = (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2)) * d;
-
- t[6] = (Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0)) * d;
- t[7] = (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1)) * d;
- t[8] = (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0)) * d;
-
- Dd(0,0) = t[0]; Dd(0,1) = t[1]; Dd(0,2) = t[2];
- Dd(1,0) = t[3]; Dd(1,1) = t[4]; Dd(1,2) = t[5];
- Dd(2,0) = t[6]; Dd(2,1) = t[7]; Dd(2,2) = t[8];
- }
- }
- }
- else
- {
- assert( src->width == 1 );
-
- if( type == CV_32FC1 )
- {
- double d = Sf(0,0);
- if( d != 0. )
- {
- result = d;
- Df(0,0) = (float)(1./d);
- }
- }
- else
- {
- double d = Sd(0,0);
- if( d != 0. )
- {
- result = d;
- Dd(0,0) = 1./d;
- }
- }
- }
- }
- else
- {
- CvLUDecompFunc decomp_func;
- CvLUBackFunc back_func;
- CvSize size = cvGetMatSize( src );
- const int worktype = CV_64FC1;
- int buf_size = size.width*size.height*CV_ELEM_SIZE(worktype);
- CvMat tmat;
-
- if( !lu_inittab )
- {
- icvInitLUTable( &lu_decomp_tab, &lu_back_tab );
- lu_inittab = 1;
- }
-
- if( size.width <= CV_MAX_LOCAL_MAT_SIZE )
- {
- buffer = (uchar*)cvStackAlloc( buf_size );
- local_alloc = 1;
- }
- else
- {
- CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
- }
-
- CV_CALL( cvInitMatHeader( &tmat, size.height, size.width, worktype, buffer ));
- if( type == worktype )
- {
- CV_CALL( cvCopy( src, &tmat ));
- }
- else
- CV_CALL( cvConvert( src, &tmat ));
- CV_CALL( cvSetIdentity( dst ));
-
- decomp_func = (CvLUDecompFunc)(lu_decomp_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
- back_func = (CvLUBackFunc)(lu_back_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
- assert( decomp_func && back_func );
-
- IPPI_CALL( decomp_func( tmat.data.db, tmat.step, size,
- dst->data.ptr, dst->step, size, &result ));
-
- if( result != 0 )
- {
- IPPI_CALL( back_func( tmat.data.db, tmat.step, size,
- dst->data.ptr, dst->step, size ));
- }
- }
-
- if( !result )
- CV_CALL( cvSetZero( dst ));
-
- __END__;
-
- if( buffer && !local_alloc )
- cvFree( &buffer );
-
- if( u || v || w )
- {
- cvReleaseMat( &u );
- cvReleaseMat( &v );
- cvReleaseMat( &w );
- }
-
- return result;
-}
-
-
-/****************************************************************************************\
-* Linear system [least-squares] solution *
-\****************************************************************************************/
-
-static void
-icvLSQ( const CvMat* A, const CvMat* B, CvMat* X )
-{
- CvMat* AtA = 0;
- CvMat* AtB = 0;
- CvMat* W = 0;
- CvMat* V = 0;
-
- CV_FUNCNAME( "icvLSQ" );
-
- __BEGIN__;
-
- if( !CV_IS_MAT(A) || !CV_IS_MAT(B) || !CV_IS_MAT(X) )
- CV_ERROR( CV_StsBadArg, "Some of required arguments is not a valid matrix" );
-
- AtA = cvCreateMat( A->cols, A->cols, A->type );
- AtB = cvCreateMat( A->cols, 1, A->type );
- W = cvCreateMat( A->cols, 1, A->type );
- V = cvCreateMat( A->cols, A->cols, A->type );
-
- cvMulTransposed( A, AtA, 1 );
- cvGEMM( A, B, 1, 0, 0, AtB, CV_GEMM_A_T );
- cvSVD( AtA, W, 0, V, CV_SVD_MODIFY_A + CV_SVD_V_T );
- cvSVBkSb( W, V, V, AtB, X, CV_SVD_U_T + CV_SVD_V_T );
-
- __END__;
-
- cvReleaseMat( &AtA );
- cvReleaseMat( &AtB );
- cvReleaseMat( &W );
- cvReleaseMat( &V );
-}
-
-CV_IMPL int
-cvSolve( const CvArr* A, const CvArr* b, CvArr* x, int method )
-{
- CvMat* u = 0;
- CvMat* v = 0;
- CvMat* w = 0;
-
- uchar* buffer = 0;
- int local_alloc = 0;
- int result = 1;
-
- CV_FUNCNAME( "cvSolve" );
-
- __BEGIN__;
-
- CvMat sstub, *src = (CvMat*)A;
- CvMat dstub, *dst = (CvMat*)x;
- CvMat bstub, *src2 = (CvMat*)b;
- int type;
-
- if( !CV_IS_MAT( src ))
- CV_CALL( src = cvGetMat( src, &sstub ));
-
- if( !CV_IS_MAT( src2 ))
- CV_CALL( src2 = cvGetMat( src2, &bstub ));
-
- if( !CV_IS_MAT( dst ))
- CV_CALL( dst = cvGetMat( dst, &dstub ));
-
- if( method & CV_LSQ )
- {
- icvLSQ( src, src2, dst );
- EXIT;
- }
-
- if( method == CV_SVD || method == CV_SVD_SYM )
- {
- int n = MIN(src->rows,src->cols);
-
- if( method == CV_SVD_SYM && src->rows != src->cols )
- CV_ERROR( CV_StsBadSize, "CV_SVD_SYM method is used for non-square matrix" );
-
- CV_CALL( u = cvCreateMat( n, src->rows, src->type ));
- if( method != CV_SVD_SYM )
- CV_CALL( v = cvCreateMat( n, src->cols, src->type ));
- CV_CALL( w = cvCreateMat( n, 1, src->type ));
- CV_CALL( cvSVD( src, w, u, v, CV_SVD_U_T + CV_SVD_V_T ));
- CV_CALL( cvSVBkSb( w, u, v ? v : u, src2, dst, CV_SVD_U_T + CV_SVD_V_T ));
- EXIT;
- }
- else if( method != CV_LU )
- CV_ERROR( CV_StsBadArg, "Unknown inversion method" );
-
- type = CV_MAT_TYPE( src->type );
-
- if( !CV_ARE_TYPES_EQ( src, dst ) || !CV_ARE_TYPES_EQ( src, src2 ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( src->width != src->height )
- CV_ERROR( CV_StsBadSize, "The matrix must be square" );
-
- if( !CV_ARE_SIZES_EQ( src2, dst ) || src->width != src2->height )
- CV_ERROR( CV_StsUnmatchedSizes, "" );
-
- if( type != CV_32FC1 && type != CV_64FC1 )
- CV_ERROR( CV_StsUnsupportedFormat, "" );
-
- // check case of a single equation and small matrix
- if( src->width <= 3 && src2->width == 1 )
- {
- #define bf(y) ((float*)(bdata + y*src2step))[0]
- #define bd(y) ((double*)(bdata + y*src2step))[0]
-
- uchar* srcdata = src->data.ptr;
- uchar* bdata = src2->data.ptr;
- uchar* dstdata = dst->data.ptr;
- int srcstep = src->step;
- int src2step = src2->step;
- int dststep = dst->step;
-
- if( src->width == 2 )
- {
- if( type == CV_32FC1 )
- {
- double d = det2(Sf);
- if( d != 0. )
- {
- float t;
- d = 1./d;
- t = (float)((bf(0)*Sf(1,1) - bf(1)*Sf(0,1))*d);
- Df(1,0) = (float)((bf(1)*Sf(0,0) - bf(0)*Sf(1,0))*d);
- Df(0,0) = t;
- }
- else
- result = 0;
- }
- else
- {
- double d = det2(Sd);
- if( d != 0. )
- {
- double t;
- d = 1./d;
- t = (bd(0)*Sd(1,1) - bd(1)*Sd(0,1))*d;
- Dd(1,0) = (bd(1)*Sd(0,0) - bd(0)*Sd(1,0))*d;
- Dd(0,0) = t;
- }
- else
- result = 0;
- }
- }
- else if( src->width == 3 )
- {
- if( type == CV_32FC1 )
- {
- double d = det3(Sf);
- if( d != 0. )
- {
- float t[3];
- d = 1./d;
-
- t[0] = (float)(d*
- (bf(0)*(Sf(1,1)*Sf(2,2) - Sf(1,2)*Sf(2,1)) -
- Sf(0,1)*(bf(1)*Sf(2,2) - Sf(1,2)*bf(2)) +
- Sf(0,2)*(bf(1)*Sf(2,1) - Sf(1,1)*bf(2))));
-
- t[1] = (float)(d*
- (Sf(0,0)*(bf(1)*Sf(2,2) - Sf(1,2)*bf(2)) -
- bf(0)*(Sf(1,0)*Sf(2,2) - Sf(1,2)*Sf(2,0)) +
- Sf(0,2)*(Sf(1,0)*bf(2) - bf(1)*Sf(2,0))));
-
- t[2] = (float)(d*
- (Sf(0,0)*(Sf(1,1)*bf(2) - bf(1)*Sf(2,1)) -
- Sf(0,1)*(Sf(1,0)*bf(2) - bf(1)*Sf(2,0)) +
- bf(0)*(Sf(1,0)*Sf(2,1) - Sf(1,1)*Sf(2,0))));
-
- Df(0,0) = t[0];
- Df(1,0) = t[1];
- Df(2,0) = t[2];
- }
- else
- result = 0;
- }
- else
- {
- double d = det3(Sd);
- if( d != 0. )
- {
- double t[9];
-
- d = 1./d;
-
- t[0] = ((Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1))*bd(0) +
- (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2))*bd(1) +
- (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1))*bd(2))*d;
-
- t[1] = ((Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2))*bd(0) +
- (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0))*bd(1) +
- (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2))*bd(2))*d;
-
- t[2] = ((Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0))*bd(0) +
- (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1))*bd(1) +
- (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0))*bd(2))*d;
-
- Dd(0,0) = t[0];
- Dd(1,0) = t[1];
- Dd(2,0) = t[2];
- }
- else
- result = 0;
- }
- }
- else
- {
- assert( src->width == 1 );
-
- if( type == CV_32FC1 )
- {
- double d = Sf(0,0);
- if( d != 0. )
- Df(0,0) = (float)(bf(0)/d);
- else
- result = 0;
- }
- else
- {
- double d = Sd(0,0);
- if( d != 0. )
- Dd(0,0) = (bd(0)/d);
- else
- result = 0;
- }
- }
- }
- else
- {
- CvLUDecompFunc decomp_func;
- CvLUBackFunc back_func;
- CvSize size = cvGetMatSize( src );
- CvSize dstsize = cvGetMatSize( dst );
- int worktype = CV_64FC1;
- int buf_size = size.width*size.height*CV_ELEM_SIZE(worktype);
- double d = 0;
- CvMat tmat;
-
- if( !lu_inittab )
- {
- icvInitLUTable( &lu_decomp_tab, &lu_back_tab );
- lu_inittab = 1;
- }
-
- if( size.width <= CV_MAX_LOCAL_MAT_SIZE )
- {
- buffer = (uchar*)cvStackAlloc( buf_size );
- local_alloc = 1;
- }
- else
- {
- CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
- }
-
- CV_CALL( cvInitMatHeader( &tmat, size.height, size.width, worktype, buffer ));
- if( type == worktype )
- {
- CV_CALL( cvCopy( src, &tmat ));
- }
- else
- CV_CALL( cvConvert( src, &tmat ));
-
- if( src2->data.ptr != dst->data.ptr )
- {
- CV_CALL( cvCopy( src2, dst ));
- }
-
- decomp_func = (CvLUDecompFunc)(lu_decomp_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
- back_func = (CvLUBackFunc)(lu_back_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
- assert( decomp_func && back_func );
-
- IPPI_CALL( decomp_func( tmat.data.db, tmat.step, size,
- dst->data.ptr, dst->step, dstsize, &d ));
-
- if( d != 0 )
- {
- IPPI_CALL( back_func( tmat.data.db, tmat.step, size,
- dst->data.ptr, dst->step, dstsize ));
- }
- else
- result = 0;
- }
-
- if( !result )
- CV_CALL( cvSetZero( dst ));
-
- __END__;
-
- if( buffer && !local_alloc )
- cvFree( &buffer );
-
- if( u || v || w )
- {
- cvReleaseMat( &u );
- cvReleaseMat( &v );
- cvReleaseMat( &w );
- }
-
- return result;
-}
-
-
-
/****************************************************************************************\
* 3D vector cross-product *
\****************************************************************************************/
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "_cxcore.h"
-#include <float.h>
-
-/////////////////////////////////////////////////////////////////////////////////////////
-
-#define icvGivens_64f( n, x, y, c, s ) \
-{ \
- int _i; \
- double* _x = (x); \
- double* _y = (y); \
- \
- for( _i = 0; _i < n; _i++ ) \
- { \
- double t0 = _x[_i]; \
- double t1 = _y[_i]; \
- _x[_i] = t0*c + t1*s; \
- _y[_i] = -t0*s + t1*c; \
- } \
-}
-
-
-/* y[0:m,0:n] += diag(a[0:1,0:m]) * x[0:m,0:n] */
-static void
-icvMatrAXPY_64f( int m, int n, const double* x, int dx,
- const double* a, double* y, int dy )
-{
- int i, j;
-
- for( i = 0; i < m; i++, x += dx, y += dy )
- {
- double s = a[i];
-
- for( j = 0; j <= n - 4; j += 4 )
- {
- double t0 = y[j] + s*x[j];
- double t1 = y[j+1] + s*x[j+1];
- y[j] = t0;
- y[j+1] = t1;
- t0 = y[j+2] + s*x[j+2];
- t1 = y[j+3] + s*x[j+3];
- y[j+2] = t0;
- y[j+3] = t1;
- }
-
- for( ; j < n; j++ ) y[j] += s*x[j];
- }
-}
-
-
-/* y[1:m,-1] = h*y[1:m,0:n]*x[0:1,0:n]'*x[-1] (this is used for U&V reconstruction)
- y[1:m,0:n] += h*y[1:m,0:n]*x[0:1,0:n]'*x[0:1,0:n] */
-static void
-icvMatrAXPY3_64f( int m, int n, const double* x, int l, double* y, double h )
-{
- int i, j;
-
- for( i = 1; i < m; i++ )
- {
- double s = 0;
-
- y += l;
-
- for( j = 0; j <= n - 4; j += 4 )
- s += x[j]*y[j] + x[j+1]*y[j+1] + x[j+2]*y[j+2] + x[j+3]*y[j+3];
-
- for( ; j < n; j++ ) s += x[j]*y[j];
-
- s *= h;
- y[-1] = s*x[-1];
-
- for( j = 0; j <= n - 4; j += 4 )
- {
- double t0 = y[j] + s*x[j];
- double t1 = y[j+1] + s*x[j+1];
- y[j] = t0;
- y[j+1] = t1;
- t0 = y[j+2] + s*x[j+2];
- t1 = y[j+3] + s*x[j+3];
- y[j+2] = t0;
- y[j+3] = t1;
- }
-
- for( ; j < n; j++ ) y[j] += s*x[j];
- }
-}
-
-
-#define icvGivens_32f( n, x, y, c, s ) \
-{ \
- int _i; \
- float* _x = (x); \
- float* _y = (y); \
- \
- for( _i = 0; _i < n; _i++ ) \
- { \
- double t0 = _x[_i]; \
- double t1 = _y[_i]; \
- _x[_i] = (float)(t0*c + t1*s); \
- _y[_i] = (float)(-t0*s + t1*c);\
- } \
-}
-
-static void
-icvMatrAXPY_32f( int m, int n, const float* x, int dx,
- const float* a, float* y, int dy )
-{
- int i, j;
-
- for( i = 0; i < m; i++, x += dx, y += dy )
- {
- double s = a[i];
-
- for( j = 0; j <= n - 4; j += 4 )
- {
- double t0 = y[j] + s*x[j];
- double t1 = y[j+1] + s*x[j+1];
- y[j] = (float)t0;
- y[j+1] = (float)t1;
- t0 = y[j+2] + s*x[j+2];
- t1 = y[j+3] + s*x[j+3];
- y[j+2] = (float)t0;
- y[j+3] = (float)t1;
- }
-
- for( ; j < n; j++ )
- y[j] = (float)(y[j] + s*x[j]);
- }
-}
-
-
-static void
-icvMatrAXPY3_32f( int m, int n, const float* x, int l, float* y, double h )
-{
- int i, j;
-
- for( i = 1; i < m; i++ )
- {
- double s = 0;
- y += l;
-
- for( j = 0; j <= n - 4; j += 4 )
- s += x[j]*y[j] + x[j+1]*y[j+1] + x[j+2]*y[j+2] + x[j+3]*y[j+3];
-
- for( ; j < n; j++ ) s += x[j]*y[j];
-
- s *= h;
- y[-1] = (float)(s*x[-1]);
-
- for( j = 0; j <= n - 4; j += 4 )
- {
- double t0 = y[j] + s*x[j];
- double t1 = y[j+1] + s*x[j+1];
- y[j] = (float)t0;
- y[j+1] = (float)t1;
- t0 = y[j+2] + s*x[j+2];
- t1 = y[j+3] + s*x[j+3];
- y[j+2] = (float)t0;
- y[j+3] = (float)t1;
- }
-
- for( ; j < n; j++ ) y[j] = (float)(y[j] + s*x[j]);
- }
-}
-
-/* accurate hypotenuse calculation */
-static double
-pythag( double a, double b )
-{
- a = fabs( a );
- b = fabs( b );
- if( a > b )
- {
- b /= a;
- a *= sqrt( 1. + b * b );
- }
- else if( b != 0 )
- {
- a /= b;
- a = b * sqrt( 1. + a * a );
- }
-
- return a;
-}
-
-/****************************************************************************************/
-/****************************************************************************************/
-
-#define MAX_ITERS 30
-
-static void
-icvSVD_64f( double* a, int lda, int m, int n,
- double* w,
- double* uT, int lduT, int nu,
- double* vT, int ldvT,
- double* buffer )
-{
- double* e;
- double* temp;
- double *w1, *e1;
- double *hv;
- double ku0 = 0, kv0 = 0;
- double anorm = 0;
- double *a1, *u0 = uT, *v0 = vT;
- double scale, h;
- int i, j, k, l;
- int nm, m1, n1;
- int nv = n;
- int iters = 0;
- double* hv0 = (double*)cvStackAlloc( (m+2)*sizeof(hv0[0])) + 1;
-
- e = buffer;
- w1 = w;
- e1 = e + 1;
- nm = n;
-
- temp = buffer + nm;
-
- memset( w, 0, nm * sizeof( w[0] ));
- memset( e, 0, nm * sizeof( e[0] ));
-
- m1 = m;
- n1 = n;
-
- /* transform a to bi-diagonal form */
- for( ;; )
- {
- int update_u;
- int update_v;
-
- if( m1 == 0 )
- break;
-
- scale = h = 0;
- update_u = uT && m1 > m - nu;
- hv = update_u ? uT : hv0;
-
- for( j = 0, a1 = a; j < m1; j++, a1 += lda )
- {
- double t = a1[0];
- scale += fabs( hv[j] = t );
- }
-
- if( scale != 0 )
- {
- double f = 1./scale, g, s = 0;
-
- for( j = 0; j < m1; j++ )
- {
- double t = (hv[j] *= f);
- s += t * t;
- }
-
- g = sqrt( s );
- f = hv[0];
- if( f >= 0 )
- g = -g;
- hv[0] = f - g;
- h = 1. / (f * g - s);
-
- memset( temp, 0, n1 * sizeof( temp[0] ));
-
- /* calc temp[0:n-i] = a[i:m,i:n]'*hv[0:m-i] */
- icvMatrAXPY_64f( m1, n1 - 1, a + 1, lda, hv, temp + 1, 0 );
- for( k = 1; k < n1; k++ ) temp[k] *= h;
-
- /* modify a: a[i:m,i:n] = a[i:m,i:n] + hv[0:m-i]*temp[0:n-i]' */
- icvMatrAXPY_64f( m1, n1 - 1, temp + 1, 0, hv, a + 1, lda );
- *w1 = g*scale;
- }
- w1++;
-
- /* store -2/(hv'*hv) */
- if( update_u )
- {
- if( m1 == m )
- ku0 = h;
- else
- hv[-1] = h;
- }
-
- a++;
- n1--;
- if( vT )
- vT += ldvT + 1;
-
- if( n1 == 0 )
- break;
-
- scale = h = 0;
- update_v = vT && n1 > n - nv;
-
- hv = update_v ? vT : hv0;
-
- for( j = 0; j < n1; j++ )
- {
- double t = a[j];
- scale += fabs( hv[j] = t );
- }
-
- if( scale != 0 )
- {
- double f = 1./scale, g, s = 0;
-
- for( j = 0; j < n1; j++ )
- {
- double t = (hv[j] *= f);
- s += t * t;
- }
-
- g = sqrt( s );
- f = hv[0];
- if( f >= 0 )
- g = -g;
- hv[0] = f - g;
- h = 1. / (f * g - s);
- hv[-1] = 0.;
-
- /* update a[i:m:i+1:n] = a[i:m,i+1:n] + (a[i:m,i+1:n]*hv[0:m-i])*... */
- icvMatrAXPY3_64f( m1, n1, hv, lda, a, h );
-
- *e1 = g*scale;
- }
- e1++;
-
- /* store -2/(hv'*hv) */
- if( update_v )
- {
- if( n1 == n )
- kv0 = h;
- else
- hv[-1] = h;
- }
-
- a += lda;
- m1--;
- if( uT )
- uT += lduT + 1;
- }
-
- m1 -= m1 != 0;
- n1 -= n1 != 0;
-
- /* accumulate left transformations */
- if( uT )
- {
- m1 = m - m1;
- uT = u0 + m1 * lduT;
- for( i = m1; i < nu; i++, uT += lduT )
- {
- memset( uT + m1, 0, (m - m1) * sizeof( uT[0] ));
- uT[i] = 1.;
- }
-
- for( i = m1 - 1; i >= 0; i-- )
- {
- double s;
- int lh = nu - i;
-
- l = m - i;
-
- hv = u0 + (lduT + 1) * i;
- h = i == 0 ? ku0 : hv[-1];
-
- assert( h <= 0 );
-
- if( h != 0 )
- {
- uT = hv;
- icvMatrAXPY3_64f( lh, l-1, hv+1, lduT, uT+1, h );
-
- s = hv[0] * h;
- for( k = 0; k < l; k++ ) hv[k] *= s;
- hv[0] += 1;
- }
- else
- {
- for( j = 1; j < l; j++ )
- hv[j] = 0;
- for( j = 1; j < lh; j++ )
- hv[j * lduT] = 0;
- hv[0] = 1;
- }
- }
- uT = u0;
- }
-
- /* accumulate right transformations */
- if( vT )
- {
- n1 = n - n1;
- vT = v0 + n1 * ldvT;
- for( i = n1; i < nv; i++, vT += ldvT )
- {
- memset( vT + n1, 0, (n - n1) * sizeof( vT[0] ));
- vT[i] = 1.;
- }
-
- for( i = n1 - 1; i >= 0; i-- )
- {
- double s;
- int lh = nv - i;
-
- l = n - i;
- hv = v0 + (ldvT + 1) * i;
- h = i == 0 ? kv0 : hv[-1];
-
- assert( h <= 0 );
-
- if( h != 0 )
- {
- vT = hv;
- icvMatrAXPY3_64f( lh, l-1, hv+1, ldvT, vT+1, h );
-
- s = hv[0] * h;
- for( k = 0; k < l; k++ ) hv[k] *= s;
- hv[0] += 1;
- }
- else
- {
- for( j = 1; j < l; j++ )
- hv[j] = 0;
- for( j = 1; j < lh; j++ )
- hv[j * ldvT] = 0;
- hv[0] = 1;
- }
- }
- vT = v0;
- }
-
- for( i = 0; i < nm; i++ )
- {
- double tnorm = fabs( w[i] );
- tnorm += fabs( e[i] );
-
- if( anorm < tnorm )
- anorm = tnorm;
- }
-
- anorm *= DBL_EPSILON;
-
- /* diagonalization of the bidiagonal form */
- for( k = nm - 1; k >= 0; k-- )
- {
- double z = 0;
- iters = 0;
-
- for( ;; ) /* do iterations */
- {
- double c, s, f, g, x, y;
- int flag = 0;
-
- /* test for splitting */
- for( l = k; l >= 0; l-- )
- {
- if( fabs(e[l]) <= anorm )
- {
- flag = 1;
- break;
- }
- assert( l > 0 );
- if( fabs(w[l - 1]) <= anorm )
- break;
- }
-
- if( !flag )
- {
- c = 0;
- s = 1;
-
- for( i = l; i <= k; i++ )
- {
- f = s * e[i];
-
- e[i] *= c;
-
- if( anorm + fabs( f ) == anorm )
- break;
-
- g = w[i];
- h = pythag( f, g );
- w[i] = h;
- c = g / h;
- s = -f / h;
-
- if( uT )
- icvGivens_64f( m, uT + lduT * (l - 1), uT + lduT * i, c, s );
- }
- }
-
- z = w[k];
- if( l == k || iters++ == MAX_ITERS )
- break;
-
- /* shift from bottom 2x2 minor */
- x = w[l];
- y = w[k - 1];
- g = e[k - 1];
- h = e[k];
- f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
- g = pythag( f, 1 );
- if( f < 0 )
- g = -g;
- f = x - (z / x) * z + (h / x) * (y / (f + g) - h);
- /* next QR transformation */
- c = s = 1;
-
- for( i = l + 1; i <= k; i++ )
- {
- g = e[i];
- y = w[i];
- h = s * g;
- g *= c;
- z = pythag( f, h );
- e[i - 1] = z;
- c = f / z;
- s = h / z;
- f = x * c + g * s;
- g = -x * s + g * c;
- h = y * s;
- y *= c;
-
- if( vT )
- icvGivens_64f( n, vT + ldvT * (i - 1), vT + ldvT * i, c, s );
-
- z = pythag( f, h );
- w[i - 1] = z;
-
- /* rotation can be arbitrary if z == 0 */
- if( z != 0 )
- {
- c = f / z;
- s = h / z;
- }
- f = c * g + s * y;
- x = -s * g + c * y;
-
- if( uT )
- icvGivens_64f( m, uT + lduT * (i - 1), uT + lduT * i, c, s );
- }
-
- e[l] = 0;
- e[k] = f;
- w[k] = x;
- } /* end of iteration loop */
-
- if( iters > MAX_ITERS )
- break;
-
- if( z < 0 )
- {
- w[k] = -z;
- if( vT )
- {
- for( j = 0; j < n; j++ )
- vT[j + k * ldvT] = -vT[j + k * ldvT];
- }
- }
- } /* end of diagonalization loop */
-
- /* sort singular values and corresponding values */
- for( i = 0; i < nm; i++ )
- {
- k = i;
- for( j = i + 1; j < nm; j++ )
- if( w[k] < w[j] )
- k = j;
-
- if( k != i )
- {
- double t;
- CV_SWAP( w[i], w[k], t );
-
- if( vT )
- for( j = 0; j < n; j++ )
- CV_SWAP( vT[j + ldvT*k], vT[j + ldvT*i], t );
-
- if( uT )
- for( j = 0; j < m; j++ )
- CV_SWAP( uT[j + lduT*k], uT[j + lduT*i], t );
- }
- }
-}
-
-
-static void
-icvSVD_32f( float* a, int lda, int m, int n,
- float* w,
- float* uT, int lduT, int nu,
- float* vT, int ldvT,
- float* buffer )
-{
- float* e;
- float* temp;
- float *w1, *e1;
- float *hv;
- double ku0 = 0, kv0 = 0;
- double anorm = 0;
- float *a1, *u0 = uT, *v0 = vT;
- double scale, h;
- int i, j, k, l;
- int nm, m1, n1;
- int nv = n;
- int iters = 0;
- float* hv0 = (float*)cvStackAlloc( (m+2)*sizeof(hv0[0])) + 1;
-
- e = buffer;
-
- w1 = w;
- e1 = e + 1;
- nm = n;
-
- temp = buffer + nm;
-
- memset( w, 0, nm * sizeof( w[0] ));
- memset( e, 0, nm * sizeof( e[0] ));
-
- m1 = m;
- n1 = n;
-
- /* transform a to bi-diagonal form */
- for( ;; )
- {
- int update_u;
- int update_v;
-
- if( m1 == 0 )
- break;
-
- scale = h = 0;
-
- update_u = uT && m1 > m - nu;
- hv = update_u ? uT : hv0;
-
- for( j = 0, a1 = a; j < m1; j++, a1 += lda )
- {
- double t = a1[0];
- scale += fabs( hv[j] = (float)t );
- }
-
- if( scale != 0 )
- {
- double f = 1./scale, g, s = 0;
-
- for( j = 0; j < m1; j++ )
- {
- double t = (hv[j] = (float)(hv[j]*f));
- s += t * t;
- }
-
- g = sqrt( s );
- f = hv[0];
- if( f >= 0 )
- g = -g;
- hv[0] = (float)(f - g);
- h = 1. / (f * g - s);
-
- memset( temp, 0, n1 * sizeof( temp[0] ));
-
- /* calc temp[0:n-i] = a[i:m,i:n]'*hv[0:m-i] */
- icvMatrAXPY_32f( m1, n1 - 1, a + 1, lda, hv, temp + 1, 0 );
-
- for( k = 1; k < n1; k++ ) temp[k] = (float)(temp[k]*h);
-
- /* modify a: a[i:m,i:n] = a[i:m,i:n] + hv[0:m-i]*temp[0:n-i]' */
- icvMatrAXPY_32f( m1, n1 - 1, temp + 1, 0, hv, a + 1, lda );
- *w1 = (float)(g*scale);
- }
- w1++;
-
- /* store -2/(hv'*hv) */
- if( update_u )
- {
- if( m1 == m )
- ku0 = h;
- else
- hv[-1] = (float)h;
- }
-
- a++;
- n1--;
- if( vT )
- vT += ldvT + 1;
-
- if( n1 == 0 )
- break;
-
- scale = h = 0;
- update_v = vT && n1 > n - nv;
- hv = update_v ? vT : hv0;
-
- for( j = 0; j < n1; j++ )
- {
- double t = a[j];
- scale += fabs( hv[j] = (float)t );
- }
-
- if( scale != 0 )
- {
- double f = 1./scale, g, s = 0;
-
- for( j = 0; j < n1; j++ )
- {
- double t = (hv[j] = (float)(hv[j]*f));
- s += t * t;
- }
-
- g = sqrt( s );
- f = hv[0];
- if( f >= 0 )
- g = -g;
- hv[0] = (float)(f - g);
- h = 1. / (f * g - s);
- hv[-1] = 0.f;
-
- /* update a[i:m:i+1:n] = a[i:m,i+1:n] + (a[i:m,i+1:n]*hv[0:m-i])*... */
- icvMatrAXPY3_32f( m1, n1, hv, lda, a, h );
-
- *e1 = (float)(g*scale);
- }
- e1++;
-
- /* store -2/(hv'*hv) */
- if( update_v )
- {
- if( n1 == n )
- kv0 = h;
- else
- hv[-1] = (float)h;
- }
-
- a += lda;
- m1--;
- if( uT )
- uT += lduT + 1;
- }
-
- m1 -= m1 != 0;
- n1 -= n1 != 0;
-
- /* accumulate left transformations */
- if( uT )
- {
- m1 = m - m1;
- uT = u0 + m1 * lduT;
- for( i = m1; i < nu; i++, uT += lduT )
- {
- memset( uT + m1, 0, (m - m1) * sizeof( uT[0] ));
- uT[i] = 1.;
- }
-
- for( i = m1 - 1; i >= 0; i-- )
- {
- double s;
- int lh = nu - i;
-
- l = m - i;
-
- hv = u0 + (lduT + 1) * i;
- h = i == 0 ? ku0 : hv[-1];
-
- assert( h <= 0 );
-
- if( h != 0 )
- {
- uT = hv;
- icvMatrAXPY3_32f( lh, l-1, hv+1, lduT, uT+1, h );
-
- s = hv[0] * h;
- for( k = 0; k < l; k++ ) hv[k] = (float)(hv[k]*s);
- hv[0] += 1;
- }
- else
- {
- for( j = 1; j < l; j++ )
- hv[j] = 0;
- for( j = 1; j < lh; j++ )
- hv[j * lduT] = 0;
- hv[0] = 1;
- }
- }
- uT = u0;
- }
-
- /* accumulate right transformations */
- if( vT )
- {
- n1 = n - n1;
- vT = v0 + n1 * ldvT;
- for( i = n1; i < nv; i++, vT += ldvT )
- {
- memset( vT + n1, 0, (n - n1) * sizeof( vT[0] ));
- vT[i] = 1.;
- }
-
- for( i = n1 - 1; i >= 0; i-- )
- {
- double s;
- int lh = nv - i;
-
- l = n - i;
- hv = v0 + (ldvT + 1) * i;
- h = i == 0 ? kv0 : hv[-1];
-
- assert( h <= 0 );
-
- if( h != 0 )
- {
- vT = hv;
- icvMatrAXPY3_32f( lh, l-1, hv+1, ldvT, vT+1, h );
-
- s = hv[0] * h;
- for( k = 0; k < l; k++ ) hv[k] = (float)(hv[k]*s);
- hv[0] += 1;
- }
- else
- {
- for( j = 1; j < l; j++ )
- hv[j] = 0;
- for( j = 1; j < lh; j++ )
- hv[j * ldvT] = 0;
- hv[0] = 1;
- }
- }
- vT = v0;
- }
-
- for( i = 0; i < nm; i++ )
- {
- double tnorm = fabs( w[i] );
- tnorm += fabs( e[i] );
-
- if( anorm < tnorm )
- anorm = tnorm;
- }
-
- anorm *= FLT_EPSILON;
-
- /* diagonalization of the bidiagonal form */
- for( k = nm - 1; k >= 0; k-- )
- {
- double z = 0;
- iters = 0;
-
- for( ;; ) /* do iterations */
- {
- double c, s, f, g, x, y;
- int flag = 0;
-
- /* test for splitting */
- for( l = k; l >= 0; l-- )
- {
- if( fabs( e[l] ) <= anorm )
- {
- flag = 1;
- break;
- }
- assert( l > 0 );
- if( fabs( w[l - 1] ) <= anorm )
- break;
- }
-
- if( !flag )
- {
- c = 0;
- s = 1;
-
- for( i = l; i <= k; i++ )
- {
- f = s * e[i];
- e[i] = (float)(e[i]*c);
-
- if( anorm + fabs( f ) == anorm )
- break;
-
- g = w[i];
- h = pythag( f, g );
- w[i] = (float)h;
- c = g / h;
- s = -f / h;
-
- if( uT )
- icvGivens_32f( m, uT + lduT * (l - 1), uT + lduT * i, c, s );
- }
- }
-
- z = w[k];
- if( l == k || iters++ == MAX_ITERS )
- break;
-
- /* shift from bottom 2x2 minor */
- x = w[l];
- y = w[k - 1];
- g = e[k - 1];
- h = e[k];
- f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
- g = pythag( f, 1 );
- if( f < 0 )
- g = -g;
- f = x - (z / x) * z + (h / x) * (y / (f + g) - h);
- /* next QR transformation */
- c = s = 1;
-
- for( i = l + 1; i <= k; i++ )
- {
- g = e[i];
- y = w[i];
- h = s * g;
- g *= c;
- z = pythag( f, h );
- e[i - 1] = (float)z;
- c = f / z;
- s = h / z;
- f = x * c + g * s;
- g = -x * s + g * c;
- h = y * s;
- y *= c;
-
- if( vT )
- icvGivens_32f( n, vT + ldvT * (i - 1), vT + ldvT * i, c, s );
-
- z = pythag( f, h );
- w[i - 1] = (float)z;
-
- /* rotation can be arbitrary if z == 0 */
- if( z != 0 )
- {
- c = f / z;
- s = h / z;
- }
- f = c * g + s * y;
- x = -s * g + c * y;
-
- if( uT )
- icvGivens_32f( m, uT + lduT * (i - 1), uT + lduT * i, c, s );
- }
-
- e[l] = 0;
- e[k] = (float)f;
- w[k] = (float)x;
- } /* end of iteration loop */
-
- if( iters > MAX_ITERS )
- break;
-
- if( z < 0 )
- {
- w[k] = (float)(-z);
- if( vT )
- {
- for( j = 0; j < n; j++ )
- vT[j + k * ldvT] = -vT[j + k * ldvT];
- }
- }
- } /* end of diagonalization loop */
-
- /* sort singular values and corresponding vectors */
- for( i = 0; i < nm; i++ )
- {
- k = i;
- for( j = i + 1; j < nm; j++ )
- if( w[k] < w[j] )
- k = j;
-
- if( k != i )
- {
- float t;
- CV_SWAP( w[i], w[k], t );
-
- if( vT )
- for( j = 0; j < n; j++ )
- CV_SWAP( vT[j + ldvT*k], vT[j + ldvT*i], t );
-
- if( uT )
- for( j = 0; j < m; j++ )
- CV_SWAP( uT[j + lduT*k], uT[j + lduT*i], t );
- }
- }
-}
-
-
-static void
-icvSVBkSb_64f( int m, int n, const double* w,
- const double* uT, int lduT,
- const double* vT, int ldvT,
- const double* b, int ldb, int nb,
- double* x, int ldx, double* buffer )
-{
- double threshold = 0;
- int i, j, nm = MIN( m, n );
-
- if( !b )
- nb = m;
-
- for( i = 0; i < n; i++ )
- memset( x + i*ldx, 0, nb*sizeof(x[0]));
-
- for( i = 0; i < nm; i++ )
- threshold += w[i];
- threshold *= 2*DBL_EPSILON;
-
- /* vT * inv(w) * uT * b */
- for( i = 0; i < nm; i++, uT += lduT, vT += ldvT )
- {
- double wi = w[i];
-
- if( wi > threshold )
- {
- wi = 1./wi;
-
- if( nb == 1 )
- {
- double s = 0;
- if( b )
- {
- if( ldb == 1 )
- {
- for( j = 0; j <= m - 4; j += 4 )
- s += uT[j]*b[j] + uT[j+1]*b[j+1] + uT[j+2]*b[j+2] + uT[j+3]*b[j+3];
- for( ; j < m; j++ )
- s += uT[j]*b[j];
- }
- else
- {
- for( j = 0; j < m; j++ )
- s += uT[j]*b[j*ldb];
- }
- }
- else
- s = uT[0];
- s *= wi;
- if( ldx == 1 )
- {
- for( j = 0; j <= n - 4; j += 4 )
- {
- double t0 = x[j] + s*vT[j];
- double t1 = x[j+1] + s*vT[j+1];
- x[j] = t0;
- x[j+1] = t1;
- t0 = x[j+2] + s*vT[j+2];
- t1 = x[j+3] + s*vT[j+3];
- x[j+2] = t0;
- x[j+3] = t1;
- }
-
- for( ; j < n; j++ )
- x[j] += s*vT[j];
- }
- else
- {
- for( j = 0; j < n; j++ )
- x[j*ldx] += s*vT[j];
- }
- }
- else
- {
- if( b )
- {
- memset( buffer, 0, nb*sizeof(buffer[0]));
- icvMatrAXPY_64f( m, nb, b, ldb, uT, buffer, 0 );
- for( j = 0; j < nb; j++ )
- buffer[j] *= wi;
- }
- else
- {
- for( j = 0; j < nb; j++ )
- buffer[j] = uT[j]*wi;
- }
- icvMatrAXPY_64f( n, nb, buffer, 0, vT, x, ldx );
- }
- }
- }
-}
-
-
-static void
-icvSVBkSb_32f( int m, int n, const float* w,
- const float* uT, int lduT,
- const float* vT, int ldvT,
- const float* b, int ldb, int nb,
- float* x, int ldx, float* buffer )
-{
- float threshold = 0.f;
- int i, j, nm = MIN( m, n );
-
- if( !b )
- nb = m;
-
- for( i = 0; i < n; i++ )
- memset( x + i*ldx, 0, nb*sizeof(x[0]));
-
- for( i = 0; i < nm; i++ )
- threshold += w[i];
- threshold *= 2*FLT_EPSILON;
-
- /* vT * inv(w) * uT * b */
- for( i = 0; i < nm; i++, uT += lduT, vT += ldvT )
- {
- double wi = w[i];
-
- if( wi > threshold )
- {
- wi = 1./wi;
-
- if( nb == 1 )
- {
- double s = 0;
- if( b )
- {
- if( ldb == 1 )
- {
- for( j = 0; j <= m - 4; j += 4 )
- s += uT[j]*b[j] + uT[j+1]*b[j+1] + uT[j+2]*b[j+2] + uT[j+3]*b[j+3];
- for( ; j < m; j++ )
- s += uT[j]*b[j];
- }
- else
- {
- for( j = 0; j < m; j++ )
- s += uT[j]*b[j*ldb];
- }
- }
- else
- s = uT[0];
- s *= wi;
-
- if( ldx == 1 )
- {
- for( j = 0; j <= n - 4; j += 4 )
- {
- double t0 = x[j] + s*vT[j];
- double t1 = x[j+1] + s*vT[j+1];
- x[j] = (float)t0;
- x[j+1] = (float)t1;
- t0 = x[j+2] + s*vT[j+2];
- t1 = x[j+3] + s*vT[j+3];
- x[j+2] = (float)t0;
- x[j+3] = (float)t1;
- }
-
- for( ; j < n; j++ )
- x[j] = (float)(x[j] + s*vT[j]);
- }
- else
- {
- for( j = 0; j < n; j++ )
- x[j*ldx] = (float)(x[j*ldx] + s*vT[j]);
- }
- }
- else
- {
- if( b )
- {
- memset( buffer, 0, nb*sizeof(buffer[0]));
- icvMatrAXPY_32f( m, nb, b, ldb, uT, buffer, 0 );
- for( j = 0; j < nb; j++ )
- buffer[j] = (float)(buffer[j]*wi);
- }
- else
- {
- for( j = 0; j < nb; j++ )
- buffer[j] = (float)(uT[j]*wi);
- }
- icvMatrAXPY_32f( n, nb, buffer, 0, vT, x, ldx );
- }
- }
- }
-}
-
-
-CV_IMPL void
-cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )
-{
- uchar* buffer = 0;
- int local_alloc = 0;
-
- CV_FUNCNAME( "cvSVD" );
-
- __BEGIN__;
-
- CvMat astub, *a = (CvMat*)aarr;
- CvMat wstub, *w = (CvMat*)warr;
- CvMat ustub, *u;
- CvMat vstub, *v;
- CvMat tmat;
- uchar* tw = 0;
- int type;
- int a_buf_offset = 0, u_buf_offset = 0, buf_size, pix_size;
- int temp_u = 0, /* temporary storage for U is needed */
- t_svd; /* special case: a->rows < a->cols */
- int m, n;
- int w_rows, w_cols;
- int u_rows = 0, u_cols = 0;
- int w_is_mat = 0;
-
- if( !CV_IS_MAT( a ))
- CV_CALL( a = cvGetMat( a, &astub ));
-
- if( !CV_IS_MAT( w ))
- CV_CALL( w = cvGetMat( w, &wstub ));
-
- if( !CV_ARE_TYPES_EQ( a, w ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( a->rows >= a->cols )
- {
- m = a->rows;
- n = a->cols;
- w_rows = w->rows;
- w_cols = w->cols;
- t_svd = 0;
- }
- else
- {
- CvArr* t;
- CV_SWAP( uarr, varr, t );
-
- flags = (flags & CV_SVD_U_T ? CV_SVD_V_T : 0)|
- (flags & CV_SVD_V_T ? CV_SVD_U_T : 0);
- m = a->cols;
- n = a->rows;
- w_rows = w->cols;
- w_cols = w->rows;
- t_svd = 1;
- }
-
- u = (CvMat*)uarr;
- v = (CvMat*)varr;
-
- w_is_mat = w_cols > 1 && w_rows > 1;
-
- if( !w_is_mat && CV_IS_MAT_CONT(w->type) && w_cols + w_rows - 1 == n )
- tw = w->data.ptr;
-
- if( u )
- {
- if( !CV_IS_MAT( u ))
- CV_CALL( u = cvGetMat( u, &ustub ));
-
- if( !(flags & CV_SVD_U_T) )
- {
- u_rows = u->rows;
- u_cols = u->cols;
- }
- else
- {
- u_rows = u->cols;
- u_cols = u->rows;
- }
-
- if( !CV_ARE_TYPES_EQ( a, u ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( u_rows != m || (u_cols != m && u_cols != n))
- CV_ERROR( CV_StsUnmatchedSizes, !t_svd ? "U matrix has unappropriate size" :
- "V matrix has unappropriate size" );
-
- temp_u = (u_rows != u_cols && !(flags & CV_SVD_U_T)) || u->data.ptr==a->data.ptr;
-
- if( w_is_mat && u_cols != w_rows )
- CV_ERROR( CV_StsUnmatchedSizes, !t_svd ? "U and W have incompatible sizes" :
- "V and W have incompatible sizes" );
- }
- else
- {
- u = &ustub;
- u->data.ptr = 0;
- u->step = 0;
- }
-
- if( v )
- {
- int v_rows, v_cols;
-
- if( !CV_IS_MAT( v ))
- CV_CALL( v = cvGetMat( v, &vstub ));
-
- if( !(flags & CV_SVD_V_T) )
- {
- v_rows = v->rows;
- v_cols = v->cols;
- }
- else
- {
- v_rows = v->cols;
- v_cols = v->rows;
- }
-
- if( !CV_ARE_TYPES_EQ( a, v ))
- CV_ERROR( CV_StsUnmatchedFormats, "" );
-
- if( v_rows != n || v_cols != n )
- CV_ERROR( CV_StsUnmatchedSizes, t_svd ? "U matrix has unappropriate size" :
- "V matrix has unappropriate size" );
-
- if( w_is_mat && w_cols != v_cols )
- CV_ERROR( CV_StsUnmatchedSizes, t_svd ? "U and W have incompatible sizes" :
- "V and W have incompatible sizes" );
- }
- else
- {
- v = &vstub;
- v->data.ptr = 0;
- v->step = 0;
- }
-
- type = CV_MAT_TYPE( a->type );
- pix_size = CV_ELEM_SIZE(type);
- buf_size = n*2 + m;
-
- if( !(flags & CV_SVD_MODIFY_A) )
- {
- a_buf_offset = buf_size;
- buf_size += a->rows*a->cols;
- }
-
- if( temp_u )
- {
- u_buf_offset = buf_size;
- buf_size += u->rows*u->cols;
- }
-
- buf_size *= pix_size;
-
- if( buf_size <= CV_MAX_LOCAL_SIZE )
- {
- buffer = (uchar*)cvStackAlloc( buf_size );
- local_alloc = 1;
- }
- else
- {
- CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
- }
-
- if( !(flags & CV_SVD_MODIFY_A) )
- {
- cvInitMatHeader( &tmat, m, n, type,
- buffer + a_buf_offset*pix_size );
- if( !t_svd )
- cvCopy( a, &tmat );
- else
- cvT( a, &tmat );
- a = &tmat;
- }
-
- if( temp_u )
- {
- cvInitMatHeader( &ustub, u_cols, u_rows, type, buffer + u_buf_offset*pix_size );
- u = &ustub;
- }
-
- if( !tw )
- tw = buffer + (n + m)*pix_size;
-
- if( type == CV_32FC1 )
- {
- icvSVD_32f( a->data.fl, a->step/sizeof(float), a->rows, a->cols,
- (float*)tw, u->data.fl, u->step/sizeof(float), u_cols,
- v->data.fl, v->step/sizeof(float), (float*)buffer );
- }
- else if( type == CV_64FC1 )
- {
- icvSVD_64f( a->data.db, a->step/sizeof(double), a->rows, a->cols,
- (double*)tw, u->data.db, u->step/sizeof(double), u_cols,
- v->data.db, v->step/sizeof(double), (double*)buffer );
- }
- else
- {
- CV_ERROR( CV_StsUnsupportedFormat, "" );
- }
-
- if( tw != w->data.ptr )
- {
- int shift = w->cols != 1;
- cvSetZero( w );
- if( type == CV_32FC1 )
- for( int i = 0; i < n; i++ )
- ((float*)(w->data.ptr + i*w->step))[i*shift] = ((float*)tw)[i];
- else
- for( int i = 0; i < n; i++ )
- ((double*)(w->data.ptr + i*w->step))[i*shift] = ((double*)tw)[i];
- }
-
- if( uarr )
- {
- if( !(flags & CV_SVD_U_T))
- cvT( u, uarr );
- else if( temp_u )
- cvCopy( u, uarr );
- /*CV_CHECK_NANS( uarr );*/
- }
-
- if( varr )
- {
- if( !(flags & CV_SVD_V_T))
- cvT( v, varr );
- /*CV_CHECK_NANS( varr );*/
- }
-
- CV_CHECK_NANS( w );
-
- __END__;
-
- if( buffer && !local_alloc )
- cvFree( &buffer );
-}
-
-
-CV_IMPL void
-cvSVBkSb( const CvArr* warr, const CvArr* uarr,
- const CvArr* varr, const CvArr* barr,
- CvArr* xarr, int flags )
-{
- uchar* buffer = 0;
- int local_alloc = 0;
-
- CV_FUNCNAME( "cvSVBkSb" );
-
- __BEGIN__;
-
- CvMat wstub, *w = (CvMat*)warr;
- CvMat bstub, *b = (CvMat*)barr;
- CvMat xstub, *x = (CvMat*)xarr;
- CvMat ustub, ustub2, *u = (CvMat*)uarr;
- CvMat vstub, vstub2, *v = (CvMat*)varr;
- uchar* tw = 0;
- int type;
- int temp_u = 0, temp_v = 0;
- int u_buf_offset = 0, v_buf_offset = 0, w_buf_offset = 0, t_buf_offset = 0;
- int buf_size = 0, pix_size;
- int m, n, nm;
- int u_rows, u_cols;
- int v_rows, v_cols;
-
- if( !CV_IS_MAT( w ))
- CV_CALL( w = cvGetMat( w, &wstub ));
-
- if( !CV_IS_MAT( u ))
- CV_CALL( u = cvGetMat( u, &ustub ));
-
- if( !CV_IS_MAT( v ))
- CV_CALL( v = cvGetMat( v, &vstub ));
-
- if( !CV_IS_MAT( x ))
- CV_CALL( x = cvGetMat( x, &xstub ));
-
- if( !CV_ARE_TYPES_EQ( w, u ) || !CV_ARE_TYPES_EQ( w, v ) || !CV_ARE_TYPES_EQ( w, x ))
- CV_ERROR( CV_StsUnmatchedFormats, "All matrices must have the same type" );
-
- type = CV_MAT_TYPE( w->type );
- pix_size = CV_ELEM_SIZE(type);
-
- if( !(flags & CV_SVD_U_T) )
- {
- temp_u = 1;
- u_buf_offset = buf_size;
- buf_size += u->cols*u->rows*pix_size;
- u_rows = u->rows;
- u_cols = u->cols;
- }
- else
- {
- u_rows = u->cols;
- u_cols = u->rows;
- }
-
- if( !(flags & CV_SVD_V_T) )
- {
- temp_v = 1;
- v_buf_offset = buf_size;
- buf_size += v->cols*v->rows*pix_size;
- v_rows = v->rows;
- v_cols = v->cols;
- }
- else
- {
- v_rows = v->cols;
- v_cols = v->rows;
- }
-
- m = u_rows;
- n = v_rows;
- nm = MIN(n,m);
-
- if( (u_rows != u_cols && v_rows != v_cols) || x->rows != v_rows )
- CV_ERROR( CV_StsBadSize, "V or U matrix must be square" );
-
- if( (w->rows == 1 || w->cols == 1) && w->rows + w->cols - 1 == nm )
- {
- if( CV_IS_MAT_CONT(w->type) )
- tw = w->data.ptr;
- else
- {
- w_buf_offset = buf_size;
- buf_size += nm*pix_size;
- }
- }
- else
- {
- if( w->cols != v_cols || w->rows != u_cols )
- CV_ERROR( CV_StsBadSize, "W must be 1d array of MIN(m,n) elements or "
- "matrix which size matches to U and V" );
- w_buf_offset = buf_size;
- buf_size += nm*pix_size;
- }
-
- if( b )
- {
- if( !CV_IS_MAT( b ))
- CV_CALL( b = cvGetMat( b, &bstub ));
- if( !CV_ARE_TYPES_EQ( w, b ))
- CV_ERROR( CV_StsUnmatchedFormats, "All matrices must have the same type" );
- if( b->cols != x->cols || b->rows != m )
- CV_ERROR( CV_StsUnmatchedSizes, "b matrix must have (m x x->cols) size" );
- }
- else
- {
- b = &bstub;
- memset( b, 0, sizeof(*b));
- }
-
- t_buf_offset = buf_size;
- buf_size += (MAX(m,n) + b->cols)*pix_size;
-
- if( buf_size <= CV_MAX_LOCAL_SIZE )
- {
- buffer = (uchar*)cvStackAlloc( buf_size );
- local_alloc = 1;
- }
- else
- CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
-
- if( temp_u )
- {
- cvInitMatHeader( &ustub2, u_cols, u_rows, type, buffer + u_buf_offset );
- cvT( u, &ustub2 );
- u = &ustub2;
- }
-
- if( temp_v )
- {
- cvInitMatHeader( &vstub2, v_cols, v_rows, type, buffer + v_buf_offset );
- cvT( v, &vstub2 );
- v = &vstub2;
- }
-
- if( !tw )
- {
- int i, shift = w->cols > 1 ? pix_size : 0;
- tw = buffer + w_buf_offset;
- for( i = 0; i < nm; i++ )
- memcpy( tw + i*pix_size, w->data.ptr + i*(w->step + shift), pix_size );
- }
-
- if( type == CV_32FC1 )
- {
- icvSVBkSb_32f( m, n, (float*)tw, u->data.fl, u->step/sizeof(float),
- v->data.fl, v->step/sizeof(float),
- b->data.fl, b->step/sizeof(float), b->cols,
- x->data.fl, x->step/sizeof(float),
- (float*)(buffer + t_buf_offset) );
- }
- else if( type == CV_64FC1 )
- {
- icvSVBkSb_64f( m, n, (double*)tw, u->data.db, u->step/sizeof(double),
- v->data.db, v->step/sizeof(double),
- b->data.db, b->step/sizeof(double), b->cols,
- x->data.db, x->step/sizeof(double),
- (double*)(buffer + t_buf_offset) );
- }
- else
- {
- CV_ERROR( CV_StsUnsupportedFormat, "" );
- }
-
- __END__;
-
- if( buffer && !local_alloc )
- cvFree( &buffer );
-}
-
-/* End of file. */
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "_cxcore.h"\r
+#include "clapack.h"\r
+#include <float.h>\r
+\r
+/* y[0:m,0:n] += diag(a[0:1,0:m]) * x[0:m,0:n] */\r
+static void\r
+icvMatrAXPY_64f( int m, int n, const double* x, int dx,\r
+ const double* a, double* y, int dy )\r
+{\r
+ int i, j;\r
+\r
+ for( i = 0; i < m; i++, x += dx, y += dy )\r
+ {\r
+ double s = a[i];\r
+\r
+ for( j = 0; j <= n - 4; j += 4 )\r
+ {\r
+ double t0 = y[j] + s*x[j];\r
+ double t1 = y[j+1] + s*x[j+1];\r
+ y[j] = t0;\r
+ y[j+1] = t1;\r
+ t0 = y[j+2] + s*x[j+2];\r
+ t1 = y[j+3] + s*x[j+3];\r
+ y[j+2] = t0;\r
+ y[j+3] = t1;\r
+ }\r
+\r
+ for( ; j < n; j++ ) y[j] += s*x[j];\r
+ }\r
+}\r
+\r
+static void\r
+icvMatrAXPY_32f( int m, int n, const float* x, int dx,\r
+ const float* a, float* y, int dy )\r
+{\r
+ int i, j;\r
+\r
+ for( i = 0; i < m; i++, x += dx, y += dy )\r
+ {\r
+ double s = a[i];\r
+\r
+ for( j = 0; j <= n - 4; j += 4 )\r
+ {\r
+ double t0 = y[j] + s*x[j];\r
+ double t1 = y[j+1] + s*x[j+1];\r
+ y[j] = (float)t0;\r
+ y[j+1] = (float)t1;\r
+ t0 = y[j+2] + s*x[j+2];\r
+ t1 = y[j+3] + s*x[j+3];\r
+ y[j+2] = (float)t0;\r
+ y[j+3] = (float)t1;\r
+ }\r
+\r
+ for( ; j < n; j++ )\r
+ y[j] = (float)(y[j] + s*x[j]);\r
+ }\r
+}\r
+\r
+\r
+static void\r
+icvSVBkSb_64f( int m, int n, const double* w,\r
+ const double* uT, int lduT,\r
+ const double* vT, int ldvT,\r
+ const double* b, int ldb, int nb,\r
+ double* x, int ldx, double* buffer )\r
+{\r
+ double threshold = w[0]*DBL_EPSILON;\r
+ int i, j, nm = MIN( m, n );\r
+\r
+ if( !b )\r
+ nb = m;\r
+\r
+ for( i = 0; i < n; i++ )\r
+ memset( x + i*ldx, 0, nb*sizeof(x[0]));\r
+\r
+ /*for( i = 0; i < nm; i++ )\r
+ threshold += w[i];\r
+ threshold *= 2*DBL_EPSILON;*/\r
+\r
+ /* vT * inv(w) * uT * b */\r
+ for( i = 0; i < nm; i++, uT += lduT, vT += ldvT )\r
+ {\r
+ double wi = w[i];\r
+\r
+ if( wi > threshold )\r
+ {\r
+ wi = 1./wi;\r
+\r
+ if( nb == 1 )\r
+ {\r
+ double s = 0;\r
+ if( b )\r
+ {\r
+ if( ldb == 1 )\r
+ {\r
+ for( j = 0; j <= m - 4; j += 4 )\r
+ s += uT[j]*b[j] + uT[j+1]*b[j+1] + uT[j+2]*b[j+2] + uT[j+3]*b[j+3];\r
+ for( ; j < m; j++ )\r
+ s += uT[j]*b[j];\r
+ }\r
+ else\r
+ {\r
+ for( j = 0; j < m; j++ )\r
+ s += uT[j]*b[j*ldb];\r
+ }\r
+ }\r
+ else\r
+ s = uT[0];\r
+ s *= wi;\r
+ if( ldx == 1 )\r
+ {\r
+ for( j = 0; j <= n - 4; j += 4 )\r
+ {\r
+ double t0 = x[j] + s*vT[j];\r
+ double t1 = x[j+1] + s*vT[j+1];\r
+ x[j] = t0;\r
+ x[j+1] = t1;\r
+ t0 = x[j+2] + s*vT[j+2];\r
+ t1 = x[j+3] + s*vT[j+3];\r
+ x[j+2] = t0;\r
+ x[j+3] = t1;\r
+ }\r
+\r
+ for( ; j < n; j++ )\r
+ x[j] += s*vT[j];\r
+ }\r
+ else\r
+ {\r
+ for( j = 0; j < n; j++ )\r
+ x[j*ldx] += s*vT[j];\r
+ }\r
+ }\r
+ else\r
+ {\r
+ if( b )\r
+ {\r
+ memset( buffer, 0, nb*sizeof(buffer[0]));\r
+ icvMatrAXPY_64f( m, nb, b, ldb, uT, buffer, 0 );\r
+ for( j = 0; j < nb; j++ )\r
+ buffer[j] *= wi;\r
+ }\r
+ else\r
+ {\r
+ for( j = 0; j < nb; j++ )\r
+ buffer[j] = uT[j]*wi;\r
+ }\r
+ icvMatrAXPY_64f( n, nb, buffer, 0, vT, x, ldx );\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
+\r
+static void\r
+icvSVBkSb_32f( int m, int n, const float* w,\r
+ const float* uT, int lduT,\r
+ const float* vT, int ldvT,\r
+ const float* b, int ldb, int nb,\r
+ float* x, int ldx, float* buffer )\r
+{\r
+ float threshold = w[0]*FLT_EPSILON;\r
+ int i, j, nm = MIN( m, n );\r
+\r
+ if( !b )\r
+ nb = m;\r
+\r
+ for( i = 0; i < n; i++ )\r
+ memset( x + i*ldx, 0, nb*sizeof(x[0]));\r
+\r
+ /*for( i = 0; i < nm; i++ )\r
+ threshold += w[i];\r
+ threshold *= 2*FLT_EPSILON;*/\r
+\r
+ /* vT * inv(w) * uT * b */\r
+ for( i = 0; i < nm; i++, uT += lduT, vT += ldvT )\r
+ {\r
+ double wi = w[i];\r
+ \r
+ if( wi > threshold )\r
+ {\r
+ wi = 1./wi;\r
+\r
+ if( nb == 1 )\r
+ {\r
+ double s = 0;\r
+ if( b )\r
+ {\r
+ if( ldb == 1 )\r
+ {\r
+ for( j = 0; j <= m - 4; j += 4 )\r
+ s += uT[j]*b[j] + uT[j+1]*b[j+1] + uT[j+2]*b[j+2] + uT[j+3]*b[j+3];\r
+ for( ; j < m; j++ )\r
+ s += uT[j]*b[j];\r
+ }\r
+ else\r
+ {\r
+ for( j = 0; j < m; j++ )\r
+ s += uT[j]*b[j*ldb];\r
+ }\r
+ }\r
+ else\r
+ s = uT[0];\r
+ s *= wi;\r
+\r
+ if( ldx == 1 )\r
+ {\r
+ for( j = 0; j <= n - 4; j += 4 )\r
+ {\r
+ double t0 = x[j] + s*vT[j];\r
+ double t1 = x[j+1] + s*vT[j+1];\r
+ x[j] = (float)t0;\r
+ x[j+1] = (float)t1;\r
+ t0 = x[j+2] + s*vT[j+2];\r
+ t1 = x[j+3] + s*vT[j+3];\r
+ x[j+2] = (float)t0;\r
+ x[j+3] = (float)t1;\r
+ }\r
+\r
+ for( ; j < n; j++ )\r
+ x[j] = (float)(x[j] + s*vT[j]);\r
+ }\r
+ else\r
+ {\r
+ for( j = 0; j < n; j++ )\r
+ x[j*ldx] = (float)(x[j*ldx] + s*vT[j]);\r
+ }\r
+ }\r
+ else\r
+ {\r
+ if( b )\r
+ {\r
+ memset( buffer, 0, nb*sizeof(buffer[0]));\r
+ icvMatrAXPY_32f( m, nb, b, ldb, uT, buffer, 0 );\r
+ for( j = 0; j < nb; j++ )\r
+ buffer[j] = (float)(buffer[j]*wi);\r
+ }\r
+ else\r
+ {\r
+ for( j = 0; j < nb; j++ )\r
+ buffer[j] = (float)(uT[j]*wi);\r
+ }\r
+ icvMatrAXPY_32f( n, nb, buffer, 0, vT, x, ldx );\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
+\r
+CV_IMPL void\r
+cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )\r
+{\r
+ uchar* buffer = 0;\r
+ int local_alloc = 0;\r
+\r
+ CV_FUNCNAME( "cvSVD" );\r
+\r
+ __BEGIN__;\r
+\r
+ CvMat astub0, astub, *a0 = (CvMat*)aarr, *a = a0;\r
+ CvMat wstub0, wstub, *w0 = (CvMat*)warr, *w = w0;\r
+ CvMat ustub0, ustub, *u0 = (CvMat*)uarr, *u = u0;\r
+ CvMat vstub0, vstub, *v0 = (CvMat*)varr, *v = v0;\r
+\r
+ integer m, n;\r
+ int nm, type, elem_size, w_rows, w_cols, w_is_mat = 0, u_rows = 0, u_cols = 0, v_rows = 0, v_cols = 0;\r
+ int a_ofs = 0, u_ofs = 0, v_ofs = 0, work_ofs=0, iwork_ofs=0, buf_size = 0;\r
+ int temp_a = 0, temp_u = 0, temp_v = 0, temp_w = 1;\r
+ double u1=0, v1=0, work1=0;\r
+ float uf1=0, vf1=0, workf1=0;\r
+ integer lda, ldu, ldv, lwork=-1, iwork1=0, info=0, *iwork=0;\r
+ char mode[] = {u || v ? 'S' : 'N', '\0'};\r
+\r
+ if( !CV_IS_MAT( a ))\r
+ CV_CALL( a0 = a = cvGetMat( a, &astub0 ));\r
+\r
+ if( !CV_IS_MAT( w ))\r
+ CV_CALL( w0 = w = cvGetMat( w, &wstub0 ));\r
+\r
+ if( !CV_ARE_TYPES_EQ( a, w ))\r
+ CV_ERROR( CV_StsUnmatchedFormats, "" );\r
+\r
+ m = a->rows;\r
+ n = a->cols;\r
+ nm = MIN(m, n);\r
+ type = CV_MAT_TYPE(a->type);\r
+ elem_size = CV_ELEM_SIZE(type);\r
+ w_rows = w->rows;\r
+ w_cols = w->cols;\r
+ w_is_mat = w_cols > 1 && w_rows > 1;\r
+\r
+ if( u )\r
+ {\r
+ if( !CV_IS_MAT( u ))\r
+ CV_CALL( u0 = u = cvGetMat( u, &ustub0 ));\r
+\r
+ if( !CV_ARE_TYPES_EQ( a, u ))\r
+ CV_ERROR( CV_StsUnmatchedFormats, "" );\r
+ }\r
+\r
+ if( v )\r
+ {\r
+ if( !CV_IS_MAT( v ))\r
+ CV_CALL( v0 = v = cvGetMat( v, &vstub0 ));\r
+\r
+ if( !CV_ARE_TYPES_EQ( a, v ))\r
+ CV_ERROR( CV_StsUnmatchedFormats, "" );\r
+ }\r
+\r
+ if( m != n &&\r
+ ((u && u->rows == u->cols && u->rows == MAX(m,n)) ||\r
+ (v && v->rows == v->cols && v->rows == MAX(m,n))))\r
+ mode[0] = 'A';\r
+\r
+ u_rows = m;\r
+ u_cols = mode[0] == 'A' ? u_rows : nm;\r
+ v_cols = n;\r
+ v_rows = mode[0] == 'A' ? v_cols : nm;\r
+\r
+ if( !w_is_mat && CV_IS_MAT_CONT(w->type) && w_cols + w_rows - 1 == nm )\r
+ temp_w = 0;\r
+\r
+ if( u || v )\r
+ {\r
+ temp_v = temp_u = 1;\r
+ if( v && v->rows == v_rows && v->cols == v_cols )\r
+ temp_v = 0;\r
+ else if( (flags & CV_SVD_MODIFY_A) && mode[0] != 'A' &&\r
+ a->rows == v_rows && a->cols == v_cols )\r
+ {\r
+ mode[0] = 'O';\r
+ temp_v = 0;\r
+ }\r
+\r
+ if( u && u->rows == u_rows && u->cols == u_cols )\r
+ temp_u = 0;\r
+ else if( (flags & CV_SVD_MODIFY_A) && mode[0] != 'A' && mode[0] != 'O' &&\r
+ a->rows == u_rows && a->cols == u_cols )\r
+ {\r
+ mode[0] = 'O';\r
+ temp_u = 0;\r
+ }\r
+ }\r
+\r
+ if( !(flags & CV_SVD_MODIFY_A) )\r
+ {\r
+ if( mode[0] == 'N' || mode[0] == 'A' )\r
+ temp_a = 1;\r
+ else if( ((v && CV_ARE_SIZES_EQ(a, v)) || (temp_v && v_rows == a->rows && v_cols == a->cols) ||\r
+ (u && CV_ARE_SIZES_EQ(a, u)) || (temp_u && u_rows == a->rows && u_cols == a->cols)) &&\r
+ mode[0] == 'S' )\r
+ mode[0] = 'O';\r
+ }\r
+\r
+ lda = a->step/elem_size;\r
+ ldv = n;\r
+ ldu = m;\r
+\r
+ if( type == CV_32F )\r
+ {\r
+ sgesdd_(mode, &n, &m, a->data.fl, &lda, w->data.fl,\r
+ &vf1, &ldv, &uf1, &ldu, &workf1, &lwork, &iwork1, &info );\r
+ lwork = cvRound(workf1);\r
+ }\r
+ else\r
+ {\r
+ dgesdd_(mode, &n, &m, a->data.db, &lda, w->data.db,\r
+ &v1, &ldv, &u1, &ldu, &work1, &lwork, &iwork1, &info );\r
+ lwork = cvRound(work1);\r
+ }\r
+\r
+ assert(info == 0);\r
+ if( temp_w )\r
+ buf_size += nm*elem_size;\r
+ if( temp_a )\r
+ {\r
+ a_ofs = buf_size;\r
+ buf_size += n*m*elem_size;\r
+ }\r
+ if( temp_v )\r
+ {\r
+ v_ofs = buf_size;\r
+ buf_size += v_rows*v_cols*elem_size;\r
+ }\r
+ if( temp_u )\r
+ {\r
+ u_ofs = buf_size;\r
+ buf_size += u_rows*u_cols*elem_size;\r
+ }\r
+ work_ofs = buf_size;\r
+ buf_size += lwork*elem_size;\r
+ buf_size = cvAlign(buf_size, sizeof(iwork[0]));\r
+ iwork_ofs = buf_size;\r
+ buf_size += 8*nm*sizeof(integer);\r
+\r
+ if( buf_size <= CV_MAX_LOCAL_SIZE )\r
+ {\r
+ buffer = (uchar*)cvStackAlloc( buf_size );\r
+ local_alloc = 1;\r
+ }\r
+ else\r
+ {\r
+ CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));\r
+ }\r
+\r
+ if( temp_w )\r
+ w = &(wstub = cvMat( 1, nm, type, buffer ));\r
+\r
+ if( temp_a )\r
+ {\r
+ a = &(astub = cvMat( a->rows, a->cols, type, buffer + a_ofs ));\r
+ cvCopy(a0, a);\r
+ }\r
+\r
+ if( temp_v )\r
+ v = &(vstub = cvMat( v_rows, v_cols, type, buffer + v_ofs ));\r
+\r
+ if( temp_u )\r
+ u = &(ustub = cvMat( u_rows, u_cols, type, buffer + u_ofs ));\r
+\r
+ if( !(flags & CV_SVD_MODIFY_A) && !temp_a )\r
+ {\r
+ if( v && CV_ARE_SIZES_EQ(a, v) )\r
+ {\r
+ cvCopy(a, v);\r
+ a = v;\r
+ }\r
+ else if( u && CV_ARE_SIZES_EQ(a, u) )\r
+ {\r
+ cvCopy(a, u);\r
+ a = u;\r
+ }\r
+ }\r
+\r
+ if( mode[0] != 'N' )\r
+ {\r
+ if( !v )\r
+ v = a;\r
+ else if( !u )\r
+ u = a;\r
+ assert( u && v );\r
+ ldv = v->step/elem_size;\r
+ ldu = u->step/elem_size;\r
+ }\r
+\r
+ lda = a->step/elem_size;\r
+ if( type == CV_32F )\r
+ {\r
+ sgesdd_(mode, &n, &m, a->data.fl, &lda, w->data.fl,\r
+ v ? v->data.fl : &vf1, &ldv, u ? u->data.fl : &uf1, &ldu,\r
+ (float*)(buffer + work_ofs), &lwork, (integer*)(buffer + iwork_ofs), &info );\r
+ }\r
+ else\r
+ {\r
+ dgesdd_(mode, &n, &m, a->data.db, &lda, w->data.db,\r
+ v ? v->data.db : &v1, &ldv, u ? u->data.db : &u1, &ldu,\r
+ (double*)(buffer + work_ofs), &lwork, (integer*)(buffer + iwork_ofs), &info );\r
+ }\r
+ assert(info == 0);\r
+\r
+ if( w != w0 )\r
+ {\r
+ int shift = w0->cols != 1;\r
+ cvSetZero( w0 );\r
+ if( type == CV_32FC1 )\r
+ for( int i = 0; i < n; i++ )\r
+ ((float*)(w->data.ptr + i*w->step))[i*shift] = w->data.fl[i];\r
+ else\r
+ for( int i = 0; i < n; i++ )\r
+ ((double*)(w->data.ptr + i*w->step))[i*shift] = w->data.db[i];\r
+ }\r
+\r
+ if( u0 )\r
+ {\r
+ if( flags & CV_SVD_U_T )\r
+ cvTranspose( u, u0 );\r
+ else if( u != u0 )\r
+ cvCopy( u, u0 );\r
+ }\r
+\r
+ if( v0 )\r
+ {\r
+ if( !(flags & CV_SVD_V_T) )\r
+ cvTranspose( v, v0 );\r
+ else if( v != v0 )\r
+ cvCopy( v, v0 );\r
+ }\r
+\r
+ CV_CHECK_NANS( w );\r
+\r
+ __END__;\r
+\r
+ if( buffer && !local_alloc )\r
+ cvFree( &buffer );\r
+}\r
+\r
+CV_IMPL void
+cvSVBkSb( const CvArr* warr, const CvArr* uarr,
+ const CvArr* varr, const CvArr* barr,
+ CvArr* xarr, int flags )
+{ /* SVD back-substitution: given the SVD factors of A (w = singular values, U/V = orthogonal factors), computes x from b by delegating to icvSVBkSb_32f/_64f. CV_SVD_U_T / CV_SVD_V_T in `flags` mean U/V are supplied already transposed; otherwise they are transposed into a temporary buffer first. b may be NULL -- presumably yielding the pseudo-inverse; NOTE(review): confirm against icvSVBkSb_* (not visible here). */
+ uchar* buffer = 0; /* scratch: U^T / V^T copies, packed w, temp area */
+ int local_alloc = 0; /* 1 => buffer is stack-allocated, must not cvFree */
+
+ CV_FUNCNAME( "cvSVBkSb" );
+
+ __BEGIN__; /* CV_CALL / CV_ERROR jump past __END__ on failure */
+
+ CvMat wstub, *w = (CvMat*)warr; /* singular values (vector or diagonal matrix) */
+ CvMat bstub, *b = (CvMat*)barr; /* right-hand side (optional) */
+ CvMat xstub, *x = (CvMat*)xarr; /* destination */
+ CvMat ustub, ustub2, *u = (CvMat*)uarr;
+ CvMat vstub, vstub2, *v = (CvMat*)varr;
+ uchar* tw = 0; /* contiguous array of the MIN(m,n) singular values */
+ int type;
+ int temp_u = 0, temp_v = 0; /* 1 => U/V must be transposed into buffer */
+ int u_buf_offset = 0, v_buf_offset = 0, w_buf_offset = 0, t_buf_offset = 0;
+ int buf_size = 0, pix_size;
+ int m, n, nm; /* m = u_rows, n = v_rows, nm = MIN(n,m) */
+ int u_rows, u_cols; /* dimensions of the logical (non-transposed) U */
+ int v_rows, v_cols; /* dimensions of the logical (non-transposed) V */
+
+ if( !CV_IS_MAT( w )) /* coerce every array argument to a CvMat header */
+ CV_CALL( w = cvGetMat( w, &wstub ));
+
+ if( !CV_IS_MAT( u ))
+ CV_CALL( u = cvGetMat( u, &ustub ));
+
+ if( !CV_IS_MAT( v ))
+ CV_CALL( v = cvGetMat( v, &vstub ));
+
+ if( !CV_IS_MAT( x ))
+ CV_CALL( x = cvGetMat( x, &xstub ));
+
+ if( !CV_ARE_TYPES_EQ( w, u ) || !CV_ARE_TYPES_EQ( w, v ) || !CV_ARE_TYPES_EQ( w, x ))
+ CV_ERROR( CV_StsUnmatchedFormats, "All matrices must have the same type" );
+
+ type = CV_MAT_TYPE( w->type );
+ pix_size = CV_ELEM_SIZE(type);
+
+ if( !(flags & CV_SVD_U_T) ) /* U given untransposed: reserve room for U^T */
+ {
+ temp_u = 1;
+ u_buf_offset = buf_size;
+ buf_size += u->cols*u->rows*pix_size;
+ u_rows = u->rows;
+ u_cols = u->cols;
+ }
+ else /* U already transposed: swap dims to recover the logical size */
+ {
+ u_rows = u->cols;
+ u_cols = u->rows;
+ }
+
+ if( !(flags & CV_SVD_V_T) ) /* same handling for V */
+ {
+ temp_v = 1;
+ v_buf_offset = buf_size;
+ buf_size += v->cols*v->rows*pix_size;
+ v_rows = v->rows;
+ v_cols = v->cols;
+ }
+ else
+ {
+ v_rows = v->cols;
+ v_cols = v->rows;
+ }
+
+ m = u_rows;
+ n = v_rows;
+ nm = MIN(n,m);
+
+ if( (u_rows != u_cols && v_rows != v_cols) || x->rows != v_rows )
+ CV_ERROR( CV_StsBadSize, "V or U matrix must be square" );
+
+ if( (w->rows == 1 || w->cols == 1) && w->rows + w->cols - 1 == nm )
+ { /* W is a vector of the nm singular values */
+ if( CV_IS_MAT_CONT(w->type) )
+ tw = w->data.ptr; /* already contiguous: use in place */
+ else
+ { /* non-contiguous: will be packed into the buffer below */
+ w_buf_offset = buf_size;
+ buf_size += nm*pix_size;
+ }
+ }
+ else
+ { /* W is a full matrix: its main diagonal is extracted below */
+ if( w->cols != v_cols || w->rows != u_cols )
+ CV_ERROR( CV_StsBadSize, "W must be 1d array of MIN(m,n) elements or "
+ "matrix which size matches to U and V" );
+ w_buf_offset = buf_size;
+ buf_size += nm*pix_size;
+ }
+
+ if( b )
+ {
+ if( !CV_IS_MAT( b ))
+ CV_CALL( b = cvGetMat( b, &bstub ));
+ if( !CV_ARE_TYPES_EQ( w, b ))
+ CV_ERROR( CV_StsUnmatchedFormats, "All matrices must have the same type" );
+ if( b->cols != x->cols || b->rows != m )
+ CV_ERROR( CV_StsUnmatchedSizes, "b matrix must have (m x x->cols) size" );
+ }
+ else
+ { /* no RHS: zeroed header (NULL data, 0 cols) is passed through -- NOTE(review): icvSVBkSb_* presumably treats this as an identity RHS; confirm */
+ b = &bstub;
+ memset( b, 0, sizeof(*b));
+ }
+
+ t_buf_offset = buf_size; /* trailing scratch area for the low-level routine */
+ buf_size += (MAX(m,n) + b->cols)*pix_size;
+
+ if( buf_size <= CV_MAX_LOCAL_SIZE ) /* small problems run off the stack */
+ {
+ buffer = (uchar*)cvStackAlloc( buf_size );
+ local_alloc = 1;
+ }
+ else
+ CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
+
+ if( temp_u )
+ { /* materialize U^T in the scratch buffer */
+ cvInitMatHeader( &ustub2, u_cols, u_rows, type, buffer + u_buf_offset );
+ cvT( u, &ustub2 );
+ u = &ustub2;
+ }
+
+ if( temp_v )
+ { /* materialize V^T in the scratch buffer */
+ cvInitMatHeader( &vstub2, v_cols, v_rows, type, buffer + v_buf_offset );
+ cvT( v, &vstub2 );
+ v = &vstub2;
+ }
+
+ if( !tw )
+ { /* gather singular values contiguously; for a 2d W, stepping by step+pix_size walks the main diagonal. NOTE(review): the shift==0 branch assumes a non-continuous vector W can only be a column vector (OpenCV flags single-row matrices as continuous) -- confirm */
+ int i, shift = w->cols > 1 ? pix_size : 0;
+ tw = buffer + w_buf_offset;
+ for( i = 0; i < nm; i++ )
+ memcpy( tw + i*pix_size, w->data.ptr + i*(w->step + shift), pix_size );
+ }
+
+ if( type == CV_32FC1 )
+ {
+ icvSVBkSb_32f( m, n, (float*)tw, u->data.fl, u->step/sizeof(float),
+ v->data.fl, v->step/sizeof(float),
+ b->data.fl, b->step/sizeof(float), b->cols,
+ x->data.fl, x->step/sizeof(float),
+ (float*)(buffer + t_buf_offset) );
+ }
+ else if( type == CV_64FC1 )
+ {
+ icvSVBkSb_64f( m, n, (double*)tw, u->data.db, u->step/sizeof(double),
+ v->data.db, v->step/sizeof(double),
+ b->data.db, b->step/sizeof(double), b->cols,
+ x->data.db, x->step/sizeof(double),
+ (double*)(buffer + t_buf_offset) );
+ }
+ else
+ { /* only single- and double-precision 1-channel matrices are supported */
+ CV_ERROR( CV_StsUnsupportedFormat, "" );
+ }
+
+ __END__;
+
+ if( buffer && !local_alloc ) /* free only heap-allocated scratch */
+ cvFree( &buffer );
+}
+\r
+/* End of file. */\r