/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                        Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
static const float ord_nan = FLT_MAX*0.5f;
static const int min_block_size = 1 << 16;
static const int block_size_delta = 1 << 10;

CvDTreeTrainData::CvDTreeTrainData()

    var_idx = var_type = cat_count = cat_ofs = cat_map =
        priors = priors_mult = counts = buf = direction = split_buf = responses_copy = 0;
    tree_storage = temp_storage = 0;

CvDTreeTrainData::CvDTreeTrainData( const CvMat* _train_data, int _tflag,
                      const CvMat* _responses, const CvMat* _var_idx,
                      const CvMat* _sample_idx, const CvMat* _var_type,
                      const CvMat* _missing_mask, const CvDTreeParams& _params,
                      bool _shared, bool _add_labels )

    var_idx = var_type = cat_count = cat_ofs = cat_map =
        priors = priors_mult = counts = buf = direction = split_buf = responses_copy = 0;
    tree_storage = temp_storage = 0;

    set_data( _train_data, _tflag, _responses, _var_idx, _sample_idx,
              _var_type, _missing_mask, _params, _shared, _add_labels );

CvDTreeTrainData::~CvDTreeTrainData()
bool CvDTreeTrainData::set_params( const CvDTreeParams& _params )

    CV_FUNCNAME( "CvDTreeTrainData::set_params" );

    if( params.max_categories < 2 )
        CV_ERROR( CV_StsOutOfRange, "params.max_categories should be >= 2" );
    params.max_categories = MIN( params.max_categories, 15 );

    if( params.max_depth < 0 )
        CV_ERROR( CV_StsOutOfRange, "params.max_depth should be >= 0" );
    params.max_depth = MIN( params.max_depth, 25 );

    params.min_sample_count = MAX(params.min_sample_count,1);

    if( params.cv_folds < 0 )
        CV_ERROR( CV_StsOutOfRange,
            "params.cv_folds should be = 0 (the tree is not pruned) "
            "or n > 0 (the tree is pruned using n-fold cross-validation)" );

    if( params.cv_folds == 1 )

    if( params.regression_accuracy < 0 )
        CV_ERROR( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
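/* A hedged usage sketch (not part of the original file): filling in a
   CvDTreeParams so that it passes the checks in set_params() above.
   The concrete field values below are illustrative assumptions only.

    CvDTreeParams params;
    params.max_depth = 8;               // clamped to 25 by set_params()
    params.min_sample_count = 10;       // forced to be >= 1
    params.max_categories = 10;         // must be >= 2, clamped to 15
    params.cv_folds = 10;               // 0 = no pruning, n > 0 = n-fold CV pruning
    params.regression_accuracy = 0.01f; // must be >= 0
    params.use_surrogates = false;
    params.priors = 0;                  // optional per-class weights
*/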
#define CV_CMP_NUM_PTR(a,b) (*(a) < *(b))
static CV_IMPLEMENT_QSORT_EX( icvSortIntPtr, int*, CV_CMP_NUM_PTR, int )
static CV_IMPLEMENT_QSORT_EX( icvSortDblPtr, double*, CV_CMP_NUM_PTR, int )

#define CV_CMP_NUM_IDX(i,j) (aux[i] < aux[j])
static CV_IMPLEMENT_QSORT_EX( icvSortIntAux, int, CV_CMP_NUM_IDX, const float* )
static CV_IMPLEMENT_QSORT_EX( icvSortUShAux, unsigned short, CV_CMP_NUM_IDX, const float* )

#define CV_CMP_PAIRS(a,b) (*((a).i) < *((b).i))
static CV_IMPLEMENT_QSORT_EX( icvSortPairs, CvPair16u32s, CV_CMP_PAIRS, int )
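/* Illustrative sketch (an assumption, not in the original file):
   CV_IMPLEMENT_QSORT_EX expands to a static qsort-like function whose last
   parameter is the user data referenced by the comparator macro. So
   icvSortIntAux sorts an index array by the aux values it indexes into:

    float vals[] = { 3.f, 1.f, 2.f };
    int   idx[]  = { 0, 1, 2 };
    icvSortIntAux( idx, 3, vals );   // idx becomes {1, 2, 0}: vals[idx[i]] ascending

   Similarly, icvSortIntPtr/icvSortDblPtr sort arrays of pointers by the
   pointed-to values, which is how categorical labels are ordered in
   set_data() below.
*/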
void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
    const CvMat* _responses, const CvMat* _var_idx, const CvMat* _sample_idx,
    const CvMat* _var_type, const CvMat* _missing_mask, const CvDTreeParams& _params,
    bool _shared, bool _add_labels, bool _update_data )

    CvMat* sample_indices = 0;
    CvMat* var_type0 = 0;
    CvPair16u32s* pair16u32s_ptr = 0;
    CvDTreeTrainData* data = 0;
    unsigned short* udst = 0;

    CV_FUNCNAME( "CvDTreeTrainData::set_data" );

    int sample_all = 0, r_type = 0, cv_n;
    int total_c_count = 0;
    int tree_block_size, temp_block_size, max_split_size, nv_size, cv_size = 0;
    int ds_step, dv_step, ms_step = 0, mv_step = 0; // {data|mask}{sample|var}_step
    const int *sidx = 0, *vidx = 0;

    if( _update_data && data_root )

        data = new CvDTreeTrainData( _train_data, _tflag, _responses, _var_idx,
            _sample_idx, _var_type, _missing_mask, _params, _shared, _add_labels );

        // compare new and old train data
        if( !(data->var_count == var_count &&
            cvNorm( data->var_type, var_type, CV_C ) < FLT_EPSILON &&
            cvNorm( data->cat_count, cat_count, CV_C ) < FLT_EPSILON &&
            cvNorm( data->cat_map, cat_map, CV_C ) < FLT_EPSILON) )
            CV_ERROR( CV_StsBadArg,
                "The new training data must have the same types of input and output variables "
                "and the same categories for categorical variables" );

        cvReleaseMat( &priors );
        cvReleaseMat( &priors_mult );
        cvReleaseMat( &buf );
        cvReleaseMat( &direction );
        cvReleaseMat( &split_buf );
        cvReleaseMemStorage( &temp_storage );

        priors = data->priors; data->priors = 0;
        priors_mult = data->priors_mult; data->priors_mult = 0;
        buf = data->buf; data->buf = 0;
        buf_count = data->buf_count; buf_size = data->buf_size;
        sample_count = data->sample_count;

        direction = data->direction; data->direction = 0;
        split_buf = data->split_buf; data->split_buf = 0;
        temp_storage = data->temp_storage; data->temp_storage = 0;
        nv_heap = data->nv_heap; cv_heap = data->cv_heap;

        data_root = new_node( 0, sample_count, 0, 0 );

    CV_CALL( set_params( _params ));

    // check parameter types and sizes
    CV_CALL( cvCheckTrainData( _train_data, _tflag, _missing_mask, &var_all, &sample_all ));

    train_data = _train_data;
    responses = _responses;

    if( _tflag == CV_ROW_SAMPLE )
        ds_step = _train_data->step/CV_ELEM_SIZE(_train_data->type);
        ms_step = _missing_mask->step, mv_step = 1;
        dv_step = _train_data->step/CV_ELEM_SIZE(_train_data->type);
        mv_step = _missing_mask->step, ms_step = 1;

    sample_count = sample_all;

    CV_CALL( sample_indices = cvPreprocessIndexArray( _sample_idx, sample_all ));
    sidx = sample_indices->data.i;
    sample_count = sample_indices->rows + sample_indices->cols - 1;

    CV_CALL( var_idx = cvPreprocessIndexArray( _var_idx, var_all ));
    vidx = var_idx->data.i;
    var_count = var_idx->rows + var_idx->cols - 1;

    if ( sample_count < 65536 )

    if( !CV_IS_MAT(_responses) ||
        (CV_MAT_TYPE(_responses->type) != CV_32SC1 &&
        CV_MAT_TYPE(_responses->type) != CV_32FC1) ||
        (_responses->rows != 1 && _responses->cols != 1) ||
        _responses->rows + _responses->cols - 1 != sample_all )
        CV_ERROR( CV_StsBadArg, "The array of _responses must be an integer or "
            "floating-point vector containing as many elements as "
            "the total number of samples in the training data matrix" );

    CV_CALL( var_type0 = cvPreprocessVarType( _var_type, var_idx, var_count, &r_type ));
    CV_CALL( var_type = cvCreateMat( 1, var_count+2, CV_32SC1 ));

    is_classifier = r_type == CV_VAR_CATEGORICAL;

    // step 0. calc the number of categorical vars
    for( vi = 0; vi < var_count; vi++ )
        var_type->data.i[vi] = var_type0->data.ptr[vi] == CV_VAR_CATEGORICAL ?
            cat_var_count++ : ord_var_count--;

    ord_var_count = ~ord_var_count;
    cv_n = params.cv_folds;
    // set the last two elements of the var_type array so that responses
    // and cross-validation labels can be located via the corresponding
    // get_* functions.
    var_type->data.i[var_count] = cat_var_count;
    var_type->data.i[var_count+1] = cat_var_count+1;

    // in the case of a single ordered predictor we need dummy cv_labels
    // for a safe split_node_data() operation
    have_labels = cv_n > 0 || (ord_var_count == 1 && cat_var_count == 0) || _add_labels;

    work_var_count = var_count + (is_classifier ? 1 : 0) + (have_labels ? 1 : 0);
    buf_size = (work_var_count + 1)*sample_count;

    buf_count = shared ? 2 : 1;
    CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_16UC1 ));
    CV_CALL( pair16u32s_ptr = (CvPair16u32s*)cvAlloc( sample_count*sizeof(pair16u32s_ptr[0]) ));
    CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_32SC1 ));
    CV_CALL( int_ptr = (int**)cvAlloc( sample_count*sizeof(int_ptr[0]) ));

    size = is_classifier ? (cat_var_count+1) : cat_var_count;
    size = !size ? 1 : size;
    CV_CALL( cat_count = cvCreateMat( 1, size, CV_32SC1 ));
    CV_CALL( cat_ofs = cvCreateMat( 1, size, CV_32SC1 ));

    size = is_classifier ? (cat_var_count + 1)*params.max_categories : cat_var_count*params.max_categories;
    size = !size ? 1 : size;
    CV_CALL( cat_map = cvCreateMat( 1, size, CV_32SC1 ));
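/* Worked example (illustrative, not part of the original file): buf holds
   work_var_count + 1 contiguous stripes of sample_count elements each --
   one per input variable, plus the class-label stripe (classifier), plus
   the cv-label stripe (when have_labels), plus one stripe of sample
   indices. For, say, 1000 samples and 20 variables in a classifier with
   cv_folds > 0:

    int work_var_count = 20 + 1 + 1;            // vars + class labels + cv labels
    int buf_size = (work_var_count + 1) * 1000; // 23000 elements per buffer row

   Since sample_count < 65536 here, the buffer is allocated as CV_16UC1
   (is_buf_16u), halving the memory footprint relative to CV_32SC1.
*/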
    // now calculate the maximum size of split,
    // create memory storage that will keep nodes and splits of the decision tree,
    // allocate the root node and the buffer for the whole training data
    max_split_size = cvAlign(sizeof(CvDTreeSplit) +
        (MAX(0,sample_count - 33)/32)*sizeof(int),sizeof(void*));
    tree_block_size = MAX((int)sizeof(CvDTreeNode)*8, max_split_size);
    tree_block_size = MAX(tree_block_size + block_size_delta, min_block_size);
    CV_CALL( tree_storage = cvCreateMemStorage( tree_block_size ));
    CV_CALL( node_heap = cvCreateSet( 0, sizeof(*node_heap), sizeof(CvDTreeNode), tree_storage ));

    nv_size = var_count*sizeof(int);
    nv_size = cvAlign(MAX( nv_size, (int)sizeof(CvSetElem) ), sizeof(void*));

    temp_block_size = nv_size;

    if( sample_count < cv_n*MAX(params.min_sample_count,10) )
        CV_ERROR( CV_StsOutOfRange,
            "Too many cross-validation folds for such a small dataset" );

    cv_size = cvAlign( cv_n*(sizeof(int) + sizeof(double)*2), sizeof(double) );
    temp_block_size = MAX(temp_block_size, cv_size);

    temp_block_size = MAX( temp_block_size + block_size_delta, min_block_size );
    CV_CALL( temp_storage = cvCreateMemStorage( temp_block_size ));
    CV_CALL( nv_heap = cvCreateSet( 0, sizeof(*nv_heap), nv_size, temp_storage ));
    CV_CALL( cv_heap = cvCreateSet( 0, sizeof(*cv_heap), cv_size, temp_storage ));

    CV_CALL( data_root = new_node( 0, sample_count, 0, 0 ));

    _fdst = (float*)cvAlloc(sample_count*sizeof(_fdst[0]));
    if (is_buf_16u && (cat_var_count || is_classifier))
        _idst = (int*)cvAlloc(sample_count*sizeof(_idst[0]));

    // transform the training data to a convenient representation
    for( vi = 0; vi <= var_count; vi++ )

        const uchar* mask = 0;
        int m_step = 0, step;
        const int* idata = 0;
        const float* fdata = 0;

        if( vi < var_count ) // analyze the i-th input variable

            int vi0 = vidx ? vidx[vi] : vi;
            ci = get_var_type(vi);
            step = ds_step; m_step = ms_step;
            if( CV_MAT_TYPE(_train_data->type) == CV_32SC1 )
                idata = _train_data->data.i + vi0*dv_step;
                fdata = _train_data->data.fl + vi0*dv_step;
            mask = _missing_mask->data.ptr + vi0*mv_step;

        else // analyze _responses

            step = CV_IS_MAT_CONT(_responses->type) ?
                1 : _responses->step / CV_ELEM_SIZE(_responses->type);
            if( CV_MAT_TYPE(_responses->type) == CV_32SC1 )
                idata = _responses->data.i;
                fdata = _responses->data.fl;

        if( (vi < var_count && ci>=0) ||
            (vi == var_count && is_classifier) ) // process categorical variable or response

            int c_count, prev_label;
            udst = (unsigned short*)(buf->data.s + vi*sample_count);
            idst = buf->data.i + vi*sample_count;

            for( i = 0; i < sample_count; i++ )

                int val = INT_MAX, si = sidx ? sidx[i] : i;
                if( !mask || !mask[si*m_step] )
                    val = idata[si*step];
                    float t = fdata[si*step];
                    if( fabs(t - val) > FLT_EPSILON )
                        sprintf( err, "%d-th value of %d-th (categorical) "
                            "variable is not an integer", i, vi );
                        CV_ERROR( CV_StsBadArg, err );
                    sprintf( err, "%d-th value of %d-th (categorical) "
                        "variable is too large", i, vi );
                    CV_ERROR( CV_StsBadArg, err );
                pair16u32s_ptr[i].u = udst + i;
                pair16u32s_ptr[i].i = _idst + i;
                int_ptr[i] = idst + i;

            c_count = num_valid > 0;

            icvSortPairs( pair16u32s_ptr, sample_count, 0 );
            // count the categories
            for( i = 1; i < num_valid; i++ )
                if (*pair16u32s_ptr[i].i != *pair16u32s_ptr[i-1].i)
            icvSortIntPtr( int_ptr, sample_count, 0 );
            // count the categories
            for( i = 1; i < num_valid; i++ )
                c_count += *int_ptr[i] != *int_ptr[i-1];

            max_c_count = MAX( max_c_count, c_count );
            cat_count->data.i[ci] = c_count;
            cat_ofs->data.i[ci] = total_c_count;

            // resize cat_map, if needed
            if( cat_map->cols < total_c_count + c_count )
                CV_CALL( cat_map = cvCreateMat( 1,
                    MAX(cat_map->cols*3/2,total_c_count+c_count), CV_32SC1 ));
                for( i = 0; i < total_c_count; i++ )
                    cat_map->data.i[i] = tmp_map->data.i[i];
                cvReleaseMat( &tmp_map );

            c_map = cat_map->data.i + total_c_count;
            total_c_count += c_count;
            // compact the class indices and build the map
            prev_label = ~*pair16u32s_ptr[0].i;
            for( i = 0; i < num_valid; i++ )
                int cur_label = *pair16u32s_ptr[i].i;
                if( cur_label != prev_label )
                    c_map[++c_count] = prev_label = cur_label;
                *pair16u32s_ptr[i].u = (unsigned short)c_count;
            // replace labels for missing values with 65535
            for( ; i < sample_count; i++ )
                *pair16u32s_ptr[i].u = 65535;

            // compact the class indices and build the map
            prev_label = ~*int_ptr[0];
            for( i = 0; i < num_valid; i++ )
                int cur_label = *int_ptr[i];
                if( cur_label != prev_label )
                    c_map[++c_count] = prev_label = cur_label;
                *int_ptr[i] = c_count;
            // replace labels for missing values with -1
            for( ; i < sample_count; i++ )

        else if( ci < 0 ) // process ordered variable

            udst = (unsigned short*)(buf->data.s + vi*sample_count);
            idst = buf->data.i + vi*sample_count;

            for( i = 0; i < sample_count; i++ )
                int si = sidx ? sidx[i] : i;
                if( !mask || !mask[si*m_step] )
                    val = (float)idata[si*step];
                    val = fdata[si*step];
                if( fabs(val) >= ord_nan )
                    sprintf( err, "%d-th value of %d-th (ordered) "
                        "variable (=%g) is too large", i, vi, val );
                    CV_ERROR( CV_StsBadArg, err );
                udst[i] = (unsigned short)i;

            icvSortUShAux( udst, num_valid, _fdst );
            icvSortIntAux( idst, /*or num_valid?*/ sample_count, _fdst );

            data_root->set_num_valid(vi, num_valid);

    udst = (unsigned short*)(buf->data.s + work_var_count*sample_count);
    idst = buf->data.i + work_var_count*sample_count;

    for (i = 0; i < sample_count; i++)
        udst[i] = sidx ? (unsigned short)sidx[i] : (unsigned short)i;
        idst[i] = sidx ? sidx[i] : i;

    unsigned short* udst = 0;
    udst = (unsigned short*)(buf->data.s + (get_work_var_count()-1)*sample_count);
    for( i = vi = 0; i < sample_count; i++ )
        udst[i] = (unsigned short)vi++;
        vi &= vi < cv_n ? -1 : 0;

    for( i = 0; i < sample_count; i++ )
        int a = cvRandInt(r) % sample_count;
        int b = cvRandInt(r) % sample_count;
        unsigned short unsh = (unsigned short)vi;
        CV_SWAP( udst[a], udst[b], unsh );

    idst = buf->data.i + (get_work_var_count()-1)*sample_count;
    for( i = vi = 0; i < sample_count; i++ )
        vi &= vi < cv_n ? -1 : 0;

    for( i = 0; i < sample_count; i++ )
        int a = cvRandInt(r) % sample_count;
        int b = cvRandInt(r) % sample_count;
        CV_SWAP( idst[a], idst[b], vi );

    cat_map->cols = MAX( total_c_count, 1 );

    max_split_size = cvAlign(sizeof(CvDTreeSplit) +
        (MAX(0,max_c_count - 33)/32)*sizeof(int),sizeof(void*));
    CV_CALL( split_heap = cvCreateSet( 0, sizeof(*split_heap), max_split_size, tree_storage ));

    have_priors = is_classifier && params.priors;

    int m = get_num_classes();
    CV_CALL( priors = cvCreateMat( 1, m, CV_64F ));
    for( i = 0; i < m; i++ )
        double val = have_priors ? params.priors[i] : 1.;
        CV_ERROR( CV_StsOutOfRange, "Every class weight should be positive" );
        priors->data.db[i] = val;

    cvScale( priors, priors, 1./sum );

    CV_CALL( priors_mult = cvCloneMat( priors ));
    CV_CALL( counts = cvCreateMat( 1, m, CV_32SC1 ));

    CV_CALL( direction = cvCreateMat( 1, sample_count, CV_8UC1 ));
    CV_CALL( split_buf = cvCreateMat( 1, sample_count, CV_32SC1 ));

    int maxNumThreads = 1;
    maxNumThreads = cv::getNumThreads();

    pred_float_buf.resize(maxNumThreads);
    pred_int_buf.resize(maxNumThreads);
    resp_float_buf.resize(maxNumThreads);
    resp_int_buf.resize(maxNumThreads);
    cv_lables_buf.resize(maxNumThreads);
    sample_idx_buf.resize(maxNumThreads);
    for( int ti = 0; ti < maxNumThreads; ti++ )
        pred_float_buf[ti].resize(sample_count);
        pred_int_buf[ti].resize(sample_count);
        resp_float_buf[ti].resize(sample_count);
        resp_int_buf[ti].resize(sample_count);
        cv_lables_buf[ti].resize(sample_count);
        sample_idx_buf[ti].resize(sample_count);

    cvReleaseMat( &var_type0 );
    cvReleaseMat( &sample_indices );
    cvReleaseMat( &tmp_map );
void CvDTreeTrainData::do_responses_copy()

    responses_copy = cvCreateMat( responses->rows, responses->cols, responses->type );
    cvCopy( responses, responses_copy );
    responses = responses_copy;

CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )

    CvDTreeNode* root = 0;
    CvMat* isubsample_idx = 0;
    CvMat* subsample_co = 0;

    CV_FUNCNAME( "CvDTreeTrainData::subsample_data" );

    CV_ERROR( CV_StsError, "No training data has been set" );

    CV_CALL( isubsample_idx = cvPreprocessIndexArray( _subsample_idx, sample_count ));

    if( !isubsample_idx )
        // make a copy of the root node
        root = new_node( 0, 1, 0, 0 );
        root->num_valid = temp.num_valid;
        if( root->num_valid )
            for( i = 0; i < var_count; i++ )
                root->num_valid[i] = data_root->num_valid[i];
        root->cv_Tn = temp.cv_Tn;
        root->cv_node_risk = temp.cv_node_risk;
        root->cv_node_error = temp.cv_node_error;

    int* sidx = isubsample_idx->data.i;
    // co - array of count/offset pairs (to handle duplicated values in _subsample_idx)
    int* co, cur_ofs = 0;
    int work_var_count = get_work_var_count();
    int count = isubsample_idx->rows + isubsample_idx->cols - 1;

    root = new_node( 0, count, 1, 0 );

    CV_CALL( subsample_co = cvCreateMat( 1, sample_count*2, CV_32SC1 ));
    cvZero( subsample_co );
    co = subsample_co->data.i;
    for( i = 0; i < count; i++ )
    for( i = 0; i < sample_count; i++ )

    for( vi = 0; vi < work_var_count; vi++ )
        int ci = get_var_type(vi);

        if( ci >= 0 || vi >= var_count )
            int* src_buf = get_pred_int_buf();
            get_cat_var_data( data_root, vi, src_buf, &src );

            unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
                vi*sample_count + root->offset);
            for( i = 0; i < count; i++ )
                int val = src[sidx[i]];
                udst[i] = (unsigned short)val;
                num_valid += val >= 0;

            int* idst = buf->data.i + root->buf_idx*buf->cols +
                vi*sample_count + root->offset;
            for( i = 0; i < count; i++ )
                int val = src[sidx[i]];
                num_valid += val >= 0;

            root->set_num_valid(vi, num_valid);

            int *src_idx_buf = get_pred_int_buf();
            const int* src_idx = 0;
            float *src_val_buf = get_pred_float_buf();
            const float* src_val = 0;
            int j = 0, idx, count_i;
            int num_valid = data_root->get_num_valid(vi);

            get_ord_var_data( data_root, vi, src_val_buf, src_idx_buf, &src_val, &src_idx );

            unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
                vi*sample_count + data_root->offset);
            for( i = 0; i < num_valid; i++ )
                for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                    udst_idx[j] = (unsigned short)cur_ofs;
            root->set_num_valid(vi, j);

            for( ; i < sample_count; i++ )
                for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                    udst_idx[j] = (unsigned short)cur_ofs;

            int* idst_idx = buf->data.i + root->buf_idx*buf->cols +
                vi*sample_count + root->offset;
            for( i = 0; i < num_valid; i++ )
                for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                    idst_idx[j] = cur_ofs;
            root->set_num_valid(vi, j);

            for( ; i < sample_count; i++ )
                for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                    idst_idx[j] = cur_ofs;

    // sample indices subsampling
    int* sample_idx_src_buf = get_sample_idx_buf();
    const int* sample_idx_src = 0;
    get_sample_indices(data_root, sample_idx_src_buf, &sample_idx_src);

    unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
        get_work_var_count()*sample_count + root->offset);
    for (i = 0; i < count; i++)
        sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]];

    int* sample_idx_dst = buf->data.i + root->buf_idx*buf->cols +
        get_work_var_count()*sample_count + root->offset;
    for (i = 0; i < count; i++)
        sample_idx_dst[i] = sample_idx_src[sidx[i]];

    cvReleaseMat( &isubsample_idx );
    cvReleaseMat( &subsample_co );
void CvDTreeTrainData::get_vectors( const CvMat* _subsample_idx,
    float* values, uchar* missing,
    float* responses, bool get_class_idx )

    CvMat* subsample_idx = 0;
    CvMat* subsample_co = 0;

    CV_FUNCNAME( "CvDTreeTrainData::get_vectors" );

    int i, vi, total = sample_count, count = total, cur_ofs = 0;

    CV_CALL( subsample_idx = cvPreprocessIndexArray( _subsample_idx, sample_count ));
    sidx = subsample_idx->data.i;
    CV_CALL( subsample_co = cvCreateMat( 1, sample_count*2, CV_32SC1 ));
    co = subsample_co->data.i;
    cvZero( subsample_co );
    count = subsample_idx->cols + subsample_idx->rows - 1;
    for( i = 0; i < count; i++ )
    for( i = 0; i < total; i++ )
        int count_i = co[i*2];
        co[i*2+1] = cur_ofs*var_count;

    memset( missing, 1, count*var_count );

    for( vi = 0; vi < var_count; vi++ )
        int ci = get_var_type(vi);
        if( ci >= 0 ) // categorical
            float* dst = values + vi;
            uchar* m = missing ? missing + vi : 0;
            int* src_buf = get_pred_int_buf();
            get_cat_var_data(data_root, vi, src_buf, &src);

            for( i = 0; i < count; i++, dst += var_count )
                int idx = sidx ? sidx[i] : i;
                *m = (!is_buf_16u && val < 0) || (is_buf_16u && (val == 65535));

            float* dst = values + vi;
            uchar* m = missing ? missing + vi : 0;
            int count1 = data_root->get_num_valid(vi);
            float *src_val_buf = get_pred_float_buf();
            const float *src_val = 0;
            int* src_idx_buf = get_pred_int_buf();
            const int* src_idx = 0;
            get_ord_var_data(data_root, vi, src_val_buf, src_idx_buf, &src_val, &src_idx);

            for( i = 0; i < count1; i++ )
                int idx = src_idx[i];
                cur_ofs = co[idx*2+1];
                cur_ofs = idx*var_count;
                float val = src_val[i];
                for( ; count_i > 0; count_i--, cur_ofs += var_count )

    int* src_buf = get_resp_int_buf();
    get_class_labels(data_root, src_buf, &src);
    for( i = 0; i < count; i++ )
        int idx = sidx ? sidx[i] : i;
        int val = get_class_idx ? src[idx] :
            cat_map->data.i[cat_ofs->data.i[cat_var_count]+src[idx]];
        responses[i] = (float)val;

    float *_values_buf = get_resp_float_buf();
    const float* _values = 0;
    get_ord_responses(data_root, _values_buf, &_values);
    for( i = 0; i < count; i++ )
        int idx = sidx ? sidx[i] : i;
        responses[i] = _values[idx];

    cvReleaseMat( &subsample_idx );
    cvReleaseMat( &subsample_co );
CvDTreeNode* CvDTreeTrainData::new_node( CvDTreeNode* parent, int count,
                                         int storage_idx, int offset )

    CvDTreeNode* node = (CvDTreeNode*)cvSetNew( node_heap );

    node->sample_count = count;
    node->depth = parent ? parent->depth + 1 : 0;
    node->parent = parent;
    node->left = node->right = 0;
    node->class_idx = 0;
    node->buf_idx = storage_idx;
    node->offset = offset;
    node->num_valid = (int*)cvSetNew( nv_heap );
    node->num_valid = 0;
    node->alpha = node->node_risk = node->tree_risk = node->tree_error = 0.;
    node->complexity = 0;

    if( params.cv_folds > 0 && cv_heap )
        int cv_n = params.cv_folds;
        node->cv_Tn = (int*)cvSetNew( cv_heap );
        node->cv_node_risk = (double*)cvAlignPtr(node->cv_Tn + cv_n, sizeof(double));
        node->cv_node_error = node->cv_node_risk + cv_n;
        node->cv_node_risk = 0;
        node->cv_node_error = 0;

CvDTreeSplit* CvDTreeTrainData::new_split_ord( int vi, float cmp_val,
                                               int split_point, int inversed, float quality )

    CvDTreeSplit* split = (CvDTreeSplit*)cvSetNew( split_heap );
    split->var_idx = vi;
    split->condensed_idx = INT_MIN;
    split->ord.c = cmp_val;
    split->ord.split_point = split_point;
    split->inversed = inversed;
    split->quality = quality;

CvDTreeSplit* CvDTreeTrainData::new_split_cat( int vi, float quality )

    CvDTreeSplit* split = (CvDTreeSplit*)cvSetNew( split_heap );
    int i, n = (max_c_count + 31)/32;

    split->var_idx = vi;
    split->condensed_idx = INT_MIN;
    split->inversed = 0;
    split->quality = quality;
    for( i = 0; i < n; i++ )
        split->subset[i] = 0;

void CvDTreeTrainData::free_node( CvDTreeNode* node )

    CvDTreeSplit* split = node->split;
    free_node_data( node );
    CvDTreeSplit* next = split->next;
    cvSetRemoveByPtr( split_heap, split );
    cvSetRemoveByPtr( node_heap, node );

void CvDTreeTrainData::free_node_data( CvDTreeNode* node )

    if( node->num_valid )
        cvSetRemoveByPtr( nv_heap, node->num_valid );
        node->num_valid = 0;
    // do not free cv_* fields, as all the cross-validation related data is released at once.

void CvDTreeTrainData::free_train_data()

    cvReleaseMat( &counts );
    cvReleaseMat( &buf );
    cvReleaseMat( &direction );
    cvReleaseMat( &split_buf );
    cvReleaseMemStorage( &temp_storage );
    cvReleaseMat( &responses_copy );
    pred_float_buf.clear();
    pred_int_buf.clear();
    resp_float_buf.clear();
    resp_int_buf.clear();
    cv_lables_buf.clear();
    sample_idx_buf.clear();
    cv_heap = nv_heap = 0;
void CvDTreeTrainData::clear()

    cvReleaseMemStorage( &tree_storage );
    cvReleaseMat( &var_idx );
    cvReleaseMat( &var_type );
    cvReleaseMat( &cat_count );
    cvReleaseMat( &cat_ofs );
    cvReleaseMat( &cat_map );
    cvReleaseMat( &priors );
    cvReleaseMat( &priors_mult );

    node_heap = split_heap = 0;

    sample_count = var_all = var_count = max_c_count = ord_var_count = cat_var_count = 0;
    have_labels = have_priors = is_classifier = false;

    buf_count = buf_size = 0;

int CvDTreeTrainData::get_num_classes() const

    return is_classifier ? cat_count->data.i[cat_var_count] : 0;

int CvDTreeTrainData::get_var_type(int vi) const

    return var_type->data.i[vi];
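/* Interpretation sketch (an assumption based on how set_data() fills
   var_type): get_var_type(vi) returns a non-negative index into
   cat_count/cat_ofs for a categorical variable and a negative code for an
   ordered one. A typical caller pattern in this file:

    int ci = data->get_var_type(vi);
    if( ci >= 0 )
        ;   // categorical: cat_count->data.i[ci] is the number of categories
    else
        ;   // ordered: values are compared against a scalar threshold
*/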
int CvDTreeTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* indices_buf, const float** ord_values, const int** indices )

    int vidx = var_idx ? var_idx->data.i[vi] : vi;
    int node_sample_count = n->sample_count;
    int* sample_indices_buf = get_sample_idx_buf();
    const int* sample_indices = 0;
    int td_step = train_data->step/CV_ELEM_SIZE(train_data->type);

    get_sample_indices(n, sample_indices_buf, &sample_indices);

    *indices = buf->data.i + n->buf_idx*buf->cols +
        vi*sample_count + n->offset;
    const unsigned short* short_indices = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
        vi*sample_count + n->offset );
    for( int i = 0; i < node_sample_count; i++ )
        indices_buf[i] = short_indices[i];
    *indices = indices_buf;

    if( tflag == CV_ROW_SAMPLE )
        for( int i = 0; i < node_sample_count &&
            ((((*indices)[i] >= 0) && !is_buf_16u) || (((*indices)[i] != 65535) && is_buf_16u)); i++ )
            int idx = (*indices)[i];
            idx = sample_indices[idx];
            ord_values_buf[i] = *(train_data->data.fl + idx * td_step + vidx);
        for( int i = 0; i < node_sample_count &&
            ((((*indices)[i] >= 0) && !is_buf_16u) || (((*indices)[i] != 65535) && is_buf_16u)); i++ )
            int idx = (*indices)[i];
            idx = sample_indices[idx];
            ord_values_buf[i] = *(train_data->data.fl + vidx* td_step + idx);

    *ord_values = ord_values_buf;
    return 0; // TODO: return the number of non-missing values

void CvDTreeTrainData::get_class_labels( CvDTreeNode* n, int* labels_buf, const int** labels )

    get_cat_var_data( n, var_count, labels_buf, labels );

void CvDTreeTrainData::get_sample_indices( CvDTreeNode* n, int* indices_buf, const int** indices )

    get_cat_var_data( n, get_work_var_count(), indices_buf, indices );

void CvDTreeTrainData::get_ord_responses( CvDTreeNode* n, float* values_buf, const float** values )

    int sample_count = n->sample_count;
    int* indices_buf = get_sample_idx_buf();
    const int* indices = 0;
    int r_step = responses->step/CV_ELEM_SIZE(responses->type);

    get_sample_indices(n, indices_buf, &indices);

    for( int i = 0; i < sample_count &&
        (((indices[i] >= 0) && !is_buf_16u) || ((indices[i] != 65535) && is_buf_16u)); i++ )
        int idx = indices[i];
        values_buf[i] = *(responses->data.fl + idx * r_step);

    *values = values_buf;

void CvDTreeTrainData::get_cv_labels( CvDTreeNode* n, int* labels_buf, const int** labels )

    get_cat_var_data( n, get_work_var_count() - 1, labels_buf, labels );

int CvDTreeTrainData::get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf, const int** cat_values )

    *cat_values = buf->data.i + n->buf_idx*buf->cols +
        vi*sample_count + n->offset;
    const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
        vi*sample_count + n->offset);
    for( int i = 0; i < n->sample_count; i++ )
        cat_values_buf[i] = short_values[i];
    *cat_values = cat_values_buf;

    return 0; // TODO: return the number of non-missing values

int CvDTreeTrainData::get_child_buf_idx( CvDTreeNode* n )

    int idx = n->buf_idx + 1;
    if( idx >= buf_count )
        idx = shared ? 1 : 0;
void CvDTreeTrainData::write_params( CvFileStorage* fs ) const

    CV_FUNCNAME( "CvDTreeTrainData::write_params" );

    int vi, vcount = var_count;

    cvWriteInt( fs, "is_classifier", is_classifier ? 1 : 0 );
    cvWriteInt( fs, "var_all", var_all );
    cvWriteInt( fs, "var_count", var_count );
    cvWriteInt( fs, "ord_var_count", ord_var_count );
    cvWriteInt( fs, "cat_var_count", cat_var_count );

    cvStartWriteStruct( fs, "training_params", CV_NODE_MAP );
    cvWriteInt( fs, "use_surrogates", params.use_surrogates ? 1 : 0 );
    cvWriteInt( fs, "max_categories", params.max_categories );
    cvWriteReal( fs, "regression_accuracy", params.regression_accuracy );
    cvWriteInt( fs, "max_depth", params.max_depth );
    cvWriteInt( fs, "min_sample_count", params.min_sample_count );
    cvWriteInt( fs, "cross_validation_folds", params.cv_folds );

    if( params.cv_folds > 1 )
        cvWriteInt( fs, "use_1se_rule", params.use_1se_rule ? 1 : 0 );
        cvWriteInt( fs, "truncate_pruned_tree", params.truncate_pruned_tree ? 1 : 0 );

    cvWrite( fs, "priors", priors );

    cvEndWriteStruct( fs );

    cvWrite( fs, "var_idx", var_idx );

    cvStartWriteStruct( fs, "var_type", CV_NODE_SEQ+CV_NODE_FLOW );
    for( vi = 0; vi < vcount; vi++ )
        cvWriteInt( fs, 0, var_type->data.i[vi] >= 0 );
    cvEndWriteStruct( fs );

    if( cat_count && (cat_var_count > 0 || is_classifier) )
        CV_ASSERT( cat_count != 0 );
        cvWrite( fs, "cat_count", cat_count );
        cvWrite( fs, "cat_map", cat_map );
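/* Illustrative sketch (hedged; the exact output depends on CvFileStorage
   and on which optional fields are present): when saved to YAML,
   write_params() above produces a map along these lines, with made-up
   values:

    is_classifier: 1
    var_all: 4
    var_count: 4
    ord_var_count: 3
    cat_var_count: 1
    training_params:
       use_surrogates: 0
       max_categories: 10
       regression_accuracy: 0.01
       max_depth: 8
       min_sample_count: 10
       cross_validation_folds: 10
       use_1se_rule: 1
       truncate_pruned_tree: 1
    var_type: [ 0, 0, 0, 1 ]

   read_params() below is the inverse, with var_idx/cat_count/cat_map read
   back when they were written.
*/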
void CvDTreeTrainData::read_params( CvFileStorage* fs, CvFileNode* node )

    CV_FUNCNAME( "CvDTreeTrainData::read_params" );

    CvFileNode *tparams_node, *vartype_node;
    int vi, max_split_size, tree_block_size;

    is_classifier = (cvReadIntByName( fs, node, "is_classifier" ) != 0);
    var_all = cvReadIntByName( fs, node, "var_all" );
    var_count = cvReadIntByName( fs, node, "var_count", var_all );
    cat_var_count = cvReadIntByName( fs, node, "cat_var_count" );
    ord_var_count = cvReadIntByName( fs, node, "ord_var_count" );

    tparams_node = cvGetFileNodeByName( fs, node, "training_params" );

    if( tparams_node ) // training parameters are not necessary
        params.use_surrogates = cvReadIntByName( fs, tparams_node, "use_surrogates", 1 ) != 0;
        params.max_categories = cvReadIntByName( fs, tparams_node, "max_categories" );
        params.regression_accuracy =
            (float)cvReadRealByName( fs, tparams_node, "regression_accuracy" );
        params.max_depth = cvReadIntByName( fs, tparams_node, "max_depth" );
        params.min_sample_count = cvReadIntByName( fs, tparams_node, "min_sample_count" );
        params.cv_folds = cvReadIntByName( fs, tparams_node, "cross_validation_folds" );

        if( params.cv_folds > 1 )
            params.use_1se_rule = cvReadIntByName( fs, tparams_node, "use_1se_rule" ) != 0;
            params.truncate_pruned_tree =
                cvReadIntByName( fs, tparams_node, "truncate_pruned_tree" ) != 0;

        priors = (CvMat*)cvReadByName( fs, tparams_node, "priors" );
        if( !CV_IS_MAT(priors) )
            CV_ERROR( CV_StsParseError, "priors must be stored as a matrix" );
        priors_mult = cvCloneMat( priors );

    CV_CALL( var_idx = (CvMat*)cvReadByName( fs, node, "var_idx" ));
    if( !CV_IS_MAT(var_idx) ||
        (var_idx->cols != 1 && var_idx->rows != 1) ||
        var_idx->cols + var_idx->rows - 1 != var_count ||
        CV_MAT_TYPE(var_idx->type) != CV_32SC1 )
        CV_ERROR( CV_StsParseError,
            "var_idx (if it exists) must be a valid 1d integer vector containing <var_count> elements" );

    for( vi = 0; vi < var_count; vi++ )
        if( (unsigned)var_idx->data.i[vi] >= (unsigned)var_all )
            CV_ERROR( CV_StsOutOfRange, "some of var_idx elements are out of range" );

    ////// read var type
    CV_CALL( var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 ));

    vartype_node = cvGetFileNodeByName( fs, node, "var_type" );

    if( vartype_node && CV_NODE_TYPE(vartype_node->tag) == CV_NODE_INT && var_count == 1 )
        var_type->data.i[0] = vartype_node->data.i ? cat_var_count++ : ord_var_count--;

    if( !vartype_node || CV_NODE_TYPE(vartype_node->tag) != CV_NODE_SEQ ||
        vartype_node->data.seq->total != var_count )
        CV_ERROR( CV_StsParseError, "var_type must exist and be a sequence of 0's and 1's" );

    cvStartReadSeq( vartype_node->data.seq, &reader );

    for( vi = 0; vi < var_count; vi++ )
        CvFileNode* n = (CvFileNode*)reader.ptr;
        if( CV_NODE_TYPE(n->tag) != CV_NODE_INT || (n->data.i & ~1) )
            CV_ERROR( CV_StsParseError, "var_type must exist and be a sequence of 0's and 1's" );
        var_type->data.i[vi] = n->data.i ? cat_var_count++ : ord_var_count--;
        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );

    var_type->data.i[var_count] = cat_var_count;

    ord_var_count = ~ord_var_count;
    if( cat_var_count != cat_var_count || ord_var_count != ord_var_count )
        CV_ERROR( CV_StsParseError, "var_type is inconsistent with cat_var_count and ord_var_count" );

    if( cat_var_count > 0 || is_classifier )
        int ccount, total_c_count = 0;
        CV_CALL( cat_count = (CvMat*)cvReadByName( fs, node, "cat_count" ));
        CV_CALL( cat_map = (CvMat*)cvReadByName( fs, node, "cat_map" ));

        if( !CV_IS_MAT(cat_count) || !CV_IS_MAT(cat_map) ||
            (cat_count->cols != 1 && cat_count->rows != 1) ||
            CV_MAT_TYPE(cat_count->type) != CV_32SC1 ||
            cat_count->cols + cat_count->rows - 1 != cat_var_count + is_classifier ||
            (cat_map->cols != 1 && cat_map->rows != 1) ||
            CV_MAT_TYPE(cat_map->type) != CV_32SC1 )
            CV_ERROR( CV_StsParseError,
                "Both cat_count and cat_map must exist and be valid 1d integer vectors of an appropriate size" );

        ccount = cat_var_count + is_classifier;

        CV_CALL( cat_ofs = cvCreateMat( 1, ccount + 1, CV_32SC1 ));
        cat_ofs->data.i[0] = 0;

        for( vi = 0; vi < ccount; vi++ )
            int val = cat_count->data.i[vi];
            CV_ERROR( CV_StsOutOfRange, "some of cat_count elements are out of range" );
            max_c_count = MAX( max_c_count, val );
            cat_ofs->data.i[vi+1] = total_c_count += val;

        if( cat_map->cols + cat_map->rows - 1 != total_c_count )
            CV_ERROR( CV_StsBadSize,
                "cat_map vector length is not equal to the total number of categories in all categorical vars" );

    max_split_size = cvAlign(sizeof(CvDTreeSplit) +
        (MAX(0,max_c_count - 33)/32)*sizeof(int),sizeof(void*));

    tree_block_size = MAX((int)sizeof(CvDTreeNode)*8, max_split_size);
    tree_block_size = MAX(tree_block_size + block_size_delta, min_block_size);
    CV_CALL( tree_storage = cvCreateMemStorage( tree_block_size ));
    CV_CALL( node_heap = cvCreateSet( 0, sizeof(node_heap[0]),
        sizeof(CvDTreeNode), tree_storage ));
    CV_CALL( split_heap = cvCreateSet( 0, sizeof(split_heap[0]),
        max_split_size, tree_storage ));
float* CvDTreeTrainData::get_pred_float_buf()

    return &pred_float_buf[cv::getThreadNum()][0];

int* CvDTreeTrainData::get_pred_int_buf()

    return &pred_int_buf[cv::getThreadNum()][0];

float* CvDTreeTrainData::get_resp_float_buf()

    return &resp_float_buf[cv::getThreadNum()][0];

int* CvDTreeTrainData::get_resp_int_buf()

    return &resp_int_buf[cv::getThreadNum()][0];

int* CvDTreeTrainData::get_cv_lables_buf()

    return &cv_lables_buf[cv::getThreadNum()][0];

int* CvDTreeTrainData::get_sample_idx_buf()

    return &sample_idx_buf[cv::getThreadNum()][0];
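/* Design note with a sketch (assumptions flagged): each getter above
   returns a scratch buffer private to the calling thread, sized to
   sample_count in set_data(), so the OpenMP loop in find_best_split()
   can unpack variable columns without locking. Hypothetical caller:

    int* labels_buf = data->get_pred_int_buf();   // per-thread scratch
    const int* labels = 0;
    data->get_cat_var_data( node, vi, labels_buf, &labels );
    // labels now points either into buf directly (32s mode) or into
    // labels_buf after widening from 16u -- safe within this thread only.
*/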
/////////////////////// Decision Tree /////////////////////////

    default_model_name = "my_tree";

void CvDTree::clear()

    cvReleaseMat( &var_importance );
    pruned_tree_idx = -1;

const CvDTreeNode* CvDTree::get_root() const

int CvDTree::get_pruned_tree_idx() const

    return pruned_tree_idx;

CvDTreeTrainData* CvDTree::get_data()

bool CvDTree::train( const CvMat* _train_data, int _tflag,
                     const CvMat* _responses, const CvMat* _var_idx,
                     const CvMat* _sample_idx, const CvMat* _var_type,
                     const CvMat* _missing_mask, CvDTreeParams _params )

    bool result = false;

    CV_FUNCNAME( "CvDTree::train" );

    data = new CvDTreeTrainData( _train_data, _tflag, _responses,
                                 _var_idx, _sample_idx, _var_type,
                                 _missing_mask, _params, false );
    CV_CALL( result = do_train(0) );

bool CvDTree::train( const Mat& _train_data, int _tflag,
                     const Mat& _responses, const Mat& _var_idx,
                     const Mat& _sample_idx, const Mat& _var_type,
                     const Mat& _missing_mask, CvDTreeParams _params )

    CvMat tdata = _train_data, responses = _responses, vidx = _var_idx,
          sidx = _sample_idx, vtype = _var_type, mmask = _missing_mask;
    return train(&tdata, _tflag, &responses, vidx.data.ptr ? &vidx : 0, sidx.data.ptr ? &sidx : 0,
                 vtype.data.ptr ? &vtype : 0, mmask.data.ptr ? &mmask : 0, _params);
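/* End-to-end usage sketch (illustrative, not part of the original file);
   the data values and sizes below are made up:

    #include "ml.h"

    void example()
    {
        float train_vals[] = { 1.f, 2.f,  2.f, 3.f,  3.f, 3.f,  8.f, 9.f };
        float resp_vals[]  = { 0.f, 0.f, 0.f, 1.f };
        CvMat train = cvMat( 4, 2, CV_32FC1, train_vals ); // 4 samples, 2 vars
        CvMat resp  = cvMat( 4, 1, CV_32FC1, resp_vals );

        CvDTree tree;
        CvDTreeParams params;
        params.cv_folds = 0;   // skip cross-validation pruning on a toy set
        tree.train( &train, CV_ROW_SAMPLE, &resp, 0, 0, 0, 0, params );
    }

   With no _var_type mask and float responses this is treated as a
   regression problem; pass a var_type matrix marking the response
   CV_VAR_CATEGORICAL to train a classifier instead.
*/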
bool CvDTree::train( CvMLData* _data, CvDTreeParams _params )

    bool result = false;

    CV_FUNCNAME( "CvDTree::train" );

    const CvMat* values = _data->get_values();
    const CvMat* response = _data->get_responses();
    const CvMat* missing = _data->get_missing();
    const CvMat* var_types = _data->get_var_types();
    const CvMat* train_sidx = _data->get_train_sample_idx();
    const CvMat* var_idx = _data->get_var_idx();

    CV_CALL( result = train( values, CV_ROW_SAMPLE, response, var_idx,
                             train_sidx, var_types, missing, _params ) );

bool CvDTree::train( CvDTreeTrainData* _data, const CvMat* _subsample_idx )

    bool result = false;

    CV_FUNCNAME( "CvDTree::train" );

    data->shared = true;
    CV_CALL( result = do_train(_subsample_idx));

bool CvDTree::do_train( const CvMat* _subsample_idx )

    bool result = false;

    CV_FUNCNAME( "CvDTree::do_train" );

    root = data->subsample_data( _subsample_idx );

    CV_CALL( try_split_node(root));

    if( data->params.cv_folds > 0 )
        CV_CALL( prune_cv());

    data->free_train_data();
void CvDTree::try_split_node( CvDTreeNode* node )

    CvDTreeSplit* best_split = 0;
    int i, n = node->sample_count, vi;
    bool can_split = true;
    double quality_scale;

    calc_node_value( node );

    if( node->sample_count <= data->params.min_sample_count ||
        node->depth >= data->params.max_depth )

    if( can_split && data->is_classifier )
        // check if we have a "pure" node;
        // we assume that cls_count is filled by calc_node_value()
        int* cls_count = data->counts->data.i;
        int nz = 0, m = data->get_num_classes();
        for( i = 0; i < m; i++ )
            nz += cls_count[i] != 0;
        if( nz == 1 ) // there is only one class
    else if( can_split )
        if( sqrt(node->node_risk)/n < data->params.regression_accuracy )

    best_split = find_best_split(node);
    // TODO: check the split quality ...
    node->split = best_split;

    if( !can_split || !best_split )
        data->free_node_data(node);

    quality_scale = calc_node_dir( node );
    if( data->params.use_surrogates )
        // find all the surrogate splits
        // and sort them by their similarity to the primary one
        for( vi = 0; vi < data->var_count; vi++ )
            CvDTreeSplit* split;
            int ci = data->get_var_type(vi);

            if( vi == best_split->var_idx )
                split = find_surrogate_split_cat( node, vi );
                split = find_surrogate_split_ord( node, vi );

            CvDTreeSplit* prev_split = node->split;
            split->quality = (float)(split->quality*quality_scale);

            while( prev_split->next &&
                prev_split->next->quality > split->quality )
                prev_split = prev_split->next;
            split->next = prev_split->next;
            prev_split->next = split;

    split_node_data( node );
    try_split_node( node->left );
    try_split_node( node->right );

// calculate the direction (left(-1), right(1), missing(0))
// for each sample using the best split.
// the function returns scale coefficients for surrogate split quality factors.
// the scale is applied to normalize surrogate split quality relative to the
// best (primary) split quality. That is, if a surrogate split is absolutely
// identical to the primary split, its quality will be set to the maximum value =
// quality of the primary split; otherwise, it will be lower.
// besides, the function computes node->maxlr,
// the minimum possible quality (w/o considering the above mentioned scale)
// for a surrogate split. Surrogate splits with quality less than node->maxlr
// are discarded.
double CvDTree::calc_node_dir( CvDTreeNode* node )

    char* dir = (char*)data->direction->data.ptr;
    int i, n = node->sample_count, vi = node->split->var_idx;

    assert( !node->split->inversed );

    if( data->get_var_type(vi) >= 0 ) // split on a categorical var
        int* labels_buf = data->get_pred_int_buf();
        const int* labels = 0;
        const int* subset = node->split->subset;
        data->get_cat_var_data( node, vi, labels_buf, &labels );
        if( !data->have_priors )
            int sum = 0, sum_abs = 0;

            for( i = 0; i < n; i++ )
                int idx = labels[i];
                int d = ( ((idx >= 0)&&(!data->is_buf_16u)) || ((idx != 65535)&&(data->is_buf_16u)) ) ?
                    CV_DTREE_CAT_DIR(idx,subset) : 0;
                sum += d; sum_abs += d & 1;

            R = (sum_abs + sum) >> 1;
            L = (sum_abs - sum) >> 1;

            const double* priors = data->priors_mult->data.db;
            double sum = 0, sum_abs = 0;
            int *responses_buf = data->get_resp_int_buf();
            const int* responses;
            data->get_class_labels(node, responses_buf, &responses);

            for( i = 0; i < n; i++ )
                int idx = labels[i];
                double w = priors[responses[i]];
                int d = idx >= 0 ? CV_DTREE_CAT_DIR(idx,subset) : 0;
                sum += d*w; sum_abs += (d & 1)*w;

            R = (sum_abs + sum) * 0.5;
            L = (sum_abs - sum) * 0.5;

    else // split on an ordered var
        int split_point = node->split->ord.split_point;
        int n1 = node->get_num_valid(vi);
        float* val_buf = data->get_pred_float_buf();
        const float* val = 0;
        int* sorted_buf = data->get_pred_int_buf();
        const int* sorted = 0;
        data->get_ord_var_data( node, vi, val_buf, sorted_buf, &val, &sorted);

        assert( 0 <= split_point && split_point < n1-1 );

        if( !data->have_priors )
            for( i = 0; i <= split_point; i++ )
                dir[sorted[i]] = (char)-1;
            for( ; i < n1; i++ )
                dir[sorted[i]] = (char)1;
                dir[sorted[i]] = (char)0;
            R = n1 - split_point + 1;

            const double* priors = data->priors_mult->data.db;
            int* responses_buf = data->get_resp_int_buf();
            const int* responses = 0;
            data->get_class_labels(node, responses_buf, &responses);

            for( i = 0; i <= split_point; i++ )
                int idx = sorted[i];
                double w = priors[responses[idx]];
                dir[idx] = (char)-1;

            for( ; i < n1; i++ )
                int idx = sorted[i];
                double w = priors[responses[idx]];

            dir[sorted[i]] = (char)0;

    node->maxlr = MAX( L, R );
    return node->split->quality/(L + R);
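/* Sketch of the subset/direction encoding (an assumption inferred from
   this file): a categorical split stores a bitmask in split->subset; a
   category goes left when its bit is set, and CV_DTREE_CAT_DIR maps the
   bit to -1 (left) or +1 (right), with missing values getting 0:

    int subset[2] = { 0, 0 };
    subset[0] |= 1 << 3;                     // category 3 goes left
    int d = CV_DTREE_CAT_DIR( 3, subset );   // d == -1 (left)
    d = CV_DTREE_CAT_DIR( 4, subset );       // d == +1 (right)

   This is why the code above can recover L and R from the per-sample
   directions d via the identities sum_abs = L + R and sum = R - L.
*/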
CvDTreeSplit* CvDTree::find_best_split( CvDTreeNode* node )

    CvDTreeSplit *bestSplit = 0;
    int maxNumThreads = 1;
    maxNumThreads = cv::getNumThreads();

    vector<CvDTreeSplit*> splits(maxNumThreads);
    vector<CvDTreeSplit*> bestSplits(maxNumThreads);
    vector<int> canSplit(maxNumThreads);
    CvDTreeSplit **splitsPtr = &splits[0], **bestSplitsPtr = &bestSplits[0];
    int* canSplitPtr = &canSplit[0];
    for (int i = 0; i < maxNumThreads; i++)
        splitsPtr[i] = data->new_split_cat( 0, -1.0f );
        bestSplitsPtr[i] = data->new_split_cat( 0, -1.0f );

#pragma omp parallel for num_threads(maxNumThreads) schedule(dynamic)
    for( vi = 0; vi < data->var_count; vi++ )
        CvDTreeSplit *res, *t;
        int threadIdx = cv::getThreadNum();
        int ci = data->get_var_type(vi);
        if( node->get_num_valid(vi) <= 1 )

        if( data->is_classifier )
            res = find_split_cat_class( node, vi, bestSplitsPtr[threadIdx]->quality, splitsPtr[threadIdx] );
            res = find_split_ord_class( node, vi, bestSplitsPtr[threadIdx]->quality, splitsPtr[threadIdx] );
            res = find_split_cat_reg( node, vi, bestSplitsPtr[threadIdx]->quality, splitsPtr[threadIdx] );
            res = find_split_ord_reg( node, vi, bestSplitsPtr[threadIdx]->quality, splitsPtr[threadIdx] );

        canSplitPtr[threadIdx] = 1;
        if( bestSplitsPtr[threadIdx]->quality < splitsPtr[threadIdx]->quality )
            CV_SWAP( bestSplitsPtr[threadIdx], splitsPtr[threadIdx], t );

    for( ; ti < maxNumThreads; ti++ )
        if( canSplitPtr[ti] )
            bestSplit = bestSplitsPtr[ti];

    for( ; ti < maxNumThreads; ti++ )
        if( bestSplit->quality < bestSplitsPtr[ti]->quality )
            bestSplit = bestSplitsPtr[ti];

    for(int i = 0; i < maxNumThreads; i++)
        cvSetRemoveByPtr( data->split_heap, splitsPtr[i] );
        if( bestSplitsPtr[i] != bestSplit )
            cvSetRemoveByPtr( data->split_heap, bestSplitsPtr[i] );
CvDTreeSplit* CvDTree::find_split_ord_class( CvDTreeNode* node, int vi,
                                             float init_quality, CvDTreeSplit* _split )

    const float epsilon = FLT_EPSILON*2;
    int n = node->sample_count;
    int n1 = node->get_num_valid(vi);
    int m = data->get_num_classes();

    float* values_buf = data->get_pred_float_buf();
    const float* values = 0;
    int* indices_buf = data->get_pred_int_buf();
    const int* indices = 0;
    data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
    int* responses_buf = data->get_resp_int_buf();
    const int* responses = 0;
    data->get_class_labels( node, responses_buf, &responses );

    const int* rc0 = data->counts->data.i;
    int* lc = (int*)cvStackAlloc(m*sizeof(lc[0]));
    int* rc = (int*)cvStackAlloc(m*sizeof(rc[0]));

    double lsum2 = 0, rsum2 = 0, best_val = init_quality;
    const double* priors = data->have_priors ? data->priors_mult->data.db : 0;

    // init arrays of class instance counters on both sides of the split
    for( i = 0; i < m; i++ )

    // compensate for missing values
    for( i = n1; i < n; i++ )
        rc[responses[indices[i]]]--;

    for( i = 0; i < m; i++ )
        rsum2 += (double)rc[i]*rc[i];

    for( i = 0; i < n1 - 1; i++ )
        int idx = responses[indices[i]];
        lv = lc[idx]; rv = rc[idx];
        lc[idx] = lv + 1; rc[idx] = rv - 1;

        if( values[i] + epsilon < values[i+1] )
            double val = (lsum2*R + rsum2*L)/((double)L*R);
            if( best_val < val )

    double L = 0, R = 0;
    for( i = 0; i < m; i++ )
        double wv = rc[i]*priors[i];

    for( i = 0; i < n1 - 1; i++ )
        int idx = responses[indices[i]];
        double p = priors[idx], p2 = p*p;
        lv = lc[idx]; rv = rc[idx];
        lsum2 += p2*(lv*2 + 1);
        rsum2 -= p2*(rv*2 - 1);
        lc[idx] = lv + 1; rc[idx] = rv - 1;

        if( values[i] + epsilon < values[i+1] )
            double val = (lsum2*R + rsum2*L)/((double)L*R);
            if( best_val < val )

    CvDTreeSplit* split = 0;
    split = _split ? _split : data->new_split_ord( 0, 0.0f, 0, 0, 0.0f );
    split->var_idx = vi;
    split->ord.c = (values[best_i] + values[best_i+1])*0.5f;
    split->ord.split_point = best_i;
    split->inversed = 0;
    split->quality = (float)best_val;
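/* Worked note on the split quality used above (a derivation stated as an
   assumption consistent with this code): maximizing

       val = lsum2/L + rsum2/R = (lsum2*R + rsum2*L) / (L*R),

   where lsum2 = sum_k L_k^2 and rsum2 = sum_k R_k^2, is equivalent to
   minimizing the weighted Gini impurity
   L*(1 - lsum2/L^2) + R*(1 - rsum2/R^2) = (L + R) - lsum2/L - rsum2/R,
   since L + R is fixed. Tiny example with two classes: class counts
   (L_0,L_1) = (3,1) and (R_0,R_1) = (0,4) give
   val = (9+1)/4 + (0+16)/4 = 6.5, while the uninformative split
   (2,2)/(1,3) gives val = 8/4 + 10/4 = 4.5, so the purer split wins.
*/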
void CvDTree::cluster_categories( const int* vectors, int n, int m,
                                  int* csums, int k, int* labels )

    // TODO: consider adding priors (class weights) and sample weights to the clustering algorithm
    int iters = 0, max_iters = 100;
    double* buf = (double*)cvStackAlloc( (n + k)*sizeof(buf[0]) );
    double *v_weights = buf, *c_weights = buf + n;
    bool modified = true;
    CvRNG* r = &data->rng;

    // assign labels randomly
    for( i = 0; i < n; i++ )
        const int* v = vectors + i*m;
        labels[i] = i < k ? i : (cvRandInt(r) % k);

        // compute weight of each vector
        for( j = 0; j < m; j++ )
        v_weights[i] = sum ? 1./sum : 0.;

    for( i = 0; i < n; i++ )
        int i1 = cvRandInt(r) % n;
        int i2 = cvRandInt(r) % n;
        CV_SWAP( labels[i1], labels[i2], j );

    for( iters = 0; iters <= max_iters; iters++ )
        for( i = 0; i < k; i++ )
            for( j = 0; j < m; j++ )

        for( i = 0; i < n; i++ )
            const int* v = vectors + i*m;
            int* s = csums + labels[i]*m;
            for( j = 0; j < m; j++ )

        // exit the loop here, when we have up-to-date csums
        if( iters == max_iters || !modified )

        // calculate weight of each cluster
        for( i = 0; i < k; i++ )
            const int* s = csums + i*m;
            for( j = 0; j < m; j++ )
            c_weights[i] = sum ? 1./sum : 0;

        // now for each vector determine the closest cluster
        for( i = 0; i < n; i++ )
            const int* v = vectors + i*m;
            double alpha = v_weights[i];
            double min_dist2 = DBL_MAX;

            for( idx = 0; idx < k; idx++ )
                const int* s = csums + idx*m;
                double dist2 = 0., beta = c_weights[idx];
                for( j = 0; j < m; j++ )
                    double t = v[j]*alpha - s[j]*beta;
                if( min_dist2 > dist2 )

            if( min_idx != labels[i] )
                labels[i] = min_idx;
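/* Context sketch (hedged): cluster_categories() is a small k-means-style
   clustering over the m-dimensional class-distribution vectors of each
   category. find_split_cat_class() below calls it when a categorical
   variable has more than params.max_categories levels, so that only
   k = MIN(max_categories, n) merged "super-categories" need to be
   enumerated when searching subsets. Vectors are compared after
   normalization (alpha and beta are inverse total weights), so categories
   with similar class proportions land in the same cluster.
*/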
CvDTreeSplit* CvDTree::find_split_cat_class( CvDTreeNode* node, int vi, float init_quality, CvDTreeSplit* _split )

    int ci = data->get_var_type(vi);
    int n = node->sample_count;
    int m = data->get_num_classes();
    int _mi = data->cat_count->data.i[ci], mi = _mi;

    int* labels_buf = data->get_pred_int_buf();
    const int* labels = 0;
    data->get_cat_var_data(node, vi, labels_buf, &labels);
    int *responses_buf = data->get_resp_int_buf();
    const int* responses = 0;
    data->get_class_labels(node, responses_buf, &responses);

    int* lc = (int*)cvStackAlloc(m*sizeof(lc[0]));
    int* rc = (int*)cvStackAlloc(m*sizeof(rc[0]));
    int* _cjk = (int*)cvStackAlloc(m*(mi+1)*sizeof(_cjk[0]))+m, *cjk = _cjk;
    double* c_weights = (double*)cvStackAlloc( mi*sizeof(c_weights[0]) );
    int* cluster_labels = 0;

    double L = 0, R = 0;
    double best_val = init_quality;
    int prevcode = 0, best_subset = -1, subset_i, subset_n, subtract = 0;
    const double* priors = data->priors_mult->data.db;

    // init array of counters:
    // c_{jk} - number of samples that have vi-th input variable = j and response = k.
    for( j = -1; j < mi; j++ )
        for( k = 0; k < m; k++ )

    for( i = 0; i < n; i++ )
        j = ( labels[i] == 65535 && data->is_buf_16u ) ? -1 : labels[i];

    if( mi > data->params.max_categories )
        mi = MIN(data->params.max_categories, n);
        cjk = (int*)cvStackAlloc( m*mi*sizeof(cjk[0]) );
        cluster_labels = (int*)cvStackAlloc( _mi*sizeof(cluster_labels[0]) );
        cluster_categories( _cjk, _mi, m, cjk, mi, cluster_labels );

    int_ptr = (int**)cvStackAlloc( mi*sizeof(int_ptr[0]) );
    for( j = 0; j < mi; j++ )
        int_ptr[j] = cjk + j*2 + 1;
    icvSortIntPtr( int_ptr, mi, 0 );

    for( k = 0; k < m; k++ )
        for( j = 0; j < mi; j++ )
            sum += cjk[j*m + k];

    for( j = 0; j < mi; j++ )
        for( k = 0; k < m; k++ )
            sum += cjk[j*m + k]*priors[k];

    for( ; subset_i < subset_n; subset_i++ )
        double lsum2 = 0, rsum2 = 0;
        idx = (int)(int_ptr[subset_i] - cjk)/2;

        int graycode = (subset_i>>1)^subset_i;
        int diff = graycode ^ prevcode;

        // determine index of the changed bit.
        idx = diff >= (1 << 16) ? 16 : 0;
        u.f = (float)(((diff >> 16) | diff) & 65535);
        idx += (u.i >> 23) - 127;
        subtract = graycode < prevcode;
        prevcode = graycode;

        weight = c_weights[idx];
        if( weight < FLT_EPSILON )

        for( k = 0; k < m; k++ )
            int lval = lc[k] + t;
            int rval = rc[k] - t;
            double p = priors[k], p2 = p*p;
            lsum2 += p2*lval*lval;
            rsum2 += p2*rval*rval;
            lc[k] = lval; rc[k] = rval;

        for( k = 0; k < m; k++ )
            int lval = lc[k] - t;
            int rval = rc[k] + t;
            double p = priors[k], p2 = p*p;
            lsum2 += p2*lval*lval;
            rsum2 += p2*rval*rval;
            lc[k] = lval; rc[k] = rval;

        if( L > FLT_EPSILON && R > FLT_EPSILON )
            double val = (lsum2*R + rsum2*L)/((double)L*R);
            if( best_val < val )
                best_subset = subset_i;

    CvDTreeSplit* split = 0;
    if( best_subset >= 0 )
        split = _split ? _split : data->new_split_cat( 0, -1.0f );
        split->var_idx = vi;
        split->quality = (float)best_val;
        memset( split->subset, 0, (data->max_c_count + 31)/32 * sizeof(int));

        for( i = 0; i <= best_subset; i++ )
            idx = (int)(int_ptr[i] - cjk) >> 1;
            split->subset[idx >> 5] |= 1 << (idx & 31);

        for( i = 0; i < _mi; i++ )
            idx = cluster_labels ? cluster_labels[i] : i;
            if( best_subset & (1 << idx) )
                split->subset[i >> 5] |= 1 << (i & 31);
2325 CvDTreeSplit* CvDTree::find_split_ord_reg( CvDTreeNode* node, int vi, float init_quality, CvDTreeSplit* _split )
2327 const float epsilon = FLT_EPSILON*2;
2328 int n = node->sample_count;
2329 int n1 = node->get_num_valid(vi);
2331 float* values_buf = data->get_pred_float_buf();
2332 const float* values = 0;
2333 int* indices_buf = data->get_pred_int_buf();
2334 const int* indices = 0;
2335 data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
2336 float* responses_buf = data->get_resp_float_buf();
2337 const float* responses = 0;
2338 data->get_ord_responses( node, responses_buf, &responses );
2341 double best_val = init_quality, lsum = 0, rsum = node->value*n;
2344 // compensate for missing values
2345 for( i = n1; i < n; i++ )
2346 rsum -= responses[indices[i]];
2348 // find the optimal split
2349 for( i = 0; i < n1 - 1; i++ )
2351 float t = responses[indices[i]];
2356 if( values[i] + epsilon < values[i+1] )
2358 double val = (lsum*lsum*R + rsum*rsum*L)/((double)L*R);
2359 if( best_val < val )
2367 CvDTreeSplit* split = 0;
2370 split = _split ? _split : data->new_split_ord( 0, 0.0f, 0, 0, 0.0f );
2371 split->var_idx = vi;
2372 split->ord.c = (values[best_i] + values[best_i+1])*0.5f;
2373 split->ord.split_point = best_i;
2374 split->inversed = 0;
2375 split->quality = (float)best_val;
2380 CvDTreeSplit* CvDTree::find_split_cat_reg( CvDTreeNode* node, int vi, float init_quality, CvDTreeSplit* _split )
2382 int ci = data->get_var_type(vi);
2383 int n = node->sample_count;
2384 int mi = data->cat_count->data.i[ci];
2385 int* labels_buf = data->get_pred_int_buf();
2386 const int* labels = 0;
2387 float* responses_buf = data->get_resp_float_buf();
2388 const float* responses = 0;
2389 data->get_cat_var_data(node, vi, labels_buf, &labels);
2390 data->get_ord_responses(node, responses_buf, &responses);
2392 double* sum = (double*)cvStackAlloc( (mi+1)*sizeof(sum[0]) ) + 1;
2393 int* counts = (int*)cvStackAlloc( (mi+1)*sizeof(counts[0]) ) + 1;
2394 double** sum_ptr = (double**)cvStackAlloc( (mi+1)*sizeof(sum_ptr[0]) );
2395 int i, L = 0, R = 0;
2396 double best_val = init_quality, lsum = 0, rsum = 0;
2397 int best_subset = -1, subset_i;
2399 for( i = -1; i < mi; i++ )
2400 sum[i] = counts[i] = 0;
2402 // calculate the sum of responses and the weight of each category of the input var
2403 for( i = 0; i < n; i++ )
2405 int idx = ( (labels[i] == 65535) && data->is_buf_16u ) ? -1 : labels[i];
2406 double s = sum[idx] + responses[i];
2407 int nc = counts[idx] + 1;
2412 // calculate the average response in each category
2413 for( i = 0; i < mi; i++ )
2417 sum[i] /= MAX(counts[i],1);
2418 sum_ptr[i] = sum + i;
2421 icvSortDblPtr( sum_ptr, mi, 0 );
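// categories are sorted by their mean response: by the classical CART
// result (Breiman et al.), the optimal binary partition of categories under
// the squared-error criterion respects this ordering, so only the mi-1
// "prefix" subsets examined below need to be checked instead of all 2^(mi-1).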
2423 // revert to the unnormalized sums
2424 // (there should be very little loss of accuracy)
2425 for( i = 0; i < mi; i++ )
2426 sum[i] *= counts[i];
2428 for( subset_i = 0; subset_i < mi-1; subset_i++ )
2430 int idx = (int)(sum_ptr[subset_i] - sum);
2431 int ni = counts[idx];
2435 double s = sum[idx];
2441 double val = (lsum*lsum*R + rsum*rsum*L)/((double)L*R);
2442 if( best_val < val )
2445 best_subset = subset_i;
2451 CvDTreeSplit* split = 0;
2452 if( best_subset >= 0 )
2454 split = _split ? _split : data->new_split_cat( 0, -1.0f);
2455 split->var_idx = vi;
2456 split->quality = (float)best_val;
2457 memset( split->subset, 0, (data->max_c_count + 31)/32 * sizeof(int));
2458 for( i = 0; i <= best_subset; i++ )
2460 int idx = (int)(sum_ptr[i] - sum);
2461 split->subset[idx >> 5] |= 1 << (idx & 31);
2467 CvDTreeSplit* CvDTree::find_surrogate_split_ord( CvDTreeNode* node, int vi )
2469 const float epsilon = FLT_EPSILON*2;
2470 const char* dir = (char*)data->direction->data.ptr;
2471 int n1 = node->get_num_valid(vi);
2472 float* values_buf = data->get_pred_float_buf();
2473 const float* values = 0;
2474 int* indices_buf = data->get_pred_int_buf();
2475 const int* indices = 0;
2476 data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
2477 // LL - number of samples that both the primary and the surrogate splits send to the left
2478 // LR - ... primary split sends to the left and the surrogate split sends to the right
2479 // RL - ... primary split sends to the right and the surrogate split sends to the left
2480 // RR - ... both send to the right
2481 int i, best_i = -1, best_inversed = 0;
2484 if( !data->have_priors )
2486 int LL = 0, RL = 0, LR, RR;
2487 int worst_val = cvFloor(node->maxlr), _best_val = worst_val;
2488 int sum = 0, sum_abs = 0;
2490 for( i = 0; i < n1; i++ )
2492 int d = dir[indices[i]];
2493 sum += d; sum_abs += d & 1;
2496 // sum_abs = R + L; sum = R - L
2497 RR = (sum_abs + sum) >> 1;
2498 LR = (sum_abs - sum) >> 1;
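// derivation: each valid sample contributes d in {-1,+1} (0 if missing), so
// sum_abs counts the valid samples, L + R, while sum = R - L; note that
// (d & 1) == 1 for both +1 and -1 in two's complement. Solving the pair
// gives RR = (sum_abs + sum)/2 and LR = (sum_abs - sum)/2 above.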
2500 // initially all the samples are sent to the right by the surrogate split,
2501 // LR of them are sent to the left by the primary split, and RR to the right.
2502 // now iteratively compute LL, LR, RL and RR for every possible surrogate split value.
2503 for( i = 0; i < n1 - 1; i++ )
2505 int d = dir[indices[i]];
2510 if( LL + RR > _best_val && values[i] + epsilon < values[i+1] )
2513 best_i = i; best_inversed = 0;
2519 if( RL + LR > _best_val && values[i] + epsilon < values[i+1] )
2522 best_i = i; best_inversed = 1;
2526 best_val = _best_val;
2530 double LL = 0, RL = 0, LR, RR;
2531 double worst_val = node->maxlr;
2532 double sum = 0, sum_abs = 0;
2533 const double* priors = data->priors_mult->data.db;
2534 int* responses_buf = data->get_resp_int_buf();
2535 const int* responses = 0;
2536 data->get_class_labels(node, responses_buf, &responses);
2537 best_val = worst_val;
2539 for( i = 0; i < n1; i++ )
2541 int idx = indices[i];
2542 double w = priors[responses[idx]];
2544 sum += d*w; sum_abs += (d & 1)*w;
2547 // sum_abs = R + L; sum = R - L
2548 RR = (sum_abs + sum)*0.5;
2549 LR = (sum_abs - sum)*0.5;
2551 // initially all the samples are sent to the right by the surrogate split,
2552 // LR of them are sent to the left by the primary split, and RR to the right.
2553 // now iteratively compute LL, LR, RL and RR for every possible surrogate split value.
2554 for( i = 0; i < n1 - 1; i++ )
2556 int idx = indices[i];
2557 double w = priors[responses[idx]];
2563 if( LL + RR > best_val && values[i] + epsilon < values[i+1] )
2566 best_i = i; best_inversed = 0;
2572 if( RL + LR > best_val && values[i] + epsilon < values[i+1] )
2575 best_i = i; best_inversed = 1;
2580 return best_i >= 0 && best_val > node->maxlr ? data->new_split_ord( vi,
2581 (values[best_i] + values[best_i+1])*0.5f, best_i, best_inversed, (float)best_val ) : 0;
2585 CvDTreeSplit* CvDTree::find_surrogate_split_cat( CvDTreeNode* node, int vi )
2587 const char* dir = (char*)data->direction->data.ptr;
2588 int n = node->sample_count;
2589 int* labels_buf = data->get_pred_int_buf();
2590 const int* labels = 0;
2591 data->get_cat_var_data(node, vi, labels_buf, &labels);
2592 // LL - number of samples that both the primary and the surrogate splits send to the left
2593 // LR - ... primary split sends to the left and the surrogate split sends to the right
2594 // RL - ... primary split sends to the right and the surrogate split sends to the left
2595 // RR - ... both send to the right
2596 CvDTreeSplit* split = data->new_split_cat( vi, 0 );
2597 int i, mi = data->cat_count->data.i[data->get_var_type(vi)], l_win = 0;
2598 double best_val = 0;
2599 double* lc = (double*)cvStackAlloc( (mi+1)*2*sizeof(lc[0]) ) + 1;
2600 double* rc = lc + mi + 1;
2602 for( i = -1; i < mi; i++ )
2605 // for each category calculate the weight of samples
2606 // sent to the left (lc) and to the right (rc) by the primary split
2607 if( !data->have_priors )
2609 int* _lc = (int*)cvStackAlloc((mi+2)*2*sizeof(_lc[0])) + 1;
2610 int* _rc = _lc + mi + 1;
2612 for( i = -1; i < mi; i++ )
2613 _lc[i] = _rc[i] = 0;
2615 for( i = 0; i < n; i++ )
2617 int idx = ( (labels[i] == 65535) && (data->is_buf_16u) ) ? -1 : labels[i];
2619 int sum = _lc[idx] + d;
2620 int sum_abs = _rc[idx] + (d & 1);
2621 _lc[idx] = sum; _rc[idx] = sum_abs;
2624 for( i = 0; i < mi; i++ )
2627 int sum_abs = _rc[i];
2628 lc[i] = (sum_abs - sum) >> 1;
2629 rc[i] = (sum_abs + sum) >> 1;
2634 const double* priors = data->priors_mult->data.db;
2635 int* responses_buf = data->get_resp_int_buf();
2636 const int* responses = 0;
2637 data->get_class_labels(node, responses_buf, &responses);
2639 for( i = 0; i < n; i++ )
2641 int idx = ( (labels[i] == 65535) && (data->is_buf_16u) ) ? -1 : labels[i];
2642 double w = priors[responses[i]];
2644 double sum = lc[idx] + d*w;
2645 double sum_abs = rc[idx] + (d & 1)*w;
2646 lc[idx] = sum; rc[idx] = sum_abs;
2649 for( i = 0; i < mi; i++ )
2652 double sum_abs = rc[i];
2653 lc[i] = (sum_abs - sum) * 0.5;
2654 rc[i] = (sum_abs + sum) * 0.5;
2658 // 2. now form the split.
2659 // in each category, send all the samples in the same direction as the majority
2660 for( i = 0; i < mi; i++ )
2662 double lval = lc[i], rval = rc[i];
2665 split->subset[i >> 5] |= 1 << (i & 31);
2673 split->quality = (float)best_val;
2674 if( split->quality <= node->maxlr || l_win == 0 || l_win == mi )
2675 cvSetRemoveByPtr( data->split_heap, split ), split = 0;
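// a surrogate is kept only if it predicts the primary direction better than
// the blind majority rule (quality > node->maxlr, the weight of the larger
// primary branch) and actually separates the categories (0 < l_win < mi);
// otherwise it is returned to the split heap and discarded.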
2681 void CvDTree::calc_node_value( CvDTreeNode* node )
2683 int i, j, k, n = node->sample_count, cv_n = data->params.cv_folds;
2684 int* cv_labels_buf = data->get_cv_lables_buf();
2685 const int* cv_labels = 0;
2686 data->get_cv_labels(node, cv_labels_buf, &cv_labels);
2688 if( data->is_classifier )
2690 // in the case of a classification tree:
2691 // * the node value is the label of the class that has the largest weight in the node.
2692 // * the node risk is the weighted number of misclassified samples.
2693 // * the j-th cross-validation fold value and risk are calculated as above,
2694 // but using the samples with cv_labels(*)!=j.
2695 // * the j-th cross-validation fold error is calculated as the weighted number of
2696 // misclassified samples with cv_labels(*)==j.
2698 // compute the number of instances of each class
2699 int* cls_count = data->counts->data.i;
2700 int* responses_buf = data->get_resp_int_buf();
2701 const int* responses = 0;
2702 data->get_class_labels(node, responses_buf, &responses);
2703 int m = data->get_num_classes();
2704 int* cv_cls_count = (int*)cvStackAlloc(m*cv_n*sizeof(cv_cls_count[0]));
2705 double max_val = -1, total_weight = 0;
2707 double* priors = data->priors_mult->data.db;
2709 for( k = 0; k < m; k++ )
2714 for( i = 0; i < n; i++ )
2715 cls_count[responses[i]]++;
2719 for( j = 0; j < cv_n; j++ )
2720 for( k = 0; k < m; k++ )
2721 cv_cls_count[j*m + k] = 0;
2723 for( i = 0; i < n; i++ )
2725 j = cv_labels[i]; k = responses[i];
2726 cv_cls_count[j*m + k]++;
2729 for( j = 0; j < cv_n; j++ )
2730 for( k = 0; k < m; k++ )
2731 cls_count[k] += cv_cls_count[j*m + k];
2734 if( data->have_priors && node->parent == 0 )
2736 // compute priors_mult from priors, taking the sample ratio into account.
2738 for( k = 0; k < m; k++ )
2740 int n_k = cls_count[k];
2741 priors[k] = data->priors->data.db[k]*(n_k ? 1./n_k : 0.);
2745 for( k = 0; k < m; k++ )
2749 for( k = 0; k < m; k++ )
2751 double val = cls_count[k]*priors[k];
2752 total_weight += val;
2760 node->class_idx = max_k;
2761 node->value = data->cat_map->data.i[
2762 data->cat_ofs->data.i[data->cat_var_count] + max_k];
2763 node->node_risk = total_weight - max_val;
2765 for( j = 0; j < cv_n; j++ )
2767 double sum_k = 0, sum = 0, max_val_k = 0;
2768 max_val = -1; max_k = -1;
2770 for( k = 0; k < m; k++ )
2772 double w = priors[k];
2773 double val_k = cv_cls_count[j*m + k]*w;
2774 double val = cls_count[k]*w - val_k;
2785 node->cv_Tn[j] = INT_MAX;
2786 node->cv_node_risk[j] = sum - max_val;
2787 node->cv_node_error[j] = sum_k - max_val_k;
2792 // in the case of a regression tree:
2793 // * the node value is 1/n*sum_i(Y_i), where Y_i is the i-th response and
2794 // n is the number of samples in the node.
2795 // * the node risk is the sum of squared errors: sum_i((Y_i - <node_value>)^2)
2796 // * the j-th cross-validation fold value and risk are calculated as above,
2797 // but using the samples with cv_labels(*)!=j.
2798 // * the j-th cross-validation fold error is calculated
2799 // using the samples with cv_labels(*)==j as the test subset:
2800 // error_j = sum_(i,cv_labels(i)==j)((Y_i - <node_value_j>)^2),
2801 // where node_value_j is the node value calculated
2802 // as described in the previous bullet, and the summation is done
2803 // over the samples with cv_labels(*)==j.
2805 double sum = 0, sum2 = 0;
2806 float* values_buf = data->get_resp_float_buf();
2807 const float* values = 0;
2808 data->get_ord_responses(node, values_buf, &values);
2809 double *cv_sum = 0, *cv_sum2 = 0;
2814 for( i = 0; i < n; i++ )
2816 double t = values[i];
2823 cv_sum = (double*)cvStackAlloc( cv_n*sizeof(cv_sum[0]) );
2824 cv_sum2 = (double*)cvStackAlloc( cv_n*sizeof(cv_sum2[0]) );
2825 cv_count = (int*)cvStackAlloc( cv_n*sizeof(cv_count[0]) );
2827 for( j = 0; j < cv_n; j++ )
2829 cv_sum[j] = cv_sum2[j] = 0.;
2833 for( i = 0; i < n; i++ )
2836 double t = values[i];
2837 double s = cv_sum[j] + t;
2838 double s2 = cv_sum2[j] + t*t;
2839 int nc = cv_count[j] + 1;
2845 for( j = 0; j < cv_n; j++ )
2852 node->node_risk = sum2 - (sum/n)*sum;
2853 node->value = sum/n;
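// node_risk uses the identity sum_i((Y_i - mean)^2) = sum(Y^2) - (sum(Y))^2/n,
// so the risk follows from the accumulated sum and sum2 in a single pass.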
2855 for( j = 0; j < cv_n; j++ )
2857 double s = cv_sum[j], si = sum - s;
2858 double s2 = cv_sum2[j], s2i = sum2 - s2;
2859 int c = cv_count[j], ci = n - c;
2860 double r = si/MAX(ci,1);
2861 node->cv_node_risk[j] = s2i - r*r*ci;
2862 node->cv_node_error[j] = s2 - 2*r*s + c*r*r;
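// here r is the mean response over the out-of-fold samples (the complement
// of fold j), so cv_node_risk[j] = s2i - r*r*ci equals the sum over i not in
// fold j of (Y_i - r)^2, and cv_node_error[j] expands the sum over i in fold
// j of (Y_i - r)^2 = s2 - 2*r*s + c*r^2, matching the comment block above.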
2863 node->cv_Tn[j] = INT_MAX;
2869 void CvDTree::complete_node_dir( CvDTreeNode* node )
2871 int vi, i, n = node->sample_count, nl, nr, d0 = 0, d1 = -1;
2872 int nz = n - node->get_num_valid(node->split->var_idx);
2873 char* dir = (char*)data->direction->data.ptr;
2875 // try to fill in the missing directions using surrogate splits
2876 if( nz && data->params.use_surrogates )
2878 CvDTreeSplit* split = node->split->next;
2879 for( ; split != 0 && nz; split = split->next )
2881 int inversed_mask = split->inversed ? -1 : 0;
2882 vi = split->var_idx;
2884 if( data->get_var_type(vi) >= 0 ) // split on categorical var
2886 int* labels_buf = data->get_pred_int_buf();
2887 const int* labels = 0;
2888 data->get_cat_var_data(node, vi, labels_buf, &labels);
2889 const int* subset = split->subset;
2891 for( i = 0; i < n; i++ )
2893 int idx = labels[i];
2894 if( !dir[i] && ( ((idx >= 0)&&(!data->is_buf_16u)) || ((idx != 65535)&&(data->is_buf_16u)) ))
2897 int d = CV_DTREE_CAT_DIR(idx,subset);
2898 dir[i] = (char)((d ^ inversed_mask) - inversed_mask);
2904 else // split on ordered var
2906 float* values_buf = data->get_pred_float_buf();
2907 const float* values = 0;
2908 int* indices_buf = data->get_pred_int_buf();
2909 const int* indices = 0;
2910 data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
2911 int split_point = split->ord.split_point;
2912 int n1 = node->get_num_valid(vi);
2914 assert( 0 <= split_point && split_point < n-1 );
2916 for( i = 0; i < n1; i++ )
2918 int idx = indices[i];
2921 int d = i <= split_point ? -1 : 1;
2922 dir[idx] = (char)((d ^ inversed_mask) - inversed_mask);
2931 // find the default direction for the rest
2934 for( i = nr = 0; i < n; i++ )
2937 d0 = nl > nr ? -1 : nr > nl;
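// `nr > nl` evaluates to 0 or 1, so d0 is -1 when the left branch has the
// majority, +1 when the right one does, and 0 on a tie.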
2940 // make sure that every sample is directed either to the left or to the right
2941 for( i = 0; i < n; i++ )
2951 dir[i] = (char)d; // remap (-1,1) to (0,1)
2956 void CvDTree::split_node_data( CvDTreeNode* node )
2958 int vi, i, n = node->sample_count, nl, nr, scount = data->sample_count;
2959 char* dir = (char*)data->direction->data.ptr;
2960 CvDTreeNode *left = 0, *right = 0;
2961 int* new_idx = data->split_buf->data.i;
2962 int new_buf_idx = data->get_child_buf_idx( node );
2963 int work_var_count = data->get_work_var_count();
2964 CvMat* buf = data->buf;
2965 cv::AutoBuffer<int, 1<<14> _temp_buf(n);
2966 int* temp_buf = _temp_buf;
2968 complete_node_dir(node);
2970 for( i = nl = nr = 0; i < n; i++ )
2973 // initialize new indices for splitting ordered variables
2974 new_idx[i] = (nl & (d-1)) | (nr & -d); // d ? ri : li
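// branch-free select: after complete_node_dir, d is 0 (left) or 1 (right);
// (d-1) is an all-ones mask when d == 0 and -d is all-ones when d == 1, so
// the expression picks nl for left-bound samples and nr for right-bound ones
// without a conditional jump.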
2980 bool split_input_data;
2981 node->left = left = data->new_node( node, nl, new_buf_idx, node->offset );
2982 node->right = right = data->new_node( node, nr, new_buf_idx, node->offset + nl );
2984 split_input_data = node->depth + 1 < data->params.max_depth &&
2985 (node->left->sample_count > data->params.min_sample_count ||
2986 node->right->sample_count > data->params.min_sample_count);
2988 // split ordered variables, keep both halves sorted.
2989 for( vi = 0; vi < data->var_count; vi++ )
2991 int ci = data->get_var_type(vi);
2992 int n1 = node->get_num_valid(vi);
2993 int *src_idx_buf = data->get_pred_int_buf();
2994 const int* src_idx = 0;
2995 float *src_val_buf = data->get_pred_float_buf();
2996 const float* src_val = 0;
2998 if( ci >= 0 || !split_input_data )
3001 data->get_ord_var_data(node, vi, src_val_buf, src_idx_buf, &src_val, &src_idx);
3003 for(i = 0; i < n; i++)
3004 temp_buf[i] = src_idx[i];
3006 if (data->is_buf_16u)
3008 unsigned short *ldst, *rdst, *ldst0, *rdst0;
3010 ldst0 = ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
3011 vi*scount + left->offset);
3012 rdst0 = rdst = (unsigned short*)(ldst + nl);
3015 for( i = 0; i < n1; i++ )
3017 int idx = temp_buf[i];
3022 *rdst = (unsigned short)idx;
3027 *ldst = (unsigned short)idx;
3032 left->set_num_valid(vi, (int)(ldst - ldst0));
3033 right->set_num_valid(vi, (int)(rdst - rdst0));
3038 int idx = temp_buf[i];
3043 *rdst = (unsigned short)idx;
3048 *ldst = (unsigned short)idx;
3055 int *ldst0, *ldst, *rdst0, *rdst;
3056 ldst0 = ldst = buf->data.i + left->buf_idx*buf->cols +
3057 vi*scount + left->offset;
3058 rdst0 = rdst = buf->data.i + right->buf_idx*buf->cols +
3059 vi*scount + right->offset;
3062 for( i = 0; i < n1; i++ )
3064 int idx = temp_buf[i];
3079 left->set_num_valid(vi, (int)(ldst - ldst0));
3080 right->set_num_valid(vi, (int)(rdst - rdst0));
3085 int idx = temp_buf[i];
3102 // split categorical vars, responses and cv_labels using the new_idx relocation table
3103 for( vi = 0; vi < work_var_count; vi++ )
3105 int ci = data->get_var_type(vi);
3106 int n1 = node->get_num_valid(vi), nr1 = 0;
3108 if( ci < 0 || (vi < data->var_count && !split_input_data) )
3111 int *src_lbls_buf = data->get_pred_int_buf();
3112 const int* src_lbls = 0;
3113 data->get_cat_var_data(node, vi, src_lbls_buf, &src_lbls);
3115 for(i = 0; i < n; i++)
3116 temp_buf[i] = src_lbls[i];
3118 if (data->is_buf_16u)
3120 unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
3121 vi*scount + left->offset);
3122 unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
3123 vi*scount + right->offset);
3125 for( i = 0; i < n; i++ )
3128 int idx = temp_buf[i];
3131 *rdst = (unsigned short)idx;
3133 nr1 += (idx != 65535 )&d;
3137 *ldst = (unsigned short)idx;
3142 if( vi < data->var_count )
3144 left->set_num_valid(vi, n1 - nr1);
3145 right->set_num_valid(vi, nr1);
3150 int *ldst = buf->data.i + left->buf_idx*buf->cols +
3151 vi*scount + left->offset;
3152 int *rdst = buf->data.i + right->buf_idx*buf->cols +
3153 vi*scount + right->offset;
3155 for( i = 0; i < n; i++ )
3158 int idx = temp_buf[i];
3163 nr1 += (idx >= 0)&d;
3173 if( vi < data->var_count )
3175 left->set_num_valid(vi, n1 - nr1);
3176 right->set_num_valid(vi, nr1);
3182 // split sample indices
3183 int *sample_idx_src_buf = data->get_sample_idx_buf();
3184 const int* sample_idx_src = 0;
3185 data->get_sample_indices(node, sample_idx_src_buf, &sample_idx_src);
3187 for(i = 0; i < n; i++)
3188 temp_buf[i] = sample_idx_src[i];
3190 int pos = data->get_work_var_count();
3191 if (data->is_buf_16u)
3193 unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
3194 pos*scount + left->offset);
3195 unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
3196 pos*scount + right->offset);
3197 for (i = 0; i < n; i++)
3200 unsigned short idx = (unsigned short)temp_buf[i];
3215 int* ldst = buf->data.i + left->buf_idx*buf->cols +
3216 pos*scount + left->offset;
3217 int* rdst = buf->data.i + right->buf_idx*buf->cols +
3218 pos*scount + right->offset;
3219 for (i = 0; i < n; i++)
3222 int idx = temp_buf[i];
3236 // deallocate the parent node data that is no longer needed
3237 data->free_node_data(node);
3240 float CvDTree::calc_error( CvMLData* _data, int type, vector<float> *resp )
3243 const CvMat* values = _data->get_values();
3244 const CvMat* response = _data->get_responses();
3245 const CvMat* missing = _data->get_missing();
3246 const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
3247 const CvMat* var_types = _data->get_var_types();
3248 int* sidx = sample_idx ? sample_idx->data.i : 0;
3249 int r_step = CV_IS_MAT_CONT(response->type) ?
3250 1 : response->step / CV_ELEM_SIZE(response->type);
3251 bool is_classifier = var_types->data.ptr[var_types->cols-1] == CV_VAR_CATEGORICAL;
3252 int sample_count = sample_idx ? sample_idx->cols : 0;
3253 sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? values->rows : sample_count;
3254 float* pred_resp = 0;
3255 if( resp && (sample_count > 0) )
3257 resp->resize( sample_count );
3258 pred_resp = &((*resp)[0]);
3261 if ( is_classifier )
3263 for( int i = 0; i < sample_count; i++ )
3266 int si = sidx ? sidx[i] : i;
3267 cvGetRow( values, &sample, si );
3269 cvGetRow( missing, &miss, si );
3270 float r = (float)predict( &sample, missing ? &miss : 0 )->value;
3273 int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1;
3276 err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
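// the classification error is reported as the percentage of misclassified
// samples; -FLT_MAX is a sentinel for an empty evaluation set. The
// regression branch below reports the mean squared error instead.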
3280 for( int i = 0; i < sample_count; i++ )
3283 int si = sidx ? sidx[i] : i;
3284 cvGetRow( values, &sample, si );
3286 cvGetRow( missing, &miss, si );
3287 float r = (float)predict( &sample, missing ? &miss : 0 )->value;
3290 float d = r - response->data.fl[si*r_step];
3293 err = sample_count ? err / (float)sample_count : -FLT_MAX;
3298 void CvDTree::prune_cv()
3304 // 1. build the tree sequence for each cv fold and calculate error_{Tj,beta_k}.
3305 // 2. choose the best tree index (if needed, apply the 1SE rule).
3306 // 3. store the best index and cut the branches.
3308 CV_FUNCNAME( "CvDTree::prune_cv" );
3312 int ti, j, tree_count = 0, cv_n = data->params.cv_folds, n = root->sample_count;
3313 // currently, 1SE for regression is not implemented
3314 bool use_1se = data->params.use_1se_rule != 0 && data->is_classifier;
3316 double min_err = 0, min_err_se = 0;
3319 CV_CALL( ab = cvCreateMat( 1, 256, CV_64F ));
3321 // build the main tree sequence, calculate the alphas
3324 double min_alpha = update_tree_rnc(tree_count, -1);
3325 if( cut_tree(tree_count, -1, min_alpha) )
3328 if( ab->cols <= tree_count )
3330 CV_CALL( temp = cvCreateMat( 1, ab->cols*3/2, CV_64F ));
3331 for( ti = 0; ti < ab->cols; ti++ )
3332 temp->data.db[ti] = ab->data.db[ti];
3333 cvReleaseMat( &ab );
3338 ab->data.db[tree_count] = min_alpha;
3341 ab->data.db[0] = 0.;
3343 if( tree_count > 0 )
3345 for( ti = 1; ti < tree_count-1; ti++ )
3346 ab->data.db[ti] = sqrt(ab->data.db[ti]*ab->data.db[ti+1]);
3347 ab->data.db[tree_count-1] = DBL_MAX*0.5;
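// standard CART cost-complexity pruning: ab[] holds the critical alpha
// values; each test alpha is replaced by the geometric mean of two
// consecutive critical values so it falls strictly inside the interval where
// the corresponding pruned tree is optimal, and the last entry acts as
// +infinity so the final interval covers the root-only tree.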
3349 CV_CALL( err_jk = cvCreateMat( cv_n, tree_count, CV_64F ));
3350 err = err_jk->data.db;
3352 for( j = 0; j < cv_n; j++ )
3355 for( ; tk < tree_count; tj++ )
3357 double min_alpha = update_tree_rnc(tj, j);
3358 if( cut_tree(tj, j, min_alpha) )
3359 min_alpha = DBL_MAX;
3361 for( ; tk < tree_count; tk++ )
3363 if( ab->data.db[tk] > min_alpha )
3365 err[j*tree_count + tk] = root->tree_error;
3370 for( ti = 0; ti < tree_count; ti++ )
3373 for( j = 0; j < cv_n; j++ )
3374 sum_err += err[j*tree_count + ti];
3375 if( ti == 0 || sum_err < min_err )
3380 min_err_se = sqrt( sum_err*(n - sum_err) );
3382 else if( sum_err < min_err + min_err_se )
3387 pruned_tree_idx = min_idx;
3388 free_prune_data(data->params.truncate_pruned_tree != 0);
3392 cvReleaseMat( &err_jk );
3393 cvReleaseMat( &ab );
3394 cvReleaseMat( &temp );
3398 double CvDTree::update_tree_rnc( int T, int fold )
3400 CvDTreeNode* node = root;
3401 double min_alpha = DBL_MAX;
3405 CvDTreeNode* parent;
3408 int t = fold >= 0 ? node->cv_Tn[fold] : node->Tn;
3409 if( t <= T || !node->left )
3411 node->complexity = 1;
3412 node->tree_risk = node->node_risk;
3413 node->tree_error = 0.;
3416 node->tree_risk = node->cv_node_risk[fold];
3417 node->tree_error = node->cv_node_error[fold];
3424 for( parent = node->parent; parent && parent->right == node;
3425 node = parent, parent = parent->parent )
3427 parent->complexity += node->complexity;
3428 parent->tree_risk += node->tree_risk;
3429 parent->tree_error += node->tree_error;
3431 parent->alpha = ((fold >= 0 ? parent->cv_node_risk[fold] : parent->node_risk)
3432 - parent->tree_risk)/(parent->complexity - 1);
3433 min_alpha = MIN( min_alpha, parent->alpha );
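// parent->alpha is CART's "link strength" g(t) = (R(t) - R(T_t))/(|T_t| - 1):
// the per-leaf increase in risk incurred by collapsing the subtree T_t into
// its root t. The weakest link (the minimum over the tree) is pruned first.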
3439 parent->complexity = node->complexity;
3440 parent->tree_risk = node->tree_risk;
3441 parent->tree_error = node->tree_error;
3442 node = parent->right;
3449 int CvDTree::cut_tree( int T, int fold, double min_alpha )
3451 CvDTreeNode* node = root;
3457 CvDTreeNode* parent;
3460 int t = fold >= 0 ? node->cv_Tn[fold] : node->Tn;
3461 if( t <= T || !node->left )
3463 if( node->alpha <= min_alpha + FLT_EPSILON )
3466 node->cv_Tn[fold] = T;
3476 for( parent = node->parent; parent && parent->right == node;
3477 node = parent, parent = parent->parent )
3483 node = parent->right;
3490 void CvDTree::free_prune_data(bool cut_tree)
3492 CvDTreeNode* node = root;
3496 CvDTreeNode* parent;
3499 // do not call cvSetRemoveByPtr( cv_heap, node->cv_Tn )
3500 // as we will clear the whole cross-validation heap at the end
3502 node->cv_node_error = node->cv_node_risk = 0;
3508 for( parent = node->parent; parent && parent->right == node;
3509 node = parent, parent = parent->parent )
3511 if( cut_tree && parent->Tn <= pruned_tree_idx )
3513 data->free_node( parent->left );
3514 data->free_node( parent->right );
3515 parent->left = parent->right = 0;
3522 node = parent->right;
3526 cvClearSet( data->cv_heap );
3530 void CvDTree::free_tree()
3532 if( root && data && data->shared )
3534 pruned_tree_idx = INT_MIN;
3535 free_prune_data(true);
3536 data->free_node(root);
3541 CvDTreeNode* CvDTree::predict( const CvMat* _sample,
3542 const CvMat* _missing, bool preprocessed_input ) const
3544 CvDTreeNode* result = 0;
3547 CV_FUNCNAME( "CvDTree::predict" );
3551 int i, step, mstep = 0;
3552 const float* sample;
3554 CvDTreeNode* node = root;
3561 CV_ERROR( CV_StsError, "The tree has not been trained yet" );
3563 if( !CV_IS_MAT(_sample) || CV_MAT_TYPE(_sample->type) != CV_32FC1 ||
3564 (_sample->cols != 1 && _sample->rows != 1) ||
3565 (_sample->cols + _sample->rows - 1 != data->var_all && !preprocessed_input) ||
3566 (_sample->cols + _sample->rows - 1 != data->var_count && preprocessed_input) )
3567 CV_ERROR( CV_StsBadArg,
3568 "the input sample must be 1d floating-point vector with the same "
3569 "number of elements as the total number of variables used for training" );
3571 sample = _sample->data.fl;
3572 step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(sample[0]);
3574 if( data->cat_count && !preprocessed_input ) // cache for categorical variables
3576 int n = data->cat_count->cols;
3577 catbuf = (int*)cvStackAlloc(n*sizeof(catbuf[0]));
3578 for( i = 0; i < n; i++ )
3584 if( !CV_IS_MAT(_missing) || !CV_IS_MASK_ARR(_missing) ||
3585 !CV_ARE_SIZES_EQ(_missing, _sample) )
3586 CV_ERROR( CV_StsBadArg,
3587 "the missing data mask must be 8-bit vector of the same size as input sample" );
3588 m = _missing->data.ptr;
3589 mstep = CV_IS_MAT_CONT(_missing->type) ? 1 : _missing->step/sizeof(m[0]);
3592 vtype = data->var_type->data.i;
3593 vidx = data->var_idx && !preprocessed_input ? data->var_idx->data.i : 0;
3594 cmap = data->cat_map ? data->cat_map->data.i : 0;
3595 cofs = data->cat_ofs ? data->cat_ofs->data.i : 0;
3597 while( node->Tn > pruned_tree_idx && node->left )
3599 CvDTreeSplit* split = node->split;
3601 for( ; !dir && split != 0; split = split->next )
3603 int vi = split->var_idx;
3605 i = vidx ? vidx[vi] : vi;
3606 float val = sample[i*step];
3607 if( m && m[i*mstep] )
3609 if( ci < 0 ) // ordered
3610 dir = val <= split->ord.c ? -1 : 1;
3614 if( preprocessed_input )
3621 int a = c = cofs[ci];
3622 int b = (ci+1 >= data->cat_ofs->cols) ? data->cat_map->cols : cofs[ci+1];
3624 int ival = cvRound(val);
3626 CV_ERROR( CV_StsBadArg,
3627 "one of input categorical variable is not an integer" );
3634 if( ival < cmap[c] )
3636 else if( ival > cmap[c] )
3642 if( c < 0 || ival != cmap[c] )
3645 catbuf[ci] = c -= cofs[ci];
3648 c = ( (c == 65535) && data->is_buf_16u ) ? -1 : c;
3649 dir = CV_DTREE_CAT_DIR(c, split->subset);
3652 if( split->inversed )
3658 double diff = node->right->sample_count - node->left->sample_count;
3659 dir = diff < 0 ? -1 : 1;
3661 node = dir < 0 ? node->left : node->right;
3672 CvDTreeNode* CvDTree::predict( const Mat& _sample, const Mat& _missing, bool preprocessed_input ) const
3674 CvMat sample = _sample, mmask = _missing;
3675 return predict(&sample, mmask.data.ptr ? &mmask : 0, preprocessed_input);
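// A minimal usage sketch (illustrative only; `train_data`, `responses`,
// `var_type` and `sample` are placeholders, assumed to be prepared by the
// caller as matrices in the usual ml-module layout):
//
//   CvDTree tree;
//   tree.train( train_data, CV_ROW_SAMPLE, responses,
//               0, 0, var_type, 0, CvDTreeParams() );
//   CvDTreeNode* leaf = tree.predict( sample );
//   double prediction = leaf->value;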
3679 const CvMat* CvDTree::get_var_importance()
3681 if( !var_importance )
3683 CvDTreeNode* node = root;
3687 var_importance = cvCreateMat( 1, data->var_count, CV_64F );
3688 cvZero( var_importance );
3689 importance = var_importance->data.db;
3693 CvDTreeNode* parent;
3694 for( ;; node = node->left )
3696 CvDTreeSplit* split = node->split;
3698 if( !node->left || node->Tn <= pruned_tree_idx )
3701 for( ; split != 0; split = split->next )
3702 importance[split->var_idx] += split->quality;
3705 for( parent = node->parent; parent && parent->right == node;
3706 node = parent, parent = parent->parent )
3712 node = parent->right;
3715 cvNormalize( var_importance, var_importance, 1., 0, CV_L1 );
3718 return var_importance;
3722 void CvDTree::write_split( CvFileStorage* fs, CvDTreeSplit* split ) const
3726 cvStartWriteStruct( fs, 0, CV_NODE_MAP + CV_NODE_FLOW );
3727 cvWriteInt( fs, "var", split->var_idx );
3728 cvWriteReal( fs, "quality", split->quality );
3730 ci = data->get_var_type(split->var_idx);
3731 if( ci >= 0 ) // split on a categorical var
3733 int i, n = data->cat_count->data.i[ci], to_right = 0, default_dir;
3734 for( i = 0; i < n; i++ )
3735 to_right += CV_DTREE_CAT_DIR(i,split->subset) > 0;
3737 // ad-hoc rule for when to use the inverse categorical split notation
3738 // to achieve a more compact and clearer representation
3739 default_dir = to_right <= 1 || to_right <= MIN(3, n/2) || to_right <= n/3 ? -1 : 1;
3741 cvStartWriteStruct( fs, default_dir*(split->inversed ? -1 : 1) > 0 ?
3742 "in" : "not_in", CV_NODE_SEQ+CV_NODE_FLOW );
3744 for( i = 0; i < n; i++ )
3746 int dir = CV_DTREE_CAT_DIR(i,split->subset);
3747 if( dir*default_dir < 0 )
3748 cvWriteInt( fs, 0, i );
3750 cvEndWriteStruct( fs );
3753 cvWriteReal( fs, !split->inversed ? "le" : "gt", split->ord.c );
3755 cvEndWriteStruct( fs );
3759 void CvDTree::write_node( CvFileStorage* fs, CvDTreeNode* node ) const
3761 CvDTreeSplit* split;
3763 cvStartWriteStruct( fs, 0, CV_NODE_MAP );
3765 cvWriteInt( fs, "depth", node->depth );
3766 cvWriteInt( fs, "sample_count", node->sample_count );
3767 cvWriteReal( fs, "value", node->value );
3769 if( data->is_classifier )
3770 cvWriteInt( fs, "norm_class_idx", node->class_idx );
3772 cvWriteInt( fs, "Tn", node->Tn );
3773 cvWriteInt( fs, "complexity", node->complexity );
3774 cvWriteReal( fs, "alpha", node->alpha );
3775 cvWriteReal( fs, "node_risk", node->node_risk );
3776 cvWriteReal( fs, "tree_risk", node->tree_risk );
3777 cvWriteReal( fs, "tree_error", node->tree_error );
3781 cvStartWriteStruct( fs, "splits", CV_NODE_SEQ );
3783 for( split = node->split; split != 0; split = split->next )
3784 write_split( fs, split );
3786 cvEndWriteStruct( fs );
3789 cvEndWriteStruct( fs );
3793 void CvDTree::write_tree_nodes( CvFileStorage* fs ) const
3795 //CV_FUNCNAME( "CvDTree::write_tree_nodes" );
3799 CvDTreeNode* node = root;
3801 // traverse the tree and save all the nodes in depth-first order
3804 CvDTreeNode* parent;
3807 write_node( fs, node );
3813 for( parent = node->parent; parent && parent->right == node;
3814 node = parent, parent = parent->parent )
3820 node = parent->right;
3827 void CvDTree::write( CvFileStorage* fs, const char* name ) const
3829 //CV_FUNCNAME( "CvDTree::write" );
3833 cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_ML_TREE );
3835 //get_var_importance();
3836 data->write_params( fs );
3837 //if( var_importance )
3838 //cvWrite( fs, "var_importance", var_importance );
3841 cvEndWriteStruct( fs );
3847 void CvDTree::write( CvFileStorage* fs ) const
3849 //CV_FUNCNAME( "CvDTree::write" );
3853 cvWriteInt( fs, "best_tree_idx", pruned_tree_idx );
3855 cvStartWriteStruct( fs, "nodes", CV_NODE_SEQ );
3856 write_tree_nodes( fs );
3857 cvEndWriteStruct( fs );
3863 CvDTreeSplit* CvDTree::read_split( CvFileStorage* fs, CvFileNode* fnode )
3865 CvDTreeSplit* split = 0;
3867 CV_FUNCNAME( "CvDTree::read_split" );
3873 if( !fnode || CV_NODE_TYPE(fnode->tag) != CV_NODE_MAP )
3874 CV_ERROR( CV_StsParseError, "some of the splits are not stored properly" );
3876 vi = cvReadIntByName( fs, fnode, "var", -1 );
3877 if( (unsigned)vi >= (unsigned)data->var_count )
3878 CV_ERROR( CV_StsOutOfRange, "Split variable index is out of range" );
3880 ci = data->get_var_type(vi);
3881 if( ci >= 0 ) // split on categorical var
3883 int i, n = data->cat_count->data.i[ci], inversed = 0, val;
3886 split = data->new_split_cat( vi, 0 );
3887 inseq = cvGetFileNodeByName( fs, fnode, "in" );
3890 inseq = cvGetFileNodeByName( fs, fnode, "not_in" );
3894 (CV_NODE_TYPE(inseq->tag) != CV_NODE_SEQ && CV_NODE_TYPE(inseq->tag) != CV_NODE_INT))
3895 CV_ERROR( CV_StsParseError,
3896 "Either 'in' or 'not_in' tags should be inside a categorical split data" );
3898 if( CV_NODE_TYPE(inseq->tag) == CV_NODE_INT )
3900 val = inseq->data.i;
3901 if( (unsigned)val >= (unsigned)n )
3902 CV_ERROR( CV_StsOutOfRange, "some of in/not_in elements are out of range" );
3904 split->subset[val >> 5] |= 1 << (val & 31);
3908 cvStartReadSeq( inseq->data.seq, &reader );
3910 for( i = 0; i < reader.seq->total; i++ )
3912 CvFileNode* inode = (CvFileNode*)reader.ptr;
3913 val = inode->data.i;
3914 if( CV_NODE_TYPE(inode->tag) != CV_NODE_INT || (unsigned)val >= (unsigned)n )
3915 CV_ERROR( CV_StsOutOfRange, "some of in/not_in elements are out of range" );
3917 split->subset[val >> 5] |= 1 << (val & 31);
3918 CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
3922 // for categorical splits we do not use the inversed flag;
3923 // instead we invert the category subset stored in the split
3925 for( i = 0; i < (n + 31) >> 5; i++ )
3926 split->subset[i] ^= -1;
3930 CvFileNode* cmp_node;
3931 split = data->new_split_ord( vi, 0, 0, 0, 0 );
3933 cmp_node = cvGetFileNodeByName( fs, fnode, "le" );
3936 cmp_node = cvGetFileNodeByName( fs, fnode, "gt" );
3937 split->inversed = 1;
3940 split->ord.c = (float)cvReadReal( cmp_node );
3943 split->quality = (float)cvReadRealByName( fs, fnode, "quality" );
3951 CvDTreeNode* CvDTree::read_node( CvFileStorage* fs, CvFileNode* fnode, CvDTreeNode* parent )
3953 CvDTreeNode* node = 0;
3955 CV_FUNCNAME( "CvDTree::read_node" );
3962 if( !fnode || CV_NODE_TYPE(fnode->tag) != CV_NODE_MAP )
3963 CV_ERROR( CV_StsParseError, "some of the tree elements are not stored properly" );
3965 CV_CALL( node = data->new_node( parent, 0, 0, 0 ));
3966 depth = cvReadIntByName( fs, fnode, "depth", -1 );
3967 if( depth != node->depth )
3968 CV_ERROR( CV_StsParseError, "incorrect node depth" );
3970 node->sample_count = cvReadIntByName( fs, fnode, "sample_count" );
3971 node->value = cvReadRealByName( fs, fnode, "value" );
3972 if( data->is_classifier )
3973 node->class_idx = cvReadIntByName( fs, fnode, "norm_class_idx" );
3975 node->Tn = cvReadIntByName( fs, fnode, "Tn" );
3976 node->complexity = cvReadIntByName( fs, fnode, "complexity" );
3977 node->alpha = cvReadRealByName( fs, fnode, "alpha" );
3978 node->node_risk = cvReadRealByName( fs, fnode, "node_risk" );
3979 node->tree_risk = cvReadRealByName( fs, fnode, "tree_risk" );
3980 node->tree_error = cvReadRealByName( fs, fnode, "tree_error" );
3982 splits = cvGetFileNodeByName( fs, fnode, "splits" );
3986 CvDTreeSplit* last_split = 0;
3988 if( CV_NODE_TYPE(splits->tag) != CV_NODE_SEQ )
3989 CV_ERROR( CV_StsParseError, "the splits tag must be stored as a sequence" );
3991 cvStartReadSeq( splits->data.seq, &reader );
3992 for( i = 0; i < reader.seq->total; i++ )
3994 CvDTreeSplit* split;
3995 CV_CALL( split = read_split( fs, (CvFileNode*)reader.ptr ));
3997 node->split = last_split = split;
3999 last_split = last_split->next = split;
4001 CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
4011 void CvDTree::read_tree_nodes( CvFileStorage* fs, CvFileNode* fnode )
4013 CV_FUNCNAME( "CvDTree::read_tree_nodes" );
4019 CvDTreeNode* parent = &_root;
4021 parent->left = parent->right = parent->parent = 0;
4023 cvStartReadSeq( fnode->data.seq, &reader );
4025 for( i = 0; i < reader.seq->total; i++ )
4029 CV_CALL( node = read_node( fs, (CvFileNode*)reader.ptr, parent != &_root ? parent : 0 ));
4031 parent->left = node;
4033 parent->right = node;
4038 while( parent && parent->right )
4039 parent = parent->parent;
4042 CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
4051 void CvDTree::read( CvFileStorage* fs, CvFileNode* fnode )
4053 CvDTreeTrainData* _data = new CvDTreeTrainData();
4054 _data->read_params( fs, fnode );
4056 read( fs, fnode, _data );
4057 get_var_importance();
4061 // a special entry point for reading weak decision trees from tree ensembles
4062 void CvDTree::read( CvFileStorage* fs, CvFileNode* node, CvDTreeTrainData* _data )
4064 CV_FUNCNAME( "CvDTree::read" );
4068 CvFileNode* tree_nodes;
4073 tree_nodes = cvGetFileNodeByName( fs, node, "nodes" );
4074 if( !tree_nodes || CV_NODE_TYPE(tree_nodes->tag) != CV_NODE_SEQ )
4075 CV_ERROR( CV_StsParseError, "nodes tag is missing" );
4077 pruned_tree_idx = cvReadIntByName( fs, node, "best_tree_idx", -1 );
4078 read_tree_nodes( fs, tree_nodes );