/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                        Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
static const float ord_nan = FLT_MAX*0.5f;
static const int min_block_size = 1 << 16;
static const int block_size_delta = 1 << 10;

CvDTreeTrainData::CvDTreeTrainData()

    var_idx = var_type = cat_count = cat_ofs = cat_map =
        priors = priors_mult = counts = buf = direction = split_buf = responses_copy = 0;
    pred_int_buf = resp_int_buf = cv_lables_buf = sample_idx_buf = 0;
    pred_float_buf = resp_float_buf = 0;
    tree_storage = temp_storage = 0;

CvDTreeTrainData::CvDTreeTrainData( const CvMat* _train_data, int _tflag,
    const CvMat* _responses, const CvMat* _var_idx,
    const CvMat* _sample_idx, const CvMat* _var_type,
    const CvMat* _missing_mask, const CvDTreeParams& _params,
    bool _shared, bool _add_labels )

    var_idx = var_type = cat_count = cat_ofs = cat_map =
        priors = priors_mult = counts = buf = direction = split_buf = responses_copy = 0;
    pred_int_buf = resp_int_buf = cv_lables_buf = sample_idx_buf = 0;
    pred_float_buf = resp_float_buf = 0;
    tree_storage = temp_storage = 0;

    set_data( _train_data, _tflag, _responses, _var_idx, _sample_idx,
              _var_type, _missing_mask, _params, _shared, _add_labels );

CvDTreeTrainData::~CvDTreeTrainData()

bool CvDTreeTrainData::set_params( const CvDTreeParams& _params )

    CV_FUNCNAME( "CvDTreeTrainData::set_params" );

    if( params.max_categories < 2 )
        CV_ERROR( CV_StsOutOfRange, "params.max_categories should be >= 2" );
    params.max_categories = MIN( params.max_categories, 15 );

    if( params.max_depth < 0 )
        CV_ERROR( CV_StsOutOfRange, "params.max_depth should be >= 0" );
    params.max_depth = MIN( params.max_depth, 25 );

    params.min_sample_count = MAX(params.min_sample_count,1);

    if( params.cv_folds < 0 )
        CV_ERROR( CV_StsOutOfRange,
            "params.cv_folds should be =0 (the tree is not pruned) "
            "or n>0 (tree is pruned using n-fold cross-validation)" );

    if( params.cv_folds == 1 )
        params.cv_folds = 0;

    if( params.regression_accuracy < 0 )
        CV_ERROR( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );

#define CV_CMP_NUM_PTR(a,b) (*(a) < *(b))
static CV_IMPLEMENT_QSORT_EX( icvSortIntPtr, int*, CV_CMP_NUM_PTR, int )
static CV_IMPLEMENT_QSORT_EX( icvSortDblPtr, double*, CV_CMP_NUM_PTR, int )

#define CV_CMP_NUM_IDX(i,j) (aux[i] < aux[j])
static CV_IMPLEMENT_QSORT_EX( icvSortIntAux, int, CV_CMP_NUM_IDX, const float* )
static CV_IMPLEMENT_QSORT_EX( icvSortUShAux, unsigned short, CV_CMP_NUM_IDX, const float* )

#define CV_CMP_PAIRS(a,b) (*((a).i) < *((b).i))
static CV_IMPLEMENT_QSORT_EX( icvSortPairs, CvPair16u32s, CV_CMP_PAIRS, int )
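
// Note on the generated sort helpers (an illustration, not part of the
// original source): CV_IMPLEMENT_QSORT_EX( name, T, LT, user_data_type )
// expands into a static routine of the form
//
//     static void name( T* array, size_t total, user_data_type aux );
//
// that sorts `array` in-place using the LT comparison. icvSortIntAux, for
// example, sorts an array of integer indices so that the float values they
// refer to become ascending:
//
//     const float aux[] = { 0.9f, 0.1f, 0.5f };
//     int idx[] = { 0, 1, 2 };
//     icvSortIntAux( idx, 3, aux );   // idx becomes {1, 2, 0}
//
// set_data() below relies on this to pre-sort each ordered variable once,
// instead of re-sorting raw values at every node.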
void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
    const CvMat* _responses, const CvMat* _var_idx, const CvMat* _sample_idx,
    const CvMat* _var_type, const CvMat* _missing_mask, const CvDTreeParams& _params,
    bool _shared, bool _add_labels, bool _update_data )

    CvMat* sample_indices = 0;
    CvMat* var_type0 = 0;
    CvMat* tmp_map = 0;
    CvPair16u32s* pair16u32s_ptr = 0;
    CvDTreeTrainData* data = 0;
    unsigned short* udst = 0;

    CV_FUNCNAME( "CvDTreeTrainData::set_data" );

    int sample_all = 0, r_type = 0, cv_n;
    int total_c_count = 0;
    int tree_block_size, temp_block_size, max_split_size, nv_size, cv_size = 0;
    int ds_step, dv_step, ms_step = 0, mv_step = 0; // {data|mask}{sample|var}_step
    const int *sidx = 0, *vidx = 0;

    if( _update_data && data_root )

        data = new CvDTreeTrainData( _train_data, _tflag, _responses, _var_idx,
            _sample_idx, _var_type, _missing_mask, _params, _shared, _add_labels );

        // compare new and old train data
        if( !(data->var_count == var_count &&
            cvNorm( data->var_type, var_type, CV_C ) < FLT_EPSILON &&
            cvNorm( data->cat_count, cat_count, CV_C ) < FLT_EPSILON &&
            cvNorm( data->cat_map, cat_map, CV_C ) < FLT_EPSILON) )
            CV_ERROR( CV_StsBadArg,
                "The new training data must have the same types of input and output variables "
                "and the same categories for categorical variables" );
        cvReleaseMat( &priors );
        cvReleaseMat( &priors_mult );
        cvReleaseMat( &buf );
        cvReleaseMat( &direction );
        cvReleaseMat( &split_buf );
        cvReleaseMemStorage( &temp_storage );

        priors = data->priors; data->priors = 0;
        priors_mult = data->priors_mult; data->priors_mult = 0;
        buf = data->buf; data->buf = 0;
        buf_count = data->buf_count; buf_size = data->buf_size;
        sample_count = data->sample_count;

        direction = data->direction; data->direction = 0;
        split_buf = data->split_buf; data->split_buf = 0;
        temp_storage = data->temp_storage; data->temp_storage = 0;
        nv_heap = data->nv_heap; cv_heap = data->cv_heap;

        data_root = new_node( 0, sample_count, 0, 0 );

    CV_CALL( set_params( _params ));

    // check parameter types and sizes
    CV_CALL( cvCheckTrainData( _train_data, _tflag, _missing_mask, &var_all, &sample_all ));

    train_data = _train_data;
    responses = _responses;

    if( _tflag == CV_ROW_SAMPLE )

        ds_step = _train_data->step/CV_ELEM_SIZE(_train_data->type);

        if( _missing_mask )
            ms_step = _missing_mask->step, mv_step = 1;

        dv_step = _train_data->step/CV_ELEM_SIZE(_train_data->type);

        if( _missing_mask )
            mv_step = _missing_mask->step, ms_step = 1;

    sample_count = sample_all;
    var_count = var_all;
    is_buf_16u = false;
    if( _train_data->rows + _train_data->cols - 1 < 65536 )
        is_buf_16u = true;

    CV_CALL( sample_indices = cvPreprocessIndexArray( _sample_idx, sample_all ));
    sidx = sample_indices->data.i;
    sample_count = sample_indices->rows + sample_indices->cols - 1;

    CV_CALL( var_idx = cvPreprocessIndexArray( _var_idx, var_all ));
    vidx = var_idx->data.i;
    var_count = var_idx->rows + var_idx->cols - 1;

    if( !CV_IS_MAT(_responses) ||
        (CV_MAT_TYPE(_responses->type) != CV_32SC1 &&
         CV_MAT_TYPE(_responses->type) != CV_32FC1) ||
        (_responses->rows != 1 && _responses->cols != 1) ||
        _responses->rows + _responses->cols - 1 != sample_all )
        CV_ERROR( CV_StsBadArg, "The array of _responses must be an integer or "
            "floating-point vector containing as many elements as "
            "the total number of samples in the training data matrix" );

    CV_CALL( var_type0 = cvPreprocessVarType( _var_type, var_idx, var_all, &r_type ));

    CV_CALL( var_type = cvCreateMat( 1, var_count+2, CV_32SC1 ));

    ord_var_count = -1;

    is_classifier = r_type == CV_VAR_CATEGORICAL;

    // step 0. calc the number of categorical vars
    for( vi = 0; vi < var_count; vi++ )

        var_type->data.i[vi] = var_type0->data.ptr[vi] == CV_VAR_CATEGORICAL ?
            cat_var_count++ : ord_var_count--;

    ord_var_count = ~ord_var_count;
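
    // Worked example of the var_type encoding above (an illustration, not in
    // the original source): each categorical variable is assigned the next
    // non-negative index into the cat_* arrays, while each ordered variable
    // is assigned the next negative code. For var_type0 = {CAT, ORD, CAT, ORD}:
    //
    //     var_type->data.i = { 0, -1, 1, -2, ... }
    //
    // so get_var_type(vi) >= 0 identifies a categorical variable (the value
    // being its index ci), and a negative value means "ordered". After the
    // loop, ord_var_count holds -(#ordered)-1, and the bitwise complement
    // ~ord_var_count recovers the ordered-variable count (here ~(-3) == 2).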
    cv_n = params.cv_folds;
    // set the two last elements of var_type array to be able
    // to locate responses and cross-validation labels using
    // the corresponding get_* functions.
    var_type->data.i[var_count] = cat_var_count;
    var_type->data.i[var_count+1] = cat_var_count+1;

    // in case of single ordered predictor we need dummy cv_labels
    // for safe split_node_data() operation
    have_labels = cv_n > 0 || (ord_var_count == 1 && cat_var_count == 0) || _add_labels;

    work_var_count = var_count + (is_classifier ? 1 : 0) + (have_labels ? 1 : 0);
    buf_size = (work_var_count + 1)*sample_count;
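
    // Layout of one row of `buf` (a sketch inferred from the code below, not
    // an authoritative diagram): the row is a sequence of work_var_count+1
    // blocks of sample_count elements each,
    //
    //   [var 0][var 1]...[var_count-1] [class labels]? [cv labels]? [sample indices]
    //
    // where a categorical-variable block stores normalized category codes, an
    // ordered-variable block stores the sample order after sorting by that
    // variable's values, the class-label block is present only for
    // classification, the cv-label block only when have_labels is set, and
    // the final block (at offset work_var_count*sample_count) stores the
    // sample indices themselves.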
    buf_count = shared ? 2 : 1;

        CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_16UC1 ));
        CV_CALL( pair16u32s_ptr = (CvPair16u32s*)cvAlloc( sample_count*sizeof(pair16u32s_ptr[0]) ));

        CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_32SC1 ));
        CV_CALL( int_ptr = (int**)cvAlloc( sample_count*sizeof(int_ptr[0]) ));

    size = is_classifier ? cat_var_count+1 : cat_var_count;
    CV_CALL( cat_count = cvCreateMat( 1, size, CV_32SC1 ));
    CV_CALL( cat_ofs = cvCreateMat( 1, size, CV_32SC1 ));

    size = is_classifier ? (cat_var_count + 1)*params.max_categories : cat_var_count*params.max_categories;
    size = !size ? 1 : size;
    CV_CALL( cat_map = cvCreateMat( 1, size, CV_32SC1 ));

    // now calculate the maximum size of split,
    // create memory storage that will keep nodes and splits of the decision tree
    // allocate root node and the buffer for the whole training data
    max_split_size = cvAlign(sizeof(CvDTreeSplit) +
        (MAX(0,sample_count - 33)/32)*sizeof(int),sizeof(void*));
    tree_block_size = MAX((int)sizeof(CvDTreeNode)*8, max_split_size);
    tree_block_size = MAX(tree_block_size + block_size_delta, min_block_size);
    CV_CALL( tree_storage = cvCreateMemStorage( tree_block_size ));
    CV_CALL( node_heap = cvCreateSet( 0, sizeof(*node_heap), sizeof(CvDTreeNode), tree_storage ));

    nv_size = var_count*sizeof(int);
    nv_size = cvAlign(MAX( nv_size, (int)sizeof(CvSetElem) ), sizeof(void*));

    temp_block_size = nv_size;

        if( sample_count < cv_n*MAX(params.min_sample_count,10) )
            CV_ERROR( CV_StsOutOfRange,
                "Too many cross-validation folds for such a small dataset" );
        cv_size = cvAlign( cv_n*(sizeof(int) + sizeof(double)*2), sizeof(double) );
        temp_block_size = MAX(temp_block_size, cv_size);

    temp_block_size = MAX( temp_block_size + block_size_delta, min_block_size );
    CV_CALL( temp_storage = cvCreateMemStorage( temp_block_size ));
    CV_CALL( nv_heap = cvCreateSet( 0, sizeof(*nv_heap), nv_size, temp_storage ));

        CV_CALL( cv_heap = cvCreateSet( 0, sizeof(*cv_heap), cv_size, temp_storage ));

    CV_CALL( data_root = new_node( 0, sample_count, 0, 0 ));

        _fdst = (float*)cvAlloc(sample_count*sizeof(_fdst[0]));
    if( is_buf_16u && (cat_var_count || is_classifier) )
        _idst = (int*)cvAlloc(sample_count*sizeof(_idst[0]));

    // transform the training data to convenient representation
    for( vi = 0; vi <= var_count; vi++ )

        const uchar* mask = 0;
        int m_step = 0, step;
        const int* idata = 0;
        const float* fdata = 0;

        if( vi < var_count ) // analyze i-th input variable

            int vi0 = vidx ? vidx[vi] : vi;
            ci = get_var_type(vi);
            step = ds_step; m_step = ms_step;
            if( CV_MAT_TYPE(_train_data->type) == CV_32SC1 )
                idata = _train_data->data.i + vi0*dv_step;

                fdata = _train_data->data.fl + vi0*dv_step;
            if( _missing_mask )
                mask = _missing_mask->data.ptr + vi0*mv_step;

        else // analyze _responses

            ci = cat_var_count;
            step = CV_IS_MAT_CONT(_responses->type) ?
                1 : _responses->step / CV_ELEM_SIZE(_responses->type);
            if( CV_MAT_TYPE(_responses->type) == CV_32SC1 )
                idata = _responses->data.i;

                fdata = _responses->data.fl;

        if( (vi < var_count && ci>=0) ||
            (vi == var_count && is_classifier) ) // process categorical variable or response

            int c_count, prev_label;

                udst = (unsigned short*)(buf->data.s + vi*sample_count);

                idst = buf->data.i + vi*sample_count;

            for( i = 0; i < sample_count; i++ )

                int val = INT_MAX, si = sidx ? sidx[i] : i;
                if( !mask || !mask[si*m_step] )

                        val = idata[si*step];

                        float t = fdata[si*step];

                            sprintf( err, "%d-th value of %d-th (categorical) "
                                "variable is not an integer", i, vi );
                            CV_ERROR( CV_StsBadArg, err );

                    if( val == INT_MAX )

                        sprintf( err, "%d-th value of %d-th (categorical) "
                            "variable is too large", i, vi );
                        CV_ERROR( CV_StsBadArg, err );

                    pair16u32s_ptr[i].u = udst + i;
                    pair16u32s_ptr[i].i = _idst + i;

                    int_ptr[i] = idst + i;

            c_count = num_valid > 0;

                icvSortPairs( pair16u32s_ptr, sample_count, 0 );
                // count the categories
                for( i = 1; i < num_valid; i++ )
                    if( *pair16u32s_ptr[i].i != *pair16u32s_ptr[i-1].i )

                icvSortIntPtr( int_ptr, sample_count, 0 );
                // count the categories
                for( i = 1; i < num_valid; i++ )
                    c_count += *int_ptr[i] != *int_ptr[i-1];

            max_c_count = MAX( max_c_count, c_count );
            cat_count->data.i[ci] = c_count;
            cat_ofs->data.i[ci] = total_c_count;
            // resize cat_map, if needed
            if( cat_map->cols < total_c_count + c_count )

                CV_CALL( cat_map = cvCreateMat( 1,
                    MAX(cat_map->cols*3/2,total_c_count+c_count), CV_32SC1 ));
                for( i = 0; i < total_c_count; i++ )
                    cat_map->data.i[i] = tmp_map->data.i[i];
                cvReleaseMat( &tmp_map );

            c_map = cat_map->data.i + total_c_count;
            total_c_count += c_count;

                // compact the class indices and build the map
                prev_label = ~*pair16u32s_ptr[0].i;
                for( i = 0; i < num_valid; i++ )

                    int cur_label = *pair16u32s_ptr[i].i;
                    if( cur_label != prev_label )
                        c_map[++c_count] = prev_label = cur_label;
                    *pair16u32s_ptr[i].u = (unsigned short)c_count;

                // replace labels for missing values with -1
                for( ; i < sample_count; i++ )
                    *pair16u32s_ptr[i].u = 65535;

                // compact the class indices and build the map
                prev_label = ~*int_ptr[0];
                for( i = 0; i < num_valid; i++ )

                    int cur_label = *int_ptr[i];
                    if( cur_label != prev_label )
                        c_map[++c_count] = prev_label = cur_label;
                    *int_ptr[i] = c_count;

                // replace labels for missing values with -1
                for( ; i < sample_count; i++ )

        else if( ci < 0 ) // process ordered variable

                udst = (unsigned short*)(buf->data.s + vi*sample_count);

                idst = buf->data.i + vi*sample_count;

            for( i = 0; i < sample_count; i++ )

                float val = ord_nan;
                int si = sidx ? sidx[i] : i;
                if( !mask || !mask[si*m_step] )

                        val = (float)idata[si*step];

                        val = fdata[si*step];

                if( fabs(val) >= ord_nan )

                    sprintf( err, "%d-th value of %d-th (ordered) "
                        "variable (=%g) is too large", i, vi, val );
                    CV_ERROR( CV_StsBadArg, err );

                    udst[i] = (unsigned short)i;
                    idst[i] = i; // TODO: move this up into the if( idata ) branch
                icvSortUShAux( udst, num_valid, _fdst );

                icvSortIntAux( idst, /* or num_valid? */ sample_count, _fdst );
            if( vi < var_count )
                data_root->set_num_valid(vi, num_valid);

    // set sample labels
        udst = (unsigned short*)(buf->data.s + work_var_count*sample_count);

        idst = buf->data.i + work_var_count*sample_count;

    for( i = 0; i < sample_count; i++ )

            udst[i] = sidx ? (unsigned short)sidx[i] : (unsigned short)i;

            idst[i] = sidx ? sidx[i] : i;

        unsigned short* udst = 0;

            udst = (unsigned short*)(buf->data.s + (get_work_var_count()-1)*sample_count);
            for( i = vi = 0; i < sample_count; i++ )

                udst[i] = (unsigned short)vi++;
                vi &= vi < cv_n ? -1 : 0;

            for( i = 0; i < sample_count; i++ )

                int a = cvRandInt(r) % sample_count;
                int b = cvRandInt(r) % sample_count;
                unsigned short unsh = (unsigned short)vi;
                CV_SWAP( udst[a], udst[b], unsh );

            idst = buf->data.i + (get_work_var_count()-1)*sample_count;
            for( i = vi = 0; i < sample_count; i++ )

                vi &= vi < cv_n ? -1 : 0;

            for( i = 0; i < sample_count; i++ )

                int a = cvRandInt(r) % sample_count;
                int b = cvRandInt(r) % sample_count;
                CV_SWAP( idst[a], idst[b], vi );

    cat_map->cols = MAX( total_c_count, 1 );

    max_split_size = cvAlign(sizeof(CvDTreeSplit) +
        (MAX(0,max_c_count - 33)/32)*sizeof(int),sizeof(void*));
    CV_CALL( split_heap = cvCreateSet( 0, sizeof(*split_heap), max_split_size, tree_storage ));

    have_priors = is_classifier && params.priors;
    if( is_classifier )

        int m = get_num_classes();

        CV_CALL( priors = cvCreateMat( 1, m, CV_64F ));
        for( i = 0; i < m; i++ )

            double val = have_priors ? params.priors[i] : 1.;

                CV_ERROR( CV_StsOutOfRange, "Every class weight should be positive" );
            priors->data.db[i] = val;

        // normalize weights
        cvScale( priors, priors, 1./sum );

        CV_CALL( priors_mult = cvCloneMat( priors ));
        CV_CALL( counts = cvCreateMat( 1, m, CV_32SC1 ));

    CV_CALL( direction = cvCreateMat( 1, sample_count, CV_8UC1 ));
    CV_CALL( split_buf = cvCreateMat( 1, sample_count, CV_32SC1 ));

    CV_CALL( pred_float_buf = (float*)cvAlloc(sample_count*sizeof(pred_float_buf[0])) );
    CV_CALL( pred_int_buf = (int*)cvAlloc(sample_count*sizeof(pred_int_buf[0])) );
    CV_CALL( resp_float_buf = (float*)cvAlloc(sample_count*sizeof(resp_float_buf[0])) );
    CV_CALL( resp_int_buf = (int*)cvAlloc(sample_count*sizeof(resp_int_buf[0])) );
    CV_CALL( cv_lables_buf = (int*)cvAlloc(sample_count*sizeof(cv_lables_buf[0])) );
    CV_CALL( sample_idx_buf = (int*)cvAlloc(sample_count*sizeof(sample_idx_buf[0])) );

    cvFree( &int_ptr );
    cvReleaseMat( &var_type0 );
    cvReleaseMat( &sample_indices );
    cvReleaseMat( &tmp_map );

void CvDTreeTrainData::do_responses_copy()

    responses_copy = cvCreateMat( responses->rows, responses->cols, responses->type );
    cvCopy( responses, responses_copy );
    responses = responses_copy;

CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )

    CvDTreeNode* root = 0;
    CvMat* isubsample_idx = 0;
    CvMat* subsample_co = 0;

    CV_FUNCNAME( "CvDTreeTrainData::subsample_data" );

        CV_ERROR( CV_StsError, "No training data has been set" );

    if( _subsample_idx )
        CV_CALL( isubsample_idx = cvPreprocessIndexArray( _subsample_idx, sample_count ));

    if( !isubsample_idx )

        // make a copy of the root node
        root = new_node( 0, 1, 0, 0 );

        *root = *data_root;
        root->num_valid = temp.num_valid;
        if( root->num_valid )

            for( i = 0; i < var_count; i++ )
                root->num_valid[i] = data_root->num_valid[i];

        root->cv_Tn = temp.cv_Tn;
        root->cv_node_risk = temp.cv_node_risk;
        root->cv_node_error = temp.cv_node_error;

        int* sidx = isubsample_idx->data.i;
        // co - array of count/offset pairs (to handle duplicated values in _subsample_idx)
        int* co, cur_ofs = 0;
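
        // Illustrative example of the count/offset encoding (hypothetical
        // values, not from the original source): with sample_count = 4 and
        // _subsample_idx = {2, 2, 0}, the counting pass below yields
        //
        //     co[0*2] = 1, co[1*2] = 0, co[2*2] = 2, co[3*2] = 0
        //
        // and the offset pass assigns co[i*2+1] the running sum of the
        // counts, so every duplicate of sample i gets a distinct destination
        // slot co[i*2+1] .. co[i*2+1]+co[i*2]-1 in the subsampled buffer.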
        int work_var_count = get_work_var_count();
        int count = isubsample_idx->rows + isubsample_idx->cols - 1;

        root = new_node( 0, count, 1, 0 );

        CV_CALL( subsample_co = cvCreateMat( 1, sample_count*2, CV_32SC1 ));
        cvZero( subsample_co );
        co = subsample_co->data.i;
        for( i = 0; i < count; i++ )

        for( i = 0; i < sample_count; i++ )

            co[i*2+1] = cur_ofs;
            cur_ofs += co[i*2];

        for( vi = 0; vi < work_var_count; vi++ )

            int ci = get_var_type(vi);

            if( ci >= 0 || vi >= var_count )

                int* src_buf = pred_int_buf;
                const int* src = 0;

                get_cat_var_data( data_root, vi, src_buf, &src );

                    unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
                        vi*sample_count + root->offset);
                    for( i = 0; i < count; i++ )

                        int val = src[sidx[i]];
                        udst[i] = (unsigned short)val;
                        num_valid += val >= 0;

                    int* idst = buf->data.i + root->buf_idx*buf->cols +
                        vi*sample_count + root->offset;
                    for( i = 0; i < count; i++ )

                        int val = src[sidx[i]];
                        num_valid += val >= 0;

                if( vi < var_count )
                    root->set_num_valid(vi, num_valid);

                int *src_idx_buf = pred_int_buf;
                const int* src_idx = 0;
                float *src_val_buf = pred_float_buf;
                const float* src_val = 0;
                int j = 0, idx, count_i;
                int num_valid = data_root->get_num_valid(vi);

                get_ord_var_data( data_root, vi, src_val_buf, src_idx_buf, &src_val, &src_idx );

                    unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
                        vi*sample_count + data_root->offset);
                    for( i = 0; i < num_valid; i++ )

                        count_i = co[idx*2];

                        for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                            udst_idx[j] = (unsigned short)cur_ofs;

                    root->set_num_valid(vi, j);

                    for( ; i < sample_count; i++ )

                        count_i = co[idx*2];

                        for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                            udst_idx[j] = (unsigned short)cur_ofs;

                    int* idst_idx = buf->data.i + root->buf_idx*buf->cols +
                        vi*sample_count + root->offset;
                    for( i = 0; i < num_valid; i++ )

                        count_i = co[idx*2];

                        for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                            idst_idx[j] = cur_ofs;

                    root->set_num_valid(vi, j);

                    for( ; i < sample_count; i++ )

                        count_i = co[idx*2];

                        for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
                            idst_idx[j] = cur_ofs;

        // sample indices subsampling
        int* sample_idx_src_buf = sample_idx_buf;
        const int* sample_idx_src = 0;
        get_sample_indices(data_root, sample_idx_src_buf, &sample_idx_src);

            unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
                get_work_var_count()*sample_count + root->offset);
            for( i = 0; i < count; i++ )
                sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]];

            int* sample_idx_dst = buf->data.i + root->buf_idx*buf->cols +
                get_work_var_count()*sample_count + root->offset;
            for( i = 0; i < count; i++ )
                sample_idx_dst[i] = sample_idx_src[sidx[i]];

    cvReleaseMat( &isubsample_idx );
    cvReleaseMat( &subsample_co );
void CvDTreeTrainData::get_vectors( const CvMat* _subsample_idx,
    float* values, uchar* missing,
    float* responses, bool get_class_idx )

    CvMat* subsample_idx = 0;
    CvMat* subsample_co = 0;

    CV_FUNCNAME( "CvDTreeTrainData::get_vectors" );

    int i, vi, total = sample_count, count = total, cur_ofs = 0;

    if( _subsample_idx )

        CV_CALL( subsample_idx = cvPreprocessIndexArray( _subsample_idx, sample_count ));
        sidx = subsample_idx->data.i;
        CV_CALL( subsample_co = cvCreateMat( 1, sample_count*2, CV_32SC1 ));
        co = subsample_co->data.i;
        cvZero( subsample_co );
        count = subsample_idx->cols + subsample_idx->rows - 1;
        for( i = 0; i < count; i++ )

        for( i = 0; i < total; i++ )

            int count_i = co[i*2];

                co[i*2+1] = cur_ofs*var_count;
                cur_ofs += count_i;

        memset( missing, 1, count*var_count );

    for( vi = 0; vi < var_count; vi++ )

        int ci = get_var_type(vi);
        if( ci >= 0 ) // categorical

            float* dst = values + vi;
            uchar* m = missing ? missing + vi : 0;
            int* src_buf = pred_int_buf;
            const int* src = 0;
            get_cat_var_data(data_root, vi, src_buf, &src);

            for( i = 0; i < count; i++, dst += var_count )

                int idx = sidx ? sidx[i] : i;
                int val = src[idx];

            float* dst = values + vi;
            uchar* m = missing ? missing + vi : 0;
            int count1 = data_root->get_num_valid(vi);
            float *src_val_buf = pred_float_buf;
            const float *src_val = 0;
            int* src_idx_buf = pred_int_buf;
            const int* src_idx = 0;
            get_ord_var_data(data_root, vi, src_val_buf, src_idx_buf, &src_val, &src_idx);

            for( i = 0; i < count1; i++ )

                int idx = src_idx[i];

                    count_i = co[idx*2];
                    cur_ofs = co[idx*2+1];

                    cur_ofs = idx*var_count;

                    float val = src_val[i];
                    for( ; count_i > 0; count_i--, cur_ofs += var_count )
                        dst[cur_ofs] = val;

    if( is_classifier )

        int* src_buf = resp_int_buf;
        const int* src = 0;
        get_class_labels(data_root, src_buf, &src);
        for( i = 0; i < count; i++ )

            int idx = sidx ? sidx[i] : i;
            int val = get_class_idx ? src[idx] :
                cat_map->data.i[cat_ofs->data.i[cat_var_count]+src[idx]];
            responses[i] = (float)val;

        float *_values_buf = resp_float_buf;
        const float* _values = 0;
        get_ord_responses(data_root, _values_buf, &_values);
        for( i = 0; i < count; i++ )

            int idx = sidx ? sidx[i] : i;
            responses[i] = _values[idx];

    cvReleaseMat( &subsample_idx );
    cvReleaseMat( &subsample_co );
CvDTreeNode* CvDTreeTrainData::new_node( CvDTreeNode* parent, int count,
    int storage_idx, int offset )

    CvDTreeNode* node = (CvDTreeNode*)cvSetNew( node_heap );

    node->sample_count = count;
    node->depth = parent ? parent->depth + 1 : 0;
    node->parent = parent;
    node->left = node->right = 0;

    node->class_idx = 0;

    node->buf_idx = storage_idx;
    node->offset = offset;

        node->num_valid = (int*)cvSetNew( nv_heap );

        node->num_valid = 0;
    node->alpha = node->node_risk = node->tree_risk = node->tree_error = 0.;
    node->complexity = 0;

    if( params.cv_folds > 0 && cv_heap )

        int cv_n = params.cv_folds;
        node->Tn = INT_MAX;
        node->cv_Tn = (int*)cvSetNew( cv_heap );
        node->cv_node_risk = (double*)cvAlignPtr(node->cv_Tn + cv_n, sizeof(double));
        node->cv_node_error = node->cv_node_risk + cv_n;

        node->cv_node_risk = 0;
        node->cv_node_error = 0;

CvDTreeSplit* CvDTreeTrainData::new_split_ord( int vi, float cmp_val,
    int split_point, int inversed, float quality )

    CvDTreeSplit* split = (CvDTreeSplit*)cvSetNew( split_heap );

    split->var_idx = vi;
    split->condensed_idx = INT_MIN;
    split->ord.c = cmp_val;
    split->ord.split_point = split_point;
    split->inversed = inversed;
    split->quality = quality;

CvDTreeSplit* CvDTreeTrainData::new_split_cat( int vi, float quality )

    CvDTreeSplit* split = (CvDTreeSplit*)cvSetNew( split_heap );
    int i, n = (max_c_count + 31)/32;

    split->var_idx = vi;
    split->condensed_idx = INT_MIN;
    split->inversed = 0;
    split->quality = quality;
    for( i = 0; i < n; i++ )
        split->subset[i] = 0;

void CvDTreeTrainData::free_node( CvDTreeNode* node )

    CvDTreeSplit* split = node->split;
    free_node_data( node );

        CvDTreeSplit* next = split->next;
        cvSetRemoveByPtr( split_heap, split );

    cvSetRemoveByPtr( node_heap, node );

void CvDTreeTrainData::free_node_data( CvDTreeNode* node )

    if( node->num_valid )

        cvSetRemoveByPtr( nv_heap, node->num_valid );
        node->num_valid = 0;

    // do not free cv_* fields, as all the cross-validation related data is released at once.
void CvDTreeTrainData::free_train_data()

    cvReleaseMat( &counts );
    cvReleaseMat( &buf );
    cvReleaseMat( &direction );
    cvReleaseMat( &split_buf );
    cvReleaseMemStorage( &temp_storage );
    cvReleaseMat( &responses_copy );
    cvFree( &pred_float_buf );
    cvFree( &pred_int_buf );
    cvFree( &resp_float_buf );
    cvFree( &resp_int_buf );
    cvFree( &cv_lables_buf );
    cvFree( &sample_idx_buf );

    cv_heap = nv_heap = 0;

void CvDTreeTrainData::clear()

    free_train_data();

    cvReleaseMemStorage( &tree_storage );

    cvReleaseMat( &var_idx );
    cvReleaseMat( &var_type );
    cvReleaseMat( &cat_count );
    cvReleaseMat( &cat_ofs );
    cvReleaseMat( &cat_map );
    cvReleaseMat( &priors );
    cvReleaseMat( &priors_mult );

    node_heap = split_heap = 0;

    sample_count = var_all = var_count = max_c_count = ord_var_count = cat_var_count = 0;
    have_labels = have_priors = is_classifier = false;

    buf_count = buf_size = 0;

int CvDTreeTrainData::get_num_classes() const

    return is_classifier ? cat_count->data.i[cat_var_count] : 0;

int CvDTreeTrainData::get_var_type(int vi) const

    return var_type->data.i[vi];

int CvDTreeTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* indices_buf, const float** ord_values, const int** indices )
    int node_sample_count = n->sample_count;
    int* sample_indices_buf = sample_idx_buf;
    const int* sample_indices = 0;

    get_sample_indices(n, sample_indices_buf, &sample_indices);

        *indices = buf->data.i + n->buf_idx*buf->cols +
            vi*sample_count + n->offset;

        const unsigned short* short_indices = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
            vi*sample_count + n->offset );
        for( int i = 0; i < node_sample_count; i++ )
            indices_buf[i] = short_indices[i];
        *indices = indices_buf;

    int td_cols = train_data->cols;
    if( tflag == CV_ROW_SAMPLE )

        for( int i = 0; i < node_sample_count &&
             ((((*indices)[i] >= 0) && !is_buf_16u) || (((*indices)[i] != 65535) && is_buf_16u)); i++ )

            int idx = (*indices)[i];
            idx = sample_indices[idx];
            ord_values_buf[i] = *(train_data->data.fl + idx * td_cols + vi);

        for( int i = 0; i < node_sample_count &&
             ((((*indices)[i] >= 0) && !is_buf_16u) || (((*indices)[i] != 65535) && is_buf_16u)); i++ )

            int idx = (*indices)[i];
            idx = sample_indices[idx];
            ord_values_buf[i] = *(train_data->data.fl + vi * td_cols + idx);

    *ord_values = ord_values_buf;
    return 0; //TODO: return the number of non-missing values

void CvDTreeTrainData::get_class_labels( CvDTreeNode* n, int* labels_buf, const int** labels )

    if( is_classifier )
        get_cat_var_data( n, var_count, labels_buf, labels );

void CvDTreeTrainData::get_sample_indices( CvDTreeNode* n, int* indices_buf, const int** indices )

    get_cat_var_data( n, get_work_var_count(), indices_buf, indices );

void CvDTreeTrainData::get_ord_responses( CvDTreeNode* n, float* values_buf, const float** values )

    int sample_count = n->sample_count;
    int* indices_buf = sample_idx_buf;
    const int* indices = 0;

    int r_cols = responses->cols;

    get_sample_indices(n, indices_buf, &indices);

    for( int i = 0; i < sample_count &&
         (((indices[i] >= 0) && !is_buf_16u) || ((indices[i] != 65535) && is_buf_16u)); i++ )

        int idx = indices[i];
        values_buf[i] = *(responses->data.fl + idx * r_cols);

    *values = values_buf;

void CvDTreeTrainData::get_cv_labels( CvDTreeNode* n, int* labels_buf, const int** labels )

    get_cat_var_data( n, get_work_var_count() - 1, labels_buf, labels );

int CvDTreeTrainData::get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf, const int** cat_values )

        *cat_values = buf->data.i + n->buf_idx*buf->cols +
            vi*sample_count + n->offset;

        const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
            vi*sample_count + n->offset);
        for( int i = 0; i < n->sample_count; i++ )
            cat_values_buf[i] = short_values[i];
        *cat_values = cat_values_buf;

    return 0; //TODO: return the number of non-missing values

int CvDTreeTrainData::get_child_buf_idx( CvDTreeNode* n )

    int idx = n->buf_idx + 1;
    if( idx >= buf_count )
        idx = shared ? 1 : 0;
void CvDTreeTrainData::write_params( CvFileStorage* fs )

    CV_FUNCNAME( "CvDTreeTrainData::write_params" );

    int vi, vcount = var_count;

    cvWriteInt( fs, "is_classifier", is_classifier ? 1 : 0 );
    cvWriteInt( fs, "var_all", var_all );
    cvWriteInt( fs, "var_count", var_count );
    cvWriteInt( fs, "ord_var_count", ord_var_count );
    cvWriteInt( fs, "cat_var_count", cat_var_count );

    cvStartWriteStruct( fs, "training_params", CV_NODE_MAP );
    cvWriteInt( fs, "use_surrogates", params.use_surrogates ? 1 : 0 );

    if( is_classifier )

        cvWriteInt( fs, "max_categories", params.max_categories );

        cvWriteReal( fs, "regression_accuracy", params.regression_accuracy );

    cvWriteInt( fs, "max_depth", params.max_depth );
    cvWriteInt( fs, "min_sample_count", params.min_sample_count );
    cvWriteInt( fs, "cross_validation_folds", params.cv_folds );

    if( params.cv_folds > 1 )

        cvWriteInt( fs, "use_1se_rule", params.use_1se_rule ? 1 : 0 );
        cvWriteInt( fs, "truncate_pruned_tree", params.truncate_pruned_tree ? 1 : 0 );

        cvWrite( fs, "priors", priors );

    cvEndWriteStruct( fs );

        cvWrite( fs, "var_idx", var_idx );

    cvStartWriteStruct( fs, "var_type", CV_NODE_SEQ+CV_NODE_FLOW );

    for( vi = 0; vi < vcount; vi++ )
        cvWriteInt( fs, 0, var_type->data.i[vi] >= 0 );

    cvEndWriteStruct( fs );

    if( cat_count && (cat_var_count > 0 || is_classifier) )

        CV_ASSERT( cat_count != 0 );
        cvWrite( fs, "cat_count", cat_count );
        cvWrite( fs, "cat_map", cat_map );
void CvDTreeTrainData::read_params( CvFileStorage* fs, CvFileNode* node )

    CV_FUNCNAME( "CvDTreeTrainData::read_params" );

    CvFileNode *tparams_node, *vartype_node;
    CvSeqReader reader;
    int vi, max_split_size, tree_block_size;

    is_classifier = (cvReadIntByName( fs, node, "is_classifier" ) != 0);
    var_all = cvReadIntByName( fs, node, "var_all" );
    var_count = cvReadIntByName( fs, node, "var_count", var_all );
    cat_var_count = cvReadIntByName( fs, node, "cat_var_count" );
    ord_var_count = cvReadIntByName( fs, node, "ord_var_count" );

    tparams_node = cvGetFileNodeByName( fs, node, "training_params" );

    if( tparams_node ) // training parameters are not necessary

        params.use_surrogates = cvReadIntByName( fs, tparams_node, "use_surrogates", 1 ) != 0;

        if( is_classifier )

            params.max_categories = cvReadIntByName( fs, tparams_node, "max_categories" );

            params.regression_accuracy =
                (float)cvReadRealByName( fs, tparams_node, "regression_accuracy" );

        params.max_depth = cvReadIntByName( fs, tparams_node, "max_depth" );
        params.min_sample_count = cvReadIntByName( fs, tparams_node, "min_sample_count" );
        params.cv_folds = cvReadIntByName( fs, tparams_node, "cross_validation_folds" );

        if( params.cv_folds > 1 )

            params.use_1se_rule = cvReadIntByName( fs, tparams_node, "use_1se_rule" ) != 0;
            params.truncate_pruned_tree =
                cvReadIntByName( fs, tparams_node, "truncate_pruned_tree" ) != 0;

        priors = (CvMat*)cvReadByName( fs, tparams_node, "priors" );

            if( !CV_IS_MAT(priors) )
                CV_ERROR( CV_StsParseError, "priors must be stored as a matrix" );
            priors_mult = cvCloneMat( priors );
    CV_CALL( var_idx = (CvMat*)cvReadByName( fs, node, "var_idx" ));

        if( !CV_IS_MAT(var_idx) ||
            (var_idx->cols != 1 && var_idx->rows != 1) ||
            var_idx->cols + var_idx->rows - 1 != var_count ||
            CV_MAT_TYPE(var_idx->type) != CV_32SC1 )
            CV_ERROR( CV_StsParseError,
                "var_idx (if exists) must be a valid 1d integer vector containing <var_count> elements" );

        for( vi = 0; vi < var_count; vi++ )
            if( (unsigned)var_idx->data.i[vi] >= (unsigned)var_all )
                CV_ERROR( CV_StsOutOfRange, "some of var_idx elements are out of range" );
    ////// read var type
    CV_CALL( var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 ));

    // keep the counts read from the file so they can be checked against
    // the values recomputed from var_type below
    int cat_var_count0 = cat_var_count, ord_var_count0 = ord_var_count;
    cat_var_count = 0;
    ord_var_count = -1;
    vartype_node = cvGetFileNodeByName( fs, node, "var_type" );

    if( vartype_node && CV_NODE_TYPE(vartype_node->tag) == CV_NODE_INT && var_count == 1 )
        var_type->data.i[0] = vartype_node->data.i ? cat_var_count++ : ord_var_count--;

        if( !vartype_node || CV_NODE_TYPE(vartype_node->tag) != CV_NODE_SEQ ||
            vartype_node->data.seq->total != var_count )
            CV_ERROR( CV_StsParseError, "var_type must exist and be a sequence of 0's and 1's" );

        cvStartReadSeq( vartype_node->data.seq, &reader );

        for( vi = 0; vi < var_count; vi++ )

            CvFileNode* n = (CvFileNode*)reader.ptr;
            if( CV_NODE_TYPE(n->tag) != CV_NODE_INT || (n->data.i & ~1) )
                CV_ERROR( CV_StsParseError, "var_type must exist and be a sequence of 0's and 1's" );
            var_type->data.i[vi] = n->data.i ? cat_var_count++ : ord_var_count--;
            CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );

    var_type->data.i[var_count] = cat_var_count;

    ord_var_count = ~ord_var_count;
    if( cat_var_count != cat_var_count0 || ord_var_count != ord_var_count0 )
        CV_ERROR( CV_StsParseError, "var_type is inconsistent with cat_var_count and ord_var_count" );
    if( cat_var_count > 0 || is_classifier )

        int ccount, total_c_count = 0;
        CV_CALL( cat_count = (CvMat*)cvReadByName( fs, node, "cat_count" ));
        CV_CALL( cat_map = (CvMat*)cvReadByName( fs, node, "cat_map" ));

        if( !CV_IS_MAT(cat_count) || !CV_IS_MAT(cat_map) ||
            (cat_count->cols != 1 && cat_count->rows != 1) ||
            CV_MAT_TYPE(cat_count->type) != CV_32SC1 ||
            cat_count->cols + cat_count->rows - 1 != cat_var_count + is_classifier ||
            (cat_map->cols != 1 && cat_map->rows != 1) ||
            CV_MAT_TYPE(cat_map->type) != CV_32SC1 )
            CV_ERROR( CV_StsParseError,
                "Both cat_count and cat_map must exist and be valid 1d integer vectors of an appropriate size" );

        ccount = cat_var_count + is_classifier;

        CV_CALL( cat_ofs = cvCreateMat( 1, ccount + 1, CV_32SC1 ));
        cat_ofs->data.i[0] = 0;

        for( vi = 0; vi < ccount; vi++ )

            int val = cat_count->data.i[vi];

                CV_ERROR( CV_StsOutOfRange, "some of cat_count elements are out of range" );
            max_c_count = MAX( max_c_count, val );
            cat_ofs->data.i[vi+1] = total_c_count += val;

        if( cat_map->cols + cat_map->rows - 1 != total_c_count )
            CV_ERROR( CV_StsBadSize,
                "cat_map vector length is not equal to the total number of categories in all categorical vars" );

    max_split_size = cvAlign(sizeof(CvDTreeSplit) +
        (MAX(0,max_c_count - 33)/32)*sizeof(int),sizeof(void*));

    tree_block_size = MAX((int)sizeof(CvDTreeNode)*8, max_split_size);
    tree_block_size = MAX(tree_block_size + block_size_delta, min_block_size);
    CV_CALL( tree_storage = cvCreateMemStorage( tree_block_size ));
    CV_CALL( node_heap = cvCreateSet( 0, sizeof(node_heap[0]),
        sizeof(CvDTreeNode), tree_storage ));
    CV_CALL( split_heap = cvCreateSet( 0, sizeof(split_heap[0]),
        max_split_size, tree_storage ));
/////////////////////// Decision Tree /////////////////////////

CvDTree::CvDTree()

    var_importance = 0;
    default_model_name = "my_tree";

void CvDTree::clear()

    cvReleaseMat( &var_importance );

        if( !data->shared )

    pruned_tree_idx = -1;

CvDTree::~CvDTree()

const CvDTreeNode* CvDTree::get_root() const

int CvDTree::get_pruned_tree_idx() const

    return pruned_tree_idx;

CvDTreeTrainData* CvDTree::get_data()

bool CvDTree::train( const CvMat* _train_data, int _tflag,
    const CvMat* _responses, const CvMat* _var_idx,
    const CvMat* _sample_idx, const CvMat* _var_type,
    const CvMat* _missing_mask, CvDTreeParams _params )

    bool result = false;

    CV_FUNCNAME( "CvDTree::train" );

    data = new CvDTreeTrainData( _train_data, _tflag, _responses,
        _var_idx, _sample_idx, _var_type,
        _missing_mask, _params, false );
    CV_CALL( result = do_train(0));
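
// A minimal usage sketch for the training entry point above (illustrative
// only; train_data, responses and var_type are assumed to be CvMat* objects
// prepared by the caller, one row per sample):
//
//     CvDTree tree;
//     CvDTreeParams params;
//     params.max_depth = 8;
//     params.min_sample_count = 10;
//     params.cv_folds = 10;   // prune using 10-fold cross-validation
//     tree.train( train_data, CV_ROW_SAMPLE, responses,
//                 0 /* var_idx */, 0 /* sample_idx */, var_type,
//                 0 /* missing_mask */, params );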
bool CvDTree::train( CvDTreeTrainData* _data, const CvMat* _subsample_idx )

    bool result = false;

    CV_FUNCNAME( "CvDTree::train" );

    data->shared = true;
    CV_CALL( result = do_train(_subsample_idx));

bool CvDTree::do_train( const CvMat* _subsample_idx )

    bool result = false;

    CV_FUNCNAME( "CvDTree::do_train" );

    root = data->subsample_data( _subsample_idx );

    CV_CALL( try_split_node(root));

    if( data->params.cv_folds > 0 )
        CV_CALL( prune_cv());

    if( !data->shared )
        data->free_train_data();
void CvDTree::try_split_node( CvDTreeNode* node )

    CvDTreeSplit* best_split = 0;
    int i, n = node->sample_count, vi;
    bool can_split = true;
    double quality_scale;

    calc_node_value( node );

    if( node->sample_count <= data->params.min_sample_count ||
        node->depth >= data->params.max_depth )
        can_split = false;

    if( can_split && data->is_classifier )

        // check if we have a "pure" node,
        // we assume that cls_count is filled by calc_node_value()
        int* cls_count = data->counts->data.i;
        int nz = 0, m = data->get_num_classes();
        for( i = 0; i < m; i++ )
            nz += cls_count[i] != 0;
        if( nz == 1 ) // there is only one class
            can_split = false;

    else if( can_split )

        if( sqrt(node->node_risk)/n < data->params.regression_accuracy )
            can_split = false;

        best_split = find_best_split(node);
        // TODO: check the split quality ...
        node->split = best_split;

    if( !can_split || !best_split )

        data->free_node_data(node);

    quality_scale = calc_node_dir( node );

    if( data->params.use_surrogates )

        // find all the surrogate splits
        // and sort them by their similarity to the primary one
        for( vi = 0; vi < data->var_count; vi++ )

            CvDTreeSplit* split;
            int ci = data->get_var_type(vi);

            if( vi == best_split->var_idx )

                split = find_surrogate_split_cat( node, vi );

                split = find_surrogate_split_ord( node, vi );

            // insert the split
            CvDTreeSplit* prev_split = node->split;
            split->quality = (float)(split->quality*quality_scale);

            while( prev_split->next &&
                   prev_split->next->quality > split->quality )
                prev_split = prev_split->next;
            split->next = prev_split->next;
            prev_split->next = split;

    split_node_data( node );
    try_split_node( node->left );
    try_split_node( node->right );
// calculate direction (left(-1), right(1), missing(0))
// for each sample using the best split.
// the function returns the scale coefficient for surrogate split quality factors.
// the scale is applied to normalize surrogate split quality relatively to the
// best (primary) split quality. That is, if a surrogate split is absolutely
// identical to the primary split, its quality will be set to the maximum value =
// quality of the primary split; otherwise, it will be lower.
// besides, the function computes node->maxlr, the minimum possible quality
// (w/o considering the above mentioned scale) for a surrogate split.
// Surrogate splits with quality less than node->maxlr are discarded.
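//
// Illustrative note (not in the original source): for categorical splits the
// per-category direction is read from the split's bit mask via
// CV_DTREE_CAT_DIR(idx,subset), which, as defined in the ML headers, maps the
// idx-th bit of `subset` to -1 (bit clear, go left) or +1 (bit set, go right):
//
//     #define CV_DTREE_CAT_DIR(idx,subset) \
//         (2*((subset[(idx)>>5] >> ((idx) & 31)) & 1) - 1)
//
// so in the loop below `d & 1` is 1 for every valid (non-missing) sample,
// which is why sum_abs counts the valid samples while sum counts the
// left/right imbalance.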
double CvDTree::calc_node_dir( CvDTreeNode* node )

    char* dir = (char*)data->direction->data.ptr;
    int i, n = node->sample_count, vi = node->split->var_idx;

    assert( !node->split->inversed );

    if( data->get_var_type(vi) >= 0 ) // split on categorical var

        int* labels_buf = data->pred_int_buf;
        const int* labels = 0;
        const int* subset = node->split->subset;
        data->get_cat_var_data( node, vi, labels_buf, &labels );
        if( !data->have_priors )

            int sum = 0, sum_abs = 0;

            for( i = 0; i < n; i++ )

                int idx = labels[i];
                int d = ( ((idx >= 0)&&(!data->is_buf_16u)) || ((idx != 65535)&&(data->is_buf_16u)) ) ?
                    CV_DTREE_CAT_DIR(idx,subset) : 0;
                sum += d; sum_abs += d & 1;

            R = (sum_abs + sum) >> 1;
            L = (sum_abs - sum) >> 1;

            const double* priors = data->priors_mult->data.db;
            double sum = 0, sum_abs = 0;
            int *responses_buf = data->resp_int_buf;
            const int* responses;
            data->get_class_labels(node, responses_buf, &responses);

            for( i = 0; i < n; i++ )

                int idx = labels[i];
                double w = priors[responses[i]];
                int d = idx >= 0 ? CV_DTREE_CAT_DIR(idx,subset) : 0;
                sum += d*w; sum_abs += (d & 1)*w;

            R = (sum_abs + sum) * 0.5;
            L = (sum_abs - sum) * 0.5;

    else // split on ordered var

        int split_point = node->split->ord.split_point;
        int n1 = node->get_num_valid(vi);

        float* val_buf = data->pred_float_buf;
        const float* val = 0;
        int* sorted_buf = data->pred_int_buf;
        const int* sorted = 0;
        data->get_ord_var_data( node, vi, val_buf, sorted_buf, &val, &sorted);

        assert( 0 <= split_point && split_point < n1-1 );

        if( !data->have_priors )

            for( i = 0; i <= split_point; i++ )
                dir[sorted[i]] = (char)-1;
            for( ; i < n1; i++ )
                dir[sorted[i]] = (char)1;
            for( ; i < n; i++ )
                dir[sorted[i]] = (char)0;

            L = split_point-1;
            R = n1 - split_point + 1;

            const double* priors = data->priors_mult->data.db;
            int* responses_buf = data->resp_int_buf;
            const int* responses = 0;
            data->get_class_labels(node, responses_buf, &responses);

            for( i = 0; i <= split_point; i++ )

                int idx = sorted[i];
                double w = priors[responses[idx]];
                dir[idx] = (char)-1;

            for( ; i < n1; i++ )

                int idx = sorted[i];
                double w = priors[responses[idx]];
                dir[idx] = (char)1;

            for( ; i < n; i++ )
                dir[sorted[i]] = (char)0;

    node->maxlr = MAX( L, R );
    return node->split->quality/(L + R);
CvDTreeSplit* CvDTree::find_best_split( CvDTreeNode* node )

    CvDTreeSplit *best_split = 0, *split = 0, *t;

    for( vi = 0; vi < data->var_count; vi++ )

        int ci = data->get_var_type(vi);
        if( node->get_num_valid(vi) <= 1 )

        if( data->is_classifier )

                split = find_split_cat_class( node, vi );

                split = find_split_ord_class( node, vi );

                split = find_split_cat_reg( node, vi );

                split = find_split_ord_reg( node, vi );

            if( !best_split || best_split->quality < split->quality )
                CV_SWAP( best_split, split, t );

                cvSetRemoveByPtr( data->split_heap, split );

    return best_split;

CvDTreeSplit* CvDTree::find_split_ord_class( CvDTreeNode* node, int vi )

    const float epsilon = FLT_EPSILON*2;
    int n = node->sample_count;
    int n1 = node->get_num_valid(vi);
    int m = data->get_num_classes();

    float* values_buf = data->pred_float_buf;
    const float* values = 0;
    int* indices_buf = data->pred_int_buf;
    const int* indices = 0;
    data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
    int* responses_buf = data->resp_int_buf;
    const int* responses = 0;
    data->get_class_labels( node, responses_buf, &responses );

    const int* rc0 = data->counts->data.i;
    int* lc = (int*)cvStackAlloc(m*sizeof(lc[0]));
    int* rc = (int*)cvStackAlloc(m*sizeof(rc[0]));
    int i, best_i = -1;
    double lsum2 = 0, rsum2 = 0, best_val = 0;
    const double* priors = data->have_priors ? data->priors_mult->data.db : 0;
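
    // Note on the split quality used below (a restatement, not part of the
    // original source): with L and R samples and per-class counts l_k and r_k
    // on the left/right side, the code maximizes
    //
    //     val = (lsum2*R + rsum2*L)/(L*R) = sum_k l_k^2 / L + sum_k r_k^2 / R
    //
    // which is equivalent to minimizing the weighted Gini impurity
    // L*(1 - sum_k (l_k/L)^2) + R*(1 - sum_k (r_k/R)^2), since the two
    // expressions differ only by the constant L + R. The incremental updates
    // lsum2 += 2*l_k + 1 etc. follow from (l_k + 1)^2 = l_k^2 + 2*l_k + 1.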
    // init arrays of class instance counters on both sides of the split
    for( i = 0; i < m; i++ )

    // compensate for missing values
    for( i = n1; i < n; i++ )

        rc[responses[indices[i]]]--;

        int L = 0, R = n1;

        for( i = 0; i < m; i++ )
            rsum2 += (double)rc[i]*rc[i];

        for( i = 0; i < n1 - 1; i++ )

            int idx = responses[indices[i]];

            lv = lc[idx]; rv = rc[idx];
            lsum2 += lv*2 + 1;
            rsum2 -= rv*2 - 1;
            lc[idx] = lv + 1; rc[idx] = rv - 1;

            if( values[i] + epsilon < values[i+1] )

                double val = (lsum2*R + rsum2*L)/((double)L*R);
                if( best_val < val )

        double L = 0, R = 0;
        for( i = 0; i < m; i++ )

            double wv = rc[i]*priors[i];

        for( i = 0; i < n1 - 1; i++ )

            int idx = responses[indices[i]];

            double p = priors[idx], p2 = p*p;

            lv = lc[idx]; rv = rc[idx];
            lsum2 += p2*(lv*2 + 1);
            rsum2 -= p2*(rv*2 - 1);
            lc[idx] = lv + 1; rc[idx] = rv - 1;

            if( values[i] + epsilon < values[i+1] )

                double val = (lsum2*R + rsum2*L)/((double)L*R);
                if( best_val < val )

    return best_i >= 0 ? data->new_split_ord( vi,
        (values[best_i] + values[best_i+1])*0.5f, best_i,
        0, (float)best_val ) : 0;
void CvDTree::cluster_categories( const int* vectors, int n, int m,
    int* csums, int k, int* labels )

    // TODO: consider adding priors (class weights) and sample weights to the clustering algorithm
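
    // Overview (a reading of the code below, not an authoritative spec): the
    // n category "vectors" are per-category class histograms with m bins, and
    // they are grouped into k clusters by a weighted k-means. Each vector and
    // each cluster centroid is normalized by its total count (v_weights /
    // c_weights), so the distance compared is between class *distributions*
    // rather than raw counts; csums accumulates the per-cluster histograms
    // and doubles as the centroid storage.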
    int iters = 0, max_iters = 100;

    double* buf = (double*)cvStackAlloc( (n + k)*sizeof(buf[0]) );
    double *v_weights = buf, *c_weights = buf + n; // n vector weights, then k cluster weights
    bool modified = true;
    CvRNG* r = &data->rng;

    // assign labels randomly
    for( i = idx = 0; i < n; i++ )

        const int* v = vectors + i*m;
        labels[i] = idx++;
        idx &= idx < k ? -1 : 0;

        // compute weight of each vector
        for( j = 0; j < m; j++ )

        v_weights[i] = sum ? 1./sum : 0.;

    for( i = 0; i < n; i++ )

        int i1 = cvRandInt(r) % n;
        int i2 = cvRandInt(r) % n;
        CV_SWAP( labels[i1], labels[i2], j );

    for( iters = 0; iters <= max_iters; iters++ )

        // calculate csums
        for( i = 0; i < k; i++ )

            for( j = 0; j < m; j++ )
                csums[i*m + j] = 0;

        for( i = 0; i < n; i++ )

            const int* v = vectors + i*m;
            int* s = csums + labels[i]*m;
            for( j = 0; j < m; j++ )

        // exit the loop here, when we have up-to-date csums
        if( iters == max_iters || !modified )

        // calculate weight of each cluster
        for( i = 0; i < k; i++ )

            const int* s = csums + i*m;
            for( j = 0; j < m; j++ )

            c_weights[i] = sum ? 1./sum : 0;

        // now for each vector determine the closest cluster
        for( i = 0; i < n; i++ )

            const int* v = vectors + i*m;
            double alpha = v_weights[i];
            double min_dist2 = DBL_MAX;

            for( idx = 0; idx < k; idx++ )

                const int* s = csums + idx*m;
                double dist2 = 0., beta = c_weights[idx];
                for( j = 0; j < m; j++ )

                    double t = v[j]*alpha - s[j]*beta;

                if( min_dist2 > dist2 )

                    min_dist2 = dist2;

            if( min_idx != labels[i] )

                labels[i] = min_idx;
CvDTreeSplit* CvDTree::find_split_cat_class( CvDTreeNode* node, int vi )

    CvDTreeSplit* split;
    int ci = data->get_var_type(vi);
    int n = node->sample_count;
    int m = data->get_num_classes();
    int _mi = data->cat_count->data.i[ci], mi = _mi;

    int* labels_buf = data->pred_int_buf;
    const int* labels = 0;
    data->get_cat_var_data(node, vi, labels_buf, &labels);
    int *responses_buf = data->resp_int_buf;
    const int* responses = 0;
    data->get_class_labels(node, responses_buf, &responses);

    int* lc = (int*)cvStackAlloc(m*sizeof(lc[0]));
    int* rc = (int*)cvStackAlloc(m*sizeof(rc[0]));
    int* _cjk = (int*)cvStackAlloc(m*(mi+1)*sizeof(_cjk[0]))+m, *cjk = _cjk;
    double* c_weights = (double*)cvStackAlloc( mi*sizeof(c_weights[0]) );
    int* cluster_labels = 0;
    int** int_ptr = 0;

    double L = 0, R = 0;
    double best_val = 0;
    int prevcode = 0, best_subset = -1, subset_i, subset_n, subtract = 0;
    const double* priors = data->priors_mult->data.db;

    // init array of counters:
    // c_{jk} - number of samples that have vi-th input variable = j and response = k.
    for( j = -1; j < mi; j++ )
        for( k = 0; k < m; k++ )

    for( i = 0; i < n; i++ )

        j = ( (labels[i] == 65535) && (data->is_buf_16u) ) ? -1 : labels[i];

    if( mi > data->params.max_categories )

        mi = MIN(data->params.max_categories, n);

        cluster_labels = (int*)cvStackAlloc(mi*sizeof(cluster_labels[0]));
        cluster_categories( _cjk, _mi, m, cjk, mi, cluster_labels );

    subset_n = 1 << mi;

        int_ptr = (int**)cvStackAlloc( mi*sizeof(int_ptr[0]) );
        for( j = 0; j < mi; j++ )
            int_ptr[j] = cjk + j*2 + 1;
        icvSortIntPtr( int_ptr, mi, 0 );

        for( k = 0; k < m; k++ )

            for( j = 0; j < mi; j++ )
                sum += cjk[j*m + k];

        for( j = 0; j < mi; j++ )

            for( k = 0; k < m; k++ )
                sum += cjk[j*m + k]*priors[k];
            c_weights[j] = sum;
            R += c_weights[j];

    for( ; subset_i < subset_n; subset_i++ )

        double lsum2 = 0, rsum2 = 0;

            idx = (int)(int_ptr[subset_i] - cjk)/2;

            int graycode = (subset_i>>1)^subset_i;
            int diff = graycode ^ prevcode;

            // determine index of the changed bit.
            idx = diff >= (1 << 16) ? 16 : 0;
            u.f = (float)(((diff >> 16) | diff) & 65535);
            idx += (u.i >> 23) - 127;
            subtract = graycode < prevcode;
            prevcode = graycode;
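
            // How the bit index is recovered above (an illustration, not part
            // of the original source): consecutive Gray codes differ in
            // exactly one bit, so diff has a single bit set, and that bit's
            // position is read off the IEEE-754 exponent after converting to
            // float: for diff = 32, u.f = 32.0f = 2^5, (u.i >> 23) = 127 + 5,
            // so idx += 5. The `diff >= (1 << 16)` test handles the upper
            // halfword separately, keeping the converted value below 2^16 and
            // therefore exactly representable, while `subtract` records
            // whether the flipped category just left (graycode < prevcode) or
            // joined the left subset.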
        crow = cjk + idx*m;
        weight = c_weights[idx];
        if( weight < FLT_EPSILON )

            for( k = 0; k < m; k++ )

                int lval = lc[k] + t;
                int rval = rc[k] - t;
                double p = priors[k], p2 = p*p;
                lsum2 += p2*lval*lval;
                rsum2 += p2*rval*rval;
                lc[k] = lval; rc[k] = rval;

            for( k = 0; k < m; k++ )

                int lval = lc[k] - t;
                int rval = rc[k] + t;
                double p = priors[k], p2 = p*p;
                lsum2 += p2*lval*lval;
                rsum2 += p2*rval*rval;
                lc[k] = lval; rc[k] = rval;

        if( L > FLT_EPSILON && R > FLT_EPSILON )

            double val = (lsum2*R + rsum2*L)/((double)L*R);
            if( best_val < val )

                best_subset = subset_i;

    if( best_subset < 0 )

    split = data->new_split_cat( vi, (float)best_val );

        for( i = 0; i <= best_subset; i++ )

            idx = (int)(int_ptr[i] - cjk) >> 1;
            split->subset[idx >> 5] |= 1 << (idx & 31);

        for( i = 0; i < _mi; i++ )

            idx = cluster_labels ? cluster_labels[i] : i;
            if( best_subset & (1 << idx) )
                split->subset[i >> 5] |= 1 << (i & 31);

CvDTreeSplit* CvDTree::find_split_ord_reg( CvDTreeNode* node, int vi )

    const float epsilon = FLT_EPSILON*2;
    int n = node->sample_count;
    int n1 = node->get_num_valid(vi);

    float* values_buf = data->pred_float_buf;
    const float* values = 0;
    int* indices_buf = data->pred_int_buf;
    const int* indices = 0;
    data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
    float* responses_buf = data->resp_float_buf;
    const float* responses = 0;
    data->get_ord_responses( node, responses_buf, &responses );

    int i, best_i = -1;
    double best_val = 0, lsum = 0, rsum = node->value*n;
    int L = 0, R = n1;

    // compensate for missing values
    for( i = n1; i < n; i++ )
        rsum -= responses[indices[i]];
\r
2235 // find the optimal split
\r
2236 for( i = 0; i < n1 - 1; i++ )
\r
2238 float t = responses[indices[i]];
\r
2243 if( values[i] + epsilon < values[i+1] )
\r
2245 double val = (lsum*lsum*R + rsum*rsum*L)/((double)L*R);
\r
2246 if( best_val < val )
\r
2254 return best_i >= 0 ? data->new_split_ord( vi,
\r
2255 (values[best_i] + values[best_i+1])*0.5f, best_i,
\r
2256 0, (float)best_val ) : 0;
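// Finds the best subset split of a categorical variable for a regression tree.
// Categories are sorted by their mean response; by the standard result for the
// squared-error criterion, only the mi-1 "prefix" subsets of that ordering have
// to be examined instead of all 2^mi subsets.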
\r
CvDTreeSplit* CvDTree::find_split_cat_reg( CvDTreeNode* node, int vi )
{
    CvDTreeSplit* split;
    int ci = data->get_var_type(vi);
    int n = node->sample_count;
    int mi = data->cat_count->data.i[ci];
    int* labels_buf = data->pred_int_buf;
    const int* labels = 0;
    float* responses_buf = data->resp_float_buf;
    const float* responses = 0;
    data->get_cat_var_data(node, vi, labels_buf, &labels);
    data->get_ord_responses(node, responses_buf, &responses);

    double* sum = (double*)cvStackAlloc( (mi+1)*sizeof(sum[0]) ) + 1;
    int* counts = (int*)cvStackAlloc( (mi+1)*sizeof(counts[0]) ) + 1;
    double** sum_ptr = (double**)cvStackAlloc( (mi+1)*sizeof(sum_ptr[0]) );
    int i, L = 0, R = 0;
    double best_val = 0, lsum = 0, rsum = 0;
    int best_subset = -1, subset_i;

    for( i = -1; i < mi; i++ )
        sum[i] = counts[i] = 0;

    // calculate sum response and weight of each category of the input var
    for( i = 0; i < n; i++ )
    {
        int idx = labels[i];
        double s = sum[idx] + responses[i];
        int nc = counts[idx] + 1;
        sum[idx] = s;
        counts[idx] = nc;
    }

    // calculate average response in each category
    for( i = 0; i < mi; i++ )
    {
        R += counts[i];
        rsum += sum[i];
        sum[i] /= MAX(counts[i],1);
        sum_ptr[i] = sum + i;
    }

    icvSortDblPtr( sum_ptr, mi, 0 );

    // revert back to unnormalized sums
    // (there should be a very little loss of accuracy)
    for( i = 0; i < mi; i++ )
        sum[i] *= counts[i];

    for( subset_i = 0; subset_i < mi-1; subset_i++ )
    {
        int idx = (int)(sum_ptr[subset_i] - sum);
        int ni = counts[idx];

        if( ni )
        {
            double s = sum[idx];
            lsum += s; L += ni;
            rsum -= s; R -= ni;

            if( L && R )
            {
                double val = (lsum*lsum*R + rsum*rsum*L)/((double)L*R);
                if( best_val < val )
                {
                    best_val = val;
                    best_subset = subset_i;
                }
            }
        }
    }

    if( best_subset < 0 )
        return 0;

    split = data->new_split_cat( vi, (float)best_val );
    for( i = 0; i <= best_subset; i++ )
    {
        int idx = (int)(sum_ptr[i] - sum);
        split->subset[idx >> 5] |= 1 << (idx & 31);
    }

    return split;
}
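// Finds the surrogate split of an ordered variable that best mimics the node's
// primary split: the quality of a candidate threshold is the (weighted) number
// of samples it sends to the same side as the primary split, maximized over
// both the direct and the inversed agreement (LL+RR vs. RL+LR), and it must
// beat the trivial "send everything to the heavier side" rule (node->maxlr).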
\r
CvDTreeSplit* CvDTree::find_surrogate_split_ord( CvDTreeNode* node, int vi )
{
    const float epsilon = FLT_EPSILON*2;
    const char* dir = (char*)data->direction->data.ptr;
    int n1 = node->get_num_valid(vi);
    float* values_buf = data->pred_float_buf;
    const float* values = 0;
    int* indices_buf = data->pred_int_buf;
    const int* indices = 0;
    data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
    // LL - number of samples that both the primary and the surrogate splits send to the left
    // LR - ... primary split sends to the left and the surrogate split sends to the right
    // RL - ... primary split sends to the right and the surrogate split sends to the left
    // RR - ... both send to the right
    int i, best_i = -1, best_inversed = 0;
    double best_val;

    if( !data->have_priors )
    {
        int LL = 0, RL = 0, LR, RR;
        int worst_val = cvFloor(node->maxlr), _best_val = worst_val;
        int sum = 0, sum_abs = 0;

        for( i = 0; i < n1; i++ )
        {
            int d = dir[indices[i]];
            sum += d; sum_abs += d & 1;
        }

        // sum_abs = R + L; sum = R - L
        RR = (sum_abs + sum) >> 1;
        LR = (sum_abs - sum) >> 1;

        // initially all the samples are sent to the right by the surrogate split,
        // LR of them are sent to the left by primary split, and RR - to the right.
        // now iteratively compute LL, LR, RL and RR for every possible surrogate split value.
        for( i = 0; i < n1 - 1; i++ )
        {
            int d = dir[indices[i]];

            if( d < 0 )
            {
                LL++; LR--;
                if( LL + RR > _best_val && values[i] + epsilon < values[i+1] )
                {
                    _best_val = LL + RR;
                    best_i = i; best_inversed = 0;
                }
            }
            else if( d > 0 )
            {
                RL++; RR--;
                if( RL + LR > _best_val && values[i] + epsilon < values[i+1] )
                {
                    _best_val = RL + LR;
                    best_i = i; best_inversed = 1;
                }
            }
        }
        best_val = _best_val;
    }
    else
    {
        double LL = 0, RL = 0, LR, RR;
        double worst_val = node->maxlr;
        double sum = 0, sum_abs = 0;
        const double* priors = data->priors_mult->data.db;
        int* responses_buf = data->resp_int_buf;
        const int* responses = 0;
        data->get_class_labels(node, responses_buf, &responses);
        best_val = worst_val;

        for( i = 0; i < n1; i++ )
        {
            int idx = indices[i];
            double w = priors[responses[idx]];
            int d = dir[idx];
            sum += d*w; sum_abs += (d & 1)*w;
        }

        // sum_abs = R + L; sum = R - L
        RR = (sum_abs + sum)*0.5;
        LR = (sum_abs - sum)*0.5;

        // initially all the samples are sent to the right by the surrogate split,
        // LR of them are sent to the left by primary split, and RR - to the right.
        // now iteratively compute LL, LR, RL and RR for every possible surrogate split value.
        for( i = 0; i < n1 - 1; i++ )
        {
            int idx = indices[i];
            double w = priors[responses[idx]];
            int d = dir[idx];

            if( d < 0 )
            {
                LL += w; LR -= w;
                if( LL + RR > best_val && values[i] + epsilon < values[i+1] )
                {
                    best_val = LL + RR;
                    best_i = i; best_inversed = 0;
                }
            }
            else if( d > 0 )
            {
                RL += w; RR -= w;
                if( RL + LR > best_val && values[i] + epsilon < values[i+1] )
                {
                    best_val = RL + LR;
                    best_i = i; best_inversed = 1;
                }
            }
        }
    }

    return best_i >= 0 && best_val > node->maxlr ? data->new_split_ord( vi,
        (values[best_i] + values[best_i+1])*0.5f, best_i,
        best_inversed, (float)best_val ) : 0;
}
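// Finds the surrogate split of a categorical variable: each category is sent
// to the side where the primary split directs the majority of its samples, and
// the resulting agreement must exceed the default rule (node->maxlr) - and the
// split must actually separate categories (0 < l_win < mi) - to be accepted.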
\r
CvDTreeSplit* CvDTree::find_surrogate_split_cat( CvDTreeNode* node, int vi )
{
    const char* dir = (char*)data->direction->data.ptr;
    int n = node->sample_count;
    int* labels_buf = data->pred_int_buf;
    const int* labels = 0;
    data->get_cat_var_data(node, vi, labels_buf, &labels);
    // LL - number of samples that both the primary and the surrogate splits send to the left
    // LR - ... primary split sends to the left and the surrogate split sends to the right
    // RL - ... primary split sends to the right and the surrogate split sends to the left
    // RR - ... both send to the right
    CvDTreeSplit* split = data->new_split_cat( vi, 0 );
    int i, mi = data->cat_count->data.i[data->get_var_type(vi)], l_win = 0;
    double best_val = 0;
    double* lc = (double*)cvStackAlloc( (mi+1)*2*sizeof(lc[0]) ) + 1;
    double* rc = lc + mi + 1;

    for( i = -1; i < mi; i++ )
        lc[i] = rc[i] = 0;

    // for each category calculate the weight of samples
    // sent to the left (lc) and to the right (rc) by the primary split
    if( !data->have_priors )
    {
        int* _lc = (int*)cvStackAlloc((mi+2)*2*sizeof(_lc[0])) + 1;
        int* _rc = _lc + mi + 1;

        for( i = -1; i < mi; i++ )
            _lc[i] = _rc[i] = 0;

        for( i = 0; i < n; i++ )
        {
            int idx = ( (labels[i] == 65535) && (data->is_buf_16u) ) ? -1 : labels[i];
            int d = dir[i];
            int sum = _lc[idx] + d;
            int sum_abs = _rc[idx] + (d & 1);
            _lc[idx] = sum; _rc[idx] = sum_abs;
        }

        for( i = 0; i < mi; i++ )
        {
            int sum = _lc[i];
            int sum_abs = _rc[i];
            lc[i] = (sum_abs - sum) >> 1;
            rc[i] = (sum_abs + sum) >> 1;
        }
    }
    else
    {
        const double* priors = data->priors_mult->data.db;
        int* responses_buf = data->resp_int_buf;
        const int* responses = 0;
        data->get_class_labels(node, responses_buf, &responses);

        for( i = 0; i < n; i++ )
        {
            int idx = labels[i];
            double w = priors[responses[i]];
            int d = dir[i];
            double sum = lc[idx] + d*w;
            double sum_abs = rc[idx] + (d & 1)*w;
            lc[idx] = sum; rc[idx] = sum_abs;
        }

        for( i = 0; i < mi; i++ )
        {
            double sum = lc[i];
            double sum_abs = rc[i];
            lc[i] = (sum_abs - sum) * 0.5;
            rc[i] = (sum_abs + sum) * 0.5;
        }
    }

    // 2. now form the split.
    // in each category send all the samples to the same direction as majority
    for( i = 0; i < mi; i++ )
    {
        double lval = lc[i], rval = rc[i];
        if( lval > rval )
        {
            split->subset[i >> 5] |= 1 << (i & 31);
            best_val += lval;
            l_win++;
        }
        else
            best_val += rval;
    }

    split->quality = (float)best_val;
    if( split->quality <= node->maxlr || l_win == 0 || l_win == mi )
        cvSetRemoveByPtr( data->split_heap, split ), split = 0;

    return split;
}
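// Computes node->value, node->node_risk and, when cv_folds > 0, the per-fold
// values, risks and errors consumed later by cross-validation pruning. At the
// root of a classification tree it also rescales priors_mult so that the
// user-specified priors are expressed per training sample of each class.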
\r
void CvDTree::calc_node_value( CvDTreeNode* node )
{
    int i, j, k, n = node->sample_count, cv_n = data->params.cv_folds;
    int* cv_labels_buf = data->cv_lables_buf;
    const int* cv_labels = 0;
    data->get_cv_labels(node, cv_labels_buf, &cv_labels);

    if( data->is_classifier )
    {
        // in case of classification tree:
        //  * node value is the label of the class that has the largest weight in the node.
        //  * node risk is the weighted number of misclassified samples,
        //  * j-th cross-validation fold value and risk are calculated as above,
        //    but using the samples with cv_labels(*)!=j.
        //  * j-th cross-validation fold error is calculated as the weighted number of
        //    misclassified samples with cv_labels(*)==j.

        // compute the number of instances of each class
        int* cls_count = data->counts->data.i;
        int* responses_buf = data->resp_int_buf;
        const int* responses = 0;
        data->get_class_labels(node, responses_buf, &responses);
        int m = data->get_num_classes();
        int* cv_cls_count = (int*)cvStackAlloc(m*cv_n*sizeof(cv_cls_count[0]));
        double max_val = -1, total_weight = 0;
        int max_k = -1;
        double* priors = data->priors_mult->data.db;

        for( k = 0; k < m; k++ )
            cls_count[k] = 0;

        if( cv_n == 0 )
        {
            for( i = 0; i < n; i++ )
                cls_count[responses[i]]++;
        }
        else
        {
            for( j = 0; j < cv_n; j++ )
                for( k = 0; k < m; k++ )
                    cv_cls_count[j*m + k] = 0;

            for( i = 0; i < n; i++ )
            {
                j = cv_labels[i]; k = responses[i];
                cv_cls_count[j*m + k]++;
            }

            for( j = 0; j < cv_n; j++ )
                for( k = 0; k < m; k++ )
                    cls_count[k] += cv_cls_count[j*m + k];
        }

        if( data->have_priors && node->parent == 0 )
        {
            // compute priors_mult from priors, take the sample ratio into account.
            double sum = 0;
            for( k = 0; k < m; k++ )
            {
                int n_k = cls_count[k];
                priors[k] = data->priors->data.db[k]*(n_k ? 1./n_k : 0.);
                sum += priors[k];
            }
            sum = 1./sum;
            for( k = 0; k < m; k++ )
                priors[k] *= sum;
        }

        for( k = 0; k < m; k++ )
        {
            double val = cls_count[k]*priors[k];
            total_weight += val;
            if( max_val < val )
            {
                max_val = val;
                max_k = k;
            }
        }

        node->class_idx = max_k;
        node->value = data->cat_map->data.i[
            data->cat_ofs->data.i[data->cat_var_count] + max_k];
        node->node_risk = total_weight - max_val;

        for( j = 0; j < cv_n; j++ )
        {
            double sum_k = 0, sum = 0, max_val_k = 0;
            max_val = -1; max_k = -1;

            for( k = 0; k < m; k++ )
            {
                double w = priors[k];
                double val_k = cv_cls_count[j*m + k]*w;
                double val = cls_count[k]*w - val_k;
                sum_k += val_k;
                sum += val;
                if( max_val < val )
                {
                    max_val = val;
                    max_val_k = val_k;
                    max_k = k;
                }
            }

            node->cv_Tn[j] = INT_MAX;
            node->cv_node_risk[j] = sum - max_val;
            node->cv_node_error[j] = sum_k - max_val_k;
        }
    }
    else
    {
        // in case of regression tree:
        //  * node value is 1/n*sum_i(Y_i), where Y_i is i-th response,
        //    n is the number of samples in the node.
        //  * node risk is the sum of squared errors: sum_i((Y_i - <node_value>)^2)
        //  * j-th cross-validation fold value and risk are calculated as above,
        //    but using the samples with cv_labels(*)!=j.
        //  * j-th cross-validation fold error is calculated
        //    using samples with cv_labels(*)==j as the test subset:
        //    error_j = sum_(i,cv_labels(i)==j)((Y_i - <node_value_j>)^2),
        //    where node_value_j is the node value calculated
        //    as described in the previous bullet, and summation is done
        //    over the samples with cv_labels(*)==j.

        double sum = 0, sum2 = 0;
        float* values_buf = data->resp_float_buf;
        const float* values = 0;
        data->get_ord_responses(node, values_buf, &values);
        double *cv_sum = 0, *cv_sum2 = 0;
        int* cv_count = 0;

        if( cv_n == 0 )
        {
            for( i = 0; i < n; i++ )
            {
                double t = values[i];
                sum += t;
                sum2 += t*t;
            }
        }
        else
        {
            cv_sum = (double*)cvStackAlloc( cv_n*sizeof(cv_sum[0]) );
            cv_sum2 = (double*)cvStackAlloc( cv_n*sizeof(cv_sum2[0]) );
            cv_count = (int*)cvStackAlloc( cv_n*sizeof(cv_count[0]) );

            for( j = 0; j < cv_n; j++ )
            {
                cv_sum[j] = cv_sum2[j] = 0.;
                cv_count[j] = 0;
            }

            for( i = 0; i < n; i++ )
            {
                j = cv_labels[i];
                double t = values[i];
                double s = cv_sum[j] + t;
                double s2 = cv_sum2[j] + t*t;
                int nc = cv_count[j] + 1;
                cv_sum[j] = s;
                cv_sum2[j] = s2;
                cv_count[j] = nc;
            }

            for( j = 0; j < cv_n; j++ )
            {
                sum += cv_sum[j];
                sum2 += cv_sum2[j];
            }
        }

        node->node_risk = sum2 - (sum/n)*sum;
        node->value = sum/n;

        for( j = 0; j < cv_n; j++ )
        {
            double s = cv_sum[j], si = sum - s;
            double s2 = cv_sum2[j], s2i = sum2 - s2;
            int c = cv_count[j], ci = n - c;
            double r = si/MAX(ci,1);
            node->cv_node_risk[j] = s2i - r*r*ci;
            node->cv_node_error[j] = s2 - 2*r*s + c*r*r;
            node->cv_Tn[j] = INT_MAX;
        }
    }
}
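// Assigns a direction (left/right) to every sample of the node. Samples whose
// primary-split variable is missing are resolved by the surrogate splits
// first; whatever remains is sent in the majority direction d0, with exact
// ties alternating between left and right via d1.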
\r
void CvDTree::complete_node_dir( CvDTreeNode* node )
{
    int vi, i, n = node->sample_count, nl, nr, d0 = 0, d1 = -1;
    int nz = n - node->get_num_valid(node->split->var_idx);
    char* dir = (char*)data->direction->data.ptr;

    // try to complete direction using surrogate splits
    if( nz && data->params.use_surrogates )
    {
        CvDTreeSplit* split = node->split->next;
        for( ; split != 0 && nz; split = split->next )
        {
            int inversed_mask = split->inversed ? -1 : 0;
            vi = split->var_idx;

            if( data->get_var_type(vi) >= 0 ) // split on categorical var
            {
                int* labels_buf = data->pred_int_buf;
                const int* labels = 0;
                data->get_cat_var_data(node, vi, labels_buf, &labels);
                const int* subset = split->subset;

                for( i = 0; i < n; i++ )
                {
                    int idx = labels[i];
                    if( !dir[i] && ( ((idx >= 0)&&(!data->is_buf_16u)) || ((idx != 65535)&&(data->is_buf_16u)) ))
                    {
                        int d = CV_DTREE_CAT_DIR(idx,subset);
                        dir[i] = (char)((d ^ inversed_mask) - inversed_mask);
                        if( !--nz )
                            break;
                    }
                }
            }
            else // split on ordered var
            {
                float* values_buf = data->pred_float_buf;
                const float* values = 0;
                int* indices_buf = data->pred_int_buf;
                const int* indices = 0;
                data->get_ord_var_data( node, vi, values_buf, indices_buf, &values, &indices );
                int split_point = split->ord.split_point;
                int n1 = node->get_num_valid(vi);

                assert( 0 <= split_point && split_point < n-1 );

                for( i = 0; i < n1; i++ )
                {
                    int idx = indices[i];
                    if( !dir[idx] )
                    {
                        int d = i <= split_point ? -1 : 1;
                        dir[idx] = (char)((d ^ inversed_mask) - inversed_mask);
                        if( !--nz )
                            break;
                    }
                }
            }
        }
    }

    // find the default direction for the rest
    if( nz )
    {
        for( i = nr = 0; i < n; i++ )
            nr += dir[i] > 0;
        nl = n - nr - nz;
        d0 = nl > nr ? -1 : nr > nl;
    }

    // make sure that every sample is directed either to the left or to the right
    for( i = 0; i < n; i++ )
    {
        int d = dir[i];
        if( !d )
        {
            d = d0;
            if( !d )
                d = d1, d1 = -d1;
        }
        d = d > 0;
        dir[i] = (char)d; // remap (-1,1) to (0,1)
    }
}
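// Physically splits the node's per-variable buffers between the two children.
// new_idx[] maps each old within-node sample position to its position inside
// the left or the right child, so the pre-sorted orderings of the ordered
// variables can be partitioned without re-sorting either half.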
\r
void CvDTree::split_node_data( CvDTreeNode* node )
{
    int vi, i, n = node->sample_count, nl, nr, scount = data->sample_count;
    char* dir = (char*)data->direction->data.ptr;
    CvDTreeNode *left = 0, *right = 0;
    int* new_idx = data->split_buf->data.i;
    int new_buf_idx = data->get_child_buf_idx( node );
    int work_var_count = data->get_work_var_count();
    CvMat* buf = data->buf;
    int* temp_buf = (int*)cvStackAlloc(n*sizeof(temp_buf[0]));

    complete_node_dir(node);

    for( i = nl = nr = 0; i < n; i++ )
    {
        int d = dir[i];
        // initialize new indices for splitting ordered variables
        new_idx[i] = (nl & (d-1)) | (nr & -d); // d ? ri : li
        nr += d;
        nl += d^1;
    }

    bool split_input_data;
    node->left = left = data->new_node( node, nl, new_buf_idx, node->offset );
    node->right = right = data->new_node( node, nr, new_buf_idx, node->offset + nl );

    split_input_data = node->depth + 1 < data->params.max_depth &&
        (node->left->sample_count > data->params.min_sample_count ||
        node->right->sample_count > data->params.min_sample_count);

    // split ordered variables, keep both halves sorted.
    for( vi = 0; vi < data->var_count; vi++ )
    {
        int ci = data->get_var_type(vi);
        int n1 = node->get_num_valid(vi);
        int *src_idx_buf = data->pred_int_buf;
        const int* src_idx = 0;
        float *src_val_buf = data->pred_float_buf;
        const float* src_val = 0;

        if( ci >= 0 || !split_input_data )
            continue;

        data->get_ord_var_data(node, vi, src_val_buf, src_idx_buf, &src_val, &src_idx);

        for(i = 0; i < n; i++)
            temp_buf[i] = src_idx[i];

        if (data->is_buf_16u)
        {
            unsigned short *ldst, *rdst, *ldst0, *rdst0;
            //unsigned short tl, tr;
            ldst0 = ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
                vi*scount + left->offset);
            rdst0 = rdst = (unsigned short*)(ldst + nl);

            // split sorted
            for( i = 0; i < n1; i++ )
            {
                int idx = temp_buf[i];
                int d = dir[idx];
                idx = new_idx[idx];
                if( d )
                {
                    *rdst = (unsigned short)idx;
                    rdst++;
                }
                else
                {
                    *ldst = (unsigned short)idx;
                    ldst++;
                }
            }

            left->set_num_valid(vi, (int)(ldst - ldst0));
            right->set_num_valid(vi, (int)(rdst - rdst0));

            // split missing
            for( ; i < n; i++ )
            {
                int idx = temp_buf[i];
                int d = dir[idx];
                idx = new_idx[idx];
                if( d )
                {
                    *rdst = (unsigned short)idx;
                    rdst++;
                }
                else
                {
                    *ldst = (unsigned short)idx;
                    ldst++;
                }
            }
        }
        else
        {
            int *ldst0, *ldst, *rdst0, *rdst;
            ldst0 = ldst = buf->data.i + left->buf_idx*buf->cols +
                vi*scount + left->offset;
            rdst0 = rdst = buf->data.i + right->buf_idx*buf->cols +
                vi*scount + right->offset;

            // split sorted
            for( i = 0; i < n1; i++ )
            {
                int idx = temp_buf[i];
                int d = dir[idx];
                idx = new_idx[idx];
                if( d )
                {
                    *rdst = idx;
                    rdst++;
                }
                else
                {
                    *ldst = idx;
                    ldst++;
                }
            }

            left->set_num_valid(vi, (int)(ldst - ldst0));
            right->set_num_valid(vi, (int)(rdst - rdst0));

            // split missing
            for( ; i < n; i++ )
            {
                int idx = temp_buf[i];
                int d = dir[idx];
                idx = new_idx[idx];
                if( d )
                {
                    *rdst = idx;
                    rdst++;
                }
                else
                {
                    *ldst = idx;
                    ldst++;
                }
            }
        }
    }

    // split categorical vars, responses and cv_labels using new_idx relocation table
    for( vi = 0; vi < work_var_count; vi++ )
    {
        int ci = data->get_var_type(vi);
        int n1 = node->get_num_valid(vi), nr1 = 0;

        if( ci < 0 || (vi < data->var_count && !split_input_data) )
            continue;

        int *src_lbls_buf = data->pred_int_buf;
        const int* src_lbls = 0;
        data->get_cat_var_data(node, vi, src_lbls_buf, &src_lbls);

        for(i = 0; i < n; i++)
            temp_buf[i] = src_lbls[i];

        if (data->is_buf_16u)
        {
            unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
                vi*scount + left->offset);
            unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
                vi*scount + right->offset);

            for( i = 0; i < n; i++ )
            {
                int d = dir[i];
                int idx = temp_buf[i];
                if (d)
                {
                    *rdst = (unsigned short)idx;
                    rdst++;
                    nr1 += (idx != 65535 )&d;
                }
                else
                {
                    *ldst = (unsigned short)idx;
                    ldst++;
                }
            }

            if( vi < data->var_count )
            {
                left->set_num_valid(vi, n1 - nr1);
                right->set_num_valid(vi, nr1);
            }
        }
        else
        {
            int *ldst = buf->data.i + left->buf_idx*buf->cols +
                vi*scount + left->offset;
            int *rdst = buf->data.i + right->buf_idx*buf->cols +
                vi*scount + right->offset;

            for( i = 0; i < n; i++ )
            {
                int d = dir[i];
                int idx = temp_buf[i];
                if (d)
                {
                    *rdst = idx;
                    rdst++;
                    nr1 += (idx >= 0)&d;
                }
                else
                {
                    *ldst = idx;
                    ldst++;
                }
            }

            if( vi < data->var_count )
            {
                left->set_num_valid(vi, n1 - nr1);
                right->set_num_valid(vi, nr1);
            }
        }
    }

    // split sample indices
    int *sample_idx_src_buf = data->sample_idx_buf;
    const int* sample_idx_src = 0;
    data->get_sample_indices(node, sample_idx_src_buf, &sample_idx_src);

    for(i = 0; i < n; i++)
        temp_buf[i] = sample_idx_src[i];

    int pos = data->get_work_var_count();
    if (data->is_buf_16u)
    {
        unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
            pos*scount + left->offset);
        unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
            pos*scount + right->offset);
        for (i = 0; i < n; i++)
        {
            int d = dir[i];
            unsigned short idx = (unsigned short)temp_buf[i];
            if (d)
            {
                *rdst = idx;
                rdst++;
            }
            else
            {
                *ldst = idx;
                ldst++;
            }
        }
    }
    else
    {
        int* ldst = buf->data.i + left->buf_idx*buf->cols +
            pos*scount + left->offset;
        int* rdst = buf->data.i + right->buf_idx*buf->cols +
            pos*scount + right->offset;
        for (i = 0; i < n; i++)
        {
            int d = dir[i];
            int idx = temp_buf[i];
            if (d)
            {
                *rdst = idx;
                rdst++;
            }
            else
            {
                *ldst = idx;
                ldst++;
            }
        }
    }

    // deallocate the parent node data that is not needed anymore
    data->free_node_data(node);
}
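// Cost-complexity pruning driven by cross-validation (the CART scheme): the
// nested sequence of subtrees T_0 > T_1 > ... is generated on the whole data
// together with the corresponding complexity costs alpha_k; each cv fold then
// builds its own sequence and is scored at the (geometric-mean) alphas, and
// the subtree index with the smallest total cv error - optionally relaxed by
// the 1SE rule - is stored in pruned_tree_idx.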
\r
void CvDTree::prune_cv()
{
    CvMat* ab = 0;
    CvMat* temp = 0;
    CvMat* err_jk = 0;

    // 1. build tree sequence for each cv fold, calculate error_{Tj,beta_k}.
    // 2. choose the best tree index (if need, apply 1SE rule).
    // 3. store the best index and cut the branches.

    CV_FUNCNAME( "CvDTree::prune_cv" );

    __BEGIN__;

    int ti, j, tree_count = 0, cv_n = data->params.cv_folds, n = root->sample_count;
    // currently, 1SE for regression is not implemented
    bool use_1se = data->params.use_1se_rule != 0 && data->is_classifier;
    double* err;
    double min_err = 0, min_err_se = 0;
    int min_idx = -1;

    CV_CALL( ab = cvCreateMat( 1, 256, CV_64F ));

    // build the main tree sequence, calculate alpha's
    for(;;tree_count++)
    {
        double min_alpha = update_tree_rnc(tree_count, -1);
        if( cut_tree(tree_count, -1, min_alpha) )
            break;

        if( ab->cols <= tree_count )
        {
            CV_CALL( temp = cvCreateMat( 1, ab->cols*3/2, CV_64F ));
            for( ti = 0; ti < ab->cols; ti++ )
                temp->data.db[ti] = ab->data.db[ti];
            cvReleaseMat( &ab );
            ab = temp;
            temp = 0;
        }

        ab->data.db[tree_count] = min_alpha;
    }

    ab->data.db[0] = 0.;

    if( tree_count > 0 )
    {
        for( ti = 1; ti < tree_count-1; ti++ )
            ab->data.db[ti] = sqrt(ab->data.db[ti]*ab->data.db[ti+1]);
        ab->data.db[tree_count-1] = DBL_MAX*0.5;

        CV_CALL( err_jk = cvCreateMat( cv_n, tree_count, CV_64F ));
        err = err_jk->data.db;

        for( j = 0; j < cv_n; j++ )
        {
            int tj = 0, tk = 0;
            for( ; tk < tree_count; tj++ )
            {
                double min_alpha = update_tree_rnc(tj, j);
                if( cut_tree(tj, j, min_alpha) )
                    min_alpha = DBL_MAX;

                for( ; tk < tree_count; tk++ )
                {
                    if( ab->data.db[tk] > min_alpha )
                        break;
                    err[j*tree_count + tk] = root->tree_error;
                }
            }
        }

        for( ti = 0; ti < tree_count; ti++ )
        {
            double sum_err = 0;
            for( j = 0; j < cv_n; j++ )
                sum_err += err[j*tree_count + ti];
            if( ti == 0 || sum_err < min_err )
            {
                min_err = sum_err;
                min_idx = ti;
                if( use_1se )
                    min_err_se = sqrt( sum_err*(n - sum_err) );
            }
            else if( sum_err < min_err + min_err_se )
                min_idx = ti;
        }
    }

    pruned_tree_idx = min_idx;
    free_prune_data(data->params.truncate_pruned_tree != 0);

    __END__;

    cvReleaseMat( &err_jk );
    cvReleaseMat( &ab );
    cvReleaseMat( &temp );
}
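// One bottom-up pass over the current subtree (tree index T; a fold-local
// subtree when fold >= 0): recomputes every internal node's tree_risk,
// tree_error and complexity (leaf count), and its
// alpha = (node risk - subtree risk)/(complexity - 1), i.e. the cost of
// collapsing the branch per pruned leaf. Returns the smallest alpha found,
// which becomes the next pruning threshold.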
\r
double CvDTree::update_tree_rnc( int T, int fold )
{
    CvDTreeNode* node = root;
    double min_alpha = DBL_MAX;

    for(;;)
    {
        CvDTreeNode* parent;
        for(;;)
        {
            int t = fold >= 0 ? node->cv_Tn[fold] : node->Tn;
            if( t <= T || !node->left )
            {
                node->complexity = 1;
                node->tree_risk = node->node_risk;
                node->tree_error = 0.;
                if( fold >= 0 )
                {
                    node->tree_risk = node->cv_node_risk[fold];
                    node->tree_error = node->cv_node_error[fold];
                }
                break;
            }
            node = node->left;
        }

        for( parent = node->parent; parent && parent->right == node;
            node = parent, parent = parent->parent )
        {
            parent->complexity += node->complexity;
            parent->tree_risk += node->tree_risk;
            parent->tree_error += node->tree_error;

            parent->alpha = ((fold >= 0 ? parent->cv_node_risk[fold] : parent->node_risk)
                - parent->tree_risk)/(parent->complexity - 1);
            min_alpha = MIN( min_alpha, parent->alpha );
        }

        if( !parent )
            break;

        parent->complexity = node->complexity;
        parent->tree_risk = node->tree_risk;
        parent->tree_error = node->tree_error;
        node = parent->right;
    }

    return min_alpha;
}
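// Marks the branches whose alpha does not exceed min_alpha as pruned at step T
// (by stamping Tn, or cv_Tn[fold] for fold-local pruning). Returns 1 when the
// whole tree collapses into the root, which terminates the pruning sequence.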
\r
int CvDTree::cut_tree( int T, int fold, double min_alpha )
{
    CvDTreeNode* node = root;
    if( !node->left )
        return 1;

    for(;;)
    {
        CvDTreeNode* parent;
        for(;;)
        {
            int t = fold >= 0 ? node->cv_Tn[fold] : node->Tn;
            if( t <= T || !node->left )
                break;
            if( node->alpha <= min_alpha + FLT_EPSILON )
            {
                if( fold >= 0 )
                    node->cv_Tn[fold] = T;
                else
                    node->Tn = T;
                if( node == root )
                    return 1;
                break;
            }
            node = node->left;
        }

        for( parent = node->parent; parent && parent->right == node;
            node = parent, parent = parent->parent )
            ;

        if( !parent )
            break;

        node = parent->right;
    }

    return 0;
}
\r
void CvDTree::free_prune_data(bool cut_tree)
{
    CvDTreeNode* node = root;

    for(;;)
    {
        CvDTreeNode* parent;
        for(;;)
        {
            // do not call cvSetRemoveByPtr( cv_heap, node->cv_Tn )
            // as we will clear the whole cross-validation heap at the end
            node->cv_Tn = 0;
            node->cv_node_error = node->cv_node_risk = 0;
            if( !node->left )
                break;
            node = node->left;
        }

        for( parent = node->parent; parent && parent->right == node;
            node = parent, parent = parent->parent )
        {
            if( cut_tree && parent->Tn <= pruned_tree_idx )
            {
                data->free_node( parent->left );
                data->free_node( parent->right );
                parent->left = parent->right = 0;
            }
        }

        if( !parent )
            break;

        node = parent->right;
    }

    if( data->cv_heap )
        cvClearSet( data->cv_heap );
}
\r
void CvDTree::free_tree()
{
    if( root && data && data->shared )
    {
        pruned_tree_idx = INT_MIN;
        free_prune_data(true);
        data->free_node(root);
        root = 0;
    }
}
\r
#include <ctype.h>
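// Runs the sample down the (possibly pruned) tree. At each node the primary
// split is tried first; if its variable is missing, the surrogate splits are
// tried in turn, and if none applies the sample follows the heavier child.
// Raw categorical values are translated into normalized category indices via
// binary search in cat_map, cached per variable in catbuf.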
\r
CvDTreeNode* CvDTree::predict( const CvMat* _sample,
    const CvMat* _missing, bool preprocessed_input ) const
{
    CvDTreeNode* result = 0;
    int* catbuf = 0;

    CV_FUNCNAME( "CvDTree::predict" );

    __BEGIN__;

    int i, step, mstep = 0;
    const float* sample;
    const uchar* m = 0;
    CvDTreeNode* node = root;
    const int* vtype;
    const int* vidx;
    const int* cmap;
    const int* cofs;

    if( !node )
        CV_ERROR( CV_StsError, "The tree has not been trained yet" );

    if( !CV_IS_MAT(_sample) || CV_MAT_TYPE(_sample->type) != CV_32FC1 ||
        (_sample->cols != 1 && _sample->rows != 1) ||
        (_sample->cols + _sample->rows - 1 != data->var_all && !preprocessed_input) ||
        (_sample->cols + _sample->rows - 1 != data->var_count && preprocessed_input) )
        CV_ERROR( CV_StsBadArg,
        "the input sample must be 1d floating-point vector with the same "
        "number of elements as the total number of variables used for training" );

    sample = _sample->data.fl;
    step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(sample[0]);

    if( data->cat_count && !preprocessed_input ) // cache for categorical variables
    {
        int n = data->cat_count->cols;
        catbuf = (int*)cvStackAlloc(n*sizeof(catbuf[0]));
        for( i = 0; i < n; i++ )
            catbuf[i] = -1;
    }

    if( _missing )
    {
        if( !CV_IS_MAT(_missing) || !CV_IS_MASK_ARR(_missing) ||
            !CV_ARE_SIZES_EQ(_missing, _sample) )
            CV_ERROR( CV_StsBadArg,
            "the missing data mask must be 8-bit vector of the same size as input sample" );
        m = _missing->data.ptr;
        mstep = CV_IS_MAT_CONT(_missing->type) ? 1 : _missing->step/sizeof(m[0]);
    }

    vtype = data->var_type->data.i;
    vidx = data->var_idx && !preprocessed_input ? data->var_idx->data.i : 0;
    cmap = data->cat_map ? data->cat_map->data.i : 0;
    cofs = data->cat_ofs ? data->cat_ofs->data.i : 0;

    while( node->Tn > pruned_tree_idx && node->left )
    {
        CvDTreeSplit* split = node->split;
        int dir = 0;
        for( ; !dir && split != 0; split = split->next )
        {
            int vi = split->var_idx;
            int ci = vtype[vi];
            i = vidx ? vidx[vi] : vi;
            float val = sample[i*step];
            if( m && m[i*mstep] )
                continue;
            if( ci < 0 ) // ordered
                dir = val <= split->ord.c ? -1 : 1;
            else // categorical
            {
                int c;
                if( preprocessed_input )
                    c = cvRound(val);
                else
                {
                    c = catbuf[ci];
                    if( c < 0 )
                    {
                        int a = c = cofs[ci];
                        int b = (ci+1 >= data->cat_ofs->cols) ? data->cat_map->cols : cofs[ci+1];

                        int ival = cvRound(val);
                        if( ival != val )
                            CV_ERROR( CV_StsBadArg,
                            "one of input categorical variable is not an integer" );

                        while( a < b )
                        {
                            c = (a + b) >> 1;
                            if( ival < cmap[c] )
                                b = c;
                            else if( ival > cmap[c] )
                                a = c+1;
                            else
                                break;
                        }

                        if( c < 0 || ival != cmap[c] )
                            continue;

                        catbuf[ci] = c -= cofs[ci];
                    }
                }
                c = ( (c == 65535) && data->is_buf_16u ) ? -1 : c;
                dir = CV_DTREE_CAT_DIR(c, split->subset);
            }

            if( split->inversed )
                dir = -dir;
        }

        if( !dir )
        {
            double diff = node->right->sample_count - node->left->sample_count;
            dir = diff < 0 ? -1 : 1;
        }
        node = dir < 0 ? node->left : node->right;
    }

    result = node;

    __END__;

    return result;
}
\r
const CvMat* CvDTree::get_var_importance()
{
    if( !var_importance )
    {
        CvDTreeNode* node = root;
        double* importance;
        if( !node )
            return 0;
        var_importance = cvCreateMat( 1, data->var_count, CV_64F );
        cvZero( var_importance );
        importance = var_importance->data.db;

        for(;;)
        {
            CvDTreeNode* parent;
            for( ;; node = node->left )
            {
                CvDTreeSplit* split = node->split;

                if( !node->left || node->Tn <= pruned_tree_idx )
                    break;

                for( ; split != 0; split = split->next )
                    importance[split->var_idx] += split->quality;
            }

            for( parent = node->parent; parent && parent->right == node;
                node = parent, parent = parent->parent )
                ;

            if( !parent )
                break;

            node = parent->right;
        }

        cvNormalize( var_importance, var_importance, 1., 0, CV_L1 );
    }

    return var_importance;
}
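// Serialization of a single split. A categorical split is stored as the list
// of categories that go against the default direction (under "in" or
// "not_in"), with the default direction chosen by an ad-hoc compactness rule;
// an ordered split is stored as its threshold under "le" or "gt".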
\r
void CvDTree::write_split( CvFileStorage* fs, CvDTreeSplit* split )
{
    int ci;

    cvStartWriteStruct( fs, 0, CV_NODE_MAP + CV_NODE_FLOW );
    cvWriteInt( fs, "var", split->var_idx );
    cvWriteReal( fs, "quality", split->quality );

    ci = data->get_var_type(split->var_idx);
    if( ci >= 0 ) // split on a categorical var
    {
        int i, n = data->cat_count->data.i[ci], to_right = 0, default_dir;
        for( i = 0; i < n; i++ )
            to_right += CV_DTREE_CAT_DIR(i,split->subset) > 0;

        // ad-hoc rule when to use inverse categorical split notation
        // to achieve more compact and clear representation
        default_dir = to_right <= 1 || to_right <= MIN(3, n/2) || to_right <= n/3 ? -1 : 1;

        cvStartWriteStruct( fs, default_dir*(split->inversed ? -1 : 1) > 0 ?
                            "in" : "not_in", CV_NODE_SEQ+CV_NODE_FLOW );

        for( i = 0; i < n; i++ )
        {
            int dir = CV_DTREE_CAT_DIR(i,split->subset);
            if( dir*default_dir < 0 )
                cvWriteInt( fs, 0, i );
        }
        cvEndWriteStruct( fs );
    }
    else
        cvWriteReal( fs, !split->inversed ? "le" : "gt", split->ord.c );

    cvEndWriteStruct( fs );
}
\r
void CvDTree::write_node( CvFileStorage* fs, CvDTreeNode* node )
{
    CvDTreeSplit* split;

    cvStartWriteStruct( fs, 0, CV_NODE_MAP );

    cvWriteInt( fs, "depth", node->depth );
    cvWriteInt( fs, "sample_count", node->sample_count );
    cvWriteReal( fs, "value", node->value );

    if( data->is_classifier )
        cvWriteInt( fs, "norm_class_idx", node->class_idx );

    cvWriteInt( fs, "Tn", node->Tn );
    cvWriteInt( fs, "complexity", node->complexity );
    cvWriteReal( fs, "alpha", node->alpha );
    cvWriteReal( fs, "node_risk", node->node_risk );
    cvWriteReal( fs, "tree_risk", node->tree_risk );
    cvWriteReal( fs, "tree_error", node->tree_error );

    if( node->left )
    {
        cvStartWriteStruct( fs, "splits", CV_NODE_SEQ );

        for( split = node->split; split != 0; split = split->next )
            write_split( fs, split );

        cvEndWriteStruct( fs );
    }

    cvEndWriteStruct( fs );
}
\r
void CvDTree::write_tree_nodes( CvFileStorage* fs )
{
    //CV_FUNCNAME( "CvDTree::write_tree_nodes" );

    __BEGIN__;

    CvDTreeNode* node = root;

    // traverse the tree and save all the nodes in depth-first order
    for(;;)
    {
        CvDTreeNode* parent;
        for(;;)
        {
            write_node( fs, node );
            if( !node->left )
                break;
            node = node->left;
        }

        for( parent = node->parent; parent && parent->right == node;
            node = parent, parent = parent->parent )
            ;

        if( !parent )
            break;

        node = parent->right;
    }

    __END__;
}
\r
void CvDTree::write( CvFileStorage* fs, const char* name )
{
    //CV_FUNCNAME( "CvDTree::write" );

    __BEGIN__;

    cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_ML_TREE );

    get_var_importance();
    data->write_params( fs );
    if( var_importance )
        cvWrite( fs, "var_importance", var_importance );
    write( fs );

    cvEndWriteStruct( fs );

    __END__;
}
\r
void CvDTree::write( CvFileStorage* fs )
{
    //CV_FUNCNAME( "CvDTree::write" );

    __BEGIN__;

    cvWriteInt( fs, "best_tree_idx", pruned_tree_idx );

    cvStartWriteStruct( fs, "nodes", CV_NODE_SEQ );
    write_tree_nodes( fs );
    cvEndWriteStruct( fs );

    __END__;
}
\r
CvDTreeSplit* CvDTree::read_split( CvFileStorage* fs, CvFileNode* fnode )
{
    CvDTreeSplit* split = 0;

    CV_FUNCNAME( "CvDTree::read_split" );

    __BEGIN__;

    int vi, ci;

    if( !fnode || CV_NODE_TYPE(fnode->tag) != CV_NODE_MAP )
        CV_ERROR( CV_StsParseError, "some of the splits are not stored properly" );

    vi = cvReadIntByName( fs, fnode, "var", -1 );
    if( (unsigned)vi >= (unsigned)data->var_count )
        CV_ERROR( CV_StsOutOfRange, "Split variable index is out of range" );

    ci = data->get_var_type(vi);
    if( ci >= 0 ) // split on categorical var
    {
        int i, n = data->cat_count->data.i[ci], inversed = 0, val;
        CvSeqReader reader;
        CvFileNode* inseq;
        split = data->new_split_cat( vi, 0 );
        inseq = cvGetFileNodeByName( fs, fnode, "in" );
        if( !inseq )
        {
            inseq = cvGetFileNodeByName( fs, fnode, "not_in" );
            inversed = 1;
        }
        if( !inseq ||
            (CV_NODE_TYPE(inseq->tag) != CV_NODE_SEQ && CV_NODE_TYPE(inseq->tag) != CV_NODE_INT))
            CV_ERROR( CV_StsParseError,
            "Either 'in' or 'not_in' tags should be inside a categorical split data" );

        if( CV_NODE_TYPE(inseq->tag) == CV_NODE_INT )
        {
            val = inseq->data.i;
            if( (unsigned)val >= (unsigned)n )
                CV_ERROR( CV_StsOutOfRange, "some of in/not_in elements are out of range" );

            split->subset[val >> 5] |= 1 << (val & 31);
        }
        else
        {
            cvStartReadSeq( inseq->data.seq, &reader );

            for( i = 0; i < reader.seq->total; i++ )
            {
                CvFileNode* inode = (CvFileNode*)reader.ptr;
                val = inode->data.i;
                if( CV_NODE_TYPE(inode->tag) != CV_NODE_INT || (unsigned)val >= (unsigned)n )
                    CV_ERROR( CV_StsOutOfRange, "some of in/not_in elements are out of range" );

                split->subset[val >> 5] |= 1 << (val & 31);
                CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
            }
        }

        // for categorical splits we do not use inversed splits,
        // instead we inverse the variable set in the split
        if( inversed )
            for( i = 0; i < (n + 31) >> 5; i++ )
                split->subset[i] ^= -1;
    }
    else
    {
        CvFileNode* cmp_node;
        split = data->new_split_ord( vi, 0, 0, 0, 0 );

        cmp_node = cvGetFileNodeByName( fs, fnode, "le" );
        if( !cmp_node )
        {
            cmp_node = cvGetFileNodeByName( fs, fnode, "gt" );
            split->inversed = 1;
        }

        split->ord.c = (float)cvReadReal( cmp_node );
    }

    split->quality = (float)cvReadRealByName( fs, fnode, "quality" );

    __END__;

    return split;
}
\r
CvDTreeNode* CvDTree::read_node( CvFileStorage* fs, CvFileNode* fnode, CvDTreeNode* parent )
{
    CvDTreeNode* node = 0;

    CV_FUNCNAME( "CvDTree::read_node" );

    __BEGIN__;

    CvFileNode* splits;
    int i, depth;

    if( !fnode || CV_NODE_TYPE(fnode->tag) != CV_NODE_MAP )
        CV_ERROR( CV_StsParseError, "some of the tree elements are not stored properly" );

    CV_CALL( node = data->new_node( parent, 0, 0, 0 ));
    depth = cvReadIntByName( fs, fnode, "depth", -1 );
    if( depth != node->depth )
        CV_ERROR( CV_StsParseError, "incorrect node depth" );

    node->sample_count = cvReadIntByName( fs, fnode, "sample_count" );
    node->value = cvReadRealByName( fs, fnode, "value" );
    if( data->is_classifier )
        node->class_idx = cvReadIntByName( fs, fnode, "norm_class_idx" );

    node->Tn = cvReadIntByName( fs, fnode, "Tn" );
    node->complexity = cvReadIntByName( fs, fnode, "complexity" );
    node->alpha = cvReadRealByName( fs, fnode, "alpha" );
    node->node_risk = cvReadRealByName( fs, fnode, "node_risk" );
    node->tree_risk = cvReadRealByName( fs, fnode, "tree_risk" );
    node->tree_error = cvReadRealByName( fs, fnode, "tree_error" );

    splits = cvGetFileNodeByName( fs, fnode, "splits" );
    if( splits )
    {
        CvSeqReader reader;
        CvDTreeSplit* last_split = 0;

        if( CV_NODE_TYPE(splits->tag) != CV_NODE_SEQ )
            CV_ERROR( CV_StsParseError, "splits tag must be stored as a sequence" );

        cvStartReadSeq( splits->data.seq, &reader );
        for( i = 0; i < reader.seq->total; i++ )
        {
            CvDTreeSplit* split;
            CV_CALL( split = read_split( fs, (CvFileNode*)reader.ptr ));
            if( !last_split )
                node->split = last_split = split;
            else
                last_split = last_split->next = split;

            CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
        }
    }

    __END__;

    return node;
}
\r
void CvDTree::read_tree_nodes( CvFileStorage* fs, CvFileNode* fnode )
{
    CV_FUNCNAME( "CvDTree::read_tree_nodes" );

    __BEGIN__;

    CvSeqReader reader;
    CvDTreeNode _root;
    CvDTreeNode* parent = &_root;
    int i;
    parent->left = parent->right = parent->parent = 0;

    cvStartReadSeq( fnode->data.seq, &reader );

    for( i = 0; i < reader.seq->total; i++ )
    {
        CvDTreeNode* node;

        CV_CALL( node = read_node( fs, (CvFileNode*)reader.ptr, parent != &_root ? parent : 0 ));
        if( !parent->left )
            parent->left = node;
        else
            parent->right = node;

        if( node->split )
            parent = node;
        else
        {
            while( parent && parent->right )
                parent = parent->parent;
        }

        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
    }

    root = _root.left;

    __END__;
}
\r
void CvDTree::read( CvFileStorage* fs, CvFileNode* fnode )
{
    CvDTreeTrainData* _data = new CvDTreeTrainData();
    _data->read_params( fs, fnode );

    read( fs, fnode, _data );
    get_var_importance();
}
\r
// a special entry point for reading weak decision trees from the tree ensembles
void CvDTree::read( CvFileStorage* fs, CvFileNode* node, CvDTreeTrainData* _data )
{
    CV_FUNCNAME( "CvDTree::read" );

    __BEGIN__;

    CvFileNode* tree_nodes;

    clear();
    data = _data;

    tree_nodes = cvGetFileNodeByName( fs, node, "nodes" );
    if( !tree_nodes || CV_NODE_TYPE(tree_nodes->tag) != CV_NODE_SEQ )
        CV_ERROR( CV_StsParseError, "nodes tag is missing" );

    pruned_tree_idx = cvReadIntByName( fs, node, "best_tree_idx", -1 );
    read_tree_nodes( fs, tree_nodes );

    __END__;
}
\r
/* End of file. */
\r