1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
47 /****************************************************************************************\
48 Basic Morphological Operations: Erosion & Dilation
49 \****************************************************************************************/
// Scalar element-wise operations used by the morphology filters:
// MinOp implements erosion (pixel-wise minimum), MaxOp dilation (maximum).
54 template<typename T> struct MinOp
59 T operator ()(T a, T b) const { return std::min(a, b); }
62 template<typename T> struct MaxOp
67 T operator ()(T a, T b) const { return std::max(a, b); }
// Branch-free min/max for 8-bit values: CV_FAST_CAST_8U presumably clamps the
// difference so the add/sub yields min/max without a conditional — TODO confirm
// against the CV_FAST_CAST_8U definition (not visible in this excerpt).
72 #define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))
73 #define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a)))
// uchar specializations route through the branch-free macros above.
75 template<> inline uchar MinOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); }
76 template<> inline uchar MaxOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); }
// SSE2-vectorized horizontal (row) morphology pass for integer pixel types.
// VecUpdate supplies the packed min/max intrinsic; ksize/anchor come from the
// structuring element. Returns early (bails out) when SSE2 is unavailable.
80 template<class VecUpdate> struct MorphRowIVec
82 enum { ESZ = VecUpdate::ESZ };
84 MorphRowIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
85 int operator()(const uchar* src, uchar* dst, int width, int cn) const
87 if( !checkHardwareSupport(CV_CPU_SSE2) )
// _ksize is the kernel footprint in bytes/elements across interleaved channels.
91 int i, k, _ksize = ksize*cn;
// Main loop: reduce a 16-byte vector across the kernel window (step = cn so
// each channel only mixes with itself).
95 for( i = 0; i <= width - 16; i += 16 )
97 __m128i s = _mm_loadu_si128((const __m128i*)(src + i));
98 for( k = cn; k < _ksize; k += cn )
100 __m128i x = _mm_loadu_si128((const __m128i*)(src + i + k));
103 _mm_storeu_si128((__m128i*)(dst + i), s);
// Tail loop: same reduction 4 bytes at a time via scalar<->vector moves.
106 for( ; i <= width - 4; i += 4 )
108 __m128i s = _mm_cvtsi32_si128(*(const int*)(src + i));
109 for( k = cn; k < _ksize; k += cn )
111 __m128i x = _mm_cvtsi32_si128(*(const int*)(src + i + k));
114 *(int*)(dst + i) = _mm_cvtsi128_si32(s);
// NOTE(review): presumably returns i (count of vectorized elements) so the
// scalar fallback can finish the row — return statement elided in this excerpt.
// SSE-vectorized horizontal (row) morphology pass for 32-bit float pixels.
// Same structure as MorphRowIVec but operates on __m128 (4 floats per step).
124 template<class VecUpdate> struct MorphRowFVec
126 MorphRowFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
127 int operator()(const uchar* src, uchar* dst, int width, int cn) const
// Requires plain SSE only (float min/max intrinsics).
129 if( !checkHardwareSupport(CV_CPU_SSE) )
132 int i, k, _ksize = ksize*cn;
136 for( i = 0; i <= width - 4; i += 4 )
138 __m128 s = _mm_loadu_ps((const float*)src + i);
// Stride cn keeps each channel's reduction independent.
139 for( k = cn; k < _ksize; k += cn )
141 __m128 x = _mm_loadu_ps((const float*)src + i + k);
144 _mm_storeu_ps((float*)dst + i, s);
// SSE2-vectorized vertical (column) morphology pass for integer types.
// src is an array of row pointers (count + ksize - 1 of them); the filter
// reduces ksize consecutive rows into each output row.
154 template<class VecUpdate> struct MorphColumnIVec
156 enum { ESZ = VecUpdate::ESZ };
158 MorphColumnIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
159 int operator()(const uchar** src, uchar* dst, int dststep, int count, int width) const
161 if( !checkHardwareSupport(CV_CPU_SSE2) )
164 int i = 0, k, _ksize = ksize;
// All row pointers must be 16-byte aligned: the loops below use aligned loads.
168 for( i = 0; i < count + ksize - 1; i++ )
169 CV_Assert( ((size_t)src[i] & 15) == 0 );
// Two output rows per iteration: the reduction over the ksize-2 shared middle
// rows (src[1]..src[_ksize-1]) is computed once and combined with src[0] for
// the first output row and src[_ksize] for the second.
171 for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
173 for( i = 0; i <= width - 32; i += 32 )
175 const uchar* sptr = src[1] + i;
176 __m128i s0 = _mm_load_si128((const __m128i*)sptr);
177 __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
180 for( k = 2; k < _ksize; k++ )
183 x0 = _mm_load_si128((const __m128i*)sptr);
184 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
185 s0 = updateOp(s0, x0);
186 s1 = updateOp(s1, x1);
// First output row: shared partial result combined with the top row.
190 x0 = _mm_load_si128((const __m128i*)sptr);
191 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
192 _mm_storeu_si128((__m128i*)(dst + i), updateOp(s0, x0));
193 _mm_storeu_si128((__m128i*)(dst + i + 16), updateOp(s1, x1));
// Second output row: shared partial result combined with the bottom row.
196 x0 = _mm_load_si128((const __m128i*)sptr);
197 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
198 _mm_storeu_si128((__m128i*)(dst + dststep + i), updateOp(s0, x0));
199 _mm_storeu_si128((__m128i*)(dst + dststep + i + 16), updateOp(s1, x1));
// 8-byte tail for the paired-rows path.
202 for( ; i <= width - 8; i += 8 )
204 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[1] + i)), x0;
206 for( k = 2; k < _ksize; k++ )
208 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
209 s0 = updateOp(s0, x0);
212 x0 = _mm_loadl_epi64((const __m128i*)(src[0] + i));
213 _mm_storel_epi64((__m128i*)(dst + i), updateOp(s0, x0));
214 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
215 _mm_storel_epi64((__m128i*)(dst + dststep + i), updateOp(s0, x0));
// Remaining single output rows (also covers _ksize == 1).
219 for( ; count > 0; count--, dst += dststep, src++ )
221 for( i = 0; i <= width - 32; i += 32 )
223 const uchar* sptr = src[0] + i;
224 __m128i s0 = _mm_load_si128((const __m128i*)sptr);
225 __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
228 for( k = 1; k < _ksize; k++ )
231 x0 = _mm_load_si128((const __m128i*)sptr);
232 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
233 s0 = updateOp(s0, x0);
234 s1 = updateOp(s1, x1);
236 _mm_storeu_si128((__m128i*)(dst + i), s0);
237 _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
240 for( ; i <= width - 8; i += 8 )
242 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;
244 for( k = 1; k < _ksize; k++ )
246 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
247 s0 = updateOp(s0, x0);
249 _mm_storel_epi64((__m128i*)(dst + i), s0);
260 template<class VecUpdate> struct MorphColumnFVec
262 MorphColumnFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
263 int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const
265 if( !checkHardwareSupport(CV_CPU_SSE) )
268 int i = 0, k, _ksize = ksize;
271 for( i = 0; i < count + ksize - 1; i++ )
272 CV_Assert( ((size_t)_src[i] & 15) == 0 );
274 const float** src = (const float**)_src;
275 float* dst = (float*)_dst;
276 dststep /= sizeof(dst[0]);
278 for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
280 for( i = 0; i <= width - 16; i += 16 )
282 const float* sptr = src[1] + i;
283 __m128 s0 = _mm_load_ps(sptr);
284 __m128 s1 = _mm_load_ps(sptr + 4);
285 __m128 s2 = _mm_load_ps(sptr + 8);
286 __m128 s3 = _mm_load_ps(sptr + 12);
287 __m128 x0, x1, x2, x3;
289 for( k = 2; k < _ksize; k++ )
292 x0 = _mm_load_ps(sptr);
293 x1 = _mm_load_ps(sptr + 4);
294 s0 = updateOp(s0, x0);
295 s1 = updateOp(s1, x1);
296 x2 = _mm_load_ps(sptr + 8);
297 x3 = _mm_load_ps(sptr + 12);
298 s2 = updateOp(s2, x2);
299 s3 = updateOp(s3, x3);
303 x0 = _mm_load_ps(sptr);
304 x1 = _mm_load_ps(sptr + 4);
305 x2 = _mm_load_ps(sptr + 8);
306 x3 = _mm_load_ps(sptr + 12);
307 _mm_storeu_ps(dst + i, updateOp(s0, x0));
308 _mm_storeu_ps(dst + i + 4, updateOp(s1, x1));
309 _mm_storeu_ps(dst + i + 8, updateOp(s2, x2));
310 _mm_storeu_ps(dst + i + 12, updateOp(s3, x3));
313 x0 = _mm_load_ps(sptr);
314 x1 = _mm_load_ps(sptr + 4);
315 x2 = _mm_load_ps(sptr + 8);
316 x3 = _mm_load_ps(sptr + 12);
317 _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
318 _mm_storeu_ps(dst + dststep + i + 4, updateOp(s1, x1));
319 _mm_storeu_ps(dst + dststep + i + 8, updateOp(s2, x2));
320 _mm_storeu_ps(dst + dststep + i + 12, updateOp(s3, x3));
323 for( ; i <= width - 4; i += 4 )
325 __m128 s0 = _mm_load_ps(src[1] + i), x0;
327 for( k = 2; k < _ksize; k++ )
329 x0 = _mm_load_ps(src[k] + i);
330 s0 = updateOp(s0, x0);
333 x0 = _mm_load_ps(src[0] + i);
334 _mm_storeu_ps(dst + i, updateOp(s0, x0));
335 x0 = _mm_load_ps(src[k] + i);
336 _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
340 for( ; count > 0; count--, dst += dststep, src++ )
342 for( i = 0; i <= width - 16; i += 16 )
344 const float* sptr = src[0] + i;
345 __m128 s0 = _mm_load_ps(sptr);
346 __m128 s1 = _mm_load_ps(sptr + 4);
347 __m128 s2 = _mm_load_ps(sptr + 8);
348 __m128 s3 = _mm_load_ps(sptr + 12);
349 __m128 x0, x1, x2, x3;
351 for( k = 1; k < _ksize; k++ )
354 x0 = _mm_load_ps(sptr);
355 x1 = _mm_load_ps(sptr + 4);
356 s0 = updateOp(s0, x0);
357 s1 = updateOp(s1, x1);
358 x2 = _mm_load_ps(sptr + 8);
359 x3 = _mm_load_ps(sptr + 12);
360 s2 = updateOp(s2, x2);
361 s3 = updateOp(s3, x3);
363 _mm_storeu_ps(dst + i, s0);
364 _mm_storeu_ps(dst + i + 4, s1);
365 _mm_storeu_ps(dst + i + 8, s2);
366 _mm_storeu_ps(dst + i + 12, s3);
369 for( i = 0; i <= width - 4; i += 4 )
371 __m128 s0 = _mm_load_ps(src[0] + i), x0;
372 for( k = 1; k < _ksize; k++ )
374 x0 = _mm_load_ps(src[k] + i);
375 s0 = updateOp(s0, x0);
377 _mm_storeu_ps(dst + i, s0);
// SSE2-vectorized 2D (non-separable) morphology kernel for integer types:
// reduces nz pre-offset row pointers (one per non-zero kernel element) into
// one output row. Uses unaligned loads since offsets are arbitrary.
388 template<class VecUpdate> struct MorphIVec
390 enum { ESZ = VecUpdate::ESZ };
392 int operator()(uchar** src, int nz, uchar* dst, int width) const
394 if( !checkHardwareSupport(CV_CPU_SSE2) )
// 32 bytes (two XMM registers) per iteration.
401 for( i = 0; i <= width - 32; i += 32 )
403 const uchar* sptr = src[0] + i;
404 __m128i s0 = _mm_loadu_si128((const __m128i*)sptr);
405 __m128i s1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
408 for( k = 1; k < nz; k++ )
411 x0 = _mm_loadu_si128((const __m128i*)sptr);
412 x1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
413 s0 = updateOp(s0, x0);
414 s1 = updateOp(s1, x1);
416 _mm_storeu_si128((__m128i*)(dst + i), s0);
417 _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
// 8-byte tail.
420 for( ; i <= width - 8; i += 8 )
422 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;
424 for( k = 1; k < nz; k++ )
426 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
427 s0 = updateOp(s0, x0);
429 _mm_storel_epi64((__m128i*)(dst + i), s0);
// SSE-vectorized 2D (non-separable) morphology kernel for 32-bit floats.
// Unlike the integer version this one also finishes the scalar remainder
// itself with _mm_load_ss/_mm_store_ss, so it processes the full row.
437 template<class VecUpdate> struct MorphFVec
439 int operator()(uchar** _src, int nz, uchar* _dst, int width) const
441 if( !checkHardwareSupport(CV_CPU_SSE) )
444 const float** src = (const float**)_src;
445 float* dst = (float*)_dst;
// 16 floats (four XMM registers) per iteration.
449 for( i = 0; i <= width - 16; i += 16 )
451 const float* sptr = src[0] + i;
452 __m128 s0 = _mm_loadu_ps(sptr);
453 __m128 s1 = _mm_loadu_ps(sptr + 4);
454 __m128 s2 = _mm_loadu_ps(sptr + 8);
455 __m128 s3 = _mm_loadu_ps(sptr + 12);
456 __m128 x0, x1, x2, x3;
458 for( k = 1; k < nz; k++ )
461 x0 = _mm_loadu_ps(sptr);
462 x1 = _mm_loadu_ps(sptr + 4);
463 x2 = _mm_loadu_ps(sptr + 8);
464 x3 = _mm_loadu_ps(sptr + 12);
465 s0 = updateOp(s0, x0);
466 s1 = updateOp(s1, x1);
467 s2 = updateOp(s2, x2);
468 s3 = updateOp(s3, x3);
470 _mm_storeu_ps(dst + i, s0);
471 _mm_storeu_ps(dst + i + 4, s1);
472 _mm_storeu_ps(dst + i + 8, s2);
473 _mm_storeu_ps(dst + i + 12, s3);
// 4-float tail.
476 for( ; i <= width - 4; i += 4 )
478 __m128 s0 = _mm_loadu_ps(src[0] + i), x0;
480 for( k = 1; k < nz; k++ )
482 x0 = _mm_loadu_ps(src[k] + i);
483 s0 = updateOp(s0, x0);
485 _mm_storeu_ps(dst + i, s0);
// Scalar remainder, one float at a time through the low lane.
488 for( ; i < width; i++ )
490 __m128 s0 = _mm_load_ss(src[0] + i), x0;
492 for( k = 1; k < nz; k++ )
494 x0 = _mm_load_ss(src[k] + i);
495 s0 = updateOp(s0, x0);
497 _mm_store_ss(dst + i, s0);
// Packed min/max functors plugged into the Morph*Vec templates above as
// VecUpdate. 8u and 16s have native SSE2 intrinsics; unsigned 16-bit min/max
// are emulated with saturating add/subtract (SSE2 lacks _mm_min/max_epu16).
507 __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); }
512 __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); }
// min(a,b) for u16: a - max(a-b, 0) == min(a,b) via saturating subtraction.
517 __m128i operator()(const __m128i& a, const __m128i& b) const
518 { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); }
// max(a,b) for u16: max(a-b, 0) + b == max(a,b) via saturating add/sub.
523 __m128i operator()(const __m128i& a, const __m128i& b) const
524 { return _mm_adds_epu16(_mm_subs_epu16(a,b), b); }
529 __m128i operator()(const __m128i& a, const __m128i& b) const
530 { return _mm_min_epi16(a, b); }
535 __m128i operator()(const __m128i& a, const __m128i& b) const
536 { return _mm_max_epi16(a, b); }
538 struct VMin32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_min_ps(a,b); }};
539 struct VMax32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_max_ps(a,b); }};
// Concrete vectorized filter types (SSE path): erosion = min, dilation = max,
// instantiated per depth for the row, column and 2D kernels.
541 typedef MorphRowIVec<VMin8u> ErodeRowVec8u;
542 typedef MorphRowIVec<VMax8u> DilateRowVec8u;
543 typedef MorphRowIVec<VMin16u> ErodeRowVec16u;
544 typedef MorphRowIVec<VMax16u> DilateRowVec16u;
545 typedef MorphRowIVec<VMin16s> ErodeRowVec16s;
546 typedef MorphRowIVec<VMax16s> DilateRowVec16s;
547 typedef MorphRowFVec<VMin32f> ErodeRowVec32f;
548 typedef MorphRowFVec<VMax32f> DilateRowVec32f;
550 typedef MorphColumnIVec<VMin8u> ErodeColumnVec8u;
551 typedef MorphColumnIVec<VMax8u> DilateColumnVec8u;
552 typedef MorphColumnIVec<VMin16u> ErodeColumnVec16u;
553 typedef MorphColumnIVec<VMax16u> DilateColumnVec16u;
554 typedef MorphColumnIVec<VMin16s> ErodeColumnVec16s;
555 typedef MorphColumnIVec<VMax16s> DilateColumnVec16s;
556 typedef MorphColumnFVec<VMin32f> ErodeColumnVec32f;
557 typedef MorphColumnFVec<VMax32f> DilateColumnVec32f;
559 typedef MorphIVec<VMin8u> ErodeVec8u;
560 typedef MorphIVec<VMax8u> DilateVec8u;
561 typedef MorphIVec<VMin16u> ErodeVec16u;
562 typedef MorphIVec<VMax16u> DilateVec16u;
563 typedef MorphIVec<VMin16s> ErodeVec16s;
564 typedef MorphIVec<VMax16s> DilateVec16s;
565 typedef MorphFVec<VMin32f> ErodeVec32f;
566 typedef MorphFVec<VMax32f> DilateVec32f;
// No-op stand-ins used when SIMD is compiled out: operator() returns 0
// ("zero elements vectorized"), so the scalar loops process the whole row.
572 MorphRowNoVec(int, int) {}
573 int operator()(const uchar*, uchar*, int, int) const { return 0; }
576 struct MorphColumnNoVec
578 MorphColumnNoVec(int, int) {}
579 int operator()(const uchar**, uchar*, int, int, int) const { return 0; }
584 int operator()(uchar**, int, uchar*, int) const { return 0; }
// Non-SIMD build: all vectorized filter aliases collapse to the no-op stubs,
// keeping the MorphRowFilter/MorphColumnFilter/MorphFilter templates unchanged.
587 typedef MorphRowNoVec ErodeRowVec8u;
588 typedef MorphRowNoVec DilateRowVec8u;
589 typedef MorphRowNoVec ErodeRowVec16u;
590 typedef MorphRowNoVec DilateRowVec16u;
591 typedef MorphRowNoVec ErodeRowVec16s;
592 typedef MorphRowNoVec DilateRowVec16s;
593 typedef MorphRowNoVec ErodeRowVec32f;
594 typedef MorphRowNoVec DilateRowVec32f;
596 typedef MorphColumnNoVec ErodeColumnVec8u;
597 typedef MorphColumnNoVec DilateColumnVec8u;
598 typedef MorphColumnNoVec ErodeColumnVec16u;
599 typedef MorphColumnNoVec DilateColumnVec16u;
600 typedef MorphColumnNoVec ErodeColumnVec16s;
601 typedef MorphColumnNoVec DilateColumnVec16s;
602 typedef MorphColumnNoVec ErodeColumnVec32f;
603 typedef MorphColumnNoVec DilateColumnVec32f;
605 typedef MorphNoVec ErodeVec8u;
606 typedef MorphNoVec DilateVec8u;
607 typedef MorphNoVec ErodeVec16u;
608 typedef MorphNoVec DilateVec16u;
609 typedef MorphNoVec ErodeVec16s;
610 typedef MorphNoVec DilateVec16s;
611 typedef MorphNoVec ErodeVec32f;
612 typedef MorphNoVec DilateVec32f;
// Generic scalar row filter: Op is the element-wise min/max functor, VecOp the
// matching SIMD fast path. vecOp handles the first i0 elements; the scalar
// loops finish the rest, unrolled two-wide sharing the partial reduction m.
616 template<class Op, class VecOp> struct MorphRowFilter : public BaseRowFilter
618 typedef typename Op::rtype T;
620 MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
626 void operator()(const uchar* src, uchar* dst, int width, int cn)
628 int i, j, k, _ksize = ksize*cn;
629 const T* S = (const T*)src;
// Degenerate kernel: presumably a plain copy loop — TODO confirm, the loop
// body is elided in this excerpt.
635 for( i = 0; i < width*cn; i++ )
// SIMD fast path covers [0, i0); scalar code resumes at i0.
640 int i0 = vecOp(src, dst, width, cn);
// One pass per channel; S/D advance so in-channel stride is cn.
643 for( k = 0; k < cn; k++, S++, D++ )
645 for( i = i0; i <= width - cn*2; i += cn*2 )
// m is the reduction over the window shared by both unrolled outputs.
649 for( j = cn*2; j < _ksize; j += cn )
652 D[i+cn] = op(m, s[j]);
// Tail: one output at a time.
655 for( ; i < width; i += cn )
659 for( j = cn; j < _ksize; j += cn )
// Generic scalar column filter, same pairing of scalar Op + SIMD VecOp.
// Mirrors the vectorized column pass: two output rows share the reduction
// over the middle source rows, then leftover rows are handled one at a time.
670 template<class Op, class VecOp> struct MorphColumnFilter : public BaseColumnFilter
672 typedef typename Op::rtype T;
674 MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
680 void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width)
682 int i, k, _ksize = ksize;
683 const T** src = (const T**)_src;
// SIMD fast path covers columns [0, i0); scalar loops start at i0.
687 int i0 = vecOp(_src, dst, dststep, count, width);
// Convert byte stride to T-element stride.
688 dststep /= sizeof(D[0]);
// Paired output rows sharing the reduction over src[1..ksize-1].
690 for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 )
692 for( i = i0; i <= width - 4; i += 4 )
694 const T* sptr = src[1] + i;
695 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
697 for( k = 2; k < _ksize; k++ )
700 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
701 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
// First output row: combine shared result with the top source row.
705 D[i] = op(s0, sptr[0]);
706 D[i+1] = op(s1, sptr[1]);
707 D[i+2] = op(s2, sptr[2]);
708 D[i+3] = op(s3, sptr[3]);
// Second output row: combine shared result with the bottom source row.
711 D[i+dststep] = op(s0, sptr[0]);
712 D[i+dststep+1] = op(s1, sptr[1]);
713 D[i+dststep+2] = op(s2, sptr[2]);
714 D[i+dststep+3] = op(s3, sptr[3]);
// Per-column tail for the paired-rows path.
717 for( ; i < width; i++ )
721 for( k = 2; k < _ksize; k++ )
722 s0 = op(s0, src[k][i]);
724 D[i] = op(s0, src[0][i]);
725 D[i+dststep] = op(s0, src[k][i]);
// Remaining single output rows (also covers _ksize == 1).
729 for( ; count > 0; count--, D += dststep, src++ )
731 for( i = i0; i <= width - 4; i += 4 )
733 const T* sptr = src[0] + i;
734 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
736 for( k = 1; k < _ksize; k++ )
739 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
740 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
743 D[i] = s0; D[i+1] = s1;
744 D[i+2] = s2; D[i+3] = s3;
747 for( ; i < width; i++ )
750 for( k = 1; k < _ksize; k++ )
751 s0 = op(s0, src[k][i]);
// Generic 2D (non-separable) morphology filter for arbitrary structuring
// elements: coords holds the (x,y) offsets of the kernel's non-zero cells,
// ptrs the per-row source pointers derived from them.
761 template<class Op, class VecOp> struct MorphFilter : BaseFilter
763 typedef typename Op::rtype T;
765 MorphFilter( const Mat& _kernel, Point _anchor )
768 ksize = _kernel.size();
769 CV_Assert( _kernel.type() == CV_8U );
771 vector<uchar> coeffs; // we do not really need the values of non-zero
772 // kernel elements, just their locations
773 preprocess2DKernel( _kernel, coords, coeffs );
774 ptrs.resize( coords.size() );
777 void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn)
779 const Point* pt = &coords[0];
780 const T** kp = (const T**)&ptrs[0];
781 int i, k, nz = (int)coords.size();
785 for( ; count > 0; count--, dst += dststep, src++ )
// Resolve each non-zero kernel cell to a pointer into the proper source row.
789 for( k = 0; k < nz; k++ )
790 kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn;
// SIMD fast path covers [0, i); scalar loops finish the row.
792 i = vecOp(&ptrs[0], nz, dst, width);
// Four columns at a time, reducing across all nz kernel positions.
794 for( ; i <= width - 4; i += 4 )
796 const T* sptr = kp[0] + i;
797 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
799 for( k = 1; k < nz; k++ )
802 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
803 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
806 D[i] = s0; D[i+1] = s1;
807 D[i+2] = s2; D[i+3] = s3;
// Scalar remainder.
810 for( ; i < width; i++ )
813 for( k = 1; k < nz; k++ )
814 s0 = op(s0, kp[k][i]);
820 vector<Point> coords;
825 /////////////////////////////////// External Interface /////////////////////////////////////
// Factory: builds the horizontal 1D filter for a rectangular structuring
// element. Dispatches on op (erode/dilate) and depth (8U/16U/16S/32F); any
// other depth reaches CV_Error_ below.
827 Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor)
829 int depth = CV_MAT_DEPTH(type);
832 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
833 if( op == MORPH_ERODE )
836 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<uchar>,
837 ErodeRowVec8u>(ksize, anchor));
838 if( depth == CV_16U )
839 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<ushort>,
840 ErodeRowVec16u>(ksize, anchor));
841 if( depth == CV_16S )
842 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<short>,
843 ErodeRowVec16s>(ksize, anchor));
844 if( depth == CV_32F )
845 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<float>,
846 ErodeRowVec32f>(ksize, anchor));
// Dilation: same depth dispatch with the max functors.
851 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<uchar>,
852 DilateRowVec8u>(ksize, anchor));
853 if( depth == CV_16U )
854 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<ushort>,
855 DilateRowVec16u>(ksize, anchor));
856 if( depth == CV_16S )
857 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<short>,
858 DilateRowVec16s>(ksize, anchor));
859 if( depth == CV_32F )
860 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<float>,
861 DilateRowVec32f>(ksize, anchor));
// Unsupported depth.
864 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
865 return Ptr<BaseRowFilter>(0);
// Factory: builds the vertical 1D filter for a rectangular structuring
// element. Same op/depth dispatch as getMorphologyRowFilter.
868 Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
870 int depth = CV_MAT_DEPTH(type);
873 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
874 if( op == MORPH_ERODE )
877 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<uchar>,
878 ErodeColumnVec8u>(ksize, anchor));
879 if( depth == CV_16U )
880 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<ushort>,
881 ErodeColumnVec16u>(ksize, anchor));
882 if( depth == CV_16S )
883 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<short>,
884 ErodeColumnVec16s>(ksize, anchor));
885 if( depth == CV_32F )
886 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<float>,
887 ErodeColumnVec32f>(ksize, anchor));
// Dilation branch.
892 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<uchar>,
893 DilateColumnVec8u>(ksize, anchor));
894 if( depth == CV_16U )
895 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<ushort>,
896 DilateColumnVec16u>(ksize, anchor));
897 if( depth == CV_16S )
898 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<short>,
899 DilateColumnVec16s>(ksize, anchor));
900 if( depth == CV_32F )
901 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<float>,
902 DilateColumnVec32f>(ksize, anchor));
// Unsupported depth.
905 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
906 return Ptr<BaseColumnFilter>(0);
// Factory: builds the 2D filter for an arbitrary (non-rectangular)
// structuring element, normalizing the anchor first.
910 Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel, Point anchor)
912 int depth = CV_MAT_DEPTH(type);
913 anchor = normalizeAnchor(anchor, kernel.size());
914 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
915 if( op == MORPH_ERODE )
918 return Ptr<BaseFilter>(new MorphFilter<MinOp<uchar>, ErodeVec8u>(kernel, anchor));
919 if( depth == CV_16U )
920 return Ptr<BaseFilter>(new MorphFilter<MinOp<ushort>, ErodeVec16u>(kernel, anchor));
921 if( depth == CV_16S )
922 return Ptr<BaseFilter>(new MorphFilter<MinOp<short>, ErodeVec16s>(kernel, anchor));
923 if( depth == CV_32F )
924 return Ptr<BaseFilter>(new MorphFilter<MinOp<float>, ErodeVec32f>(kernel, anchor));
// Dilation branch.
929 return Ptr<BaseFilter>(new MorphFilter<MaxOp<uchar>, DilateVec8u>(kernel, anchor));
930 if( depth == CV_16U )
931 return Ptr<BaseFilter>(new MorphFilter<MaxOp<ushort>, DilateVec16u>(kernel, anchor));
932 if( depth == CV_16S )
933 return Ptr<BaseFilter>(new MorphFilter<MaxOp<short>, DilateVec16s>(kernel, anchor));
934 if( depth == CV_32F )
935 return Ptr<BaseFilter>(new MorphFilter<MaxOp<float>, DilateVec32f>(kernel, anchor));
// Unsupported depth.
938 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
939 return Ptr<BaseFilter>(0);
// Assembles a complete FilterEngine for erosion/dilation. Rectangular
// (all-ones) kernels use the faster separable row+column path; any other
// shape uses the generic 2D filter.
943 Ptr<FilterEngine> createMorphologyFilter( int op, int type, const Mat& kernel,
944 Point anchor, int _rowBorderType, int _columnBorderType,
945 const Scalar& _borderValue )
947 anchor = normalizeAnchor(anchor, kernel.size());
949 Ptr<BaseRowFilter> rowFilter;
950 Ptr<BaseColumnFilter> columnFilter;
951 Ptr<BaseFilter> filter2D;
// A kernel with every cell non-zero is a rectangle => separable.
953 if( countNonZero(kernel) == kernel.rows*kernel.cols )
955 // rectangular structuring element
956 rowFilter = getMorphologyRowFilter(op, type, kernel.cols, anchor.x);
957 columnFilter = getMorphologyColumnFilter(op, type, kernel.rows, anchor.y);
960 filter2D = getMorphologyFilter(op, type, kernel, anchor);
// The "default" constant border is replaced by the operation's identity
// value (+inf-like for erode, 0/-FLT_MAX for dilate) so padding never
// affects the min/max result.
962 Scalar borderValue = _borderValue;
963 if( (_rowBorderType == BORDER_CONSTANT || _columnBorderType == BORDER_CONSTANT) &&
964 borderValue == morphologyDefaultBorderValue() )
966 int depth = CV_MAT_DEPTH(type);
967 CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F );
968 if( op == MORPH_ERODE )
969 borderValue = Scalar::all( depth == CV_8U ? (double)UCHAR_MAX :
970 depth == CV_16U ? (double)USHRT_MAX : (double)FLT_MAX );
972 borderValue = Scalar::all( depth == CV_8U || depth == CV_16U ?
973 0. : (double)-FLT_MAX );
976 return Ptr<FilterEngine>(new FilterEngine(filter2D, rowFilter, columnFilter,
977 type, type, type, _rowBorderType, _columnBorderType, borderValue ));
// Builds a CV_8U structuring-element mask of the requested shape
// (MORPH_RECT / MORPH_CROSS / MORPH_ELLIPSE). Each row is filled by
// computing the non-zero span [j1, j2) for that row.
981 Mat getStructuringElement(int shape, Size ksize, Point anchor)
987 CV_Assert( shape == MORPH_RECT || shape == MORPH_CROSS || shape == MORPH_ELLIPSE );
989 anchor = normalizeAnchor(anchor, ksize);
// 1x1 kernel: every shape degenerates to a rectangle.
991 if( ksize == Size(1,1) )
994 if( shape == MORPH_ELLIPSE )
// inv_r2 = 1/r^2, guarded against a zero vertical radius.
998 inv_r2 = r ? 1./((double)r*r) : 0;
1001 Mat elem(ksize, CV_8U);
1003 for( i = 0; i < ksize.height; i++ )
1005 uchar* ptr = elem.data + i*elem.step;
// RECT rows (and the anchor row of a CROSS) are fully set.
1008 if( shape == MORPH_RECT || (shape == MORPH_CROSS && i == anchor.y) )
// Other CROSS rows only set the anchor column.
1010 else if( shape == MORPH_CROSS )
1011 j1 = anchor.x, j2 = j1 + 1;
// ELLIPSE: horizontal half-width dx from the ellipse equation for row dy.
1015 if( std::abs(dy) <= r )
1017 int dx = saturate_cast<int>(c*std::sqrt((r*r - dy*dy)*inv_r2));
1018 j1 = std::max( c - dx, 0 );
1019 j2 = std::min( c + dx + 1, ksize.width );
// Write the row: zeros, then ones in [j1, j2), then zeros.
1023 for( j = 0; j < j1; j++ )
1025 for( ; j < j2; j++ )
1027 for( ; j < ksize.width; j++ )
// Shared implementation behind erode()/dilate(): normalizes the kernel and
// anchor, builds the filter engine, and applies it `iterations` times.
1034 static void morphOp( int op, const Mat& src, Mat& dst, const Mat& _kernel,
1035 Point anchor, int iterations,
1036 int borderType, const Scalar& borderValue )
// Empty kernel means the default 3x3 rectangle.
1039 Size ksize = _kernel.data ? _kernel.size() : Size(3,3);
1040 anchor = normalizeAnchor(anchor, ksize);
1042 CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
// Zero iterations or a 1x1 kernel: the operation is a no-op (copy).
1044 if( iterations == 0 || _kernel.rows*_kernel.cols == 1 )
1050 dst.create( src.size(), src.type() );
// Default kernel + N iterations == one pass with a (2N+1)x(2N+1) rectangle.
1054 kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
1055 anchor = Point(iterations, iterations);
// Rectangular kernel + N iterations: fold the iterations into one larger
// rectangle (dilation/erosion by a rectangle composes additively).
1058 else if( iterations > 1 && countNonZero(_kernel) == _kernel.rows*_kernel.cols )
1060 anchor = Point(anchor.x*iterations, anchor.y*iterations);
1061 kernel = getStructuringElement(MORPH_RECT,
1062 Size(ksize.width + iterations*(ksize.width-1),
1063 ksize.height + iterations*(ksize.height-1)),
1070 Ptr<FilterEngine> f = createMorphologyFilter(op, src.type(),
1071 kernel, anchor, borderType, borderType, borderValue );
// First pass src->dst, remaining passes in place on dst.
1073 f->apply( src, dst );
1074 for( int i = 1; i < iterations; i++ )
1075 f->apply( dst, dst );
// Public API: erosion == pixel-wise minimum over the structuring element.
1079 void erode( const Mat& src, Mat& dst, const Mat& kernel,
1080 Point anchor, int iterations,
1081 int borderType, const Scalar& borderValue )
1083 morphOp( MORPH_ERODE, src, dst, kernel, anchor, iterations, borderType, borderValue );
// Public API: dilation == pixel-wise maximum over the structuring element.
1087 void dilate( const Mat& src, Mat& dst, const Mat& kernel,
1088 Point anchor, int iterations,
1089 int borderType, const Scalar& borderValue )
1091 morphOp( MORPH_DILATE, src, dst, kernel, anchor, iterations, borderType, borderValue );
// Public API: compound morphology built from erode/dilate pairs:
// open = dilate(erode), close = erode(dilate), gradient = dilate - erode,
// tophat = src - open, blackhat = close - src (the subtractions are in the
// elided lines of this excerpt).
1095 void morphologyEx( const Mat& src, Mat& dst, int op, const Mat& kernel,
1096 Point anchor, int iterations, int borderType,
1097 const Scalar& borderValue )
1103 erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
1106 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
// Opening: erode then dilate (removes small bright specks).
1109 erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
1110 dilate( dst, dst, kernel, anchor, iterations, borderType, borderValue );
// Closing: dilate then erode (fills small dark holes).
1113 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
1114 erode( dst, dst, kernel, anchor, iterations, borderType, borderValue );
1116 case CV_MOP_GRADIENT:
1117 erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
1118 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
// Top-hat needs a temp when src and dst alias, since src is subtracted later.
1122 if( src.data != dst.data )
1124 erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
1125 dilate( temp, temp, kernel, anchor, iterations, borderType, borderValue );
1128 case CV_MOP_BLACKHAT:
1129 if( src.data != dst.data )
1131 dilate( src, temp, kernel, anchor, iterations, borderType, borderValue );
1132 erode( temp, temp, kernel, anchor, iterations, borderType, borderValue );
1136 CV_Error( CV_StsBadArg, "unknown morphological operation" );
// Legacy C API: allocates an IplConvKernel. CV_SHAPE_CUSTOM copies the
// caller-supplied values; other shapes are generated via
// cv::getStructuringElement. The single cvAlloc holds header + values.
1142 CV_IMPL IplConvKernel *
1143 cvCreateStructuringElementEx( int cols, int rows,
1144 int anchorX, int anchorY,
1145 int shape, int *values )
1147 cv::Size ksize = cv::Size(cols, rows);
1148 cv::Point anchor = cv::Point(anchorX, anchorY);
1149 CV_Assert( cols > 0 && rows > 0 && anchor.inside(cv::Rect(0,0,cols,rows)) &&
1150 (shape != CV_SHAPE_CUSTOM || values != 0));
1152 int i, size = rows * cols;
// Header and the int value array live in one allocation (+32 slack bytes).
1153 int element_size = sizeof(IplConvKernel) + size*sizeof(int);
1154 IplConvKernel *element = (IplConvKernel*)cvAlloc(element_size + 32);
1156 element->nCols = cols;
1157 element->nRows = rows;
1158 element->anchorX = anchorX;
1159 element->anchorY = anchorY;
1160 element->nShiftR = shape < CV_SHAPE_ELLIPSE ? shape : CV_SHAPE_CUSTOM;
// values array starts right after the header.
1161 element->values = (int*)(element + 1);
1163 if( shape == CV_SHAPE_CUSTOM )
1165 for( i = 0; i < size; i++ )
1166 element->values[i] = values[i];
// Non-custom shapes: generate the mask and copy its bytes into the int array.
1170 cv::Mat elem = cv::getStructuringElement(shape, ksize, anchor);
1171 for( i = 0; i < size; i++ )
1172 element->values[i] = elem.data[i];
// Legacy C API: frees a structuring element created by
// cvCreateStructuringElementEx; errors on a null pointer-to-pointer.
1180 cvReleaseStructuringElement( IplConvKernel ** element )
1183 CV_Error( CV_StsNullPtr, "" );
// Converts a legacy IplConvKernel into a CV_8U cv::Mat mask + anchor.
// A null src yields the default anchor (1,1) with dst presumably left
// empty (the early-return path is elided in this excerpt).
1188 static void convertConvKernel( const IplConvKernel* src, cv::Mat& dst, cv::Point& anchor )
1192 anchor = cv::Point(1,1);
1196 anchor = cv::Point(src->anchorX, src->anchorY);
1197 dst.create(src->nRows, src->nCols, CV_8U);
// Narrow each int value back to a byte mask cell.
1199 int i, size = src->nRows*src->nCols;
1200 for( i = 0; i < size; i++ )
1201 dst.data[i] = (uchar)src->values[i];
// Legacy C API wrapper: converts the element and forwards to cv::erode
// with replicated borders.
1206 cvErode( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
1208 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1209 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1211 convertConvKernel( element, kernel, anchor );
1212 cv::erode( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
// Legacy C API wrapper: converts the element and forwards to cv::dilate
// with replicated borders.
1217 cvDilate( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
1219 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1220 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1222 convertConvKernel( element, kernel, anchor );
1223 cv::dilate( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
// Legacy C API wrapper for morphologyEx. A null element is substituted with
// the default 3x3 rectangle; the temporary element is released after its
// mask has been copied into `kernel`.
1228 cvMorphologyEx( const void* srcarr, void* dstarr, void*,
1229 IplConvKernel* element, int op, int iterations )
1231 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1232 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1234 IplConvKernel* temp_element = NULL;
1237 temp_element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT);
1239 temp_element = element;
1241 convertConvKernel( temp_element, kernel, anchor );
// Only the locally-created default element is freed here, never the caller's.
1244 cvReleaseStructuringElement(&temp_element);
1246 cv::morphologyEx( src, dst, op, kernel, anchor, iterations, cv::BORDER_REPLICATE );