diff --git a/modules/bioinspired/include/opencv2/bioinspired/retina.hpp b/modules/bioinspired/include/opencv2/bioinspired/retina.hpp
index 8e6eda93cae..478b6a0f75c 100644
--- a/modules/bioinspired/include/opencv2/bioinspired/retina.hpp
+++ b/modules/bioinspired/include/opencv2/bioinspired/retina.hpp
@@ -94,57 +94,12 @@ enum {
Here is the default configuration file of the retina module. It gives results such as the first
retina output shown on the top of this page.
-    @code{xml}
-    <?xml version="1.0" encoding="UTF-8"?>
-    <opencv_storage>
-    <OPLandIPLparvo>
-        <colorMode>1</colorMode>
-        <normaliseOutput>1</normaliseOutput>
-        <photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>
-        <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
-        <photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
-        <horizontalCellsGain>0.01</horizontalCellsGain>
-        <hcellsTemporalConstant>0.5</hcellsTemporalConstant>
-        <hcellsSpatialConstant>7.</hcellsSpatialConstant>
-        <ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity></OPLandIPLparvo>
-    <IPLmagno>
-        <normaliseOutput>1</normaliseOutput>
-        <parasolCells_beta>0.</parasolCells_beta>
-        <parasolCells_tau>0.</parasolCells_tau>
-        <parasolCells_k>7.</parasolCells_k>
-        <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
-        <V0CompressionParameter>9.5e-01</V0CompressionParameter>
-        <localAdaptintegration_tau>0.</localAdaptintegration_tau>
-        <localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
-    </opencv_storage>
-    @endcode
+ @include default_retina_config.xml
Here is the "realistic" setup used to obtain the second retina output shown on the top of this page.
-    @code{xml}
-    <?xml version="1.0" encoding="UTF-8"?>
-    <opencv_storage>
-    <OPLandIPLparvo>
-        <colorMode>1</colorMode>
-        <normaliseOutput>1</normaliseOutput>
-        <photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>
-        <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
-        <photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
-        <horizontalCellsGain>0.3</horizontalCellsGain>
-        <hcellsTemporalConstant>0.5</hcellsTemporalConstant>
-        <hcellsSpatialConstant>7.</hcellsSpatialConstant>
-        <ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity></OPLandIPLparvo>
-    <IPLmagno>
-        <normaliseOutput>1</normaliseOutput>
-        <parasolCells_beta>0.</parasolCells_beta>
-        <parasolCells_tau>0.</parasolCells_tau>
-        <parasolCells_k>7.</parasolCells_k>
-        <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
-        <V0CompressionParameter>9.5e-01</V0CompressionParameter>
-        <localAdaptintegration_tau>0.</localAdaptintegration_tau>
-        <localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
-    </opencv_storage>
-    @endcode
+ @include realistic_retina_config.xml
+
*/
struct RetinaParameters{
//! Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
diff --git a/modules/bioinspired/samples/default_retina_config.xml b/modules/bioinspired/samples/default_retina_config.xml
new file mode 100644
index 00000000000..469b5d58f10
--- /dev/null
+++ b/modules/bioinspired/samples/default_retina_config.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<opencv_storage>
+<OPLandIPLparvo>
+  <colorMode>1</colorMode>
+  <normaliseOutput>1</normaliseOutput>
+  <photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>
+  <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
+  <photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
+  <horizontalCellsGain>0.01</horizontalCellsGain>
+  <hcellsTemporalConstant>0.5</hcellsTemporalConstant>
+  <hcellsSpatialConstant>7.</hcellsSpatialConstant>
+  <ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity>
+</OPLandIPLparvo>
+<IPLmagno>
+  <normaliseOutput>1</normaliseOutput>
+  <parasolCells_beta>0.</parasolCells_beta>
+  <parasolCells_tau>0.</parasolCells_tau>
+  <parasolCells_k>7.</parasolCells_k>
+  <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
+  <V0CompressionParameter>9.5e-01</V0CompressionParameter>
+  <localAdaptintegration_tau>0.</localAdaptintegration_tau>
+  <localAdaptintegration_k>7.</localAdaptintegration_k>
+</IPLmagno>
+</opencv_storage>
diff --git a/modules/bioinspired/samples/realistic_retina_config.xml b/modules/bioinspired/samples/realistic_retina_config.xml
new file mode 100644
index 00000000000..c02e79b3c6d
--- /dev/null
+++ b/modules/bioinspired/samples/realistic_retina_config.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<opencv_storage>
+<OPLandIPLparvo>
+  <colorMode>1</colorMode>
+  <normaliseOutput>1</normaliseOutput>
+  <photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>
+  <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
+  <photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
+  <horizontalCellsGain>0.3</horizontalCellsGain>
+  <hcellsTemporalConstant>0.5</hcellsTemporalConstant>
+  <hcellsSpatialConstant>7.</hcellsSpatialConstant>
+  <ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity>
+</OPLandIPLparvo>
+<IPLmagno>
+  <normaliseOutput>1</normaliseOutput>
+  <parasolCells_beta>0.</parasolCells_beta>
+  <parasolCells_tau>0.</parasolCells_tau>
+  <parasolCells_k>7.</parasolCells_k>
+  <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
+  <V0CompressionParameter>9.5e-01</V0CompressionParameter>
+  <localAdaptintegration_tau>0.</localAdaptintegration_tau>
+  <localAdaptintegration_k>7.</localAdaptintegration_k>
+</IPLmagno>
+</opencv_storage>
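For reviewers trying out the two extracted files: a minimal sketch of how the retina consumes such a parameter file. The image path and the working-directory location of the XML are placeholder assumptions.

```cpp
#include <opencv2/bioinspired.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat input = cv::imread("input.png"); // placeholder image
    cv::Ptr<cv::bioinspired::Retina> retina =
        cv::bioinspired::Retina::create(input.size());
    // Load either of the sample files added by this patch.
    retina->setup("default_retina_config.xml");
    retina->run(input);
    cv::Mat parvo, magno;
    retina->getParvo(parvo); // foveal vision, details channel
    retina->getMagno(magno); // peripheral vision, motion/transient channel
    return 0;
}
```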
diff --git a/modules/bioinspired/tutorials/retina_model/retina_model.markdown b/modules/bioinspired/tutorials/retina_model/retina_model.markdown
index 37285bfa1c7..d71fe797bec 100644
--- a/modules/bioinspired/tutorials/retina_model/retina_model.markdown
+++ b/modules/bioinspired/tutorials/retina_model/retina_model.markdown
@@ -1,6 +1,8 @@
Retina and real-world vision {#tutorial_bioinspired_retina_model}
=============================================================
+@tableofcontents
+
Goal
----
@@ -382,7 +384,7 @@ need to know if mean luminance information is required or not. If not, the r
significantly reduce its energy thus giving more visibility to higher spatial frequency details.
-#### Basic parameters
+## Basic parameters
The simplest parameters are as follows :
@@ -397,7 +399,7 @@ processing. You can expect much faster processing using gray levels : it would r
product per pixel for all of the retina processes and it has recently been parallelized for multicore
architectures.
-#### Photo-receptors parameters
+## Photo-receptors parameters
The following parameters act on the entry point of the retina - photo-receptors - and has impact on all
of the following processes. These sensors are low pass spatio-temporal filters that smooth temporal and
@@ -421,7 +423,7 @@ and high frequency noise canceling.
A good compromise for color images is a 0.53 value since such choice won't affect too much the color spectrum.
Higher values would lead to gray and blurred output images.
-#### Horizontal cells parameters
+## Horizontal cells parameters
This parameter set tunes the neural network connected to the photo-receptors, the horizontal cells.
It modulates photo-receptors sensitivity and completes the processing for final spectral whitening
@@ -446,7 +448,7 @@ It modulates photo-receptors sensitivity and completes the processing for final
and luminance is already partly enhanced. The following parameters act on the last processing stages
of the two outing retina signals.
-#### Parvo (details channel) dedicated parameter
+## Parvo (details channel) dedicated parameter
- **ganglionCellsSensitivity** specifies the strength of the final local adaptation occurring at
the output of this details' dedicated channel. Parameter values remain between 0 and 1. Low value
@@ -455,7 +457,7 @@ of the two outing retina signals.
**Note :** this parameter can correct eventual burned images by favoring low energetic details of
the visual scene, even in bright areas.
-#### IPL Magno (motion/transient channel) parameters
+## IPL Magno (motion/transient channel) parameters
Once image's information are cleaned, this channel acts as a high pass temporal filter that
selects only the signals related to transient signals (events, motion, etc.). A low pass spatial filter
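The parameters covered by these renamed sections can also be tuned in code rather than XML; a minimal sketch, assuming the `RetinaParameters` field names from retina.hpp and reusing the "realistic" values above purely as an illustration:

```cpp
#include <opencv2/bioinspired.hpp>

void tuneRetina(cv::Ptr<cv::bioinspired::Retina> retina)
{
    cv::bioinspired::RetinaParameters p = retina->getParameters();
    p.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity = 0.89f; // photo-receptors section
    p.OPLandIplParvo.horizontalCellsGain = 0.3f;        // keep some mean luminance (horizontal cells)
    p.OPLandIplParvo.ganglionCellsSensitivity = 0.89f;  // Parvo (details channel) parameter
    p.IplMagno.amacrinCellsTemporalCutFrequency = 2.0f; // IPL Magno high-pass behaviour
    retina->setup(p);
}
```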
diff --git a/modules/cannops/include/opencv2/cann.hpp b/modules/cannops/include/opencv2/cann.hpp
index 30555dd8257..bd351481624 100644
--- a/modules/cannops/include/opencv2/cann.hpp
+++ b/modules/cannops/include/opencv2/cann.hpp
@@ -8,12 +8,12 @@
#include "opencv2/core.hpp"
/**
- @defgroup cann Ascend-accelerated Computer Vision
+ @defgroup cannops Ascend-accelerated Computer Vision
@{
@defgroup canncore Core part
@{
@defgroup cann_struct Data Structures
- @defgroup cann_init Initializeation and Information
+ @defgroup cann_init Initialization and Information
@}
@}
*/
diff --git a/modules/cannops/include/opencv2/cann_interface.hpp b/modules/cannops/include/opencv2/cann_interface.hpp
index 6667eb58519..6b13090f4f1 100644
--- a/modules/cannops/include/opencv2/cann_interface.hpp
+++ b/modules/cannops/include/opencv2/cann_interface.hpp
@@ -13,9 +13,9 @@ namespace cann
{
/**
- @addtogroup cann
+ @addtogroup cannops
@{
- @defgroup cannops Operations for Ascend Backend.
+ @defgroup cannops_ops Operations for Ascend Backend.
@{
@defgroup cannops_elem Per-element Operations
@defgroup cannops_core Core Operations on Matrices
diff --git a/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp b/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp
index d72700168cd..01e7c41ca9a 100644
--- a/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp
+++ b/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp
@@ -844,7 +844,6 @@ cv::Moments cvMoments = convertSpatialMoments(spatialMoments, order);
```
see the \a CUDA_TEST_P(Moments, Async) test inside opencv_contrib_source_code/modules/cudaimgproc/test/test_moments.cpp for an example.
-@returns cv::Moments.
@sa cuda::moments, cuda::convertSpatialMoments, cuda::numMoments, cuda::MomentsOrder
*/
CV_EXPORTS_W void spatialMoments(InputArray src, OutputArray moments, const bool binaryImage = false, const MomentsOrder order = MomentsOrder::THIRD_ORDER_MOMENTS, const int momentsType = CV_64F, Stream& stream = Stream::Null());
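A minimal sketch of the asynchronous pattern the surrounding docs describe, assuming a CV_8UC1 image treated as binary; pinned `HostMem` keeps the device-to-host copy of the moments asynchronous:

```cpp
#include <opencv2/cudaimgproc.hpp>

cv::Moments momentsAsync(const cv::cuda::GpuMat& img) // img assumed CV_8UC1
{
    const cv::cuda::MomentsOrder order = cv::cuda::MomentsOrder::THIRD_ORDER_MOMENTS;
    cv::cuda::HostMem moments(1, cv::cuda::numMoments(order), CV_64F); // pinned memory
    cv::cuda::Stream stream;
    cv::cuda::spatialMoments(img, moments, true, order, CV_64F, stream);
    stream.waitForCompletion();
    return cv::cuda::convertSpatialMoments(moments.createMatHeader(), order);
}
```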
diff --git a/modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown b/modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown
index 26244c9f8ae..3a4b88ef81b 100644
--- a/modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown
+++ b/modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown
@@ -50,14 +50,9 @@ Explanation
Benchmarking results
-----------
-Dataset benchmarking
-----
-
-###General100 dataset
-
-
+## General100 dataset
-#####2x scaling factor
+### 2x scaling factor
| | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
@@ -70,7 +65,7 @@ Dataset benchmarking
| Nearest neighbor | 0.000114 | 29.1665 | 0.9049 |
| Lanczos | 0.001094 | 32.4687 | 0.9327 |
-#####3x scaling factor
+### 3x scaling factor
| | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
| ------------- |:-------------------:| ---------:|--------:|
@@ -83,7 +78,7 @@ Dataset benchmarking
| Lanczos | 0.001012 |25.9115 |0.8706 |
-#####4x scaling factor
+### 4x scaling factor
| | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
| ------------- |:-------------------:| ---------:|--------:|
@@ -96,14 +91,10 @@ Dataset benchmarking
| Lanczos | 0.001012 |25.9115 |0.8706 |
-
-Images
-----
-
-
+## Images
-####2x scaling factor
+### 2x scaling factor
|Set5: butterfly.png | size: 256x256 | ||
|:-------------:|:-------------------:|:-------------:|:----:|
@@ -112,7 +103,7 @@ Images
|  |  | 
|29.0341 / 0.9354 / **0.004157**| 29.0077 / 0.9345 / 0.006325 | 27.8212 / 0.9230 / 0.037937 | **30.0347** / **0.9453** / 2.077280 |
-####3x scaling factor
+### 3x scaling factor
|Urban100: img_001.png | size: 1024x644 | ||
|:-------------:|:-------------------:|:-------------:|:----:|
@@ -122,7 +113,7 @@ Images
|28.0118 / 0.8588 / **0.030748**| 28.0184 / 0.8597 / 0.094173 | | **30.5671** / **0.9019** / 9.517580 |
-####4x scaling factor
+### 4x scaling factor
|Set14: comic.png | size: 250x361 | ||
|:-------------:|:-------------------:|:-------------:|:----:|
@@ -131,7 +122,7 @@ Images
||  |  | 
|20.0417 / 0.6302 / **0.001894**| 20.0885 / 0.6384 / 0.002103 | 20.0676 / 0.6339 / 0.061640 | **20.5233** / **0.6901** / 0.665876 |
-####8x scaling factor
+### 8x scaling factor
|Div2K: 0006.png | size: 1356x2040 | |
|:-------------:|:-------------------:|:-------------:|
@@ -139,5 +130,3 @@ Images
|PSNR / SSIM / Speed (CPU)| 26.3139 / **0.8033** / 0.001107| 23.8291 / 0.7340 / **0.000611** |
||  | |
|26.1565 / 0.7962 / 0.004782| **26.7046** / 0.7987 / 2.274290 | |
-
-
\ No newline at end of file
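A minimal sketch of how one row of these tables can be reproduced; model, test image, and ground-truth paths are placeholders:

```cpp
#include <opencv2/dnn_superres.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    cv::Mat img = cv::imread("butterfly.png");    // placeholder test image
    cv::Mat gt  = cv::imread("butterfly_gt.png"); // placeholder ground truth (2x size)

    cv::dnn_superres::DnnSuperResImpl sr;
    sr.readModel("ESPCN_x2.pb"); // placeholder path to the downloaded model
    sr.setModel("espcn", 2);

    cv::Mat up;
    double t = (double)cv::getTickCount();
    sr.upsample(img, up);
    t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
    std::cout << "time: " << t << " s, PSNR: " << cv::PSNR(up, gt) << std::endl;
    return 0;
}
```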
diff --git a/modules/face/include/opencv2/face/facemark.hpp b/modules/face/include/opencv2/face/facemark.hpp
index 86e9384342e..4e66727fe46 100644
--- a/modules/face/include/opencv2/face/facemark.hpp
+++ b/modules/face/include/opencv2/face/facemark.hpp
@@ -12,12 +12,6 @@ Mentor: Delia Passalacqua
#ifndef __OPENCV_FACELANDMARK_HPP__
#define __OPENCV_FACELANDMARK_HPP__
-/**
-@defgroup face Face Analysis
-- @ref tutorial_table_of_content_facemark
-- The Facemark API
-*/
-
#include "opencv2/core.hpp"
#include <vector>
@@ -25,6 +19,8 @@ Mentor: Delia Passalacqua
namespace cv {
namespace face {
+//! @addtogroup face
+//! @{
/** @brief Abstract base class for all facemark models
@@ -88,6 +84,7 @@ CV_EXPORTS_W Ptr<Facemark> createFacemarkLBF();
//! construct a Kazemi facemark detector
CV_EXPORTS_W Ptr<Facemark> createFacemarkKazemi();
+//! @}
} // face
} // cv
diff --git a/modules/face/include/opencv2/face/facemark_train.hpp b/modules/face/include/opencv2/face/facemark_train.hpp
index d6e27e9face..591c079a0d6 100644
--- a/modules/face/include/opencv2/face/facemark_train.hpp
+++ b/modules/face/include/opencv2/face/facemark_train.hpp
@@ -12,12 +12,6 @@ Mentor: Delia Passalacqua
#ifndef __OPENCV_FACELANDMARKTRAIN_HPP__
#define __OPENCV_FACELANDMARKTRAIN_HPP__
-/**
-@defgroup face Face Analysis
-- @ref tutorial_table_of_content_facemark
-- The Facemark API
-*/
-
#include "opencv2/face/facemark.hpp"
#include "opencv2/objdetect.hpp"
#include <vector>
diff --git a/modules/face/tutorials/face_landmark/face_landmark_trainer.markdown b/modules/face/tutorials/face_landmark/face_landmark_trainer.markdown
index 601a6b4c428..8fdeaa611d5 100644
--- a/modules/face/tutorials/face_landmark/face_landmark_trainer.markdown
+++ b/modules/face/tutorials/face_landmark/face_landmark_trainer.markdown
@@ -21,7 +21,7 @@ The above format is similar to HELEN dataset which is used for training the mode
./sample_train_landmark_detector -annotations=/home/sukhad/Downloads/code/trainset/ -config=config.xml -face_cascade=lbpcascadefrontalface.xml -model=trained_model.dat -width=460 -height=460
```
-### Description of command parameters
+## Description of command parameters
> * **annotations** a : (REQUIRED) Path to annotations txt file [example - /data/annotations.txt]
> * **config** c : (REQUIRED) Path to configuration xml file containing parameters for training.[ example - /data/config.xml]
@@ -30,7 +30,7 @@ The above format is similar to HELEN dataset which is used for training the mode
> * **height** h : (OPTIONAL) The height which you want all images to get to scale the annotations. Large images are slow to process [default = 460]
> * **face_cascade** f (REQUIRED) Path to the face cascade xml file which you want to use as a detector.
-### Description of training parameters
+## Description of training parameters
The configuration file described above which is used while training contains the training parameters which are required for training.
@@ -49,7 +49,7 @@ The configuration file described above which is used while training contains the
To get more detailed description about the training parameters you can refer to the [Research paper](https://pdfs.semanticscholar.org/d78b/6a5b0dcaa81b1faea5fb0000045a62513567.pdf).
-### Understanding code
+## Understanding code

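A minimal sketch of consuming the trained model from the command above; file names are the same placeholders used there:

```cpp
#include <opencv2/face.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("test.jpg"); // placeholder input image
    cv::CascadeClassifier faceDetector("lbpcascadefrontalface.xml");
    std::vector<cv::Rect> faces;
    faceDetector.detectMultiScale(img, faces);

    cv::Ptr<cv::face::Facemark> facemark = cv::face::createFacemarkKazemi();
    facemark->loadModel("trained_model.dat"); // output of the training command above

    std::vector<std::vector<cv::Point2f>> landmarks;
    facemark->fit(img, faces, landmarks); // landmarks[i] = points for face i
    return 0;
}
```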
diff --git a/modules/fuzzy/include/opencv2/fuzzy.hpp b/modules/fuzzy/include/opencv2/fuzzy.hpp
index d660cc3615c..59f2a3f2a1f 100644
--- a/modules/fuzzy/include/opencv2/fuzzy.hpp
+++ b/modules/fuzzy/include/opencv2/fuzzy.hpp
@@ -52,19 +52,19 @@
Namespace for all functions is `ft`. The module brings implementation of the latest image processing algorithms based on fuzzy mathematics. Methods are named based on the pattern `FT`_degree_dimension`_`method.
- @{
+@{
@defgroup f0_math Math with F0-transform support
-Fuzzy transform (\f$F^0\f$-transform) of the 0th degree transforms whole image to a matrix of its components. These components are used in latter computation where each of them represents average color of certain subarea.
+    Fuzzy transform (\f$F^0\f$-transform) of the 0th degree transforms the whole image into a matrix of its components. These components are used in later computations, where each of them represents the average color of a certain subarea.
@defgroup f1_math Math with F1-transform support
-Fuzzy transform (\f$F^1\f$-transform) of the 1th degree transforms whole image to a matrix of its components. Each component is polynomial of the 1th degree carrying information about average color and average gradient of certain subarea.
+    Fuzzy transform (\f$F^1\f$-transform) of the 1st degree transforms the whole image into a matrix of its components. Each component is a polynomial of the 1st degree carrying information about the average color and the average gradient of a certain subarea.
@defgroup f_image Fuzzy image processing
-Image proceesing based on fuzzy mathematics namely F-transform.
- @}
+    Image processing based on fuzzy mathematics, namely the F-transform.
+@}
*/
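A minimal sketch of one F-transform application from this module, image inpainting; file names and the damage mask are placeholders, and the radius/function/algorithm values are illustrative:

```cpp
#include <opencv2/fuzzy.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img  = cv::imread("input.png");                      // placeholder damaged image
    cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE); // placeholder damage mask
    cv::Mat output;
    // Reconstruct the masked area from F0-transform components of the valid pixels.
    cv::ft::inpaint(img, mask, output, 2, cv::ft::LINEAR, cv::ft::ONE_STEP);
    return 0;
}
```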
diff --git a/modules/hdf/include/opencv2/hdf.hpp b/modules/hdf/include/opencv2/hdf.hpp
index ff40426ff65..ac48e4b9ac8 100644
--- a/modules/hdf/include/opencv2/hdf.hpp
+++ b/modules/hdf/include/opencv2/hdf.hpp
@@ -41,17 +41,15 @@
This module provides storage routines for Hierarchical Data Format objects.
- @{
+@{
@defgroup hdf5 Hierarchical Data Format version 5
-Hierarchical Data Format version 5
---------------------------------------------------------
+ Hierarchical Data Format version 5
+ --------------------------------------------------------
-In order to use it, the hdf5 library has to be installed, which
-means cmake should find it using `find_package(HDF5)` .
-
-
- @}
+ In order to use it, the hdf5 library has to be installed, which
+ means cmake should find it using `find_package(HDF5)`.
+@}
*/
#endif
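A minimal sketch of the storage routines, assuming HDF5 was found via `find_package(HDF5)` as noted above; the file and dataset names are placeholders:

```cpp
#include <opencv2/hdf.hpp>
#include <opencv2/core.hpp>

int main()
{
    cv::Mat data = cv::Mat::eye(3, 3, CV_32F);
    cv::Ptr<cv::hdf::HDF5> h5 = cv::hdf::open("storage.h5"); // placeholder file name
    if (!h5->hlexists("identity"))
        h5->dscreate(data.rows, data.cols, data.type(), "identity");
    h5->dswrite(data, "identity");
    cv::Mat restored;
    h5->dsread(restored, "identity");
    h5->close();
    return 0;
}
```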
diff --git a/modules/mcc/include/opencv2/mcc/checker_model.hpp b/modules/mcc/include/opencv2/mcc/checker_model.hpp
index c13d5afc585..0768c691e05 100644
--- a/modules/mcc/include/opencv2/mcc/checker_model.hpp
+++ b/modules/mcc/include/opencv2/mcc/checker_model.hpp
@@ -116,7 +116,6 @@ class CV_EXPORTS_W CCheckerDraw
virtual ~CCheckerDraw() {}
/** \brief Draws the checker to the given image.
* \param img image in color space BGR
- * \return void
*/
CV_WRAP virtual void draw(InputOutputArray img) = 0;
/** \brief Create a new CCheckerDraw object.
diff --git a/modules/rgbd/include/opencv2/rgbd/dynafu.hpp b/modules/rgbd/include/opencv2/rgbd/dynafu.hpp
index 32875ad5ac7..e5ad3447778 100644
--- a/modules/rgbd/include/opencv2/rgbd/dynafu.hpp
+++ b/modules/rgbd/include/opencv2/rgbd/dynafu.hpp
@@ -114,7 +114,6 @@ class CV_EXPORTS_W DynaFu
virtual void renderSurface(OutputArray depthImage, OutputArray vertImage, OutputArray normImage, bool warp=true) = 0;
};
-//! @}
-}
-}
-#endif
+} // dynafu::
+} // cv::
+#endif // __OPENCV_RGBD_DYNAFU_HPP__
diff --git a/modules/sfm/include/opencv2/sfm.hpp b/modules/sfm/include/opencv2/sfm.hpp
index 25a3b10da5d..52c1af07e8e 100644
--- a/modules/sfm/include/opencv2/sfm.hpp
+++ b/modules/sfm/include/opencv2/sfm.hpp
@@ -75,7 +75,7 @@ This module has been originally developed as a project for Google Summer of Code
- Notice that it is compiled only when Eigen, GLog and GFlags are correctly installed.\n
Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
- @{
+@{
@defgroup conditioning Conditioning
@defgroup fundamental Fundamental
@defgroup io Input/Output
@@ -85,18 +85,17 @@ This module has been originally developed as a project for Google Summer of Code
@defgroup triangulation Triangulation
@defgroup reconstruction Reconstruction
- @note
- - Notice that it is compiled only when Ceres Solver is correctly installed.\n
- Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
+ @note
+ - Notice that it is compiled only when Ceres Solver is correctly installed.\n
+ Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
@defgroup simple_pipeline Simple Pipeline
- @note
- - Notice that it is compiled only when Ceres Solver is correctly installed.\n
- Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
-
- @}
+ @note
+ - Notice that it is compiled only when Ceres Solver is correctly installed.\n
+ Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
+@}
*/
#endif
diff --git a/modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp b/modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp
index b2290e3768c..469c46f72ea 100644
--- a/modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp
+++ b/modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp
@@ -18,6 +18,7 @@ namespace cv
{
namespace stereo
{
+
/** \addtogroup stereo
* @{
*/
@@ -190,9 +191,8 @@ class CV_EXPORTS_W QuasiDenseStereo
CV_PROP_RW PropagationParameters Param;
};
-} //namespace cv
-} //namespace stereo
-
/** @}*/
+} //namespace stereo
+} //namespace cv
#endif // __OPENCV_QUASI_DENSE_STEREO_H__
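A minimal sketch of the class whose declaration closes here, assuming a rectified stereo pair and the default `PropagationParameters`:

```cpp
#include <opencv2/stereo/quasi_dense_stereo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat left  = cv::imread("left.png");  // placeholder rectified pair
    cv::Mat right = cv::imread("right.png");
    cv::Ptr<cv::stereo::QuasiDenseStereo> qds =
        cv::stereo::QuasiDenseStereo::create(left.size()); // default PropagationParameters
    qds->process(left, right);
    cv::Mat disparity = qds->getDisparity();
    return 0;
}
```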
diff --git a/modules/text/include/opencv2/text.hpp b/modules/text/include/opencv2/text.hpp
index 86ce3ec6e80..2b84451c23f 100644
--- a/modules/text/include/opencv2/text.hpp
+++ b/modules/text/include/opencv2/text.hpp
@@ -52,49 +52,49 @@ scene images.
@{
@defgroup text_detect Scene Text Detection
-Class-specific Extremal Regions for Scene Text Detection
---------------------------------------------------------
-
-The scene text detection algorithm described below has been initially proposed by Lukás Neumann &
-Jiri Matas @cite Neumann11. The main idea behind Class-specific Extremal Regions is similar to the MSER
-in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image.
-However, this technique differs from MSER in that selection of suitable ERs is done by a sequential
-classifier trained for character detection, i.e. dropping the stability requirement of MSERs and
-selecting class-specific (not necessarily stable) regions.
-
-The component tree of an image is constructed by thresholding by an increasing value step-by-step
-from 0 to 255 and then linking the obtained connected components from successive levels in a
-hierarchy by their inclusion relation:
-
-
-
-The component tree may contain a huge number of regions even for a very simple image as shown in
-the previous image. This number can easily reach the order of 1 x 10\^6 regions for an average 1
-Megapixel image. In order to efficiently select suitable regions among all the ERs the algorithm
-make use of a sequential classifier with two differentiated stages.
-
-In the first stage incrementally computable descriptors (area, perimeter, bounding box, and Euler's
-number) are computed (in O(1)) for each region r and used as features for a classifier which
-estimates the class-conditional probability p(r|character). Only the ERs which correspond to local
-maximum of the probability p(r|character) are selected (if their probability is above a global limit
-p_min and the difference between local maximum and local minimum is greater than a delta_min
-value).
-
-In the second stage, the ERs that passed the first stage are classified into character and
-non-character classes using more informative but also more computationally expensive features. (Hole
-area ratio, convex hull ratio, and the number of outer boundary inflexion points).
-
-This ER filtering process is done in different single-channel projections of the input image in
-order to increase the character localization recall.
-
-After the ER filtering is done on each input channel, character candidates must be grouped in
-high-level text blocks (i.e. words, text lines, paragraphs, ...). The opencv_text module implements
-two different grouping algorithms: the Exhaustive Search algorithm proposed in @cite Neumann12 for
-grouping horizontally aligned text, and the method proposed by Lluis Gomez and Dimosthenis Karatzas
-in @cite Gomez13 @cite Gomez14 for grouping arbitrary oriented text (see erGrouping).
-
-To see the text detector at work, have a look at the textdetection demo:
-
+ Class-specific Extremal Regions for Scene Text Detection
+ --------------------------------------------------------
+
+    The scene text detection algorithm described below was initially proposed by Lukás Neumann &
+ Jiri Matas @cite Neumann11. The main idea behind Class-specific Extremal Regions is similar to the MSER
+ in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image.
+ However, this technique differs from MSER in that selection of suitable ERs is done by a sequential
+ classifier trained for character detection, i.e. dropping the stability requirement of MSERs and
+ selecting class-specific (not necessarily stable) regions.
+
+ The component tree of an image is constructed by thresholding by an increasing value step-by-step
+ from 0 to 255 and then linking the obtained connected components from successive levels in a
+ hierarchy by their inclusion relation:
+
+ 
+
+ The component tree may contain a huge number of regions even for a very simple image as shown in
+ the previous image. This number can easily reach the order of 1 x 10\^6 regions for an average 1
+    Megapixel image. In order to efficiently select suitable regions among all the ERs, the algorithm
+    makes use of a sequential classifier with two differentiated stages.
+
+ In the first stage incrementally computable descriptors (area, perimeter, bounding box, and Euler's
+ number) are computed (in O(1)) for each region r and used as features for a classifier which
+ estimates the class-conditional probability p(r|character). Only the ERs which correspond to local
+ maximum of the probability p(r|character) are selected (if their probability is above a global limit
+ p_min and the difference between local maximum and local minimum is greater than a delta_min
+ value).
+
+ In the second stage, the ERs that passed the first stage are classified into character and
+ non-character classes using more informative but also more computationally expensive features. (Hole
+ area ratio, convex hull ratio, and the number of outer boundary inflexion points).
+
+ This ER filtering process is done in different single-channel projections of the input image in
+ order to increase the character localization recall.
+
+ After the ER filtering is done on each input channel, character candidates must be grouped in
+ high-level text blocks (i.e. words, text lines, paragraphs, ...). The opencv_text module implements
+ two different grouping algorithms: the Exhaustive Search algorithm proposed in @cite Neumann12 for
+ grouping horizontally aligned text, and the method proposed by Lluis Gomez and Dimosthenis Karatzas
+ in @cite Gomez13 @cite Gomez14 for grouping arbitrary oriented text (see erGrouping).
+
+ To see the text detector at work, have a look at the textdetection demo:
+
@defgroup text_recognize Scene Text Recognition
@}
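A minimal sketch of the two-stage ER pipeline plus grouping described above, mirroring the textdetection demo it points to; the classifier file names are those shipped with the module samples, and the input image is a placeholder:

```cpp
#include <opencv2/text.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("scenetext.jpg"); // placeholder scene image

    // Stage 1 and stage 2 classifiers, as described above.
    cv::Ptr<cv::text::ERFilter> er1 = cv::text::createERFilterNM1(
        cv::text::loadClassifierNM1("trained_classifierNM1.xml"),
        16, 0.00015f, 0.13f, 0.2f, true, 0.1f);
    cv::Ptr<cv::text::ERFilter> er2 = cv::text::createERFilterNM2(
        cv::text::loadClassifierNM2("trained_classifierNM2.xml"), 0.5f);

    // Run the filters on several single-channel projections of the input.
    std::vector<cv::Mat> channels;
    cv::text::computeNMChannels(img, channels);
    std::vector<std::vector<cv::text::ERStat>> regions(channels.size());
    for (size_t c = 0; c < channels.size(); c++)
    {
        er1->run(channels[c], regions[c]);
        er2->run(channels[c], regions[c]);
    }

    // Group character candidates into horizontally aligned text lines.
    std::vector<std::vector<cv::Vec2i>> groups;
    std::vector<cv::Rect> boxes;
    cv::text::erGrouping(img, channels, regions, groups, boxes,
                         cv::text::ERGROUPING_ORIENTATION_HORIZ);
    return 0;
}
```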
diff --git a/modules/text/include/opencv2/text/ocr.hpp b/modules/text/include/opencv2/text/ocr.hpp
index a0c967e87bd..083fc7a5aba 100644
--- a/modules/text/include/opencv2/text/ocr.hpp
+++ b/modules/text/include/opencv2/text/ocr.hpp
@@ -363,7 +363,6 @@ CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierCNN(cons
*/
CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifier(const String& filename, int classifier);
-//! @}
/** @brief Utility function to create a tailored language model transitions table from a given list of words (lexicon).
*
diff --git a/modules/videostab/include/opencv2/videostab.hpp b/modules/videostab/include/opencv2/videostab.hpp
index ca3f5adef2b..14c52ebaf1b 100644
--- a/modules/videostab/include/opencv2/videostab.hpp
+++ b/modules/videostab/include/opencv2/videostab.hpp
@@ -44,7 +44,7 @@
#define OPENCV_VIDEOSTAB_HPP
/**
- @defgroup videostab Video Stabilization
+@defgroup videostab Video Stabilization
The video stabilization module contains a set of functions and classes that can be used to solve the
problem of video stabilization. There are a few methods implemented, most of them are described in
@@ -53,26 +53,24 @@ paper methods.
### References
- 1. "Full-Frame Video Stabilization with Motion Inpainting"
- Yasuyuki Matsushita, Eyal Ofek, Weina Ge, Xiaoou Tang, Senior Member, and Heung-Yeung Shum
- 2. "Auto-Directed Video Stabilization with Robust L1 Optimal Camera Paths"
- Matthias Grundmann, Vivek Kwatra, Irfan Essa
+1. "Full-Frame Video Stabilization with Motion Inpainting"
+ Yasuyuki Matsushita, Eyal Ofek, Weina Ge, Xiaoou Tang, Senior Member, and Heung-Yeung Shum
+2. "Auto-Directed Video Stabilization with Robust L1 Optimal Camera Paths"
+ Matthias Grundmann, Vivek Kwatra, Irfan Essa
- @{
- @defgroup videostab_motion Global Motion Estimation
+@{
+ @defgroup videostab_motion Global Motion Estimation
-The video stabilization module contains a set of functions and classes for global motion estimation
-between point clouds or between images. In the last case features are extracted and matched
-internally. For the sake of convenience the motion estimation functions are wrapped into classes.
-Both the functions and the classes are available.
+ The video stabilization module contains a set of functions and classes for global motion estimation
+    between point clouds or between images. In the latter case features are extracted and matched
+ internally. For the sake of convenience the motion estimation functions are wrapped into classes.
+ Both the functions and the classes are available.
- @defgroup videostab_marching Fast Marching Method
-
-The Fast Marching Method @cite Telea04 is used in of the video stabilization routines to do motion and
-color inpainting. The method is implemented is a flexible way and it's made public for other users.
-
- @}
+ @defgroup videostab_marching Fast Marching Method
+    The Fast Marching Method @cite Telea04 is used in some of the video stabilization routines to do motion and
+    color inpainting. The method is implemented in a flexible way and is made public for other users.
+@}
*/
#include "opencv2/videostab/stabilizer.hpp"
diff --git a/modules/viz/include/opencv2/viz.hpp b/modules/viz/include/opencv2/viz.hpp
index fc79b8b60e7..c31ed342ab1 100644
--- a/modules/viz/include/opencv2/viz.hpp
+++ b/modules/viz/include/opencv2/viz.hpp
@@ -60,25 +60,24 @@ interact with it.
3D visualization window (see Viz3d) is used to display widgets (see Widget), and it provides several
methods to interact with scene and widgets.
- @{
+@{
@defgroup viz_widget Widget
-In this section, the widget framework is explained. Widgets represent 2D or 3D objects, varying from
-simple ones such as lines to complex ones such as point clouds and meshes.
+ In this section, the widget framework is explained. Widgets represent 2D or 3D objects, varying from
+ simple ones such as lines to complex ones such as point clouds and meshes.
-Widgets are **implicitly shared**. Therefore, one can add a widget to the scene, and modify the
-widget without re-adding the widget.
+ Widgets are **implicitly shared**. Therefore, one can add a widget to the scene, and modify the
+ widget without re-adding the widget.
-@code
-// Create a cloud widget
-viz::WCloud cw(cloud, viz::Color::red());
-// Display it in a window
-myWindow.showWidget("CloudWidget1", cw);
-// Modify it, and it will be modified in the window.
-cw.setColor(viz::Color::yellow());
-@endcode
-
- @}
+ @code
+ // Create a cloud widget
+ viz::WCloud cw(cloud, viz::Color::red());
+ // Display it in a window
+ myWindow.showWidget("CloudWidget1", cw);
+ // Modify it, and it will be modified in the window.
+ cw.setColor(viz::Color::yellow());
+ @endcode
+@}
*/
#endif /* OPENCV_VIZ_HPP */
diff --git a/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp b/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp
index 3313a38348a..3793541c238 100644
--- a/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp
+++ b/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp
@@ -46,19 +46,18 @@ the use of this software, even if advised of the possibility of such damage.
@{
@defgroup xfeatures2d_experiment Experimental 2D Features Algorithms
-This section describes experimental algorithms for 2d feature detection.
+ This section describes experimental algorithms for 2d feature detection.
@defgroup xfeatures2d_nonfree Non-free 2D Features Algorithms
-This section describes two popular algorithms for 2d feature detection, SIFT and SURF, that are
-known to be patented. You need to set the OPENCV_ENABLE_NONFREE option in cmake to use those. Use them at your own risk.
+ This section describes two popular algorithms for 2d feature detection, SIFT and SURF, that are
+ known to be patented. You need to set the OPENCV_ENABLE_NONFREE option in cmake to use those. Use them at your own risk.
@defgroup xfeatures2d_match Experimental 2D Features Matching Algorithm
-This section describes the following matching strategies:
- - GMS: Grid-based Motion Statistics, @cite Bian2017gms
- - LOGOS: Local geometric support for high-outlier spatial verification, @cite Lowry2018LOGOSLG
-
+ This section describes the following matching strategies:
+ - GMS: Grid-based Motion Statistics, @cite Bian2017gms
+ - LOGOS: Local geometric support for high-outlier spatial verification, @cite Lowry2018LOGOSLG
@}
*/
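A minimal sketch of one of the listed matching strategies (GMS); the detector choice, feature count, and image paths are illustrative assumptions:

```cpp
#include <opencv2/xfeatures2d.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img1 = cv::imread("img1.png", cv::IMREAD_GRAYSCALE); // placeholder pair
    cv::Mat img2 = cv::imread("img2.png", cv::IMREAD_GRAYSCALE);

    // GMS works best with many keypoints, hence the high ORB feature count.
    cv::Ptr<cv::ORB> orb = cv::ORB::create(10000);
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;
    orb->detectAndCompute(img1, cv::noArray(), kp1, desc1);
    orb->detectAndCompute(img2, cv::noArray(), kp2, desc2);

    cv::BFMatcher matcher(cv::NORM_HAMMING);
    std::vector<cv::DMatch> all, gms;
    matcher.match(desc1, desc2, all);

    // Grid-based Motion Statistics filtering (@cite Bian2017gms).
    cv::xfeatures2d::matchGMS(img1.size(), img2.size(), kp1, kp2, all, gms);
    return 0;
}
```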
diff --git a/modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp b/modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp
index 8eb11aa6653..5fb299f20f4 100644
--- a/modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp
+++ b/modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp
@@ -50,6 +50,9 @@ namespace cv
namespace xfeatures2d
{
+//! @addtogroup xfeatures2d_nonfree
+//! @{
+
/** @brief Class for extracting Speeded Up Robust Features from an image @cite Bay06 .
The algorithm parameters:
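(The parameter list continues in the header.) A minimal sketch of the SURF extractor declared here, assuming a build with OPENCV_ENABLE_NONFREE as the group docs require; the Hessian threshold and image path are illustrative:

```cpp
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("img.png", cv::IMREAD_GRAYSCALE); // placeholder
    cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(400.0); // Hessian threshold
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    surf->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
    return 0;
}
```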
diff --git a/modules/ximgproc/include/opencv2/ximgproc.hpp b/modules/ximgproc/include/opencv2/ximgproc.hpp
index dca0443c0ad..099205126cb 100644
--- a/modules/ximgproc/include/opencv2/ximgproc.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc.hpp
@@ -65,12 +65,13 @@
#include "ximgproc/find_ellipses.hpp"
-/** @defgroup ximgproc Extended Image Processing
- @{
+/**
+@defgroup ximgproc Extended Image Processing
+@{
@defgroup ximgproc_edge Structured forests for fast edge detection
-This module contains implementations of modern structured edge detection algorithms,
-i.e. algorithms which somehow takes into account pixel affinities in natural images.
+ This module contains implementations of modern structured edge detection algorithms,
+    i.e. algorithms which take into account pixel affinities in natural images.
@defgroup ximgproc_edgeboxes EdgeBoxes
@@ -84,16 +85,16 @@ i.e. algorithms which somehow takes into account pixel affinities in natural ima
@defgroup ximgproc_edge_drawing EdgeDrawing
-EDGE DRAWING LIBRARY FOR GEOMETRIC FEATURE EXTRACTION AND VALIDATION
+ EDGE DRAWING LIBRARY FOR GEOMETRIC FEATURE EXTRACTION AND VALIDATION
-Edge Drawing (ED) algorithm is an proactive approach on edge detection problem. In contrast to many other existing edge detection algorithms which follow a subtractive
-approach (i.e. after applying gradient filters onto an image eliminating pixels w.r.t. several rules, e.g. non-maximal suppression and hysteresis in Canny), ED algorithm
-works via an additive strategy, i.e. it picks edge pixels one by one, hence the name Edge Drawing. Then we process those random shaped edge segments to extract higher level
-edge features, i.e. lines, circles, ellipses, etc. The popular method of extraction edge pixels from the thresholded gradient magnitudes is non-maximal supression that tests
-every pixel whether it has the maximum gradient response along its gradient direction and eliminates if it does not. However, this method does not check status of the
-neighboring pixels, and therefore might result low quality (in terms of edge continuity, smoothness, thinness, localization) edge segments. Instead of non-maximal supression,
-ED points a set of edge pixels and join them by maximizing the total gradient response of edge segments. Therefore it can extract high quality edge segments without need for
-an additional hysteresis step.
+    The Edge Drawing (ED) algorithm is a proactive approach to the edge detection problem. In contrast to many other existing edge detection algorithms, which follow a subtractive
+    approach (i.e. after applying gradient filters onto an image, pixels are eliminated according to several rules, e.g. non-maximal suppression and hysteresis in Canny), the ED algorithm
+    works via an additive strategy, i.e. it picks edge pixels one by one, hence the name Edge Drawing. These arbitrarily shaped edge segments are then processed to extract higher-level
+    edge features, i.e. lines, circles, ellipses, etc. The popular method of extracting edge pixels from the thresholded gradient magnitudes is non-maximal suppression, which tests
+    whether every pixel has the maximum gradient response along its gradient direction and eliminates it if it does not. However, this method does not check the status of the
+    neighboring pixels, and therefore might produce low-quality (in terms of edge continuity, smoothness, thinness, localization) edge segments. Instead of non-maximal suppression,
+    ED picks a set of edge pixels and joins them by maximizing the total gradient response of the edge segments. It can therefore extract high-quality edge segments without the need for
+    an additional hysteresis step.
@defgroup ximgproc_fourier Fourier descriptors
@@ -115,8 +116,7 @@ an additional hysteresis step.
The size of the original image is required for compatibility with the imgproc functions when the boundary handling requires that pixel outside the image boundary are
"on".
-
- @}
+@}
*/
namespace cv
@@ -124,6 +124,9 @@ namespace cv
namespace ximgproc
{
+//! @addtogroup ximgproc
+//! @{
+
enum ThinningTypes{
THINNING_ZHANGSUEN = 0, // Thinning technique of Zhang-Suen
THINNING_GUOHALL = 1 // Thinning technique of Guo-Hall
@@ -139,9 +142,6 @@ enum LocalBinarizationMethods{
BINARIZATION_NICK = 3 //!< NICK technique. See @cite Khurshid2009 .
};
-//! @addtogroup ximgproc
-//! @{
-
/** @brief Performs thresholding on input images using Niblack's technique or some of the
popular variations it inspired.
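A minimal sketch combining the two enums this hunk reorders around — local binarization followed by thinning; the block size and k are illustrative values:

```cpp
#include <opencv2/ximgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray = cv::imread("page.png", cv::IMREAD_GRAYSCALE); // placeholder
    cv::Mat bin, skel;
    // Local binarization: blockSize 25, k = -0.2 (typical for dark-on-light text).
    cv::ximgproc::niBlackThreshold(gray, bin, 255, cv::THRESH_BINARY, 25, -0.2,
                                   cv::ximgproc::BINARIZATION_NIBLACK);
    // Reduce the binary shapes to 1-pixel-wide skeletons.
    cv::ximgproc::thinning(bin, skel, cv::ximgproc::THINNING_ZHANGSUEN);
    return 0;
}
```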
diff --git a/modules/ximgproc/include/opencv2/ximgproc/color_match.hpp b/modules/ximgproc/include/opencv2/ximgproc/color_match.hpp
index c18390d4ac6..8408b5b2331 100644
--- a/modules/ximgproc/include/opencv2/ximgproc/color_match.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc/color_match.hpp
@@ -61,6 +61,8 @@ CV_EXPORTS_W void qdft(InputArray img, OutputArray qimg, int flags, bool sideL
*/
CV_EXPORTS_W void colorMatchTemplate(InputArray img, InputArray templ, OutputArray result);
+//! @}
+
}
}
#endif
diff --git a/modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp b/modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp
index 26d3b6759da..18adade6f90 100644
--- a/modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp
@@ -71,6 +71,8 @@ CV_EXPORTS_W void GradientDericheY(InputArray op, OutputArray dst, double alpha,
*/
CV_EXPORTS_W void GradientDericheX(InputArray op, OutputArray dst, double alpha,double omega);
+//! @}
+
}
}
#endif
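A minimal sketch of the two directional filters declared here, combined into a gradient magnitude; the alpha/omega values are illustrative:

```cpp
#include <opencv2/ximgproc/deriche_filter.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // placeholder
    cv::Mat dx, dy, mag;
    const double alpha = 1.0, omega = 0.001; // illustrative smoothing parameters
    cv::ximgproc::GradientDericheX(img, dx, alpha, omega);
    cv::ximgproc::GradientDericheY(img, dy, alpha, omega);
    cv::magnitude(dx, dy, mag); // combine the two IIR responses
    return 0;
}
```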
diff --git a/modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp b/modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp
index f5685ce39bb..758b61b4349 100644
--- a/modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp
@@ -26,8 +26,8 @@ namespace cv { namespace ximgproc {
*/
CV_EXPORTS_W void edgePreservingFilter( InputArray src, OutputArray dst, int d, double threshold );
-}} // namespace
-
//! @}
+}} // namespace
+
#endif
diff --git a/modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp b/modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp
index adfbf543b57..94668b06520 100644
--- a/modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp
@@ -82,8 +82,7 @@ enum AngleRangeOption
* two operands. Formally, a binary operation @f$ f @f$ on a set @f$ S @f$
* is a binary relation that maps elements of the Cartesian product
* @f$ S \times S @f$ to @f$ S @f$:
-* @f[ f: S \times S \to S @f]
- * @ingroup MinUtils_MathOper
+ * @f[ f: S \times S \to S @f]
*/
enum HoughOp
{
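(The enum body continues in the header.) A minimal sketch of the transform this enum parameterizes, using `FHT_ADD` as the binary operation @f$ f @f$; the edge-map input is a placeholder:

```cpp
#include <opencv2/ximgproc/fast_hough_transform.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat edges = cv::imread("edges.png", cv::IMREAD_GRAYSCALE); // placeholder edge map
    cv::Mat hough;
    // Accumulate with FHT_ADD, one of the HoughOp binary operations above.
    cv::ximgproc::FastHoughTransform(edges, hough, CV_32S,
                                     cv::ximgproc::ARO_315_135,
                                     cv::ximgproc::FHT_ADD,
                                     cv::ximgproc::HDO_DESKEW);
    return 0;
}
```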
diff --git a/modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp b/modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp
index 03754a1119d..56fcd3c9618 100644
--- a/modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp
@@ -61,6 +61,8 @@ namespace ximgproc {
CV_EXPORTS void GradientPaillouY(InputArray op, OutputArray _dst, double alpha, double omega);
CV_EXPORTS void GradientPaillouX(InputArray op, OutputArray _dst, double alpha, double omega);
+//! @}
+
}
}
#endif
diff --git a/modules/ximgproc/include/opencv2/ximgproc/peilin.hpp b/modules/ximgproc/include/opencv2/ximgproc/peilin.hpp
index 1b224aaf88b..194f12e1196 100644
--- a/modules/ximgproc/include/opencv2/ximgproc/peilin.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc/peilin.hpp
@@ -27,6 +27,8 @@ namespace cv { namespace ximgproc {
/** @overload */
CV_EXPORTS_W void PeiLinNormalization ( InputArray I, OutputArray T );
+ //! @}
+
}} // namespace
#endif
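A minimal sketch of the overload shown above, applying the computed normalization transform; the shape image is a placeholder:

```cpp
#include <opencv2/ximgproc/peilin.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat shape = cv::imread("shape.png", cv::IMREAD_GRAYSCALE); // placeholder binary shape
    cv::Mat T, normalized;
    cv::ximgproc::PeiLinNormalization(shape, T); // 2x3 affine normalization transform
    cv::warpAffine(shape, normalized, T, shape.size());
    return 0;
}
```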
diff --git a/modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp b/modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp
index c19e2d858db..6cf2eb663c1 100644
--- a/modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp
+++ b/modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp
@@ -113,6 +113,8 @@ CV_EXPORTS void createRLEImage(const std::vector<cv::Point3i>& runs, OutputArray
CV_EXPORTS void morphologyEx(InputArray rlSrc, OutputArray rlDest, int op, InputArray rlKernel,
bool bBoundaryOnForErosion = true, Point anchor = Point(0,0));
+//! @}
+
}
}
}
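A minimal sketch of the run-length pipeline around `morphologyEx`; the threshold and kernel size are illustrative:

```cpp
#include <opencv2/ximgproc/run_length_morphology.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    namespace rl = cv::ximgproc::rl;
    cv::Mat gray = cv::imread("blobs.png", cv::IMREAD_GRAYSCALE); // placeholder
    cv::Mat rlBin, rlDilated, painted = cv::Mat::zeros(gray.size(), CV_8UC1);
    rl::threshold(gray, rlBin, 127, cv::THRESH_BINARY); // binarize into run-length encoding
    cv::Mat kernel = rl::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7));
    rl::morphologyEx(rlBin, rlDilated, cv::MORPH_DILATE, kernel);
    rl::paint(painted, rlDilated, cv::Scalar(255)); // back to a dense image
    return 0;
}
```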