diff --git a/modules/ml/CMakeLists.txt b/modules/ml/CMakeLists.txt new file mode 100644 index 00000000000..794653e8129 --- /dev/null +++ b/modules/ml/CMakeLists.txt @@ -0,0 +1,10 @@ +set(the_description "Machine Learning") + +ocv_add_module(ml opencv_core WRAP java objc python) +ocv_glob_module_sources() +ocv_module_include_directories() +ocv_create_module() + +ocv_add_accuracy_tests() +ocv_add_perf_tests() +ocv_add_samples(opencv_imgproc opencv_objdetect opencv_video) \ No newline at end of file diff --git a/modules/ml/doc/ml.bib b/modules/ml/doc/ml.bib new file mode 100644 index 00000000000..709b1844d27 --- /dev/null +++ b/modules/ml/doc/ml.bib @@ -0,0 +1,60 @@ +@inproceedings{RPROP93, + author = {Riedmiller, Martin and Braun, Heinrich}, + title = {A direct adaptive method for faster backpropagation learning: The RPROP algorithm}, + booktitle = {Neural Networks, 1993., IEEE International Conference on}, + year = {1993}, + pages = {586--591}, + publisher = {IEEE} +} +@article{Kirkpatrick83, + author = {Kirkpatrick, S. and Gelatt, C. D. Jr and Vecchi, M. P.}, + title = {Optimization by Simulated Annealing}, + year = {1983}, + pages = {671--680}, + journal = {Science}, + volume = {220}, + number = {4598}, + publisher = {American Association for the Advancement of Science}, + url = {http://sci2s.ugr.es/sites/default/files/files/Teaching/GraduatesCourses/Metaheuristicas/Bibliography/1983-Science-Kirkpatrick-sim_anneal.pdf} +} +@incollection{bottou2010large, + title = {Large-scale machine learning with stochastic gradient descent}, + author = {Bottou, L{\'e}on}, + booktitle = {Proceedings of COMPSTAT'2010}, + pages = {177--186}, + year = {2010}, + publisher = {Springer} +} +@article{LibSVM, + author = {Chang, Chih-Chung and Lin, Chih-Jen}, + title = {LIBSVM: a library for support vector machines}, + year = {2011}, + pages = {27}, + journal = {ACM Transactions on Intelligent Systems and Technology (TIST)}, + volume = {2}, + number = {3}, + publisher = {ACM} +} +@book{Breiman84, + title = {Classification and regression trees}, + author = {Breiman, Leo and Friedman, Jerome and Stone, Charles J and Olshen, Richard A}, + year = {1984}, + publisher = {CRC press}, + url = {https://projecteuclid.org/download/pdf_1/euclid.aos/1016218223} +} +@article{HTF01, + author = {Trevor, Hastie and Robert, Tibshirani and Jerome, Friedman}, + title = {The elements of statistical learning: data mining, inference and prediction}, + year = {2001}, + pages = {371--406}, + journal = {New York: Springer-Verlag}, + volume = {1}, + number = {8}, + url = {http://www.stat.auckland.ac.nz/~yee/784/files/ch09AdditiveModelsTrees.pdf} +} +@article{FHT98, + author = {Friedman, Jerome and Hastie, Trevor and Tibshirani, Robert}, + title = {Additive Logistic Regression: a Statistical View of Boosting}, + year = {1998}, + url = {https://projecteuclid.org/download/pdf_1/euclid.aos/1016218223} +} \ No newline at end of file diff --git a/modules/ml/doc/ml_intro.markdown b/modules/ml/doc/ml_intro.markdown new file mode 100644 index 00000000000..6fd316a349a --- /dev/null +++ b/modules/ml/doc/ml_intro.markdown @@ -0,0 +1,481 @@ +Machine Learning Overview {#ml_intro} +========================= + +[TOC] + +Training Data {#ml_intro_data} +============= + +In machine learning algorithms there is notion of training data. Training data includes several +components: + +- A set of training samples. Each training sample is a vector of values (in Computer Vision it's + sometimes referred to as feature vector). 
Usually all the vectors have the same number of + components (features); OpenCV ml module assumes that. Each feature can be ordered (i.e. its + values are floating-point numbers that can be compared with each other and strictly ordered, + i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be + integers, strings etc.). +- Optional set of responses corresponding to the samples. Training data with no responses is used + in unsupervised learning algorithms that learn structure of the supplied data based on distances + between different samples. Training data with responses is used in supervised learning + algorithms, which learn the function mapping samples to responses. Usually the responses are + scalar values, ordered (when we deal with regression problem) or categorical (when we deal with + classification problem; in this case the responses are often called "labels"). Some algorithms, + most noticeably Neural networks, can handle not only scalar, but also multi-dimensional or + vector responses. +- Another optional component is the mask of missing measurements. Most algorithms require all the + components in all the training samples be valid, but some other algorithms, such as decision + trees, can handle the cases of missing measurements. +- In the case of classification problem user may want to give different weights to different + classes. This is useful, for example, when: + - user wants to shift prediction accuracy towards lower false-alarm rate or higher hit-rate. + - user wants to compensate for significantly different amounts of training samples from + different classes. +- In addition to that, each training sample may be given a weight, if user wants the algorithm to + pay special attention to certain training samples and adjust the training model accordingly. +- Also, user may wish not to use the whole training data at once, but rather use parts of it, e.g. + to do parameter optimization via cross-validation procedure. + +As you can see, training data can have rather complex structure; besides, it may be very big and/or +not entirely available, so there is need to make abstraction for this concept. In OpenCV ml there is +cv::ml::TrainData class for that. + +@sa cv::ml::TrainData + +Normal Bayes Classifier {#ml_intro_bayes} +======================= + +This simple classification model assumes that feature vectors from each class are normally +distributed (though, not necessarily independently distributed). So, the whole data distribution +function is assumed to be a Gaussian mixture, one component per class. Using the training data the +algorithm estimates mean vectors and covariance matrices for every class, and then it uses them for +prediction. + +@sa cv::ml::NormalBayesClassifier + +K-Nearest Neighbors {#ml_intro_knn} +=================== + +The algorithm caches all training samples and predicts the response for a new sample by analyzing a +certain number (__K__) of the nearest neighbors of the sample using voting, calculating weighted +sum, and so on. The method is sometimes referred to as "learning by example" because for prediction +it looks for the feature vector with a known response that is closest to the given vector. + +@sa cv::ml::KNearest + +Support Vector Machines {#ml_intro_svm} +======================= + +Originally, support vector machines (SVM) was a technique for building an optimal binary (2-class) +classifier. Later the technique was extended to regression and clustering problems. 
SVM is a partial +case of kernel-based methods. It maps feature vectors into a higher-dimensional space using a kernel +function and builds an optimal linear discriminating function in this space or an optimal hyper- +plane that fits into the training data. In case of SVM, the kernel is not defined explicitly. +Instead, a distance between any 2 points in the hyper-space needs to be defined. + +The solution is optimal, which means that the margin between the separating hyper-plane and the +nearest feature vectors from both classes (in case of 2-class classifier) is maximal. The feature +vectors that are the closest to the hyper-plane are called _support vectors_, which means that the +position of other vectors does not affect the hyper-plane (the decision function). + +SVM implementation in OpenCV is based on @cite LibSVM + +@sa cv::ml::SVM + +Prediction with SVM {#ml_intro_svm_predict} +------------------- + +StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get +the raw response from SVM (in the case of regression, 1-class or 2-class classification problem). + +Decision Trees {#ml_intro_trees} +============== + +The ML classes discussed in this section implement Classification and Regression Tree algorithms +described in @cite Breiman84 . + +The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It's +also a base class for RTrees and Boost. + +A decision tree is a binary tree (tree where each non-leaf node has two child nodes). It can be used +either for classification or for regression. For classification, each tree leaf is marked with a +class label; multiple leaves may have the same label. For regression, a constant is also assigned to +each tree leaf, so the approximation function is piecewise constant. + +@sa cv::ml::DTrees + +Predicting with Decision Trees {#ml_intro_trees_predict} +------------------------------ + +To reach a leaf node and to obtain a response for the input feature vector, the prediction procedure +starts with the root node. From each non-leaf node the procedure goes to the left (selects the left +child node as the next observed node) or to the right based on the value of a certain variable whose +index is stored in the observed node. The following variables are possible: + +- __Ordered variables.__ The variable value is compared with a threshold that is also stored in + the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it + goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the + left, else to the right. + +- __Categorical variables.__ A discrete variable value is tested to see whether it belongs to a + certain subset of values (also stored in the node) from a limited set of values the variable + could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For + example, if the color is green or red, go to the left, else to the right. + +So, in each node, a pair of entities (variable_index , `decision_rule (threshold/subset)` ) is used. +This pair is called a _split_ (split on the variable variable_index ). Once a leaf node is reached, +the value assigned to this node is used as the output of the prediction procedure. 
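
As a rough, self-contained sketch of this training/prediction workflow with cv::ml::DTrees (the toy data and the parameter values below are illustrative assumptions, not recommended settings):

@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;

int main()
{
    // Four 2-component feature vectors (one row per sample) and their class labels.
    float samplesData[] = { 0.1f, 0.2f,   0.3f, 0.9f,   0.8f, 0.1f,   0.9f, 0.7f };
    int   labelsData[]  = { 0, 0, 1, 1 };
    Mat samples(4, 2, CV_32F, samplesData);
    Mat labels(4, 1, CV_32S, labelsData);   // CV_32S responses => classification

    Ptr<DTrees> dtree = DTrees::create();
    dtree->setMaxDepth(8);        // stop growing a branch at this depth
    dtree->setMinSampleCount(2);  // do not split nodes with fewer samples
    dtree->setCVFolds(0);         // no built-in cross-validation pruning
    dtree->train(TrainData::create(samples, ROW_SAMPLE, labels));

    // predict() walks the tree from the root node as described above and
    // returns the value stored in the reached leaf.
    Mat query = (Mat_<float>(1, 2) << 0.85f, 0.15f);
    float predictedLabel = dtree->predict(query);
    return (int)predictedLabel;
}
@endcode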
+ +Sometimes, certain features of the input vector are missed (for example, in the darkness it is +difficult to determine the object color), and the prediction procedure may get stuck in the certain +node (in the mentioned example, if the node is split by color). To avoid such situations, decision +trees use so-called _surrogate splits_. That is, in addition to the best "primary" split, every tree +node may also be split to one or more other variables with nearly the same results. + +Training Decision Trees {#ml_intro_trees_train} +----------------------- + +The tree is built recursively, starting from the root node. All training data (feature vectors and +responses) is used to split the root node. In each node the optimum decision rule (the best +"primary" split) is found based on some criteria. In machine learning, gini "purity" criteria are +used for classification, and sum of squared errors is used for regression. Then, if necessary, the +surrogate splits are found. They resemble the results of the primary split on the training data. All +the data is divided using the primary and the surrogate splits (like it is done in the prediction +procedure) between the left and the right child node. Then, the procedure recursively splits both +left and right nodes. At each node the recursive procedure may stop (that is, stop splitting the +node further) in one of the following cases: + +- Depth of the constructed tree branch has reached the specified maximum value. +- Number of training samples in the node is less than the specified threshold when it is not + statistically representative to split the node further. +- All the samples in the node belong to the same class or, in case of regression, the variation is + too small. +- The best found split does not give any noticeable improvement compared to a random choice. + +When the tree is built, it may be pruned using a cross-validation procedure, if necessary. That is, +some branches of the tree that may lead to the model overfitting are cut off. Normally, this +procedure is only applied to standalone decision trees. Usually tree ensembles build trees that are +small enough and use their own protection schemes against overfitting. + +Variable Importance {#ml_intro_trees_var} +------------------- + +Besides the prediction that is an obvious use of decision trees, the tree can be also used for +various data analyses. One of the key properties of the constructed decision tree algorithms is an +ability to compute the importance (relative decisive power) of each variable. For example, in a spam +filter that uses a set of words occurred in the message as a feature vector, the variable importance +rating can be used to determine the most "spam-indicating" words and thus help keep the dictionary +size reasonable. + +Importance of each variable is computed over all the splits on this variable in the tree, primary +and surrogate ones. Thus, to compute variable importance correctly, the surrogate splits must be +enabled in the training parameters, even if there is no missing data. + +Boosting {#ml_intro_boost} +======== + +A common machine learning task is supervised learning. In supervised learning, the goal is to learn +the functional relationship \f$F: y = F(x)\f$ between the input \f$x\f$ and the output \f$y\f$ . +Predicting the qualitative output is called _classification_, while predicting the quantitative +output is called _regression_. + +Boosting is a powerful learning concept that provides a solution to the supervised classification +learning task. 
It combines the performance of many "weak" classifiers to produce a powerful committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can be very simple and computationally inexpensive. However, many of them smartly combined yield a strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and Neural Networks.

Decision trees are the most popular weak classifiers used in boosting schemes. Often the simplest decision trees with only a single split node per tree (called _stumps_) are sufficient.

The boosted model is based on \f$N\f$ training examples \f$\{(x_i,y_i)\}_{1}^{N}\f$ with \f$x_i \in R^K\f$ and \f$y_i \in \{-1, +1\}\f$. \f$x_i\f$ is a \f$K\f$-component vector. Each component encodes a feature relevant to the learning task at hand. The desired two-class output is encoded as -1 and +1.

Different variants of boosting are known as Discrete AdaBoost, Real AdaBoost, LogitBoost, and Gentle AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore, this chapter focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below. Initially the same weight is assigned to each sample (step 2). Then, a weak classifier \f$f_m(x)\f$ is trained on the weighted training data (step 3a). Its weighted training error and scaling factor \f$c_m\f$ are computed (step 3b). The weights are increased for training samples that have been misclassified (step 3c). All weights are then normalized, and the process of finding the next weak classifier continues for another \f$M-1\f$ times. The final classifier \f$F(x)\f$ is the sign of the weighted sum over the individual weak classifiers (step 4).

__Two-class Discrete AdaBoost Algorithm__

- Set \f$N\f$ examples \f$\{(x_i,y_i)\}_{1}^{N}\f$ with \f$x_i \in R^K, y_i \in \{-1, +1\}\f$.

- Assign weights as \f$w_i = 1/N, i = 1,...,N\f$.

- Repeat for \f$m = 1,2,...,M\f$:

    - Fit the classifier \f$f_m(x) \in \{-1,1\}\f$, using weights \f$w_i\f$ on the training data.

    - Compute \f$err_m = E_w [1_{(y \neq f_m(x))}], c_m = \log((1 - err_m)/err_m)\f$.

    - Set \f$w_i \Leftarrow w_i \exp[c_m 1_{(y_i \neq f_m(x_i))}], i = 1,2,...,N,\f$ and
      renormalize so that \f$\Sigma_i w_i = 1\f$.

- Classify new samples _x_ using the formula: \f$\textrm{sign} (\Sigma_{m=1}^{M} c_m f_m(x))\f$.

@note Similar to the classical boosting methods, the current implementation supports two-class classifiers only. For M \> 2 classes, there is the __AdaBoost.MH__ algorithm (described in @cite FHT98) that reduces the problem to the two-class problem, yet with a much larger training set.

To reduce computation time for boosted models without substantially losing accuracy, the influence trimming technique can be employed. As the training algorithm proceeds and the number of trees in the ensemble is increased, a larger number of the training samples are classified correctly and with increasing confidence, and those samples therefore receive smaller weights on the subsequent iterations. Examples with a very low relative weight have a small impact on the weak classifier training. Thus, such examples may be excluded during the weak classifier training without having much effect on the induced classifier. This process is controlled with the weight_trim_rate parameter. Only examples with the summary fraction weight_trim_rate of the total weight mass are used in the weak classifier training.
Note that the weights for __all__ training examples are recomputed at each training +iteration. Examples deleted at a particular iteration may be used again for learning some of the +weak classifiers further @cite FHT98 + +@sa cv::ml::Boost + +Prediction with Boost {#ml_intro_boost_predict} +--------------------- +StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get +the raw sum from Boost classifier. + +Random Trees {#ml_intro_rtrees} +============ + +Random trees have been introduced by Leo Breiman and Adele Cutler: + . The algorithm can deal with both +classification and regression problems. Random trees is a collection (ensemble) of tree predictors +that is called _forest_ further in this section (the term has been also introduced by L. Breiman). +The classification works as follows: the random trees classifier takes the input feature vector, +classifies it with every tree in the forest, and outputs the class label that received the majority +of "votes". In case of a regression, the classifier response is the average of the responses over +all the trees in the forest. + +All the trees are trained with the same parameters but on different training sets. These sets are +generated from the original training set using the bootstrap procedure: for each training set, you +randomly select the same number of vectors as in the original set ( =N ). The vectors are chosen +with replacement. That is, some vectors will occur more than once and some will be absent. At each +node of each trained tree, not all the variables are used to find the best split, but a random +subset of them. With each node a new subset is generated. However, its size is fixed for all the +nodes and all the trees. It is a training parameter set to \f$\sqrt{number\_of\_variables}\f$ by +default. None of the built trees are pruned. + +In random trees there is no need for any accuracy estimation procedures, such as cross-validation or +bootstrap, or a separate test set to get an estimate of the training error. The error is estimated +internally during the training. When the training set for the current tree is drawn by sampling with +replacement, some vectors are left out (so-called _oob (out-of-bag) data_ ). The size of oob data is +about N/3 . The classification error is estimated by using this oob-data as follows: + +- Get a prediction for each vector, which is oob relative to the i-th tree, using the very i-th + tree. + +- After all the trees have been trained, for each vector that has ever been oob, find the + class-winner for it (the class that has got the majority of votes in the trees where + the vector was oob) and compare it to the ground-truth response. + +- Compute the classification error estimate as a ratio of the number of misclassified oob vectors + to all the vectors in the original data. In case of regression, the oob-error is computed as the + squared error for oob vectors difference divided by the total number of vectors. + +For the random trees usage example, please, see letter_recog.cpp sample in OpenCV distribution. + +@sa cv::ml::RTrees + +__References:__ + +- _Machine Learning_, Wald I, July 2002. + +- _Looking Inside the Black Box_, Wald II, July 2002. + +- _Software for the Masses_, Wald III, July 2002. 
+ +- And other articles from the web site + + +Expectation Maximization {#ml_intro_em} +======================== + +The Expectation Maximization(EM) algorithm estimates the parameters of the multivariate probability +density function in the form of a Gaussian mixture distribution with a specified number of mixtures. + +Consider the set of the N feature vectors { \f$x_1, x_2,...,x_{N}\f$ } from a d-dimensional Euclidean +space drawn from a Gaussian mixture: + +\f[p(x;a_k,S_k, \pi _k) = \sum _{k=1}^{m} \pi _kp_k(x), \quad \pi _k \geq 0, \quad \sum _{k=1}^{m} \pi _k=1,\f] + +\f[p_k(x)= \varphi (x;a_k,S_k)= \frac{1}{(2\pi)^{d/2}\mid{S_k}\mid^{1/2}} exp \left \{ - \frac{1}{2} (x-a_k)^TS_k^{-1}(x-a_k) \right \} ,\f] + +where \f$m\f$ is the number of mixtures, \f$p_k\f$ is the normal distribution density with the mean +\f$a_k\f$ and covariance matrix \f$S_k\f$, \f$\pi_k\f$ is the weight of the k-th mixture. Given the +number of mixtures \f$M\f$ and the samples \f$x_i\f$, \f$i=1..N\f$ the algorithm finds the maximum- +likelihood estimates (MLE) of all the mixture parameters, that is, \f$a_k\f$, \f$S_k\f$ and +\f$\pi_k\f$ : + +\f[L(x, \theta )=logp(x, \theta )= \sum _{i=1}^{N}log \left ( \sum _{k=1}^{m} \pi _kp_k(x) \right ) \to \max _{ \theta \in \Theta },\f] + +\f[\Theta = \left \{ (a_k,S_k, \pi _k): a_k \in \mathbbm{R} ^d,S_k=S_k^T>0,S_k \in \mathbbm{R} ^{d \times d}, \pi _k \geq 0, \sum _{k=1}^{m} \pi _k=1 \right \} .\f] + +The EM algorithm is an iterative procedure. Each iteration includes two steps. At the first step +(Expectation step or E-step), you find a probability \f$p_{i,k}\f$ (denoted \f$\alpha_{i,k}\f$ in +the formula below) of sample i to belong to mixture k using the currently available mixture +parameter estimates: + +\f[\alpha _{ki} = \frac{\pi_k\varphi(x;a_k,S_k)}{\sum\limits_{j=1}^{m}\pi_j\varphi(x;a_j,S_j)} .\f] + +At the second step (Maximization step or M-step), the mixture parameter estimates are refined using +the computed probabilities: + +\f[\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}\f] + +Alternatively, the algorithm may start with the M-step when the initial values for \f$p_{i,k}\f$ can +be provided. Another alternative when \f$p_{i,k}\f$ are unknown is to use a simpler clustering +algorithm to pre-cluster the input samples and thus obtain initial \f$p_{i,k}\f$ . Often (including +machine learning) the k-means algorithm is used for that purpose. + +One of the main problems of the EM algorithm is a large number of parameters to estimate. The +majority of the parameters reside in covariance matrices, which are \f$d \times d\f$ elements each +where \f$d\f$ is the feature space dimensionality. However, in many practical problems, the +covariance matrices are close to diagonal or even to \f$\mu_k*I\f$ , where \f$I\f$ is an identity +matrix and \f$\mu_k\f$ is a mixture-dependent "scale" parameter. So, a robust computation scheme +could start with harder constraints on the covariance matrices and then use the estimated parameters +as an input for a less constrained optimization problem (often a diagonal covariance matrix is +already a good enough approximation). + +@sa cv::ml::EM + +References: +- Bilmes98 J. A. Bilmes. _A Gentle Tutorial of the EM Algorithm and its Application to Parameter +Estimation for Gaussian Mixture and Hidden Markov Models_. 
Technical Report TR-97-021, +International Computer Science Institute and Computer Science Division, University of California +at Berkeley, April 1998. + +Neural Networks {#ml_intro_ann} +=============== + +ML implements feed-forward artificial neural networks or, more particularly, multi-layer perceptrons +(MLP), the most commonly used type of neural networks. MLP consists of the input layer, output +layer, and one or more hidden layers. Each layer of MLP includes one or more neurons directionally +linked with the neurons from the previous and the next layer. The example below represents a 3-layer +perceptron with three inputs, two outputs, and the hidden layer including five neurons: + +![image](pics/mlp.png) + +All the neurons in MLP are similar. Each of them has several input links (it takes the output values +from several neurons in the previous layer as input) and several output links (it passes the +response to several neurons in the next layer). The values retrieved from the previous layer are +summed up with certain weights, individual for each neuron, plus the bias term. The sum is +transformed using the activation function \f$f\f$ that may be also different for different neurons. + +![image](pics/neuron_model.png) + +In other words, given the outputs \f$x_j\f$ of the layer \f$n\f$ , the outputs \f$y_i\f$ of the +layer \f$n+1\f$ are computed as: + +\f[u_i = \sum _j (w^{n+1}_{i,j}*x_j) + w^{n+1}_{i,bias}\f] + +\f[y_i = f(u_i)\f] + +Different activation functions may be used. ML implements three standard functions: + +- Identity function ( cv::ml::ANN_MLP::IDENTITY ): \f$f(x)=x\f$ + +- Symmetrical sigmoid ( cv::ml::ANN_MLP::SIGMOID_SYM ): \f$f(x)=\beta*(1-e^{-\alpha + x})/(1+e^{-\alpha x}\f$ ), which is the default choice for MLP. The standard sigmoid with + \f$\beta =1, \alpha =1\f$ is shown below: + + ![image](pics/sigmoid_bipolar.png) + +- Gaussian function ( cv::ml::ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not + completely supported at the moment. + +In ML, all the neurons have the same activation functions, with the same free parameters ( +\f$\alpha, \beta\f$ ) that are specified by user and are not altered by the training algorithms. + +So, the whole trained network works as follows: + +1. Take the feature vector as input. The vector size is equal to the size of the input layer. +2. Pass values as input to the first hidden layer. +3. Compute outputs of the hidden layer using the weights and the activation functions. +4. Pass outputs further downstream until you compute the output layer. + +So, to compute the network, you need to know all the weights \f$w^{n+1)}_{i,j}\f$ . The weights are +computed by the training algorithm. The algorithm takes a training set, multiple input vectors with +the corresponding output vectors, and iteratively adjusts the weights to enable the network to give +the desired response to the provided input vectors. + +The larger the network size (the number of hidden layers and their sizes) is, the more the potential +network flexibility is. The error on the training set could be made arbitrarily small. But at the +same time the learned network also "learns" the noise present in the training set, so the error on +the test set usually starts increasing after the network size reaches a limit. Besides, the larger +networks are trained much longer than the smaller ones, so it is reasonable to pre-process the data, +using cv::PCA or similar technique, and train a smaller network on only essential features. 
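
As a rough usage sketch of the training and prediction interface described here (the layer sizes, toy XOR-style data, activation parameters and termination criteria are illustrative assumptions, not tuned values):

@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;

int main()
{
    // Toy XOR-style problem: 2 inputs, 1 output, one row per training sample.
    float in[]  = { 0,0,  0,1,  1,0,  1,1 };
    float out[] = { 0, 1, 1, 0 };
    Mat inputs(4, 2, CV_32F, in);
    Mat targets(4, 1, CV_32F, out);

    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    Mat layers = (Mat_<int>(3, 1) << 2, 4, 1);              // input, one hidden, output layer sizes
    mlp->setLayerSizes(layers);
    mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1); // symmetric sigmoid, alpha = beta = 1
    mlp->setTrainMethod(ANN_MLP::RPROP);                    // default batch RPROP training
    mlp->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 1e-4));
    mlp->train(TrainData::create(inputs, ROW_SAMPLE, targets));

    // Forward pass through the trained network for one feature vector.
    Mat query = (Mat_<float>(1, 2) << 1.f, 0.f);
    Mat response;
    mlp->predict(query, response);   // response is a 1x1 CV_32F matrix
    return 0;
}
@endcode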
Another MLP feature is an inability to handle categorical data as is. However, there is a workaround. If a certain feature in the input or output (in case of an \f$n\f$-class classifier for \f$n>2\f$) layer is categorical and can take \f$M>2\f$ different values, it makes sense to represent it as a binary tuple of \f$M\f$ elements, where the i-th element is 1 if and only if the feature is equal to the i-th value out of \f$M\f$ possible. It increases the size of the input/output layer but speeds up the training algorithm convergence and at the same time enables "fuzzy" values of such variables, that is, a tuple of probabilities instead of a fixed value.

ML implements two algorithms for training MLPs. The first algorithm is a classical random sequential back-propagation algorithm. The second (default) one is a batch RPROP algorithm.

@sa cv::ml::ANN_MLP

Logistic Regression {#ml_intro_lr}
===================

ML implements logistic regression, which is a probabilistic classification technique. Logistic Regression is a binary classification algorithm which is closely related to Support Vector Machines (SVM). Like SVM, Logistic Regression can be extended to work on multi-class classification problems like digit recognition (i.e. recognizing digits like 0, 1, 2, 3, ... from the given images). This version of Logistic Regression supports both binary and multi-class classifications (for multi-class it creates multiple 2-class classifiers). In order to train the logistic regression classifier, Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see ). Logistic Regression is a discriminative classifier (see for more details). Logistic Regression is implemented as a C++ class in LogisticRegression.

In Logistic Regression, we try to optimize the training parameter \f$\theta\f$ such that the hypothesis \f$0 \leq h_\theta(x) \leq 1\f$ is achieved. We have \f$h_\theta(x) = g(\theta^T x)\f$ with \f$g(z) = \frac{1}{1+e^{-z}}\f$ as the logistic or sigmoid function. The term "Logistic" in Logistic Regression refers to this function. For given data of a binary classification problem of classes 0 and 1, one can determine that the given data instance belongs to class 1 if \f$h_\theta(x) \geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$.

In Logistic Regression, choosing the right parameters is of utmost importance for reducing the training error and ensuring high training accuracy:

- The learning rate can be set with the @ref cv::ml::LogisticRegression::setLearningRate "setLearningRate"
  method. It determines how fast we approach the solution. It is a positive real number.

- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported
  in LogisticRegression. It is important to specify the number of iterations these optimization
  algorithms have to run. The number of iterations can be set with @ref
  cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought of as the
  number of steps taken, while the learning rate specifies whether each step is long or short. This
  parameter and the previous one together define how fast we arrive at a possible solution.

- To compensate for overfitting, regularization is performed, which can be enabled with
  @ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what
  kind of regularization has to be performed by passing one of @ref
  cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.
+ +- Logistic regression implementation provides a choice of 2 training methods with Batch Gradient + Descent or the MiniBatch Gradient Descent. To specify this, call @ref + cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref + cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref + cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If training method is + set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch has + to be to a positive integer set with @ref cv::ml::LogisticRegression::setMiniBatchSize + "setMiniBatchSize". + +A sample set of training parameters for the Logistic Regression classifier can be initialized as follows: +@snippet samples/logistic_regression.cpp init + +@sa cv::ml::LogisticRegression diff --git a/modules/ml/doc/pics/SVM_Comparison.png b/modules/ml/doc/pics/SVM_Comparison.png new file mode 100644 index 00000000000..4bb3dababc2 Binary files /dev/null and b/modules/ml/doc/pics/SVM_Comparison.png differ diff --git a/modules/ml/doc/pics/mlp.png b/modules/ml/doc/pics/mlp.png new file mode 100644 index 00000000000..ce3392c4542 Binary files /dev/null and b/modules/ml/doc/pics/mlp.png differ diff --git a/modules/ml/doc/pics/neuron_model.png b/modules/ml/doc/pics/neuron_model.png new file mode 100644 index 00000000000..635a5318041 Binary files /dev/null and b/modules/ml/doc/pics/neuron_model.png differ diff --git a/modules/ml/doc/pics/sigmoid_bipolar.png b/modules/ml/doc/pics/sigmoid_bipolar.png new file mode 100644 index 00000000000..d94a85031d8 Binary files /dev/null and b/modules/ml/doc/pics/sigmoid_bipolar.png differ diff --git a/modules/ml/include/opencv2/ml.hpp b/modules/ml/include/opencv2/ml.hpp new file mode 100644 index 00000000000..d537ab7759b --- /dev/null +++ b/modules/ml/include/opencv2/ml.hpp @@ -0,0 +1,1956 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_ML_HPP +#define OPENCV_ML_HPP + +#ifdef __cplusplus +# include "opencv2/core.hpp" +#endif + +#ifdef __cplusplus + +#include +#include +#include + +/** + @defgroup ml Machine Learning + + The Machine Learning Library (MLL) is a set of classes and functions for statistical + classification, regression, and clustering of data. + + Most of the classification and regression algorithms are implemented as C++ classes. As the + algorithms have different sets of features (like an ability to handle missing measurements or + categorical input variables), there is a little common ground between the classes. This common + ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from. + + See detailed overview here: @ref ml_intro. + */ + +namespace cv +{ + +namespace ml +{ + +//! @addtogroup ml +//! @{ + +/** @brief Variable types */ +enum VariableTypes +{ + VAR_NUMERICAL =0, //!< same as VAR_ORDERED + VAR_ORDERED =0, //!< ordered variables + VAR_CATEGORICAL =1 //!< categorical variables +}; + +/** @brief %Error types */ +enum ErrorTypes +{ + TEST_ERROR = 0, + TRAIN_ERROR = 1 +}; + +/** @brief Sample types */ +enum SampleTypes +{ + ROW_SAMPLE = 0, //!< each training sample is a row of samples + COL_SAMPLE = 1 //!< each training sample occupies a column of samples +}; + +/** @brief The structure represents the logarithmic grid range of statmodel parameters. + +It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate +being computed by cross-validation. + */ +class CV_EXPORTS_W ParamGrid +{ +public: + /** @brief Default constructor */ + ParamGrid(); + /** @brief Constructor with parameters */ + ParamGrid(double _minVal, double _maxVal, double _logStep); + + CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0. + CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0. + /** @brief Logarithmic step for iterating the statmodel parameter. + + The grid determines the following iteration sequence of the statmodel parameter values: + \f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f] + where \f$n\f$ is the maximal index satisfying + \f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f] + The grid is logarithmic, so logStep must always be greater than 1. Default value is 1. + */ + CV_PROP_RW double logStep; + + /** @brief Creates a ParamGrid Ptr that can be given to the %SVM::trainAuto method + + @param minVal minimum value of the parameter grid + @param maxVal maximum value of the parameter grid + @param logstep Logarithmic step for iterating the statmodel parameter + */ + CV_WRAP static Ptr create(double minVal=0., double maxVal=0., double logstep=1.); +}; + +/** @brief Class encapsulating training data. + +Please note that the class only specifies the interface of training data, but not implementation. 
+All the statistical model classes in _ml_ module accepts Ptr\ as parameter. In other +words, you can create your own class derived from TrainData and pass smart pointer to the instance +of this class into StatModel::train. + +@sa @ref ml_intro_data + */ +class CV_EXPORTS_W TrainData +{ +public: + static inline float missingValue() { return FLT_MAX; } + virtual ~TrainData(); + + CV_WRAP virtual int getLayout() const = 0; + CV_WRAP virtual int getNTrainSamples() const = 0; + CV_WRAP virtual int getNTestSamples() const = 0; + CV_WRAP virtual int getNSamples() const = 0; + CV_WRAP virtual int getNVars() const = 0; + CV_WRAP virtual int getNAllVars() const = 0; + + CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0; + CV_WRAP virtual Mat getSamples() const = 0; + CV_WRAP virtual Mat getMissing() const = 0; + + /** @brief Returns matrix of train samples + + @param layout The requested layout. If it's different from the initial one, the matrix is + transposed. See ml::SampleTypes. + @param compressSamples if true, the function returns only the training samples (specified by + sampleIdx) + @param compressVars if true, the function returns the shorter training samples, containing only + the active variables. + + In current implementation the function tries to avoid physical data copying and returns the + matrix stored inside TrainData (unless the transposition or compression is needed). + */ + CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE, + bool compressSamples=true, + bool compressVars=true) const = 0; + + /** @brief Returns the vector of responses + + The function returns ordered or the original categorical responses. Usually it's used in + regression algorithms. + */ + CV_WRAP virtual Mat getTrainResponses() const = 0; + + /** @brief Returns the vector of normalized categorical responses + + The function returns vector of responses. Each response is integer from `0` to `-1`. The actual label value can be retrieved then from the class label vector, see + TrainData::getClassLabels. + */ + CV_WRAP virtual Mat getTrainNormCatResponses() const = 0; + CV_WRAP virtual Mat getTestResponses() const = 0; + CV_WRAP virtual Mat getTestNormCatResponses() const = 0; + CV_WRAP virtual Mat getResponses() const = 0; + CV_WRAP virtual Mat getNormCatResponses() const = 0; + CV_WRAP virtual Mat getSampleWeights() const = 0; + CV_WRAP virtual Mat getTrainSampleWeights() const = 0; + CV_WRAP virtual Mat getTestSampleWeights() const = 0; + CV_WRAP virtual Mat getVarIdx() const = 0; + CV_WRAP virtual Mat getVarType() const = 0; + CV_WRAP virtual Mat getVarSymbolFlags() const = 0; + CV_WRAP virtual int getResponseType() const = 0; + CV_WRAP virtual Mat getTrainSampleIdx() const = 0; + CV_WRAP virtual Mat getTestSampleIdx() const = 0; + CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0; + virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0; + CV_WRAP virtual Mat getDefaultSubstValues() const = 0; + + CV_WRAP virtual int getCatCount(int vi) const = 0; + + /** @brief Returns the vector of class labels + + The function returns vector of unique labels occurred in the responses. 
+ */ + CV_WRAP virtual Mat getClassLabels() const = 0; + + CV_WRAP virtual Mat getCatOfs() const = 0; + CV_WRAP virtual Mat getCatMap() const = 0; + + /** @brief Splits the training data into the training and test parts + @sa TrainData::setTrainTestSplitRatio + */ + CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0; + + /** @brief Splits the training data into the training and test parts + + The function selects a subset of specified relative size and then returns it as the training + set. If the function is not called, all the data is used for training. Please, note that for + each of TrainData::getTrain\* there is corresponding TrainData::getTest\*, so that the test + subset can be retrieved and processed as well. + @sa TrainData::setTrainTestSplit + */ + CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0; + CV_WRAP virtual void shuffleTrainTest() = 0; + + /** @brief Returns matrix of test samples */ + CV_WRAP virtual Mat getTestSamples() const = 0; + + /** @brief Returns vector of symbolic names captured in loadFromCSV() */ + CV_WRAP virtual void getNames(std::vector& names) const = 0; + + /** @brief Extract from 1D vector elements specified by passed indexes. + @param vec input vector (supported types: CV_32S, CV_32F, CV_64F) + @param idx 1D index vector + */ + static CV_WRAP Mat getSubVector(const Mat& vec, const Mat& idx); + + /** @brief Extract from matrix rows/cols specified by passed indexes. + @param matrix input matrix (supported types: CV_32S, CV_32F, CV_64F) + @param idx 1D index vector + @param layout specifies to extract rows (cv::ml::ROW_SAMPLES) or to extract columns (cv::ml::COL_SAMPLES) + */ + static CV_WRAP Mat getSubMatrix(const Mat& matrix, const Mat& idx, int layout); + + /** @brief Reads the dataset from a .csv file and returns the ready-to-use training data. + + @param filename The input file name + @param headerLineCount The number of lines in the beginning to skip; besides the header, the + function also skips empty lines and lines staring with `#` + @param responseStartIdx Index of the first output variable. If -1, the function considers the + last variable as the response + @param responseEndIdx Index of the last output variable + 1. If -1, then there is single + response variable at responseStartIdx. + @param varTypeSpec The optional text string that specifies the variables' types. It has the + format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2` + (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are + considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` + should cover all the variables. If varTypeSpec is not specified, then algorithm uses the + following rules: + - all input variables are considered ordered by default. If some column contains has non- + numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding + variable is considered categorical. + - if there are several output variables, they are all considered as ordered. Error is + reported when non-numerical values are used. + - if there is a single output variable, then if its values are non-numerical or are all + integers, then it's considered categorical. Otherwise, it's considered ordered. + @param delimiter The character used to separate values in each line. + @param missch The character used to specify missing measurements. It should not be a digit. 
+ Although it's a non-numerical value, it surely does not affect the decision of whether the + variable ordered or categorical. + @note If the dataset only contains input variables and no responses, use responseStartIdx = -2 + and responseEndIdx = 0. The output variables vector will just contain zeros. + */ + static Ptr loadFromCSV(const String& filename, + int headerLineCount, + int responseStartIdx=-1, + int responseEndIdx=-1, + const String& varTypeSpec=String(), + char delimiter=',', + char missch='?'); + + /** @brief Creates training data from in-memory arrays. + + @param samples matrix of samples. It should have CV_32F type. + @param layout see ml::SampleTypes. + @param responses matrix of responses. If the responses are scalar, they should be stored as a + single row or as a single column. The matrix should have type CV_32F or CV_32S (in the + former case the responses are considered as ordered by default; in the latter case - as + categorical) + @param varIdx vector specifying which variables to use for training. It can be an integer vector + (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of + active variables. + @param sampleIdx vector specifying which samples to use for training. It can be an integer + vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask + of training samples. + @param sampleWeights optional vector with weights for each sample. It should have CV_32F type. + @param varType optional vector of type CV_8U and size ` + + `, containing types of each input and output variable. See + ml::VariableTypes. + */ + CV_WRAP static Ptr create(InputArray samples, int layout, InputArray responses, + InputArray varIdx=noArray(), InputArray sampleIdx=noArray(), + InputArray sampleWeights=noArray(), InputArray varType=noArray()); +}; + +/** @brief Base class for statistical models in OpenCV ML. + */ +class CV_EXPORTS_W StatModel : public Algorithm +{ +public: + /** Predict options */ + enum Flags { + UPDATE_MODEL = 1, + RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label + COMPRESSED_INPUT=2, + PREPROCESSED_INPUT=4 + }; + + /** @brief Returns the number of variables in training samples */ + CV_WRAP virtual int getVarCount() const = 0; + + CV_WRAP virtual bool empty() const CV_OVERRIDE; + + /** @brief Returns true if the model is trained */ + CV_WRAP virtual bool isTrained() const = 0; + /** @brief Returns true if the model is classifier */ + CV_WRAP virtual bool isClassifier() const = 0; + + /** @brief Trains the statistical model + + @param trainData training data that can be loaded from file using TrainData::loadFromCSV or + created with TrainData::create. + @param flags optional flags, depending on the model. Some of the models can be updated with the + new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). + */ + CV_WRAP virtual bool train( const Ptr& trainData, int flags=0 ); + + /** @brief Trains the statistical model + + @param samples training samples + @param layout See ml::SampleTypes. + @param responses vector of responses associated with the training samples. + */ + CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses ); + + /** @brief Computes error on the training or test dataset + + @param data the training data + @param test if true, the error is computed over the test subset of the data, otherwise it's + computed over the training subset of the data. 
Please note that if you loaded a completely + different dataset to evaluate already trained classifier, you will probably want not to set + the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so + that the error is computed for the whole new set. Yes, this sounds a bit confusing. + @param resp the optional output responses. + + The method uses StatModel::predict to compute the error. For regression models the error is + computed as RMS, for classifiers - as a percent of missclassified samples (0%-100%). + */ + CV_WRAP virtual float calcError( const Ptr& data, bool test, OutputArray resp ) const; + + /** @brief Predicts response(s) for the provided sample(s) + + @param samples The input samples, floating-point matrix + @param results The optional output matrix of results. + @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags. + */ + CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0; + + /** @brief Create and train model with default parameters + + The class must implement static `create()` method with no parameters or with all default parameter values + */ + template static Ptr<_Tp> train(const Ptr& data, int flags=0) + { + Ptr<_Tp> model = _Tp::create(); + return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>(); + } +}; + +/****************************************************************************************\ +* Normal Bayes Classifier * +\****************************************************************************************/ + +/** @brief Bayes classifier for normally distributed data. + +@sa @ref ml_intro_bayes + */ +class CV_EXPORTS_W NormalBayesClassifier : public StatModel +{ +public: + /** @brief Predicts the response for sample(s). + + The method estimates the most probable classes for input vectors. Input vectors (one or more) + are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one + output vector outputs. The predicted class for a single input vector is returned by the method. + The vector outputProbs contains the output probabilities corresponding to each element of + result. + */ + CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs, + OutputArray outputProbs, int flags=0 ) const = 0; + + /** Creates empty model + Use StatModel::train to train the model after creation. */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized NormalBayesClassifier from a file + * + * Use NormalBayesClassifier::save to serialize and store an NormalBayesClassifier to disk. + * Load the NormalBayesClassifier from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized NormalBayesClassifier + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* K-Nearest Neighbour Classifier * +\****************************************************************************************/ + +/** @brief The class implements K-Nearest Neighbors model + +@sa @ref ml_intro_knn + */ +class CV_EXPORTS_W KNearest : public StatModel +{ +public: + + /** Default number of neighbors to use in predict method. 
*/ + /** @see setDefaultK */ + CV_WRAP virtual int getDefaultK() const = 0; + /** @copybrief getDefaultK @see getDefaultK */ + CV_WRAP virtual void setDefaultK(int val) = 0; + + /** Whether classification or regression model should be trained. */ + /** @see setIsClassifier */ + CV_WRAP virtual bool getIsClassifier() const = 0; + /** @copybrief getIsClassifier @see getIsClassifier */ + CV_WRAP virtual void setIsClassifier(bool val) = 0; + + /** Parameter for KDTree implementation. */ + /** @see setEmax */ + CV_WRAP virtual int getEmax() const = 0; + /** @copybrief getEmax @see getEmax */ + CV_WRAP virtual void setEmax(int val) = 0; + + /** %Algorithm type, one of KNearest::Types. */ + /** @see setAlgorithmType */ + CV_WRAP virtual int getAlgorithmType() const = 0; + /** @copybrief getAlgorithmType @see getAlgorithmType */ + CV_WRAP virtual void setAlgorithmType(int val) = 0; + + /** @brief Finds the neighbors and predicts responses for input vectors. + + @param samples Input samples stored by rows. It is a single-precision floating-point matrix of + ` * k` size. + @param k Number of used nearest neighbors. Should be greater than 1. + @param results Vector with results of prediction (regression or classification) for each input + sample. It is a single-precision floating-point vector with `` elements. + @param neighborResponses Optional output values for corresponding neighbors. It is a single- + precision floating-point matrix of ` * k` size. + @param dist Optional output distances from the input vectors to the corresponding neighbors. It + is a single-precision floating-point matrix of ` * k` size. + + For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. + In case of regression, the predicted result is a mean value of the particular vector's neighbor + responses. In case of classification, the class is determined by voting. + + For each input vector, the neighbors are sorted by their distances to the vector. + + In case of C++ interface you can use output pointers to empty matrices and the function will + allocate memory itself. + + If only a single input vector is passed, all output matrices are optional and the predicted + value is returned by the method. + + The function is parallelized with the TBB library. + */ + CV_WRAP virtual float findNearest( InputArray samples, int k, + OutputArray results, + OutputArray neighborResponses=noArray(), + OutputArray dist=noArray() ) const = 0; + + /** @brief Implementations of KNearest algorithm + */ + enum Types + { + BRUTE_FORCE=1, + KDTREE=2 + }; + + /** @brief Creates the empty model + + The static method creates empty %KNearest classifier. It should be then trained using StatModel::train method. + */ + CV_WRAP static Ptr create(); + /** @brief Loads and creates a serialized knearest from a file + * + * Use KNearest::save to serialize and store an KNearest to disk. + * Load the KNearest from this file again, by calling this function with the path to the file. + * + * @param filepath path to serialized KNearest + */ + CV_WRAP static Ptr load(const String& filepath); +}; + +/****************************************************************************************\ +* Support Vector Machines * +\****************************************************************************************/ + +/** @brief Support Vector Machines. 
+ +@sa @ref ml_intro_svm + */ +class CV_EXPORTS_W SVM : public StatModel +{ +public: + + class CV_EXPORTS Kernel : public Algorithm + { + public: + virtual int getType() const = 0; + virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0; + }; + + /** Type of a %SVM formulation. + See SVM::Types. Default value is SVM::C_SVC. */ + /** @see setType */ + CV_WRAP virtual int getType() const = 0; + /** @copybrief getType @see getType */ + CV_WRAP virtual void setType(int val) = 0; + + /** Parameter \f$\gamma\f$ of a kernel function. + For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. */ + /** @see setGamma */ + CV_WRAP virtual double getGamma() const = 0; + /** @copybrief getGamma @see getGamma */ + CV_WRAP virtual void setGamma(double val) = 0; + + /** Parameter _coef0_ of a kernel function. + For SVM::POLY or SVM::SIGMOID. Default value is 0.*/ + /** @see setCoef0 */ + CV_WRAP virtual double getCoef0() const = 0; + /** @copybrief getCoef0 @see getCoef0 */ + CV_WRAP virtual void setCoef0(double val) = 0; + + /** Parameter _degree_ of a kernel function. + For SVM::POLY. Default value is 0. */ + /** @see setDegree */ + CV_WRAP virtual double getDegree() const = 0; + /** @copybrief getDegree @see getDegree */ + CV_WRAP virtual void setDegree(double val) = 0; + + /** Parameter _C_ of a %SVM optimization problem. + For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */ + /** @see setC */ + CV_WRAP virtual double getC() const = 0; + /** @copybrief getC @see getC */ + CV_WRAP virtual void setC(double val) = 0; + + /** Parameter \f$\nu\f$ of a %SVM optimization problem. + For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */ + /** @see setNu */ + CV_WRAP virtual double getNu() const = 0; + /** @copybrief getNu @see getNu */ + CV_WRAP virtual void setNu(double val) = 0; + + /** Parameter \f$\epsilon\f$ of a %SVM optimization problem. + For SVM::EPS_SVR. Default value is 0. */ + /** @see setP */ + CV_WRAP virtual double getP() const = 0; + /** @copybrief getP @see getP */ + CV_WRAP virtual void setP(double val) = 0; + + /** Optional weights in the SVM::C_SVC problem, assigned to particular classes. + They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus + these weights affect the misclassification penalty for different classes. The larger weight, + the larger penalty on misclassification of data from the corresponding class. Default value is + empty Mat. */ + /** @see setClassWeights */ + CV_WRAP virtual cv::Mat getClassWeights() const = 0; + /** @copybrief getClassWeights @see getClassWeights */ + CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0; + + /** Termination criteria of the iterative %SVM training procedure which solves a partial + case of constrained quadratic optimization problem. + You can specify tolerance and/or the maximum number of iterations. Default value is + `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */ + /** @see setTermCriteria */ + CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0; + + /** Type of a %SVM kernel. + See SVM::KernelTypes. Default value is SVM::RBF. */ + CV_WRAP virtual int getKernelType() const = 0; + + /** Initialize with one of predefined kernels. + See SVM::KernelTypes. 
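    A minimal configuration sketch (the kernel choice and parameter values below are illustrative):
    @code{.cpp}
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::RBF);
    svm->setGamma(0.5);  // illustrative value
    svm->setC(1.0);      // illustrative value
    @endcode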
*/ + CV_WRAP virtual void setKernel(int kernelType) = 0; + + /** Initialize with custom kernel. + See SVM::Kernel class for implementation details */ + virtual void setCustomKernel(const Ptr &_kernel) = 0; + + //! %SVM type + enum Types { + /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows + imperfect separation of classes with penalty multiplier C for outliers. */ + C_SVC=100, + /** \f$\nu\f$-Support Vector Classification. n-class classification with possible + imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother + the decision boundary) is used instead of C. */ + NU_SVC=101, + /** Distribution Estimation (One-class %SVM). All the training data are from + the same class, %SVM builds a boundary that separates the class from the rest of the feature + space. */ + ONE_CLASS=102, + /** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors + from the training set and the fitting hyper-plane must be less than p. For outliers the + penalty multiplier C is used. */ + EPS_SVR=103, + /** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p. + See @cite LibSVM for details. */ + NU_SVR=104 + }; + + /** @brief %SVM kernel type + + A comparison of different kernels on the following 2D test case with four classes. Four + SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three + different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score. + Bright means max-score \> 0, dark means max-score \< 0. + ![image](pics/SVM_Comparison.png) + */ + enum KernelTypes { + /** Returned by SVM::getKernelType in case when custom kernel has been set */ + CUSTOM=-1, + /** Linear kernel. No mapping is done, linear discrimination (or regression) is + done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */ + LINEAR=0, + /** Polynomial kernel: + \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */ + POLY=1, + /** Radial basis function (RBF), a good choice in most cases. + \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */ + RBF=2, + /** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */ + SIGMOID=3, + /** Exponential Chi2 kernel, similar to the RBF kernel: + \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */ + CHI2=4, + /** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */ + INTER=5 + }; + + //! %SVM params type + enum ParamTypes { + C=0, + GAMMA=1, + P=2, + NU=3, + COEF=4, + DEGREE=5 + }; + + /** @brief Trains an %SVM with optimal parameters. + + @param data the training data that can be constructed using TrainData::create or + TrainData::loadFromCSV. + @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One + subset is used to test the model, the others form the train set. So, the %SVM algorithm is + executed kFold times. + @param Cgrid grid for C + @param gammaGrid grid for gamma + @param pGrid grid for p + @param nuGrid grid for nu + @param coeffGrid grid for coeff + @param degreeGrid grid for degree + @param balanced If true and the problem is 2-class classification then the method creates more + balanced cross-validation subsets that is proportions between classes in subsets are close + to such proportion in the whole train dataset. 
+ + The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, + nu, coef0, degree. Parameters are considered optimal when the cross-validation + estimate of the test set error is minimal. + + If there is no need to optimize a parameter, the corresponding grid step should be set to any + value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step + = 0`, `gammaGrid.minVal`, `gamma_grid.maxVal` as arbitrary numbers. In this case, the value + `Gamma` is taken for gamma. + + And, finally, if the optimization in a parameter is required but the corresponding grid is + unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for + gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`. + + This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the + regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and + the usual %SVM with parameters specified in params is executed. + */ + virtual bool trainAuto( const Ptr& data, int kFold = 10, + ParamGrid Cgrid = getDefaultGrid(C), + ParamGrid gammaGrid = getDefaultGrid(GAMMA), + ParamGrid pGrid = getDefaultGrid(P), + ParamGrid nuGrid = getDefaultGrid(NU), + ParamGrid coeffGrid = getDefaultGrid(COEF), + ParamGrid degreeGrid = getDefaultGrid(DEGREE), + bool balanced=false) = 0; + + /** @brief Trains an %SVM with optimal parameters + + @param samples training samples + @param layout See ml::SampleTypes. + @param responses vector of responses associated with the training samples. + @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One + subset is used to test the model, the others form the train set. So, the %SVM algorithm is + @param Cgrid grid for C + @param gammaGrid grid for gamma + @param pGrid grid for p + @param nuGrid grid for nu + @param coeffGrid grid for coeff + @param degreeGrid grid for degree + @param balanced If true and the problem is 2-class classification then the method creates more + balanced cross-validation subsets that is proportions between classes in subsets are close + to such proportion in the whole train dataset. + + The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, + nu, coef0, degree. Parameters are considered optimal when the cross-validation + estimate of the test set error is minimal. + + This function only makes use of SVM::getDefaultGrid for parameter optimization and thus only + offers rudimentary parameter options. + + This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the + regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and + the usual %SVM with parameters specified in params is executed. + */ + CV_WRAP virtual bool trainAuto(InputArray samples, + int layout, + InputArray responses, + int kFold = 10, + Ptr Cgrid = SVM::getDefaultGridPtr(SVM::C), + Ptr gammaGrid = SVM::getDefaultGridPtr(SVM::GAMMA), + Ptr pGrid = SVM::getDefaultGridPtr(SVM::P), + Ptr nuGrid = SVM::getDefaultGridPtr(SVM::NU), + Ptr coeffGrid = SVM::getDefaultGridPtr(SVM::COEF), + Ptr degreeGrid = SVM::getDefaultGridPtr(SVM::DEGREE), + bool balanced=false) = 0; + + /** @brief Retrieves all the support vectors + + The method returns all the support vectors as a floating-point matrix, where support vectors are + stored as matrix rows. 
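+
+    A short sketch (hypothetical names; `data` is assumed to be a Ptr<TrainData> built with
+    TrainData::create):
+    @code
+        Ptr<SVM> svm = SVM::create();
+        svm->trainAuto(data);                  // cross-validated parameter selection
+        Mat sv = svm->getSupportVectors();     // CV_32F matrix, one support vector per row
+    @endcode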
+ */ + CV_WRAP virtual Mat getSupportVectors() const = 0; + + /** @brief Retrieves all the uncompressed support vectors of a linear %SVM + + The method returns all the uncompressed support vectors of a linear %SVM that the compressed + support vector, used for prediction, was derived from. They are returned in a floating-point + matrix, where the support vectors are stored as matrix rows. + */ + CV_WRAP virtual Mat getUncompressedSupportVectors() const = 0; + + /** @brief Retrieves the decision function + + @param i the index of the decision function. If the problem solved is regression, 1-class or + 2-class classification, then there will be just one decision function and the index should + always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$ + decision functions. + @param alpha the optional output vector for weights, corresponding to different support vectors. + In the case of linear %SVM all the alpha's will be 1's. + @param svidx the optional output vector of indices of support vectors within the matrix of + support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear + %SVM each decision function consists of a single "compressed" support vector. + + The method returns rho parameter of the decision function, a scalar subtracted from the weighted + sum of kernel responses. + */ + CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0; + + /** @brief Generates a grid for %SVM parameters. + + @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is + generated for the parameter with this ID. + + The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be + passed to the function SVM::trainAuto. + */ + static ParamGrid getDefaultGrid( int param_id ); + + /** @brief Generates a grid for %SVM parameters. + + @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is + generated for the parameter with this ID. + + The function generates a grid pointer for the specified parameter of the %SVM algorithm. + The grid may be passed to the function SVM::trainAuto. + */ + CV_WRAP static Ptr getDefaultGridPtr( int param_id ); + + /** Creates empty model. + Use StatModel::train to train the model. Since %SVM has several parameters, you may want to + find the best parameters for your problem, it can be done with SVM::trainAuto. */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized svm from a file + * + * Use SVM::save to serialize and store an SVM to disk. + * Load the SVM from this file again, by calling this function with the path to the file. + * + * @param filepath path to serialized svm + */ + CV_WRAP static Ptr load(const String& filepath); +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ + +/** @brief The class implements the Expectation Maximization algorithm. + +@sa @ref ml_intro_em + */ +class CV_EXPORTS_W EM : public StatModel +{ +public: + //! Type of covariation matrices + enum Types { + /** A scaled identity matrix \f$\mu_k * I\f$. There is the only + parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases, + when the constraint is relevant, or as a first step in the optimization (for example in case + when the data is preprocessed with PCA). 
The results of such preliminary estimation may be + passed again to the optimization procedure, this time with + covMatType=EM::COV_MAT_DIAGONAL. */ + COV_MAT_SPHERICAL=0, + /** A diagonal matrix with positive diagonal elements. The number of + free parameters is d for each matrix. This is most commonly used option yielding good + estimation results. */ + COV_MAT_DIAGONAL=1, + /** A symmetric positively defined matrix. The number of free + parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless + there is pretty accurate initial estimation of the parameters and/or a huge number of + training samples. */ + COV_MAT_GENERIC=2, + COV_MAT_DEFAULT=COV_MAT_DIAGONAL + }; + + //! Default parameters + enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; + + //! The initial step + enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; + + /** The number of mixture components in the Gaussian mixture model. + Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could + determine the optimal number of mixtures within a specified value range, but that is not the + case in ML yet. */ + /** @see setClustersNumber */ + CV_WRAP virtual int getClustersNumber() const = 0; + /** @copybrief getClustersNumber @see getClustersNumber */ + CV_WRAP virtual void setClustersNumber(int val) = 0; + + /** Constraint on covariance matrices which defines type of matrices. + See EM::Types. */ + /** @see setCovarianceMatrixType */ + CV_WRAP virtual int getCovarianceMatrixType() const = 0; + /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */ + CV_WRAP virtual void setCovarianceMatrixType(int val) = 0; + + /** The termination criteria of the %EM algorithm. + The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of + M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default + maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0; + + /** @brief Returns weights of the mixtures + + Returns vector with the number of elements equal to the number of mixtures. + */ + CV_WRAP virtual Mat getWeights() const = 0; + /** @brief Returns the cluster centers (means of the Gaussian mixture) + + Returns matrix with the number of rows equal to the number of mixtures and number of columns + equal to the space dimensionality. + */ + CV_WRAP virtual Mat getMeans() const = 0; + /** @brief Returns covariation matrices + + Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, + each matrix is a square floating-point matrix NxN, where N is the space dimensionality. + */ + CV_WRAP virtual void getCovs(CV_OUT std::vector& covs) const = 0; + + /** @brief Returns posterior probabilities for the provided samples + + @param samples The input samples, floating-point matrix + @param results The optional output \f$ nSamples \times nClusters\f$ matrix of results. It contains + posterior probabilities for each sample from the input + @param flags This parameter will be ignored + */ + CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0; + + /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component + for the given sample. 
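+
+    A minimal usage sketch (hypothetical names; `samples` is assumed to be a CV_64F matrix with one
+    sample per row):
+    @code
+        Ptr<EM> em = EM::create();
+        em->setClustersNumber(3);
+        em->trainEM(samples);
+        Vec2d res = em->predict2(samples.row(0), noArray());
+        double logLikelihood = res[0];         // likelihood logarithm for the sample
+        int mixtureIdx = cvRound(res[1]);      // index of the most probable mixture component
+    @endcode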
+ + @param sample A sample for classification. It should be a one-channel matrix of + \f$1 \times dims\f$ or \f$dims \times 1\f$ size. + @param probs Optional output matrix that contains posterior probabilities of each component + given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type. + + The method returns a two-element double vector. Zero element is a likelihood logarithm value for + the sample. First element is an index of the most probable mixture component for the given + sample. + */ + CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; + + /** @brief Estimate the Gaussian mixture parameters from a samples set. + + This variation starts with Expectation step. Initial values of the model parameters will be + estimated by the k-means algorithm. + + Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take + responses (class labels or function values) as input. Instead, it computes the *Maximum + Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the + parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in + covs[k], \f$\pi_k\f$ in weights , and optionally computes the output "class label" for each + sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most + probable mixture component for each sample). + + The trained model can be used further for prediction, just like any other classifier. The + trained model is similar to the NormalBayesClassifier. + + @param samples Samples from which the Gaussian mixture model will be estimated. It should be a + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. + @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + @param labels The optional output "class label" for each sample: + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + @param probs The optional output matrix that contains posterior probabilities of each Gaussian + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. + */ + CV_WRAP virtual bool trainEM(InputArray samples, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; + + /** @brief Estimate the Gaussian mixture parameters from a samples set. + + This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of + mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices + \f$S_k\f$ of mixture components. + + @param samples Samples from which the Gaussian mixture model will be estimated. It should be a + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. + @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of + \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be + converted to the inner matrix of such type for the further computing. + @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. 
Each of + covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices + do not have CV_64F type they will be converted to the inner matrices of such type for the + further computing. + @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel + floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size. + @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + @param labels The optional output "class label" for each sample: + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + @param probs The optional output matrix that contains posterior probabilities of each Gaussian + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. + */ + CV_WRAP virtual bool trainE(InputArray samples, InputArray means0, + InputArray covs0=noArray(), + InputArray weights0=noArray(), + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; + + /** @brief Estimate the Gaussian mixture parameters from a samples set. + + This variation starts with Maximization step. You need to provide initial probabilities + \f$p_{i,k}\f$ to use this option. + + @param samples Samples from which the Gaussian mixture model will be estimated. It should be a + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. + @param probs0 the probabilities + @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + @param labels The optional output "class label" for each sample: + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + @param probs The optional output matrix that contains posterior probabilities of each Gaussian + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. + */ + CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; + + /** Creates empty %EM model. + The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you + can use one of the EM::train\* methods or load it from file using Algorithm::load\(filename). + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized EM from a file + * + * Use EM::save to serialize and store an EM to disk. + * Load the EM from this file again, by calling this function with the path to the file. 
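+     * For example (hypothetical file path):
+     * @code
+     *     Ptr<EM> em = EM::load("trained_em.xml");
+     * @endcode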
+ * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized EM + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* Decision Tree * +\****************************************************************************************/ + +/** @brief The class represents a single decision tree or a collection of decision trees. + +The current public interface of the class allows user to train only a single decision tree, however +the class is capable of storing multiple decision trees and using them for prediction (by summing +responses or using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost) +use this capability to implement decision tree ensembles. + +@sa @ref ml_intro_trees +*/ +class CV_EXPORTS_W DTrees : public StatModel +{ +public: + /** Predict options */ + enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) }; + + /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to + find a suboptimal split. + If a discrete variable, on which the training procedure tries to make a split, takes more than + maxCategories values, the precise best subset estimation may take a very long time because the + algorithm is exponential. Instead, many decision trees engines (including our implementation) + try to find sub-optimal split in this case by clustering all the samples into maxCategories + clusters that is some categories are merged together. The clustering is applied only in n \> + 2-class classification problems for categorical variables with N \> max_categories possible + values. In case of regression and 2-class classification the optimal split can be found + efficiently without employing clustering, thus the parameter is not used in these cases. + Default value is 10.*/ + /** @see setMaxCategories */ + CV_WRAP virtual int getMaxCategories() const = 0; + /** @copybrief getMaxCategories @see getMaxCategories */ + CV_WRAP virtual void setMaxCategories(int val) = 0; + + /** The maximum possible depth of the tree. + That is the training algorithms attempts to split a node while its depth is less than maxDepth. + The root node has zero depth. The actual depth may be smaller if the other termination criteria + are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the + tree is pruned. Default value is INT_MAX.*/ + /** @see setMaxDepth */ + CV_WRAP virtual int getMaxDepth() const = 0; + /** @copybrief getMaxDepth @see getMaxDepth */ + CV_WRAP virtual void setMaxDepth(int val) = 0; + + /** If the number of samples in a node is less than this parameter then the node will not be split. + + Default value is 10.*/ + /** @see setMinSampleCount */ + CV_WRAP virtual int getMinSampleCount() const = 0; + /** @copybrief getMinSampleCount @see getMinSampleCount */ + CV_WRAP virtual void setMinSampleCount(int val) = 0; + + /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold + cross-validation procedure where K is equal to CVFolds. + Default value is 10.*/ + /** @see setCVFolds */ + CV_WRAP virtual int getCVFolds() const = 0; + /** @copybrief getCVFolds @see getCVFolds */ + CV_WRAP virtual void setCVFolds(int val) = 0; + + /** If true then surrogate splits will be built. 
+ These splits allow to work with missing data and compute variable importance correctly. + Default value is false. + @note currently it's not implemented.*/ + /** @see setUseSurrogates */ + CV_WRAP virtual bool getUseSurrogates() const = 0; + /** @copybrief getUseSurrogates @see getUseSurrogates */ + CV_WRAP virtual void setUseSurrogates(bool val) = 0; + + /** If true then a pruning will be harsher. + This will make a tree more compact and more resistant to the training data noise but a bit less + accurate. Default value is true.*/ + /** @see setUse1SERule */ + CV_WRAP virtual bool getUse1SERule() const = 0; + /** @copybrief getUse1SERule @see getUse1SERule */ + CV_WRAP virtual void setUse1SERule(bool val) = 0; + + /** If true then pruned branches are physically removed from the tree. + Otherwise they are retained and it is possible to get results from the original unpruned (or + pruned less aggressively) tree. Default value is true.*/ + /** @see setTruncatePrunedTree */ + CV_WRAP virtual bool getTruncatePrunedTree() const = 0; + /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */ + CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0; + + /** Termination criteria for regression trees. + If all absolute differences between an estimated value in a node and values of train samples + in this node are less than this parameter then the node will not be split further. Default + value is 0.01f*/ + /** @see setRegressionAccuracy */ + CV_WRAP virtual float getRegressionAccuracy() const = 0; + /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */ + CV_WRAP virtual void setRegressionAccuracy(float val) = 0; + + /** @brief The array of a priori class probabilities, sorted by the class label value. + + The parameter can be used to tune the decision tree preferences toward a certain class. For + example, if you want to detect some rare anomaly occurrence, the training base will likely + contain much more normal cases than anomalies, so a very good classification performance + will be achieved just by considering every case as normal. To avoid this, the priors can be + specified, where the anomaly probability is artificially increased (up to 0.5 or even + greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is + adjusted properly. + + You can also think about this parameter as weights of prediction categories which determine + relative weights that you give to misclassification. That is, if the weight of the first + category is 1 and the weight of the second category is 10, then each mistake in predicting + the second category is equivalent to making 10 mistakes in predicting the first category. + Default value is empty Mat.*/ + /** @see setPriors */ + CV_WRAP virtual cv::Mat getPriors() const = 0; + /** @copybrief getPriors @see getPriors */ + CV_WRAP virtual void setPriors(const cv::Mat &val) = 0; + + /** @brief The class represents a decision tree node. + */ + class CV_EXPORTS Node + { + public: + Node(); + double value; //!< Value at the node: a class label in case of classification or estimated + //!< function value in case of regression. + int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the + //!< node. It is used internally in classification trees and tree ensembles. + int parent; //!< Index of the parent node + int left; //!< Index of the left child node + int right; //!< Index of right child node + int defaultDir; //!< Default direction where to go (-1: left or +1: right). 
It helps in the + //!< case of missing values. + int split; //!< Index of the first split + }; + + /** @brief The class represents split in a decision tree. + */ + class CV_EXPORTS Split + { + public: + Split(); + int varIdx; //!< Index of variable on which the split is created. + bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right + //!< branches are exchanged in the rule expressions below). + float quality; //!< The split quality, a positive number. It is used to choose the best split. + int next; //!< Index of the next split in the list of splits for the node + float c; /**< The threshold value in case of split on an ordered variable. + The rule is: + @code{.none} + if var_value < c + then next_node <- left + else next_node <- right + @endcode */ + int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable. + The rule is: + @code{.none} + if bitset[var_value] == 1 + then next_node <- left + else next_node <- right + @endcode */ + }; + + /** @brief Returns indices of root nodes + */ + virtual const std::vector& getRoots() const = 0; + /** @brief Returns all the nodes + + all the node indices are indices in the returned vector + */ + virtual const std::vector& getNodes() const = 0; + /** @brief Returns all the splits + + all the split indices are indices in the returned vector + */ + virtual const std::vector& getSplits() const = 0; + /** @brief Returns all the bitsets for categorical splits + + Split::subsetOfs is an offset in the returned vector + */ + virtual const std::vector& getSubsets() const = 0; + + /** @brief Creates the empty model + + The static method creates empty decision tree with the specified parameters. It should be then + trained using train method (see StatModel::train). Alternatively, you can load the model from + file using Algorithm::load\(filename). + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized DTrees from a file + * + * Use DTree::save to serialize and store an DTree to disk. + * Load the DTree from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized DTree + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* Random Trees Classifier * +\****************************************************************************************/ + +/** @brief The class implements the random forest predictor. + +@sa @ref ml_intro_rtrees + */ +class CV_EXPORTS_W RTrees : public DTrees +{ +public: + + /** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance. + Default value is false.*/ + /** @see setCalculateVarImportance */ + CV_WRAP virtual bool getCalculateVarImportance() const = 0; + /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */ + CV_WRAP virtual void setCalculateVarImportance(bool val) = 0; + + /** The size of the randomly selected subset of features at each tree node and that are used + to find the best split(s). + If you set it to 0 then the size will be set to the square root of the total number of + features. 
Default value is 0.*/ + /** @see setActiveVarCount */ + CV_WRAP virtual int getActiveVarCount() const = 0; + /** @copybrief getActiveVarCount @see getActiveVarCount */ + CV_WRAP virtual void setActiveVarCount(int val) = 0; + + /** The termination criteria that specifies when the training algorithm stops. + Either when the specified number of trees is trained and added to the ensemble or when + sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have the + better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes + pass a certain number of trees. Also to keep in mind, the number of tree increases the + prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS + + TermCriteria::EPS, 50, 0.1)*/ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0; + + /** Returns the variable importance array. + The method returns the variable importance vector, computed at the training stage when + CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is + returned. + */ + CV_WRAP virtual Mat getVarImportance() const = 0; + + /** Returns the result of each individual tree in the forest. + In case the model is a regression problem, the method will return each of the trees' + results for each of the sample cases. If the model is a classifier, it will return + a Mat with samples + 1 rows, where the first row gives the class number and the + following rows return the votes each class had for each sample. + @param samples Array containing the samples for which votes will be calculated. + @param results Array where the result of the calculation will be written. + @param flags Flags for defining the type of RTrees. + */ + CV_WRAP virtual void getVotes(InputArray samples, OutputArray results, int flags) const = 0; + + /** Returns the OOB error value, computed at the training stage when calcOOBError is set to true. + * If this flag was set to false, 0 is returned. The OOB error is also scaled by sample weighting. + */ +#if CV_VERSION_MAJOR == 4 + CV_WRAP virtual double getOOBError() const { return 0; } +#else + /*CV_WRAP*/ virtual double getOOBError() const = 0; +#endif + + /** Creates the empty model. + Use StatModel::train to train the model, StatModel::train to create and train the model, + Algorithm::load to load the pre-trained model. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized RTree from a file + * + * Use RTree::save to serialize and store an RTree to disk. + * Load the RTree from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized RTree + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* Boosted tree classifier * +\****************************************************************************************/ + +/** @brief Boosted tree classifier derived from DTrees + +@sa @ref ml_intro_boost + */ +class CV_EXPORTS_W Boost : public DTrees +{ +public: + /** Type of the boosting algorithm. + See Boost::Types. Default value is Boost::REAL. 
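+
+    For example, switching a hypothetical `boost` instance to Gentle AdaBoost:
+    @code
+        boost->setBoostType(Boost::GENTLE);
+    @endcode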
*/ + /** @see setBoostType */ + CV_WRAP virtual int getBoostType() const = 0; + /** @copybrief getBoostType @see getBoostType */ + CV_WRAP virtual void setBoostType(int val) = 0; + + /** The number of weak classifiers. + Default value is 100. */ + /** @see setWeakCount */ + CV_WRAP virtual int getWeakCount() const = 0; + /** @copybrief getWeakCount @see getWeakCount */ + CV_WRAP virtual void setWeakCount(int val) = 0; + + /** A threshold between 0 and 1 used to save computational time. + Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* + iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/ + /** @see setWeightTrimRate */ + CV_WRAP virtual double getWeightTrimRate() const = 0; + /** @copybrief getWeightTrimRate @see getWeightTrimRate */ + CV_WRAP virtual void setWeightTrimRate(double val) = 0; + + /** Boosting type. + Gentle AdaBoost and Real AdaBoost are often the preferable choices. */ + enum Types { + DISCRETE=0, //!< Discrete AdaBoost. + REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions + //!< and works well with categorical data. + LOGIT=2, //!< LogitBoost. It can produce good regression fits. + GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that + //!(filename) to load the pre-trained model. */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized Boost from a file + * + * Use Boost::save to serialize and store an RTree to disk. + * Load the Boost from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized Boost + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* Gradient Boosted Trees * +\****************************************************************************************/ + +/*class CV_EXPORTS_W GBTrees : public DTrees +{ +public: + struct CV_EXPORTS_W_MAP Params : public DTrees::Params + { + CV_PROP_RW int weakCount; + CV_PROP_RW int lossFunctionType; + CV_PROP_RW float subsamplePortion; + CV_PROP_RW float shrinkage; + + Params(); + Params( int lossFunctionType, int weakCount, float shrinkage, + float subsamplePortion, int maxDepth, bool useSurrogates ); + }; + + enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS}; + + virtual void setK(int k) = 0; + + virtual float predictSerial( InputArray samples, + OutputArray weakResponses, int flags) const = 0; + + static Ptr create(const Params& p); +};*/ + +/****************************************************************************************\ +* Artificial Neural Networks (ANN) * +\****************************************************************************************/ + +/////////////////////////////////// Multi-Layer Perceptrons ////////////////////////////// + +/** @brief Artificial Neural Networks - Multi-Layer Perceptrons. + +Unlike many other models in ML that are constructed and trained at once, in the MLP model these +steps are separated. First, a network with the specified topology is created using the non-default +constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is +trained using a set of input and output vectors. 
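+
+A minimal topology/training sketch (hypothetical layer sizes and data names; note that for ANN_MLP the
+responses matrix is floating-point, one desired output vector per row):
+@code
+    Ptr<ANN_MLP> mlp = ANN_MLP::create();
+    Mat layerSizes = (Mat_<int>(1, 3) << 16, 10, 2);  // 16 inputs, 10 hidden neurons, 2 outputs
+    mlp->setLayerSizes(layerSizes);
+    mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM);
+    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
+    mlp->train(trainSamples, ROW_SAMPLE, trainResponses);
+@endcode
+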
The training procedure can be repeated more than +once, that is, the weights can be adjusted based on the new training data. + +Additional flags for StatModel::train are available: ANN_MLP::TrainFlags. + +@sa @ref ml_intro_ann + */ +class CV_EXPORTS_W ANN_MLP : public StatModel +{ +public: + /** Available training methods */ + enum TrainingMethods { + BACKPROP=0, //!< The back-propagation algorithm. + RPROP = 1, //!< The RPROP algorithm. See @cite RPROP93 for details. + ANNEAL = 2 //!< The simulated annealing algorithm. See @cite Kirkpatrick83 for details. + }; + + /** Sets training method and common parameters. + @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods. + @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP and to initialT for ANN_MLP::ANNEAL. + @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP and to finalT for ANN_MLP::ANNEAL. + */ + CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0; + + /** Returns current training method */ + CV_WRAP virtual int getTrainMethod() const = 0; + + /** Initialize the activation function for each neuron. + Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM. + @param type The type of activation function. See ANN_MLP::ActivationFunctions. + @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0. + @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0. + */ + CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0; + + /** Integer vector specifying the number of neurons in each layer including the input and output layers. + The very first element specifies the number of elements in the input layer. + The last element - number of elements in the output layer. Default value is empty Mat. + @sa getLayerSizes */ + CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0; + + /** Integer vector specifying the number of neurons in each layer including the input and output layers. + The very first element specifies the number of elements in the input layer. + The last element - number of elements in the output layer. + @sa setLayerSizes */ + CV_WRAP virtual cv::Mat getLayerSizes() const = 0; + + /** Termination criteria of the training algorithm. + You can specify the maximum number of iterations (maxCount) and/or how much the error could + change between the iterations to make the algorithm continue (epsilon). Default value is + TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0; + + /** BPROP: Strength of the weight gradient term. + The recommended value is about 0.1. Default value is 0.1.*/ + /** @see setBackpropWeightScale */ + CV_WRAP virtual double getBackpropWeightScale() const = 0; + /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */ + CV_WRAP virtual void setBackpropWeightScale(double val) = 0; + + /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations). + This parameter provides some inertia to smooth the random fluctuations of the weights. It can + vary from 0 (the feature is disabled) to 1 and beyond. 
The value 0.1 or so is good enough. + Default value is 0.1.*/ + /** @see setBackpropMomentumScale */ + CV_WRAP virtual double getBackpropMomentumScale() const = 0; + /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */ + CV_WRAP virtual void setBackpropMomentumScale(double val) = 0; + + /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. + Default value is 0.1.*/ + /** @see setRpropDW0 */ + CV_WRAP virtual double getRpropDW0() const = 0; + /** @copybrief getRpropDW0 @see getRpropDW0 */ + CV_WRAP virtual void setRpropDW0(double val) = 0; + + /** RPROP: Increase factor \f$\eta^+\f$. + It must be \>1. Default value is 1.2.*/ + /** @see setRpropDWPlus */ + CV_WRAP virtual double getRpropDWPlus() const = 0; + /** @copybrief getRpropDWPlus @see getRpropDWPlus */ + CV_WRAP virtual void setRpropDWPlus(double val) = 0; + + /** RPROP: Decrease factor \f$\eta^-\f$. + It must be \<1. Default value is 0.5.*/ + /** @see setRpropDWMinus */ + CV_WRAP virtual double getRpropDWMinus() const = 0; + /** @copybrief getRpropDWMinus @see getRpropDWMinus */ + CV_WRAP virtual void setRpropDWMinus(double val) = 0; + + /** RPROP: Update-values lower limit \f$\Delta_{min}\f$. + It must be positive. Default value is FLT_EPSILON.*/ + /** @see setRpropDWMin */ + CV_WRAP virtual double getRpropDWMin() const = 0; + /** @copybrief getRpropDWMin @see getRpropDWMin */ + CV_WRAP virtual void setRpropDWMin(double val) = 0; + + /** RPROP: Update-values upper limit \f$\Delta_{max}\f$. + It must be \>1. Default value is 50.*/ + /** @see setRpropDWMax */ + CV_WRAP virtual double getRpropDWMax() const = 0; + /** @copybrief getRpropDWMax @see getRpropDWMax */ + CV_WRAP virtual void setRpropDWMax(double val) = 0; + + /** ANNEAL: Update initial temperature. + It must be \>=0. Default value is 10.*/ + /** @see setAnnealInitialT */ + CV_WRAP virtual double getAnnealInitialT() const = 0; + /** @copybrief getAnnealInitialT @see getAnnealInitialT */ + CV_WRAP virtual void setAnnealInitialT(double val) = 0; + + /** ANNEAL: Update final temperature. + It must be \>=0 and less than initialT. Default value is 0.1.*/ + /** @see setAnnealFinalT */ + CV_WRAP virtual double getAnnealFinalT() const = 0; + /** @copybrief getAnnealFinalT @see getAnnealFinalT */ + CV_WRAP virtual void setAnnealFinalT(double val) = 0; + + /** ANNEAL: Update cooling ratio. + It must be \>0 and less than 1. Default value is 0.95.*/ + /** @see setAnnealCoolingRatio */ + CV_WRAP virtual double getAnnealCoolingRatio() const = 0; + /** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */ + CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0; + + /** ANNEAL: Update iteration per step. + It must be \>0 . 
Default value is 10.*/ + /** @see setAnnealItePerStep */ + CV_WRAP virtual int getAnnealItePerStep() const = 0; + /** @copybrief getAnnealItePerStep @see getAnnealItePerStep */ + CV_WRAP virtual void setAnnealItePerStep(int val) = 0; + + /** @brief Set/initialize anneal RNG */ + virtual void setAnnealEnergyRNG(const RNG& rng) = 0; + + /** possible activation functions */ + enum ActivationFunctions { + /** Identity function: \f$f(x)=x\f$ */ + IDENTITY = 0, + /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$ + @note + If you are using the default sigmoid activation function with the default parameter values + fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output + will range from [-1.7159, 1.7159], instead of [0,1].*/ + SIGMOID_SYM = 1, + /** Gaussian function: \f$f(x)=\beta e^{-\alpha x*x}\f$ */ + GAUSSIAN = 2, + /** ReLU function: \f$f(x)=max(0,x)\f$ */ + RELU = 3, + /** Leaky ReLU function: for x>0 \f$f(x)=x \f$ and x<=0 \f$f(x)=\alpha x \f$*/ + LEAKYRELU= 4 + }; + + /** Train options */ + enum TrainFlags { + /** Update the network weights, rather than compute them from scratch. In the latter case + the weights are initialized using the Nguyen-Widrow algorithm. */ + UPDATE_WEIGHTS = 1, + /** Do not normalize the input vectors. If this flag is not set, the training algorithm + normalizes each input feature independently, shifting its mean value to 0 and making the + standard deviation equal to 1. If the network is assumed to be updated frequently, the new + training data could be much different from original one. In this case, you should take care + of proper normalization. */ + NO_INPUT_SCALE = 2, + /** Do not normalize the output vectors. If the flag is not set, the training algorithm + normalizes each output feature independently, by transforming it to the certain range + depending on the used activation function. */ + NO_OUTPUT_SCALE = 4 + }; + + CV_WRAP virtual Mat getWeights(int layerIdx) const = 0; + + /** @brief Creates empty model + + Use StatModel::train to train the model, Algorithm::load\(filename) to load the pre-trained model. + Note that the train method has optional flags: ANN_MLP::TrainFlags. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized ANN from a file + * + * Use ANN::save to serialize and store an ANN to disk. + * Load the ANN from this file again, by calling this function with the path to the file. + * + * @param filepath path to serialized ANN + */ + CV_WRAP static Ptr load(const String& filepath); + +}; + +#ifndef DISABLE_OPENCV_3_COMPATIBILITY +typedef ANN_MLP ANN_MLP_ANNEAL; +#endif + +/****************************************************************************************\ +* Logistic Regression * +\****************************************************************************************/ + +/** @brief Implements Logistic Regression classifier. + +@sa @ref ml_intro_lr + */ +class CV_EXPORTS_W LogisticRegression : public StatModel +{ +public: + + /** Learning rate. */ + /** @see setLearningRate */ + CV_WRAP virtual double getLearningRate() const = 0; + /** @copybrief getLearningRate @see getLearningRate */ + CV_WRAP virtual void setLearningRate(double val) = 0; + + /** Number of iterations. */ + /** @see setIterations */ + CV_WRAP virtual int getIterations() const = 0; + /** @copybrief getIterations @see getIterations */ + CV_WRAP virtual void setIterations(int val) = 0; + + /** Kind of regularization to be applied. See LogisticRegression::RegKinds. 
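+
+    For example, on a hypothetical `lr` instance (a Ptr<LogisticRegression>):
+    @code
+        lr->setRegularization(LogisticRegression::REG_L2);
+    @endcode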
*/ + /** @see setRegularization */ + CV_WRAP virtual int getRegularization() const = 0; + /** @copybrief getRegularization @see getRegularization */ + CV_WRAP virtual void setRegularization(int val) = 0; + + /** Kind of training method used. See LogisticRegression::Methods. */ + /** @see setTrainMethod */ + CV_WRAP virtual int getTrainMethod() const = 0; + /** @copybrief getTrainMethod @see getTrainMethod */ + CV_WRAP virtual void setTrainMethod(int val) = 0; + + /** Specifies the number of training samples taken in each step of Mini-Batch Gradient + Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It + has to take values less than the total number of training samples. */ + /** @see setMiniBatchSize */ + CV_WRAP virtual int getMiniBatchSize() const = 0; + /** @copybrief getMiniBatchSize @see getMiniBatchSize */ + CV_WRAP virtual void setMiniBatchSize(int val) = 0; + + /** Termination criteria of the algorithm. */ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0; + + //! Regularization kinds + enum RegKinds { + REG_DISABLE = -1, //!< Regularization disabled + REG_L1 = 0, //!< %L1 norm + REG_L2 = 1 //!< %L2 norm + }; + + //! Training methods + enum Methods { + BATCH = 0, + MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method. + }; + + /** @brief Predicts responses for input samples and returns a float type. + + @param samples The input data for the prediction algorithm. Matrix [m x n], where each row + contains variables (features) of one object being classified. Should have data type CV_32F. + @param results Predicted labels as a column matrix of type CV_32S. + @param flags Not used. + */ + CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0; + + /** @brief This function returns the trained parameters arranged across rows. + + For a two class classification problem, it returns a row matrix. It returns learnt parameters of + the Logistic Regression as a matrix of type CV_32F. + */ + CV_WRAP virtual Mat get_learnt_thetas() const = 0; + + /** @brief Creates empty model. + + Creates Logistic Regression model with parameters given. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized LogisticRegression from a file + * + * Use LogisticRegression::save to serialize and store an LogisticRegression to disk. + * Load the LogisticRegression from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized LogisticRegression + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + + +/****************************************************************************************\ +* Stochastic Gradient Descent SVM Classifier * +\****************************************************************************************/ + +/*! +@brief Stochastic Gradient Descent SVM classifier + +SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic Gradient Descent approach, +as presented in @cite bottou2010large. 
+ +The classifier has following parameters: +- model type, +- margin type, +- margin regularization (\f$\lambda\f$), +- initial step size (\f$\gamma_0\f$), +- step decreasing power (\f$c\f$), +- and termination criteria. + +The model type may have one of the following values: \ref SGD and \ref ASGD. + +- \ref SGD is the classic version of SVMSGD classifier: every next step is calculated by the formula + \f[w_{t+1} = w_t - \gamma(t) \frac{dQ_i}{dw} |_{w = w_t}\f] + where + - \f$w_t\f$ is the weights vector for decision function at step \f$t\f$, + - \f$\gamma(t)\f$ is the step size of model parameters at the iteration \f$t\f$, it is decreased on each step by the formula + \f$\gamma(t) = \gamma_0 (1 + \lambda \gamma_0 t) ^ {-c}\f$ + - \f$Q_i\f$ is the target functional from SVM task for sample with number \f$i\f$, this sample is chosen stochastically on each step of the algorithm. + +- \ref ASGD is Average Stochastic Gradient Descent SVM Classifier. ASGD classifier averages weights vector on each step of algorithm by the formula +\f$\widehat{w}_{t+1} = \frac{t}{1+t}\widehat{w}_{t} + \frac{1}{1+t}w_{t+1}\f$ + +The recommended model type is ASGD (following @cite bottou2010large). + +The margin type may have one of the following values: \ref SOFT_MARGIN or \ref HARD_MARGIN. + +- You should use \ref HARD_MARGIN type, if you have linearly separable sets. +- You should use \ref SOFT_MARGIN type, if you have non-linearly separable sets or sets with outliers. +- In the general case (if you know nothing about linear separability of your sets), use SOFT_MARGIN. + +The other parameters may be described as follows: +- Margin regularization parameter is responsible for weights decreasing at each step and for the strength of restrictions on outliers + (the less the parameter, the less probability that an outlier will be ignored). + Recommended value for SGD model is 0.0001, for ASGD model is 0.00001. + +- Initial step size parameter is the initial value for the step size \f$\gamma(t)\f$. + You will have to find the best initial step for your problem. + +- Step decreasing power is the power parameter for \f$\gamma(t)\f$ decreasing by the formula, mentioned above. + Recommended value for SGD model is 1, for ASGD model is 0.75. + +- Termination criteria can be TermCriteria::COUNT, TermCriteria::EPS or TermCriteria::COUNT + TermCriteria::EPS. + You will have to find the best termination criteria for your problem. + +Note that the parameters margin regularization, initial step size, and step decreasing power should be positive. + +To use SVMSGD algorithm do as follows: + +- first, create the SVMSGD object. The algorithm will set optimal parameters by default, but you can set your own parameters via functions setSvmsgdType(), + setMarginType(), setMarginRegularization(), setInitialStepSize(), and setStepDecreasingPower(). + +- then the SVM model can be trained using the train features and the correspondent labels by the method train(). + +- after that, the label of a new feature vector can be predicted using the method predict(). + +@code +// Create empty object +cv::Ptr svmsgd = SVMSGD::create(); + +// Train the Stochastic Gradient Descent SVM +svmsgd->train(trainData); + +// Predict labels for the new samples +svmsgd->predict(samples, responses); +@endcode + +*/ + +class CV_EXPORTS_W SVMSGD : public cv::ml::StatModel +{ +public: + + /** SVMSGD type. + ASGD is often the preferable choice. 
*/ + enum SvmsgdType + { + SGD, //!< Stochastic Gradient Descent + ASGD //!< Average Stochastic Gradient Descent + }; + + /** Margin type.*/ + enum MarginType + { + SOFT_MARGIN, //!< General case, suits to the case of non-linearly separable sets, allows outliers. + HARD_MARGIN //!< More accurate for the case of linearly separable sets. + }; + + /** + * @return the weights of the trained model (decision function f(x) = weights * x + shift). + */ + CV_WRAP virtual Mat getWeights() = 0; + + /** + * @return the shift of the trained model (decision function f(x) = weights * x + shift). + */ + CV_WRAP virtual float getShift() = 0; + + /** @brief Creates empty model. + * Use StatModel::train to train the model. Since %SVMSGD has several parameters, you may want to + * find the best parameters for your problem or use setOptimalParameters() to set some default parameters. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized SVMSGD from a file + * + * Use SVMSGD::save to serialize and store an SVMSGD to disk. + * Load the SVMSGD from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized SVMSGD + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); + + /** @brief Function sets optimal parameters values for chosen SVM SGD model. + * @param svmsgdType is the type of SVMSGD classifier. + * @param marginType is the type of margin constraint. + */ + CV_WRAP virtual void setOptimalParameters(int svmsgdType = SVMSGD::ASGD, int marginType = SVMSGD::SOFT_MARGIN) = 0; + + /** @brief %Algorithm type, one of SVMSGD::SvmsgdType. */ + /** @see setSvmsgdType */ + CV_WRAP virtual int getSvmsgdType() const = 0; + /** @copybrief getSvmsgdType @see getSvmsgdType */ + CV_WRAP virtual void setSvmsgdType(int svmsgdType) = 0; + + /** @brief %Margin type, one of SVMSGD::MarginType. */ + /** @see setMarginType */ + CV_WRAP virtual int getMarginType() const = 0; + /** @copybrief getMarginType @see getMarginType */ + CV_WRAP virtual void setMarginType(int marginType) = 0; + + /** @brief Parameter marginRegularization of a %SVMSGD optimization problem. */ + /** @see setMarginRegularization */ + CV_WRAP virtual float getMarginRegularization() const = 0; + /** @copybrief getMarginRegularization @see getMarginRegularization */ + CV_WRAP virtual void setMarginRegularization(float marginRegularization) = 0; + + /** @brief Parameter initialStepSize of a %SVMSGD optimization problem. */ + /** @see setInitialStepSize */ + CV_WRAP virtual float getInitialStepSize() const = 0; + /** @copybrief getInitialStepSize @see getInitialStepSize */ + CV_WRAP virtual void setInitialStepSize(float InitialStepSize) = 0; + + /** @brief Parameter stepDecreasingPower of a %SVMSGD optimization problem. */ + /** @see setStepDecreasingPower */ + CV_WRAP virtual float getStepDecreasingPower() const = 0; + /** @copybrief getStepDecreasingPower @see getStepDecreasingPower */ + CV_WRAP virtual void setStepDecreasingPower(float stepDecreasingPower) = 0; + + /** @brief Termination criteria of the training algorithm. 
+ You can specify the maximum number of iterations (maxCount) and/or how much the error could + change between the iterations to make the algorithm continue (epsilon).*/ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0; +}; + + +/****************************************************************************************\ +* Auxiliary functions declarations * +\****************************************************************************************/ + +/** @brief Generates _sample_ from multivariate normal distribution + +@param mean an average row vector +@param cov symmetric covariation matrix +@param nsamples returned samples count +@param samples returned samples array +*/ +CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples); + +/** @brief Creates test set */ +CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses, + OutputArray samples, OutputArray responses); + + +/****************************************************************************************\ +* Simulated annealing solver * +\****************************************************************************************/ + +#ifdef CV_DOXYGEN +/** @brief This class declares example interface for system state used in simulated annealing optimization algorithm. + +@note This class is not defined in C++ code and can't be use directly - you need your own implementation with the same methods. +*/ +struct SimulatedAnnealingSolverSystem +{ + /** Give energy value for a state of system.*/ + double energy() const; + /** Function which change the state of system (random perturbation).*/ + void changeState(); + /** Function to reverse to the previous state. Can be called once only after changeState(). */ + void reverseState(); +}; +#endif // CV_DOXYGEN + +/** @brief The class implements simulated annealing for optimization. + +@cite Kirkpatrick83 for details + +@param solverSystem optimization system (see SimulatedAnnealingSolverSystem) +@param initialTemperature initial temperature +@param finalTemperature final temperature +@param coolingRatio temperature step multiplies +@param iterationsPerStep number of iterations per temperature changing step +@param lastTemperature optional output for last used temperature +@param rngEnergy specify custom random numbers generator (cv::theRNG() by default) +*/ +template +int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem, + double initialTemperature, double finalTemperature, double coolingRatio, + size_t iterationsPerStep, + CV_OUT double* lastTemperature = NULL, + cv::RNG& rngEnergy = cv::theRNG() +); + +//! @} ml + +} +} + +#include + +#endif // __cplusplus +#endif // OPENCV_ML_HPP + +/* End of file. */ diff --git a/modules/ml/include/opencv2/ml/ml.hpp b/modules/ml/include/opencv2/ml/ml.hpp new file mode 100644 index 00000000000..f6f9cd8f893 --- /dev/null +++ b/modules/ml/include/opencv2/ml/ml.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/ml.hpp" diff --git a/modules/ml/include/opencv2/ml/ml.inl.hpp b/modules/ml/include/opencv2/ml/ml.inl.hpp new file mode 100644 index 00000000000..dc9c78393a5 --- /dev/null +++ b/modules/ml/include/opencv2/ml/ml.inl.hpp @@ -0,0 +1,60 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
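+//
+// This header supplies the template implementation of cv::ml::simulatedAnnealingSolver()
+// declared in opencv2/ml.hpp. As an illustration only (the struct name below is hypothetical,
+// not part of the API), a minimal system satisfying the SimulatedAnnealingSolverSystem
+// concept could look like:
+//
+//   struct Quadratic1D
+//   {
+//       double x = 10.0, prev = 10.0;
+//       cv::RNG rng;
+//       double energy() const { return x * x; }                       // value being minimized
+//       void changeState() { prev = x; x += rng.uniform(-1.0, 1.0); } // random perturbation
+//       void reverseState() { x = prev; }                             // undo the last perturbation
+//   };
+//
+//   Quadratic1D system;
+//   int accepted = cv::ml::simulatedAnnealingSolver(system, /*initialTemperature=*/1.0,
+//                      /*finalTemperature=*/0.01, /*coolingRatio=*/0.98, /*iterationsPerStep=*/100);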
+ +#ifndef OPENCV_ML_INL_HPP +#define OPENCV_ML_INL_HPP + +namespace cv { namespace ml { + +// declared in ml.hpp +template +int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem, + double initialTemperature, double finalTemperature, double coolingRatio, + size_t iterationsPerStep, + CV_OUT double* lastTemperature, + cv::RNG& rngEnergy +) +{ + CV_Assert(finalTemperature > 0); + CV_Assert(initialTemperature > finalTemperature); + CV_Assert(iterationsPerStep > 0); + CV_Assert(coolingRatio < 1.0f); + double Ti = initialTemperature; + double previousEnergy = solverSystem.energy(); + int exchange = 0; + while (Ti > finalTemperature) + { + for (size_t i = 0; i < iterationsPerStep; i++) + { + solverSystem.changeState(); + double newEnergy = solverSystem.energy(); + if (newEnergy < previousEnergy) + { + previousEnergy = newEnergy; + exchange++; + } + else + { + double r = rngEnergy.uniform(0.0, 1.0); + if (r < std::exp(-(newEnergy - previousEnergy) / Ti)) + { + previousEnergy = newEnergy; + exchange++; + } + else + { + solverSystem.reverseState(); + } + } + } + Ti *= coolingRatio; + } + if (lastTemperature) + *lastTemperature = Ti; + return exchange; +} + +}} //namespace + +#endif // OPENCV_ML_INL_HPP diff --git a/modules/ml/misc/java/test/MLTest.java b/modules/ml/misc/java/test/MLTest.java new file mode 100644 index 00000000000..504805dffa9 --- /dev/null +++ b/modules/ml/misc/java/test/MLTest.java @@ -0,0 +1,42 @@ +package org.opencv.test.ml; + +import org.opencv.ml.Ml; +import org.opencv.ml.SVM; +import org.opencv.core.Mat; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfInt; +import org.opencv.core.CvType; +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; + +public class MLTest extends OpenCVTestCase { + + public void testSaveLoad() { + Mat samples = new MatOfFloat(new float[] { + 5.1f, 3.5f, 1.4f, 0.2f, + 4.9f, 3.0f, 1.4f, 0.2f, + 4.7f, 3.2f, 1.3f, 0.2f, + 4.6f, 3.1f, 1.5f, 0.2f, + 5.0f, 3.6f, 1.4f, 0.2f, + 7.0f, 3.2f, 4.7f, 1.4f, + 6.4f, 3.2f, 4.5f, 1.5f, + 6.9f, 3.1f, 4.9f, 1.5f, + 5.5f, 2.3f, 4.0f, 1.3f, + 6.5f, 2.8f, 4.6f, 1.5f + }).reshape(1, 10); + Mat responses = new MatOfInt(new int[] { + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1 + }).reshape(1, 10); + SVM saved = SVM.create(); + assertFalse(saved.isTrained()); + + saved.train(samples, Ml.ROW_SAMPLE, responses); + assertTrue(saved.isTrained()); + + String filename = OpenCVTestRunner.getTempFileName("yml"); + saved.save(filename); + SVM loaded = SVM.load(filename); + assertTrue(loaded.isTrained()); + } + +} diff --git a/modules/ml/misc/objc/gen_dict.json b/modules/ml/misc/objc/gen_dict.json new file mode 100644 index 00000000000..1f35051c2d0 --- /dev/null +++ b/modules/ml/misc/objc/gen_dict.json @@ -0,0 +1,9 @@ +{ + "enum_fix" : { + "EM" : { "Types": "EMTypes" }, + "SVM" : { "Types": "SVMTypes" }, + "KNearest" : { "Types": "KNearestTypes" }, + "DTrees" : { "Flags": "DTreeFlags" }, + "StatModel" : { "Flags": "StatModelFlags" } + } +} diff --git a/modules/ml/misc/python/pyopencv_ml.hpp b/modules/ml/misc/python/pyopencv_ml.hpp new file mode 100644 index 00000000000..564eba5face --- /dev/null +++ b/modules/ml/misc/python/pyopencv_ml.hpp @@ -0,0 +1,22 @@ +template<> +bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const ArgInfo& info) +{ + CV_UNUSED(info); + if(!obj) + return true; + return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0; +} + +template<> +bool pyopencv_to(PyObject* obj, CvSlice& r, const ArgInfo& info) +{ + CV_UNUSED(info); + if(!obj || 
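+       // no argument / Py_None: leave the destination slice at its default value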
obj == Py_None) + return true; + if(PyObject_Size(obj) == 0) + { + r = CV_WHOLE_SEQ; + return true; + } + return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0; +} \ No newline at end of file diff --git a/modules/ml/misc/python/test/test_digits.py b/modules/ml/misc/python/test/test_digits.py new file mode 100644 index 00000000000..2d5c99826fe --- /dev/null +++ b/modules/ml/misc/python/test/test_digits.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python + +''' +SVM and KNearest digit recognition. + +Sample loads a dataset of handwritten digits from '../data/digits.png'. +Then it trains a SVM and KNearest classifiers on it and evaluates +their accuracy. + +Following preprocessing is applied to the dataset: + - Moment-based image deskew (see deskew()) + - Digit images are split into 4 10x10 cells and 16-bin + histogram of oriented gradients is computed for each + cell + - Transform histograms to space with Hellinger metric (see [1] (RootSIFT)) + + +[1] R. Arandjelovic, A. Zisserman + "Three things everyone should know to improve object retrieval" + http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf + +''' + + +# Python 2/3 compatibility +from __future__ import print_function + +# built-in modules +from multiprocessing.pool import ThreadPool + +import cv2 as cv + +import numpy as np +from numpy.linalg import norm + + +SZ = 20 # size of each digit is SZ x SZ +CLASS_N = 10 +DIGITS_FN = 'samples/data/digits.png' + +def split2d(img, cell_size, flatten=True): + h, w = img.shape[:2] + sx, sy = cell_size + cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)] + cells = np.array(cells) + if flatten: + cells = cells.reshape(-1, sy, sx) + return cells + +def deskew(img): + m = cv.moments(img) + if abs(m['mu02']) < 1e-2: + return img.copy() + skew = m['mu11']/m['mu02'] + M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) + img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) + return img + +class StatModel(object): + def load(self, fn): + self.model.load(fn) # Known bug: https://github.com/opencv/opencv/issues/4969 + def save(self, fn): + self.model.save(fn) + +class KNearest(StatModel): + def __init__(self, k = 3): + self.k = k + self.model = cv.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k) + return results.ravel() + +class SVM(StatModel): + def __init__(self, C = 1, gamma = 0.5): + self.model = cv.ml.SVM_create() + self.model.setGamma(gamma) + self.model.setC(C) + self.model.setKernel(cv.ml.SVM_RBF) + self.model.setType(cv.ml.SVM_C_SVC) + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + return self.model.predict(samples)[1].ravel() + + +def evaluate_model(model, digits, samples, labels): + resp = model.predict(samples) + err = (labels != resp).mean() + + confusion = np.zeros((10, 10), np.int32) + for i, j in zip(labels, resp): + confusion[int(i), int(j)] += 1 + + return err, confusion + +def preprocess_simple(digits): + return np.float32(digits).reshape(-1, SZ*SZ) / 255.0 + +def preprocess_hog(digits): + samples = [] + for img in digits: + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + mag, ang = cv.cartToPolar(gx, gy) + bin_n = 16 + bin = np.int32(bin_n*ang/(2*np.pi)) + bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], 
bin[10:,10:] + mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:] + hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)] + hist = np.hstack(hists) + + # transform to Hellinger kernel + eps = 1e-7 + hist /= hist.sum() + eps + hist = np.sqrt(hist) + hist /= norm(hist) + eps + + samples.append(hist) + return np.float32(samples) + +from tests_common import NewOpenCVTests + +class digits_test(NewOpenCVTests): + + def load_digits(self, fn): + digits_img = self.get_sample(fn, 0) + digits = split2d(digits_img, (SZ, SZ)) + labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N) + return digits, labels + + def test_digits(self): + + digits, labels = self.load_digits(DIGITS_FN) + + # shuffle digits + rand = np.random.RandomState(321) + shuffle = rand.permutation(len(digits)) + digits, labels = digits[shuffle], labels[shuffle] + + digits2 = list(map(deskew, digits)) + samples = preprocess_hog(digits2) + + train_n = int(0.9*len(samples)) + _digits_train, digits_test = np.split(digits2, [train_n]) + samples_train, samples_test = np.split(samples, [train_n]) + labels_train, labels_test = np.split(labels, [train_n]) + errors = list() + confusionMatrixes = list() + + model = KNearest(k=4) + model.train(samples_train, labels_train) + error, confusion = evaluate_model(model, digits_test, samples_test, labels_test) + errors.append(error) + confusionMatrixes.append(confusion) + + model = SVM(C=2.67, gamma=5.383) + model.train(samples_train, labels_train) + error, confusion = evaluate_model(model, digits_test, samples_test, labels_test) + errors.append(error) + confusionMatrixes.append(confusion) + + eps = 0.001 + normEps = len(samples_test) * 0.02 + + confusionKNN = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 57, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 59, 1, 0, 0, 0, 0, 1, 0], + [ 0, 0, 0, 43, 0, 0, 0, 1, 0, 0], + [ 0, 0, 0, 0, 38, 0, 2, 0, 0, 0], + [ 0, 0, 0, 2, 0, 48, 0, 0, 1, 0], + [ 0, 1, 0, 0, 0, 0, 51, 0, 0, 0], + [ 0, 0, 1, 0, 0, 0, 0, 54, 0, 0], + [ 0, 0, 0, 0, 0, 1, 0, 0, 46, 0], + [ 1, 1, 0, 1, 1, 0, 0, 0, 2, 42]] + + confusionSVM = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 57, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 59, 2, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 43, 0, 0, 0, 1, 0, 0], + [ 0, 0, 0, 0, 40, 0, 0, 0, 0, 0], + [ 0, 0, 0, 1, 0, 50, 0, 0, 0, 0], + [ 0, 0, 0, 0, 1, 0, 51, 0, 0, 0], + [ 0, 0, 1, 0, 0, 0, 0, 54, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0, 0, 47, 0], + [ 0, 1, 0, 1, 0, 0, 0, 0, 1, 45]] + + self.assertLess(cv.norm(confusionMatrixes[0] - confusionKNN, cv.NORM_L1), normEps) + self.assertLess(cv.norm(confusionMatrixes[1] - confusionSVM, cv.NORM_L1), normEps) + + self.assertLess(errors[0] - 0.034, eps) + self.assertLess(errors[1] - 0.018, eps) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/modules/ml/misc/python/test/test_gaussian_mix.py b/modules/ml/misc/python/test/test_gaussian_mix.py new file mode 100644 index 00000000000..62866295e61 --- /dev/null +++ b/modules/ml/misc/python/test/test_gaussian_mix.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python + +# Python 2/3 compatibility +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 + +if PY3: + xrange = range + +import numpy as np +from numpy import random +import cv2 as cv + +def make_gaussians(cluster_n, img_size): + points = [] + ref_distrs = [] + for _ in xrange(cluster_n): + mean = (0.1 + 0.8*random.rand(2)) * img_size + a = (random.rand(2, 2)-0.5)*img_size*0.1 + cov = np.dot(a.T, a) + img_size*0.05*np.eye(2) + n = 100 + random.randint(900) + pts = 
random.multivariate_normal(mean, cov, n) + points.append( pts ) + ref_distrs.append( (mean, cov) ) + points = np.float32( np.vstack(points) ) + return points, ref_distrs + +from tests_common import NewOpenCVTests + +class gaussian_mix_test(NewOpenCVTests): + + def test_gaussian_mix(self): + + np.random.seed(10) + cluster_n = 5 + img_size = 512 + + points, ref_distrs = make_gaussians(cluster_n, img_size) + + em = cv.ml.EM_create() + em.setClustersNumber(cluster_n) + em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC) + em.trainEM(points) + means = em.getMeans() + covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232 + #found_distrs = zip(means, covs) + + matches_count = 0 + + meanEps = 0.05 + covEps = 0.1 + + for i in range(cluster_n): + for j in range(cluster_n): + if (cv.norm(means[i] - ref_distrs[j][0], cv.NORM_L2) / cv.norm(ref_distrs[j][0], cv.NORM_L2) < meanEps and + cv.norm(covs[i] - ref_distrs[j][1], cv.NORM_L2) / cv.norm(ref_distrs[j][1], cv.NORM_L2) < covEps): + matches_count += 1 + + self.assertEqual(matches_count, cluster_n) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/modules/ml/misc/python/test/test_goodfeatures.py b/modules/ml/misc/python/test/test_goodfeatures.py new file mode 100644 index 00000000000..a590ba9fa9f --- /dev/null +++ b/modules/ml/misc/python/test/test_goodfeatures.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +# Python 2/3 compatibility +from __future__ import print_function + +import cv2 as cv +import numpy as np + +from tests_common import NewOpenCVTests + +class TestGoodFeaturesToTrack_test(NewOpenCVTests): + def test_goodFeaturesToTrack(self): + arr = self.get_sample('samples/data/lena.jpg', 0) + original = arr.copy() + threshes = [ x / 100. for x in range(1,10) ] + numPoints = 20000 + + results = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]) + # Check that GoodFeaturesToTrack has not modified input image + self.assertTrue(arr.tostring() == original.tostring()) + # Check for repeatability + for i in range(1): + results2 = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]) + for t in threshes: + self.assertTrue(len(results2[t]) == len(results[t])) + for i in range(len(results[t])): + self.assertTrue(cv.norm(results[t][i][0] - results2[t][i][0]) == 0) + + for t0,t1 in zip(threshes, threshes[1:]): + r0 = results[t0] + r1 = results[t1] + # Increasing thresh should make result list shorter + self.assertTrue(len(r0) > len(r1)) + # Increasing thresh should monly truncate result list + for i in range(len(r1)): + self.assertTrue(cv.norm(r1[i][0] - r0[i][0])==0) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/modules/ml/misc/python/test/test_knearest.py b/modules/ml/misc/python/test/test_knearest.py new file mode 100644 index 00000000000..8ae0be5f73e --- /dev/null +++ b/modules/ml/misc/python/test/test_knearest.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +import cv2 as cv + +from tests_common import NewOpenCVTests + +class knearest_test(NewOpenCVTests): + def test_load(self): + k_nearest = cv.ml.KNearest_load(self.find_file("ml/opencv_ml_knn.xml")) + self.assertFalse(k_nearest.empty()) + self.assertTrue(k_nearest.isTrained()) + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/modules/ml/misc/python/test/test_letter_recog.py b/modules/ml/misc/python/test/test_letter_recog.py new file mode 100644 index 00000000000..66bef390613 --- /dev/null +++ 
b/modules/ml/misc/python/test/test_letter_recog.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +''' +The sample demonstrates how to train Random Trees classifier +(or Boosting classifier, or MLP, or Knearest, or Support Vector Machines) using the provided dataset. + +We use the sample database letter-recognition.data +from UCI Repository, here is the link: + +Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998). +UCI Repository of machine learning databases +[http://www.ics.uci.edu/~mlearn/MLRepository.html]. +Irvine, CA: University of California, Department of Information and Computer Science. + +The dataset consists of 20000 feature vectors along with the +responses - capital latin letters A..Z. +The first 10000 samples are used for training +and the remaining 10000 - to test the classifier. +====================================================== + Models: RTrees, KNearest, Boost, SVM, MLP +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 as cv + +def load_base(fn): + a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') }) + samples, responses = a[:,1:], a[:,0] + return samples, responses + +class LetterStatModel(object): + class_n = 26 + train_ratio = 0.5 + + def load(self, fn): + self.model.load(fn) + def save(self, fn): + self.model.save(fn) + + def unroll_samples(self, samples): + sample_n, var_n = samples.shape + new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32) + new_samples[:,:-1] = np.repeat(samples, self.class_n, axis=0) + new_samples[:,-1] = np.tile(np.arange(self.class_n), sample_n) + return new_samples + + def unroll_responses(self, responses): + sample_n = len(responses) + new_responses = np.zeros(sample_n*self.class_n, np.int32) + resp_idx = np.int32( responses + np.arange(sample_n)*self.class_n ) + new_responses[resp_idx] = 1 + return new_responses + +class RTrees(LetterStatModel): + def __init__(self): + self.model = cv.ml.RTrees_create() + + def train(self, samples, responses): + #sample_n, var_n = samples.shape + self.model.setMaxDepth(20) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + _ret, resp = self.model.predict(samples) + return resp.ravel() + + +class KNearest(LetterStatModel): + def __init__(self): + self.model = cv.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10) + return results.ravel() + + +class Boost(LetterStatModel): + def __init__(self): + self.model = cv.ml.Boost_create() + + def train(self, samples, responses): + _sample_n, var_n = samples.shape + new_samples = self.unroll_samples(samples) + new_responses = self.unroll_responses(responses) + var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8) + + self.model.setWeakCount(15) + self.model.setMaxDepth(10) + self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) + + def predict(self, samples): + new_samples = self.unroll_samples(samples) + _ret, resp = self.model.predict(new_samples) + + return resp.ravel().reshape(-1, self.class_n).argmax(1) + + +class SVM(LetterStatModel): + def __init__(self): + self.model = cv.ml.SVM_create() + + def train(self, samples, responses): + self.model.setType(cv.ml.SVM_C_SVC) + self.model.setC(1) 
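+        # C-SVC with an RBF kernel: C is the soft-margin penalty and gamma (set just below)
+        # is the kernel width used for this letter-recognition sample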
+ self.model.setKernel(cv.ml.SVM_RBF) + self.model.setGamma(.1) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + _ret, resp = self.model.predict(samples) + return resp.ravel() + + +class MLP(LetterStatModel): + def __init__(self): + self.model = cv.ml.ANN_MLP_create() + + def train(self, samples, responses): + _sample_n, var_n = samples.shape + new_responses = self.unroll_responses(responses).reshape(-1, self.class_n) + layer_sizes = np.int32([var_n, 100, 100, self.class_n]) + + self.model.setLayerSizes(layer_sizes) + self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP) + self.model.setBackpropMomentumScale(0) + self.model.setBackpropWeightScale(0.001) + self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01)) + self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1) + + self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses)) + + def predict(self, samples): + _ret, resp = self.model.predict(samples) + return resp.argmax(-1) + +from tests_common import NewOpenCVTests + +class letter_recog_test(NewOpenCVTests): + + def test_letter_recog(self): + + eps = 0.01 + + models = [RTrees, KNearest, Boost, SVM, MLP] + models = dict( [(cls.__name__.lower(), cls) for cls in models] ) + testErrors = {RTrees: (98.930000, 92.390000), KNearest: (94.960000, 92.010000), + Boost: (85.970000, 74.920000), SVM: (99.780000, 95.680000), MLP: (90.060000, 87.410000)} + + for model in models: + Model = models[model] + classifier = Model() + + samples, responses = load_base(self.repoPath + '/samples/data/letter-recognition.data') + train_n = int(len(samples)*classifier.train_ratio) + + classifier.train(samples[:train_n], responses[:train_n]) + train_rate = np.mean(classifier.predict(samples[:train_n]) == responses[:train_n].astype(int)) + test_rate = np.mean(classifier.predict(samples[train_n:]) == responses[train_n:].astype(int)) + + self.assertLess(train_rate - testErrors[Model][0], eps) + self.assertLess(test_rate - testErrors[Model][1], eps) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/modules/ml/misc/python/test/test_ml.py b/modules/ml/misc/python/test/test_ml.py new file mode 100644 index 00000000000..40cc0e4d88b --- /dev/null +++ b/modules/ml/misc/python/test/test_ml.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +from __future__ import print_function + +import numpy as np +import cv2 as cv + +from tests_common import NewOpenCVTests + +class Bindings(NewOpenCVTests): + + def test_inheritance(self): + + boost = cv.ml.Boost_create() + boost.getBoostType() # from ml::Boost + boost.getMaxDepth() # from ml::DTrees + boost.isClassifier() # from ml::StatModel + +class Arguments(NewOpenCVTests): + + def test_class_from_submodule_has_global_alias(self): + self.assertTrue(hasattr(cv.ml, "Boost"), + msg="Class is not registered in the submodule") + self.assertTrue(hasattr(cv, "ml_Boost"), + msg="Class from submodule doesn't have alias in the " + "global module") + self.assertEqual(cv.ml.Boost, cv.ml_Boost, + msg="Classes from submodules and global module don't refer " + "to the same type") + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() \ No newline at end of file diff --git a/modules/ml/samples/digits_svm.cpp b/modules/ml/samples/digits_svm.cpp new file mode 100644 index 00000000000..3e28e24fe71 --- /dev/null +++ b/modules/ml/samples/digits_svm.cpp @@ -0,0 +1,367 @@ +#include "opencv2/core.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include 
"opencv2/ml.hpp" + +#include +#include +#include + +using namespace cv; +using namespace std; + +const int SZ = 20; // size of each digit is SZ x SZ +const int CLASS_N = 10; +const char* DIGITS_FN = "digits.png"; + +static void help(char** argv) +{ + cout << + "\n" + "SVM and KNearest digit recognition.\n" + "\n" + "Sample loads a dataset of handwritten digits from 'digits.png'.\n" + "Then it trains a SVM and KNearest classifiers on it and evaluates\n" + "their accuracy.\n" + "\n" + "Following preprocessing is applied to the dataset:\n" + " - Moment-based image deskew (see deskew())\n" + " - Digit images are split into 4 10x10 cells and 16-bin\n" + " histogram of oriented gradients is computed for each\n" + " cell\n" + " - Transform histograms to space with Hellinger metric (see [1] (RootSIFT))\n" + "\n" + "\n" + "[1] R. Arandjelovic, A. Zisserman\n" + " \"Three things everyone should know to improve object retrieval\"\n" + " http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf\n" + "\n" + "Usage:\n" + << argv[0] << endl; +} + +static void split2d(const Mat& image, const Size cell_size, vector& cells) +{ + int height = image.rows; + int width = image.cols; + + int sx = cell_size.width; + int sy = cell_size.height; + + cells.clear(); + + for (int i = 0; i < height; i += sy) + { + for (int j = 0; j < width; j += sx) + { + cells.push_back(image(Rect(j, i, sx, sy))); + } + } +} + +static void load_digits(const char* fn, vector& digits, vector& labels) +{ + digits.clear(); + labels.clear(); + + String filename = samples::findFile(fn); + + cout << "Loading " << filename << " ..." << endl; + + Mat digits_img = imread(filename, IMREAD_GRAYSCALE); + split2d(digits_img, Size(SZ, SZ), digits); + + for (int i = 0; i < CLASS_N; i++) + { + for (size_t j = 0; j < digits.size() / CLASS_N; j++) + { + labels.push_back(i); + } + } +} + +static void deskew(const Mat& img, Mat& deskewed_img) +{ + Moments m = moments(img); + + if (abs(m.mu02) < 0.01) + { + deskewed_img = img.clone(); + return; + } + + float skew = (float)(m.mu11 / m.mu02); + float M_vals[2][3] = {{1, skew, -0.5f * SZ * skew}, {0, 1, 0}}; + Mat M(Size(3, 2), CV_32F, &M_vals[0][0]); + + warpAffine(img, deskewed_img, M, Size(SZ, SZ), WARP_INVERSE_MAP | INTER_LINEAR); +} + +static void mosaic(const int width, const vector& images, Mat& grid) +{ + int mat_width = SZ * width; + int mat_height = SZ * (int)ceil((double)images.size() / width); + + if (!images.empty()) + { + grid = Mat(Size(mat_width, mat_height), images[0].type()); + + for (size_t i = 0; i < images.size(); i++) + { + Mat location_on_grid = grid(Rect(SZ * ((int)i % width), SZ * ((int)i / width), SZ, SZ)); + images[i].copyTo(location_on_grid); + } + } +} + +static void evaluate_model(const vector& predictions, const vector& digits, const vector& labels, Mat& mos) +{ + double err = 0; + + for (size_t i = 0; i < predictions.size(); i++) + { + if ((int)predictions[i] != labels[i]) + { + err++; + } + } + + err /= predictions.size(); + + cout << cv::format("error: %.2f %%", err * 100) << endl; + + int confusion[10][10] = {}; + + for (size_t i = 0; i < labels.size(); i++) + { + confusion[labels[i]][(int)predictions[i]]++; + } + + cout << "confusion matrix:" << endl; + for (int i = 0; i < 10; i++) + { + for (int j = 0; j < 10; j++) + { + cout << cv::format("%2d ", confusion[i][j]); + } + cout << endl; + } + + cout << endl; + + vector vis; + + for (size_t i = 0; i < digits.size(); i++) + { + Mat img; + cvtColor(digits[i], img, COLOR_GRAY2BGR); + + if 
((int)predictions[i] != labels[i]) + { + for (int j = 0; j < img.rows; j++) + { + for (int k = 0; k < img.cols; k++) + { + img.at(j, k)[0] = 0; + img.at(j, k)[1] = 0; + } + } + } + + vis.push_back(img); + } + + mosaic(25, vis, mos); +} + +static void bincount(const Mat& x, const Mat& weights, const int min_length, vector& bins) +{ + double max_x_val = 0; + minMaxLoc(x, NULL, &max_x_val); + + bins = vector(max((int)max_x_val, min_length)); + + for (int i = 0; i < x.rows; i++) + { + for (int j = 0; j < x.cols; j++) + { + bins[x.at(i, j)] += weights.at(i, j); + } + } +} + +static void preprocess_hog(const vector& digits, Mat& hog) +{ + int bin_n = 16; + int half_cell = SZ / 2; + double eps = 1e-7; + + hog = Mat(Size(4 * bin_n, (int)digits.size()), CV_32F); + + for (size_t img_index = 0; img_index < digits.size(); img_index++) + { + Mat gx; + Sobel(digits[img_index], gx, CV_32F, 1, 0); + + Mat gy; + Sobel(digits[img_index], gy, CV_32F, 0, 1); + + Mat mag; + Mat ang; + cartToPolar(gx, gy, mag, ang); + + Mat bin(ang.size(), CV_32S); + + for (int i = 0; i < ang.rows; i++) + { + for (int j = 0; j < ang.cols; j++) + { + bin.at(i, j) = (int)(bin_n * ang.at(i, j) / (2 * CV_PI)); + } + } + + Mat bin_cells[] = { + bin(Rect(0, 0, half_cell, half_cell)), + bin(Rect(half_cell, 0, half_cell, half_cell)), + bin(Rect(0, half_cell, half_cell, half_cell)), + bin(Rect(half_cell, half_cell, half_cell, half_cell)) + }; + Mat mag_cells[] = { + mag(Rect(0, 0, half_cell, half_cell)), + mag(Rect(half_cell, 0, half_cell, half_cell)), + mag(Rect(0, half_cell, half_cell, half_cell)), + mag(Rect(half_cell, half_cell, half_cell, half_cell)) + }; + + vector hist; + hist.reserve(4 * bin_n); + + for (int i = 0; i < 4; i++) + { + vector partial_hist; + bincount(bin_cells[i], mag_cells[i], bin_n, partial_hist); + hist.insert(hist.end(), partial_hist.begin(), partial_hist.end()); + } + + // transform to Hellinger kernel + double sum = 0; + + for (size_t i = 0; i < hist.size(); i++) + { + sum += hist[i]; + } + + for (size_t i = 0; i < hist.size(); i++) + { + hist[i] /= sum + eps; + hist[i] = sqrt(hist[i]); + } + + double hist_norm = norm(hist); + + for (size_t i = 0; i < hist.size(); i++) + { + hog.at((int)img_index, (int)i) = (float)(hist[i] / (hist_norm + eps)); + } + } +} + +static void shuffle(vector& digits, vector& labels) +{ + vector shuffled_indexes(digits.size()); + + for (size_t i = 0; i < digits.size(); i++) + { + shuffled_indexes[i] = (int)i; + } + + randShuffle(shuffled_indexes); + + vector shuffled_digits(digits.size()); + vector shuffled_labels(labels.size()); + + for (size_t i = 0; i < shuffled_indexes.size(); i++) + { + shuffled_digits[shuffled_indexes[i]] = digits[i]; + shuffled_labels[shuffled_indexes[i]] = labels[i]; + } + + digits = shuffled_digits; + labels = shuffled_labels; +} + +int main(int /* argc */, char* argv[]) +{ + help(argv); + + vector digits; + vector labels; + + load_digits(DIGITS_FN, digits, labels); + + cout << "preprocessing..." 
<< endl; + + // shuffle digits + shuffle(digits, labels); + + vector digits2; + + for (size_t i = 0; i < digits.size(); i++) + { + Mat deskewed_digit; + deskew(digits[i], deskewed_digit); + digits2.push_back(deskewed_digit); + } + + Mat samples; + + preprocess_hog(digits2, samples); + + int train_n = (int)(0.9 * samples.rows); + Mat test_set; + + vector digits_test(digits2.begin() + train_n, digits2.end()); + mosaic(25, digits_test, test_set); + imshow("test set", test_set); + + Mat samples_train = samples(Rect(0, 0, samples.cols, train_n)); + Mat samples_test = samples(Rect(0, train_n, samples.cols, samples.rows - train_n)); + vector labels_train(labels.begin(), labels.begin() + train_n); + vector labels_test(labels.begin() + train_n, labels.end()); + + Ptr k_nearest; + Ptr svm; + vector predictions; + Mat vis; + + cout << "training KNearest..." << endl; + k_nearest = ml::KNearest::create(); + k_nearest->train(samples_train, ml::ROW_SAMPLE, labels_train); + + // predict digits with KNearest + k_nearest->findNearest(samples_test, 4, predictions); + evaluate_model(predictions, digits_test, labels_test, vis); + imshow("KNearest test", vis); + k_nearest.release(); + + cout << "training SVM..." << endl; + svm = ml::SVM::create(); + svm->setGamma(5.383); + svm->setC(2.67); + svm->setKernel(ml::SVM::RBF); + svm->setType(ml::SVM::C_SVC); + svm->train(samples_train, ml::ROW_SAMPLE, labels_train); + + // predict digits with SVM + svm->predict(samples_test, predictions); + evaluate_model(predictions, digits_test, labels_test, vis); + imshow("SVM test", vis); + cout << "Saving SVM as \"digits_svm.yml\"..." << endl; + svm->save("digits_svm.yml"); + svm.release(); + + waitKey(); + + return 0; +} diff --git a/modules/ml/samples/em.cpp b/modules/ml/samples/em.cpp new file mode 100644 index 00000000000..f5310740f46 --- /dev/null +++ b/modules/ml/samples/em.cpp @@ -0,0 +1,70 @@ +#include "opencv2/highgui.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/ml.hpp" + +using namespace cv; +using namespace cv::ml; + +int main( int /*argc*/, char** /*argv*/ ) +{ + const int N = 4; + const int N1 = (int)sqrt((double)N); + const Scalar colors[] = + { + Scalar(0,0,255), Scalar(0,255,0), + Scalar(0,255,255),Scalar(255,255,0) + }; + + int i, j; + int nsamples = 100; + Mat samples( nsamples, 2, CV_32FC1 ); + Mat labels; + Mat img = Mat::zeros( Size( 500, 500 ), CV_8UC3 ); + Mat sample( 1, 2, CV_32FC1 ); + + samples = samples.reshape(2, 0); + for( i = 0; i < N; i++ ) + { + // form the training samples + Mat samples_part = samples.rowRange(i*nsamples/N, (i+1)*nsamples/N ); + + Scalar mean(((i%N1)+1)*img.rows/(N1+1), + ((i/N1)+1)*img.rows/(N1+1)); + Scalar sigma(30,30); + randn( samples_part, mean, sigma ); + } + samples = samples.reshape(1, 0); + + // cluster the data + Ptr em_model = EM::create(); + em_model->setClustersNumber(N); + em_model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL); + em_model->setTermCriteria(TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)); + em_model->trainEM( samples, noArray(), labels, noArray() ); + + // classify every image pixel + for( i = 0; i < img.rows; i++ ) + { + for( j = 0; j < img.cols; j++ ) + { + sample.at(0) = (float)j; + sample.at(1) = (float)i; + int response = cvRound(em_model->predict2( sample, noArray() )[1]); + Scalar c = colors[response]; + + circle( img, Point(j, i), 1, c*0.75, FILLED ); + } + } + + //draw the clustered samples + for( i = 0; i < nsamples; i++ ) + { + Point pt(cvRound(samples.at(i, 0)), cvRound(samples.at(i, 1))); + circle( img, pt, 1, 
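+                // color each training sample by the cluster label assigned by EM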
colors[labels.at(i)], FILLED ); + } + + imshow( "EM-clustering result", img ); + waitKey(0); + + return 0; +} diff --git a/modules/ml/samples/introduction_to_svm.cpp b/modules/ml/samples/introduction_to_svm.cpp new file mode 100644 index 00000000000..a5bcf98cc0b --- /dev/null +++ b/modules/ml/samples/introduction_to_svm.cpp @@ -0,0 +1,81 @@ +#include +#include +#include +#include +#include + +using namespace cv; +using namespace cv::ml; + +int main(int, char**) +{ + // Set up training data + //! [setup1] + int labels[4] = {1, -1, -1, -1}; + float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} }; + //! [setup1] + //! [setup2] + Mat trainingDataMat(4, 2, CV_32F, trainingData); + Mat labelsMat(4, 1, CV_32SC1, labels); + //! [setup2] + + // Train the SVM + //! [init] + Ptr svm = SVM::create(); + svm->setType(SVM::C_SVC); + svm->setKernel(SVM::LINEAR); + svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6)); + //! [init] + //! [train] + svm->train(trainingDataMat, ROW_SAMPLE, labelsMat); + //! [train] + + // Data for visual representation + int width = 512, height = 512; + Mat image = Mat::zeros(height, width, CV_8UC3); + + // Show the decision regions given by the SVM + //! [show] + Vec3b green(0,255,0), blue(255,0,0); + for (int i = 0; i < image.rows; i++) + { + for (int j = 0; j < image.cols; j++) + { + Mat sampleMat = (Mat_(1,2) << j,i); + float response = svm->predict(sampleMat); + + if (response == 1) + image.at(i,j) = green; + else if (response == -1) + image.at(i,j) = blue; + } + } + //! [show] + + // Show the training data + //! [show_data] + int thickness = -1; + circle( image, Point(501, 10), 5, Scalar( 0, 0, 0), thickness ); + circle( image, Point(255, 10), 5, Scalar(255, 255, 255), thickness ); + circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness ); + circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness ); + //! [show_data] + + // Show support vectors + //! [show_vectors] + thickness = 2; + Mat sv = svm->getUncompressedSupportVectors(); + + for (int i = 0; i < sv.rows; i++) + { + const float* v = sv.ptr(i); + circle(image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness); + } + //! [show_vectors] + + imwrite("result.png", image); // save the image + + imshow("SVM Simple Example", image); // show it to the user + waitKey(); + return 0; +} diff --git a/modules/ml/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java b/modules/ml/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java new file mode 100644 index 00000000000..dcff5ff7884 --- /dev/null +++ b/modules/ml/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java @@ -0,0 +1,99 @@ +import org.opencv.core.Core; +import org.opencv.core.CvType; +import org.opencv.core.Mat; +import org.opencv.core.Point; +import org.opencv.core.Scalar; +import org.opencv.core.TermCriteria; +import org.opencv.highgui.HighGui; +import org.opencv.imgcodecs.Imgcodecs; +import org.opencv.imgproc.Imgproc; +import org.opencv.ml.Ml; +import org.opencv.ml.SVM; + +public class IntroductionToSVMDemo { + public static void main(String[] args) { + // Load the native OpenCV library + System.loadLibrary(Core.NATIVE_LIBRARY_NAME); + + // Set up training data + //! [setup1] + int[] labels = { 1, -1, -1, -1 }; + float[] trainingData = { 501, 10, 255, 10, 501, 255, 10, 501 }; + //! [setup1] + //! 
[setup2] + Mat trainingDataMat = new Mat(4, 2, CvType.CV_32FC1); + trainingDataMat.put(0, 0, trainingData); + Mat labelsMat = new Mat(4, 1, CvType.CV_32SC1); + labelsMat.put(0, 0, labels); + //! [setup2] + + // Train the SVM + //! [init] + SVM svm = SVM.create(); + svm.setType(SVM.C_SVC); + svm.setKernel(SVM.LINEAR); + svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, 100, 1e-6)); + //! [init] + //! [train] + svm.train(trainingDataMat, Ml.ROW_SAMPLE, labelsMat); + //! [train] + + // Data for visual representation + int width = 512, height = 512; + Mat image = Mat.zeros(height, width, CvType.CV_8UC3); + + // Show the decision regions given by the SVM + //! [show] + byte[] imageData = new byte[(int) (image.total() * image.channels())]; + Mat sampleMat = new Mat(1, 2, CvType.CV_32F); + float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())]; + for (int i = 0; i < image.rows(); i++) { + for (int j = 0; j < image.cols(); j++) { + sampleMatData[0] = j; + sampleMatData[1] = i; + sampleMat.put(0, 0, sampleMatData); + float response = svm.predict(sampleMat); + + if (response == 1) { + imageData[(i * image.cols() + j) * image.channels()] = 0; + imageData[(i * image.cols() + j) * image.channels() + 1] = (byte) 255; + imageData[(i * image.cols() + j) * image.channels() + 2] = 0; + } else if (response == -1) { + imageData[(i * image.cols() + j) * image.channels()] = (byte) 255; + imageData[(i * image.cols() + j) * image.channels() + 1] = 0; + imageData[(i * image.cols() + j) * image.channels() + 2] = 0; + } + } + } + image.put(0, 0, imageData); + //! [show] + + // Show the training data + //! [show_data] + int thickness = -1; + int lineType = Imgproc.LINE_8; + Imgproc.circle(image, new Point(501, 10), 5, new Scalar(0, 0, 0), thickness, lineType, 0); + Imgproc.circle(image, new Point(255, 10), 5, new Scalar(255, 255, 255), thickness, lineType, 0); + Imgproc.circle(image, new Point(501, 255), 5, new Scalar(255, 255, 255), thickness, lineType, 0); + Imgproc.circle(image, new Point(10, 501), 5, new Scalar(255, 255, 255), thickness, lineType, 0); + //! [show_data] + + // Show support vectors + //! [show_vectors] + thickness = 2; + Mat sv = svm.getUncompressedSupportVectors(); + float[] svData = new float[(int) (sv.total() * sv.channels())]; + sv.get(0, 0, svData); + for (int i = 0; i < sv.rows(); ++i) { + Imgproc.circle(image, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6, + new Scalar(128, 128, 128), thickness, lineType, 0); + } + //! 
[show_vectors] + + Imgcodecs.imwrite("result.png", image); // save the image + + HighGui.imshow("SVM Simple Example", image); // show it to the user + HighGui.waitKey(); + System.exit(0); + } +} diff --git a/modules/ml/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java b/modules/ml/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java new file mode 100644 index 00000000000..b2b40d1513e --- /dev/null +++ b/modules/ml/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java @@ -0,0 +1,186 @@ +import java.util.Random; + +import org.opencv.core.Core; +import org.opencv.core.CvType; +import org.opencv.core.Mat; +import org.opencv.core.Point; +import org.opencv.core.Scalar; +import org.opencv.core.TermCriteria; +import org.opencv.highgui.HighGui; +import org.opencv.imgcodecs.Imgcodecs; +import org.opencv.imgproc.Imgproc; +import org.opencv.ml.Ml; +import org.opencv.ml.SVM; + +public class NonLinearSVMsDemo { + public static final int NTRAINING_SAMPLES = 100; + public static final float FRAC_LINEAR_SEP = 0.9f; + + public static void main(String[] args) { + // Load the native OpenCV library + System.loadLibrary(Core.NATIVE_LIBRARY_NAME); + + System.out.println("\n--------------------------------------------------------------------------"); + System.out.println("This program shows Support Vector Machines for Non-Linearly Separable Data. "); + System.out.println("--------------------------------------------------------------------------\n"); + + // Data for visual representation + int width = 512, height = 512; + Mat I = Mat.zeros(height, width, CvType.CV_8UC3); + + // --------------------- 1. Set up training data randomly--------------------------------------- + Mat trainData = new Mat(2 * NTRAINING_SAMPLES, 2, CvType.CV_32F); + Mat labels = new Mat(2 * NTRAINING_SAMPLES, 1, CvType.CV_32S); + + Random rng = new Random(100); // Random value generation class + + // Set up the linearly separable part of the training data + int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES); + + //! 
[setup1] + // Generate random points for the class 1 + Mat trainClass = trainData.rowRange(0, nLinearSamples); + // The x coordinate of the points is in [0, 0.4) + Mat c = trainClass.colRange(0, 1); + float[] cData = new float[(int) (c.total() * c.channels())]; + double[] cDataDbl = rng.doubles(cData.length, 0, 0.4f * width).toArray(); + for (int i = 0; i < cData.length; i++) { + cData[i] = (float) cDataDbl[i]; + } + c.put(0, 0, cData); + // The y coordinate of the points is in [0, 1) + c = trainClass.colRange(1, 2); + cData = new float[(int) (c.total() * c.channels())]; + cDataDbl = rng.doubles(cData.length, 0, height).toArray(); + for (int i = 0; i < cData.length; i++) { + cData[i] = (float) cDataDbl[i]; + } + c.put(0, 0, cData); + + // Generate random points for the class 2 + trainClass = trainData.rowRange(2 * NTRAINING_SAMPLES - nLinearSamples, 2 * NTRAINING_SAMPLES); + // The x coordinate of the points is in [0.6, 1] + c = trainClass.colRange(0, 1); + cData = new float[(int) (c.total() * c.channels())]; + cDataDbl = rng.doubles(cData.length, 0.6 * width, width).toArray(); + for (int i = 0; i < cData.length; i++) { + cData[i] = (float) cDataDbl[i]; + } + c.put(0, 0, cData); + // The y coordinate of the points is in [0, 1) + c = trainClass.colRange(1, 2); + cData = new float[(int) (c.total() * c.channels())]; + cDataDbl = rng.doubles(cData.length, 0, height).toArray(); + for (int i = 0; i < cData.length; i++) { + cData[i] = (float) cDataDbl[i]; + } + c.put(0, 0, cData); + //! [setup1] + + // ------------------ Set up the non-linearly separable part of the training data --------------- + //! [setup2] + // Generate random points for the classes 1 and 2 + trainClass = trainData.rowRange(nLinearSamples, 2 * NTRAINING_SAMPLES - nLinearSamples); + // The x coordinate of the points is in [0.4, 0.6) + c = trainClass.colRange(0, 1); + cData = new float[(int) (c.total() * c.channels())]; + cDataDbl = rng.doubles(cData.length, 0.4 * width, 0.6 * width).toArray(); + for (int i = 0; i < cData.length; i++) { + cData[i] = (float) cDataDbl[i]; + } + c.put(0, 0, cData); + // The y coordinate of the points is in [0, 1) + c = trainClass.colRange(1, 2); + cData = new float[(int) (c.total() * c.channels())]; + cDataDbl = rng.doubles(cData.length, 0, height).toArray(); + for (int i = 0; i < cData.length; i++) { + cData[i] = (float) cDataDbl[i]; + } + c.put(0, 0, cData); + //! [setup2] + + // ------------------------- Set up the labels for the classes--------------------------------- + labels.rowRange(0, NTRAINING_SAMPLES).setTo(new Scalar(1)); // Class 1 + labels.rowRange(NTRAINING_SAMPLES, 2 * NTRAINING_SAMPLES).setTo(new Scalar(2)); // Class 2 + + // ------------------------ 2. Set up the support vector machines parameters-------------------- + System.out.println("Starting training process"); + //! [init] + SVM svm = SVM.create(); + svm.setType(SVM.C_SVC); + svm.setC(0.1); + svm.setKernel(SVM.LINEAR); + svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, (int) 1e7, 1e-6)); + //! [init] + + // ------------------------ 3. Train the svm---------------------------------------------------- + //! [train] + svm.train(trainData, Ml.ROW_SAMPLE, labels); + //! [train] + System.out.println("Finished training process"); + + // ------------------------ 4. Show the decision regions---------------------------------------- + //! 
[show] + byte[] IData = new byte[(int) (I.total() * I.channels())]; + Mat sampleMat = new Mat(1, 2, CvType.CV_32F); + float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())]; + for (int i = 0; i < I.rows(); i++) { + for (int j = 0; j < I.cols(); j++) { + sampleMatData[0] = j; + sampleMatData[1] = i; + sampleMat.put(0, 0, sampleMatData); + float response = svm.predict(sampleMat); + + if (response == 1) { + IData[(i * I.cols() + j) * I.channels()] = 0; + IData[(i * I.cols() + j) * I.channels() + 1] = 100; + IData[(i * I.cols() + j) * I.channels() + 2] = 0; + } else if (response == 2) { + IData[(i * I.cols() + j) * I.channels()] = 100; + IData[(i * I.cols() + j) * I.channels() + 1] = 0; + IData[(i * I.cols() + j) * I.channels() + 2] = 0; + } + } + } + I.put(0, 0, IData); + //! [show] + + // ----------------------- 5. Show the training data-------------------------------------------- + //! [show_data] + int thick = -1; + int lineType = Imgproc.LINE_8; + float px, py; + // Class 1 + float[] trainDataData = new float[(int) (trainData.total() * trainData.channels())]; + trainData.get(0, 0, trainDataData); + for (int i = 0; i < NTRAINING_SAMPLES; i++) { + px = trainDataData[i * trainData.cols()]; + py = trainDataData[i * trainData.cols() + 1]; + Imgproc.circle(I, new Point(px, py), 3, new Scalar(0, 255, 0), thick, lineType, 0); + } + // Class 2 + for (int i = NTRAINING_SAMPLES; i < 2 * NTRAINING_SAMPLES; ++i) { + px = trainDataData[i * trainData.cols()]; + py = trainDataData[i * trainData.cols() + 1]; + Imgproc.circle(I, new Point(px, py), 3, new Scalar(255, 0, 0), thick, lineType, 0); + } + //! [show_data] + + // ------------------------- 6. Show support vectors-------------------------------------------- + //! [show_vectors] + thick = 2; + Mat sv = svm.getUncompressedSupportVectors(); + float[] svData = new float[(int) (sv.total() * sv.channels())]; + sv.get(0, 0, svData); + for (int i = 0; i < sv.rows(); i++) { + Imgproc.circle(I, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6, new Scalar(128, 128, 128), + thick, lineType, 0); + } + //! [show_vectors] + + Imgcodecs.imwrite("result.png", I); // save the Image + HighGui.imshow("SVM for Non-Linear Training Data", I); // show it to the user + HighGui.waitKey(); + System.exit(0); + } +} diff --git a/modules/ml/samples/letter_recog.cpp b/modules/ml/samples/letter_recog.cpp new file mode 100644 index 00000000000..bcad2f46873 --- /dev/null +++ b/modules/ml/samples/letter_recog.cpp @@ -0,0 +1,558 @@ +#include "opencv2/core.hpp" +#include "opencv2/ml.hpp" + +#include +#include +#include + +using namespace std; +using namespace cv; +using namespace cv::ml; + +static void help(char** argv) +{ + printf("\nThe sample demonstrates how to train Random Trees classifier\n" + "(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n" + "\n" + "We use the sample database letter-recognition.data\n" + "from UCI Repository, here is the link:\n" + "\n" + "Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. 
(1998).\n" + "UCI Repository of machine learning databases\n" + "[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n" + "Irvine, CA: University of California, Department of Information and Computer Science.\n" + "\n" + "The dataset consists of 20000 feature vectors along with the\n" + "responses - capital latin letters A..Z.\n" + "The first 16000 (10000 for boosting)) samples are used for training\n" + "and the remaining 4000 (10000 for boosting) - to test the classifier.\n" + "======================================================\n"); + printf("\nThis is letter recognition sample.\n" + "The usage: %s [-data=] \\\n" + " [-save=] \\\n" + " [-load=] \\\n" + " [-boost|-mlp|-knearest|-nbayes|-svm] # to use boost/mlp/knearest/SVM classifier instead of default Random Trees\n", argv[0] ); +} + +// This function reads data and responses from the file +static bool +read_num_class_data( const string& filename, int var_count, + Mat* _data, Mat* _responses ) +{ + const int M = 1024; + char buf[M+2]; + + Mat el_ptr(1, var_count, CV_32F); + int i; + vector responses; + + _data->release(); + _responses->release(); + + FILE* f = fopen( filename.c_str(), "rt" ); + if( !f ) + { + cout << "Could not read the database " << filename << endl; + return false; + } + + for(;;) + { + char* ptr; + if( !fgets( buf, M, f ) || !strchr( buf, ',' ) ) + break; + responses.push_back((int)buf[0]); + ptr = buf+2; + for( i = 0; i < var_count; i++ ) + { + int n = 0; + sscanf( ptr, "%f%n", &el_ptr.at(i), &n ); + ptr += n + 1; + } + if( i < var_count ) + break; + _data->push_back(el_ptr); + } + fclose(f); + Mat(responses).copyTo(*_responses); + + cout << "The database " << filename << " is loaded.\n"; + + return true; +} + +template +static Ptr load_classifier(const string& filename_to_load) +{ + // load classifier from the specified file + Ptr model = StatModel::load( filename_to_load ); + if( model.empty() ) + cout << "Could not read the classifier " << filename_to_load << endl; + else + cout << "The classifier " << filename_to_load << " is loaded.\n"; + + return model; +} + +static Ptr +prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples) +{ + Mat sample_idx = Mat::zeros( 1, data.rows, CV_8U ); + Mat train_samples = sample_idx.colRange(0, ntrain_samples); + train_samples.setTo(Scalar::all(1)); + + int nvars = data.cols; + Mat var_type( nvars + 1, 1, CV_8U ); + var_type.setTo(Scalar::all(VAR_ORDERED)); + var_type.at(nvars) = VAR_CATEGORICAL; + + return TrainData::create(data, ROW_SAMPLE, responses, + noArray(), sample_idx, noArray(), var_type); +} + +inline TermCriteria TC(int iters, double eps) +{ + return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps); +} + +static void test_and_save_classifier(const Ptr& model, + const Mat& data, const Mat& responses, + int ntrain_samples, int rdelta, + const string& filename_to_save) +{ + int i, nsamples_all = data.rows; + double train_hr = 0, test_hr = 0; + + // compute prediction error on train and test data + for( i = 0; i < nsamples_all; i++ ) + { + Mat sample = data.row(i); + + float r = model->predict( sample ); + r = std::abs(r + rdelta - responses.at(i)) <= FLT_EPSILON ? 1.f : 0.f; + + if( i < ntrain_samples ) + train_hr += r; + else + test_hr += r; + } + + test_hr /= nsamples_all - ntrain_samples; + train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.; + + printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n", + train_hr*100., test_hr*100. 
); + + if( !filename_to_save.empty() ) + { + model->save( filename_to_save ); + } +} + + +static bool +build_rtrees_classifier( const string& data_filename, + const string& filename_to_save, + const string& filename_to_load ) +{ + Mat data; + Mat responses; + bool ok = read_num_class_data( data_filename, 16, &data, &responses ); + if( !ok ) + return ok; + + Ptr model; + + int nsamples_all = data.rows; + int ntrain_samples = (int)(nsamples_all*0.8); + + // Create or load Random Trees classifier + if( !filename_to_load.empty() ) + { + model = load_classifier(filename_to_load); + if( model.empty() ) + return false; + ntrain_samples = 0; + } + else + { + // create classifier by using and + cout << "Training the classifier ...\n"; +// Params( int maxDepth, int minSampleCount, +// double regressionAccuracy, bool useSurrogates, +// int maxCategories, const Mat& priors, +// bool calcVarImportance, int nactiveVars, +// TermCriteria termCrit ); + Ptr tdata = prepare_train_data(data, responses, ntrain_samples); + model = RTrees::create(); + model->setMaxDepth(10); + model->setMinSampleCount(10); + model->setRegressionAccuracy(0); + model->setUseSurrogates(false); + model->setMaxCategories(15); + model->setPriors(Mat()); + model->setCalculateVarImportance(true); + model->setActiveVarCount(4); + model->setTermCriteria(TC(100,0.01f)); + model->train(tdata); + cout << endl; + } + + test_and_save_classifier(model, data, responses, ntrain_samples, 0, filename_to_save); + cout << "Number of trees: " << model->getRoots().size() << endl; + + // Print variable importance + Mat var_importance = model->getVarImportance(); + if( !var_importance.empty() ) + { + double rt_imp_sum = sum( var_importance )[0]; + printf("var#\timportance (in %%):\n"); + int i, n = (int)var_importance.total(); + for( i = 0; i < n; i++ ) + printf( "%-2d\t%-4.1f\n", i, 100.f*var_importance.at(i)/rt_imp_sum); + } + + return true; +} + + +static bool +build_boost_classifier( const string& data_filename, + const string& filename_to_save, + const string& filename_to_load ) +{ + const int class_count = 26; + Mat data; + Mat responses; + Mat weak_responses; + + bool ok = read_num_class_data( data_filename, 16, &data, &responses ); + if( !ok ) + return ok; + + int i, j, k; + Ptr model; + + int nsamples_all = data.rows; + int ntrain_samples = (int)(nsamples_all*0.5); + int var_count = data.cols; + + // Create or load Boosted Tree classifier + if( !filename_to_load.empty() ) + { + model = load_classifier(filename_to_load); + if( model.empty() ) + return false; + ntrain_samples = 0; + } + else + { + // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // + // As currently boosted tree classifier in MLL can only be trained + // for 2-class problems, we transform the training database by + // "unrolling" each training sample as many times as the number of + // classes (26) that we have. + // + // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + Mat new_data( ntrain_samples*class_count, var_count + 1, CV_32F ); + Mat new_responses( ntrain_samples*class_count, 1, CV_32S ); + + // 1. 
unroll the database type mask + printf( "Unrolling the database...\n"); + for( i = 0; i < ntrain_samples; i++ ) + { + const float* data_row = data.ptr(i); + for( j = 0; j < class_count; j++ ) + { + float* new_data_row = (float*)new_data.ptr(i*class_count+j); + memcpy(new_data_row, data_row, var_count*sizeof(data_row[0])); + new_data_row[var_count] = (float)j; + new_responses.at(i*class_count + j) = responses.at(i) == j+'A'; + } + } + + Mat var_type( 1, var_count + 2, CV_8U ); + var_type.setTo(Scalar::all(VAR_ORDERED)); + var_type.at(var_count) = var_type.at(var_count+1) = VAR_CATEGORICAL; + + Ptr tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses, + noArray(), noArray(), noArray(), var_type); + vector priors(2); + priors[0] = 1; + priors[1] = 26; + + cout << "Training the classifier (may take a few minutes)...\n"; + model = Boost::create(); + model->setBoostType(Boost::GENTLE); + model->setWeakCount(100); + model->setWeightTrimRate(0.95); + model->setMaxDepth(5); + model->setUseSurrogates(false); + model->setPriors(Mat(priors)); + model->train(tdata); + cout << endl; + } + + Mat temp_sample( 1, var_count + 1, CV_32F ); + float* tptr = temp_sample.ptr(); + + // compute prediction error on train and test data + double train_hr = 0, test_hr = 0; + for( i = 0; i < nsamples_all; i++ ) + { + int best_class = 0; + double max_sum = -DBL_MAX; + const float* ptr = data.ptr(i); + for( k = 0; k < var_count; k++ ) + tptr[k] = ptr[k]; + + for( j = 0; j < class_count; j++ ) + { + tptr[var_count] = (float)j; + float s = model->predict( temp_sample, noArray(), StatModel::RAW_OUTPUT ); + if( max_sum < s ) + { + max_sum = s; + best_class = j + 'A'; + } + } + + double r = std::abs(best_class - responses.at(i)) < FLT_EPSILON ? 1 : 0; + if( i < ntrain_samples ) + train_hr += r; + else + test_hr += r; + } + + test_hr /= nsamples_all-ntrain_samples; + train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.; + printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n", + train_hr*100., test_hr*100. ); + + cout << "Number of trees: " << model->getRoots().size() << endl; + + // Save classifier to file if needed + if( !filename_to_save.empty() ) + model->save( filename_to_save ); + + return true; +} + + +static bool +build_mlp_classifier( const string& data_filename, + const string& filename_to_save, + const string& filename_to_load ) +{ + const int class_count = 26; + Mat data; + Mat responses; + + bool ok = read_num_class_data( data_filename, 16, &data, &responses ); + if( !ok ) + return ok; + + Ptr model; + + int nsamples_all = data.rows; + int ntrain_samples = (int)(nsamples_all*0.8); + + // Create or load MLP classifier + if( !filename_to_load.empty() ) + { + model = load_classifier(filename_to_load); + if( model.empty() ) + return false; + ntrain_samples = 0; + } + else + { + // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // + // MLP does not support categorical variables by explicitly. + // So, instead of the output class label, we will use + // a binary vector of components for training and, + // therefore, MLP will give us a vector of "probabilities" at the + // prediction stage + // + // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + Mat train_data = data.rowRange(0, ntrain_samples); + Mat train_responses = Mat::zeros( ntrain_samples, class_count, CV_32F ); + + // 1. 
unroll the responses + cout << "Unrolling the responses...\n"; + for( int i = 0; i < ntrain_samples; i++ ) + { + int cls_label = responses.at(i) - 'A'; + train_responses.at(i, cls_label) = 1.f; + } + + // 2. train classifier + int layer_sz[] = { data.cols, 100, 100, class_count }; + int nlayers = (int)(sizeof(layer_sz)/sizeof(layer_sz[0])); + Mat layer_sizes( 1, nlayers, CV_32S, layer_sz ); + +#if 1 + int method = ANN_MLP::BACKPROP; + double method_param = 0.001; + int max_iter = 300; +#else + int method = ANN_MLP::RPROP; + double method_param = 0.1; + int max_iter = 1000; +#endif + + Ptr tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses); + + cout << "Training the classifier (may take a few minutes)...\n"; + model = ANN_MLP::create(); + model->setLayerSizes(layer_sizes); + model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0); + model->setTermCriteria(TC(max_iter,0)); + model->setTrainMethod(method, method_param); + model->train(tdata); + cout << endl; + } + + test_and_save_classifier(model, data, responses, ntrain_samples, 'A', filename_to_save); + return true; +} + +static bool +build_knearest_classifier( const string& data_filename, int K ) +{ + Mat data; + Mat responses; + bool ok = read_num_class_data( data_filename, 16, &data, &responses ); + if( !ok ) + return ok; + + + int nsamples_all = data.rows; + int ntrain_samples = (int)(nsamples_all*0.8); + + // create classifier by using and + cout << "Training the classifier ...\n"; + Ptr tdata = prepare_train_data(data, responses, ntrain_samples); + Ptr model = KNearest::create(); + model->setDefaultK(K); + model->setIsClassifier(true); + model->train(tdata); + cout << endl; + + test_and_save_classifier(model, data, responses, ntrain_samples, 0, string()); + return true; +} + +static bool +build_nbayes_classifier( const string& data_filename ) +{ + Mat data; + Mat responses; + bool ok = read_num_class_data( data_filename, 16, &data, &responses ); + if( !ok ) + return ok; + + Ptr model; + + int nsamples_all = data.rows; + int ntrain_samples = (int)(nsamples_all*0.8); + + // create classifier by using and + cout << "Training the classifier ...\n"; + Ptr tdata = prepare_train_data(data, responses, ntrain_samples); + model = NormalBayesClassifier::create(); + model->train(tdata); + cout << endl; + + test_and_save_classifier(model, data, responses, ntrain_samples, 0, string()); + return true; +} + +static bool +build_svm_classifier( const string& data_filename, + const string& filename_to_save, + const string& filename_to_load ) +{ + Mat data; + Mat responses; + bool ok = read_num_class_data( data_filename, 16, &data, &responses ); + if( !ok ) + return ok; + + Ptr model; + + int nsamples_all = data.rows; + int ntrain_samples = (int)(nsamples_all*0.8); + + // Create or load Random Trees classifier + if( !filename_to_load.empty() ) + { + model = load_classifier(filename_to_load); + if( model.empty() ) + return false; + ntrain_samples = 0; + } + else + { + // create classifier by using and + cout << "Training the classifier ...\n"; + Ptr tdata = prepare_train_data(data, responses, ntrain_samples); + model = SVM::create(); + model->setType(SVM::C_SVC); + model->setKernel(SVM::LINEAR); + model->setC(1); + model->train(tdata); + cout << endl; + } + + test_and_save_classifier(model, data, responses, ntrain_samples, 0, filename_to_save); + return true; +} + +int main( int argc, char *argv[] ) +{ + string filename_to_save = ""; + string filename_to_load = ""; + string data_filename; + int method = 0; + + cv::CommandLineParser 
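// Command-line options: -data points to letter-recognition.data, -save/-load write or
// restore a trained model, and -boost/-mlp/-knearest/-nbayes/-svm select the classifier;
// with no flag the sample trains the default Random Trees model.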
parser(argc, argv, "{data|letter-recognition.data|}{save||}{load||}{boost||}" + "{mlp||}{knn knearest||}{nbayes||}{svm||}"); + data_filename = samples::findFile(parser.get("data")); + if (parser.has("save")) + filename_to_save = parser.get("save"); + if (parser.has("load")) + filename_to_load = samples::findFile(parser.get("load")); + if (parser.has("boost")) + method = 1; + else if (parser.has("mlp")) + method = 2; + else if (parser.has("knearest")) + method = 3; + else if (parser.has("nbayes")) + method = 4; + else if (parser.has("svm")) + method = 5; + + help(argv); + + if( (method == 0 ? + build_rtrees_classifier( data_filename, filename_to_save, filename_to_load ) : + method == 1 ? + build_boost_classifier( data_filename, filename_to_save, filename_to_load ) : + method == 2 ? + build_mlp_classifier( data_filename, filename_to_save, filename_to_load ) : + method == 3 ? + build_knearest_classifier( data_filename, 10 ) : + method == 4 ? + build_nbayes_classifier( data_filename) : + method == 5 ? + build_svm_classifier( data_filename, filename_to_save, filename_to_load ): + -1) < 0) + + return 0; +} diff --git a/modules/ml/samples/logistic_regression.cpp b/modules/ml/samples/logistic_regression.cpp new file mode 100644 index 00000000000..1bc2bf97118 --- /dev/null +++ b/modules/ml/samples/logistic_regression.cpp @@ -0,0 +1,127 @@ +// Logistic Regression sample +// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com + +#include + +#include +#include +#include + +using namespace std; +using namespace cv; +using namespace cv::ml; + +static void showImage(const Mat &data, int columns, const String &name) +{ + Mat bigImage; + for(int i = 0; i < data.rows; ++i) + { + bigImage.push_back(data.row(i).reshape(0, columns)); + } + imshow(name, bigImage.t()); +} + +static float calculateAccuracyPercent(const Mat &original, const Mat &predicted) +{ + return 100 * (float)countNonZero(original == predicted) / predicted.rows; +} + +int main() +{ + const String filename = samples::findFile("data01.xml"); + cout << "**********************************************************************" << endl; + cout << filename + << " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl; + cout << "Each of the collected images are of size 28 x 28 re-arranged to 1 x 784 matrix" + << endl; + cout << "**********************************************************************" << endl; + + Mat data, labels; + { + cout << "loading the dataset..."; + FileStorage f; + if(f.open(filename, FileStorage::READ)) + { + f["datamat"] >> data; + f["labelsmat"] >> labels; + f.release(); + } + else + { + cerr << "file can not be opened: " << filename << endl; + return 1; + } + data.convertTo(data, CV_32F); + labels.convertTo(labels, CV_32F); + cout << "read " << data.rows << " rows of data" << endl; + } + + Mat data_train, data_test; + Mat labels_train, labels_test; + for(int i = 0; i < data.rows; i++) + { + if(i % 2 == 0) + { + data_train.push_back(data.row(i)); + labels_train.push_back(labels.row(i)); + } + else + { + data_test.push_back(data.row(i)); + labels_test.push_back(labels.row(i)); + } + } + cout << "training/testing samples count: " << data_train.rows << "/" << data_test.rows << endl; + + // display sample image + showImage(data_train, 28, "train data"); + showImage(data_test, 28, "test data"); + + // simple case with batch gradient + cout << "training..."; + //! 
[init] + Ptr lr1 = LogisticRegression::create(); + lr1->setLearningRate(0.001); + lr1->setIterations(10); + lr1->setRegularization(LogisticRegression::REG_L2); + lr1->setTrainMethod(LogisticRegression::BATCH); + lr1->setMiniBatchSize(1); + //! [init] + lr1->train(data_train, ROW_SAMPLE, labels_train); + cout << "done!" << endl; + + cout << "predicting..."; + Mat responses; + lr1->predict(data_test, responses); + cout << "done!" << endl; + + // show prediction report + cout << "original vs predicted:" << endl; + labels_test.convertTo(labels_test, CV_32S); + cout << labels_test.t() << endl; + cout << responses.t() << endl; + cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses) << "%" << endl; + + // save the classifier + const String saveFilename = "NewLR_Trained.xml"; + cout << "saving the classifier to " << saveFilename << endl; + lr1->save(saveFilename); + + // load the classifier onto new object + cout << "loading a new classifier from " << saveFilename << endl; + Ptr lr2 = StatModel::load(saveFilename); + + // predict using loaded classifier + cout << "predicting the dataset using the loaded classifier..."; + Mat responses2; + lr2->predict(data_test, responses2); + cout << "done!" << endl; + + // calculate accuracy + cout << labels_test.t() << endl; + cout << responses2.t() << endl; + cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses2) << "%" << endl; + + waitKey(0); + return 0; +} diff --git a/modules/ml/samples/neural_network.cpp b/modules/ml/samples/neural_network.cpp new file mode 100644 index 00000000000..4a8d7e22f96 --- /dev/null +++ b/modules/ml/samples/neural_network.cpp @@ -0,0 +1,66 @@ +//#include +#include + +using namespace std; +using namespace cv; +using namespace cv::ml; + +int main() +{ + //create random training data + Mat_ data(100, 100); + randn(data, Mat::zeros(1, 1, data.type()), Mat::ones(1, 1, data.type())); + + //half of the samples for each class + Mat_ responses(data.rows, 2); + for (int i = 0; i responses(data.rows, 1); + for (int i=0; i layerSizes(1, 3); + layerSizes(0, 0) = data.cols; + layerSizes(0, 1) = 20; + layerSizes(0, 2) = responses.cols; + + Ptr network = ANN_MLP::create(); + network->setLayerSizes(layerSizes); + network->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0.1, 0.1); + network->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1); + Ptr trainData = TrainData::create(data, ROW_SAMPLE, responses); + + network->train(trainData); + if (network->isTrained()) + { + printf("Predict one-vector:\n"); + Mat result; + network->predict(Mat::ones(1, data.cols, data.type()), result); + cout << result << endl; + + printf("Predict training data:\n"); + for (int i=0; ipredict(data.row(i), result); + cout << result << endl; + } + } + + return 0; +} diff --git a/modules/ml/samples/non_linear_svms.cpp b/modules/ml/samples/non_linear_svms.cpp new file mode 100644 index 00000000000..f8b7a373cc8 --- /dev/null +++ b/modules/ml/samples/non_linear_svms.cpp @@ -0,0 +1,144 @@ +#include +#include +#include +#include "opencv2/imgcodecs.hpp" +#include +#include + +using namespace cv; +using namespace cv::ml; +using namespace std; + +static void help() +{ + cout<< "\n--------------------------------------------------------------------------" << endl + << "This program shows Support Vector Machines for Non-Linearly Separable Data. 
" << endl + << "--------------------------------------------------------------------------" << endl + << endl; +} + +int main() +{ + help(); + + const int NTRAINING_SAMPLES = 100; // Number of training samples per class + const float FRAC_LINEAR_SEP = 0.9f; // Fraction of samples which compose the linear separable part + + // Data for visual representation + const int WIDTH = 512, HEIGHT = 512; + Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3); + + //--------------------- 1. Set up training data randomly --------------------------------------- + Mat trainData(2*NTRAINING_SAMPLES, 2, CV_32F); + Mat labels (2*NTRAINING_SAMPLES, 1, CV_32S); + + RNG rng(100); // Random value generation class + + // Set up the linearly separable part of the training data + int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES); + + //! [setup1] + // Generate random points for the class 1 + Mat trainClass = trainData.rowRange(0, nLinearSamples); + // The x coordinate of the points is in [0, 0.4) + Mat c = trainClass.colRange(0, 1); + rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(0.4 * WIDTH)); + // The y coordinate of the points is in [0, 1) + c = trainClass.colRange(1,2); + rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT)); + + // Generate random points for the class 2 + trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES); + // The x coordinate of the points is in [0.6, 1] + c = trainClass.colRange(0 , 1); + rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH)); + // The y coordinate of the points is in [0, 1) + c = trainClass.colRange(1,2); + rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT)); + //! [setup1] + + //------------------ Set up the non-linearly separable part of the training data --------------- + //! [setup2] + // Generate random points for the classes 1 and 2 + trainClass = trainData.rowRange(nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples); + // The x coordinate of the points is in [0.4, 0.6) + c = trainClass.colRange(0,1); + rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH)); + // The y coordinate of the points is in [0, 1) + c = trainClass.colRange(1,2); + rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT)); + //! [setup2] + + //------------------------- Set up the labels for the classes --------------------------------- + labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1 + labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2 + + //------------------------ 2. Set up the support vector machines parameters -------------------- + cout << "Starting training process" << endl; + //! [init] + Ptr svm = SVM::create(); + svm->setType(SVM::C_SVC); + svm->setC(0.1); + svm->setKernel(SVM::LINEAR); + svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6)); + //! [init] + + //------------------------ 3. Train the svm ---------------------------------------------------- + //! [train] + svm->train(trainData, ROW_SAMPLE, labels); + //! [train] + cout << "Finished training process" << endl; + + //------------------------ 4. Show the decision regions ---------------------------------------- + //! [show] + Vec3b green(0,100,0), blue(100,0,0); + for (int i = 0; i < I.rows; i++) + { + for (int j = 0; j < I.cols; j++) + { + Mat sampleMat = (Mat_(1,2) << j, i); + float response = svm->predict(sampleMat); + + if (response == 1) I.at(i,j) = green; + else if (response == 2) I.at(i,j) = blue; + } + } + //! [show] + + //----------------------- 5. 
Show the training data -------------------------------------------- + //! [show_data] + int thick = -1; + float px, py; + // Class 1 + for (int i = 0; i < NTRAINING_SAMPLES; i++) + { + px = trainData.at(i,0); + py = trainData.at(i,1); + circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick); + } + // Class 2 + for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; i++) + { + px = trainData.at(i,0); + py = trainData.at(i,1); + circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick); + } + //! [show_data] + + //------------------------- 6. Show support vectors -------------------------------------------- + //! [show_vectors] + thick = 2; + Mat sv = svm->getUncompressedSupportVectors(); + + for (int i = 0; i < sv.rows; i++) + { + const float* v = sv.ptr(i); + circle(I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick); + } + //! [show_vectors] + + imwrite("result.png", I); // save the Image + imshow("SVM for Non-Linear Training Data", I); // show it to the user + waitKey(); + return 0; +} diff --git a/modules/ml/samples/points_classifier.cpp b/modules/ml/samples/points_classifier.cpp new file mode 100644 index 00000000000..02e393495d6 --- /dev/null +++ b/modules/ml/samples/points_classifier.cpp @@ -0,0 +1,399 @@ +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/ml.hpp" +#include "opencv2/highgui.hpp" + +#include + +using namespace std; +using namespace cv; +using namespace cv::ml; + +const Scalar WHITE_COLOR = Scalar(255,255,255); +const string winName = "points"; +const int testStep = 5; + +Mat img, imgDst; +RNG rng; + +vector trainedPoints; +vector trainedPointsMarkers; +const int MAX_CLASSES = 2; +vector classColors(MAX_CLASSES); +int currentClass = 0; +vector classCounters(MAX_CLASSES); + +#define _NBC_ 1 // normal Bayessian classifier +#define _KNN_ 1 // k nearest neighbors classifier +#define _SVM_ 1 // support vectors machine +#define _DT_ 1 // decision tree +#define _BT_ 1 // ADA Boost +#define _GBT_ 0 // gradient boosted trees +#define _RF_ 1 // random forest +#define _ANN_ 1 // artificial neural networks +#define _EM_ 1 // expectation-maximization + +static void on_mouse( int event, int x, int y, int /*flags*/, void* ) +{ + if( img.empty() ) + return; + + int updateFlag = 0; + + if( event == EVENT_LBUTTONUP ) + { + trainedPoints.push_back( Point(x,y) ); + trainedPointsMarkers.push_back( currentClass ); + classCounters[currentClass]++; + updateFlag = true; + } + + //draw + if( updateFlag ) + { + img = Scalar::all(0); + + // draw points + for( size_t i = 0; i < trainedPoints.size(); i++ ) + { + Vec3b c = classColors[trainedPointsMarkers[i]]; + circle( img, trainedPoints[i], 5, Scalar(c), -1 ); + } + + imshow( winName, img ); + } +} + +static Mat prepare_train_samples(const vector& pts) +{ + Mat samples; + Mat(pts).reshape(1, (int)pts.size()).convertTo(samples, CV_32F); + return samples; +} + +static Ptr prepare_train_data() +{ + Mat samples = prepare_train_samples(trainedPoints); + return TrainData::create(samples, ROW_SAMPLE, Mat(trainedPointsMarkers)); +} + +static void predict_and_paint(const Ptr& model, Mat& dst) +{ + Mat testSample( 1, 2, CV_32FC1 ); + for( int y = 0; y < img.rows; y += testStep ) + { + for( int x = 0; x < img.cols; x += testStep ) + { + testSample.at(0) = (float)x; + testSample.at(1) = (float)y; + + int response = (int)model->predict( testSample ); + dst.at(y, x) = classColors[response]; + } + } +} + +#if _NBC_ +static void find_decision_boundary_NBC() +{ + // learn classifier + Ptr 
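// find_decision_boundary_NBC trains a NormalBayesClassifier on the user-clicked points
// and paints its decision regions into imgDst.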
normalBayesClassifier = StatModel::train(prepare_train_data()); + + predict_and_paint(normalBayesClassifier, imgDst); +} +#endif + + +#if _KNN_ +static void find_decision_boundary_KNN( int K ) +{ + + Ptr knn = KNearest::create(); + knn->setDefaultK(K); + knn->setIsClassifier(true); + knn->train(prepare_train_data()); + predict_and_paint(knn, imgDst); +} +#endif + +#if _SVM_ +static void find_decision_boundary_SVM( double C ) +{ + Ptr svm = SVM::create(); + svm->setType(SVM::C_SVC); + svm->setKernel(SVM::POLY); //SVM::LINEAR; + svm->setDegree(0.5); + svm->setGamma(1); + svm->setCoef0(1); + svm->setNu(0.5); + svm->setP(0); + svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01)); + svm->setC(C); + svm->train(prepare_train_data()); + predict_and_paint(svm, imgDst); + + Mat sv = svm->getSupportVectors(); + for( int i = 0; i < sv.rows; i++ ) + { + const float* supportVector = sv.ptr(i); + circle( imgDst, Point(saturate_cast(supportVector[0]),saturate_cast(supportVector[1])), 5, Scalar(255,255,255), -1 ); + } +} +#endif + +#if _DT_ +static void find_decision_boundary_DT() +{ + Ptr dtree = DTrees::create(); + dtree->setMaxDepth(8); + dtree->setMinSampleCount(2); + dtree->setUseSurrogates(false); + dtree->setCVFolds(0); // the number of cross-validation folds + dtree->setUse1SERule(false); + dtree->setTruncatePrunedTree(false); + dtree->train(prepare_train_data()); + predict_and_paint(dtree, imgDst); +} +#endif + +#if _BT_ +static void find_decision_boundary_BT() +{ + Ptr boost = Boost::create(); + boost->setBoostType(Boost::DISCRETE); + boost->setWeakCount(100); + boost->setWeightTrimRate(0.95); + boost->setMaxDepth(2); + boost->setUseSurrogates(false); + boost->setPriors(Mat()); + boost->train(prepare_train_data()); + predict_and_paint(boost, imgDst); +} + +#endif + +#if _GBT_ +static void find_decision_boundary_GBT() +{ + GBTrees::Params params( GBTrees::DEVIANCE_LOSS, // loss_function_type + 100, // weak_count + 0.1f, // shrinkage + 1.0f, // subsample_portion + 2, // max_depth + false // use_surrogates ) + ); + + Ptr gbtrees = StatModel::train(prepare_train_data(), params); + predict_and_paint(gbtrees, imgDst); +} +#endif + +#if _RF_ +static void find_decision_boundary_RF() +{ + Ptr rtrees = RTrees::create(); + rtrees->setMaxDepth(4); + rtrees->setMinSampleCount(2); + rtrees->setRegressionAccuracy(0.f); + rtrees->setUseSurrogates(false); + rtrees->setMaxCategories(16); + rtrees->setPriors(Mat()); + rtrees->setCalculateVarImportance(false); + rtrees->setActiveVarCount(1); + rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 5, 0)); + rtrees->train(prepare_train_data()); + predict_and_paint(rtrees, imgDst); +} + +#endif + +#if _ANN_ +static void find_decision_boundary_ANN( const Mat& layer_sizes ) +{ + Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 ); + for( int i = 0; i < trainClasses.rows; i++ ) + { + trainClasses.at(i, trainedPointsMarkers[i]) = 1.f; + } + + Mat samples = prepare_train_samples(trainedPoints); + Ptr tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses); + + Ptr ann = ANN_MLP::create(); + ann->setLayerSizes(layer_sizes); + ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1); + ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON)); + ann->setTrainMethod(ANN_MLP::BACKPROP, 0.001); + ann->train(tdata); + predict_and_paint(ann, imgDst); +} +#endif + +#if _EM_ +static void find_decision_boundary_EM() +{ + img.copyTo( imgDst ); + + Mat samples = 
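// One EM (Gaussian mixture) model is fitted per class below; each grid point is then
// assigned to the class whose mixture gives the highest log-likelihood.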
prepare_train_samples(trainedPoints); + + int i, j, nmodels = (int)classColors.size(); + vector > em_models(nmodels); + Mat modelSamples; + + for( i = 0; i < nmodels; i++ ) + { + const int componentCount = 3; + + modelSamples.release(); + for( j = 0; j < samples.rows; j++ ) + { + if( trainedPointsMarkers[j] == i ) + modelSamples.push_back(samples.row(j)); + } + + // learn models + if( !modelSamples.empty() ) + { + Ptr em = EM::create(); + em->setClustersNumber(componentCount); + em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL); + em->trainEM(modelSamples, noArray(), noArray(), noArray()); + em_models[i] = em; + } + } + + // classify coordinate plane points using the bayes classifier, i.e. + // y(x) = arg max_i=1_modelsCount likelihoods_i(x) + Mat testSample(1, 2, CV_32FC1 ); + Mat logLikelihoods(1, nmodels, CV_64FC1, Scalar(-DBL_MAX)); + + for( int y = 0; y < img.rows; y += testStep ) + { + for( int x = 0; x < img.cols; x += testStep ) + { + testSample.at(0) = (float)x; + testSample.at(1) = (float)y; + + for( i = 0; i < nmodels; i++ ) + { + if( !em_models[i].empty() ) + logLikelihoods.at(i) = em_models[i]->predict2(testSample, noArray())[0]; + } + Point maxLoc; + minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc); + imgDst.at(y, x) = classColors[maxLoc.x]; + } + } +} +#endif + +int main() +{ + cout << "Use:" << endl + << " key '0' .. '1' - switch to class #n" << endl + << " left mouse button - to add new point;" << endl + << " key 'r' - to run the ML model;" << endl + << " key 'i' - to init (clear) the data." << endl << endl; + + cv::namedWindow( "points", 1 ); + img.create( 480, 640, CV_8UC3 ); + imgDst.create( 480, 640, CV_8UC3 ); + + imshow( "points", img ); + setMouseCallback( "points", on_mouse ); + + classColors[0] = Vec3b(0, 255, 0); + classColors[1] = Vec3b(0, 0, 255); + + for(;;) + { + char key = (char)waitKey(); + + if( key == 27 ) break; + + if( key == 'i' ) // init + { + img = Scalar::all(0); + + trainedPoints.clear(); + trainedPointsMarkers.clear(); + classCounters.assign(MAX_CLASSES, 0); + + imshow( winName, img ); + } + + if( key == '0' || key == '1' ) + { + currentClass = key - '0'; + } + + if( key == 'r' ) // run + { + double minVal = 0; + minMaxLoc(classCounters, &minVal, 0, 0, 0); + if( minVal == 0 ) + { + printf("each class should have at least 1 point\n"); + continue; + } + img.copyTo( imgDst ); +#if _NBC_ + find_decision_boundary_NBC(); + imshow( "NormalBayesClassifier", imgDst ); +#endif +#if _KNN_ + find_decision_boundary_KNN( 3 ); + imshow( "kNN", imgDst ); + + find_decision_boundary_KNN( 15 ); + imshow( "kNN2", imgDst ); +#endif + +#if _SVM_ + //(1)-(2)separable and not sets + + find_decision_boundary_SVM( 1 ); + imshow( "classificationSVM1", imgDst ); + + find_decision_boundary_SVM( 10 ); + imshow( "classificationSVM2", imgDst ); +#endif + +#if _DT_ + find_decision_boundary_DT(); + imshow( "DT", imgDst ); +#endif + +#if _BT_ + find_decision_boundary_BT(); + imshow( "BT", imgDst); +#endif + +#if _GBT_ + find_decision_boundary_GBT(); + imshow( "GBT", imgDst); +#endif + +#if _RF_ + find_decision_boundary_RF(); + imshow( "RF", imgDst); +#endif + +#if _ANN_ + Mat layer_sizes1( 1, 3, CV_32SC1 ); + layer_sizes1.at(0) = 2; + layer_sizes1.at(1) = 5; + layer_sizes1.at(2) = (int)classColors.size(); + find_decision_boundary_ANN( layer_sizes1 ); + imshow( "ANN", imgDst ); +#endif + +#if _EM_ + find_decision_boundary_EM(); + imshow( "EM", imgDst ); +#endif + } + } + + return 0; +} diff --git a/modules/ml/samples/python/common.py b/modules/ml/samples/python/common.py new file mode 
100755 index 00000000000..399b968d74c --- /dev/null +++ b/modules/ml/samples/python/common.py @@ -0,0 +1,224 @@ +#!/usr/bin/env python + +''' +This module contains some common routines used by other samples. +''' + +from functools import reduce +import numpy as np +import cv2 as cv + +# built-in modules +import os +import itertools as it +from contextlib import contextmanager + +image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm'] + +class Bunch(object): + def __init__(self, **kw): + self.__dict__.update(kw) + def __str__(self): + return str(self.__dict__) + +def splitfn(fn): + path, fn = os.path.split(fn) + name, ext = os.path.splitext(fn) + return path, name, ext + +def anorm2(a): + return (a*a).sum(-1) +def anorm(a): + return np.sqrt( anorm2(a) ) + +def homotrans(H, x, y): + xs = H[0, 0]*x + H[0, 1]*y + H[0, 2] + ys = H[1, 0]*x + H[1, 1]*y + H[1, 2] + s = H[2, 0]*x + H[2, 1]*y + H[2, 2] + return xs/s, ys/s + +def to_rect(a): + a = np.ravel(a) + if len(a) == 2: + a = (0, 0, a[0], a[1]) + return np.array(a, np.float64).reshape(2, 2) + +def rect2rect_mtx(src, dst): + src, dst = to_rect(src), to_rect(dst) + cx, cy = (dst[1] - dst[0]) / (src[1] - src[0]) + tx, ty = dst[0] - src[0] * (cx, cy) + M = np.float64([[ cx, 0, tx], + [ 0, cy, ty], + [ 0, 0, 1]]) + return M + + +def lookat(eye, target, up = (0, 0, 1)): + fwd = np.asarray(target, np.float64) - eye + fwd /= anorm(fwd) + right = np.cross(fwd, up) + right /= anorm(right) + down = np.cross(fwd, right) + R = np.float64([right, down, fwd]) + tvec = -np.dot(R, eye) + return R, tvec + +def mtx2rvec(R): + w, u, vt = cv.SVDecomp(R - np.eye(3)) + p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0]) + c = np.dot(vt[0], p) + s = np.dot(vt[1], p) + axis = np.cross(vt[0], vt[1]) + return axis * np.arctan2(s, c) + +def draw_str(dst, target, s): + x, y = target + cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA) + cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA) + +class Sketcher: + def __init__(self, windowname, dests, colors_func): + self.prev_pt = None + self.windowname = windowname + self.dests = dests + self.colors_func = colors_func + self.dirty = False + self.show() + cv.setMouseCallback(self.windowname, self.on_mouse) + + def show(self): + cv.imshow(self.windowname, self.dests[0]) + + def on_mouse(self, event, x, y, flags, param): + pt = (x, y) + if event == cv.EVENT_LBUTTONDOWN: + self.prev_pt = pt + elif event == cv.EVENT_LBUTTONUP: + self.prev_pt = None + + if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON: + for dst, color in zip(self.dests, self.colors_func()): + cv.line(dst, self.prev_pt, pt, color, 5) + self.dirty = True + self.prev_pt = pt + self.show() + + +# palette data from matplotlib/_cm.py +_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1), + (1, 0.5, 0.5)), + 'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1), + (0.91,0,0), (1, 0, 0)), + 'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0), + (1, 0, 0))} + +cmap_data = { 'jet' : _jet_data } + +def make_cmap(name, n=256): + data = cmap_data[name] + xs = np.linspace(0.0, 1.0, n) + channels = [] + eps = 1e-6 + for ch_name in ['blue', 'green', 'red']: + ch_data = data[ch_name] + xp, yp = [], [] + for x, y1, y2 in ch_data: + xp += [x, x+eps] + yp += [y1, y2] + ch = np.interp(xs, xp, yp) + channels.append(ch) + return np.uint8(np.array(channels).T*255) + +def nothing(*arg, **kw): + pass + +def clock(): + 
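    # Current time in seconds, based on OpenCV's high-resolution tick counter.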
return cv.getTickCount() / cv.getTickFrequency() + +@contextmanager +def Timer(msg): + print(msg, '...',) + start = clock() + try: + yield + finally: + print("%.2f ms" % ((clock()-start)*1000)) + +class StatValue: + def __init__(self, smooth_coef = 0.5): + self.value = None + self.smooth_coef = smooth_coef + def update(self, v): + if self.value is None: + self.value = v + else: + c = self.smooth_coef + self.value = c * self.value + (1.0-c) * v + +class RectSelector: + def __init__(self, win, callback): + self.win = win + self.callback = callback + cv.setMouseCallback(win, self.onmouse) + self.drag_start = None + self.drag_rect = None + def onmouse(self, event, x, y, flags, param): + x, y = np.int16([x, y]) # BUG + if event == cv.EVENT_LBUTTONDOWN: + self.drag_start = (x, y) + return + if self.drag_start: + if flags & cv.EVENT_FLAG_LBUTTON: + xo, yo = self.drag_start + x0, y0 = np.minimum([xo, yo], [x, y]) + x1, y1 = np.maximum([xo, yo], [x, y]) + self.drag_rect = None + if x1-x0 > 0 and y1-y0 > 0: + self.drag_rect = (x0, y0, x1, y1) + else: + rect = self.drag_rect + self.drag_start = None + self.drag_rect = None + if rect: + self.callback(rect) + def draw(self, vis): + if not self.drag_rect: + return False + x0, y0, x1, y1 = self.drag_rect + cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2) + return True + @property + def dragging(self): + return self.drag_rect is not None + + +def grouper(n, iterable, fillvalue=None): + '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx''' + args = [iter(iterable)] * n + output = it.zip_longest(fillvalue=fillvalue, *args) + return output + +def mosaic(w, imgs): + '''Make a grid from images. + + w -- number of grid columns + imgs -- images (must have same size and format) + ''' + imgs = iter(imgs) + img0 = next(imgs) + pad = np.zeros_like(img0) + imgs = it.chain([img0], imgs) + rows = grouper(w, imgs, pad) + return np.vstack(list(map(np.hstack, rows))) + +def getsize(img): + h, w = img.shape[:2] + return w, h + +def mdot(*args): + return reduce(np.dot, args) + +def draw_keypoints(vis, keypoints, color = (0, 255, 255)): + for kp in keypoints: + x, y = kp.pt + cv.circle(vis, (int(x), int(y)), 2, color) diff --git a/modules/ml/samples/python/digits.py b/modules/ml/samples/python/digits.py new file mode 100755 index 00000000000..25db411f942 --- /dev/null +++ b/modules/ml/samples/python/digits.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python + +''' +SVM and KNearest digit recognition. + +Sample loads a dataset of handwritten digits from 'digits.png'. +Then it trains a SVM and KNearest classifiers on it and evaluates +their accuracy. + +Following preprocessing is applied to the dataset: + - Moment-based image deskew (see deskew()) + - Digit images are split into 4 10x10 cells and 16-bin + histogram of oriented gradients is computed for each + cell + - Transform histograms to space with Hellinger metric (see [1] (RootSIFT)) + + +[1] R. Arandjelovic, A. 
Zisserman + "Three things everyone should know to improve object retrieval" + http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf + +Usage: + digits.py +''' + + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 as cv + +# built-in modules +from multiprocessing.pool import ThreadPool + +from numpy.linalg import norm + +# local modules +from common import clock, mosaic + + + +SZ = 20 # size of each digit is SZ x SZ +CLASS_N = 10 +DIGITS_FN = 'digits.png' + +def split2d(img, cell_size, flatten=True): + h, w = img.shape[:2] + sx, sy = cell_size + cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)] + cells = np.array(cells) + if flatten: + cells = cells.reshape(-1, sy, sx) + return cells + +def load_digits(fn): + fn = cv.samples.findFile(fn) + print('loading "%s" ...' % fn) + digits_img = cv.imread(fn, cv.IMREAD_GRAYSCALE) + digits = split2d(digits_img, (SZ, SZ)) + labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N) + return digits, labels + +def deskew(img): + m = cv.moments(img) + if abs(m['mu02']) < 1e-2: + return img.copy() + skew = m['mu11']/m['mu02'] + M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) + img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) + return img + + +class KNearest(object): + def __init__(self, k = 3): + self.k = k + self.model = cv.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k) + return results.ravel() + + def load(self, fn): + self.model = cv.ml.KNearest_load(fn) + + def save(self, fn): + self.model.save(fn) + +class SVM(object): + def __init__(self, C = 1, gamma = 0.5): + self.model = cv.ml.SVM_create() + self.model.setGamma(gamma) + self.model.setC(C) + self.model.setKernel(cv.ml.SVM_RBF) + self.model.setType(cv.ml.SVM_C_SVC) + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + return self.model.predict(samples)[1].ravel() + + def load(self, fn): + self.model = cv.ml.SVM_load(fn) + + def save(self, fn): + self.model.save(fn) + +def evaluate_model(model, digits, samples, labels): + resp = model.predict(samples) + err = (labels != resp).mean() + print('error: %.2f %%' % (err*100)) + + confusion = np.zeros((10, 10), np.int32) + for i, j in zip(labels, resp): + confusion[i, int(j)] += 1 + print('confusion matrix:') + print(confusion) + print() + + vis = [] + for img, flag in zip(digits, resp == labels): + img = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + if not flag: + img[...,:2] = 0 + vis.append(img) + return mosaic(25, vis) + +def preprocess_simple(digits): + return np.float32(digits).reshape(-1, SZ*SZ) / 255.0 + +def preprocess_hog(digits): + samples = [] + for img in digits: + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + mag, ang = cv.cartToPolar(gx, gy) + bin_n = 16 + bin = np.int32(bin_n*ang/(2*np.pi)) + bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:] + mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:] + hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)] + hist = np.hstack(hists) + + # transform to Hellinger kernel + eps = 1e-7 + hist /= hist.sum() + eps + hist = np.sqrt(hist) + hist /= norm(hist) + eps + + samples.append(hist) + return np.float32(samples) + + +if 
__name__ == '__main__': + print(__doc__) + + digits, labels = load_digits(DIGITS_FN) + + print('preprocessing...') + # shuffle digits + rand = np.random.RandomState(321) + shuffle = rand.permutation(len(digits)) + digits, labels = digits[shuffle], labels[shuffle] + + digits2 = list(map(deskew, digits)) + samples = preprocess_hog(digits2) + + train_n = int(0.9*len(samples)) + cv.imshow('test set', mosaic(25, digits[train_n:])) + digits_train, digits_test = np.split(digits2, [train_n]) + samples_train, samples_test = np.split(samples, [train_n]) + labels_train, labels_test = np.split(labels, [train_n]) + + + print('training KNearest...') + model = KNearest(k=4) + model.train(samples_train, labels_train) + vis = evaluate_model(model, digits_test, samples_test, labels_test) + cv.imshow('KNearest test', vis) + + print('training SVM...') + model = SVM(C=2.67, gamma=5.383) + model.train(samples_train, labels_train) + vis = evaluate_model(model, digits_test, samples_test, labels_test) + cv.imshow('SVM test', vis) + print('saving SVM as "digits_svm.dat"...') + model.save('digits_svm.dat') + + cv.waitKey(0) + cv.destroyAllWindows() diff --git a/modules/ml/samples/python/digits_adjust.py b/modules/ml/samples/python/digits_adjust.py new file mode 100755 index 00000000000..94771e63b1e --- /dev/null +++ b/modules/ml/samples/python/digits_adjust.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python + +''' +Digit recognition adjustment. +Grid search is used to find the best parameters for SVM and KNearest classifiers. +SVM adjustment follows the guidelines given in +http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf + +Usage: + digits_adjust.py [--model {svm|knearest}] + + --model {svm|knearest} - select the classifier (SVM is the default) + +''' + +import numpy as np +import cv2 as cv + +from multiprocessing.pool import ThreadPool + +from digits import * + +def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None): + n = len(samples) + folds = np.array_split(np.arange(n), kfold) + def f(i): + model = model_class(**params) + test_idx = folds[i] + train_idx = list(folds) + train_idx.pop(i) + train_idx = np.hstack(train_idx) + train_samples, train_labels = samples[train_idx], labels[train_idx] + test_samples, test_labels = samples[test_idx], labels[test_idx] + model.train(train_samples, train_labels) + resp = model.predict(test_samples) + score = (resp != test_labels).mean() + print(".", end='') + return score + if pool is None: + scores = list(map(f, range(kfold))) + else: + scores = pool.map(f, range(kfold)) + return np.mean(scores) + + +class App(object): + def __init__(self): + self._samples, self._labels = self.preprocess() + + def preprocess(self): + digits, labels = load_digits(DIGITS_FN) + shuffle = np.random.permutation(len(digits)) + digits, labels = digits[shuffle], labels[shuffle] + digits2 = list(map(deskew, digits)) + samples = preprocess_hog(digits2) + return samples, labels + + def get_dataset(self): + return self._samples, self._labels + + def run_jobs(self, f, jobs): + pool = ThreadPool(processes=cv.getNumberOfCPUs()) + ires = pool.imap_unordered(f, jobs) + return ires + + def adjust_SVM(self): + Cs = np.logspace(0, 10, 15, base=2) + gammas = np.logspace(-7, 4, 15, base=2) + scores = np.zeros((len(Cs), len(gammas))) + scores[:] = np.nan + + print('adjusting SVM (may take a long time) ...') + def f(job): + i, j = job + samples, labels = self.get_dataset() + params = dict(C = Cs[i], gamma=gammas[j]) + score = cross_validate(SVM, params, samples, labels) + return i, j, 
score + + ires = self.run_jobs(f, np.ndindex(*scores.shape)) + for count, (i, j, score) in enumerate(ires): + scores[i, j] = score + print('%d / %d (best error: %.2f %%, last: %.2f %%)' % + (count+1, scores.size, np.nanmin(scores)*100, score*100)) + print(scores) + + print('writing score table to "svm_scores.npz"') + np.savez('svm_scores.npz', scores=scores, Cs=Cs, gammas=gammas) + + i, j = np.unravel_index(scores.argmin(), scores.shape) + best_params = dict(C = Cs[i], gamma=gammas[j]) + print('best params:', best_params) + print('best error: %.2f %%' % (scores.min()*100)) + return best_params + + def adjust_KNearest(self): + print('adjusting KNearest ...') + def f(k): + samples, labels = self.get_dataset() + err = cross_validate(KNearest, dict(k=k), samples, labels) + return k, err + best_err, best_k = np.inf, -1 + for k, err in self.run_jobs(f, range(1, 9)): + if err < best_err: + best_err, best_k = err, k + print('k = %d, error: %.2f %%' % (k, err*100)) + best_params = dict(k=best_k) + print('best params:', best_params, 'err: %.2f' % (best_err*100)) + return best_params + + +if __name__ == '__main__': + import getopt + import sys + + print(__doc__) + + args, _ = getopt.getopt(sys.argv[1:], '', ['model=']) + args = dict(args) + args.setdefault('--model', 'svm') + args.setdefault('--env', '') + if args['--model'] not in ['svm', 'knearest']: + print('unknown model "%s"' % args['--model']) + sys.exit(1) + + t = clock() + app = App() + if args['--model'] == 'knearest': + app.adjust_KNearest() + else: + app.adjust_SVM() + print('work time: %f s' % (clock() - t)) diff --git a/modules/ml/samples/python/digits_video.py b/modules/ml/samples/python/digits_video.py new file mode 100755 index 00000000000..17f44c333dd --- /dev/null +++ b/modules/ml/samples/python/digits_video.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +''' +Digit recognition from video. + +Run digits.py before, to train and save the SVM. 
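Each frame is adaptively thresholded, digit-sized contours are extracted, deskewed
and HOG-encoded, and the resulting samples are classified with the saved SVM.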
+ +Usage: + digits_video.py [{camera_id|video_file}] +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 as cv + +# built-in modules +import os +import sys + +# local modules +import video +from common import mosaic + +from digits import * + +def main(): + try: + src = sys.argv[1] + except: + src = 0 + cap = video.create_capture(src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('sudoku.png'))) + + classifier_fn = 'digits_svm.dat' + if not os.path.exists(classifier_fn): + print('"%s" not found, run digits.py first' % classifier_fn) + return + + model = cv.ml.SVM_load(classifier_fn) + + while True: + _ret, frame = cap.read() + gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) + + + bin = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 31, 10) + bin = cv.medianBlur(bin, 3) + contours, heirs = cv.findContours( bin.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE) + try: + heirs = heirs[0] + except: + heirs = [] + + for cnt, heir in zip(contours, heirs): + _, _, _, outer_i = heir + if outer_i >= 0: + continue + x, y, w, h = cv.boundingRect(cnt) + if not (16 <= h <= 64 and w <= 1.2*h): + continue + pad = max(h-w, 0) + x, w = x - (pad // 2), w + pad + cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0)) + + bin_roi = bin[y:,x:][:h,:w] + + m = bin_roi != 0 + if not 0.1 < m.mean() < 0.4: + continue + ''' + gray_roi = gray[y:,x:][:h,:w] + v_in, v_out = gray_roi[m], gray_roi[~m] + if v_out.std() > 10.0: + continue + s = "%f, %f" % (abs(v_in.mean() - v_out.mean()), v_out.std()) + cv.putText(frame, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1) + ''' + + s = 1.5*float(h)/SZ + m = cv.moments(bin_roi) + c1 = np.float32([m['m10'], m['m01']]) / m['m00'] + c0 = np.float32([SZ/2, SZ/2]) + t = c1 - s*c0 + A = np.zeros((2, 3), np.float32) + A[:,:2] = np.eye(2)*s + A[:,2] = t + bin_norm = cv.warpAffine(bin_roi, A, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) + bin_norm = deskew(bin_norm) + if x+w+SZ < frame.shape[1] and y+SZ < frame.shape[0]: + frame[y:,x+w:][:SZ, :SZ] = bin_norm[...,np.newaxis] + + sample = preprocess_hog([bin_norm]) + digit = model.predict(sample)[1].ravel() + cv.putText(frame, '%d'%digit, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1) + + + cv.imshow('frame', frame) + cv.imshow('bin', bin) + ch = cv.waitKey(1) + if ch == 27: + break + + print('Done') + + +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() diff --git a/modules/ml/samples/python/gaussian_mix.py b/modules/ml/samples/python/gaussian_mix.py new file mode 100755 index 00000000000..dd49535ab8f --- /dev/null +++ b/modules/ml/samples/python/gaussian_mix.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +import numpy as np +import cv2 as cv + +from numpy import random + +def make_gaussians(cluster_n, img_size): + points = [] + ref_distrs = [] + for _i in range(cluster_n): + mean = (0.1 + 0.8*random.rand(2)) * img_size + a = (random.rand(2, 2)-0.5)*img_size*0.1 + cov = np.dot(a.T, a) + img_size*0.05*np.eye(2) + n = 100 + random.randint(900) + pts = random.multivariate_normal(mean, cov, n) + points.append( pts ) + ref_distrs.append( (mean, cov) ) + points = np.float32( np.vstack(points) ) + return points, ref_distrs + +def draw_gaussain(img, mean, cov, color): + x, y = mean + w, u, _vt = cv.SVDecomp(cov) + ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi) + s1, s2 = np.sqrt(w)*3.0 + cv.ellipse(img, (int(x), int(y)), (int(s1), int(s2)), ang, 0, 360, color, 1, cv.LINE_AA) + + 
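# main() repeatedly samples a random Gaussian mixture, fits an EM model with a generic
# covariance matrix, and draws the reference components (green) next to the recovered
# ones (red); press ESC to quit, any other key to resample.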
+def main(): + cluster_n = 5 + img_size = 512 + + print('press any key to update distributions, ESC - exit\n') + + while True: + print('sampling distributions...') + points, ref_distrs = make_gaussians(cluster_n, img_size) + + print('EM (opencv) ...') + em = cv.ml.EM_create() + em.setClustersNumber(cluster_n) + em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC) + em.trainEM(points) + means = em.getMeans() + covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232 + found_distrs = zip(means, covs) + print('ready!\n') + + img = np.zeros((img_size, img_size, 3), np.uint8) + for x, y in np.int32(points): + cv.circle(img, (x, y), 1, (255, 255, 255), -1) + for m, cov in ref_distrs: + draw_gaussain(img, m, cov, (0, 255, 0)) + for m, cov in found_distrs: + draw_gaussain(img, m, cov, (0, 0, 255)) + + cv.imshow('gaussian mixture', img) + ch = cv.waitKey(0) + if ch == 27: + break + + print('Done') + + +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() diff --git a/modules/ml/samples/python/letter_recog.py b/modules/ml/samples/python/letter_recog.py new file mode 100755 index 00000000000..f646f178fc3 --- /dev/null +++ b/modules/ml/samples/python/letter_recog.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python + +''' +The sample demonstrates how to train Random Trees classifier +(or Boosting classifier, or MLP, or Knearest, or Support Vector Machines) using the provided dataset. + +We use the sample database letter-recognition.data +from UCI Repository, here is the link: + +Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998). +UCI Repository of machine learning databases +[http://www.ics.uci.edu/~mlearn/MLRepository.html]. +Irvine, CA: University of California, Department of Information and Computer Science. + +The dataset consists of 20000 feature vectors along with the +responses - capital latin letters A..Z. +The first 10000 samples are used for training +and the remaining 10000 - to test the classifier. 
+====================================================== +USAGE: + letter_recog.py [--model ] + [--data ] + [--load ] [--save ] + + Models: RTrees, KNearest, Boost, SVM, MLP +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 as cv + +def load_base(fn): + a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') }) + samples, responses = a[:,1:], a[:,0] + return samples, responses + +class LetterStatModel(object): + class_n = 26 + train_ratio = 0.5 + + def load(self, fn): + self.model = self.model.load(fn) + def save(self, fn): + self.model.save(fn) + + def unroll_samples(self, samples): + sample_n, var_n = samples.shape + new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32) + new_samples[:,:-1] = np.repeat(samples, self.class_n, axis=0) + new_samples[:,-1] = np.tile(np.arange(self.class_n), sample_n) + return new_samples + + def unroll_responses(self, responses): + sample_n = len(responses) + new_responses = np.zeros(sample_n*self.class_n, np.int32) + resp_idx = np.int32( responses + np.arange(sample_n)*self.class_n ) + new_responses[resp_idx] = 1 + return new_responses + +class RTrees(LetterStatModel): + def __init__(self): + self.model = cv.ml.RTrees_create() + + def train(self, samples, responses): + self.model.setMaxDepth(20) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + _ret, resp = self.model.predict(samples) + return resp.ravel() + + +class KNearest(LetterStatModel): + def __init__(self): + self.model = cv.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10) + return results.ravel() + + +class Boost(LetterStatModel): + def __init__(self): + self.model = cv.ml.Boost_create() + + def train(self, samples, responses): + _sample_n, var_n = samples.shape + new_samples = self.unroll_samples(samples) + new_responses = self.unroll_responses(responses) + var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8) + + self.model.setWeakCount(15) + self.model.setMaxDepth(10) + self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) + + def predict(self, samples): + new_samples = self.unroll_samples(samples) + _ret, resp = self.model.predict(new_samples) + + return resp.ravel().reshape(-1, self.class_n).argmax(1) + + +class SVM(LetterStatModel): + def __init__(self): + self.model = cv.ml.SVM_create() + + def train(self, samples, responses): + self.model.setType(cv.ml.SVM_C_SVC) + self.model.setC(1) + self.model.setKernel(cv.ml.SVM_RBF) + self.model.setGamma(.1) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + _ret, resp = self.model.predict(samples) + return resp.ravel() + + +class MLP(LetterStatModel): + def __init__(self): + self.model = cv.ml.ANN_MLP_create() + + def train(self, samples, responses): + _sample_n, var_n = samples.shape + new_responses = self.unroll_responses(responses).reshape(-1, self.class_n) + layer_sizes = np.int32([var_n, 100, 100, self.class_n]) + + self.model.setLayerSizes(layer_sizes) + self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP) + self.model.setBackpropMomentumScale(0.0) + self.model.setBackpropWeightScale(0.001) + 
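        # Train for at most 20 iterations; the 0.01 epsilon is unused since only
        # TERM_CRITERIA_COUNT is specified.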
self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01)) + self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1) + + self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses)) + + def predict(self, samples): + _ret, resp = self.model.predict(samples) + return resp.argmax(-1) + + + +def main(): + import getopt + import sys + + models = [RTrees, KNearest, Boost, SVM, MLP] # NBayes + models = dict( [(cls.__name__.lower(), cls) for cls in models] ) + + + args, dummy = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save=']) + args = dict(args) + args.setdefault('--model', 'svm') + args.setdefault('--data', 'letter-recognition.data') + + datafile = cv.samples.findFile(args['--data']) + + print('loading data %s ...' % datafile) + samples, responses = load_base(datafile) + Model = models[args['--model']] + model = Model() + + train_n = int(len(samples)*model.train_ratio) + if '--load' in args: + fn = args['--load'] + print('loading model from %s ...' % fn) + model.load(fn) + else: + print('training %s ...' % Model.__name__) + model.train(samples[:train_n], responses[:train_n]) + + print('testing...') + train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n].astype(int)) + test_rate = np.mean(model.predict(samples[train_n:]) == responses[train_n:].astype(int)) + + print('train rate: %f test rate: %f' % (train_rate*100, test_rate*100)) + + if '--save' in args: + fn = args['--save'] + print('saving model to %s ...' % fn) + model.save(fn) + + print('Done') + + +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() diff --git a/modules/ml/samples/python/tst_scene_render.py b/modules/ml/samples/python/tst_scene_render.py new file mode 100644 index 00000000000..c3eb69ef9ca --- /dev/null +++ b/modules/ml/samples/python/tst_scene_render.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python + + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 as cv + +from numpy import pi, sin, cos + + +defaultSize = 512 + +class TestSceneRender(): + + def __init__(self, bgImg = None, fgImg = None, + deformation = False, speed = 0.25, **params): + self.time = 0.0 + self.timeStep = 1.0 / 30.0 + self.foreground = fgImg + self.deformation = deformation + self.speed = speed + + if bgImg is not None: + self.sceneBg = bgImg.copy() + else: + self.sceneBg = np.zeros((defaultSize, defaultSize,3), np.uint8) + + self.w = self.sceneBg.shape[0] + self.h = self.sceneBg.shape[1] + + if fgImg is not None: + self.foreground = fgImg.copy() + self.center = self.currentCenter = (int(self.w/2 - fgImg.shape[0]/2), int(self.h/2 - fgImg.shape[1]/2)) + + self.xAmpl = self.sceneBg.shape[0] - (self.center[0] + fgImg.shape[0]) + self.yAmpl = self.sceneBg.shape[1] - (self.center[1] + fgImg.shape[1]) + + self.initialRect = np.array([ (self.h/2, self.w/2), (self.h/2, self.w/2 + self.w/10), + (self.h/2 + self.h/10, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2)]).astype(int) + self.currentRect = self.initialRect + + def getXOffset(self, time): + return int( self.xAmpl*cos(time*self.speed)) + + + def getYOffset(self, time): + return int(self.yAmpl*sin(time*self.speed)) + + def setInitialRect(self, rect): + self.initialRect = rect + + def getRectInTime(self, time): + + if self.foreground is not None: + tmp = np.array(self.center) + np.array((self.getXOffset(time), self.getYOffset(time))) + x0, y0 = tmp + x1, y1 = tmp + self.foreground.shape[0:2] + return np.array([y0, x0, y1, x1]) + else: + x0, y0 = self.initialRect[0] 
+ np.array((self.getXOffset(time), self.getYOffset(time))) + x1, y1 = self.initialRect[2] + np.array((self.getXOffset(time), self.getYOffset(time))) + return np.array([y0, x0, y1, x1]) + + def getCurrentRect(self): + + if self.foreground is not None: + + x0 = self.currentCenter[0] + y0 = self.currentCenter[1] + x1 = self.currentCenter[0] + self.foreground.shape[0] + y1 = self.currentCenter[1] + self.foreground.shape[1] + return np.array([y0, x0, y1, x1]) + else: + x0, y0 = self.currentRect[0] + x1, y1 = self.currentRect[2] + return np.array([x0, y0, x1, y1]) + + def getNextFrame(self): + img = self.sceneBg.copy() + + if self.foreground is not None: + self.currentCenter = (self.center[0] + self.getXOffset(self.time), self.center[1] + self.getYOffset(self.time)) + img[self.currentCenter[0]:self.currentCenter[0]+self.foreground.shape[0], + self.currentCenter[1]:self.currentCenter[1]+self.foreground.shape[1]] = self.foreground + else: + self.currentRect = self.initialRect + int( 30*cos(self.time*self.speed) + 50*sin(self.time*self.speed)) + if self.deformation: + self.currentRect[1:3] += int(self.h/20*cos(self.time)) + cv.fillConvexPoly(img, self.currentRect, (0, 0, 255)) + + self.time += self.timeStep + return img + + def resetTime(self): + self.time = 0.0 + + +def main(): + backGr = cv.imread(cv.samples.findFile('graf1.png')) + fgr = cv.imread(cv.samples.findFile('box.png')) + + render = TestSceneRender(backGr, fgr) + + while True: + + img = render.getNextFrame() + cv.imshow('img', img) + + ch = cv.waitKey(3) + if ch == 27: + break + + print('Done') + + +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() diff --git a/modules/ml/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py b/modules/ml/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py new file mode 100644 index 00000000000..eeb246bc389 --- /dev/null +++ b/modules/ml/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py @@ -0,0 +1,62 @@ +import cv2 as cv +import numpy as np + +# Set up training data +## [setup1] +labels = np.array([1, -1, -1, -1]) +trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32) +## [setup1] + +# Train the SVM +## [init] +svm = cv.ml.SVM_create() +svm.setType(cv.ml.SVM_C_SVC) +svm.setKernel(cv.ml.SVM_LINEAR) +svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6)) +## [init] +## [train] +svm.train(trainingData, cv.ml.ROW_SAMPLE, labels) +## [train] + +# Data for visual representation +width = 512 +height = 512 +image = np.zeros((height, width, 3), dtype=np.uint8) + +# Show the decision regions given by the SVM +## [show] +green = (0,255,0) +blue = (255,0,0) +for i in range(image.shape[0]): + for j in range(image.shape[1]): + sampleMat = np.matrix([[j,i]], dtype=np.float32) + response = svm.predict(sampleMat)[1] + + if response == 1: + image[i,j] = green + elif response == -1: + image[i,j] = blue +## [show] + +# Show the training data +## [show_data] +thickness = -1 +cv.circle(image, (501, 10), 5, ( 0, 0, 0), thickness) +cv.circle(image, (255, 10), 5, (255, 255, 255), thickness) +cv.circle(image, (501, 255), 5, (255, 255, 255), thickness) +cv.circle(image, ( 10, 501), 5, (255, 255, 255), thickness) +## [show_data] + +# Show support vectors +## [show_vectors] +thickness = 2 +sv = svm.getUncompressedSupportVectors() + +for i in range(sv.shape[0]): + cv.circle(image, (int(sv[i,0]), int(sv[i,1])), 6, (128, 128, 128), thickness) +## [show_vectors] + +cv.imwrite('result.png', image) # 
save the image + +cv.imshow('SVM Simple Example', image) # show it to the user +cv.waitKey() diff --git a/modules/ml/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py b/modules/ml/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py new file mode 100644 index 00000000000..a88ac4bd1b1 --- /dev/null +++ b/modules/ml/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py @@ -0,0 +1,117 @@ +from __future__ import print_function +import cv2 as cv +import numpy as np +import random as rng + +NTRAINING_SAMPLES = 100 # Number of training samples per class +FRAC_LINEAR_SEP = 0.9 # Fraction of samples which compose the linear separable part + +# Data for visual representation +WIDTH = 512 +HEIGHT = 512 +I = np.zeros((HEIGHT, WIDTH, 3), dtype=np.uint8) + +# --------------------- 1. Set up training data randomly --------------------------------------- +trainData = np.empty((2*NTRAINING_SAMPLES, 2), dtype=np.float32) +labels = np.empty((2*NTRAINING_SAMPLES, 1), dtype=np.int32) + +rng.seed(100) # Random value generation class + +# Set up the linearly separable part of the training data +nLinearSamples = int(FRAC_LINEAR_SEP * NTRAINING_SAMPLES) + +## [setup1] +# Generate random points for the class 1 +trainClass = trainData[0:nLinearSamples,:] +# The x coordinate of the points is in [0, 0.4) +c = trainClass[:,0:1] +c[:] = np.random.uniform(0.0, 0.4 * WIDTH, c.shape) +# The y coordinate of the points is in [0, 1) +c = trainClass[:,1:2] +c[:] = np.random.uniform(0.0, HEIGHT, c.shape) + +# Generate random points for the class 2 +trainClass = trainData[2*NTRAINING_SAMPLES-nLinearSamples:2*NTRAINING_SAMPLES,:] +# The x coordinate of the points is in [0.6, 1] +c = trainClass[:,0:1] +c[:] = np.random.uniform(0.6*WIDTH, WIDTH, c.shape) +# The y coordinate of the points is in [0, 1) +c = trainClass[:,1:2] +c[:] = np.random.uniform(0.0, HEIGHT, c.shape) +## [setup1] + +#------------------ Set up the non-linearly separable part of the training data --------------- +## [setup2] +# Generate random points for the classes 1 and 2 +trainClass = trainData[nLinearSamples:2*NTRAINING_SAMPLES-nLinearSamples,:] +# The x coordinate of the points is in [0.4, 0.6) +c = trainClass[:,0:1] +c[:] = np.random.uniform(0.4*WIDTH, 0.6*WIDTH, c.shape) +# The y coordinate of the points is in [0, 1) +c = trainClass[:,1:2] +c[:] = np.random.uniform(0.0, HEIGHT, c.shape) +## [setup2] + +#------------------------- Set up the labels for the classes --------------------------------- +labels[0:NTRAINING_SAMPLES,:] = 1 # Class 1 +labels[NTRAINING_SAMPLES:2*NTRAINING_SAMPLES,:] = 2 # Class 2 + +#------------------------ 2. Set up the support vector machines parameters -------------------- +print('Starting training process') +## [init] +svm = cv.ml.SVM_create() +svm.setType(cv.ml.SVM_C_SVC) +svm.setC(0.1) +svm.setKernel(cv.ml.SVM_LINEAR) +svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, int(1e7), 1e-6)) +## [init] + +#------------------------ 3. Train the svm ---------------------------------------------------- +## [train] +svm.train(trainData, cv.ml.ROW_SAMPLE, labels) +## [train] +print('Finished training process') + +#------------------------ 4. 
Show the decision regions ---------------------------------------- +## [show] +green = (0,100,0) +blue = (100,0,0) +for i in range(I.shape[0]): + for j in range(I.shape[1]): + sampleMat = np.matrix([[j,i]], dtype=np.float32) + response = svm.predict(sampleMat)[1] + + if response == 1: + I[i,j] = green + elif response == 2: + I[i,j] = blue +## [show] + +#----------------------- 5. Show the training data -------------------------------------------- +## [show_data] +thick = -1 +# Class 1 +for i in range(NTRAINING_SAMPLES): + px = trainData[i,0] + py = trainData[i,1] + cv.circle(I, (int(px), int(py)), 3, (0, 255, 0), thick) + +# Class 2 +for i in range(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES): + px = trainData[i,0] + py = trainData[i,1] + cv.circle(I, (int(px), int(py)), 3, (255, 0, 0), thick) +## [show_data] + +#------------------------- 6. Show support vectors -------------------------------------------- +## [show_vectors] +thick = 2 +sv = svm.getUncompressedSupportVectors() + +for i in range(sv.shape[0]): + cv.circle(I, (int(sv[i,0]), int(sv[i,1])), 6, (128, 128, 128), thick) +## [show_vectors] + +cv.imwrite('result.png', I) # save the Image +cv.imshow('SVM for Non-Linear Training Data', I) # show it to the user +cv.waitKey() diff --git a/modules/ml/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py b/modules/ml/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py new file mode 100755 index 00000000000..898c7dc4d7c --- /dev/null +++ b/modules/ml/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +import cv2 as cv +import numpy as np + +SZ=20 +bin_n = 16 # Number of bins + + +affine_flags = cv.WARP_INVERSE_MAP|cv.INTER_LINEAR + +## [deskew] +def deskew(img): + m = cv.moments(img) + if abs(m['mu02']) < 1e-2: + return img.copy() + skew = m['mu11']/m['mu02'] + M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) + img = cv.warpAffine(img,M,(SZ, SZ),flags=affine_flags) + return img +## [deskew] + +## [hog] +def hog(img): + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + mag, ang = cv.cartToPolar(gx, gy) + bins = np.int32(bin_n*ang/(2*np.pi)) # quantizing binvalues in (0...16) + bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:] + mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:] + hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)] + hist = np.hstack(hists) # hist is a 64 bit vector + return hist +## [hog] + +img = cv.imread(cv.samples.findFile('digits.png'),0) +if img is None: + raise Exception("we need the digits.png image from samples/data here !") + + +cells = [np.hsplit(row,100) for row in np.vsplit(img,50)] + +# First half is trainData, remaining is testData +train_cells = [ i[:50] for i in cells ] +test_cells = [ i[50:] for i in cells] + +###### Now training ######################## + +deskewed = [list(map(deskew,row)) for row in train_cells] +hogdata = [list(map(hog,row)) for row in deskewed] +trainData = np.float32(hogdata).reshape(-1,64) +responses = np.repeat(np.arange(10),250)[:,np.newaxis] + +svm = cv.ml.SVM_create() +svm.setKernel(cv.ml.SVM_LINEAR) +svm.setType(cv.ml.SVM_C_SVC) +svm.setC(2.67) +svm.setGamma(5.383) + +svm.train(trainData, cv.ml.ROW_SAMPLE, responses) +svm.save('svm_data.dat') + +###### Now testing ######################## + +deskewed = [list(map(deskew,row)) for row in test_cells] +hogdata = [list(map(hog,row)) for row in deskewed] +testData = np.float32(hogdata).reshape(-1,bin_n*4) +result = 
svm.predict(testData)[1] + +####### Check Accuracy ######################## +mask = result==responses +correct = np.count_nonzero(mask) +print(correct*100.0/result.size) diff --git a/modules/ml/samples/python/video.py b/modules/ml/samples/python/video.py new file mode 100755 index 00000000000..682026837c7 --- /dev/null +++ b/modules/ml/samples/python/video.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python + +''' +Video capture sample. + +Sample shows how VideoCapture class can be used to acquire video +frames from a camera of a movie file. Also the sample provides +an example of procedural video generation by an object, mimicking +the VideoCapture interface (see Chess class). + +'create_capture' is a convenience function for capture creation, +falling back to procedural video in case of error. + +Usage: + video.py [--shotdir ] [source0] [source1] ...' + + sourceN is an + - integer number for camera capture + - name of video file + - synth: for procedural video + +Synth examples: + synth:bg=lena.jpg:noise=0.1 + synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480 + +Keys: + ESC - exit + SPACE - save current frame to directory + +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 as cv + +import re + +from numpy import pi, sin, cos + +# local modules +from tst_scene_render import TestSceneRender +import common + +class VideoSynthBase(object): + def __init__(self, size=None, noise=0.0, bg = None, **params): + self.bg = None + self.frame_size = (640, 480) + if bg is not None: + self.bg = cv.imread(cv.samples.findFile(bg)) + h, w = self.bg.shape[:2] + self.frame_size = (w, h) + + if size is not None: + w, h = map(int, size.split('x')) + self.frame_size = (w, h) + self.bg = cv.resize(self.bg, self.frame_size) + + self.noise = float(noise) + + def render(self, dst): + pass + + def read(self, dst=None): + w, h = self.frame_size + + if self.bg is None: + buf = np.zeros((h, w, 3), np.uint8) + else: + buf = self.bg.copy() + + self.render(buf) + + if self.noise > 0.0: + noise = np.zeros((h, w, 3), np.int8) + cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + buf = cv.add(buf, noise, dtype=cv.CV_8UC3) + return True, buf + + def isOpened(self): + return True + +class Book(VideoSynthBase): + def __init__(self, **kw): + super(Book, self).__init__(**kw) + backGr = cv.imread(cv.samples.findFile('graf1.png')) + fgr = cv.imread(cv.samples.findFile('box.png')) + self.render = TestSceneRender(backGr, fgr, speed = 1) + + def read(self, dst=None): + noise = np.zeros(self.render.sceneBg.shape, np.int8) + cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + + return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3) + +class Cube(VideoSynthBase): + def __init__(self, **kw): + super(Cube, self).__init__(**kw) + self.render = TestSceneRender(cv.imread(cv.samples.findFile('pca_test1.jpg')), deformation = True, speed = 1) + + def read(self, dst=None): + noise = np.zeros(self.render.sceneBg.shape, np.int8) + cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + + return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3) + +class Chess(VideoSynthBase): + def __init__(self, **kw): + super(Chess, self).__init__(**kw) + + w, h = self.frame_size + + self.grid_size = sx, sy = 10, 7 + white_quads = [] + black_quads = [] + for i, j in np.ndindex(sy, sx): + q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]] + [white_quads, black_quads][(i + j) % 2].append(q) + self.white_quads = np.float32(white_quads) + self.black_quads = 
np.float32(black_quads) + + fx = 0.9 + self.K = np.float64([[fx*w, 0, 0.5*(w-1)], + [0, fx*w, 0.5*(h-1)], + [0.0,0.0, 1.0]]) + + self.dist_coef = np.float64([-0.2, 0.1, 0, 0]) + self.t = 0 + + def draw_quads(self, img, quads, color = (0, 255, 0)): + img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0] + img_quads.shape = quads.shape[:2] + (2,) + for q in img_quads: + cv.fillConvexPoly(img, np.int32(q*4), color, cv.LINE_AA, shift=2) + + def render(self, dst): + t = self.t + self.t += 1.0/30.0 + + sx, sy = self.grid_size + center = np.array([0.5*sx, 0.5*sy, 0.0]) + phi = pi/3 + sin(t*3)*pi/8 + c, s = cos(phi), sin(phi) + ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2 + eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs + target_pos = center + ofs + + R, self.tvec = common.lookat(eye_pos, target_pos) + self.rvec = common.mtx2rvec(R) + + self.draw_quads(dst, self.white_quads, (245, 245, 245)) + self.draw_quads(dst, self.black_quads, (10, 10, 10)) + + +classes = dict(chess=Chess, book=Book, cube=Cube) + +presets = dict( + empty = 'synth:', + lena = 'synth:bg=lena.jpg:noise=0.1', + chess = 'synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480', + book = 'synth:class=book:bg=graf1.png:noise=0.1:size=640x480', + cube = 'synth:class=cube:bg=pca_test1.jpg:noise=0.0:size=640x480' +) + + +def create_capture(source = 0, fallback = presets['chess']): + '''source: or '||synth [:= [:...]]' + ''' + source = str(source).strip() + + # Win32: handle drive letter ('c:', ...) + source = re.sub(r'(^|=)([a-zA-Z]):([/\\a-zA-Z0-9])', r'\1?disk\2?\3', source) + chunks = source.split(':') + chunks = [re.sub(r'\?disk([a-zA-Z])\?', r'\1:', s) for s in chunks] + + source = chunks[0] + try: source = int(source) + except ValueError: pass + params = dict( s.split('=') for s in chunks[1:] ) + + cap = None + if source == 'synth': + Class = classes.get(params.get('class', None), VideoSynthBase) + try: cap = Class(**params) + except: pass + else: + cap = cv.VideoCapture(source) + if 'size' in params: + w, h = map(int, params['size'].split('x')) + cap.set(cv.CAP_PROP_FRAME_WIDTH, w) + cap.set(cv.CAP_PROP_FRAME_HEIGHT, h) + if cap is None or not cap.isOpened(): + print('Warning: unable to open video source: ', source) + if fallback is not None: + return create_capture(fallback, None) + return cap + +if __name__ == '__main__': + import sys + import getopt + + print(__doc__) + + args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=') + args = dict(args) + shotdir = args.get('--shotdir', '.') + if len(sources) == 0: + sources = [ 0 ] + + caps = list(map(create_capture, sources)) + shot_idx = 0 + while True: + imgs = [] + for i, cap in enumerate(caps): + ret, img = cap.read() + imgs.append(img) + cv.imshow('capture %d' % i, img) + ch = cv.waitKey(1) + if ch == 27: + break + if ch == ord(' '): + for i, img in enumerate(imgs): + fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx) + cv.imwrite(fn, img) + print(fn, 'saved') + shot_idx += 1 + cv.destroyAllWindows() diff --git a/modules/ml/samples/train_svmsgd.cpp b/modules/ml/samples/train_svmsgd.cpp new file mode 100644 index 00000000000..12e03840810 --- /dev/null +++ b/modules/ml/samples/train_svmsgd.cpp @@ -0,0 +1,211 @@ +#include "opencv2/core.hpp" +#include "opencv2/video/tracking.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/ml.hpp" + +using namespace cv; +using namespace cv::ml; + + +struct Data +{ + Mat img; + Mat samples; //Set of train samples. 
Contains points on image + Mat responses; //Set of responses for train samples + + Data() + { + const int WIDTH = 841; + const int HEIGHT = 594; + img = Mat::zeros(HEIGHT, WIDTH, CV_8UC3); + imshow("Train svmsgd", img); + } +}; + +//Train with SVMSGD algorithm +//(samples, responses) is a train set +//weights is a required vector for decision function of SVMSGD algorithm +bool doTrain(const Mat samples, const Mat responses, Mat &weights, float &shift); + +//function finds two points for drawing line (wx = 0) +bool findPointsForLine(const Mat &weights, float shift, Point points[2], int width, int height); + +// function finds cross point of line (wx = 0) and segment ( (y = HEIGHT, 0 <= x <= WIDTH) or (x = WIDTH, 0 <= y <= HEIGHT) ) +bool findCrossPointWithBorders(const Mat &weights, float shift, const std::pair &segment, Point &crossPoint); + +//segments' initialization ( (y = HEIGHT, 0 <= x <= WIDTH) and (x = WIDTH, 0 <= y <= HEIGHT) ) +void fillSegments(std::vector > &segments, int width, int height); + +//redraw points' set and line (wx = 0) +void redraw(Data data, const Point points[2]); + +//add point in train set, train SVMSGD algorithm and draw results on image +void addPointRetrainAndRedraw(Data &data, int x, int y, int response); + + +bool doTrain( const Mat samples, const Mat responses, Mat &weights, float &shift) +{ + cv::Ptr svmsgd = SVMSGD::create(); + + cv::Ptr trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, responses); + svmsgd->train( trainData ); + + if (svmsgd->isTrained()) + { + weights = svmsgd->getWeights(); + shift = svmsgd->getShift(); + + return true; + } + return false; +} + +void fillSegments(std::vector > &segments, int width, int height) +{ + std::pair currentSegment; + + currentSegment.first = Point(width, 0); + currentSegment.second = Point(width, height); + segments.push_back(currentSegment); + + currentSegment.first = Point(0, height); + currentSegment.second = Point(width, height); + segments.push_back(currentSegment); + + currentSegment.first = Point(0, 0); + currentSegment.second = Point(width, 0); + segments.push_back(currentSegment); + + currentSegment.first = Point(0, 0); + currentSegment.second = Point(0, height); + segments.push_back(currentSegment); +} + + +bool findCrossPointWithBorders(const Mat &weights, float shift, const std::pair &segment, Point &crossPoint) +{ + int x = 0; + int y = 0; + int xMin = std::min(segment.first.x, segment.second.x); + int xMax = std::max(segment.first.x, segment.second.x); + int yMin = std::min(segment.first.y, segment.second.y); + int yMax = std::max(segment.first.y, segment.second.y); + + CV_Assert(weights.type() == CV_32FC1); + CV_Assert(xMin == xMax || yMin == yMax); + + if (xMin == xMax && weights.at(1) != 0) + { + x = xMin; + y = static_cast(std::floor( - (weights.at(0) * x + shift) / weights.at(1))); + if (y >= yMin && y <= yMax) + { + crossPoint.x = x; + crossPoint.y = y; + return true; + } + } + else if (yMin == yMax && weights.at(0) != 0) + { + y = yMin; + x = static_cast(std::floor( - (weights.at(1) * y + shift) / weights.at(0))); + if (x >= xMin && x <= xMax) + { + crossPoint.x = x; + crossPoint.y = y; + return true; + } + } + return false; +} + +bool findPointsForLine(const Mat &weights, float shift, Point points[2], int width, int height) +{ + if (weights.empty()) + { + return false; + } + + int foundPointsCount = 0; + std::vector > segments; + fillSegments(segments, width, height); + + for (uint i = 0; i < segments.size(); i++) + { + if (findCrossPointWithBorders(weights, shift, segments[i], 
points[foundPointsCount])) + foundPointsCount++; + if (foundPointsCount >= 2) + break; + } + + return true; +} + +void redraw(Data data, const Point points[2]) +{ + data.img.setTo(0); + Point center; + int radius = 3; + Scalar color; + CV_Assert((data.samples.type() == CV_32FC1) && (data.responses.type() == CV_32FC1)); + for (int i = 0; i < data.samples.rows; i++) + { + center.x = static_cast(data.samples.at(i,0)); + center.y = static_cast(data.samples.at(i,1)); + color = (data.responses.at(i) > 0) ? Scalar(128,128,0) : Scalar(0,128,128); + circle(data.img, center, radius, color, 5); + } + line(data.img, points[0], points[1],cv::Scalar(1,255,1)); + + imshow("Train svmsgd", data.img); +} + +void addPointRetrainAndRedraw(Data &data, int x, int y, int response) +{ + Mat currentSample(1, 2, CV_32FC1); + + currentSample.at(0,0) = (float)x; + currentSample.at(0,1) = (float)y; + data.samples.push_back(currentSample); + data.responses.push_back(static_cast(response)); + + Mat weights(1, 2, CV_32FC1); + float shift = 0; + + if (doTrain(data.samples, data.responses, weights, shift)) + { + Point points[2]; + findPointsForLine(weights, shift, points, data.img.cols, data.img.rows); + + redraw(data, points); + } +} + + +static void onMouse( int event, int x, int y, int, void* pData) +{ + Data &data = *(Data*)pData; + + switch( event ) + { + case EVENT_LBUTTONUP: + addPointRetrainAndRedraw(data, x, y, 1); + break; + + case EVENT_RBUTTONDOWN: + addPointRetrainAndRedraw(data, x, y, -1); + break; + } + +} + +int main() +{ + Data data; + + setMouseCallback( "Train svmsgd", onMouse, &data ); + waitKey(); + + return 0; +} diff --git a/modules/ml/samples/travelsalesman.cpp b/modules/ml/samples/travelsalesman.cpp new file mode 100644 index 00000000000..256ff55b432 --- /dev/null +++ b/modules/ml/samples/travelsalesman.cpp @@ -0,0 +1,109 @@ +#include +#include +#include +#include + +using namespace cv; + +class TravelSalesman +{ +private : + const std::vector& posCity; + std::vector& next; + RNG rng; + int d0,d1,d2,d3; + +public: + TravelSalesman(std::vector &p, std::vector &n) : + posCity(p), next(n) + { + rng = theRNG(); + } + /** Give energy value for a state of system.*/ + double energy() const; + /** Function which change the state of system (random perturbation).*/ + void changeState(); + /** Function to reverse to the previous state.*/ + void reverseState(); + +}; + +void TravelSalesman::changeState() +{ + d0 = rng.uniform(0,static_cast(posCity.size())); + d1 = next[d0]; + d2 = next[d1]; + d3 = next[d2]; + + next[d0] = d2; + next[d2] = d1; + next[d1] = d3; +} + + +void TravelSalesman::reverseState() +{ + next[d0] = d1; + next[d1] = d2; + next[d2] = d3; +} + +double TravelSalesman::energy() const +{ + double e = 0; + for (size_t i = 0; i < next.size(); i++) + { + e += norm(posCity[i]-posCity[next[i]]); + } + return e; +} + + +static void DrawTravelMap(Mat &img, std::vector &p, std::vector &n) +{ + for (size_t i = 0; i < n.size(); i++) + { + circle(img,p[i],5,Scalar(0,0,255),2); + line(img,p[i],p[n[i]],Scalar(0,255,0),2); + } +} +int main(void) +{ + int nbCity=40; + Mat img(500,500,CV_8UC3,Scalar::all(0)); + RNG rng(123456); + int radius=static_cast(img.cols*0.45); + Point center(img.cols/2,img.rows/2); + + std::vector posCity(nbCity); + std::vector next(nbCity); + for (size_t i = 0; i < posCity.size(); i++) + { + double theta = rng.uniform(0., 2 * CV_PI); + posCity[i].x = static_cast(radius*cos(theta)) + center.x; + posCity[i].y = static_cast(radius*sin(theta)) + center.y; + next[i]=(i+1)%nbCity; + } + 
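+ // How the pieces below fit together: changeState() picks a random position in the
+ // tour stored in 'next' and swaps the order of the two cities that follow it,
+ // energy() returns the total tour length, and reverseState() undoes the last move.
+ // ml::simulatedAnnealingSolver() drives these callbacks, accepting moves that shorten
+ // the tour (and, while the temperature is still high, occasionally ones that do not),
+ // and the surrounding loop keeps cooling until ten consecutive passes apply no changes.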
TravelSalesman ts_system(posCity, next); + + DrawTravelMap(img,posCity,next); + imshow("Map",img); + waitKey(10); + double currentTemperature = 100.0; + for (int i = 0, zeroChanges = 0; zeroChanges < 10; i++) + { + int changesApplied = ml::simulatedAnnealingSolver(ts_system, currentTemperature, currentTemperature*0.97, 0.99, 10000*nbCity, ¤tTemperature, rng); + img.setTo(Scalar::all(0)); + DrawTravelMap(img, posCity, next); + imshow("Map", img); + int k = waitKey(10); + std::cout << "i=" << i << " changesApplied=" << changesApplied << " temp=" << currentTemperature << " result=" << ts_system.energy() << std::endl; + if (k == 27 || k == 'q' || k == 'Q') + return 0; + if (changesApplied == 0) + zeroChanges++; + } + std::cout << "Done" << std::endl; + waitKey(0); + return 0; +} diff --git a/modules/ml/samples/tree_engine.cpp b/modules/ml/samples/tree_engine.cpp new file mode 100644 index 00000000000..956deb8f78f --- /dev/null +++ b/modules/ml/samples/tree_engine.cpp @@ -0,0 +1,116 @@ +#include "opencv2/ml.hpp" +#include "opencv2/core.hpp" +#include "opencv2/core/utility.hpp" +#include +#include +#include + +using namespace cv; +using namespace cv::ml; + +static void help(char** argv) +{ + printf( + "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees.\n" + "Usage:\n\t%s [-r=] [-ts=type_spec] \n" + "where -r= specified the 0-based index of the response (0 by default)\n" + "-ts= specifies the var type spec in the form ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\n" + " is the name of training data file in comma-separated value format\n\n", argv[0]); +} + +static void train_and_print_errs(Ptr model, const Ptr& data) +{ + bool ok = model->train(data); + if( !ok ) + { + printf("Training failed\n"); + } + else + { + printf( "train error: %f\n", model->calcError(data, false, noArray()) ); + printf( "test error: %f\n\n", model->calcError(data, true, noArray()) ); + } +} + +int main(int argc, char** argv) +{ + cv::CommandLineParser parser(argc, argv, "{ help h | | }{r | 0 | }{ts | | }{@input | | }"); + if (parser.has("help")) + { + help(argv); + return 0; + } + std::string filename = parser.get("@input"); + int response_idx; + std::string typespec; + response_idx = parser.get("r"); + typespec = parser.get("ts"); + if( filename.empty() || !parser.check() ) + { + parser.printErrors(); + help(argv); + return 0; + } + printf("\nReading in %s...\n\n",filename.c_str()); + const double train_test_split_ratio = 0.5; + + Ptr data = TrainData::loadFromCSV(filename, 0, response_idx, response_idx+1, typespec); + if( data.empty() ) + { + printf("ERROR: File %s can not be read\n", filename.c_str()); + return 0; + } + + data->setTrainTestSplitRatio(train_test_split_ratio); + std::cout << "Test/Train: " << data->getNTestSamples() << "/" << data->getNTrainSamples(); + + printf("======DTREE=====\n"); + Ptr dtree = DTrees::create(); + dtree->setMaxDepth(10); + dtree->setMinSampleCount(2); + dtree->setRegressionAccuracy(0); + dtree->setUseSurrogates(false); + dtree->setMaxCategories(16); + dtree->setCVFolds(0); + dtree->setUse1SERule(false); + dtree->setTruncatePrunedTree(false); + dtree->setPriors(Mat()); + train_and_print_errs(dtree, data); + + if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem + { + printf("======BOOST=====\n"); + Ptr boost = Boost::create(); + boost->setBoostType(Boost::GENTLE); + boost->setWeakCount(100); + boost->setWeightTrimRate(0.95); + boost->setMaxDepth(2); + boost->setUseSurrogates(false); 
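+ // Leaving the priors empty means no additional per-class weighting is applied.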
+ boost->setPriors(Mat()); + train_and_print_errs(boost, data); + } + + printf("======RTREES=====\n"); + Ptr rtrees = RTrees::create(); + rtrees->setMaxDepth(10); + rtrees->setMinSampleCount(2); + rtrees->setRegressionAccuracy(0); + rtrees->setUseSurrogates(false); + rtrees->setMaxCategories(16); + rtrees->setPriors(Mat()); + rtrees->setCalculateVarImportance(true); + rtrees->setActiveVarCount(0); + rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0)); + train_and_print_errs(rtrees, data); + cv::Mat ref_labels = data->getClassLabels(); + cv::Mat test_data = data->getTestSampleIdx(); + cv::Mat predict_labels; + rtrees->predict(data->getSamples(), predict_labels); + + cv::Mat variable_importance = rtrees->getVarImportance(); + std::cout << "Estimated variable importance" << std::endl; + for (int i = 0; i < variable_importance.rows; i++) { + std::cout << "Variable " << i << ": " << variable_importance.at(i, 0) << std::endl; + } + return 0; +} diff --git a/modules/ml/src/ann_mlp.cpp b/modules/ml/src/ann_mlp.cpp new file mode 100644 index 00000000000..c6a4552c9e9 --- /dev/null +++ b/modules/ml/src/ann_mlp.cpp @@ -0,0 +1,1534 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv { namespace ml { + +struct AnnParams +{ + AnnParams() + { + termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 ); + trainMethod = ANN_MLP::RPROP; + bpDWScale = bpMomentScale = 0.1; + rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5; + rpDWMin = FLT_EPSILON; rpDWMax = 50.; + initialT=10;finalT=0.1,coolingRatio=0.95;itePerStep=10; + rEnergy = cv::RNG(12345); + } + + TermCriteria termCrit; + int trainMethod; + + double bpDWScale; + double bpMomentScale; + + double rpDW0; + double rpDWPlus; + double rpDWMinus; + double rpDWMin; + double rpDWMax; + + double initialT; + double finalT; + double coolingRatio; + int itePerStep; + RNG rEnergy; +}; + +template +inline T inBounds(T val, T min_val, T max_val) +{ + return std::min(std::max(val, min_val), max_val); +} + +class SimulatedAnnealingANN_MLP +{ +protected: + ml::ANN_MLP& nn; + Ptr data; + int nbVariables; + vector adrVariables; + RNG rVar; + RNG rIndex; + double varTmp; + int index; +public: + SimulatedAnnealingANN_MLP(ml::ANN_MLP& x, const Ptr& d) : nn(x), data(d), varTmp(0.0), index(0) + { + initVarMap(); + } + ~SimulatedAnnealingANN_MLP() {} + + void changeState() + { + index = rIndex.uniform(0, nbVariables); + double dv = rVar.uniform(-1.0, 1.0); + varTmp = *adrVariables[index]; + *adrVariables[index] = dv; + } + + void reverseState() + { + *adrVariables[index] = varTmp; + } + + double energy() const { return nn.calcError(data, false, noArray()); } + +protected: + void initVarMap() + { + Mat l = nn.getLayerSizes(); + nbVariables = 0; + adrVariables.clear(); + int nlayers = (int)l.total(); + for (int i = 1; i < nlayers-1; i++) + { + Mat w = nn.getWeights(i); + for (int j = 0; j < w.rows; j++) + { + for (int k = 0; k < w.cols; k++, nbVariables++) + { + if (j == w.rows - 1) + { + adrVariables.push_back(&w.at(w.rows - 1, k)); + } + else + { + adrVariables.push_back(&w.at(j, k)); + } + } + } + } + } + +}; + +class ANN_MLPImpl CV_FINAL : public ANN_MLP +{ +public: + ANN_MLPImpl() + { + clear(); + setActivationFunction( SIGMOID_SYM, 0, 0); + setLayerSizes(Mat()); + setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON); + } + + virtual ~ANN_MLPImpl() CV_OVERRIDE {} + + inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; } + inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.termCrit = val; } + inline double getBackpropWeightScale() const CV_OVERRIDE { return params.bpDWScale; } + inline void setBackpropWeightScale(double val) CV_OVERRIDE { params.bpDWScale = val; } + inline double getBackpropMomentumScale() const CV_OVERRIDE { return params.bpMomentScale; } + inline void setBackpropMomentumScale(double val) CV_OVERRIDE { params.bpMomentScale = val; } + inline double getRpropDW0() const CV_OVERRIDE { return params.rpDW0; } + inline void setRpropDW0(double val) CV_OVERRIDE { params.rpDW0 = val; } + inline double getRpropDWPlus() const CV_OVERRIDE { return params.rpDWPlus; } + inline void setRpropDWPlus(double val) CV_OVERRIDE { params.rpDWPlus = val; } + inline double getRpropDWMinus() const CV_OVERRIDE { return params.rpDWMinus; } + inline void setRpropDWMinus(double val) CV_OVERRIDE { params.rpDWMinus = val; } + inline double getRpropDWMin() const CV_OVERRIDE { return params.rpDWMin; } + inline void setRpropDWMin(double val) CV_OVERRIDE { params.rpDWMin = val; } + inline double getRpropDWMax() const CV_OVERRIDE { return params.rpDWMax; } + inline void setRpropDWMax(double val) CV_OVERRIDE { params.rpDWMax = val; } + inline double 
getAnnealInitialT() const CV_OVERRIDE { return params.initialT; } + inline void setAnnealInitialT(double val) CV_OVERRIDE { params.initialT = val; } + inline double getAnnealFinalT() const CV_OVERRIDE { return params.finalT; } + inline void setAnnealFinalT(double val) CV_OVERRIDE { params.finalT = val; } + inline double getAnnealCoolingRatio() const CV_OVERRIDE { return params.coolingRatio; } + inline void setAnnealCoolingRatio(double val) CV_OVERRIDE { params.coolingRatio = val; } + inline int getAnnealItePerStep() const CV_OVERRIDE { return params.itePerStep; } + inline void setAnnealItePerStep(int val) CV_OVERRIDE { params.itePerStep = val; } + // disabled getAnnealEnergyRNG() + inline void setAnnealEnergyRNG(const RNG& val) CV_OVERRIDE { params.rEnergy = val; } + + void clear() CV_OVERRIDE + { + min_val = max_val = min_val1 = max_val1 = 0.; + rng = RNG((uint64)-1); + weights.clear(); + trained = false; + max_buf_sz = 1 << 12; + } + + int layer_count() const { return (int)layer_sizes.size(); } + + void setTrainMethod(int method, double param1, double param2) CV_OVERRIDE + { + if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP && method != ANN_MLP::ANNEAL) + method = ANN_MLP::RPROP; + params.trainMethod = method; + if(method == ANN_MLP::RPROP ) + { + if( param1 < FLT_EPSILON ) + param1 = 1.; + params.rpDW0 = param1; + params.rpDWMin = std::max( param2, 0. ); + } + else if (method == ANN_MLP::BACKPROP) + { + if (param1 <= 0) + param1 = 0.1; + params.bpDWScale = inBounds(param1, 1e-3, 1.); + if (param2 < 0) + param2 = 0.1; + params.bpMomentScale = std::min(param2, 1.); + } + } + + int getTrainMethod() const CV_OVERRIDE + { + return params.trainMethod; + } + + void setActivationFunction(int _activ_func, double _f_param1, double _f_param2) CV_OVERRIDE + { + if( _activ_func < 0 || _activ_func > LEAKYRELU) + CV_Error( CV_StsOutOfRange, "Unknown activation function" ); + + activ_func = _activ_func; + + switch( activ_func ) + { + case SIGMOID_SYM: + max_val = 0.95; min_val = -max_val; + max_val1 = 0.98; min_val1 = -max_val1; + if( fabs(_f_param1) < FLT_EPSILON ) + _f_param1 = 2./3; + if( fabs(_f_param2) < FLT_EPSILON ) + _f_param2 = 1.7159; + break; + case GAUSSIAN: + max_val = 1.; min_val = 0.05; + max_val1 = 1.; min_val1 = 0.02; + if (fabs(_f_param1) < FLT_EPSILON) + _f_param1 = 1.; + if (fabs(_f_param2) < FLT_EPSILON) + _f_param2 = 1.; + break; + case RELU: + if (fabs(_f_param1) < FLT_EPSILON) + _f_param1 = 1; + min_val = max_val = min_val1 = max_val1 = 0.; + _f_param2 = 0.; + break; + case LEAKYRELU: + if (fabs(_f_param1) < FLT_EPSILON) + _f_param1 = 0.01; + min_val = max_val = min_val1 = max_val1 = 0.; + _f_param2 = 0.; + break; + default: + min_val = max_val = min_val1 = max_val1 = 0.; + _f_param1 = 1.; + _f_param2 = 0.; + } + + f_param1 = _f_param1; + f_param2 = _f_param2; + } + + + void init_weights() + { + int i, j, k, l_count = layer_count(); + + for( i = 1; i < l_count; i++ ) + { + int n1 = layer_sizes[i-1]; + int n2 = layer_sizes[i]; + double val = 0, G = n2 > 2 ? 
0.7*pow((double)n1,1./(n2-1)) : 1.; + double* w = weights[i].ptr(); + + // initialize weights using Nguyen-Widrow algorithm + for( j = 0; j < n2; j++ ) + { + double s = 0; + for( k = 0; k <= n1; k++ ) + { + val = rng.uniform(0., 1.)*2-1.; + w[k*n2 + j] = val; + s += fabs(val); + } + + if( i < l_count - 1 ) + { + s = 1./(s - fabs(val)); + for( k = 0; k <= n1; k++ ) + w[k*n2 + j] *= s; + w[n1*n2 + j] *= G*(-1+j*2./n2); + } + } + } + } + + Mat getLayerSizes() const CV_OVERRIDE + { + return Mat_(layer_sizes, true); + } + + void setLayerSizes( InputArray _layer_sizes ) CV_OVERRIDE + { + clear(); + + _layer_sizes.copyTo(layer_sizes); + int l_count = layer_count(); + + weights.resize(l_count + 2); + max_lsize = 0; + + if( l_count > 0 ) + { + for( int i = 0; i < l_count; i++ ) + { + int n = layer_sizes[i]; + if( n < 1 + (0 < i && i < l_count-1)) + CV_Error( CV_StsOutOfRange, + "there should be at least one input and one output " + "and every hidden layer must have more than 1 neuron" ); + max_lsize = std::max( max_lsize, n ); + if( i > 0 ) + weights[i].create(layer_sizes[i-1]+1, n, CV_64F); + } + + int ninputs = layer_sizes.front(); + int noutputs = layer_sizes.back(); + weights[0].create(1, ninputs*2, CV_64F); + weights[l_count].create(1, noutputs*2, CV_64F); + weights[l_count+1].create(1, noutputs*2, CV_64F); + } + } + + float predict( InputArray _inputs, OutputArray _outputs, int ) const CV_OVERRIDE + { + if( !trained ) + CV_Error( CV_StsError, "The network has not been trained or loaded" ); + + Mat inputs = _inputs.getMat(); + int type = inputs.type(), l_count = layer_count(); + int n = inputs.rows, dn0 = n; + + CV_Assert( (type == CV_32F || type == CV_64F) && inputs.cols == layer_sizes[0] ); + int noutputs = layer_sizes[l_count-1]; + Mat outputs; + + int min_buf_sz = 2*max_lsize; + int buf_sz = n*min_buf_sz; + + if( buf_sz > max_buf_sz ) + { + dn0 = max_buf_sz/min_buf_sz; + dn0 = std::max( dn0, 1 ); + buf_sz = dn0*min_buf_sz; + } + + cv::AutoBuffer _buf(buf_sz+noutputs); + double* buf = _buf.data(); + + if( !_outputs.needed() ) + { + CV_Assert( n == 1 ); + outputs = Mat(n, noutputs, type, buf + buf_sz); + } + else + { + _outputs.create(n, noutputs, type); + outputs = _outputs.getMat(); + } + + int dn = 0; + for( int i = 0; i < n; i += dn ) + { + dn = std::min( dn0, n - i ); + + Mat layer_in = inputs.rowRange(i, i + dn); + Mat layer_out( dn, layer_in.cols, CV_64F, buf); + + scale_input( layer_in, layer_out ); + layer_in = layer_out; + + for( int j = 1; j < l_count; j++ ) + { + double* data = buf + ((j&1) ? 
max_lsize*dn0 : 0); + int cols = layer_sizes[j]; + + layer_out = Mat(dn, cols, CV_64F, data); + Mat w = weights[j].rowRange(0, layer_in.cols); + gemm(layer_in, w, 1, noArray(), 0, layer_out); + calc_activ_func( layer_out, weights[j] ); + + layer_in = layer_out; + } + + layer_out = outputs.rowRange(i, i + dn); + scale_output( layer_in, layer_out ); + } + + if( n == 1 ) + { + int maxIdx[] = {0, 0}; + minMaxIdx(outputs, 0, 0, 0, maxIdx); + return (float)(maxIdx[0] + maxIdx[1]); + } + + return 0.f; + } + + void scale_input( const Mat& _src, Mat& _dst ) const + { + int cols = _src.cols; + const double* w = weights[0].ptr(); + + if( _src.type() == CV_32F ) + { + for( int i = 0; i < _src.rows; i++ ) + { + const float* src = _src.ptr(i); + double* dst = _dst.ptr(i); + for( int j = 0; j < cols; j++ ) + dst[j] = src[j]*w[j*2] + w[j*2+1]; + } + } + else + { + for( int i = 0; i < _src.rows; i++ ) + { + const double* src = _src.ptr(i); + double* dst = _dst.ptr(i); + for( int j = 0; j < cols; j++ ) + dst[j] = src[j]*w[j*2] + w[j*2+1]; + } + } + } + + void scale_output( const Mat& _src, Mat& _dst ) const + { + int cols = _src.cols; + const double* w = weights[layer_count()].ptr(); + + if( _dst.type() == CV_32F ) + { + for( int i = 0; i < _src.rows; i++ ) + { + const double* src = _src.ptr(i); + float* dst = _dst.ptr(i); + for( int j = 0; j < cols; j++ ) + dst[j] = (float)(src[j]*w[j*2] + w[j*2+1]); + } + } + else + { + for( int i = 0; i < _src.rows; i++ ) + { + const double* src = _src.ptr(i); + double* dst = _dst.ptr(i); + for( int j = 0; j < cols; j++ ) + dst[j] = src[j]*w[j*2] + w[j*2+1]; + } + } + } + + void calc_activ_func(Mat& sums, const Mat& w) const + { + const double* bias = w.ptr(w.rows - 1); + int i, j, n = sums.rows, cols = sums.cols; + double scale = 0, scale2 = f_param2; + + switch (activ_func) + { + case IDENTITY: + scale = 1.; + break; + case SIGMOID_SYM: + scale = -f_param1; + break; + case GAUSSIAN: + scale = -f_param1*f_param1; + break; + case RELU: + scale = 1; + break; + case LEAKYRELU: + scale = 1; + break; + default: + ; + } + + CV_Assert(sums.isContinuous()); + + if (activ_func != GAUSSIAN) + { + for (i = 0; i < n; i++) + { + double* data = sums.ptr(i); + for (j = 0; j < cols; j++) + { + data[j] = (data[j] + bias[j])*scale; + if (activ_func == RELU) + if (data[j] < 0) + data[j] = 0; + if (activ_func == LEAKYRELU) + if (data[j] < 0) + data[j] *= f_param1; + } + } + + if (activ_func == IDENTITY || activ_func == RELU || activ_func == LEAKYRELU) + return; + } + else + { + for (i = 0; i < n; i++) + { + double* data = sums.ptr(i); + for (j = 0; j < cols; j++) + { + double t = data[j] + bias[j]; + data[j] = t*t*scale; + } + } + } + + exp(sums, sums); + + if (sums.isContinuous()) + { + cols *= n; + n = 1; + } + + switch (activ_func) + { + case SIGMOID_SYM: + for (i = 0; i < n; i++) + { + double* data = sums.ptr(i); + for (j = 0; j < cols; j++) + { + if (!cvIsInf(data[j])) + { + double t = scale2*(1. - data[j]) / (1. 
+ data[j]); + data[j] = t; + } + else + { + data[j] = -scale2; + } + } + } + break; + + case GAUSSIAN: + for (i = 0; i < n; i++) + { + double* data = sums.ptr(i); + for (j = 0; j < cols; j++) + data[j] = scale2*data[j]; + } + break; + + default: + ; + } + } + + void calc_activ_func_deriv(Mat& _xf, Mat& _df, const Mat& w) const + { + const double* bias = w.ptr(w.rows - 1); + int i, j, n = _xf.rows, cols = _xf.cols; + + if (activ_func == IDENTITY) + { + for (i = 0; i < n; i++) + { + double* xf = _xf.ptr(i); + double* df = _df.ptr(i); + + for (j = 0; j < cols; j++) + { + xf[j] += bias[j]; + df[j] = 1; + } + } + } + else if (activ_func == RELU) + { + for (i = 0; i < n; i++) + { + double* xf = _xf.ptr(i); + double* df = _df.ptr(i); + + for (j = 0; j < cols; j++) + { + xf[j] += bias[j]; + if (xf[j] < 0) + { + xf[j] = 0; + df[j] = 0; + } + else + df[j] = 1; + } + } + } + else if (activ_func == LEAKYRELU) + { + for (i = 0; i < n; i++) + { + double* xf = _xf.ptr(i); + double* df = _df.ptr(i); + + for (j = 0; j < cols; j++) + { + xf[j] += bias[j]; + if (xf[j] < 0) + { + xf[j] = f_param1*xf[j]; + df[j] = f_param1; + } + else + df[j] = 1; + } + } + } + else if (activ_func == GAUSSIAN) + { + double scale = -f_param1*f_param1; + double scale2 = scale*f_param2; + for (i = 0; i < n; i++) + { + double* xf = _xf.ptr(i); + double* df = _df.ptr(i); + + for (j = 0; j < cols; j++) + { + double t = xf[j] + bias[j]; + df[j] = t * 2 * scale2; + xf[j] = t*t*scale; + } + } + exp(_xf, _xf); + + for (i = 0; i < n; i++) + { + double* xf = _xf.ptr(i); + double* df = _df.ptr(i); + + for (j = 0; j < cols; j++) + df[j] *= xf[j]; + } + } + else + { + double scale = f_param1; + double scale2 = f_param2; + + for (i = 0; i < n; i++) + { + double* xf = _xf.ptr(i); + double* df = _df.ptr(i); + + for (j = 0; j < cols; j++) + { + xf[j] = (xf[j] + bias[j])*scale; + df[j] = -fabs(xf[j]); + } + } + + exp(_df, _df); + + // ((1+exp(-ax))^-1)'=a*((1+exp(-ax))^-2)*exp(-ax); + // ((1-exp(-ax))/(1+exp(-ax)))'=(a*exp(-ax)*(1+exp(-ax)) + a*exp(-ax)*(1-exp(-ax)))/(1+exp(-ax))^2= + // 2*a*exp(-ax)/(1+exp(-ax))^2 + scale *= 2 * f_param2; + for (i = 0; i < n; i++) + { + double* xf = _xf.ptr(i); + double* df = _df.ptr(i); + + for (j = 0; j < cols; j++) + { + int s0 = xf[j] > 0 ? 1 : -1; + double t0 = 1. / (1. + df[j]); + double t1 = scale*df[j] * t0*t0; + t0 *= scale2*(1. - df[j])*s0; + df[j] = t1; + xf[j] = t0; + } + } + } + } + + void calc_input_scale( const Mat& inputs, int flags ) + { + bool reset_weights = (flags & UPDATE_WEIGHTS) == 0; + bool no_scale = (flags & NO_INPUT_SCALE) != 0; + double* scale = weights[0].ptr(); + int count = inputs.rows; + + if( reset_weights ) + { + int i, j, vcount = layer_sizes[0]; + int type = inputs.type(); + double a = no_scale ? 1. : 0.; + + for( j = 0; j < vcount; j++ ) + scale[2*j] = a, scale[j*2+1] = 0.; + + if( no_scale ) + return; + + for( i = 0; i < count; i++ ) + { + const uchar* p = inputs.ptr(i); + const float* f = (const float*)p; + const double* d = (const double*)p; + for( j = 0; j < vcount; j++ ) + { + double t = type == CV_32F ? (double)f[j] : d[j]; + scale[j*2] += t; + scale[j*2+1] += t*t; + } + } + + for( j = 0; j < vcount; j++ ) + { + double s = scale[j*2], s2 = scale[j*2+1]; + double m = s/count, sigma2 = s2/count - m*m; + scale[j*2] = sigma2 < DBL_EPSILON ? 
1 : 1./sqrt(sigma2); + scale[j*2+1] = -m*scale[j*2]; + } + } + } + + void calc_output_scale( const Mat& outputs, int flags ) + { + int i, j, vcount = layer_sizes.back(); + int type = outputs.type(); + double m = min_val, M = max_val, m1 = min_val1, M1 = max_val1; + bool reset_weights = (flags & UPDATE_WEIGHTS) == 0; + bool no_scale = (flags & NO_OUTPUT_SCALE) != 0; + int l_count = layer_count(); + double* scale = weights[l_count].ptr(); + double* inv_scale = weights[l_count+1].ptr(); + int count = outputs.rows; + + if( reset_weights ) + { + double a0 = no_scale ? 1 : DBL_MAX, b0 = no_scale ? 0 : -DBL_MAX; + + for( j = 0; j < vcount; j++ ) + { + scale[2*j] = inv_scale[2*j] = a0; + scale[j*2+1] = inv_scale[2*j+1] = b0; + } + + if( no_scale ) + return; + } + + for( i = 0; i < count; i++ ) + { + const uchar* p = outputs.ptr(i); + const float* f = (const float*)p; + const double* d = (const double*)p; + + for( j = 0; j < vcount; j++ ) + { + double t = type == CV_32F ? (double)f[j] : d[j]; + + if( reset_weights ) + { + double mj = scale[j*2], Mj = scale[j*2+1]; + if( mj > t ) mj = t; + if( Mj < t ) Mj = t; + + scale[j*2] = mj; + scale[j*2+1] = Mj; + } + else if( !no_scale ) + { + t = t*inv_scale[j*2] + inv_scale[2*j+1]; + if( t < m1 || t > M1 ) + CV_Error( CV_StsOutOfRange, + "Some of new output training vector components run exceed the original range too much" ); + } + } + } + + if( reset_weights ) + for( j = 0; j < vcount; j++ ) + { + // map mj..Mj to m..M + double mj = scale[j*2], Mj = scale[j*2+1]; + double a, b; + double delta = Mj - mj; + if( delta < DBL_EPSILON ) + a = 1, b = (M + m - Mj - mj)*0.5; + else + a = (M - m)/delta, b = m - mj*a; + inv_scale[j*2] = a; inv_scale[j*2+1] = b; + a = 1./a; b = -b*a; + scale[j*2] = a; scale[j*2+1] = b; + } + } + + void prepare_to_train( const Mat& inputs, const Mat& outputs, + Mat& sample_weights, int flags ) + { + if( layer_sizes.empty() ) + CV_Error( CV_StsError, + "The network has not been created. Use method create or the appropriate constructor" ); + + if( (inputs.type() != CV_32F && inputs.type() != CV_64F) || + inputs.cols != layer_sizes[0] ) + CV_Error( CV_StsBadArg, + "input training data should be a floating-point matrix with " + "the number of rows equal to the number of training samples and " + "the number of columns equal to the size of 0-th (input) layer" ); + + if( (outputs.type() != CV_32F && outputs.type() != CV_64F) || + outputs.cols != layer_sizes.back() ) + CV_Error( CV_StsBadArg, + "output training data should be a floating-point matrix with " + "the number of rows equal to the number of training samples and " + "the number of columns equal to the size of last (output) layer" ); + + if( inputs.rows != outputs.rows ) + CV_Error( CV_StsUnmatchedSizes, "The numbers of input and output samples do not match" ); + + Mat temp; + double s = sum(sample_weights)[0]; + sample_weights.convertTo(temp, CV_64F, 1./s); + sample_weights = temp; + + calc_input_scale( inputs, flags ); + calc_output_scale( outputs, flags ); + } + + bool train( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_Assert(!trainData.empty()); + const int MAX_ITER = 1000; + const double DEFAULT_EPSILON = FLT_EPSILON; + + // initialize training data + Mat inputs = trainData->getTrainSamples(); + Mat outputs = trainData->getTrainResponses(); + Mat sw = trainData->getTrainSampleWeights(); + prepare_to_train( inputs, outputs, sw, flags ); + + // ... 
and link weights + if( !(flags & UPDATE_WEIGHTS) ) + init_weights(); + + TermCriteria termcrit; + termcrit.type = TermCriteria::COUNT + TermCriteria::EPS; + termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1); + termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON); + + int iter = 0; + switch(params.trainMethod){ + case ANN_MLP::BACKPROP: + iter = train_backprop(inputs, outputs, sw, termcrit); + break; + case ANN_MLP::RPROP: + iter = train_rprop(inputs, outputs, sw, termcrit); + break; + case ANN_MLP::ANNEAL: + iter = train_anneal(trainData); + break; + } + trained = iter > 0; + return trained; + } + int train_anneal(const Ptr& trainData) + { + CV_Assert(!trainData.empty()); + SimulatedAnnealingANN_MLP s(*this, trainData); + trained = true; // Enable call to CalcError + int iter = simulatedAnnealingSolver(s, params.initialT, params.finalT, params.coolingRatio, params.itePerStep, NULL, params.rEnergy); + trained =false; + return iter + 1; // ensure that 'train()' call is always successful + } + + int train_backprop( const Mat& inputs, const Mat& outputs, const Mat& _sw, TermCriteria termCrit ) + { + int i, j, k; + double prev_E = DBL_MAX*0.5, E = 0; + int itype = inputs.type(), otype = outputs.type(); + + int count = inputs.rows; + + int iter = -1, max_iter = termCrit.maxCount*count; + double epsilon = (termCrit.type & CV_TERMCRIT_EPS) ? termCrit.epsilon*count : 0; + + int l_count = layer_count(); + int ivcount = layer_sizes[0]; + int ovcount = layer_sizes.back(); + + // allocate buffers + vector > x(l_count); + vector > df(l_count); + vector dw(l_count); + + for( i = 0; i < l_count; i++ ) + { + int n = layer_sizes[i]; + x[i].resize(n+1); + df[i].resize(n); + dw[i] = Mat::zeros(weights[i].size(), CV_64F); + } + + Mat _idx_m(1, count, CV_32S); + int* _idx = _idx_m.ptr(); + for( i = 0; i < count; i++ ) + _idx[i] = i; + + AutoBuffer _buf(max_lsize*2); + double* buf[] = { _buf.data(), _buf.data() + max_lsize }; + + const double* sw = _sw.empty() ? 0 : _sw.ptr(); + + // run back-propagation loop + /* + y_i = w_i*x_{i-1} + x_i = f(y_i) + E = 1/2*||u - x_N||^2 + grad_N = (x_N - u)*f'(y_i) + dw_i(t) = momentum*dw_i(t-1) + dw_scale*x_{i-1}*grad_i + w_i(t+1) = w_i(t) + dw_i(t) + grad_{i-1} = w_i^t*grad_i + */ + for( iter = 0; iter < max_iter; iter++ ) + { + int idx = iter % count; + double sweight = sw ? count*sw[idx] : 1.; + + if( idx == 0 ) + { + //printf("%d. E = %g\n", iter/count, E); + if( fabs(prev_E - E) < epsilon ) + break; + prev_E = E; + E = 0; + + // shuffle indices + for( i = 0; i (); + for( j = 0; j < ivcount; j++ ) + x[0][j] = (itype == CV_32F ? (double)x0data_f[j] : x0data_d[j])*w[j*2] + w[j*2 + 1]; + + Mat x1( 1, ivcount, CV_64F, &x[0][0] ); + + // forward pass, compute y[i]=w*x[i-1], x[i]=f(y[i]), df[i]=f'(y[i]) + for( i = 1; i < l_count; i++ ) + { + int n = layer_sizes[i]; + Mat x2(1, n, CV_64F, &x[i][0] ); + Mat _w = weights[i].rowRange(0, x1.cols); + gemm(x1, _w, 1, noArray(), 0, x2); + Mat _df(1, n, CV_64F, &df[i][0] ); + calc_activ_func_deriv( x2, _df, weights[i] ); + x1 = x2; + } + + Mat grad1( 1, ovcount, CV_64F, buf[l_count&1] ); + w = weights[l_count+1].ptr(); + + // calculate error + const uchar* udata_p = outputs.ptr(idx); + const float* udata_f = (const float*)udata_p; + const double* udata_d = (const double*)udata_p; + + double* gdata = grad1.ptr(); + for( k = 0; k < ovcount; k++ ) + { + double t = (otype == CV_32F ? 
(double)udata_f[k] : udata_d[k])*w[k*2] + w[k*2+1] - x[l_count-1][k]; + gdata[k] = t*sweight; + E += t*t; + } + E *= sweight; + + // backward pass, update weights + for( i = l_count-1; i > 0; i-- ) + { + int n1 = layer_sizes[i-1], n2 = layer_sizes[i]; + Mat _df(1, n2, CV_64F, &df[i][0]); + multiply( grad1, _df, grad1 ); + Mat _x(n1+1, 1, CV_64F, &x[i-1][0]); + x[i-1][n1] = 1.; + gemm( _x, grad1, params.bpDWScale, dw[i], params.bpMomentScale, dw[i] ); + add( weights[i], dw[i], weights[i] ); + if( i > 1 ) + { + Mat grad2(1, n1, CV_64F, buf[i&1]); + Mat _w = weights[i].rowRange(0, n1); + gemm( grad1, _w, 1, noArray(), 0, grad2, GEMM_2_T ); + grad1 = grad2; + } + } + } + + iter /= count; + return iter; + } + + struct RPropLoop : public ParallelLoopBody + { + RPropLoop(ANN_MLPImpl* _ann, + const Mat& _inputs, const Mat& _outputs, const Mat& _sw, + int _dcount0, vector& _dEdw, double* _E) + { + ann = _ann; + inputs = _inputs; + outputs = _outputs; + sw = _sw.ptr(); + dcount0 = _dcount0; + dEdw = &_dEdw; + pE = _E; + } + + ANN_MLPImpl* ann; + vector* dEdw; + Mat inputs, outputs; + const double* sw; + int dcount0; + double* pE; + + void operator()(const Range& range) const CV_OVERRIDE + { + double inv_count = 1./inputs.rows; + int ivcount = ann->layer_sizes.front(); + int ovcount = ann->layer_sizes.back(); + int itype = inputs.type(), otype = outputs.type(); + int count = inputs.rows; + int i, j, k, l_count = ann->layer_count(); + vector > x(l_count); + vector > df(l_count); + vector _buf(ann->max_lsize*dcount0*2); + double* buf[] = { &_buf[0], &_buf[ann->max_lsize*dcount0] }; + double E = 0; + + for( i = 0; i < l_count; i++ ) + { + x[i].resize(ann->layer_sizes[i]*dcount0); + df[i].resize(ann->layer_sizes[i]*dcount0); + } + + for( int si = range.start; si < range.end; si++ ) + { + int i0 = si*dcount0, i1 = std::min((si + 1)*dcount0, count); + int dcount = i1 - i0; + const double* w = ann->weights[0].ptr(); + + // grab and preprocess input data + for( i = 0; i < dcount; i++ ) + { + const uchar* x0data_p = inputs.ptr(i0 + i); + const float* x0data_f = (const float*)x0data_p; + const double* x0data_d = (const double*)x0data_p; + + double* xdata = &x[0][i*ivcount]; + for( j = 0; j < ivcount; j++ ) + xdata[j] = (itype == CV_32F ? (double)x0data_f[j] : x0data_d[j])*w[j*2] + w[j*2+1]; + } + Mat x1(dcount, ivcount, CV_64F, &x[0][0]); + + // forward pass, compute y[i]=w*x[i-1], x[i]=f(y[i]), df[i]=f'(y[i]) + for( i = 1; i < l_count; i++ ) + { + Mat x2( dcount, ann->layer_sizes[i], CV_64F, &x[i][0] ); + Mat _w = ann->weights[i].rowRange(0, x1.cols); + gemm( x1, _w, 1, noArray(), 0, x2 ); + Mat _df( x2.size(), CV_64F, &df[i][0] ); + ann->calc_activ_func_deriv( x2, _df, ann->weights[i] ); + x1 = x2; + } + + Mat grad1(dcount, ovcount, CV_64F, buf[l_count & 1]); + + w = ann->weights[l_count+1].ptr(); + + // calculate error + for( i = 0; i < dcount; i++ ) + { + const uchar* udata_p = outputs.ptr(i0+i); + const float* udata_f = (const float*)udata_p; + const double* udata_d = (const double*)udata_p; + + const double* xdata = &x[l_count-1][i*ovcount]; + double* gdata = grad1.ptr(i); + double sweight = sw ? sw[si+i] : inv_count, E1 = 0; + + for( j = 0; j < ovcount; j++ ) + { + double t = (otype == CV_32F ? 
(double)udata_f[j] : udata_d[j])*w[j*2] + w[j*2+1] - xdata[j]; + gdata[j] = t*sweight; + E1 += t*t; + } + E += sweight*E1; + } + + for( i = l_count-1; i > 0; i-- ) + { + int n1 = ann->layer_sizes[i-1], n2 = ann->layer_sizes[i]; + Mat _df(dcount, n2, CV_64F, &df[i][0]); + multiply(grad1, _df, grad1); + + { + AutoLock lock(ann->mtx); + Mat _dEdw = dEdw->at(i).rowRange(0, n1); + x1 = Mat(dcount, n1, CV_64F, &x[i-1][0]); + gemm(x1, grad1, 1, _dEdw, 1, _dEdw, GEMM_1_T); + + // update bias part of dEdw + double* dst = dEdw->at(i).ptr(n1); + for( k = 0; k < dcount; k++ ) + { + const double* src = grad1.ptr(k); + for( j = 0; j < n2; j++ ) + dst[j] += src[j]; + } + } + + Mat grad2( dcount, n1, CV_64F, buf[i&1] ); + if( i > 1 ) + { + Mat _w = ann->weights[i].rowRange(0, n1); + gemm(grad1, _w, 1, noArray(), 0, grad2, GEMM_2_T); + } + grad1 = grad2; + } + } + { + AutoLock lock(ann->mtx); + *pE += E; + } + } + }; + + int train_rprop( const Mat& inputs, const Mat& outputs, const Mat& _sw, TermCriteria termCrit ) + { + const int max_buf_size = 1 << 16; + int i, iter = -1, count = inputs.rows; + + double prev_E = DBL_MAX*0.5; + + int max_iter = termCrit.maxCount; + double epsilon = termCrit.epsilon; + double dw_plus = params.rpDWPlus; + double dw_minus = params.rpDWMinus; + double dw_min = params.rpDWMin; + double dw_max = params.rpDWMax; + + int l_count = layer_count(); + + // allocate buffers + vector dw(l_count), dEdw(l_count), prev_dEdw_sign(l_count); + + int total = 0; + for( i = 0; i < l_count; i++ ) + { + total += layer_sizes[i]; + dw[i].create(weights[i].size(), CV_64F); + dw[i].setTo(Scalar::all(params.rpDW0)); + prev_dEdw_sign[i] = Mat::zeros(weights[i].size(), CV_8S); + dEdw[i] = Mat::zeros(weights[i].size(), CV_64F); + } + CV_Assert(total > 0); + int dcount0 = max_buf_size/(2*total); + dcount0 = std::max( dcount0, 1 ); + dcount0 = std::min( dcount0, count ); + int chunk_count = (count + dcount0 - 1)/dcount0; + + // run rprop loop + /* + y_i(t) = w_i(t)*x_{i-1}(t) + x_i(t) = f(y_i(t)) + E = sum_over_all_samples(1/2*||u - x_N||^2) + grad_N = (x_N - u)*f'(y_i) + + std::min(dw_i{jk}(t)*dw_plus, dw_max), if dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) > 0 + dw_i{jk}(t) = std::max(dw_i{jk}(t)*dw_minus, dw_min), if dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) < 0 + dw_i{jk}(t-1) else + + if (dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) < 0) + dE/dw_i{jk}(t)<-0 + else + w_i{jk}(t+1) = w_i{jk}(t) + dw_i{jk}(t) + grad_{i-1}(t) = w_i^t(t)*grad_i(t) + */ + for( iter = 0; iter < max_iter; iter++ ) + { + double E = 0; + + for( i = 0; i < l_count; i++ ) + dEdw[i].setTo(Scalar::all(0)); + + // first, iterate through all the samples and compute dEdw + RPropLoop invoker(this, inputs, outputs, _sw, dcount0, dEdw, &E); + parallel_for_(Range(0, chunk_count), invoker); + //invoker(Range(0, chunk_count)); + + // now update weights + for( i = 1; i < l_count; i++ ) + { + int n1 = layer_sizes[i-1], n2 = layer_sizes[i]; + for( int k = 0; k <= n1; k++ ) + { + CV_Assert(weights[i].size() == Size(n2, n1+1)); + double* wk = weights[i].ptr(k); + double* dwk = dw[i].ptr(k); + double* dEdwk = dEdw[i].ptr(k); + schar* prevEk = prev_dEdw_sign[i].ptr(k); + + for( int j = 0; j < n2; j++ ) + { + double Eval = dEdwk[j]; + double dval = dwk[j]; + double wval = wk[j]; + int s = CV_SIGN(Eval); + int ss = prevEk[j]*s; + if( ss > 0 ) + { + dval *= dw_plus; + dval = std::min( dval, dw_max ); + dwk[j] = dval; + wk[j] = wval + dval*s; + } + else if( ss < 0 ) + { + dval *= dw_minus; + dval = std::max( dval, dw_min ); + prevEk[j] = 0; + dwk[j] = dval; + wk[j] = wval + dval*s; + } 
+ else + { + prevEk[j] = (schar)s; + wk[j] = wval + dval*s; + } + dEdwk[j] = 0.; + } + } + } + + //printf("%d. E = %g\n", iter, E); + if( fabs(prev_E - E) < epsilon ) + break; + prev_E = E; + } + + return iter; + } + + void write_params( FileStorage& fs ) const + { + const char* activ_func_name = activ_func == IDENTITY ? "IDENTITY" : + activ_func == SIGMOID_SYM ? "SIGMOID_SYM" : + activ_func == GAUSSIAN ? "GAUSSIAN" : + activ_func == RELU ? "RELU" : + activ_func == LEAKYRELU ? "LEAKYRELU" : 0; + + if( activ_func_name ) + fs << "activation_function" << activ_func_name; + else + fs << "activation_function_id" << activ_func; + + if( activ_func != IDENTITY ) + { + fs << "f_param1" << f_param1; + fs << "f_param2" << f_param2; + } + + fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1; + + fs << "training_params" << "{"; + if( params.trainMethod == ANN_MLP::BACKPROP ) + { + fs << "train_method" << "BACKPROP"; + fs << "dw_scale" << params.bpDWScale; + fs << "moment_scale" << params.bpMomentScale; + } + else if (params.trainMethod == ANN_MLP::RPROP) + { + fs << "train_method" << "RPROP"; + fs << "dw0" << params.rpDW0; + fs << "dw_plus" << params.rpDWPlus; + fs << "dw_minus" << params.rpDWMinus; + fs << "dw_min" << params.rpDWMin; + fs << "dw_max" << params.rpDWMax; + } + else if (params.trainMethod == ANN_MLP::ANNEAL) + { + fs << "train_method" << "ANNEAL"; + fs << "initialT" << params.initialT; + fs << "finalT" << params.finalT; + fs << "coolingRatio" << params.coolingRatio; + fs << "itePerStep" << params.itePerStep; + } + else + CV_Error(CV_StsError, "Unknown training method"); + + fs << "term_criteria" << "{"; + if( params.termCrit.type & TermCriteria::EPS ) + fs << "epsilon" << params.termCrit.epsilon; + if( params.termCrit.type & TermCriteria::COUNT ) + fs << "iterations" << params.termCrit.maxCount; + fs << "}" << "}"; + } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + if( layer_sizes.empty() ) + return; + int i, l_count = layer_count(); + + writeFormat(fs); + fs << "layer_sizes" << layer_sizes; + + write_params( fs ); + + size_t esz = weights[0].elemSize(); + + fs << "input_scale" << "["; + fs.writeRaw("d", weights[0].ptr(), weights[0].total()*esz); + + fs << "]" << "output_scale" << "["; + fs.writeRaw("d", weights[l_count].ptr(), weights[l_count].total()*esz); + + fs << "]" << "inv_output_scale" << "["; + fs.writeRaw("d", weights[l_count+1].ptr(), weights[l_count+1].total()*esz); + + fs << "]" << "weights" << "["; + for( i = 1; i < l_count; i++ ) + { + fs << "["; + fs.writeRaw("d", weights[i].ptr(), weights[i].total()*esz); + fs << "]"; + } + fs << "]"; + } + + void read_params( const FileNode& fn ) + { + String activ_func_name = (String)fn["activation_function"]; + if( !activ_func_name.empty() ) + { + activ_func = activ_func_name == "SIGMOID_SYM" ? SIGMOID_SYM : + activ_func_name == "IDENTITY" ? IDENTITY : + activ_func_name == "RELU" ? RELU : + activ_func_name == "LEAKYRELU" ? LEAKYRELU : + activ_func_name == "GAUSSIAN" ? 
GAUSSIAN : -1; + CV_Assert( activ_func >= 0 ); + } + else + activ_func = (int)fn["activation_function_id"]; + + f_param1 = (double)fn["f_param1"]; + f_param2 = (double)fn["f_param2"]; + + setActivationFunction( activ_func, f_param1, f_param2); + + min_val = (double)fn["min_val"]; + max_val = (double)fn["max_val"]; + min_val1 = (double)fn["min_val1"]; + max_val1 = (double)fn["max_val1"]; + + FileNode tpn = fn["training_params"]; + params = AnnParams(); + + if( !tpn.empty() ) + { + String tmethod_name = (String)tpn["train_method"]; + + if( tmethod_name == "BACKPROP" ) + { + params.trainMethod = ANN_MLP::BACKPROP; + params.bpDWScale = (double)tpn["dw_scale"]; + params.bpMomentScale = (double)tpn["moment_scale"]; + } + else if (tmethod_name == "RPROP") + { + params.trainMethod = ANN_MLP::RPROP; + params.rpDW0 = (double)tpn["dw0"]; + params.rpDWPlus = (double)tpn["dw_plus"]; + params.rpDWMinus = (double)tpn["dw_minus"]; + params.rpDWMin = (double)tpn["dw_min"]; + params.rpDWMax = (double)tpn["dw_max"]; + } + else if (tmethod_name == "ANNEAL") + { + params.trainMethod = ANN_MLP::ANNEAL; + params.initialT = (double)tpn["initialT"]; + params.finalT = (double)tpn["finalT"]; + params.coolingRatio = (double)tpn["coolingRatio"]; + params.itePerStep = tpn["itePerStep"]; + } + else + CV_Error(CV_StsParseError, "Unknown training method (should be BACKPROP or RPROP)"); + + FileNode tcn = tpn["term_criteria"]; + if( !tcn.empty() ) + { + FileNode tcn_e = tcn["epsilon"]; + FileNode tcn_i = tcn["iterations"]; + params.termCrit.type = 0; + if( !tcn_e.empty() ) + { + params.termCrit.type |= TermCriteria::EPS; + params.termCrit.epsilon = (double)tcn_e; + } + if( !tcn_i.empty() ) + { + params.termCrit.type |= TermCriteria::COUNT; + params.termCrit.maxCount = (int)tcn_i; + } + } + } + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + clear(); + + vector _layer_sizes; + readVectorOrMat(fn["layer_sizes"], _layer_sizes); + setLayerSizes( _layer_sizes ); + + int i, l_count = layer_count(); + read_params(fn); + + size_t esz = weights[0].elemSize(); + + FileNode w = fn["input_scale"]; + w.readRaw("d", weights[0].ptr(), weights[0].total()*esz); + + w = fn["output_scale"]; + w.readRaw("d", weights[l_count].ptr(), weights[l_count].total()*esz); + + w = fn["inv_output_scale"]; + w.readRaw("d", weights[l_count+1].ptr(), weights[l_count+1].total()*esz); + + FileNodeIterator w_it = fn["weights"].begin(); + + for( i = 1; i < l_count; i++, ++w_it ) + (*w_it).readRaw("d", weights[i].ptr(), weights[i].total()*esz); + trained = true; + } + + Mat getWeights(int layerIdx) const CV_OVERRIDE + { + CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() ); + return weights[layerIdx]; + } + + bool isTrained() const CV_OVERRIDE + { + return trained; + } + + bool isClassifier() const CV_OVERRIDE + { + return false; + } + + int getVarCount() const CV_OVERRIDE + { + return layer_sizes.empty() ? 
0 : layer_sizes[0]; + } + + String getDefaultName() const CV_OVERRIDE + { + return "opencv_ml_ann_mlp"; + } + + vector layer_sizes; + vector weights; + double f_param1, f_param2; + double min_val, max_val, min_val1, max_val1; + int activ_func; + int max_lsize, max_buf_sz; + AnnParams params; + RNG rng; + Mutex mtx; + bool trained; +}; + + + + +Ptr ANN_MLP::create() +{ + return makePtr(); +} + +Ptr ANN_MLP::load(const String& filepath) +{ + FileStorage fs; + fs.open(filepath, FileStorage::READ); + CV_Assert(fs.isOpened()); + Ptr ann = makePtr(); + ((ANN_MLPImpl*)ann.get())->read(fs.getFirstTopLevelNode()); + return ann; +} + +}} + +/* End of file. */ diff --git a/modules/ml/src/boost.cpp b/modules/ml/src/boost.cpp new file mode 100644 index 00000000000..be9c9a7b467 --- /dev/null +++ b/modules/ml/src/boost.cpp @@ -0,0 +1,533 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +namespace cv { namespace ml { + +static inline double +log_ratio( double val ) +{ + const double eps = 1e-5; + val = std::max( val, eps ); + val = std::min( val, 1. - eps ); + return log( val/(1. 
- val) ); +} + + +BoostTreeParams::BoostTreeParams() +{ + boostType = Boost::REAL; + weakCount = 100; + weightTrimRate = 0.95; +} + +BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count, + double _weightTrimRate) +{ + boostType = _boostType; + weakCount = _weak_count; + weightTrimRate = _weightTrimRate; +} + +class DTreesImplForBoost CV_FINAL : public DTreesImpl +{ +public: + DTreesImplForBoost() + { + params.setCVFolds(0); + params.setMaxDepth(1); + } + virtual ~DTreesImplForBoost() {} + + bool isClassifier() const CV_OVERRIDE { return true; } + + void clear() CV_OVERRIDE + { + DTreesImpl::clear(); + } + + void startTraining( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_Assert(!trainData.empty()); + DTreesImpl::startTraining(trainData, flags); + sumResult.assign(w->sidx.size(), 0.); + + if( bparams.boostType != Boost::DISCRETE ) + { + _isClassifier = false; + int i, n = (int)w->cat_responses.size(); + w->ord_responses.resize(n); + + double a = -1, b = 1; + if( bparams.boostType == Boost::LOGIT ) + { + a = -2, b = 2; + } + for( i = 0; i < n; i++ ) + w->ord_responses[i] = w->cat_responses[i] > 0 ? b : a; + } + + normalizeWeights(); + } + + void normalizeWeights() + { + int i, n = (int)w->sidx.size(); + double sumw = 0, a, b; + for( i = 0; i < n; i++ ) + sumw += w->sample_weights[w->sidx[i]]; + if( sumw > DBL_EPSILON ) + { + a = 1./sumw; + b = 0; + } + else + { + a = 0; + b = 1; + } + for( i = 0; i < n; i++ ) + { + double& wval = w->sample_weights[w->sidx[i]]; + wval = wval*a + b; + } + } + + void endTraining() CV_OVERRIDE + { + DTreesImpl::endTraining(); + vector e; + std::swap(sumResult, e); + } + + void scaleTree( int root, double scale ) + { + int nidx = root, pidx = 0; + Node *node = 0; + + // traverse the tree and save all the nodes in depth-first order + for(;;) + { + for(;;) + { + node = &nodes[nidx]; + node->value *= scale; + if( node->left < 0 ) + break; + nidx = node->left; + } + + for( pidx = node->parent; pidx >= 0 && nodes[pidx].right == nidx; + nidx = pidx, pidx = nodes[pidx].parent ) + ; + + if( pidx < 0 ) + break; + + nidx = nodes[pidx].right; + } + } + + void calcValue( int nidx, const vector& _sidx ) CV_OVERRIDE + { + DTreesImpl::calcValue(nidx, _sidx); + WNode* node = &w->wnodes[nidx]; + if( bparams.boostType == Boost::DISCRETE ) + { + node->value = node->class_idx == 0 ? -1 : 1; + } + else if( bparams.boostType == Boost::REAL ) + { + double p = (node->value+1)*0.5; + node->value = 0.5*log_ratio(p); + } + } + + bool train( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_Assert(!trainData.empty()); + startTraining(trainData, flags); + int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000; + vector sidx = w->sidx; + + for( treeidx = 0; treeidx < ntrees; treeidx++ ) + { + int root = addTree( sidx ); + if( root < 0 ) + return false; + updateWeightsAndTrim( treeidx, sidx ); + } + endTraining(); + return true; + } + + void updateWeightsAndTrim( int treeidx, vector& sidx ) + { + int i, n = (int)w->sidx.size(); + int nvars = (int)varIdx.size(); + double sumw = 0., C = 1.; + cv::AutoBuffer buf(n + nvars); + double* result = buf.data(); + float* sbuf = (float*)(result + n); + Mat sample(1, nvars, CV_32F, sbuf); + int predictFlags = bparams.boostType == Boost::DISCRETE ? 
(PREDICT_MAX_VOTE | RAW_OUTPUT) : PREDICT_SUM; + predictFlags |= COMPRESSED_INPUT; + + for( i = 0; i < n; i++ ) + { + w->data->getSample(varIdx, w->sidx[i], sbuf ); + result[i] = predictTrees(Range(treeidx, treeidx+1), sample, predictFlags); + } + + // now update weights and other parameters for each type of boosting + if( bparams.boostType == Boost::DISCRETE ) + { + // Discrete AdaBoost: + // weak_eval[i] (=f(x_i)) is in {-1,1} + // err = sum(w_i*(f(x_i) != y_i))/sum(w_i) + // C = log((1-err)/err) + // w_i *= exp(C*(f(x_i) != y_i)) + double err = 0.; + + for( i = 0; i < n; i++ ) + { + int si = w->sidx[i]; + double wval = w->sample_weights[si]; + sumw += wval; + err += wval*(result[i] != w->cat_responses[si]); + } + + if( sumw != 0 ) + err /= sumw; + C = -log_ratio( err ); + double scale = std::exp(C); + + sumw = 0; + for( i = 0; i < n; i++ ) + { + int si = w->sidx[i]; + double wval = w->sample_weights[si]; + if( result[i] != w->cat_responses[si] ) + wval *= scale; + sumw += wval; + w->sample_weights[si] = wval; + } + + scaleTree(roots[treeidx], C); + } + else if( bparams.boostType == Boost::REAL || bparams.boostType == Boost::GENTLE ) + { + // Real AdaBoost: + // weak_eval[i] = f(x_i) = 0.5*log(p(x_i)/(1-p(x_i))), p(x_i)=P(y=1|x_i) + // w_i *= exp(-y_i*f(x_i)) + + // Gentle AdaBoost: + // weak_eval[i] = f(x_i) in [-1,1] + // w_i *= exp(-y_i*f(x_i)) + for( i = 0; i < n; i++ ) + { + int si = w->sidx[i]; + CV_Assert( std::abs(w->ord_responses[si]) == 1 ); + double wval = w->sample_weights[si]*std::exp(-result[i]*w->ord_responses[si]); + sumw += wval; + w->sample_weights[si] = wval; + } + } + else if( bparams.boostType == Boost::LOGIT ) + { + // LogitBoost: + // weak_eval[i] = f(x_i) in [-z_max,z_max] + // sum_response = F(x_i). + // F(x_i) += 0.5*f(x_i) + // p(x_i) = exp(F(x_i))/(exp(F(x_i)) + exp(-F(x_i))=1/(1+exp(-2*F(x_i))) + // reuse weak_eval: weak_eval[i] <- p(x_i) + // w_i = p(x_i)*1(1 - p(x_i)) + // z_i = ((y_i+1)/2 - p(x_i))/(p(x_i)*(1 - p(x_i))) + // store z_i to the data->data_root as the new target responses + const double lb_weight_thresh = FLT_EPSILON; + const double lb_z_max = 10.; + + for( i = 0; i < n; i++ ) + { + int si = w->sidx[i]; + sumResult[i] += 0.5*result[i]; + double p = 1./(1 + std::exp(-2*sumResult[i])); + double wval = std::max( p*(1 - p), lb_weight_thresh ), z; + w->sample_weights[si] = wval; + sumw += wval; + if( w->ord_responses[si] > 0 ) + { + z = 1./p; + w->ord_responses[si] = std::min(z, lb_z_max); + } + else + { + z = 1./(1-p); + w->ord_responses[si] = -std::min(z, lb_z_max); + } + } + } + else + CV_Error(CV_StsNotImplemented, "Unknown boosting type"); + + /*if( bparams.boostType != Boost::LOGIT ) + { + double err = 0; + for( i = 0; i < n; i++ ) + { + sumResult[i] += result[i]*C; + if( bparams.boostType != Boost::DISCRETE ) + err += sumResult[i]*w->ord_responses[w->sidx[i]] < 0; + else + err += sumResult[i]*w->cat_responses[w->sidx[i]] < 0; + } + printf("%d trees. C=%.2f, training error=%.1f%%, working set size=%d (out of %d)\n", (int)roots.size(), C, err*100./n, (int)sidx.size(), n); + }*/ + + // renormalize weights + if( sumw > FLT_EPSILON ) + normalizeWeights(); + + if( bparams.weightTrimRate <= 0. || bparams.weightTrimRate >= 1. ) + return; + + for( i = 0; i < n; i++ ) + result[i] = w->sample_weights[w->sidx[i]]; + std::sort(result, result + n); + + // as weight trimming occurs immediately after updating the weights, + // where they are renormalized, we assume that the weight sum = 1. + sumw = 1. 
- bparams.weightTrimRate; + + for( i = 0; i < n; i++ ) + { + double wval = result[i]; + if( sumw <= 0 ) + break; + sumw -= wval; + } + + double threshold = i < n ? result[i] : DBL_MAX; + sidx.clear(); + + for( i = 0; i < n; i++ ) + { + int si = w->sidx[i]; + if( w->sample_weights[si] >= threshold ) + sidx.push_back(si); + } + } + + float predictTrees( const Range& range, const Mat& sample, int flags0 ) const CV_OVERRIDE + { + int flags = (flags0 & ~PREDICT_MASK) | PREDICT_SUM; + float val = DTreesImpl::predictTrees(range, sample, flags); + if( flags != flags0 ) + { + int ival = (int)(val > 0); + if( !(flags0 & RAW_OUTPUT) ) + ival = classLabels[ival]; + val = (float)ival; + } + return val; + } + + void writeTrainingParams( FileStorage& fs ) const CV_OVERRIDE + { + fs << "boosting_type" << + (bparams.boostType == Boost::DISCRETE ? "DiscreteAdaboost" : + bparams.boostType == Boost::REAL ? "RealAdaboost" : + bparams.boostType == Boost::LOGIT ? "LogitBoost" : + bparams.boostType == Boost::GENTLE ? "GentleAdaboost" : "Unknown"); + + DTreesImpl::writeTrainingParams(fs); + fs << "weight_trimming_rate" << bparams.weightTrimRate; + } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + if( roots.empty() ) + CV_Error( CV_StsBadArg, "RTrees have not been trained" ); + + writeFormat(fs); + writeParams(fs); + + int k, ntrees = (int)roots.size(); + + fs << "ntrees" << ntrees + << "trees" << "["; + + for( k = 0; k < ntrees; k++ ) + { + fs << "{"; + writeTree(fs, roots[k]); + fs << "}"; + } + + fs << "]"; + } + + void readParams( const FileNode& fn ) CV_OVERRIDE + { + DTreesImpl::readParams(fn); + + FileNode tparams_node = fn["training_params"]; + // check for old layout + String bts = (String)(fn["boosting_type"].empty() ? + tparams_node["boosting_type"] : fn["boosting_type"]); + bparams.boostType = (bts == "DiscreteAdaboost" ? Boost::DISCRETE : + bts == "RealAdaboost" ? Boost::REAL : + bts == "LogitBoost" ? Boost::LOGIT : + bts == "GentleAdaboost" ? Boost::GENTLE : -1); + _isClassifier = bparams.boostType == Boost::DISCRETE; + // check for old layout + bparams.weightTrimRate = (double)(fn["weight_trimming_rate"].empty() ? 
+ tparams_node["weight_trimming_rate"] : fn["weight_trimming_rate"]); + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + clear(); + + int ntrees = (int)fn["ntrees"]; + readParams(fn); + + FileNode trees_node = fn["trees"]; + FileNodeIterator it = trees_node.begin(); + CV_Assert( ntrees == (int)trees_node.size() ); + + for( int treeidx = 0; treeidx < ntrees; treeidx++, ++it ) + { + FileNode nfn = (*it)["nodes"]; + readTree(nfn); + } + } + + BoostTreeParams bparams; + vector sumResult; +}; + + +class BoostImpl : public Boost +{ +public: + BoostImpl() {} + virtual ~BoostImpl() {} + + inline int getBoostType() const CV_OVERRIDE { return impl.bparams.boostType; } + inline void setBoostType(int val) CV_OVERRIDE { impl.bparams.boostType = val; } + inline int getWeakCount() const CV_OVERRIDE { return impl.bparams.weakCount; } + inline void setWeakCount(int val) CV_OVERRIDE { impl.bparams.weakCount = val; } + inline double getWeightTrimRate() const CV_OVERRIDE { return impl.bparams.weightTrimRate; } + inline void setWeightTrimRate(double val) CV_OVERRIDE { impl.bparams.weightTrimRate = val; } + + inline int getMaxCategories() const CV_OVERRIDE { return impl.params.getMaxCategories(); } + inline void setMaxCategories(int val) CV_OVERRIDE { impl.params.setMaxCategories(val); } + inline int getMaxDepth() const CV_OVERRIDE { return impl.params.getMaxDepth(); } + inline void setMaxDepth(int val) CV_OVERRIDE { impl.params.setMaxDepth(val); } + inline int getMinSampleCount() const CV_OVERRIDE { return impl.params.getMinSampleCount(); } + inline void setMinSampleCount(int val) CV_OVERRIDE { impl.params.setMinSampleCount(val); } + inline int getCVFolds() const CV_OVERRIDE { return impl.params.getCVFolds(); } + inline void setCVFolds(int val) CV_OVERRIDE { impl.params.setCVFolds(val); } + inline bool getUseSurrogates() const CV_OVERRIDE { return impl.params.getUseSurrogates(); } + inline void setUseSurrogates(bool val) CV_OVERRIDE { impl.params.setUseSurrogates(val); } + inline bool getUse1SERule() const CV_OVERRIDE { return impl.params.getUse1SERule(); } + inline void setUse1SERule(bool val) CV_OVERRIDE { impl.params.setUse1SERule(val); } + inline bool getTruncatePrunedTree() const CV_OVERRIDE { return impl.params.getTruncatePrunedTree(); } + inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { impl.params.setTruncatePrunedTree(val); } + inline float getRegressionAccuracy() const CV_OVERRIDE { return impl.params.getRegressionAccuracy(); } + inline void setRegressionAccuracy(float val) CV_OVERRIDE { impl.params.setRegressionAccuracy(val); } + inline cv::Mat getPriors() const CV_OVERRIDE { return impl.params.getPriors(); } + inline void setPriors(const cv::Mat& val) CV_OVERRIDE { impl.params.setPriors(val); } + + String getDefaultName() const CV_OVERRIDE { return "opencv_ml_boost"; } + + bool train( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_Assert(!trainData.empty()); + return impl.train(trainData, flags); + } + + float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE + { + CV_CheckEQ(samples.cols(), getVarCount(), ""); + return impl.predict(samples, results, flags); + } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + impl.write(fs); + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + impl.read(fn); + } + + int getVarCount() const CV_OVERRIDE { return impl.getVarCount(); } + + bool isTrained() const CV_OVERRIDE { return impl.isTrained(); } + bool isClassifier() const CV_OVERRIDE { return impl.isClassifier(); } + + const vector& getRoots() 
const CV_OVERRIDE { return impl.getRoots(); } + const vector& getNodes() const CV_OVERRIDE { return impl.getNodes(); } + const vector& getSplits() const CV_OVERRIDE { return impl.getSplits(); } + const vector& getSubsets() const CV_OVERRIDE { return impl.getSubsets(); } + + DTreesImplForBoost impl; +}; + + +Ptr Boost::create() +{ + return makePtr(); +} + +Ptr Boost::load(const String& filepath, const String& nodeName) +{ + return Algorithm::load(filepath, nodeName); +} + +}} + +/* End of file. */ diff --git a/modules/ml/src/data.cpp b/modules/ml/src/data.cpp new file mode 100644 index 00000000000..fd7c8d10160 --- /dev/null +++ b/modules/ml/src/data.cpp @@ -0,0 +1,1045 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include +#include +#include + +#include + +namespace cv { namespace ml { + +static const float MISSED_VAL = TrainData::missingValue(); +static const int VAR_MISSED = VAR_ORDERED; + +TrainData::~TrainData() {} + +Mat TrainData::getSubVector(const Mat& vec, const Mat& idx) +{ + if (!(vec.cols == 1 || vec.rows == 1)) + CV_LOG_WARNING(NULL, "'getSubVector(const Mat& vec, const Mat& idx)' call with non-1D input is deprecated. It is not designed to work with 2D matrixes (especially with 'cv::ml::COL_SAMPLE' layout)."); + return getSubMatrix(vec, idx, vec.rows == 1 ? 
cv::ml::COL_SAMPLE : cv::ml::ROW_SAMPLE); +} + +template +Mat getSubMatrixImpl(const Mat& m, const Mat& idx, int layout) +{ + int nidx = idx.checkVector(1, CV_32S); + int dims = m.cols, nsamples = m.rows; + + Mat subm; + if (layout == COL_SAMPLE) + { + std::swap(dims, nsamples); + subm.create(dims, nidx, m.type()); + } + else + { + subm.create(nidx, dims, m.type()); + } + + for (int i = 0; i < nidx; i++) + { + int k = idx.at(i); CV_CheckGE(k, 0, "Bad idx"); CV_CheckLT(k, nsamples, "Bad idx or layout"); + if (dims == 1) + { + subm.at(i) = m.at(k); // at() has "transparent" access for 1D col-based / row-based vectors. + } + else if (layout == COL_SAMPLE) + { + for (int j = 0; j < dims; j++) + subm.at(j, i) = m.at(j, k); + } + else + { + for (int j = 0; j < dims; j++) + subm.at(i, j) = m.at(k, j); + } + } + return subm; +} + +Mat TrainData::getSubMatrix(const Mat& m, const Mat& idx, int layout) +{ + if (idx.empty()) + return m; + int type = m.type(); + CV_CheckType(type, type == CV_32S || type == CV_32F || type == CV_64F, ""); + if (type == CV_32S || type == CV_32F) // 32-bit + return getSubMatrixImpl(m, idx, layout); + if (type == CV_64F) // 64-bit + return getSubMatrixImpl(m, idx, layout); + CV_Error(Error::StsInternal, ""); +} + + +class TrainDataImpl CV_FINAL : public TrainData +{ +public: + typedef std::map MapType; + + TrainDataImpl() + { + file = 0; + clear(); + } + + virtual ~TrainDataImpl() { closeFile(); } + + int getLayout() const CV_OVERRIDE { return layout; } + int getNSamples() const CV_OVERRIDE + { + return !sampleIdx.empty() ? (int)sampleIdx.total() : + layout == ROW_SAMPLE ? samples.rows : samples.cols; + } + int getNTrainSamples() const CV_OVERRIDE + { + return !trainSampleIdx.empty() ? (int)trainSampleIdx.total() : getNSamples(); + } + int getNTestSamples() const CV_OVERRIDE + { + return !testSampleIdx.empty() ? (int)testSampleIdx.total() : 0; + } + int getNVars() const CV_OVERRIDE + { + return !varIdx.empty() ? (int)varIdx.total() : getNAllVars(); + } + int getNAllVars() const CV_OVERRIDE + { + return layout == ROW_SAMPLE ? samples.cols : samples.rows; + } + + Mat getTestSamples() const CV_OVERRIDE + { + Mat idx = getTestSampleIdx(); + return idx.empty() ? Mat() : getSubMatrix(samples, idx, getLayout()); + } + + Mat getSamples() const CV_OVERRIDE { return samples; } + Mat getResponses() const CV_OVERRIDE { return responses; } + Mat getMissing() const CV_OVERRIDE { return missing; } + Mat getVarIdx() const CV_OVERRIDE { return varIdx; } + Mat getVarType() const CV_OVERRIDE { return varType; } + int getResponseType() const CV_OVERRIDE + { + return classLabels.empty() ? VAR_ORDERED : VAR_CATEGORICAL; + } + Mat getTrainSampleIdx() const CV_OVERRIDE { return !trainSampleIdx.empty() ? trainSampleIdx : sampleIdx; } + Mat getTestSampleIdx() const CV_OVERRIDE { return testSampleIdx; } + Mat getSampleWeights() const CV_OVERRIDE + { + return sampleWeights; + } + Mat getTrainSampleWeights() const CV_OVERRIDE + { + return getSubVector(sampleWeights, getTrainSampleIdx()); // 1D-vector + } + Mat getTestSampleWeights() const CV_OVERRIDE + { + Mat idx = getTestSampleIdx(); + return idx.empty() ? 
Mat() : getSubVector(sampleWeights, idx); // 1D-vector + } + Mat getTrainResponses() const CV_OVERRIDE + { + return getSubMatrix(responses, getTrainSampleIdx(), cv::ml::ROW_SAMPLE); // col-based responses are transposed in setData() + } + Mat getTrainNormCatResponses() const CV_OVERRIDE + { + return getSubMatrix(normCatResponses, getTrainSampleIdx(), cv::ml::ROW_SAMPLE); // like 'responses' + } + Mat getTestResponses() const CV_OVERRIDE + { + Mat idx = getTestSampleIdx(); + return idx.empty() ? Mat() : getSubMatrix(responses, idx, cv::ml::ROW_SAMPLE); // col-based responses are transposed in setData() + } + Mat getTestNormCatResponses() const CV_OVERRIDE + { + Mat idx = getTestSampleIdx(); + return idx.empty() ? Mat() : getSubMatrix(normCatResponses, idx, cv::ml::ROW_SAMPLE); // like 'responses' + } + Mat getNormCatResponses() const CV_OVERRIDE { return normCatResponses; } + Mat getClassLabels() const CV_OVERRIDE { return classLabels; } + Mat getClassCounters() const { return classCounters; } + int getCatCount(int vi) const CV_OVERRIDE + { + int n = (int)catOfs.total(); + CV_Assert( 0 <= vi && vi < n ); + Vec2i ofs = catOfs.at(vi); + return ofs[1] - ofs[0]; + } + + Mat getCatOfs() const CV_OVERRIDE { return catOfs; } + Mat getCatMap() const CV_OVERRIDE { return catMap; } + + Mat getDefaultSubstValues() const CV_OVERRIDE { return missingSubst; } + + void closeFile() { if(file) fclose(file); file=0; } + void clear() + { + closeFile(); + samples.release(); + missing.release(); + varType.release(); + varSymbolFlags.release(); + responses.release(); + sampleIdx.release(); + trainSampleIdx.release(); + testSampleIdx.release(); + normCatResponses.release(); + classLabels.release(); + classCounters.release(); + catMap.release(); + catOfs.release(); + nameMap = MapType(); + layout = ROW_SAMPLE; + } + + typedef std::map CatMapHash; + + void setData(InputArray _samples, int _layout, InputArray _responses, + InputArray _varIdx, InputArray _sampleIdx, InputArray _sampleWeights, + InputArray _varType, InputArray _missing) + { + clear(); + + CV_Assert(_layout == ROW_SAMPLE || _layout == COL_SAMPLE ); + samples = _samples.getMat(); + layout = _layout; + responses = _responses.getMat(); + varIdx = _varIdx.getMat(); + sampleIdx = _sampleIdx.getMat(); + sampleWeights = _sampleWeights.getMat(); + varType = _varType.getMat(); + missing = _missing.getMat(); + + int nsamples = layout == ROW_SAMPLE ? samples.rows : samples.cols; + int ninputvars = layout == ROW_SAMPLE ? 
samples.cols : samples.rows; + int i, noutputvars = 0; + + CV_Assert( samples.type() == CV_32F || samples.type() == CV_32S ); + + if( !sampleIdx.empty() ) + { + CV_Assert( (sampleIdx.checkVector(1, CV_32S, true) > 0 && + checkRange(sampleIdx, true, 0, 0, nsamples)) || + sampleIdx.checkVector(1, CV_8U, true) == nsamples ); + if( sampleIdx.type() == CV_8U ) + sampleIdx = convertMaskToIdx(sampleIdx); + } + + if( !sampleWeights.empty() ) + { + CV_Assert( sampleWeights.checkVector(1, CV_32F, true) == nsamples ); + } + else + { + sampleWeights = Mat::ones(nsamples, 1, CV_32F); + } + + if( !varIdx.empty() ) + { + CV_Assert( (varIdx.checkVector(1, CV_32S, true) > 0 && + checkRange(varIdx, true, 0, 0, ninputvars)) || + varIdx.checkVector(1, CV_8U, true) == ninputvars ); + if( varIdx.type() == CV_8U ) + varIdx = convertMaskToIdx(varIdx); + varIdx = varIdx.clone(); + std::sort(varIdx.ptr(), varIdx.ptr() + varIdx.total()); + } + + if( !responses.empty() ) + { + CV_Assert( responses.type() == CV_32F || responses.type() == CV_32S ); + if( (responses.cols == 1 || responses.rows == 1) && (int)responses.total() == nsamples ) + noutputvars = 1; + else + { + CV_Assert( (layout == ROW_SAMPLE && responses.rows == nsamples) || + (layout == COL_SAMPLE && responses.cols == nsamples) ); + noutputvars = layout == ROW_SAMPLE ? responses.cols : responses.rows; + } + if( !responses.isContinuous() || (layout == COL_SAMPLE && noutputvars > 1) ) + { + Mat temp; + transpose(responses, temp); + responses = temp; + } + } + + int nvars = ninputvars + noutputvars; + + if( !varType.empty() ) + { + CV_Assert( varType.checkVector(1, CV_8U, true) == nvars && + checkRange(varType, true, 0, VAR_ORDERED, VAR_CATEGORICAL+1) ); + } + else + { + varType.create(1, nvars, CV_8U); + varType = Scalar::all(VAR_ORDERED); + if( noutputvars == 1 ) + varType.at(ninputvars) = (uchar)(responses.type() < CV_32F ? VAR_CATEGORICAL : VAR_ORDERED); + } + + if( noutputvars > 1 ) + { + for( i = 0; i < noutputvars; i++ ) + CV_Assert( varType.at(ninputvars + i) == VAR_ORDERED ); + } + + catOfs = Mat::zeros(1, nvars, CV_32SC2); + missingSubst = Mat::zeros(1, nvars, CV_32F); + + vector labels, counters, sortbuf, tempCatMap; + vector tempCatOfs; + CatMapHash ofshash; + + AutoBuffer buf(nsamples); + Mat non_missing(layout == ROW_SAMPLE ? Size(1, nsamples) : Size(nsamples, 1), CV_8U, buf.data()); + bool haveMissing = !missing.empty(); + if( haveMissing ) + { + CV_Assert( missing.size() == samples.size() && missing.type() == CV_8U ); + } + + // we iterate through all the variables. For each categorical variable we build a map + // in order to convert input values of the variable into normalized values (0..catcount_vi-1) + // often many categorical variables are similar, so we compress the map - try to re-use + // maps for different variables if they are identical + for( i = 0; i < ninputvars; i++ ) + { + Mat values_i = layout == ROW_SAMPLE ? 
samples.col(i) : samples.row(i); + + if( varType.at(i) == VAR_CATEGORICAL ) + { + preprocessCategorical(values_i, 0, labels, 0, sortbuf); + missingSubst.at(i) = -1.f; + int j, m = (int)labels.size(); + CV_Assert( m > 0 ); + int a = labels.front(), b = labels.back(); + const int* currmap = &labels[0]; + int hashval = ((unsigned)a*127 + (unsigned)b)*127 + m; + CatMapHash::iterator it = ofshash.find(hashval); + if( it != ofshash.end() ) + { + int vi = it->second; + Vec2i ofs0 = tempCatOfs[vi]; + int m0 = ofs0[1] - ofs0[0]; + const int* map0 = &tempCatMap[ofs0[0]]; + if( m0 == m && map0[0] == a && map0[m0-1] == b ) + { + for( j = 0; j < m; j++ ) + if( map0[j] != currmap[j] ) + break; + if( j == m ) + { + // re-use the map + tempCatOfs.push_back(ofs0); + continue; + } + } + } + else + ofshash[hashval] = i; + Vec2i ofs; + ofs[0] = (int)tempCatMap.size(); + ofs[1] = ofs[0] + m; + tempCatOfs.push_back(ofs); + std::copy(labels.begin(), labels.end(), std::back_inserter(tempCatMap)); + } + else + { + tempCatOfs.push_back(Vec2i(0, 0)); + /*Mat missing_i = layout == ROW_SAMPLE ? missing.col(i) : missing.row(i); + compare(missing_i, Scalar::all(0), non_missing, CMP_EQ); + missingSubst.at(i) = (float)(mean(values_i, non_missing)[0]);*/ + missingSubst.at(i) = 0.f; + } + } + + if( !tempCatOfs.empty() ) + { + Mat(tempCatOfs).copyTo(catOfs); + Mat(tempCatMap).copyTo(catMap); + } + + if( noutputvars > 0 && varType.at(ninputvars) == VAR_CATEGORICAL ) + { + preprocessCategorical(responses, &normCatResponses, labels, &counters, sortbuf); + Mat(labels).copyTo(classLabels); + Mat(counters).copyTo(classCounters); + } + } + + Mat convertMaskToIdx(const Mat& mask) + { + int i, j, nz = countNonZero(mask), n = mask.cols + mask.rows - 1; + Mat idx(1, nz, CV_32S); + for( i = j = 0; i < n; i++ ) + if( mask.at(i) ) + idx.at(j++) = i; + return idx; + } + + struct CmpByIdx + { + CmpByIdx(const int* _data, int _step) : data(_data), step(_step) {} + bool operator ()(int i, int j) const { return data[i*step] < data[j*step]; } + const int* data; + int step; + }; + + void preprocessCategorical(const Mat& data, Mat* normdata, vector& labels, + vector* counters, vector& sortbuf) + { + CV_Assert((data.cols == 1 || data.rows == 1) && (data.type() == CV_32S || data.type() == CV_32F)); + int* odata = 0; + int ostep = 0; + + if(normdata) + { + normdata->create(data.size(), CV_32S); + odata = normdata->ptr(); + ostep = normdata->isContinuous() ? 1 : (int)normdata->step1(); + } + + int i, n = data.cols + data.rows - 1; + sortbuf.resize(n*2); + int* idx = &sortbuf[0]; + int* idata = (int*)data.ptr(); + int istep = data.isContinuous() ? 
1 : (int)data.step1(); + + if( data.type() == CV_32F ) + { + idata = idx + n; + const float* fdata = data.ptr(); + for( i = 0; i < n; i++ ) + { + if( fdata[i*istep] == MISSED_VAL ) + idata[i] = -1; + else + { + idata[i] = cvRound(fdata[i*istep]); + CV_Assert( (float)idata[i] == fdata[i*istep] ); + } + } + istep = 1; + } + + for( i = 0; i < n; i++ ) + idx[i] = i; + + std::sort(idx, idx + n, CmpByIdx(idata, istep)); + + int clscount = 1; + for( i = 1; i < n; i++ ) + clscount += idata[idx[i]*istep] != idata[idx[i-1]*istep]; + + int clslabel = -1; + int prev = ~idata[idx[0]*istep]; + int previdx = 0; + + labels.resize(clscount); + if(counters) + counters->resize(clscount); + + for( i = 0; i < n; i++ ) + { + int l = idata[idx[i]*istep]; + if( l != prev ) + { + clslabel++; + labels[clslabel] = l; + int k = i - previdx; + if( clslabel > 0 && counters ) + counters->at(clslabel-1) = k; + prev = l; + previdx = i; + } + if(odata) + odata[idx[i]*ostep] = clslabel; + } + if(counters) + counters->at(clslabel) = i - previdx; + } + + bool loadCSV(const String& filename, int headerLines, + int responseStartIdx, int responseEndIdx, + const String& varTypeSpec, char delimiter, char missch) + { + const int M = 1000000; + const char delimiters[3] = { ' ', delimiter, '\0' }; + int nvars = 0; + bool varTypesSet = false; + + clear(); + + file = fopen( filename.c_str(), "rt" ); + + if( !file ) + return false; + + std::vector _buf(M); + std::vector allresponses; + std::vector rowvals; + std::vector vtypes, rowtypes; + std::vector vsymbolflags; + bool haveMissed = false; + char* buf = &_buf[0]; + + int i, ridx0 = responseStartIdx, ridx1 = responseEndIdx; + int ninputvars = 0, noutputvars = 0; + + Mat tempSamples, tempMissing, tempResponses; + MapType tempNameMap; + int catCounter = 1; + + // skip header lines + int lineno = 0; + for(;;lineno++) + { + if( !fgets(buf, M, file) ) + break; + if(lineno < headerLines ) + continue; + // trim trailing spaces + int idx = (int)strlen(buf)-1; + while( idx >= 0 && isspace(buf[idx]) ) + buf[idx--] = '\0'; + // skip spaces in the beginning + char* ptr = buf; + while( *ptr != '\0' && isspace(*ptr) ) + ptr++; + // skip commented off lines + if(*ptr == '#') + continue; + rowvals.clear(); + rowtypes.clear(); + + char* token = strtok(buf, delimiters); + if (!token) + break; + + for(;;) + { + float val=0.f; int tp = 0; + decodeElem( token, val, tp, missch, tempNameMap, catCounter ); + if( tp == VAR_MISSED ) + haveMissed = true; + rowvals.push_back(val); + rowtypes.push_back((uchar)tp); + token = strtok(NULL, delimiters); + if (!token) + break; + } + + if( nvars == 0 ) + { + if( rowvals.empty() ) + CV_Error(CV_StsBadArg, "invalid CSV format; no data found"); + nvars = (int)rowvals.size(); + if( !varTypeSpec.empty() && varTypeSpec.size() > 0 ) + { + setVarTypes(varTypeSpec, nvars, vtypes); + varTypesSet = true; + } + else + vtypes = rowtypes; + vsymbolflags.resize(nvars); + for( i = 0; i < nvars; i++ ) + vsymbolflags[i] = (uchar)(rowtypes[i] == VAR_CATEGORICAL); + + ridx0 = ridx0 >= 0 ? ridx0 : ridx0 == -1 ? nvars - 1 : -1; + ridx1 = ridx1 >= 0 ? ridx1 : ridx0 >= 0 ? ridx0+1 : -1; + CV_Assert(ridx1 > ridx0); + noutputvars = ridx0 >= 0 ? 
ridx1 - ridx0 : 0; + ninputvars = nvars - noutputvars; + } + else + CV_Assert( nvars == (int)rowvals.size() ); + + // check var types + for( i = 0; i < nvars; i++ ) + { + CV_Assert( (!varTypesSet && vtypes[i] == rowtypes[i]) || + (varTypesSet && (vtypes[i] == rowtypes[i] || rowtypes[i] == VAR_ORDERED)) ); + uchar sflag = (uchar)(rowtypes[i] == VAR_CATEGORICAL); + if( vsymbolflags[i] == VAR_MISSED ) + vsymbolflags[i] = sflag; + else + CV_Assert(vsymbolflags[i] == sflag || rowtypes[i] == VAR_MISSED); + } + + if( ridx0 >= 0 ) + { + for( i = ridx1; i < nvars; i++ ) + std::swap(rowvals[i], rowvals[i-noutputvars]); + for( i = ninputvars; i < nvars; i++ ) + allresponses.push_back(rowvals[i]); + rowvals.pop_back(); + } + Mat rmat(1, ninputvars, CV_32F, &rowvals[0]); + tempSamples.push_back(rmat); + } + + closeFile(); + + int nsamples = tempSamples.rows; + if( nsamples == 0 ) + return false; + + if( haveMissed ) + compare(tempSamples, MISSED_VAL, tempMissing, CMP_EQ); + + if( ridx0 >= 0 ) + { + for( i = ridx1; i < nvars; i++ ) + std::swap(vtypes[i], vtypes[i-noutputvars]); + if( noutputvars > 1 ) + { + for( i = ninputvars; i < nvars; i++ ) + if( vtypes[i] == VAR_CATEGORICAL ) + CV_Error(CV_StsBadArg, + "If responses are vector values, not scalars, they must be marked as ordered responses"); + } + } + + if( !varTypesSet && noutputvars == 1 && vtypes[ninputvars] == VAR_ORDERED ) + { + for( i = 0; i < nsamples; i++ ) + if( allresponses[i] != cvRound(allresponses[i]) ) + break; + if( i == nsamples ) + vtypes[ninputvars] = VAR_CATEGORICAL; + } + + //If there are responses in the csv file, save them. If not, responses matrix will contain just zeros + if (noutputvars != 0){ + Mat(nsamples, noutputvars, CV_32F, &allresponses[0]).copyTo(tempResponses); + setData(tempSamples, ROW_SAMPLE, tempResponses, noArray(), noArray(), + noArray(), Mat(vtypes).clone(), tempMissing); + } + else{ + Mat zero_mat(nsamples, 1, CV_32F, Scalar(0)); + zero_mat.copyTo(tempResponses); + setData(tempSamples, ROW_SAMPLE, tempResponses, noArray(), noArray(), + noArray(), noArray(), tempMissing); + } + bool ok = !samples.empty(); + if(ok) + { + std::swap(tempNameMap, nameMap); + Mat(vsymbolflags).copyTo(varSymbolFlags); + } + return ok; + } + + void decodeElem( const char* token, float& elem, int& type, + char missch, MapType& namemap, int& counter ) const + { + char* stopstring = NULL; + elem = (float)strtod( token, &stopstring ); + if( *stopstring == missch && strlen(stopstring) == 1 ) // missed value + { + elem = MISSED_VAL; + type = VAR_MISSED; + } + else if( *stopstring != '\0' ) + { + MapType::iterator it = namemap.find(token); + if( it == namemap.end() ) + { + elem = (float)counter; + namemap[token] = counter++; + } + else + elem = (float)it->second; + type = VAR_CATEGORICAL; + } + else + type = VAR_ORDERED; + } + + void setVarTypes( const String& s, int nvars, std::vector& vtypes ) const + { + const char* errmsg = "type spec is not correct; it should have format \"cat\", \"ord\" or " + "\"ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\", where n's and m's are 0-based variable indices"; + const char* str = s.c_str(); + int specCounter = 0; + + vtypes.resize(nvars); + + for( int k = 0; k < 2; k++ ) + { + const char* ptr = strstr(str, k == 0 ? "ord" : "cat"); + int tp = k == 0 ? 
VAR_ORDERED : VAR_CATEGORICAL; + if( ptr ) // parse ord/cat str + { + char* stopstring = NULL; + + if( ptr[3] == '\0' ) + { + for( int i = 0; i < nvars; i++ ) + vtypes[i] = (uchar)tp; + specCounter = nvars; + break; + } + + if ( ptr[3] != '[') + CV_Error( CV_StsBadArg, errmsg ); + + ptr += 4; // pass "ord[" + do + { + int b1 = (int)strtod( ptr, &stopstring ); + if( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') ) + CV_Error( CV_StsBadArg, errmsg ); + ptr = stopstring + 1; + if( (stopstring[0] == ',') || (stopstring[0] == ']')) + { + CV_Assert( 0 <= b1 && b1 < nvars ); + vtypes[b1] = (uchar)tp; + specCounter++; + } + else + { + if( stopstring[0] == '-') + { + int b2 = (int)strtod( ptr, &stopstring); + if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') ) + CV_Error( CV_StsBadArg, errmsg ); + ptr = stopstring + 1; + CV_Assert( 0 <= b1 && b1 <= b2 && b2 < nvars ); + for (int i = b1; i <= b2; i++) + vtypes[i] = (uchar)tp; + specCounter += b2 - b1 + 1; + } + else + CV_Error( CV_StsBadArg, errmsg ); + + } + } + while(*stopstring != ']'); + } + } + + if( specCounter != nvars ) + CV_Error( CV_StsBadArg, "type of some variables is not specified" ); + } + + void setTrainTestSplitRatio(double ratio, bool shuffle) CV_OVERRIDE + { + CV_Assert( 0. <= ratio && ratio <= 1. ); + setTrainTestSplit(cvRound(getNSamples()*ratio), shuffle); + } + + void setTrainTestSplit(int count, bool shuffle) CV_OVERRIDE + { + int i, nsamples = getNSamples(); + CV_Assert( 0 <= count && count < nsamples ); + + trainSampleIdx.release(); + testSampleIdx.release(); + + if( count == 0 ) + trainSampleIdx = sampleIdx; + else if( count == nsamples ) + testSampleIdx = sampleIdx; + else + { + Mat mask(1, nsamples, CV_8U); + uchar* mptr = mask.ptr(); + for( i = 0; i < nsamples; i++ ) + mptr[i] = (uchar)(i < count); + trainSampleIdx.create(1, count, CV_32S); + testSampleIdx.create(1, nsamples - count, CV_32S); + int j0 = 0, j1 = 0; + const int* sptr = !sampleIdx.empty() ? sampleIdx.ptr() : 0; + int* trainptr = trainSampleIdx.ptr(); + int* testptr = testSampleIdx.ptr(); + for( i = 0; i < nsamples; i++ ) + { + int idx = sptr ? sptr[i] : i; + if( mptr[i] ) + trainptr[j0++] = idx; + else + testptr[j1++] = idx; + } + if( shuffle ) + shuffleTrainTest(); + } + } + + void shuffleTrainTest() CV_OVERRIDE + { + if( !trainSampleIdx.empty() && !testSampleIdx.empty() ) + { + int i, nsamples = getNSamples(), ntrain = getNTrainSamples(), ntest = getNTestSamples(); + int* trainIdx = trainSampleIdx.ptr(); + int* testIdx = testSampleIdx.ptr(); + RNG& rng = theRNG(); + + for( i = 0; i < nsamples; i++) + { + int a = rng.uniform(0, nsamples); + int b = rng.uniform(0, nsamples); + int* ptra = trainIdx; + int* ptrb = trainIdx; + if( a >= ntrain ) + { + ptra = testIdx; + a -= ntrain; + CV_Assert( a < ntest ); + } + if( b >= ntrain ) + { + ptrb = testIdx; + b -= ntrain; + CV_Assert( b < ntest ); + } + std::swap(ptra[a], ptrb[b]); + } + } + } + + Mat getTrainSamples(int _layout, + bool compressSamples, + bool compressVars) const CV_OVERRIDE + { + if( samples.empty() ) + return samples; + + if( (!compressSamples || (trainSampleIdx.empty() && sampleIdx.empty())) && + (!compressVars || varIdx.empty()) && + layout == _layout ) + return samples; + + int drows = getNTrainSamples(), dcols = getNVars(); + Mat sidx = getTrainSampleIdx(), vidx = getVarIdx(); + const float* src0 = samples.ptr(); + const int* sptr = !sidx.empty() ? sidx.ptr() : 0; + const int* vptr = !vidx.empty() ? 
vidx.ptr() : 0; + size_t sstep0 = samples.step/samples.elemSize(); + size_t sstep = layout == ROW_SAMPLE ? sstep0 : 1; + size_t vstep = layout == ROW_SAMPLE ? 1 : sstep0; + + if( _layout == COL_SAMPLE ) + { + std::swap(drows, dcols); + std::swap(sptr, vptr); + std::swap(sstep, vstep); + } + + Mat dsamples(drows, dcols, CV_32F); + + for( int i = 0; i < drows; i++ ) + { + const float* src = src0 + (sptr ? sptr[i] : i)*sstep; + float* dst = dsamples.ptr(i); + + for( int j = 0; j < dcols; j++ ) + dst[j] = src[(vptr ? vptr[j] : j)*vstep]; + } + + return dsamples; + } + + void getValues( int vi, InputArray _sidx, float* values ) const CV_OVERRIDE + { + Mat sidx = _sidx.getMat(); + int i, n = sidx.checkVector(1, CV_32S), nsamples = getNSamples(); + CV_Assert( 0 <= vi && vi < getNAllVars() ); + CV_Assert( n >= 0 ); + const int* s = n > 0 ? sidx.ptr() : 0; + if( n == 0 ) + n = nsamples; + + size_t step = samples.step/samples.elemSize(); + size_t sstep = layout == ROW_SAMPLE ? step : 1; + size_t vstep = layout == ROW_SAMPLE ? 1 : step; + + const float* src = samples.ptr() + vi*vstep; + float subst = missingSubst.at(vi); + for( i = 0; i < n; i++ ) + { + int j = i; + if( s ) + { + j = s[i]; + CV_Assert( 0 <= j && j < ((layout == ROW_SAMPLE) ? samples.rows : samples.cols) ); + } + values[i] = src[j*sstep]; + if( values[i] == MISSED_VAL ) + values[i] = subst; + } + } + + void getNormCatValues( int vi, InputArray _sidx, int* values ) const CV_OVERRIDE + { + float* fvalues = (float*)values; + getValues(vi, _sidx, fvalues); + int i, n = (int)_sidx.total(); + Vec2i ofs = catOfs.at(vi); + int m = ofs[1] - ofs[0]; + + CV_Assert( m > 0 ); // if m==0, vi is an ordered variable + const int* cmap = &catMap.at(ofs[0]); + bool fastMap = (m == cmap[m - 1] - cmap[0] + 1); + + if( fastMap ) + { + for( i = 0; i < n; i++ ) + { + int val = cvRound(fvalues[i]); + int idx = val - cmap[0]; + CV_Assert(cmap[idx] == val); + values[i] = idx; + } + } + else + { + for( i = 0; i < n; i++ ) + { + int val = cvRound(fvalues[i]); + int a = 0, b = m, c = -1; + + while( a < b ) + { + c = (a + b) >> 1; + if( val < cmap[c] ) + b = c; + else if( val > cmap[c] ) + a = c+1; + else + break; + } + + CV_DbgAssert( c >= 0 && val == cmap[c] ); + values[i] = c; + } + } + } + + void getSample(InputArray _vidx, int sidx, float* buf) const CV_OVERRIDE + { + CV_Assert(buf != 0 && 0 <= sidx && sidx < getNSamples()); + Mat vidx = _vidx.getMat(); + int i, n = vidx.checkVector(1, CV_32S), nvars = getNAllVars(); + CV_Assert( n >= 0 ); + const int* vptr = n > 0 ? vidx.ptr() : 0; + if( n == 0 ) + n = nvars; + + size_t step = samples.step/samples.elemSize(); + size_t sstep = layout == ROW_SAMPLE ? step : 1; + size_t vstep = layout == ROW_SAMPLE ? 
1 : step; + + const float* src = samples.ptr() + sidx*sstep; + for( i = 0; i < n; i++ ) + { + int j = i; + if( vptr ) + { + j = vptr[i]; + CV_Assert( 0 <= j && j < nvars ); + } + buf[i] = src[j*vstep]; + } + } + + void getNames(std::vector& names) const CV_OVERRIDE + { + size_t n = nameMap.size(); + TrainDataImpl::MapType::const_iterator it = nameMap.begin(), + it_end = nameMap.end(); + names.resize(n+1); + names[0] = "?"; + for( ; it != it_end; ++it ) + { + String s = it->first; + int label = it->second; + CV_Assert( label > 0 && label <= (int)n ); + names[label] = s; + } + } + + Mat getVarSymbolFlags() const CV_OVERRIDE + { + return varSymbolFlags; + } + + FILE* file; + int layout; + Mat samples, missing, varType, varIdx, varSymbolFlags, responses, missingSubst; + Mat sampleIdx, trainSampleIdx, testSampleIdx; + Mat sampleWeights, catMap, catOfs; + Mat normCatResponses, classLabels, classCounters; + MapType nameMap; +}; + + +Ptr TrainData::loadFromCSV(const String& filename, + int headerLines, + int responseStartIdx, + int responseEndIdx, + const String& varTypeSpec, + char delimiter, char missch) +{ + CV_TRACE_FUNCTION_SKIP_NESTED(); + Ptr td = makePtr(); + if(!td->loadCSV(filename, headerLines, responseStartIdx, responseEndIdx, varTypeSpec, delimiter, missch)) + td.release(); + return td; +} + +Ptr TrainData::create(InputArray samples, int layout, InputArray responses, + InputArray varIdx, InputArray sampleIdx, InputArray sampleWeights, + InputArray varType) +{ + CV_TRACE_FUNCTION_SKIP_NESTED(); + Ptr td = makePtr(); + td->setData(samples, layout, responses, varIdx, sampleIdx, sampleWeights, varType, noArray()); + return td; +} + +}} + +/* End of file. */ diff --git a/modules/ml/src/em.cpp b/modules/ml/src/em.cpp new file mode 100644 index 00000000000..3e0eeb560a4 --- /dev/null +++ b/modules/ml/src/em.cpp @@ -0,0 +1,859 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even ifadvised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ +namespace ml +{ + +const double minEigenValue = DBL_EPSILON; + +class CV_EXPORTS EMImpl CV_FINAL : public EM +{ +public: + + int nclusters; + int covMatType; + TermCriteria termCrit; + + inline TermCriteria getTermCriteria() const CV_OVERRIDE { return termCrit; } + inline void setTermCriteria(const TermCriteria& val) CV_OVERRIDE { termCrit = val; } + + void setClustersNumber(int val) CV_OVERRIDE + { + nclusters = val; + CV_Assert(nclusters >= 1); + } + + int getClustersNumber() const CV_OVERRIDE + { + return nclusters; + } + + void setCovarianceMatrixType(int val) CV_OVERRIDE + { + covMatType = val; + CV_Assert(covMatType == COV_MAT_SPHERICAL || + covMatType == COV_MAT_DIAGONAL || + covMatType == COV_MAT_GENERIC); + } + + int getCovarianceMatrixType() const CV_OVERRIDE + { + return covMatType; + } + + EMImpl() + { + nclusters = DEFAULT_NCLUSTERS; + covMatType=EM::COV_MAT_DIAGONAL; + termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6); + } + + virtual ~EMImpl() {} + + void clear() CV_OVERRIDE + { + trainSamples.release(); + trainProbs.release(); + trainLogLikelihoods.release(); + trainLabels.release(); + + weights.release(); + means.release(); + covs.clear(); + + covsEigenValues.clear(); + invCovsEigenValues.clear(); + covsRotateMats.clear(); + + logWeightDivDet.release(); + } + + bool train(const Ptr& data, int) CV_OVERRIDE + { + CV_Assert(!data.empty()); + Mat samples = data->getTrainSamples(), labels; + return trainEM(samples, labels, noArray(), noArray()); + } + + bool trainEM(InputArray samples, + OutputArray logLikelihoods, + OutputArray labels, + OutputArray probs) CV_OVERRIDE + { + Mat samplesMat = samples.getMat(); + setTrainData(START_AUTO_STEP, samplesMat, 0, 0, 0, 0); + return doTrain(START_AUTO_STEP, logLikelihoods, labels, probs); + } + + bool trainE(InputArray samples, + InputArray _means0, + InputArray _covs0, + InputArray _weights0, + OutputArray logLikelihoods, + OutputArray labels, + OutputArray probs) CV_OVERRIDE + { + Mat samplesMat = samples.getMat(); + std::vector covs0; + _covs0.getMatVector(covs0); + + Mat means0 = _means0.getMat(), weights0 = _weights0.getMat(); + + setTrainData(START_E_STEP, samplesMat, 0, !_means0.empty() ? &means0 : 0, + !_covs0.empty() ? &covs0 : 0, !_weights0.empty() ? &weights0 : 0); + return doTrain(START_E_STEP, logLikelihoods, labels, probs); + } + + bool trainM(InputArray samples, + InputArray _probs0, + OutputArray logLikelihoods, + OutputArray labels, + OutputArray probs) CV_OVERRIDE + { + Mat samplesMat = samples.getMat(); + Mat probs0 = _probs0.getMat(); + + setTrainData(START_M_STEP, samplesMat, !_probs0.empty() ? 
&probs0 : 0, 0, 0, 0); + return doTrain(START_M_STEP, logLikelihoods, labels, probs); + } + + float predict(InputArray _inputs, OutputArray _outputs, int) const CV_OVERRIDE + { + bool needprobs = _outputs.needed(); + Mat samples = _inputs.getMat(), probs, probsrow; + int ptype = CV_64F; + float firstres = 0.f; + int i, nsamples = samples.rows; + + if( needprobs ) + { + if( _outputs.fixedType() ) + ptype = _outputs.type(); + _outputs.create(samples.rows, nclusters, ptype); + probs = _outputs.getMat(); + } + else + nsamples = std::min(nsamples, 1); + + for( i = 0; i < nsamples; i++ ) + { + if( needprobs ) + probsrow = probs.row(i); + Vec2d res = computeProbabilities(samples.row(i), needprobs ? &probsrow : 0, ptype); + if( i == 0 ) + firstres = (float)res[1]; + } + return firstres; + } + + Vec2d predict2(InputArray _sample, OutputArray _probs) const CV_OVERRIDE + { + int ptype = CV_64F; + Mat sample = _sample.getMat(); + CV_Assert(isTrained()); + + CV_Assert(!sample.empty()); + if(sample.type() != CV_64FC1) + { + Mat tmp; + sample.convertTo(tmp, CV_64FC1); + sample = tmp; + } + sample = sample.reshape(1, 1); + + Mat probs; + if( _probs.needed() ) + { + if( _probs.fixedType() ) + ptype = _probs.type(); + _probs.create(1, nclusters, ptype); + probs = _probs.getMat(); + } + + return computeProbabilities(sample, !probs.empty() ? &probs : 0, ptype); + } + + bool isTrained() const CV_OVERRIDE + { + return !means.empty(); + } + + bool isClassifier() const CV_OVERRIDE + { + return true; + } + + int getVarCount() const CV_OVERRIDE + { + return means.cols; + } + + String getDefaultName() const CV_OVERRIDE + { + return "opencv_ml_em"; + } + + static void checkTrainData(int startStep, const Mat& samples, + int nclusters, int covMatType, const Mat* probs, const Mat* means, + const std::vector* covs, const Mat* weights) + { + // Check samples. + CV_Assert(!samples.empty()); + CV_Assert(samples.channels() == 1); + + int nsamples = samples.rows; + int dim = samples.cols; + + // Check training params. 
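+        // Editorial note (summary added for clarity; the assertions below are the
+        // authoritative checks): the optional inputs must be shaped as
+        //   probs0   : nsamples x nclusters (CV_32F or CV_64F)   -- required by trainM()
+        //   means0   : nclusters x dim                           -- required by trainE()
+        //   covs0    : nclusters matrices, each dim x dim        -- optional for trainE()
+        //   weights0 : 1 x nclusters (or nclusters x 1)          -- optional for trainE()
+        // A minimal trainE() call could therefore look like (sketch, variable names
+        // hypothetical):
+        //   Mat means0(nclusters, dim, CV_64F);  // one initial mean per cluster
+        //   em->trainE(samples, means0, noArray(), noArray(),
+        //              noArray(), labels, noArray());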
+ CV_Assert(nclusters > 0); + CV_Assert(nclusters <= nsamples); + CV_Assert(startStep == START_AUTO_STEP || + startStep == START_E_STEP || + startStep == START_M_STEP); + CV_Assert(covMatType == COV_MAT_GENERIC || + covMatType == COV_MAT_DIAGONAL || + covMatType == COV_MAT_SPHERICAL); + + CV_Assert(!probs || + (!probs->empty() && + probs->rows == nsamples && probs->cols == nclusters && + (probs->type() == CV_32FC1 || probs->type() == CV_64FC1))); + + CV_Assert(!weights || + (!weights->empty() && + (weights->cols == 1 || weights->rows == 1) && static_cast(weights->total()) == nclusters && + (weights->type() == CV_32FC1 || weights->type() == CV_64FC1))); + + CV_Assert(!means || + (!means->empty() && + means->rows == nclusters && means->cols == dim && + means->channels() == 1)); + + CV_Assert(!covs || + (!covs->empty() && + static_cast(covs->size()) == nclusters)); + if(covs) + { + const Size covSize(dim, dim); + for(size_t i = 0; i < covs->size(); i++) + { + const Mat& m = (*covs)[i]; + CV_Assert(!m.empty() && m.size() == covSize && (m.channels() == 1)); + } + } + + if(startStep == START_E_STEP) + { + CV_Assert(means); + } + else if(startStep == START_M_STEP) + { + CV_Assert(probs); + } + } + + static void preprocessSampleData(const Mat& src, Mat& dst, int dstType, bool isAlwaysClone) + { + if(src.type() == dstType && !isAlwaysClone) + dst = src; + else + src.convertTo(dst, dstType); + } + + static void preprocessProbability(Mat& probs) + { + max(probs, 0., probs); + + const double uniformProbability = (double)(1./probs.cols); + for(int y = 0; y < probs.rows; y++) + { + Mat sampleProbs = probs.row(y); + + double maxVal = 0; + minMaxLoc(sampleProbs, 0, &maxVal); + if(maxVal < FLT_EPSILON) + sampleProbs.setTo(uniformProbability); + else + normalize(sampleProbs, sampleProbs, 1, 0, NORM_L1); + } + } + + void setTrainData(int startStep, const Mat& samples, + const Mat* probs0, + const Mat* means0, + const std::vector* covs0, + const Mat* weights0) + { + clear(); + + checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0); + + bool isKMeansInit = (startStep == START_AUTO_STEP) || (startStep == START_E_STEP && (covs0 == 0 || weights0 == 0)); + // Set checked data + preprocessSampleData(samples, trainSamples, isKMeansInit ? CV_32FC1 : CV_64FC1, false); + + // set probs + if(probs0 && startStep == START_M_STEP) + { + preprocessSampleData(*probs0, trainProbs, CV_64FC1, true); + preprocessProbability(trainProbs); + } + + // set weights + if(weights0 && (startStep == START_E_STEP && covs0)) + { + weights0->convertTo(weights, CV_64FC1); + weights = weights.reshape(1,1); + preprocessProbability(weights); + } + + // set means + if(means0 && (startStep == START_E_STEP/* || startStep == START_AUTO_STEP*/)) + means0->convertTo(means, isKMeansInit ? 
CV_32FC1 : CV_64FC1); + + // set covs + if(covs0 && (startStep == START_E_STEP && weights0)) + { + covs.resize(nclusters); + for(size_t i = 0; i < covs0->size(); i++) + (*covs0)[i].convertTo(covs[i], CV_64FC1); + } + } + + void decomposeCovs() + { + CV_Assert(!covs.empty()); + covsEigenValues.resize(nclusters); + if(covMatType == COV_MAT_GENERIC) + covsRotateMats.resize(nclusters); + invCovsEigenValues.resize(nclusters); + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + CV_Assert(!covs[clusterIndex].empty()); + + SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV); + + if(covMatType == COV_MAT_SPHERICAL) + { + double maxSingularVal = svd.w.at(0); + covsEigenValues[clusterIndex] = Mat(1, 1, CV_64FC1, Scalar(maxSingularVal)); + } + else if(covMatType == COV_MAT_DIAGONAL) + { + covsEigenValues[clusterIndex] = covs[clusterIndex].diag().clone(); //Preserve the original order of eigen values. + } + else //COV_MAT_GENERIC + { + covsEigenValues[clusterIndex] = svd.w; + covsRotateMats[clusterIndex] = svd.u; + } + max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]); + invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex]; + } + } + + void clusterTrainSamples() + { + int nsamples = trainSamples.rows; + + // Cluster samples, compute/update means + + // Convert samples and means to 32F, because kmeans requires this type. + Mat trainSamplesFlt, meansFlt; + if(trainSamples.type() != CV_32FC1) + trainSamples.convertTo(trainSamplesFlt, CV_32FC1); + else + trainSamplesFlt = trainSamples; + if(!means.empty()) + { + if(means.type() != CV_32FC1) + means.convertTo(meansFlt, CV_32FC1); + else + meansFlt = means; + } + + Mat labels; + kmeans(trainSamplesFlt, nclusters, labels, + TermCriteria(TermCriteria::COUNT, means.empty() ? 10 : 1, 0.5), + 10, KMEANS_PP_CENTERS, meansFlt); + + // Convert samples and means back to 64F. + CV_Assert(meansFlt.type() == CV_32FC1); + if(trainSamples.type() != CV_64FC1) + { + Mat trainSamplesBuffer; + trainSamplesFlt.convertTo(trainSamplesBuffer, CV_64FC1); + trainSamples = trainSamplesBuffer; + } + meansFlt.convertTo(means, CV_64FC1); + + // Compute weights and covs + weights = Mat(1, nclusters, CV_64FC1, Scalar(0)); + covs.resize(nclusters); + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + Mat clusterSamples; + for(int sampleIndex = 0; sampleIndex < nsamples; sampleIndex++) + { + if(labels.at(sampleIndex) == clusterIndex) + { + const Mat sample = trainSamples.row(sampleIndex); + clusterSamples.push_back(sample); + } + } + CV_Assert(!clusterSamples.empty()); + + calcCovarMatrix(clusterSamples, covs[clusterIndex], means.row(clusterIndex), + CV_COVAR_NORMAL + CV_COVAR_ROWS + CV_COVAR_USE_AVG + CV_COVAR_SCALE, CV_64FC1); + weights.at(clusterIndex) = static_cast(clusterSamples.rows)/static_cast(nsamples); + } + + decomposeCovs(); + } + + void computeLogWeightDivDet() + { + CV_Assert(!covsEigenValues.empty()); + + Mat logWeights; + cv::max(weights, DBL_MIN, weights); + log(weights, logWeights); + + logWeightDivDet.create(1, nclusters, CV_64FC1); + // note: logWeightDivDet = log(weight_k) - 0.5 * log(|det(cov_k)|) + + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + double logDetCov = 0.; + const int evalCount = static_cast(covsEigenValues[clusterIndex].total()); + for(int di = 0; di < evalCount; di++) + logDetCov += std::log(covsEigenValues[clusterIndex].at(covMatType != COV_MAT_SPHERICAL ? 
di : 0)); + + logWeightDivDet.at(clusterIndex) = logWeights.at(clusterIndex) - 0.5 * logDetCov; + } + } + + bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs) + { + int dim = trainSamples.cols; + // Precompute the empty initial train data in the cases of START_E_STEP and START_AUTO_STEP + if(startStep != START_M_STEP) + { + if(covs.empty()) + { + CV_Assert(weights.empty()); + clusterTrainSamples(); + } + } + + if(!covs.empty() && covsEigenValues.empty() ) + { + CV_Assert(invCovsEigenValues.empty()); + decomposeCovs(); + } + + if(startStep == START_M_STEP) + mStep(); + + double trainLogLikelihood, prevTrainLogLikelihood = 0.; + int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ? + termCrit.maxCount : DEFAULT_MAX_ITERS; + double epsilon = (termCrit.type & TermCriteria::EPS) ? termCrit.epsilon : 0.; + + for(int iter = 0; ; iter++) + { + eStep(); + trainLogLikelihood = sum(trainLogLikelihoods)[0]; + + if(iter >= maxIters - 1) + break; + + double trainLogLikelihoodDelta = trainLogLikelihood - prevTrainLogLikelihood; + if( iter != 0 && + (trainLogLikelihoodDelta < -DBL_EPSILON || + trainLogLikelihoodDelta < epsilon * std::fabs(trainLogLikelihood))) + break; + + mStep(); + + prevTrainLogLikelihood = trainLogLikelihood; + } + + if( trainLogLikelihood <= -DBL_MAX/10000. ) + { + clear(); + return false; + } + + // postprocess covs + covs.resize(nclusters); + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + if(covMatType == COV_MAT_SPHERICAL) + { + covs[clusterIndex].create(dim, dim, CV_64FC1); + setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at(0))); + } + else if(covMatType == COV_MAT_DIAGONAL) + { + covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]); + } + } + + if(labels.needed()) + trainLabels.copyTo(labels); + if(probs.needed()) + trainProbs.copyTo(probs); + if(logLikelihoods.needed()) + trainLogLikelihoods.copyTo(logLikelihoods); + + trainSamples.release(); + trainProbs.release(); + trainLabels.release(); + trainLogLikelihoods.release(); + + return true; + } + + Vec2d computeProbabilities(const Mat& sample, Mat* probs, int ptype) const + { + // L_ik = log(weight_k) - 0.5 * log(|det(cov_k)|) - 0.5 *(x_i - mean_k)' cov_k^(-1) (x_i - mean_k)] + // q = arg(max_k(L_ik)) + // probs_ik = exp(L_ik - L_iq) / (1 + sum_j!=q (exp(L_ij - L_iq)) + // see Alex Smola's blog http://blog.smola.org/page/2 for + // details on the log-sum-exp trick + + int stype = sample.type(); + CV_Assert(!means.empty()); + CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F)); + CV_Assert(sample.size() == Size(means.cols, 1)); + + int dim = sample.cols; + + Mat L(1, nclusters, CV_64FC1), centeredSample(1, dim, CV_64F); + int i, label = 0; + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + const double* mptr = means.ptr(clusterIndex); + double* dptr = centeredSample.ptr(); + if( stype == CV_32F ) + { + const float* sptr = sample.ptr(); + for( i = 0; i < dim; i++ ) + dptr[i] = sptr[i] - mptr[i]; + } + else + { + const double* sptr = sample.ptr(); + for( i = 0; i < dim; i++ ) + dptr[i] = sptr[i] - mptr[i]; + } + + Mat rotatedCenteredSample = covMatType != COV_MAT_GENERIC ? + centeredSample : centeredSample * covsRotateMats[clusterIndex]; + + double Lval = 0; + for(int di = 0; di < dim; di++) + { + double w = invCovsEigenValues[clusterIndex].at(covMatType != COV_MAT_SPHERICAL ? 
di : 0); + double val = rotatedCenteredSample.at(di); + Lval += w * val * val; + } + CV_DbgAssert(!logWeightDivDet.empty()); + L.at(clusterIndex) = logWeightDivDet.at(clusterIndex) - 0.5 * Lval; + + if(L.at(clusterIndex) > L.at(label)) + label = clusterIndex; + } + + double maxLVal = L.at(label); + double expDiffSum = 0; + for( i = 0; i < L.cols; i++ ) + { + double v = std::exp(L.at(i) - maxLVal); + L.at(i) = v; + expDiffSum += v; // sum_j(exp(L_ij - L_iq)) + } + + CV_Assert(expDiffSum > 0); + if(probs) + L.convertTo(*probs, ptype, 1./expDiffSum); + + Vec2d res; + res[0] = std::log(expDiffSum) + maxLVal - 0.5 * dim * CV_LOG2PI; + res[1] = label; + + return res; + } + + void eStep() + { + // Compute probs_ik from means_k, covs_k and weights_k. + trainProbs.create(trainSamples.rows, nclusters, CV_64FC1); + trainLabels.create(trainSamples.rows, 1, CV_32SC1); + trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1); + + computeLogWeightDivDet(); + + CV_DbgAssert(trainSamples.type() == CV_64FC1); + CV_DbgAssert(means.type() == CV_64FC1); + + for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++) + { + Mat sampleProbs = trainProbs.row(sampleIndex); + Vec2d res = computeProbabilities(trainSamples.row(sampleIndex), &sampleProbs, CV_64F); + trainLogLikelihoods.at(sampleIndex) = res[0]; + trainLabels.at(sampleIndex) = static_cast(res[1]); + } + } + + void mStep() + { + // Update means_k, covs_k and weights_k from probs_ik + int dim = trainSamples.cols; + + // Update weights + // not normalized first + reduce(trainProbs, weights, 0, REDUCE_SUM); + + // Update means + means.create(nclusters, dim, CV_64FC1); + means = Scalar(0); + + const double minPosWeight = trainSamples.rows * DBL_EPSILON; + double minWeight = DBL_MAX; + int minWeightClusterIndex = -1; + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + if(weights.at(clusterIndex) <= minPosWeight) + continue; + + if(weights.at(clusterIndex) < minWeight) + { + minWeight = weights.at(clusterIndex); + minWeightClusterIndex = clusterIndex; + } + + Mat clusterMean = means.row(clusterIndex); + for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++) + clusterMean += trainProbs.at(sampleIndex, clusterIndex) * trainSamples.row(sampleIndex); + clusterMean /= weights.at(clusterIndex); + } + + // Update covsEigenValues and invCovsEigenValues + covs.resize(nclusters); + covsEigenValues.resize(nclusters); + if(covMatType == COV_MAT_GENERIC) + covsRotateMats.resize(nclusters); + invCovsEigenValues.resize(nclusters); + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + if(weights.at(clusterIndex) <= minPosWeight) + continue; + + if(covMatType != COV_MAT_SPHERICAL) + covsEigenValues[clusterIndex].create(1, dim, CV_64FC1); + else + covsEigenValues[clusterIndex].create(1, 1, CV_64FC1); + + if(covMatType == COV_MAT_GENERIC) + covs[clusterIndex].create(dim, dim, CV_64FC1); + + Mat clusterCov = covMatType != COV_MAT_GENERIC ? 
+ covsEigenValues[clusterIndex] : covs[clusterIndex]; + + clusterCov = Scalar(0); + + Mat centeredSample; + for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++) + { + centeredSample = trainSamples.row(sampleIndex) - means.row(clusterIndex); + + if(covMatType == COV_MAT_GENERIC) + clusterCov += trainProbs.at(sampleIndex, clusterIndex) * centeredSample.t() * centeredSample; + else + { + double p = trainProbs.at(sampleIndex, clusterIndex); + for(int di = 0; di < dim; di++ ) + { + double val = centeredSample.at(di); + clusterCov.at(covMatType != COV_MAT_SPHERICAL ? di : 0) += p*val*val; + } + } + } + + if(covMatType == COV_MAT_SPHERICAL) + clusterCov /= dim; + + clusterCov /= weights.at(clusterIndex); + + // Update covsRotateMats for COV_MAT_GENERIC only + if(covMatType == COV_MAT_GENERIC) + { + SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV); + covsEigenValues[clusterIndex] = svd.w; + covsRotateMats[clusterIndex] = svd.u; + } + + max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]); + + // update invCovsEigenValues + invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex]; + } + + for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) + { + if(weights.at(clusterIndex) <= minPosWeight) + { + Mat clusterMean = means.row(clusterIndex); + means.row(minWeightClusterIndex).copyTo(clusterMean); + covs[minWeightClusterIndex].copyTo(covs[clusterIndex]); + covsEigenValues[minWeightClusterIndex].copyTo(covsEigenValues[clusterIndex]); + if(covMatType == COV_MAT_GENERIC) + covsRotateMats[minWeightClusterIndex].copyTo(covsRotateMats[clusterIndex]); + invCovsEigenValues[minWeightClusterIndex].copyTo(invCovsEigenValues[clusterIndex]); + } + } + + // Normalize weights + weights /= trainSamples.rows; + } + + void write_params(FileStorage& fs) const + { + fs << "nclusters" << nclusters; + fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") : + covMatType == COV_MAT_DIAGONAL ? String("diagonal") : + covMatType == COV_MAT_GENERIC ? String("generic") : + format("unknown_%d", covMatType)); + writeTermCrit(fs, termCrit); + } + + void write(FileStorage& fs) const CV_OVERRIDE + { + writeFormat(fs); + fs << "training_params" << "{"; + write_params(fs); + fs << "}"; + fs << "weights" << weights; + fs << "means" << means; + + size_t i, n = covs.size(); + + fs << "covs" << "["; + for( i = 0; i < n; i++ ) + fs << covs[i]; + fs << "]"; + } + + void read_params(const FileNode& fn) + { + nclusters = (int)fn["nclusters"]; + String s = (String)fn["cov_mat_type"]; + covMatType = s == "spherical" ? COV_MAT_SPHERICAL : + s == "diagonal" ? COV_MAT_DIAGONAL : + s == "generic" ? 
COV_MAT_GENERIC : -1; + CV_Assert(covMatType >= 0); + termCrit = readTermCrit(fn); + } + + void read(const FileNode& fn) CV_OVERRIDE + { + clear(); + read_params(fn["training_params"]); + + fn["weights"] >> weights; + fn["means"] >> means; + + FileNode cfn = fn["covs"]; + FileNodeIterator cfn_it = cfn.begin(); + int i, n = (int)cfn.size(); + covs.resize(n); + + for( i = 0; i < n; i++, ++cfn_it ) + (*cfn_it) >> covs[i]; + + decomposeCovs(); + computeLogWeightDivDet(); + } + + Mat getWeights() const CV_OVERRIDE { return weights; } + Mat getMeans() const CV_OVERRIDE { return means; } + void getCovs(std::vector& _covs) const CV_OVERRIDE + { + _covs.resize(covs.size()); + std::copy(covs.begin(), covs.end(), _covs.begin()); + } + + // all inner matrices have type CV_64FC1 + Mat trainSamples; + Mat trainProbs; + Mat trainLogLikelihoods; + Mat trainLabels; + + Mat weights; + Mat means; + std::vector covs; + + std::vector covsEigenValues; + std::vector covsRotateMats; + std::vector invCovsEigenValues; + Mat logWeightDivDet; +}; + +Ptr EM::create() +{ + return makePtr(); +} + +Ptr EM::load(const String& filepath, const String& nodeName) +{ + return Algorithm::load(filepath, nodeName); +} + +} +} // namespace cv + +/* End of file. */ diff --git a/modules/ml/src/gbt.cpp b/modules/ml/src/gbt.cpp new file mode 100644 index 00000000000..57f2eb176b9 --- /dev/null +++ b/modules/ml/src/gbt.cpp @@ -0,0 +1,1373 @@ + +#include "precomp.hpp" +#include + +#if 0 + +#define pCvSeq CvSeq* +#define pCvDTreeNode CvDTreeNode* + +//=========================================================================== +//----------------------------- CvGBTreesParams ----------------------------- +//=========================================================================== + +CvGBTreesParams::CvGBTreesParams() + : CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 ) +{ + weak_count = 200; + loss_function_type = CvGBTrees::SQUARED_LOSS; + subsample_portion = 0.8f; + shrinkage = 0.01f; +} + +//=========================================================================== + +CvGBTreesParams::CvGBTreesParams( int _loss_function_type, int _weak_count, + float _shrinkage, float _subsample_portion, + int _max_depth, bool _use_surrogates ) + : CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 ) +{ + loss_function_type = _loss_function_type; + weak_count = _weak_count; + shrinkage = _shrinkage; + subsample_portion = _subsample_portion; + max_depth = _max_depth; + use_surrogates = _use_surrogates; +} + +//=========================================================================== +//------------------------------- CvGBTrees --------------------------------- +//=========================================================================== + +CvGBTrees::CvGBTrees() +{ + data = 0; + weak = 0; + default_model_name = "my_boost_tree"; + orig_response = sum_response = sum_response_tmp = 0; + subsample_train = subsample_test = 0; + missing = sample_idx = 0; + class_labels = 0; + class_count = 1; + delta = 0.0f; + + clear(); +} + +//=========================================================================== + +int CvGBTrees::get_len(const CvMat* mat) const +{ + return (mat->cols > mat->rows) ? 
mat->cols : mat->rows; +} + +//=========================================================================== + +void CvGBTrees::clear() +{ + if( weak ) + { + CvSeqReader reader; + CvSlice slice = CV_WHOLE_SEQ; + CvDTree* tree; + + //data->shared = false; + for (int i=0; iclear(); + delete tree; + tree = 0; + } + } + } + for (int i=0; istorage) ); + delete[] weak; + } + if (data) + { + data->shared = false; + delete data; + } + weak = 0; + data = 0; + delta = 0.0f; + cvReleaseMat( &orig_response ); + cvReleaseMat( &sum_response ); + cvReleaseMat( &sum_response_tmp ); + cvReleaseMat( &subsample_train ); + cvReleaseMat( &subsample_test ); + cvReleaseMat( &sample_idx ); + cvReleaseMat( &missing ); + cvReleaseMat( &class_labels ); +} + +//=========================================================================== + +CvGBTrees::~CvGBTrees() +{ + clear(); +} + +//=========================================================================== + +CvGBTrees::CvGBTrees( const CvMat* _train_data, int _tflag, + const CvMat* _responses, const CvMat* _var_idx, + const CvMat* _sample_idx, const CvMat* _var_type, + const CvMat* _missing_mask, CvGBTreesParams _params ) +{ + weak = 0; + data = 0; + default_model_name = "my_boost_tree"; + orig_response = sum_response = sum_response_tmp = 0; + subsample_train = subsample_test = 0; + missing = sample_idx = 0; + class_labels = 0; + class_count = 1; + delta = 0.0f; + + train( _train_data, _tflag, _responses, _var_idx, _sample_idx, + _var_type, _missing_mask, _params ); +} + +//=========================================================================== + +bool CvGBTrees::problem_type() const +{ + switch (params.loss_function_type) + { + case DEVIANCE_LOSS: return false; + default: return true; + } +} + +//=========================================================================== + +bool +CvGBTrees::train( CvMLData* _data, CvGBTreesParams _params, bool update ) +{ + bool result; + result = train ( _data->get_values(), CV_ROW_SAMPLE, + _data->get_responses(), _data->get_var_idx(), + _data->get_train_sample_idx(), _data->get_var_types(), + _data->get_missing(), _params, update); + //update is not supported + return result; +} + +//=========================================================================== + + +bool +CvGBTrees::train( const CvMat* _train_data, int _tflag, + const CvMat* _responses, const CvMat* _var_idx, + const CvMat* _sample_idx, const CvMat* _var_type, + const CvMat* _missing_mask, + CvGBTreesParams _params, bool /*_update*/ ) //update is not supported +{ + CvMemStorage* storage = 0; + + params = _params; + bool is_regression = problem_type(); + + clear(); + /* + n - count of samples + m - count of variables + */ + int n = _train_data->rows; + int m = _train_data->cols; + if (_tflag != CV_ROW_SAMPLE) + { + int tmp; + CV_SWAP(n,m,tmp); + } + + CvMat* new_responses = cvCreateMat( n, 1, CV_32F); + cvZero(new_responses); + + data = new CvDTreeTrainData( _train_data, _tflag, new_responses, _var_idx, + _sample_idx, _var_type, _missing_mask, _params, true, true ); + if (_missing_mask) + { + missing = cvCreateMat(_missing_mask->rows, _missing_mask->cols, + _missing_mask->type); + cvCopy( _missing_mask, missing); + } + + orig_response = cvCreateMat( 1, n, CV_32F ); + int step = (_responses->cols > _responses->rows) ? 
1 : _responses->step / CV_ELEM_SIZE(_responses->type); + switch (CV_MAT_TYPE(_responses->type)) + { + case CV_32FC1: + { + for (int i=0; idata.fl[i] = _responses->data.fl[i*step]; + }; break; + case CV_32SC1: + { + for (int i=0; idata.fl[i] = (float) _responses->data.i[i*step]; + }; break; + default: + CV_Error(CV_StsUnmatchedFormats, "Response should be a 32fC1 or 32sC1 vector."); + } + + if (!is_regression) + { + class_count = 0; + unsigned char * mask = new unsigned char[n]; + memset(mask, 0, n); + // compute the count of different output classes + for (int i=0; idata.fl[j]) == int(orig_response->data.fl[i])) + mask[j] = 1; + } + delete[] mask; + + class_labels = cvCreateMat(1, class_count, CV_32S); + class_labels->data.i[0] = int(orig_response->data.fl[0]); + int j = 1; + for (int i=1; idata.fl[i]) - class_labels->data.i[k])) + k++; + if (k == j) + { + class_labels->data.i[k] = int(orig_response->data.fl[i]); + j++; + } + } + } + + // inside gbt learning process only regression decision trees are built + data->is_classifier = false; + + // preproccessing sample indices + if (_sample_idx) + { + int sample_idx_len = get_len(_sample_idx); + + switch (CV_MAT_TYPE(_sample_idx->type)) + { + case CV_32SC1: + { + sample_idx = cvCreateMat( 1, sample_idx_len, CV_32S ); + for (int i=0; idata.i[i] = _sample_idx->data.i[i]; + std::sort(sample_idx->data.i, sample_idx->data.i + sample_idx_len); + } break; + case CV_8S: + case CV_8U: + { + int active_samples_count = 0; + for (int i=0; idata.ptr[i] ); + sample_idx = cvCreateMat( 1, active_samples_count, CV_32S ); + active_samples_count = 0; + for (int i=0; idata.ptr[i] )) + sample_idx->data.i[active_samples_count++] = i; + + } break; + default: CV_Error(CV_StsUnmatchedFormats, "_sample_idx should be a 32sC1, 8sC1 or 8uC1 vector."); + } + } + else + { + sample_idx = cvCreateMat( 1, n, CV_32S ); + for (int i=0; idata.i[i] = i; + } + + sum_response = cvCreateMat(class_count, n, CV_32F); + sum_response_tmp = cvCreateMat(class_count, n, CV_32F); + cvZero(sum_response); + + delta = 0.0f; + /* + in the case of a regression problem the initial guess (the zero term + in the sum) is set to the mean of all the training responses, that is + the best constant model + */ + if (is_regression) base_value = find_optimal_value(sample_idx); + /* + in the case of a classification problem the initial guess (the zero term + in the sum) is set to zero for all the trees sequences + */ + else base_value = 0.0f; + /* + current predicition on all training samples is set to be + equal to the base_value + */ + cvSet( sum_response, cvScalar(base_value) ); + + weak = new pCvSeq[class_count]; + for (int i=0; itrain( data, subsample_train ); + change_values(tree, k); + + if (subsample_test) + { + CvMat x; + CvMat x_miss; + int* sample_data = sample_idx->data.i; + int* subsample_data = subsample_test->data.i; + int s_step = (sample_idx->cols > sample_idx->rows) ? 
1 + : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); + for (int j=0; jtrain_data, &x, idx); + else + cvGetCol( data->train_data, &x, idx); + + if (missing) + { + if (_tflag == CV_ROW_SAMPLE) + cvGetRow( missing, &x_miss, idx); + else + cvGetCol( missing, &x_miss, idx); + + res = (float)tree->predict(&x, &x_miss)->value; + } + else + { + res = (float)tree->predict(&x)->value; + } + sum_response_tmp->data.fl[idx + k*n] = + sum_response->data.fl[idx + k*n] + + params.shrinkage * res; + } + } + + cvSeqPush( weak[k], &tree ); + tree = 0; + } // k=0..class_count + CvMat* tmp; + tmp = sum_response_tmp; + sum_response_tmp = sum_response; + sum_response = tmp; + tmp = 0; + } // i=0..params.weak_count + + delete[] idx_data; + cvReleaseMat(&new_responses); + data->free_train_data(); + + return true; + +} // CvGBTrees::train(...) + +//=========================================================================== + +inline float Sign(float x) + { + if (x<0.0f) return -1.0f; + else if (x>0.0f) return 1.0f; + return 0.0f; + } + +//=========================================================================== + +void CvGBTrees::find_gradient(const int k) +{ + int* sample_data = sample_idx->data.i; + int* subsample_data = subsample_train->data.i; + float* grad_data = data->responses->data.fl; + float* resp_data = orig_response->data.fl; + float* current_data = sum_response->data.fl; + + switch (params.loss_function_type) + // loss_function_type in + // {SQUARED_LOSS, ABSOLUTE_LOSS, HUBER_LOSS, DEVIANCE_LOSS} + { + case SQUARED_LOSS: + { + for (int i=0; icols > sample_idx->rows) ? 1 + : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); + int idx = *(sample_data + subsample_data[i]*s_step); + grad_data[idx] = resp_data[idx] - current_data[idx]; + } + }; break; + + case ABSOLUTE_LOSS: + { + for (int i=0; icols > sample_idx->rows) ? 1 + : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); + int idx = *(sample_data + subsample_data[i]*s_step); + grad_data[idx] = Sign(resp_data[idx] - current_data[idx]); + } + }; break; + + case HUBER_LOSS: + { + float alpha = 0.2f; + int n = get_len(subsample_train); + int s_step = (sample_idx->cols > sample_idx->rows) ? 1 + : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); + + float* residuals = new float[n]; + for (int i=0; i delta) ? delta*Sign(r) : r; + } + delete[] residuals; + + }; break; + + case DEVIANCE_LOSS: + { + for (int i=0; icols > sample_idx->rows) ? 1 + : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); + int idx = *(sample_data + subsample_data[i]*s_step); + + for (int j=0; jcols]; + res = exp(res); + if (j == k) exp_fk = res; + exp_sfi += res; + } + int orig_label = int(resp_data[idx]); + /* + grad_data[idx] = (float)(!(k-class_labels->data.i[orig_label]+1)) - + (float)(exp_fk / exp_sfi); + */ + int ensemble_label = 0; + while (class_labels->data.i[ensemble_label] - orig_label) + ensemble_label++; + + grad_data[idx] = (float)(!(k-ensemble_label)) - + (float)(exp_fk / exp_sfi); + } + }; break; + + default: break; + } + +} // CvGBTrees::find_gradient(...) + +//=========================================================================== + +void CvGBTrees::change_values(CvDTree* tree, const int _k) +{ + CvDTreeNode** predictions = new pCvDTreeNode[get_len(subsample_train)]; + + int* sample_data = sample_idx->data.i; + int* subsample_data = subsample_train->data.i; + int s_step = (sample_idx->cols > sample_idx->rows) ? 
1 + : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); + + CvMat x; + CvMat miss_x; + + for (int i=0; itflag == CV_ROW_SAMPLE) + cvGetRow( data->train_data, &x, idx); + else + cvGetCol( data->train_data, &x, idx); + + if (missing) + { + if (data->tflag == CV_ROW_SAMPLE) + cvGetRow( missing, &miss_x, idx); + else + cvGetCol( missing, &miss_x, idx); + + predictions[i] = tree->predict(&x, &miss_x); + } + else + predictions[i] = tree->predict(&x); + } + + + CvDTreeNode** leaves; + int leaves_count = 0; + leaves = GetLeaves( tree, leaves_count); + + for (int i=0; ivalue = 0.0; + continue; + } + + CvMat* leaf_idx = cvCreateMat(1, samples_in_leaf, CV_32S); + int* leaf_idx_data = leaf_idx->data.i; + + for (int j=0; jvalue = value; + + leaf_idx_data = leaf_idx->data.i; + + int len = sum_response_tmp->cols; + for (int j=0; jdata.fl[idx + _k*len] = + sum_response->data.fl[idx + _k*len] + + params.shrinkage * value; + } + leaf_idx_data = 0; + cvReleaseMat(&leaf_idx); + } + + // releasing the memory + for (int i=0; icols; + CvMat leaf_idx; + leaf_idx.rows = 1; + + leaves = GetLeaves( tree, leaves_count); + + for (int i=0; isample_count; + int* leaf_idx_data = new int[n]; + data->get_sample_indices(leaves[i], leaf_idx_data); + //CvMat* leaf_idx = new CvMat(); + //cvInitMatHeader(leaf_idx, n, 1, CV_32S, leaf_idx_data); + leaf_idx.cols = n; + leaf_idx.data.i = leaf_idx_data; + + float value = find_optimal_value(&leaf_idx); + leaves[i]->value = value; + float val = params.shrinkage * value; + + + for (int j=0; jdata.fl[idx] = sum_response->data.fl[idx] + val; + } + //leaf_idx_data = 0; + //cvReleaseMat(&leaf_idx); + leaf_idx.data.i = 0; + //delete leaf_idx; + delete[] leaf_idx_data; + } + + // releasing the memory + for (int i=0; idata.i; + float* resp_data = orig_response->data.fl; + float* cur_data = sum_response->data.fl; + int n = get_len(_Idx); + + switch (params.loss_function_type) + // SQUARED_LOSS=0, ABSOLUTE_LOSS=1, HUBER_LOSS=3, DEVIANCE_LOSS=4 + { + case SQUARED_LOSS: + { + for (int i=0; i> 1; + float r_median = (n == n_half<<1) ? 
+ (residuals[n_half-1] + residuals[n_half]) / 2.0f : + residuals[n_half]; + + for (int i=0; iresponses->data.fl; + double tmp1 = 0; + double tmp2 = 0; + double tmp = 0; + for (int i=0; ileft != NULL) leaves_get(leaves, count, node->left); + if (node->right != NULL) leaves_get(leaves, count, node->right); + if ((node->left == NULL) && (node->right == NULL)) + leaves[count++] = node; +} + +//--------------------------------------------------------------------------- + +CvDTreeNode** CvGBTrees::GetLeaves( const CvDTree* dtree, int& len ) +{ + len = 0; + CvDTreeNode** leaves = new pCvDTreeNode[(size_t)1 << params.max_depth]; + leaves_get(leaves, len, const_cast(dtree->get_root())); + return leaves; +} + +//=========================================================================== + +void CvGBTrees::do_subsample() +{ + + int n = get_len(sample_idx); + int* idx = subsample_train->data.i; + + for (int i = 0; i < n; i++ ) + idx[i] = i; + + if (subsample_test) + for (int i = 0; i < n; i++) + { + int a = (*rng)(n); + int b = (*rng)(n); + int t; + CV_SWAP( idx[a], idx[b], t ); + } + +/* + int n = get_len(sample_idx); + if (subsample_train == 0) + subsample_train = cvCreateMat(1, n, CV_32S); + int* subsample_data = subsample_train->data.i; + for (int i=0; itype) != CV_32F) + return 0.0f; + if ((k >= 0) && (krows != 1)) + return 0.0f; + if ((k == -1) && (weak_responses->rows != class_count)) + return 0.0f; + if (weak_responses->cols != weak_count) + return 0.0f; + } + + float* sum = new float[class_count]; + memset(sum, 0, class_count*sizeof(float)); + + for (int i=0; ipredict(_sample, _missing)->value); + sum[i] += params.shrinkage * p; + if (weak_responses) + weak_responses->data.fl[i*weak_count+j] = p; + } + } + } + + for (int i=0; i=0) && (k max) + { + max = sum[i]; + class_label = i; + } + + delete[] sum; + + /* + int orig_class_label = -1; + for (int i=0; idata.i[i] == class_label+1) + orig_class_label = i; + */ + int orig_class_label = class_labels->data.i[class_label]; + + return float(orig_class_label); +} + + +class Tree_predictor : public cv::ParallelLoopBody +{ +private: + pCvSeq* weak; + float* sum; + const int k; + const CvMat* sample; + const CvMat* missing; + const float shrinkage; + + static cv::Mutex SumMutex; + + +public: + Tree_predictor() : weak(0), sum(0), k(0), sample(0), missing(0), shrinkage(1.0f) {} + Tree_predictor(pCvSeq* _weak, const int _k, const float _shrinkage, + const CvMat* _sample, const CvMat* _missing, float* _sum ) : + weak(_weak), sum(_sum), k(_k), sample(_sample), + missing(_missing), shrinkage(_shrinkage) + {} + + Tree_predictor( const Tree_predictor& p, cv::Split ) : + weak(p.weak), sum(p.sum), k(p.k), sample(p.sample), + missing(p.missing), shrinkage(p.shrinkage) + {} + + Tree_predictor& operator=( const Tree_predictor& ) + { return *this; } + + virtual void operator()(const cv::Range& range) const + { + CvSeqReader reader; + int begin = range.start; + int end = range.end; + + int weak_count = end - begin; + CvDTree* tree; + + for (int i=0; ipredict(sample, missing)->value); + } + } + + { + cv::AutoLock lock(SumMutex); + sum[i] += tmp_sum; + } + } + } // Tree_predictor::operator() + + virtual ~Tree_predictor() {} + +}; // class Tree_predictor + +cv::Mutex Tree_predictor::SumMutex; + + +float CvGBTrees::predict( const CvMat* _sample, const CvMat* _missing, + CvMat* /*weak_responses*/, CvSlice slice, int k) const + { + float result = 0.0f; + if (!weak) return 0.0f; + float* sum = new float[class_count]; + for (int i=0; i=0) && (k max) + { + max = sum[i]; + 
class_label = i; + } + + delete[] sum; + int orig_class_label = class_labels->data.i[class_label]; + + return float(orig_class_label); + } + + +//=========================================================================== + +void CvGBTrees::write_params( CvFileStorage* fs ) const +{ + const char* loss_function_type_str = + params.loss_function_type == SQUARED_LOSS ? "SquaredLoss" : + params.loss_function_type == ABSOLUTE_LOSS ? "AbsoluteLoss" : + params.loss_function_type == HUBER_LOSS ? "HuberLoss" : + params.loss_function_type == DEVIANCE_LOSS ? "DevianceLoss" : 0; + + + if( loss_function_type_str ) + cvWriteString( fs, "loss_function", loss_function_type_str ); + else + cvWriteInt( fs, "loss_function", params.loss_function_type ); + + cvWriteInt( fs, "ensemble_length", params.weak_count ); + cvWriteReal( fs, "shrinkage", params.shrinkage ); + cvWriteReal( fs, "subsample_portion", params.subsample_portion ); + //cvWriteInt( fs, "max_tree_depth", params.max_depth ); + //cvWriteString( fs, "use_surrogate_splits", params.use_surrogates ? "true" : "false"); + if (class_labels) cvWrite( fs, "class_labels", class_labels); + + data->is_classifier = !problem_type(); + data->write_params( fs ); + data->is_classifier = 0; +} + + +//=========================================================================== + +void CvGBTrees::read_params( CvFileStorage* fs, CvFileNode* fnode ) +{ + CV_FUNCNAME( "CvGBTrees::read_params" ); + __BEGIN__; + + + CvFileNode* temp; + + if( !fnode || !CV_NODE_IS_MAP(fnode->tag) ) + return; + + data = new CvDTreeTrainData(); + CV_CALL( data->read_params(fs, fnode)); + data->shared = true; + + params.max_depth = data->params.max_depth; + params.min_sample_count = data->params.min_sample_count; + params.max_categories = data->params.max_categories; + params.priors = data->params.priors; + params.regression_accuracy = data->params.regression_accuracy; + params.use_surrogates = data->params.use_surrogates; + + temp = cvGetFileNodeByName( fs, fnode, "loss_function" ); + if( !temp ) + EXIT; + + if( temp && CV_NODE_IS_STRING(temp->tag) ) + { + const char* loss_function_type_str = cvReadString( temp, "" ); + params.loss_function_type = strcmp( loss_function_type_str, "SquaredLoss" ) == 0 ? SQUARED_LOSS : + strcmp( loss_function_type_str, "AbsoluteLoss" ) == 0 ? ABSOLUTE_LOSS : + strcmp( loss_function_type_str, "HuberLoss" ) == 0 ? HUBER_LOSS : + strcmp( loss_function_type_str, "DevianceLoss" ) == 0 ? 
DEVIANCE_LOSS : -1; + } + else + params.loss_function_type = cvReadInt( temp, -1 ); + + + if( params.loss_function_type < SQUARED_LOSS || params.loss_function_type > DEVIANCE_LOSS || params.loss_function_type == 2) + CV_ERROR( CV_StsBadArg, "Unknown loss function" ); + + params.weak_count = cvReadIntByName( fs, fnode, "ensemble_length" ); + params.shrinkage = (float)cvReadRealByName( fs, fnode, "shrinkage", 0.1 ); + params.subsample_portion = (float)cvReadRealByName( fs, fnode, "subsample_portion", 1.0 ); + + if (data->is_classifier) + { + class_labels = (CvMat*)cvReadByName( fs, fnode, "class_labels" ); + if( class_labels && !CV_IS_MAT(class_labels)) + CV_ERROR( CV_StsParseError, "class_labels must stored as a matrix"); + } + data->is_classifier = 0; + + __END__; +} + + + + +void CvGBTrees::write( CvFileStorage* fs, const char* name ) const +{ + CV_FUNCNAME( "CvGBTrees::write" ); + + __BEGIN__; + + CvSeqReader reader; + int i; + cv::String s; + + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_ML_GBT ); + + if( !weak ) + CV_ERROR( CV_StsBadArg, "The model has not been trained yet" ); + + write_params( fs ); + cvWriteReal( fs, "base_value", base_value); + cvWriteInt( fs, "class_count", class_count); + + for ( int j=0; j < class_count; ++j ) + { + s = cv::format("trees_%d", j); + cvStartWriteStruct( fs, s.c_str(), CV_NODE_SEQ ); + + cvStartReadSeq( weak[j], &reader ); + + for( i = 0; i < weak[j]->total; i++ ) + { + CvDTree* tree; + CV_READ_SEQ_ELEM( tree, reader ); + cvStartWriteStruct( fs, 0, CV_NODE_MAP ); + tree->write( fs ); + cvEndWriteStruct( fs ); + } + + cvEndWriteStruct( fs ); + } + + cvEndWriteStruct( fs ); + + __END__; +} + + +//=========================================================================== + + +void CvGBTrees::read( CvFileStorage* fs, CvFileNode* node ) +{ + + CV_FUNCNAME( "CvGBTrees::read" ); + + __BEGIN__; + + CvSeqReader reader; + CvFileNode* trees_fnode; + CvMemStorage* storage; + int i, ntrees; + cv::String s; + + clear(); + read_params( fs, node ); + + if( !data ) + EXIT; + + base_value = (float)cvReadRealByName( fs, node, "base_value", 0.0 ); + class_count = cvReadIntByName( fs, node, "class_count", 1 ); + + weak = new pCvSeq[class_count]; + + + for (int j=0; jtag) ) + CV_ERROR( CV_StsParseError, " tag is missing" ); + + cvStartReadSeq( trees_fnode->data.seq, &reader ); + ntrees = trees_fnode->data.seq->total; + + if( ntrees != params.weak_count ) + CV_ERROR( CV_StsUnmatchedSizes, + "The number of trees stored does not match tag value" ); + + CV_CALL( storage = cvCreateMemStorage() ); + weak[j] = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvDTree*), storage ); + + for( i = 0; i < ntrees; i++ ) + { + CvDTree* tree = new CvDTree(); + CV_CALL(tree->read( fs, (CvFileNode*)reader.ptr, data )); + CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader ); + cvSeqPush( weak[j], &tree ); + } + } + + __END__; +} + +//=========================================================================== + +class Sample_predictor : public cv::ParallelLoopBody +{ +private: + const CvGBTrees* gbt; + float* predictions; + const CvMat* samples; + const CvMat* missing; + const CvMat* idx; + CvSlice slice; + +public: + Sample_predictor() : gbt(0), predictions(0), samples(0), missing(0), + idx(0), slice(CV_WHOLE_SEQ) + {} + + Sample_predictor(const CvGBTrees* _gbt, float* _predictions, + const CvMat* _samples, const CvMat* _missing, + const CvMat* _idx, CvSlice _slice=CV_WHOLE_SEQ) : + gbt(_gbt), predictions(_predictions), samples(_samples), + missing(_missing), idx(_idx), slice(_slice) + {} + 
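+    // Copy constructor taking the cv::Split tag: produces a shallow copy that shares the
+    // model pointer and the output prediction buffer with the original functor, so that
+    // cv::parallel_for_ can run disjoint sample ranges on worker threads (see calc_error).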
+ + Sample_predictor( const Sample_predictor& p, cv::Split ) : + gbt(p.gbt), predictions(p.predictions), + samples(p.samples), missing(p.missing), idx(p.idx), + slice(p.slice) + {} + + + virtual void operator()(const cv::Range& range) const + { + int begin = range.start; + int end = range.end; + + CvMat x; + CvMat miss; + + for (int i=begin; idata.i[i] : i; + cvGetRow(samples, &x, j); + if (!missing) + { + predictions[i] = gbt->predict_serial(&x,0,0,slice); + } + else + { + cvGetRow(missing, &miss, j); + predictions[i] = gbt->predict_serial(&x,&miss,0,slice); + } + } + } // Sample_predictor::operator() + + virtual ~Sample_predictor() {} + +}; // class Sample_predictor + + + +// type in {CV_TRAIN_ERROR, CV_TEST_ERROR} +float +CvGBTrees::calc_error( CvMLData* _data, int type, std::vector *resp ) +{ + + float err = 0.0f; + const CvMat* _sample_idx = (type == CV_TRAIN_ERROR) ? + _data->get_train_sample_idx() : + _data->get_test_sample_idx(); + const CvMat* response = _data->get_responses(); + + int n = _sample_idx ? get_len(_sample_idx) : 0; + n = (type == CV_TRAIN_ERROR && n == 0) ? _data->get_values()->rows : n; + + if (!n) + return -FLT_MAX; + + float* pred_resp = 0; + bool needsFreeing = false; + + if (resp) + { + resp->resize(n); + pred_resp = &((*resp)[0]); + } + else + { + pred_resp = new float[n]; + needsFreeing = true; + } + + Sample_predictor predictor = Sample_predictor(this, pred_resp, _data->get_values(), + _data->get_missing(), _sample_idx); + + cv::parallel_for_(cv::Range(0,n), predictor); + + int* sidx = _sample_idx ? _sample_idx->data.i : 0; + int r_step = CV_IS_MAT_CONT(response->type) ? + 1 : response->step / CV_ELEM_SIZE(response->type); + + + if ( !problem_type() ) + { + for( int i = 0; i < n; i++ ) + { + int si = sidx ? sidx[i] : i; + int d = fabs((double)pred_resp[i] - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1; + err += d; + } + err = err / (float)n * 100.0f; + } + else + { + for( int i = 0; i < n; i++ ) + { + int si = sidx ? sidx[i] : i; + float d = pred_resp[i] - response->data.fl[si*r_step]; + err += d*d; + } + err = err / (float)n; + } + + if (needsFreeing) + delete[]pred_resp; + + return err; +} + + +CvGBTrees::CvGBTrees( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx, + const cv::Mat& sampleIdx, const cv::Mat& varType, + const cv::Mat& missingDataMask, + CvGBTreesParams _params ) +{ + data = 0; + weak = 0; + default_model_name = "my_boost_tree"; + orig_response = sum_response = sum_response_tmp = 0; + subsample_train = subsample_test = 0; + missing = sample_idx = 0; + class_labels = 0; + class_count = 1; + delta = 0.0f; + + clear(); + + train(trainData, tflag, responses, varIdx, sampleIdx, varType, missingDataMask, _params, false); +} + +bool CvGBTrees::train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx, + const cv::Mat& sampleIdx, const cv::Mat& varType, + const cv::Mat& missingDataMask, + CvGBTreesParams _params, + bool update ) +{ + CvMat _trainData = trainData, _responses = responses; + CvMat _varIdx = varIdx, _sampleIdx = sampleIdx, _varType = varType; + CvMat _missingDataMask = missingDataMask; + + return train( &_trainData, tflag, &_responses, varIdx.empty() ? 0 : &_varIdx, + sampleIdx.empty() ? 0 : &_sampleIdx, varType.empty() ? 0 : &_varType, + missingDataMask.empty() ? 
0 : &_missingDataMask, _params, update); +} + +float CvGBTrees::predict( const cv::Mat& sample, const cv::Mat& _missing, + const cv::Range& slice, int k ) const +{ + CvMat _sample = sample, miss = _missing; + return predict(&_sample, _missing.empty() ? 0 : &miss, 0, + slice==cv::Range::all() ? CV_WHOLE_SEQ : cvSlice(slice.start, slice.end), k); +} + +#endif diff --git a/modules/ml/src/inner_functions.cpp b/modules/ml/src/inner_functions.cpp new file mode 100644 index 00000000000..6b3affcebc8 --- /dev/null +++ b/modules/ml/src/inner_functions.cpp @@ -0,0 +1,222 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+//
+//M*/
+
+#include "precomp.hpp"
+
+namespace cv { namespace ml {
+
+ParamGrid::ParamGrid() { minVal = maxVal = 0.; logStep = 1; }
+ParamGrid::ParamGrid(double _minVal, double _maxVal, double _logStep)
+{
+    CV_TRACE_FUNCTION();
+    minVal = std::min(_minVal, _maxVal);
+    maxVal = std::max(_minVal, _maxVal);
+    logStep = std::max(_logStep, 1.);
+}
+
+Ptr<ParamGrid> ParamGrid::create(double minval, double maxval, double logstep) {
+  return makePtr<ParamGrid>(minval, maxval, logstep);
+}
+
+bool StatModel::empty() const { return !isTrained(); }
+
+int StatModel::getVarCount() const { return 0; }
+
+bool StatModel::train(const Ptr<TrainData>& trainData, int )
+{
+    CV_TRACE_FUNCTION();
+    CV_Assert(!trainData.empty());
+    CV_Error(CV_StsNotImplemented, "");
+    return false;
+}
+
+bool StatModel::train( InputArray samples, int layout, InputArray responses )
+{
+    CV_TRACE_FUNCTION();
+    CV_Assert(!samples.empty());
+    return train(TrainData::create(samples, layout, responses));
+}
+
+class ParallelCalcError : public ParallelLoopBody
+{
+private:
+    const Ptr<TrainData>& data;
+    bool &testerr;
+    Mat &resp;
+    const StatModel &s;
+    vector<double> &errStrip;
+public:
+    ParallelCalcError(const Ptr<TrainData>& d, bool &t, Mat &_r, const StatModel &w, vector<double> &e) :
+        data(d),
+        testerr(t),
+        resp(_r),
+        s(w),
+        errStrip(e)
+    {
+    }
+    virtual void operator()(const Range& range) const CV_OVERRIDE
+    {
+        int idxErr = range.start;
+        CV_TRACE_FUNCTION_SKIP_NESTED();
+        Mat samples = data->getSamples();
+        Mat weights = testerr ? data->getTestSampleWeights() : data->getTrainSampleWeights();
+        int layout = data->getLayout();
+        Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
+        const int* sidx_ptr = sidx.ptr<int>();
+        bool isclassifier = s.isClassifier();
+        Mat responses = data->getResponses();
+        int responses_type = responses.type();
+        double err = 0;
+
+        const float* sw = weights.empty() ? 0 : weights.ptr<float>();
+        for (int i = range.start; i < range.end; i++)
+        {
+            int si = sidx_ptr ? sidx_ptr[i] : i;
+            double sweight = sw ? static_cast<double>(sw[i]) : 1.;
+            Mat sample = layout == ROW_SAMPLE ? samples.row(si) : samples.col(si);
+            float val = s.predict(sample);
+            float val0 = (responses_type == CV_32S) ? (float)responses.at<int>(si) : responses.at<float>(si);
+
+            if (isclassifier)
+                err += sweight * (fabs(val - val0) > FLT_EPSILON);
+            else
+                err += sweight * (val - val0)*(val - val0);
+            if (!resp.empty())
+                resp.at<float>(i) = val;
+        }
+
+        errStrip[idxErr] = err;
+    }
+    ParallelCalcError& operator=(const ParallelCalcError &) {
+        return *this;
+    }
+};
+
+float StatModel::calcError(const Ptr<TrainData>& data, bool testerr, OutputArray _resp) const
+{
+    CV_TRACE_FUNCTION_SKIP_NESTED();
+    CV_Assert(!data.empty());
+    Mat samples = data->getSamples();
+    Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
+    Mat weights = testerr ? data->getTestSampleWeights() : data->getTrainSampleWeights();
+    int n = (int)sidx.total();
+    bool isclassifier = isClassifier();
+    Mat responses = data->getResponses();
+
+    if (n == 0)
+    {
+        n = data->getNSamples();
+        weights = data->getTrainSampleWeights();
+        testerr = false;
+    }
+
+    if (n == 0)
+        return -FLT_MAX;
+
+    Mat resp;
+    if (_resp.needed())
+        resp.create(n, 1, CV_32F);
+
+    double err = 0;
+    vector<double> errStrip(n, 0.0);
+    ParallelCalcError x(data, testerr, resp, *this, errStrip);
+
+    parallel_for_(Range(0, n), x);
+
+    for (size_t i = 0; i < errStrip.size(); i++)
+        err += errStrip[i];
+    float weightSum = weights.empty() ? n : static_cast<float>(sum(weights)(0));
+    if (_resp.needed())
+        resp.copyTo(_resp);
+
+    return (float)(err / weightSum * (isclassifier ?
100 : 1)); +} + +/* Calculates upper triangular matrix S, where A is a symmetrical matrix A=S'*S */ +static void Cholesky( const Mat& A, Mat& S ) +{ + CV_TRACE_FUNCTION(); + CV_Assert(A.type() == CV_32F); + + S = A.clone(); + cv::Cholesky ((float*)S.ptr(),S.step, S.rows,NULL, 0, 0); + S = S.t(); + for (int i=1;i(i,j)=0; +} + +/* Generates from multivariate normal distribution, where - is an + average row vector, - symmetric covariation matrix */ +void randMVNormal( InputArray _mean, InputArray _cov, int nsamples, OutputArray _samples ) +{ + CV_TRACE_FUNCTION(); + // check mean vector and covariance matrix + Mat mean = _mean.getMat(), cov = _cov.getMat(); + int dim = (int)mean.total(); // dimensionality + CV_Assert(mean.rows == 1 || mean.cols == 1); + CV_Assert(cov.rows == dim && cov.cols == dim); + mean = mean.reshape(1,1); // ensure a row vector + + // generate n-samples of the same dimension, from ~N(0,1) + _samples.create(nsamples, dim, CV_32F); + Mat samples = _samples.getMat(); + randn(samples, Scalar::all(0), Scalar::all(1)); + + // decompose covariance using Cholesky: cov = U'*U + // (cov must be square, symmetric, and positive semi-definite matrix) + Mat utmat; + Cholesky(cov, utmat); + + // transform random numbers using specified mean and covariance + for( int i = 0; i < nsamples; i++ ) + { + Mat sample = samples.row(i); + sample = sample * utmat + mean; + } +} + +}} + +/* End of file */ diff --git a/modules/ml/src/kdtree.cpp b/modules/ml/src/kdtree.cpp new file mode 100644 index 00000000000..8cdab98f735 --- /dev/null +++ b/modules/ml/src/kdtree.cpp @@ -0,0 +1,530 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include "kdtree.hpp" + +namespace cv +{ +namespace ml +{ +// This is reimplementation of kd-trees from cvkdtree*.* by Xavier Delacour, cleaned-up and +// adopted to work with the new OpenCV data structures. + +// The algorithm is taken from: +// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search +// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog., +// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html + +const int MAX_TREE_DEPTH = 32; + +KDTree::KDTree() +{ + maxDepth = -1; + normType = NORM_L2; +} + +KDTree::KDTree(InputArray _points, bool _copyData) +{ + maxDepth = -1; + normType = NORM_L2; + build(_points, _copyData); +} + +KDTree::KDTree(InputArray _points, InputArray _labels, bool _copyData) +{ + maxDepth = -1; + normType = NORM_L2; + build(_points, _labels, _copyData); +} + +struct SubTree +{ + SubTree() : first(0), last(0), nodeIdx(0), depth(0) {} + SubTree(int _first, int _last, int _nodeIdx, int _depth) + : first(_first), last(_last), nodeIdx(_nodeIdx), depth(_depth) {} + int first; + int last; + int nodeIdx; + int depth; +}; + + +static float +medianPartition( size_t* ofs, int a, int b, const float* vals ) +{ + int k, a0 = a, b0 = b; + int middle = (a + b)/2; + while( b > a ) + { + int i0 = a, i1 = (a+b)/2, i2 = b; + float v0 = vals[ofs[i0]], v1 = vals[ofs[i1]], v2 = vals[ofs[i2]]; + int ip = v0 < v1 ? (v1 < v2 ? i1 : v0 < v2 ? i2 : i0) : + v0 < v2 ? (v1 == v0 ? i2 : i0): (v1 < v2 ? 
i2 : i1); + float pivot = vals[ofs[ip]]; + std::swap(ofs[ip], ofs[i2]); + + for( i1 = i0, i0--; i1 <= i2; i1++ ) + if( vals[ofs[i1]] <= pivot ) + { + i0++; + std::swap(ofs[i0], ofs[i1]); + } + if( i0 == middle ) + break; + if( i0 > middle ) + b = i0 - (b == i0); + else + a = i0; + } + + float pivot = vals[ofs[middle]]; + for( k = a0; k < middle; k++ ) + { + CV_Assert(vals[ofs[k]] <= pivot); + } + for( k = b0; k > middle; k-- ) + { + CV_Assert(vals[ofs[k]] >= pivot); + } + + return vals[ofs[middle]]; +} + +static void +computeSums( const Mat& points, const size_t* ofs, int a, int b, double* sums ) +{ + int i, j, dims = points.cols; + const float* data = points.ptr(0); + for( j = 0; j < dims; j++ ) + sums[j*2] = sums[j*2+1] = 0; + for( i = a; i <= b; i++ ) + { + const float* row = data + ofs[i]; + for( j = 0; j < dims; j++ ) + { + double t = row[j], s = sums[j*2] + t, s2 = sums[j*2+1] + t*t; + sums[j*2] = s; sums[j*2+1] = s2; + } + } +} + + +void KDTree::build(InputArray _points, bool _copyData) +{ + build(_points, noArray(), _copyData); +} + + +void KDTree::build(InputArray __points, InputArray __labels, bool _copyData) +{ + Mat _points = __points.getMat(), _labels = __labels.getMat(); + CV_Assert(_points.type() == CV_32F && !_points.empty()); + std::vector().swap(nodes); + + if( !_copyData ) + points = _points; + else + { + points.release(); + points.create(_points.size(), _points.type()); + } + + int i, j, n = _points.rows, ptdims = _points.cols, top = 0; + const float* data = _points.ptr(0); + float* dstdata = points.ptr(0); + size_t step = _points.step1(); + size_t dstep = points.step1(); + int ptpos = 0; + labels.resize(n); + const int* _labels_data = 0; + + if( !_labels.empty() ) + { + int nlabels = _labels.checkVector(1, CV_32S, true); + CV_Assert(nlabels == n); + _labels_data = _labels.ptr(); + } + + Mat sumstack(MAX_TREE_DEPTH*2, ptdims*2, CV_64F); + SubTree stack[MAX_TREE_DEPTH*2]; + + std::vector _ptofs(n); + size_t* ptofs = &_ptofs[0]; + + for( i = 0; i < n; i++ ) + ptofs[i] = i*step; + + nodes.push_back(Node()); + computeSums(points, ptofs, 0, n-1, sumstack.ptr(top)); + stack[top++] = SubTree(0, n-1, 0, 0); + int _maxDepth = 0; + + while( --top >= 0 ) + { + int first = stack[top].first, last = stack[top].last; + int depth = stack[top].depth, nidx = stack[top].nodeIdx; + int count = last - first + 1, dim = -1; + const double* sums = sumstack.ptr(top); + double invCount = 1./count, maxVar = -1.; + + if( count == 1 ) + { + int idx0 = (int)(ptofs[first]/step); + int idx = _copyData ? ptpos++ : idx0; + nodes[nidx].idx = ~idx; + if( _copyData ) + { + const float* src = data + ptofs[first]; + float* dst = dstdata + idx*dstep; + for( j = 0; j < ptdims; j++ ) + dst[j] = src[j]; + } + labels[idx] = _labels_data ? 
_labels_data[idx0] : idx0; + _maxDepth = std::max(_maxDepth, depth); + continue; + } + + // find the dimensionality with the biggest variance + for( j = 0; j < ptdims; j++ ) + { + double m = sums[j*2]*invCount; + double varj = sums[j*2+1]*invCount - m*m; + if( maxVar < varj ) + { + maxVar = varj; + dim = j; + } + } + + int left = (int)nodes.size(), right = left + 1; + nodes.push_back(Node()); + nodes.push_back(Node()); + nodes[nidx].idx = dim; + nodes[nidx].left = left; + nodes[nidx].right = right; + nodes[nidx].boundary = medianPartition(ptofs, first, last, data + dim); + + int middle = (first + last)/2; + double *lsums = (double*)sums, *rsums = lsums + ptdims*2; + computeSums(points, ptofs, middle+1, last, rsums); + for( j = 0; j < ptdims*2; j++ ) + lsums[j] = sums[j] - rsums[j]; + stack[top++] = SubTree(first, middle, left, depth+1); + stack[top++] = SubTree(middle+1, last, right, depth+1); + } + maxDepth = _maxDepth; +} + + +struct PQueueElem +{ + PQueueElem() : dist(0), idx(0) {} + PQueueElem(float _dist, int _idx) : dist(_dist), idx(_idx) {} + float dist; + int idx; +}; + + +int KDTree::findNearest(InputArray _vec, int K, int emax, + OutputArray _neighborsIdx, OutputArray _neighbors, + OutputArray _dist, OutputArray _labels) const + +{ + Mat vecmat = _vec.getMat(); + CV_Assert( vecmat.isContinuous() && vecmat.type() == CV_32F && vecmat.total() == (size_t)points.cols ); + const float* vec = vecmat.ptr(); + K = std::min(K, points.rows); + int ptdims = points.cols; + + CV_Assert(K > 0 && (normType == NORM_L2 || normType == NORM_L1)); + + AutoBuffer _buf((K+1)*(sizeof(float) + sizeof(int))); + int* idx = (int*)_buf.data(); + float* dist = (float*)(idx + K + 1); + int i, j, ncount = 0, e = 0; + + int qsize = 0, maxqsize = 1 << 10; + AutoBuffer _pqueue(maxqsize*sizeof(PQueueElem)); + PQueueElem* pqueue = (PQueueElem*)_pqueue.data(); + emax = std::max(emax, 1); + + for( e = 0; e < emax; ) + { + float d, alt_d = 0.f; + int nidx; + + if( e == 0 ) + nidx = 0; + else + { + // take the next node from the priority queue + if( qsize == 0 ) + break; + nidx = pqueue[0].idx; + alt_d = pqueue[0].dist; + if( --qsize > 0 ) + { + std::swap(pqueue[0], pqueue[qsize]); + d = pqueue[0].dist; + for( i = 0;;) + { + int left = i*2 + 1, right = i*2 + 2; + if( left >= qsize ) + break; + if( right < qsize && pqueue[right].dist < pqueue[left].dist ) + left = right; + if( pqueue[left].dist >= d ) + break; + std::swap(pqueue[i], pqueue[left]); + i = left; + } + } + + if( ncount == K && alt_d > dist[ncount-1] ) + continue; + } + + for(;;) + { + if( nidx < 0 ) + break; + const Node& n = nodes[nidx]; + + if( n.idx < 0 ) + { + i = ~n.idx; + const float* row = points.ptr(i); + if( normType == NORM_L2 ) + for( j = 0, d = 0.f; j < ptdims; j++ ) + { + float t = vec[j] - row[j]; + d += t*t; + } + else + for( j = 0, d = 0.f; j < ptdims; j++ ) + d += std::abs(vec[j] - row[j]); + + dist[ncount] = d; + idx[ncount] = i; + for( i = ncount-1; i >= 0; i-- ) + { + if( dist[i] <= d ) + break; + std::swap(dist[i], dist[i+1]); + std::swap(idx[i], idx[i+1]); + } + ncount += ncount < K; + e++; + break; + } + + int alt; + if( vec[n.idx] <= n.boundary ) + { + nidx = n.left; + alt = n.right; + } + else + { + nidx = n.right; + alt = n.left; + } + + d = vec[n.idx] - n.boundary; + if( normType == NORM_L2 ) + d = d*d + alt_d; + else + d = std::abs(d) + alt_d; + // subtree prunning + if( ncount == K && d > dist[ncount-1] ) + continue; + // add alternative subtree to the priority queue + pqueue[qsize] = PQueueElem(d, alt); + for( i = qsize; i > 0; 
) + { + int parent = (i-1)/2; + if( parent < 0 || pqueue[parent].dist <= d ) + break; + std::swap(pqueue[i], pqueue[parent]); + i = parent; + } + qsize += qsize+1 < maxqsize; + } + } + + K = std::min(K, ncount); + if( _neighborsIdx.needed() ) + { + _neighborsIdx.create(K, 1, CV_32S, -1, true); + Mat nidx = _neighborsIdx.getMat(); + Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx); + } + if( _dist.needed() ) + sqrt(Mat(K, 1, CV_32F, dist), _dist); + + if( _neighbors.needed() || _labels.needed() ) + getPoints(Mat(K, 1, CV_32S, idx), _neighbors, _labels); + return K; +} + + +void KDTree::findOrthoRange(InputArray _lowerBound, + InputArray _upperBound, + OutputArray _neighborsIdx, + OutputArray _neighbors, + OutputArray _labels ) const +{ + int ptdims = points.cols; + Mat lowerBound = _lowerBound.getMat(), upperBound = _upperBound.getMat(); + CV_Assert( lowerBound.size == upperBound.size && + lowerBound.isContinuous() && + upperBound.isContinuous() && + lowerBound.type() == upperBound.type() && + lowerBound.type() == CV_32F && + lowerBound.total() == (size_t)ptdims ); + const float* L = lowerBound.ptr(); + const float* R = upperBound.ptr(); + + std::vector idx; + AutoBuffer _stack(MAX_TREE_DEPTH*2 + 1); + int* stack = _stack.data(); + int top = 0; + + stack[top++] = 0; + + while( --top >= 0 ) + { + int nidx = stack[top]; + if( nidx < 0 ) + break; + const Node& n = nodes[nidx]; + if( n.idx < 0 ) + { + int j, i = ~n.idx; + const float* row = points.ptr(i); + for( j = 0; j < ptdims; j++ ) + if( row[j] < L[j] || row[j] >= R[j] ) + break; + if( j == ptdims ) + idx.push_back(i); + continue; + } + if( L[n.idx] <= n.boundary ) + stack[top++] = n.left; + if( R[n.idx] > n.boundary ) + stack[top++] = n.right; + } + + if( _neighborsIdx.needed() ) + { + _neighborsIdx.create((int)idx.size(), 1, CV_32S, -1, true); + Mat nidx = _neighborsIdx.getMat(); + Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx); + } + getPoints( idx, _neighbors, _labels ); +} + + +void KDTree::getPoints(InputArray _idx, OutputArray _pts, OutputArray _labels) const +{ + Mat idxmat = _idx.getMat(), pts, labelsmat; + CV_Assert( idxmat.isContinuous() && idxmat.type() == CV_32S && + (idxmat.cols == 1 || idxmat.rows == 1) ); + const int* idx = idxmat.ptr(); + int* dstlabels = 0; + + int ptdims = points.cols; + int i, nidx = (int)idxmat.total(); + if( nidx == 0 ) + { + _pts.release(); + _labels.release(); + return; + } + + if( _pts.needed() ) + { + _pts.create( nidx, ptdims, points.type()); + pts = _pts.getMat(); + } + + if(_labels.needed()) + { + _labels.create(nidx, 1, CV_32S, -1, true); + labelsmat = _labels.getMat(); + CV_Assert( labelsmat.isContinuous() ); + dstlabels = labelsmat.ptr(); + } + const int* srclabels = !labels.empty() ? &labels[0] : 0; + + for( i = 0; i < nidx; i++ ) + { + int k = idx[i]; + CV_Assert( (unsigned)k < (unsigned)points.rows ); + const float* src = points.ptr(k); + if( !pts.empty() ) + std::copy(src, src + ptdims, pts.ptr(i)); + if( dstlabels ) + dstlabels[i] = srclabels ? srclabels[k] : k; + } +} + + +const float* KDTree::getPoint(int ptidx, int* label) const +{ + CV_Assert( (unsigned)ptidx < (unsigned)points.rows); + if(label) + *label = labels[ptidx]; + return points.ptr(ptidx); +} + + +int KDTree::dims() const +{ + return !points.empty() ? 
points.cols : 0; +} + +} +} diff --git a/modules/ml/src/kdtree.hpp b/modules/ml/src/kdtree.hpp new file mode 100644 index 00000000000..2975c7c75f7 --- /dev/null +++ b/modules/ml/src/kdtree.hpp @@ -0,0 +1,97 @@ +#ifndef KDTREE_H +#define KDTREE_H + +#include "precomp.hpp" + +namespace cv +{ +namespace ml +{ + +/*! + Fast Nearest Neighbor Search Class. + + The class implements D. Lowe BBF (Best-Bin-First) algorithm for the last + approximate (or accurate) nearest neighbor search in multi-dimensional spaces. + + First, a set of vectors is passed to KDTree::KDTree() constructor + or KDTree::build() method, where it is reordered. + + Then arbitrary vectors can be passed to KDTree::findNearest() methods, which + find the K nearest neighbors among the vectors from the initial set. + The user can balance between the speed and accuracy of the search by varying Emax + parameter, which is the number of leaves that the algorithm checks. + The larger parameter values yield more accurate results at the expense of lower processing speed. + + \code + KDTree T(points, false); + const int K = 3, Emax = INT_MAX; + int idx[K]; + float dist[K]; + T.findNearest(query_vec, K, Emax, idx, 0, dist); + CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]); + \endcode +*/ +class CV_EXPORTS_W KDTree +{ +public: + /*! + The node of the search tree. + */ + struct Node + { + Node() : idx(-1), left(-1), right(-1), boundary(0.f) {} + Node(int _idx, int _left, int _right, float _boundary) + : idx(_idx), left(_left), right(_right), boundary(_boundary) {} + + //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point) + int idx; + //! node indices of the left and the right branches + int left, right; + //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right + float boundary; + }; + + //! the default constructor + CV_WRAP KDTree(); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints = false); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, InputArray _labels, + bool copyAndReorderPoints = false); + //! builds the search tree + CV_WRAP void build(InputArray points, bool copyAndReorderPoints = false); + //! builds the search tree + CV_WRAP void build(InputArray points, InputArray labels, + bool copyAndReorderPoints = false); + //! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves + CV_WRAP int findNearest(InputArray vec, int K, int Emax, + OutputArray neighborsIdx, + OutputArray neighbors = noArray(), + OutputArray dist = noArray(), + OutputArray labels = noArray()) const; + //! finds all the points from the initial set that belong to the specified box + CV_WRAP void findOrthoRange(InputArray minBounds, + InputArray maxBounds, + OutputArray neighborsIdx, + OutputArray neighbors = noArray(), + OutputArray labels = noArray()) const; + //! returns vectors with the specified indices + CV_WRAP void getPoints(InputArray idx, OutputArray pts, + OutputArray labels = noArray()) const; + //! return a vector with the specified index + const float* getPoint(int ptidx, int* label = 0) const; + //! returns the search space dimensionality + CV_WRAP int dims() const; + + std::vector nodes; //!< all the tree nodes + CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set. + CV_PROP std::vector labels; //!< the parallel array of labels. + CV_PROP int maxDepth; //!< maximum depth of the search tree. 
Do not modify it + CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it +}; + +} +} + +#endif diff --git a/modules/ml/src/knearest.cpp b/modules/ml/src/knearest.cpp new file mode 100644 index 00000000000..3d8f9b5d2ed --- /dev/null +++ b/modules/ml/src/knearest.cpp @@ -0,0 +1,521 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
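+//
+// A note on the KDTree class pulled in below (kdtree.hpp / kdtree.cpp): build() splits
+// every node on the dimension with the largest variance and uses the median value along
+// that dimension as the boundary, while findNearest() implements the Best-Bin-First
+// search sketched in the header: a small binary heap of not-yet-visited subtrees, keyed
+// by their lower-bound distance to the query, is processed until at most Emax leaves
+// have been compared against the query. A minimal usage sketch (all variable names are
+// illustrative, not part of the API):
+//
+// \code
+//     Mat points(1000, 2, CV_32F);
+//     randu(points, Scalar::all(0), Scalar::all(1));
+//     cv::ml::KDTree tree(points, false);        // build the tree
+//     Mat query(1, 2, CV_32F, Scalar(0.5f));     // one 2-D query vector
+//     const int K = 5, Emax = 100;               // larger Emax = more accurate but slower
+//     Mat nidx, nn, ndist;
+//     tree.findNearest(query, K, Emax, nidx, nn, ndist);
+// \endcode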
+// +//M*/ + +#include "precomp.hpp" +#include "kdtree.hpp" + +/****************************************************************************************\ +* K-Nearest Neighbors Classifier * +\****************************************************************************************/ + +namespace cv { +namespace ml { + +const String NAME_BRUTE_FORCE = "opencv_ml_knn"; +const String NAME_KDTREE = "opencv_ml_knn_kd"; + +class Impl +{ +public: + Impl() + { + defaultK = 10; + isclassifier = true; + Emax = INT_MAX; + } + + virtual ~Impl() {} + virtual String getModelName() const = 0; + virtual int getType() const = 0; + virtual float findNearest( InputArray _samples, int k, + OutputArray _results, + OutputArray _neighborResponses, + OutputArray _dists ) const = 0; + + bool train( const Ptr& data, int flags ) + { + CV_Assert(!data.empty()); + Mat new_samples = data->getTrainSamples(ROW_SAMPLE); + Mat new_responses; + data->getTrainResponses().convertTo(new_responses, CV_32F); + bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty(); + + CV_Assert( new_samples.type() == CV_32F ); + + if( !update ) + { + clear(); + } + else + { + CV_Assert( new_samples.cols == samples.cols && + new_responses.cols == responses.cols ); + } + + samples.push_back(new_samples); + responses.push_back(new_responses); + + doTrain(samples); + + return true; + } + + virtual void doTrain(InputArray points) { CV_UNUSED(points); } + + void clear() + { + samples.release(); + responses.release(); + } + + void read( const FileNode& fn ) + { + clear(); + isclassifier = (int)fn["is_classifier"] != 0; + defaultK = (int)fn["default_k"]; + + fn["samples"] >> samples; + fn["responses"] >> responses; + } + + void write( FileStorage& fs ) const + { + fs << "is_classifier" << (int)isclassifier; + fs << "default_k" << defaultK; + + fs << "samples" << samples; + fs << "responses" << responses; + } + +public: + int defaultK; + bool isclassifier; + int Emax; + + Mat samples; + Mat responses; +}; + +class BruteForceImpl CV_FINAL : public Impl +{ +public: + String getModelName() const CV_OVERRIDE { return NAME_BRUTE_FORCE; } + int getType() const CV_OVERRIDE { return ml::KNearest::BRUTE_FORCE; } + + void findNearestCore( const Mat& _samples, int k, const Range& range, + Mat* results, Mat* neighbor_responses, + Mat* dists, float* presult ) const + { + int testidx, baseidx, i, j, d = samples.cols, nsamples = samples.rows; + int testcount = range.end - range.start; + + AutoBuffer buf(testcount*k*2); + float* dbuf = buf.data(); + float* rbuf = dbuf + testcount*k; + + const float* rptr = responses.ptr(); + + for( testidx = 0; testidx < testcount; testidx++ ) + { + for( i = 0; i < k; i++ ) + { + dbuf[testidx*k + i] = FLT_MAX; + rbuf[testidx*k + i] = 0.f; + } + } + + for( baseidx = 0; baseidx < nsamples; baseidx++ ) + { + for( testidx = 0; testidx < testcount; testidx++ ) + { + const float* v = samples.ptr(baseidx); + const float* u = _samples.ptr(testidx + range.start); + + float s = 0; + for( i = 0; i <= d - 4; i += 4 ) + { + float t0 = u[i] - v[i], t1 = u[i+1] - v[i+1]; + float t2 = u[i+2] - v[i+2], t3 = u[i+3] - v[i+3]; + s += t0*t0 + t1*t1 + t2*t2 + t3*t3; + } + + for( ; i < d; i++ ) + { + float t0 = u[i] - v[i]; + s += t0*t0; + } + + Cv32suf si; + si.f = (float)s; + Cv32suf* dd = (Cv32suf*)(&dbuf[testidx*k]); + float* nr = &rbuf[testidx*k]; + + for( i = k; i > 0; i-- ) + if( si.i >= dd[i-1].i ) + break; + if( i >= k ) + continue; + + for( j = k-2; j >= i; j-- ) + { + dd[j+1].i = dd[j].i; + nr[j+1] = nr[j]; + } + dd[i].i = si.i; 
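+                    // The running k-best list stays sorted by distance. The distances are
+                    // squared L2 values and therefore non-negative, and for non-negative
+                    // IEEE-754 floats the ordering of the raw bit patterns (reinterpreted
+                    // as integers through Cv32suf) matches the ordering of the float
+                    // values, so the insertion above compares plain ints instead of floats.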
+ nr[i] = rptr[baseidx]; + } + } + + float result = 0.f; + float inv_scale = 1.f/k; + + for( testidx = 0; testidx < testcount; testidx++ ) + { + if( neighbor_responses ) + { + float* nr = neighbor_responses->ptr(testidx + range.start); + for( j = 0; j < k; j++ ) + nr[j] = rbuf[testidx*k + j]; + for( ; j < k; j++ ) + nr[j] = 0.f; + } + + if( dists ) + { + float* dptr = dists->ptr(testidx + range.start); + for( j = 0; j < k; j++ ) + dptr[j] = dbuf[testidx*k + j]; + for( ; j < k; j++ ) + dptr[j] = 0.f; + } + + if( results || testidx+range.start == 0 ) + { + if( !isclassifier || k == 1 ) + { + float s = 0.f; + for( j = 0; j < k; j++ ) + s += rbuf[testidx*k + j]; + result = (float)(s*inv_scale); + } + else + { + float* rp = rbuf + testidx*k; + std::sort(rp, rp+k); + + result = rp[0]; + int prev_start = 0; + int best_count = 0; + for( j = 1; j <= k; j++ ) + { + if( j == k || rp[j] != rp[j-1] ) + { + int count = j - prev_start; + if( best_count < count ) + { + best_count = count; + result = rp[j-1]; + } + prev_start = j; + } + } + } + if( results ) + results->at(testidx + range.start) = result; + if( presult && testidx+range.start == 0 ) + *presult = result; + } + } + } + + struct findKNearestInvoker : public ParallelLoopBody + { + findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples, + Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult) + { + p = _p; + k = _k; + _samples = &__samples; + _results = __results; + _neighbor_responses = __neighbor_responses; + _dists = __dists; + presult = _presult; + } + + void operator()(const Range& range) const CV_OVERRIDE + { + int delta = std::min(range.end - range.start, 256); + for( int start = range.start; start < range.end; start += delta ) + { + p->findNearestCore( *_samples, k, Range(start, std::min(start + delta, range.end)), + _results, _neighbor_responses, _dists, presult ); + } + } + + const BruteForceImpl* p; + int k; + const Mat* _samples; + Mat* _results; + Mat* _neighbor_responses; + Mat* _dists; + float* presult; + }; + + float findNearest( InputArray _samples, int k, + OutputArray _results, + OutputArray _neighborResponses, + OutputArray _dists ) const CV_OVERRIDE + { + float result = 0.f; + CV_Assert( 0 < k ); + k = std::min(k, samples.rows); + + Mat test_samples = _samples.getMat(); + CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols ); + int testcount = test_samples.rows; + + if( testcount == 0 ) + { + _results.release(); + _neighborResponses.release(); + _dists.release(); + return 0.f; + } + + Mat res, nr, d, *pres = 0, *pnr = 0, *pd = 0; + if( _results.needed() ) + { + _results.create(testcount, 1, CV_32F); + pres = &(res = _results.getMat()); + } + if( _neighborResponses.needed() ) + { + _neighborResponses.create(testcount, k, CV_32F); + pnr = &(nr = _neighborResponses.getMat()); + } + if( _dists.needed() ) + { + _dists.create(testcount, k, CV_32F); + pd = &(d = _dists.getMat()); + } + + findKNearestInvoker invoker(this, k, test_samples, pres, pnr, pd, &result); + parallel_for_(Range(0, testcount), invoker); + //invoker(Range(0, testcount)); + return result; + } +}; + + +class KDTreeImpl CV_FINAL : public Impl +{ +public: + String getModelName() const CV_OVERRIDE { return NAME_KDTREE; } + int getType() const CV_OVERRIDE { return ml::KNearest::KDTREE; } + + void doTrain(InputArray points) CV_OVERRIDE + { + tr.build(points); + } + + float findNearest( InputArray _samples, int k, + OutputArray _results, + OutputArray _neighborResponses, + OutputArray _dists ) const 
CV_OVERRIDE + { + float result = 0.f; + CV_Assert( 0 < k ); + k = std::min(k, samples.rows); + + Mat test_samples = _samples.getMat(); + CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols ); + int testcount = test_samples.rows; + + if( testcount == 0 ) + { + _results.release(); + _neighborResponses.release(); + _dists.release(); + return 0.f; + } + + Mat res, nr, d; + if( _results.needed() ) + { + res = _results.getMat(); + } + if( _neighborResponses.needed() ) + { + nr = _neighborResponses.getMat(); + } + if( _dists.needed() ) + { + d = _dists.getMat(); + } + + for (int i=0; idefaultK; } + inline void setDefaultK(int val) CV_OVERRIDE { impl->defaultK = val; } + inline bool getIsClassifier() const CV_OVERRIDE { return impl->isclassifier; } + inline void setIsClassifier(bool val) CV_OVERRIDE { impl->isclassifier = val; } + inline int getEmax() const CV_OVERRIDE { return impl->Emax; } + inline void setEmax(int val) CV_OVERRIDE { impl->Emax = val; } + +public: + int getAlgorithmType() const CV_OVERRIDE + { + return impl->getType(); + } + void setAlgorithmType(int val) CV_OVERRIDE + { + if (val != BRUTE_FORCE && val != KDTREE) + val = BRUTE_FORCE; + + int k = getDefaultK(); + int e = getEmax(); + bool c = getIsClassifier(); + + initImpl(val); + + setDefaultK(k); + setEmax(e); + setIsClassifier(c); + } + +public: + KNearestImpl() + { + initImpl(BRUTE_FORCE); + } + ~KNearestImpl() + { + } + + bool isClassifier() const CV_OVERRIDE { return impl->isclassifier; } + bool isTrained() const CV_OVERRIDE { return !impl->samples.empty(); } + + int getVarCount() const CV_OVERRIDE { return impl->samples.cols; } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + writeFormat(fs); + impl->write(fs); + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + int algorithmType = BRUTE_FORCE; + if (fn.name() == NAME_KDTREE) + algorithmType = KDTREE; + initImpl(algorithmType); + impl->read(fn); + } + + float findNearest( InputArray samples, int k, + OutputArray results, + OutputArray neighborResponses=noArray(), + OutputArray dist=noArray() ) const CV_OVERRIDE + { + return impl->findNearest(samples, k, results, neighborResponses, dist); + } + + float predict(InputArray inputs, OutputArray outputs, int) const CV_OVERRIDE + { + return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() ); + } + + bool train( const Ptr& data, int flags ) CV_OVERRIDE + { + CV_Assert(!data.empty()); + return impl->train(data, flags); + } + + String getDefaultName() const CV_OVERRIDE { return impl->getModelName(); } + +protected: + void initImpl(int algorithmType) + { + if (algorithmType != KDTREE) + impl = makePtr(); + else + impl = makePtr(); + } + Ptr impl; +}; + +Ptr KNearest::create() +{ + return makePtr(); +} + +Ptr KNearest::load(const String& filepath) +{ + FileStorage fs; + fs.open(filepath, FileStorage::READ); + + Ptr knearest = makePtr(); + + ((KNearestImpl*)knearest.get())->read(fs.getFirstTopLevelNode()); + return knearest; +} + +} +} + +/* End of file */ diff --git a/modules/ml/src/lr.cpp b/modules/ml/src/lr.cpp new file mode 100644 index 00000000000..b43e1040454 --- /dev/null +++ b/modules/ml/src/lr.cpp @@ -0,0 +1,604 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
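+//
+// A minimal usage sketch for the cv::ml::KNearest classifier implemented above in
+// knearest.cpp (sample data and variable names are illustrative only; assumes
+// using namespace cv and using namespace cv::ml):
+//
+// \code
+//     Mat samples(100, 2, CV_32F), labels(100, 1, CV_32F);
+//     randu(samples, Scalar::all(0), Scalar::all(10));
+//     for( int i = 0; i < samples.rows; i++ )
+//         labels.at<float>(i) = samples.at<float>(i, 0) > 5.f ? 1.f : 0.f;  // two classes
+//     Ptr<KNearest> knn = KNearest::create();
+//     knn->setDefaultK(3);
+//     knn->setIsClassifier(true);
+//     knn->setAlgorithmType(KNearest::BRUTE_FORCE);      // or KNearest::KDTREE
+//     knn->train(TrainData::create(samples, ROW_SAMPLE, labels));
+//     Mat predicted;
+//     knn->findNearest(samples, 3, predicted);           // majority vote among 3 neighbours
+// \endcode
+//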
+// +// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com + +// +// This is a implementation of the Logistic Regression algorithm +// + +#include "precomp.hpp" + +using namespace std; + +namespace cv { +namespace ml { + +class LrParams +{ +public: + LrParams() + { + alpha = 0.001; + num_iters = 1000; + norm = LogisticRegression::REG_L2; + train_method = LogisticRegression::BATCH; + mini_batch_size = 1; + term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha); + } + + double alpha; //!< learning rate. + int num_iters; //!< number of iterations. + int norm; + int train_method; + int mini_batch_size; + TermCriteria term_crit; +}; + +class LogisticRegressionImpl CV_FINAL : public LogisticRegression +{ +public: + + LogisticRegressionImpl() { } + virtual ~LogisticRegressionImpl() {} + + inline double getLearningRate() const CV_OVERRIDE { return params.alpha; } + inline void setLearningRate(double val) CV_OVERRIDE { params.alpha = val; } + inline int getIterations() const CV_OVERRIDE { return params.num_iters; } + inline void setIterations(int val) CV_OVERRIDE { params.num_iters = val; } + inline int getRegularization() const CV_OVERRIDE { return params.norm; } + inline void setRegularization(int val) CV_OVERRIDE { params.norm = val; } + inline int getTrainMethod() const CV_OVERRIDE { return params.train_method; } + inline void setTrainMethod(int val) CV_OVERRIDE { params.train_method = val; } + inline int getMiniBatchSize() const CV_OVERRIDE { return params.mini_batch_size; } + inline void setMiniBatchSize(int val) CV_OVERRIDE { params.mini_batch_size = val; } + inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.term_crit; } + inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.term_crit = val; } + + virtual bool train( const Ptr& trainData, int=0 ) CV_OVERRIDE; + virtual float predict(InputArray samples, OutputArray results, int flags=0) const CV_OVERRIDE; + virtual void clear() CV_OVERRIDE; + virtual void write(FileStorage& fs) const CV_OVERRIDE; + virtual void read(const FileNode& fn) CV_OVERRIDE; + virtual Mat get_learnt_thetas() const CV_OVERRIDE { return learnt_thetas; } + virtual int getVarCount() const CV_OVERRIDE { return learnt_thetas.cols; } + virtual bool isTrained() const CV_OVERRIDE { return !learnt_thetas.empty(); } + virtual bool isClassifier() const CV_OVERRIDE { return true; } + virtual String getDefaultName() const CV_OVERRIDE { return "opencv_ml_lr"; } +protected: + Mat calc_sigmoid(const Mat& data) const; + double compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta); + void compute_gradient(const Mat& _data, const Mat& _labels, const Mat &_theta, const double _lambda, Mat & _gradient ); + Mat batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta); + Mat mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta); + bool set_label_map(const Mat& _labels_i); + Mat remap_labels(const Mat& _labels_i, const map& lmap) const; +protected: + LrParams params; + Mat learnt_thetas; + map forward_mapper; + map reverse_mapper; + Mat labels_o; + Mat labels_n; +}; + +Ptr LogisticRegression::create() +{ + return makePtr(); +} + +Ptr LogisticRegression::load(const String& filepath, const String& nodeName) +{ + return Algorithm::load(filepath, nodeName); +} + + +bool LogisticRegressionImpl::train(const Ptr& trainData, int) +{ + CV_TRACE_FUNCTION_SKIP_NESTED(); + CV_Assert(!trainData.empty()); + + // return value + bool ok = false; + clear(); + Mat 
_data_i = trainData->getSamples(); + Mat _labels_i = trainData->getResponses(); + + // check size and type of training data + CV_Assert( !_labels_i.empty() && !_data_i.empty()); + if(_labels_i.cols != 1) + { + CV_Error( CV_StsBadArg, "labels should be a column matrix" ); + } + if(_data_i.type() != CV_32FC1 || _labels_i.type() != CV_32FC1) + { + CV_Error( CV_StsBadArg, "data and labels must be a floating point matrix" ); + } + if(_labels_i.rows != _data_i.rows) + { + CV_Error( CV_StsBadArg, "number of rows in data and labels should be equal" ); + } + + // class labels + set_label_map(_labels_i); + Mat labels_l = remap_labels(_labels_i, this->forward_mapper); + int num_classes = (int) this->forward_mapper.size(); + if(num_classes < 2) + { + CV_Error( CV_StsBadArg, "data should have at least 2 classes" ); + } + + // add a column of ones to the data (bias/intercept term) + Mat data_t; + hconcat( cv::Mat::ones( _data_i.rows, 1, CV_32F ), _data_i, data_t ); + + // coefficient matrix (zero-initialized) + Mat thetas; + Mat init_theta = Mat::zeros(data_t.cols, 1, CV_32F); + + // fit the model (handles binary and multiclass cases) + Mat new_theta; + Mat labels; + if(num_classes == 2) + { + labels_l.convertTo(labels, CV_32F); + if(this->params.train_method == LogisticRegression::BATCH) + new_theta = batch_gradient_descent(data_t, labels, init_theta); + else + new_theta = mini_batch_gradient_descent(data_t, labels, init_theta); + thetas = new_theta.t(); + } + else + { + /* take each class and rename classes you will get a theta per class + as in multi class class scenario, we will have n thetas for n classes */ + thetas.create(num_classes, data_t.cols, CV_32F); + Mat labels_binary; + int ii = 0; + for(map::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it) + { + // one-vs-rest (OvR) scheme + labels_binary = (labels_l == it->second)/255; + labels_binary.convertTo(labels, CV_32F); + if(this->params.train_method == LogisticRegression::BATCH) + new_theta = batch_gradient_descent(data_t, labels, init_theta); + else + new_theta = mini_batch_gradient_descent(data_t, labels, init_theta); + hconcat(new_theta.t(), thetas.row(ii)); + ii += 1; + } + } + + // check that the estimates are stable and finite + this->learnt_thetas = thetas.clone(); + if( cvIsNaN( (double)sum(this->learnt_thetas)[0] ) ) + { + CV_Error( CV_StsBadArg, "check training parameters. 
Invalid training classifier" ); + } + + // success + ok = true; + return ok; +} + +float LogisticRegressionImpl::predict(InputArray samples, OutputArray results, int flags) const +{ + // check if learnt_mats array is populated + if(!this->isTrained()) + { + CV_Error( CV_StsBadArg, "classifier should be trained first" ); + } + + // coefficient matrix + Mat thetas; + if ( learnt_thetas.type() == CV_32F ) + { + thetas = learnt_thetas; + } + else + { + this->learnt_thetas.convertTo( thetas, CV_32F ); + } + CV_Assert(thetas.rows > 0); + + // data samples + Mat data = samples.getMat(); + if(data.type() != CV_32F) + { + CV_Error( CV_StsBadArg, "data must be of floating type" ); + } + + // add a column of ones to the data (bias/intercept term) + Mat data_t; + hconcat( cv::Mat::ones( data.rows, 1, CV_32F ), data, data_t ); + CV_Assert(data_t.cols == thetas.cols); + + // predict class labels for samples (handles binary and multiclass cases) + Mat labels_c; + Mat pred_m; + Mat temp_pred; + if(thetas.rows == 1) + { + // apply sigmoid function + temp_pred = calc_sigmoid(data_t * thetas.t()); + CV_Assert(temp_pred.cols==1); + pred_m = temp_pred.clone(); + + // if greater than 0.5, predict class 0 or predict class 1 + temp_pred = (temp_pred > 0.5f) / 255; + temp_pred.convertTo(labels_c, CV_32S); + } + else + { + // apply sigmoid function + pred_m.create(data_t.rows, thetas.rows, data.type()); + for(int i = 0; i < thetas.rows; i++) + { + temp_pred = calc_sigmoid(data_t * thetas.row(i).t()); + vconcat(temp_pred, pred_m.col(i)); + } + + // predict class with the maximum output + Point max_loc; + Mat labels; + for(int i = 0; i < pred_m.rows; i++) + { + temp_pred = pred_m.row(i); + minMaxLoc( temp_pred, NULL, NULL, NULL, &max_loc ); + labels.push_back(max_loc.x); + } + labels.convertTo(labels_c, CV_32S); + } + + // return label of the predicted class. class names can be 1,2,3,... + Mat pred_labs = remap_labels(labels_c, this->reverse_mapper); + pred_labs.convertTo(pred_labs, CV_32S); + + // return either the labels or the raw output + if ( results.needed() ) + { + if ( flags & StatModel::RAW_OUTPUT ) + { + pred_m.copyTo( results ); + } + else + { + pred_labs.copyTo(results); + } + } + + return ( pred_labs.empty() ? 
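+             // scalar return value: the predicted label of the first sample
+             // (0.f when nothing was predicted)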
0.f : static_cast(pred_labs.at(0)) ); +} + +Mat LogisticRegressionImpl::calc_sigmoid(const Mat& data) const +{ + CV_TRACE_FUNCTION(); + Mat dest; + exp(-data, dest); + return 1.0/(1.0+dest); +} + +double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta) +{ + CV_TRACE_FUNCTION(); + float llambda = 0; /*changed llambda from int to float to solve issue #7924*/ + int m; + int n; + double cost = 0; + double rparameter = 0; + Mat theta_b; + Mat theta_c; + Mat d_a; + Mat d_b; + + m = _data.rows; + n = _data.cols; + + theta_b = _init_theta(Range(1, n), Range::all()); + + if (params.norm != REG_DISABLE) + { + llambda = 1; + } + + if(this->params.norm == LogisticRegression::REG_L1) + { + rparameter = (llambda/(2*m)) * sum(theta_b)[0]; + } + else + { + // assuming it to be L2 by default + multiply(theta_b, theta_b, theta_c, 1); + rparameter = (llambda/(2*m)) * sum(theta_c)[0]; + } + + d_a = calc_sigmoid(_data * _init_theta); + log(d_a, d_a); + multiply(d_a, _labels, d_a); + + // use the fact that: log(1 - sigmoid(x)) = log(sigmoid(-x)) + d_b = calc_sigmoid(- _data * _init_theta); + log(d_b, d_b); + multiply(d_b, 1-_labels, d_b); + + cost = (-1.0/m) * (sum(d_a)[0] + sum(d_b)[0]); + cost = cost + rparameter; + + if(cvIsNaN( cost ) == 1) + { + CV_Error( CV_StsBadArg, "check training parameters. Invalid training classifier" ); + } + + return cost; +} + +struct LogisticRegressionImpl_ComputeDradient_Impl : ParallelLoopBody +{ + const Mat* data; + const Mat* theta; + const Mat* pcal_a; + Mat* gradient; + double lambda; + + LogisticRegressionImpl_ComputeDradient_Impl(const Mat& _data, const Mat &_theta, const Mat& _pcal_a, const double _lambda, Mat & _gradient) + : data(&_data) + , theta(&_theta) + , pcal_a(&_pcal_a) + , gradient(&_gradient) + , lambda(_lambda) + { + + } + + void operator()(const cv::Range& r) const CV_OVERRIDE + { + const Mat& _data = *data; + const Mat &_theta = *theta; + Mat & _gradient = *gradient; + const Mat & _pcal_a = *pcal_a; + const int m = _data.rows; + Mat pcal_ab; + + for (int ii = r.start; iiparams.alpha<=0) + { + CV_Error( CV_StsBadArg, "check training parameters (learning rate) for the classifier" ); + } + + if(this->params.num_iters <= 0) + { + CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" ); + } + + int llambda = 0; + int m; + Mat theta_p = _init_theta.clone(); + Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() ); + m = _data.rows; + + if (params.norm != REG_DISABLE) + { + llambda = 1; + } + + for(int i = 0;iparams.num_iters;i++) + { + // this seems to only be called to ensure that cost is not NaN + compute_cost(_data, _labels, theta_p); + + compute_gradient( _data, _labels, theta_p, llambda, gradient ); + + theta_p = theta_p - ( static_cast(this->params.alpha)/m)*gradient; + } + return theta_p; +} + +Mat LogisticRegressionImpl::mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta) +{ + // implements batch gradient descent + int lambda_l = 0; + int m; + int j = 0; + int size_b = this->params.mini_batch_size; + + if(this->params.mini_batch_size <= 0 || this->params.alpha == 0) + { + CV_Error( CV_StsBadArg, "check training parameters for the classifier" ); + } + + if(this->params.num_iters <= 0) + { + CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" ); + } + + Mat theta_p = _init_theta.clone(); + Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() ); + Mat data_d; + Mat labels_l; + + if (params.norm != 
REG_DISABLE) + { + lambda_l = 1; + } + + for(int i = 0;iparams.term_crit.maxCount;i++) + { + if(j+size_b<=_data.rows) + { + data_d = _data(Range(j,j+size_b), Range::all()); + labels_l = _labels(Range(j,j+size_b),Range::all()); + } + else + { + data_d = _data(Range(j, _data.rows), Range::all()); + labels_l = _labels(Range(j, _labels.rows),Range::all()); + } + + m = data_d.rows; + + // this seems to only be called to ensure that cost is not NaN + compute_cost(data_d, labels_l, theta_p); + + compute_gradient(data_d, labels_l, theta_p, lambda_l, gradient); + + theta_p = theta_p - ( static_cast(this->params.alpha)/m)*gradient; + + j += this->params.mini_batch_size; + + // if parsed through all data variables + if (j >= _data.rows) { + j = 0; + } + } + return theta_p; +} + +bool LogisticRegressionImpl::set_label_map(const Mat &_labels_i) +{ + // this function creates two maps to map user defined labels to program friendly labels two ways. + int ii = 0; + Mat labels; + + this->labels_o = Mat(0,1, CV_8U); + this->labels_n = Mat(0,1, CV_8U); + + _labels_i.convertTo(labels, CV_32S); + + for(int i = 0;iforward_mapper[labels.at(i)] += 1; + } + + for(map::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it) + { + this->forward_mapper[it->first] = ii; + this->labels_o.push_back(it->first); + this->labels_n.push_back(ii); + ii += 1; + } + + for(map::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it) + { + this->reverse_mapper[it->second] = it->first; + } + + return true; +} + +Mat LogisticRegressionImpl::remap_labels(const Mat& _labels_i, const map& lmap) const +{ + Mat labels; + _labels_i.convertTo(labels, CV_32S); + + Mat new_labels = Mat::zeros(labels.rows, labels.cols, labels.type()); + + CV_Assert( !lmap.empty() ); + + for(int i =0;i::const_iterator val = lmap.find(labels.at(i,0)); + CV_Assert(val != lmap.end()); + new_labels.at(i,0) = val->second; + } + return new_labels; +} + +void LogisticRegressionImpl::clear() +{ + this->learnt_thetas.release(); + this->labels_o.release(); + this->labels_n.release(); +} + +void LogisticRegressionImpl::write(FileStorage& fs) const +{ + // check if open + if(fs.isOpened() == 0) + { + CV_Error(CV_StsBadArg,"file can't open. Check file path"); + } + writeFormat(fs); + string desc = "Logistic Regression Classifier"; + fs<<"classifier"<params.alpha; + fs<<"iterations"<params.num_iters; + fs<<"norm"<params.norm; + fs<<"train_method"<params.train_method; + if(this->params.train_method == LogisticRegression::MINI_BATCH) + { + fs<<"mini_batch_size"<params.mini_batch_size; + } + fs<<"learnt_thetas"<learnt_thetas; + fs<<"n_labels"<labels_n; + fs<<"o_labels"<labels_o; +} + +void LogisticRegressionImpl::read(const FileNode& fn) +{ + // check if empty + if(fn.empty()) + { + CV_Error( CV_StsBadArg, "empty FileNode object" ); + } + + this->params.alpha = (double)fn["alpha"]; + this->params.num_iters = (int)fn["iterations"]; + this->params.norm = (int)fn["norm"]; + this->params.train_method = (int)fn["train_method"]; + + if(this->params.train_method == LogisticRegression::MINI_BATCH) + { + this->params.mini_batch_size = (int)fn["mini_batch_size"]; + } + + fn["learnt_thetas"] >> this->learnt_thetas; + fn["o_labels"] >> this->labels_o; + fn["n_labels"] >> this->labels_n; + + for(int ii =0;iiforward_mapper[labels_o.at(ii,0)] = labels_n.at(ii,0); + this->reverse_mapper[labels_n.at(ii,0)] = labels_o.at(ii,0); + } +} + +} +} + +/* End of file. 
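+
+   Summary of what the implementation above evaluates (the symbols are descriptive only,
+   they do not appear in the code):
+
+       h(x)     = 1 / (1 + exp(-theta' * x))                      -- calc_sigmoid()
+       J(theta) = -(1/m) * sum_i[ y_i*log h(x_i) + (1-y_i)*log(1 - h(x_i)) ]
+                  + (lambda/(2m)) * sum_{j>=1} theta_j^2          -- compute_cost(), REG_L2
+       theta   := theta - (alpha/m) * gradient                    -- batch / mini-batch step
+
+   The bias coefficient theta_0 is excluded from the regularization term, and with more
+   than two classes one such binary model is fitted per class (one-vs-rest). A minimal
+   usage sketch (illustrative data; assumes using namespace cv and using namespace cv::ml):
+
+   \code
+       Ptr<LogisticRegression> lr = LogisticRegression::create();
+       lr->setLearningRate(0.001);
+       lr->setIterations(100);
+       lr->setRegularization(LogisticRegression::REG_L2);
+       lr->setTrainMethod(LogisticRegression::BATCH);
+       lr->train(TrainData::create(samples, ROW_SAMPLE, labels)); // CV_32F samples and labels
+       Mat predicted;
+       lr->predict(samples, predicted);                           // CV_32S class labels
+   \endcode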
*/ diff --git a/modules/ml/src/nbayes.cpp b/modules/ml/src/nbayes.cpp new file mode 100644 index 00000000000..60dda0c7d47 --- /dev/null +++ b/modules/ml/src/nbayes.cpp @@ -0,0 +1,471 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
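+//
+// Implementation note for the classifier below: training accumulates, per class, the
+// element counts, sums and sums of products of the feature values, from which a mean
+// vector and a covariance matrix are derived for every class (a normal distribution is
+// fitted to each class). Covariances follow the usual unbiased estimate,
+//
+//     cov(j1,j2) = ( sum_s x[s][j1]*x[s][j2] - mean[j1]*sum[j2] - mean[j2]*sum[j1]
+//                    + n*mean[j1]*mean[j2] ) / (n - 1),
+//
+// each covariance matrix is eigen-decomposed with SVD, the eigenvalues are clamped from
+// below by FLT_EPSILON and then inverted, and log(det(cov)) is cached per class. At
+// prediction time the class score is log(det(cov)) plus the squared Mahalanobis distance
+// of the sample to the class mean, computed through the cached rotation and inverse
+// eigenvalues; the class with the smallest score wins, and exp(-0.5*score) is reported
+// per class as an unnormalized probability.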
+// +//M*/ + +#include "precomp.hpp" + +namespace cv { +namespace ml { + + +class NormalBayesClassifierImpl : public NormalBayesClassifier +{ +public: + NormalBayesClassifierImpl() + { + nallvars = 0; + } + + bool train( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_Assert(!trainData.empty()); + const float min_variation = FLT_EPSILON; + Mat responses = trainData->getNormCatResponses(); + Mat __cls_labels = trainData->getClassLabels(); + Mat __var_idx = trainData->getVarIdx(); + Mat samples = trainData->getTrainSamples(); + int nclasses = (int)__cls_labels.total(); + + int nvars = trainData->getNVars(); + int s, c1, c2, cls; + + int __nallvars = trainData->getNAllVars(); + bool update = (flags & UPDATE_MODEL) != 0; + + if( !update ) + { + nallvars = __nallvars; + count.resize(nclasses); + sum.resize(nclasses); + productsum.resize(nclasses); + avg.resize(nclasses); + inv_eigen_values.resize(nclasses); + cov_rotate_mats.resize(nclasses); + + for( cls = 0; cls < nclasses; cls++ ) + { + count[cls] = Mat::zeros( 1, nvars, CV_32SC1 ); + sum[cls] = Mat::zeros( 1, nvars, CV_64FC1 ); + productsum[cls] = Mat::zeros( nvars, nvars, CV_64FC1 ); + avg[cls] = Mat::zeros( 1, nvars, CV_64FC1 ); + inv_eigen_values[cls] = Mat::zeros( 1, nvars, CV_64FC1 ); + cov_rotate_mats[cls] = Mat::zeros( nvars, nvars, CV_64FC1 ); + } + + var_idx = __var_idx; + cls_labels = __cls_labels; + + c.create(1, nclasses, CV_64FC1); + } + else + { + // check that the new training data has the same dimensionality etc. + if( nallvars != __nallvars || + var_idx.size() != __var_idx.size() || + norm(var_idx, __var_idx, NORM_INF) != 0 || + cls_labels.size() != __cls_labels.size() || + norm(cls_labels, __cls_labels, NORM_INF) != 0 ) + CV_Error( CV_StsBadArg, + "The new training data is inconsistent with the original training data; varIdx and the class labels should be the same" ); + } + + Mat cov( nvars, nvars, CV_64FC1 ); + int nsamples = samples.rows; + + // process train data (count, sum , productsum) + for( s = 0; s < nsamples; s++ ) + { + cls = responses.at(s); + int* count_data = count[cls].ptr(); + double* sum_data = sum[cls].ptr(); + double* prod_data = productsum[cls].ptr(); + const float* train_vec = samples.ptr(s); + + for( c1 = 0; c1 < nvars; c1++, prod_data += nvars ) + { + double val1 = train_vec[c1]; + sum_data[c1] += val1; + count_data[c1]++; + for( c2 = c1; c2 < nvars; c2++ ) + prod_data[c2] += train_vec[c2]*val1; + } + } + + Mat vt; + + // calculate avg, covariance matrix, c + for( cls = 0; cls < nclasses; cls++ ) + { + double det = 1; + int i, j; + Mat& w = inv_eigen_values[cls]; + int* count_data = count[cls].ptr(); + double* avg_data = avg[cls].ptr(); + double* sum1 = sum[cls].ptr(); + + completeSymm(productsum[cls], 0); + + for( j = 0; j < nvars; j++ ) + { + int n = count_data[j]; + avg_data[j] = n ? sum1[j] / n : 0.; + } + + count_data = count[cls].ptr(); + avg_data = avg[cls].ptr(); + sum1 = sum[cls].ptr(); + + for( i = 0; i < nvars; i++ ) + { + double* avg2_data = avg[cls].ptr(); + double* sum2 = sum[cls].ptr(); + double* prod_data = productsum[cls].ptr(i); + double* cov_data = cov.ptr(i); + double s1val = sum1[i]; + double avg1 = avg_data[i]; + int _count = count_data[i]; + + for( j = 0; j <= i; j++ ) + { + double avg2 = avg2_data[j]; + double cov_val = prod_data[j] - avg1 * sum2[j] - avg2 * s1val + avg1 * avg2 * _count; + cov_val = (_count > 1) ? 
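+                        // unbiased estimate: normalize by (count - 1); with a single
+                        // sample in the class the unnormalized value is kept as-is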
cov_val / (_count - 1) : cov_val; + cov_data[j] = cov_val; + } + } + + completeSymm( cov, 1 ); + + SVD::compute(cov, w, cov_rotate_mats[cls], noArray()); + transpose(cov_rotate_mats[cls], cov_rotate_mats[cls]); + cv::max(w, min_variation, w); + for( j = 0; j < nvars; j++ ) + det *= w.at(j); + + divide(1., w, w); + c.at(cls) = det > 0 ? log(det) : -700; + } + + return true; + } + + class NBPredictBody : public ParallelLoopBody + { + public: + NBPredictBody( const Mat& _c, const vector& _cov_rotate_mats, + const vector& _inv_eigen_values, + const vector& _avg, + const Mat& _samples, const Mat& _vidx, const Mat& _cls_labels, + Mat& _results, Mat& _results_prob, bool _rawOutput ) + { + c = &_c; + cov_rotate_mats = &_cov_rotate_mats; + inv_eigen_values = &_inv_eigen_values; + avg = &_avg; + samples = &_samples; + vidx = &_vidx; + cls_labels = &_cls_labels; + results = &_results; + results_prob = !_results_prob.empty() ? &_results_prob : 0; + rawOutput = _rawOutput; + value = 0; + } + + const Mat* c; + const vector* cov_rotate_mats; + const vector* inv_eigen_values; + const vector* avg; + const Mat* samples; + const Mat* vidx; + const Mat* cls_labels; + + Mat* results_prob; + Mat* results; + float* value; + bool rawOutput; + + void operator()(const Range& range) const CV_OVERRIDE + { + int cls = -1; + int rtype = 0, rptype = 0; + size_t rstep = 0, rpstep = 0; + int nclasses = (int)cls_labels->total(); + int nvars = avg->at(0).cols; + double probability = 0; + const int* vptr = vidx && !vidx->empty() ? vidx->ptr() : 0; + + if (results) + { + rtype = results->type(); + rstep = results->isContinuous() ? 1 : results->step/results->elemSize(); + } + if (results_prob) + { + rptype = results_prob->type(); + rpstep = results_prob->isContinuous() ? results_prob->cols : results_prob->step/results_prob->elemSize(); + } + // allocate memory and initializing headers for calculating + cv::AutoBuffer _buffer(nvars*2); + double* _diffin = _buffer.data(); + double* _diffout = _buffer.data() + nvars; + Mat diffin( 1, nvars, CV_64FC1, _diffin ); + Mat diffout( 1, nvars, CV_64FC1, _diffout ); + + for(int k = range.start; k < range.end; k++ ) + { + double opt = FLT_MAX; + + for(int i = 0; i < nclasses; i++ ) + { + double cur = c->at(i); + const Mat& u = cov_rotate_mats->at(i); + const Mat& w = inv_eigen_values->at(i); + + const double* avg_data = avg->at(i).ptr(); + const float* x = samples->ptr(k); + + // cov = u w u' --> cov^(-1) = u w^(-1) u' + for(int j = 0; j < nvars; j++ ) + _diffin[j] = avg_data[j] - x[vptr ? vptr[j] : j]; + + gemm( diffin, u, 1, noArray(), 0, diffout, GEMM_2_T ); + for(int j = 0; j < nvars; j++ ) + { + double d = _diffout[j]; + cur += d*d*w.ptr()[j]; + } + + if( cur < opt ) + { + cls = i; + opt = cur; + } + probability = exp( -0.5 * cur ); + + if( results_prob ) + { + if ( rptype == CV_32FC1 ) + results_prob->ptr()[k*rpstep + i] = (float)probability; + else + results_prob->ptr()[k*rpstep + i] = probability; + } + } + + int ival = rawOutput ? 
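+                    // with RAW_OUTPUT the internal class index is returned, otherwise it
+                    // is mapped back to the original class label via cls_labels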
cls : cls_labels->at(cls); + if( results ) + { + if( rtype == CV_32SC1 ) + results->ptr()[k*rstep] = ival; + else + results->ptr()[k*rstep] = (float)ival; + } + } + } + }; + + float predict( InputArray _samples, OutputArray _results, int flags ) const CV_OVERRIDE + { + return predictProb(_samples, _results, noArray(), flags); + } + + float predictProb( InputArray _samples, OutputArray _results, OutputArray _resultsProb, int flags ) const CV_OVERRIDE + { + int value=0; + Mat samples = _samples.getMat(), results, resultsProb; + int nsamples = samples.rows, nclasses = (int)cls_labels.total(); + bool rawOutput = (flags & RAW_OUTPUT) != 0; + + if( samples.type() != CV_32F || samples.cols != nallvars ) + CV_Error( CV_StsBadArg, + "The input samples must be 32f matrix with the number of columns = nallvars" ); + + if( (samples.rows > 1) && (! _results.needed()) ) + CV_Error( CV_StsNullPtr, + "When the number of input samples is >1, the output vector of results must be passed" ); + + if( _results.needed() ) + { + _results.create(nsamples, 1, CV_32S); + results = _results.getMat(); + } + else + results = Mat(1, 1, CV_32S, &value); + + if( _resultsProb.needed() ) + { + _resultsProb.create(nsamples, nclasses, CV_32F); + resultsProb = _resultsProb.getMat(); + } + + cv::parallel_for_(cv::Range(0, nsamples), + NBPredictBody(c, cov_rotate_mats, inv_eigen_values, avg, samples, + var_idx, cls_labels, results, resultsProb, rawOutput)); + + return (float)value; + } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + int nclasses = (int)cls_labels.total(), i; + + writeFormat(fs); + fs << "var_count" << (var_idx.empty() ? nallvars : (int)var_idx.total()); + fs << "var_all" << nallvars; + + if( !var_idx.empty() ) + fs << "var_idx" << var_idx; + fs << "cls_labels" << cls_labels; + + fs << "count" << "["; + for( i = 0; i < nclasses; i++ ) + fs << count[i]; + + fs << "]" << "sum" << "["; + for( i = 0; i < nclasses; i++ ) + fs << sum[i]; + + fs << "]" << "productsum" << "["; + for( i = 0; i < nclasses; i++ ) + fs << productsum[i]; + + fs << "]" << "avg" << "["; + for( i = 0; i < nclasses; i++ ) + fs << avg[i]; + + fs << "]" << "inv_eigen_values" << "["; + for( i = 0; i < nclasses; i++ ) + fs << inv_eigen_values[i]; + + fs << "]" << "cov_rotate_mats" << "["; + for( i = 0; i < nclasses; i++ ) + fs << cov_rotate_mats[i]; + + fs << "]"; + + fs << "c" << c; + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + clear(); + + fn["var_all"] >> nallvars; + + if( nallvars <= 0 ) + CV_Error( CV_StsParseError, + "The field \"var_count\" of NBayes classifier is missing or non-positive" ); + + fn["var_idx"] >> var_idx; + fn["cls_labels"] >> cls_labels; + + int nclasses = (int)cls_labels.total(), i; + + if( cls_labels.empty() || nclasses < 1 ) + CV_Error( CV_StsParseError, "No or invalid \"cls_labels\" in NBayes classifier" ); + + FileNodeIterator + count_it = fn["count"].begin(), + sum_it = fn["sum"].begin(), + productsum_it = fn["productsum"].begin(), + avg_it = fn["avg"].begin(), + inv_eigen_values_it = fn["inv_eigen_values"].begin(), + cov_rotate_mats_it = fn["cov_rotate_mats"].begin(); + + count.resize(nclasses); + sum.resize(nclasses); + productsum.resize(nclasses); + avg.resize(nclasses); + inv_eigen_values.resize(nclasses); + cov_rotate_mats.resize(nclasses); + + for( i = 0; i < nclasses; i++, ++count_it, ++sum_it, ++productsum_it, ++avg_it, + ++inv_eigen_values_it, ++cov_rotate_mats_it ) + { + *count_it >> count[i]; + *sum_it >> sum[i]; + *productsum_it >> productsum[i]; + *avg_it >> avg[i]; + 
*inv_eigen_values_it >> inv_eigen_values[i]; + *cov_rotate_mats_it >> cov_rotate_mats[i]; + } + + fn["c"] >> c; + } + + void clear() CV_OVERRIDE + { + count.clear(); + sum.clear(); + productsum.clear(); + avg.clear(); + inv_eigen_values.clear(); + cov_rotate_mats.clear(); + + var_idx.release(); + cls_labels.release(); + c.release(); + nallvars = 0; + } + + bool isTrained() const CV_OVERRIDE { return !avg.empty(); } + bool isClassifier() const CV_OVERRIDE { return true; } + int getVarCount() const CV_OVERRIDE { return nallvars; } + String getDefaultName() const CV_OVERRIDE { return "opencv_ml_nbayes"; } + + int nallvars; + Mat var_idx, cls_labels, c; + vector count, sum, productsum, avg, inv_eigen_values, cov_rotate_mats; +}; + + +Ptr NormalBayesClassifier::create() +{ + Ptr p = makePtr(); + return p; +} + +Ptr NormalBayesClassifier::load(const String& filepath, const String& nodeName) +{ + return Algorithm::load(filepath, nodeName); +} + +} +} + +/* End of file. */ diff --git a/modules/ml/src/precomp.hpp b/modules/ml/src/precomp.hpp new file mode 100644 index 00000000000..328cc4732a6 --- /dev/null +++ b/modules/ml/src/precomp.hpp @@ -0,0 +1,400 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
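+//
+// A minimal usage sketch for the NormalBayesClassifier implemented in nbayes.cpp above
+// (illustrative data; assumes using namespace cv and using namespace cv::ml; samples are
+// CV_32F rows, labels are integer class ids):
+//
+// \code
+//     Ptr<NormalBayesClassifier> nb = NormalBayesClassifier::create();
+//     nb->train(TrainData::create(samples, ROW_SAMPLE, labels));
+//     Mat out, outProbs;
+//     nb->predictProb(samples, out, outProbs); // out: CV_32S labels, outProbs: CV_32F, one column per class
+// \endcode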
+// +//M*/ + +#ifndef __OPENCV_ML_PRECOMP_HPP__ +#define __OPENCV_ML_PRECOMP_HPP__ + +#include "opencv2/core.hpp" +#include "opencv2/ml.hpp" +#include "opencv2/core/core_c.h" +#include "opencv2/core/utility.hpp" + +#include "opencv2/core/private.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +/****************************************************************************************\ + * Main struct definitions * + \****************************************************************************************/ + +/* log(2*PI) */ +#define CV_LOG2PI (1.8378770664093454835606594728112) + +namespace cv +{ +namespace ml +{ + using std::vector; + + #define CV_DTREE_CAT_DIR(idx,subset) \ + (2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1) + + template struct cmp_lt_idx + { + cmp_lt_idx(const _Tp* _arr) : arr(_arr) {} + bool operator ()(int a, int b) const { return arr[a] < arr[b]; } + const _Tp* arr; + }; + + template struct cmp_lt_ptr + { + cmp_lt_ptr() {} + bool operator ()(const _Tp* a, const _Tp* b) const { return *a < *b; } + }; + + static inline void setRangeVector(std::vector& vec, int n) + { + vec.resize(n); + for( int i = 0; i < n; i++ ) + vec[i] = i; + } + + static inline void writeTermCrit(FileStorage& fs, const TermCriteria& termCrit) + { + if( (termCrit.type & TermCriteria::EPS) != 0 ) + fs << "epsilon" << termCrit.epsilon; + if( (termCrit.type & TermCriteria::COUNT) != 0 ) + fs << "iterations" << termCrit.maxCount; + } + + static inline TermCriteria readTermCrit(const FileNode& fn) + { + TermCriteria termCrit; + double epsilon = (double)fn["epsilon"]; + if( epsilon > 0 ) + { + termCrit.type |= TermCriteria::EPS; + termCrit.epsilon = epsilon; + } + int iters = (int)fn["iterations"]; + if( iters > 0 ) + { + termCrit.type |= TermCriteria::COUNT; + termCrit.maxCount = iters; + } + return termCrit; + } + + struct TreeParams + { + TreeParams(); + TreeParams( int maxDepth, int minSampleCount, + double regressionAccuracy, bool useSurrogates, + int maxCategories, int CVFolds, + bool use1SERule, bool truncatePrunedTree, + const Mat& priors ); + + inline void setMaxCategories(int val) + { + if( val < 2 ) + CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" ); + maxCategories = std::min(val, 15 ); + } + inline void setMaxDepth(int val) + { + if( val < 0 ) + CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" ); + maxDepth = std::min( val, 25 ); + } + inline void setMinSampleCount(int val) + { + minSampleCount = std::max(val, 1); + } + inline void setCVFolds(int val) + { + if( val < 0 ) + CV_Error( CV_StsOutOfRange, + "params.CVFolds should be =0 (the tree is not pruned) " + "or n>0 (tree is pruned using n-fold cross-validation)" ); + if(val > 1) + CV_Error( CV_StsNotImplemented, + "tree pruning using cross-validation is not implemented." 
+ "Set CVFolds to 1"); + + if( val == 1 ) + val = 0; + CVFolds = val; + } + inline void setRegressionAccuracy(float val) + { + if( val < 0 ) + CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" ); + regressionAccuracy = val; + } + + inline int getMaxCategories() const { return maxCategories; } + inline int getMaxDepth() const { return maxDepth; } + inline int getMinSampleCount() const { return minSampleCount; } + inline int getCVFolds() const { return CVFolds; } + inline float getRegressionAccuracy() const { return regressionAccuracy; } + + inline bool getUseSurrogates() const { return useSurrogates; } + inline void setUseSurrogates(bool val) { useSurrogates = val; } + inline bool getUse1SERule() const { return use1SERule; } + inline void setUse1SERule(bool val) { use1SERule = val; } + inline bool getTruncatePrunedTree() const { return truncatePrunedTree; } + inline void setTruncatePrunedTree(bool val) { truncatePrunedTree = val; } + inline cv::Mat getPriors() const { return priors; } + inline void setPriors(const cv::Mat& val) { priors = val; } + + public: + bool useSurrogates; + bool use1SERule; + bool truncatePrunedTree; + Mat priors; + + protected: + int maxCategories; + int maxDepth; + int minSampleCount; + int CVFolds; + float regressionAccuracy; + }; + + struct RTreeParams + { + RTreeParams(); + RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit ); + bool calcVarImportance; + int nactiveVars; + TermCriteria termCrit; + }; + + struct BoostTreeParams + { + BoostTreeParams(); + BoostTreeParams(int boostType, int weakCount, double weightTrimRate); + int boostType; + int weakCount; + double weightTrimRate; + }; + + class DTreesImpl : public DTrees + { + public: + struct WNode + { + WNode() + { + class_idx = sample_count = depth = complexity = 0; + parent = left = right = split = defaultDir = -1; + Tn = INT_MAX; + value = maxlr = alpha = node_risk = tree_risk = tree_error = 0.; + } + + int class_idx; + double Tn; + double value; + + int parent; + int left; + int right; + int defaultDir; + + int split; + + int sample_count; + int depth; + double maxlr; + + // global pruning data + int complexity; + double alpha; + double node_risk, tree_risk, tree_error; + }; + + struct WSplit + { + WSplit() + { + varIdx = next = 0; + inversed = false; + quality = c = 0.f; + subsetOfs = -1; + } + + int varIdx; + bool inversed; + float quality; + int next; + float c; + int subsetOfs; + }; + + struct WorkData + { + WorkData(const Ptr& _data); + + Ptr data; + vector wnodes; + vector wsplits; + vector wsubsets; + vector cv_Tn; + vector cv_node_risk; + vector cv_node_error; + vector cv_labels; + vector sample_weights; + vector cat_responses; + vector ord_responses; + vector sidx; + int maxSubsetSize; + }; + + inline int getMaxCategories() const CV_OVERRIDE { return params.getMaxCategories(); } + inline void setMaxCategories(int val) CV_OVERRIDE { params.setMaxCategories(val); } + inline int getMaxDepth() const CV_OVERRIDE { return params.getMaxDepth(); } + inline void setMaxDepth(int val) CV_OVERRIDE { params.setMaxDepth(val); } + inline int getMinSampleCount() const CV_OVERRIDE { return params.getMinSampleCount(); } + inline void setMinSampleCount(int val) CV_OVERRIDE { params.setMinSampleCount(val); } + inline int getCVFolds() const CV_OVERRIDE { return params.getCVFolds(); } + inline void setCVFolds(int val) CV_OVERRIDE { params.setCVFolds(val); } + inline bool getUseSurrogates() const CV_OVERRIDE { return params.getUseSurrogates(); } + inline void 
setUseSurrogates(bool val) CV_OVERRIDE { params.setUseSurrogates(val); } + inline bool getUse1SERule() const CV_OVERRIDE { return params.getUse1SERule(); } + inline void setUse1SERule(bool val) CV_OVERRIDE { params.setUse1SERule(val); } + inline bool getTruncatePrunedTree() const CV_OVERRIDE { return params.getTruncatePrunedTree(); } + inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { params.setTruncatePrunedTree(val); } + inline float getRegressionAccuracy() const CV_OVERRIDE { return params.getRegressionAccuracy(); } + inline void setRegressionAccuracy(float val) CV_OVERRIDE { params.setRegressionAccuracy(val); } + inline cv::Mat getPriors() const CV_OVERRIDE { return params.getPriors(); } + inline void setPriors(const cv::Mat& val) CV_OVERRIDE { params.setPriors(val); } + + DTreesImpl(); + virtual ~DTreesImpl() CV_OVERRIDE; + virtual void clear() CV_OVERRIDE; + + String getDefaultName() const CV_OVERRIDE { return "opencv_ml_dtree"; } + bool isTrained() const CV_OVERRIDE { return !roots.empty(); } + bool isClassifier() const CV_OVERRIDE { return _isClassifier; } + int getVarCount() const CV_OVERRIDE { return varType.empty() ? 0 : (int)(varType.size() - 1); } + int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; } + int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; } + + virtual void setDParams(const TreeParams& _params); + virtual void startTraining( const Ptr& trainData, int flags ); + virtual void endTraining(); + virtual void initCompVarIdx(); + virtual bool train( const Ptr& trainData, int flags ) CV_OVERRIDE; + + virtual int addTree( const vector& sidx ); + virtual int addNodeAndTrySplit( int parent, const vector& sidx ); + virtual const vector& getActiveVars(); + virtual int findBestSplit( const vector& _sidx ); + virtual void calcValue( int nidx, const vector& _sidx ); + + virtual WSplit findSplitOrdClass( int vi, const vector& _sidx, double initQuality ); + + // simple k-means, slightly modified to take into account the "weight" (L1-norm) of each vector. 
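+        // This serves the categorical classification split (findSplitCatClass): when a
+        // categorical variable takes more distinct values than maxCategories allows to
+        // search exhaustively, the per-category class distributions are grouped into at
+        // most maxCategories clusters and the subset search runs over those groups rather
+        // than over the raw categories, keeping the split search tractable.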
+ virtual void clusterCategories( const double* vectors, int n, int m, double* csums, int k, int* labels ); + virtual WSplit findSplitCatClass( int vi, const vector& _sidx, double initQuality, int* subset ); + + virtual WSplit findSplitOrdReg( int vi, const vector& _sidx, double initQuality ); + virtual WSplit findSplitCatReg( int vi, const vector& _sidx, double initQuality, int* subset ); + + virtual int calcDir( int splitidx, const vector& _sidx, vector& _sleft, vector& _sright ); + virtual int pruneCV( int root ); + + virtual double updateTreeRNC( int root, double T, int fold ); + virtual bool cutTree( int root, double T, int fold, double min_alpha ); + virtual float predictTrees( const Range& range, const Mat& sample, int flags ) const; + virtual float predict( InputArray inputs, OutputArray outputs, int flags ) const CV_OVERRIDE; + + virtual void writeTrainingParams( FileStorage& fs ) const; + virtual void writeParams( FileStorage& fs ) const; + virtual void writeSplit( FileStorage& fs, int splitidx ) const; + virtual void writeNode( FileStorage& fs, int nidx, int depth ) const; + virtual void writeTree( FileStorage& fs, int root ) const; + virtual void write( FileStorage& fs ) const CV_OVERRIDE; + + virtual void readParams( const FileNode& fn ); + virtual int readSplit( const FileNode& fn ); + virtual int readNode( const FileNode& fn ); + virtual int readTree( const FileNode& fn ); + virtual void read( const FileNode& fn ) CV_OVERRIDE; + + virtual const std::vector& getRoots() const CV_OVERRIDE { return roots; } + virtual const std::vector& getNodes() const CV_OVERRIDE { return nodes; } + virtual const std::vector& getSplits() const CV_OVERRIDE { return splits; } + virtual const std::vector& getSubsets() const CV_OVERRIDE { return subsets; } + + TreeParams params; + + vector varIdx; + vector compVarIdx; + vector varType; + vector catOfs; + vector catMap; + vector roots; + vector nodes; + vector splits; + vector subsets; + vector classLabels; + vector missingSubst; + vector varMapping; + bool _isClassifier; + + Ptr w; + }; + + template + static inline void readVectorOrMat(const FileNode & node, std::vector & v) + { + if (node.type() == FileNode::MAP) + { + Mat m; + node >> m; + m.copyTo(v); + } + else if (node.type() == FileNode::SEQ) + { + node >> v; + } + } + +}} + +#endif /* __OPENCV_ML_PRECOMP_HPP__ */ diff --git a/modules/ml/src/rtrees.cpp b/modules/ml/src/rtrees.cpp new file mode 100644 index 00000000000..2cad961f99f --- /dev/null +++ b/modules/ml/src/rtrees.cpp @@ -0,0 +1,531 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +namespace cv { +namespace ml { + +////////////////////////////////////////////////////////////////////////////////////////// +// Random trees // +////////////////////////////////////////////////////////////////////////////////////////// +RTreeParams::RTreeParams() +{ + CV_TRACE_FUNCTION(); + calcVarImportance = false; + nactiveVars = 0; + termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1); +} + +RTreeParams::RTreeParams(bool _calcVarImportance, + int _nactiveVars, + TermCriteria _termCrit ) +{ + CV_TRACE_FUNCTION(); + calcVarImportance = _calcVarImportance; + nactiveVars = _nactiveVars; + termCrit = _termCrit; +} + + +class DTreesImplForRTrees CV_FINAL : public DTreesImpl +{ +public: + DTreesImplForRTrees() + { + CV_TRACE_FUNCTION(); + params.setMaxDepth(5); + params.setMinSampleCount(10); + params.setRegressionAccuracy(0.f); + params.useSurrogates = false; + params.setMaxCategories(10); + params.setCVFolds(0); + params.use1SERule = false; + params.truncatePrunedTree = false; + params.priors = Mat(); + oobError = 0; + } + virtual ~DTreesImplForRTrees() {} + + void clear() CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + DTreesImpl::clear(); + oobError = 0.; + } + + const vector& getActiveVars() CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + RNG &rng = theRNG(); + int i, nvars = (int)allVars.size(), m = (int)activeVars.size(); + for( i = 0; i < nvars; i++ ) + { + int i1 = rng.uniform(0, nvars); + int i2 = rng.uniform(0, nvars); + std::swap(allVars[i1], allVars[i2]); + } + for( i = 0; i < m; i++ ) + activeVars[i] = allVars[i]; + return activeVars; + } + + void startTraining( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + CV_Assert(!trainData.empty()); + DTreesImpl::startTraining(trainData, flags); + int nvars = w->data->getNVars(); + int i, m = rparams.nactiveVars > 0 ? 
rparams.nactiveVars : cvRound(std::sqrt((double)nvars)); + m = std::min(std::max(m, 1), nvars); + allVars.resize(nvars); + activeVars.resize(m); + for( i = 0; i < nvars; i++ ) + allVars[i] = varIdx[i]; + } + + void endTraining() CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + DTreesImpl::endTraining(); + vector a, b; + std::swap(allVars, a); + std::swap(activeVars, b); + } + + bool train( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + RNG &rng = theRNG(); + CV_Assert(!trainData.empty()); + startTraining(trainData, flags); + int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ? + rparams.termCrit.maxCount : 10000; + int i, j, k, vi, vi_, n = (int)w->sidx.size(); + int nclasses = (int)classLabels.size(); + double eps = (rparams.termCrit.type & TermCriteria::EPS) != 0 && + rparams.termCrit.epsilon > 0 ? rparams.termCrit.epsilon : 0.; + vector sidx(n); + vector oobmask(n); + vector oobidx; + vector oobperm; + vector oobres(n, 0.); + vector oobcount(n, 0); + vector oobvotes(n*nclasses, 0); + int nvars = w->data->getNVars(); + int nallvars = w->data->getNAllVars(); + const int* vidx = !varIdx.empty() ? &varIdx[0] : 0; + vector samplebuf(nallvars); + Mat samples = w->data->getSamples(); + float* psamples = samples.ptr(); + size_t sstep0 = samples.step1(), sstep1 = 1; + Mat sample0, sample(nallvars, 1, CV_32F, &samplebuf[0]); + int predictFlags = _isClassifier ? (PREDICT_MAX_VOTE + RAW_OUTPUT) : PREDICT_SUM; + + bool calcOOBError = eps > 0 || rparams.calcVarImportance; + double max_response = 0.; + + if( w->data->getLayout() == COL_SAMPLE ) + std::swap(sstep0, sstep1); + + if( !_isClassifier ) + { + for( i = 0; i < n; i++ ) + { + double val = std::abs(w->ord_responses[w->sidx[i]]); + max_response = std::max(max_response, val); + } + CV_Assert(fabs(max_response) > 0); + } + + if( rparams.calcVarImportance ) + varImportance.resize(nallvars, 0.f); + + for( treeidx = 0; treeidx < ntrees; treeidx++ ) + { + for( i = 0; i < n; i++ ) + oobmask[i] = (uchar)1; + + for( i = 0; i < n; i++ ) + { + j = rng.uniform(0, n); + sidx[i] = w->sidx[j]; + oobmask[j] = (uchar)0; + } + int root = addTree( sidx ); + if( root < 0 ) + return false; + + if( calcOOBError ) + { + oobidx.clear(); + for( i = 0; i < n; i++ ) + { + if( oobmask[i] ) + oobidx.push_back(i); + } + int n_oob = (int)oobidx.size(); + // if there is no out-of-bag samples, we can not compute OOB error + // nor update the variable importance vector; so we proceed to the next tree + if( n_oob == 0 ) + continue; + double ncorrect_responses = 0.; + + oobError = 0.; + for( i = 0; i < n_oob; i++ ) + { + j = oobidx[i]; + sample = Mat( nallvars, 1, CV_32F, psamples + sstep0*w->sidx[j], sstep1*sizeof(psamples[0]) ); + + double val = predictTrees(Range(treeidx, treeidx+1), sample, predictFlags); + double sample_weight = w->sample_weights[w->sidx[j]]; + if( !_isClassifier ) + { + oobres[j] += val; + oobcount[j]++; + double true_val = w->ord_responses[w->sidx[j]]; + double a = oobres[j]/oobcount[j] - true_val; + oobError += sample_weight * a*a; + val = (val - true_val)/max_response; + ncorrect_responses += std::exp( -val*val ); + } + else + { + int ival = cvRound(val); + //Voting scheme to combine OOB errors of each tree + int* votes = &oobvotes[j*nclasses]; + votes[ival]++; + int best_class = 0; + for( k = 1; k < nclasses; k++ ) + if( votes[best_class] < votes[k] ) + best_class = k; + int diff = best_class != w->cat_responses[w->sidx[j]]; + oobError += sample_weight * diff; + ncorrect_responses += diff == 0; + } + } + + 
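+                // Normalize the accumulated out-of-bag error by the number of OOB samples.
+                // The block below then estimates variable importance by permutation: for each
+                // active predictor, its values are shuffled across the OOB samples and the
+                // resulting drop in the number of correct responses is added to varImportance.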
oobError /= n_oob; + if( rparams.calcVarImportance && n_oob > 1 ) + { + Mat sample_clone; + oobperm.resize(n_oob); + for( i = 0; i < n_oob; i++ ) + oobperm[i] = oobidx[i]; + for (i = n_oob - 1; i > 0; --i) //Randomly shuffle indices so we can permute features + { + int r_i = rng.uniform(0, n_oob); + std::swap(oobperm[i], oobperm[r_i]); + } + + for( vi_ = 0; vi_ < nvars; vi_++ ) + { + vi = vidx ? vidx[vi_] : vi_; //Ensure that only the user specified predictors are used for training + double ncorrect_responses_permuted = 0; + + for( i = 0; i < n_oob; i++ ) + { + j = oobidx[i]; + int vj = oobperm[i]; + sample0 = Mat( nallvars, 1, CV_32F, psamples + sstep0*w->sidx[j], sstep1*sizeof(psamples[0]) ); + sample0.copyTo(sample_clone); //create a copy so we don't mess up the original data + sample_clone.at(vi) = psamples[sstep0*w->sidx[vj] + sstep1*vi]; + + double val = predictTrees(Range(treeidx, treeidx+1), sample_clone, predictFlags); + if( !_isClassifier ) + { + val = (val - w->ord_responses[w->sidx[j]])/max_response; + ncorrect_responses_permuted += exp( -val*val ); + } + else + { + ncorrect_responses_permuted += cvRound(val) == w->cat_responses[w->sidx[j]]; + } + } + varImportance[vi] += (float)(ncorrect_responses - ncorrect_responses_permuted); + } + } + } + if( calcOOBError && oobError < eps ) + break; + } + + if( rparams.calcVarImportance ) + { + for( vi_ = 0; vi_ < nallvars; vi_++ ) + varImportance[vi_] = std::max(varImportance[vi_], 0.f); + normalize(varImportance, varImportance, 1., 0, NORM_L1); + } + endTraining(); + return true; + } + + void writeTrainingParams( FileStorage& fs ) const CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + DTreesImpl::writeTrainingParams(fs); + fs << "nactive_vars" << rparams.nactiveVars; + } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + if( roots.empty() ) + CV_Error( CV_StsBadArg, "RTrees have not been trained" ); + + writeFormat(fs); + writeParams(fs); + + fs << "oob_error" << oobError; + if( !varImportance.empty() ) + fs << "var_importance" << varImportance; + + int k, ntrees = (int)roots.size(); + + fs << "ntrees" << ntrees + << "trees" << "["; + + for( k = 0; k < ntrees; k++ ) + { + fs << "{"; + writeTree(fs, roots[k]); + fs << "}"; + } + + fs << "]"; + } + + void readParams( const FileNode& fn ) CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + DTreesImpl::readParams(fn); + + FileNode tparams_node = fn["training_params"]; + rparams.nactiveVars = (int)tparams_node["nactive_vars"]; + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + clear(); + + //int nclasses = (int)fn["nclasses"]; + //int nsamples = (int)fn["nsamples"]; + oobError = (double)fn["oob_error"]; + int ntrees = (int)fn["ntrees"]; + + readVectorOrMat(fn["var_importance"], varImportance); + + readParams(fn); + + FileNode trees_node = fn["trees"]; + FileNodeIterator it = trees_node.begin(); + CV_Assert( ntrees == (int)trees_node.size() ); + + for( int treeidx = 0; treeidx < ntrees; treeidx++, ++it ) + { + FileNode nfn = (*it)["nodes"]; + readTree(nfn); + } + } + + void getVotes( InputArray input, OutputArray output, int flags ) const + { + CV_TRACE_FUNCTION(); + CV_Assert( !roots.empty() ); + int nclasses = (int)classLabels.size(), ntrees = (int)roots.size(); + Mat samples = input.getMat(), results; + int i, j, nsamples = samples.rows; + + int predictType = flags & PREDICT_MASK; + if( predictType == PREDICT_AUTO ) + { + predictType = !_isClassifier || (classLabels.size() == 2 && (flags & RAW_OUTPUT) != 0) ? 
+ PREDICT_SUM : PREDICT_MAX_VOTE; + } + + if( predictType == PREDICT_SUM ) + { + output.create(nsamples, ntrees, CV_32F); + results = output.getMat(); + for( i = 0; i < nsamples; i++ ) + { + for( j = 0; j < ntrees; j++ ) + { + float val = predictTrees( Range(j, j+1), samples.row(i), flags); + results.at (i, j) = val; + } + } + } else + { + vector votes; + output.create(nsamples+1, nclasses, CV_32S); + results = output.getMat(); + + for ( j = 0; j < nclasses; j++) + { + results.at (0, j) = classLabels[j]; + } + + for( i = 0; i < nsamples; i++ ) + { + votes.clear(); + for( j = 0; j < ntrees; j++ ) + { + int val = (int)predictTrees( Range(j, j+1), samples.row(i), flags); + votes.push_back(val); + } + + for ( j = 0; j < nclasses; j++) + { + results.at (i+1, j) = (int)std::count(votes.begin(), votes.end(), classLabels[j]); + } + } + } + } + + double getOOBError() const { + return oobError; + } + + RTreeParams rparams; + double oobError; + vector varImportance; + vector allVars, activeVars; +}; + + +class RTreesImpl CV_FINAL : public RTrees +{ +public: + inline bool getCalculateVarImportance() const CV_OVERRIDE { return impl.rparams.calcVarImportance; } + inline void setCalculateVarImportance(bool val) CV_OVERRIDE { impl.rparams.calcVarImportance = val; } + inline int getActiveVarCount() const CV_OVERRIDE { return impl.rparams.nactiveVars; } + inline void setActiveVarCount(int val) CV_OVERRIDE { impl.rparams.nactiveVars = val; } + inline TermCriteria getTermCriteria() const CV_OVERRIDE { return impl.rparams.termCrit; } + inline void setTermCriteria(const TermCriteria& val) CV_OVERRIDE { impl.rparams.termCrit = val; } + + inline int getMaxCategories() const CV_OVERRIDE { return impl.params.getMaxCategories(); } + inline void setMaxCategories(int val) CV_OVERRIDE { impl.params.setMaxCategories(val); } + inline int getMaxDepth() const CV_OVERRIDE { return impl.params.getMaxDepth(); } + inline void setMaxDepth(int val) CV_OVERRIDE { impl.params.setMaxDepth(val); } + inline int getMinSampleCount() const CV_OVERRIDE { return impl.params.getMinSampleCount(); } + inline void setMinSampleCount(int val) CV_OVERRIDE { impl.params.setMinSampleCount(val); } + inline int getCVFolds() const CV_OVERRIDE { return impl.params.getCVFolds(); } + inline void setCVFolds(int val) CV_OVERRIDE { impl.params.setCVFolds(val); } + inline bool getUseSurrogates() const CV_OVERRIDE { return impl.params.getUseSurrogates(); } + inline void setUseSurrogates(bool val) CV_OVERRIDE { impl.params.setUseSurrogates(val); } + inline bool getUse1SERule() const CV_OVERRIDE { return impl.params.getUse1SERule(); } + inline void setUse1SERule(bool val) CV_OVERRIDE { impl.params.setUse1SERule(val); } + inline bool getTruncatePrunedTree() const CV_OVERRIDE { return impl.params.getTruncatePrunedTree(); } + inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { impl.params.setTruncatePrunedTree(val); } + inline float getRegressionAccuracy() const CV_OVERRIDE { return impl.params.getRegressionAccuracy(); } + inline void setRegressionAccuracy(float val) CV_OVERRIDE { impl.params.setRegressionAccuracy(val); } + inline cv::Mat getPriors() const CV_OVERRIDE { return impl.params.getPriors(); } + inline void setPriors(const cv::Mat& val) CV_OVERRIDE { impl.params.setPriors(val); } + inline void getVotes(InputArray input, OutputArray output, int flags) const CV_OVERRIDE {return impl.getVotes(input,output,flags);} + + RTreesImpl() {} + virtual ~RTreesImpl() CV_OVERRIDE {} + + String getDefaultName() const CV_OVERRIDE { return "opencv_ml_rtrees"; } + 
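+    // A minimal usage sketch of this class through the public cv::ml::RTrees interface
+    // (illustrative only; `samples`, `responses` and `querySample` are hypothetical Mat's):
+    //
+    //     Ptr<RTrees> rtrees = RTrees::create();
+    //     rtrees->setMaxDepth(10);
+    //     rtrees->setActiveVarCount(0);   // 0 means "use about sqrt(#features) per split"
+    //     rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 100, 0.01));
+    //     rtrees->train(TrainData::create(samples, ROW_SAMPLE, responses));
+    //     float prediction = rtrees->predict(querySample);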
+ bool train( const Ptr& trainData, int flags ) CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + CV_Assert(!trainData.empty()); + if (impl.getCVFolds() != 0) + CV_Error(Error::StsBadArg, "Cross validation for RTrees is not implemented"); + return impl.train(trainData, flags); + } + + float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + CV_CheckEQ(samples.cols(), getVarCount(), ""); + return impl.predict(samples, results, flags); + } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + impl.write(fs); + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + impl.read(fn); + } + + Mat getVarImportance() const CV_OVERRIDE { return Mat_(impl.varImportance, true); } + int getVarCount() const CV_OVERRIDE { return impl.getVarCount(); } + + bool isTrained() const CV_OVERRIDE { return impl.isTrained(); } + bool isClassifier() const CV_OVERRIDE { return impl.isClassifier(); } + + const vector& getRoots() const CV_OVERRIDE { return impl.getRoots(); } + const vector& getNodes() const CV_OVERRIDE { return impl.getNodes(); } + const vector& getSplits() const CV_OVERRIDE { return impl.getSplits(); } + const vector& getSubsets() const CV_OVERRIDE { return impl.getSubsets(); } + double getOOBError() const CV_OVERRIDE { return impl.getOOBError(); } + + + DTreesImplForRTrees impl; +}; + + +Ptr RTrees::create() +{ + CV_TRACE_FUNCTION(); + return makePtr(); +} + +//Function needed for Python and Java wrappers +Ptr RTrees::load(const String& filepath, const String& nodeName) +{ + CV_TRACE_FUNCTION(); + return Algorithm::load(filepath, nodeName); +} + +}} + +// End of file. diff --git a/modules/ml/src/svm.cpp b/modules/ml/src/svm.cpp new file mode 100644 index 00000000000..40c18c03ea1 --- /dev/null +++ b/modules/ml/src/svm.cpp @@ -0,0 +1,2357 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +#include +#include + +/****************************************************************************************\ + COPYRIGHT NOTICE + ---------------- + + The code has been derived from libsvm library (version 2.6) + (http://www.csie.ntu.edu.tw/~cjlin/libsvm). + + Here is the original copyright: +------------------------------------------------------------------------------------------ + Copyright (c) 2000-2003 Chih-Chung Chang and Chih-Jen Lin + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither name of copyright holders nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************************/ + +namespace cv { namespace ml { + +typedef float Qfloat; +const int QFLOAT_TYPE = DataDepth::value; + +// Param Grid +static void checkParamGrid(const ParamGrid& pg) +{ + if( pg.minVal > pg.maxVal ) + CV_Error( CV_StsBadArg, "Lower bound of the grid must be less then the upper one" ); + if( pg.minVal < DBL_EPSILON ) + CV_Error( CV_StsBadArg, "Lower bound of the grid must be positive" ); + if( pg.logStep < 1. 
+ FLT_EPSILON ) + CV_Error( CV_StsBadArg, "Grid step must greater than 1" ); +} + +// SVM training parameters +struct SvmParams +{ + int svmType; + int kernelType; + double gamma; + double coef0; + double degree; + double C; + double nu; + double p; + Mat classWeights; + TermCriteria termCrit; + + SvmParams() + { + svmType = SVM::C_SVC; + kernelType = SVM::RBF; + degree = 0; + gamma = 1; + coef0 = 0; + C = 1; + nu = 0; + p = 0; + termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON ); + } + + SvmParams( int _svmType, int _kernelType, + double _degree, double _gamma, double _coef0, + double _Con, double _nu, double _p, + const Mat& _classWeights, TermCriteria _termCrit ) + { + svmType = _svmType; + kernelType = _kernelType; + degree = _degree; + gamma = _gamma; + coef0 = _coef0; + C = _Con; + nu = _nu; + p = _p; + classWeights = _classWeights; + termCrit = _termCrit; + } + +}; + +/////////////////////////////////////// SVM kernel /////////////////////////////////////// +class SVMKernelImpl CV_FINAL : public SVM::Kernel +{ +public: + SVMKernelImpl( const SvmParams& _params = SvmParams() ) + { + params = _params; + } + + int getType() const CV_OVERRIDE + { + return params.kernelType; + } + + void calc_non_rbf_base( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results, + double alpha, double beta ) + { + int j, k; + for( j = 0; j < vcount; j++ ) + { + const float* sample = &vecs[j*var_count]; + double s = 0; + for( k = 0; k <= var_count - 4; k += 4 ) + s += sample[k]*another[k] + sample[k+1]*another[k+1] + + sample[k+2]*another[k+2] + sample[k+3]*another[k+3]; + for( ; k < var_count; k++ ) + s += sample[k]*another[k]; + results[j] = (Qfloat)(s*alpha + beta); + } + } + + void calc_linear( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results ) + { + calc_non_rbf_base( vcount, var_count, vecs, another, results, 1, 0 ); + } + + void calc_poly( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results ) + { + Mat R( 1, vcount, QFLOAT_TYPE, results ); + calc_non_rbf_base( vcount, var_count, vecs, another, results, params.gamma, params.coef0 ); + if( vcount > 0 ) + pow( R, params.degree, R ); + } + + void calc_sigmoid( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results ) + { + int j; + calc_non_rbf_base( vcount, var_count, vecs, another, results, + 2*params.gamma, 2*params.coef0 ); + // TODO: speedup this + for( j = 0; j < vcount; j++ ) + { + Qfloat t = results[j]; + Qfloat e = std::exp(std::abs(t)); // Inf value is possible here + Qfloat r = (Qfloat)((e - 1.) 
/ (e + 1.)); // NaN value is possible here (Inf/Inf or similar) + if (cvIsNaN(r)) + r = std::numeric_limits::infinity(); + if (t < 0) + r = -r; + CV_DbgAssert(!cvIsNaN(r)); + results[j] = r; + } + } + + void calc_rbf( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results ) + { + double gamma = -params.gamma; + int j, k; + + for( j = 0; j < vcount; j++ ) + { + const float* sample = &vecs[j*var_count]; + double s = 0; + + for( k = 0; k <= var_count - 4; k += 4 ) + { + double t0 = sample[k] - another[k]; + double t1 = sample[k+1] - another[k+1]; + + s += t0*t0 + t1*t1; + + t0 = sample[k+2] - another[k+2]; + t1 = sample[k+3] - another[k+3]; + + s += t0*t0 + t1*t1; + } + + for( ; k < var_count; k++ ) + { + double t0 = sample[k] - another[k]; + s += t0*t0; + } + results[j] = (Qfloat)(s*gamma); + } + + if( vcount > 0 ) + { + Mat R( 1, vcount, QFLOAT_TYPE, results ); + exp( R, R ); + } + } + + /// Histogram intersection kernel + void calc_intersec( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results ) + { + int j, k; + for( j = 0; j < vcount; j++ ) + { + const float* sample = &vecs[j*var_count]; + double s = 0; + for( k = 0; k <= var_count - 4; k += 4 ) + s += std::min(sample[k],another[k]) + std::min(sample[k+1],another[k+1]) + + std::min(sample[k+2],another[k+2]) + std::min(sample[k+3],another[k+3]); + for( ; k < var_count; k++ ) + s += std::min(sample[k],another[k]); + results[j] = (Qfloat)(s); + } + } + + /// Exponential chi2 kernel + void calc_chi2( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results ) + { + Mat R( 1, vcount, QFLOAT_TYPE, results ); + double gamma = -params.gamma; + int j, k; + for( j = 0; j < vcount; j++ ) + { + const float* sample = &vecs[j*var_count]; + double chi2 = 0; + for(k = 0 ; k < var_count; k++ ) + { + double d = sample[k]-another[k]; + double devisor = sample[k]+another[k]; + /// if devisor == 0, the Chi2 distance would be zero, + // but calculation would rise an error because of dividing by zero + if (devisor != 0) + { + chi2 += d*d/devisor; + } + } + results[j] = (Qfloat) (gamma*chi2); + } + if( vcount > 0 ) + exp( R, R ); + } + + void calc( int vcount, int var_count, const float* vecs, + const float* another, Qfloat* results ) CV_OVERRIDE + { + switch( params.kernelType ) + { + case SVM::LINEAR: + calc_linear(vcount, var_count, vecs, another, results); + break; + case SVM::RBF: + calc_rbf(vcount, var_count, vecs, another, results); + break; + case SVM::POLY: + calc_poly(vcount, var_count, vecs, another, results); + break; + case SVM::SIGMOID: + calc_sigmoid(vcount, var_count, vecs, another, results); + break; + case SVM::CHI2: + calc_chi2(vcount, var_count, vecs, another, results); + break; + case SVM::INTER: + calc_intersec(vcount, var_count, vecs, another, results); + break; + default: + CV_Error(CV_StsBadArg, "Unknown kernel type"); + } + const Qfloat max_val = (Qfloat)(FLT_MAX*1e-3); + for( int j = 0; j < vcount; j++ ) + { + if (!(results[j] <= max_val)) // handle NaNs too + results[j] = max_val; + } + } + + SvmParams params; +}; + + + +///////////////////////////////////////////////////////////////////////// + +static void sortSamplesByClasses( const Mat& _samples, const Mat& _responses, + vector& sidx_all, vector& class_ranges ) +{ + int i, nsamples = _samples.rows; + CV_Assert( _responses.isContinuous() && _responses.checkVector(1, CV_32S) == nsamples ); + + setRangeVector(sidx_all, nsamples); + + const int* rptr = _responses.ptr(); + 
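+    // Sort the sample indices by class label so that samples of the same class become
+    // contiguous; class_ranges then receives the start offset of every class block
+    // (with the total sample count appended as the final boundary).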
std::sort(sidx_all.begin(), sidx_all.end(), cmp_lt_idx(rptr)); + class_ranges.clear(); + class_ranges.push_back(0); + + for( i = 0; i < nsamples; i++ ) + { + if( i == nsamples-1 || rptr[sidx_all[i]] != rptr[sidx_all[i+1]] ) + class_ranges.push_back(i+1); + } +} + +//////////////////////// SVM implementation ////////////////////////////// + +Ptr SVM::getDefaultGridPtr( int param_id) +{ + ParamGrid grid = getDefaultGrid(param_id); // this is not a nice solution.. + return makePtr(grid.minVal, grid.maxVal, grid.logStep); +} + +ParamGrid SVM::getDefaultGrid( int param_id ) +{ + ParamGrid grid; + if( param_id == SVM::C ) + { + grid.minVal = 0.1; + grid.maxVal = 500; + grid.logStep = 5; // total iterations = 5 + } + else if( param_id == SVM::GAMMA ) + { + grid.minVal = 1e-5; + grid.maxVal = 0.6; + grid.logStep = 15; // total iterations = 4 + } + else if( param_id == SVM::P ) + { + grid.minVal = 0.01; + grid.maxVal = 100; + grid.logStep = 7; // total iterations = 4 + } + else if( param_id == SVM::NU ) + { + grid.minVal = 0.01; + grid.maxVal = 0.2; + grid.logStep = 3; // total iterations = 3 + } + else if( param_id == SVM::COEF ) + { + grid.minVal = 0.1; + grid.maxVal = 300; + grid.logStep = 14; // total iterations = 3 + } + else if( param_id == SVM::DEGREE ) + { + grid.minVal = 0.01; + grid.maxVal = 4; + grid.logStep = 7; // total iterations = 3 + } + else + cvError( CV_StsBadArg, "SVM::getDefaultGrid", "Invalid type of parameter " + "(use one of SVM::C, SVM::GAMMA et al.)", __FILE__, __LINE__ ); + return grid; +} + + +class SVMImpl CV_FINAL : public SVM +{ +public: + struct DecisionFunc + { + DecisionFunc(double _rho, int _ofs) : rho(_rho), ofs(_ofs) {} + DecisionFunc() : rho(0.), ofs(0) {} + double rho; + int ofs; + }; + + // Generalized SMO+SVMlight algorithm + // Solves: + // + // min [0.5(\alpha^T Q \alpha) + b^T \alpha] + // + // y^T \alpha = \delta + // y_i = +1 or -1 + // 0 <= alpha_i <= Cp for y_i = 1 + // 0 <= alpha_i <= Cn for y_i = -1 + // + // Given: + // + // Q, b, y, Cp, Cn, and an initial feasible point \alpha + // l is the size of vectors and matrices + // eps is the stopping criterion + // + // solution will be put in \alpha, objective value will be put in obj + // + class Solver + { + public: + enum { MIN_CACHE_SIZE = (40 << 20) /* 40Mb */, MAX_CACHE_SIZE = (500 << 20) /* 500Mb */ }; + + typedef bool (Solver::*SelectWorkingSet)( int& i, int& j ); + typedef Qfloat* (Solver::*GetRow)( int i, Qfloat* row, Qfloat* dst, bool existed ); + typedef void (Solver::*CalcRho)( double& rho, double& r ); + + struct KernelRow + { + KernelRow() { idx = -1; prev = next = 0; } + KernelRow(int _idx, int _prev, int _next) : idx(_idx), prev(_prev), next(_next) {} + int idx; + int prev; + int next; + }; + + struct SolutionInfo + { + SolutionInfo() { obj = rho = upper_bound_p = upper_bound_n = r = 0; } + double obj; + double rho; + double upper_bound_p; + double upper_bound_n; + double r; // for Solver_NU + }; + + void clear() + { + alpha_vec = 0; + select_working_set_func = 0; + calc_rho_func = 0; + get_row_func = 0; + lru_cache.clear(); + } + + Solver( const Mat& _samples, const vector& _y, + vector& _alpha, const vector& _b, + double _Cp, double _Cn, + const Ptr& _kernel, GetRow _get_row, + SelectWorkingSet _select_working_set, CalcRho _calc_rho, + TermCriteria _termCrit ) + { + clear(); + + samples = _samples; + sample_count = samples.rows; + var_count = samples.cols; + + y_vec = _y; + alpha_vec = &_alpha; + alpha_count = (int)alpha_vec->size(); + b_vec = _b; + kernel = _kernel; + + C[0] = _Cn; + 
C[1] = _Cp; + eps = _termCrit.epsilon; + max_iter = _termCrit.maxCount; + + G_vec.resize(alpha_count); + alpha_status_vec.resize(alpha_count); + buf[0].resize(sample_count*2); + buf[1].resize(sample_count*2); + + select_working_set_func = _select_working_set; + CV_Assert(select_working_set_func != 0); + + calc_rho_func = _calc_rho; + CV_Assert(calc_rho_func != 0); + + get_row_func = _get_row; + CV_Assert(get_row_func != 0); + + // assume that for large training sets ~25% of Q matrix is used + int64 csize = (int64)sample_count*sample_count/4; + csize = std::max(csize, (int64)(MIN_CACHE_SIZE/sizeof(Qfloat)) ); + csize = std::min(csize, (int64)(MAX_CACHE_SIZE/sizeof(Qfloat)) ); + max_cache_size = (int)((csize + sample_count-1)/sample_count); + max_cache_size = std::min(std::max(max_cache_size, 1), sample_count); + cache_size = 0; + + lru_cache.clear(); + lru_cache.resize(sample_count+1, KernelRow(-1, 0, 0)); + lru_first = lru_last = 0; + lru_cache_data.create(max_cache_size, sample_count, QFLOAT_TYPE); + } + + Qfloat* get_row_base( int i, bool* _existed ) + { + int i1 = i < sample_count ? i : i - sample_count; + KernelRow& kr = lru_cache[i1+1]; + if( _existed ) + *_existed = kr.idx >= 0; + if( kr.idx < 0 ) + { + if( cache_size < max_cache_size ) + { + kr.idx = cache_size; + cache_size++; + if (!lru_last) + lru_last = i1+1; + } + else + { + KernelRow& last = lru_cache[lru_last]; + kr.idx = last.idx; + last.idx = -1; + lru_cache[last.prev].next = 0; + lru_last = last.prev; + last.prev = 0; + last.next = 0; + } + kernel->calc( sample_count, var_count, samples.ptr(), + samples.ptr(i1), lru_cache_data.ptr(kr.idx) ); + } + else + { + if( kr.next ) + lru_cache[kr.next].prev = kr.prev; + else + lru_last = kr.prev; + if( kr.prev ) + lru_cache[kr.prev].next = kr.next; + else + lru_first = kr.next; + } + if (lru_first) + lru_cache[lru_first].prev = i1+1; + kr.next = lru_first; + kr.prev = 0; + lru_first = i1+1; + + return lru_cache_data.ptr(kr.idx); + } + + Qfloat* get_row_svc( int i, Qfloat* row, Qfloat*, bool existed ) + { + if( !existed ) + { + const schar* _y = &y_vec[0]; + int j, len = sample_count; + + if( _y[i] > 0 ) + { + for( j = 0; j < len; j++ ) + row[j] = _y[j]*row[j]; + } + else + { + for( j = 0; j < len; j++ ) + row[j] = -_y[j]*row[j]; + } + } + return row; + } + + Qfloat* get_row_one_class( int, Qfloat* row, Qfloat*, bool ) + { + return row; + } + + Qfloat* get_row_svr( int i, Qfloat* row, Qfloat* dst, bool ) + { + int j, len = sample_count; + Qfloat* dst_pos = dst; + Qfloat* dst_neg = dst + len; + if( i >= len ) + std::swap(dst_pos, dst_neg); + + for( j = 0; j < len; j++ ) + { + Qfloat t = row[j]; + dst_pos[j] = t; + dst_neg[j] = -t; + } + return dst; + } + + Qfloat* get_row( int i, float* dst ) + { + bool existed = false; + float* row = get_row_base( i, &existed ); + return (this->*get_row_func)( i, row, dst, existed ); + } + + #undef is_upper_bound + #define is_upper_bound(i) (alpha_status[i] > 0) + + #undef is_lower_bound + #define is_lower_bound(i) (alpha_status[i] < 0) + + #undef get_C + #define get_C(i) (C[y[i]>0]) + + #undef update_alpha_status + #define update_alpha_status(i) \ + alpha_status[i] = (schar)(alpha[i] >= get_C(i) ? 1 : alpha[i] <= 0 ? -1 : 0) + + bool solve_generic( SolutionInfo& si ) + { + const schar* y = &y_vec[0]; + double* alpha = &alpha_vec->at(0); + schar* alpha_status = &alpha_status_vec[0]; + double* G = &G_vec[0]; + double* b = &b_vec[0]; + + int iter = 0; + int i, j, k; + + // 1. 
initialize gradient and alpha status + for( i = 0; i < alpha_count; i++ ) + { + update_alpha_status(i); + G[i] = b[i]; + if( fabs(G[i]) > 1e200 ) + return false; + } + + for( i = 0; i < alpha_count; i++ ) + { + if( !is_lower_bound(i) ) + { + const Qfloat *Q_i = get_row( i, &buf[0][0] ); + double alpha_i = alpha[i]; + + for( j = 0; j < alpha_count; j++ ) + G[j] += alpha_i*Q_i[j]; + } + } + + // 2. optimization loop + for(;;) + { + const Qfloat *Q_i, *Q_j; + double C_i, C_j; + double old_alpha_i, old_alpha_j, alpha_i, alpha_j; + double delta_alpha_i, delta_alpha_j; + + #ifdef _DEBUG + for( i = 0; i < alpha_count; i++ ) + { + if( fabs(G[i]) > 1e+300 ) + return false; + + if( fabs(alpha[i]) > 1e16 ) + return false; + } + #endif + + if( (this->*select_working_set_func)( i, j ) != 0 || iter++ >= max_iter ) + break; + + Q_i = get_row( i, &buf[0][0] ); + Q_j = get_row( j, &buf[1][0] ); + + C_i = get_C(i); + C_j = get_C(j); + + alpha_i = old_alpha_i = alpha[i]; + alpha_j = old_alpha_j = alpha[j]; + + if( y[i] != y[j] ) + { + double denom = Q_i[i]+Q_j[j]+2*Q_i[j]; + double delta = (-G[i]-G[j])/MAX(fabs(denom),FLT_EPSILON); + double diff = alpha_i - alpha_j; + alpha_i += delta; + alpha_j += delta; + + if( diff > 0 && alpha_j < 0 ) + { + alpha_j = 0; + alpha_i = diff; + } + else if( diff <= 0 && alpha_i < 0 ) + { + alpha_i = 0; + alpha_j = -diff; + } + + if( diff > C_i - C_j && alpha_i > C_i ) + { + alpha_i = C_i; + alpha_j = C_i - diff; + } + else if( diff <= C_i - C_j && alpha_j > C_j ) + { + alpha_j = C_j; + alpha_i = C_j + diff; + } + } + else + { + double denom = Q_i[i]+Q_j[j]-2*Q_i[j]; + double delta = (G[i]-G[j])/MAX(fabs(denom),FLT_EPSILON); + double sum = alpha_i + alpha_j; + alpha_i -= delta; + alpha_j += delta; + + if( sum > C_i && alpha_i > C_i ) + { + alpha_i = C_i; + alpha_j = sum - C_i; + } + else if( sum <= C_i && alpha_j < 0) + { + alpha_j = 0; + alpha_i = sum; + } + + if( sum > C_j && alpha_j > C_j ) + { + alpha_j = C_j; + alpha_i = sum - C_j; + } + else if( sum <= C_j && alpha_i < 0 ) + { + alpha_i = 0; + alpha_j = sum; + } + } + + // update alpha + alpha[i] = alpha_i; + alpha[j] = alpha_j; + update_alpha_status(i); + update_alpha_status(j); + + // update G + delta_alpha_i = alpha_i - old_alpha_i; + delta_alpha_j = alpha_j - old_alpha_j; + + for( k = 0; k < alpha_count; k++ ) + G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j; + } + + // calculate rho + (this->*calc_rho_func)( si.rho, si.r ); + + // calculate objective value + for( i = 0, si.obj = 0; i < alpha_count; i++ ) + si.obj += alpha[i] * (G[i] + b[i]); + + si.obj *= 0.5; + + si.upper_bound_p = C[1]; + si.upper_bound_n = C[0]; + + return true; + } + + // return 1 if already optimal, return 0 otherwise + bool select_working_set( int& out_i, int& out_j ) + { + // return i,j which maximize -grad(f)^T d , under constraint + // if alpha_i == C, d != +1 + // if alpha_i == 0, d != -1 + double Gmax1 = -DBL_MAX; // max { -grad(f)_i * d | y_i*d = +1 } + int Gmax1_idx = -1; + + double Gmax2 = -DBL_MAX; // max { -grad(f)_i * d | y_i*d = -1 } + int Gmax2_idx = -1; + + const schar* y = &y_vec[0]; + const schar* alpha_status = &alpha_status_vec[0]; + const double* G = &G_vec[0]; + + for( int i = 0; i < alpha_count; i++ ) + { + double t; + + if( y[i] > 0 ) // y = +1 + { + if( !is_upper_bound(i) && (t = -G[i]) > Gmax1 ) // d = +1 + { + Gmax1 = t; + Gmax1_idx = i; + } + if( !is_lower_bound(i) && (t = G[i]) > Gmax2 ) // d = -1 + { + Gmax2 = t; + Gmax2_idx = i; + } + } + else // y = -1 + { + if( !is_upper_bound(i) && (t = -G[i]) > Gmax2 ) // 
d = +1 + { + Gmax2 = t; + Gmax2_idx = i; + } + if( !is_lower_bound(i) && (t = G[i]) > Gmax1 ) // d = -1 + { + Gmax1 = t; + Gmax1_idx = i; + } + } + } + + out_i = Gmax1_idx; + out_j = Gmax2_idx; + + return Gmax1 + Gmax2 < eps; + } + + void calc_rho( double& rho, double& r ) + { + int nr_free = 0; + double ub = DBL_MAX, lb = -DBL_MAX, sum_free = 0; + const schar* y = &y_vec[0]; + const schar* alpha_status = &alpha_status_vec[0]; + const double* G = &G_vec[0]; + + for( int i = 0; i < alpha_count; i++ ) + { + double yG = y[i]*G[i]; + + if( is_lower_bound(i) ) + { + if( y[i] > 0 ) + ub = MIN(ub,yG); + else + lb = MAX(lb,yG); + } + else if( is_upper_bound(i) ) + { + if( y[i] < 0) + ub = MIN(ub,yG); + else + lb = MAX(lb,yG); + } + else + { + ++nr_free; + sum_free += yG; + } + } + + rho = nr_free > 0 ? sum_free/nr_free : (ub + lb)*0.5; + r = 0; + } + + bool select_working_set_nu_svm( int& out_i, int& out_j ) + { + // return i,j which maximize -grad(f)^T d , under constraint + // if alpha_i == C, d != +1 + // if alpha_i == 0, d != -1 + double Gmax1 = -DBL_MAX; // max { -grad(f)_i * d | y_i = +1, d = +1 } + int Gmax1_idx = -1; + + double Gmax2 = -DBL_MAX; // max { -grad(f)_i * d | y_i = +1, d = -1 } + int Gmax2_idx = -1; + + double Gmax3 = -DBL_MAX; // max { -grad(f)_i * d | y_i = -1, d = +1 } + int Gmax3_idx = -1; + + double Gmax4 = -DBL_MAX; // max { -grad(f)_i * d | y_i = -1, d = -1 } + int Gmax4_idx = -1; + + const schar* y = &y_vec[0]; + const schar* alpha_status = &alpha_status_vec[0]; + const double* G = &G_vec[0]; + + for( int i = 0; i < alpha_count; i++ ) + { + double t; + + if( y[i] > 0 ) // y == +1 + { + if( !is_upper_bound(i) && (t = -G[i]) > Gmax1 ) // d = +1 + { + Gmax1 = t; + Gmax1_idx = i; + } + if( !is_lower_bound(i) && (t = G[i]) > Gmax2 ) // d = -1 + { + Gmax2 = t; + Gmax2_idx = i; + } + } + else // y == -1 + { + if( !is_upper_bound(i) && (t = -G[i]) > Gmax3 ) // d = +1 + { + Gmax3 = t; + Gmax3_idx = i; + } + if( !is_lower_bound(i) && (t = G[i]) > Gmax4 ) // d = -1 + { + Gmax4 = t; + Gmax4_idx = i; + } + } + } + + if( MAX(Gmax1 + Gmax2, Gmax3 + Gmax4) < eps ) + return 1; + + if( Gmax1 + Gmax2 > Gmax3 + Gmax4 ) + { + out_i = Gmax1_idx; + out_j = Gmax2_idx; + } + else + { + out_i = Gmax3_idx; + out_j = Gmax4_idx; + } + return 0; + } + + void calc_rho_nu_svm( double& rho, double& r ) + { + int nr_free1 = 0, nr_free2 = 0; + double ub1 = DBL_MAX, ub2 = DBL_MAX; + double lb1 = -DBL_MAX, lb2 = -DBL_MAX; + double sum_free1 = 0, sum_free2 = 0; + + const schar* y = &y_vec[0]; + const schar* alpha_status = &alpha_status_vec[0]; + const double* G = &G_vec[0]; + + for( int i = 0; i < alpha_count; i++ ) + { + double G_i = G[i]; + if( y[i] > 0 ) + { + if( is_lower_bound(i) ) + ub1 = MIN( ub1, G_i ); + else if( is_upper_bound(i) ) + lb1 = MAX( lb1, G_i ); + else + { + ++nr_free1; + sum_free1 += G_i; + } + } + else + { + if( is_lower_bound(i) ) + ub2 = MIN( ub2, G_i ); + else if( is_upper_bound(i) ) + lb2 = MAX( lb2, G_i ); + else + { + ++nr_free2; + sum_free2 += G_i; + } + } + } + + double r1 = nr_free1 > 0 ? sum_free1/nr_free1 : (ub1 + lb1)*0.5; + double r2 = nr_free2 > 0 ? 
sum_free2/nr_free2 : (ub2 + lb2)*0.5; + + rho = (r1 - r2)*0.5; + r = (r1 + r2)*0.5; + } + + /* + ///////////////////////// construct and solve various formulations /////////////////////// + */ + static bool solve_c_svc( const Mat& _samples, const vector& _y, + double _Cp, double _Cn, const Ptr& _kernel, + vector& _alpha, SolutionInfo& _si, TermCriteria termCrit ) + { + int sample_count = _samples.rows; + + _alpha.assign(sample_count, 0.); + vector _b(sample_count, -1.); + + Solver solver( _samples, _y, _alpha, _b, _Cp, _Cn, _kernel, + &Solver::get_row_svc, + &Solver::select_working_set, + &Solver::calc_rho, + termCrit ); + + if( !solver.solve_generic( _si )) + return false; + + for( int i = 0; i < sample_count; i++ ) + _alpha[i] *= _y[i]; + + return true; + } + + + static bool solve_nu_svc( const Mat& _samples, const vector& _y, + double nu, const Ptr& _kernel, + vector& _alpha, SolutionInfo& _si, + TermCriteria termCrit ) + { + int sample_count = _samples.rows; + + _alpha.resize(sample_count); + vector _b(sample_count, 0.); + + double sum_pos = nu * sample_count * 0.5; + double sum_neg = nu * sample_count * 0.5; + + for( int i = 0; i < sample_count; i++ ) + { + double a; + if( _y[i] > 0 ) + { + a = std::min(1.0, sum_pos); + sum_pos -= a; + } + else + { + a = std::min(1.0, sum_neg); + sum_neg -= a; + } + _alpha[i] = a; + } + + Solver solver( _samples, _y, _alpha, _b, 1., 1., _kernel, + &Solver::get_row_svc, + &Solver::select_working_set_nu_svm, + &Solver::calc_rho_nu_svm, + termCrit ); + + if( !solver.solve_generic( _si )) + return false; + + double inv_r = 1./_si.r; + + for( int i = 0; i < sample_count; i++ ) + _alpha[i] *= _y[i]*inv_r; + + _si.rho *= inv_r; + _si.obj *= (inv_r*inv_r); + _si.upper_bound_p = inv_r; + _si.upper_bound_n = inv_r; + + return true; + } + + static bool solve_one_class( const Mat& _samples, double nu, + const Ptr& _kernel, + vector& _alpha, SolutionInfo& _si, + TermCriteria termCrit ) + { + int sample_count = _samples.rows; + vector _y(sample_count, 1); + vector _b(sample_count, 0.); + + int i, n = cvRound( nu*sample_count ); + + _alpha.resize(sample_count); + for( i = 0; i < sample_count; i++ ) + _alpha[i] = i < n ? 
1 : 0; + + if( n < sample_count ) + _alpha[n] = nu * sample_count - n; + else + _alpha[n-1] = nu * sample_count - (n-1); + + Solver solver( _samples, _y, _alpha, _b, 1., 1., _kernel, + &Solver::get_row_one_class, + &Solver::select_working_set, + &Solver::calc_rho, + termCrit ); + + return solver.solve_generic(_si); + } + + static bool solve_eps_svr( const Mat& _samples, const vector& _yf, + double p, double C, const Ptr& _kernel, + vector& _alpha, SolutionInfo& _si, + TermCriteria termCrit ) + { + int sample_count = _samples.rows; + int alpha_count = sample_count*2; + + CV_Assert( (int)_yf.size() == sample_count ); + + _alpha.assign(alpha_count, 0.); + vector _y(alpha_count); + vector _b(alpha_count); + + for( int i = 0; i < sample_count; i++ ) + { + _b[i] = p - _yf[i]; + _y[i] = 1; + + _b[i+sample_count] = p + _yf[i]; + _y[i+sample_count] = -1; + } + + Solver solver( _samples, _y, _alpha, _b, C, C, _kernel, + &Solver::get_row_svr, + &Solver::select_working_set, + &Solver::calc_rho, + termCrit ); + + if( !solver.solve_generic( _si )) + return false; + + for( int i = 0; i < sample_count; i++ ) + _alpha[i] -= _alpha[i+sample_count]; + + return true; + } + + + static bool solve_nu_svr( const Mat& _samples, const vector& _yf, + double nu, double C, const Ptr& _kernel, + vector& _alpha, SolutionInfo& _si, + TermCriteria termCrit ) + { + int sample_count = _samples.rows; + int alpha_count = sample_count*2; + double sum = C * nu * sample_count * 0.5; + + CV_Assert( (int)_yf.size() == sample_count ); + + _alpha.resize(alpha_count); + vector _y(alpha_count); + vector _b(alpha_count); + + for( int i = 0; i < sample_count; i++ ) + { + _alpha[i] = _alpha[i + sample_count] = std::min(sum, C); + sum -= _alpha[i]; + + _b[i] = -_yf[i]; + _y[i] = 1; + + _b[i + sample_count] = _yf[i]; + _y[i + sample_count] = -1; + } + + Solver solver( _samples, _y, _alpha, _b, 1., 1., _kernel, + &Solver::get_row_svr, + &Solver::select_working_set_nu_svm, + &Solver::calc_rho_nu_svm, + termCrit ); + + if( !solver.solve_generic( _si )) + return false; + + for( int i = 0; i < sample_count; i++ ) + _alpha[i] -= _alpha[i+sample_count]; + + return true; + } + + int sample_count; + int var_count; + int cache_size; + int max_cache_size; + Mat samples; + SvmParams params; + vector lru_cache; + int lru_first; + int lru_last; + Mat lru_cache_data; + + int alpha_count; + + vector G_vec; + vector* alpha_vec; + vector y_vec; + // -1 - lower bound, 0 - free, 1 - upper bound + vector alpha_status_vec; + vector b_vec; + + vector buf[2]; + double eps; + int max_iter; + double C[2]; // C[0] == Cn, C[1] == Cp + Ptr kernel; + + SelectWorkingSet select_working_set_func; + CalcRho calc_rho_func; + GetRow get_row_func; + }; + + ////////////////////////////////////////////////////////////////////////////////////////// + SVMImpl() + { + clear(); + checkParams(); + } + + ~SVMImpl() + { + clear(); + } + + void clear() CV_OVERRIDE + { + decision_func.clear(); + df_alpha.clear(); + df_index.clear(); + sv.release(); + uncompressed_sv.release(); + } + + Mat getUncompressedSupportVectors() const CV_OVERRIDE + { + return uncompressed_sv; + } + + Mat getSupportVectors() const CV_OVERRIDE + { + return sv; + } + + inline int getType() const CV_OVERRIDE { return params.svmType; } + inline void setType(int val) CV_OVERRIDE { params.svmType = val; } + inline double getGamma() const CV_OVERRIDE { return params.gamma; } + inline void setGamma(double val) CV_OVERRIDE { params.gamma = val; } + inline double getCoef0() const CV_OVERRIDE { return params.coef0; } + 
inline void setCoef0(double val) CV_OVERRIDE { params.coef0 = val; } + inline double getDegree() const CV_OVERRIDE { return params.degree; } + inline void setDegree(double val) CV_OVERRIDE { params.degree = val; } + inline double getC() const CV_OVERRIDE { return params.C; } + inline void setC(double val) CV_OVERRIDE { params.C = val; } + inline double getNu() const CV_OVERRIDE { return params.nu; } + inline void setNu(double val) CV_OVERRIDE { params.nu = val; } + inline double getP() const CV_OVERRIDE { return params.p; } + inline void setP(double val) CV_OVERRIDE { params.p = val; } + inline cv::Mat getClassWeights() const CV_OVERRIDE { return params.classWeights; } + inline void setClassWeights(const cv::Mat& val) CV_OVERRIDE { params.classWeights = val; } + inline cv::TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; } + inline void setTermCriteria(const cv::TermCriteria& val) CV_OVERRIDE { params.termCrit = val; } + + int getKernelType() const CV_OVERRIDE { return params.kernelType; } + void setKernel(int kernelType) CV_OVERRIDE + { + params.kernelType = kernelType; + if (kernelType != CUSTOM) + kernel = makePtr(params); + } + + void setCustomKernel(const Ptr &_kernel) CV_OVERRIDE + { + params.kernelType = CUSTOM; + kernel = _kernel; + } + + void checkParams() + { + int kernelType = params.kernelType; + if (kernelType != CUSTOM) + { + if( kernelType != LINEAR && kernelType != POLY && + kernelType != SIGMOID && kernelType != RBF && + kernelType != INTER && kernelType != CHI2) + CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" ); + + if( kernelType == LINEAR ) + params.gamma = 1; + else if( params.gamma <= 0 ) + CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" ); + + if( kernelType != SIGMOID && kernelType != POLY ) + params.coef0 = 0; + + if( kernelType != POLY ) + params.degree = 0; + else if( params.degree <= 0 ) + CV_Error( CV_StsOutOfRange, "The kernel parameter must be positive" ); + + kernel = makePtr(params); + } + else + { + if (!kernel) + CV_Error( CV_StsBadArg, "Custom kernel is not set" ); + } + + int svmType = params.svmType; + + if( svmType != C_SVC && svmType != NU_SVC && + svmType != ONE_CLASS && svmType != EPS_SVR && + svmType != NU_SVR ) + CV_Error( CV_StsBadArg, "Unknown/unsupported SVM type" ); + + if( svmType == ONE_CLASS || svmType == NU_SVC ) + params.C = 0; + else if( params.C <= 0 ) + CV_Error( CV_StsOutOfRange, "The parameter C must be positive" ); + + if( svmType == C_SVC || svmType == EPS_SVR ) + params.nu = 0; + else if( params.nu <= 0 || params.nu >= 1 ) + CV_Error( CV_StsOutOfRange, "The parameter nu must be between 0 and 1" ); + + if( svmType != EPS_SVR ) + params.p = 0; + else if( params.p <= 0 ) + CV_Error( CV_StsOutOfRange, "The parameter p must be positive" ); + + if( svmType != C_SVC ) + params.classWeights.release(); + + if( !(params.termCrit.type & TermCriteria::EPS) ) + params.termCrit.epsilon = DBL_EPSILON; + params.termCrit.epsilon = std::max(params.termCrit.epsilon, DBL_EPSILON); + if( !(params.termCrit.type & TermCriteria::COUNT) ) + params.termCrit.maxCount = INT_MAX; + params.termCrit.maxCount = std::max(params.termCrit.maxCount, 1); + } + + void setParams( const SvmParams& _params) + { + params = _params; + checkParams(); + } + + int getSVCount(int i) const + { + return (i < (int)(decision_func.size()-1) ? 
decision_func[i+1].ofs : + (int)df_index.size()) - decision_func[i].ofs; + } + + bool do_train( const Mat& _samples, const Mat& _responses ) + { + int svmType = params.svmType; + int i, j, k, sample_count = _samples.rows; + vector _alpha; + Solver::SolutionInfo sinfo; + + CV_Assert( _samples.type() == CV_32F ); + var_count = _samples.cols; + + if( svmType == ONE_CLASS || svmType == EPS_SVR || svmType == NU_SVR ) + { + int sv_count = 0; + decision_func.clear(); + + vector _yf; + if( !_responses.empty() ) + _responses.convertTo(_yf, CV_32F); + + bool ok = + svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, params.termCrit ) : + svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, params.termCrit ) : + svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, params.termCrit ) : false; + + if( !ok ) + return false; + + for( i = 0; i < sample_count; i++ ) + sv_count += fabs(_alpha[i]) > 0; + + CV_Assert(sv_count != 0); + + sv.create(sv_count, _samples.cols, CV_32F); + df_alpha.resize(sv_count); + df_index.resize(sv_count); + + for( i = k = 0; i < sample_count; i++ ) + { + if( std::abs(_alpha[i]) > 0 ) + { + _samples.row(i).copyTo(sv.row(k)); + df_alpha[k] = _alpha[i]; + df_index[k] = k; + k++; + } + } + + decision_func.push_back(DecisionFunc(sinfo.rho, 0)); + } + else + { + int class_count = (int)class_labels.total(); + vector svidx, sidx, sidx_all, sv_tab(sample_count, 0); + Mat temp_samples, class_weights; + vector class_ranges; + vector temp_y; + double nu = params.nu; + CV_Assert( svmType == C_SVC || svmType == NU_SVC ); + + if( svmType == C_SVC && !params.classWeights.empty() ) + { + const Mat cw = params.classWeights; + + if( (cw.cols != 1 && cw.rows != 1) || + (int)cw.total() != class_count || + (cw.type() != CV_32F && cw.type() != CV_64F) ) + CV_Error( CV_StsBadArg, "params.class_weights must be 1d floating-point vector " + "containing as many elements as the number of classes" ); + + cw.convertTo(class_weights, CV_64F, params.C); + //normalize(cw, class_weights, params.C, 0, NORM_L1, CV_64F); + } + + decision_func.clear(); + df_alpha.clear(); + df_index.clear(); + + sortSamplesByClasses( _samples, _responses, sidx_all, class_ranges ); + + //check that while cross-validation there were the samples from all the classes + if ((int)class_ranges.size() < class_count + 1) + CV_Error( CV_StsBadArg, "While cross-validation one or more of the classes have " + "been fell out of the sample. Try to reduce " ); + + if( svmType == NU_SVC ) + { + // check if nu is feasible + for( i = 0; i < class_count; i++ ) + { + int ci = class_ranges[i+1] - class_ranges[i]; + for( j = i+1; j< class_count; j++ ) + { + int cj = class_ranges[j+1] - class_ranges[j]; + if( nu*(ci + cj)*0.5 > std::min( ci, cj ) ) + // TODO: add some diagnostic + return false; + } + } + } + + size_t samplesize = _samples.cols*_samples.elemSize(); + + // train n*(n-1)/2 classifiers + for( i = 0; i < class_count; i++ ) + { + for( j = i+1; j < class_count; j++ ) + { + int si = class_ranges[i], ci = class_ranges[i+1] - si; + int sj = class_ranges[j], cj = class_ranges[j+1] - sj; + double Cp = params.C, Cn = Cp; + + temp_samples.create(ci + cj, _samples.cols, _samples.type()); + sidx.resize(ci + cj); + temp_y.resize(ci + cj); + + // form input for the binary classification problem + for( k = 0; k < ci+cj; k++ ) + { + int idx = k < ci ? 
si+k : sj+k-ci; + memcpy(temp_samples.ptr(k), _samples.ptr(sidx_all[idx]), samplesize); + sidx[k] = sidx_all[idx]; + temp_y[k] = k < ci ? 1 : -1; + } + + if( !class_weights.empty() ) + { + Cp = class_weights.at(i); + Cn = class_weights.at(j); + } + + DecisionFunc df; + bool ok = params.svmType == C_SVC ? + Solver::solve_c_svc( temp_samples, temp_y, Cp, Cn, + kernel, _alpha, sinfo, params.termCrit ) : + params.svmType == NU_SVC ? + Solver::solve_nu_svc( temp_samples, temp_y, params.nu, + kernel, _alpha, sinfo, params.termCrit ) : + false; + if( !ok ) + return false; + df.rho = sinfo.rho; + df.ofs = (int)df_index.size(); + decision_func.push_back(df); + + for( k = 0; k < ci + cj; k++ ) + { + if( std::abs(_alpha[k]) > 0 ) + { + int idx = k < ci ? si+k : sj+k-ci; + sv_tab[sidx_all[idx]] = 1; + df_index.push_back(sidx_all[idx]); + df_alpha.push_back(_alpha[k]); + } + } + } + } + + // allocate support vectors and initialize sv_tab + for( i = 0, k = 0; i < sample_count; i++ ) + { + if( sv_tab[i] ) + sv_tab[i] = ++k; + } + + int sv_total = k; + sv.create(sv_total, _samples.cols, _samples.type()); + + for( i = 0; i < sample_count; i++ ) + { + if( !sv_tab[i] ) + continue; + memcpy(sv.ptr(sv_tab[i]-1), _samples.ptr(i), samplesize); + } + + // set sv pointers + int n = (int)df_index.size(); + for( i = 0; i < n; i++ ) + { + CV_Assert( sv_tab[df_index[i]] > 0 ); + df_index[i] = sv_tab[df_index[i]] - 1; + } + } + + optimize_linear_svm(); + + return true; + } + + void optimize_linear_svm() + { + // we optimize only linear SVM: compress all the support vectors into one. + if( params.kernelType != LINEAR ) + return; + + int i, df_count = (int)decision_func.size(); + + for( i = 0; i < df_count; i++ ) + { + if( getSVCount(i) != 1 ) + break; + } + + // if every decision functions uses a single support vector; + // it's already compressed. skip it then. 
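+        // Otherwise each decision function is compressed below: its support vectors are
+        // folded into a single weight vector w = sum_j alpha_j * sv_j, so a linear
+        // prediction reduces to one dot product. The original vectors are preserved
+        // in uncompressed_sv.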
+ if( i == df_count ) + return; + + AutoBuffer vbuf(var_count); + double* v = vbuf.data(); + Mat new_sv(df_count, var_count, CV_32F); + + vector new_df; + + for( i = 0; i < df_count; i++ ) + { + float* dst = new_sv.ptr(i); + memset(v, 0, var_count*sizeof(v[0])); + int j, k, sv_count = getSVCount(i); + const DecisionFunc& df = decision_func[i]; + const int* sv_index = &df_index[df.ofs]; + const double* sv_alpha = &df_alpha[df.ofs]; + for( j = 0; j < sv_count; j++ ) + { + const float* src = sv.ptr(sv_index[j]); + double a = sv_alpha[j]; + for( k = 0; k < var_count; k++ ) + v[k] += src[k]*a; + } + for( k = 0; k < var_count; k++ ) + dst[k] = (float)v[k]; + new_df.push_back(DecisionFunc(df.rho, i)); + } + + setRangeVector(df_index, df_count); + df_alpha.assign(df_count, 1.); + sv.copyTo(uncompressed_sv); + std::swap(sv, new_sv); + std::swap(decision_func, new_df); + } + + bool train( const Ptr& data, int ) CV_OVERRIDE + { + CV_Assert(!data.empty()); + clear(); + + checkParams(); + + int svmType = params.svmType; + Mat samples = data->getTrainSamples(); + Mat responses; + + if( svmType == C_SVC || svmType == NU_SVC ) + { + responses = data->getTrainNormCatResponses(); + if( responses.empty() ) + CV_Error(CV_StsBadArg, "in the case of classification problem the responses must be categorical; " + "either specify varType when creating TrainData, or pass integer responses"); + class_labels = data->getClassLabels(); + } + else + responses = data->getTrainResponses(); + + if( !do_train( samples, responses )) + { + clear(); + return false; + } + + return true; + } + + class TrainAutoBody : public ParallelLoopBody + { + public: + TrainAutoBody(const vector& _parameters, + const cv::Mat& _samples, + const cv::Mat& _responses, + const cv::Mat& _labels, + const vector& _sidx, + bool _is_classification, + int _k_fold, + std::vector& _result) : + parameters(_parameters), samples(_samples), responses(_responses), labels(_labels), + sidx(_sidx), is_classification(_is_classification), k_fold(_k_fold), result(_result) + {} + + void operator()( const cv::Range& range ) const CV_OVERRIDE + { + int sample_count = samples.rows; + int var_count_ = samples.cols; + size_t sample_size = var_count_*samples.elemSize(); + + int test_sample_count = (sample_count + k_fold/2)/k_fold; + int train_sample_count = sample_count - test_sample_count; + + // Use a local instance + cv::Ptr svm = makePtr(); + svm->class_labels = labels; + + int rtype = responses.type(); + + Mat temp_train_samples(train_sample_count, var_count_, CV_32F); + Mat temp_test_samples(test_sample_count, var_count_, CV_32F); + Mat temp_train_responses(train_sample_count, 1, rtype); + Mat temp_test_responses; + + for( int p = range.start; p < range.end; p++ ) + { + svm->setParams(parameters[p]); + + double error = 0; + for( int k = 0; k < k_fold; k++ ) + { + int start = (k*sample_count + k_fold/2)/k_fold; + for( int i = 0; i < train_sample_count; i++ ) + { + int j = sidx[(i+start)%sample_count]; + memcpy(temp_train_samples.ptr(i), samples.ptr(j), sample_size); + if( is_classification ) + temp_train_responses.at(i) = responses.at(j); + else if( !responses.empty() ) + temp_train_responses.at(i) = responses.at(j); + } + + // Train SVM on samples + if( !svm->do_train( temp_train_samples, temp_train_responses )) + continue; + + for( int i = 0; i < test_sample_count; i++ ) + { + int j = sidx[(i+start+train_sample_count) % sample_count]; + memcpy(temp_test_samples.ptr(i), samples.ptr(j), sample_size); + } + + svm->predict(temp_test_samples, temp_test_responses, 0); + 
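+                // accumulate the validation error of this fold: for
+                // classification it is the number of misclassified test
+                // samples, for regression the sum of squared differences;
+                // the parameter set with the smallest total error over all
+                // k folds is picked once every fold has been evaluated.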
for( int i = 0; i < test_sample_count; i++ ) + { + float val = temp_test_responses.at(i); + int j = sidx[(i+start+train_sample_count) % sample_count]; + if( is_classification ) + error += (float)(val != responses.at(j)); + else + { + val -= responses.at(j); + error += val*val; + } + } + } + + result[p] = error; + } + } + + private: + const vector& parameters; + const cv::Mat& samples; + const cv::Mat& responses; + const cv::Mat& labels; + const vector& sidx; + bool is_classification; + int k_fold; + std::vector& result; + }; + + bool trainAuto( const Ptr& data, int k_fold, + ParamGrid C_grid, ParamGrid gamma_grid, ParamGrid p_grid, + ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid, + bool balanced ) CV_OVERRIDE + { + CV_Assert(!data.empty()); + checkParams(); + + int svmType = params.svmType; + RNG rng((uint64)-1); + + if( svmType == ONE_CLASS ) + // current implementation of "auto" svm does not support the 1-class case. + return train( data, 0 ); + + clear(); + + CV_Assert( k_fold >= 2 ); + + // All the parameters except, possibly, are positive. + // is nonnegative + #define CHECK_GRID(grid, param) \ + if( grid.logStep <= 1 ) \ + { \ + grid.minVal = grid.maxVal = params.param; \ + grid.logStep = 10; \ + } \ + else \ + checkParamGrid(grid) + + CHECK_GRID(C_grid, C); + CHECK_GRID(gamma_grid, gamma); + CHECK_GRID(p_grid, p); + CHECK_GRID(nu_grid, nu); + CHECK_GRID(coef_grid, coef0); + CHECK_GRID(degree_grid, degree); + + // these parameters are not used: + if( params.kernelType != POLY ) + degree_grid.minVal = degree_grid.maxVal = params.degree; + if( params.kernelType == LINEAR ) + gamma_grid.minVal = gamma_grid.maxVal = params.gamma; + if( params.kernelType != POLY && params.kernelType != SIGMOID ) + coef_grid.minVal = coef_grid.maxVal = params.coef0; + if( svmType == NU_SVC || svmType == ONE_CLASS ) + C_grid.minVal = C_grid.maxVal = params.C; + if( svmType == C_SVC || svmType == EPS_SVR ) + nu_grid.minVal = nu_grid.maxVal = params.nu; + if( svmType != EPS_SVR ) + p_grid.minVal = p_grid.maxVal = params.p; + + Mat samples = data->getTrainSamples(); + Mat responses; + bool is_classification = false; + Mat class_labels0; + int class_count = (int)class_labels.total(); + + if( svmType == C_SVC || svmType == NU_SVC ) + { + responses = data->getTrainNormCatResponses(); + class_labels = data->getClassLabels(); + class_count = (int)class_labels.total(); + is_classification = true; + + vector temp_class_labels; + setRangeVector(temp_class_labels, class_count); + + // temporarily replace class labels with 0, 1, ..., NCLASSES-1 + class_labels0 = class_labels; + class_labels = Mat(temp_class_labels).clone(); + } + else + responses = data->getTrainResponses(); + + CV_Assert(samples.type() == CV_32F); + + int sample_count = samples.rows; + var_count = samples.cols; + + vector sidx; + setRangeVector(sidx, sample_count); + + // randomly permute training samples + for( int i = 0; i < sample_count; i++ ) + { + int i1 = rng.uniform(0, sample_count); + int i2 = rng.uniform(0, sample_count); + std::swap(sidx[i1], sidx[i2]); + } + + if( is_classification && class_count == 2 && balanced ) + { + // reshuffle the training set in such a way that + // instances of each class are divided more or less evenly + // between the k_fold parts. 
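+            // e.g. with 6 samples of class 0, 4 samples of class 1 and
+            // k_fold = 5, the boundaries b0/b1 computed below give every fold
+            // roughly 6/5 samples of class 0 and 4/5 of class 1 (rounded), and
+            // the entries inside each fold are shuffled once more so the two
+            // classes are mixed within it.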
+ vector sidx0, sidx1; + + for( int i = 0; i < sample_count; i++ ) + { + if( responses.at(sidx[i]) == 0 ) + sidx0.push_back(sidx[i]); + else + sidx1.push_back(sidx[i]); + } + + int n0 = (int)sidx0.size(), n1 = (int)sidx1.size(); + int a0 = 0, a1 = 0; + sidx.clear(); + for( int k = 0; k < k_fold; k++ ) + { + int b0 = ((k+1)*n0 + k_fold/2)/k_fold, b1 = ((k+1)*n1 + k_fold/2)/k_fold; + int a = (int)sidx.size(), b = a + (b0 - a0) + (b1 - a1); + for( int i = a0; i < b0; i++ ) + sidx.push_back(sidx0[i]); + for( int i = a1; i < b1; i++ ) + sidx.push_back(sidx1[i]); + for( int i = 0; i < (b - a); i++ ) + { + int i1 = rng.uniform(a, b); + int i2 = rng.uniform(a, b); + std::swap(sidx[i1], sidx[i2]); + } + a0 = b0; a1 = b1; + } + } + + // If grid.minVal == grid.maxVal, this will allow one and only one pass through the loop with params.var = grid.minVal. + #define FOR_IN_GRID(var, grid) \ + for( params.var = grid.minVal; params.var == grid.minVal || params.var < grid.maxVal; params.var = (grid.minVal == grid.maxVal) ? grid.maxVal + 1 : params.var * grid.logStep ) + + // Create the list of parameters to test + std::vector parameters; + FOR_IN_GRID(C, C_grid) + FOR_IN_GRID(gamma, gamma_grid) + FOR_IN_GRID(p, p_grid) + FOR_IN_GRID(nu, nu_grid) + FOR_IN_GRID(coef0, coef_grid) + FOR_IN_GRID(degree, degree_grid) + { + parameters.push_back(params); + } + + std::vector result(parameters.size()); + TrainAutoBody invoker(parameters, samples, responses, class_labels, sidx, + is_classification, k_fold, result); + parallel_for_(cv::Range(0,(int)parameters.size()), invoker); + + // Extract the best parameters + SvmParams best_params = params; + double min_error = FLT_MAX; + for( int i = 0; i < (int)result.size(); i++ ) + { + if( result[i] < min_error ) + { + min_error = result[i]; + best_params = parameters[i]; + } + } + + class_labels = class_labels0; + setParams(best_params); + return do_train( samples, responses ); + } + + struct PredictBody : ParallelLoopBody + { + PredictBody( const SVMImpl* _svm, const Mat& _samples, Mat& _results, bool _returnDFVal ) + { + svm = _svm; + results = &_results; + samples = &_samples; + returnDFVal = _returnDFVal; + } + + void operator()(const Range& range) const CV_OVERRIDE + { + int svmType = svm->params.svmType; + int sv_total = svm->sv.rows; + int class_count = !svm->class_labels.empty() ? (int)svm->class_labels.total() : svmType == ONE_CLASS ? 1 : 0; + + AutoBuffer _buffer(sv_total + (class_count+1)*2); + float* buffer = _buffer.data(); + + int i, j, dfi, k, si; + + if( svmType == EPS_SVR || svmType == NU_SVR || svmType == ONE_CLASS ) + { + for( si = range.start; si < range.end; si++ ) + { + const float* row_sample = samples->ptr(si); + svm->kernel->calc( sv_total, svm->var_count, svm->sv.ptr(), row_sample, buffer ); + + const SVMImpl::DecisionFunc* df = &svm->decision_func[0]; + double sum = -df->rho; + for( i = 0; i < sv_total; i++ ) + sum += buffer[i]*svm->df_alpha[i]; + float result = svm->params.svmType == ONE_CLASS && !returnDFVal ? 
(float)(sum > 0) : (float)sum; + results->at(si) = result; + } + } + else if( svmType == C_SVC || svmType == NU_SVC ) + { + int* vote = (int*)(buffer + sv_total); + + for( si = range.start; si < range.end; si++ ) + { + svm->kernel->calc( sv_total, svm->var_count, svm->sv.ptr(), + samples->ptr(si), buffer ); + double sum = 0.; + + memset( vote, 0, class_count*sizeof(vote[0])); + + for( i = dfi = 0; i < class_count; i++ ) + { + for( j = i+1; j < class_count; j++, dfi++ ) + { + const DecisionFunc& df = svm->decision_func[dfi]; + sum = -df.rho; + int sv_count = svm->getSVCount(dfi); + CV_DbgAssert(sv_count > 0); + const double* alpha = &svm->df_alpha[df.ofs]; + const int* sv_index = &svm->df_index[df.ofs]; + for( k = 0; k < sv_count; k++ ) + sum += alpha[k]*buffer[sv_index[k]]; + + vote[sum > 0 ? i : j]++; + } + } + + for( i = 1, k = 0; i < class_count; i++ ) + { + if( vote[i] > vote[k] ) + k = i; + } + float result = returnDFVal && class_count == 2 ? + (float)sum : (float)(svm->class_labels.at(k)); + results->at(si) = result; + } + } + else + CV_Error( CV_StsBadArg, "INTERNAL ERROR: Unknown SVM type, " + "the SVM structure is probably corrupted" ); + } + + const SVMImpl* svm; + const Mat* samples; + Mat* results; + bool returnDFVal; + }; + + bool trainAuto(InputArray samples, int layout, + InputArray responses, int kfold, Ptr Cgrid, + Ptr gammaGrid, Ptr pGrid, Ptr nuGrid, + Ptr coeffGrid, Ptr degreeGrid, bool balanced) CV_OVERRIDE + { + Ptr data = TrainData::create(samples, layout, responses); + return this->trainAuto( + data, kfold, + *Cgrid.get(), + *gammaGrid.get(), + *pGrid.get(), + *nuGrid.get(), + *coeffGrid.get(), + *degreeGrid.get(), + balanced); + } + + + float predict( InputArray _samples, OutputArray _results, int flags ) const CV_OVERRIDE + { + float result = 0; + Mat samples = _samples.getMat(), results; + int nsamples = samples.rows; + bool returnDFVal = (flags & RAW_OUTPUT) != 0; + + CV_Assert( samples.cols == var_count && samples.type() == CV_32F ); + + if( _results.needed() ) + { + _results.create( nsamples, 1, samples.type() ); + results = _results.getMat(); + } + else + { + CV_Assert( nsamples == 1 ); + results = Mat(1, 1, CV_32F, &result); + } + + PredictBody invoker(this, samples, results, returnDFVal); + if( nsamples < 10 ) + invoker(Range(0, nsamples)); + else + parallel_for_(Range(0, nsamples), invoker); + return result; + } + + double getDecisionFunction(int i, OutputArray _alpha, OutputArray _svidx ) const CV_OVERRIDE + { + CV_Assert( 0 <= i && i < (int)decision_func.size()); + const DecisionFunc& df = decision_func[i]; + int count = getSVCount(i); + Mat(1, count, CV_64F, (double*)&df_alpha[df.ofs]).copyTo(_alpha); + Mat(1, count, CV_32S, (int*)&df_index[df.ofs]).copyTo(_svidx); + return df.rho; + } + + void write_params( FileStorage& fs ) const + { + int svmType = params.svmType; + int kernelType = params.kernelType; + + String svm_type_str = + svmType == C_SVC ? "C_SVC" : + svmType == NU_SVC ? "NU_SVC" : + svmType == ONE_CLASS ? "ONE_CLASS" : + svmType == EPS_SVR ? "EPS_SVR" : + svmType == NU_SVR ? "NU_SVR" : format("Unknown_%d", svmType); + String kernel_type_str = + kernelType == LINEAR ? "LINEAR" : + kernelType == POLY ? "POLY" : + kernelType == RBF ? "RBF" : + kernelType == SIGMOID ? "SIGMOID" : + kernelType == CHI2 ? "CHI2" : + kernelType == INTER ? 
"INTER" : format("Unknown_%d", kernelType); + + fs << "svmType" << svm_type_str; + + // save kernel + fs << "kernel" << "{" << "type" << kernel_type_str; + + if( kernelType == POLY ) + fs << "degree" << params.degree; + + if( kernelType != LINEAR ) + fs << "gamma" << params.gamma; + + if( kernelType == POLY || kernelType == SIGMOID ) + fs << "coef0" << params.coef0; + + fs << "}"; + + if( svmType == C_SVC || svmType == EPS_SVR || svmType == NU_SVR ) + fs << "C" << params.C; + + if( svmType == NU_SVC || svmType == ONE_CLASS || svmType == NU_SVR ) + fs << "nu" << params.nu; + + if( svmType == EPS_SVR ) + fs << "p" << params.p; + + fs << "term_criteria" << "{:"; + if( params.termCrit.type & TermCriteria::EPS ) + fs << "epsilon" << params.termCrit.epsilon; + if( params.termCrit.type & TermCriteria::COUNT ) + fs << "iterations" << params.termCrit.maxCount; + fs << "}"; + } + + bool isTrained() const CV_OVERRIDE + { + return !sv.empty(); + } + + bool isClassifier() const CV_OVERRIDE + { + return params.svmType == C_SVC || params.svmType == NU_SVC || params.svmType == ONE_CLASS; + } + + int getVarCount() const CV_OVERRIDE + { + return var_count; + } + + String getDefaultName() const CV_OVERRIDE + { + return "opencv_ml_svm"; + } + + void write( FileStorage& fs ) const CV_OVERRIDE + { + int class_count = !class_labels.empty() ? (int)class_labels.total() : + params.svmType == ONE_CLASS ? 1 : 0; + if( !isTrained() ) + CV_Error( CV_StsParseError, "SVM model data is invalid, check sv_count, var_* and class_count tags" ); + + writeFormat(fs); + write_params( fs ); + + fs << "var_count" << var_count; + + if( class_count > 0 ) + { + fs << "class_count" << class_count; + + if( !class_labels.empty() ) + fs << "class_labels" << class_labels; + + if( !params.classWeights.empty() ) + fs << "class_weights" << params.classWeights; + } + + // write the joint collection of support vectors + int i, sv_total = sv.rows; + fs << "sv_total" << sv_total; + fs << "support_vectors" << "["; + for( i = 0; i < sv_total; i++ ) + { + fs << "[:"; + fs.writeRaw("f", sv.ptr(i), sv.cols*sv.elemSize()); + fs << "]"; + } + fs << "]"; + + if ( !uncompressed_sv.empty() ) + { + // write the joint collection of uncompressed support vectors + int uncompressed_sv_total = uncompressed_sv.rows; + fs << "uncompressed_sv_total" << uncompressed_sv_total; + fs << "uncompressed_support_vectors" << "["; + for( i = 0; i < uncompressed_sv_total; i++ ) + { + fs << "[:"; + fs.writeRaw("f", uncompressed_sv.ptr(i), uncompressed_sv.cols*uncompressed_sv.elemSize()); + fs << "]"; + } + fs << "]"; + } + + // write decision functions + int df_count = (int)decision_func.size(); + + fs << "decision_functions" << "["; + for( i = 0; i < df_count; i++ ) + { + const DecisionFunc& df = decision_func[i]; + int sv_count = getSVCount(i); + fs << "{" << "sv_count" << sv_count + << "rho" << df.rho + << "alpha" << "[:"; + fs.writeRaw("d", (const uchar*)&df_alpha[df.ofs], sv_count*sizeof(df_alpha[0])); + fs << "]"; + if( class_count >= 2 ) + { + fs << "index" << "[:"; + fs.writeRaw("i", (const uchar*)&df_index[df.ofs], sv_count*sizeof(df_index[0])); + fs << "]"; + } + else + CV_Assert( sv_count == sv_total ); + fs << "}"; + } + fs << "]"; + } + + void read_params( const FileNode& fn ) + { + SvmParams _params; + + // check for old naming + String svm_type_str = (String)(fn["svm_type"].empty() ? fn["svmType"] : fn["svm_type"]); + int svmType = + svm_type_str == "C_SVC" ? C_SVC : + svm_type_str == "NU_SVC" ? NU_SVC : + svm_type_str == "ONE_CLASS" ? 
ONE_CLASS : + svm_type_str == "EPS_SVR" ? EPS_SVR : + svm_type_str == "NU_SVR" ? NU_SVR : -1; + + if( svmType < 0 ) + CV_Error( CV_StsParseError, "Missing or invalid SVM type" ); + + FileNode kernel_node = fn["kernel"]; + if( kernel_node.empty() ) + CV_Error( CV_StsParseError, "SVM kernel tag is not found" ); + + String kernel_type_str = (String)kernel_node["type"]; + int kernelType = + kernel_type_str == "LINEAR" ? LINEAR : + kernel_type_str == "POLY" ? POLY : + kernel_type_str == "RBF" ? RBF : + kernel_type_str == "SIGMOID" ? SIGMOID : + kernel_type_str == "CHI2" ? CHI2 : + kernel_type_str == "INTER" ? INTER : CUSTOM; + + if( kernelType == CUSTOM ) + CV_Error( CV_StsParseError, "Invalid SVM kernel type (or custom kernel)" ); + + _params.svmType = svmType; + _params.kernelType = kernelType; + _params.degree = (double)kernel_node["degree"]; + _params.gamma = (double)kernel_node["gamma"]; + _params.coef0 = (double)kernel_node["coef0"]; + + _params.C = (double)fn["C"]; + _params.nu = (double)fn["nu"]; + _params.p = (double)fn["p"]; + _params.classWeights = Mat(); + + FileNode tcnode = fn["term_criteria"]; + if( !tcnode.empty() ) + { + _params.termCrit.epsilon = (double)tcnode["epsilon"]; + _params.termCrit.maxCount = (int)tcnode["iterations"]; + _params.termCrit.type = (_params.termCrit.epsilon > 0 ? TermCriteria::EPS : 0) + + (_params.termCrit.maxCount > 0 ? TermCriteria::COUNT : 0); + } + else + _params.termCrit = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 1000, FLT_EPSILON ); + + setParams( _params ); + } + + void read( const FileNode& fn ) CV_OVERRIDE + { + clear(); + + // read SVM parameters + read_params( fn ); + + // and top-level data + int i, sv_total = (int)fn["sv_total"]; + var_count = (int)fn["var_count"]; + int class_count = (int)fn["class_count"]; + + if( sv_total <= 0 || var_count <= 0 ) + CV_Error( CV_StsParseError, "SVM model data is invalid, check sv_count, var_* and class_count tags" ); + + FileNode m = fn["class_labels"]; + if( !m.empty() ) + m >> class_labels; + m = fn["class_weights"]; + if( !m.empty() ) + m >> params.classWeights; + + if( class_count > 1 && (class_labels.empty() || (int)class_labels.total() != class_count)) + CV_Error( CV_StsParseError, "Array of class labels is missing or invalid" ); + + // read support vectors + FileNode sv_node = fn["support_vectors"]; + + CV_Assert((int)sv_node.size() == sv_total); + + sv.create(sv_total, var_count, CV_32F); + FileNodeIterator sv_it = sv_node.begin(); + for( i = 0; i < sv_total; i++, ++sv_it ) + { + (*sv_it).readRaw("f", sv.ptr(i), var_count*sv.elemSize()); + } + + int uncompressed_sv_total = (int)fn["uncompressed_sv_total"]; + + if( uncompressed_sv_total > 0 ) + { + // read uncompressed support vectors + FileNode uncompressed_sv_node = fn["uncompressed_support_vectors"]; + + CV_Assert((int)uncompressed_sv_node.size() == uncompressed_sv_total); + uncompressed_sv.create(uncompressed_sv_total, var_count, CV_32F); + + FileNodeIterator uncompressed_sv_it = uncompressed_sv_node.begin(); + for( i = 0; i < uncompressed_sv_total; i++, ++uncompressed_sv_it ) + { + (*uncompressed_sv_it).readRaw("f", uncompressed_sv.ptr(i), var_count*uncompressed_sv.elemSize()); + } + } + + // read decision functions + int df_count = class_count > 1 ? 
class_count*(class_count-1)/2 : 1; + FileNode df_node = fn["decision_functions"]; + + CV_Assert((int)df_node.size() == df_count); + + FileNodeIterator df_it = df_node.begin(); + for( i = 0; i < df_count; i++, ++df_it ) + { + FileNode dfi = *df_it; + DecisionFunc df; + int sv_count = (int)dfi["sv_count"]; + int ofs = (int)df_index.size(); + df.rho = (double)dfi["rho"]; + df.ofs = ofs; + df_index.resize(ofs + sv_count); + df_alpha.resize(ofs + sv_count); + dfi["alpha"].readRaw("d", (uchar*)&df_alpha[ofs], sv_count*sizeof(df_alpha[0])); + if( class_count >= 2 ) + dfi["index"].readRaw("i", (uchar*)&df_index[ofs], sv_count*sizeof(df_index[0])); + decision_func.push_back(df); + } + if( class_count < 2 ) + setRangeVector(df_index, sv_total); + if( (int)fn["optimize_linear"] != 0 ) + optimize_linear_svm(); + } + + SvmParams params; + Mat class_labels; + int var_count; + Mat sv, uncompressed_sv; + vector decision_func; + vector df_alpha; + vector df_index; + + Ptr kernel; +}; + + +Ptr SVM::create() +{ + return makePtr(); +} + +Ptr SVM::load(const String& filepath) +{ + FileStorage fs; + fs.open(filepath, FileStorage::READ); + + Ptr svm = makePtr(); + + ((SVMImpl*)svm.get())->read(fs.getFirstTopLevelNode()); + return svm; +} + + +} +} + +/* End of file. */ diff --git a/modules/ml/src/svmsgd.cpp b/modules/ml/src/svmsgd.cpp new file mode 100644 index 00000000000..266c7cf300e --- /dev/null +++ b/modules/ml/src/svmsgd.cpp @@ -0,0 +1,524 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2016, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include "limits" + +#include + +using std::cout; +using std::endl; + +/****************************************************************************************\ +* Stochastic Gradient Descent SVM Classifier * +\****************************************************************************************/ + +namespace cv +{ +namespace ml +{ + +class SVMSGDImpl CV_FINAL : public SVMSGD +{ + +public: + SVMSGDImpl(); + + virtual ~SVMSGDImpl() {} + + virtual bool train(const Ptr& data, int) CV_OVERRIDE; + + virtual float predict( InputArray samples, OutputArray results=noArray(), int flags = 0 ) const CV_OVERRIDE; + + virtual bool isClassifier() const CV_OVERRIDE; + + virtual bool isTrained() const CV_OVERRIDE; + + virtual void clear() CV_OVERRIDE; + + virtual void write(FileStorage &fs) const CV_OVERRIDE; + + virtual void read(const FileNode &fn) CV_OVERRIDE; + + virtual Mat getWeights() CV_OVERRIDE { return weights_; } + + virtual float getShift() CV_OVERRIDE { return shift_; } + + virtual int getVarCount() const CV_OVERRIDE { return weights_.cols; } + + virtual String getDefaultName() const CV_OVERRIDE {return "opencv_ml_svmsgd";} + + virtual void setOptimalParameters(int svmsgdType = ASGD, int marginType = SOFT_MARGIN) CV_OVERRIDE; + + inline int getSvmsgdType() const CV_OVERRIDE { return params.svmsgdType; } + inline void setSvmsgdType(int val) CV_OVERRIDE { params.svmsgdType = val; } + inline int getMarginType() const CV_OVERRIDE { return params.marginType; } + inline void setMarginType(int val) CV_OVERRIDE { params.marginType = val; } + inline float getMarginRegularization() const CV_OVERRIDE { return params.marginRegularization; } + inline void setMarginRegularization(float val) CV_OVERRIDE { params.marginRegularization = val; } + inline float getInitialStepSize() const CV_OVERRIDE { return params.initialStepSize; } + inline void setInitialStepSize(float val) CV_OVERRIDE { params.initialStepSize = val; } + inline float getStepDecreasingPower() const CV_OVERRIDE { return params.stepDecreasingPower; } + inline void setStepDecreasingPower(float val) CV_OVERRIDE { params.stepDecreasingPower = val; } + inline cv::TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; } + inline void setTermCriteria(const cv::TermCriteria& val) CV_OVERRIDE { params.termCrit = val; } + +private: + void updateWeights(InputArray sample, bool positive, float stepSize, Mat &weights); + + void writeParams( FileStorage &fs ) const; + + void readParams( const FileNode &fn ); + + static inline bool isPositive(float val) { return val > 0; } + + static void normalizeSamples(Mat &matrix, Mat &average, float &multiplier); + + float calcShift(InputArray _samples, InputArray _responses) const; + + static void makeExtendedTrainSamples(const Mat &trainSamples, Mat &extendedTrainSamples, Mat &average, float &multiplier); + + // Vector with SVM weights + Mat weights_; + float shift_; + + // Parameters for learning + struct SVMSGDParams + { + 
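+        // hyper-parameters of the (averaged) stochastic gradient descent
+        // solver; defaults for both the SGD and the ASGD variant are set in
+        // setOptimalParameters()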
float marginRegularization; + float initialStepSize; + float stepDecreasingPower; + TermCriteria termCrit; + int svmsgdType; + int marginType; + }; + + SVMSGDParams params; +}; + +Ptr SVMSGD::create() +{ + return makePtr(); +} + +Ptr SVMSGD::load(const String& filepath, const String& nodeName) +{ + return Algorithm::load(filepath, nodeName); +} + + +void SVMSGDImpl::normalizeSamples(Mat &samples, Mat &average, float &multiplier) +{ + int featuresCount = samples.cols; + int samplesCount = samples.rows; + + average = Mat(1, featuresCount, samples.type()); + CV_Assert(average.type() == CV_32FC1); + for (int featureIndex = 0; featureIndex < featuresCount; featureIndex++) + { + average.at(featureIndex) = static_cast(mean(samples.col(featureIndex))[0]); + } + + for (int sampleIndex = 0; sampleIndex < samplesCount; sampleIndex++) + { + samples.row(sampleIndex) -= average; + } + + double normValue = norm(samples); + + multiplier = static_cast(sqrt(static_cast(samples.total())) / normValue); + + samples *= multiplier; +} + +void SVMSGDImpl::makeExtendedTrainSamples(const Mat &trainSamples, Mat &extendedTrainSamples, Mat &average, float &multiplier) +{ + Mat normalizedTrainSamples = trainSamples.clone(); + int samplesCount = normalizedTrainSamples.rows; + + normalizeSamples(normalizedTrainSamples, average, multiplier); + + Mat onesCol = Mat::ones(samplesCount, 1, CV_32F); + cv::hconcat(normalizedTrainSamples, onesCol, extendedTrainSamples); +} + +void SVMSGDImpl::updateWeights(InputArray _sample, bool positive, float stepSize, Mat& weights) +{ + Mat sample = _sample.getMat(); + + int response = positive ? 1 : -1; // ensure that trainResponses are -1 or 1 + + if ( sample.dot(weights) * response > 1) + { + // Not a support vector, only apply weight decay + weights *= (1.f - stepSize * params.marginRegularization); + } + else + { + // It's a support vector, add it to the weights + weights -= (stepSize * params.marginRegularization) * weights - (stepSize * response) * sample; + } +} + +float SVMSGDImpl::calcShift(InputArray _samples, InputArray _responses) const +{ + float margin[2] = { std::numeric_limits::max(), std::numeric_limits::max() }; + + Mat trainSamples = _samples.getMat(); + int trainSamplesCount = trainSamples.rows; + + Mat trainResponses = _responses.getMat(); + + CV_Assert(trainResponses.type() == CV_32FC1); + for (int samplesIndex = 0; samplesIndex < trainSamplesCount; samplesIndex++) + { + Mat currentSample = trainSamples.row(samplesIndex); + float dotProduct = static_cast(currentSample.dot(weights_)); + + bool positive = isPositive(trainResponses.at(samplesIndex)); + int index = positive ? 0 : 1; + float signToMul = positive ? 1.f : -1.f; + float curMargin = dotProduct * signToMul; + + if (curMargin < margin[index]) + { + margin[index] = curMargin; + } + } + + return -(margin[0] - margin[1]) / 2.f; +} + +bool SVMSGDImpl::train(const Ptr& data, int) +{ + CV_Assert(!data.empty()); + clear(); + CV_Assert( isClassifier() ); //toDo: consider + + Mat trainSamples = data->getTrainSamples(); + + int featureCount = trainSamples.cols; + Mat trainResponses = data->getTrainResponses(); // (trainSamplesCount x 1) matrix + + CV_Assert(trainResponses.rows == trainSamples.rows); + + if (trainResponses.empty()) + { + return false; + } + + int positiveCount = countNonZero(trainResponses >= 0); + int negativeCount = countNonZero(trainResponses < 0); + + if ( positiveCount <= 0 || negativeCount <= 0 ) + { + weights_ = Mat::zeros(1, featureCount, CV_32F); + shift_ = (positiveCount > 0) ? 
1.f : -1.f; + return true; + } + + Mat extendedTrainSamples; + Mat average; + float multiplier = 0; + makeExtendedTrainSamples(trainSamples, extendedTrainSamples, average, multiplier); + + int extendedTrainSamplesCount = extendedTrainSamples.rows; + int extendedFeatureCount = extendedTrainSamples.cols; + + Mat extendedWeights = Mat::zeros(1, extendedFeatureCount, CV_32F); + Mat previousWeights = Mat::zeros(1, extendedFeatureCount, CV_32F); + Mat averageExtendedWeights; + if (params.svmsgdType == ASGD) + { + averageExtendedWeights = Mat::zeros(1, extendedFeatureCount, CV_32F); + } + + RNG rng(0); + + CV_Assert (params.termCrit.type & TermCriteria::COUNT || params.termCrit.type & TermCriteria::EPS); + int maxCount = (params.termCrit.type & TermCriteria::COUNT) ? params.termCrit.maxCount : INT_MAX; + double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0; + + double err = DBL_MAX; + CV_Assert (trainResponses.type() == CV_32FC1); + // Stochastic gradient descent SVM + for (int iter = 0; (iter < maxCount) && (err > epsilon); iter++) + { + int randomNumber = rng.uniform(0, extendedTrainSamplesCount); //generate sample number + + Mat currentSample = extendedTrainSamples.row(randomNumber); + + float stepSize = params.initialStepSize * std::pow((1 + params.marginRegularization * params.initialStepSize * (float)iter), (-params.stepDecreasingPower)); //update stepSize + + updateWeights( currentSample, isPositive(trainResponses.at(randomNumber)), stepSize, extendedWeights ); + + //average weights (only for ASGD model) + if (params.svmsgdType == ASGD) + { + averageExtendedWeights = ((float)iter/ (1 + (float)iter)) * averageExtendedWeights + extendedWeights / (1 + (float) iter); + err = norm(averageExtendedWeights - previousWeights); + averageExtendedWeights.copyTo(previousWeights); + } + else + { + err = norm(extendedWeights - previousWeights); + extendedWeights.copyTo(previousWeights); + } + } + + if (params.svmsgdType == ASGD) + { + extendedWeights = averageExtendedWeights; + } + + Rect roi(0, 0, featureCount, 1); + weights_ = extendedWeights(roi); + weights_ *= multiplier; + + CV_Assert((params.marginType == SOFT_MARGIN || params.marginType == HARD_MARGIN) && (extendedWeights.type() == CV_32FC1)); + + if (params.marginType == SOFT_MARGIN) + { + shift_ = extendedWeights.at(featureCount) - static_cast(weights_.dot(average)); + } + else + { + shift_ = calcShift(trainSamples, trainResponses); + } + + return true; +} + +float SVMSGDImpl::predict( InputArray _samples, OutputArray _results, int ) const +{ + float result = 0; + cv::Mat samples = _samples.getMat(); + int nSamples = samples.rows; + cv::Mat results; + + CV_Assert( samples.cols == weights_.cols && samples.type() == CV_32FC1); + + if( _results.needed() ) + { + _results.create( nSamples, 1, samples.type() ); + results = _results.getMat(); + } + else + { + CV_Assert( nSamples == 1 ); + results = Mat(1, 1, CV_32FC1, &result); + } + + for (int sampleIndex = 0; sampleIndex < nSamples; sampleIndex++) + { + Mat currentSample = samples.row(sampleIndex); + float criterion = static_cast(currentSample.dot(weights_)) + shift_; + results.at(sampleIndex) = (criterion >= 0) ? 
1.f : -1.f; + } + + return result; +} + +bool SVMSGDImpl::isClassifier() const +{ + return (params.svmsgdType == SGD || params.svmsgdType == ASGD) + && + (params.marginType == SOFT_MARGIN || params.marginType == HARD_MARGIN) + && + (params.marginRegularization > 0) && (params.initialStepSize > 0) && (params.stepDecreasingPower >= 0); +} + +bool SVMSGDImpl::isTrained() const +{ + return !weights_.empty(); +} + +void SVMSGDImpl::write(FileStorage& fs) const +{ + if( !isTrained() ) + CV_Error( CV_StsParseError, "SVMSGD model data is invalid, it hasn't been trained" ); + + writeFormat(fs); + writeParams( fs ); + + fs << "weights" << weights_; + fs << "shift" << shift_; +} + +void SVMSGDImpl::writeParams( FileStorage& fs ) const +{ + String SvmsgdTypeStr; + + switch (params.svmsgdType) + { + case SGD: + SvmsgdTypeStr = "SGD"; + break; + case ASGD: + SvmsgdTypeStr = "ASGD"; + break; + default: + SvmsgdTypeStr = format("Unknown_%d", params.svmsgdType); + } + + fs << "svmsgdType" << SvmsgdTypeStr; + + String marginTypeStr; + + switch (params.marginType) + { + case SOFT_MARGIN: + marginTypeStr = "SOFT_MARGIN"; + break; + case HARD_MARGIN: + marginTypeStr = "HARD_MARGIN"; + break; + default: + marginTypeStr = format("Unknown_%d", params.marginType); + } + + fs << "marginType" << marginTypeStr; + + fs << "marginRegularization" << params.marginRegularization; + fs << "initialStepSize" << params.initialStepSize; + fs << "stepDecreasingPower" << params.stepDecreasingPower; + + fs << "term_criteria" << "{:"; + if( params.termCrit.type & TermCriteria::EPS ) + fs << "epsilon" << params.termCrit.epsilon; + if( params.termCrit.type & TermCriteria::COUNT ) + fs << "iterations" << params.termCrit.maxCount; + fs << "}"; +} +void SVMSGDImpl::readParams( const FileNode& fn ) +{ + String svmsgdTypeStr = (String)fn["svmsgdType"]; + int svmsgdType = + svmsgdTypeStr == "SGD" ? SGD : + svmsgdTypeStr == "ASGD" ? ASGD : -1; + + if( svmsgdType < 0 ) + CV_Error( CV_StsParseError, "Missing or invalid SVMSGD type" ); + + params.svmsgdType = svmsgdType; + + String marginTypeStr = (String)fn["marginType"]; + int marginType = + marginTypeStr == "SOFT_MARGIN" ? SOFT_MARGIN : + marginTypeStr == "HARD_MARGIN" ? HARD_MARGIN : -1; + + if( marginType < 0 ) + CV_Error( CV_StsParseError, "Missing or invalid margin type" ); + + params.marginType = marginType; + + CV_Assert ( fn["marginRegularization"].isReal() ); + params.marginRegularization = (float)fn["marginRegularization"]; + + CV_Assert ( fn["initialStepSize"].isReal() ); + params.initialStepSize = (float)fn["initialStepSize"]; + + CV_Assert ( fn["stepDecreasingPower"].isReal() ); + params.stepDecreasingPower = (float)fn["stepDecreasingPower"]; + + FileNode tcnode = fn["term_criteria"]; + CV_Assert(!tcnode.empty()); + params.termCrit.epsilon = (double)tcnode["epsilon"]; + params.termCrit.maxCount = (int)tcnode["iterations"]; + params.termCrit.type = (params.termCrit.epsilon > 0 ? TermCriteria::EPS : 0) + + (params.termCrit.maxCount > 0 ? 
TermCriteria::COUNT : 0); + CV_Assert ((params.termCrit.type & TermCriteria::COUNT || params.termCrit.type & TermCriteria::EPS)); +} + +void SVMSGDImpl::read(const FileNode& fn) +{ + clear(); + + readParams(fn); + + fn["weights"] >> weights_; + fn["shift"] >> shift_; +} + +void SVMSGDImpl::clear() +{ + weights_.release(); + shift_ = 0; +} + + +SVMSGDImpl::SVMSGDImpl() +{ + clear(); + setOptimalParameters(); +} + +void SVMSGDImpl::setOptimalParameters(int svmsgdType, int marginType) +{ + switch (svmsgdType) + { + case SGD: + params.svmsgdType = SGD; + params.marginType = (marginType == SOFT_MARGIN) ? SOFT_MARGIN : + (marginType == HARD_MARGIN) ? HARD_MARGIN : -1; + params.marginRegularization = 0.0001f; + params.initialStepSize = 0.05f; + params.stepDecreasingPower = 1.f; + params.termCrit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100000, 0.00001); + break; + + case ASGD: + params.svmsgdType = ASGD; + params.marginType = (marginType == SOFT_MARGIN) ? SOFT_MARGIN : + (marginType == HARD_MARGIN) ? HARD_MARGIN : -1; + params.marginRegularization = 0.00001f; + params.initialStepSize = 0.05f; + params.stepDecreasingPower = 0.75f; + params.termCrit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100000, 0.00001); + break; + + default: + CV_Error( CV_StsParseError, "SVMSGD model data is invalid" ); + } +} +} //ml +} //cv diff --git a/modules/ml/src/testset.cpp b/modules/ml/src/testset.cpp new file mode 100644 index 00000000000..48cd1341543 --- /dev/null +++ b/modules/ml/src/testset.cpp @@ -0,0 +1,113 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv { namespace ml { + +struct PairDI +{ + double d; + int i; +}; + +struct CmpPairDI +{ + bool operator ()(const PairDI& e1, const PairDI& e2) const + { + return (e1.d < e2.d) || (e1.d == e2.d && e1.i < e2.i); + } +}; + +void createConcentricSpheresTestSet( int num_samples, int num_features, int num_classes, + OutputArray _samples, OutputArray _responses) +{ + if( num_samples < 1 ) + CV_Error( CV_StsBadArg, "num_samples parameter must be positive" ); + + if( num_features < 1 ) + CV_Error( CV_StsBadArg, "num_features parameter must be positive" ); + + if( num_classes < 1 ) + CV_Error( CV_StsBadArg, "num_classes parameter must be positive" ); + + int i, cur_class; + + _samples.create( num_samples, num_features, CV_32F ); + _responses.create( 1, num_samples, CV_32S ); + + Mat responses = _responses.getMat(); + + Mat mean = Mat::zeros(1, num_features, CV_32F); + Mat cov = Mat::eye(num_features, num_features, CV_32F); + + // fill the feature values matrix with random numbers drawn from standard normal distribution + randMVNormal( mean, cov, num_samples, _samples ); + Mat samples = _samples.getMat(); + + // calculate distances from the origin to the samples and put them + // into the sequence along with indices + std::vector dis(samples.rows); + + for( i = 0; i < samples.rows; i++ ) + { + PairDI& elem = dis[i]; + elem.i = i; + elem.d = norm(samples.row(i), NORM_L2); + } + + std::sort(dis.begin(), dis.end(), CmpPairDI()); + + // assign class labels + num_classes = std::min( num_samples, num_classes ); + for( i = 0, cur_class = 0; i < num_samples; ++cur_class ) + { + int last_idx = num_samples * (cur_class + 1) / num_classes - 1; + double max_dst = dis[last_idx].d; + max_dst = std::max( max_dst, dis[i].d ); + + for( ; i < num_samples && dis[i].d <= max_dst; ++i ) + responses.at(dis[i].i) = cur_class; + } +} + +}} + +/* End of file. */ diff --git a/modules/ml/src/tree.cpp b/modules/ml/src/tree.cpp new file mode 100644 index 00000000000..b69ddaece2d --- /dev/null +++ b/modules/ml/src/tree.cpp @@ -0,0 +1,1990 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include + +#include + +namespace cv { +namespace ml { + +using std::vector; + +TreeParams::TreeParams() +{ + maxDepth = INT_MAX; + minSampleCount = 10; + regressionAccuracy = 0.01f; + useSurrogates = false; + maxCategories = 10; + CVFolds = 10; + use1SERule = true; + truncatePrunedTree = true; + priors = Mat(); +} + +TreeParams::TreeParams(int _maxDepth, int _minSampleCount, + double _regressionAccuracy, bool _useSurrogates, + int _maxCategories, int _CVFolds, + bool _use1SERule, bool _truncatePrunedTree, + const Mat& _priors) +{ + maxDepth = _maxDepth; + minSampleCount = _minSampleCount; + regressionAccuracy = (float)_regressionAccuracy; + useSurrogates = _useSurrogates; + maxCategories = _maxCategories; + CVFolds = _CVFolds; + use1SERule = _use1SERule; + truncatePrunedTree = _truncatePrunedTree; + priors = _priors; +} + +DTrees::Node::Node() +{ + classIdx = 0; + value = 0; + parent = left = right = split = defaultDir = -1; +} + +DTrees::Split::Split() +{ + varIdx = 0; + inversed = false; + quality = 0.f; + next = -1; + c = 0.f; + subsetOfs = 0; +} + + +DTreesImpl::WorkData::WorkData(const Ptr& _data) +{ + CV_Assert(!_data.empty()); + data = _data; + vector subsampleIdx; + Mat sidx0 = _data->getTrainSampleIdx(); + if( !sidx0.empty() ) + { + sidx0.copyTo(sidx); + std::sort(sidx.begin(), sidx.end()); + } + else + { + int n = _data->getNSamples(); + setRangeVector(sidx, n); + } + + maxSubsetSize = 0; +} + +DTreesImpl::DTreesImpl() : _isClassifier(false) {} +DTreesImpl::~DTreesImpl() {} +void DTreesImpl::clear() +{ + varIdx.clear(); + compVarIdx.clear(); + varType.clear(); + catOfs.clear(); + catMap.clear(); + roots.clear(); + nodes.clear(); + splits.clear(); + subsets.clear(); + classLabels.clear(); + + w.release(); + _isClassifier = false; +} + +void DTreesImpl::startTraining( const Ptr& data, int ) +{ + CV_Assert(!data.empty()); + clear(); + w = makePtr(data); + + Mat vtype = data->getVarType(); + vtype.copyTo(varType); + + data->getCatOfs().copyTo(catOfs); + data->getCatMap().copyTo(catMap); + data->getDefaultSubstValues().copyTo(missingSubst); + + int nallvars = data->getNAllVars(); + + Mat vidx0 = data->getVarIdx(); + if( !vidx0.empty() ) + vidx0.copyTo(varIdx); + else + setRangeVector(varIdx, nallvars); + + initCompVarIdx(); + + w->maxSubsetSize = 0; + + int i, nvars = (int)varIdx.size(); + for( i = 0; i < nvars; i++ ) + w->maxSubsetSize = std::max(w->maxSubsetSize, getCatCount(varIdx[i])); + + w->maxSubsetSize = std::max((w->maxSubsetSize + 31)/32, 1); + + data->getSampleWeights().copyTo(w->sample_weights); + + _isClassifier = data->getResponseType() == VAR_CATEGORICAL; + + if( _isClassifier ) + { + data->getNormCatResponses().copyTo(w->cat_responses); + 
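+        // the class labels and the optional per-class priors are cached here;
+        // when priors are given they are folded into the per-sample weights
+        // in the loop below, so all later split/impurity computations see
+        // already re-weighted samples.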
data->getClassLabels().copyTo(classLabels); + int nclasses = (int)classLabels.size(); + + Mat class_weights = params.priors; + if( !class_weights.empty() ) + { + if( class_weights.type() != CV_64F || !class_weights.isContinuous() ) + { + Mat temp; + class_weights.convertTo(temp, CV_64F); + class_weights = temp; + } + CV_Assert( class_weights.checkVector(1, CV_64F) == nclasses ); + + int nsamples = (int)w->cat_responses.size(); + const double* cw = class_weights.ptr(); + CV_Assert( (int)w->sample_weights.size() == nsamples ); + + for( i = 0; i < nsamples; i++ ) + { + int ci = w->cat_responses[i]; + CV_Assert( 0 <= ci && ci < nclasses ); + w->sample_weights[i] *= cw[ci]; + } + } + } + else + data->getResponses().copyTo(w->ord_responses); +} + + +void DTreesImpl::initCompVarIdx() +{ + int nallvars = (int)varType.size(); + compVarIdx.assign(nallvars, -1); + int i, nvars = (int)varIdx.size(), prevIdx = -1; + for( i = 0; i < nvars; i++ ) + { + int vi = varIdx[i]; + CV_Assert( 0 <= vi && vi < nallvars && vi > prevIdx ); + prevIdx = vi; + compVarIdx[vi] = i; + } +} + +void DTreesImpl::endTraining() +{ + w.release(); +} + +bool DTreesImpl::train( const Ptr& trainData, int flags ) +{ + CV_Assert(!trainData.empty()); + startTraining(trainData, flags); + bool ok = addTree( w->sidx ) >= 0; + w.release(); + endTraining(); + return ok; +} + +const vector& DTreesImpl::getActiveVars() +{ + return varIdx; +} + +int DTreesImpl::addTree(const vector& sidx ) +{ + size_t n = (params.getMaxDepth() > 0 ? (1 << params.getMaxDepth()) : 1024) + w->wnodes.size(); + + w->wnodes.reserve(n); + w->wsplits.reserve(n); + w->wsubsets.reserve(n*w->maxSubsetSize); + w->wnodes.clear(); + w->wsplits.clear(); + w->wsubsets.clear(); + + int cv_n = params.getCVFolds(); + + if( cv_n > 0 ) + { + w->cv_Tn.resize(n*cv_n); + w->cv_node_error.resize(n*cv_n); + w->cv_node_risk.resize(n*cv_n); + } + + // build the tree recursively + int w_root = addNodeAndTrySplit(-1, sidx); + int maxdepth = INT_MAX;//pruneCV(root); + + int w_nidx = w_root, pidx = -1, depth = 0; + int root = (int)nodes.size(); + + for(;;) + { + const WNode& wnode = w->wnodes[w_nidx]; + Node node; + node.parent = pidx; + node.classIdx = wnode.class_idx; + node.value = wnode.value; + node.defaultDir = wnode.defaultDir; + + int wsplit_idx = wnode.split; + if( wsplit_idx >= 0 ) + { + const WSplit& wsplit = w->wsplits[wsplit_idx]; + Split split; + split.c = wsplit.c; + split.quality = wsplit.quality; + split.inversed = wsplit.inversed; + split.varIdx = wsplit.varIdx; + split.subsetOfs = -1; + if( wsplit.subsetOfs >= 0 ) + { + int ssize = getSubsetSize(split.varIdx); + split.subsetOfs = (int)subsets.size(); + subsets.resize(split.subsetOfs + ssize); + // This check verifies that subsets index is in the correct range + // as in case ssize == 0 no real resize performed. + // Thus memory kept safe. 
+ // Also this skips useless memcpy call when size parameter is zero + if(ssize > 0) + { + memcpy(&subsets[split.subsetOfs], &w->wsubsets[wsplit.subsetOfs], ssize*sizeof(int)); + } + } + node.split = (int)splits.size(); + splits.push_back(split); + } + int nidx = (int)nodes.size(); + nodes.push_back(node); + if( pidx >= 0 ) + { + int w_pidx = w->wnodes[w_nidx].parent; + if( w->wnodes[w_pidx].left == w_nidx ) + { + nodes[pidx].left = nidx; + } + else + { + CV_Assert(w->wnodes[w_pidx].right == w_nidx); + nodes[pidx].right = nidx; + } + } + + if( wnode.left >= 0 && depth+1 < maxdepth ) + { + w_nidx = wnode.left; + pidx = nidx; + depth++; + } + else + { + int w_pidx = wnode.parent; + while( w_pidx >= 0 && w->wnodes[w_pidx].right == w_nidx ) + { + w_nidx = w_pidx; + w_pidx = w->wnodes[w_pidx].parent; + nidx = pidx; + pidx = nodes[pidx].parent; + depth--; + } + + if( w_pidx < 0 ) + break; + + w_nidx = w->wnodes[w_pidx].right; + CV_Assert( w_nidx >= 0 ); + } + } + roots.push_back(root); + return root; +} + +void DTreesImpl::setDParams(const TreeParams& _params) +{ + params = _params; +} + +int DTreesImpl::addNodeAndTrySplit( int parent, const vector& sidx ) +{ + w->wnodes.push_back(WNode()); + int nidx = (int)(w->wnodes.size() - 1); + WNode& node = w->wnodes.back(); + + node.parent = parent; + node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0; + int nfolds = params.getCVFolds(); + + if( nfolds > 0 ) + { + w->cv_Tn.resize((nidx+1)*nfolds); + w->cv_node_error.resize((nidx+1)*nfolds); + w->cv_node_risk.resize((nidx+1)*nfolds); + } + + int i, n = node.sample_count = (int)sidx.size(); + bool can_split = true; + vector sleft, sright; + + calcValue( nidx, sidx ); + + if( n <= params.getMinSampleCount() || node.depth >= params.getMaxDepth() ) + can_split = false; + else if( _isClassifier ) + { + const int* responses = &w->cat_responses[0]; + const int* s = &sidx[0]; + int first = responses[s[0]]; + for( i = 1; i < n; i++ ) + if( responses[s[i]] != first ) + break; + if( i == n ) + can_split = false; + } + else + { + if( sqrt(node.node_risk) < params.getRegressionAccuracy() ) + can_split = false; + } + + if( can_split ) + node.split = findBestSplit( sidx ); + + //printf("depth=%d, nidx=%d, parent=%d, n=%d, %s, value=%.1f, risk=%.1f\n", node.depth, nidx, node.parent, n, (node.split < 0 ? "leaf" : varType[w->wsplits[node.split].varIdx] == VAR_CATEGORICAL ? 
"cat" : "ord"), node.value, node.node_risk); + + if( node.split >= 0 ) + { + node.defaultDir = calcDir( node.split, sidx, sleft, sright ); + if( params.useSurrogates ) + CV_Error( CV_StsNotImplemented, "surrogate splits are not implemented yet"); + + int left = addNodeAndTrySplit( nidx, sleft ); + int right = addNodeAndTrySplit( nidx, sright ); + w->wnodes[nidx].left = left; + w->wnodes[nidx].right = right; + CV_Assert( w->wnodes[nidx].left > 0 && w->wnodes[nidx].right > 0 ); + } + + return nidx; +} + +int DTreesImpl::findBestSplit( const vector& _sidx ) +{ + const vector& activeVars = getActiveVars(); + int splitidx = -1; + int vi_, nv = (int)activeVars.size(); + AutoBuffer buf(w->maxSubsetSize*2); + int *subset = buf.data(), *best_subset = subset + w->maxSubsetSize; + WSplit split, best_split; + best_split.quality = 0.; + + for( vi_ = 0; vi_ < nv; vi_++ ) + { + int vi = activeVars[vi_]; + if( varType[vi] == VAR_CATEGORICAL ) + { + if( _isClassifier ) + split = findSplitCatClass(vi, _sidx, 0, subset); + else + split = findSplitCatReg(vi, _sidx, 0, subset); + } + else + { + if( _isClassifier ) + split = findSplitOrdClass(vi, _sidx, 0); + else + split = findSplitOrdReg(vi, _sidx, 0); + } + if( split.quality > best_split.quality ) + { + best_split = split; + std::swap(subset, best_subset); + } + } + + if( best_split.quality > 0 ) + { + int best_vi = best_split.varIdx; + CV_Assert( compVarIdx[best_split.varIdx] >= 0 && best_vi >= 0 ); + int i, prevsz = (int)w->wsubsets.size(), ssize = getSubsetSize(best_vi); + w->wsubsets.resize(prevsz + ssize); + for( i = 0; i < ssize; i++ ) + w->wsubsets[prevsz + i] = best_subset[i]; + best_split.subsetOfs = prevsz; + w->wsplits.push_back(best_split); + splitidx = (int)(w->wsplits.size()-1); + } + + return splitidx; +} + +void DTreesImpl::calcValue( int nidx, const vector& _sidx ) +{ + WNode* node = &w->wnodes[nidx]; + int i, j, k, n = (int)_sidx.size(), cv_n = params.getCVFolds(); + int m = (int)classLabels.size(); + + cv::AutoBuffer buf(std::max(m, 3)*(cv_n+1)); + + if( cv_n > 0 ) + { + size_t sz = w->cv_Tn.size(); + w->cv_Tn.resize(sz + cv_n); + w->cv_node_risk.resize(sz + cv_n); + w->cv_node_error.resize(sz + cv_n); + } + + if( _isClassifier ) + { + // in case of classification tree: + // * node value is the label of the class that has the largest weight in the node. + // * node risk is the weighted number of misclassified samples, + // * j-th cross-validation fold value and risk are calculated as above, + // but using the samples with cv_labels(*)!=j. + // * j-th cross-validation fold error is calculated as the weighted number of + // misclassified samples with cv_labels(*)==j. 
+ + // compute the number of instances of each class + double* cls_count = buf.data(); + double* cv_cls_count = cls_count + m; + + double max_val = -1, total_weight = 0; + int max_k = -1; + + for( k = 0; k < m; k++ ) + cls_count[k] = 0; + + if( cv_n == 0 ) + { + for( i = 0; i < n; i++ ) + { + int si = _sidx[i]; + cls_count[w->cat_responses[si]] += w->sample_weights[si]; + } + } + else + { + for( j = 0; j < cv_n; j++ ) + for( k = 0; k < m; k++ ) + cv_cls_count[j*m + k] = 0; + + for( i = 0; i < n; i++ ) + { + int si = _sidx[i]; + j = w->cv_labels[si]; k = w->cat_responses[si]; + cv_cls_count[j*m + k] += w->sample_weights[si]; + } + + for( j = 0; j < cv_n; j++ ) + for( k = 0; k < m; k++ ) + cls_count[k] += cv_cls_count[j*m + k]; + } + + for( k = 0; k < m; k++ ) + { + double val = cls_count[k]; + total_weight += val; + if( max_val < val ) + { + max_val = val; + max_k = k; + } + } + + node->class_idx = max_k; + node->value = classLabels[max_k]; + node->node_risk = total_weight - max_val; + + for( j = 0; j < cv_n; j++ ) + { + double sum_k = 0, sum = 0, max_val_k = 0; + max_val = -1; max_k = -1; + + for( k = 0; k < m; k++ ) + { + double val_k = cv_cls_count[j*m + k]; + double val = cls_count[k] - val_k; + sum_k += val_k; + sum += val; + if( max_val < val ) + { + max_val = val; + max_val_k = val_k; + max_k = k; + } + } + + w->cv_Tn[nidx*cv_n + j] = INT_MAX; + w->cv_node_risk[nidx*cv_n + j] = sum - max_val; + w->cv_node_error[nidx*cv_n + j] = sum_k - max_val_k; + } + } + else + { + // in case of regression tree: + // * node value is 1/n*sum_i(Y_i), where Y_i is i-th response, + // n is the number of samples in the node. + // * node risk is the sum of squared errors: sum_i((Y_i - )^2) + // * j-th cross-validation fold value and risk are calculated as above, + // but using the samples with cv_labels(*)!=j. + // * j-th cross-validation fold error is calculated + // using samples with cv_labels(*)==j as the test subset: + // error_j = sum_(i,cv_labels(i)==j)((Y_i - )^2), + // where node_value_j is the node value calculated + // as described in the previous bullet, and summation is done + // over the samples with cv_labels(*)==j. 
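+        //
+        // below this is accumulated in a single pass as
+        //     value = sum/sumw,                     where sum  = sum_i(w_i*Y_i),
+        //     risk  = (sum2 - sum*sum/sumw)/sumw,         sum2 = sum_i(w_i*Y_i^2),
+        //                                                 sumw = sum_i(w_i),
+        // i.e. the weighted variance of the responses around the node value.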
+ double sum = 0, sum2 = 0, sumw = 0; + + if( cv_n == 0 ) + { + for( i = 0; i < n; i++ ) + { + int si = _sidx[i]; + double wval = w->sample_weights[si]; + double t = w->ord_responses[si]; + sum += t*wval; + sum2 += t*t*wval; + sumw += wval; + } + } + else + { + double *cv_sum = buf.data(), *cv_sum2 = cv_sum + cv_n; + double* cv_count = (double*)(cv_sum2 + cv_n); + + for( j = 0; j < cv_n; j++ ) + { + cv_sum[j] = cv_sum2[j] = 0.; + cv_count[j] = 0; + } + + for( i = 0; i < n; i++ ) + { + int si = _sidx[i]; + j = w->cv_labels[si]; + double wval = w->sample_weights[si]; + double t = w->ord_responses[si]; + cv_sum[j] += t*wval; + cv_sum2[j] += t*t*wval; + cv_count[j] += wval; + } + + for( j = 0; j < cv_n; j++ ) + { + sum += cv_sum[j]; + sum2 += cv_sum2[j]; + sumw += cv_count[j]; + } + + for( j = 0; j < cv_n; j++ ) + { + double s = sum - cv_sum[j], si = sum - s; + double s2 = sum2 - cv_sum2[j], s2i = sum2 - s2; + double c = cv_count[j], ci = sumw - c; + double r = si/std::max(ci, DBL_EPSILON); + w->cv_node_risk[nidx*cv_n + j] = s2i - r*r*ci; + w->cv_node_error[nidx*cv_n + j] = s2 - 2*r*s + c*r*r; + w->cv_Tn[nidx*cv_n + j] = INT_MAX; + } + } + CV_Assert(fabs(sumw) > 0); + node->node_risk = sum2 - (sum/sumw)*sum; + node->node_risk /= sumw; + node->value = sum/sumw; + } +} + +DTreesImpl::WSplit DTreesImpl::findSplitOrdClass( int vi, const vector& _sidx, double initQuality ) +{ + int n = (int)_sidx.size(); + int m = (int)classLabels.size(); + + cv::AutoBuffer buf(n*(sizeof(float) + sizeof(int)) + m*2*sizeof(double)); + const int* sidx = &_sidx[0]; + const int* responses = &w->cat_responses[0]; + const double* weights = &w->sample_weights[0]; + double* lcw = (double*)buf.data(); + double* rcw = lcw + m; + float* values = (float*)(rcw + m); + int* sorted_idx = (int*)(values + n); + int i, best_i = -1; + double best_val = initQuality; + + for( i = 0; i < m; i++ ) + lcw[i] = rcw[i] = 0.; + + w->data->getValues( vi, _sidx, values ); + + for( i = 0; i < n; i++ ) + { + sorted_idx[i] = i; + int si = sidx[i]; + rcw[responses[si]] += weights[si]; + } + + std::sort(sorted_idx, sorted_idx + n, cmp_lt_idx(values)); + + double L = 0, R = 0, lsum2 = 0, rsum2 = 0; + for( i = 0; i < m; i++ ) + { + double wval = rcw[i]; + R += wval; + rsum2 += wval*wval; + } + + for( i = 0; i < n - 1; i++ ) + { + int curr = sorted_idx[i]; + int next = sorted_idx[i+1]; + int si = sidx[curr]; + double wval = weights[si], w2 = wval*wval; + L += wval; R -= wval; + int idx = responses[si]; + double lv = lcw[idx], rv = rcw[idx]; + lsum2 += 2*lv*wval + w2; + rsum2 -= 2*rv*wval - w2; + lcw[idx] = lv + wval; rcw[idx] = rv - wval; + + float value_between = (values[next] + values[curr]) * 0.5f; + if( value_between > values[curr] && value_between < values[next] ) + { + double val = (lsum2*R + rsum2*L)/(L*R); + if( best_val < val ) + { + best_val = val; + best_i = i; + } + } + } + + WSplit split; + if( best_i >= 0 ) + { + split.varIdx = vi; + split.c = (values[sorted_idx[best_i]] + values[sorted_idx[best_i+1]])*0.5f; + split.inversed = false; + split.quality = (float)best_val; + } + return split; +} + +// simple k-means, slightly modified to take into account the "weight" (L1-norm) of each vector. 
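+// Each input "vector" is the m-dimensional histogram of weighted class counts
+// for one value of a categorical variable.  Both the vectors and the running
+// cluster sums are scaled by their L1 norms (v_weights / c_weights) before the
+// squared distance is computed, so categories are grouped by the shape of
+// their class distribution rather than by how many samples they contain.
+// findSplitCatClass() uses this to merge a variable with many categories down
+// to at most params.maxCategories "super-categories" before the subset search.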
+void DTreesImpl::clusterCategories( const double* vectors, int n, int m, double* csums, int k, int* labels ) +{ + int iters = 0, max_iters = 100; + int i, j, idx; + cv::AutoBuffer buf(n + k); + double *v_weights = buf.data(), *c_weights = buf.data() + n; + bool modified = true; + RNG r((uint64)-1); + + // assign labels randomly + for( i = 0; i < n; i++ ) + { + double sum = 0; + const double* v = vectors + i*m; + labels[i] = i < k ? i : r.uniform(0, k); + + // compute weight of each vector + for( j = 0; j < m; j++ ) + sum += v[j]; + v_weights[i] = sum ? 1./sum : 0.; + } + + for( i = 0; i < n; i++ ) + { + int i1 = r.uniform(0, n); + int i2 = r.uniform(0, n); + std::swap( labels[i1], labels[i2] ); + } + + for( iters = 0; iters <= max_iters; iters++ ) + { + // calculate csums + for( i = 0; i < k; i++ ) + { + for( j = 0; j < m; j++ ) + csums[i*m + j] = 0; + } + + for( i = 0; i < n; i++ ) + { + const double* v = vectors + i*m; + double* s = csums + labels[i]*m; + for( j = 0; j < m; j++ ) + s[j] += v[j]; + } + + // exit the loop here, when we have up-to-date csums + if( iters == max_iters || !modified ) + break; + + modified = false; + + // calculate weight of each cluster + for( i = 0; i < k; i++ ) + { + const double* s = csums + i*m; + double sum = 0; + for( j = 0; j < m; j++ ) + sum += s[j]; + c_weights[i] = sum ? 1./sum : 0; + } + + // now for each vector determine the closest cluster + for( i = 0; i < n; i++ ) + { + const double* v = vectors + i*m; + double alpha = v_weights[i]; + double min_dist2 = DBL_MAX; + int min_idx = -1; + + for( idx = 0; idx < k; idx++ ) + { + const double* s = csums + idx*m; + double dist2 = 0., beta = c_weights[idx]; + for( j = 0; j < m; j++ ) + { + double t = v[j]*alpha - s[j]*beta; + dist2 += t*t; + } + if( min_dist2 > dist2 ) + { + min_dist2 = dist2; + min_idx = idx; + } + } + + if( min_idx != labels[i] ) + modified = true; + labels[i] = min_idx; + } + } +} + +DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector& _sidx, + double initQuality, int* subset ) +{ + int _mi = getCatCount(vi), mi = _mi; + int n = (int)_sidx.size(); + int m = (int)classLabels.size(); + + int base_size = m*(3 + mi) + mi + 1; + if( m > 2 && mi > params.getMaxCategories() ) + base_size += m*std::min(params.getMaxCategories(), n) + mi; + else + base_size += mi; + AutoBuffer buf(base_size + n); + + double* lc = buf.data(); + double* rc = lc + m; + double* _cjk = rc + m*2, *cjk = _cjk; + double* c_weights = cjk + m*mi; + + int* labels = (int*)(buf.data() + base_size); + w->data->getNormCatValues(vi, _sidx, labels); + const int* responses = &w->cat_responses[0]; + const double* weights = &w->sample_weights[0]; + + int* cluster_labels = 0; + double** dbl_ptr = 0; + int i, j, k, si, idx; + double L = 0, R = 0; + double best_val = initQuality; + int prevcode = 0, best_subset = -1, subset_i, subset_n, subtract = 0; + + // init array of counters: + // c_{jk} - number of samples that have vi-th input variable = j and response = k. 
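+    //
+    // Note on the search strategy used below: for a 2-class problem the categories are sorted
+    // by the weight of the second class (cjk[j*2+1]) and only the splits along that ordering
+    // are evaluated; otherwise the subsets are enumerated with a Gray code, so that each
+    // iteration moves a single category between the left and right branches, and a variable
+    // with more than max_categories values is first grouped by clusterCategories().  In both
+    // cases the split quality is (lsum2*R + rsum2*L)/(L*R), the same Gini-style criterion as
+    // in findSplitOrdClass().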
+ for( j = -1; j < mi; j++ ) + for( k = 0; k < m; k++ ) + cjk[j*m + k] = 0; + + for( i = 0; i < n; i++ ) + { + si = _sidx[i]; + j = labels[i]; + k = responses[si]; + cjk[j*m + k] += weights[si]; + } + + if( m > 2 ) + { + if( mi > params.getMaxCategories() ) + { + mi = std::min(params.getMaxCategories(), n); + cjk = c_weights + _mi; + cluster_labels = (int*)(cjk + m*mi); + clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels ); + } + subset_i = 1; + subset_n = 1 << mi; + } + else + { + CV_Assert( m == 2 ); + dbl_ptr = (double**)(c_weights + _mi); + for( j = 0; j < mi; j++ ) + dbl_ptr[j] = cjk + j*2 + 1; + std::sort(dbl_ptr, dbl_ptr + mi, cmp_lt_ptr()); + subset_i = 0; + subset_n = mi; + } + + for( k = 0; k < m; k++ ) + { + double sum = 0; + for( j = 0; j < mi; j++ ) + sum += cjk[j*m + k]; + CV_Assert(sum > 0); + rc[k] = sum; + lc[k] = 0; + } + + for( j = 0; j < mi; j++ ) + { + double sum = 0; + for( k = 0; k < m; k++ ) + sum += cjk[j*m + k]; + c_weights[j] = sum; + R += c_weights[j]; + } + + for( ; subset_i < subset_n; subset_i++ ) + { + double lsum2 = 0, rsum2 = 0; + + if( m == 2 ) + idx = (int)(dbl_ptr[subset_i] - cjk)/2; + else + { + int graycode = (subset_i>>1)^subset_i; + int diff = graycode ^ prevcode; + + // determine index of the changed bit. + Cv32suf u; + idx = diff >= (1 << 16) ? 16 : 0; + u.f = (float)(((diff >> 16) | diff) & 65535); + idx += (u.i >> 23) - 127; + subtract = graycode < prevcode; + prevcode = graycode; + } + + double* crow = cjk + idx*m; + double weight = c_weights[idx]; + if( weight < FLT_EPSILON ) + continue; + + if( !subtract ) + { + for( k = 0; k < m; k++ ) + { + double t = crow[k]; + double lval = lc[k] + t; + double rval = rc[k] - t; + lsum2 += lval*lval; + rsum2 += rval*rval; + lc[k] = lval; rc[k] = rval; + } + L += weight; + R -= weight; + } + else + { + for( k = 0; k < m; k++ ) + { + double t = crow[k]; + double lval = lc[k] - t; + double rval = rc[k] + t; + lsum2 += lval*lval; + rsum2 += rval*rval; + lc[k] = lval; rc[k] = rval; + } + L -= weight; + R += weight; + } + + if( L > FLT_EPSILON && R > FLT_EPSILON ) + { + double val = (lsum2*R + rsum2*L)/(L*R); + if( best_val < val ) + { + best_val = val; + best_subset = subset_i; + } + } + } + + WSplit split; + if( best_subset >= 0 ) + { + split.varIdx = vi; + split.quality = (float)best_val; + memset( subset, 0, getSubsetSize(vi) * sizeof(int) ); + if( m == 2 ) + { + for( i = 0; i <= best_subset; i++ ) + { + idx = (int)(dbl_ptr[i] - cjk) >> 1; + subset[idx >> 5] |= 1 << (idx & 31); + } + } + else + { + for( i = 0; i < _mi; i++ ) + { + idx = cluster_labels ? 
cluster_labels[i] : i; + if( best_subset & (1 << idx) ) + subset[i >> 5] |= 1 << (i & 31); + } + } + } + return split; +} + +DTreesImpl::WSplit DTreesImpl::findSplitOrdReg( int vi, const vector& _sidx, double initQuality ) +{ + const double* weights = &w->sample_weights[0]; + int n = (int)_sidx.size(); + + AutoBuffer buf(n*(sizeof(int) + sizeof(float))); + + float* values = (float*)buf.data(); + int* sorted_idx = (int*)(values + n); + w->data->getValues(vi, _sidx, values); + const double* responses = &w->ord_responses[0]; + + int i, si, best_i = -1; + double L = 0, R = 0; + double best_val = initQuality, lsum = 0, rsum = 0; + + for( i = 0; i < n; i++ ) + { + sorted_idx[i] = i; + si = _sidx[i]; + R += weights[si]; + rsum += weights[si]*responses[si]; + } + + std::sort(sorted_idx, sorted_idx + n, cmp_lt_idx(values)); + + // find the optimal split + for( i = 0; i < n - 1; i++ ) + { + int curr = sorted_idx[i]; + int next = sorted_idx[i+1]; + si = _sidx[curr]; + double wval = weights[si]; + double t = responses[si]*wval; + L += wval; R -= wval; + lsum += t; rsum -= t; + + float value_between = (values[next] + values[curr]) * 0.5f; + if( value_between > values[curr] && value_between < values[next] ) + { + double val = (lsum*lsum*R + rsum*rsum*L)/(L*R); + if( best_val < val ) + { + best_val = val; + best_i = i; + } + } + } + + WSplit split; + if( best_i >= 0 ) + { + split.varIdx = vi; + split.c = (values[sorted_idx[best_i]] + values[sorted_idx[best_i+1]])*0.5f; + split.inversed = false; + split.quality = (float)best_val; + } + return split; +} + +DTreesImpl::WSplit DTreesImpl::findSplitCatReg( int vi, const vector& _sidx, + double initQuality, int* subset ) +{ + const double* weights = &w->sample_weights[0]; + const double* responses = &w->ord_responses[0]; + int n = (int)_sidx.size(); + int mi = getCatCount(vi); + + AutoBuffer buf(3*mi + 3 + n); + double* sum = buf.data() + 1; + double* counts = sum + mi + 1; + double** sum_ptr = (double**)(counts + mi); + int* cat_labels = (int*)(sum_ptr + mi); + + w->data->getNormCatValues(vi, _sidx, cat_labels); + + double L = 0, R = 0, best_val = initQuality, lsum = 0, rsum = 0; + int i, si, best_subset = -1, subset_i; + + for( i = -1; i < mi; i++ ) + sum[i] = counts[i] = 0; + + // calculate sum response and weight of each category of the input var + for( i = 0; i < n; i++ ) + { + int idx = cat_labels[i]; + si = _sidx[i]; + double wval = weights[si]; + sum[idx] += responses[si]*wval; + counts[idx] += wval; + } + + // calculate average response in each category + for( i = 0; i < mi; i++ ) + { + R += counts[i]; + rsum += sum[i]; + sum[i] = fabs(counts[i]) > DBL_EPSILON ? 
sum[i]/counts[i] : 0; + sum_ptr[i] = sum + i; + } + + std::sort(sum_ptr, sum_ptr + mi, cmp_lt_ptr()); + + // revert back to unnormalized sums + // (there should be a very little loss in accuracy) + for( i = 0; i < mi; i++ ) + sum[i] *= counts[i]; + + for( subset_i = 0; subset_i < mi-1; subset_i++ ) + { + int idx = (int)(sum_ptr[subset_i] - sum); + double ni = counts[idx]; + + if( ni > FLT_EPSILON ) + { + double s = sum[idx]; + lsum += s; L += ni; + rsum -= s; R -= ni; + + if( L > FLT_EPSILON && R > FLT_EPSILON ) + { + double val = (lsum*lsum*R + rsum*rsum*L)/(L*R); + if( best_val < val ) + { + best_val = val; + best_subset = subset_i; + } + } + } + } + + WSplit split; + if( best_subset >= 0 ) + { + split.varIdx = vi; + split.quality = (float)best_val; + memset( subset, 0, getSubsetSize(vi) * sizeof(int)); + for( i = 0; i <= best_subset; i++ ) + { + int idx = (int)(sum_ptr[i] - sum); + subset[idx >> 5] |= 1 << (idx & 31); + } + } + return split; +} + +int DTreesImpl::calcDir( int splitidx, const vector& _sidx, + vector& _sleft, vector& _sright ) +{ + WSplit split = w->wsplits[splitidx]; + int i, si, n = (int)_sidx.size(), vi = split.varIdx; + _sleft.reserve(n); + _sright.reserve(n); + _sleft.clear(); + _sright.clear(); + + AutoBuffer buf(n); + int mi = getCatCount(vi); + double wleft = 0, wright = 0; + const double* weights = &w->sample_weights[0]; + + if( mi <= 0 ) // split on an ordered variable + { + float c = split.c; + float* values = buf.data(); + w->data->getValues(vi, _sidx, values); + + for( i = 0; i < n; i++ ) + { + si = _sidx[i]; + if( values[i] <= c ) + { + _sleft.push_back(si); + wleft += weights[si]; + } + else + { + _sright.push_back(si); + wright += weights[si]; + } + } + } + else + { + const int* subset = &w->wsubsets[split.subsetOfs]; + int* cat_labels = (int*)buf.data(); + w->data->getNormCatValues(vi, _sidx, cat_labels); + + for( i = 0; i < n; i++ ) + { + si = _sidx[i]; + unsigned u = cat_labels[i]; + if( CV_DTREE_CAT_DIR(u, subset) < 0 ) + { + _sleft.push_back(si); + wleft += weights[si]; + } + else + { + _sright.push_back(si); + wright += weights[si]; + } + } + } + CV_Assert( (int)_sleft.size() < n && (int)_sright.size() < n ); + return wleft > wright ? -1 : 1; +} + +int DTreesImpl::pruneCV( int root ) +{ + vector ab; + + // 1. build tree sequence for each cv fold, calculate error_{Tj,beta_k}. + // 2. choose the best tree index (if need, apply 1SE rule). + // 3. store the best index and cut the branches. 
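+    //
+    // updateTreeRNC() computes the cost-complexity measure
+    //    alpha(node) = (R(node) - R(subtree)) / (#leaves(subtree) - 1),
+    // i.e. the increase in risk per pruned leaf; each iteration collapses the branches with
+    // the smallest alpha (the "weakest link").  The per-fold error table below is evaluated at
+    // the geometric means sqrt(alpha_k*alpha_{k+1}) of consecutive alpha values.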
+ + int ti, tree_count = 0, j, cv_n = params.getCVFolds(), n = w->wnodes[root].sample_count; + // currently, 1SE for regression is not implemented + bool use_1se = params.use1SERule != 0 && _isClassifier; + double min_err = 0, min_err_se = 0; + int min_idx = -1; + + // build the main tree sequence, calculate alpha's + for(;;tree_count++) + { + double min_alpha = updateTreeRNC(root, tree_count, -1); + if( cutTree(root, tree_count, -1, min_alpha) ) + break; + + ab.push_back(min_alpha); + } + + if( tree_count > 0 ) + { + ab[0] = 0.; + + for( ti = 1; ti < tree_count-1; ti++ ) + ab[ti] = std::sqrt(ab[ti]*ab[ti+1]); + ab[tree_count-1] = DBL_MAX*0.5; + + Mat err_jk(cv_n, tree_count, CV_64F); + + for( j = 0; j < cv_n; j++ ) + { + int tj = 0, tk = 0; + for( ; tj < tree_count; tj++ ) + { + double min_alpha = updateTreeRNC(root, tj, j); + if( cutTree(root, tj, j, min_alpha) ) + min_alpha = DBL_MAX; + + for( ; tk < tree_count; tk++ ) + { + if( ab[tk] > min_alpha ) + break; + err_jk.at(j, tk) = w->wnodes[root].tree_error; + } + } + } + + for( ti = 0; ti < tree_count; ti++ ) + { + double sum_err = 0; + for( j = 0; j < cv_n; j++ ) + sum_err += err_jk.at(j, ti); + if( ti == 0 || sum_err < min_err ) + { + min_err = sum_err; + min_idx = ti; + if( use_1se ) + min_err_se = sqrt( sum_err*(n - sum_err) ); + } + else if( sum_err < min_err + min_err_se ) + min_idx = ti; + } + } + + return min_idx; +} + +double DTreesImpl::updateTreeRNC( int root, double T, int fold ) +{ + int nidx = root, pidx = -1, cv_n = params.getCVFolds(); + double min_alpha = DBL_MAX; + + for(;;) + { + WNode *node = 0, *parent = 0; + + for(;;) + { + node = &w->wnodes[nidx]; + double t = fold >= 0 ? w->cv_Tn[nidx*cv_n + fold] : node->Tn; + if( t <= T || node->left < 0 ) + { + node->complexity = 1; + node->tree_risk = node->node_risk; + node->tree_error = 0.; + if( fold >= 0 ) + { + node->tree_risk = w->cv_node_risk[nidx*cv_n + fold]; + node->tree_error = w->cv_node_error[nidx*cv_n + fold]; + } + break; + } + nidx = node->left; + } + + for( pidx = node->parent; pidx >= 0 && w->wnodes[pidx].right == nidx; + nidx = pidx, pidx = w->wnodes[pidx].parent ) + { + node = &w->wnodes[nidx]; + parent = &w->wnodes[pidx]; + parent->complexity += node->complexity; + parent->tree_risk += node->tree_risk; + parent->tree_error += node->tree_error; + + parent->alpha = ((fold >= 0 ? w->cv_node_risk[pidx*cv_n + fold] : parent->node_risk) + - parent->tree_risk)/(parent->complexity - 1); + min_alpha = std::min( min_alpha, parent->alpha ); + } + + if( pidx < 0 ) + break; + + node = &w->wnodes[nidx]; + parent = &w->wnodes[pidx]; + parent->complexity = node->complexity; + parent->tree_risk = node->tree_risk; + parent->tree_error = node->tree_error; + nidx = parent->right; + } + + return min_alpha; +} + +bool DTreesImpl::cutTree( int root, double T, int fold, double min_alpha ) +{ + int cv_n = params.getCVFolds(), nidx = root, pidx = -1; + WNode* node = &w->wnodes[root]; + if( node->left < 0 ) + return true; + + for(;;) + { + for(;;) + { + node = &w->wnodes[nidx]; + double t = fold >= 0 ? 
w->cv_Tn[nidx*cv_n + fold] : node->Tn; + if( t <= T || node->left < 0 ) + break; + if( node->alpha <= min_alpha + FLT_EPSILON ) + { + if( fold >= 0 ) + w->cv_Tn[nidx*cv_n + fold] = T; + else + node->Tn = T; + if( nidx == root ) + return true; + break; + } + nidx = node->left; + } + + for( pidx = node->parent; pidx >= 0 && w->wnodes[pidx].right == nidx; + nidx = pidx, pidx = w->wnodes[pidx].parent ) + ; + + if( pidx < 0 ) + break; + + nidx = w->wnodes[pidx].right; + } + + return false; +} + +float DTreesImpl::predictTrees( const Range& range, const Mat& sample, int flags ) const +{ + CV_Assert( sample.type() == CV_32F ); + + int predictType = flags & PREDICT_MASK; + int nvars = (int)varIdx.size(); + if( nvars == 0 ) + nvars = (int)varType.size(); + int i, ncats = (int)catOfs.size(), nclasses = (int)classLabels.size(); + int catbufsize = ncats > 0 ? nvars : 0; + AutoBuffer buf(nclasses + catbufsize + 1); + int* votes = buf.data(); + int* catbuf = votes + nclasses; + const int* cvidx = (flags & (COMPRESSED_INPUT|PREPROCESSED_INPUT)) == 0 && !varIdx.empty() ? &compVarIdx[0] : 0; + const uchar* vtype = &varType[0]; + const Vec2i* cofs = !catOfs.empty() ? &catOfs[0] : 0; + const int* cmap = !catMap.empty() ? &catMap[0] : 0; + const float* psample = sample.ptr(); + const float* missingSubstPtr = !missingSubst.empty() ? &missingSubst[0] : 0; + size_t sstep = sample.isContinuous() ? 1 : sample.step/sizeof(float); + double sum = 0.; + int lastClassIdx = -1; + const float MISSED_VAL = TrainData::missingValue(); + + for( i = 0; i < catbufsize; i++ ) + catbuf[i] = -1; + + if( predictType == PREDICT_AUTO ) + { + predictType = !_isClassifier || (classLabels.size() == 2 && (flags & RAW_OUTPUT) != 0) ? + PREDICT_SUM : PREDICT_MAX_VOTE; + } + + if( predictType == PREDICT_MAX_VOTE ) + { + for( i = 0; i < nclasses; i++ ) + votes[i] = 0; + } + + for( int ridx = range.start; ridx < range.end; ridx++ ) + { + int nidx = roots[ridx], prev = nidx, c = 0; + + for(;;) + { + prev = nidx; + const Node& node = nodes[nidx]; + if( node.split < 0 ) + break; + const Split& split = splits[node.split]; + int vi = split.varIdx; + int ci = cvidx ? cvidx[vi] : vi; + float val = psample[ci*sstep]; + if( val == MISSED_VAL ) + { + if( !missingSubstPtr ) + { + nidx = node.defaultDir < 0 ? node.left : node.right; + continue; + } + val = missingSubstPtr[vi]; + } + + if( vtype[vi] == VAR_ORDERED ) + nidx = val <= split.c ? node.left : node.right; + else + { + if( flags & PREPROCESSED_INPUT ) + c = cvRound(val); + else + { + c = catbuf[ci]; + if( c < 0 ) + { + int a = c = cofs[vi][0]; + int b = cofs[vi][1]; + + int ival = cvRound(val); + if( ival != val ) + CV_Error( CV_StsBadArg, + "one of input categorical variable is not an integer" ); + + CV_Assert(cmap != NULL); + while( a < b ) + { + c = (a + b) >> 1; + if( ival < cmap[c] ) + b = c; + else if( ival > cmap[c] ) + a = c+1; + else + break; + } + + CV_Assert( c >= 0 && ival == cmap[c] ); + + c -= cofs[vi][0]; + catbuf[ci] = c; + } + const int* subset = &subsets[split.subsetOfs]; + unsigned u = c; + nidx = CV_DTREE_CAT_DIR(u, subset) < 0 ? node.left : node.right; + } + } + } + + if( predictType == PREDICT_SUM ) + sum += nodes[prev].value; + else + { + lastClassIdx = nodes[prev].classIdx; + votes[lastClassIdx]++; + } + } + + if( predictType == PREDICT_MAX_VOTE ) + { + int best_idx = lastClassIdx; + if( range.end - range.start > 1 ) + { + best_idx = 0; + for( i = 1; i < nclasses; i++ ) + if( votes[best_idx] < votes[i] ) + best_idx = i; + } + sum = (flags & RAW_OUTPUT) ? 
(float)best_idx : classLabels[best_idx]; + } + + return (float)sum; +} + + +float DTreesImpl::predict( InputArray _samples, OutputArray _results, int flags ) const +{ + CV_Assert( !roots.empty() ); + Mat samples = _samples.getMat(), results; + int i, nsamples = samples.rows; + int rtype = CV_32F; + bool needresults = _results.needed(); + float retval = 0.f; + bool iscls = isClassifier(); + float scale = !iscls ? 1.f/(int)roots.size() : 1.f; + + if( iscls && (flags & PREDICT_MASK) == PREDICT_MAX_VOTE ) + rtype = CV_32S; + + if( needresults ) + { + _results.create(nsamples, 1, rtype); + results = _results.getMat(); + } + else + nsamples = std::min(nsamples, 1); + + for( i = 0; i < nsamples; i++ ) + { + float val = predictTrees( Range(0, (int)roots.size()), samples.row(i), flags )*scale; + if( needresults ) + { + if( rtype == CV_32F ) + results.at(i) = val; + else + results.at(i) = cvRound(val); + } + if( i == 0 ) + retval = val; + } + return retval; +} + +void DTreesImpl::writeTrainingParams(FileStorage& fs) const +{ + fs << "use_surrogates" << (params.useSurrogates ? 1 : 0); + fs << "max_categories" << params.getMaxCategories(); + fs << "regression_accuracy" << params.getRegressionAccuracy(); + + fs << "max_depth" << params.getMaxDepth(); + fs << "min_sample_count" << params.getMinSampleCount(); + fs << "cross_validation_folds" << params.getCVFolds(); + + if( params.getCVFolds() > 1 ) + fs << "use_1se_rule" << (params.use1SERule ? 1 : 0); + + if( !params.priors.empty() ) + fs << "priors" << params.priors; +} + +void DTreesImpl::writeParams(FileStorage& fs) const +{ + fs << "is_classifier" << isClassifier(); + fs << "var_all" << (int)varType.size(); + fs << "var_count" << getVarCount(); + + int ord_var_count = 0, cat_var_count = 0; + int i, n = (int)varType.size(); + for( i = 0; i < n; i++ ) + if( varType[i] == VAR_ORDERED ) + ord_var_count++; + else + cat_var_count++; + fs << "ord_var_count" << ord_var_count; + fs << "cat_var_count" << cat_var_count; + + fs << "training_params" << "{"; + writeTrainingParams(fs); + + fs << "}"; + + if( !varIdx.empty() ) + { + fs << "global_var_idx" << 1; + fs << "var_idx" << varIdx; + } + + fs << "var_type" << varType; + + if( !catOfs.empty() ) + fs << "cat_ofs" << catOfs; + if( !catMap.empty() ) + fs << "cat_map" << catMap; + if( !classLabels.empty() ) + fs << "class_labels" << classLabels; + if( !missingSubst.empty() ) + fs << "missing_subst" << missingSubst; +} + +void DTreesImpl::writeSplit( FileStorage& fs, int splitidx ) const +{ + const Split& split = splits[splitidx]; + + fs << "{:"; + + int vi = split.varIdx; + fs << "var" << vi; + fs << "quality" << split.quality; + + if( varType[vi] == VAR_CATEGORICAL ) // split on a categorical var + { + int i, n = getCatCount(vi), to_right = 0; + const int* subset = &subsets[split.subsetOfs]; + for( i = 0; i < n; i++ ) + to_right += CV_DTREE_CAT_DIR(i, subset) > 0; + + // ad-hoc rule when to use inverse categorical split notation + // to achieve more compact and clear representation + int default_dir = to_right <= 1 || to_right <= std::min(3, n/2) || to_right <= n/3 ? -1 : 1; + + fs << (default_dir*(split.inversed ? -1 : 1) > 0 ? "in" : "not_in") << "[:"; + + for( i = 0; i < n; i++ ) + { + int dir = CV_DTREE_CAT_DIR(i, subset); + if( dir*default_dir < 0 ) + fs << i; + } + + fs << "]"; + } + else + fs << (!split.inversed ? 
"le" : "gt") << split.c; + + fs << "}"; +} + +void DTreesImpl::writeNode( FileStorage& fs, int nidx, int depth ) const +{ + const Node& node = nodes[nidx]; + fs << "{"; + fs << "depth" << depth; + fs << "value" << node.value; + + if( _isClassifier ) + fs << "norm_class_idx" << node.classIdx; + + if( node.split >= 0 ) + { + fs << "splits" << "["; + + for( int splitidx = node.split; splitidx >= 0; splitidx = splits[splitidx].next ) + writeSplit( fs, splitidx ); + + fs << "]"; + } + + fs << "}"; +} + +void DTreesImpl::writeTree( FileStorage& fs, int root ) const +{ + fs << "nodes" << "["; + + int nidx = root, pidx = 0, depth = 0; + const Node *node = 0; + + // traverse the tree and save all the nodes in depth-first order + for(;;) + { + for(;;) + { + writeNode( fs, nidx, depth ); + node = &nodes[nidx]; + if( node->left < 0 ) + break; + nidx = node->left; + depth++; + } + + for( pidx = node->parent; pidx >= 0 && nodes[pidx].right == nidx; + nidx = pidx, pidx = nodes[pidx].parent ) + depth--; + + if( pidx < 0 ) + break; + + nidx = nodes[pidx].right; + } + + fs << "]"; +} + +void DTreesImpl::write( FileStorage& fs ) const +{ + writeFormat(fs); + writeParams(fs); + writeTree(fs, roots[0]); +} + +void DTreesImpl::readParams( const FileNode& fn ) +{ + _isClassifier = (int)fn["is_classifier"] != 0; + int varAll = (int)fn["var_all"]; + int varCount = (int)fn["var_count"]; + /*int cat_var_count = (int)fn["cat_var_count"]; + int ord_var_count = (int)fn["ord_var_count"];*/ + + if (varAll <= 0) + CV_Error(Error::StsParseError, "The field \"var_all\" of DTree classifier is missing or non-positive"); + + FileNode tparams_node = fn["training_params"]; + + TreeParams params0 = TreeParams(); + + if( !tparams_node.empty() ) // training parameters are not necessary + { + params0.useSurrogates = (int)tparams_node["use_surrogates"] != 0; + params0.setMaxCategories((int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"])); + params0.setRegressionAccuracy((float)tparams_node["regression_accuracy"]); + params0.setMaxDepth((int)tparams_node["max_depth"]); + params0.setMinSampleCount((int)tparams_node["min_sample_count"]); + params0.setCVFolds((int)tparams_node["cross_validation_folds"]); + + if( params0.getCVFolds() > 1 ) + { + params.use1SERule = (int)tparams_node["use_1se_rule"] != 0; + } + + tparams_node["priors"] >> params0.priors; + } + + readVectorOrMat(fn["var_idx"], varIdx); + fn["var_type"] >> varType; + + bool isLegacy = false; + if (fn["format"].empty()) // Export bug until OpenCV 3.2: https://github.com/opencv/opencv/pull/6314 + { + if (!fn["cat_ofs"].empty()) + isLegacy = false; // 2.4 doesn't store "cat_ofs" + else if (!fn["missing_subst"].empty()) + isLegacy = false; // 2.4 doesn't store "missing_subst" + else if (!fn["class_labels"].empty()) + isLegacy = false; // 2.4 doesn't store "class_labels" + else if ((int)varType.size() != varAll) + isLegacy = true; // 3.0+: https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/tree.cpp#L1576 + else if (/*(int)varType.size() == varAll &&*/ varCount == varAll) + isLegacy = true; + else + { + // 3.0+: + // - https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/tree.cpp#L1552-L1553 + // - https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/precomp.hpp#L296 + isLegacy = !(varCount + 1 == varAll); + } + CV_LOG_INFO(NULL, "ML/DTrees: possible missing 'format' field due to bug of OpenCV export implementation. " + "Details: https://github.com/opencv/opencv/issues/5412. Consider re-exporting of saved ML model. 
" + "isLegacy = " << isLegacy); + } + else + { + int format = 0; + fn["format"] >> format; + CV_CheckGT(format, 0, ""); + isLegacy = format < 3; + } + + if (isLegacy && (int)varType.size() <= varAll) + { + std::vector extendedTypes(varAll + 1, 0); + + int i = 0, n; + if (!varIdx.empty()) + { + n = (int)varIdx.size(); + for (; i < n; ++i) + { + int var = varIdx[i]; + extendedTypes[var] = varType[i]; + } + } + else + { + n = (int)varType.size(); + for (; i < n; ++i) + { + extendedTypes[i] = varType[i]; + } + } + extendedTypes[varAll] = (uchar)(_isClassifier ? VAR_CATEGORICAL : VAR_ORDERED); + extendedTypes.swap(varType); + } + + readVectorOrMat(fn["cat_map"], catMap); + + if (isLegacy) + { + // generating "catOfs" from "cat_count" + catOfs.clear(); + classLabels.clear(); + std::vector counts; + readVectorOrMat(fn["cat_count"], counts); + unsigned int i = 0, j = 0, curShift = 0, size = (int)varType.size() - 1; + for (; i < size; ++i) + { + Vec2i newOffsets(0, 0); + if (varType[i] == VAR_CATEGORICAL) // only categorical vars are represented in catMap + { + newOffsets[0] = curShift; + curShift += counts[j]; + newOffsets[1] = curShift; + ++j; + } + catOfs.push_back(newOffsets); + } + // other elements in "catMap" are "classLabels" + if (curShift < catMap.size()) + { + classLabels.insert(classLabels.end(), catMap.begin() + curShift, catMap.end()); + catMap.erase(catMap.begin() + curShift, catMap.end()); + } + } + else + { + fn["cat_ofs"] >> catOfs; + fn["missing_subst"] >> missingSubst; + fn["class_labels"] >> classLabels; + } + + // init var mapping for node reading (var indexes or varIdx indexes) + bool globalVarIdx = false; + fn["global_var_idx"] >> globalVarIdx; + if (globalVarIdx || varIdx.empty()) + setRangeVector(varMapping, (int)varType.size()); + else + varMapping = varIdx; + + initCompVarIdx(); + setDParams(params0); +} + +int DTreesImpl::readSplit( const FileNode& fn ) +{ + Split split; + + int vi = (int)fn["var"]; + CV_Assert( 0 <= vi && vi <= (int)varType.size() ); + vi = varMapping[vi]; // convert to varIdx if needed + split.varIdx = vi; + + if( varType[vi] == VAR_CATEGORICAL ) // split on categorical var + { + int i, val, ssize = getSubsetSize(vi); + split.subsetOfs = (int)subsets.size(); + for( i = 0; i < ssize; i++ ) + subsets.push_back(0); + int* subset = &subsets[split.subsetOfs]; + FileNode fns = fn["in"]; + if( fns.empty() ) + { + fns = fn["not_in"]; + split.inversed = true; + } + + if( fns.isInt() ) + { + val = (int)fns; + subset[val >> 5] |= 1 << (val & 31); + } + else + { + FileNodeIterator it = fns.begin(); + int n = (int)fns.size(); + for( i = 0; i < n; i++, ++it ) + { + val = (int)*it; + subset[val >> 5] |= 1 << (val & 31); + } + } + + // for categorical splits we do not use inversed splits, + // instead we inverse the variable set in the split + if( split.inversed ) + { + for( i = 0; i < ssize; i++ ) + subset[i] ^= -1; + split.inversed = false; + } + } + else + { + FileNode cmpNode = fn["le"]; + if( cmpNode.empty() ) + { + cmpNode = fn["gt"]; + split.inversed = true; + } + split.c = (float)cmpNode; + } + + split.quality = (float)fn["quality"]; + splits.push_back(split); + + return (int)(splits.size() - 1); +} + +int DTreesImpl::readNode( const FileNode& fn ) +{ + Node node; + node.value = (double)fn["value"]; + + if( _isClassifier ) + node.classIdx = (int)fn["norm_class_idx"]; + + FileNode sfn = fn["splits"]; + if( !sfn.empty() ) + { + int i, n = (int)sfn.size(), prevsplit = -1; + FileNodeIterator it = sfn.begin(); + + for( i = 0; i < n; i++, ++it ) + { + int splitidx 
= readSplit(*it); + if( splitidx < 0 ) + break; + if( prevsplit < 0 ) + node.split = splitidx; + else + splits[prevsplit].next = splitidx; + prevsplit = splitidx; + } + } + nodes.push_back(node); + return (int)(nodes.size() - 1); +} + +int DTreesImpl::readTree( const FileNode& fn ) +{ + int i, n = (int)fn.size(), root = -1, pidx = -1; + FileNodeIterator it = fn.begin(); + + for( i = 0; i < n; i++, ++it ) + { + int nidx = readNode(*it); + if( nidx < 0 ) + break; + Node& node = nodes[nidx]; + node.parent = pidx; + if( pidx < 0 ) + root = nidx; + else + { + Node& parent = nodes[pidx]; + if( parent.left < 0 ) + parent.left = nidx; + else + parent.right = nidx; + } + if( node.split >= 0 ) + pidx = nidx; + else + { + while( pidx >= 0 && nodes[pidx].right >= 0 ) + pidx = nodes[pidx].parent; + } + } + roots.push_back(root); + return root; +} + +void DTreesImpl::read( const FileNode& fn ) +{ + clear(); + readParams(fn); + + FileNode fnodes = fn["nodes"]; + CV_Assert( !fnodes.empty() ); + readTree(fnodes); +} + +Ptr DTrees::create() +{ + return makePtr(); +} + +Ptr DTrees::load(const String& filepath, const String& nodeName) +{ + return Algorithm::load(filepath, nodeName); +} + + +} +} + +/* End of file. */ diff --git a/modules/ml/test/test_ann.cpp b/modules/ml/test/test_ann.cpp new file mode 100644 index 00000000000..a3782d25a81 --- /dev/null +++ b/modules/ml/test/test_ann.cpp @@ -0,0 +1,200 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +// #define GENERATE_TESTDATA + +namespace opencv_test { namespace { + +struct Activation +{ + int id; + const char * name; +}; +void PrintTo(const Activation &a, std::ostream *os) { *os << a.name; } + +Activation activation_list[] = +{ + { ml::ANN_MLP::IDENTITY, "identity" }, + { ml::ANN_MLP::SIGMOID_SYM, "sigmoid_sym" }, + { ml::ANN_MLP::GAUSSIAN, "gaussian" }, + { ml::ANN_MLP::RELU, "relu" }, + { ml::ANN_MLP::LEAKYRELU, "leakyrelu" }, +}; + +typedef testing::TestWithParam< Activation > ML_ANN_Params; + +TEST_P(ML_ANN_Params, ActivationFunction) +{ + const Activation &activation = GetParam(); + const string dataname = "waveform"; + const string data_path = findDataFile(dataname + ".data"); + const string model_name = dataname + "_" + activation.name + ".yml"; + + Ptr tdata = TrainData::loadFromCSV(data_path, 0); + ASSERT_FALSE(tdata.empty()); + + // hack? 
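+    // setTrainTestSplit() shuffles the samples using cv::theRNG(); pinning the RNG state to a
+    // fixed value keeps the 500-sample training split (and thus the reference results)
+    // reproducible, and the previous state is restored immediately afterwards.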
+ const uint64 old_state = theRNG().state; + theRNG().state = 1027401484159173092; + tdata->setTrainTestSplit(500); + theRNG().state = old_state; + + Mat_ layerSizes(1, 4); + layerSizes(0, 0) = tdata->getNVars(); + layerSizes(0, 1) = 100; + layerSizes(0, 2) = 100; + layerSizes(0, 3) = tdata->getResponses().cols; + + Mat testSamples = tdata->getTestSamples(); + Mat rx, ry; + + { + Ptr x = ml::ANN_MLP::create(); + x->setActivationFunction(activation.id); + x->setLayerSizes(layerSizes); + x->setTrainMethod(ml::ANN_MLP::RPROP, 0.01, 0.1); + x->setTermCriteria(TermCriteria(TermCriteria::COUNT, 300, 0.01)); + x->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE); + ASSERT_TRUE(x->isTrained()); + x->predict(testSamples, rx); +#ifdef GENERATE_TESTDATA + x->save(cvtest::TS::ptr()->get_data_path() + model_name); +#endif + } + + { + const string model_path = findDataFile(model_name); + Ptr y = Algorithm::load(model_path); + ASSERT_TRUE(y); + y->predict(testSamples, ry); + EXPECT_MAT_NEAR(rx, ry, FLT_EPSILON); + } +} + +INSTANTIATE_TEST_CASE_P(/**/, ML_ANN_Params, testing::ValuesIn(activation_list)); + +//================================================================================================== + +CV_ENUM(ANN_MLP_METHOD, ANN_MLP::RPROP, ANN_MLP::ANNEAL) + +typedef tuple ML_ANN_METHOD_Params; +typedef TestWithParam ML_ANN_METHOD; + +TEST_P(ML_ANN_METHOD, Test) +{ + int methodType = get<0>(GetParam()); + string methodName = get<1>(GetParam()); + int N = get<2>(GetParam()); + + String folder = string(cvtest::TS::ptr()->get_data_path()); + String original_path = findDataFile("waveform.data"); + string dataname = "waveform_" + methodName; + string weight_name = dataname + "_init_weight.yml.gz"; + string model_name = dataname + ".yml.gz"; + string response_name = dataname + "_response.yml.gz"; + + Ptr tdata2 = TrainData::loadFromCSV(original_path, 0); + ASSERT_FALSE(tdata2.empty()); + + Mat samples = tdata2->getSamples()(Range(0, N), Range::all()); + Mat responses(N, 3, CV_32FC1, Scalar(0)); + for (int i = 0; i < N; i++) + responses.at(i, static_cast(tdata2->getResponses().at(i, 0))) = 1; + + Ptr tdata = TrainData::create(samples, ml::ROW_SAMPLE, responses); + ASSERT_FALSE(tdata.empty()); + + // hack? 
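+    // same idea as above: pin cv::theRNG() so that setTrainTestSplitRatio(0.8) yields a
+    // deterministic train/test split, then restore the previous RNG state.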
+ const uint64 old_state = theRNG().state; + theRNG().state = 0; + tdata->setTrainTestSplitRatio(0.8); + theRNG().state = old_state; + + Mat testSamples = tdata->getTestSamples(); + + // train 1st stage + + Ptr xx = ml::ANN_MLP::create(); + Mat_ layerSizes(1, 4); + layerSizes(0, 0) = tdata->getNVars(); + layerSizes(0, 1) = 30; + layerSizes(0, 2) = 30; + layerSizes(0, 3) = tdata->getResponses().cols; + xx->setLayerSizes(layerSizes); + xx->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM); + xx->setTrainMethod(ml::ANN_MLP::RPROP); + xx->setTermCriteria(TermCriteria(TermCriteria::COUNT, 1, 0.01)); + xx->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE); +#ifdef GENERATE_TESTDATA + { + FileStorage fs; + fs.open(cvtest::TS::ptr()->get_data_path() + weight_name, FileStorage::WRITE + FileStorage::BASE64); + xx->write(fs); + } +#endif + + // train 2nd stage + Mat r_gold; + Ptr x = ml::ANN_MLP::create(); + { + const string weight_file = findDataFile(weight_name); + FileStorage fs; + fs.open(weight_file, FileStorage::READ); + x->read(fs.root()); + } + x->setTrainMethod(methodType); + if (methodType == ml::ANN_MLP::ANNEAL) + { + x->setAnnealEnergyRNG(RNG(CV_BIG_INT(0xffffffff))); + x->setAnnealInitialT(12); + x->setAnnealFinalT(0.15); + x->setAnnealCoolingRatio(0.96); + x->setAnnealItePerStep(11); + } + x->setTermCriteria(TermCriteria(TermCriteria::COUNT, 100, 0.01)); + x->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE + ml::ANN_MLP::UPDATE_WEIGHTS); + ASSERT_TRUE(x->isTrained()); +#ifdef GENERATE_TESTDATA + x->save(cvtest::TS::ptr()->get_data_path() + model_name); + x->predict(testSamples, r_gold); + { + FileStorage fs_response(cvtest::TS::ptr()->get_data_path() + response_name, FileStorage::WRITE + FileStorage::BASE64); + fs_response << "response" << r_gold; + } +#endif + { + const string response_file = findDataFile(response_name); + FileStorage fs_response(response_file, FileStorage::READ); + fs_response["response"] >> r_gold; + } + ASSERT_FALSE(r_gold.empty()); + + // verify + const string model_file = findDataFile(model_name); + Ptr y = Algorithm::load(model_file); + ASSERT_TRUE(y); + Mat rx, ry; + for (int j = 0; j < 4; j++) + { + rx = x->getWeights(j); + ry = y->getWeights(j); + EXPECT_MAT_NEAR(rx, ry, FLT_EPSILON) << "Weights are not equal for layer: " << j; + } + x->predict(testSamples, rx); + y->predict(testSamples, ry); + EXPECT_MAT_NEAR(ry, rx, FLT_EPSILON) << "Predict are not equal to result of the saved model"; + EXPECT_MAT_NEAR(r_gold, rx, FLT_EPSILON) << "Predict are not equal to 'gold' response"; +} + +INSTANTIATE_TEST_CASE_P(/*none*/, ML_ANN_METHOD, + testing::Values( + ML_ANN_METHOD_Params(ml::ANN_MLP::RPROP, "rprop", 5000), + ML_ANN_METHOD_Params(ml::ANN_MLP::ANNEAL, "anneal", 1000) + // ML_ANN_METHOD_Params(ml::ANN_MLP::BACKPROP, "backprop", 500) -----> NO BACKPROP TEST + ) +); + +}} // namespace diff --git a/modules/ml/test/test_bayes.cpp b/modules/ml/test/test_bayes.cpp new file mode 100644 index 00000000000..07ff8b2a361 --- /dev/null +++ b/modules/ml/test/test_bayes.cpp @@ -0,0 +1,56 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+ +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +TEST(ML_NBAYES, regression_5911) +{ + int N=12; + Ptr nb = cv::ml::NormalBayesClassifier::create(); + + // data: + float X_data[] = { + 1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4, + 5,5,5,5, 5,5,5,5, 5,5,5,5, 5,5,5,5, + 4,3,2,1, 4,3,2,1, 4,3,2,1, 4,3,2,1 + }; + Mat_ X(N, 4, X_data); + + // labels: + int Y_data[] = { 0,0,0,0, 1,1,1,1, 2,2,2,2 }; + Mat_ Y(N, 1, Y_data); + + nb->train(X, ml::ROW_SAMPLE, Y); + + // single prediction: + Mat R1,P1; + for (int i=0; ipredictProb(X.row(i), r, p); + R1.push_back(r); + P1.push_back(p); + } + + // bulk prediction (continuous memory): + Mat R2,P2; + nb->predictProb(X, R2, P2); + + EXPECT_EQ(255 * R2.total(), sum(R1 == R2)[0]); + EXPECT_EQ(255 * P2.total(), sum(P1 == P2)[0]); + + // bulk prediction, with non-continuous memory storage + Mat R3_(N, 1+1, CV_32S), + P3_(N, 3+1, CV_32F); + nb->predictProb(X, R3_.col(0), P3_.colRange(0,3)); + Mat R3 = R3_.col(0).clone(), + P3 = P3_.colRange(0,3).clone(); + + EXPECT_EQ(255 * R3.total(), sum(R1 == R3)[0]); + EXPECT_EQ(255 * P3.total(), sum(P1 == P3)[0]); +} + +}} // namespace diff --git a/modules/ml/test/test_em.cpp b/modules/ml/test/test_em.cpp new file mode 100644 index 00000000000..373385d406b --- /dev/null +++ b/modules/ml/test/test_em.cpp @@ -0,0 +1,186 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +CV_ENUM(EM_START_STEP, EM::START_AUTO_STEP, EM::START_M_STEP, EM::START_E_STEP) +CV_ENUM(EM_COV_MAT, EM::COV_MAT_GENERIC, EM::COV_MAT_DIAGONAL, EM::COV_MAT_SPHERICAL) + +typedef testing::TestWithParam< tuple > ML_EM_Params; + +TEST_P(ML_EM_Params, accuracy) +{ + const int nclusters = 3; + const int sizesArr[] = { 500, 700, 800 }; + const vector sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) ); + const int pointsCount = sizesArr[0] + sizesArr[1] + sizesArr[2]; + Mat means; + vector covs; + defaultDistribs( means, covs, CV_64FC1 ); + Mat trainData(pointsCount, 2, CV_64FC1 ); + Mat trainLabels; + generateData( trainData, trainLabels, sizes, means, covs, CV_64FC1, CV_32SC1 ); + Mat testData( pointsCount, 2, CV_64FC1 ); + Mat testLabels; + generateData( testData, testLabels, sizes, means, covs, CV_64FC1, CV_32SC1 ); + Mat probs(trainData.rows, nclusters, CV_64FC1, cv::Scalar(1)); + Mat weights(1, nclusters, CV_64FC1, cv::Scalar(1)); + TermCriteria termCrit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, FLT_EPSILON); + int startStep = get<0>(GetParam()); + int covMatType = get<1>(GetParam()); + cv::Mat labels; + + Ptr em = EM::create(); + em->setClustersNumber(nclusters); + em->setCovarianceMatrixType(covMatType); + em->setTermCriteria(termCrit); + if( startStep == EM::START_AUTO_STEP ) + em->trainEM( trainData, noArray(), labels, noArray() ); + else if( startStep == EM::START_E_STEP ) + em->trainE( trainData, means, covs, weights, noArray(), labels, noArray() ); + else if( startStep == EM::START_M_STEP ) + em->trainM( trainData, probs, noArray(), labels, noArray() ); + + { + SCOPED_TRACE("Train"); + float err = 1000; + EXPECT_TRUE(calcErr( labels, trainLabels, sizes, err , false, false )); + EXPECT_LE(err, 0.008f); + } + + { + SCOPED_TRACE("Test"); + float err = 1000; + labels.create( testData.rows, 1, CV_32SC1 ); + for( int i = 0; i < testData.rows; i++ ) + { + Mat sample = testData.row(i); + Mat 
out_probs; + labels.at(i) = static_cast(em->predict2( sample, out_probs )[1]); + } + EXPECT_TRUE(calcErr( labels, testLabels, sizes, err, false, false )); + EXPECT_LE(err, 0.008f); + } +} + +INSTANTIATE_TEST_CASE_P(/**/, ML_EM_Params, + testing::Combine( + testing::Values(EM::START_AUTO_STEP, EM::START_M_STEP, EM::START_E_STEP), + testing::Values(EM::COV_MAT_GENERIC, EM::COV_MAT_DIAGONAL, EM::COV_MAT_SPHERICAL) + )); + +//================================================================================================== + +TEST(ML_EM, save_load) +{ + const int nclusters = 2; + Mat_ samples(3, 1); + samples << 1., 2., 3.; + + std::vector firstResult; + string filename = cv::tempfile(".xml"); + { + Mat labels; + Ptr em = EM::create(); + em->setClustersNumber(nclusters); + em->trainEM(samples, noArray(), labels, noArray()); + for( int i = 0; i < samples.rows; i++) + { + Vec2d res = em->predict2(samples.row(i), noArray()); + firstResult.push_back(res[1]); + } + { + FileStorage fs = FileStorage(filename, FileStorage::WRITE); + ASSERT_NO_THROW(fs << "em" << "{"); + ASSERT_NO_THROW(em->write(fs)); + ASSERT_NO_THROW(fs << "}"); + } + } + { + Ptr em; + ASSERT_NO_THROW(em = Algorithm::load(filename)); + for( int i = 0; i < samples.rows; i++) + { + SCOPED_TRACE(i); + Vec2d res = em->predict2(samples.row(i), noArray()); + EXPECT_DOUBLE_EQ(firstResult[i], res[1]); + } + } + remove(filename.c_str()); +} + +//================================================================================================== + +TEST(ML_EM, classification) +{ + // This test classifies spam by the following way: + // 1. estimates distributions of "spam" / "not spam" + // 2. predict classID using Bayes classifier for estimated distributions. + string dataFilename = findDataFile("spambase.data"); + Ptr data = TrainData::loadFromCSV(dataFilename, 0); + ASSERT_FALSE(data.empty()); + + Mat samples = data->getSamples(); + ASSERT_EQ(samples.cols, 57); + Mat responses = data->getResponses(); + + vector trainSamplesMask(samples.rows, 0); + const int trainSamplesCount = (int)(0.5f * samples.rows); + const int testSamplesCount = samples.rows - trainSamplesCount; + for(int i = 0; i < trainSamplesCount; i++) + trainSamplesMask[i] = 1; + RNG &rng = cv::theRNG(); + for(size_t i = 0; i < trainSamplesMask.size(); i++) + { + int i1 = rng(static_cast(trainSamplesMask.size())); + int i2 = rng(static_cast(trainSamplesMask.size())); + std::swap(trainSamplesMask[i1], trainSamplesMask[i2]); + } + + Mat samples0, samples1; + for(int i = 0; i < samples.rows; i++) + { + if(trainSamplesMask[i]) + { + Mat sample = samples.row(i); + int resp = (int)responses.at(i); + if(resp == 0) + samples0.push_back(sample); + else + samples1.push_back(sample); + } + } + + Ptr model0 = EM::create(); + model0->setClustersNumber(3); + model0->trainEM(samples0, noArray(), noArray(), noArray()); + + Ptr model1 = EM::create(); + model1->setClustersNumber(3); + model1->trainEM(samples1, noArray(), noArray(), noArray()); + + // confusion matrices + Mat_ trainCM(2, 2, 0); + Mat_ testCM(2, 2, 0); + const double lambda = 1.; + for(int i = 0; i < samples.rows; i++) + { + Mat sample = samples.row(i); + double sampleLogLikelihoods0 = model0->predict2(sample, noArray())[0]; + double sampleLogLikelihoods1 = model1->predict2(sample, noArray())[0]; + int classID = (sampleLogLikelihoods0 >= lambda * sampleLogLikelihoods1) ? 
0 : 1; + int resp = (int)responses.at(i); + EXPECT_TRUE(resp == 0 || resp == 1); + if(trainSamplesMask[i]) + trainCM(resp, classID)++; + else + testCM(resp, classID)++; + } + EXPECT_LE((double)(trainCM(1,0) + trainCM(0,1)) / trainSamplesCount, 0.23); + EXPECT_LE((double)(testCM(1,0) + testCM(0,1)) / testSamplesCount, 0.26); +} + +}} // namespace diff --git a/modules/ml/test/test_kmeans.cpp b/modules/ml/test/test_kmeans.cpp new file mode 100644 index 00000000000..153ed642d3b --- /dev/null +++ b/modules/ml/test/test_kmeans.cpp @@ -0,0 +1,53 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +TEST(ML_KMeans, accuracy) +{ + const int iters = 100; + int sizesArr[] = { 5000, 7000, 8000 }; + int pointsCount = sizesArr[0]+ sizesArr[1] + sizesArr[2]; + + Mat data( pointsCount, 2, CV_32FC1 ), labels; + vector sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) ); + Mat means; + vector covs; + defaultDistribs( means, covs ); + generateData( data, labels, sizes, means, covs, CV_32FC1, CV_32SC1 ); + TermCriteria termCriteria( TermCriteria::COUNT, iters, 0.0); + + { + SCOPED_TRACE("KMEANS_PP_CENTERS"); + float err = 1000; + Mat bestLabels; + kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_PP_CENTERS, noArray() ); + EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err , false )); + EXPECT_LE(err, 0.01f); + } + { + SCOPED_TRACE("KMEANS_RANDOM_CENTERS"); + float err = 1000; + Mat bestLabels; + kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_RANDOM_CENTERS, noArray() ); + EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err, false )); + EXPECT_LE(err, 0.01f); + } + { + SCOPED_TRACE("KMEANS_USE_INITIAL_LABELS"); + float err = 1000; + Mat bestLabels; + labels.copyTo( bestLabels ); + RNG &rng = cv::theRNG(); + for( int i = 0; i < 0.5f * pointsCount; i++ ) + bestLabels.at( rng.next() % pointsCount, 0 ) = rng.next() % 3; + kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_USE_INITIAL_LABELS, noArray() ); + EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err, false )); + EXPECT_LE(err, 0.01f); + } +} + +}} // namespace diff --git a/modules/ml/test/test_knearest.cpp b/modules/ml/test/test_knearest.cpp new file mode 100644 index 00000000000..80baed96266 --- /dev/null +++ b/modules/ml/test/test_knearest.cpp @@ -0,0 +1,112 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+ +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +using cv::ml::TrainData; +using cv::ml::EM; +using cv::ml::KNearest; + +TEST(ML_KNearest, accuracy) +{ + int sizesArr[] = { 500, 700, 800 }; + int pointsCount = sizesArr[0]+ sizesArr[1] + sizesArr[2]; + + Mat trainData( pointsCount, 2, CV_32FC1 ), trainLabels; + vector sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) ); + Mat means; + vector covs; + defaultDistribs( means, covs ); + generateData( trainData, trainLabels, sizes, means, covs, CV_32FC1, CV_32FC1 ); + + Mat testData( pointsCount, 2, CV_32FC1 ); + Mat testLabels; + generateData( testData, testLabels, sizes, means, covs, CV_32FC1, CV_32FC1 ); + + { + SCOPED_TRACE("Default"); + Mat bestLabels; + float err = 1000; + Ptr knn = KNearest::create(); + knn->train(trainData, ml::ROW_SAMPLE, trainLabels); + knn->findNearest(testData, 4, bestLabels); + EXPECT_TRUE(calcErr( bestLabels, testLabels, sizes, err, true )); + EXPECT_LE(err, 0.01f); + } + { + SCOPED_TRACE("KDTree"); + Mat neighborIndexes; + float err = 1000; + Ptr knn = KNearest::create(); + knn->setAlgorithmType(KNearest::KDTREE); + knn->train(trainData, ml::ROW_SAMPLE, trainLabels); + knn->findNearest(testData, 4, neighborIndexes); + Mat bestLabels; + // The output of the KDTree are the neighbor indexes, not actual class labels + // so we need to do some extra work to get actual predictions + for(int row_num = 0; row_num < neighborIndexes.rows; ++row_num){ + vector labels; + for(int index = 0; index < neighborIndexes.row(row_num).cols; ++index) { + labels.push_back(trainLabels.at(neighborIndexes.row(row_num).at(0, index) , 0)); + } + // computing the mode of the output class predictions to determine overall prediction + std::vector histogram(3,0); + for( int i=0; i<3; ++i ) + ++histogram[ static_cast(labels[i]) ]; + int bestLabel = static_cast(std::max_element( histogram.begin(), histogram.end() ) - histogram.begin()); + bestLabels.push_back(bestLabel); + } + bestLabels.convertTo(bestLabels, testLabels.type()); + EXPECT_TRUE(calcErr( bestLabels, testLabels, sizes, err, true )); + EXPECT_LE(err, 0.01f); + } +} + +TEST(ML_KNearest, regression_12347) +{ + Mat xTrainData = (Mat_(5,2) << 1, 1.1, 1.1, 1, 2, 2, 2.1, 2, 2.1, 2.1); + Mat yTrainLabels = (Mat_(5,1) << 1, 1, 2, 2, 2); + Ptr knn = KNearest::create(); + knn->train(xTrainData, ml::ROW_SAMPLE, yTrainLabels); + + Mat xTestData = (Mat_(2,2) << 1.1, 1.1, 2, 2.2); + Mat zBestLabels, neighbours, dist; + // check output shapes: + int K = 16, Kexp = std::min(K, xTrainData.rows); + knn->findNearest(xTestData, K, zBestLabels, neighbours, dist); + EXPECT_EQ(xTestData.rows, zBestLabels.rows); + EXPECT_EQ(neighbours.cols, Kexp); + EXPECT_EQ(dist.cols, Kexp); + // see if the result is still correct: + K = 2; + knn->findNearest(xTestData, K, zBestLabels, neighbours, dist); + EXPECT_EQ(1, zBestLabels.at(0,0)); + EXPECT_EQ(2, zBestLabels.at(1,0)); +} + +TEST(ML_KNearest, bug_11877) +{ + Mat trainData = (Mat_(5,2) << 3, 3, 3, 3, 4, 4, 4, 4, 4, 4); + Mat trainLabels = (Mat_(5,1) << 0, 0, 1, 1, 1); + + Ptr knnKdt = KNearest::create(); + knnKdt->setAlgorithmType(KNearest::KDTREE); + knnKdt->setIsClassifier(true); + + knnKdt->train(trainData, ml::ROW_SAMPLE, trainLabels); + + Mat testData = (Mat_(2,2) << 3.1, 3.1, 4, 4.1); + Mat testLabels = (Mat_(2,1) << 0, 1); + Mat result; + + knnKdt->findNearest(testData, 1, result); + + EXPECT_EQ(1, int(result.at(0, 0))); + EXPECT_EQ(2, int(result.at(1, 0))); + EXPECT_EQ(0, trainLabels.at(result.at(0, 0), 0)); +} + +}} 
// namespace diff --git a/modules/ml/test/test_lr.cpp b/modules/ml/test/test_lr.cpp new file mode 100644 index 00000000000..ec77fcbddab --- /dev/null +++ b/modules/ml/test/test_lr.cpp @@ -0,0 +1,81 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com + +// +// Test data uses subset of data from the popular Iris Dataset (1936): +// - http://archive.ics.uci.edu/ml/datasets/Iris +// - https://en.wikipedia.org/wiki/Iris_flower_data_set +// + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +TEST(ML_LR, accuracy) +{ + std::string dataFileName = findDataFile("iris.data"); + Ptr tdata = TrainData::loadFromCSV(dataFileName, 0); + ASSERT_FALSE(tdata.empty()); + + Ptr p = LogisticRegression::create(); + p->setLearningRate(1.0); + p->setIterations(10001); + p->setRegularization(LogisticRegression::REG_L2); + p->setTrainMethod(LogisticRegression::BATCH); + p->setMiniBatchSize(10); + p->train(tdata); + + Mat responses; + p->predict(tdata->getSamples(), responses); + + float error = 1000; + EXPECT_TRUE(calculateError(responses, tdata->getResponses(), error)); + EXPECT_LE(error, 0.05f); +} + +//================================================================================================== + +TEST(ML_LR, save_load) +{ + string dataFileName = findDataFile("iris.data"); + Ptr tdata = TrainData::loadFromCSV(dataFileName, 0); + ASSERT_FALSE(tdata.empty()); + Mat responses1, responses2; + Mat learnt_mat1, learnt_mat2; + String filename = tempfile(".xml"); + { + Ptr lr1 = LogisticRegression::create(); + lr1->setLearningRate(1.0); + lr1->setIterations(10001); + lr1->setRegularization(LogisticRegression::REG_L2); + lr1->setTrainMethod(LogisticRegression::BATCH); + lr1->setMiniBatchSize(10); + ASSERT_NO_THROW(lr1->train(tdata)); + ASSERT_NO_THROW(lr1->predict(tdata->getSamples(), responses1)); + ASSERT_NO_THROW(lr1->save(filename)); + learnt_mat1 = lr1->get_learnt_thetas(); + } + { + Ptr lr2; + ASSERT_NO_THROW(lr2 = Algorithm::load(filename)); + ASSERT_NO_THROW(lr2->predict(tdata->getSamples(), responses2)); + learnt_mat2 = lr2->get_learnt_thetas(); + } + // compare difference in prediction outputs and stored inputs + EXPECT_MAT_NEAR(responses1, responses2, 0.f); + + Mat comp_learnt_mats; + comp_learnt_mats = (learnt_mat1 == learnt_mat2); + comp_learnt_mats = comp_learnt_mats.reshape(1, comp_learnt_mats.rows*comp_learnt_mats.cols); + comp_learnt_mats.convertTo(comp_learnt_mats, CV_32S); + comp_learnt_mats = comp_learnt_mats/255; + // check if there is any difference between computed learnt mat and retrieved mat + EXPECT_EQ(comp_learnt_mats.rows, sum(comp_learnt_mats)[0]); + + remove( filename.c_str() ); +} + +}} // namespace diff --git a/modules/ml/test/test_main.cpp b/modules/ml/test/test_main.cpp new file mode 100644 index 00000000000..aab717ee519 --- /dev/null +++ b/modules/ml/test/test_main.cpp @@ -0,0 +1,10 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#include "test_precomp.hpp" + +#if defined(HAVE_HPX) + #include +#endif + +CV_TEST_MAIN("ml") diff --git a/modules/ml/test/test_mltests.cpp b/modules/ml/test/test_mltests.cpp new file mode 100644 index 00000000000..a67f6b0bf27 --- /dev/null +++ b/modules/ml/test/test_mltests.cpp @@ -0,0 +1,373 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +struct DatasetDesc +{ + string name; + int resp_idx; + int train_count; + int cat_num; + string type_desc; +public: + Ptr load() + { + string filename = findDataFile(name + ".data"); + Ptr data = TrainData::loadFromCSV(filename, 0, resp_idx, resp_idx + 1, type_desc); + data->setTrainTestSplit(train_count); + data->shuffleTrainTest(); + return data; + } +}; + +// see testdata/ml/protocol.txt (?) +DatasetDesc datasets[] = { + { "mushroom", 0, 4000, 16, "cat" }, + { "adult", 14, 22561, 16, "ord[0,2,4,10-12],cat[1,3,5-9,13,14]" }, + { "vehicle", 18, 761, 4, "ord[0-17],cat[18]" }, + { "abalone", 8, 3133, 16, "ord[1-8],cat[0]" }, + { "ringnorm", 20, 300, 2, "ord[0-19],cat[20]" }, + { "spambase", 57, 3221, 3, "ord[0-56],cat[57]" }, + { "waveform", 21, 300, 3, "ord[0-20],cat[21]" }, + { "elevators", 18, 5000, 0, "ord" }, + { "letter", 16, 10000, 26, "ord[0-15],cat[16]" }, + { "twonorm", 20, 300, 3, "ord[0-19],cat[20]" }, + { "poletelecomm", 48, 2500, 0, "ord" }, +}; + +static DatasetDesc & getDataset(const string & name) +{ + const int sz = sizeof(datasets)/sizeof(datasets[0]); + for (int i = 0; i < sz; ++i) + { + DatasetDesc & desc = datasets[i]; + if (desc.name == name) + return desc; + } + CV_Error(Error::StsInternal, ""); +} + +//================================================================================================== + +// interfaces and templates + +template string modelName() { return "Unknown"; } +template Ptr tuneModel(const DatasetDesc &, Ptr m) { return m; } + +struct IModelFactory +{ + virtual Ptr createNew(const DatasetDesc &dataset) const = 0; + virtual Ptr loadFromFile(const string &filename) const = 0; + virtual string name() const = 0; + virtual ~IModelFactory() {} +}; + +template +struct ModelFactory : public IModelFactory +{ + Ptr createNew(const DatasetDesc &dataset) const CV_OVERRIDE + { + return tuneModel(dataset, T::create()); + } + Ptr loadFromFile(const string & filename) const CV_OVERRIDE + { + return T::load(filename); + } + string name() const CV_OVERRIDE { return modelName(); } +}; + +// implementation + +template <> string modelName() { return "NormalBayesClassifier"; } +template <> string modelName() { return "DTrees"; } +template <> string modelName() { return "KNearest"; } +template <> string modelName() { return "RTrees"; } +template <> string modelName() { return "SVMSGD"; } + +template<> Ptr tuneModel(const DatasetDesc &dataset, Ptr m) +{ + m->setMaxDepth(10); + m->setMinSampleCount(2); + m->setRegressionAccuracy(0); + m->setUseSurrogates(false); + m->setCVFolds(0); + m->setUse1SERule(false); + m->setTruncatePrunedTree(false); + m->setPriors(Mat()); + m->setMaxCategories(dataset.cat_num); + return m; +} + +template<> Ptr tuneModel(const DatasetDesc &dataset, Ptr m) +{ + m->setMaxDepth(20); + m->setMinSampleCount(2); + m->setRegressionAccuracy(0); + m->setUseSurrogates(false); + m->setPriors(Mat()); + m->setCalculateVarImportance(true); + m->setActiveVarCount(0); + 
m->setTermCriteria(TermCriteria(TermCriteria::COUNT, 100, 0.0)); + m->setMaxCategories(dataset.cat_num); + return m; +} + +template<> Ptr tuneModel(const DatasetDesc &, Ptr m) +{ + m->setSvmsgdType(SVMSGD::ASGD); + m->setMarginType(SVMSGD::SOFT_MARGIN); + m->setMarginRegularization(0.00001f); + m->setInitialStepSize(0.1f); + m->setStepDecreasingPower(0.75); + m->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 10000, 0.00001)); + return m; +} + +template <> +struct ModelFactory : public IModelFactory +{ + ModelFactory(int boostType_) : boostType(boostType_) {} + Ptr createNew(const DatasetDesc &) const CV_OVERRIDE + { + Ptr m = Boost::create(); + m->setBoostType(boostType); + m->setWeakCount(20); + m->setWeightTrimRate(0.95); + m->setMaxDepth(4); + m->setUseSurrogates(false); + m->setPriors(Mat()); + return m; + } + Ptr loadFromFile(const string &filename) const { return Boost::load(filename); } + string name() const CV_OVERRIDE { return "Boost"; } + int boostType; +}; + +template <> +struct ModelFactory : public IModelFactory +{ + ModelFactory(int svmType_, int kernelType_, double gamma_, double c_, double nu_) + : svmType(svmType_), kernelType(kernelType_), gamma(gamma_), c(c_), nu(nu_) {} + Ptr createNew(const DatasetDesc &) const CV_OVERRIDE + { + Ptr m = SVM::create(); + m->setType(svmType); + m->setKernel(kernelType); + m->setDegree(0); + m->setGamma(gamma); + m->setCoef0(0); + m->setC(c); + m->setNu(nu); + m->setP(0); + return m; + } + Ptr loadFromFile(const string &filename) const { return SVM::load(filename); } + string name() const CV_OVERRIDE { return "SVM"; } + int svmType; + int kernelType; + double gamma; + double c; + double nu; +}; + +//================================================================================================== + +struct ML_Params_t +{ + Ptr factory; + string dataset; + float mean; + float sigma; +}; + +void PrintTo(const ML_Params_t & param, std::ostream *os) +{ + *os << param.factory->name() << "_" << param.dataset; +} + +ML_Params_t ML_Params_List[] = { + { makePtr< ModelFactory >(), "mushroom", 0.027401f, 0.036236f }, + { makePtr< ModelFactory >(), "adult", 14.279000f, 0.354323f }, + { makePtr< ModelFactory >(), "vehicle", 29.761162f, 4.823927f }, + { makePtr< ModelFactory >(), "abalone", 7.297540f, 0.510058f }, + { makePtr< ModelFactory >(Boost::REAL), "adult", 13.894001f, 0.337763f }, + { makePtr< ModelFactory >(Boost::DISCRETE), "mushroom", 0.007274f, 0.029400f }, + { makePtr< ModelFactory >(Boost::LOGIT), "ringnorm", 9.993943f, 0.860256f }, + { makePtr< ModelFactory >(Boost::GENTLE), "spambase", 5.404347f, 0.581716f }, + { makePtr< ModelFactory >(), "waveform", 17.100641f, 0.630052f }, + { makePtr< ModelFactory >(), "mushroom", 0.006547f, 0.028248f }, + { makePtr< ModelFactory >(), "adult", 13.5129f, 0.266065f }, + { makePtr< ModelFactory >(), "abalone", 4.745199f, 0.282112f }, + { makePtr< ModelFactory >(), "vehicle", 24.964712f, 4.469287f }, + { makePtr< ModelFactory >(), "letter", 5.334999f, 0.261142f }, + { makePtr< ModelFactory >(), "ringnorm", 6.248733f, 0.904713f }, + { makePtr< ModelFactory >(), "twonorm", 4.506479f, 0.449739f }, + { makePtr< ModelFactory >(), "spambase", 5.243477f, 0.54232f }, +}; + +typedef testing::TestWithParam ML_Params; + +TEST_P(ML_Params, accuracy) +{ + const ML_Params_t & param = GetParam(); + DatasetDesc &dataset = getDataset(param.dataset); + Ptr data = dataset.load(); + ASSERT_TRUE(data); + ASSERT_TRUE(data->getNSamples() > 0); + + Ptr m = param.factory->createNew(dataset); + 
ASSERT_TRUE(m); + ASSERT_TRUE(m->train(data, 0)); + + float err = m->calcError(data, true, noArray()); + EXPECT_NEAR(err, param.mean, 4 * param.sigma); +} + +INSTANTIATE_TEST_CASE_P(/**/, ML_Params, testing::ValuesIn(ML_Params_List)); + + +//================================================================================================== + +struct ML_SL_Params_t +{ + Ptr factory; + string dataset; +}; + +void PrintTo(const ML_SL_Params_t & param, std::ostream *os) +{ + *os << param.factory->name() << "_" << param.dataset; +} + +ML_SL_Params_t ML_SL_Params_List[] = { + { makePtr< ModelFactory >(), "waveform" }, + { makePtr< ModelFactory >(), "waveform" }, + { makePtr< ModelFactory >(), "abalone" }, + { makePtr< ModelFactory >(SVM::C_SVC, SVM::LINEAR, 1, 0.5, 0), "waveform" }, + { makePtr< ModelFactory >(SVM::NU_SVR, SVM::RBF, 0.00225, 62.5, 0.03), "poletelecomm" }, + { makePtr< ModelFactory >(), "mushroom" }, + { makePtr< ModelFactory >(), "abalone" }, + { makePtr< ModelFactory >(Boost::REAL), "adult" }, + { makePtr< ModelFactory >(), "waveform" }, + { makePtr< ModelFactory >(), "abalone" }, + { makePtr< ModelFactory >(), "waveform" }, +}; + +typedef testing::TestWithParam ML_SL_Params; + +TEST_P(ML_SL_Params, save_load) +{ + const ML_SL_Params_t & param = GetParam(); + + DatasetDesc &dataset = getDataset(param.dataset); + Ptr data = dataset.load(); + ASSERT_TRUE(data); + ASSERT_TRUE(data->getNSamples() > 0); + + Mat responses1, responses2; + string file1 = tempfile(".json.gz"); + string file2 = tempfile(".json.gz"); + { + Ptr m = param.factory->createNew(dataset); + ASSERT_TRUE(m); + ASSERT_TRUE(m->train(data, 0)); + m->calcError(data, true, responses1); + m->save(file1 + "?base64"); + } + { + Ptr m = param.factory->loadFromFile(file1); + ASSERT_TRUE(m); + m->calcError(data, true, responses2); + m->save(file2 + "?base64"); + } + EXPECT_MAT_NEAR(responses1, responses2, 0.0); + { + ifstream f1(file1.c_str(), std::ios_base::binary); + ifstream f2(file2.c_str(), std::ios_base::binary); + ASSERT_TRUE(f1.is_open() && f2.is_open()); + const size_t BUFSZ = 10000; + vector buf1(BUFSZ, 0); + vector buf2(BUFSZ, 0); + while (true) + { + f1.read(&buf1[0], BUFSZ); + f2.read(&buf2[0], BUFSZ); + EXPECT_EQ(f1.gcount(), f2.gcount()); + EXPECT_EQ(f1.eof(), f2.eof()); + if (!f1.good() || !f2.good() || f1.gcount() != f2.gcount()) + break; + ASSERT_EQ(buf1, buf2); + } + } + remove(file1.c_str()); + remove(file2.c_str()); +} + +INSTANTIATE_TEST_CASE_P(/**/, ML_SL_Params, testing::ValuesIn(ML_SL_Params_List)); + +//================================================================================================== + +TEST(TrainDataGet, layout_ROW_SAMPLE) // Details: #12236 +{ + cv::Mat test = cv::Mat::ones(150, 30, CV_32FC1) * 2; + test.col(3) += Scalar::all(3); + cv::Mat labels = cv::Mat::ones(150, 3, CV_32SC1) * 5; + labels.col(1) += 1; + cv::Ptr train_data = cv::ml::TrainData::create(test, cv::ml::ROW_SAMPLE, labels); + train_data->setTrainTestSplitRatio(0.9); + + Mat tidx = train_data->getTestSampleIdx(); + EXPECT_EQ((size_t)15, tidx.total()); + + Mat tresp = train_data->getTestResponses(); + EXPECT_EQ(15, tresp.rows); + EXPECT_EQ(labels.cols, tresp.cols); + EXPECT_EQ(5, tresp.at(0, 0)) << tresp; + EXPECT_EQ(6, tresp.at(0, 1)) << tresp; + EXPECT_EQ(6, tresp.at(14, 1)) << tresp; + EXPECT_EQ(5, tresp.at(14, 2)) << tresp; + + Mat tsamples = train_data->getTestSamples(); + EXPECT_EQ(15, tsamples.rows); + EXPECT_EQ(test.cols, tsamples.cols); + EXPECT_EQ(2, tsamples.at(0, 0)) << tsamples; + EXPECT_EQ(5, 
tsamples.at(0, 3)) << tsamples; + EXPECT_EQ(2, tsamples.at(14, test.cols - 1)) << tsamples; + EXPECT_EQ(5, tsamples.at(14, 3)) << tsamples; +} + +TEST(TrainDataGet, layout_COL_SAMPLE) // Details: #12236 +{ + cv::Mat test = cv::Mat::ones(30, 150, CV_32FC1) * 3; + test.row(3) += Scalar::all(3); + cv::Mat labels = cv::Mat::ones(3, 150, CV_32SC1) * 5; + labels.row(1) += 1; + cv::Ptr train_data = cv::ml::TrainData::create(test, cv::ml::COL_SAMPLE, labels); + train_data->setTrainTestSplitRatio(0.9); + + Mat tidx = train_data->getTestSampleIdx(); + EXPECT_EQ((size_t)15, tidx.total()); + + Mat tresp = train_data->getTestResponses(); // always row-based, transposed + EXPECT_EQ(15, tresp.rows); + EXPECT_EQ(labels.rows, tresp.cols); + EXPECT_EQ(5, tresp.at(0, 0)) << tresp; + EXPECT_EQ(6, tresp.at(0, 1)) << tresp; + EXPECT_EQ(6, tresp.at(14, 1)) << tresp; + EXPECT_EQ(5, tresp.at(14, 2)) << tresp; + + + Mat tsamples = train_data->getTestSamples(); + EXPECT_EQ(15, tsamples.cols); + EXPECT_EQ(test.rows, tsamples.rows); + EXPECT_EQ(3, tsamples.at(0, 0)) << tsamples; + EXPECT_EQ(6, tsamples.at(3, 0)) << tsamples; + EXPECT_EQ(6, tsamples.at(3, 14)) << tsamples; + EXPECT_EQ(3, tsamples.at(test.rows - 1, 14)) << tsamples; +} + +}} // namespace diff --git a/modules/ml/test/test_precomp.hpp b/modules/ml/test/test_precomp.hpp new file mode 100644 index 00000000000..380e6126169 --- /dev/null +++ b/modules/ml/test/test_precomp.hpp @@ -0,0 +1,50 @@ +#ifndef __OPENCV_TEST_PRECOMP_HPP__ +#define __OPENCV_TEST_PRECOMP_HPP__ + +#include "opencv2/ts.hpp" +#include // EXPECT_MAT_NEAR +#include "opencv2/ml.hpp" + +#include +using std::ifstream; + +namespace opencv_test { + +using namespace cv::ml; + +#define CV_NBAYES "nbayes" +#define CV_KNEAREST "knearest" +#define CV_SVM "svm" +#define CV_EM "em" +#define CV_ANN "ann" +#define CV_DTREE "dtree" +#define CV_BOOST "boost" +#define CV_RTREES "rtrees" +#define CV_ERTREES "ertrees" +#define CV_SVMSGD "svmsgd" + +using cv::Ptr; +using cv::ml::StatModel; +using cv::ml::TrainData; +using cv::ml::NormalBayesClassifier; +using cv::ml::SVM; +using cv::ml::KNearest; +using cv::ml::ParamGrid; +using cv::ml::ANN_MLP; +using cv::ml::DTrees; +using cv::ml::Boost; +using cv::ml::RTrees; +using cv::ml::SVMSGD; + +void defaultDistribs( Mat& means, vector& covs, int type=CV_32FC1 ); +void generateData( Mat& data, Mat& labels, const vector& sizes, const Mat& _means, const vector& covs, int dataType, int labelType ); +int maxIdx( const vector& count ); +bool getLabelsMap( const Mat& labels, const vector& sizes, vector& labelsMap, bool checkClusterUniq=true ); +bool calcErr( const Mat& labels, const Mat& origLabels, const vector& sizes, float& err, bool labelsEquivalent = true, bool checkClusterUniq=true ); + +// used in LR test +bool calculateError( const Mat& _p_labels, const Mat& _o_labels, float& error); + +} // namespace + +#endif diff --git a/modules/ml/test/test_rtrees.cpp b/modules/ml/test/test_rtrees.cpp new file mode 100644 index 00000000000..5a4fb34e744 --- /dev/null +++ b/modules/ml/test/test_rtrees.cpp @@ -0,0 +1,119 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
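+
+// Tests for cv::ml::RTrees: vote counting via getVotes(), sample-weight handling for regression and
+// classification (issue 11142), and feature-count validation at prediction time (issue 12974).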
+ +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +TEST(ML_RTrees, getVotes) +{ + int n = 12; + int count, i; + int label_size = 3; + int predicted_class = 0; + int max_votes = -1; + int val; + // RTrees for classification + Ptr rt = cv::ml::RTrees::create(); + + //data + Mat data(n, 4, CV_32F); + randu(data, 0, 10); + + //labels + Mat labels = (Mat_(n,1) << 0,0,0,0, 1,1,1,1, 2,2,2,2); + + rt->train(data, ml::ROW_SAMPLE, labels); + + //run function + Mat test(1, 4, CV_32F); + Mat result; + randu(test, 0, 10); + rt->getVotes(test, result, 0); + + //count vote amount and find highest vote + count = 0; + const int* result_row = result.ptr(1); + for( i = 0; i < label_size; i++ ) + { + val = result_row[i]; + //predicted_class = max_votes < val? i; + if( max_votes < val ) + { + max_votes = val; + predicted_class = i; + } + count += val; + } + + EXPECT_EQ(count, (int)rt->getRoots().size()); + EXPECT_EQ(result.at(0, predicted_class), rt->predict(test)); +} + +TEST(ML_RTrees, 11142_sample_weights_regression) +{ + int n = 3; + // RTrees for regression + Ptr rt = cv::ml::RTrees::create(); + //simple regression problem of x -> 2x + Mat data = (Mat_(n,1) << 1, 2, 3); + Mat values = (Mat_(n,1) << 2, 4, 6); + Mat weights = (Mat_(n, 1) << 10, 10, 10); + + Ptr trainData = TrainData::create(data, ml::ROW_SAMPLE, values); + rt->train(trainData); + double error_without_weights = round(rt->getOOBError()); + rt->clear(); + Ptr trainDataWithWeights = TrainData::create(data, ml::ROW_SAMPLE, values, Mat(), Mat(), weights ); + rt->train(trainDataWithWeights); + double error_with_weights = round(rt->getOOBError()); + // error with weights should be larger than error without weights + EXPECT_GE(error_with_weights, error_without_weights); +} + +TEST(ML_RTrees, 11142_sample_weights_classification) +{ + int n = 12; + // RTrees for classification + Ptr rt = cv::ml::RTrees::create(); + + Mat data(n, 4, CV_32F); + randu(data, 0, 10); + Mat labels = (Mat_(n,1) << 0,0,0,0, 1,1,1,1, 2,2,2,2); + Mat weights = (Mat_(n, 1) << 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10); + + rt->train(data, ml::ROW_SAMPLE, labels); + rt->clear(); + double error_without_weights = round(rt->getOOBError()); + Ptr trainDataWithWeights = TrainData::create(data, ml::ROW_SAMPLE, labels, Mat(), Mat(), weights ); + rt->train(data, ml::ROW_SAMPLE, labels); + double error_with_weights = round(rt->getOOBError()); + std::cout << error_without_weights << std::endl; + std::cout << error_with_weights << std::endl; + // error with weights should be larger than error without weights + EXPECT_GE(error_with_weights, error_without_weights); +} + +TEST(ML_RTrees, bug_12974_throw_exception_when_predict_different_feature_count) +{ + int numFeatures = 5; + // create a 5 feature dataset and train the model + cv::Ptr model = RTrees::create(); + Mat samples(10, numFeatures, CV_32F); + randu(samples, 0, 10); + Mat labels = (Mat_(10,1) << 0,0,0,0,0,1,1,1,1,1); + cv::Ptr trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, labels); + model->train(trainData); + // try to predict on data which have fewer features - this should throw an exception + for(int i = 1; i < numFeatures - 1; ++i) { + Mat test(1, i, CV_32FC1); + ASSERT_THROW(model->predict(test), Exception); + } + // try to predict on data which have more features - this should also throw an exception + Mat test(1, numFeatures + 1, CV_32FC1); + ASSERT_THROW(model->predict(test), Exception); +} + + +}} // namespace diff --git a/modules/ml/test/test_save_load.cpp 
b/modules/ml/test/test_save_load.cpp new file mode 100644 index 00000000000..201e6303f5c --- /dev/null +++ b/modules/ml/test/test_save_load.cpp @@ -0,0 +1,107 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + + +void randomFillCategories(const string & filename, Mat & input) +{ + Mat catMap; + Mat catCount; + std::vector varTypes; + + FileStorage fs(filename, FileStorage::READ); + FileNode root = fs.getFirstTopLevelNode(); + root["cat_map"] >> catMap; + root["cat_count"] >> catCount; + root["var_type"] >> varTypes; + + int offset = 0; + int countOffset = 0; + uint var = 0, varCount = (uint)varTypes.size(); + for (; var < varCount; ++var) + { + if (varTypes[var] == ml::VAR_CATEGORICAL) + { + int size = catCount.at(0, countOffset); + for (int row = 0; row < input.rows; ++row) + { + int randomChosenIndex = offset + ((uint)cv::theRNG()) % size; + int value = catMap.at(0, randomChosenIndex); + input.at(row, var) = (float)value; + } + offset += size; + ++countOffset; + } + } +} + +//================================================================================================== + +typedef tuple ML_Legacy_Param; +typedef testing::TestWithParam< ML_Legacy_Param > ML_Legacy_Params; + +TEST_P(ML_Legacy_Params, legacy_load) +{ + const string modelName = get<0>(GetParam()); + const string dataName = get<1>(GetParam()); + const string filename = findDataFile("legacy/" + modelName + "_" + dataName + ".xml"); + const bool isTree = modelName == CV_BOOST || modelName == CV_DTREE || modelName == CV_RTREES; + + Ptr model; + if (modelName == CV_BOOST) + model = Algorithm::load(filename); + else if (modelName == CV_ANN) + model = Algorithm::load(filename); + else if (modelName == CV_DTREE) + model = Algorithm::load(filename); + else if (modelName == CV_NBAYES) + model = Algorithm::load(filename); + else if (modelName == CV_SVM) + model = Algorithm::load(filename); + else if (modelName == CV_RTREES) + model = Algorithm::load(filename); + else if (modelName == CV_SVMSGD) + model = Algorithm::load(filename); + ASSERT_TRUE(model); + + Mat input = Mat(isTree ? 10 : 1, model->getVarCount(), CV_32F); + cv::theRNG().fill(input, RNG::UNIFORM, 0, 40); + + if (isTree) + randomFillCategories(filename, input); + + Mat output; + EXPECT_NO_THROW(model->predict(input, output, StatModel::RAW_OUTPUT | (isTree ? 
DTrees::PREDICT_SUM : 0))); + // just check if no internal assertions or errors thrown +} + +ML_Legacy_Param param_list[] = { + ML_Legacy_Param(CV_ANN, "waveform"), + ML_Legacy_Param(CV_BOOST, "adult"), + ML_Legacy_Param(CV_BOOST, "1"), + ML_Legacy_Param(CV_BOOST, "2"), + ML_Legacy_Param(CV_BOOST, "3"), + ML_Legacy_Param(CV_DTREE, "abalone"), + ML_Legacy_Param(CV_DTREE, "mushroom"), + ML_Legacy_Param(CV_NBAYES, "waveform"), + ML_Legacy_Param(CV_SVM, "poletelecomm"), + ML_Legacy_Param(CV_SVM, "waveform"), + ML_Legacy_Param(CV_RTREES, "waveform"), + ML_Legacy_Param(CV_SVMSGD, "waveform"), +}; + +INSTANTIATE_TEST_CASE_P(/**/, ML_Legacy_Params, testing::ValuesIn(param_list)); + +/*TEST(ML_SVM, throw_exception_when_save_untrained_model) +{ + Ptr svm; + string filename = tempfile("svm.xml"); + ASSERT_THROW(svm.save(filename.c_str()), Exception); + remove(filename.c_str()); +}*/ + +}} // namespace diff --git a/modules/ml/test/test_svmsgd.cpp b/modules/ml/test/test_svmsgd.cpp new file mode 100644 index 00000000000..038fca0d409 --- /dev/null +++ b/modules/ml/test/test_svmsgd.cpp @@ -0,0 +1,156 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +static const int TEST_VALUE_LIMIT = 500; +enum +{ + UNIFORM_SAME_SCALE, + UNIFORM_DIFFERENT_SCALES +}; + +CV_ENUM(SVMSGD_TYPE, UNIFORM_SAME_SCALE, UNIFORM_DIFFERENT_SCALES) + +typedef std::vector< std::pair > BorderList; + +static void makeData(RNG &rng, int samplesCount, const Mat &weights, float shift, const BorderList & borders, Mat &samples, Mat & responses) +{ + int featureCount = weights.cols; + samples.create(samplesCount, featureCount, CV_32FC1); + for (int featureIndex = 0; featureIndex < featureCount; featureIndex++) + rng.fill(samples.col(featureIndex), RNG::UNIFORM, borders[featureIndex].first, borders[featureIndex].second); + responses.create(samplesCount, 1, CV_32FC1); + for (int i = 0 ; i < samplesCount; i++) + { + double res = samples.row(i).dot(weights) + shift; + responses.at(i) = res > 0 ? 
1.f : -1.f; + } +} + +//================================================================================================== + +typedef tuple ML_SVMSGD_Param; +typedef testing::TestWithParam ML_SVMSGD_Params; + +TEST_P(ML_SVMSGD_Params, scale_and_features) +{ + const int type = get<0>(GetParam()); + const int featureCount = get<1>(GetParam()); + const double precision = get<2>(GetParam()); + + RNG &rng = cv::theRNG(); + + Mat_ weights(1, featureCount); + rng.fill(weights, RNG::UNIFORM, -1, 1); + const float shift = static_cast(rng.uniform(-featureCount, featureCount)); + + BorderList borders; + float lowerLimit = -TEST_VALUE_LIMIT; + float upperLimit = TEST_VALUE_LIMIT; + if (type == UNIFORM_SAME_SCALE) + { + for (int featureIndex = 0; featureIndex < featureCount; featureIndex++) + borders.push_back(std::pair(lowerLimit, upperLimit)); + } + else if (type == UNIFORM_DIFFERENT_SCALES) + { + for (int featureIndex = 0; featureIndex < featureCount; featureIndex++) + { + int crit = rng.uniform(0, 2); + if (crit > 0) + borders.push_back(std::pair(lowerLimit, upperLimit)); + else + borders.push_back(std::pair(lowerLimit/1000, upperLimit/1000)); + } + } + ASSERT_FALSE(borders.empty()); + + Mat trainSamples; + Mat trainResponses; + int trainSamplesCount = 10000; + makeData(rng, trainSamplesCount, weights, shift, borders, trainSamples, trainResponses); + ASSERT_EQ(trainResponses.type(), CV_32FC1); + + Mat testSamples; + Mat testResponses; + int testSamplesCount = 100000; + makeData(rng, testSamplesCount, weights, shift, borders, testSamples, testResponses); + ASSERT_EQ(testResponses.type(), CV_32FC1); + + Ptr data = TrainData::create(trainSamples, cv::ml::ROW_SAMPLE, trainResponses); + ASSERT_TRUE(data); + + cv::Ptr svmsgd = SVMSGD::create(); + ASSERT_TRUE(svmsgd); + + svmsgd->train(data); + + Mat responses; + svmsgd->predict(testSamples, responses); + ASSERT_EQ(responses.type(), CV_32FC1); + ASSERT_EQ(responses.rows, testSamplesCount); + + int errCount = 0; + for (int i = 0; i < testSamplesCount; i++) + if (responses.at(i) * testResponses.at(i) < 0) + errCount++; + float err = (float)errCount / testSamplesCount; + EXPECT_LE(err, precision); +} + +ML_SVMSGD_Param params_list[] = { + ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 2, 0.01), + ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 5, 0.01), + ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 100, 0.02), + ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 2, 0.01), + ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 5, 0.01), + ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 100, 0.01), +}; + +INSTANTIATE_TEST_CASE_P(/**/, ML_SVMSGD_Params, testing::ValuesIn(params_list)); + +//================================================================================================== + +TEST(ML_SVMSGD, twoPoints) +{ + Mat samples(2, 2, CV_32FC1); + samples.at(0,0) = 0; + samples.at(0,1) = 0; + samples.at(1,0) = 1000; + samples.at(1,1) = 1; + + Mat responses(2, 1, CV_32FC1); + responses.at(0) = -1; + responses.at(1) = 1; + + cv::Ptr trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, responses); + + Mat realWeights(1, 2, CV_32FC1); + realWeights.at(0) = 1000; + realWeights.at(1) = 1; + + float realShift = -500000.5; + + float normRealWeights = static_cast(cv::norm(realWeights)); // TODO cvtest + realWeights /= normRealWeights; + realShift /= normRealWeights; + + cv::Ptr svmsgd = SVMSGD::create(); + svmsgd->setOptimalParameters(); + svmsgd->train( trainData ); + + Mat foundWeights = svmsgd->getWeights(); + float foundShift = svmsgd->getShift(); + + float normFoundWeights = 
static_cast(cv::norm(foundWeights)); // TODO cvtest + foundWeights /= normFoundWeights; + foundShift /= normFoundWeights; + EXPECT_LE(cv::norm(Mat(foundWeights - realWeights)), 0.001); // TODO cvtest + EXPECT_LE(std::abs((foundShift - realShift) / realShift), 0.05); +} + +}} // namespace diff --git a/modules/ml/test/test_svmtrainauto.cpp b/modules/ml/test/test_svmtrainauto.cpp new file mode 100644 index 00000000000..9d78762c4c1 --- /dev/null +++ b/modules/ml/test/test_svmtrainauto.cpp @@ -0,0 +1,164 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +using cv::ml::SVM; +using cv::ml::TrainData; + +static Ptr makeRandomData(int datasize) +{ + cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 ); + cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S ); + RNG &rng = cv::theRNG(); + for (int i = 0; i < datasize; ++i) + { + int response = rng.uniform(0, 2); // Random from {0, 1}. + samples.at( i, 0 ) = rng.uniform(0.f, 0.5f) + response * 0.5f; + samples.at( i, 1 ) = rng.uniform(0.f, 0.5f) + response * 0.5f; + responses.at( i, 0 ) = response; + } + return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses ); +} + +static Ptr makeCircleData(int datasize, float scale_factor, float radius) +{ + // Populate samples with data that can be split into two concentric circles + cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 ); + cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S ); + for (int i = 0; i < datasize; i+=2) + { + const float pi = 3.14159f; + const float angle_rads = (i/datasize) * pi; + const float x = radius * cos(angle_rads); + const float y = radius * cos(angle_rads); + + // Larger circle + samples.at( i, 0 ) = x; + samples.at( i, 1 ) = y; + responses.at( i, 0 ) = 0; + + // Smaller circle + samples.at( i + 1, 0 ) = x * scale_factor; + samples.at( i + 1, 1 ) = y * scale_factor; + responses.at( i + 1, 0 ) = 1; + } + return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses ); +} + +static Ptr makeRandomData2(int datasize) +{ + cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 ); + cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S ); + RNG &rng = cv::theRNG(); + for (int i = 0; i < datasize; ++i) + { + int response = rng.uniform(0, 2); // Random from {0, 1}. + samples.at( i, 0 ) = 0; + samples.at( i, 1 ) = (0.5f - response) * rng.uniform(0.f, 1.2f) + response; + responses.at( i, 0 ) = response; + } + return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses ); +} + +//================================================================================================== + +TEST(ML_SVM, trainauto) +{ + const int datasize = 100; + cv::Ptr data = makeRandomData(datasize); + ASSERT_TRUE(data); + cv::Ptr svm = SVM::create(); + ASSERT_TRUE(svm); + svm->trainAuto( data, 10 ); // 2-fold cross validation. 
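+    // makeRandomData() draws class 0 samples in [0, 0.5)^2 and class 1 samples in [0.5, 1)^2,
+    // so the probe points below should land well inside their respective class regions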
+ + float test_data0[2] = {0.25f, 0.25f}; + cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 ); + float result0 = svm->predict( test_point0 ); + float test_data1[2] = {0.75f, 0.75f}; + cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 ); + float result1 = svm->predict( test_point1 ); + + EXPECT_NEAR(result0, 0, 0.001); + EXPECT_NEAR(result1, 1, 0.001); +} + +TEST(ML_SVM, trainauto_sigmoid) +{ + const int datasize = 100; + const float scale_factor = 0.5; + const float radius = 2.0; + cv::Ptr data = makeCircleData(datasize, scale_factor, radius); + ASSERT_TRUE(data); + + cv::Ptr svm = SVM::create(); + ASSERT_TRUE(svm); + svm->setKernel(SVM::SIGMOID); + svm->setGamma(10.0); + svm->setCoef0(-10.0); + svm->trainAuto( data, 10 ); // 2-fold cross validation. + + float test_data0[2] = {radius, radius}; + cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 ); + EXPECT_FLOAT_EQ(svm->predict( test_point0 ), 0); + + float test_data1[2] = {scale_factor * radius, scale_factor * radius}; + cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 ); + EXPECT_FLOAT_EQ(svm->predict( test_point1 ), 1); +} + +TEST(ML_SVM, trainAuto_regression_5369) +{ + const int datasize = 100; + Ptr data = makeRandomData2(datasize); + cv::Ptr svm = SVM::create(); + svm->trainAuto( data, 10 ); // 2-fold cross validation. + + float test_data0[2] = {0.25f, 0.25f}; + cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 ); + float result0 = svm->predict( test_point0 ); + float test_data1[2] = {0.75f, 0.75f}; + cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 ); + float result1 = svm->predict( test_point1 ); + + EXPECT_EQ(0., result0); + EXPECT_EQ(1., result1); +} + +TEST(ML_SVM, getSupportVectors) +{ + // Set up training data + int labels[4] = {1, -1, -1, -1}; + float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} }; + Mat trainingDataMat(4, 2, CV_32FC1, trainingData); + Mat labelsMat(4, 1, CV_32SC1, labels); + + Ptr svm = SVM::create(); + ASSERT_TRUE(svm); + svm->setType(SVM::C_SVC); + svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6)); + + // Test retrieval of SVs and compressed SVs on linear SVM + svm->setKernel(SVM::LINEAR); + svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat); + + Mat sv = svm->getSupportVectors(); + EXPECT_EQ(1, sv.rows); // by default compressed SV returned + sv = svm->getUncompressedSupportVectors(); + EXPECT_EQ(3, sv.rows); + + // Test retrieval of SVs and compressed SVs on non-linear SVM + svm->setKernel(SVM::POLY); + svm->setDegree(2); + svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat); + + sv = svm->getSupportVectors(); + EXPECT_EQ(3, sv.rows); + sv = svm->getUncompressedSupportVectors(); + EXPECT_EQ(0, sv.rows); // inapplicable for non-linear SVMs +} + +}} // namespace diff --git a/modules/ml/test/test_utils.cpp b/modules/ml/test/test_utils.cpp new file mode 100644 index 00000000000..8717d9f301f --- /dev/null +++ b/modules/ml/test/test_utils.cpp @@ -0,0 +1,189 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
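+
+// Shared helpers declared in test_precomp.hpp: synthetic Gaussian-mixture data generation
+// (defaultDistribs, generateData) and label-matching / error-rate utilities
+// (maxIdx, getLabelsMap, calcErr, calculateError).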
+#include "test_precomp.hpp" + +namespace opencv_test { + +void defaultDistribs( Mat& means, vector& covs, int type) +{ + float mp0[] = {0.0f, 0.0f}, cp0[] = {0.67f, 0.0f, 0.0f, 0.67f}; + float mp1[] = {5.0f, 0.0f}, cp1[] = {1.0f, 0.0f, 0.0f, 1.0f}; + float mp2[] = {1.0f, 5.0f}, cp2[] = {1.0f, 0.0f, 0.0f, 1.0f}; + means.create(3, 2, type); + Mat m0( 1, 2, CV_32FC1, mp0 ), c0( 2, 2, CV_32FC1, cp0 ); + Mat m1( 1, 2, CV_32FC1, mp1 ), c1( 2, 2, CV_32FC1, cp1 ); + Mat m2( 1, 2, CV_32FC1, mp2 ), c2( 2, 2, CV_32FC1, cp2 ); + means.resize(3), covs.resize(3); + + Mat mr0 = means.row(0); + m0.convertTo(mr0, type); + c0.convertTo(covs[0], type); + + Mat mr1 = means.row(1); + m1.convertTo(mr1, type); + c1.convertTo(covs[1], type); + + Mat mr2 = means.row(2); + m2.convertTo(mr2, type); + c2.convertTo(covs[2], type); +} + +// generate points sets by normal distributions +void generateData( Mat& data, Mat& labels, const vector& sizes, const Mat& _means, const vector& covs, int dataType, int labelType ) +{ + vector::const_iterator sit = sizes.begin(); + int total = 0; + for( ; sit != sizes.end(); ++sit ) + total += *sit; + CV_Assert( _means.rows == (int)sizes.size() && covs.size() == sizes.size() ); + CV_Assert( !data.empty() && data.rows == total ); + CV_Assert( data.type() == dataType ); + + labels.create( data.rows, 1, labelType ); + + randn( data, Scalar::all(-1.0), Scalar::all(1.0) ); + vector means(sizes.size()); + for(int i = 0; i < _means.rows; i++) + means[i] = _means.row(i); + vector::const_iterator mit = means.begin(), cit = covs.begin(); + int bi, ei = 0; + sit = sizes.begin(); + for( int p = 0, l = 0; sit != sizes.end(); ++sit, ++mit, ++cit, l++ ) + { + bi = ei; + ei = bi + *sit; + CV_Assert( mit->rows == 1 && mit->cols == data.cols ); + CV_Assert( cit->rows == data.cols && cit->cols == data.cols ); + for( int i = bi; i < ei; i++, p++ ) + { + Mat r = data.row(i); + r = r * (*cit) + *mit; + if( labelType == CV_32FC1 ) + labels.at(p, 0) = (float)l; + else if( labelType == CV_32SC1 ) + labels.at(p, 0) = l; + else + { + CV_DbgAssert(0); + } + } + } +} + +int maxIdx( const vector& count ) +{ + int idx = -1; + int maxVal = -1; + vector::const_iterator it = count.begin(); + for( int i = 0; it != count.end(); ++it, i++ ) + { + if( *it > maxVal) + { + maxVal = *it; + idx = i; + } + } + CV_Assert( idx >= 0); + return idx; +} + +bool getLabelsMap( const Mat& labels, const vector& sizes, vector& labelsMap, bool checkClusterUniq) +{ + size_t total = 0, nclusters = sizes.size(); + for(size_t i = 0; i < sizes.size(); i++) + total += sizes[i]; + + CV_Assert( !labels.empty() ); + CV_Assert( labels.total() == total && (labels.cols == 1 || labels.rows == 1)); + CV_Assert( labels.type() == CV_32SC1 || labels.type() == CV_32FC1 ); + + bool isFlt = labels.type() == CV_32FC1; + + labelsMap.resize(nclusters); + + vector buzy(nclusters, false); + int startIndex = 0; + for( size_t clusterIndex = 0; clusterIndex < sizes.size(); clusterIndex++ ) + { + vector count( nclusters, 0 ); + for( int i = startIndex; i < startIndex + sizes[clusterIndex]; i++) + { + int lbl = isFlt ? 
(int)labels.at(i) : labels.at(i); + CV_Assert(lbl < (int)nclusters); + count[lbl]++; + CV_Assert(count[lbl] < (int)total); + } + startIndex += sizes[clusterIndex]; + + int cls = maxIdx( count ); + CV_Assert( !checkClusterUniq || !buzy[cls] ); + + labelsMap[clusterIndex] = cls; + + buzy[cls] = true; + } + + if(checkClusterUniq) + { + for(size_t i = 0; i < buzy.size(); i++) + if(!buzy[i]) + return false; + } + + return true; +} + +bool calcErr( const Mat& labels, const Mat& origLabels, const vector& sizes, float& err, bool labelsEquivalent, bool checkClusterUniq) +{ + err = 0; + CV_Assert( !labels.empty() && !origLabels.empty() ); + CV_Assert( labels.rows == 1 || labels.cols == 1 ); + CV_Assert( origLabels.rows == 1 || origLabels.cols == 1 ); + CV_Assert( labels.total() == origLabels.total() ); + CV_Assert( labels.type() == CV_32SC1 || labels.type() == CV_32FC1 ); + CV_Assert( origLabels.type() == labels.type() ); + + vector labelsMap; + bool isFlt = labels.type() == CV_32FC1; + if( !labelsEquivalent ) + { + if( !getLabelsMap( labels, sizes, labelsMap, checkClusterUniq ) ) + return false; + + for( int i = 0; i < labels.rows; i++ ) + if( isFlt ) + err += labels.at(i) != labelsMap[(int)origLabels.at(i)] ? 1.f : 0.f; + else + err += labels.at(i) != labelsMap[origLabels.at(i)] ? 1.f : 0.f; + } + else + { + for( int i = 0; i < labels.rows; i++ ) + if( isFlt ) + err += labels.at(i) != origLabels.at(i) ? 1.f : 0.f; + else + err += labels.at(i) != origLabels.at(i) ? 1.f : 0.f; + } + err /= (float)labels.rows; + return true; +} + +bool calculateError( const Mat& _p_labels, const Mat& _o_labels, float& error) +{ + error = 0.0f; + float accuracy = 0.0f; + Mat _p_labels_temp; + Mat _o_labels_temp; + _p_labels.convertTo(_p_labels_temp, CV_32S); + _o_labels.convertTo(_o_labels_temp, CV_32S); + + CV_Assert(_p_labels_temp.total() == _o_labels_temp.total()); + CV_Assert(_p_labels_temp.rows == _o_labels_temp.rows); + + accuracy = (float)countNonZero(_p_labels_temp == _o_labels_temp)/_p_labels_temp.rows; + error = 1 - accuracy; + return true; +} + +} // namespace diff --git a/modules/ml/tutorials/cpp_ml/images/optimal-hyperplane.png b/modules/ml/tutorials/cpp_ml/images/optimal-hyperplane.png new file mode 100644 index 00000000000..d4522f04798 Binary files /dev/null and b/modules/ml/tutorials/cpp_ml/images/optimal-hyperplane.png differ diff --git a/modules/ml/tutorials/cpp_ml/images/sample-errors-dist.png b/modules/ml/tutorials/cpp_ml/images/sample-errors-dist.png new file mode 100644 index 00000000000..1379a56d524 Binary files /dev/null and b/modules/ml/tutorials/cpp_ml/images/sample-errors-dist.png differ diff --git a/modules/ml/tutorials/cpp_ml/images/separating-lines.png b/modules/ml/tutorials/cpp_ml/images/separating-lines.png new file mode 100644 index 00000000000..93ae457c0c9 Binary files /dev/null and b/modules/ml/tutorials/cpp_ml/images/separating-lines.png differ diff --git a/modules/ml/tutorials/cpp_ml/images/svm_intro_result.png b/modules/ml/tutorials/cpp_ml/images/svm_intro_result.png new file mode 100644 index 00000000000..5f477a4f699 Binary files /dev/null and b/modules/ml/tutorials/cpp_ml/images/svm_intro_result.png differ diff --git a/modules/ml/tutorials/cpp_ml/images/svm_non_linear_result.png b/modules/ml/tutorials/cpp_ml/images/svm_non_linear_result.png new file mode 100644 index 00000000000..bfecae9a1b6 Binary files /dev/null and b/modules/ml/tutorials/cpp_ml/images/svm_non_linear_result.png differ diff --git a/modules/ml/tutorials/cpp_ml/introduction_to_svm/introduction_to_svm.markdown 
b/modules/ml/tutorials/cpp_ml/introduction_to_svm/introduction_to_svm.markdown new file mode 100644 index 00000000000..9003a6e2817 --- /dev/null +++ b/modules/ml/tutorials/cpp_ml/introduction_to_svm/introduction_to_svm.markdown @@ -0,0 +1,272 @@ +Introduction to Support Vector Machines {#tutorial_introduction_to_svm} +======================================= + +@tableofcontents + +@next_tutorial{tutorial_non_linear_svms} + +| | | +| -: | :- | +| Original author | Fernando Iglesias García | +| Compatibility | OpenCV >= 3.0 | + +Goal +---- + +In this tutorial you will learn how to: + +- Use the OpenCV functions @ref cv::ml::SVM::train to build a classifier based on SVMs and @ref + cv::ml::SVM::predict to test its performance. + +What is a SVM? +-------------- + +A Support Vector Machine (SVM) is a discriminative classifier formally defined by a separating +hyperplane. In other words, given labeled training data (*supervised learning*), the algorithm +outputs an optimal hyperplane which categorizes new examples. + +In which sense is the hyperplane obtained optimal? Let's consider the following simple problem: + +For a linearly separable set of 2D-points which belong to one of two classes, find a separating +straight line. + +![](images/separating-lines.png) + +@note In this example we deal with lines and points in the Cartesian plane instead of hyperplanes +and vectors in a high dimensional space. This is a simplification of the problem.It is important to +understand that this is done only because our intuition is better built from examples that are easy +to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space +whose dimension is higher than two. + +In the above picture you can see that there exists multiple lines that offer a solution to the +problem. Is any of them better than the others? We can intuitively define a criterion to estimate +the worth of the lines: A line is bad if it passes too close to the points because it will be +noise sensitive and it will not generalize correctly. Therefore, our goal should be to find +the line passing as far as possible from all points. + +Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest +minimum distance to the training examples. Twice, this distance receives the important name of +**margin** within SVM's theory. Therefore, the optimal separating hyperplane *maximizes* the margin +of the training data. + +![](images/optimal-hyperplane.png) + +How is the optimal hyperplane computed? +--------------------------------------- + +Let's introduce the notation used to define formally a hyperplane: + +\f[f(x) = \beta_{0} + \beta^{T} x,\f] + +where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bias*. + +@note A more in depth description of this and hyperplanes you can find in the section 4.5 (*Separating +Hyperplanes*) of the book: *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H. +Friedman (@cite HTF01). + +The optimal hyperplane can be represented in an infinite number of different ways by +scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible +representations of the hyperplane, the one chosen is + +\f[|\beta_{0} + \beta^{T} x| = 1\f] + +where \f$x\f$ symbolizes the training examples closest to the hyperplane. In general, the training +examples that are closest to the hyperplane are called **support vectors**. This representation is +known as the **canonical hyperplane**. 
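+
+As a quick worked example of this convention (our own illustration, not part of the original
+derivation): in the plane, the line \f$x_{1} = 2\f$ can be described by \f$\beta = (1, 0)^{T}\f$,
+\f$\beta_{0} = -2\f$ or, equally well, by \f$\beta = (3, 0)^{T}\f$, \f$\beta_{0} = -6\f$. If the
+training example closest to the line is \f$x = (3, 0)^{T}\f$, the first choice already satisfies
+\f[|\beta_{0} + \beta^{T} x| = |-2 + 3| = 1,\f]
+so it is the canonical representation, whereas the second yields \f$|-6 + 9| = 3\f$ and must be
+rescaled (both \f$\beta\f$ and \f$\beta_{0}\f$ divided by 3) to become canonical.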
+ +Now, we use the result of geometry that gives the distance between a point \f$x\f$ and a hyperplane +\f$(\beta, \beta_{0})\f$: + +\f[\mathrm{distance} = \frac{|\beta_{0} + \beta^{T} x|}{||\beta||}.\f] + +In particular, for the canonical hyperplane, the numerator is equal to one and the distance to the +support vectors is + +\f[\mathrm{distance}_{\text{ support vectors}} = \frac{|\beta_{0} + \beta^{T} x|}{||\beta||} = \frac{1}{||\beta||}.\f] + +Recall that the margin introduced in the previous section, here denoted as \f$M\f$, is twice the +distance to the closest examples: + +\f[M = \frac{2}{||\beta||}\f] + +Finally, the problem of maximizing \f$M\f$ is equivalent to the problem of minimizing a function +\f$L(\beta)\f$ subject to some constraints. The constraints model the requirement for the hyperplane to +classify correctly all the training examples \f$x_{i}\f$. Formally, + +\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i,\f] + +where \f$y_{i}\f$ represents each of the labels of the training examples. + +This is a problem of Lagrangian optimization that can be solved using Lagrange multipliers to obtain +the weight vector \f$\beta\f$ and the bias \f$\beta_{0}\f$ of the optimal hyperplane. + +Source Code +----------- + +@add_toggle_cpp +- **Downloadable code**: Click + [here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp) + +- **Code at glance:** + @include samples/introduction_to_svm.cpp +@end_toggle + +@add_toggle_java +- **Downloadable code**: Click + [here](https://github.com/opencv/opencv/tree/5.x/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java) + +- **Code at glance:** + @include samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java +@end_toggle + +@add_toggle_python +- **Downloadable code**: Click + [here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py) + +- **Code at glance:** + @include samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py +@end_toggle + +Explanation +----------- + +- **Set up the training data** + +The training data of this exercise is formed by a set of labeled 2D-points that belong to one of +two different classes; one of the classes consists of one point and the other of three points. + +@add_toggle_cpp +@snippet samples/introduction_to_svm.cpp setup1 +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java setup1 +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py setup1 +@end_toggle + +The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be +stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays +defined above: + +@add_toggle_cpp +@snippet samples/introduction_to_svm.cpp setup2 +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java setup2 +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py setup1 +@end_toggle + +- **Set up SVM's parameters** + + In this tutorial we have introduced the theory of SVMs in the most simple case, when the + training examples are spread into two classes that are linearly separable. 
However, SVMs can be + used in a wide variety of problems (e.g. problems with non-linearly separable data, a SVM using + a kernel function to raise the dimensionality of the examples, etc). As a consequence of this, + we have to define some parameters before training the SVM. These parameters are stored in an + object of the class @ref cv::ml::SVM. + +@add_toggle_cpp +@snippet samples/introduction_to_svm.cpp init +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java init +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py init +@end_toggle + +Here: +- *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for + n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals + with imperfect separation of classes (i.e. when the training data is non-linearly separable). + This feature is not important here since the data is linearly separable and we chose this SVM + type only for being the most commonly used. + +- *Type of SVM kernel*. We have not talked about kernel functions since they are not + interesting for the training data we are dealing with. Nevertheless, let's explain briefly now + the main idea behind a kernel function. It is a mapping done to the training data to improve + its resemblance to a linearly separable set of data. This mapping consists of increasing the + dimensionality of the data and is done efficiently using a kernel function. We choose here the + type @ref cv::ml::SVM::LINEAR "LINEAR" which means that no mapping is done. This parameter is + defined using cv::ml::SVM::setKernel. + +- *Termination criteria of the algorithm*. The SVM training procedure is implemented solving a + constrained quadratic optimization problem in an **iterative** fashion. Here we specify a + maximum number of iterations and a tolerance error so we allow the algorithm to finish in + less number of steps even if the optimal hyperplane has not been computed yet. This + parameter is defined in a structure @ref cv::TermCriteria . + +- **Train the SVM** + We call the method @ref cv::ml::SVM::train to build the SVM model. + +@add_toggle_cpp +@snippet samples/introduction_to_svm.cpp train +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java train +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py train +@end_toggle + +- **Regions classified by the SVM** + + The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In + this example we have used this method in order to color the space depending on the prediction done + by the SVM. In other words, an image is traversed interpreting its pixels as points of the + Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in + green if it is the class with label 1 and in blue if it is the class with label -1. + +@add_toggle_cpp +@snippet samples/introduction_to_svm.cpp show +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java show +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py show +@end_toggle + +- **Support vectors** + + We use here a couple of methods to obtain information about the support vectors. 
+ The method @ref cv::ml::SVM::getSupportVectors obtain all of the support + vectors. We have used this methods here to find the training examples that are + support vectors and highlight them. + +@add_toggle_cpp +@snippet samples/introduction_to_svm.cpp show_vectors +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java show_vectors +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py show_vectors +@end_toggle + +Results +------- + +- The code opens an image and shows the training examples of both classes. The points of one class + are represented with white circles and black ones are used for the other class. +- The SVM is trained and used to classify all the pixels of the image. This results in a division + of the image in a blue region and a green region. The boundary between both regions is the + optimal separating hyperplane. +- Finally the support vectors are shown using gray rings around the training examples. + +![](images/svm_intro_result.png) diff --git a/modules/ml/tutorials/cpp_ml/non_linear_svms/non_linear_svms.markdown b/modules/ml/tutorials/cpp_ml/non_linear_svms/non_linear_svms.markdown new file mode 100644 index 00000000000..de627601b9d --- /dev/null +++ b/modules/ml/tutorials/cpp_ml/non_linear_svms/non_linear_svms.markdown @@ -0,0 +1,287 @@ +Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms} +======================================================= + +@tableofcontents + +@prev_tutorial{tutorial_introduction_to_svm} + +| | | +| -: | :- | +| Original author | Fernando Iglesias García | +| Compatibility | OpenCV >= 3.0 | + +Goal +---- + +In this tutorial you will learn how to: + +- Define the optimization problem for SVMs when it is not possible to separate linearly the + training data. +- How to configure the parameters to adapt your SVM for this class of problems. + +Motivation +---------- + +Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable +training data? Most of the applications in which SVMs are used in computer vision require a more +powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the +training data can be rarely separated using an hyperplane__. + +Consider one of these tasks, for example, face detection. The training data in this case is composed +by a set of images that are faces and another set of images that are non-faces (_every other thing +in the world except from faces_). This training data is too complex so as to find a representation +of each sample (_feature vector_) that could make the whole set of faces linearly separable from the +whole set of non-faces. + +Extension of the Optimization Problem +------------------------------------- + +Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is +now non-linearly separable, we must admit that the hyperplane found will misclassify some of the +samples. This _misclassification_ is a new variable in the optimization that must be taken into +account. The new model has to include both the old requirement of finding the hyperplane that gives +the biggest margin and the new one of generalizing the training data correctly by not allowing too +many classification errors. 
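+
+Before formalizing this trade-off, it may help to see where it surfaces in the OpenCV API. The
+snippet below is a minimal sketch of our own (the four 2D points are made up purely for
+illustration; only documented @ref cv::ml::SVM calls are used): the penalty that the equations
+below call \f$C\f$ is exactly the value passed to @ref cv::ml::SVM::setC.
+
+@code{.cpp}
+#include <opencv2/core.hpp>
+#include <opencv2/ml.hpp>
+
+int main()
+{
+    using namespace cv;
+    using namespace cv::ml;
+
+    // Tiny, non-linearly separable toy set: the last point of class -1 sits among the class +1 points.
+    float samplesData[4][2] = { {1.f, 1.f}, {2.f, 2.f}, {8.f, 8.f}, {1.5f, 1.5f} };
+    int labelsData[4] = { 1, 1, -1, -1 };
+    Mat samples(4, 2, CV_32FC1, samplesData);
+    Mat labels(4, 1, CV_32SC1, labelsData);
+
+    Ptr<SVM> svm = SVM::create();
+    svm->setType(SVM::C_SVC);
+    svm->setKernel(SVM::LINEAR);
+    svm->setC(0.1);        // small C: wider margin, misclassifications tolerated
+    // svm->setC(100.0);   // large C: narrower margin, misclassifications penalized heavily
+    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 1000, 1e-6));
+    svm->train(samples, ROW_SAMPLE, labels);
+    return 0;
+}
+@endcode
+
+The tutorial code linked in the Source Code section keeps this value deliberately small, as
+discussed in the Explanation below.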
+ +We start here from the formulation of the optimization problem of finding the hyperplane which +maximizes the __margin__ (this is explained in the previous tutorial (@ref tutorial_introduction_to_svm): + +\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i\f] + +There are multiple ways in which this model can be modified so it takes into account the +misclassification errors. For example, one could think of minimizing the same quantity plus a +constant times the number of misclassification errors in the training data, i.e.: + +\f[\min ||\beta||^{2} + C \text{(misclassification errors)}\f] + +However, this one is not a very good solution since, among some other reasons, we do not distinguish +between samples that are misclassified with a small distance to their appropriate decision region or +samples that are not. Therefore, a better solution will take into account the _distance of the +misclassified samples to their correct decision regions_, i.e.: + +\f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f] + +For each sample of the training data a new parameter \f$\xi_{i}\f$ is defined. Each one of these +parameters contains the distance from its corresponding training sample to their correct decision +region. The following picture shows non-linearly separable training data from two classes, a +separating hyperplane and the distances to their correct regions of the samples that are +misclassified. + +![](images/sample-errors-dist.png) + +@note Only the distances of the samples that are misclassified are shown in the picture. The +distances of the rest of the samples are zero since they lay already in their correct decision +region. + +The red and blue lines that appear on the picture are the margins to each one of the +decision regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a +misclassified training sample to the margin of its appropriate region. + +Finally, the new formulation for the optimization problem is: + +\f[\min_{\beta, \beta_{0}} L(\beta) = ||\beta||^{2} + C \sum_{i} {\xi_{i}} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 - \xi_{i} \text{ and } \xi_{i} \geq 0 \text{ } \forall i\f] + +How should the parameter C be chosen? It is obvious that the answer to this question depends on how +the training data is distributed. Although there is no general answer, it is useful to take into +account these rules: + +- Large values of C give solutions with _less misclassification errors_ but a _smaller margin_. + Consider that in this case it is expensive to make misclassification errors. Since the aim of + the optimization is to minimize the argument, few misclassifications errors are allowed. +- Small values of C give solutions with _bigger margin_ and _more classification errors_. In this + case the minimization does not consider that much the term of the sum so it focuses more on + finding a hyperplane with big margin. + +Source Code +----------- + +You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or +[download it from here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp). 
+ +@add_toggle_cpp +- **Downloadable code**: Click + [here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp) + +- **Code at glance:** + @include samples/non_linear_svms.cpp +@end_toggle + +@add_toggle_java +- **Downloadable code**: Click + [here](https://github.com/opencv/opencv/tree/5.x/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java) + +- **Code at glance:** + @include samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java +@end_toggle + +@add_toggle_python +- **Downloadable code**: Click + [here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py) + +- **Code at glance:** + @include samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py +@end_toggle + +Explanation +----------- + +- __Set up the training data__ + +The training data of this exercise is formed by a set of labeled 2D-points that belong to one of +two different classes. To make the exercise more appealing, the training data is generated +randomly using a uniform probability density functions (PDFs). + +We have divided the generation of the training data into two main parts. + +In the first part we generate data for both classes that is linearly separable. + +@add_toggle_cpp +@snippet samples/non_linear_svms.cpp setup1 +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java setup1 +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py setup1 +@end_toggle + +In the second part we create data for both classes that is non-linearly separable, data that +overlaps. + +@add_toggle_cpp +@snippet samples/non_linear_svms.cpp setup2 +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java setup2 +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py setup2 +@end_toggle + +- __Set up SVM's parameters__ + +@note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the +attributes of the class @ref cv::ml::SVM that we configure here before training the SVM. + +@add_toggle_cpp +@snippet samples/non_linear_svms.cpp init +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java init +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py init +@end_toggle + +There are just two differences between the configuration we do here and the one that was done in +the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference. + +- _C_. We chose here a small value of this parameter in order not to punish too much the + misclassification errors in the optimization. The idea of doing this stems from the will of + obtaining a solution close to the one intuitively expected. However, we recommend to get a + better insight of the problem by making adjustments to this parameter. + + @note In this case there are just very few points in the overlapping region between classes. + By giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be incremented and the + impact of the parameter _C_ explored deeply. + +- _Termination Criteria of the algorithm_. The maximum number of iterations has to be + increased considerably in order to solve correctly a problem with non-linearly separable + training data. 
In particular, we have increased in five orders of magnitude this value. + +- __Train the SVM__ + +We call the method @ref cv::ml::SVM::train to build the SVM model. Watch out that the training +process may take a quite long time. Have patiance when your run the program. + +@add_toggle_cpp +@snippet samples/non_linear_svms.cpp train +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java train +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py train +@end_toggle + +- __Show the Decision Regions__ + +The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In +this example we have used this method in order to color the space depending on the prediction done +by the SVM. In other words, an image is traversed interpreting its pixels as points of the +Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in +dark green if it is the class with label 1 and in dark blue if it is the class with label 2. + +@add_toggle_cpp +@snippet samples/non_linear_svms.cpp show +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show +@end_toggle + +- __Show the training data__ + +The method @ref cv::circle is used to show the samples that compose the training data. The samples +of the class labeled with 1 are shown in light green and in light blue the samples of the class +labeled with 2. + +@add_toggle_cpp +@snippet samples/non_linear_svms.cpp show_data +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show_data +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show_data +@end_toggle + +- __Support vectors__ + +We use here a couple of methods to obtain information about the support vectors. The method +@ref cv::ml::SVM::getSupportVectors obtain all support vectors. We have used this methods here +to find the training examples that are support vectors and highlight them. + +@add_toggle_cpp +@snippet samples/non_linear_svms.cpp show_vectors +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show_vectors +@end_toggle + +@add_toggle_python +@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show_vectors +@end_toggle + +Results +------- + +- The code opens an image and shows the training examples of both classes. The points of one class + are represented with light green and light blue ones are used for the other class. +- The SVM is trained and used to classify all the pixels of the image. This results in a division + of the image in a blue region and a green region. The boundary between both regions is the + separating hyperplane. Since the training data is non-linearly separable, it can be seen that + some of the examples of both classes are misclassified; some green points lay on the blue region + and some blue points lay on the green one. +- Finally the support vectors are shown using gray rings around the training examples. + +![](images/svm_non_linear_result.png) + +You may observe a runtime instance of this on the [YouTube here](https://www.youtube.com/watch?v=vFv2yPcSo-Q). 
+ +@youtube{vFv2yPcSo-Q} \ No newline at end of file diff --git a/modules/ml/tutorials/py_ml/images/knnicon.png b/modules/ml/tutorials/py_ml/images/knnicon.png new file mode 100644 index 00000000000..61e4dc040be Binary files /dev/null and b/modules/ml/tutorials/py_ml/images/knnicon.png differ diff --git a/modules/ml/tutorials/py_ml/images/svmicon.png b/modules/ml/tutorials/py_ml/images/svmicon.png new file mode 100644 index 00000000000..32608ee5c8e Binary files /dev/null and b/modules/ml/tutorials/py_ml/images/svmicon.png differ diff --git a/modules/ml/tutorials/py_ml/py_knn/images/knn_icon1.jpg b/modules/ml/tutorials/py_ml/py_knn/images/knn_icon1.jpg new file mode 100644 index 00000000000..81feba514a7 Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_knn/images/knn_icon1.jpg differ diff --git a/modules/ml/tutorials/py_ml/py_knn/images/knn_icon2.jpg b/modules/ml/tutorials/py_ml/py_knn/images/knn_icon2.jpg new file mode 100644 index 00000000000..13d3c77f69a Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_knn/images/knn_icon2.jpg differ diff --git a/modules/ml/tutorials/py_ml/py_knn/py_knn_index.markdown b/modules/ml/tutorials/py_ml/py_knn/py_knn_index.markdown new file mode 100644 index 00000000000..4f2c3d60b48 --- /dev/null +++ b/modules/ml/tutorials/py_ml/py_knn/py_knn_index.markdown @@ -0,0 +1,10 @@ +K-Nearest Neighbour {#tutorial_py_knn_index} +=================== + +- @subpage tutorial_py_knn_understanding + + Get a basic understanding of what kNN is + +- @subpage tutorial_py_knn_opencv + + Now let's use kNN in OpenCV for digit recognition OCR diff --git a/modules/ml/tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown b/modules/ml/tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown new file mode 100644 index 00000000000..e876ddf3e6f --- /dev/null +++ b/modules/ml/tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown @@ -0,0 +1,123 @@ +OCR of Hand-written Data using kNN {#tutorial_py_knn_opencv} +================================== + +Goal +---- + +In this chapter: + - We will use our knowledge on kNN to build a basic OCR (Optical Character Recognition) application. + - We will try our application on Digits and Alphabets data that comes with OpenCV. + +OCR of Hand-written Digits +-------------------------- + +Our goal is to build an application which can read handwritten digits. For this we need some +training data and some test data. OpenCV comes with an image digits.png (in the folder +opencv/samples/data/) which has 5000 handwritten digits (500 for each digit). Each digit is +a 20x20 image. So our first step is to split this image into 5000 different digit images. Then for each digit (20x20 image), +we flatten it into a single row with 400 pixels. That is our feature set, i.e. intensity values of all +pixels. It is the simplest feature set we can create. We use the first 250 samples of each digit as +training data, and the other 250 samples as test data. So let's prepare them first. 
+
+@code{.py}
+import numpy as np
+import cv2 as cv
+
+img = cv.imread('digits.png')
+gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
+
+# Now we split the image to 5000 cells, each 20x20 size
+cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
+
+# Make it into a Numpy array: its size will be (50,100,20,20)
+x = np.array(cells)
+
+# Now we prepare the training data and test data
+train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
+test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
+
+# Create labels for train and test data
+k = np.arange(10)
+train_labels = np.repeat(k,250)[:,np.newaxis]
+test_labels = train_labels.copy()
+
+# Initiate kNN, train it on the training data, then test it on the test data with k=5
+knn = cv.ml.KNearest_create()
+knn.train(train, cv.ml.ROW_SAMPLE, train_labels)
+ret,result,neighbours,dist = knn.findNearest(test,k=5)
+
+# Now we check the accuracy of classification
+# For that, compare the result with test_labels and check which are wrong
+matches = result==test_labels
+correct = np.count_nonzero(matches)
+accuracy = correct*100.0/result.size
+print( accuracy )
+@endcode
+So our basic OCR app is ready. This particular example gave me an accuracy of 91%. One option to
+improve accuracy is to add more data for training, especially for the digits where we had more errors.
+
+Instead of building this training data every time I start the application, it is better to save it,
+so that next time I can read it directly from a file and start classification. This can be done
+with the help of some Numpy functions like np.savetxt, np.savez, np.load, etc. Please check the
+NumPy docs for more details.
+@code{.py}
+# Save the data
+np.savez('knn_data.npz',train=train, train_labels=train_labels)
+
+# Now load the data
+with np.load('knn_data.npz') as data:
+    print( data.files )
+    train = data['train']
+    train_labels = data['train_labels']
+@endcode
+On my system, it takes around 4.4 MB of memory. Since we are using intensity values (uint8 data) as
+features, it would be better to convert the data to np.uint8 first and then save it. It takes only
+1.1 MB in this case. Then while loading, you can convert back into float32.
+
+OCR of the English Alphabet
+---------------------------
+
+Next we will do the same for the English alphabet, but there is a slight change in data and feature
+set. Here, instead of images, OpenCV comes with a data file, letter-recognition.data, in the
+opencv/samples/cpp/ folder. If you open it, you will see 20000 lines which may, at first sight, look
+like garbage. Actually, in each row, the first column is a letter, which is our label. The next 16
+numbers following it are the different features. These features are obtained from the [UCI Machine Learning
+Repository](http://archive.ics.uci.edu/ml/). You can find the details of these features in [this
+page](http://archive.ics.uci.edu/ml/datasets/Letter+Recognition).
+
+There are 20000 samples available, so we take the first 10000 as training samples and the remaining
+10000 as test samples. We also need to convert the letters to numbers, because we can't work with
+the characters directly.
+
+@code{.py}
+import cv2 as cv
+import numpy as np
+
+# Load the data and convert the letters to numbers
+data = np.loadtxt('letter-recognition.data', dtype='float32', delimiter=',',
+                  converters={0: lambda ch: ord(ch) - ord('A')})
+
+# Split the dataset in two, with 10000 samples each for training and test sets
+train, test = np.vsplit(data,2)
+
+# Split trainData and testData into features and responses
+responses, trainData = np.hsplit(train,[1])
+labels, testData = np.hsplit(test,[1])
+
+# Initiate the kNN, classify, measure accuracy
+knn = cv.ml.KNearest_create()
+knn.train(trainData, cv.ml.ROW_SAMPLE, responses)
+ret, result, neighbours, dist = knn.findNearest(testData, k=5)
+
+correct = np.count_nonzero(result == labels)
+accuracy = correct*100.0/10000
+print( accuracy )
+@endcode
+It gives me an accuracy of 93.22%. Again, if you want to increase accuracy, you can iteratively add
+more data.
+
+Additional Resources
+--------------------
+1. [Wikipedia article on Optical character recognition](https://en.wikipedia.org/wiki/Optical_character_recognition)
+
+Exercises
+---------
+1. Here we used k=5. What happens if you try other values of k? Can you find a value that maximizes accuracy (minimizes the number of errors)?
\ No newline at end of file
diff --git a/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/images/knn_simple.png b/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/images/knn_simple.png
new file mode 100644
index 00000000000..cb3744e5175
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/images/knn_simple.png differ
diff --git a/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png b/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png
new file mode 100644
index 00000000000..9d1abdded4f
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png differ
diff --git a/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown b/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown
new file mode 100644
index 00000000000..5985cdd5596
--- /dev/null
+++ b/modules/ml/tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown
@@ -0,0 +1,150 @@
+Understanding k-Nearest Neighbour {#tutorial_py_knn_understanding}
+=================================
+
+Goal
+----
+
+In this chapter, we will understand the concepts of the k-Nearest Neighbour (kNN) algorithm.
+
+Theory
+------
+
+kNN is one of the simplest classification algorithms available for supervised learning. The idea
+is to search for the closest match(es) of the test data in the feature space. We will look into it
+with the image below.
+
+![image](images/knn_theory.png)
+
+In the image, there are two families: Blue Squares and Red Triangles. We refer to each family as
+a **Class**. Their houses are shown in their town map, which we call the **Feature Space**. You can
+consider a feature space as a space where all data are projected. For example, consider a 2D
+coordinate space. Each datum has two features, an x coordinate and a y coordinate. You can represent
+this datum in your 2D coordinate space, right? Now imagine there are three features: you will need a
+3D space. For N features, you need an N-dimensional space, right? This N-dimensional space is the
+feature space. In our image, you can consider it as a 2D case with two features.
+
+Now consider what happens if a new member comes into the town and creates a new home, which is
+shown as the green circle. He should be added to one of these Blue or Red families (or *classes*).
+We call that process **Classification**. How exactly should this new member be classified? Since we
+are dealing with kNN, let us apply the algorithm.
+
+One simple method is to check who is his nearest neighbour. From the image, it is clear that it is
+a member of the Red Triangle family. So he is classified as a Red Triangle. This method is called
+simply **Nearest Neighbour** classification, because classification depends only on the *nearest
+neighbour*.
+
+But there is a problem with this approach! Red Triangle may be the nearest neighbour, but what if
+there are also a lot of Blue Squares nearby? Then Blue Squares have more strength in that locality
+than Red Triangles, so just checking the nearest one is not sufficient. Instead we may want to check
+some **k** nearest families. Then whichever family is the majority amongst them, the new guy should
+belong to that family. In our image, let's take k=3, i.e. consider the 3 nearest neighbours. The new
+member has two Red neighbours and one Blue neighbour (there are two Blues equidistant, but since
+k=3, we can take only one of them), so again he should be added to the Red family. But what if we
+take k=7? Then he has 5 Blue neighbours and 2 Red neighbours and should be added to the Blue family.
+The result will vary with the selected value of k. Note that if k is not an odd number, we can get a
+tie, as would happen in the above case with k=4. We would see that our new member has 2 Red and 2
+Blue neighbours as his four nearest neighbours, and we would need to choose a method for breaking
+the tie to perform classification. So to reiterate, this method is called **k-Nearest Neighbour**
+since classification depends on the *k nearest neighbours*.
+
+Again, in kNN, it is true we are considering k neighbours, but we are giving equal importance to
+all, right? Is this justified? For example, take the tied case of k=4. As we can see, the 2 Red
+neighbours are actually closer to the new member than the other 2 Blue neighbours, so he is more
+eligible to be added to the Red family. How do we mathematically explain that? We give some weights
+to each neighbour depending on their distance to the new-comer: those who are nearer to him get
+higher weights, while those that are farther away get lower weights. Then we add the total weights
+of each family separately and classify the new-comer as part of whichever family receives the
+higher total weight. This is called **modified kNN** or **weighted kNN**.
+
+So what are some important things you see here?
+
+- Because we have to check the distance from the new-comer to all the existing houses to find the
+  nearest neighbour(s), we need to have information about all of the houses in town, right? If
+  there are plenty of houses and families, it takes a lot of memory, and also more time for
+  calculation.
+- There is almost zero time for any kind of "training" or preparation. Our "learning" involves only
+  memorizing (storing) the data, before testing and classifying.
+
+Now let's see this algorithm at work in OpenCV.
+
+kNN in OpenCV
+-------------
+
+We will do a simple example here, with two families (classes), just like above. Then in the next
+chapter, we will do an even better example.
+
+So here, we label the Red family as **Class-0** (denoted by 0) and the Blue family as **Class-1**
+(denoted by 1).
We create 25 training data points (our 25 neighbours), and label each of them as part of either
+Class-0 or Class-1. We can do this with the help of a Random Number Generator from NumPy.
+
+Then we can plot it with the help of Matplotlib. Red neighbours are shown as Red Triangles and Blue
+neighbours are shown as Blue Squares.
+@code{.py}
+import cv2 as cv
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Feature set containing (x,y) values of 25 known/training data
+trainData = np.random.randint(0,100,(25,2)).astype(np.float32)
+
+# Label each one either Red or Blue with numbers 0 and 1
+responses = np.random.randint(0,2,(25,1)).astype(np.float32)
+
+# Take Red neighbours and plot them
+red = trainData[responses.ravel()==0]
+plt.scatter(red[:,0],red[:,1],80,'r','^')
+
+# Take Blue neighbours and plot them
+blue = trainData[responses.ravel()==1]
+plt.scatter(blue[:,0],blue[:,1],80,'b','s')
+
+plt.show()
+@endcode
+You will get something similar to our first image. Since you are using a random number generator, you
+will get different data each time you run the code.
+
+Next, initiate the kNN algorithm and pass the trainData and responses to train the kNN. (Under the
+hood, it constructs a search tree; see the Additional Resources section below for more information.)
+
+Then we will bring one new-comer and classify him as belonging to a family with the help of kNN in
+OpenCV. Before running kNN, we need to know something about our test data (data of new-comers). Our
+data should be a floating point array with size
+\f$number \; of \; testdata \times number \; of \; features\f$. Then we find the nearest neighbours
+of the new-comer. We can specify *k*: how many neighbours we want. (Here we used 3.) It returns:
+
+1. The label given to the new-comer depending upon the kNN theory we saw earlier. If you want the
+   *Nearest Neighbour* algorithm, just specify k=1.
+2. The labels of the k nearest neighbours.
+3. The corresponding distances from the new-comer to each nearest neighbour.
+
+So let's see how it works. The new-comer is marked in green.
+@code{.py}
+newcomer = np.random.randint(0,100,(1,2)).astype(np.float32)
+plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o')
+
+knn = cv.ml.KNearest_create()
+knn.train(trainData, cv.ml.ROW_SAMPLE, responses)
+ret, results, neighbours, dist = knn.findNearest(newcomer, 3)
+
+print( "result: {}\n".format(results) )
+print( "neighbours: {}\n".format(neighbours) )
+print( "distance: {}\n".format(dist) )
+
+plt.show()
+@endcode
+I got the following results:
+@code{.py}
+result: [[ 1.]]
+neighbours: [[ 1. 1. 1.]]
+distance: [[ 53. 58. 61.]]
+@endcode
+It says that our new-comer's 3 nearest neighbours are all from the Blue family. Therefore, he is
+labelled as part of the Blue family. It is obvious from the plot below:
+
+![image](images/knn_simple.png)
+
+If you have multiple new-comers (test data), you can just pass them as an array. The corresponding
+results are also obtained as arrays.
+@code{.py}
+# 10 new-comers
+newcomers = np.random.randint(0,100,(10,2)).astype(np.float32)
+ret, results, neighbours, dist = knn.findNearest(newcomers, 3)
+# The results will contain 10 labels.
+@endcode
+
+Additional Resources
+--------------------
+
+1. [NPTEL notes on Pattern Recognition, Chapter
+   11](https://nptel.ac.in/courses/106108057)
+2. [Wikipedia article on Nearest neighbor search](https://en.wikipedia.org/wiki/Nearest_neighbor_search)
+3. [Wikipedia article on k-d tree](https://en.wikipedia.org/wiki/K-d_tree)
+
+Exercises
+---------
+1. Try repeating the above with more classes and different choices of k. Does choosing k become
+   harder with more classes in the same 2D feature space?
\ No newline at end of file
diff --git a/modules/ml/tutorials/py_ml/py_svm/images/svm_icon1.jpg b/modules/ml/tutorials/py_ml/py_svm/images/svm_icon1.jpg
new file mode 100644
index 00000000000..9bb12380871
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_svm/images/svm_icon1.jpg differ
diff --git a/modules/ml/tutorials/py_ml/py_svm/images/svm_icon2.jpg b/modules/ml/tutorials/py_ml/py_svm/images/svm_icon2.jpg
new file mode 100644
index 00000000000..dd13e9d7f3d
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_svm/images/svm_icon2.jpg differ
diff --git a/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics1.png b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics1.png
new file mode 100644
index 00000000000..93ae457c0c9
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics1.png differ
diff --git a/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png
new file mode 100644
index 00000000000..d4522f04798
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png differ
diff --git a/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png
new file mode 100644
index 00000000000..1379a56d524
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png differ
diff --git a/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/py_svm_basics.markdown b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/py_svm_basics.markdown
new file mode 100644
index 00000000000..55f74237e9e
--- /dev/null
+++ b/modules/ml/tutorials/py_ml/py_svm/py_svm_basics/py_svm_basics.markdown
@@ -0,0 +1,134 @@
+Understanding SVM {#tutorial_py_svm_basics}
+=================
+
+Goal
+----
+
+In this chapter
+ - We will see an intuitive understanding of SVM
+
+Theory
+------
+
+### Linearly Separable Data
+
+Consider the image below, which has two types of data, red and blue. In kNN, for a test sample, we
+measured its distance to all the training samples and took the one with the minimum distance. It
+takes plenty of time to measure all the distances and plenty of memory to store all the training
+samples. But considering the data shown in the image, do we really need all of that?
+
+![image](images/svm_basics1.png)
+
+Consider another idea. We find a line, \f$f(x)=ax_1+bx_2+c\f$, which divides the data into two
+regions. When we get a new test sample \f$X\f$, we just substitute it into \f$f(x)\f$. If
+\f$f(X) > 0\f$, it belongs to the blue group, otherwise it belongs to the red group. We call this
+line the **Decision Boundary**. It is very simple and memory-efficient. Data which can be divided
+into two groups with a straight line (or a hyperplane in higher dimensions) is called **linearly
+separable**.
+
+In the image above, you can see that plenty of such lines are possible. Which one should we take?
+Intuitively, the line should pass as far as possible from all the points. Why? Because there can be
+noise in the incoming data, and this noise should not affect the classification accuracy. Taking
+the line that is farthest from all the points therefore provides more immunity against noise. So
+what SVM does is find a straight line (or hyperplane) with the largest minimum distance to the
+training samples.
See the bold line in the image below, passing through the center.
+
+![image](images/svm_basics2.png)
+
+So to find this Decision Boundary, you need training data. Do you need all of it? No. The samples
+which are close to the opposite group are sufficient. In our image, they are the one blue filled
+circle and the two red filled squares. We call them **Support Vectors**, and the lines passing
+through them are called **Support Planes**. They are adequate for finding our decision boundary; we
+need not worry about all the data. This helps in data reduction.
+
+What happens is that, first, the two hyperplanes which best represent the data are found. For
+example, the blue data is represented by \f$w^Tx+b_0 > 1\f$ while the red data is represented by
+\f$w^Tx+b_0 < -1\f$, where \f$w\f$ is the **weight vector** ( \f$w=[w_1, w_2,..., w_n]\f$) and
+\f$x\f$ is the feature vector (\f$x = [x_1,x_2,..., x_n]\f$). \f$b_0\f$ is the **bias**. The weight
+vector decides the orientation of the decision boundary, while the bias decides its location. The
+decision boundary is defined to be midway between these hyperplanes, so it is expressed as
+\f$w^Tx+b_0 = 0\f$. The minimum distance from a support vector to the decision boundary is given by
+\f$distance_{support \, vectors}=\frac{1}{||w||}\f$. The margin is twice this distance, and we need
+to maximize it, i.e. we need to minimize a new function \f$L(w, b_0)\f$ subject to some constraints,
+which can be expressed as:
+
+\f[\min_{w, b_0} L(w, b_0) = \frac{1}{2}||w||^2 \; \text{subject to} \; t_i(w^Tx_i+b_0) \geq 1 \; \forall i\f]
+
+where \f$t_i\f$ is the label of each sample, \f$t_i \in \{-1,1\}\f$.
+
+### Non-Linearly Separable Data
+
+Consider some data which can't be divided into two groups with a straight line. For example,
+consider one-dimensional data where 'X' is at -3 & +3 and 'O' is at -1 & +1. Clearly it is not
+linearly separable. But there are methods to solve these kinds of problems. If we map this data set
+with the function \f$f(x) = x^2\f$, we get 'X' at 9 and 'O' at 1, which are linearly separable.
+
+Alternatively, we can convert this one-dimensional data to two-dimensional data using the mapping
+\f$f(x)=(x,x^2)\f$. Then 'X' becomes (-3,9) and (3,9) while 'O' becomes (-1,1) and (1,1). This is
+also linearly separable. In short, data that is not linearly separable in a lower-dimensional space
+has a better chance of becoming linearly separable in a higher-dimensional space.
+
+In general, it is possible to map points in a d-dimensional space to some D-dimensional space
+\f$(D>d)\f$ to check the possibility of linear separability. There is an idea which helps to compute
+the dot product in the high-dimensional (kernel) space by performing computations in the
+low-dimensional input (feature) space. We can illustrate this with the following example.
+
+Consider two points in two-dimensional space, \f$p=(p_1,p_2)\f$ and \f$q=(q_1,q_2)\f$.
Let \f$\phi\f$ be a
+mapping function which maps a two-dimensional point to three-dimensional space as follows:
+
+\f[\phi (p) = (p_{1}^2,p_{2}^2,\sqrt{2} p_1 p_2)
+\phi (q) = (q_{1}^2,q_{2}^2,\sqrt{2} q_1 q_2)\f]
+
+Let us define a kernel function \f$K(p,q)\f$ that computes the dot product between the mapped
+points, as shown below:
+
+\f[
+\begin{aligned}
+K(p,q) = \phi(p).\phi(q) &= \phi(p)^T \phi(q) \\
+ &= (p_{1}^2,p_{2}^2,\sqrt{2} p_1 p_2).(q_{1}^2,q_{2}^2,\sqrt{2} q_1 q_2) \\
+ &= p_{1}^2 q_{1}^2 + p_{2}^2 q_{2}^2 + 2 p_1 q_1 p_2 q_2 \\
+ &= (p_1 q_1 + p_2 q_2)^2 \\
+ \phi(p).\phi(q) &= (p.q)^2
+\end{aligned}
+\f]
+
+This means that a dot product in the three-dimensional space can be computed as the squared dot
+product in the two-dimensional space. This idea extends to spaces of much higher dimension, so we
+can work with higher-dimensional features while performing all computations in the lower-dimensional
+space.
+
+In addition to all these concepts, there is the problem of misclassification. Just finding the
+decision boundary with the maximum margin is not sufficient; we also need to consider
+misclassification errors. Sometimes it may be possible to find a decision boundary with a smaller
+margin but fewer misclassifications. We therefore need to modify our model so that it finds the
+decision boundary with the maximum margin but with as little misclassification as possible. The
+minimization criterion is modified as:
+
+\f[\min \; ||w||^2 + C(distance \; of \; misclassified \; samples \; to \; their \; correct \; regions)\f]
+
+The image below shows this concept. For each sample of the training data a new parameter
+\f$\xi_i\f$ is defined: it is the distance from the corresponding training sample to its correct
+decision region. Samples that are not misclassified fall on their corresponding support planes, so
+their distance is zero.
+
+![image](images/svm_basics3.png)
+
+So the new optimization problem is:
+
+\f[\min_{w, b_{0}} L(w,b_0) = ||w||^{2} + C \sum_{i} {\xi_{i}} \text{ subject to } t_{i}(w^{T} x_{i} + b_{0}) \geq 1 - \xi_{i} \text{ and } \xi_{i} \geq 0 \text{ } \forall i\f]
+
+How should the parameter C be chosen? It is obvious that the answer to this question depends on how
+the training data is distributed. Although there is no general answer, it is useful to take into
+account these rules:
+
+- Large values of C give solutions with fewer misclassification errors but a smaller margin.
+  Consider that in this case it is expensive to make misclassification errors. Since the aim of
+  the optimization is to minimize the argument, few misclassification errors are allowed.
+- Small values of C give solutions with a bigger margin and more classification errors. In this
+  case the minimization puts less weight on the sum term, so it focuses more on finding a
+  hyperplane with a big margin (see the sketch below).
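+
+To make the effect of C more tangible, the following is a minimal sketch (it is not part of the
+official OpenCV samples) that trains a linear SVM on a small synthetic 2D dataset in which the two
+classes overlap slightly, and reports how many training samples end up misclassified for a few
+values of C. The data, the random seed and the chosen C values are arbitrary illustrations only.
+@code{.py}
+import cv2 as cv
+import numpy as np
+
+# Two overlapping point clouds, one per class (float32 features, int32 labels)
+np.random.seed(42)
+class0 = (np.random.randn(50, 2) * 8 + [30, 30]).astype(np.float32)
+class1 = (np.random.randn(50, 2) * 8 + [60, 60]).astype(np.float32)
+samples = np.vstack((class0, class1))
+labels = np.repeat(np.int32([0, 1]), 50).reshape(-1, 1)
+
+for C in (0.1, 1.0, 100.0):
+    svm = cv.ml.SVM_create()
+    svm.setType(cv.ml.SVM_C_SVC)
+    svm.setKernel(cv.ml.SVM_LINEAR)
+    svm.setC(C)
+    svm.train(samples, cv.ml.ROW_SAMPLE, labels)
+
+    # Count how many training samples fall on the wrong side of the boundary
+    _, predictions = svm.predict(samples)
+    errors = np.count_nonzero(predictions.ravel() != labels.ravel())
+    print('C = {:6.1f} -> {} training samples misclassified'.format(C, errors))
+@endcode
+With such data you should generally see the number of training errors stay the same or decrease as
+C grows, at the price of a narrower margin; the exact numbers depend on the random data.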
+
+Additional Resources
+--------------------
+
+-# [NPTEL notes on Statistical Pattern Recognition, Chapters
+   25-29](https://nptel.ac.in/courses/117108048)
+
+Exercises
+---------
diff --git a/modules/ml/tutorials/py_ml/py_svm/py_svm_index.markdown b/modules/ml/tutorials/py_ml/py_svm/py_svm_index.markdown
new file mode 100644
index 00000000000..dc737e97a0f
--- /dev/null
+++ b/modules/ml/tutorials/py_ml/py_svm/py_svm_index.markdown
@@ -0,0 +1,10 @@
+Support Vector Machines (SVM) {#tutorial_py_svm_index}
+=============================
+
+- @subpage tutorial_py_svm_basics
+
+  Get a basic understanding of what SVM is
+
+- @subpage tutorial_py_svm_opencv
+
+  Let's use SVM functionalities in OpenCV
diff --git a/modules/ml/tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg b/modules/ml/tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg
new file mode 100644
index 00000000000..32c22b7afe9
Binary files /dev/null and b/modules/ml/tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg differ
diff --git a/modules/ml/tutorials/py_ml/py_svm/py_svm_opencv/py_svm_opencv.markdown b/modules/ml/tutorials/py_ml/py_svm/py_svm_opencv/py_svm_opencv.markdown
new file mode 100644
index 00000000000..8ec36dfc2f4
--- /dev/null
+++ b/modules/ml/tutorials/py_ml/py_svm/py_svm_opencv/py_svm_opencv.markdown
@@ -0,0 +1,56 @@
+OCR of Hand-written Data using SVM {#tutorial_py_svm_opencv}
+==================================
+
+Goal
+----
+
+In this chapter
+
+- We will revisit the hand-written data OCR, but with SVM instead of kNN.
+
+OCR of Hand-written Digits
+--------------------------
+
+In kNN, we directly used pixel intensity as the feature vector. This time we will use the [Histogram of
+Oriented Gradients](http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients) (HOG) as the feature
+vector.
+
+Here, before finding the HOG, we deskew the image using its second order moments. So we first define
+a function **deskew()** which takes a digit image and deskews it. Below is the deskew() function:
+
+@snippet samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py deskew
+
+The image below shows the deskew function applied to an image of a zero. The left image is the
+original and the right one is the deskewed image.
+
+![image](images/deskew.jpg)
+
+Next we have to find the HOG descriptor of each cell. For that, we find the Sobel derivatives of each
+cell in the X and Y directions. Then we find the magnitude and direction of the gradient at each
+pixel. This gradient is quantized to 16 integer values. The image is then divided into four
+sub-squares. For each sub-square, we calculate the histogram of directions (16 bins) weighted by the
+gradient magnitude. So each sub-square gives us a vector containing 16 values. Four such vectors (one
+per sub-square) together give us a feature vector containing 64 values. This is the feature vector we
+use to train our data.
+
+@snippet samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py hog
+
+Finally, as in the previous case, we start by splitting our big dataset into individual cells. For
+every digit, 250 cells are reserved for training and the remaining 250 for testing. The full code is
+given below; you can also download it from [here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):
+
+@include samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py
+
+This particular technique gave me nearly 94% accuracy. You can try different values for the various
+parameters of the SVM to check if higher accuracy is possible.
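+
+As a starting point for such experiments, here is a minimal, hypothetical sketch of a small grid
+search over C and gamma for an RBF-kernel SVM. It assumes that trainData, responses, testData and
+testLabels have already been built from the HOG features exactly as in the sample above (float32
+feature rows, integer class labels); the parameter ranges below are arbitrary examples, not
+recommended values.
+@code{.py}
+import cv2 as cv
+import numpy as np
+
+def train_and_score(C, gamma, trainData, responses, testData, testLabels):
+    # Train an RBF-kernel SVM with the given parameters and
+    # return its accuracy on the held-out test set
+    svm = cv.ml.SVM_create()
+    svm.setType(cv.ml.SVM_C_SVC)
+    svm.setKernel(cv.ml.SVM_RBF)
+    svm.setC(C)
+    svm.setGamma(gamma)
+    svm.train(trainData, cv.ml.ROW_SAMPLE, responses)
+    _, result = svm.predict(testData)
+    return np.count_nonzero(result.ravel() == testLabels.ravel()) / float(testLabels.size)
+
+for C in (1, 2.5, 10):
+    for gamma in (0.5, 5, 50):
+        acc = train_and_score(C, gamma, trainData, responses, testData, testLabels)
+        print('C={:5.1f} gamma={:5.1f} accuracy={:.3f}'.format(C, gamma, acc))
+@endcode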
+Alternatively, you can read technical papers in this area and try to implement them.
+
+Additional Resources
+--------------------
+
+-# [Histograms of Oriented Gradients Video](https://www.youtube.com/watch?v=0Zib1YEE4LU)
+
+Exercises
+---------
+
+-# OpenCV samples contain digits.py, which applies a slight improvement of the above method to get
+   an improved result. It also contains the reference. Check it out and understand it.
diff --git a/modules/ml/tutorials/py_table_of_contents_ml_.markdown b/modules/ml/tutorials/py_table_of_contents_ml_.markdown
new file mode 100644
index 00000000000..a162c4d1861
--- /dev/null
+++ b/modules/ml/tutorials/py_table_of_contents_ml_.markdown
@@ -0,0 +1,11 @@
+Machine Learning (Python) {#tutorial_py_table_of_contents_ml_}
+=========================
+
+- @subpage tutorial_py_knn_index
+
+  Learn to use kNN for classification, and about handwritten digit recognition using kNN
+
+- @subpage tutorial_py_svm_index
+
+  Understand concepts of SVM
diff --git a/modules/ml/tutorials/table_of_content_ml.markdown b/modules/ml/tutorials/table_of_content_ml.markdown
new file mode 100644
index 00000000000..2a943be07a6
--- /dev/null
+++ b/modules/ml/tutorials/table_of_content_ml.markdown
@@ -0,0 +1,5 @@
+Machine Learning (C++) {#tutorial_table_of_content_ml}
+========================================================
+
+- @subpage tutorial_introduction_to_svm
+- @subpage tutorial_non_linear_svms