{% endif %}
<li>Ask a question in the <a href="http://tech.groups.yahoo.com/group/OpenCV/">user group/mailing list</a>.</li>
<li>If you think something is missing or wrong in the documentation,
- please file a <a href="https://code.ros.org/trac/opencv/wiki">bug report</a>.</li>
+ please file a <a href="http://code.opencv.org">bug report</a>.</li>
</ul>
</div>
{%- if not embedded %}{% if not theme_nosidebar|tobool %}
%\texttt{\href{http://www.ros.org/wiki/Stack Manifest}{stack manifest}} & Description of a ROS stack.
%\end{tabular}
-\emph{The OpenCV C++ reference manual is here: \url{http://opencv.willowgarage.com/documentation/cpp/}. Use \textbf{Quick Search} to find descriptions of the particular functions and classes}
+\emph{The OpenCV C++ reference manual is here: \url{http://opencv.itseez.com}. Use \textbf{Quick Search} to find descriptions of the particular functions and classes}
\section{Key OpenCV Classes}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#point}{Point\_}} & Template 2D point class \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#point3}{Point3\_}} & Template 3D point class \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#size}{Size\_}} & Template size (width, height) class \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#vec}{Vec}} & Template short vector class \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#vec}{Matx}} & Template small matrix class \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#scalar}{Scalar}} & 4-element vector \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#rect}{Rect}} & Rectangle \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#range}{Range}} & Integer value range \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#mat}{Mat}} & 2D or multi-dimensional dense array (can be used to store matrices, images, histograms, feature descriptors, voxel volumes etc.)\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#sparsemat}{SparseMat}} & Multi-dimensional sparse array \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#ptr}{Ptr}} & Template smart pointer class
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Point_}{Point\_}} & Template 2D point class \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Point3_}{Point3\_}} & Template 3D point class \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Size_}{Size\_}} & Template size (width, height) class \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Vec}{Vec}} & Template short vector class \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Matx}{Matx}} & Template small matrix class \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Scalar_}{Scalar}} & 4-element vector \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Rect_}{Rect}} & Rectangle \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Range}{Range}} & Integer value range \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Mat}{Mat}} & 2D or multi-dimensional dense array (can be used to store matrices, images, histograms, feature descriptors, voxel volumes etc.)\\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#sparsemat}{SparseMat}} & Multi-dimensional sparse array \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Ptr}{Ptr}} & Template smart pointer class
\end{tabular}
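For example, a minimal sketch of how these classes fit together (the values are only illustrative):
\begin{tabbing}
\texttt{Point pt(20, 30);} \textit{// 2D point}\\
\texttt{Size sz(320, 240);} \textit{// width, height}\\
\texttt{Rect roi(pt, sz);} \textit{// rectangle from corner + size}\\
\texttt{Scalar red(0, 0, 255);} \textit{// BGR color value}\\
\texttt{Mat img(sz, CV\_8UC3, red);} \textit{// dense array filled with red}\\
\end{tabbing}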
\section{Matrix Basics}
\> \texttt{Mat image(240, 320, CV\_8UC3);} \\
\textbf{[Re]allocate a pre-declared matrix}\\
-\> \texttt{image.\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::create}{create}(480, 640, CV\_8UC3);}\\
+\> \texttt{image.\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-create}{create}(480, 640, CV\_8UC3);}\\
\textbf{Create a matrix initialized with a constant}\\
\> \texttt{Mat A33(3, 3, CV\_32F, Scalar(5));} \\
\> \texttt{Mat B22 = Mat(2, 2, CV\_32F, B22data).clone();}\\
\textbf{Initialize a random matrix}\\
-\> \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-randu}{randu}(image, Scalar(0), Scalar(256)); }\textit{// uniform dist}\\
-\> \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-randn}{randn}(image, Scalar(128), Scalar(10)); }\textit{// Gaussian dist}\\
+\> \texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#randu}{randu}(image, Scalar(0), Scalar(256)); }\textit{// uniform dist}\\
+\> \texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#randn}{randn}(image, Scalar(128), Scalar(10)); }\textit{// Gaussian dist}\\
\textbf{Convert matrix to/from other structures}\\
\>\textbf{(without copying the data)}\\
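\>\textit{// e.g. (sketch; assumes an existing \texttt{IplImage* ipl}}\\
\>\textit{// and \texttt{vector<Point2f> pts} declared elsewhere):}\\
\> \texttt{Mat hdr(ipl);} \textit{// Mat header over the IplImage data}\\
\> \texttt{IplImage ipl2 = hdr;} \textit{// and back, still no copy}\\
\> \texttt{Mat pmat(pts);} \textit{// Mat header over the vector}\\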
\section{Matrix Manipulations: Copying, Shuffling, Part Access}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::copyTo}{src.copyTo(dst)}} & Copy matrix to another one \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::convertTo}{src.convertTo(dst,type,scale,shift)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Scale and convert to another datatype \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::clone}{m.clone()}} & Make deep copy of a matrix \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::reshape}{m.reshape(nch,nrows)}} & Change matrix dimensions and/or number of channels without copying data \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-copyto}{src.copyTo(dst)}} & Copy matrix to another one \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-convertto}{src.convertTo(dst,type,scale,shift)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Scale and convert to another datatype \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-clone}{m.clone()}} & Make deep copy of a matrix \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-reshape}{m.reshape(nch,nrows)}} & Change matrix dimensions and/or number of channels without copying data \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::row}{m.row(i)}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::col}{m.col(i)}} & Take a matrix row/column \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-row}{m.row(i)}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-col}{m.col(i)}} & Take a matrix row/column \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::rowRange}{m.rowRange(Range(i1,i2))}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::colRange}{m.colRange(Range(j1,j2))}} & \ \ \ \ \ \ \ Take a matrix row/column span \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-rowrange}{m.rowRange(Range(i1,i2))}}
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-colrange}{m.colRange(Range(j1,j2))}} & \ \ \ \ \ \ \ Take a matrix row/column span \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::diag}{m.diag(i)}} & Take a matrix diagonal \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-diag}{m.diag(i)}} & Take a matrix diagonal \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#index-1245}{m(Range(i1,i2),Range(j1,j2)), m(roi)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ Take a submatrix \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Mat}{m(Range(i1,i2),Range(j1,j2)), m(roi)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ Take a submatrix \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::repeat}{m.repeat(ny,nx)}} & Make a bigger matrix from a smaller one \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#repeat}{m.repeat(ny,nx)}} & Make a bigger matrix from a smaller one \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-flip}{flip(src,dst,dir)}} & Reverse the order of matrix rows and/or columns \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#flip}{flip(src,dst,dir)}} & Reverse the order of matrix rows and/or columns \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-split}{split(...)}} & Split multi-channel matrix into separate channels \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#split}{split(...)}} & Split multi-channel matrix into separate channels \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-merge}{merge(...)}} & Make a multi-channel matrix out of the separate channels \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#merge}{merge(...)}} & Make a multi-channel matrix out of the separate channels \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-mixchannels}{mixChannels(...)}} & Generalized form of split() and merge() \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#mixchannels}{mixChannels(...)}} & Generalized form of split() and merge() \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-randshuffle}{randShuffle(...)}} & Randomly shuffle matrix elements \\
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#randshuffle}{randShuffle(...)}} & Randomly shuffle matrix elements \\
\end{tabular}
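For example, a minimal sketch combining these calls (assumes an existing \texttt{Mat image}):
\begin{tabbing}
\texttt{Rect r(10, 10, 100, 100);}\\
\texttt{Mat roi = image(r);} \textit{// submatrix, no data copied}\\
\texttt{Mat roi32f; roi.convertTo(roi32f, CV\_32F, 1./255);}\\
\texttt{flip(roi32f, roi32f, 1);} \textit{// mirror around the y-axis}\\
\end{tabbing}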
\begin{itemize}
\item
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-add}{add()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-subtract}{subtract()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-multiply}{multiply()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-divide}{divide()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-absdiff}{absdiff()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#bitwise-and}{bitwise\_and()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#bitwise-or}{bitwise\_or()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#bitwise-xor}{bitwise\_xor()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-max}{max()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-min}{min()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-compare}{compare()}}
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#add}{add()}}, \texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#subtract}{subtract()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#multiply}{multiply()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#divide}{divide()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#absdiff}{absdiff()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#bitwise-and}{bitwise\_and()}}, \texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#bitwise-or}{bitwise\_or()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#bitwise-xor}{bitwise\_xor()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#max}{max()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#min}{min()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#compare}{compare()}}
-- correspondingly, addition, subtraction, element-wise multiplication ... comparison of two matrices or a matrix and a scalar.
\item
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-sum}{sum()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-mean}{mean()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-mean-stddev}{meanStdDev()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-norm}{norm()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-countnonzero}{countNonZero()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-minmaxloc}{minMaxLoc()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#sum}{sum()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#mean}{mean()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#meanstddev}{meanStdDev()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#norm}{norm()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#countnonzero}{countNonZero()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#minmaxloc}{minMaxLoc()}},
-- various statistics of matrix elements.
\item
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-exp}{exp()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-log}{log()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-pow}{pow()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-sqrt}{sqrt()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-carttopolar}{cartToPolar()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-polarToCart}{polarToCart()}}
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#exp}{exp()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#log}{log()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#pow}{pow()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#sqrt}{sqrt()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#carttopolar}{cartToPolar()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#polartocart}{polarToCart()}}
-- the classical math functions.
\item
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-scaleadd}{scaleAdd()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-transpose}{transpose()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-gemm}{gemm()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-invert}{invert()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-solve}{solve()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-determinant}{determinant()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-trace}{trace()}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-eigen}{eigen()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-SVD}{SVD}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#scaleadd}{scaleAdd()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#transpose}{transpose()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#gemm}{gemm()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#invert}{invert()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#solve}{solve()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#determinant}{determinant()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#trace}{trace()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#eigen}{eigen()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#SVD}{SVD}},
-- the algebraic functions + SVD class.
\item
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-dft}{dft()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-idft}{idft()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-dct}{dct()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-idct}{idct()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#dft}{dft()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#idft}{idft()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#dct}{dct()}},
+\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#idct}{idct()}},
-- discrete Fourier and cosine transformations.
\end{itemize}
-For some operations a more convenient \href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html#matrix-expressions}{algebraic notation} can be used, for example:
+For some operations a more convenient \href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#matrix-expressions}{algebraic notation} can be used, for example:
\begin{tabbing}
\texttt{Mat}\={} \texttt{delta = (J.t()*J + lambda*}\\
\>\texttt{Mat::eye(J.cols, J.cols, J.type()))}\\
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-filter2d}{filter2D()}} & Non-separable linear filter \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#filter2d}{filter2D()}} & Non-separable linear filter \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-sepfilter2d}{sepFilter2D()}} & Separable linear filter \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#sepfilter2d}{sepFilter2D()}} & Separable linear filter \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-blur}{boxFilter()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-gaussianblur}{GaussianBlur()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-medianblur}{medianBlur()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-bilateralfilter}{bilateralFilter()}}
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#blur}{boxFilter()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#gaussianblur}{GaussianBlur()}},
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#medianblur}{medianBlur()}},
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#bilateralfilter}{bilateralFilter()}}
& Smooth the image with one of the linear or non-linear filters \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-sobel}{Sobel()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-scharr}{Scharr()}}
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#sobel}{Sobel()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#scharr}{Scharr()}}
& Compute the spatial image derivatives \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-laplacian}{Laplacian()}} & compute Laplacian: $\Delta I = \frac{\partial ^ 2 I}{\partial x^2} + \frac{\partial ^ 2 I}{\partial y^2}$ \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#laplacian}{Laplacian()}} & Compute Laplacian: $\Delta I = \frac{\partial ^ 2 I}{\partial x^2} + \frac{\partial ^ 2 I}{\partial y^2}$ \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-erode}{erode()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-dilate}{dilate()}} & Morphological operations \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#erode}{erode()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#dilate}{dilate()}} & Morphological operations \\
\end{tabular}
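For instance, a small sketch (assumes \texttt{image} is an 8-bit BGR \texttt{Mat}):
\begin{tabbing}
\texttt{Mat blurred, dx;}\\
\texttt{GaussianBlur(image, blurred, Size(5, 5), 1.5);}\\
\texttt{Sobel(blurred, dx, CV\_32F, 1, 0);} \textit{// derivative along x}\\
\texttt{erode(image, image, Mat());} \textit{// default 3x3 element}\\
\end{tabbing}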
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-resize}{resize()}} & Resize image \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#resize}{resize()}} & Resize image \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-getrectsubpix}{getRectSubPix()}} & Extract an image patch \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#getrectsubpix}{getRectSubPix()}} & Extract an image patch \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-warpaffine}{warpAffine()}} & Warp image affinely\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#warpaffine}{warpAffine()}} & Warp image affinely\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-warpperspective}{warpPerspective()}} & Warp image perspectively\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#warpperspective}{warpPerspective()}} & Warp image perspectively\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-remap}{remap()}} & Generic image warping\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#remap}{remap()}} & Generic image warping\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-convertmaps}{convertMaps()}} & Optimize maps for a faster remap() execution\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#convertmaps}{convertMaps()}} & Optimize maps for a faster remap() execution\\
\end{tabular}
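For instance, a small sketch (assumes an existing \texttt{Mat image}):
\begin{tabbing}
\texttt{Mat half, patch;}\\
\texttt{resize(image, half, Size(), 0.5, 0.5, INTER\_LINEAR);}\\
\texttt{getRectSubPix(image, Size(40, 40),}\\
\texttt{\ \ \ \ Point2f(60.5f, 42.3f), patch);} \textit{// subpixel patch center}\\
\end{tabbing}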
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#cvtColor}{cvtColor()}} & Convert image from one color space to another \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#cvtcolor}{cvtColor()}} & Convert image from one color space to another \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#threshold}{threshold()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#adaptivethreshold}{adaptivethreshold()}} & Convert grayscale image to binary image using a fixed or a variable threshold \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#threshold}{threshold()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#adaptivethreshold}{adaptiveThreshold()}} & Convert grayscale image to binary image using a fixed or a variable threshold \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#floodfill}{floodFill()}} & Find a connected component using region growing algorithm\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#floodfill}{floodFill()}} & Find a connected component using region growing algorithm\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#floodfill}{integral()}} & Compute integral image \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#integral}{integral()}} & Compute integral image \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#distancetransform}{distanceTransform()}}
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#distancetransform}{distanceTransform()}}
& Build distance map or discrete Voronoi diagram for a binary image. \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#floodfill}{watershed()}},
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#grabcut}{grabCut()}}
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#watershed}{watershed()}},
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#grabcut}{grabCut()}}
& marker-based image segmentation algorithms.
- See the samples \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/watershed.cpp}{watershed.cpp}} and \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/grabcut.cpp}{grabcut.cpp}}.
+ See the samples \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/watershed.cpp}{watershed.cpp}} and \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/grabcut.cpp}{grabcut.cpp}}.
\end{tabular}
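For instance, a small sketch (assumes \texttt{image} is a BGR \texttt{Mat}):
\begin{tabbing}
\texttt{Mat gray, bw;}\\
\texttt{cvtColor(image, gray, CV\_BGR2GRAY);}\\
\texttt{threshold(gray, bw, 128, 255, THRESH\_BINARY);}\\
\end{tabbing}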
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#calchist}{calcHist()}} & Compute image(s) histogram \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#calchist}{calcHist()}} & Compute image(s) histogram \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#calcbackproject}{calcBackProject()}} & Back-project the histogram \\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#calcbackproject}{calcBackProject()}} & Back-project the histogram \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#equalizehist}{equalizeHist()}} & Normalize image brightness and contrast\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#equalizehist}{equalizeHist()}} & Normalize image brightness and contrast\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#comparehist}{compareHist()}} & Compare two histograms\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#comparehist}{compareHist()}} & Compare two histograms\\
\end{tabular}
\end{tabbing}
\subsection{Contours}
-See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/contours.cpp}{contours.cpp}} and \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/squares.cpp}{squares.cpp}}
+See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/contours2.cpp}{contours2.cpp}} and \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/squares.cpp}{squares.cpp}}
samples to see what contours are and how to use them.
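For instance, a minimal sketch (assumes \texttt{bw} is a binary 8-bit image; note that \texttt{findContours()} modifies its input):
\begin{tabbing}
\texttt{vector<vector<Point> > contours;}\\
\texttt{findContours(bw, contours, CV\_RETR\_EXTERNAL,}\\
\texttt{\ \ \ \ CV\_CHAIN\_APPROX\_SIMPLE);}\\
\texttt{drawContours(image, contours, -1, Scalar(0, 255, 0), 2);}\\
\end{tabbing}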
\section{Data I/O}
-\href{http://opencv.willowgarage.com/documentation/cpp/core_xml_yaml_persistence.html\#filestorage}{XML/YAML storages} are collections (possibly nested) of scalar values, structures and heterogeneous lists.
+\href{http://opencv.itseez.com/modules/core/doc/xml_yaml_persistence.html\#xml-yaml-file-storages-writing-to-a-file-storage}{XML/YAML storages} are collections (possibly nested) of scalar values, structures and heterogeneous lists.
\begin{tabbing}
\textbf{Wr}\=\textbf{iting data to YAML (or XML)}\\
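\>\textit{// a minimal illustrative sketch (the file name and keys are arbitrary):}\\
\> \texttt{FileStorage fs("test.yml", FileStorage::WRITE);}\\
\> \texttt{fs << "i" << 5 << "str" << "hello";}\\
\> \texttt{Mat M = Mat::eye(3, 3, CV\_32F);}\\
\> \texttt{fs << "mtx" << M;}\\
\> \texttt{fs.release();} \textit{// flush and close the storage}\\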
\begin{tabbing}
\textbf{Wr}\=\textbf{iting and reading raster images}\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_reading_and_writing_images_and_video.html\#cv-imwrite}{imwrite}("myimage.jpg", image);}\\
-\texttt{Mat image\_color\_copy = \href{http://opencv.willowgarage.com/documentation/cpp/highgui_reading_and_writing_images_and_video.html\#cv-imread}{imread}("myimage.jpg", 1);}\\
-\texttt{Mat image\_grayscale\_copy = \href{http://opencv.willowgarage.com/documentation/cpp/highgui_reading_and_writing_images_and_video.html\#cv-imread}{imread}("myimage.jpg", 0);}\\
+\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html\#imwrite}{imwrite}("myimage.jpg", image);}\\
+\texttt{Mat image\_color\_copy = \href{http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 1);}\\
+\texttt{Mat image\_grayscale\_copy = \href{http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 0);}\\
\end{tabbing}
\emph{The functions can read/write images in the following formats: \textbf{BMP (.bmp), JPEG (.jpg, .jpeg), TIFF (.tif, .tiff), PNG (.png), PBM/PGM/PPM (.p?m), Sun Raster (.sr), JPEG 2000 (.jp2)}. Every format supports 8-bit, 1- or 3-channel images. Some formats (PNG, JPEG 2000) support 16 bits per channel.}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-namedwindow}{namedWindow(winname,flags)}} & \ \ \ \ \ \ \ \ \ \ Create named highgui window \\
+\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#namedwindow}{namedWindow(winname,flags)}} & \ \ \ \ \ \ \ \ \ \ Create named highgui window \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-destroywindow}{destroyWindow(winname)}} & \ \ \ Destroy the specified window \\
+\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#destroywindow}{destroyWindow(winname)}} & \ \ \ Destroy the specified window \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-imshow}{imshow(winname, mtx)}} & Show image in the window \\
+\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#imshow}{imshow(winname, mtx)}} & Show image in the window \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-waitKey}{waitKey(delay)}} & Wait for a key press during the specified time interval (or forever). Process events while waiting. \emph{Do not forget to call this function several times a second in your code.} \\
+\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#waitkey}{waitKey(delay)}} & Wait for a key press during the specified time interval (or forever). Process events while waiting. \emph{Do not forget to call this function several times a second in your code.} \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-createTrackbar}{createTrackbar(...)}} & Add trackbar (slider) to the specified window \\
+\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#createtrackbar}{createTrackbar(...)}} & Add trackbar (slider) to the specified window \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-setmousecallback}{setMouseCallback(...)}} & \ \ Set the callback on mouse clicks and movements in the specified window \\
+\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#setmousecallback}{setMouseCallback(...)}} & \ \ Set the callback on mouse clicks and movements in the specified window \\
\end{tabular}
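For example, a minimal sketch (assumes an existing \texttt{Mat image}):
\begin{tabbing}
\texttt{namedWindow("view");}\\
\texttt{imshow("view", image);}\\
\texttt{waitKey(0);} \textit{// wait for any key}\\
\end{tabbing}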
-See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/camshiftdemo.cpp}{camshiftdemo.cpp}} and other \href{https://code.ros.org/svn/opencv/trunk/opencv/samples/}{OpenCV samples} on how to use the GUI functions.
+See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/camshiftdemo.cpp}{camshiftdemo.cpp}} and other \href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/}{OpenCV samples} on how to use the GUI functions.
\section{Camera Calibration, Pose Estimation and Depth Estimation}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-calibratecamera}{calibrateCamera()}} & Calibrate camera from several views of a calibration pattern. \\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#calibratecamera}{calibrateCamera()}} & Calibrate camera from several views of a calibration pattern. \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-findchessboardcorners}{findChessboardCorners()}} & \ \ \ \ \ \ Find feature points on the checkerboard calibration pattern. \\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#findchessboardcorners}{findChessboardCorners()}} & \ \ \ \ \ \ Find feature points on the checkerboard calibration pattern. \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-solvepnp}{solvePnP()}} & Find the object pose from the known projections of its feature points. \\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#solvepnp}{solvePnP()}} & Find the object pose from the known projections of its feature points. \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-stereocalibrate}{stereoCalibrate()}} & Calibrate stereo camera. \\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#stereocalibrate}{stereoCalibrate()}} & Calibrate stereo camera. \\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-stereorectify}{stereoRectify()}} & Compute the rectification transforms for a calibrated stereo camera.\\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#stereorectify}{stereoRectify()}} & Compute the rectification transforms for a calibrated stereo camera.\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-initundistortrectifymap}{initUndistortRectifyMap()}} & \ \ \ \ \ \ Compute rectification map (for \texttt{remap()}) for each stereo camera head.\\
+\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#initundistortrectifymap}{initUndistortRectifyMap()}} & \ \ \ \ \ \ Compute rectification map (for \texttt{remap()}) for each stereo camera head.\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-StereoBM}{StereoBM}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-StereoSGBM}{StereoSGBM}} & The stereo correspondence engines to be run on rectified stereo pairs.\\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#StereoBM}{StereoBM}}, \texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#StereoSGBM}{StereoSGBM}} & The stereo correspondence engines to be run on rectified stereo pairs.\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-reprojectimageto3d}{reprojectImageTo3D()}} & Convert disparity map to 3D point cloud.\\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#reprojectimageto3d}{reprojectImageTo3D()}} & Convert disparity map to 3D point cloud.\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-findhomography}{findHomography()}} & Find best-fit perspective transformation between two 2D point sets. \\
+\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#findhomography}{findHomography()}} & Find best-fit perspective transformation between two 2D point sets. \\
\end{tabular}
-To calibrate a camera, you can use \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/calibration.cpp}{calibration.cpp}} or
-\texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_calib.cpp}{stereo\_calib.cpp}} samples.
+To calibrate a camera, you can use the \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/calibration.cpp}{calibration.cpp}} or
+\texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_calib.cpp}{stereo\_calib.cpp}} samples.
To get the disparity maps and the point clouds, use the
-\texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_match.cpp}{stereo\_match.cpp}} sample.
+\texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_match.cpp}{stereo\_match.cpp}} sample.
\section{Object Detection}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
- \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/objdetect__object_detection.html\#matchTemplate}{matchTemplate}} & Compute proximity map for given template.\\
+ \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/object_detection.html\#matchtemplate}{matchTemplate}} & Compute proximity map for given template.\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/objdetect__object_detection.html\#CascadeClassifier}{CascadeClassifier}} & Viola's Cascade of Boosted classifiers using Haar or LBP features. Suits for detecting faces, facial features and some other objects without diverse textures. See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/facedetect.cpp}{facedetect.cpp}}\\
+\texttt{\href{http://opencv.itseez.com/modules/objdetect/doc/cascade_classification.html\#cascadeclassifier}{CascadeClassifier}} & Viola's Cascade of Boosted classifiers using Haar or LBP features. Suited for detecting faces, facial features and some other objects without diverse textures. See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/facedetect.cpp}{facedetect.cpp}}\\
-\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/objdetect__object_detection.html\#HOGDescriptor}{HOGDescriptor}} & N. Dalal's object detector using Histogram-of-Oriented-Gradients (HOG) features. Suits for detecting people, cars and other objects with well-defined silhouettes. See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/peopledetect.cpp}{peopledetect.cpp}}\\
+\texttt{HOGDescriptor} & N. Dalal's object detector using Histogram-of-Oriented-Gradients (HOG) features. Suited for detecting people, cars and other objects with well-defined silhouettes. See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/peopledetect.cpp}{peopledetect.cpp}}\\
\end{tabular}
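For example, a minimal detection sketch (the cascade file name is illustrative; \texttt{gray} is assumed to be a grayscale \texttt{Mat}):
\begin{tabbing}
\texttt{CascadeClassifier cascade("haarcascade\_frontalface\_alt.xml");}\\
\texttt{vector<Rect> faces;}\\
\texttt{cascade.detectMultiScale(gray, faces, 1.1, 3);}\\
\end{tabbing}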
Code
=====
-* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp>`_
+* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp>`_
Explanation
=============
-.. _matTheBasicImageContainer:\r
-\r
-Mat - The Basic Image Container\r
-*******************************\r
-\r
-Goal\r
-====\r
-\r
-We have multiple ways to acquire digital images from the real world: digital cameras, scanners, computed tomography or magnetic resonance imaging, to name just a few. In every case what we (humans) see are images. However, when transferring this to our digital devices, what we record are numerical values for each of the points of the image.\r
-\r
-.. image:: images/MatBasicImageForComputer.jpg\r
- :alt: A matrix of the mirror of a car\r
- :align: center\r
-\r
-For example, in the above image you can see that the mirror of the car is nothing more than a matrix containing all the intensity values of the pixel points. How we get and store the pixel values may vary according to what fits our needs best, but in the end all images inside a computer may be reduced to numerical matrices and some other information describing the matrix itself. *OpenCV* is a computer vision library whose main focus is to process and manipulate this information in order to extract further information from it. Therefore, the first thing you need to learn and get comfortable with is how OpenCV stores and handles images.\r
-\r
-*Mat*\r
-=====\r
-\r
-OpenCV has been around since 2001. In those days the library was built around a *C* interface, and images were stored in memory using a C structure called *IplImage*. This is the one you'll see in most of the older tutorials and educational materials. The problem with this is that it brings to the table all the minuses of the C language. The biggest issue is manual memory management: it builds on the assumption that the user is responsible for taking care of memory allocation and deallocation. While this is no issue for smaller programs, once your code base starts to grow larger and larger it becomes more and more of a struggle to handle all this rather than focusing on actually solving your development goal.\r
-\r
-Luckily C++ came around and introduced the concept of classes, making possible another road for the user: automatic memory management (more or less). The good news is that C++ is fully compatible with C, so no compatibility issues can arise from making the change. Therefore, OpenCV with its 2.0 version introduced a new C++ interface that, by taking advantage of this, offers a new way of doing things: a way in which you do not need to fiddle with memory management, making your code concise (less to write, to achieve more). The main downside of the C++ interface is that many embedded development systems at the moment support only C. Therefore, unless you are targeting such a platform, there's no point in using the *old* methods (unless you're a masochist programmer and you're asking for trouble).\r
-\r
-The first thing you need to know about *Mat* is that you no longer need to manually allocate its memory and release it as soon as you do not need it any more. While doing this is still a possibility, most of the OpenCV functions will allocate their output data automatically. As a nice bonus, if you pass an already existing *Mat* object, which has already allocated the required space for the matrix, this will be reused. In other words, we use at all times only as much memory as we need to perform the task.\r
-\r
-*Mat* is basically a class with two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing it, the address at which the matrix is stored, and so on) and a pointer to the matrix containing the pixel values (which may take any dimensionality, depending on the method chosen for storing). The matrix header size is constant; however, the size of the matrix itself may vary from image to image and is usually larger by orders of magnitude. Therefore, when you pass images around in your program and at some point need to create a copy of an image, the big price you pay is for the matrix itself rather than for its header. OpenCV is an image processing library containing a large collection of image processing functions. To solve a computational challenge, most of the time you will end up using multiple functions of the library, so passing images to functions is common practice. We should not forget that we are talking about image processing algorithms, which tend to be quite computationally heavy. The last thing we want is to further decrease the speed of your program by making unnecessary copies of potentially *large* images.\r
-\r
-To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, however the matrix may be shared between two instances by having their matrix pointers point to the same address. Moreover, the copy operators **will only copy the headers** and the pointer to the large matrix, not the data itself.\r
-\r
-.. code-block:: cpp\r
- :linenos:\r
-\r
- Mat A, C; // creates just the header parts\r
- A = imread(argv[1], CV_LOAD_IMAGE_COLOR); // here we'll know the method used (allocate matrix)\r
-\r
- Mat B(A); // Use the copy constructor\r
-\r
- C = A; // Assignment operator\r
-\r
-All the above objects, in the end, point to the same single data matrix. Their headers are different; however, making any modification using one of them will affect all the others too. In practice the different objects just provide different access methods to the same underlying data. Nevertheless, their header parts are different. The really interesting part is that you can create headers that refer only to a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries:\r
-\r
-.. code-block:: cpp\r
- :linenos:\r
-\r
- Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle\r
- Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries\r
-\r
-Now you may ask: if the matrix itself may belong to multiple *Mat* objects, who takes responsibility for cleaning it up when it is no longer needed? The short answer is: the last object that used it. This is handled by a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned up, this counter is decreased. When the counter reaches zero the matrix is freed too. Because sometimes you will still want to copy the matrix itself, there exist the :basicstructures:`clone() <mat-clone>` and :basicstructures:`copyTo() <mat-copyto>` functions.\r
-\r
-.. code-block:: cpp\r
- :linenos:\r
-\r
- Mat F = A.clone(); \r
- Mat G; \r
- A.copyTo(G);\r
-\r
-Now modifying *F* or *G* will not affect the matrix pointed to by the header of *A*. What you need to remember from all this is that:\r
-\r
-.. container:: enumeratevisibleitemswithsquare\r
-\r
- * Output image allocation for OpenCV functions is automatic (unless specified otherwise).\r
- * No need to think about memory freeing with OpenCV's C++ interface.\r
- * The assignment operator and the copy constructor (*ctor*) copy only the header.\r
- * Use the :basicstructures:`clone() <mat-clone>` or the :basicstructures:`copyTo() <mat-copyto>` function to copy the underlying matrix of an image.\r
-\r
-*Storing* methods\r
-================= \r
-\r
-This is about how you store the pixel values. You can select the color space and the data type used. The color space refers to how we combine color components in order to code a given color. The simplest one is grayscale, where the colors at our disposal are black and white. The combination of these allows us to create many shades of gray.\r
-\r
-For *colorful* ways we have a lot more methods to choose from. Each of them breaks a color down to three or four basic components, and the combination of these gives all the others. The most popular of these is RGB, mainly because this is also how our eyes build up colors. Its base colors are red, green and blue. To code the transparency of a color, a fourth element, alpha (A), is sometimes added.\r
-\r
-There are many color systems, however, each with its own advantages:\r
-\r
-.. container:: enumeratevisibleitemswithsquare\r
-\r
- * RGB is the most common, as our eyes use something similar and our display systems also compose colors using it.\r
- * The HSV and HLS color spaces decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. Using these you may, for example, dismiss the last component, making your algorithm less sensitive to the light conditions of the input image.\r
- * YCrCb is used by the popular JPEG image format.\r
- * CIE L*a*b* is a perceptually uniform color space, which comes in handy if you need to measure the *distance* of a given color to another color.\r
-\r
-Each of the building components has its own valid domain, and this leads to the data type used. How we store a component defines just how fine a control we have over its domain. The smallest data type possible is *char*, which means one byte or 8 bits. This may be unsigned (able to store values from 0 to 255) or signed (values from -127 to +127). Although in the case of three components this already gives 16 million possible colors to represent (as in the case of RGB), we may acquire an even finer control by using the float (4 bytes = 32 bits) or double (8 bytes = 64 bits) data types for each component. Nevertheless, remember that increasing the size of a component also increases the size of the whole picture in memory.\r
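-\r
-A minimal sketch of the difference between an 8-bit and a float representation (the sizes and values here are only illustrative):\r
-\r
-.. code-block:: cpp\r
-\r
- Mat bgr8(480, 640, CV_8UC3, Scalar(0, 0, 255)); // 8-bit components in 0..255\r
- Mat bgr32f;\r
- bgr8.convertTo(bgr32f, CV_32FC3, 1.0/255);      // float components in [0, 1]\r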
-\r
-Creating a *Mat* object explicitly\r
-==================================\r
-\r
-In the :ref:`Load_Save_Image` tutorial you have already seen how to write a matrix to an image file by using the :readWriteImageVideo:`imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can achieve this via the << operator of *Mat*. Be aware, though, that this only works for two-dimensional matrices.\r
-\r
-Although *Mat* works really well as an image container, it is also a general matrix class, so it is possible to create and manipulate multidimensional matrices with it. You can create a Mat object in multiple ways (a combined sketch follows this list):\r
-\r
-.. container:: enumeratevisibleitemswithsquare\r
-\r
- + :basicstructures:`Mat() <mat-mat>` Constructor \r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 27-28\r
-\r
- .. image:: images/MatBasicContainerOut1.png\r
- :alt: Demo image of the matrix output\r
- :align: center\r
-\r
- For two-dimensional and multichannel images we first define their size: the row and column count.\r
-\r
- Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions made according to the following convention: \r
-\r
- .. code-block:: cpp\r
-\r
- CV_[The number of bits per item][Signed or Unsigned][Type Prefix]C[The channel number]\r
-\r
- For instance, *CV_8UC3* means we use unsigned char types that are 8 bits long and each pixel has three of these to form the three channels. These are predefined for up to four channels. The :basicstructures:`Scalar <scalar>` is a four-element short vector; specify it and you can initialize all matrix points with a custom value. However, if you need more channels you can create the type with the macro above, putting the channel number in parentheses, as you can see below.\r
-\r
- + Use C/C++ arrays and initialize via constructor\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 35-36\r
-\r
- The example above shows how to create a matrix with more than two dimensions: specify its dimensionality, then pass a pointer containing the size for each dimension, and the rest remains the same.\r
-\r
-\r
- + Create a header for an already existing IplImage pointer:\r
-\r
- .. code-block:: cpp\r
-\r
- IplImage* img = cvLoadImage("greatwave.png", 1);\r
- Mat mtx(img); // convert IplImage* -> Mat\r
-\r
- + :basicstructures:`create() <mat-create>` function:\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 31-32\r
-\r
- .. image:: images/MatBasicContainerOut2.png\r
- :alt: Demo image of the matrix output\r
- :align: center\r
-\r
- You cannot initialize the matrix values with this construction. It will only reallocate its matrix data memory if the new size does not fit into the old one.\r
-\r
- + MATLAB-style initializers: :basicstructures:`zeros() <mat-zeros>`, :basicstructures:`ones() <mat-ones>`, :basicstructures:`eye() <mat-eye>`. Specify the size and data type to use:\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 40-47\r
-\r
- .. image:: images/MatBasicContainerOut3.png\r
- :alt: Demo image of the matrix output\r
- :align: center\r
-\r
- + For small matrices you may use comma separated initializers:\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 50-51\r
-\r
- .. image:: images/MatBasicContainerOut6.png\r
- :alt: Demo image of the matrix output\r
- :align: center\r
-\r
- + Create a new header for an existing *Mat* object and :basicstructures:`clone() <mat-clone>` or :basicstructures:`copyTo() <mat-copyto>` it.\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 53-54\r
-\r
- .. image:: images/MatBasicContainerOut7.png\r
- :alt: Demo image of the matrix output\r
- :align: center\r
-\r
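-Putting the options above together, a compact sketch (the sizes and values are only illustrative):\r
-\r
-.. code-block:: cpp\r
-\r
- Mat M(2, 2, CV_8UC3, Scalar(0, 0, 255));   // constructor with a fill value\r
- M.create(4, 4, CV_8UC(2));                 // [re]allocate, data not initialized\r
- Mat E = Mat::eye(4, 4, CV_64F);            // MATLAB-style initializers\r
- Mat Z = Mat::zeros(3, 3, CV_8UC1);\r
- Mat C = (Mat_<double>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0); // comma-separated initializer\r
- cout << "M = " << endl << " " << M << endl; // print via the << operator\r
-\r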
-Print out formatting\r
-====================\r
-\r
-.. note:: \r
-\r
- You can fill out a matrix with random values using the :operationsOnArrays:`randu() <randu>` function. You need to give the lower and upper limits for the random values:\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 57-58\r
-\r
-In the above examples you could see the default formatting option. Nevertheless, OpenCV allows you to format your matrix output to fit the rules of the following (a combined sketch follows this list):\r
-\r
-.. container:: enumeratevisibleitemswithsquare\r
-\r
- + Default \r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 61\r
-\r
- .. image:: images/MatBasicContainerOut8.png\r
- :alt: Default Output\r
- :align: center\r
-\r
- + Python\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 62\r
-\r
- .. image:: images/MatBasicContainerOut16.png\r
- :alt: Default Output\r
- :align: center\r
-\r
- + Comma separated values (CSV) \r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 64\r
-\r
- .. image:: images/MatBasicContainerOut10.png\r
- :alt: Default Output\r
- :align: center\r
-\r
- + Numpy\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 63\r
-\r
- .. image:: images/MatBasicContainerOut9.png\r
- :alt: Default Output\r
- :align: center\r
-\r
- + C\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 65\r
-\r
- .. image:: images/MatBasicContainerOut11.png\r
- :alt: Default Output\r
- :align: center\r
-\r
-Print for other common items\r
-============================\r
-\r
-OpenCV offers support for print of other common OpenCV data structures too via the << operator like:\r
-\r
-.. container:: enumeratevisibleitemswithsquare\r
-\r
- + 2D Point \r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 67-68\r
-\r
- .. image:: images/MatBasicContainerOut12.png\r
- :alt: Default Output\r
- :align: center\r
-\r
-\r
- + 3D Point\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 70-71\r
-\r
- .. image:: images/MatBasicContainerOut13.png\r
- :alt: Default Output\r
- :align: center\r
-\r
- + std::vector via cv::Mat\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 74-77\r
-\r
- .. image:: images/MatBasicContainerOut14.png\r
- :alt: Default Output\r
- :align: center\r
-\r
- + std::vector of points\r
-\r
- .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp\r
- :language: cpp\r
- :tab-width: 4\r
- :lines: 79-83\r
-\r
- .. image:: images/MatBasicContainerOut15.png\r
- :alt: Default Output\r
- :align: center\r
-\r
-Most of the samples here have been included into a small console application. You can download it from :download:`here <../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp>` or in the core section of the cpp samples.\r
-\r
-A quick video demonstration of this you can find on `YouTube <https://www.youtube.com/watch?v=1tibU7vGWpk>`_.\r
-\r
-.. raw:: html\r
-\r
- <div align="center">\r
- <iframe title="Install OpenCV by using its source files - Part 1" width="560" height="349" src="http://www.youtube.com/embed/1tibU7vGWpk?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>\r
- </div>\r
+.. _matTheBasicImageContainer:
+
+Mat - The Basic Image Container
+*******************************
+
+Goal
+====
+
+We have multiple ways to acquire digital images from the real world: digital cameras, scanners, computed tomography and magnetic resonance imaging, to name just a few. In every case what we (humans) see are images. However, when transferring this to our digital devices what we record are numerical values for each of the points of the image.
+
+.. image:: images/MatBasicImageForComputer.jpg
+ :alt: A matrix of the mirror of a car
+ :align: center
+
+For example, in the above image you can see that the mirror of the car is nothing more than a matrix containing all the intensity values of the pixel points. How we get and store the pixel values may vary according to our needs, but in the end every image inside a computer may be reduced to a numerical matrix plus some additional information describing the matrix itself. *OpenCV* is a computer vision library whose main focus is to process and manipulate this information in order to extract further information from it. Therefore, the first thing you need to learn and become familiar with is how OpenCV stores and handles images.
+
+*Mat*
+=====
+
+OpenCV has been around since 2001. In those days the library was built around a *C* interface, and images were stored in memory using a C structure called *IplImage*. This is the one you'll see in most of the older tutorials and educational materials. The problem with this is that it brings to the table all the minuses of the C language. The biggest issue is manual memory management: it builds on the assumption that the user is responsible for taking care of memory allocation and deallocation. While this is no issue for smaller programs, once your code base starts to grow larger and larger it becomes more and more of a struggle to handle all of this rather than focusing on actually solving your development goal.
+
+Luckily, C++ came around and introduced the concept of classes, making it possible to offer the user another road: automatic memory management (more or less). The good news is that C++ is fully compatible with C, so no compatibility issues can arise from making the change. Therefore, OpenCV 2.0 introduced a new C++ interface that takes advantage of this and offers a new way of doing things: a way in which you do not need to fiddle with memory management, making your code concise (write less, achieve more). The main downside of the C++ interface is that many embedded development systems currently support only C. Therefore, unless you are targeting such a platform, there's no point in using the *old* methods (unless you're a masochist programmer asking for trouble).
+
+The first thing you need to know about *Mat* is that you no longer need to manually allocate its memory and release it as soon as you do not need it. While doing so is still possible, most of the OpenCV functions will allocate their output data automatically. As a nice bonus, if you pass in an already existing *Mat* object, which has already allocated the required space for the matrix, this space will be reused. In other words, we use at all times only as much memory as we need to perform the task.
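+
+For example, a minimal sketch of how this reuse plays out (assuming the usual OpenCV includes, ``using namespace cv``, and an image file name that is only an example):
+
+.. code-block:: cpp
+
+   Mat image = imread("someImage.jpg");            // the pixel data is allocated by imread
+   Mat blurred;
+   GaussianBlur(image, blurred, Size(5, 5), 1.5);  // blurred is allocated by the function
+   GaussianBlur(image, blurred, Size(9, 9), 3.0);  // same size and type, so the buffer of blurred is reused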
+
+*Mat* is basically a class with two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing, the address at which the matrix is stored and so on) and a pointer to the matrix containing the pixel values (which may take any dimensionality, depending on the method chosen for storing). The matrix header size is constant; however, the size of the matrix itself may vary from image to image and is usually larger by orders of magnitude. Therefore, when you pass images around in your program and at some point need to create a copy of an image, the big price you pay is for the matrix itself rather than its header. OpenCV is an image processing library. It contains a large collection of image processing functions, and to solve a computational challenge you will most of the time end up using many of them. Because of this, passing images to functions is a common practice. We should not forget that we are talking about image processing algorithms, which tend to be quite computationally heavy. The last thing we want to do is further decrease the speed of your program by making unnecessary copies of potentially *large* images.
+
+To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, but the matrix may be shared between two instances by having their matrix pointers point to the same address. Moreover, the copy operators **will only copy the headers** and the pointer to the large matrix, not the data itself.
+
+.. code-block:: cpp
+ :linenos:
+
+ Mat A, C; // creates just the header parts
+ A = imread(argv[1], CV_LOAD_IMAGE_COLOR); // here we'll know the method used (allocate matrix)
+
+ Mat B(A); // Use the copy constructor
+
+ C = A; // Assignment operator
+
+All the above objects, in the end, point to the same single data matrix. Their headers are different; however, making any modification using any one of them will affect all the others too. In practice the different objects just provide different access methods to the same underlying data. Nevertheless, their header parts are different. The really interesting part is that you can create headers that refer to only a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries:
+
+.. code-block:: cpp
+ :linenos:
+
+ Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle
+ Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries
+
+Now you may ask: if the matrix itself may belong to multiple *Mat* objects, who takes responsibility for cleaning it up when it is no longer needed? The short answer is: the last object that used it. For this a reference counting mechanism is used. Whenever somebody copies a header of a *Mat* object, a counter for the matrix is increased. Whenever a header is cleaned up, this counter is decreased. When the counter reaches zero, the matrix itself is freed too. Because sometimes you will still want to copy the matrix itself, there are the :basicstructures:`clone() <mat-clone>` and :basicstructures:`copyTo() <mat-copyto>` functions.
+
+.. code-block:: cpp
+ :linenos:
+
+ Mat F = A.clone();
+ Mat G;
+ A.copyTo(G);
+
+Now modifying *F* or *G* will not affect the matrix pointed to by the headers of *A*, *B* or *C*. What you need to remember from all this is:
+
+.. container:: enumeratevisibleitemswithsquare
+
+ * Output image allocation for OpenCV functions is automatic (unless specified otherwise).
+ * No need to think about memory freeing with OpenCV's C++ interface.
+ * The assignment operator and the copy constructor (*ctor*) copy only the header.
+ * Use the :basicstructures:`clone() <mat-clone>` or the :basicstructures:`copyTo() <mat-copyto>` function to copy the underlying matrix of an image.
+
+*Storing* methods
+=================
+
+This is about how you store the pixel values. You can select the color space and the data type to use. The color space refers to how we combine color components in order to code a given color. The simplest one is grayscale, where the colors at our disposal are black and white; the combination of these allows us to create many shades of gray.
+
+For *colorful* ways we have a lot more methods to choose from. However, every one of them breaks a color down to three or four basic components, and the combination of these gives all the others. The most popular of these is RGB, mainly because this is also how our eyes build up colors. Its base colors are red, green and blue. To code the transparency of a color, a fourth element, alpha (A), is sometimes added.
+
+However, there are many other color systems, each with its own advantages:
+
+.. container:: enumeratevisibleitemswithsquare
+
+ * RGB is the most common, as our eyes use something similar and our display systems also compose colors this way.
+ * The HSV and HLS color spaces decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. Using these you may, for example, dismiss the last component, making your algorithm less sensitive to the lighting conditions of the input image.
+ * YCrCb is used by the popular JPEG image format.
+ * CIE L*a*b* is a perceptually uniform color space, which comes in handy if you need to measure the *distance* between two colors.
+
+Each of the building components has its own valid domain, and this leads to the data type used. How we store a component defines just how fine a control we have over its domain. The smallest data type possible is *char*, which means one byte or 8 bits. This may be unsigned (able to store values from 0 to 255) or signed (values from -128 to +127). Although in the case of three components this already gives 16 million representable colors (as with RGB), we may acquire an even finer control by using the float (4 byte = 32 bit) or double (8 byte = 64 bit) data types for each component. Nevertheless, remember that increasing the size of a component also increases the size of the whole picture in memory.
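+
+As a rough sketch of how these choices show up in code (assuming the usual OpenCV includes and ``using namespace cv``; the file name is only an example):
+
+.. code-block:: cpp
+
+   Mat bgr = imread("fruits.jpg", CV_LOAD_IMAGE_COLOR); // 8-bit, 3 channel BGR image
+
+   Mat gray, hsv;
+   cvtColor(bgr, gray, CV_BGR2GRAY); // grayscale: a single 8-bit channel
+   cvtColor(bgr, hsv, CV_BGR2HSV);   // hue, saturation, value components
+
+   Mat bgr32f;
+   bgr.convertTo(bgr32f, CV_32F, 1.0/255); // the same pixels stored as floats in [0, 1]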
+
+Creating a *Mat* object explicitly
+==================================
+
+In the :ref:`Load_Save_Image` tutorial you have already seen how to write a matrix to an image file by using the :readWriteImageVideo:`imwrite() <imwrite>` function. For debugging purposes, however, it's much more convenient to see the actual values. You can achieve this via the << operator of *Mat*, but be aware that this only works for two dimensional matrices.
+
+Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a *Mat* object in multiple ways (a consolidated sketch of these constructions follows right after the list):
+
+.. container:: enumeratevisibleitemswithsquare
+
+ + :basicstructures:`Mat() <mat-mat>` Constructor
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 27-28
+
+ .. image:: images/MatBasicContainerOut1.png
+ :alt: Demo image of the matrix output
+ :align: center
+
+ For two dimensional and multichannel images we first define their size: row and column count wise.
+
+ Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions made according to the following convention:
+
+ .. code-block:: cpp
+
+ CV_[The number of bits per item][Signed or Unsigned][Type Prefix]C[The channel number]
+
+ For instance, *CV_8UC3* means we use unsigned char types that are 8 bits long, and each pixel has three of these to form the three channels. Types like this are predefined for up to four channels. The :basicstructures:`Scalar <scalar>` is a four-element short vector; specify it and you can initialize all matrix points with a custom value. If you need more than four channels, you can create the type with the macro above, putting the channel number in parentheses, as you can see below.
+
+ + Use C\\C++ arrays and initialize via constructor
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 35-36
+
+ The example above shows how to create a matrix with more than two dimensions. Specify the number of dimensions, then pass a pointer containing the size of each dimension, and the rest remains the same.
+
+
+ + Create a header for an already existing IplImage pointer:
+
+ .. code-block:: cpp
+
+ IplImage* img = cvLoadImage("greatwave.png", 1);
+ Mat mtx(img); // convert IplImage* -> Mat
+
+ + The :basicstructures:`create() <mat-create>` function:
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 31-32
+
+ .. image:: images/MatBasicContainerOut2.png
+ :alt: Demo image of the matrix output
+ :align: center
+
+ You cannot initialize the matrix values with this construction. It will only reallocate its matrix data memory if the new size does not fit into the old one.
+
+ + MATLAB style initializers: :basicstructures:`zeros() <mat-zeros>`, :basicstructures:`ones() <mat-ones>`, :basicstructures:`eye() <mat-eye>`. Specify the size and data type to use:
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 40-47
+
+ .. image:: images/MatBasicContainerOut3.png
+ :alt: Demo image of the matrix output
+ :align: center
+
+ + For small matrices you may use comma separated initializers:
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 50-51
+
+ .. image:: images/MatBasicContainerOut6.png
+ :alt: Demo image of the matrix output
+ :align: center
+
+ + Create a new header for an existing *Mat* object and :basicstructures:`clone() <mat-clone>` or :basicstructures:`copyTo() <mat-copyto>` it.
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 53-54
+
+ .. image:: images/MatBasicContainerOut7.png
+ :alt: Demo image of the matrix output
+ :align: center
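+
+Here is the consolidated sketch promised above. It only approximates what the referenced sample lines do (assuming ``using namespace cv``), so treat the concrete sizes and values as assumptions:
+
+.. code-block:: cpp
+
+   Mat M(2, 2, CV_8UC3, Scalar(0, 0, 255));   // 2x2, 3 channels, every pixel set via a Scalar
+
+   int sizes[3] = {2, 2, 2};
+   Mat L(3, sizes, CV_8UC1, Scalar::all(0));  // more than two dimensions
+
+   M.create(4, 4, CV_8UC2);                   // create(): no value initialization
+
+   Mat E = Mat::eye(4, 4, CV_64F);            // MATLAB style initializers
+   Mat O = Mat::ones(2, 2, CV_32F);
+   Mat Z = Mat::zeros(3, 3, CV_8UC1);
+
+   Mat K = (Mat_<double>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0); // comma separated initializer
+
+   Mat RowClone = K.row(1).clone();           // new header for a row, then a deep copy of it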
+
+Print out formatting
+====================
+
+.. note::
+
+ You can fill a matrix with random values using the :operationsOnArrays:`randu() <randu>` function. You need to give the lower and upper limits for the random values:
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 57-58
+
+In the above examples you could see the default formatting option. Nevertheless, OpenCV allows you to format your matrix output to follow the rules of the systems below (the calls behind these options are sketched right after the list):
+
+.. container:: enumeratevisibleitemswithsquare
+
+ + Default
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 61
+
+ .. image:: images/MatBasicContainerOut8.png
+ :alt: Default Output
+ :align: center
+
+ + Python
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 62
+
+ .. image:: images/MatBasicContainerOut16.png
+ :alt: Default Output
+ :align: center
+
+ + Comma separated values (CSV)
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 64
+
+ .. image:: images/MatBasicContainerOut10.png
+ :alt: Default Output
+ :align: center
+
+ + Numpy
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 63
+
+ .. image:: images/MatBasicContainerOut9.png
+ :alt: Default Output
+ :align: center
+
+ + C
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 65
+
+ .. image:: images/MatBasicContainerOut11.png
+ :alt: Default Output
+ :align: center
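+
+As mentioned above, here is a rough sketch of the calls behind these formatting options, together with the random fill from the note (assuming ``using namespace cv`` and ``std``; the matrix size and value range are assumptions, and the exact sample may differ):
+
+.. code-block:: cpp
+
+   Mat R = Mat(3, 2, CV_8UC3);
+   randu(R, Scalar::all(0), Scalar::all(255)); // fill with uniformly distributed random values
+
+   cout << "R (default) = " << endl << R                   << endl << endl;
+   cout << "R (python)  = " << endl << format(R, "python") << endl << endl;
+   cout << "R (csv)     = " << endl << format(R, "csv")    << endl << endl;
+   cout << "R (numpy)   = " << endl << format(R, "numpy")  << endl << endl;
+   cout << "R (c)       = " << endl << format(R, "C")      << endl << endl;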
+
+Print for other common items
+============================
+
+OpenCV also offers support for printing other common data structures via the << operator, such as the ones below (a consolidated sketch follows the list):
+
+.. container:: enumeratevisibleitemswithsquare
+
+ + 2D Point
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 67-68
+
+ .. image:: images/MatBasicContainerOut12.png
+ :alt: Default Output
+ :align: center
+
+
+ + 3D Point
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 70-71
+
+ .. image:: images/MatBasicContainerOut13.png
+ :alt: Default Output
+ :align: center
+
+ + std::vector via cv::Mat
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 74-77
+
+ .. image:: images/MatBasicContainerOut14.png
+ :alt: Default Output
+ :align: center
+
+ + std::vector of points
+
+ .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+ :language: cpp
+ :tab-width: 4
+ :lines: 79-83
+
+ .. image:: images/MatBasicContainerOut15.png
+ :alt: Default Output
+ :align: center
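+
+A rough, self-contained sketch of these print statements (assuming ``using namespace cv`` and ``std``; the concrete values are only examples):
+
+.. code-block:: cpp
+
+   Point2f P(5, 1);
+   cout << "Point (2D) = " << P << endl;
+
+   Point3f P3f(2, 6, 7);
+   cout << "Point (3D) = " << P3f << endl;
+
+   vector<float> v;
+   v.push_back((float)CV_PI); v.push_back(2); v.push_back(3.01f);
+   cout << "Vector of floats via Mat = " << Mat(v) << endl;
+
+   vector<Point2f> points(20);
+   for (size_t i = 0; i < points.size(); ++i)
+       points[i] = Point2f((float)(i * 5), (float)(i % 7));
+   cout << "A vector of 2D Points = " << points << endl;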
+
+Most of the samples here have been included in a small console application. You can download it from :download:`here <../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp>` or find it in the core section of the cpp samples.
+
+You can find a quick video demonstration of this on `YouTube <https://www.youtube.com/watch?v=1tibU7vGWpk>`_.
+
+.. raw:: html
+
+ <div align="center">
+ <iframe title="Install OpenCV by using its source files - Part 1" width="560" height="349" src="http://www.youtube.com/embed/1tibU7vGWpk?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
+ </div>
* In this tutorial, we intend to use *random* values for the drawing parameters. Also, we intend to populate our image with a big number of geometric figures. Since we will be initializing them in a random fashion, this process will be automatic and made by using *loops* .
- * This code is in your OpenCV sample folder. Otherwise you can grab it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp>`_ .
+ * This code is in your OpenCV sample folder. Otherwise you can grab it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp>`_.
Explanation
============
.. toctree::
:hidden:
- ../mat - the basic image container/mat - the basic image container
+ ../mat_the_basic_image_container/mat_the_basic_image_container
../how_to_scan_images/how_to_scan_images
../mat-mask-operations/mat-mask-operations
../adding_images/adding_images
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp>`_.
.. code-block:: cpp
Code
======
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp>`_.
.. code-block:: cpp
* Applies 4 different kinds of filters (explained in Theory) and show the filtered images sequentially
* **Downloadable code**:
- Click `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp>`_
+ Click `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp>`_
* **Code at glance:**
* **Downloadable code**:
- a. Click `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp>`_ for the basic version (explained in this tutorial).
- b. For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the skin area) you can check the `improved demo <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp>`_
- c. ...or you can always check out the classical `camshiftdemo <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/camshiftdemo.cpp>`_ in samples.
+ a. Click `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp>`_ for the basic version (explained in this tutorial).
+ b. For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the skin area) you can check the `improved demo <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp>`_
+ c. ...or you can always check out the classical `camshiftdemo <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/camshiftdemo.cpp>`_ in samples.
* **Code at glance:**
* Plot the three histograms in a window
* **Downloadable code**:
- Click `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp>`_
+ Click `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp>`_
* **Code at glance:**
* Display the numerical matching parameters obtained.
* **Downloadable code**:
- Click `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp>`_
+ Click `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp>`_
* **Code at glance:**
* Display the source and equalized images in a window.
* **Downloadable code**:
- Click `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp>`_
+ Click `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp>`_
* **Code at glance:**
* Draw a rectangle around the area corresponding to the highest match
* **Downloadable code**:
- Click `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp>`_
+ Click `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp>`_
* **Code at glance:**
* Applies the *Canny Detector* and generates a **mask** (bright lines representing the edges on a black background).
* Applies the mask obtained on the original image and display it in a window.
-#. The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp>`_
+#. The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp>`_.
.. code-block:: cpp
The user chooses either option by pressing 'c' (constant) or 'r' (replicate)
* The program finishes when the user presses 'ESC'
-#. The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp>`_
+#. The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp>`_.
.. code-block:: cpp
* The filter output (with each kernel) will be shown during 500 milliseconds
-#. The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp>`_
+#. The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp>`_.
.. code-block:: cpp
* Display the detected circle in a window.\r
\r
.. |TutorialHoughCirclesSimpleDownload| replace:: here\r
- .. _TutorialHoughCirclesSimpleDownload: https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/houghlines.cpp\r
+ .. _TutorialHoughCirclesSimpleDownload: http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/houghlines.cpp\r
.. |TutorialHoughCirclesFancyDownload| replace:: here\r
- .. _TutorialHoughCirclesFancyDownload: https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp\r
+ .. _TutorialHoughCirclesFancyDownload: http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp\r
\r
#. The sample code that we will explain can be downloaded from |TutorialHoughCirclesSimpleDownload|_. A slightly fancier version (which shows both Hough standard and probabilistic with trackbars for changing the threshold values) can be found |TutorialHoughCirclesFancyDownload|_.\r
\r
======
.. |TutorialHoughLinesSimpleDownload| replace:: here\r
-.. _TutorialHoughLinesSimpleDownload: https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/houghlines.cpp\r
+.. _TutorialHoughLinesSimpleDownload: http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/houghlines.cpp\r
.. |TutorialHoughLinesFancyDownload| replace:: here\r
-.. _TutorialHoughLinesFancyDownload: https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp\r
+.. _TutorialHoughLinesFancyDownload: http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp\r
#. **What does this program do?**
* Applies a Laplacian operator to the grayscale image and stores the output image
* Display the result in a window
-#. The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp>`_
+#. The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp>`_.
.. code-block:: cpp
* Each second, apply 1 of 4 different remapping processes to the image and display them indefinitely in a window.
* Wait for the user to exit the program
-#. The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp>`_
+#. The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp>`_.
.. code-block:: cpp
* Applies the *Sobel Operator* and generates as output an image with the detected *edges* bright on a darker background.
-#. The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp>`_
+#. The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp>`_.
.. code-block:: cpp
* Applies a Rotation to the image after being transformed. This rotation is with respect to the image center
* Waits until the user exits the program
-#. The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp>`_
+#. The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp>`_.
.. code-block:: cpp
Code
======
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp>`_.
.. code-block:: cpp
Code
======
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp>`_.
.. code-block:: cpp
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp>`_
+This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp>`_.
.. code-block:: cpp
Code
======
-The tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Threshold.cpp>`_
+The tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Threshold.cpp>`_.
.. code-block:: cpp
Source Code
===========
-Download the source code from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp>`_.
+Download the source code from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp>`_.
.. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp
:language: cpp
-.. _howToWriteTutorial:\r\rHow to write a tutorial for OpenCV?\r***********************************\r\rOkay, so assume you have just finished a project of yours implementing something based on OpenCV and you want to present/share it with the community. Luckily, OpenCV is an *open source project*. This means that in theory anyone has access to the full source code and may extend it. While making a robust and practical library (like OpenCV) is great, the success of a library also depends on how user friendly it is. To improve on this aspect, the OpenCV team has already been listening to user feedback from its :opencv_group:`Yahoo user group <>` and by making samples you can find in the source directories sample folder. The addition of the tutorials (in both online and PDF format) is an extension of these efforts. \r\rGoal\r====\r\r.. _reST: http://docutils.sourceforge.net/rst.html\r.. |reST| replace:: reStructuredText\r.. |Sphinx| replace:: Sphinx\r.. _Sphinx: http://sphinx.pocoo.org/\r\rThe tutorials are just as an important part of the library as the implementation of those crafty data structures and algorithms you can find in OpenCV. Therefore, the source codes for the tutorials are part of the library. And yes, I meant source codes. The reason for this formulation is that the tutorials are written by using the |Sphinx|_ documentation generation system. This is based on the popular python documentation system called |reST|_ (reST). ReStructuredText is a really neat language that by using a few simple conventions (indentation, directives) and emulating old school e-mail writing techniques (text only) tries to offer a simple way to create and edit documents. Sphinx extends this with some new features and creates the resulting document in both HTML (for web) and PDF (for offline usage) format.\r\rUsually, an OpenCV tutorial has the following parts:\r\r1. A source code demonstration of an OpenCV feature: \r \r a. One or more CPP, Python, Java or other type of files depending for what OpenCV offers support and for what language you make the tutorial. \r #. Occasionaly, input resource files required for running your tutorials application.\r\r\r#. A table of content entry (so people may easily find the tutorial): \r \r a. Adding your stuff to the tutorials table of content (**reST** file). \r #. Add an image file near the TOC entry. \r\r\r#. The content of the tutorial itself: \r \r a. The **reST** text of the tutorial\r #. Images following the idea that "*A picture is worth a thousand words*". \r #. For more complex demonstrations you may create a video.\r\rAs you can see you will need at least some basic knowledge of the *reST* system in order to complete the task at hand with success. However, don't worry *reST* (and *Sphinx*) was made with simplicity in mind. It is easy to grasp its basics. I found that the `OpenAlea documentations introduction on this subject <http://openalea.gforge.inria.fr/doc/openalea/doc/_build/html/source/tutorial/rest_syntax.html>`_ (or the `Thomas Cokelaer one <http://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html>`_ ) should enough for this. If for some directive or feature you need a more in-depth description look it up in the official |reST|_ help files or at the |Sphinx|_ documentation.\r\rIn our world achieving some tasks is possible in multiple ways. However, some of the roads to take may have obvious or hidden advantages over others. Then again, in some other cases it may come down to just simple user preference. 
Here, I'll present how I decided to write the tutorials, based on my personal experience. If for some of them you know a better solution and you can back it up feel free to use that. I've nothing against it, as long as it gets the job done in an elegant fashion. \r\rNow the best would be if you could make the integration yourself. For this you need first to have the source code. I recommend following the guides for your operating system on acquiring OpenCV sources. For Linux users look :ref:`here <Linux-Installation>` and for :ref:`Windows here <Windows_Installation>`. You must also install python and sphinx with its dependencies in order to be able to build the documentation. \r\rOnce you have downloaded the repository to your hard drive you can take a look in the OpenCV directory to make sure you have both the samples and doc folder present. Anyone may download the trunk source files from :file:`/svn/opencv/trunk/` . Nevertheless, not everyone has upload (commit/submit) rights. This is to protect the integrity of the library. If you plan doing more than one tutorial, and would like to have an account with commit user rights you should first register an account at https://code.ros.org/ and then contact dr. Gary Bradski at -delete-bradski@-delete-willowgarage.com. Otherwise, you can just send the resulting files to us via the :opencv_group:`Yahoo user group <>` or to me at -delete-bernat@-delete-primeranks.net and I'll add it. If you have questions, suggestions or constructive critics I will gladly listen to them. If you send it to the OpenCV group please tag its subject with a **[Tutorial]** entry. \r\rFormat the Source Code\r======================\r\rBefore I start this let it be clear: the main goal is to have a working sample code. However, for your tutorial to be of a top notch quality you should follow a few guide lines I am going to present here. \r\rIn case you have an application by using the older interface (with *IplImage*, *CVMat*, *cvLoadImage* and such) consider migrating it to the new C++ interface. The tutorials are intended to be an up to date help for our users. And as of OpenCV 2 the OpenCV emphasis on using the less error prone and clearer C++ interface. Therefore, if possible please convert your code to the C++ interface. For this it may help to read the :ref:`InteroperabilityWithOpenCV1` tutorial. However, once you have an OpenCV 2 working code, then you should make your source code snippet as easy to read as possible. Here're a couple of advices for this: \r\r.. container:: enumeratevisibleitemswithsquare\r\r + Add a standard output with the description of what your program does. Keep it short and yet, descriptive. This output is at the start of the program. In my example files this usually takes the form of a *help* function containing the output. This way both the source file viewer and application runner can see what all is about in your sample. Here's an instance of this: \r\r .. code-block:: cpp\r\r void help()\r {\r cout\r << "--------------------------------------------------------------------------" << endl\r << "This program shows how to write video files. You can extract the R or G or B color channel "\r << " of the input video. You can choose to use the source codec (Y) or select a custom one. 
(N)"<< endl\r << "Usage:" << endl\r << "./video-write inputvideoName [ R | G | B] [Y | N]" << endl\r << "--------------------------------------------------------------------------" << endl\r << endl;\r }\r // ...\r int main(int argc, char *argv[], char *window_name)\r {\r help();\r // here comes the actual source code\r }\r\r Additionally, finalize the description with a short usage guide. This way the user will know how to call your programs, what leads us to the next point. \r\r + Prefer command line argument controlling instead of hard coded one. If your program has some variables that may be changed use command line arguments for this. The tutorials, can be a simple try-out ground for the user. If you offer command line controlling for the input image (for example), then you offer the possibility for the user to try it out with his/her own images, without the need to mess in the source code. In the upper example you can see that the input image, channel and codec selection may all be changed from the command line. Just compile the program and run it with your own input arguments. \r\r + Be as verbose as possible. There is no shame in filling the source code with comments. This way the more advanced user may figure out what's happening right from the sample code. This advice goes for the output console too. Specify to the user what's happening. Never leave the user hanging there and thinking on: "Is this program now crashing or just doing some computationally intensive task?." So, if you do a training task that may take some time, make sure you print out a message about this before starting and after finishing it. \r\r + Throw out unnecessary stuff from your source code. This is a warning to not take the previous point too seriously. Balance is the key. If it's something that can be done in a fewer lines or simpler than that's the way you should do it. Nevertheless, if for some reason you have such sections notify the user why you have chosen to do so. Keep the amount of information as low as possible, while still getting the job done in an elegant way. \r\r + Put your sample file into the :file:`opencv/samples/cpp/tutorial_code/sectionName` folder. If you write a tutorial for other languages than cpp, then change that part of the path. Before completing this you need to decide that to what section (module) does your tutorial goes. Think about on what module relies most heavily your code and that is the one to use. If the answer to this question is more than one modules then the *general* section is the one to use. For finding the *opencv* directory open up your file system and navigate where you downloaded our repository.\r\r + If the input resources are hard to acquire for the end user consider adding a few of them to the :file:`opencv/samples/cpp/tutorial_code/images`. Make sure that who reads your code can try it out!\r\rAdd the TOC entry\r=================\r\rFor this you will need to know some |reST|_. There is no going around this. |reST|_ files have **rst** extensions. However, these are simple text files. Use any text editor you like. Finding a text editor that offers syntax highlighting for |reST|_ was quite a challenge at the time of writing this tutorial. In my experience, `Intype <http://intype.info/>`_ is a solid option on Windows, although there is still place for improvement. \r\rAdding your source code to a table of content is important for multiple reasons. 
First and foremost this will allow for the user base to find your tutorial from our websites tutorial table of content. Secondly, if you omit this *Sphinx* will throw a warning that your tutorial file isn't part of any TOC tree entry. And there is nothing more than the developer team hates than an ever increasing warning/error list for their builds. *Sphinx* also uses this to build up the previous-back-up buttons on the website. Finally, omitting this step will lead to that your tutorial will **not** be added to the PDF version of the tutorials. \r\rNavigate to the :file:`opencv/doc/tutorials/section/table_of_content_section` folder (where the section is the module to which you're adding the tutorial). Open the *table_of_content_section* file. Now this may have two forms. If no prior tutorials are present in this section that there is a template message about this and has the following form:\r\r.. code-block:: rst\r\r .. _Table-Of-Content-Section:\r\r Section title\r -----------------------------------------------------------\r\r Description about the section.\r\r .. include:: ../../definitions/noContent.rst\r\r .. raw:: latex\r\r \pagebreak\r\rThe first line is a reference to the section title in the reST system. The section title will be a link and you may refer to it via the ``:ref:`` directive. The *include* directive imports the template text from the definitions directories *noContent.rst* file. *Sphinx* does not creates the PDF from scratch. It does this by first creating a latex file. Then creates the PDF from the latex file. With the *raw* directive you can directly add to this output commands. Its unique argument is for what kind of output to add the content of the directive. For the PDFs it may happen that multiple sections will overlap on a single page. To avoid this at the end of the TOC we add a *pagebreak* latex command, that hints to the LATEX system that the next line should be on a new page. \r\rIf you have one of this, try to transform it to the following form: \r\r.. include:: ../../definitions/tocDefinitions.rst \r\r.. code-block:: rst\r\r .. _Table-Of-Content-Section:\r\r Section title\r -----------------------------------------------------------\r\r .. include:: ../../definitions/tocDefinitions.rst\r\r +\r .. tabularcolumns:: m{100pt} m{300pt}\r .. cssclass:: toctableopencv\r\r =============== ======================================================\r |MatBasicIma| **Title:** :ref:`matTheBasicImageContainer`\r\r *Compatibility:* > OpenCV 2.0\r\r *Author:* |Author_BernatG|\r\r You will learn how to store images in the memory and how to print out their content to the console.\r\r =============== =====================================================\r\r .. |MatBasicIma| image:: images/matTheBasicImageStructure.jpg\r :height: 90pt\r :width: 90pt\r\r .. raw:: latex\r\r \pagebreak\r\r .. toctree::\r :hidden:\r\r ../mat - the basic image container/mat - the basic image container\r\rIf this is already present just add a new section of the content between the include and the raw directives (excluding those lines). Here you'll see a new include directive. This should be present only once in a TOC tree and the reST file contains the definitions of all the authors contributing to the OpenCV tutorials. We are a multicultural community and some of our name may contain some funky characters. However, reST **only supports** ANSI characters. Luckily we can specify Unicode characters with the *unicode* directive. Doing this for all of your tutorials is a troublesome procedure. 
+.. _howToWriteTutorial:\r\rHow to write a tutorial for OpenCV?\r***********************************\r\rOkay, so assume you have just finished a project of yours implementing something based on OpenCV and you want to present/share it with the community. Luckily, OpenCV is an *open source project*. This means that in theory anyone has access to the full source code and may extend it. While making a robust and practical library (like OpenCV) is great, the success of a library also depends on how user friendly it is. To improve on this aspect, the OpenCV team already listens to user feedback on its :opencv_group:`Yahoo user group <>` and provides the samples you can find in the source directory's samples folder. The addition of the tutorials (in both online and PDF format) is an extension of these efforts. \r\rGoal\r====\r\r.. _reST: http://docutils.sourceforge.net/rst.html\r.. |reST| replace:: reStructuredText\r.. |Sphinx| replace:: Sphinx\r.. _Sphinx: http://sphinx.pocoo.org/\r\rThe tutorials are just as important a part of the library as the implementation of the crafty data structures and algorithms you can find in OpenCV. Therefore, the source code of the tutorials is part of the library. And yes, I do mean source code: the tutorials are written using the |Sphinx|_ documentation generation system, which is based on the popular Python documentation markup called |reST|_ (reST). reStructuredText is a neat language that, by using a few simple conventions (indentation, directives) and emulating old-school, text-only e-mail writing, offers a simple way to create and edit documents. Sphinx extends this with some new features and creates the resulting document in both HTML (for the web) and PDF (for offline usage) formats.\r\rUsually, an OpenCV tutorial has the following parts:\r\r1. A source code demonstration of an OpenCV feature: \r \r a. One or more CPP, Python, Java or other files, depending on which languages OpenCV supports and which language you write the tutorial for. \r #. Occasionally, input resource files required for running your tutorial's application.\r\r\r#. A table of contents entry (so people may easily find the tutorial): \r \r a. Adding your entry to the tutorials' table of contents (**reST** file). \r #. Adding an image file near the TOC entry. \r\r\r#. The content of the tutorial itself: \r \r a. The **reST** text of the tutorial.\r #. Images, following the idea that "*A picture is worth a thousand words*". \r #. For more complex demonstrations you may create a video.\r\rAs you can see, you will need at least some basic knowledge of the *reST* system in order to complete the task at hand successfully. However, don't worry: *reST* (and *Sphinx*) were made with simplicity in mind, and the basics are easy to grasp. I found that the `OpenAlea documentation's introduction to the subject <http://openalea.gforge.inria.fr/doc/openalea/doc/_build/html/source/tutorial/rest_syntax.html>`_ (or the `Thomas Cokelaer one <http://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html>`_) should be enough for this. If you need a more in-depth description of some directive or feature, look it up in the official |reST|_ help files or in the |Sphinx|_ documentation.\r\rMost tasks can be achieved in multiple ways; some of the possible roads have obvious or hidden advantages over others, and in other cases it comes down to simple personal preference. 
Here, I'll present how I decided to write the tutorials, based on my personal experience. If you know a better solution for some of them and you can back it up, feel free to use it. I've nothing against it, as long as it gets the job done in an elegant fashion. \r\rThe best option is if you can make the integration yourself. For this you first need to have the source code. I recommend following the guides for your operating system on acquiring the OpenCV sources. Linux users should look :ref:`here <Linux-Installation>` and Windows users :ref:`here <Windows_Installation>`. You must also install Python and Sphinx with their dependencies in order to be able to build the documentation. \r\rOnce you have downloaded the repository to your hard drive, take a look in the OpenCV directory to make sure you have both the samples and doc folders present. Anyone may download the trunk source files from :file:`/svn/opencv/trunk/`. Nevertheless, not everyone has upload (commit/submit) rights; this is to protect the integrity of the library. If you plan to write more than one tutorial and would like an account with commit rights, you should first register an account at http://code.opencv.org/ and then contact Dr. Gary Bradski at -delete-bradski@-delete-willowgarage.com. Otherwise, you can just send the resulting files to us via the :opencv_group:`Yahoo user group <>` or to me at -delete-bernat@-delete-primeranks.net and I'll add them. If you have questions, suggestions or constructive criticism, I will gladly listen. If you send your tutorial to the OpenCV group, please tag its subject with a **[Tutorial]** entry. \r\rFormat the Source Code\r======================\r\rBefore I start, let it be clear: the main goal is to have working sample code. However, for your tutorial to be of top-notch quality you should follow the few guidelines I present here. \r\rIf your application uses the older interface (with *IplImage*, *CvMat*, *cvLoadImage* and such), consider migrating it to the new C++ interface. The tutorials are intended to be up-to-date help for our users, and as of OpenCV 2 the emphasis is on the less error-prone and clearer C++ interface. Therefore, if possible please convert your code to the C++ interface. For this it may help to read the :ref:`InteroperabilityWithOpenCV1` tutorial. Once you have working OpenCV 2 code, you should make your source code snippet as easy to read as possible. Here are a couple of tips for this: \r\r.. container:: enumeratevisibleitemswithsquare\r\r + Add a standard output with the description of what your program does. Keep it short and yet descriptive. This output is at the start of the program. In my example files this usually takes the form of a *help* function containing the output. This way both someone reading the source file and someone running the application can see what your sample is about. Here's an instance of this: \r\r .. code-block:: cpp\r\r void help()\r {\r cout\r << "--------------------------------------------------------------------------" << endl\r << "This program shows how to write video files. You can extract the R or G or B color channel "\r << " of the input video. You can choose to use the source codec (Y) or select a custom one. (N)"<< endl\r << "Usage:" << endl\r << "./video-write inputvideoName [ R | G | B] [Y | N]" << endl\r << "--------------------------------------------------------------------------" << endl\r << endl;\r }\r // ...\r int main(int argc, char *argv[])\r {\r help();\r // here comes the actual source code\r }\r\r Additionally, finalize the description with a short usage guide. This way the user will know how to call your program, which leads us to the next point. \r\r + Prefer command line arguments to hard-coded values. If your program has some variables that may be changed, use command line arguments for this. The tutorials can be a simple try-out ground for the user. If you offer command line control over the input image (for example), then you give the user the possibility to try it out with his/her own images, without the need to dig into the source code. In the example above you can see that the input image, channel and codec selection may all be changed from the command line. Just compile the program and run it with your own input arguments. \r\r + Be as verbose as possible. There is no shame in filling the source code with comments. This way the more advanced user may figure out what's happening right from the sample code. This advice goes for the console output too. Tell the user what's happening. Never leave the user hanging there and thinking: "Is this program now crashing or just doing some computationally intensive task?" So, if you do a training task that may take some time, make sure you print out a message about this before starting and after finishing it. \r\r + Throw out unnecessary stuff from your source code. This is a warning not to take the previous point too seriously. Balance is the key. If something can be done in fewer lines or more simply, then that is the way you should do it. Nevertheless, if for some reason you keep such sections, tell the user why you have chosen to do so. Keep the amount of information as low as possible, while still getting the job done in an elegant way. \r\r + Put your sample file into the :file:`opencv/samples/cpp/tutorial_code/sectionName` folder. If you write a tutorial for a language other than cpp, change that part of the path accordingly. Before completing this you need to decide to which section (module) your tutorial belongs. Think about which module your code relies on most heavily, and use that one. If the answer is more than one module, then use the *general* section. To find the *opencv* directory, open up your file system and navigate to where you downloaded our repository.\r\r + If the input resources are hard to acquire for the end user, consider adding a few of them to the :file:`opencv/samples/cpp/tutorial_code/images` folder. Make sure that whoever reads your code can try it out!\r\rAdd the TOC entry\r=================\r\rFor this you will need to know some |reST|_. There is no getting around this. |reST|_ files have the **rst** extension. However, these are simple text files. Use any text editor you like. Finding a text editor that offers syntax highlighting for |reST|_ was quite a challenge at the time of writing this tutorial. In my experience, `Intype <http://intype.info/>`_ is a solid option on Windows, although there is still room for improvement. \r\rAdding your source code to a table of contents is important for multiple reasons. 
First and foremost, this will allow the user base to find your tutorial from our website's tutorial table of contents. Secondly, if you omit it, *Sphinx* will throw a warning that your tutorial file isn't part of any TOC tree entry, and there is nothing the developer team hates more than an ever-increasing warning/error list for their builds. *Sphinx* also uses this to build up the previous-next-up buttons on the website. Finally, omitting this step means that your tutorial will **not** be added to the PDF version of the tutorials. \r\rNavigate to the :file:`opencv/doc/tutorials/section/table_of_content_section` folder (where *section* is the module to which you're adding the tutorial). Open the *table_of_content_section* file. This may have one of two forms. If no prior tutorials are present in this section, there is a template message about this, which has the following form:\r\r.. code-block:: rst\r\r .. _Table-Of-Content-Section:\r\r Section title\r -----------------------------------------------------------\r\r Description about the section.\r\r .. include:: ../../definitions/noContent.rst\r\r .. raw:: latex\r\r \pagebreak\r\rThe first line is a reference to the section title in the reST system. The section title will be a link and you may refer to it via the ``:ref:`` directive. The *include* directive imports the template text from the definitions directory's *noContent.rst* file. *Sphinx* does not create the PDF from scratch: it first creates a LaTeX file and then creates the PDF from that. With the *raw* directive you can add commands directly to this output. Its single argument specifies the kind of output to which the content of the directive is added. For the PDFs it may happen that multiple sections overlap on a single page. To avoid this, at the end of the TOC we add a *pagebreak* LaTeX command that hints to the LaTeX system that the next line should start on a new page. \r\rIf you have such a file, try to transform it into the following form: \r\r.. include:: ../../definitions/tocDefinitions.rst \r\r.. code-block:: rst\r\r .. _Table-Of-Content-Section:\r\r Section title\r -----------------------------------------------------------\r\r .. include:: ../../definitions/tocDefinitions.rst\r\r +\r .. tabularcolumns:: m{100pt} m{300pt}\r .. cssclass:: toctableopencv\r\r =============== ======================================================\r |MatBasicIma| **Title:** :ref:`matTheBasicImageContainer`\r\r *Compatibility:* > OpenCV 2.0\r\r *Author:* |Author_BernatG|\r\r You will learn how to store images in the memory and how to print out their content to the console.\r\r =============== ======================================================\r\r .. |MatBasicIma| image:: images/matTheBasicImageStructure.jpg\r :height: 90pt\r :width: 90pt\r\r .. raw:: latex\r\r \pagebreak\r\r .. toctree::\r :hidden:\r\r ../mat - the basic image container/mat - the basic image container\r\rIf this is already present, just add your new TOC entry between the include and the raw directives (excluding those lines). Here you'll see a new include directive. It should be present only once in a TOC tree; the included reST file contains the definitions of all the authors contributing to the OpenCV tutorials. We are a multicultural community and some of our names may contain funky characters. However, reST **only supports** ASCII characters. Luckily, we can specify Unicode characters with the *unicode* directive. Doing this in every one of your tutorials would be a troublesome procedure. 
Therefore, the tocDefinitions file contains the definition of your author name. Add it here once and afterwards just use the replace construction. For example, here's the definition for my name: \r\r.. code-block:: rst\r\r .. |Author_BernatG| unicode:: Bern U+00E1 t U+0020 G U+00E1 bor\r\r``|Author_BernatG|`` is the text definition's alias. I can use it later to insert the definition, as I've done in the TOC's *Author* part. After the ``::`` and a space you start the definition. If you want to add a Unicode character (non-ASCII), leave an empty space and specify it in the format U+(Unicode code). To find the Unicode code of a character I recommend the `FileFormat <http://www.fileformat.info>`_ website's service. Spaces are trimmed from the definition, so we add a space by its Unicode character (U+0020). \r\rEverything up to the *raw* directive is a TOC tree entry. Here's how a TOC entry looks: \r\r+\r .. tabularcolumns:: m{100pt} m{300pt}\r .. cssclass:: toctableopencv\r\r =============== ======================================================\r |MatBasicIma| **Title:** :ref:`matTheBasicImageContainer`\r\r *Compatibility:* > OpenCV 2.0\r\r *Author:* |Author_BernatG|\r\r You will learn how to store images in the memory and how to print out their content to the console.\r\r =============== ======================================================\r\r .. |MatBasicIma| image:: images/matTheBasicImageStructure.jpg\r :height: 90pt\r :width: 90pt\r\rAs you can see, we have an image to the left and a description box to the right. To create the two boxes we use a table with two columns and a single row: the image goes in the left column and the description in the right one. However, the image directive is way too long to fit in a column. Therefore, we need to use the substitution definition system, and we add this definition after the TOC tree. All images for the TOC tree are to be put in the images folder next to its |reST|_ file. We use the point measurement system because we are also creating PDFs. PDFs are printable documents, where there is no such thing as pixels (px), just points (pt). And while space is generally no problem for web pages (we have monitors with **huge** resolutions), the size of the paper (A4 or letter) is constant and will stay so for a long time. Therefore, size constraints matter more for the PDF than for the generated HTML code. \r\rYour images should be as small as possible, while still offering the intended information to the user. Remember that the tutorial will become part of the OpenCV source code. If you add large images (that is, large file sizes), you just increase the size of the repository pointlessly. If someone wants to download it later, the download time will be that much longer. Not to mention the larger PDF size for the tutorials and the longer load time for the web pages. In terms of pixels a TOC image should not be larger than 120 X 120 pixels. Resize your images if they are larger! \r\r.. note::\r\r If you add a larger image and specify a smaller image size, *Sphinx* will not resize it. At build time it will include the full-size image, and the resizing will be done by your browser after the image is loaded. A 120 X 120 image is somewhere below 10KB. If you add a 110KB image, you have just pointlessly added 100KB of extra data to transfer over the internet for every user!\r\rGenerally speaking, you shouldn't need to specify your image sizes at all (excluding the TOC entries). 
If no size is specified, *Sphinx* will use the size of the image itself (so no resizing occurs). Then again, if for some reason you do decide to specify a size, it should be the **width** of the image rather than its height. The reason for this again goes back to the PDFs. On a PDF page the height is larger than the width, and in the PDF the images will not be resized. If you specify a size that does not fit on the page, then whatever does not fit **will be cut off**. When creating the images for your tutorial you should try to keep the image widths below 500 pixels, and calculate with a page width of around 400 points when specifying image widths. \r\rThe image format depends on the content of the image. If you have a complex scene (many random-like colors), use *jpg*. Otherwise, prefer *png*. There are even some tools out there that optimize the size of *PNG* images, such as `PNGGauntlet <http://pnggauntlet.com/>`_. Use them to make your images as small as possible in size. \r\rNow, in the right-side column of the table we add the information about the tutorial: \r\r.. container:: enumeratevisibleitemswithsquare\r\r + The first line is the title of the tutorial. However, there is no need to specify it explicitly; we use the reference system. We'll start our tutorial with a reference specification, just like this TOC entry does with its ``.. _Table-Of-Content-Section:``. If this is followed by a title (pointed out by the following line of - characters), then Sphinx will replace the ``:ref:`Table-Of-Content-Section``` directive with the title of the section in reference form (it creates a link in the web page). Here's how the definition looks in my case: \r\r .. code-block:: rst\r\r .. _matTheBasicImageContainer:\r\r Mat - The Basic Image Container\r *******************************\r\r Note that according to the |reST|_ rules the row of * characters should be as long as your title. \r\r + Compatibility. What version of OpenCV is required to run your sample code. \r\r + Author. Use the substitution markup of |reST|_. \r\r + A short sentence describing the essence of your tutorial. \r\rBefore each TOC entry you need to add these three lines: \r\r.. code-block:: rst\r\r + \r .. tabularcolumns:: m{100pt} m{300pt}\r .. cssclass:: toctableopencv\r\rThe plus sign (+) is there to enumerate tutorials by using bullet points, so for every TOC entry we have a corresponding bullet point represented by the +. Sphinx is highly sensitive to indentation. Indentation is used to express where a construction begins and ends; un-indenting marks the end of that construction. So to keep all the bullet points in the same group, the lines of each TOC entry (until the next +) should be indented by two spaces. \r\rHere I should also mention that you should **always** prefer spaces to tabs. Working with spaces only ensures that, as long as we both use monospaced fonts, we will see the same thing. Tab size is text-editor dependent and should therefore be avoided. *Sphinx* translates all tabs into 8 spaces before interpreting the input. \r\rIt turns out that the automatic formatting of both the HTML and PDF (LaTeX) output messes up our tables. Therefore, we need to help them out a little. For the PDF generation we add the ``.. tabularcolumns:: m{100pt} m{300pt}`` directive. This means that the first column should be 100 points wide and middle aligned. For the HTML look we simply give the following table the *toctableopencv* class. Then we can modify the look of the table by modifying the CSS of our web page. 
The CSS definitions go into the :file:`opencv/doc/_themes/blue/static/default.css_t` file. \r\r.. code-block:: css\r\r .toctableopencv\r {\r width: 100% ; \r table-layout: fixed;\r }\r\r\r .toctableopencv colgroup col:first-child\r {\r width: 100pt !important;\r max-width: 100pt !important;\r min-width: 100pt !important;\r }\r\r .toctableopencv colgroup col:nth-child(2) \r {\r width: 100% !important;\r }\r\rHowever, you should not need to modify this. Just add these three lines (and keep the two-space indentation) for all TOC entries you add. At the end of the TOC file you'll find: \r\r.. code-block:: rst\r\r .. raw:: latex\r\r \pagebreak\r\r .. toctree::\r :hidden:\r\r ../mat - the basic image container/mat - the basic image container\r\rThe page break entry is there to separate sections, and there should be only one of it in a TOC tree |reST|_ file. Finally, at the end of the TOC tree we need to add our tutorial to the *Sphinx* TOC tree system. From this, *Sphinx* generates the previous-next-up information for the HTML files and adds items to the PDF according to the order here. By default this TOC tree directive generates a simple table of contents. However, we already created a fancy-looking one, so we no longer need this basic one. Therefore, we add the *hidden* option so that it is not shown. \r\rThe path is a relative one. We step back in the file system and then go into the :file:`mat - the basic image container` directory for the :file:`mat - the basic image container.rst` file. Leaving out the *rst* extension of the file is optional. \r\rWrite the tutorial\r==================\r\rCreate a folder with the name of your tutorial. Preferably, use lowercase letters only. Then create a text file in this folder with the *rst* extension and the same name. If you have images for the tutorial, create an :file:`images` folder and add your images there. When creating your images, follow the guidelines described in the previous part!\r\rNow here's our recommendation for the structure of the tutorial (although, remember that this is not carved in stone; if you have a better idea, use it!): \r\r\r.. container:: enumeratevisibleitemswithsquare\r\r + Create the reference point and the title. \r\r .. code-block:: rst\r\r .. _matTheBasicImageContainer:\r\r Mat - The Basic Image Container\r *******************************\r\r You start the tutorial by specifying a reference point with ``.. _matTheBasicImageContainer:`` and then its title. The name of the reference point should be unique across the whole documentation. Therefore, do not use general names like *tutorial1*. Use the * character to underline the title for its full width. The subtitles of the tutorial should be underlined with the = character.\r\r + Goals. You start your tutorial by specifying what you will present. You can also enumerate the subtasks to be done. For this you can use a bullet point construction. There is a single configuration file for both the reference manual and the tutorial documentation. In the reference manual's argument enumerations we do not want any kind of bullet point style, therefore by default all the bullet points at this level are set to not show the dot before the entries in the HTML. You can override this by putting the bullet points in a container. I've defined a square-type bullet point view under the name *enumeratevisibleitemswithsquare*. The CSS style definition for this is again in the :file:`opencv/doc/_themes/blue/static/default.css_t` file. Here's a quick example of using it: \r\r .. code-block:: rst\r\r .. container:: enumeratevisibleitemswithsquare\r\r + Create the reference point and the title. \r + Second entry\r + Third entry\r\r Note that you need to keep the indentation of the container directive. Directive indentations are always three (3) spaces. Here you may even give usage tips for your sample code. \r\r + Source code. Present your sample code to the user. It's a good idea to offer a quick download link for the HTML page by using the *download* directive, and to point out where the user may find your source code in the file system by using the *file* directive: \r\r .. code-block:: rst\r\r Text :file:`samples/cpp/tutorial_code/highgui/video-write/` folder of the OpenCV source library\r or :download:`text to appear in the webpage \r <../../../../samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp>`.\r\r For the download link the path is a relative one, hence the multiple back-stepping operations (..). Then you can add the source code either by using the *code block* directive or the *literal include* one. In case of the code block you will need to actually add all the source code text into your |reST|_ text and also apply the required indentation: \r\r .. code-block:: rst\r\r .. code-block:: cpp \r\r int i = 0; \r l = ++j; \r\r The only argument of the directive is the language used (here CPP). Then you add the source code into its content (meaning one empty line after the directive), keeping the indentation of the directive (3 spaces). With the *literal include* directive you do not need to add the source code of the sample; you just specify the sample and *Sphinx* will load it for you at build time. Here's an example usage: \r\r .. code-block:: rst\r\r .. literalinclude:: ../../../../samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp\r :language: cpp\r :linenos:\r :tab-width: 4\r :lines: 1-8, 21-22, 24-\r\r After the directive you specify a relative path to the file to import from. It has four options: the language to use; ``:linenos:``, which shows line numbers; ``:tab-width:``, which specifies the tab size; and ``:lines:``, with which you do not need to load the whole file but can show just the important lines. Use the *lines* option to hide redundant information (such as the *help* function). Here you basically specify ranges; if the second line number of a range is missing, the range runs until the end of the file. The ranges specified here do not need to be in ascending order; you may even reorganize how you present your sample inside the tutorial.\r\r + The tutorial. Here goes the explanation of what you have used and why. Try to be short, clear and concise, yet thorough. There's no magic formula: look into a few existing tutorials and start out from there. Try to mix sample OpenCV code with your explanations. If something is hard to describe with words, do not hesitate to add a reasonably sized image to overcome this issue.\r\r When you present OpenCV functionality, it's a good idea to give a link to the OpenCV data structure or function used. Because the OpenCV tutorials and reference manual are in separate PDF files, it is not possible to make this link work for the PDF format. Therefore, here we use only web page links to the **opencv.itseez.com** website. The OpenCV functions and data structures may be used for multiple tasks. Nevertheless, we want to avoid every user creating their own reference to a commonly used function. 
So for this we use the global link collection of *Sphinx*. This is defined in the :file:`opencv/doc/conf.py` configuration file. Open it and go all the way down to the last entry: \r\r .. code-block:: py\r\r # ---- External links for tutorials -----------------\r extlinks = {\r 'huivideo' : ('http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None)\r }\r\r In short, here we defined a new **huivideo** role that refers to an external web page link. Its usage is: \r\r .. code-block:: rst\r\r A sample function of the highgui modules image write and read page is the :huivideo:`imread() function <imread>`. \r\r Which turns into: A sample function of the highgui modules image write and read page is the :huivideo:`imread() function <imread>`. The argument you give between the <> is put in place of the ``%s`` in the definition above, so the link will anchor to the correct function. To find out the anchor of a given function, just open up a web page, search for the function and click on it. In the address bar it should appear like: ``http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html#imread``. Look here for the names of the roles defined for each page of the OpenCV reference manual. If none is present for one of them, feel free to add one. \r\r For formulas you can add LaTeX code that is translated into images on the web pages. You do this by using the *math* directive. A usage example: \r\r .. code-block:: latex\r\r .. math::\r\r MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}\r\r After the build this turns into: \r\r .. math::\r\r MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}\r\r You can even use it inline, as ``:math:`MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}```, which turns into :math:`MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}`. \r\r If you use some exotic LaTeX library extension, you need to add it to the ones used at build time. Look into the :file:`opencv/doc/conf.py` configuration file for more information on this.\r\r + Results. Here, depending on your program, show one or more of the following: \r\r - Console output, by using the code block directive. \r - Output images. \r - Runtime videos or visualizations. For this, use your favorite screen capture software. `Camtasia Studio <http://www.techsmith.com/camtasia/>`_ certainly is one of the better choices, however its price is out of this world. `CamStudio <http://camstudio.org/>`_ is a free alternative, but less powerful. If you make a video, you can upload it to YouTube and then use the raw directive with the HTML option to embed it into the generated web page: \r\r .. code-block:: rst \r\r You may observe a runtime instance of this on the `YouTube here <https://www.youtube.com/watch?v=jpBwHxsl1_0>`_. \r\r .. raw:: html\r\r <div align="center">\r <iframe title="Creating a video with OpenCV" width="560" height="349" src="http://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>\r </div>\r\r This results in the text and video: You may observe a runtime instance of this on the `YouTube here <https://www.youtube.com/watch?v=jpBwHxsl1_0>`_. \r\r .. raw:: html\r\r <div align="center">\r <iframe title="Creating a video with OpenCV" width="560" height="349" src="http://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>\r </div>\r\r When these aren't self-explanatory, make sure to throw in a few guiding lines about what we can see and why.\r\r + Build the documentation and check for errors or warnings. In CMake make sure you enable or pass the option for building the documentation. Then simply build the **docs** project for the PDF file and the **docs_html** project for the web page. Read the build output and check for errors/warnings related to what you have added. This is also the time to spot and correct any *not so good looking* parts. Remember to keep our build logs clean. \r\r + Read your tutorial again and check for both programming and spelling errors. If you find any, please correct them.\r\r\rTake home the pride and joy of a job well done!\r===============================================\r\rOnce you are done, contact me or Dr. Gary Bradski with the tutorial. We may submit the tutorial ourselves to the trunk branch of our repository or ask you to do so. \r\rNow, to see your work **live** you may need to wait some time. The PDFs are usually updated at the launch of a new OpenCV version. The web pages are a little more dynamic: they are automatically rebuilt each evening. However, the **opencv.itseez.com** website contains only the most recent **stable branch** of OpenCV; currently this is 2.3. When we add something new (like a tutorial), it first goes to the **trunk branch** of our repository. A build of this can be found on the **opencv.itseez.com/trunk** website. Although we try to make a build every night, occasionally we might freeze one of the branches to fix pressing issues. During such periods it may take a little longer to see your work *live*; however, if you submitted it, be sure that eventually it will show up. \r\rIf you have any questions or advice relating to this tutorial, you can contact me at -delete-bernat@-delete-primeranks.net. Of course, delete the -delete- parts of that e-mail address.\r
\ No newline at end of file
Launch the SVN client and check out either
-a. the current OpenCV snapshot from here: https://code.ros.org/svn/opencv/trunk
+a. the current OpenCV snapshot from here: http://code.opencv.org/svn/opencv/trunk
-#. or the latest tested OpenCV snapshot from here: http://code.ros.org/svn/opencv/tags/latest_tested_snapshot
+#. or the latest tested OpenCV snapshot from here: http://code.opencv.org/svn/opencv/tags/latest_tested_snapshot
On MacOS it can be done using the following command in Terminal:
.. code-block:: bash
cd ~/<my_working_directory>
- svn co https://code.ros.org/svn/opencv/trunk
+ svn co http://code.opencv.org/svn/opencv/trunk
Building OpenCV from source using CMake, using the command line
Getting OpenCV source code
============================
-You can use the latest stable OpenCV version available in *sourceforge* or you can grab the latest snapshot from the `SVN repository <http://code.ros.org/svn/opencv/>`_.
+You can use the latest stable OpenCV version available on *SourceForge*, or you can grab the latest snapshot from the `SVN repository <http://code.opencv.org/svn/opencv/>`_.
Getting the latest stable OpenCV version
------------------------------------------
Launch the SVN client and check out either
-a. the current OpenCV snapshot from here: https://code.ros.org/svn/opencv/trunk
+a. the current OpenCV snapshot from here: http://code.opencv.org/svn/opencv/trunk
-#. or the latest tested OpenCV snapshot from here: http://code.ros.org/svn/opencv/tags/latest_tested_snapshot
+#. or the latest tested OpenCV snapshot from here: http://code.opencv.org/svn/opencv/tags/latest_tested_snapshot
In Ubuntu it can be done using the following command, e.g.:
.. code-block:: bash
cd ~/<my_working_directory>
- svn co https://code.ros.org/svn/opencv/trunk
+ svn co http://code.opencv.org/svn/opencv/trunk
Building OpenCV from source using CMake, using the command line
\r
.. container:: enumeratevisibleitemswithsquare\r
\r
- + stable and tested build - https://code.ros.org/svn/opencv/branches/2.3 (the number at the end will change with every new realease, so change it to that)\r
- + development build - https://code.ros.org/svn/opencv/trunk/\r
+ + stable and tested build - http://code.opencv.org/svn/opencv/branches/2.3 (the number at the end will change with every new release, so adjust it accordingly)\r
+ + development build - http://code.opencv.org/svn/opencv/trunk/\r
\r
While the latter may contain a couple of new and experimental algorithms, performance increases and interface improvements, be aware that it may also contain many, many bugs. Using the first one is recommended in most cases, unless you are extending the OpenCV library itself or really need the most up-to-date version of it. \r
\r
Code
====
-This tutorial code's is shown lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp>`_ . The second version (using LBP for face detection) can be `found here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp>`_
+This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp>`_. The second version (using LBP for face detection) can be `found here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp>`_
.. code-block:: cpp
The primary use of the function is in multi-camera environments, especially when the cameras do not have hardware synchronization. That is, you call ``VideoCapture::grab()`` for each camera and after that call the slower method ``VideoCapture::retrieve()`` to decode and get the frame from each camera. This way the overhead of demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames from different cameras will be closer in time.
-Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the correct way of retrieving data from it is to call `VideoCapture::grab` first and then call :ocv:func:`VideoCapture::retrieve` one or more times with different values of the ``channel`` parameter. See https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/kinect_maps.cpp
+Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the correct way of retrieving data from it is to call `VideoCapture::grab` first and then call :ocv:func:`VideoCapture::retrieve` one or more times with different values of the ``channel`` parameter. See http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/kinect_maps.cpp
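As a rough illustrative sketch of the pattern described above (not one of the official samples; the device indices and the fixed frame count are arbitrary assumptions), grabbing from two cameras before retrieving could look like this:

.. code-block:: cpp

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cv::VideoCapture cam0(0), cam1(1);            // two independent cameras (indices are just an example)
        if (!cam0.isOpened() || !cam1.isOpened())
            return -1;

        cv::Mat frame0, frame1;
        for (int i = 0; i < 100; ++i)
        {
            // grab both frames first so that they are as close in time as possible ...
            cam0.grab();
            cam1.grab();
            // ... and only then run the slower decoding step
            cam0.retrieve(frame0);
            cam1.retrieve(frame1);
            // process frame0 and frame1 here
        }
        return 0;
    }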
VideoCapture::retrieve
:param name: Window name
- :param onMouse: Mouse callback. See OpenCV samples, such as https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
+ :param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param param: The optional parameter passed to the callback.
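For illustration, a minimal sketch of registering a mouse callback with ``setMouseCallback`` (assuming these parameters belong to that function; the window name and the printed message are placeholders, not taken from the samples):

.. code-block:: cpp

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <iostream>

    static void onMouse(int event, int x, int y, int, void* param)
    {
        if (event == CV_EVENT_LBUTTONDOWN)
            std::cout << "Left click at (" << x << ", " << y << ") in window "
                      << static_cast<const char*>(param) << std::endl;
    }

    int main()
    {
        const char* name = "mouse demo";
        cv::Mat canvas = cv::Mat::zeros(240, 320, CV_8UC3);
        cv::namedWindow(name);
        cv::imshow(name, canvas);
        cv::setMouseCallback(name, onMouse, (void*)name);  // 'param' is handed back to the callback
        cv::waitKey(0);
        return 0;
    }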
The function ``approxPolyDP`` approximates a curve or a polygon with another curve/polygon with fewer vertices so that the distance between them is less than or equal to the specified precision. It uses the Douglas-Peucker algorithm
http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
-See http://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/contours.cpp for the function usage model.
+See http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/contours.cpp for the function usage model.
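A small, self-contained sketch of calling ``approxPolyDP`` from C++ (the input points are made up for illustration and are not taken from the referenced sample):

.. code-block:: cpp

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <vector>

    int main()
    {
        // a noisy, roughly rectangular contour (coordinates chosen arbitrarily)
        std::vector<cv::Point> curve;
        curve.push_back(cv::Point(0, 0));
        curve.push_back(cv::Point(51, 2));
        curve.push_back(cv::Point(100, 0));
        curve.push_back(cv::Point(99, 48));
        curve.push_back(cv::Point(100, 100));
        curve.push_back(cv::Point(0, 100));

        std::vector<cv::Point> approx;
        double epsilon = 5.0;                            // maximum allowed deviation, in pixels
        cv::approxPolyDP(curve, approx, epsilon, true);  // 'true' treats the curve as closed

        // 'approx' now holds the simplified polygon: the near-collinear points are dropped
        return 0;
    }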
ApproxChains
Core.fillConvexPoly(gray0, polyline2, colorBlack, Core.LINE_8, 1);
- assertEquals("see https://code.ros.org/trac/opencv/ticket/1284", 0, Core.countNonZero(gray0));
+ assertEquals("see http://code.opencv.org/issues/1284", 0, Core.countNonZero(gray0));
}
public void testFillPolyMatListOfListOfPointScalar() {
The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within the region of interest and the scale (this scale is not the same as the scale used at the detection stage, though these two scales are multiplied). For example, in the case of the third line feature (2c) the response is calculated as the difference between the sum of image pixels under the rectangle covering the whole feature (including the two white stripes and the black stripe in the middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to compensate for the differences in the size of areas. The sums of pixel values over rectangular regions are calculated rapidly using integral images (see below and the :ocv:func:`integral` description).
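As a brief illustration of the integral-image idea (a toy example, not part of the detector code itself), the sum of the pixels inside any rectangle can be obtained with four lookups:

.. code-block:: cpp

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>

    // sum of the pixels inside 'r', computed from the integral image in O(1)
    static double rectSum(const cv::Mat& integralImg, const cv::Rect& r)
    {
        return integralImg.at<double>(r.y, r.x)
             + integralImg.at<double>(r.y + r.height, r.x + r.width)
             - integralImg.at<double>(r.y, r.x + r.width)
             - integralImg.at<double>(r.y + r.height, r.x);
    }

    int main()
    {
        cv::Mat img = cv::Mat::ones(100, 100, CV_8UC1) * 3;   // toy image: every pixel equals 3
        cv::Mat sum;
        cv::integral(img, sum, CV_64F);                       // 'sum' is (101 x 101), type CV_64F

        double s = rectSum(sum, cv::Rect(10, 10, 20, 20));    // 20 * 20 * 3 = 1200
        return s == 1200.0 ? 0 : 1;
    }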
To see the object detector at work, have a look at the facedetect demo:
-https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/facedetect.cpp
+http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/facedetect.cpp
The following reference is for the detection part only. There is a separate application called ``opencv_traincascade`` that can train a cascade of boosted classifiers from a set of samples.
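For reference, a minimal C++ sketch of the detection side using ``CascadeClassifier::detectMultiScale`` (the cascade file and image paths come from the command line; the parameter values shown are common defaults, not a recommendation from this manual):

.. code-block:: cpp

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/objdetect/objdetect.hpp>
    #include <vector>

    int main(int argc, char** argv)
    {
        if (argc < 3)
            return -1;                                   // usage: ./detect cascade.xml image.jpg

        cv::CascadeClassifier cascade;
        if (!cascade.load(argv[1]))
            return -1;

        cv::Mat image = cv::imread(argv[2]);
        if (image.empty())
            return -1;

        cv::Mat gray;
        cv::cvtColor(image, gray, CV_BGR2GRAY);
        cv::equalizeHist(gray, gray);                    // detection runs on an equalized grayscale image

        std::vector<cv::Rect> objects;
        cascade.detectMultiScale(gray, objects, 1.1, 3, 0, cv::Size(30, 30));

        // each element of 'objects' is the bounding box of one detected object
        return 0;
    }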
def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
if not filename in self.image_cache:
- filedata = urllib.urlopen("https://code.ros.org/svn/opencv/trunk/opencv/" + filename).read()
+ filedata = urllib.urlopen("http://code.opencv.org/svn/opencv/trunk/opencv/" + filename).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
self.image_cache[filename] = cv.DecodeImageM(imagefiledata, iscolor)
try:
img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
except IOError:
- urlbase = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/'
+ urlbase = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/'
file = name.split('/')[-1]
filedata = urllib2.urlopen(urlbase+file).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
if len(sys.argv) > 1:
src_image = cv.GetMat(cv.LoadImage(sys.argv[1], 0))
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/baboon.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/baboon.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/baboon.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/baboon.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
gray = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/stuff.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/stuff.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
source_image = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/stuff.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/stuff.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
filename = sys.argv[1]
src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/doc/pics/building.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/doc/pics/building.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
img0 = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
src = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
src = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
img0 = cv.LoadImageM( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/lena.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/lena.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
if len(sys.argv) > 1:
img0 = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
- url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
+ url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))