#if 0
/* A structure, representing the lattice range of statmodel parameters.
It is used for optimizing statmodel parameters by cross-validation method.
- The lattice is logarithmic, so <step> must be greater then 1. */
+ The lattice is logarithmic, so <step> must be greater than 1. */
typedef struct CvParamLattice
{
double min_val;
/* The structure, representing the grid range of statmodel parameters.
It is used for optimizing statmodel accuracy by varying model parameters,
the accuracy estimate being computed by cross-validation.
- The grid is logarithmic, so <step> must be greater then 1. */
+ The grid is logarithmic, so <step> must be greater than 1. */
class CvMLData;
-u, --units - mm, inches, px, m (default mm)
-w, --page_width - page width in units (default 216)
-h, --page_height - page height in units (default 279)
--a, --page_size - page size (default A4), supercedes -h -w arguments
+-a, --page_size - page size (default A4), supersedes -h -w arguments
-H, --help - show help
"""
// You would need to provide the method body in the binder code
CV_WRAP_PHANTOM(static void* context());
- //! The wrapped method become equvalent to `get(int flags = ACCESS_RW)`
+ //! The wrapped method becomes equivalent to `get(int flags = ACCESS_RW)`
CV_WRAP_AS(get) Mat getMat(int flags CV_WRAP_DEFAULT(ACCESS_RW)) const;
};
@endcode
We know SIFT uses 128-dim vector for descriptors. Since it is using floating point numbers, it takes
basically 512 bytes. Similarly SURF also takes minimum of 256 bytes (for 64-dim). Creating such a
-vector for thousands of features takes a lot of memory which are not feasible for resouce-constraint
+vector for thousands of features takes a lot of memory which is not feasible for resource-constrained
applications, especially for embedded systems. The larger the memory, the longer the time it takes
for matching.
G-API defines _backend_ as the lower-level entity which knows how to
run kernels. Backends may have (and, in fact, do have) different
_Kernel APIs_ which are used to program and integrate kernels for that
-backends. In this context, _kernel_ is an implementaion of an
+backends. In this context, _kernel_ is an implementation of an
_operation_, which is defined on the top API level (see
G_TYPED_KERNEL() macro).
int icols = int(colCount());
int irows = int(rowCount());
if(icols < 3 || irows < 3)
- throw std::runtime_error("getCellCenters: Chessboard must be at least consist of 3 rows and cols to calcualte the cell centers");
+ throw std::runtime_error("getCellCenters: Chessboard must be at least consist of 3 rows and cols to calculate the cell centers");
std::vector<cv::Point2f> points;
cv::Matx33d H(estimateHomography(DUMMY_FIELD_SIZE));
rows = 3;
cols = 3;
- // set inital cell colors
+ // set initial cell colors
Point2f pt1 = *(cells[0]->top_right)-*(cells[0]->bottom_left);
pt1 /= cv::norm(pt1);
cv::Point2f pt2(cos(white_angle),-sin(white_angle));
points.push_back(*iter1);
}
- // genreate pairs those connection goes through the center
+ // generate pairs whose connection goes through the center
std::vector<std::pair<cv::KeyPoint,cv::KeyPoint> > pairs;
iter1 = points.begin();
for(;iter1 != points.end();++iter1)
flags ^= CALIB_CB_ACCURACY;
}
if(flags)
- CV_Error(Error::StsOutOfRange, cv::format("Invalid remaing flags %d", (int)flags));
+ CV_Error(Error::StsOutOfRange, cv::format("Invalid remaining flags %d", (int)flags));
std::vector<cv::KeyPoint> corners;
details::Chessboard board(para);
size_t rowCount() const;
/**
- * \brief Returns the inner contour of the board inlcuding only valid corners
+ * \brief Returns the inner contour of the board including only valid corners
*
 * \note the contour might be non-square if not all points of the board are defined
*
* order to compute a candidate homography (newH).
*
* The system above is solved by Cholesky decomposition of a
- * sufficently-damped JtJ into a lower-triangular matrix (and its
+ * sufficiently-damped JtJ into a lower-triangular matrix (and its
* transpose), whose inverse is then computed. This inverse (and its
* transpose) then multiply Jte in order to find dH.
*/
CV_EXPORTS void fastFree(void* ptr);
/*!
- The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree()
+ The STL-compliant memory Allocator based on cv::fastMalloc() and cv::fastFree()
*/
template<typename _Tp> class Allocator
{
T* operator->() const CV_NOEXCEPT { return std::shared_ptr<T>::get();}
typename std::add_lvalue_reference<T>::type operator*() const CV_NOEXCEPT { return *std::shared_ptr<T>::get(); }
- // OpenCV 3.x methods (not a part of standart C++ library)
+ // OpenCV 3.x methods (not a part of standard C++ library)
inline void release() { std::shared_ptr<T>::reset(); }
inline operator T* () const { return std::shared_ptr<T>::get(); }
inline bool empty() const { return std::shared_ptr<T>::get() == nullptr; }
v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + m3.s[3]);
}
-////// FP16 suport ///////
+////// FP16 support ///////
inline v_reg<float, V_TypeTraits<float>::nlanes128>
v_load_expand(const float16_t* ptr)
}
#endif
-////// FP16 suport ///////
+////// FP16 support ///////
#if CV_FP16
inline v_float32x4 v_load_expand(const float16_t* ptr)
{
}
catch (...)
{
- ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
}
catch (...)
{
- ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
}
catch (...)
{
- ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
}
catch (...)
{
- ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
// There is some mess in code with vectors representation.
// Both vector-column / vector-rows are used with dims=2 (as Mat2D always).
-// Reshape matrices if neccessary (in case of vectors) and returns size with scaled width.
+// Reshapes matrices if necessary (in case of vectors) and returns size with scaled width.
Size getContinuousSize2D(Mat& m1, int widthScale=1);
Size getContinuousSize2D(Mat& m1, Mat& m2, int widthScale=1);
Size getContinuousSize2D(Mat& m1, Mat& m2, Mat& m3, int widthScale=1);
#if defined OPENCV_INSTALL_PREFIX && defined OPENCV_DATA_INSTALL_PATH
cv::String install_dir(OPENCV_INSTALL_PREFIX);
// use core/world module path and verify that library is running from installation directory
- // It is neccessary to avoid touching of unrelated common /usr/local path
+ // It is necessary to avoid touching of unrelated common /usr/local path
if (module_path.empty()) // can't determine
module_path = install_dir;
if (isSubDirectory(install_dir, module_path) || isSubDirectory(utils::fs::canonical(install_dir), utils::fs::canonical(module_path)))
// implementations needed to use has_field hueristics to determine
// which value field was in use. For IR_VERSION 0.0.2 or later, this
// field MUST be set and match the f|i|s|t|... field in use. This
- // change was made to accomodate proto3 implementations.
+ // change was made to accommodate proto3 implementations.
optional AttributeType type = 20; // discriminator that indicates which field below is in use
// Exactly ONE of the following fields must be present for this version of the IR
the algorithm implementation changes irrevocably -- becoming more
specific, less flexible, and harder to extend and maintain.
-G-API takes this responsiblity and complexity from user and does the
+G-API takes this responsibility and complexity from user and does the
majority of the work by itself, keeping the algorithm code clean from
device or optimization details. This approach has its own limitations,
though, as graph model is a _constrained_ model and not every
/** @brief Applies a fixed-level threshold to each matrix element.
The function applies fixed-level thresholding to a single- or multiple-channel matrix.
-The function is typically used to get a bi-level (binary) image out of a grayscale image ( cmp funtions could be also used for
+The function is typically used to get a bi-level (binary) image out of a grayscale image ( cmp functions could also be used for
this purpose) or for removing a noise, that is, filtering out pixels with too small or too large
values. There are several depths of thresholding supported by the function. They are determined by
depth parameter.
*
* This may be useful since all temporary objects (cv::GMats) and
* namespaces can be localized to scope of lambda, without
- * contaminating the parent scope with probably unecessary objects
+ * contaminating the parent scope with probably unnecessary objects
* and information.
*
* @param gen generator function which returns a cv::GComputation,
public:
/**
* @brief Returns total number of kernels in the package
- * (accross all backends included)
+ * (across all backends included)
*
* @return a number of kernels in the package
*/
// TODO: support sizes 3, 5, 7, 9, ...
GAPI_Assert(kernelSize.width == 3 && kernelSize.height == 3);
- // TODO: suport non-trivial anchor
+ // TODO: support non-trivial anchor
GAPI_Assert(anchor.x == -1 && anchor.y == -1);
static const bool normalize = true;
// TODO: support sizes 3, 5, 7, 9, ...
GAPI_Assert(kernelSize.width == 3 && kernelSize.height == 3);
- // TODO: suport non-trivial anchor
+ // TODO: support non-trivial anchor
GAPI_Assert(anchor.x == -1 && anchor.y == -1);
int width = src.length();
#include "executor/gexecutor.hpp"
// NB: BTW, GCompiled is the only "public API" class which
-// private part (implementaion) is hosted in the "compiler/" module.
+// private part (implementation) is hosted in the "compiler/" module.
//
// This file is here just to keep ADE hidden from the top-level APIs.
//
auto l_obj = gim.metadata(lhs_nh).get<FusedIsland>().object;
auto r_obj = gim.metadata(rhs_nh).get<FusedIsland>().object;
GAPI_LOG_INFO(NULL, r_obj->name() << " can be merged into " << l_obj->name());
- // Try to do a merge. If merge was succesfull, check if the
+ // Try to do a merge. If merge was successful, check if the
    // graph has cycles (cycles are prohibited at this point).
// If there are cycles, roll-back the merge and mark a pair of
// these Islands with a special tag - "cycle-causing".
{
GModel::ConstGraph gr(ctx.graph);
- // The algorithm is teh following:
+ // The algorithm is the following:
//
// 1. Put all Tagged nodes (both Operations and Data) into a set
// 2. Initialize Visited set as (empty)
GModel::Graph gr(ctx.graph);
// Repeat the loop while there are compound kernels.
- // Restart procedure after every successfull unrolling
+ // Restart procedure after every successful unrolling
bool has_compound_kernel = true;
while (has_compound_kernel)
{
cv::Scalar sc(33);
cv::Mat out;
- // 3 channels intead 1
+ // 3 channels instead of 1
cv::Mat in1 = cv::Mat::eye(cv::Size(64,32), CV_8UC3);
EXPECT_THROW(f(in1, sc, out), std::logic_error);
- // 32f intead 8u
+ // 32f instead of 8u
cv::Mat in2 = cv::Mat::eye(cv::Size(64,32), CV_32F);
EXPECT_THROW(f(in2, sc, out), std::logic_error);
cv::Scalar sc(33);
cv::Mat out;
- // 3 channels intead 1
+ // 3 channels instead of 1
cv::Mat in1 = cv::Mat::eye(cv::Size(64,32), CV_8UC3);
EXPECT_THROW(f(cv::gin(in1, sc), cv::gout(out)), std::logic_error);
- // 32f intead 8u
+ // 32f instead of 8u
cv::Mat in2 = cv::Mat::eye(cv::Size(64,32), CV_32F);
EXPECT_THROW(f(cv::gin(in2, sc), cv::gout(out)), std::logic_error);
using namespace cv::detail;
static_assert(are_meta_descrs_but_last<cv::GScalarDesc, int>::value,
- "List is valid (int is ommitted)");
+ "List is valid (int is omitted)");
static_assert(are_meta_descrs_but_last<cv::GMatDesc, cv::GScalarDesc, cv::GCompileArgs>::value,
"List is valid (GCompileArgs are omitted)");
// |
// (in1) --------------------------`
- // Check that internal islands does't overlap user island
+ // Check that internal islands don't overlap the user island
namespace J = Jupiter;
namespace S = Saturn;
}
// FIXME: Disabled since currently merge procedure merges two into one
-// succesfully
+// successfully
TEST_F(Islands, DISABLED_Two_Islands_With_Same_Name_Should_Fail)
{
// (in) -> Blur1 -> (tmp0) -> Blur2 -> (tmp1) -> Blur3 -> (tmp2) -> Blur4 -> (out)
EXPECT_EQ(0u, writer.wref().size()); // Check the initial state
EXPECT_EQ(0u, reader.rref().size());
- writer.wref().emplace_back(); // Check that write is successfull
+ writer.wref().emplace_back(); // Check that write is successful
EXPECT_EQ(1u, writer.wref().size());
EXPECT_EQ(1u, vec.size()); // Check that changes are reflected to the original container
EXPECT_EQ(0u, writer.wref<T>().size()); // Check the initial state
EXPECT_EQ(0u, reader.rref<T>().size());
- writer.wref<T>().emplace_back(); // Check that write is successfull
+ writer.wref<T>().emplace_back(); // Check that write is successful
EXPECT_EQ(1u, writer.wref<T>().size());
EXPECT_EQ(1u, vec.size()); // Check that changes are reflected to the original container
bool is_byte_order_swapped(double scale)
{
// ".pfm" format file specifies that:
- // positive scale means big endianess;
- // negative scale means little endianess.
+ // positive scale means big endianness;
+ // negative scale means little endianness.
#ifdef WORDS_BIGENDIAN
return scale < 0.0;
\f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f]
where \f$n\f$ is the maximal index satisfying
\f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f]
- The grid is logarithmic, so logStep must always be greater then 1. Default value is 1.
+ The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.
*/
CV_PROP_RW double logStep;
if( pg.minVal < DBL_EPSILON )
CV_Error( CV_StsBadArg, "Lower bound of the grid must be positive" );
if( pg.logStep < 1. + FLT_EPSILON )
- CV_Error( CV_StsBadArg, "Grid step must greater then 1" );
+ CV_Error( CV_StsBadArg, "Grid step must greater than 1" );
}
// SVM training parameters
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
-intensities on each channel independantly.
+intensities on each channel independently.
*/
class CV_EXPORTS_W ChannelsCompensator : public ExposureCompensator
{
\f[\begin{bmatrix} a_{11} & a_{12} & b_1 \\ -a_{12} & a_{11} & b_2 \end{bmatrix}\f]
when fullAffine=false.
-@deprecated Use cv::estimateAffine2D, cv::estimateAffinePartial2D instead. If you are using this fuction
-with images, extract points using cv::calcOpticalFlowPyrLK and then use the estimation fuctions.
+@deprecated Use cv::estimateAffine2D, cv::estimateAffinePartial2D instead. If you are using this function
+with images, extract points using cv::calcOpticalFlowPyrLK and then use the estimation functions.
@sa
estimateAffine2D, estimateAffinePartial2D, getAffineTransform, getPerspectiveTransform, findHomography
// ----------------------------------------------------------------------
-// Check that we are not trying to setup a non-existant device
+// Check that we are not trying to setup a non-existent device
// Then start the graph building!
// ----------------------------------------------------------------------