////////////////////////////////// Video Encoding //////////////////////////////////
// Works only under Windows.
-// Supports olny H264 video codec and AVI files.
+// Supports only H264 video codec and AVI files.
enum SurfaceFormat
{
return ARGBpixel;
}
- // CUDA kernel for outputing the final ARGB output from NV12
+ // CUDA kernel for outputting the final ARGB output from NV12
#define COLOR_COMPONENT_BIT_SIZE 10
#define COLOR_COMPONENT_MASK 0x3FF
// Spins until frame becomes available or decoding gets canceled.
// If the requested frame is available the method returns true.
- // If decoding was interupted before the requested frame becomes
+ // If decoding was interrupted before the requested frame becomes
// available, the method returns false.
bool waitUntilFrameAvailable(int pictureIndex);
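// A minimal self-contained sketch of such a spin-wait, with std::atomic flags
// standing in for the decoder's real frame-queue state (names here are
// illustrative, not the actual cv::cudacodec internals):
#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> frameReady{false};      // set by the decoder thread
std::atomic<bool> decodingStopped{false}; // set when decoding is canceled

bool waitUntilFrameAvailableSketch()
{
    while (!frameReady.load())
    {
        if (decodingStopped.load())
            return false;                  // interrupted before the frame arrived
        std::this_thread::sleep_for(std::chrono::milliseconds(1)); // back off briefly
    }
    return true;                           // requested frame is now available
}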
GpuMat decodedFrame = videoDecoder_->mapFrame(frameInfo.first.picture_index, frameInfo.second);
// perform post processing on the CUDA surface (performs color space conversion and post processing)
- // comment this out if we inclue the line of code seen above
+ // comment this out if we include the line of code seen above
videoDecPostProcessFrame(decodedFrame, frame, videoDecoder_->targetWidth(), videoDecoder_->targetHeight());
// unmap video frame
rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
queryBuf.convertTo(queryBuf, CV_32FC1);
- // Generate train decriptors as follows:
+ // Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation
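// Hedged sketch of the train-set generation described above: each query
// descriptor is copied countFactor times and one randomly chosen element of
// every copy is perturbed (the helper name and perturbation range are
// illustrative, not the exact test code).
#include <opencv2/core.hpp>

static cv::Mat makeTrainDescriptorsSketch(const cv::Mat& query, int countFactor)
{
    cv::Mat train(query.rows * countFactor, query.cols, CV_32FC1);
    cv::RNG rng;
    for (int qi = 0; qi < query.rows; ++qi)
        for (int k = 0; k < countFactor; ++k)
        {
            cv::Mat row = train.row(qi * countFactor + k);
            query.row(qi).copyTo(row);                       // copy the query descriptor
            int elem = rng.uniform(0, query.cols);           // pick one element
            row.at<float>(0, elem) += rng.uniform(0.f, 1.f); // perturb it slightly
        }
    return train;
}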
if (scale != 1)
{
// usually the smoothing part is the slowest to compute,
- // so try to scale it instead of the faster differenciating part
+ // so try to scale it instead of the faster differentiating part
if (dx == 0)
kx *= scale;
else
/** @brief Performs bilateral filtering of passed image
-@param src Source image. Supports only (channles != 2 && depth() != CV_8S && depth() != CV_32S
+@param src Source image. Supports only (channels != 2 && depth() != CV_8S && depth() != CV_32S
&& depth() != CV_64F).
@param dst Destination image.
@param kernel_size Kernel window size.
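// Hedged usage sketch, assuming the cv::cuda::bilateralFilter wrapper from the
// cudaimgproc module is the public entry point for this kernel (parameter
// values below are illustrative):
#include <opencv2/cudaimgproc.hpp>

void bilateralSketch(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst)
{
    cv::cuda::bilateralFilter(src, dst,
                              9,      // kernel window size
                              30.0f,  // sigma in the color space
                              7.0f);  // sigma in the coordinate space
}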
cudaSafeCall( cudaDeviceSynchronize() );
}
- __global__ void tranformKernel(const PtrStepSzb src, PtrStepb dst, const PtrStepb lut, const int2 tileSize, const int tilesX, const int tilesY)
+ __global__ void transformKernel(const PtrStepSzb src, PtrStepb dst, const PtrStepb lut, const int2 tileSize, const int tilesX, const int tilesY)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
- cudaSafeCall( cudaFuncSetCacheConfig(tranformKernel, cudaFuncCachePreferL1) );
+ cudaSafeCall( cudaFuncSetCacheConfig(transformKernel, cudaFuncCachePreferL1) );
- tranformKernel<<<grid, block, 0, stream>>>(src, dst, lut, tileSize, tilesX, tilesY);
+ transformKernel<<<grid, block, 0, stream>>>(src, dst, lut, tileSize, tilesX, tilesY);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
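// divUp above rounds the image size up to a whole number of blocks; a minimal
// definition matching the usual OpenCV CUDA helper is:
static inline int divUp(int total, int grain)
{
    return (total + grain - 1) / grain;    // ceiling of total / grain
}
// e.g. divUp(1920, 32) == 60 blocks along x for a 1920-pixel-wide image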
std::vector<SegmLink> edges;
edges.reserve(g.numv);
- // Prepare edges connecting differnet components
+ // Prepare edges connecting different components
for (int v = 0; v < g.numv; ++v)
{
int c1 = comps.find(v);
}
}
- // Sort all graph's edges connecting differnet components (in asceding order)
+ // Sort all graph's edges connecting different components (in ascending order)
std::sort(edges.begin(), edges.end());
// Exclude small components (starting from the nearest couple)
haar.ClassifierSize.width = oldCascade->orig_window_size.width;
haar.ClassifierSize.height = oldCascade->orig_window_size.height;
- int stagesCound = oldCascade->count;
- for(int s = 0; s < stagesCound; ++s) // by stages
+ int stagesCount = oldCascade->count;
+ for(int s = 0; s < stagesCount; ++s) // by stages
{
HaarStage64 curStage;
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
virtual int getNumLevels() const = 0;
//! Threshold for the distance between features and SVM classifying plane.
- //! Usually it is 0 and should be specfied in the detector coefficients (as the last free
+ //! Usually it is 0 and should be specified in the detector coefficients (as the last free
//! coefficient). But if the free coefficient is omitted (which is allowed), you can specify it
//! manually here.
virtual void setHitThreshold(double hit_threshold) = 0;
int totalWidth = level.workArea.width / step;
total += totalWidth * (level.workArea.height / step);
- // go to next pyramide level
+ // go to next pyramid level
level = level.next(scaleFactor_, image.size(), NxM, minObjectSize_);
area = level.workArea;
}
template <class InIt, class OutIt, class UnOp>
-__device__ __forceinline__ static void blockTransfrom(InIt beg, InIt end, OutIt out, const UnOp& op)
+__device__ __forceinline__ static void blockTransform(InIt beg, InIt end, OutIt out, const UnOp& op)
{
uint STRIDE = Block::blockSize();
InIt t = beg + Block::threadLineId();
}
template <class InIt1, class InIt2, class OutIt, class BinOp>
-__device__ __forceinline__ static void blockTransfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, const BinOp& op)
+__device__ __forceinline__ static void blockTransform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, const BinOp& op)
{
uint STRIDE = Block::blockSize();
InIt1 t1 = beg1 + Block::threadLineId();
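    // (Hedged sketch of how the body typically continues -- not verbatim cudev
    // code: each thread starts at its own line id and strides by the block
    // size, so the block covers [beg1, end1) cooperatively.)
    //
    //     InIt2 t2 = beg2 + Block::threadLineId();
    //     OutIt  o = out  + Block::threadLineId();
    //     for (; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)
    //         *o = op(*t1, *t2);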
//! copy constructor
__host__ GpuMat_(const GpuMat_& m);
- //! copy/conversion contructor. If m is of different type, it's converted
+ //! copy/conversion constructor. If m is of different type, it's converted
__host__ explicit GpuMat_(const GpuMat& m, Allocator* allocator = defaultAllocator());
//! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type
@param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
@param _min_area prune the area which smaller than minArea
@param _max_area prune the area which bigger than maxArea
- @param _max_variation prune the area have simliar size to its children
+ @param _max_variation prune the area have similar size to its children
@param _min_diversity for color image, trace back to cut off mser with diversity less than min_diversity
@param _max_evolution for color image, the evolution steps
@param _area_threshold for color image, the area threshold to cause re-initialize
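// Hedged usage sketch for the MSER detector whose parameters are documented
// above (the numeric values are illustrative, not recommendations):
#include <vector>
#include <opencv2/features2d.hpp>

void detectMserSketch(const cv::Mat& gray)
{
    cv::Ptr<cv::MSER> mser = cv::MSER::create(5,      // _delta
                                              60,     // _min_area
                                              14400,  // _max_area
                                              0.25);  // _max_variation
    std::vector<std::vector<cv::Point> > regions;
    std::vector<cv::Rect> bboxes;
    mser->detectRegions(gray, regions, bboxes);       // regions plus their bounding boxes
}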
rng.fill( buf, RNG::UNIFORM, Scalar::all(0), Scalar(3) );
buf.convertTo( query, sourceType );
- // Generate train decriptors as follows:
+ // Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation
}
//first use nth element to partition the keypoints into the best and worst.
std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
- //this is the boundary response, and in the case of FAST may be ambigous
+ //this is the boundary response, and in the case of FAST may be ambiguous
float ambiguous_response = keypoints[n_points - 1].response;
//use std::partition to grab all of the keypoints with the boundary response.
std::vector<KeyPoint>::const_iterator new_end =
rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
queryBuf.convertTo(queryBuf, CV_32FC1);
- // Generate train decriptors as follows:
+ // Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation
if (!fs.isOpened())
{
fs.open(keypoints_filename, FileStorage::WRITE);
- ASSERT_TRUE(fs.isOpened()) << "File for writting keypoints can not be opened.";
+ ASSERT_TRUE(fs.isOpened()) << "File for writing keypoints can not be opened.";
if (detector.empty())
{
Ptr<ORB> fd = ORB::create();
rng.fill( buf, RNG::UNIFORM, Scalar::all(0), Scalar(3) );
buf.convertTo( query, CV_32FC1 );
- // Generate train decriptors as follows:
+ // Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation
Scalar(255, 0, 0), 2, 1);
}
- // show the image bouding box
+ // show the image bounding box
imshow(windowName, selectorParams.image);
// reset the image
}
}
- // save the keypressed characted
+ // save the keypressed character
int key;
Size imageSize;
};
{
//why -1,-1 ?: do this trick because the first time the code is run,
//no value pos was saved so we let Qt move the window in the middle of its parent (event ignored).
- //then hide will save the last position and thus, we want to retreive it (event accepted).
+ //then hide will save the last position and thus, we want to retrieve it (event accepted).
QPoint mypos(-1, -1);
QSettings settings("OpenCV2", objectName());
mypos = settings.value("pos", mypos).toPoint();
void* userdata;
};
-//Both are top level window, so that a way to differenciate them.
+//Both are top level window, so we need a way to differentiate them.
//if (obj->metaObject ()->className () == "CvWindow") does not give me robust result
enum typeWindow { type_CvWindow = 1, type_CvWinProperties = 2 };
}
else
{
- fprintf(stderr, "Failed to tranform process type: %d\n", (int) ret);
+ fprintf(stderr, "Failed to transform process type: %d\n", (int) ret);
fflush (stderr);
}
}
return false;
}
- //extract the driver infomation
+ //extract the driver information
m_driver = m_dataset->GetDriver();
// if the driver failed, then exit
/* the case where data fits the opencv matrix */
if (m_sampledepth == img.depth() && target_channels == m_channels && !bit_mode) {
- /* special case for 16bit images with wrong endianess */
+ /* special case for 16bit images with wrong endianness */
if (m_sampledepth == CV_16U && !isBigEndian())
{
for (y = 0; y < m_height; y++, data += imp_stride )
{
m_strm.getBytes( src, src_stride );
- /* endianess correction */
+ /* endianness correction */
if( m_sampledepth == CV_16U && !isBigEndian() )
{
for( x = 0; x < src_elems_per_row; x++ )
if (img.depth() == CV_8U)
strm.putBytes( data, stride*height );
else if (img.depth() == CV_16U) {
- /* fix endianess */
+ /* fix endianness */
if (!isBigEndian()) {
for( y = 0; y < height; y++ ) {
memcpy( buffer, img.ptr(y), stride );
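/* Hedged sketch of the byte swap implied by the endianness fix above: PNM
   stores 16-bit samples big-endian, so on a little-endian host every ushort
   in the row is swapped before writing (helper name is illustrative). */
static void swapRow16(unsigned short* row, int elems)
{
    for (int x = 0; x < elems; ++x)
        row[x] = (unsigned short)((row[x] << 8) | (row[x] >> 8)); /* swap the two bytes */
}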
return rgbe_error(rgbe_read_error,NULL);
if (buf[0] == '\n') // end of the header
break;
- else if (buf[0] == '#') // commment
+ else if (buf[0] == '#') // comment
continue;
else if (strcmp(buf,"FORMAT=32-bit_rle_rgbe\n") == 0)
hasFormat = true;
class CV_EXPORTS LineIterator
{
public:
- /** @brief intializes the iterator
+ /** @brief initializes the iterator
creates iterators for the line connecting pt1 and pt2
the line will be clipped on the image boundaries
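// Hedged usage sketch for cv::LineIterator as documented above: visit every
// pixel on the clipped segment between two points (values are illustrative).
#include <opencv2/imgproc.hpp>

int sumAlongLineSketch(const cv::Mat& gray, cv::Point pt1, cv::Point pt2)
{
    cv::LineIterator it(gray, pt1, pt2, 8 /* connectivity */);
    int sum = 0;
    for (int i = 0; i < it.count; ++i, ++it)
        sum += **it;           // *it points at the current pixel (CV_8UC1 here)
    return sum;
}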
" return result;",
"\n",
" } catch(const cv::Exception& e) {",
- " LOGD(\"Imgproc::n_1getTextSize() catched cv::Exception: %s\", e.what());",
+ " LOGD(\"Imgproc::n_1getTextSize() caught cv::Exception: %s\", e.what());",
" jclass je = env->FindClass(\"org/opencv/core/CvException\");",
" if(!je) je = env->FindClass(\"java/lang/Exception\");",
" env->ThrowNew(je, e.what());",
" return NULL;",
" } catch (...) {",
- " LOGD(\"Imgproc::n_1getTextSize() catched unknown exception (...)\");",
+ " LOGD(\"Imgproc::n_1getTextSize() caught unknown exception (...)\");",
" jclass je = env->FindClass(\"java/lang/Exception\");",
" env->ThrowNew(je, \"Unknown exception in JNI code {core::getTextSize()}\");",
" return NULL;",
}
else
{
- CV_Error( CV_StsBadArg, "Input curves have uknown type" );
+ CV_Error( CV_StsBadArg, "Input curves have unknown type" );
}
}
bo = tab[bo];
}
- // L, a, b shoule be in their natural range
+ // L, a, b should be in their natural range
inline void processLabToXYZ(const v_uint8x16& lv, const v_uint8x16& av, const v_uint8x16& bv,
v_int32x4& xiv00, v_int32x4& yiv00, v_int32x4& ziv00,
v_int32x4& xiv01, v_int32x4& yiv01, v_int32x4& ziv01,
const int h = img.rows;
const int w = img.cols;
- //A quick and dirty upper bound for the maximimum number of labels.
+ //A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 4-way connectivity
//labeling can never have more than 2 new labels and 1 label for background.
//Worst case image example pattern:
const int h = img.rows;
const int w = img.cols;
- //A quick and dirty upper bound for the maximimum number of labels.
+ //A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 4-way connectivity
//labeling can never have more than 2 new labels and 1 label for background.
//Worst case image example pattern:
const int h = img.rows;
const int w = img.cols;
- //A quick and dirty upper bound for the maximimum number of labels.
+ //A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 8-connectivity case
//can never have more than 1 new label and 1 label for background.
//Worst case image example pattern:
const int h = img.rows;
const int w = img.cols;
- //A quick and dirty upper bound for the maximimum number of labels.
+ //A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 8-connectivity case
//can never have more than 1 new label and 1 label for background.
//Worst case image example pattern:
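// A hedged helper spelling out the bound sketched in the comments above
// (illustrative, not the exact expression used inside the labeling code):
// every 2x2 block can introduce at most 2 new labels with 4-connectivity and
// at most 1 with 8-connectivity, plus one label for the background.
static size_t maxLabelsUpperBound(int h, int w, int connectivity)
{
    const size_t blocks = size_t((h + 1) / 2) * size_t((w + 1) / 2); // number of 2x2 blocks
    return (connectivity == 4 ? 2 * blocks : blocks) + 1;            // + background label
}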
int i, j, k;
int max_iters = (criteria.type & CV_TERMCRIT_ITER) ? MIN(MAX(criteria.maxCount, 1), MAX_ITERS) : MAX_ITERS;
double eps = (criteria.type & CV_TERMCRIT_EPS) ? MAX(criteria.epsilon, 0.) : 0;
- eps *= eps; // use square of error in comparsion operations
+ eps *= eps; // use square of error in comparison operations
cv::Mat src = _image.getMat(), cornersmat = _corners.getMat();
int count = cornersmat.checkVector(2, CV_32F);
ivx::Image::createAddressing(dst.cols, dst.rows, 2, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
- //since OpenVX standart says nothing about thread-safety for now
+ //since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
if(dx)
if( scale != 1 )
{
// usually the smoothing part is the slowest to compute,
- // so try to scale it instead of the faster differenciating part
+ // so try to scale it instead of the faster differentiating part
if( dx == 0 )
kx *= scale;
else
{
if( last && last->y1 == y )
{
- // exclude edge if y reachs its lower point
+ // exclude edge if y reaches its lower point
prelast->next = last->next;
last = last->next;
continue;
}
else if( i < total )
{
- // insert new edge into active list if y reachs its upper point
+ // insert new edge into active list if y reaches its upper point
prelast->next = e;
e->next = last;
prelast = e;
Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() );
// inside the loop we always pass DELTA rows to the filter
- // (note that the "proceed" method takes care of possibe overflow, since
+ // (note that the "proceed" method takes care of possible overflow, since
// it was given the actual image height in the "start" method)
// on output we can get:
// * < DELTA rows (the initial buffer accumulation stage)
}
//---------------------------------------------------------------------
-// Returns true iff point c lies on the closed segement ab.
+// Returns true iff point c lies on the closed segment ab.
// Assumes it is already known that abc are collinear.
//---------------------------------------------------------------------
static bool between( Point2f a, Point2f b, Point2f c )
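{
    // Hedged sketch of a body consistent with the contract above (not the
    // verbatim OpenCV implementation): since a, b and c are already known to
    // be collinear, it suffices to check that c lies inside the axis-aligned
    // box spanned by a and b.
    return std::min(a.x, b.x) <= c.x && c.x <= std::max(a.x, b.x) &&
           std::min(a.y, b.y) <= c.y && c.y <= std::max(a.y, b.y);
}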
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
- //since OpenVX standart says nothing about thread-safety for now
+ //since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_CONSTANT, (vx_uint8)(borderValue[0]));
index = successor(index, nrOfPoints);
}
-//! Return the succesor of the provided point index
+//! Return the successor of the provided point index
/*!
-* The succesor of the last polygon point is the first polygon point
+* The successor of the last polygon point is the first polygon point
* (circular referencing)
*
* @param index Index of the point
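// Hedged sketch of a body matching the contract documented above (the helper
// name is illustrative):
static int successorSketch(int index, int nrOfPoints)
{
    return (index + 1) % nrOfPoints;   // the last point wraps around to the first
}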
#elif defined GET_EDGES
-// Get the edge result. egde type of value 2 will be marked as an edge point and set to 255. Otherwise 0.
+// Get the edge result. edge type of value 2 will be marked as an edge point and set to 255. Otherwise 0.
// map edge type mappings
// dst edge output
{
case CALIPERS_MAXHEIGHT:
{
- /* now main element lies on edge alligned to calipers side */
+ /* now main element lies on edge aligned to calipers side */
/* find opposite element i.e. transform */
/* 0->2, 1->3, 2->0, 3->1 */
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
- //since OpenVX standart says nothing about thread-safety for now
+ //since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuBox3x3(ctx, ia, ib));
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
- //since OpenVX standart says nothing about thread-safety for now
+ //since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuGaussian3x3(ctx, ia, ib));
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
- //since OpenVX standart says nothing about thread-safety for now
+ //since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_REPLICATE);
#ifdef VX_VERSION_1_1
Values(CV_8U, CV_16U, CV_32F),
OCL_ALL_CHANNELS,
Values(3, 5, 7), // Kernel size
- Values(1, 4, 8), // Width mutiple
+ Values(1, 4, 8), // Width multiple
Values((BorderType)BORDER_CONSTANT,
(BorderType)BORDER_REPLICATE,
(BorderType)BORDER_REFLECT,
int cn = reference.channels();
ssize.width *= cn;
bool next = true;
- //RGB2Lab_f works throug LUT and brings additional error
+ //RGB2Lab_f works through LUT and brings additional error
static const float maxErr = 1.f/192.f;
for (int y = 0; y < ssize.height && next; ++y)
if (inverse)
str += " | WARP_INVERSE_MAP";
- return str.empty() ? "Unsupported/Unkown interpolation type" : str;
+ return str.empty() ? "Unsupported/Unknown interpolation type" : str;
}
Size CV_ImageWarpBaseTest::randSize(RNG& rng) const
return "BORDER_WRAP";
if (borderType == BORDER_REFLECT_101)
return "BORDER_REFLECT_101";
- return "Unsupported/Unkown border type";
+ return "Unsupported/Unknown border type";
}
void CV_Remap_Test::prepare_test_data_for_reference_func()
public void wait_install()
{
- Log.e(TAG, "Instalation was not started! Nothing to wait!");
+ Log.e(TAG, "Installation was not started! Nothing to wait!");
}
};
self.cname = self.name.replace(".", "::")
self.methods = []
self.methods_suffixes = {}
- self.consts = [] # using a list to save the occurence order
+ self.consts = [] # using a list to save the occurrence order
self.private_consts = []
self.imports = set()
self.props= []
return;
} catch(const cv::Exception& e) {
AndroidBitmap_unlockPixels(env, bitmap);
- LOGE("nBitmapToMat catched cv::Exception: %s", e.what());
+ LOGE("nBitmapToMat caught cv::Exception: %s", e.what());
jclass je = env->FindClass("org/opencv/core/CvException");
if(!je) je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, e.what());
return;
} catch (...) {
AndroidBitmap_unlockPixels(env, bitmap);
- LOGE("nBitmapToMat catched unknown exception (...)");
+ LOGE("nBitmapToMat caught unknown exception (...)");
jclass je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, "Unknown exception in JNI code {nBitmapToMat}");
return;
return;
} catch(const cv::Exception& e) {
AndroidBitmap_unlockPixels(env, bitmap);
- LOGE("nMatToBitmap catched cv::Exception: %s", e.what());
+ LOGE("nMatToBitmap caught cv::Exception: %s", e.what());
jclass je = env->FindClass("org/opencv/core/CvException");
if(!je) je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, e.what());
return;
} catch (...) {
AndroidBitmap_unlockPixels(env, bitmap);
- LOGE("nMatToBitmap catched unknown exception (...)");
+ LOGE("nMatToBitmap caught unknown exception (...)");
jclass je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, "Unknown exception in JNI code {nMatToBitmap}");
return;
enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
-// special case, when the convertor needs full ArgInfo structure
+// special case, when the converter needs full ArgInfo structure
static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
{
bool allowND = true;
def find_next_token(self, s, tlist, p=0):
"""
Finds the next token from the 'tlist' in the input 's', starting from position 'p'.
- Returns the first occured token and its position, or ("", len(s)) when no token is found
+ Returns the first occurred token and its position, or ("", len(s)) when no token is found
"""
token = ""
tpos = len(s)
//
// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
// is not satisfied.
-// Synopsys:
+// Synopsis:
// GTEST_CHECK_(boolean_condition);
// or
// GTEST_CHECK_(boolean_condition) << "Additional message";
// const Foo*). When you use ImplicitCast_, the compiler checks that
// the cast is safe. Such explicit ImplicitCast_s are necessary in
// surprisingly many situations where C++ demands an exact type match
-// instead of an argument type convertable to a target type.
+// instead of an argument type convertible to a target type.
//
// The syntax for using ImplicitCast_ is the same as for static_cast:
//
void Normalize();
- // Returns a pointer to the last occurence of a valid path separator in
+ // Returns a pointer to the last occurrence of a valid path separator in
// the FilePath. On Windows, for example, both '/' and '\' are valid path
// separators. Returns NULL if no path separator was found.
const char* FindLastPathSeparator() const;
internal::UnitTestImpl* impl() { return impl_; }
const internal::UnitTestImpl* impl() const { return impl_; }
- // These classes and funcions are friends as they need to access private
+ // These classes and functions are friends as they need to access private
// members of UnitTest.
friend class Test;
friend class internal::AssertHelper;
// Print a unified diff header for one hunk.
// The format is
// "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@"
- // where the left/right parts are ommitted if unnecessary.
+ // where the left/right parts are omitted if unnecessary.
void PrintHeader(std::ostream* ss) const {
*ss << "@@ ";
if (removes_) {
return *this;
}
-// Returns a pointer to the last occurence of a valid path separator in
+// Returns a pointer to the last occurrence of a valid path separator in
// the FilePath. On Windows, for example, both '/' and '\' are valid path
// separators. Returns NULL if no path separator was found.
const char* FilePath::FindLastPathSeparator() const {
// Depending on the value of a char (or wchar_t), we print it in one
// of three formats:
// - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
-// - as a hexidecimal escape sequence (e.g. '\x7F'), or
+// - as a hexadecimal escape sequence (e.g. '\x7F'), or
// - as a special escape sequence (e.g. '\r', '\n').
enum CharFormat {
kAsIs,
return;
*os << " (" << static_cast<int>(c);
- // For more convenience, we print c's code again in hexidecimal,
+ // For more convenience, we print c's code again in hexadecimal,
// unless c was already printed in the form '\x##' or the code is in
// [1, 9].
if (format == kHexEscape || (1 <= c && c <= 9)) {
createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,
bool detectShadows=true);
-/** @brief K-nearest neigbours - based Background/Foreground Segmentation Algorithm.
+/** @brief K-nearest neighbours - based Background/Foreground Segmentation Algorithm.
-The class implements the K-nearest neigbours background subtraction described in @cite Zivkovic2006 .
+The class implements the K-nearest neighbours background subtraction described in @cite Zivkovic2006 .
Very efficient if number of foreground pixels is low.
*/
class CV_EXPORTS_W BackgroundSubtractorKNN : public BackgroundSubtractor
pixel is matching the kNN background model.
*/
CV_WRAP virtual int getkNNSamples() const = 0;
- /** @brief Sets the k in the kNN. How many nearest neigbours need to match.
+ /** @brief Sets the k in the kNN. How many nearest neighbours need to match.
*/
CV_WRAP virtual void setkNNSamples(int _nkNN) = 0;
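// Hedged usage sketch for the KNN background subtractor declared above (the
// parameter values are the documented defaults, shown here only for
// illustration):
#include <opencv2/video.hpp>
#include <opencv2/videoio.hpp>

int runKnnSketch()
{
    cv::VideoCapture cap(0);
    cv::Ptr<cv::BackgroundSubtractorKNN> knn =
        cv::createBackgroundSubtractorKNN(500,    // history
                                          400.0,  // dist2Threshold
                                          true);  // detectShadows
    cv::Mat frame, fgMask;
    while (cap.read(frame))
        knn->apply(frame, fgMask); // 0 = background, 255 = foreground, 127 = shadow
    return 0;
}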
////////////////////////
int history;
//alpha=1/history - speed of update - if the time interval you want to average over is T
- //set alpha=1/history. It is also usefull at start to make T slowly increase
+ //set alpha=1/history. It is also useful at start to make T slowly increase
//from 1 until the desired T
float fTb;
//Tb - threshold on the squared distance from the sample used to decide if it is well described
//and that is Tb=2*2*10*10 =400; where we take typical pixel level sigma=10
/////////////////////////
- //less important parameters - things you might change but be carefull
+ //less important parameters - things you might change but be careful
////////////////////////
int nN;//total number of samples
int nkNN;//number of NN for detecting background - default K=[0.1*nN]
/*//Implementation of the Gaussian mixture model background subtraction from:
//
-//"Improved adaptive Gausian mixture model for background subtraction"
+//"Improved adaptive Gaussian mixture model for background subtraction"
//Z.Zivkovic
//International Conference Pattern Recognition, UK, August, 2004
//http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
/*
Interface of Gaussian mixture algorithm from:
- "Improved adaptive Gausian mixture model for background subtraction"
+ "Improved adaptive Gaussian mixture model for background subtraction"
Z.Zivkovic
International Conference Pattern Recognition, UK, August, 2004
http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
// and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
/////////////////////////
- // less important parameters - things you might change but be carefull
+ // less important parameters - things you might change but be careful
////////////////////////
float backgroundRatio;
// corresponds to fTB=1-cf from the paper
int nHeight;
int nND;//number of data dimensions (image channels)
- bool bPostFiltering;//defult 1 - do postfiltering - will make shadow detection results also give value 255
+ bool bPostFiltering;//default 1 - do postfiltering - will make shadow detection results also give value 255
double minArea; // for postfiltering
bool bInit;//default 1, faster updates at start
////////////////////////
float fAlphaT;
//alpha - speed of update - if the time interval you want to average over is T
- //set alpha=1/T. It is also usefull at start to make T slowly increase
+ //set alpha=1/T. It is also useful at start to make T slowly increase
//from 1 until the desired T
float fTb;
//Tb - threshold on the squared Mahalan. dist. to decide if it is well described
//and that is Tb=4*4=16;
/////////////////////////
- //less important parameters - things you might change but be carefull
+ //less important parameters - things you might change but be careful
////////////////////////
float fTg;
//Tg - threshold on the squared Mahalan. dist. to decide
};
// shadow detection performed per pixel
-// should work for rgb data, could be usefull for gray scale and depth data as well
+// should work for rgb data, could be useful for gray scale and depth data as well
// See: Prati,Mikic,Trivedi,Cucchiara,"Detecting Moving Shadows...",IEEE PAMI,2003.
CV_INLINE bool
detectShadowGMM(const float* data, int nchannels, int nmodes,
std::vector<UMat> prevPyr; prevPyr.resize(maxLevel + 1);
std::vector<UMat> nextPyr; nextPyr.resize(maxLevel + 1);
- // allocate buffers with aligned pitch to be able to use cl_khr_image2d_from_buffer extention
+ // allocate buffers with aligned pitch to be able to use cl_khr_image2d_from_buffer extension
// This is the required pitch alignment in pixels
int pitchAlign = (int)ocl::Device::getDefault().imagePitchAlignment();
if (pitchAlign>0)
for (int level = 1; level <= maxLevel; ++level)
{
int cols,rows;
- // allocate buffers with aligned pitch to be able to use image on buffer extention
+ // allocate buffers with aligned pitch to be able to use image on buffer extension
cols = (prevPyr[level - 1].cols+1)/2;
rows = (prevPyr[level - 1].rows+1)/2;
prevPyr[level] = UMat(rows,(cols+pitchAlign-1)&(-pitchAlign),prevPyr[level-1].type()).colRange(0,cols);
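// The pitch expression above rounds cols up to the next multiple of
// pitchAlign; the bit trick assumes pitchAlign is a power of two.  A hedged
// stand-alone equivalent:
static int roundUpToMultiple(int value, int align /* power of two */)
{
    return (value + align - 1) & -align;   // e.g. roundUpToMultiple(21, 16) == 32
}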
`VideoCapture -> API Backend -> Operating System -> Device Driver -> Device Hardware`
@endcode
The returned value might be different from what really used by the device or it could be encoded
- using device dependant rules (eg. steps or percentage). Effective behaviour depends from device
+ using device dependent rules (e.g. steps or percentage). Effective behaviour depends on the device
driver and API Backend
*/
CV_CAP_PROP_MONOCHROME =19,
CV_CAP_PROP_SHARPNESS =20,
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
- // user can adjust refernce level
+ // user can adjust reference level
// using this feature
CV_CAP_PROP_GAMMA =22,
CV_CAP_PROP_TEMPERATURE =23,
CV_CAP_PROP_XI_COLOR_FILTER_ARRAY = 475, // Returns color filter array type of RAW data.
CV_CAP_PROP_XI_GAMMAY = 476, // Luminosity gamma
CV_CAP_PROP_XI_GAMMAC = 477, // Chromaticity gamma
- CV_CAP_PROP_XI_SHARPNESS = 478, // Sharpness Strenght
+ CV_CAP_PROP_XI_SHARPNESS = 478, // Sharpness Strength
CV_CAP_PROP_XI_CC_MATRIX_00 = 479, // Color Correction Matrix element [0][0]
CV_CAP_PROP_XI_CC_MATRIX_01 = 480, // Color Correction Matrix element [0][1]
CV_CAP_PROP_XI_CC_MATRIX_02 = 481, // Color Correction Matrix element [0][2]
unsigned int payload; // Width x height x Pixel width.
- int widthMin; // Camera sensor minium width.
+ int widthMin; // Camera sensor minimum width.
int widthMax; // Camera sensor maximum width.
- int heightMin; // Camera sensor minium height.
+ int heightMin; // Camera sensor minimum height.
int heightMax; // Camera sensor maximum height.
bool fpsAvailable;
- double fpsMin; // Camera minium fps.
+ double fpsMin; // Camera minimum fps.
double fpsMax; // Camera maximum fps.
bool gainAvailable;
double gainMin; // Camera minimum gain.
ng = CLIP( gain + ev + exposureCompensation, gainMin, gainMax);
if( ng < gain ) {
- // piority 1 - reduce gain
+ // priority 1 - reduce gain
arv_camera_set_gain(camera, (gain = ng));
return;
}
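// Hedged sketch of the CLIP helper used above (a plain clamp; the macro in the
// Aravis backend may be spelled differently):
static double clipSketch(double v, double lo, double hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}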
return 0;
}
- // Output image paramaters.
+ // Output image parameters.
int outChannels;
if (mMode == CV_CAP_MODE_BGR || mMode == CV_CAP_MODE_RGB) {
outChannels = 3;
mOutImage->imageData = reinterpret_cast<char *>(mOutImagedata);
mOutImage->imageSize = int(currSize);
- // Device image paramaters and conversion code.
+ // Device image parameters and conversion code.
// (Not all of these conversions are used in production, but they were all tested to find the fastest options.)
int deviceChannels;
int cvtCode;
dcCam = 0;
isoSpeed = 400;
fps = 15;
- // Resetted the value here to 1 in order to ensure only a single frame is stored in the buffer!
+ // Reset the value here to 1 in order to ensure only a single frame is stored in the buffer!
nDMABufs = 8;
started = false;
cameraId = 0;
//optionally setup a second (or third, fourth ...) device - same options as above
VI.setupDevice(device2);
- //As requested width and height can not always be accomodated
+ //As requested width and height can not always be accommodated
//make sure to check the size once the device is setup
int width = VI.getWidth(device1);
}
if (mt.pUnk != NULL)
{
- // Unecessary because pUnk should not be used, but safest.
+ // Unnecessary because pUnk should not be used, but safest.
mt.pUnk->Release();
mt.pUnk = NULL;
}
int videoInput::listDevices(bool silent){
- //COM Library Intialization
+ //COM Library Initialization
comInit();
if(!silent) DebugPrintOut("\nVIDEOINPUT SPY MODE!\n\n");
EnterCriticalSection(&VDList[id]->sgCallback->critSection);
result = VDList[id]->sgCallback->newFrame;
- //we need to give it some time at the begining to start up so lets check after 400 frames
+ //we need to give it some time at the beginning to start up so let's check after 400 frames
if(VDList[id]->nFramesRunning > 400 && VDList[id]->sgCallback->freezeCheck > VDList[id]->nFramesForReconnect ){
freeze = true;
}
// ----------------------------------------------------------------------
// Gives us a little pop up window to adjust settings
-// We do this in a seperate thread now!
+// We do this in a separate thread now!
// ----------------------------------------------------------------------
pAMVideoProcAmp->Set(Property, Default, VideoProcAmp_Flags_Auto);
}
else{
- // Perhaps add a check that lValue and Flags are within the range aquired from GetRange above
+ // Perhaps add a check that lValue and Flags are within the range acquired from GetRange above
pAMVideoProcAmp->Set(Property, lValue, Flags);
}
}
else
{
- // Perhaps add a check that lValue and Flags are within the range aquired from GetRange above
+ // Perhaps add a check that lValue and Flags are within the range acquired from GetRange above
pIAMCameraControl->Set(Property, lValue, Flags);
}
pIAMCameraControl->Release();
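// Hedged sketch of the range check suggested by the comment above, using
// DirectShow's IAMCameraControl::GetRange (error handling trimmed; it would
// have to run before the Set/Release calls):
long minVal = 0, maxVal = 0, stepDelta = 0, defVal = 0, capsFlags = 0;
if (SUCCEEDED(pIAMCameraControl->GetRange(Property, &minVal, &maxVal,
                                          &stepDelta, &defVal, &capsFlags)))
{
    if (lValue < minVal) lValue = minVal;   // clamp into the supported range
    if (lValue > maxVal) lValue = maxVal;
}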
{
delete VDList[i];
}
- //Unitialize com
+ //Uninitialize com
comUnInit();
}
// ----------------------------------------------------------------------
-// Same as above but to unitialize com, decreases counter and frees com
+// Same as above but to uninitialize com, decreases counter and frees com
// if no one else is using it
// ----------------------------------------------------------------------
return hr;
}
- //FITLER GRAPH MANAGER//
+ //FILTER GRAPH MANAGER//
// Create the Filter Graph Manager.
hr = CoCreateInstance(CLSID_FilterGraph, 0, CLSCTX_INPROC_SERVER,IID_IGraphBuilder, (void**)&VD->pGraph);
if (FAILED(hr))
}
Crossbar->Route(pOIndex,pIndex);
}else{
- DebugPrintOut("SETUP: Didn't find specified Physical Connection type. Using Defualt.\n");
+ DebugPrintOut("SETUP: Didn't find specified Physical Connection type. Using Default.\n");
}
//we only free the crossbar when we close or restart the device
}
if (c->codec_id == CV_CODEC(CODEC_ID_MPEG1VIDEO) || c->codec_id == CV_CODEC(CODEC_ID_MSMPEG4V3)){
/* needed to avoid using macroblocks in which some coeffs overflow
- this doesnt happen with normal video, it just happens here as the
- motion of the chroma plane doesnt match the luma plane */
+ this doesn't happen with normal video, it just happens here as the
+ motion of the chroma plane doesn't match the luma plane */
/* avoid FFMPEG warning 'clipping 1 dct coefficients...' */
c->mb_decision=2;
}
#endif
#if LIBAVCODEC_VERSION_INT>0x000409
- // some formats want stream headers to be seperate
+ // some formats want stream headers to be separate
if(oc->oformat->flags & AVFMT_GLOBALHEADER)
{
#if LIBAVCODEC_BUILD > CALC_FFMPEG_VERSION(56, 35, 0)
if (c->codec_id == CV_CODEC(CODEC_ID_MPEG1VIDEO) || c->codec_id == CV_CODEC(CODEC_ID_MSMPEG4V3))
{
// needed to avoid using macroblocks in which some coeffs overflow
- // this doesnt happen with normal video, it just happens here as the
- // motion of the chroma plane doesnt match the luma plane
+ // this doesn't happen with normal video, it just happens here as the
+ // motion of the chroma plane doesn't match the luma plane
// avoid FFMPEG warning 'clipping 1 dct coefficients...'
}
#if LIBAVCODEC_VERSION_INT > 0x000409
- // some formats want stream headers to be seperate
+ // some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
{
#if LIBAVCODEC_BUILD > CALC_FFMPEG_VERSION(56, 35, 0)
\brief Wrapper to GigEVisionAPI function
\param api
\param eventHandler
- \return true - succsess, else - false
+ \return true - success, else - false
See \a wrprInitGigEVisionAPI, \a gige::IGigEVisionAPI
*/
/**
* Write message to @field msgsBuffer if user want to store them
* (@field collectMsgs).
- * Print debug informations on screen.
+ * Print debug information on screen.
*/
template<typename OsstreamPrintable>
void DigitalCameraCapture::message(MsgType msgType, const char * msg,
/*!
* \brief CvCapture_GStreamer::retrieveFrame
* \return IplImage pointer. [Transfer Full]
- * Retreive the previously grabbed buffer, and wrap it in an IPLImage structure
+ * Retrieve the previously grabbed buffer, and wrap it in an IPLImage structure
*/
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
if (!gst_structure_get_int (structure, "height", &height))
{
- CV_WARN("Cannot query video heigth\n");
+ CV_WARN("Cannot query video height\n");
}
gint num = 0, denom=1;
}
/*!
- * \brief CvCapture_GStreamer::getProperty retreive the requested property from the pipeline
+ * \brief CvCapture_GStreamer::getProperty retrieve the requested property from the pipeline
* \param propId requested property
* \return property value
*
- * There are two ways the properties can be retreived. For seek-based properties we can query the pipeline.
+ * There are two ways the properties can be retrieved. For seek-based properties we can query the pipeline.
* For frame-based properties, we use the caps of the last received sample. This means that some properties
* are not available until a first frame was received
*/
For Release: OpenCV-Linux Beta4 Opencv-0.9.6
FS: this patch fix not sequential index of device (unplugged device), and real numCameras.
- for -1 index (icvOpenCAM_V4L) i dont use /dev/video but real device available, because
+ for -1 index (icvOpenCAM_V4L) I don't use /dev/video but real device available, because
if /dev/video is a link to /dev/video0 and i unplugged device on /dev/video0, /dev/video
is a bad link. I search the first available device with indexList.
int detect = 0;
- // Test device for V4L compability
+ // Test device for V4L compatibility
/* Test using an open to see if this new device name really does exists. */
/* No matter what the name - it still must be opened! */
int detect = 0;
- // Test device for V4L2 compability
+ // Test device for V4L2 compatibility
/* Open and test V4L2 device */
capture->deviceHandle = v4l2_open (deviceName, O_RDWR /* required */ | O_NONBLOCK, 0);
char deviceName[MAX_DEVICE_DRIVER_NAME];
if (!numCameras)
- icvInitCapture_V4L(); /* Havent called icvInitCapture yet - do it now! */
+ icvInitCapture_V4L(); /* Haven't called icvInitCapture yet - do it now! */
if (!numCameras)
return NULL; /* Are there any /dev/video input sources? */
capture->buffers[MAX_V4L_BUFFERS].start = NULL;
#endif
- /* w/o memset some parts arent initialized - AKA: Fill it with zeros so it is clean */
+ /* w/o memset some parts aren't initialized - AKA: Fill it with zeros so it is clean */
memset(capture,0,sizeof(CvCaptureCAM_V4L));
- /* Present the routines needed for V4L funtionality. They are inserted as part of
+ /* Present the routines needed for V4L functionality. They are inserted as part of
the standard set of cv calls promoting transparency. "Vector Table" insertion. */
capture->FirstCapture = 1;
CLEAR (capture->form);
capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- /* read the current setting, mainly to retreive the pixelformat information */
+ /* read the current setting, mainly to retrieve the pixelformat information */
xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form);
/* set the values we want to change */
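/* Hedged sketch of the "set the values we want to change" step (standard V4L2
   read-modify-write; the fields actually touched by the backend may differ): */
capture->form.fmt.pix.width       = 640;                /* desired width  */
capture->form.fmt.pix.height      = 480;                /* desired height */
capture->form.fmt.pix.pixelformat = V4L2_PIX_FMT_BGR24;
capture->form.fmt.pix.field       = V4L2_FIELD_ANY;
if (xioctl(capture->deviceHandle, VIDIOC_S_FMT, &capture->form) < 0)
    perror("VIDIOC_S_FMT");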
}
if (v4l1_ioctl(capture->deviceHandle, VIDIOCSPICT, &capture->imageProperties) < 0){
- fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set video informations\n");
+ fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set video information\n");
icvCloseCAM_V4L(capture);
return -1;
}
{
DWORD biSize; // Write header size of BITMAPINFO header structure
LONG biWidth; // width in pixels
- LONG biHeight; // heigth in pixels
+ LONG biHeight; // height in pixels
WORD biPlanes; // Number of color planes in which the data is stored
WORD biBitCount; // Number of bits per pixel
DWORD biCompression; // Type of compression used (uncompressed: NO_COMPRESSION=0)
static const char jpegHeader[] =
"\xFF\xD8" // SOI - start of image
-"\xFF\xE0" // APP0 - jfif extention
+"\xFF\xE0" // APP0 - jfif extension
"\x00\x10" // 2 bytes: length of APP0 segment
"JFIF\x00" // JFIF signature
"\x01\x02" // version of JFIF
Media Foundation-based Video Capturing module is based on
videoInput library by Evgeny Pereguda:
http://www.codeproject.com/Articles/559437/Capturing-of-video-from-web-camera-on-Windows-7-an
- Originaly licensed under The Code Project Open License (CPOL) 1.02:
+ Originally licensed under The Code Project Open License (CPOL) 1.02:
http://www.codeproject.com/info/cpol10.aspx
*/
//require Windows 8 for some of the formats defined otherwise could baseline on lower version
ImageGrabberThread::~ImageGrabberThread(void)
{
- DebugPrintOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Destroing ImageGrabberThread\n", igt_DeviceID);
+ DebugPrintOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Destroying ImageGrabberThread\n", igt_DeviceID);
if (igt_Handle)
WaitForSingleObject(igt_Handle, INFINITE);
delete igt_pImageGrabber;
if( propValue != 0.0 ) // "on"
{
// if there isn't image generator (i.e. ASUS XtionPro doesn't have it)
- // then the property isn't avaliable
+ // then the property isn't available
if( imageGenerator.IsValid() )
{
if( !depthGenerator.GetAlternativeViewPointCap().IsViewPointAs(imageGenerator) )
if( propValue != 0.0 ) // "on"
{
// if there isn't image generator (i.e. ASUS XtionPro doesn't have it)
- // then the property isn't avaliable
+ // then the property isn't available
if ( streams[CV_COLOR_STREAM].isValid() )
{
openni::ImageRegistrationMode mode = propValue != 0.0 ? openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR : openni::IMAGE_REGISTRATION_OFF;
// we would use CFStringCreateWithFileSystemRepresentation (kCFAllocatorDefault, filename) on Mac OS X 10.4
CFStringRef inPath = CFStringCreateWithCString (kCFAllocatorDefault, filename, kCFStringEncodingISOLatin1);
- OPENCV_ASSERT ((inPath != nil), "icvOpenFile_QT_Movie", "couldnt create CFString from a string");
+ OPENCV_ASSERT ((inPath != nil), "icvOpenFile_QT_Movie", "couldn't create CFString from a string");
// create the data reference
myErr = QTNewDataReferenceFromFullPathCFString (inPath, kQTPOSIXPathStyle, 0, & myDataRef, & myDataRefType);
// create gworld for decompressed image
myErr = QTNewGWorld (& capture->myGWorld, k32ARGBPixelFormat /* k24BGRPixelFormat geht leider nicht */,
& myRect, nil, nil, 0);
- OPENCV_ASSERT (myErr == noErr, "icvOpenFile_QT_Movie", "couldnt create QTNewGWorld() for output image");
+ OPENCV_ASSERT (myErr == noErr, "icvOpenFile_QT_Movie", "couldn't create QTNewGWorld() for output image");
SetMovieGWorld (capture->myMovie, capture->myGWorld, nil);
// build IplImage header that will point to the PixMap of the Movie's GWorld later on
// update IplImage header that points to PixMap of the Movie's GWorld.
// unfortunately, cvCvtColor doesn't know ARGB, the QuickTime pixel format,
- // so we pass a modfied address.
+ // so we pass a modified address.
// ATTENTION: don't access the last pixel's alpha entry, it's nonexistent
myPixMapHandle = GetGWorldPixMap (capture->myGWorld);
LockPixels (myPixMapHandle);
char nameBuffer [255];
result = GetComponentInfo (component, & desc, nameHandle, nil, nil);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt GetComponentInfo()");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't GetComponentInfo()");
OPENCV_ASSERT (*nameHandle, "icvOpenCamera_QT", "No name returned by GetComponentInfo()");
snprintf (nameBuffer, (**nameHandle) + 1, "%s", (char *) (* nameHandle + 1));
printf ("- Videodevice: %s\n", nameBuffer);
{
result = VDGetNumberOfInputs (capture->grabber, & capture->channel);
if (result != noErr)
- fprintf (stderr, "Couldnt GetNumberOfInputs: %d\n", (int) result);
+ fprintf (stderr, "Couldn't GetNumberOfInputs: %d\n", (int) result);
else
{
#ifndef NDEBUG
Str255 nameBuffer;
result = VDGetInputName (capture->grabber, capture->channel, nameBuffer);
- OPENCV_ASSERT (result == noErr, "ictOpenCamera_QT", "couldnt GetInputName()");
+ OPENCV_ASSERT (result == noErr, "ictOpenCamera_QT", "couldn't GetInputName()");
snprintf (name, *nameBuffer, "%s", (char *) (nameBuffer + 1));
printf (" Choosing input %d - %s\n", (int) capture->channel, name);
#endif
// Select the desired input
result = VDSetInput (capture->grabber, capture->channel);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt select video digitizer input");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't select video digitizer input");
// get the bounding rectangle of the video digitizer
result = VDGetActiveSrcRect (capture->grabber, capture->channel, & myRect);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create VDGetActiveSrcRect from digitizer");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create VDGetActiveSrcRect from digitizer");
myRect.right = 640; myRect.bottom = 480;
capture->size = cvSize (myRect.right - myRect.left, myRect.bottom - myRect.top);
printf ("Source rect is %d, %d -- %d, %d\n", (int) myRect.left, (int) myRect.top, (int) myRect.right, (int) myRect.bottom);
// create offscreen GWorld
result = QTNewGWorld (& capture->myGWorld, k32ARGBPixelFormat, & myRect, nil, nil, 0);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create QTNewGWorld() for output image");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create QTNewGWorld() for output image");
// get pixmap
capture->pixmap = GetGWorldPixMap (capture->myGWorld);
result = GetMoviesError ();
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt get pixmap");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get pixmap");
// set digitizer rect
result = VDSetDigitizerRect (capture->grabber, & myRect);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create VDGetActiveSrcRect from digitizer");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create VDGetActiveSrcRect from digitizer");
// set destination of digitized input
result = VDSetPlayThruDestination (capture->grabber, capture->pixmap, & myRect, nil, nil);
printf ("QuickTime error: %d\n", (int) result);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video destination");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video destination");
// get destination of digitized images
result = VDGetPlayThruDestination (capture->grabber, & capture->pixmap, nil, nil, nil);
printf ("QuickTime error: %d\n", (int) result);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt get video destination");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get video destination");
OPENCV_ASSERT (capture->pixmap != nil, "icvOpenCamera_QT", "empty set video destination");
// get the bounding rectangle of the video digitizer
// build IplImage header that will point to the PixMap of the Movie's GWorld later on
capture->image_rgb = cvCreateImageHeader (capture->size, IPL_DEPTH_8U, 4);
- OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldnt create image header");
+ OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldn't create image header");
// create IplImage that hold correctly formatted result
capture->image_bgr = cvCreateImage (capture->size, IPL_DEPTH_8U, 3);
- OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldnt create image");
+ OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldn't create image");
// notify digitizer component that we will be starting grabbing soon
result = VDCaptureStateChanging (capture->grabber, vdFlagCaptureIsForRecord | vdFlagCaptureStarting | vdFlagCaptureLowLatency);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set capture state");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set capture state");
// yeah, we did it
// notify digitizer component that we will be stopping grabbing soon
result = VDCaptureStateChanging (capture->grabber, vdFlagCaptureStopping);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set capture state");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set capture state");
// release memory
cvReleaseImage (& capture->image_bgr);
DisposeGWorld (capture->myGWorld);
CloseComponent (capture->grabber);
- // sucessful
+ // successful
return 1;
}
// update IplImage header that points to PixMap of the Movie's GWorld.
// unfortunately, cvCvtColor doesn't know ARGB, the QuickTime pixel format,
- // so we pass a modfied address.
+ // so we pass a modified address.
// ATTENTION: don't access the last pixel's alpha entry, it's nonexistent
//myPixMapHandle = GetGWorldPixMap (capture->myGWorld);
myPixMapHandle = capture->pixmap;
// we need a decompression sequence that fits the raw data coming from the camera
err = SGGetChannelSampleDescription (channel, (Handle) description);
- OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldnt get channel sample description");
+ OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldn't get channel sample description");
//*************************************************************************************//
//This fixed a bug when Quicktime is called twice to grab a frame (black band bug) - Yannick Verdie 2010
err = DecompressSequenceBegin (&capture->sequence, description, capture->gworld, 0,&capture->bounds,&scaleMatrix, srcCopy, NULL, 0, codecNormalQuality, bestSpeedCodec);
//**************************************************************************************//
- OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldnt begin decompression sequence");
+ OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldn't begin decompression sequence");
DisposeHandle ((Handle) description);
}
// open sequence grabber component
capture->grabber = OpenDefaultComponent (SeqGrabComponentType, 0);
- OPENCV_ASSERT (capture->grabber, "icvOpenCamera_QT", "couldnt create image");
+ OPENCV_ASSERT (capture->grabber, "icvOpenCamera_QT", "couldn't create image");
// initialize sequence grabber component
result = SGInitialize (capture->grabber);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt initialize sequence grabber");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't initialize sequence grabber");
result = SGSetDataRef (capture->grabber, 0, 0, seqGrabDontMakeMovie);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set data reference of sequence grabber");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set data reference of sequence grabber");
// set up video channel
result = SGNewChannel (capture->grabber, VideoMediaType, & (capture->channel));
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create new video channel");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create new video channel");
// select the camera indicated by index
SGDeviceList device_list = 0;
result = SGGetChannelDeviceList (capture->channel, 0, & device_list);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt get channel device list");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get channel device list");
for (int i = 0, current_index = 1; i < (*device_list)->count; i++)
{
SGDeviceName device = (*device_list)->entry[i];
if (current_index == index)
{
result = SGSetChannelDevice (capture->channel, device.name);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set the channel video device");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set the channel video device");
break;
}
current_index++;
}
}
result = SGDisposeDeviceList (capture->grabber, device_list);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt dispose the channel device list");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't dispose the channel device list");
// query natural camera resolution -- this will be wrong, but will be an upper
// bound on the actual resolution -- the actual resolution is set below
// after starting the frame grabber
result = SGGetSrcVideoBounds (capture->channel, & (capture->bounds));
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video channel bounds");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
// create offscreen GWorld
result = QTNewGWorld (& (capture->gworld), k32ARGBPixelFormat, & (capture->bounds), 0, 0, 0);
result = SGSetGWorld (capture->grabber, capture->gworld, 0);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set GWorld for sequence grabber");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set GWorld for sequence grabber");
result = SGSetChannelBounds (capture->channel, & (capture->bounds));
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video channel bounds");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
result = SGSetChannelUsage (capture->channel, seqGrabRecord);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set channel usage");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set channel usage");
// start recording so we can size
result = SGStartRecord (capture->grabber);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt start recording");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't start recording");
// don't know *actual* resolution until now
ImageDescriptionHandle imageDesc = (ImageDescriptionHandle)NewHandle(0);
// stop grabber so that we can reset the parameters to the right size
result = SGStop (capture->grabber);
- OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldnt stop recording");
+ OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldn't stop recording");
// reset GWorld to correct image size
GWorldPtr tmpgworld;
result = QTNewGWorld( &tmpgworld, k32ARGBPixelFormat, &(capture->bounds), 0, 0, 0);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create offscreen GWorld");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create offscreen GWorld");
result = SGSetGWorld( capture->grabber, tmpgworld, 0);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set GWorld for sequence grabber");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set GWorld for sequence grabber");
DisposeGWorld( capture->gworld );
capture->gworld = tmpgworld;
result = SGSetChannelBounds (capture->channel, & (capture->bounds));
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video channel bounds");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
// allocate images
capture->size = cvSize (capture->bounds.right - capture->bounds.left, capture->bounds.bottom - capture->bounds.top);
// so we shift the base address by one byte.
// ATTENTION: don't access the last pixel's alpha entry, it's nonexistent
capture->image_rgb = cvCreateImageHeader (capture->size, IPL_DEPTH_8U, 4);
- OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldnt create image header");
+ OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldn't create image header");
pixmap = GetGWorldPixMap (capture->gworld);
OPENCV_ASSERT (pixmap, "icvOpenCamera_QT", "didn't get GWorld PixMap handle");
LockPixels (pixmap);
// create IplImage that hold correctly formatted result
capture->image_bgr = cvCreateImage (capture->size, IPL_DEPTH_8U, 3);
- OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldnt create image");
+ OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldn't create image");
// tell the sequence grabber to invoke our data proc
result = SGSetDataProc (capture->grabber, NewSGDataUPP (icvDataProc_QT_Cam), (long) capture);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set data proc");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set data proc");
// start recording
result = SGStartRecord (capture->grabber);
- OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt start recording");
+ OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't start recording");
return 1;
}
// stop recording
result = SGStop (capture->grabber);
- OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldnt stop recording");
+ OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldn't stop recording");
// close sequence grabber component
result = CloseComponent (capture->grabber);
- OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldnt close sequence grabber component");
+ OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldn't close sequence grabber component");
// end decompression sequence
CDSequenceEnd (capture->sequence);
cvReleaseImageHeader (& capture->image_rgb);
DisposeGWorld (capture->gworld);
- // sucessful
+ // successful
return 1;
}
For Release: OpenCV-Linux Beta4 Opencv-0.9.6
FS: this patch fix not sequential index of device (unplugged device), and real numCameras.
- for -1 index (icvOpenCAM_V4L) i dont use /dev/video but real device available, because
+ for -1 index (icvOpenCAM_V4L) I don't use /dev/video but real device available, because
if /dev/video is a link to /dev/video0 and i unplugged device on /dev/video0, /dev/video
is a bad link. I search the first available device with indexList.
11th patch: April 2, 2013, Forrest Reiling forrest.reiling@gmail.com
Added v4l2 support for getting capture property CV_CAP_PROP_POS_MSEC.
Returns the millisecond timestamp of the last frame grabbed or 0 if no frames have been grabbed
-Used to successfully synchonize 2 Logitech C310 USB webcams to within 16 ms of one another
+Used to successfully synchronize 2 Logitech C310 USB webcams to within 16 ms of one another
make & enjoy!
#endif
#ifdef HAVE_VIDEOIO
-// NetBSD compability layer with V4L2
+// NetBSD compatibility layer with V4L2
#include <sys/videoio.h>
#endif
static int try_init_v4l2(CvCaptureCAM_V4L* capture, const char *deviceName)
{
- // Test device for V4L2 compability
+ // Test device for V4L2 compatibility
// Return value:
// -1 then unable to open device
// 0 then detected nothing
char _deviceName[MAX_DEVICE_DRIVER_NAME];
if (!numCameras)
- icvInitCapture_V4L(); /* Havent called icvInitCapture yet - do it now! */
+ icvInitCapture_V4L(); /* Haven't called icvInitCapture yet - do it now! */
if (!numCameras)
return false; /* Are there any /dev/video input sources? */
case XI_WRITEREG : error_message = "Register write error"; break;
case XI_FREE_RESOURCES : error_message = "Freeing resources error"; break;
case XI_FREE_CHANNEL : error_message = "Freeing channel error"; break;
- case XI_FREE_BANDWIDTH : error_message = "Freeing bandwith error"; break;
+ case XI_FREE_BANDWIDTH : error_message = "Freeing bandwidth error"; break;
case XI_READBLK : error_message = "Read block error"; break;
case XI_WRITEBLK : error_message = "Write block error"; break;
case XI_NO_IMAGE : error_message = "No image"; break;
case XI_ACQUISITION_ALREADY_UP : error_message = "Acquisition already started"; break;
case XI_OLD_DRIVER_VERSION : error_message = "Old version of device driver installed to the system."; break;
case XI_GET_LAST_ERROR : error_message = "To get error code please call GetLastError function."; break;
- case XI_CANT_PROCESS : error_message = "Data cant be processed"; break;
+ case XI_CANT_PROCESS : error_message = "Data can't be processed"; break;
case XI_ACQUISITION_STOPED : error_message = "Acquisition has been stopped. It should be started before GetImage."; break;
- case XI_ACQUISITION_STOPED_WERR : error_message = "Acquisition has been stoped with error."; break;
+ case XI_ACQUISITION_STOPED_WERR : error_message = "Acquisition has been stopped with error."; break;
case XI_INVALID_INPUT_ICC_PROFILE : error_message = "Input ICC profile missed or corrupted"; break;
case XI_INVALID_OUTPUT_ICC_PROFILE : error_message = "Output ICC profile missed or corrupted"; break;
case XI_DEVICE_NOT_READY : error_message = "Device not ready to operate"; break;
case XI_SHADING_TOOCONTRAST : error_message = "Shading too contrast"; break;
case XI_ALREADY_INITIALIZED : error_message = "Module already initialized"; break;
- case XI_NOT_ENOUGH_PRIVILEGES : error_message = "Application doesnt enough privileges(one or more app"; break;
+ case XI_NOT_ENOUGH_PRIVILEGES : error_message = "Application doesn't have enough privileges (one or more app"; break;
case XI_NOT_COMPATIBLE_DRIVER : error_message = "Installed driver not compatible with current software"; break;
case XI_TM_INVALID_RESOURCE : error_message = "TM file was not loaded successfully from resources"; break;
- case XI_DEVICE_HAS_BEEN_RESETED : error_message = "Device has been reseted, abnormal initial state"; break;
+ case XI_DEVICE_HAS_BEEN_RESETED : error_message = "Device has been reset, abnormal initial state"; break;
case XI_NO_DEVICES_FOUND : error_message = "No Devices Found"; break;
case XI_RESOURCE_OR_FUNCTION_LOCKED : error_message = "Resource(device) or function locked by mutex"; break;
case XI_BUFFER_SIZE_TOO_SMALL : error_message = "Buffer provided by user is too small"; break;
- case XI_COULDNT_INIT_PROCESSOR : error_message = "Couldnt initialize processor."; break;
+ case XI_COULDNT_INIT_PROCESSOR : error_message = "Couldn't initialize processor."; break;
case XI_NOT_INITIALIZED : error_message = "The object/module/procedure/process being referred to has not been started."; break;
case XI_RESOURCE_NOT_FOUND : error_message = "Resource not found(could be processor, file, item..)."; break;
case XI_UNKNOWN_PARAM : error_message = "Unknown parameter"; break;