add_definitions(-DJAS_WIN_MSVC_BUILD)
endif()
+if(CMAKE_COMPILER_IS_GNUCXX)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-uninitialized")
+endif()
+
if(UNIX)
if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
Mat dst = _dst.getMat();
for(size_t idx = 0; idx < indices.size(); idx++) {
Mat originalRow = src.row(indices[idx]);
- Mat sortedRow = dst.row(idx);
+ Mat sortedRow = dst.row((int)idx);
originalRow.copyTo(sortedRow);
}
}
case CV_32SC1: return interp1_<int>(x,Y,xi); break;
case CV_32FC1: return interp1_<float>(x,Y,xi); break;
case CV_64FC1: return interp1_<double>(x,Y,xi); break;
- default: CV_Error(CV_StsUnsupportedFormat, ""); return Mat();
+ default: CV_Error(CV_StsUnsupportedFormat, ""); break;
}
+ return Mat();
}
namespace colormap
if(labels.size() != (size_t)N)
CV_Error(CV_StsUnsupportedFormat, "Labels must be given as integer (CV_32SC1).");
// compute the Fisherfaces
- int C = remove_dups(labels).size(); // number of unique classes
+ int C = (int)remove_dups(labels).size(); // number of unique classes
// clip number of components to be a valid number
if((_num_components <= 0) || (_num_components > (C-1)))
_num_components = (C-1);
calcHist(&src, 1, 0, Mat(), result, 1, &histSize, &histRange, true, false);
// normalize
if(normed) {
- result /= src.total();
+ result /= (int)src.total();
}
return result.reshape(1,1);
}
if(n == 0)
return Mat();
// dimensionality of samples
- int d = src.getMat(0).total();
+ int d = (int)src.getMat(0).total();
// create data matrix
Mat data(n, d, rtype);
// copy data
Mat dst = _dst.getMat();
for(size_t idx = 0; idx < indices.size(); idx++) {
Mat originalCol = src.col(indices[idx]);
- Mat sortedCol = dst.col(idx);
+ Mat sortedCol = dst.col((int)idx);
originalCol.copyTo(sortedCol);
}
}
vector<int> num2label = remove_dups(labels);
map<int, int> label2num;
for (size_t i = 0; i < num2label.size(); i++)
- label2num[num2label[i]] = i;
+ label2num[num2label[i]] = (int)i;
for (size_t i = 0; i < labels.size(); i++)
mapped_labels[i] = label2num[labels[i]];
// get sample size, dimension
int N = data.rows;
int D = data.cols;
// number of unique labels
- int C = num2label.size();
+ int C = (int)num2label.size();
// throw error if less labels, than samples
if (labels.size() != (size_t)N)
CV_Error(CV_StsBadArg, "Error: The number of samples must equal the number of labels.");
break;
std::transform(left.begin(), left.end(), buf_beg, WgcHelper(group, groupingMat));
- int minInd = min_element(buf_beg, buf_beg + left_size) - buf_beg;
+ size_t minInd = min_element(buf_beg, buf_beg + left_size) - buf_beg;
if (buf[minInd] < model.T_GroupingCorespondances) /* can add corespondance to group */
{
// On Win64 optimized versions of DFT and DCT fail the tests (fixed in VS2010)
#if defined _MSC_VER && !defined CV_ICC && defined _M_X64 && _MSC_VER < 1600
#pragma optimize("", off)
+#pragma warning( disable : 4748 )
#endif
/****************************************************************************************\
\r
for (size_t i = 0; i < dst_gold.size(); ++i)\r
{\r
- cv::Point2f res = h_dst.at<cv::Point2f>(0, i);\r
+ cv::Point2f res = h_dst.at<cv::Point2f>(0, (int)i);\r
cv::Point2f res_gold = dst_gold[i];\r
\r
ASSERT_LE(cv::norm(res_gold - res) / cv::norm(res_gold), 1e-3f);\r
\r
cv::Mat rvec, tvec;\r
std::vector<int> inliers;\r
- cv::gpu::solvePnPRansac(object, cv::Mat(1, image_vec.size(), CV_32FC2, &image_vec[0]),\r
+ cv::gpu::solvePnPRansac(object, cv::Mat(1, (int)image_vec.size(), CV_32FC2, &image_vec[0]),\r
camera_mat, cv::Mat(1, 8, CV_32F, cv::Scalar::all(0)),\r
rvec, tvec, false, 200, 2.f, 100, &inliers);\r
\r
cv::goodFeaturesToTrack(gray_frame, pts, 1000, 0.01, 0.0);\r
\r
cv::gpu::GpuMat d_pts;\r
- cv::Mat pts_mat(1, pts.size(), CV_32FC2, (void*)&pts[0]);\r
+ cv::Mat pts_mat(1, (int)pts.size(), CV_32FC2, (void*)&pts[0]);\r
d_pts.upload(pts_mat);\r
\r
cv::gpu::PyrLKOpticalFlow pyrLK;\r
{
if(!obj || obj == Py_None)
return true;
- value = (int)PyInt_AsLong(obj);
- return value != -1 || !PyErr_Occurred();
+ int ivalue = (int)PyInt_AsLong(obj);
+ value = cv::saturate_cast<uchar>(ivalue);
+ return ivalue != -1 || !PyErr_Occurred();
}
static PyObject* pyopencv_from(double value)
// Convenience overload: forwards to the pointer+count form of getMotion().
// NOTE(review): this span is a patch hunk, not plain source — the '-'/'+'
// lines below are diff markers. The change adds an explicit (int) cast
// because vector::size() returns size_t while the callee takes int
// (silences a 64-bit size_t -> int conversion warning).
Mat getMotion(int from, int to, const vector<Mat> &motions)
{
- return getMotion(from, to, &motions[0], motions.size());
+ return getMotion(from, to, &motions[0], (int)motions.size());
}
} // namespace videostab
// Computes the stabilizing motion for the current frame position and hands
// it to the base-class stabilization step.
// NOTE(review): patch hunk — the '-'/'+' lines are diff markers. The change
// casts motions_.size() (size_t) to int to match the stabilize() parameter
// type and avoid a narrowing-conversion warning on 64-bit builds.
void OnePassStabilizer::stabilizeFrame()
{
- Mat stabilizationMotion = motionFilter_->stabilize(curStabilizedPos_, &motions_[0], motions_.size());
+ Mat stabilizationMotion = motionFilter_->stabilize(curStabilizedPos_, &motions_[0], (int)motions_.size());
StabilizerBase::stabilizeFrame(stabilizationMotion);
}
}
-int main( int argc, char** argv )
+int main()
{
int i, j;
CvMemStorage* storage = cvCreateMemStorage(0);
}
}
-int main(int argc, char** argv)
+int main()
{
CvCapture* capture = cvCreateCameraCapture(0);
CvMat* prevgray = 0, *gray = 0, *flow = 0, *cflow = 0;
switch (key)\r
{\r
case 27:\r
- return 0;\r
break;\r
\r
case 'A':\r