perViewErrors2.resize(numImgs);
}
- for (size_t i = 0; i<numImgs; i++)
+ for (size_t i = 0; i < numImgs; i++)
{
- perViewErrors1[i] = perViewErrorsMat.at<double>(i, 0);
- perViewErrors2[i] = perViewErrorsMat.at<double>(i, 1);
+ perViewErrors1[i] = perViewErrorsMat.at<double>((int)i, 0); // Mat::at takes int indices
+ perViewErrors2[i] = perViewErrorsMat.at<double>((int)i, 1);
}
if (rotationMatrices.size() != numImgs)
translationVectors.resize(numImgs);
}
- for( size_t i = 0; i < numImgs; i++ )
+ for (size_t i = 0; i < numImgs; i++)
{
Mat r9;
cv::Rodrigues( rvecs[i], r9 );
std::vector<cv::Point2d> reprojectedImgPts[2] = {std::vector<cv::Point2d>(n_images), std::vector<cv::Point2d>(n_images)};
size_t totalPoints = 0;
- float totalMSError[2] = { 0, 0 }, viewMSError[2];
+ double totalMSError[2] = { 0, 0 };
for( size_t i = 0; i < n_images; i++ )
{
cv::Matx33d viewRotMat1, viewRotMat2;
cv::fisheye::projectPoints(objectPoints[i], reprojectedImgPts[0], viewRotVec1, viewT1, K1, D1, alpha1);
cv::fisheye::projectPoints(objectPoints[i], reprojectedImgPts[1], viewRotVec2, viewT2, K2, D2, alpha2);
- viewMSError[0] = cv::norm(leftPoints[i], reprojectedImgPts[0], cv::NORM_L2SQR);
- viewMSError[1] = cv::norm(rightPoints[i], reprojectedImgPts[1], cv::NORM_L2SQR);
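+ // cv::norm returns double, so keep the per-view errors in double to avoid a narrowing conversion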
+ double viewMSError[2] = {
+ cv::norm(leftPoints[i], reprojectedImgPts[0], cv::NORM_L2SQR),
+ cv::norm(rightPoints[i], reprojectedImgPts[1], cv::NORM_L2SQR)
+ };
size_t n = objectPoints[i].size();
totalMSError[0] += viewMSError[0];
#include "opencv2/core/cvstd.hpp"
#include "opencv2/core/matx.hpp"
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4459) // declaration of '...' hides global declaration
+#endif
+
namespace cv
{
} // cv
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
#endif //OPENCV_CORE_TYPES_HPP
typedef typename VTraits<uint_reg>::lane_type uint_type;
Data<R> dataA, dataB(0), dataC, dataD(1), dataE(2);
- dataA[0] = std::numeric_limits<int_type>::max();
+ dataA[0] = (LaneType)std::numeric_limits<int_type>::max(); // explicit cast silences the conversion warning when LaneType differs from int_type
dataA[1] *= (LaneType)-1;
union
{
}
};
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4702) // unreachable code
+#endif
template<typename... Outputs>
void postprocess_ocl(Outputs&... outs)
{
int dummy[] = { 0, (validate(&outs), 0)... };
cv::util::suppress_unused_warning(dummy);
}
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
template<class T> struct ocl_get_out;
template<> struct ocl_get_out<cv::GMat>
std::shared_ptr<IDeviceSelector> getDefaultDeviceSelector(const std::vector<CfgParam>&) {
std::cerr << "Cannot utilize getDefaultVPLDeviceAndCtx without HAVE_ONEVPL enabled" << std::endl;
util::throw_error(std::logic_error("Cannot utilize getDefaultVPLDeviceAndCtx without HAVE_ONEVPL enabled"));
- return nullptr;
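+ // no return needed: util::throw_error never returns normally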
}
} // namespace onevpl
const float tmp = normL2Sqr<float>(Point2f(finderPattern[i]) - cornerPointsQR[j]);
if (tmp < distanceToOrig) {
distanceToOrig = tmp;
- closestFinderPatternV = i;
- closetOriginalV = j;
+ closestFinderPatternV = (int)i;
+ closetOriginalV = (int)j;
}
}
}
if (useCode) {
CV_LOG_INFO(NULL, "Version type: useCode");
- version = versionByCode;
+ version = (uint8_t)versionByCode; // explicit narrowing; 'version' is range-checked below
}
else if (useFinderPattern ) {
CV_LOG_INFO(NULL, "Version type: useFinderPattern");
- version = cvRound(versionByFinderPattern);
+ version = (uint8_t)cvRound(versionByFinderPattern);
}
else {
CV_LOG_INFO(NULL, "Version type: useTransition");
- version = versionByTransition;
+ version = (uint8_t)versionByTransition;
}
version_size = 21 + (version - 1) * 4;
if ( !(0 < version && version <= 40) ) { return false; }
for (int i = 0; i < sz; i++)
{
- x1Vec[i] = i - sz2;
+ x1Vec[i] = (float)(i - sz2); // x1Vec holds float data (wrapped as CV_32FC1 below)
}
Mat x1M(1, sz, CV_32FC1, x1Vec.data());
Size imgSz = srcImg.size();
int c = (originalSz + 1) / 2;
- int context_xmin = targetPos[0] - c;
+ int context_xmin = (int)(targetPos[0]) - c; // explicit truncation of the floating-point target position
int context_xmax = context_xmin + originalSz - 1;
- int context_ymin = targetPos[1] - c;
+ int context_ymin = (int)(targetPos[1]) - c;
int context_ymax = context_ymin + originalSz - 1;
int left_pad = std::max(0, -context_xmin);
bool TrackerNanoImpl::update(InputArray image_, Rect &boundingBoxRes)
{
image = image_.getMat().clone();
- int targetSzSum = targetSz[0] + targetSz[1];
+ int targetSzSum = (int)(targetSz[0] + targetSz[1]); // explicit cast keeps the previous implicit int truncation
float wc = targetSz[0] + trackState.contextAmount * targetSzSum;
float hc = targetSz[1] + trackState.contextAmount * targetSzSum;