[OpenCV4Android](https://groups.google.com/group/android-opencv/) discussion group or OpenCV [Q&A
forum](http://answers.opencv.org). We'll do our best to help you out.
-Tegra Android Development Pack users
-------------------------------------
-
-You may have used the [Tegra Android Development
-Pack](http://developer.nvidia.com/tegra-android-development-pack) (**TADP**) released by **NVIDIA**
-to set up your Android development environment.
-
-Besides the Android development tools, TADP 2.0 includes the OpenCV4Android SDK, so it may already be
-installed on your system and you can skip to the @ref tutorial_O4A_SDK_samples "samples" section of this tutorial.
-
-More details regarding TADP can be found in the @ref tutorial_android_dev_intro guide.
-
General info
------------
-# OpenCV development will certainly require some knowledge of the [Android
Camera](http://developer.android.com/guide/topics/media/camera.html) specifics.
-Quick environment setup for Android development
------------------------------------------------
-
-If you are setting up a clean development environment, you can try the [Tegra Android Development
-Pack](https://developer.nvidia.com/tegra-android-development-pack) (**TADP**) released by
-**NVIDIA**.
-
-@note Starting from *version 2.0*, the TADP package includes the *OpenCV for Tegra* SDK, which is a regular
-*OpenCV4Android SDK* extended with Tegra-specific optimizations. Once installed, TADP covers all of the
-environment setup automatically and you can skip the rest of this guide.
-
-If you are a beginner in Android development, we also recommend starting with TADP.
-
-@note *NVIDIA*'s Tegra Android Development Pack includes some special features for *NVIDIA*'s [Tegra
-platform](http://www.nvidia.com/object/tegra-3-processor.html),
-but its use is not limited to *Tegra* devices.
-
-- You need at least *1.6 GB* of free disk space for the installation.
-- TADP will download Android SDK platforms and Android NDK from Google's server, so Internet
- connection is required for the installation.
-- TADP may ask you to flash your development kit at the end of the installation process. Just skip
- this step if you have no [Tegra Development Kit](http://developer.nvidia.com/mobile/tegra-hardware-sales-inquiries).
-- (UNIX) TADP will ask you for *root* access in the middle of the installation, so you need to be a member of
- the *sudo* group.
-
Manual environment setup for Android development
------------------------------------------------
void CirclesGridClusterFinder::hierarchicalClustering(const std::vector<Point2f> &points, const Size &patternSz, std::vector<Point2f> &patternPoints)
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(tegra::useTegra() && tegra::hierarchicalClustering(points, patternSz, patternPoints))
- return;
-#endif
int j, n = (int)points.size();
size_t pn = static_cast<size_t>(patternSz.area());
#include "opencv2/core/ocl.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/calib3d/calib3d_tegra.hpp"
-#else
#define GET_OPTIMIZED(func) (func)
-#endif
namespace cv
# endif
#endif
-#ifdef HAVE_TEGRA_OPTIMIZATION
-# include "tegra_round.hpp"
-#endif
-
#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__ || defined __ARM_NEON__) && !defined __SOFTFP__ && !defined(__CUDACC__)
// 1. general scheme
#define ARM_ROUND(_value, _asm_string) \
fistp t;
}
return t;
-#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || \
- defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
- TEGRA_ROUND_DBL(value);
#elif defined CV_ICC || defined __GNUC__
# if defined ARM_ROUND_DBL
ARM_ROUND_DBL(value);
fistp t;
}
return t;
-#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || \
- defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
- TEGRA_ROUND_FLT(value);
#elif defined CV_ICC || defined __GNUC__
# if defined ARM_ROUND_FLT
ARM_ROUND_FLT(value);
}
CvStatus;
-#ifdef HAVE_TEGRA_OPTIMIZATION
-namespace tegra {
-
-CV_EXPORTS bool useTegra();
-CV_EXPORTS void setUseTegra(bool flag);
-
-}
-#endif
-
#ifdef ENABLE_INSTRUMENTATION
namespace cv
{
{
CV_INSTRUMENT_REGION()
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra())
- {
- int kind1 = _src1.kind(), kind2 = _src2.kind();
- Mat src1 = _src1.getMat(), src2 = _src2.getMat();
- bool src1Scalar = checkScalar(src1, _src2.type(), kind1, kind2);
- bool src2Scalar = checkScalar(src2, _src1.type(), kind2, kind1);
-
- if (!src1Scalar && !src2Scalar &&
- src1.depth() == CV_8U && src2.type() == src1.type() &&
- src1.dims == 2 && src2.size() == src1.size() &&
- mask.empty())
- {
- if (dtype < 0)
- {
- if (_dst.fixedType())
- {
- dtype = _dst.depth();
- }
- else
- {
- dtype = src1.depth();
- }
- }
-
- dtype = CV_MAT_DEPTH(dtype);
-
- if (!_dst.fixedType() || dtype == _dst.depth())
- {
- _dst.create(src1.size(), CV_MAKE_TYPE(dtype, src1.channels()));
-
- if (dtype == CV_16S)
- {
- Mat dst = _dst.getMat();
- if(tegra::subtract_8u8u16s(src1, src2, dst))
- return;
- }
- else if (dtype == CV_32F)
- {
- Mat dst = _dst.getMat();
- if(tegra::subtract_8u8u32f(src1, src2, dst))
- return;
- }
- else if (dtype == CV_8S)
- {
- Mat dst = _dst.getMat();
- if(tegra::subtract_8u8u8s(src1, src2, dst))
- return;
- }
- }
- }
- }
-#endif
arithm_op(_src1, _src2, _dst, mask, dtype, getSubTab(), false, 0, OCL_OP_SUB );
}
#include "arithm_core.hpp"
#include "hal_replacement.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/core/core_tegra.hpp"
-#else
#define GET_OPTIMIZED(func) (func)
-#endif
namespace cv
{
//#endif
useIPP(-1),
useIPP_NE(-1)
-#ifdef HAVE_TEGRA_OPTIMIZATION
- ,useTegra(-1)
-#endif
#ifdef HAVE_OPENVX
,useOpenVX(-1)
#endif
//#endif
int useIPP; // 1 - use, 0 - do not use, -1 - auto/not initialized
int useIPP_NE; // 1 - use, 0 - do not use, -1 - auto/not initialized
-#ifdef HAVE_TEGRA_OPTIMIZATION
- int useTegra; // 1 - use, 0 - do not use, -1 - auto/not initialized
-#endif
#ifdef HAVE_OPENVX
int useOpenVX; // 1 - use, 0 - do not use, -1 - auto/not initialized
#endif
#ifdef HAVE_OPENCL
ocl::setUseOpenCL(flag);
#endif
-#ifdef HAVE_TEGRA_OPTIMIZATION
- ::tegra::setUseTegra(flag);
-#endif
}
bool useOptimized(void)
} // namespace cv
-#ifdef HAVE_TEGRA_OPTIMIZATION
-
-namespace tegra {
-
-bool useTegra()
-{
- cv::CoreTLSData* data = cv::getCoreTlsData().get();
-
- if (data->useTegra < 0)
- {
- const char* pTegraEnv = getenv("OPENCV_TEGRA");
- if (pTegraEnv && (cv::String(pTegraEnv) == "disabled"))
- data->useTegra = false;
- else
- data->useTegra = true;
- }
-
- return (data->useTegra > 0);
-}
-
-void setUseTegra(bool flag)
-{
- cv::CoreTLSData* data = cv::getCoreTlsData().get();
- data->useTegra = flag;
-}
-
-} // namespace tegra
-
-#endif
-
/* End of file. */
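The hunks in this patch all remove the same dispatch shape: an `#ifdef HAVE_TEGRA_OPTIMIZATION` block that calls `tegra::useTegra()` and, if the Tegra kernel accepts the input, returns early instead of running the generic implementation, with `useTegra()` itself backed by a thread-local flag and the `OPENCV_TEGRA` environment variable (see the removed `tegra::` block above). The standalone sketch below restates that pattern for reference only; it is not OpenCV API, and the names `vendor::useVendorOpt`, `vendor::vendorDouble`, `doubleArray`, and the `VENDOR_OPT` variable are hypothetical stand-ins for the removed hooks.

```cpp
#include <cstdlib>
#include <string>
#include <iostream>

namespace vendor {

// -1 = not initialised, 0 = disabled, 1 = enabled (mirrors the removed useTegra field)
static thread_local int g_useVendorOpt = -1;

bool useVendorOpt()
{
    if (g_useVendorOpt < 0)
    {
        // Opt-out via environment variable, analogous to the removed OPENCV_TEGRA check.
        const char* env = std::getenv("VENDOR_OPT");
        g_useVendorOpt = (env && std::string(env) == "disabled") ? 0 : 1;
    }
    return g_useVendorOpt > 0;
}

void setUseVendorOpt(bool flag) { g_useVendorOpt = flag ? 1 : 0; }

// Hypothetical accelerated kernel: returns false when it cannot handle the
// input, so the caller falls through to the generic implementation.
bool vendorDouble(const int* src, int* dst, int n)
{
    if (n % 4 != 0)
        return false;                    // unsupported shape: use the fallback
    for (int i = 0; i < n; ++i)
        dst[i] = src[i] * 2;
    return true;
}

} // namespace vendor

static void genericDouble(const int* src, int* dst, int n)
{
    for (int i = 0; i < n; ++i)
        dst[i] = src[i] * 2;
}

// Dispatch guard in the style of the removed HAVE_TEGRA_OPTIMIZATION blocks.
void doubleArray(const int* src, int* dst, int n)
{
    if (vendor::useVendorOpt() && vendor::vendorDouble(src, dst, n))
        return;
    genericDouble(src, dst, n);
}

int main()
{
    int src[4] = {1, 2, 3, 4}, dst[4] = {};
    doubleArray(src, dst, 4);
    std::cout << dst[0] << ' ' << dst[3] << '\n';   // prints "2 8"
    return 0;
}
```

Dropping the vendor branch leaves only the generic path, which is exactly the effect of the removals in the remaining hunks.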
FAST_t<12>(_img, keypoints, threshold, nonmax_suppression);
break;
case FastFeatureDetector::TYPE_9_16:
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(tegra::useTegra() && tegra::FAST(_img, keypoints, threshold, nonmax_suppression))
- break;
-#endif
FAST_t<16>(_img, keypoints, threshold, nonmax_suppression);
break;
}
#include <algorithm>
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/features2d/features2d_tegra.hpp"
-#endif
-
#endif
#undef abs
#endif
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/highgui/highgui_tegra.hpp"
-#endif
-
/* Errors */
#define HG_OK 0 /* Don't bet on it! */
#define HG_BADNAME -1 /* Bad window or file name */
aperture_size,
L2gradient ) )
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::canny(src, dst, low_thresh, high_thresh, aperture_size, L2gradient))
- return;
-#endif
-
CV_IPP_RUN_FAST(ipp_Canny(src, Mat(), Mat(), dst, (float)low_thresh, (float)high_thresh, L2gradient, aperture_size))
if (L2gradient)
int aperture_size, int op_type, double k=0.,
int borderType=BORDER_DEFAULT )
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::cornerEigenValsVecs(src, eigenv, block_size, aperture_size, op_type, k, borderType))
- return;
-#endif
#if CV_TRY_AVX
bool haveAvx = CV_CPU_HAS_SUPPORT_AVX;
#endif
CV_IPP_RUN(!(cv::ocl::isOpenCLActivated() && _dst.isUMat()), ipp_Laplacian(_src, _dst, ksize, scale, delta, borderType));
-
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && scale == 1.0 && delta == 0)
- {
- Mat src = _src.getMat(), dst = _dst.getMat();
- if (ksize == 1 && tegra::laplace1(src, dst, borderType))
- return;
- if (ksize == 3 && tegra::laplace3(src, dst, borderType))
- return;
- if (ksize == 5 && tegra::laplace5(src, dst, borderType))
- return;
- }
-#endif
-
if( ksize == 1 || ksize == 3 )
{
float K[2][9] =
#include <limits.h>
#include <float.h>
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/imgproc/imgproc_tegra.hpp"
-#else
#define GET_OPTIMIZED(func) (func)
-#endif
/* helper tables */
extern const uchar icvSaturate8u_cv[];
CALL_HAL(pyrDown, cv_hal_pyrdown, src.data, src.step, src.cols, src.rows, dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(), borderType);
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrDown(src, dst))
- return;
-#endif
-
#ifdef HAVE_IPP
bool isolated = (borderType & BORDER_ISOLATED) != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
Mat dst = _dst.getMat();
int depth = src.depth();
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrUp(src, dst))
- return;
-#endif
-
#ifdef HAVE_IPP
bool isolated = (borderType & BORDER_ISOLATED) != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
CV_IPP_RUN_FAST(ipp_medianFilter(src0, dst, ksize));
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::medianBlur(src0, dst, ksize))
- return;
-#endif
-
bool useSortNet = ksize == 3 || (ksize == 5
#if !(CV_SIMD128)
&& ( src0.depth() > CV_8U || src0.channels() == 2 || src0.channels() > 4 )
_result.create(corrSize, CV_32F);
Mat result = _result.getMat();
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::matchTemplate(img, templ, result, method))
- return;
-#endif
-
CV_IPP_RUN_FAST(ipp_matchTemplate(img, templ, result, method))
crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0);
src_step = dst_step = roi.width;
}
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::thresh_8u(_src, _dst, roi.width, roi.height, thresh, maxval, type))
- return;
-#endif
-
#if defined(HAVE_IPP)
CV_IPP_CHECK()
{
src_step = dst_step = roi.width;
}
- // HAVE_TEGRA_OPTIMIZATION not supported
-
// HAVE_IPP not supported
const ushort* src = _src.ptr<ushort>();
src_step = dst_step = roi.width;
}
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::thresh_16s(_src, _dst, roi.width, roi.height, thresh, maxval, type))
- return;
-#endif
-
#if defined(HAVE_IPP)
CV_IPP_CHECK()
{
roi.height = 1;
}
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::thresh_32f(_src, _dst, roi.width, roi.height, thresh, maxval, type))
- return;
-#endif
-
#if defined(HAVE_IPP)
CV_IPP_CHECK()
{
Ptr<BaseCascadeClassifier::MaskGenerator> createFaceDetectionMaskGenerator()
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra())
- return tegra::getCascadeClassifierMaskGenerator();
-#endif
return Ptr<BaseCascadeClassifier::MaskGenerator>();
}
#include "opencv2/core/ocl.hpp"
#include "opencv2/core/private.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/objdetect/objdetect_tegra.hpp"
-#endif
-
#endif
switch (normType) {
case NORM_L2:
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(hn == 1 && tegra::useTegra() &&
- tegra::fastNlMeansDenoising(src, dst, h[0], templateWindowSize, searchWindowSize))
- return;
-#endif
switch (depth) {
case CV_8U:
fastNlMeansDenoising_<uchar, int, unsigned, DistSquared>(src, dst, h,
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgproc.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/photo/photo_tegra.hpp"
-#endif
-
#endif
{
Mat src;
Mat weight;
-#ifdef HAVE_TEGRA_OPTIMIZATION
- src = _src.getMat();
- weight = _weight.getMat();
- if(tegra::useTegra() && tegra::normalizeUsingWeightMap(weight, src))
- return;
-#endif
#ifdef HAVE_OPENCL
if ( !cv::ocl::isOpenCLActivated() ||
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- cv::Mat imgMat = img.getMat();
- if(tegra::useTegra() && tegra::createLaplacePyr(imgMat, num_levels, pyr))
- return;
-#endif
-
pyr.resize(num_levels + 1);
if(img.depth() == CV_8U)
CV_Assert(features1.descriptors.type() == features2.descriptors.type());
CV_Assert(features2.descriptors.depth() == CV_8U || features2.descriptors.depth() == CV_32F);
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::match2nearest(features1, features2, matches_info, match_conf_))
- return;
-#endif
-
matches_info.matches.clear();
Ptr<cv::DescriptorMatcher> matcher;
#include "opencv2/core/private.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-# include "opencv2/stitching/stitching_tegra.hpp"
-#endif
-
#include "util_log.hpp"
#endif
#include <limits.h>
#include "opencv2/imgproc/types_c.h"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "tegra.hpp"
-#endif
-
using namespace cv;
namespace cvtest
::testing::Test::RecordProperty("cv_cpu_features", cpu_features);
if (useStdOut) std::cout << "CPU features: " << cpu_features << std::endl;
-#ifdef HAVE_TEGRA_OPTIMIZATION
- const char * tegra_optimization = tegra::useTegra() && tegra::isDeviceSupported() ? "enabled" : "disabled";
- ::testing::Test::RecordProperty("cv_tegra_optimization", tegra_optimization);
- if (useStdOut) std::cout << "Tegra optimization: " << tegra_optimization << std::endl;
-#endif
-
#ifdef HAVE_IPP
const char * ipp_optimization = cv::ipp::useIPP()? "enabled" : "disabled";
::testing::Test::RecordProperty("cv_ipp_optimization", ipp_optimization);
CV_Assert(depth == CV_8U);
dst.create(rows, cols, CV_MAKETYPE(DataType<deriv_type>::depth, cn*2));
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::calcSharrDeriv(src, dst))
- return;
-#endif
-
int x, y, delta = (int)alignSize((cols + 2)*cn, 16);
AutoBuffer<deriv_type> _tempBuf(delta*2 + 64);
deriv_type *trow0 = alignPtr(_tempBuf + cn, 16), *trow1 = alignPtr(trow0 + delta, 16);
CV_Assert(prevPyr[level * lvlStep1].size() == nextPyr[level * lvlStep2].size());
CV_Assert(prevPyr[level * lvlStep1].type() == nextPyr[level * lvlStep2].type());
-#ifdef HAVE_TEGRA_OPTIMIZATION
- typedef tegra::LKTrackerInvoker<cv::detail::LKTrackerInvoker> LKTrackerInvoker;
-#else
typedef cv::detail::LKTrackerInvoker LKTrackerInvoker;
-#endif
-
parallel_for_(Range(0, npoints), LKTrackerInvoker(prevPyr[level * lvlStep1], derivI,
nextPyr[level * lvlStep2], prevPts, nextPts,
status, err,
#include "opencv2/core/ocl.hpp"
#include "opencv2/core.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/video/video_tegra.hpp"
-#endif
-
#endif