\r
explicit BruteForceMatcher_GPU_base(DistType distType = L2Dist);\r
\r
- // Add descriptors to train descriptor collection.\r
+ // Add descriptors to train descriptor collection\r
void add(const std::vector<GpuMat>& descCollection);\r
\r
- // Get train descriptors collection.\r
+ // Get train descriptors collection\r
const std::vector<GpuMat>& getTrainDescriptors() const;\r
\r
- // Clear train descriptors collection.\r
+ // Clear train descriptors collection\r
void clear();\r
\r
- // Return true if there are not train descriptors in collection.\r
+ // Return true if there are no train descriptors in the collection\r
bool empty() const;\r
\r
- // Return true if the matcher supports mask in match methods.\r
+ // Return true if the matcher supports mask in match methods\r
bool isMaskSupported() const;\r
\r
- // Find one best match for each query descriptor.\r
- // trainIdx.at<int>(0, queryIdx) will contain best train index for queryIdx\r
- // distance.at<float>(0, queryIdx) will contain distance\r
- void matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
- GpuMat& trainIdx, GpuMat& distance,\r
+ // Find one best match for each query descriptor\r
+ void matchSingle(const GpuMat& query, const GpuMat& train, \r
+ GpuMat& trainIdx, GpuMat& distance, \r
const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());\r
\r
// Download trainIdx and distance and convert it to CPU vector with DMatch\r
static void matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches);\r
// Convert trainIdx and distance to vector with DMatch\r
static void matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches);\r
\r
- // Find one best match for each query descriptor.\r
- void match(const GpuMat& queryDescs, const GpuMat& trainDescs, std::vector<DMatch>& matches,\r
- const GpuMat& mask = GpuMat());\r
+ // Find one best match for each query descriptor\r
+ void match(const GpuMat& query, const GpuMat& train, std::vector<DMatch>& matches, const GpuMat& mask = GpuMat());\r
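\r
// A minimal usage sketch (assuming CV_32F descriptors, one per row; all variable\r
// names are placeholders). The blocking match() overload above is equivalent to\r
// matchSingle() followed by matchDownload(), but the two-step form can overlap\r
// matching with other work on a Stream:\r
//\r
//     BruteForceMatcher_GPU< L2<float> > matcher;\r
//     GpuMat query, train;            // uploaded descriptors, same cols and type\r
//     GpuMat trainIdx, distance;\r
//     Stream stream;\r
//     matcher.matchSingle(query, train, trainIdx, distance, GpuMat(), stream);\r
//     stream.waitForCompletion();\r
//     std::vector<DMatch> matches;\r
//     matcher.matchDownload(trainIdx, distance, matches);\r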
\r
// Make a GPU collection of train descriptors and masks in a format suitable for the matchCollection function\r
- void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,\r
- const vector<GpuMat>& masks = std::vector<GpuMat>());\r
+ void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection, const std::vector<GpuMat>& masks = std::vector<GpuMat>());\r
\r
- // Find one best match from train collection for each query descriptor.\r
- // trainIdx.at<int>(0, queryIdx) will contain best train index for queryIdx\r
- // imgIdx.at<int>(0, queryIdx) will contain best image index for queryIdx\r
- // distance.at<float>(0, queryIdx) will contain distance\r
- void matchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,\r
+ // Find one best match from train collection for each query descriptor\r
+ void matchCollection(const GpuMat& query, const GpuMat& trainCollection, \r
GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,\r
- const GpuMat& maskCollection, Stream& stream = Stream::Null());\r
+ const GpuMat& masks = GpuMat(), Stream& stream = Stream::Null());\r
\r
// Download trainIdx, imgIdx and distance and convert it to vector with DMatch\r
static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector<DMatch>& matches);\r
static void matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector<DMatch>& matches);\r
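\r
// A sketch of the collection path (assuming train descriptors were added via\r
// add(); variable names are placeholders):\r
//\r
//     GpuMat trainCollection, maskCollection;\r
//     matcher.makeGpuCollection(trainCollection, maskCollection);\r
//     GpuMat trainIdx, imgIdx, distance;\r
//     matcher.matchCollection(query, trainCollection, trainIdx, imgIdx, distance, maskCollection);\r
//     std::vector<DMatch> matches;\r
//     matcher.matchDownload(trainIdx, imgIdx, distance, matches);\r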
\r
// Find one best match from train collection for each query descriptor.\r
- void match(const GpuMat& queryDescs, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks = std::vector<GpuMat>());\r
+ void match(const GpuMat& query, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks = std::vector<GpuMat>());\r
\r
- // Find k best matches for each query descriptor (in increasing order of distances).\r
- // trainIdx.at<int>(queryIdx, i) will contain index of i'th best trains (i < k).\r
- // distance.at<float>(queryIdx, i) will contain distance.\r
- // allDist is a buffer to store all distance between query descriptors and train descriptors\r
- // it have size (nQuery,nTrain) and CV_32F type\r
- // allDist.at<float>(queryIdx, trainIdx) will contain FLT_MAX, if trainIdx is one from k best,\r
- // otherwise it will contain distance between queryIdx and trainIdx descriptors\r
- void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
- GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());\r
+ // Find k best matches for each query descriptor (in increasing order of distances)\r
+ void knnMatchSingle(const GpuMat& query, const GpuMat& train,\r
+ GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,\r
+ const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());\r
\r
// Download trainIdx and distance and convert it to vector with DMatch\r
// compactResult is used when mask is not empty. If compactResult is false matches\r
// vector will have the same size as queryDescriptors rows. If compactResult is true\r
// matches vector will not contain matches for fully masked out query descriptors.\r
- void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
+ void knnMatch(const GpuMat& query, const GpuMat& train,\r
std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask = GpuMat(),\r
bool compactResult = false);\r
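\r
// A common use of k = 2 is the ratio test; a sketch assuming CV_32F descriptors\r
// (the 0.8 threshold is only an example value):\r
//\r
//     std::vector< std::vector<DMatch> > knnMatches;\r
//     matcher.knnMatch(query, train, knnMatches, 2);\r
//     std::vector<DMatch> good;\r
//     for (size_t i = 0; i < knnMatches.size(); ++i)\r
//     {\r
//         if (knnMatches[i].size() == 2 &&\r
//             knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance)\r
//             good.push_back(knnMatches[i][0]);\r
//     }\r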
\r
+ // Find k best matches from train collection for each query descriptor (in increasing order of distances)\r
+ void knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,\r
+ GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,\r
+ const GpuMat& maskCollection = GpuMat(), Stream& stream = Stream::Null());\r
+\r
+ // Download trainIdx and distance and convert it to vector with DMatch\r
+ // compactResult is used when mask is not empty. If compactResult is false matches\r
+ // vector will have the same size as queryDescriptors rows. If compactResult is true\r
+ // matches vector will not contain matches for fully masked out query descriptors.\r
+ static void knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,\r
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);\r
+ // Convert trainIdx and distance to vector with DMatch\r
+ static void knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,\r
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);\r
+\r
// Find k best matches for each query descriptor (in increasing order of distances).\r
// compactResult is used when mask is not empty. If compactResult is false matches\r
// vector will have the same size as queryDescriptors rows. If compactResult is true\r
// matches vector will not contain matches for fully masked out query descriptors.\r
- void knnMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, int knn,\r
- const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false );\r
+ void knnMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, int k,\r
+ const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);\r
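\r
// For k == 2 the overload above matches against the whole train collection via\r
// knnMatch2Collection; a manual sketch of the same flow (variable names are\r
// placeholders):\r
//\r
//     GpuMat trains, masks;\r
//     matcher.makeGpuCollection(trains, masks);\r
//     GpuMat trainIdx, imgIdx, distance;\r
//     matcher.knnMatch2Collection(query, trains, trainIdx, imgIdx, distance, masks);\r
//     matcher.knnMatch2Download(trainIdx, imgIdx, distance, matches);\r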
\r
// Find best matches for each query descriptor which have distance less than maxDistance.\r
// nMatches.at<int>(0, queryIdx) will contain matches count for queryIdx.\r
// Note that nMatches can be greater than trainIdx.cols, which means the matcher didn't find all matches\r
// because it didn't have enough memory.\r
- // trainIdx.at<int>(queruIdx, i) will contain ith train index (i < min(nMatches.at<int>(0, queruIdx), trainIdx.cols))\r
- // distance.at<int>(queruIdx, i) will contain ith distance (i < min(nMatches.at<int>(0, queruIdx), trainIdx.cols))\r
- // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x (nTrain / 2),\r
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10),\r
// otherwise the user can pass their own allocated trainIdx and distance with size nQuery x nMaxMatches.\r
// Matches are not sorted.\r
- void radiusMatchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
+ void radiusMatchSingle(const GpuMat& query, const GpuMat& train,\r
GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,\r
const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());\r
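\r
// A sketch of reusing caller-allocated result buffers across calls (nQuery and\r
// nMaxMatches are placeholders for whatever sizes the caller picks):\r
//\r
//     GpuMat trainIdx(nQuery, nMaxMatches, CV_32SC1);\r
//     GpuMat distance(nQuery, nMaxMatches, CV_32FC1);\r
//     GpuMat nMatches;\r
//     matcher.radiusMatchSingle(query, train, trainIdx, distance, nMatches, maxDistance);\r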
\r
\r
// Find best matches for each query descriptor which have distance less than maxDistance\r
// (in increasing order of distances).\r
- void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
+ void radiusMatch(const GpuMat& query, const GpuMat& train,\r
std::vector< std::vector<DMatch> >& matches, float maxDistance,\r
const GpuMat& mask = GpuMat(), bool compactResult = false);\r
\r
// Find best matches for each query descriptor which have distance less than maxDistance.\r
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10),\r
+ // otherwise the user can pass their own allocated trainIdx and distance with size nQuery x nMaxMatches.\r
// Matches are not sorted.\r
- void radiusMatchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,\r
- GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,\r
- const GpuMat& maskCollection, Stream& stream = Stream::Null());\r
+ void radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,\r
+ const std::vector<GpuMat>& masks = std::vector<GpuMat>(), Stream& stream = Stream::Null());\r
\r
// Download trainIdx, imgIdx, nMatches and distance and convert it to vector with DMatch.\r
// matches will be sorted in increasing order of distances.\r
static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,\r
                                std::vector< std::vector<DMatch> >& matches, bool compactResult = false);\r
static void radiusMatchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, const Mat& nMatches,\r
                               std::vector< std::vector<DMatch> >& matches, bool compactResult = false);\r
\r
// Find best matches from train collection for each query descriptor which have distance less than\r
// maxDistance (in increasing order of distances).\r
- void radiusMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, float maxDistance,\r
+ void radiusMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, float maxDistance,\r
const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);\r
\r
DistType distType;\r
#include "perf_precomp.hpp"\r
\r
PERF_TEST_P(DevInfo_DescSize, BruteForceMatcher_match, testing::Combine(testing::ValuesIn(devices()),\r
- testing::Values(64, 128)))\r
+ testing::Values(64, 128, 256)))\r
{\r
DeviceInfo devInfo = std::tr1::get<0>(GetParam());\r
int desc_size = std::tr1::get<1>(GetParam());\r
\r
BruteForceMatcher_GPU< L2<float> > matcher;\r
\r
- declare.time(0.5).iterations(100);\r
+ declare.time(3.0);\r
\r
SIMPLE_TEST_CYCLE()\r
{\r
\r
PERF_TEST_P(DevInfo_K_DescSize, BruteForceMatcher_knnMatch, testing::Combine(testing::ValuesIn(devices()),\r
testing::Values(2, 3),\r
- testing::Values(64, 128)))\r
+ testing::Values(64, 128, 256)))\r
{\r
DeviceInfo devInfo = std::tr1::get<0>(GetParam());\r
int k = std::tr1::get<1>(GetParam());\r
\r
BruteForceMatcher_GPU< L2<float> > matcher;\r
\r
- declare.time(0.5).iterations(100);\r
+ declare.time(3.0);\r
\r
SIMPLE_TEST_CYCLE()\r
{\r
- matcher.knnMatch(query, train, trainIdx, distance, allDist, k);\r
+ matcher.knnMatchSingle(query, train, trainIdx, distance, allDist, k);\r
}\r
\r
Mat trainIdx_host(trainIdx);\r
}\r
\r
PERF_TEST_P(DevInfo_DescSize, BruteForceMatcher_radiusMatch, testing::Combine(testing::ValuesIn(devices(SHARED_ATOMICS)),\r
- testing::Values(64, 128)))\r
+ testing::Values(64, 128, 256)))\r
{\r
DeviceInfo devInfo = std::tr1::get<0>(GetParam());\r
int desc_size = std::tr1::get<1>(GetParam());\r
\r
BruteForceMatcher_GPU< L2<float> > matcher;\r
\r
- declare.time(0.5).iterations(100);\r
+ declare.time(3.0);\r
\r
SIMPLE_TEST_CYCLE()\r
{\r
bool cv::gpu::BruteForceMatcher_GPU_base::isMaskSupported() const { throw_nogpu(); return true; }\r
void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const GpuMat&, vector<DMatch>&) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, std::vector<DMatch>&) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, vector<DMatch>&) { throw_nogpu(); }\r
void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, const GpuMat&, vector<DMatch>&, const GpuMat&) { throw_nogpu(); }\r
void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat&, GpuMat&, const vector<GpuMat>&) { throw_nogpu(); }\r
void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, const Mat&, std::vector<DMatch>&) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, std::vector<DMatch>&, const std::vector<GpuMat>&) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, const GpuMat&, Stream&) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatchConvert(const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, int, const GpuMat&, bool) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, int, const std::vector<GpuMat>&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const GpuMat&, const GpuMat&, vector<DMatch>&) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, const Mat&, vector<DMatch>&) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, vector<DMatch>&, const vector<GpuMat>&) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, const GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchConvert(const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, int, const GpuMat&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Collection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Download(const GpuMat&, const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Convert(const Mat&, const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, vector< vector<DMatch> >&, int, const vector<GpuMat>&, bool) { throw_nogpu(); }\r
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const GpuMat&, Stream&) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, float, const GpuMat&, bool) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchCollection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat&, const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, float, const GpuMat&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchCollection(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const vector<GpuMat>&, Stream&) { throw_nogpu(); }\r
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat&, const Mat&, const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, float, const std::vector<GpuMat>&, bool) { throw_nogpu(); }\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, vector< vector<DMatch> >&, float, const vector<GpuMat>&, bool) { throw_nogpu(); }\r
\r
#else /* !defined (HAVE_CUDA) */\r
\r
namespace cv { namespace gpu { namespace bf_match\r
{\r
- template <typename T> void matchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, \r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
int cc, cudaStream_t stream);\r
- template <typename T> void matchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, \r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
int cc, cudaStream_t stream);\r
- template <typename T> void matchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, \r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
int cc, cudaStream_t stream);\r
- template <typename T> void matchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
int cc, cudaStream_t stream);\r
- template <typename T> void matchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream);\r
}}}\r
\r
namespace cv { namespace gpu { namespace bf_knnmatch\r
{\r
- template <typename T> void knnMatchL1_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, \r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, \r
+ int cc, cudaStream_t stream);\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, \r
+ int cc, cudaStream_t stream);\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, \r
+ int cc, cudaStream_t stream);\r
+\r
+ template <typename T> void match2L1_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
int cc, cudaStream_t stream);\r
- template <typename T> void knnMatchL2_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, \r
+ template <typename T> void match2L2_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
int cc, cudaStream_t stream);\r
- template <typename T> void knnMatchHamming_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, \r
+ template <typename T> void match2Hamming_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
int cc, cudaStream_t stream);\r
}}}\r
\r
namespace cv { namespace gpu { namespace bf_radius_match \r
{\r
- template <typename T> void radiusMatchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
- template <typename T> void radiusMatchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
- template <typename T> void radiusMatchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
-\r
- template <typename T> void radiusMatchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
- template <typename T> void radiusMatchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
- template <typename T> void radiusMatchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
+\r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
+\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
+\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
}}}\r
\r
cv::gpu::BruteForceMatcher_GPU_base::BruteForceMatcher_GPU_base(DistType distType_) : distType(distType_)\r
////////////////////////////////////////////////////////////////////\r
// Match\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
- GpuMat& trainIdx, GpuMat& distance, const GpuMat& mask, Stream& stream)\r
+void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat& query, const GpuMat& train,\r
+ GpuMat& trainIdx, GpuMat& distance, \r
+ const GpuMat& mask, Stream& stream)\r
{\r
- if (queryDescs.empty() || trainDescs.empty())\r
+ if (query.empty() || train.empty())\r
return;\r
\r
using namespace cv::gpu::bf_match;\r
\r
- typedef void (*match_caller_t)(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream);\r
+ typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
+ int cc, cudaStream_t stream);\r
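\r
// The caller tables below are indexed as [distType][descriptor depth] (CV_8U\r
// through CV_32F); a zero entry means that distance/depth combination has no\r
// GPU kernel.\r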
\r
- static const match_caller_t match_callers[3][8] =\r
+ static const caller_t callers[3][6] =\r
{\r
{\r
- matchSingleL1_gpu<unsigned char>, 0/*matchSingleL1_gpu<signed char>*/, \r
- matchSingleL1_gpu<unsigned short>, matchSingleL1_gpu<short>, \r
- matchSingleL1_gpu<int>, matchSingleL1_gpu<float>, 0, 0\r
+ matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/, \r
+ matchL1_gpu<unsigned short>, matchL1_gpu<short>, \r
+ matchL1_gpu<int>, matchL1_gpu<float>\r
},\r
{\r
- 0/*matchSingleL2_gpu<unsigned char>*/, 0/*matchSingleL2_gpu<signed char>*/, \r
- 0/*matchSingleL2_gpu<unsigned short>*/, 0/*matchSingleL2_gpu<short>*/, \r
- 0/*matchSingleL2_gpu<int>*/, matchSingleL2_gpu<float>, 0, 0\r
+ 0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/, \r
+ 0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/, \r
+ 0/*matchL2_gpu<int>*/, matchL2_gpu<float>\r
},\r
{\r
- matchSingleHamming_gpu<unsigned char>, 0/*matchSingleHamming_gpu<signed char>*/, \r
- matchSingleHamming_gpu<unsigned short>, 0/*matchSingleHamming_gpu<short>*/, \r
- matchSingleHamming_gpu<int>, 0, 0, 0\r
+ matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/, \r
+ matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/, \r
+ matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/\r
}\r
};\r
\r
- CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);\r
- CV_Assert(trainDescs.cols == queryDescs.cols && trainDescs.type() == queryDescs.type());\r
+ CV_Assert(query.channels() == 1 && query.depth() < CV_64F);\r
+ CV_Assert(train.cols == query.cols && train.type() == query.type());\r
\r
- const int nQuery = queryDescs.rows;\r
+ const int nQuery = query.rows;\r
\r
ensureSizeIsEnough(1, nQuery, CV_32S, trainIdx);\r
ensureSizeIsEnough(1, nQuery, CV_32F, distance);\r
\r
- match_caller_t func = match_callers[distType][queryDescs.depth()];\r
+ caller_t func = callers[distType][query.depth()];\r
CV_Assert(func != 0);\r
\r
DeviceInfo info;\r
int cc = info.majorVersion() * 10 + info.minorVersion();\r
\r
- func(queryDescs, trainDescs, mask, trainIdx, distance, cc, StreamAccessor::getStream(stream));\r
+ func(query, train, mask, trainIdx, distance, cc, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, const GpuMat& distance, vector<DMatch>& matches)\r
matchConvert(trainIdxCPU, distanceCPU, matches);\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches)\r
+void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, const Mat& distance, vector<DMatch>& matches)\r
{\r
if (trainIdx.empty() || distance.empty())\r
return;\r
\r
- CV_Assert(trainIdx.type() == CV_32SC1 && trainIdx.isContinuous());\r
- CV_Assert(distance.type() == CV_32FC1 && distance.isContinuous() && distance.cols == trainIdx.cols);\r
+ CV_Assert(trainIdx.type() == CV_32SC1);\r
+ CV_Assert(distance.type() == CV_32FC1 && distance.cols == trainIdx.cols);\r
\r
const int nQuery = trainIdx.cols;\r
\r
for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx, ++trainIdx_ptr, ++distance_ptr)\r
{\r
int trainIdx = *trainIdx_ptr;\r
+\r
if (trainIdx == -1)\r
continue;\r
\r
}\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
+void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& query, const GpuMat& train,\r
vector<DMatch>& matches, const GpuMat& mask)\r
{\r
GpuMat trainIdx, distance;\r
- matchSingle(queryDescs, trainDescs, trainIdx, distance, mask);\r
+ matchSingle(query, train, trainIdx, distance, mask);\r
matchDownload(trainIdx, distance, matches);\r
}\r
\r
{\r
Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2D)));\r
\r
- for (size_t i = 0; i < trainDescCollection.size(); ++i)\r
- {\r
- const GpuMat& trainDescs = trainDescCollection[i];\r
+ DevMem2D* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2D>();\r
\r
- trainCollectionCPU.ptr<DevMem2D>(0)[i] = trainDescs;\r
- }\r
+ for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr)\r
+ *trainCollectionCPU_ptr = trainDescCollection[i];\r
\r
trainCollection.upload(trainCollectionCPU);\r
+ maskCollection.release();\r
}\r
else\r
{\r
Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2D)));\r
Mat maskCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(PtrStep)));\r
\r
- for (size_t i = 0; i < trainDescCollection.size(); ++i)\r
+ DevMem2D* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2D>();\r
+ PtrStep* maskCollectionCPU_ptr = maskCollectionCPU.ptr<PtrStep>();\r
+\r
+ for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr, ++maskCollectionCPU_ptr)\r
{\r
- const GpuMat& trainDescs = trainDescCollection[i];\r
+ const GpuMat& train = trainDescCollection[i];\r
const GpuMat& mask = masks[i];\r
\r
- CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.cols == trainDescs.rows));\r
+ CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.cols == train.rows));\r
\r
- trainCollectionCPU.ptr<DevMem2D>(0)[i] = trainDescs;\r
-\r
- maskCollectionCPU.ptr<PtrStep>(0)[i] = mask;\r
+ *trainCollectionCPU_ptr = train;\r
+ *maskCollectionCPU_ptr = mask;\r
}\r
\r
trainCollection.upload(trainCollectionCPU);\r
}\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,\r
- GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, const GpuMat& maskCollection, Stream& stream)\r
+void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& query, const GpuMat& trainCollection,\r
+ GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, \r
+ const GpuMat& masks, Stream& stream)\r
{\r
- if (queryDescs.empty() || trainCollection.empty())\r
+ if (query.empty() || trainCollection.empty())\r
return;\r
\r
using namespace cv::gpu::bf_match;\r
\r
- typedef void (*match_caller_t)(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream);\r
+ typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream);\r
\r
- static const match_caller_t match_callers[3][8] =\r
+ static const caller_t callers[3][6] =\r
{\r
{\r
- matchCollectionL1_gpu<unsigned char>, 0/*matchCollectionL1_gpu<signed char>*/,\r
- matchCollectionL1_gpu<unsigned short>, matchCollectionL1_gpu<short>,\r
- matchCollectionL1_gpu<int>, matchCollectionL1_gpu<float>, 0, 0\r
+ matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,\r
+ matchL1_gpu<unsigned short>, matchL1_gpu<short>,\r
+ matchL1_gpu<int>, matchL1_gpu<float>\r
},\r
{\r
- 0/*matchCollectionL2_gpu<unsigned char>*/, 0/*matchCollectionL2_gpu<signed char>*/,\r
- 0/*matchCollectionL2_gpu<unsigned short>*/, 0/*matchCollectionL2_gpu<short>*/,\r
- 0/*matchCollectionL2_gpu<int>*/, matchCollectionL2_gpu<float>, 0, 0\r
+ 0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,\r
+ 0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,\r
+ 0/*matchL2_gpu<int>*/, matchL2_gpu<float>\r
},\r
{\r
- matchCollectionHamming_gpu<unsigned char>, 0/*matchCollectionHamming_gpu<signed char>*/,\r
- matchCollectionHamming_gpu<unsigned short>, 0/*matchCollectionHamming_gpu<short>*/,\r
- matchCollectionHamming_gpu<int>, 0, 0, 0\r
+ matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,\r
+ matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,\r
+ matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/\r
}\r
};\r
\r
- CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);\r
+ CV_Assert(query.channels() == 1 && query.depth() < CV_64F);\r
\r
- const int nQuery = queryDescs.rows;\r
+ const int nQuery = query.rows;\r
\r
ensureSizeIsEnough(1, nQuery, CV_32S, trainIdx);\r
ensureSizeIsEnough(1, nQuery, CV_32S, imgIdx);\r
ensureSizeIsEnough(1, nQuery, CV_32F, distance);\r
\r
- match_caller_t func = match_callers[distType][queryDescs.depth()];\r
+ caller_t func = callers[distType][query.depth()];\r
CV_Assert(func != 0);\r
\r
DeviceInfo info;\r
int cc = info.majorVersion() * 10 + info.minorVersion();\r
\r
- func(queryDescs, trainCollection, maskCollection, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));\r
+ func(query, trainCollection, masks, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, vector<DMatch>& matches)\r
if (trainIdx.empty() || imgIdx.empty() || distance.empty())\r
return;\r
\r
- CV_Assert(trainIdx.type() == CV_32SC1 && trainIdx.isContinuous());\r
- CV_Assert(imgIdx.type() == CV_32SC1 && imgIdx.isContinuous() && imgIdx.cols == trainIdx.cols);\r
- CV_Assert(distance.type() == CV_32FC1 && distance.isContinuous() && imgIdx.cols == trainIdx.cols);\r
+ CV_Assert(trainIdx.type() == CV_32SC1);\r
+ CV_Assert(imgIdx.type() == CV_32SC1 && imgIdx.cols == trainIdx.cols);\r
+ CV_Assert(distance.type() == CV_32FC1 && distance.cols == trainIdx.cols);\r
\r
const int nQuery = trainIdx.cols;\r
\r
for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)\r
{\r
int trainIdx = *trainIdx_ptr;\r
+\r
if (trainIdx == -1)\r
continue;\r
\r
}\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& queryDescs, vector<DMatch>& matches, const vector<GpuMat>& masks)\r
+void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& query, vector<DMatch>& matches, const vector<GpuMat>& masks)\r
{\r
GpuMat trainCollection;\r
GpuMat maskCollection;\r
\r
GpuMat trainIdx, imgIdx, distance;\r
\r
- matchCollection(queryDescs, trainCollection, trainIdx, imgIdx, distance, maskCollection);\r
+ matchCollection(query, trainCollection, trainIdx, imgIdx, distance, maskCollection);\r
matchDownload(trainIdx, imgIdx, distance, matches);\r
}\r
\r
////////////////////////////////////////////////////////////////////\r
// KnnMatch\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
- GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, const GpuMat& mask, Stream& stream)\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchSingle(const GpuMat& query, const GpuMat& train,\r
+ GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,\r
+ const GpuMat& mask, Stream& stream)\r
{\r
- if (queryDescs.empty() || trainDescs.empty())\r
+ if (query.empty() || train.empty())\r
return;\r
\r
using namespace cv::gpu::bf_knnmatch;\r
\r
- typedef void (*match_caller_t)(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, \r
- int cc, cudaStream_t stream);\r
+ typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, \r
+ int cc, cudaStream_t stream);\r
\r
- static const match_caller_t match_callers[3][8] =\r
+ static const caller_t callers[3][6] =\r
{\r
{\r
- knnMatchL1_gpu<unsigned char>, 0/*knnMatchL1_gpu<signed char>*/, knnMatchL1_gpu<unsigned short>,\r
- knnMatchL1_gpu<short>, knnMatchL1_gpu<int>, knnMatchL1_gpu<float>, 0, 0\r
+ matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/, \r
+ matchL1_gpu<unsigned short>, matchL1_gpu<short>, \r
+ matchL1_gpu<int>, matchL1_gpu<float>\r
},\r
{\r
- 0/*knnMatchL2_gpu<unsigned char>*/, 0/*knnMatchL2_gpu<signed char>*/, 0/*knnMatchL2_gpu<unsigned short>*/,\r
- 0/*knnMatchL2_gpu<short>*/, 0/*knnMatchL2_gpu<int>*/, knnMatchL2_gpu<float>, 0, 0\r
+ 0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/, \r
+ 0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/, \r
+ 0/*matchL2_gpu<int>*/, matchL2_gpu<float>\r
},\r
{\r
- knnMatchHamming_gpu<unsigned char>, 0/*knnMatchHamming_gpu<signed char>*/, knnMatchHamming_gpu<unsigned short>,\r
- 0/*knnMatchHamming_gpu<short>*/, knnMatchHamming_gpu<int>, 0, 0, 0\r
+ matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/, \r
+ matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/, \r
+ matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/\r
}\r
};\r
\r
- CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);\r
- CV_Assert(trainDescs.type() == queryDescs.type() && trainDescs.cols == queryDescs.cols);\r
+ CV_Assert(query.channels() == 1 && query.depth() < CV_64F);\r
+ CV_Assert(train.type() == query.type() && train.cols == query.cols);\r
\r
- const int nQuery = queryDescs.rows;\r
- const int nTrain = trainDescs.rows;\r
+ const int nQuery = query.rows;\r
+ const int nTrain = train.rows;\r
\r
if (k == 2)\r
{\r
}\r
\r
if (stream)\r
- {\r
stream.enqueueMemSet(trainIdx, Scalar::all(-1));\r
- if (k != 2)\r
- stream.enqueueMemSet(allDist, Scalar::all(numeric_limits<float>::max()));\r
- }\r
else\r
- {\r
trainIdx.setTo(Scalar::all(-1));\r
- if (k != 2)\r
- allDist.setTo(Scalar::all(numeric_limits<float>::max()));\r
- }\r
\r
- match_caller_t func = match_callers[distType][queryDescs.depth()];\r
+ caller_t func = callers[distType][query.depth()];\r
CV_Assert(func != 0);\r
\r
DeviceInfo info;\r
int cc = info.majorVersion() * 10 + info.minorVersion();\r
\r
- func(queryDescs, trainDescs, k, mask, trainIdx, distance, allDist, cc, StreamAccessor::getStream(stream));\r
+ func(query, train, k, mask, trainIdx, distance, allDist, cc, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,\r
}\r
\r
void cv::gpu::BruteForceMatcher_GPU_base::knnMatchConvert(const Mat& trainIdx, const Mat& distance, \r
- std::vector< std::vector<DMatch> >& matches, bool compactResult)\r
+ vector< vector<DMatch> >& matches, bool compactResult)\r
{\r
if (trainIdx.empty() || distance.empty())\r
return;\r
}\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& query, const GpuMat& train,\r
vector< vector<DMatch> >& matches, int k, const GpuMat& mask, bool compactResult)\r
{\r
GpuMat trainIdx, distance, allDist;\r
- knnMatch(queryDescs, trainDescs, trainIdx, distance, allDist, k, mask);\r
+ knnMatchSingle(query, train, trainIdx, distance, allDist, k, mask);\r
knnMatchDownload(trainIdx, distance, matches, compactResult);\r
}\r
\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,\r
+ GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,\r
+ const GpuMat& maskCollection, Stream& stream)\r
+{\r
+ if (query.empty() || trainCollection.empty())\r
+ return;\r
+\r
+ using namespace cv::gpu::bf_knnmatch;\r
+\r
+ typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
+ int cc, cudaStream_t stream);\r
+\r
+ static const caller_t callers[3][6] =\r
+ {\r
+ {\r
+ match2L1_gpu<unsigned char>, 0/*match2L1_gpu<signed char>*/, \r
+ match2L1_gpu<unsigned short>, match2L1_gpu<short>, \r
+ match2L1_gpu<int>, match2L1_gpu<float>\r
+ },\r
+ {\r
+ 0/*match2L2_gpu<unsigned char>*/, 0/*match2L2_gpu<signed char>*/, \r
+ 0/*match2L2_gpu<unsigned short>*/, 0/*match2L2_gpu<short>*/, \r
+ 0/*match2L2_gpu<int>*/, match2L2_gpu<float>\r
+ },\r
+ {\r
+ match2Hamming_gpu<unsigned char>, 0/*match2Hamming_gpu<signed char>*/, \r
+ match2Hamming_gpu<unsigned short>, 0/*match2Hamming_gpu<short>*/, \r
+ match2Hamming_gpu<int>, 0/*match2Hamming_gpu<float>*/\r
+ }\r
+ };\r
+\r
+ CV_Assert(query.channels() == 1 && query.depth() < CV_64F);\r
+\r
+ const int nQuery = query.rows;\r
+\r
+ ensureSizeIsEnough(1, nQuery, CV_32SC2, trainIdx);\r
+ ensureSizeIsEnough(1, nQuery, CV_32SC2, imgIdx);\r
+ ensureSizeIsEnough(1, nQuery, CV_32FC2, distance);\r
+\r
+ if (stream)\r
+ stream.enqueueMemSet(trainIdx, Scalar::all(-1));\r
+ else\r
+ trainIdx.setTo(Scalar::all(-1));\r
+\r
+ caller_t func = callers[distType][query.depth()];\r
+ CV_Assert(func != 0);\r
+ \r
+ DeviceInfo info;\r
+ int cc = info.majorVersion() * 10 + info.minorVersion();\r
+\r
+ func(query, trainCollection, maskCollection, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));\r
+}\r
+\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,\r
+ vector< vector<DMatch> >& matches, bool compactResult)\r
+{\r
+ if (trainIdx.empty() || imgIdx.empty() || distance.empty())\r
+ return;\r
+\r
+ Mat trainIdxCPU = trainIdx;\r
+ Mat imgIdxCPU = imgIdx;\r
+ Mat distanceCPU = distance;\r
+\r
+ knnMatch2Convert(trainIdxCPU, imgIdxCPU, distanceCPU, matches, compactResult);\r
+}\r
+\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,\r
+ vector< vector<DMatch> >& matches, bool compactResult)\r
+{\r
+ if (trainIdx.empty() || imgIdx.empty() || distance.empty())\r
+ return;\r
+\r
+ CV_Assert(trainIdx.type() == CV_32SC2);\r
+ CV_Assert(imgIdx.type() == CV_32SC2 && imgIdx.cols == trainIdx.cols);\r
+ CV_Assert(distance.type() == CV_32FC2 && distance.cols == trainIdx.cols);\r
+\r
+ const int nQuery = trainIdx.cols;\r
+\r
+ matches.clear();\r
+ matches.reserve(nQuery);\r
+ \r
+ const int* trainIdx_ptr = trainIdx.ptr<int>();\r
+ const int* imgIdx_ptr = imgIdx.ptr<int>();\r
+ const float* distance_ptr = distance.ptr<float>();\r
+\r
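+ // trainIdx/imgIdx/distance pack the two best candidates per query as 2-channel\r
+ // elements, so the flat pointers advance twice per query (the i < 2 inner loop).\r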
+ for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)\r
+ {\r
+ matches.push_back(vector<DMatch>());\r
+ vector<DMatch>& curMatches = matches.back();\r
+ curMatches.reserve(2);\r
+\r
+ for (int i = 0; i < 2; ++i, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)\r
+ {\r
+ int trainIdx = *trainIdx_ptr;\r
+\r
+ if (trainIdx != -1)\r
+ {\r
+ int imgIdx = *imgIdx_ptr;\r
+\r
+ float distance = *distance_ptr;\r
+\r
+ DMatch m(queryIdx, trainIdx, imgIdx, distance);\r
+\r
+ curMatches.push_back(m);\r
+ }\r
+ }\r
+\r
+ if (compactResult && curMatches.empty())\r
+ matches.pop_back();\r
+ }\r
+}\r
+\r
namespace\r
{\r
struct ImgIdxSetter\r
};\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs,\r
- vector< vector<DMatch> >& matches, int knn, const vector<GpuMat>& masks, bool compactResult)\r
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& query, vector< vector<DMatch> >& matches, int k,\r
+ const vector<GpuMat>& masks, bool compactResult)\r
{\r
- if (queryDescs.empty() || empty())\r
- return;\r
+ if (k == 2)\r
+ {\r
+ GpuMat trainCollection;\r
+ GpuMat maskCollection;\r
\r
- vector< vector<DMatch> > curMatches;\r
- vector<DMatch> temp;\r
- temp.reserve(2 * knn);\r
+ makeGpuCollection(trainCollection, maskCollection, masks);\r
\r
- matches.resize(queryDescs.rows);\r
- for_each(matches.begin(), matches.end(), bind2nd(mem_fun_ref(&vector<DMatch>::reserve), knn));\r
+ GpuMat trainIdx, imgIdx, distance;\r
\r
- for (size_t imgIdx = 0; imgIdx < trainDescCollection.size(); ++imgIdx)\r
+ knnMatch2Collection(query, trainCollection, trainIdx, imgIdx, distance, maskCollection);\r
+ knnMatch2Download(trainIdx, imgIdx, distance, matches);\r
+ }\r
+ else\r
{\r
- knnMatch(queryDescs, trainDescCollection[imgIdx], curMatches, knn,\r
- masks.empty() ? GpuMat() : masks[imgIdx]);\r
+ if (query.empty() || empty())\r
+ return;\r
+\r
+ vector< vector<DMatch> > curMatches;\r
+ vector<DMatch> temp;\r
+ temp.reserve(2 * k);\r
+\r
+ matches.resize(query.rows);\r
+ for_each(matches.begin(), matches.end(), bind2nd(mem_fun_ref(&vector<DMatch>::reserve), k));\r
\r
- for (int queryIdx = 0; queryIdx < queryDescs.rows; ++queryIdx)\r
+ for (size_t imgIdx = 0, size = trainDescCollection.size(); imgIdx < size; ++imgIdx)\r
{\r
- vector<DMatch>& localMatch = curMatches[queryIdx];\r
- vector<DMatch>& globalMatch = matches[queryIdx];\r
+ knnMatch(query, trainDescCollection[imgIdx], curMatches, k, masks.empty() ? GpuMat() : masks[imgIdx]);\r
\r
- for_each(localMatch.begin(), localMatch.end(), ImgIdxSetter(static_cast<int>(imgIdx)));\r
+ for (int queryIdx = 0; queryIdx < query.rows; ++queryIdx)\r
+ {\r
+ vector<DMatch>& localMatch = curMatches[queryIdx];\r
+ vector<DMatch>& globalMatch = matches[queryIdx];\r
+\r
+ for_each(localMatch.begin(), localMatch.end(), ImgIdxSetter(static_cast<int>(imgIdx)));\r
\r
- temp.clear();\r
- merge(globalMatch.begin(), globalMatch.end(), localMatch.begin(), localMatch.end(), back_inserter(temp));\r
+ temp.clear();\r
+ merge(globalMatch.begin(), globalMatch.end(), localMatch.begin(), localMatch.end(), back_inserter(temp));\r
\r
- globalMatch.clear();\r
- const size_t count = std::min((size_t)knn, temp.size());\r
- copy(temp.begin(), temp.begin() + count, back_inserter(globalMatch));\r
+ globalMatch.clear();\r
+ const size_t count = std::min((size_t)k, temp.size());\r
+ copy(temp.begin(), temp.begin() + count, back_inserter(globalMatch));\r
+ }\r
}\r
- }\r
\r
- if (compactResult)\r
- {\r
- vector< vector<DMatch> >::iterator new_end = remove_if(matches.begin(), matches.end(),\r
- mem_fun_ref(&vector<DMatch>::empty));\r
- matches.erase(new_end, matches.end());\r
+ if (compactResult)\r
+ {\r
+ vector< vector<DMatch> >::iterator new_end = remove_if(matches.begin(), matches.end(), mem_fun_ref(&vector<DMatch>::empty));\r
+ matches.erase(new_end, matches.end());\r
+ }\r
}\r
}\r
\r
////////////////////////////////////////////////////////////////////\r
// RadiusMatch\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
- GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance, const GpuMat& mask, Stream& stream)\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchSingle(const GpuMat& query, const GpuMat& train,\r
+ GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance, \r
+ const GpuMat& mask, Stream& stream)\r
{\r
- if (queryDescs.empty() || trainDescs.empty())\r
+ if (query.empty() || train.empty())\r
return;\r
\r
using namespace cv::gpu::bf_radius_match;\r
\r
- typedef void (*radiusMatch_caller_t)(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
+ typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
\r
- static const radiusMatch_caller_t radiusMatch_callers[3][8] =\r
+ static const caller_t callers[3][6] =\r
{\r
{\r
- radiusMatchSingleL1_gpu<unsigned char>, 0/*radiusMatchSingleL1_gpu<signed char>*/, radiusMatchSingleL1_gpu<unsigned short>,\r
- radiusMatchSingleL1_gpu<short>, radiusMatchSingleL1_gpu<int>, radiusMatchSingleL1_gpu<float>, 0, 0\r
+ matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/, \r
+ matchL1_gpu<unsigned short>, matchL1_gpu<short>, \r
+ matchL1_gpu<int>, matchL1_gpu<float>\r
},\r
{\r
- 0/*radiusMatchSingleL2_gpu<unsigned char>*/, 0/*radiusMatchSingleL2_gpu<signed char>*/, 0/*radiusMatchSingleL2_gpu<unsigned short>*/,\r
- 0/*radiusMatchSingleL2_gpu<short>*/, 0/*radiusMatchSingleL2_gpu<int>*/, radiusMatchSingleL2_gpu<float>, 0, 0\r
+ 0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/, \r
+ 0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/, \r
+ 0/*matchL2_gpu<int>*/, matchL2_gpu<float>\r
},\r
{\r
- radiusMatchSingleHamming_gpu<unsigned char>, 0/*radiusMatchSingleHamming_gpu<signed char>*/, radiusMatchSingleHamming_gpu<unsigned short>,\r
- 0/*radiusMatchSingleHamming_gpu<short>*/, radiusMatchSingleHamming_gpu<int>, 0, 0, 0\r
+ matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/, \r
+ matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/, \r
+ matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/\r
}\r
};\r
\r
- CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));\r
+ DeviceInfo info;\r
+ int cc = info.majorVersion() * 10 + info.minorVersion();\r
\r
- const int nQuery = queryDescs.rows;\r
- const int nTrain = trainDescs.rows;\r
+ CV_Assert(TargetArchs::builtWith(GLOBAL_ATOMICS) && info.supports(GLOBAL_ATOMICS));\r
\r
- CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);\r
- CV_Assert(trainDescs.type() == queryDescs.type() && trainDescs.cols == queryDescs.cols);\r
+ const int nQuery = query.rows;\r
+ const int nTrain = train.rows;\r
+\r
+ CV_Assert(query.channels() == 1 && query.depth() < CV_64F);\r
+ CV_Assert(train.type() == query.type() && train.cols == query.cols);\r
CV_Assert(trainIdx.empty() || (trainIdx.rows == nQuery && trainIdx.size() == distance.size()));\r
\r
ensureSizeIsEnough(1, nQuery, CV_32SC1, nMatches);\r
if (trainIdx.empty())\r
{\r
- ensureSizeIsEnough(nQuery, nTrain / 2, CV_32SC1, trainIdx);\r
- ensureSizeIsEnough(nQuery, nTrain / 2, CV_32FC1, distance);\r
+ ensureSizeIsEnough(nQuery, std::max((nTrain / 100), 10), CV_32SC1, trainIdx);\r
+ ensureSizeIsEnough(nQuery, std::max((nTrain / 100), 10), CV_32FC1, distance);\r
}\r
\r
- radiusMatch_caller_t func = radiusMatch_callers[distType][queryDescs.depth()];\r
- CV_Assert(func != 0);\r
+ caller_t func = callers[distType][query.depth()];\r
+ CV_Assert(func != 0); \r
\r
- func(queryDescs, trainDescs, maxDistance, mask, trainIdx, distance, nMatches, StreamAccessor::getStream(stream));\r
+ func(query, train, maxDistance, mask, trainIdx, distance, nMatches, cc, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches, \r
- vector< vector<DMatch> >& matches, bool compactResult)\r
+ vector< vector<DMatch> >& matches, bool compactResult)\r
{\r
if (trainIdx.empty() || distance.empty() || nMatches.empty())\r
return;\r
}\r
\r
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches,\r
- vector< vector<DMatch> >& matches, bool compactResult)\r
+ vector< vector<DMatch> >& matches, bool compactResult)\r
{\r
if (trainIdx.empty() || distance.empty() || nMatches.empty())\r
return;\r
\r
CV_Assert(trainIdx.type() == CV_32SC1);\r
- CV_Assert(nMatches.type() == CV_32SC1 && nMatches.isContinuous() && nMatches.cols >= trainIdx.rows);\r
CV_Assert(distance.type() == CV_32FC1 && distance.size() == trainIdx.size());\r
+ CV_Assert(nMatches.type() == CV_32SC1 && nMatches.cols == trainIdx.rows);\r
\r
const int nQuery = trainIdx.rows;\r
\r
matches.reserve(nQuery);\r
\r
const int* nMatches_ptr = nMatches.ptr<int>();\r
+\r
for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)\r
{\r
const int* trainIdx_ptr = trainIdx.ptr<int>(queryIdx);\r
}\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& query, const GpuMat& train,\r
vector< vector<DMatch> >& matches, float maxDistance, const GpuMat& mask, bool compactResult)\r
{\r
GpuMat trainIdx, distance, nMatches;\r
- radiusMatchSingle(queryDescs, trainDescs, trainIdx, distance, nMatches, maxDistance, mask);\r
+ radiusMatchSingle(query, train, trainIdx, distance, nMatches, maxDistance, mask);\r
radiusMatchDownload(trainIdx, distance, nMatches, matches, compactResult);\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,\r
- GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,\r
- const GpuMat& maskCollection, Stream& stream)\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, \r
+ float maxDistance, const vector<GpuMat>& masks, Stream& stream)\r
{\r
- if (queryDescs.empty() || trainCollection.empty())\r
+ if (query.empty() || empty())\r
return;\r
\r
using namespace cv::gpu::bf_radius_match;\r
\r
- typedef void (*radiusMatch_caller_t)(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream);\r
+ typedef void (*caller_t)(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream);\r
\r
- static const radiusMatch_caller_t radiusMatch_callers[3][8] =\r
+ static const caller_t callers[3][6] =\r
{\r
{\r
- radiusMatchCollectionL1_gpu<unsigned char>, 0/*radiusMatchCollectionL1_gpu<signed char>*/, radiusMatchCollectionL1_gpu<unsigned short>,\r
- radiusMatchCollectionL1_gpu<short>, radiusMatchCollectionL1_gpu<int>, radiusMatchCollectionL1_gpu<float>, 0, 0\r
+ matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/, \r
+ matchL1_gpu<unsigned short>, matchL1_gpu<short>, \r
+ matchL1_gpu<int>, matchL1_gpu<float>\r
},\r
{\r
- 0/*radiusMatchCollectionL2_gpu<unsigned char>*/, 0/*radiusMatchCollectionL2_gpu<signed char>*/, 0/*radiusMatchCollectionL2_gpu<unsigned short>*/,\r
- 0/*radiusMatchCollectionL2_gpu<short>*/, 0/*radiusMatchCollectionL2_gpu<int>*/, radiusMatchCollectionL2_gpu<float>, 0, 0\r
+ 0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/, \r
+ 0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/, \r
+ 0/*matchL2_gpu<int>*/, matchL2_gpu<float>\r
},\r
{\r
- radiusMatchCollectionHamming_gpu<unsigned char>, 0/*radiusMatchCollectionHamming_gpu<signed char>*/, radiusMatchCollectionHamming_gpu<unsigned short>,\r
- 0/*radiusMatchCollectionHamming_gpu<short>*/, radiusMatchCollectionHamming_gpu<int>, 0, 0, 0\r
+ matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/, \r
+ matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/, \r
+ matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/\r
}\r
};\r
\r
- CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));\r
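+ // Pack the device compute capability as major * 10 + minor (e.g. 20 for\r
+ // compute 2.0) and pass it through to the kernel dispatchers.\r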
+ DeviceInfo info;\r
+ int cc = info.majorVersion() * 10 + info.minorVersion();\r
\r
- const int nQuery = queryDescs.rows;\r
+ CV_Assert(TargetArchs::builtWith(GLOBAL_ATOMICS) && info.supports(GLOBAL_ATOMICS));\r
\r
- CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);\r
+ const int nQuery = query.rows;\r
+\r
+ CV_Assert(query.channels() == 1 && query.depth() < CV_64F);\r
CV_Assert(trainIdx.empty() || (trainIdx.rows == nQuery && trainIdx.size() == distance.size() && trainIdx.size() == imgIdx.size()));\r
\r
ensureSizeIsEnough(1, nQuery, CV_32SC1, nMatches);\r
if (trainIdx.empty())\r
{\r
- ensureSizeIsEnough(nQuery, nQuery / 2, CV_32SC1, trainIdx);\r
- ensureSizeIsEnough(nQuery, nQuery / 2, CV_32SC1, imgIdx);\r
- ensureSizeIsEnough(nQuery, nQuery / 2, CV_32FC1, distance);\r
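+ // Pre-allocate room for up to max(nQuery / 100, 10) matches per query;\r
+ // the count actually found for each query is reported in nMatches.\r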
+ ensureSizeIsEnough(nQuery, std::max((nQuery / 100), 10), CV_32SC1, trainIdx);\r
+ ensureSizeIsEnough(nQuery, std::max((nQuery / 100), 10), CV_32SC1, imgIdx);\r
+ ensureSizeIsEnough(nQuery, std::max((nQuery / 100), 10), CV_32FC1, distance);\r
}\r
\r
- radiusMatch_caller_t func = radiusMatch_callers[distType][queryDescs.depth()];\r
+ caller_t func = callers[distType][query.depth()];\r
CV_Assert(func != 0);\r
\r
- func(queryDescs, trainCollection, maxDistance, maskCollection, trainIdx, imgIdx, distance, nMatches, StreamAccessor::getStream(stream));\r
+ vector<DevMem2D> trains_(trainDescCollection.begin(), trainDescCollection.end());\r
+ vector<DevMem2D> masks_(masks.begin(), masks.end());\r
+\r
+ func(query, &trains_[0], static_cast<int>(trains_.size()), maxDistance, masks_.size() == 0 ? 0 : &masks_[0], \r
+ trainIdx, imgIdx, distance, nMatches, cc, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,\r
- vector< vector<DMatch> >& matches, bool compactResult)\r
+ vector< vector<DMatch> >& matches, bool compactResult)\r
{\r
if (trainIdx.empty() || imgIdx.empty() || distance.empty() || nMatches.empty())\r
return;\r
CV_Assert(trainIdx.type() == CV_32SC1);\r
CV_Assert(imgIdx.type() == CV_32SC1 && imgIdx.size() == trainIdx.size());\r
CV_Assert(distance.type() == CV_32FC1 && distance.size() == trainIdx.size());\r
- CV_Assert(nMatches.type() == CV_32SC1 && nMatches.isContinuous() && nMatches.cols >= trainIdx.rows);\r
+ CV_Assert(nMatches.type() == CV_32SC1 && nMatches.cols == trainIdx.rows);\r
\r
const int nQuery = trainIdx.rows;\r
\r
matches.reserve(nQuery);\r
\r
const int* nMatches_ptr = nMatches.ptr<int>();\r
+\r
for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)\r
{\r
const int* trainIdx_ptr = trainIdx.ptr<int>(queryIdx);\r
}\r
}\r
\r
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs, vector< vector<DMatch> >& matches,\r
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& query, vector< vector<DMatch> >& matches,\r
float maxDistance, const vector<GpuMat>& masks, bool compactResult)\r
{\r
- GpuMat trainCollection;\r
- GpuMat maskCollection;\r
-\r
- makeGpuCollection(trainCollection, maskCollection, masks);\r
-\r
GpuMat trainIdx, imgIdx, distance, nMatches;\r
-\r
- radiusMatchCollection(queryDescs, trainCollection, trainIdx, imgIdx, distance, nMatches, maxDistance, maskCollection);\r
-\r
+ radiusMatchCollection(query, trainIdx, imgIdx, distance, nMatches, maxDistance, masks);\r
radiusMatchDownload(trainIdx, imgIdx, distance, nMatches, matches, compactResult);\r
}\r
\r
\r
namespace cv { namespace gpu { namespace bf_knnmatch\r
{\r
- template <typename VecDiff, typename Dist, typename T, typename Mask>\r
- __device__ void distanceCalcLoop(const PtrStep_<T>& query, const DevMem2D_<T>& train, const Mask& m, int queryIdx,\r
- typename Dist::result_type& distMin1, typename Dist::result_type& distMin2, int& bestTrainIdx1, int& bestTrainIdx2, \r
- typename Dist::result_type* smem)\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Reduction\r
+\r
+ template <int BLOCK_SIZE> \r
+ __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, \r
+ int& bestTrainIdx1, int& bestTrainIdx2, \r
+ float* s_distance, int* s_trainIdx)\r
{\r
- const VecDiff vecDiff(query.ptr(queryIdx), train.cols, (typename Dist::value_type*)smem, threadIdx.y * blockDim.x + threadIdx.x, threadIdx.x);\r
- \r
- typename Dist::result_type* sdiffRow = smem + blockDim.x * threadIdx.y;\r
- \r
- distMin1 = numeric_limits<typename Dist::result_type>::max();\r
- distMin2 = numeric_limits<typename Dist::result_type>::max();\r
+ float myBestDistance1 = numeric_limits<float>::max(); \r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+\r
+ s_distance += threadIdx.y * BLOCK_SIZE;\r
+ s_trainIdx += threadIdx.y * BLOCK_SIZE;\r
+\r
+ s_distance[threadIdx.x] = bestDistance1;\r
+ s_trainIdx[threadIdx.x] = bestTrainIdx1;\r
+\r
+ __syncthreads();\r
+\r
+ if (threadIdx.x == 0)\r
+ {\r
+ #pragma unroll\r
+ for (int i = 0; i < BLOCK_SIZE; ++i)\r
+ {\r
+ float val = s_distance[i];\r
+\r
+ if (val < myBestDistance1)\r
+ {\r
+ myBestDistance2 = myBestDistance1;\r
+ myBestTrainIdx2 = myBestTrainIdx1;\r
+\r
+ myBestDistance1 = val;\r
+ myBestTrainIdx1 = s_trainIdx[i];\r
+ }\r
+ else if (val < myBestDistance2)\r
+ {\r
+ myBestDistance2 = val;\r
+ myBestTrainIdx2 = s_trainIdx[i];\r
+ }\r
+ }\r
+ }\r
+\r
+ __syncthreads();\r
\r
- bestTrainIdx1 = -1;\r
- bestTrainIdx2 = -1;\r
+ s_distance[threadIdx.x] = bestDistance2;\r
+ s_trainIdx[threadIdx.x] = bestTrainIdx2;\r
\r
- for (int trainIdx = threadIdx.y; trainIdx < train.rows; trainIdx += blockDim.y)\r
+ __syncthreads();\r
+\r
+ if (threadIdx.x == 0)\r
{\r
- if (m(queryIdx, trainIdx))\r
+ #pragma unroll\r
+ for (int i = 0; i < BLOCK_SIZE; ++i)\r
+ {\r
+ float val = s_distance[i];\r
+\r
+ if (val < myBestDistance2)\r
+ {\r
+ myBestDistance2 = val;\r
+ myBestTrainIdx2 = s_trainIdx[i];\r
+ }\r
+ }\r
+ }\r
+\r
+ bestDistance1 = myBestDistance1;\r
+ bestDistance2 = myBestDistance2;\r
+\r
+ bestTrainIdx1 = myBestTrainIdx1;\r
+ bestTrainIdx2 = myBestTrainIdx2;\r
+ }\r
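+ // The reduction above runs in two passes over shared memory: the first\r
+ // pass merges every thread's best candidate (demoting a displaced best\r
+ // to second-best), the second pass re-scans the per-thread second-best\r
+ // values so that bestDistance2 is globally correct.\r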
+\r
+ template <int BLOCK_SIZE> \r
+ __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, \r
+ int& bestTrainIdx1, int& bestTrainIdx2, \r
+ int& bestImgIdx1, int& bestImgIdx2, \r
+ float* s_distance, int* s_trainIdx, int* s_imgIdx)\r
+ {\r
+ float myBestDistance1 = numeric_limits<float>::max(); \r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+ int myBestImgIdx1 = -1;\r
+ int myBestImgIdx2 = -1;\r
+\r
+ s_distance += threadIdx.y * BLOCK_SIZE;\r
+ s_trainIdx += threadIdx.y * BLOCK_SIZE;\r
+ s_imgIdx += threadIdx.y * BLOCK_SIZE;\r
+\r
+ s_distance[threadIdx.x] = bestDistance1;\r
+ s_trainIdx[threadIdx.x] = bestTrainIdx1;\r
+ s_imgIdx[threadIdx.x] = bestImgIdx1;\r
+\r
+ __syncthreads();\r
+\r
+ if (threadIdx.x == 0)\r
+ {\r
+ #pragma unroll\r
+ for (int i = 0; i < BLOCK_SIZE; ++i)\r
{\r
- Dist dist;\r
+ float val = s_distance[i];\r
+\r
+ if (val < myBestDistance1)\r
+ {\r
+ myBestDistance2 = myBestDistance1;\r
+ myBestTrainIdx2 = myBestTrainIdx1;\r
+ myBestImgIdx2 = myBestImgIdx1;\r
+\r
+ myBestDistance1 = val;\r
+ myBestTrainIdx1 = s_trainIdx[i];\r
+ myBestImgIdx1 = s_imgIdx[i];\r
+ }\r
+ else if (val < myBestDistance2)\r
+ {\r
+ myBestDistance2 = val;\r
+ myBestTrainIdx2 = s_trainIdx[i];\r
+ myBestImgIdx2 = s_imgIdx[i];\r
+ }\r
+ }\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ s_distance[threadIdx.x] = bestDistance2;\r
+ s_trainIdx[threadIdx.x] = bestTrainIdx2;\r
+ s_imgIdx[threadIdx.x] = bestImgIdx2;\r
+\r
+ __syncthreads();\r
+\r
+ if (threadIdx.x == 0)\r
+ {\r
+ #pragma unroll\r
+ for (int i = 0; i < BLOCK_SIZE; ++i)\r
+ {\r
+ float val = s_distance[i];\r
+\r
+ if (val < myBestDistance2)\r
+ {\r
+ myBestDistance2 = val;\r
+ myBestTrainIdx2 = s_trainIdx[i];\r
+ myBestImgIdx2 = s_imgIdx[i];\r
+ }\r
+ }\r
+ }\r
+\r
+ bestDistance1 = myBestDistance1;\r
+ bestDistance2 = myBestDistance2;\r
+\r
+ bestTrainIdx1 = myBestTrainIdx1;\r
+ bestTrainIdx2 = myBestTrainIdx2;\r
+\r
+ bestImgIdx1 = myBestImgIdx1;\r
+ bestImgIdx2 = myBestImgIdx2;\r
+ }\r
+\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Match Unrolled Cached\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> \r
+ __device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query)\r
+ {\r
+ #pragma unroll\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+ s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(min(queryIdx, query.rows - 1))[loadX] : 0;\r
+ }\r
+ }\r
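+ // Rows are clamped with min(queryIdx, query.rows - 1) so out-of-range\r
+ // threads still issue valid loads; their results are discarded later by\r
+ // the queryIdx < query.rows guard in the kernels.\r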
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ float& bestDistance1, float& bestDistance2, \r
+ int& bestTrainIdx1, int& bestTrainIdx2, \r
+ int& bestImgIdx1, int& bestImgIdx2)\r
+ {\r
+ for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
+ {\r
+ Dist dist;\r
+\r
+ #pragma unroll\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = loadX < train.cols ? train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX] : 0;\r
+\r
+ __syncthreads();\r
+\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
+\r
+ __syncthreads();\r
+ }\r
\r
- const T* trainRow = train.ptr(trainIdx);\r
- \r
- vecDiff.calc(trainRow, train.cols, dist, sdiffRow, threadIdx.x);\r
+ typename Dist::result_type distVal = dist;\r
\r
- const typename Dist::result_type val = dist;\r
+ const int trainIdx = t * BLOCK_SIZE + threadIdx.x;\r
\r
- if (val < distMin1)\r
+ if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))\r
+ {\r
+ if (distVal < bestDistance1)\r
{\r
- distMin1 = val;\r
+ bestImgIdx2 = bestImgIdx1;\r
+ bestDistance2 = bestDistance1;\r
+ bestTrainIdx2 = bestTrainIdx1;\r
+\r
+ bestImgIdx1 = imgIdx;\r
+ bestDistance1 = distVal;\r
bestTrainIdx1 = trainIdx;\r
}\r
- else if (val < distMin2)\r
+ else if (distVal < bestDistance2)\r
{\r
- distMin2 = val;\r
+ bestImgIdx2 = imgIdx;\r
+ bestDistance2 = distVal;\r
bestTrainIdx2 = trainIdx;\r
}\r
}\r
}\r
}\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename VecDiff, typename Dist, typename T, typename Mask>\r
- __global__ void knnMatch2(const PtrStep_<T> query, const DevMem2D_<T> train, const Mask m, int2* trainIdx, float2* distance)\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)\r
{\r
- typedef typename Dist::result_type result_type;\r
- typedef typename Dist::value_type value_type;\r
+ extern __shared__ int smem[];\r
\r
- __shared__ result_type smem[BLOCK_DIM_X * BLOCK_DIM_Y];\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
\r
- const int queryIdx = blockIdx.x;\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);\r
\r
- result_type distMin1;\r
- result_type distMin2;\r
+ loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);\r
\r
- int bestTrainIdx1;\r
- int bestTrainIdx2;\r
+ float myBestDistance1 = numeric_limits<float>::max();\r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+\r
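+ // Single-train variant: the trainIdx references are passed again in the\r
+ // imgIdx parameter slots, since no separate image index is tracked here.\r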
+ loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);\r
\r
- distanceCalcLoop<VecDiff, Dist>(query, train, m, queryIdx, distMin1, distMin2, bestTrainIdx1, bestTrainIdx2, smem);\r
__syncthreads();\r
\r
- volatile result_type* sdistMinRow = smem;\r
- volatile int* sbestTrainIdxRow = (int*)(sdistMinRow + 2 * BLOCK_DIM_Y);\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- if (threadIdx.x == 0)\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
{\r
- sdistMinRow[threadIdx.y] = distMin1;\r
- sdistMinRow[threadIdx.y + BLOCK_DIM_Y] = distMin2;\r
+ bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);\r
+ bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
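+ // Shared memory must fit the cached query block (BLOCK_SIZE x MAX_DESC_LEN\r
+ // elements) plus one train tile, and is reused by the findBestMatch\r
+ // reduction, hence the max() against BLOCK_SIZE in the size formula.\r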
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);\r
+\r
+ loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);\r
+\r
+ float myBestDistance1 = numeric_limits<float>::max();\r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+ int myBestImgIdx1 = -1;\r
+ int myBestImgIdx2 = -1;\r
\r
- sbestTrainIdxRow[threadIdx.y] = bestTrainIdx1; \r
- sbestTrainIdxRow[threadIdx.y + BLOCK_DIM_Y] = bestTrainIdx2;\r
+ Mask m = mask;\r
+\r
+ for (int imgIdx = 0; imgIdx < n; ++imgIdx)\r
+ {\r
+ const DevMem2D_<T> train = trains[imgIdx];\r
+ m.next();\r
+ loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);\r
}\r
+\r
__syncthreads();\r
\r
- if (threadIdx.x == 0 && threadIdx.y == 0)\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+ int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
{\r
- distMin1 = numeric_limits<result_type>::max();\r
- distMin2 = numeric_limits<result_type>::max();\r
+ bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);\r
+ bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);\r
+ bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
\r
- bestTrainIdx1 = -1;\r
- bestTrainIdx2 = -1;\r
+ const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
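+ // Here the floor is 2 * BLOCK_SIZE so that the reduction stage has room\r
+ // for its three arrays: distance, trainIdx and imgIdx.\r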
+\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Match Unrolled\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ float& bestDistance1, float& bestDistance2, \r
+ int& bestTrainIdx1, int& bestTrainIdx2, \r
+ int& bestImgIdx1, int& bestImgIdx2)\r
+ {\r
+ for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
+ {\r
+ Dist dist;\r
\r
#pragma unroll\r
- for (int i = 0; i < BLOCK_DIM_Y; ++i)\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
{\r
- result_type val = sdistMinRow[i];\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
\r
- if (val < distMin1)\r
+ if (loadX < query.cols)\r
{\r
- distMin1 = val;\r
- bestTrainIdx1 = sbestTrainIdxRow[i];\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
}\r
- else if (val < distMin2)\r
- {\r
- distMin2 = val;\r
- bestTrainIdx2 = sbestTrainIdxRow[i];\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
}\r
+\r
+ __syncthreads();\r
+\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
+\r
+ __syncthreads();\r
}\r
\r
- #pragma unroll\r
- for (int i = BLOCK_DIM_Y; i < 2 * BLOCK_DIM_Y; ++i)\r
+ typename Dist::result_type distVal = dist;\r
+\r
+ const int trainIdx = t * BLOCK_SIZE + threadIdx.x;\r
+\r
+ if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))\r
{\r
- result_type val = sdistMinRow[i];\r
+ if (distVal < bestDistance1)\r
+ {\r
+ bestImgIdx2 = bestImgIdx1;\r
+ bestDistance2 = bestDistance1;\r
+ bestTrainIdx2 = bestTrainIdx1;\r
\r
- if (val < distMin2)\r
+ bestImgIdx1 = imgIdx;\r
+ bestDistance1 = distVal;\r
+ bestTrainIdx1 = trainIdx;\r
+ }\r
+ else if (distVal < bestDistance2)\r
{\r
- distMin2 = val;\r
- bestTrainIdx2 = sbestTrainIdxRow[i];\r
+ bestImgIdx2 = imgIdx;\r
+ bestDistance2 = distVal;\r
+ bestTrainIdx2 = trainIdx;\r
}\r
}\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ float myBestDistance1 = numeric_limits<float>::max();\r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+\r
+ loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);\r
+\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
+ {\r
+ bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);\r
+ bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
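+ // Unlike the cached variant, matchUnrolled reloads the query tile on each\r
+ // iteration, so two BLOCK_SIZE x BLOCK_SIZE tiles of shared memory suffice.\r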
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ float myBestDistance1 = numeric_limits<float>::max();\r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+ int myBestImgIdx1 = -1;\r
+ int myBestImgIdx2 = -1;\r
+\r
+ Mask m = mask;\r
+\r
+ for (int imgIdx = 0; imgIdx < n; ++imgIdx)\r
+ {\r
+ const DevMem2D_<T> train = trains[imgIdx];\r
+ m.next();\r
+ loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+ int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);\r
\r
- trainIdx[queryIdx] = make_int2(bestTrainIdx1, bestTrainIdx2);\r
- distance[queryIdx] = make_float2(distMin1, distMin2);\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
+ {\r
+ bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);\r
+ bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);\r
+ bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);\r
}\r
}\r
\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
+\r
///////////////////////////////////////////////////////////////////////////////\r
- // Knn 2 Match kernel caller\r
+ // Match\r
+\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ __device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ float& bestDistance1, float& bestDistance2, \r
+ int& bestTrainIdx1, int& bestTrainIdx2, \r
+ int& bestImgIdx1, int& bestImgIdx2)\r
+ {\r
+ for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
+ {\r
+ Dist dist;\r
+\r
+ for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+\r
+ if (loadX < query.cols)\r
+ {\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
+ }\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
+\r
+ __syncthreads();\r
+ }\r
+\r
+ typename Dist::result_type distVal = dist;\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>\r
- void knnMatch2Simple_caller(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
- cudaStream_t stream)\r
+ const int trainIdx = t * BLOCK_SIZE + threadIdx.x;\r
+\r
+ if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))\r
+ {\r
+ if (distVal < bestDistance1)\r
+ {\r
+ bestImgIdx2 = bestImgIdx1;\r
+ bestDistance2 = bestDistance1;\r
+ bestTrainIdx2 = bestTrainIdx1;\r
+\r
+ bestImgIdx1 = imgIdx;\r
+ bestDistance1 = distVal;\r
+ bestTrainIdx1 = trainIdx;\r
+ }\r
+ else if (distVal < bestDistance2)\r
+ {\r
+ bestImgIdx2 = imgIdx;\r
+ bestDistance2 = distVal;\r
+ bestTrainIdx2 = trainIdx;\r
+ }\r
+ }\r
+ }\r
+ }\r
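+ // Generic fallback: same tiling as loopUnrolled, but the descriptor length\r
+ // is a runtime value, so the outer loop over descriptor chunks cannot be\r
+ // unrolled at compile time.\r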
+\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)\r
{\r
- const dim3 grid(query.rows, 1, 1);\r
- const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- knnMatch2<BLOCK_DIM_X, BLOCK_DIM_Y, VecDiffGlobal<BLOCK_DIM_X, T>, Dist, T>\r
- <<<grid, threads, 0, stream>>>(query, train, mask, trainIdx, distance);\r
+ float myBestDistance1 = numeric_limits<float>::max();\r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+\r
+ loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);\r
+\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
+ {\r
+ bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);\r
+ bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T, typename Mask>\r
- void knnMatch2Cached_caller(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
- cudaStream_t stream)\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)\r
{\r
- StaticAssert<BLOCK_DIM_X * BLOCK_DIM_Y >= MAX_LEN>::check(); // block size must be greter than descriptors length\r
- StaticAssert<MAX_LEN % BLOCK_DIM_X == 0>::check(); // max descriptors length must divide to blockDimX\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
\r
- const dim3 grid(query.rows, 1, 1);\r
- const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- knnMatch2<BLOCK_DIM_X, BLOCK_DIM_Y, VecDiffCachedRegister<BLOCK_DIM_X, MAX_LEN, LEN_EQ_MAX_LEN, typename Dist::value_type>, Dist, T>\r
- <<<grid, threads, 0, stream>>>(query, train, mask, trainIdx.data, distance.data);\r
+ float myBestDistance1 = numeric_limits<float>::max();\r
+ float myBestDistance2 = numeric_limits<float>::max();\r
+ int myBestTrainIdx1 = -1;\r
+ int myBestTrainIdx2 = -1;\r
+ int myBestImgIdx1 = -1;\r
+ int myBestImgIdx2 = -1;\r
+\r
+ Mask m = mask;\r
+\r
+ for (int imgIdx = 0; imgIdx < n; ++imgIdx)\r
+ {\r
+ const DevMem2D_<T> train = trains[imgIdx];\r
+ m.next();\r
+ loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+ int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
+ {\r
+ bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);\r
+ bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);\r
+ bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
///////////////////////////////////////////////////////////////////////////////\r
- // Knn 2 Match Dispatcher\r
- \r
- template <typename Dist, typename T, typename Mask>\r
- void knnMatch2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream)\r
+ // knnMatch 2 dispatcher\r
+\r
+ template <typename Dist, typename T, typename Mask> \r
+ void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, \r
+ int cc, cudaStream_t stream)\r
{\r
- if (query.cols < 64)\r
+ if (query.cols <= 64)\r
{\r
- knnMatch2Cached_caller<16, 16, 64, false, Dist>(\r
- query, train, mask, \r
- static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> >(distance),\r
- stream);\r
+ matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
- else if (query.cols == 64)\r
+ else if (query.cols <= 128)\r
{\r
- knnMatch2Cached_caller<16, 16, 64, true, Dist>(\r
- query, train, mask, \r
- static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> >(distance), \r
- stream);\r
+ matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
- else if (query.cols < 128)\r
+ else if (query.cols <= 256)\r
{\r
- knnMatch2Cached_caller<16, 16, 128, false, Dist>(\r
- query, train, mask, \r
- static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> >(distance), \r
- stream);\r
+ matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
- else if (query.cols == 128 && cc >= 12)\r
+ else if (query.cols <= 512)\r
+ { \r
+ matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
+ }\r
+ else if (query.cols <= 1024)\r
+ { \r
+ matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
+ }\r
+ else\r
+ {\r
+ match<16, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
+ }\r
+ }\r
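+ // Dispatch on descriptor length: up to 128 elements uses the cached\r
+ // kernels, up to 1024 the unrolled kernels, and anything longer falls\r
+ // back to the generic match kernel.\r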
+\r
+ template <typename Dist, typename T, typename Mask> \r
+ void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
+ int cc, cudaStream_t stream)\r
+ {\r
+ if (query.cols <= 64)\r
{\r
- knnMatch2Cached_caller<16, 16, 128, true, Dist>(\r
- query, train, mask, \r
- static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> >(distance), \r
- stream);\r
+ matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
- else if (query.cols < 256 && cc >= 12)\r
+ else if (query.cols <= 128)\r
{\r
- knnMatch2Cached_caller<16, 16, 256, false, Dist>(\r
- query, train, mask, \r
- static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> >(distance), \r
- stream);\r
+ matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
- else if (query.cols == 256 && cc >= 12)\r
+ else if (query.cols <= 256)\r
{\r
- knnMatch2Cached_caller<16, 16, 256, true, Dist>(\r
- query, train, mask, \r
- static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> >(distance), \r
- stream);\r
+ matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
+ }\r
+ else if (query.cols <= 512)\r
+ { \r
+ matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
+ }\r
+ else if (query.cols <= 1024)\r
+ { \r
+ matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
else\r
{\r
- knnMatch2Simple_caller<16, 16, Dist>(\r
- query, train, mask, \r
- static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> >(distance),\r
- stream);\r
+ match<16, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
}\r
- \r
+\r
///////////////////////////////////////////////////////////////////////////////\r
// Calc distance kernel\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>\r
- __global__ void calcDistance(const PtrStep_<T> query, const DevMem2D_<T> train, const Mask mask, PtrStepf distance)\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ __global__ void calcDistanceUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, PtrStepf allDist)\r
{\r
- __shared__ typename Dist::result_type sdiff[BLOCK_DIM_X * BLOCK_DIM_Y];\r
+ extern __shared__ int smem[];\r
\r
- typename Dist::result_type* sdiff_row = sdiff + BLOCK_DIM_X * threadIdx.y;\r
- \r
- const int queryIdx = blockIdx.x;\r
- const T* queryDescs = query.ptr(queryIdx);\r
+ const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;\r
+ const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;\r
\r
- const int trainIdx = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- if (trainIdx < train.rows)\r
+ Dist dist;\r
+\r
+ #pragma unroll\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
{\r
- const T* trainDescs = train.ptr(trainIdx);\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+\r
+ if (loadX < query.cols)\r
+ {\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
+ }\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
+ }\r
\r
- typename Dist::result_type myDist = numeric_limits<typename Dist::result_type>::max();\r
+ __syncthreads();\r
+\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
+\r
+ __syncthreads();\r
+ }\r
+\r
+ if (queryIdx < query.rows && trainIdx < train.rows)\r
+ {\r
+ float distVal = numeric_limits<float>::max();\r
\r
if (mask(queryIdx, trainIdx))\r
- {\r
- Dist dist;\r
+ distVal = (typename Dist::result_type)dist;\r
+\r
+ allDist.ptr(queryIdx)[trainIdx] = distVal;\r
+ }\r
+ }\r
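+ // Masked-out pairs keep numeric_limits<float>::max() as their distance,\r
+ // which the later findBestMatch passes treat as "no match".\r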
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void calcDistanceUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
+\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ __global__ void calcDistance(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, PtrStepf allDist)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;\r
+ const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- calcVecDiffGlobal<BLOCK_DIM_X>(queryDescs, trainDescs, train.cols, dist, sdiff_row, threadIdx.x);\r
+ Dist dist;\r
\r
- myDist = dist;\r
+ for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+\r
+ if (loadX < query.cols)\r
+ {\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
}\r
- \r
- if (threadIdx.x == 0)\r
- distance.ptr(queryIdx)[trainIdx] = myDist;\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
+\r
+ __syncthreads();\r
}\r
- }\r
\r
- ///////////////////////////////////////////////////////////////////////////////\r
- // Calc distance kernel caller\r
+ if (queryIdx < query.rows && trainIdx < train.rows)\r
+ {\r
+ float distVal = numeric_limits<float>::max();\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>\r
- void calcDistance_caller(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& distance, cudaStream_t stream)\r
+ if (mask(queryIdx, trainIdx))\r
+ distVal = (typename Dist::result_type)dist;\r
+\r
+ allDist.ptr(queryIdx)[trainIdx] = distVal;\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ void calcDistance(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, cudaStream_t stream)\r
{\r
- const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);\r
- const dim3 grid(query.rows, divUp(train.rows, BLOCK_DIM_Y), 1);\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- calcDistance<BLOCK_DIM_X, BLOCK_DIM_Y, Dist, T><<<grid, threads, 0, stream>>>(query, train, mask, distance);\r
+ calcDistance<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <typename Dist, typename T, typename Mask>\r
- void calcDistanceDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2D& allDist, cudaStream_t stream)\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Calc Distance dispatcher\r
+\r
+ template <typename Dist, typename T, typename Mask> \r
+ void calcDistanceDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2Df& allDist, \r
+ int cc, cudaStream_t stream)\r
{\r
- calcDistance_caller<16, 16, Dist>(query, train, mask, static_cast<DevMem2Df>(allDist), stream);\r
+ if (query.cols <= 64)\r
+ {\r
+ calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream);\r
+ }\r
+ else if (query.cols <= 128)\r
+ {\r
+ calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream);\r
+ }\r
+ else if (query.cols <= 256)\r
+ {\r
+ calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream);\r
+ }\r
+ else if (query.cols <= 512)\r
+ { \r
+ calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream);\r
+ }\r
+ else if (query.cols <= 1024)\r
+ { \r
+ calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream);\r
+ }\r
+ else\r
+ {\r
+ calcDistance<16, Dist>(query, train, mask, allDist, stream);\r
+ }\r
}\r
\r
///////////////////////////////////////////////////////////////////////////////\r
// find knn match kernel\r
\r
- template <int BLOCK_SIZE> __global__ void findBestMatch(DevMem2Df allDist_, int i, PtrStepi trainIdx_, PtrStepf distance_)\r
+ template <int BLOCK_SIZE> \r
+ __global__ void findBestMatch(DevMem2Df allDist, int i, PtrStepi trainIdx, PtrStepf distance)\r
{\r
const int SMEM_SIZE = BLOCK_SIZE > 64 ? BLOCK_SIZE : 64;\r
- __shared__ float sdist[SMEM_SIZE];\r
- __shared__ int strainIdx[SMEM_SIZE];\r
+ __shared__ float s_dist[SMEM_SIZE];\r
+ __shared__ int s_trainIdx[SMEM_SIZE];\r
\r
const int queryIdx = blockIdx.x;\r
\r
- float* allDist = allDist_.ptr(queryIdx);\r
- int* trainIdx = trainIdx_.ptr(queryIdx);\r
- float* distance = distance_.ptr(queryIdx);\r
+ float* allDistRow = allDist.ptr(queryIdx);\r
\r
float dist = numeric_limits<float>::max();\r
int bestIdx = -1;\r
\r
- for (int i = threadIdx.x; i < allDist_.cols; i += BLOCK_SIZE)\r
+ for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE)\r
{\r
- float reg = allDist[i];\r
+ float reg = allDistRow[i];\r
if (reg < dist)\r
{\r
dist = reg;\r
bestIdx = i;\r
}\r
}\r
\r
- sdist[threadIdx.x] = dist;\r
- strainIdx[threadIdx.x] = bestIdx;\r
+ s_dist[threadIdx.x] = dist;\r
+ s_trainIdx[threadIdx.x] = bestIdx;\r
__syncthreads();\r
\r
- reducePredVal<BLOCK_SIZE>(sdist, dist, strainIdx, bestIdx, threadIdx.x, less<volatile float>());\r
+ reducePredVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<volatile float>());\r
\r
if (threadIdx.x == 0)\r
{\r
if (dist < numeric_limits<float>::max())\r
{\r
- allDist[bestIdx] = numeric_limits<float>::max();\r
- trainIdx[i] = bestIdx;\r
- distance[i] = dist;\r
+ allDistRow[bestIdx] = numeric_limits<float>::max();\r
+ trainIdx.ptr(queryIdx)[i] = bestIdx;\r
+ distance.ptr(queryIdx)[i] = dist;\r
}\r
}\r
}\r
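+\r
+ // Each launch extracts the i-th nearest neighbour: the minimum found is\r
+ // overwritten with numeric_limits<float>::max() in allDist so the next\r
+ // launch selects the next-best candidate.\r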
- \r
- ///////////////////////////////////////////////////////////////////////////////\r
- // find knn match kernel caller\r
\r
- template <int BLOCK_SIZE> void findKnnMatch_caller(int k, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)\r
+ template <int BLOCK_SIZE> \r
+ void findKnnMatch(int k, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)\r
{\r
- const dim3 threads(BLOCK_SIZE, 1, 1);\r
+ const dim3 block(BLOCK_SIZE, 1, 1);\r
const dim3 grid(trainIdx.rows, 1, 1);\r
\r
for (int i = 0; i < k; ++i)\r
{\r
- findBestMatch<BLOCK_SIZE><<<grid, threads, 0, stream>>>(allDist, i, trainIdx, distance);\r
+ findBestMatch<BLOCK_SIZE><<<grid, block, 0, stream>>>(allDist, i, trainIdx, distance);\r
cudaSafeCall( cudaGetLastError() );\r
}\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- void findKnnMatchDispatcher(int k, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, cudaStream_t stream)\r
+ void findKnnMatchDispatcher(int k, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream)\r
{\r
- findKnnMatch_caller<256>(k, static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), static_cast<DevMem2Df>(allDist), stream);\r
+ findKnnMatch<256>(k, static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), allDist, stream);\r
}\r
- \r
+\r
///////////////////////////////////////////////////////////////////////////////\r
// knn match Dispatcher\r
\r
- template <typename Dist, typename T>\r
- void knnMatchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, int k, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, \r
+ template <typename Dist, typename T, typename Mask>\r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, int k, const Mask& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, \r
int cc, cudaStream_t stream)\r
{\r
- if (mask.data)\r
+ if (k == 2)\r
{\r
- if (k == 2)\r
- {\r
- knnMatch2Dispatcher<Dist>(query, train, SingleMask(mask), trainIdx, distance, cc, stream);\r
- return;\r
- }\r
-\r
- calcDistanceDispatcher<Dist>(query, train, SingleMask(mask), allDist, stream);\r
+ match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, cc, stream);\r
}\r
else\r
{\r
- if (k == 2)\r
- {\r
- knnMatch2Dispatcher<Dist>(query, train, WithOutMask(), trainIdx, distance, cc, stream);\r
- return;\r
- }\r
-\r
- calcDistanceDispatcher<Dist>(query, train, WithOutMask(), allDist, stream);\r
+ calcDistanceDispatcher<Dist>(query, train, mask, allDist, cc, stream);\r
+ findKnnMatchDispatcher(k, trainIdx, distance, allDist, cc, stream);\r
}\r
-\r
- findKnnMatchDispatcher(k, trainIdx, distance, allDist, stream);\r
- }\r
+ } \r
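+\r
+ // k == 2 uses the fused two-best kernels; for arbitrary k the full\r
+ // distance matrix is materialized once and the k best entries are then\r
+ // extracted in k sequential findBestMatch passes.\r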
\r
///////////////////////////////////////////////////////////////////////////////\r
// knn match caller\r
\r
- template <typename T> void knnMatchL1_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, \r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, \r
int cc, cudaStream_t stream)\r
{\r
- knnMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, mask, trainIdx, distance, allDist, cc, stream);\r
+ if (mask.data)\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);\r
+ else\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);\r
}\r
\r
- template void knnMatchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- //template void knnMatchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- template void knnMatchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- template void knnMatchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- template void knnMatchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- template void knnMatchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ //template void matchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
\r
- template <typename T> void knnMatchL2_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist,\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask, \r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist,\r
int cc, cudaStream_t stream)\r
{\r
- knnMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, mask, trainIdx, distance, allDist, cc, stream);\r
+ if (mask.data)\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);\r
+ else\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);\r
}\r
\r
- //template void knnMatchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- //template void knnMatchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- //template void knnMatchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- //template void knnMatchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- //template void knnMatchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- template void knnMatchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ template void matchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
\r
- template <typename T> void knnMatchHamming_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,\r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, \r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,\r
+ const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, \r
int cc, cudaStream_t stream)\r
{\r
- knnMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, mask, trainIdx, distance, allDist, cc, stream);\r
+ if (mask.data)\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);\r
+ else\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);\r
}\r
\r
- template void knnMatchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- //template void knnMatchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- template void knnMatchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- //template void knnMatchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
- template void knnMatchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int k, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
+\r
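+ // The match2* functions below are assumed to be the fixed k = 2 path: for each\r
+ // query descriptor they keep the two best matches over the whole train\r
+ // collection and hand the paired (trainIdx, imgIdx, distance) results to\r
+ // match2Dispatcher. The pointer-table layout mirrors the collection callers:\r
+ // trains.ptr() holds one DevMem2D_<T> header per train image.\r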
+ template <typename T> void match2L1_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
+ int cc, cudaStream_t stream)\r
+ {\r
+ if (masks.data)\r
+ match2Dispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);\r
+ else\r
+ match2Dispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);\r
+ }\r
+ \r
+ template void match2L1_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void match2L1_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void match2L1_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void match2L1_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void match2L1_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void match2L1_gpu<float >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+\r
+ template <typename T> void match2L2_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
+ int cc, cudaStream_t stream)\r
+ {\r
+ if (masks.data)\r
+ match2Dispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);\r
+ else\r
+ match2Dispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);\r
+ }\r
+ \r
+ //template void match2L2_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void match2L2_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void match2L2_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void match2L2_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void match2L2_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void match2L2_gpu<float >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ \r
+ template <typename T> void match2Hamming_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
+ int cc, cudaStream_t stream)\r
+ {\r
+ if (masks.data)\r
+ match2Dispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);\r
+ else\r
+ match2Dispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);\r
+ }\r
+ \r
+ template void match2Hamming_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void match2Hamming_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void match2Hamming_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void match2Hamming_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void match2Hamming_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
}}}\r
\r
namespace cv { namespace gpu { namespace bf_match\r
{\r
- template <int BLOCK_DIM_Y, typename T>\r
- __device__ void findBestMatch(T& myDist, int2& myIdx, T* smin, int2* sIdx)\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Reduction\r
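+ //\r
+ // Each BLOCK_SIZE x BLOCK_SIZE thread block handles BLOCK_SIZE query rows: the\r
+ // threads of row threadIdx.y publish their running best candidate into that\r
+ // row's slice of shared memory, and reducePredVal / reducePredVal2 then fold\r
+ // the BLOCK_SIZE candidates down to one (distance, trainIdx[, imgIdx]) result.\r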
+\r
+ template <int BLOCK_SIZE> \r
+ __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)\r
{\r
- if (threadIdx.x == 0)\r
- {\r
- smin[threadIdx.y] = myDist;\r
- sIdx[threadIdx.y] = myIdx;\r
- }\r
+ s_distance += threadIdx.y * BLOCK_SIZE;\r
+ s_trainIdx += threadIdx.y * BLOCK_SIZE;\r
+\r
+ s_distance[threadIdx.x] = bestDistance;\r
+ s_trainIdx[threadIdx.x] = bestTrainIdx;\r
+\r
+ __syncthreads();\r
+\r
+ reducePredVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<volatile float>());\r
+ }\r
+\r
+ template <int BLOCK_SIZE> \r
+ __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)\r
+ {\r
+ s_distance += threadIdx.y * BLOCK_SIZE;\r
+ s_trainIdx += threadIdx.y * BLOCK_SIZE;\r
+ s_imgIdx += threadIdx.y * BLOCK_SIZE;\r
+\r
+ s_distance[threadIdx.x] = bestDistance;\r
+ s_trainIdx[threadIdx.x] = bestTrainIdx;\r
+ s_imgIdx [threadIdx.x] = bestImgIdx;\r
+\r
__syncthreads();\r
\r
- reducePredVal<BLOCK_DIM_Y>(smin, myDist, sIdx, myIdx, threadIdx.y * blockDim.x + threadIdx.x, less<volatile T>());\r
+ reducePredVal2<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, s_imgIdx, bestImgIdx, threadIdx.x, less<volatile float>());\r
}\r
\r
- template <typename Dist, typename VecDiff, typename T, typename Mask>\r
- __device__ void matchDescs(int queryIdx, int imgIdx, const DevMem2D_<T>& train, const Mask& m, const VecDiff& vecDiff,\r
- typename Dist::result_type& myDist, int2& myIdx, typename Dist::result_type* sdiff_row)\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Match Unrolled Cached\r
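+ //\r
+ // "Cached" variant: the whole query descriptor (up to MAX_DESC_LEN elements)\r
+ // is staged into shared memory once per block and reused for every train tile.\r
+ // Out-of-range rows are clamped with min(..., rows - 1) so the loads stay in\r
+ // bounds; the bogus results they produce are filtered by the queryIdx /\r
+ // trainIdx range checks before anything is recorded.\r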
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> \r
+ __device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query)\r
+ {\r
+ #pragma unroll\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+ s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(min(queryIdx, query.rows - 1))[loadX] : 0;\r
+ }\r
+ }\r
+\r
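+ // Walk the train matrix in BLOCK_SIZE-row tiles: each iteration loads one\r
+ // transposed train tile into shared memory, accumulates partial distances via\r
+ // Dist::reduceIter, and then tests the finished distance against the running\r
+ // best. The inner loops unroll fully because MAX_DESC_LEN is a compile-time\r
+ // constant (a multiple of BLOCK_SIZE in every instantiation used below).\r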
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ float& bestDistance, int& bestTrainIdx, int& bestImgIdx)\r
{\r
- for (int trainIdx = threadIdx.y; trainIdx < train.rows; trainIdx += blockDim.y)\r
+ for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
{\r
- if (m(queryIdx, trainIdx))\r
+ Dist dist;\r
+\r
+ #pragma unroll\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
{\r
- const T* trainDescs = train.ptr(trainIdx);\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
\r
- Dist dist;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = loadX < train.cols ? train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX] : 0;\r
\r
- vecDiff.calc(trainDescs, train.cols, dist, sdiff_row, threadIdx.x);\r
+ __syncthreads();\r
\r
- const typename Dist::result_type res = dist;\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
\r
- if (res < myDist)\r
- {\r
- myDist = res;\r
- myIdx.x = trainIdx;\r
- myIdx.y = imgIdx;\r
- }\r
+ __syncthreads();\r
+ }\r
+\r
+ typename Dist::result_type distVal = dist;\r
+\r
+ const int trainIdx = t * BLOCK_SIZE + threadIdx.x;\r
+\r
+ if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))\r
+ {\r
+ bestImgIdx = imgIdx;\r
+ bestDistance = distVal;\r
+ bestTrainIdx = trainIdx;\r
}\r
}\r
}\r
\r
- template <typename T> struct SingleTrain\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)\r
{\r
- explicit SingleTrain(const DevMem2D_<T>& train_) : train(train_)\r
- {\r
- }\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);\r
+\r
+ loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);\r
+\r
+ float myBestDistance = numeric_limits<float>::max();\r
+ int myBestTrainIdx = -1;\r
+\r
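+ // Single-image variant: myBestTrainIdx is passed again in the bestImgIdx slot\r
+ // as a harmless dummy, since no image index is tracked here.\r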
+ loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);\r
+\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);\r
\r
- template <typename Dist, typename VecDiff, typename Mask>\r
- __device__ __forceinline__ void loop(int queryIdx, Mask& m, const VecDiff& vecDiff, \r
- typename Dist::result_type& myDist, int2& myIdx, typename Dist::result_type* sdiff_row) const\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
{\r
- matchDescs<Dist>(queryIdx, 0, train, m, vecDiff, myDist, myIdx, sdiff_row);\r
+ bestTrainIdx[queryIdx] = myBestTrainIdx;\r
+ bestDistance[queryIdx] = myBestDistance;\r
}\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
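+ // Shared memory must cover the larger of the two phases: the query cache plus\r
+ // one train tile (BLOCK_SIZE * MAX_DESC_LEN + BLOCK_SIZE * BLOCK_SIZE elements)\r
+ // during matching, or the two reduction buffers (2 * BLOCK_SIZE * BLOCK_SIZE).\r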
+ const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, \r
+ int* bestTrainIdx, int* bestImgIdx, float* bestDistance)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
\r
- __device__ __forceinline__ int desc_len() const\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);\r
+\r
+ loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);\r
+\r
+ float myBestDistance = numeric_limits<float>::max();\r
+ int myBestTrainIdx = -1;\r
+ int myBestImgIdx = -1;\r
+\r
+ Mask m = mask;\r
+\r
+ for (int imgIdx = 0; imgIdx < n; ++imgIdx)\r
{\r
- return train.cols;\r
+ const DevMem2D_<T> train = trains[imgIdx];\r
+ m.next();\r
+ loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);\r
}\r
\r
- static __device__ __forceinline__ void storeResult(float* distance, int* trainIdx, int* imgIdx, \r
- float myDist, const int2& myIdx, int queryIdx)\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+ int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
{\r
- trainIdx[queryIdx] = myIdx.x;\r
- distance[queryIdx] = myDist;\r
+ bestTrainIdx[queryIdx] = myBestTrainIdx;\r
+ bestImgIdx[queryIdx] = myBestImgIdx;\r
+ bestDistance[queryIdx] = myBestDistance;\r
}\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
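+ // As above, but the multi-image reduction needs three BLOCK_SIZE * BLOCK_SIZE\r
+ // buffers (distance, trainIdx, imgIdx), hence the 2 * BLOCK_SIZE lower bound.\r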
+ const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- const DevMem2D_<T> train;\r
- };\r
+ matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
+\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Match Unrolled\r
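+ //\r
+ // Like the cached variant, but the query tile is reloaded on every iteration\r
+ // instead of being cached whole, so shared memory stays at two\r
+ // BLOCK_SIZE x BLOCK_SIZE tiles no matter how long the descriptors are.\r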
\r
- template <typename T> struct TrainCollection\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ __device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ float& bestDistance, int& bestTrainIdx, int& bestImgIdx)\r
{\r
- TrainCollection(const DevMem2D_<T>* trainCollection_, int nImg_, int desclen_) : \r
- trainCollection(trainCollection_), nImg(nImg_), desclen(desclen_)\r
+ for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
{\r
- }\r
+ Dist dist;\r
\r
- template <typename Dist, typename VecDiff, typename Mask>\r
- __device__ void loop(int queryIdx, Mask& m, const VecDiff& vecDiff, \r
- typename Dist::result_type& myDist, int2& myIdx, typename Dist::result_type* sdiff_row) const\r
- {\r
- for (int imgIdx = 0; imgIdx < nImg; ++imgIdx)\r
+ #pragma unroll\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+\r
+ if (loadX < query.cols)\r
+ {\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
+ }\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
+\r
+ __syncthreads();\r
+ }\r
+\r
+ typename Dist::result_type distVal = dist;\r
+\r
+ const int trainIdx = t * BLOCK_SIZE + threadIdx.x;\r
+\r
+ if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))\r
{\r
- const DevMem2D_<T> train = trainCollection[imgIdx];\r
- m.next();\r
- matchDescs<Dist>(queryIdx, imgIdx, train, m, vecDiff, myDist, myIdx, sdiff_row);\r
+ bestImgIdx = imgIdx;\r
+ bestDistance = distVal;\r
+ bestTrainIdx = trainIdx;\r
}\r
}\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ float myBestDistance = numeric_limits<float>::max();\r
+ int myBestTrainIdx = -1;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+ \r
+ loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);\r
+\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
+ {\r
+ bestTrainIdx[queryIdx] = myBestTrainIdx;\r
+ bestDistance[queryIdx] = myBestDistance;\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- __device__ __forceinline__ int desc_len() const\r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, \r
+ int* bestTrainIdx, int* bestImgIdx, float* bestDistance)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ float myBestDistance = numeric_limits<float>::max();\r
+ int myBestTrainIdx = -1;\r
+ int myBestImgIdx = -1;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ Mask m = mask;\r
+ \r
+ for (int imgIdx = 0; imgIdx < n; ++imgIdx)\r
{\r
- return desclen;\r
+ const DevMem2D_<T> train = trains[imgIdx];\r
+ m.next();\r
+ loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);\r
}\r
\r
- static __device__ __forceinline__ void storeResult(float* distance, int* trainIdx, int* imgIdx, \r
- float myDist, const int2& myIdx, int queryIdx)\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+ int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
{\r
- trainIdx[queryIdx] = myIdx.x;\r
- imgIdx[queryIdx] = myIdx.y;\r
- distance[queryIdx] = myDist;\r
+ bestTrainIdx[queryIdx] = myBestTrainIdx;\r
+ bestImgIdx[queryIdx] = myBestImgIdx;\r
+ bestDistance[queryIdx] = myBestDistance;\r
}\r
+ }\r
+\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
+\r
+ const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);\r
+ cudaSafeCall( cudaGetLastError() );\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
\r
- const DevMem2D_<T>* trainCollection;\r
- const int nImg;\r
- const int desclen;\r
- };\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Match\r
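+ //\r
+ // Fully generic fallback: the descriptor-length loop is dynamic (no\r
+ // unrolling), used by the dispatcher when query.cols exceeds 1024.\r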
\r
- template <typename VecDiff, typename Dist, typename T, typename Train, typename Mask>\r
- __device__ void distanceCalcLoop(const PtrStep_<T>& query, const Train& train, const Mask& mask, int queryIdx, \r
- typename Dist::result_type& myDist, int2& myIdx, typename Dist::result_type* smem)\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ __device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ float& bestDistance, int& bestTrainIdx, int& bestImgIdx)\r
{\r
- const VecDiff vecDiff(query.ptr(queryIdx), train.desc_len(), (typename Dist::value_type*)smem, threadIdx.y * blockDim.x + threadIdx.x, threadIdx.x);\r
- \r
- typename Dist::result_type* sdiff_row = smem + blockDim.x * threadIdx.y;\r
+ for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
+ {\r
+ Dist dist;\r
\r
- Mask m = mask;\r
+ for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
+\r
+ if (loadX < query.cols)\r
+ {\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
+ }\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
+\r
+ __syncthreads();\r
+ }\r
\r
- myIdx.x = -1;\r
- myIdx.y = -1;\r
- myDist = numeric_limits<typename Dist::result_type>::max();\r
+ typename Dist::result_type distVal = dist;\r
\r
- train.template loop<Dist>(queryIdx, m, vecDiff, myDist, myIdx, sdiff_row);\r
+ const int trainIdx = t * BLOCK_SIZE + threadIdx.x;\r
+\r
+ if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))\r
+ {\r
+ bestImgIdx = imgIdx;\r
+ bestDistance = distVal;\r
+ bestTrainIdx = trainIdx;\r
+ }\r
+ }\r
}\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename VecDiff, typename Dist, typename T, typename Train, typename Mask>\r
- __global__ void match(const PtrStep_<T> query, const Train train, const Mask mask, int* trainIdx, int* imgIdx, float* distance)\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)\r
{\r
- __shared__ typename Dist::result_type smem[BLOCK_DIM_X * BLOCK_DIM_Y]; \r
- \r
- const int queryIdx = blockIdx.x;\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ float myBestDistance = numeric_limits<float>::max();\r
+ int myBestTrainIdx = -1;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- int2 myIdx;\r
- typename Dist::result_type myDist;\r
+ loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);\r
\r
- distanceCalcLoop<VecDiff, Dist>(query, train, mask, queryIdx, myDist, myIdx, smem);\r
__syncthreads();\r
\r
- typename Dist::result_type* smin = smem;\r
- int2* sIdx = (int2*)(smin + BLOCK_DIM_Y);\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- findBestMatch<BLOCK_DIM_Y>(myDist, myIdx, smin, sIdx);\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);\r
\r
- if (threadIdx.x == 0 && threadIdx.y == 0)\r
- Train::storeResult(distance, trainIdx, imgIdx, myDist, myIdx, queryIdx);\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
+ {\r
+ bestTrainIdx[queryIdx] = myBestTrainIdx;\r
+ bestDistance[queryIdx] = myBestDistance;\r
+ }\r
}\r
\r
- ///////////////////////////////////////////////////////////////////////////////\r
- // Match kernel caller\r
-\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Train, typename Mask>\r
- void matchSimple_caller(const DevMem2D_<T>& query, const Train& train, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
- cudaStream_t stream)\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ cudaStream_t stream)\r
{\r
- StaticAssert<BLOCK_DIM_Y <= 64>::check(); // blockDimY vals must reduce by warp\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
\r
- const dim3 grid(query.rows, 1, 1);\r
- const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- match<BLOCK_DIM_X, BLOCK_DIM_Y, VecDiffGlobal<BLOCK_DIM_X, T>, Dist, T>\r
- <<<grid, threads, 0, stream>>>(query, train, mask, trainIdx.data, imgIdx.data, distance.data);\r
+ match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T, typename Train, typename Mask>\r
- void matchCached_caller(const DevMem2D_<T>& query, const Train& train, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
- cudaStream_t stream)\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, \r
+ int* bestTrainIdx, int* bestImgIdx, float* bestDistance)\r
+ {\r
+ extern __shared__ int smem[];\r
+\r
+ const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;\r
+\r
+ float myBestDistance = numeric_limits<float>::max();\r
+ int myBestTrainIdx = -1;\r
+ int myBestImgIdx = -1;\r
+\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ Mask m = mask;\r
+ for (int imgIdx = 0; imgIdx < n; ++imgIdx)\r
+ {\r
+ const DevMem2D_<T> train = trains[imgIdx];\r
+ m.next();\r
+ loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);\r
+ }\r
+\r
+ __syncthreads();\r
+\r
+ float* s_distance = (float*)(smem);\r
+ int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
+ int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);\r
+\r
+ findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);\r
+\r
+ if (queryIdx < query.rows && threadIdx.x == 0)\r
+ {\r
+ bestTrainIdx[queryIdx] = myBestTrainIdx;\r
+ bestImgIdx[queryIdx] = myBestImgIdx;\r
+ bestDistance[queryIdx] = myBestDistance;\r
+ }\r
+ }\r
+\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ cudaStream_t stream)\r
{\r
- StaticAssert<BLOCK_DIM_Y <= 64>::check(); // blockDimY vals must reduce by warp\r
- StaticAssert<BLOCK_DIM_X * BLOCK_DIM_Y >= MAX_LEN>::check(); // block size must be greter than descriptors length\r
- StaticAssert<MAX_LEN % BLOCK_DIM_X == 0>::check(); // max descriptors length must divide to blockDimX\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(query.rows, BLOCK_SIZE));\r
\r
- const dim3 grid(query.rows, 1, 1);\r
- const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);\r
+ const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- match<BLOCK_DIM_X, BLOCK_DIM_Y, VecDiffCachedRegister<BLOCK_DIM_X, MAX_LEN, LEN_EQ_MAX_LEN, typename Dist::value_type>, Dist, T>\r
- <<<grid, threads, 0, stream>>>(query, train, mask, trainIdx.data, imgIdx.data, distance.data);\r
+ match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
- \r
+\r
///////////////////////////////////////////////////////////////////////////////\r
- // Match Dispatcher\r
+ // Match dispatcher\r
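+ //\r
+ // Picks a kernel by descriptor length: the shared-memory-cached unrolled\r
+ // kernels up to 128 elements, plain unrolled kernels up to 1024, and the\r
+ // generic match kernel beyond that. The cc (compute capability) argument\r
+ // appears to be kept only for interface compatibility; this dispatch no\r
+ // longer branches on it.\r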
\r
- template <typename Dist, typename T, typename Train, typename Mask>\r
- void matchDispatcher(const DevMem2D_<T>& query, const Train& train, const Mask& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,\r
- int cc, cudaStream_t stream)\r
+ template <typename Dist, typename T, typename Mask> \r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream)\r
{\r
- if (query.cols < 64)\r
+ if (query.cols <= 64)\r
{\r
- matchCached_caller<16, 16, 64, false, Dist>(\r
- query, train, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), \r
- stream);\r
+ matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);\r
}\r
- else if (query.cols == 64)\r
+ else if (query.cols <= 128)\r
{\r
- matchCached_caller<16, 16, 64, true, Dist>(\r
- query, train, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), \r
- stream);\r
+ matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);\r
}\r
- else if (query.cols < 128)\r
+ else if (query.cols <= 256)\r
{\r
- matchCached_caller<16, 16, 128, false, Dist>(\r
- query, train, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), \r
- stream);\r
+ matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);\r
}\r
- else if (query.cols == 128 && cc >= 12)\r
+ else if (query.cols <= 512)\r
+ { \r
+ matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);\r
+ }\r
+ else if (query.cols <= 1024)\r
+ { \r
+ matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);\r
+ }\r
+ else\r
{\r
- matchCached_caller<16, 16, 128, true, Dist>(\r
- query, train, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), \r
- stream);\r
+ match<16, Dist>(query, train, mask, trainIdx, distance, stream);\r
}\r
- else if (query.cols < 256 && cc >= 12)\r
+ }\r
+\r
+ template <typename Dist, typename T, typename Mask> \r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream)\r
+ {\r
+ if (query.cols <= 64)\r
{\r
- matchCached_caller<16, 16, 256, false, Dist>(\r
- query, train, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), \r
- stream);\r
+ matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
}\r
- else if (query.cols == 256 && cc >= 12)\r
+ else if (query.cols <= 128)\r
{\r
- matchCached_caller<16, 16, 256, true, Dist>(\r
- query, train, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), \r
- stream);\r
+ matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
+ }\r
+ else if (query.cols <= 256)\r
+ {\r
+ matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
+ }\r
+ else if (query.cols <= 512)\r
+ { \r
+ matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
+ }\r
+ else if (query.cols <= 1024)\r
+ { \r
+ matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
}\r
else\r
{\r
- matchSimple_caller<16, 16, Dist>(\r
- query, train, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), \r
- stream);\r
+ match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
}\r
}\r
- \r
+\r
///////////////////////////////////////////////////////////////////////////////\r
// Match caller\r
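+ //\r
+ // Exported entry points: each one casts the untyped DevMem2D arguments to the\r
+ // element type T and selects SingleMask / MaskCollection when mask data is\r
+ // present, or WithOutMask otherwise.\r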
\r
- template <typename T> void matchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train_, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance,\r
- int cc, cudaStream_t stream)\r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
+ int cc, cudaStream_t stream)\r
{\r
- SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));\r
if (mask.data)\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, SingleMask(mask), trainIdx, DevMem2D(), distance, cc, stream);\r
+ {\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask), \r
+ trainIdx, distance, \r
+ cc, stream);\r
+ }\r
else\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, DevMem2D(), distance, cc, stream);\r
+ {\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(), \r
+ trainIdx, distance, \r
+ cc, stream);\r
+ }\r
}\r
\r
- template void matchSingleL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchSingleL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchSingleL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchSingleL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchSingleL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchSingleL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train_, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream)\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream)\r
{\r
- SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));\r
if (mask.data)\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, SingleMask(mask), trainIdx, DevMem2D(), distance, cc, stream);\r
+ {\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask), \r
+ trainIdx, distance, \r
+ cc, stream);\r
+ }\r
else\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, DevMem2D(), distance, cc, stream);\r
+ {\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(), \r
+ trainIdx, distance, \r
+ cc, stream);\r
+ }\r
}\r
\r
- //template void matchSingleL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchSingleL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchSingleL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchSingleL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchSingleL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchSingleL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train_, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream)\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream)\r
{\r
- SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));\r
if (mask.data)\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, SingleMask(mask), trainIdx, DevMem2D(), distance, cc, stream);\r
+ {\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask), \r
+ trainIdx, distance, \r
+ cc, stream);\r
+ }\r
else\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, DevMem2D(), distance, cc, stream);\r
+ {\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(), \r
+ trainIdx, distance, \r
+ cc, stream);\r
+ }\r
}\r
\r
- template void matchSingleHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchSingleHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchSingleHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchSingleHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchSingleHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream)\r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream)\r
{\r
- TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);\r
- if (maskCollection.data)\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, MaskCollection(maskCollection.data), trainIdx, imgIdx, distance, cc, stream);\r
+ if (masks.data)\r
+ {\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), \r
+ trainIdx, imgIdx, distance, \r
+ cc, stream);\r
+ }\r
else\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);\r
+ {\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), \r
+ trainIdx, imgIdx, distance, \r
+ cc, stream);\r
+ }\r
}\r
\r
- template void matchCollectionL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchCollectionL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchCollectionL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchCollectionL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchCollectionL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchCollectionL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL1_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<float >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream)\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream)\r
{\r
- TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);\r
- if (maskCollection.data)\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, MaskCollection(maskCollection.data), trainIdx, imgIdx, distance, cc, stream);\r
+ if (masks.data)\r
+ {\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), \r
+ trainIdx, imgIdx, distance, \r
+ cc, stream);\r
+ }\r
else\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);\r
+ {\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), \r
+ trainIdx, imgIdx, distance, \r
+ cc, stream);\r
+ }\r
}\r
\r
- //template void matchCollectionL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchCollectionL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchCollectionL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchCollectionL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchCollectionL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchCollectionL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchL2_gpu<float >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, \r
- int cc, cudaStream_t stream)\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ int cc, cudaStream_t stream)\r
{\r
- TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);\r
- if (maskCollection.data)\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, MaskCollection(maskCollection.data), trainIdx, imgIdx, distance, cc, stream);\r
+ if (masks.data)\r
+ {\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), \r
+ trainIdx, imgIdx, distance, \r
+ cc, stream);\r
+ }\r
else\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);\r
+ {\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), \r
+ trainIdx, imgIdx, distance, \r
+ cc, stream);\r
+ }\r
}\r
\r
- template void matchCollectionHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchCollectionHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchCollectionHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- //template void matchCollectionHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
- template void matchCollectionHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
}}}\r
\r
namespace cv { namespace gpu { namespace bf_radius_match\r
{\r
- template <typename T> struct SingleTrain\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Match Unrolled\r
+\r
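+ // Each block computes a BLOCK_SIZE x BLOCK_SIZE tile of query/train distances.\r
+ // Both descriptor tiles are staged through shared memory in BLOCK_SIZE-wide\r
+ // chunks (the train tile is stored transposed), and the chunk loop is fully\r
+ // unrolled because MAX_DESC_LEN is a compile-time upper bound on the\r
+ // descriptor length. Shared memory is allocated as ints; every Dist::value_type\r
+ // instantiated here fits in 4 bytes. Out-of-range rows are clamped with min()\r
+ // so loads stay in bounds, and their results are discarded by the final range\r
+ // check. A distance below maxDistance is appended to the query's output row at\r
+ // the slot returned by atomicInc(nMatches + queryIdx); slots beyond maxCount\r
+ // are counted but not stored.\r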
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, bool SAVE_IMG_IDX, typename Dist, typename T, typename Mask>\r
+ __global__ void matchUnrolled(const DevMem2D_<T> query, int imgIdx, const DevMem2D_<T> train, float maxDistance, const Mask mask,\r
+ PtrStepi bestTrainIdx, PtrStepi bestImgIdx, PtrStepf bestDistance, unsigned int* nMatches, int maxCount)\r
{\r
- enum {USE_IMG_IDX = 0};\r
+ #if __CUDA_ARCH__ >= 110\r
\r
- explicit SingleTrain(const DevMem2D_<T>& train_) : train(train_)\r
- {\r
- }\r
+ extern __shared__ int smem[];\r
\r
- static __device__ __forceinline__ void store(const int* s_trainIdx, const int* s_imgIdx, const float* s_dist, unsigned int& s_count, int& s_globInd, \r
- int* trainIdx, int* imgIdx, float* distance, int maxCount)\r
- {\r
- const int tid = threadIdx.y * blockDim.x + threadIdx.x;\r
+ const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;\r
+ const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;\r
\r
- if (tid < s_count && s_globInd + tid < maxCount)\r
- {\r
- trainIdx[s_globInd + tid] = s_trainIdx[tid];\r
- distance[s_globInd + tid] = s_dist[tid];\r
- }\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- if (tid == 0)\r
- {\r
- s_globInd += s_count;\r
- s_count = 0;\r
- }\r
- }\r
+ Dist dist;\r
\r
- template <int BLOCK_STACK, typename Dist, typename VecDiff, typename Mask>\r
- __device__ __forceinline__ void loop(float maxDistance, Mask& mask, const VecDiff& vecDiff, \r
- int* s_trainIdx, int* s_imgIdx, float* s_dist, unsigned int& s_count, int& s_globInd, \r
- int* trainIdxRow, int* imgIdxRow, float* distanceRow, int maxCount, \r
- typename Dist::result_type* s_diffRow) const\r
+ #pragma unroll\r
+ for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)\r
{\r
- #if __CUDA_ARCH__ >= 120\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
\r
- for (int i = 0; i < train.rows; i += blockDim.y)\r
+ if (loadX < query.cols)\r
{\r
- int trainIdx = i + threadIdx.y;\r
-\r
- if (trainIdx < train.rows && mask(blockIdx.x, trainIdx))\r
- {\r
- Dist dist;\r
- \r
- vecDiff.calc(train.ptr(trainIdx), train.cols, dist, s_diffRow, threadIdx.x);\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
+ }\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
+ }\r
\r
- const typename Dist::result_type val = dist;\r
+ __syncthreads();\r
\r
- if (threadIdx.x == 0 && val < maxDistance)\r
- {\r
- unsigned int ind = atomicInc(&s_count, (unsigned int) -1);\r
- s_trainIdx[ind] = trainIdx;\r
- s_dist[ind] = val;\r
- }\r
- }\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
\r
- __syncthreads();\r
+ __syncthreads();\r
+ }\r
\r
- if (s_count >= BLOCK_STACK - blockDim.y)\r
- store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);\r
+ float distVal = (typename Dist::result_type)dist;\r
\r
- __syncthreads();\r
+ if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx) && distVal < maxDistance)\r
+ {\r
+ unsigned int ind = atomicInc(nMatches + queryIdx, (unsigned int) -1);\r
+ if (ind < maxCount)\r
+ {\r
+ bestTrainIdx.ptr(queryIdx)[ind] = trainIdx;\r
+ if (SAVE_IMG_IDX) bestImgIdx.ptr(queryIdx)[ind] = imgIdx;\r
+ bestDistance.ptr(queryIdx)[ind] = distVal;\r
}\r
+ }\r
\r
- store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);\r
+ #endif\r
+ }\r
\r
- #endif\r
- }\r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, cudaStream_t stream)\r
+ {\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));\r
\r
- __device__ __forceinline__ int descLen() const\r
- {\r
- return train.cols;\r
- }\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
+\r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask, \r
+ trainIdx, PtrStepi(), distance, nMatches.data, trainIdx.cols);\r
+ cudaSafeCall( cudaGetLastError() );\r
\r
- const DevMem2D_<T> train;\r
- };\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ } \r
\r
- template <typename T> struct TrainCollection\r
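+ // Multi-image variant: the kernel is launched once per train image with the\r
+ // image index, and SAVE_IMG_IDX = true so each match also records which image\r
+ // it came from. Every launch shares the same per-query nMatches counters, so\r
+ // matches from all images accumulate into a single output row per query.\r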
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T> \r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ cudaStream_t stream)\r
{\r
- enum {USE_IMG_IDX = 1};\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
\r
- TrainCollection(const DevMem2D_<T>* trainCollection_, int nImg_, int desclen_) : \r
- trainCollection(trainCollection_), nImg(nImg_), desclen(desclen_)\r
- {\r
- }\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- static __device__ __forceinline__ void store(const int* s_trainIdx, const int* s_imgIdx, const float* s_dist, unsigned int& s_count, int& s_globInd, \r
- int* trainIdx, int* imgIdx, float* distance, int maxCount)\r
+ for (int i = 0; i < n; ++i)\r
{\r
- const int tid = threadIdx.y * blockDim.x + threadIdx.x;\r
+ const DevMem2D_<T> train = trains[i];\r
\r
- if (tid < s_count && s_globInd + tid < maxCount)\r
+ const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));\r
+\r
+ if (masks != 0 && masks[i].data)\r
{\r
- trainIdx[s_globInd + tid] = s_trainIdx[tid];\r
- imgIdx[s_globInd + tid] = s_imgIdx[tid];\r
- distance[s_globInd + tid] = s_dist[tid];\r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]), \r
+ trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
}\r
-\r
- if (tid == 0)\r
+ else\r
{\r
- s_globInd += s_count;\r
- s_count = 0;\r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(), \r
+ trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
}\r
+ cudaSafeCall( cudaGetLastError() );\r
}\r
\r
- template <int BLOCK_STACK, typename Dist, typename VecDiff, typename Mask>\r
- __device__ void loop(float maxDistance, Mask& mask, const VecDiff& vecDiff, \r
- int* s_trainIdx, int* s_imgIdx, float* s_dist, unsigned int& s_count, int& s_globInd, \r
- int* trainIdxRow, int* imgIdxRow, float* distanceRow, int maxCount, \r
- typename Dist::result_type* s_diffRow) const\r
- {\r
- #if __CUDA_ARCH__ >= 120\r
-\r
- for (int imgIdx = 0; imgIdx < nImg; ++imgIdx)\r
- {\r
- const DevMem2D_<T> train = trainCollection[imgIdx];\r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
+ }\r
\r
- mask.next();\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Match\r
\r
- for (int i = 0; i < train.rows; i += blockDim.y)\r
- {\r
- int trainIdx = i + threadIdx.y;\r
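+ // Generic fallback kernel: same tile scheme as matchUnrolled, but the number\r
+ // of BLOCK_SIZE-wide chunks is computed at run time from query.cols, so the\r
+ // outer loop cannot be fully unrolled.\r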
+ template <int BLOCK_SIZE, bool SAVE_IMG_IDX, typename Dist, typename T, typename Mask>\r
+ __global__ void match(const DevMem2D_<T> query, int imgIdx, const DevMem2D_<T> train, float maxDistance, const Mask mask,\r
+ PtrStepi bestTrainIdx, PtrStepi bestImgIdx, PtrStepf bestDistance, unsigned int* nMatches, int maxCount)\r
+ {\r
+ #if __CUDA_ARCH__ >= 110\r
\r
- if (trainIdx < train.rows && mask(blockIdx.x, trainIdx))\r
- {\r
- Dist dist;\r
- \r
- vecDiff.calc(train.ptr(trainIdx), desclen, dist, s_diffRow, threadIdx.x);\r
+ extern __shared__ int smem[];\r
\r
- const typename Dist::result_type val = dist;\r
+ const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;\r
+ const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;\r
\r
- if (threadIdx.x == 0 && val < maxDistance)\r
- {\r
- unsigned int ind = atomicInc(&s_count, (unsigned int) -1);\r
- s_trainIdx[ind] = trainIdx;\r
- s_imgIdx[ind] = imgIdx;\r
- s_dist[ind] = val;\r
- }\r
- }\r
+ typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
+ typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
- __syncthreads();\r
+ Dist dist;\r
\r
- if (s_count >= BLOCK_STACK - blockDim.y)\r
- store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);\r
+ for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)\r
+ {\r
+ const int loadX = threadIdx.x + i * BLOCK_SIZE;\r
\r
- __syncthreads();\r
- }\r
+ if (loadX < query.cols)\r
+ {\r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
+ }\r
+ else\r
+ { \r
+ s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
+ s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
}\r
\r
- store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);\r
+ __syncthreads();\r
\r
- #endif\r
- }\r
+ #pragma unroll\r
+ for (int j = 0; j < BLOCK_SIZE; ++j)\r
+ dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);\r
\r
- __device__ __forceinline__ int descLen() const\r
- {\r
- return desclen;\r
+ __syncthreads();\r
}\r
\r
- const DevMem2D_<T>* trainCollection;\r
- const int nImg;\r
- const int desclen;\r
- };\r
-\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename VecDiff, typename Dist, typename T, typename Train, typename Mask>\r
- __global__ void radiusMatch(const PtrStep_<T> query, const Train train, float maxDistance, const Mask mask, \r
- PtrStepi trainIdx, PtrStepi imgIdx, PtrStepf distance, int* nMatches, int maxCount)\r
- {\r
- typedef typename Dist::result_type result_type;\r
- typedef typename Dist::value_type value_type;\r
-\r
- __shared__ result_type s_mem[BLOCK_DIM_X * BLOCK_DIM_Y];\r
+ float distVal = (typename Dist::result_type)dist;\r
\r
- __shared__ int s_trainIdx[BLOCK_STACK];\r
- __shared__ int s_imgIdx[Train::USE_IMG_IDX ? BLOCK_STACK : 1];\r
- __shared__ float s_dist[BLOCK_STACK];\r
- __shared__ unsigned int s_count;\r
-\r
- __shared__ int s_globInd;\r
-\r
- if (threadIdx.x == 0 && threadIdx.y == 0)\r
+ if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx) && distVal < maxDistance)\r
{\r
- s_count = 0;\r
- s_globInd = 0;\r
+ unsigned int ind = atomicInc(nMatches + queryIdx, (unsigned int) -1);\r
+ if (ind < maxCount)\r
+ {\r
+ bestTrainIdx.ptr(queryIdx)[ind] = trainIdx;\r
+ if (SAVE_IMG_IDX) bestImgIdx.ptr(queryIdx)[ind] = imgIdx;\r
+ bestDistance.ptr(queryIdx)[ind] = distVal;\r
+ }\r
}\r
- __syncthreads();\r
-\r
- const VecDiff vecDiff(query.ptr(blockIdx.x), train.descLen(), (typename Dist::value_type*)s_mem, threadIdx.y * BLOCK_DIM_X + threadIdx.x, threadIdx.x);\r
-\r
- Mask m = mask;\r
\r
- train.template loop<BLOCK_STACK, Dist>(maxDistance, m, vecDiff, \r
- s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, \r
- trainIdx.ptr(blockIdx.x), imgIdx.ptr(blockIdx.x), distance.ptr(blockIdx.x), maxCount, \r
- s_mem + BLOCK_DIM_X * threadIdx.y);\r
-\r
- if (threadIdx.x == 0 && threadIdx.y == 0)\r
- nMatches[blockIdx.x] = s_globInd;\r
+ #endif\r
}\r
\r
- ///////////////////////////////////////////////////////////////////////////////\r
- // Radius Match kernel caller\r
-\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename Dist, typename T, typename Train, typename Mask>\r
- void radiusMatchSimple_caller(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int* nMatches,\r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
cudaStream_t stream)\r
{\r
- StaticAssert<BLOCK_STACK >= BLOCK_DIM_Y>::check();\r
- StaticAssert<BLOCK_STACK <= BLOCK_DIM_X * BLOCK_DIM_Y>::check();\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
+ const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));\r
\r
- const dim3 grid(query.rows, 1, 1);\r
- const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_STACK, VecDiffGlobal<BLOCK_DIM_X, T>, Dist, T>\r
- <<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, imgIdx, distance, nMatches, trainIdx.cols);\r
+ match<BLOCK_SIZE, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask, \r
+ trainIdx, PtrStepi(), distance, nMatches.data, trainIdx.cols);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T, typename Train, typename Mask>\r
- void radiusMatchCached_caller(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int* nMatches, \r
+ template <int BLOCK_SIZE, typename Dist, typename T> \r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
cudaStream_t stream)\r
{\r
- StaticAssert<BLOCK_STACK >= BLOCK_DIM_Y>::check();\r
- StaticAssert<BLOCK_STACK <= BLOCK_DIM_X * BLOCK_DIM_Y>::check();\r
- StaticAssert<BLOCK_DIM_X * BLOCK_DIM_Y >= MAX_LEN>::check();\r
- StaticAssert<MAX_LEN % BLOCK_DIM_X == 0>::check();\r
+ const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
\r
- const dim3 grid(query.rows, 1, 1);\r
- const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);\r
+ const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_STACK, VecDiffCachedRegister<BLOCK_DIM_X, MAX_LEN, LEN_EQ_MAX_LEN, typename Dist::value_type>, Dist, T>\r
- <<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, imgIdx, distance, nMatches, trainIdx.cols);\r
- cudaSafeCall( cudaGetLastError() );\r
+ for (int i = 0; i < n; ++i)\r
+ {\r
+ const DevMem2D_<T> train = trains[i];\r
+\r
+ const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));\r
+\r
+ if (masks != 0 && masks[i].data)\r
+ {\r
+ match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]), \r
+ trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
+ }\r
+ else\r
+ {\r
+ match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(), \r
+ trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
+ }\r
+ cudaSafeCall( cudaGetLastError() );\r
+ }\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
///////////////////////////////////////////////////////////////////////////////\r
- // Radius Match Dispatcher\r
- \r
- template <typename Dist, typename T, typename Train, typename Mask>\r
- void radiusMatchDispatcher(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream)\r
+ // Match dispatcher\r
+\r
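+ // Selects a kernel by descriptor length: queries of up to 1024 elements use\r
+ // the unrolled kernel with the smallest sufficient MAX_DESC_LEN; anything\r
+ // longer falls back to the generic kernel with the run-time chunk loop.\r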
+ template <typename Dist, typename T, typename Mask> \r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
{\r
- if (query.cols < 64)\r
+ if (query.cols <= 64)\r
{\r
- radiusMatchCached_caller<16, 16, 64, 64, false, Dist>(\r
- query, train, maxDistance, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,\r
- stream);\r
+ matchUnrolled<16, 64, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
}\r
- else if (query.cols == 64)\r
+ else if (query.cols <= 128)\r
{\r
- radiusMatchCached_caller<16, 16, 64, 64, true, Dist>(\r
- query, train, maxDistance, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,\r
- stream);\r
+ matchUnrolled<16, 128, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
}\r
- else if (query.cols < 128)\r
+ else if (query.cols <= 256)\r
{\r
- radiusMatchCached_caller<16, 16, 64, 128, false, Dist>(\r
- query, train, maxDistance, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,\r
- stream);\r
+ matchUnrolled<16, 256, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
+ }\r
+ else if (query.cols <= 512)\r
+ { \r
+ matchUnrolled<16, 512, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
+ }\r
+ else if (query.cols <= 1024)\r
+ { \r
+ matchUnrolled<16, 1024, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
}\r
- else if (query.cols == 128)\r
+ else\r
{\r
- radiusMatchCached_caller<16, 16, 64, 128, true, Dist>(\r
- query, train, maxDistance, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,\r
- stream);\r
+ match<16, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
}\r
- else if (query.cols < 256)\r
+ }\r
+\r
+ template <typename Dist, typename T> \r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
+ {\r
+ if (query.cols <= 64)\r
+ {\r
+ matchUnrolled<16, 64, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
+ }\r
+ else if (query.cols <= 128)\r
{\r
- radiusMatchCached_caller<16, 16, 64, 256, false, Dist>(\r
- query, train, maxDistance, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,\r
- stream);\r
+ matchUnrolled<16, 128, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
}\r
- else if (query.cols == 256)\r
+ else if (query.cols <= 256)\r
{\r
- radiusMatchCached_caller<16, 16, 64, 256, true, Dist>(\r
- query, train, maxDistance, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, \r
- stream);\r
+ matchUnrolled<16, 256, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
+ }\r
+ else if (query.cols <= 512)\r
+ { \r
+ matchUnrolled<16, 512, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
+ }\r
+ else if (query.cols <= 1024)\r
+ { \r
+ matchUnrolled<16, 1024, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
}\r
else\r
{\r
- radiusMatchSimple_caller<16, 16, 64, Dist>(\r
- query, train, maxDistance, mask, \r
- static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,\r
- stream);\r
+ match<16, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
}\r
- } \r
+ } \r
\r
///////////////////////////////////////////////////////////////////////////////\r
// Radius Match caller\r
\r
- template <typename T> void radiusMatchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream)\r
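+ // Typed entry points: re-type the untyped DevMem2D buffers, choose the\r
+ // distance functor, and wrap the optional mask in SingleMask/WithOutMask\r
+ // before calling the dispatcher.\r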
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
{\r
- SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));\r
-\r
if (mask.data)\r
{\r
- radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask), \r
- trainIdx, DevMem2D(), distance, nMatches, \r
- stream);\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), \r
+ trainIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
else\r
{\r
- radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), \r
- trainIdx, DevMem2D(), distance, nMatches, \r
- stream);\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), \r
+ trainIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
}\r
\r
- template void radiusMatchSingleL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchSingleL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchSingleL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchSingleL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchSingleL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchSingleL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
+ template void matchL1_gpu<uchar >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL1_gpu<schar >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<ushort>(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<short >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<int >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<float >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void radiusMatchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream)\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
{\r
- SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));\r
-\r
if (mask.data)\r
{\r
- radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask), \r
- trainIdx, DevMem2D(), distance, nMatches, \r
- stream);\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), \r
+ trainIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
else\r
{\r
- radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), \r
- trainIdx, DevMem2D(), distance, nMatches, \r
- stream);\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), \r
+ trainIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
}\r
\r
- //template void radiusMatchSingleL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchSingleL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchSingleL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchSingleL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchSingleL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchSingleL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
+ //template void matchL2_gpu<uchar >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<schar >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<ushort>(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<short >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<int >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL2_gpu<float >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void radiusMatchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask, \r
- const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream)\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, \r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
{\r
- SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));\r
-\r
if (mask.data)\r
{\r
- radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask), \r
- trainIdx, DevMem2D(), distance, nMatches, \r
- stream);\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), \r
+ trainIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
else\r
{\r
- radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), \r
- trainIdx, DevMem2D(), distance, nMatches, \r
- stream);\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), \r
+ trainIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
}\r
\r
- template void radiusMatchSingleHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchSingleHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchSingleHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchSingleHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchSingleHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
+ template void matchHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<schar >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<short >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<int >(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void radiusMatchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream)\r
+ template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
{\r
- TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);\r
-\r
- if (maskCollection.data)\r
- {\r
- radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data), \r
- trainIdx, imgIdx, distance, nMatches, \r
- stream);\r
- }\r
- else\r
- {\r
- radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), \r
- trainIdx, imgIdx, distance, nMatches, \r
- stream);\r
- }\r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks, \r
+ trainIdx, imgIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
\r
- template void radiusMatchCollectionL1_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchCollectionL1_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchCollectionL1_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchCollectionL1_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchCollectionL1_gpu<int >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchCollectionL1_gpu<float >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
+ template void matchL1_gpu<uchar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL1_gpu<schar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<ushort>(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<short >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<int >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL1_gpu<float >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void radiusMatchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream)\r
+ template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
{\r
- TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);\r
-\r
- if (maskCollection.data)\r
- {\r
- radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data), \r
- trainIdx, imgIdx, distance, nMatches, \r
- stream);\r
- }\r
- else\r
- {\r
- radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), \r
- trainIdx, imgIdx, distance, nMatches, \r
- stream);\r
- }\r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks, \r
+ trainIdx, imgIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
\r
- //template void radiusMatchCollectionL2_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchCollectionL2_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchCollectionL2_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchCollectionL2_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchCollectionL2_gpu<int >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchCollectionL2_gpu<float >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
+ //template void matchL2_gpu<uchar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<schar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<ushort>(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<short >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchL2_gpu<int >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchL2_gpu<float >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void radiusMatchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, \r
- const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, \r
- cudaStream_t stream)\r
+ template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, \r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ int cc, cudaStream_t stream)\r
{\r
- TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);\r
-\r
- if (maskCollection.data)\r
- {\r
- radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data), \r
- trainIdx, imgIdx, distance, nMatches, \r
- stream);\r
- }\r
- else\r
- {\r
- radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), \r
- trainIdx, imgIdx, distance, nMatches, \r
- stream);\r
- }\r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks, \r
+ trainIdx, imgIdx, distance, nMatches, \r
+ cc, stream);\r
}\r
\r
- template void radiusMatchCollectionHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchCollectionHamming_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchCollectionHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- //template void radiusMatchCollectionHamming_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
- template void radiusMatchCollectionHamming_gpu<int >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);\r
+ template void matchHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<schar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ //template void matchHamming_gpu<short >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
+ template void matchHamming_gpu<int >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
}}}\r
{\r
namespace detail\r
{\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // Reduction\r
+\r
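+ // Warp-level reductions: the specializations below omit __syncthreads() and\r
+ // instead rely on the threads of a warp executing in lock-step over volatile\r
+ // shared memory (the classic warp-synchronous idiom on the architectures this\r
+ // code targets).\r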
template <int n> struct WarpReductor\r
{\r
template <typename T, typename Op> static __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)\r
}\r
};\r
\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // PredValWarpReductor\r
\r
template <int n> struct PredValWarpReductor;\r
template <> struct PredValWarpReductor<64>\r
}\r
}\r
};\r
+\r
+ ///////////////////////////////////////////////////////////////////////////////\r
+ // PredVal2WarpReductor\r
+\r
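+ // Reduces a key together with two associated values: at each step the\r
+ // predicate compares the partner lane's key with the thread's current best,\r
+ // and when it wins, the key and both values are copied over as a unit. The\r
+ // template parameter n is the number of participating threads.\r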
+ template <int n> struct PredVal2WarpReductor;\r
+ template <> struct PredVal2WarpReductor<64>\r
+ {\r
+ template <typename T, typename V1, typename V2, typename Pred> \r
+ static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
+ {\r
+ if (tid < 32)\r
+ {\r
+ myData = sdata[tid];\r
+ myVal1 = sval1[tid];\r
+ myVal2 = sval2[tid];\r
+\r
+ T reg = sdata[tid + 32];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 32];\r
+ sval2[tid] = myVal2 = sval2[tid + 32];\r
+ }\r
+\r
+ reg = sdata[tid + 16];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 16];\r
+ sval2[tid] = myVal2 = sval2[tid + 16];\r
+ }\r
+\r
+ reg = sdata[tid + 8];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 8];\r
+ sval2[tid] = myVal2 = sval2[tid + 8];\r
+ }\r
+\r
+ reg = sdata[tid + 4];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 4];\r
+ sval2[tid] = myVal2 = sval2[tid + 4];\r
+ }\r
+ \r
+ reg = sdata[tid + 2];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 2];\r
+ sval2[tid] = myVal2 = sval2[tid + 2];\r
+ }\r
+ \r
+ reg = sdata[tid + 1];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 1];\r
+ sval2[tid] = myVal2 = sval2[tid + 1];\r
+ }\r
+ }\r
+ }\r
+ };\r
+ template <> struct PredVal2WarpReductor<32>\r
+ {\r
+ template <typename T, typename V1, typename V2, typename Pred> \r
+ static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
+ {\r
+ if (tid < 16)\r
+ {\r
+ myData = sdata[tid];\r
+ myVal1 = sval1[tid];\r
+ myVal2 = sval2[tid];\r
+\r
+ T reg = sdata[tid + 16];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 16];\r
+ sval2[tid] = myVal2 = sval2[tid + 16];\r
+ }\r
+\r
+ reg = sdata[tid + 8];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 8];\r
+ sval2[tid] = myVal2 = sval2[tid + 8];\r
+ }\r
+\r
+ reg = sdata[tid + 4];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 4];\r
+ sval2[tid] = myVal2 = sval2[tid + 4];\r
+ }\r
+ \r
+ reg = sdata[tid + 2];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 2];\r
+ sval2[tid] = myVal2 = sval2[tid + 2];\r
+ }\r
+ \r
+ reg = sdata[tid + 1];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 1];\r
+ sval2[tid] = myVal2 = sval2[tid + 1];\r
+ }\r
+ }\r
+ }\r
+ };\r
+\r
+ template <> struct PredVal2WarpReductor<16>\r
+ {\r
+ template <typename T, typename V1, typename V2, typename Pred> \r
+ static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
+ {\r
+ if (tid < 8)\r
+ {\r
+ myData = sdata[tid];\r
+ myVal1 = sval1[tid];\r
+ myVal2 = sval2[tid];\r
+\r
+ T reg = sdata[tid + 8];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 8];\r
+ sval2[tid] = myVal2 = sval2[tid + 8];\r
+ }\r
+\r
+ reg = sdata[tid + 4];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 4];\r
+ sval2[tid] = myVal2 = sval2[tid + 4];\r
+ }\r
+ \r
+ reg = sdata[tid + 2];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 2];\r
+ sval2[tid] = myVal2 = sval2[tid + 2];\r
+ }\r
+ \r
+ reg = sdata[tid + 1];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 1];\r
+ sval2[tid] = myVal2 = sval2[tid + 1];\r
+ }\r
+ }\r
+ }\r
+ };\r
+ template <> struct PredVal2WarpReductor<8>\r
+ {\r
+ template <typename T, typename V1, typename V2, typename Pred> \r
+ static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
+ {\r
+ if (tid < 4)\r
+ {\r
+ myData = sdata[tid];\r
+ myVal1 = sval1[tid];\r
+ myVal2 = sval2[tid];\r
+\r
+ T reg = sdata[tid + 4];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 4];\r
+ sval2[tid] = myVal2 = sval2[tid + 4];\r
+ }\r
+ \r
+ reg = sdata[tid + 2];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 2];\r
+ sval2[tid] = myVal2 = sval2[tid + 2];\r
+ }\r
+ \r
+ reg = sdata[tid + 1];\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 1];\r
+ sval2[tid] = myVal2 = sval2[tid + 1];\r
+ }\r
+ }\r
+ }\r
+ };\r
+\r
+ template <bool warp> struct PredVal2ReductionDispatcher;\r
+ template <> struct PredVal2ReductionDispatcher<true>\r
+ {\r
+ template <int n, typename T, typename V1, typename V2, typename Pred> \r
+ static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
+ {\r
+ PredVal2WarpReductor<n>::reduce(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred);\r
+ }\r
+ };\r
+ template <> struct PredVal2ReductionDispatcher<false>\r
+ {\r
+ template <int n, typename T, typename V1, typename V2, typename Pred> \r
+ static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
+ {\r
+ myData = sdata[tid];\r
+ myVal1 = sval1[tid];\r
+ myVal2 = sval2[tid];\r
+\r
+ if (n >= 512 && tid < 256) \r
+ {\r
+ T reg = sdata[tid + 256];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 256];\r
+ sval2[tid] = myVal2 = sval2[tid + 256];\r
+ }\r
+ __syncthreads(); \r
+ }\r
+ if (n >= 256 && tid < 128) \r
+ {\r
+ T reg = sdata[tid + 128];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 128];\r
+ sval2[tid] = myVal2 = sval2[tid + 128];\r
+ }\r
+ __syncthreads(); \r
+ }\r
+ if (n >= 128 && tid < 64) \r
+ {\r
+ T reg = sdata[tid + 64];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 64];\r
+ sval2[tid] = myVal2 = sval2[tid + 64];\r
+ }\r
+ __syncthreads(); \r
+ } \r
+\r
+ if (tid < 32)\r
+ {\r
+ if (n >= 64) \r
+ { \r
+ T reg = sdata[tid + 32];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 32];\r
+ sval2[tid] = myVal2 = sval2[tid + 32];\r
+ }\r
+ }\r
+ if (n >= 32) \r
+ { \r
+ T reg = sdata[tid + 16];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 16];\r
+ sval2[tid] = myVal2 = sval2[tid + 16];\r
+ }\r
+ }\r
+ if (n >= 16) \r
+ { \r
+ T reg = sdata[tid + 8];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 8];\r
+ sval2[tid] = myVal2 = sval2[tid + 8];\r
+ }\r
+ }\r
+ if (n >= 8) \r
+ { \r
+ T reg = sdata[tid + 4];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 4];\r
+ sval2[tid] = myVal2 = sval2[tid + 4];\r
+ }\r
+ }\r
+ if (n >= 4) \r
+ { \r
+ T reg = sdata[tid + 2];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 2];\r
+ sval2[tid] = myVal2 = sval2[tid + 2];\r
+ } \r
+ }\r
+ if (n >= 2) \r
+ { \r
+ T reg = sdata[tid + 1];\r
+\r
+ if (pred(reg, myData))\r
+ {\r
+ sdata[tid] = myData = reg;\r
+ sval1[tid] = myVal1 = sval1[tid + 1];\r
+ sval2[tid] = myVal2 = sval2[tid + 1];\r
+ }\r
+ }\r
+ }\r
+ }\r
+ };\r
}\r
}}}\r
\r
///////////////////////////////////////////////////////////////////////////////\r
// Reduction\r
\r
- // reduction\r
template <int n, typename T, typename Op> __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)\r
{\r
StaticAssert<n >= 8 && n <= 512>::check();\r
detail::ReductionDispatcher<n <= 64>::reduce<n>(data, partial_reduction, tid, op);\r
}\r
\r
template <int n, typename T, typename V, typename Pred> \r
__device__ __forceinline__ void reducePredVal(volatile T* sdata, T& myData, V* sval, V& myVal, int tid, const Pred& pred)\r
{\r
StaticAssert<n >= 8 && n <= 512>::check();\r
detail::PredValReductionDispatcher<n <= 64>::reduce<n>(myData, myVal, sdata, sval, tid, pred);\r
}\r
+\r
+ template <int n, typename T, typename V1, typename V2, typename Pred> \r
+ __device__ __forceinline__ void reducePredVal2(volatile T* sdata, T& myData, V1* sval1, V1& myVal1, V2* sval2, V2& myVal2, int tid, const Pred& pred)\r
+ {\r
+ StaticAssert<n >= 8 && n <= 512>::check();\r
+ detail::PredVal2ReductionDispatcher<n <= 64>::reduce<n>(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred);\r
+ }\r
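\r
For reference, a minimal sketch of how a kernel could use reducePredVal2 to keep the best distance together with its train and image indices. The names BLOCK_SIZE, findBestMatch, s_dist, s_trainIdx, s_imgIdx and LessOp are illustrative, not part of this patch:\r
\r
    // Illustrative predicate: keep the candidate with the smaller distance.\r
    struct LessOp\r
    {\r
        __device__ bool operator()(float a, float b) const { return a < b; }\r
    };\r
\r
    // Each thread holds one candidate (distance, trainIdx, imgIdx); the block\r
    // reduces them to the single best triple. BLOCK_SIZE must satisfy the\r
    // StaticAssert above (8 <= BLOCK_SIZE <= 512).\r
    template <int BLOCK_SIZE>\r
    __device__ void findBestMatch(float& dist, int& trainIdx, int& imgIdx,\r
                                  volatile float* s_dist, int* s_trainIdx, int* s_imgIdx)\r
    {\r
        const int tid = threadIdx.x;\r
\r
        // Publish this thread's candidate to shared memory.\r
        s_dist[tid]     = dist;\r
        s_trainIdx[tid] = trainIdx;\r
        s_imgIdx[tid]   = imgIdx;\r
        __syncthreads();\r
\r
        // After the call, thread 0 holds the winning (distance, trainIdx, imgIdx).\r
        reducePredVal2<BLOCK_SIZE>(s_dist, dist, s_trainIdx, trainIdx,\r
                                   s_imgIdx, imgIdx, tid, LessOp());\r
    }\r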
\r
///////////////////////////////////////////////////////////////////////////////\r
// Solve linear system\r
\r
// Find 1->2 matches\r
pair_matches.clear();\r
- matcher.knnMatch(descriptors1_, descriptors2_, train_idx_, distance_, all_dist_, 2);\r
+ matcher.knnMatchSingle(descriptors1_, descriptors2_, train_idx_, distance_, all_dist_, 2);\r
matcher.knnMatchDownload(train_idx_, distance_, pair_matches);\r
for (size_t i = 0; i < pair_matches.size(); ++i)\r
{\r
\r
// Find 2->1 matches\r
pair_matches.clear();\r
- matcher.knnMatch(descriptors2_, descriptors1_, train_idx_, distance_, all_dist_, 2);\r
+ matcher.knnMatchSingle(descriptors2_, descriptors1_, train_idx_, distance_, all_dist_, 2);\r
matcher.knnMatchDownload(train_idx_, distance_, pair_matches);\r
for (size_t i = 0; i < pair_matches.size(); ++i)\r
{\r
\r
// Output\r
vector< vector<DMatch> > matches(2);\r
- vector< vector<DMatch> > d_matches(2);\r
+ gpu::GpuMat d_trainIdx, d_distance, d_allDist, d_nMatches;\r
\r
SUBTEST << "match";\r
\r
+ matcher.match(query, train, matches[0]);\r
CPU_ON;\r
matcher.match(query, train, matches[0]);\r
CPU_OFF;\r
\r
+ d_matcher.matchSingle(d_query, d_train, d_trainIdx, d_distance);\r
GPU_ON;\r
- d_matcher.match(d_query, d_train, d_matches[0]);\r
+ d_matcher.matchSingle(d_query, d_train, d_trainIdx, d_distance);\r
GPU_OFF;\r
\r
- SUBTEST << "knnMatch";\r
- int knn = 2;\r
+ SUBTEST << "knnMatch, 2";\r
\r
+ matcher.knnMatch(query, train, matches, 2);\r
CPU_ON;\r
- matcher.knnMatch(query, train, matches, knn);\r
+ matcher.knnMatch(query, train, matches, 2);\r
CPU_OFF;\r
\r
+ d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 2);\r
GPU_ON;\r
- d_matcher.knnMatch(d_query, d_train, d_matches, knn);\r
+ d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 2);\r
+ GPU_OFF;\r
+\r
+ SUBTEST << "knnMatch, 3";\r
+\r
+ matcher.knnMatch(query, train, matches, 3);\r
+ CPU_ON;\r
+ matcher.knnMatch(query, train, matches, 3);\r
+ CPU_OFF;\r
+\r
+ d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 3);\r
+ GPU_ON;\r
+ d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 3);\r
GPU_OFF;\r
\r
SUBTEST << "radiusMatch";\r
float max_distance = 2.0f;\r
\r
+ matcher.radiusMatch(query, train, matches, max_distance);\r
CPU_ON;\r
matcher.radiusMatch(query, train, matches, max_distance);\r
CPU_OFF;\r
\r
+ d_matcher.radiusMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_nMatches, max_distance);\r
GPU_ON;\r
- d_matcher.radiusMatch(d_query, d_train, d_matches, max_distance);\r
+ d_matcher.radiusMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_nMatches, max_distance);\r
GPU_OFF;\r
}\r
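\r
Note the benchmark pattern above: every timed call is preceded by an identical untimed call, so one-off costs (lazy CUDA initialization, buffer allocation) are paid before the CPU_ON/GPU_ON brackets and the timers measure steady-state throughput. The GPU subtests also time only the device-side *Single calls; converting the results to DMatch objects is a separate, untimed step, roughly as follows (assuming the static matchDownload overload declared alongside matchSingle):\r
\r
    // Illustrative only: full GPU pipeline including the download/convert step\r
    // that the timed region deliberately excludes.\r
    std::vector<DMatch> gpu_matches;\r
    d_matcher.matchSingle(d_query, d_train, d_trainIdx, d_distance);\r
    BruteForceMatcher_GPU_base::matchDownload(d_trainIdx, d_distance, gpu_matches);\r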
\r