/// @todo Convert doxy comments to rst
/// @todo Move stuff that doesn't need to be public into linemod.cpp
-/**
- * \brief Compute quantized orientation image from color image.
- *
- * Implements section 2.2 "Computing the Gradient Orientations."
- *
- * \param[in] src The source 8-bit, 3-channel image.
- * \param[out] magnitude Destination floating-point array of squared magnitudes.
- * \param[out] angle Destination 8-bit array of orientations. Each bit
- * represents one bin of the orientation space.
- * \param threshold Magnitude threshold. Keep only gradients whose norms are
- * larger than this.
- */
-void quantizedOrientations(const Mat& src, Mat& magnitude,
- Mat& angle, float threshold);
-
-/**
- * \brief Compute quantized normal image from depth image.
- *
- * Implements section 2.6 "Extension to Dense Depth Sensors."
- *
- * \param[in] src The source 16-bit depth image (in mm).
- * \param[out] dst The destination 8-bit image. Each bit represents one bin of
- * the view cone.
- * \param distance_threshold Ignore pixels beyond this distance.
- * \param difference_threshold When computing normals, ignore contributions of pixels whose
- * depth difference with the central pixel is above this threshold.
- *
- * \todo Should also need camera model, or at least focal lengths? Replace distance_threshold with mask?
- */
-void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
- int difference_threshold = 50);
/**
* \brief Discriminant feature described by its location and label.
};
/**
- * \brief Crop a set of overlapping templates from different modalities.
- *
- * \param[in,out] templates Set of templates representing the same object view.
- *
- * \return The bounding box of all the templates in original image coordinates.
- */
-Rect cropTemplates(std::vector<Template>& templates);
-
-/**
* \brief Represents a modality operating over an image pyramid.
*/
class QuantizedPyramid
void colormap(const Mat& quantized, Mat& dst);
/**
- * \brief Spread binary labels in a quantized image.
- *
- * Implements section 2.3 "Spreading the Orientations."
- *
- * \param[in] src The source 8-bit quantized image.
- * \param[out] dst Destination 8-bit spread image.
- * \param T Sampling step. Spread labels T/2 pixels in each direction.
- */
-void spread(const Mat& src, Mat& dst, int T);
-
-/**
- * \brief Precompute response maps for a spread quantized image.
- *
- * Implements section 2.4 "Precomputing Response Maps."
- *
- * \param[in] src The source 8-bit spread quantized image.
- * \param[out] response_maps Vector of 8 response maps, one for each bit label.
- */
-void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps);
-
-/**
- * \brief Convert a response map to fast linearized ordering.
- *
- * Implements section 2.5 "Linearizing the Memory for Parallelization."
- *
- * \param[in] response_map The 2D response map, an 8-bit image.
- * \param[out] linearized The response map in linearized order. It has T*T rows,
- * each of which is a linear memory of length (W/T)*(H/T).
- * \param T Sampling step.
- */
-void linearize(const Mat& response_map, Mat& linearized, int T);
-
-/**
- * \brief Compute similarity measure for a given template at each sampled image location.
- *
- * Uses linear memories to compute the similarity measure as described in Fig. 7.
- *
- * \param[in] linear_memories Vector of 8 linear memories, one for each label.
- * \param[in] templ Template to match against.
- * \param[out] dst Destination 8-bit similarity image of size (W/T, H/T).
- * \param size Size (W, H) of the original input image.
- * \param T Sampling step.
- */
-void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
- Mat& dst, Size size, int T);
-
-/**
- * \brief Compute similarity measure for a given template in a local region.
- *
- * \param[in] linear_memories Vector of 8 linear memories, one for each label.
- * \param[in] templ Template to match against.
- * \param[out] dst Destination 8-bit similarity image, 16x16.
- * \param size Size (W, H) of the original input image.
- * \param T Sampling step.
- * \param center Center of the local region.
- */
-void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
- Mat& dst, Size size, int T, Point center);
-
-/**
- * \brief Accumulate one or more 8-bit similarity images.
- *
- * \param[in] similarities Source 8-bit similarity images.
- * \param[out] dst Destination 16-bit similarity image.
- */
-void addSimilarities(const std::vector<Mat>& similarities, Mat& dst);
-
-/**
* \brief Represents a successful template match.
*/
struct Match
* \param modalities Modalities to use (color gradients, depth normals, ...).
* \param T_pyramid Value of the sampling step T at each pyramid level. The
* number of pyramid levels is T_pyramid.size().
- * \param pyramid_distance Scale factor between pyramid levels.
*/
- Detector(const std::vector< Ptr<Modality> >& modalities,
- const std::vector<int>& T_pyramid, double pyramid_distance = 2.0);
+ Detector(const std::vector< Ptr<Modality> >& modalities, const std::vector<int>& T_pyramid);
/**
* \brief Detect objects by template matching.
protected:
std::vector< Ptr<Modality> > modalities;
int pyramid_levels;
- double pyramid_distance;
std::vector<int> T_at_level;
typedef std::vector<Template> TemplatePyramid;
// struct Template
+/**
+ * \brief Crop a set of overlapping templates from different modalities.
+ *
+ * \param[in,out] templates Set of templates representing the same object view.
+ *
+ * \return The bounding box of all the templates in original image coordinates.
+ */
Rect cropTemplates(std::vector<Template>& templates)
{
int min_x = std::numeric_limits<int>::max();
void hysteresisGradient(Mat& magnitude, Mat& angle,
Mat& ap_tmp, float threshold);
-// Implements section 2.2
+/**
+ * \brief Compute quantized orientation image from color image.
+ *
+ * Implements section 2.2 "Computing the Gradient Orientations."
+ *
+ * \param[in] src The source 8-bit, 3-channel image.
+ * \param[out] magnitude Destination floating-point array of squared magnitudes.
+ * \param[out] angle Destination 8-bit array of orientations. Each bit
+ * represents one bin of the orientation space.
+ * \param threshold Magnitude threshold. Keep only gradients whose norms are
+ * larger than this.
+ */
void quantizedOrientations(const Mat& src, Mat& magnitude,
Mat& angle, float threshold)
{
b[1] += fj * delta;
}
+/**
+ * \brief Compute quantized normal image from depth image.
+ *
+ * Implements section 2.6 "Extension to Dense Depth Sensors."
+ *
+ * \param[in] src The source 16-bit depth image (in mm).
+ * \param[out] dst The destination 8-bit image. Each bit represents one bin of
+ * the view cone.
+ * \param distance_threshold Ignore pixels beyond this distance.
+ * \param difference_threshold When computing normals, ignore contributions of pixels whose
+ * depth difference with the central pixel is above this threshold.
+ *
+ * \todo Should this also take a camera model, or at least focal lengths? Replace distance_threshold with a mask?
+ */
void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
int difference_threshold)
{
}
}
+/**
+ * \brief Spread binary labels in a quantized image.
+ *
+ * Implements section 2.3 "Spreading the Orientations."
+ *
+ * \param[in] src The source 8-bit quantized image.
+ * \param[out] dst Destination 8-bit spread image.
+ * \param T Sampling step. Spread labels T/2 pixels in each direction.
+ */
void spread(const Mat& src, Mat& dst, int T)
{
// Allocate and zero-initialize spread (OR'ed) image
// Auto-generated by create_similarity_lut.py
CV_DECL_ALIGNED(16) static const unsigned char SIMILARITY_LUT[256] = {0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 4, 4, 3, 3, 4, 4, 2, 3, 4, 4, 3, 3, 4, 4, 0, 1, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 3, 3, 4, 4, 4, 4, 3, 3, 3, 3, 4, 4, 4, 4, 0, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4, 0, 1, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 3, 4, 4, 3, 3, 4, 4, 2, 3, 4, 4, 3, 3, 4, 4, 0, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 3, 3, 4, 4, 4, 4, 3, 3, 3, 3, 4, 4, 4, 4, 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3, 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
+/**
+ * \brief Precompute response maps for a spread quantized image.
+ *
+ * Implements section 2.4 "Precomputing Response Maps."
+ *
+ * \param[in] src The source 8-bit spread quantized image.
+ * \param[out] response_maps Vector of 8 response maps, one for each bit label.
+ */
void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
{
CV_Assert((src.rows * src.cols) % 16 == 0);
}
}
+/**
+ * \brief Convert a response map to fast linearized ordering.
+ *
+ * Implements section 2.5 "Linearizing the Memory for Parallelization."
+ *
+ * \param[in] response_map The 2D response map, an 8-bit image.
+ * \param[out] linearized The response map in linearized order. It has T*T rows,
+ * each of which is a linear memory of length (W/T)*(H/T).
+ * \param T Sampling step.
+ */
void linearize(const Mat& response_map, Mat& linearized, int T)
{
CV_Assert(response_map.rows % T == 0);
return memory + lm_index;
}
-// NOTE: Returning dst as uint8 instead of uint16
+/**
+ * \brief Compute similarity measure for a given template at each sampled image location.
+ *
+ * Uses linear memories to compute the similarity measure as described in Fig. 7.
+ *
+ * \param[in] linear_memories Vector of 8 linear memories, one for each label.
+ * \param[in] templ Template to match against.
+ * \param[out] dst Destination 8-bit similarity image of size (W/T, H/T).
+ * \param size Size (W, H) of the original input image.
+ * \param T Sampling step.
+ */
void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T)
{
}
}
-// NOTE: Returning dst as uint8 instead of uint16
+/**
+ * \brief Compute similarity measure for a given template in a local region.
+ *
+ * \param[in] linear_memories Vector of 8 linear memories, one for each label.
+ * \param[in] templ Template to match against.
+ * \param[out] dst Destination 8-bit similarity image, 16x16.
+ * \param size Size (W, H) of the original input image.
+ * \param T Sampling step.
+ * \param center Center of the local region.
+ */
void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T, Point center)
{
}
}
+/**
+ * \brief Accumulate one or more 8-bit similarity images.
+ *
+ * \param[in] similarities Source 8-bit similarity images.
+ * \param[out] dst Destination 16-bit similarity image.
+ */
void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
{
if (similarities.size() == 1)
}
Detector::Detector(const std::vector< Ptr<Modality> >& modalities,
- const std::vector<int>& T_pyramid, double pyramid_distance)
+ const std::vector<int>& T_pyramid)
: modalities(modalities),
pyramid_levels(T_pyramid.size()),
- pyramid_distance(pyramid_distance),
T_at_level(T_pyramid)
{
- CV_Assert(pyramid_distance == 2.0);
}
void Detector::match(const std::vector<Mat>& sources, float threshold, std::vector<Match>& matches,
float threshold;
};
-bool non_negative_assert(const Template& templ)
-{
- for(size_t j = 0; j < templ.features.size(); ++j)
- assert(templ.features[j].x >= 0 && templ.features[j].y >= 0);
- return true;
-}
-
void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
const std::vector<Size>& sizes,
float threshold, std::vector<Match>& matches,
for (int m = 0; m < (int)candidates.size(); ++m)
{
Match& match = candidates[m];
- int x = match.x * 2 + 1; /// @todo Support other pyramid_distance
+ int x = match.x * 2 + 1; /// @todo Support other pyramid distance
int y = match.y * 2 + 1;
// Require 8 (reduced) row/cols to the up/left
{
class_templates.clear();
pyramid_levels = fn["pyramid_levels"];
- pyramid_distance = fn["pyramid_distance"];
fn["T"] >> T_at_level;
modalities.clear();
void Detector::write(FileStorage& fs) const
{
fs << "pyramid_levels" << pyramid_levels;
- fs << "pyramid_distance" << pyramid_distance;
fs << "T" << "[:" << T_at_level << "]";
fs << "modalities" << "[";
for ( ; mod_it != mod_it_end; ++mod_it, ++i)
CV_Assert(modalities[i]->name() == (std::string)(*mod_it));
CV_Assert((int)fn["pyramid_levels"] == pyramid_levels);
- CV_Assert((int)fn["pyramid_distance"] == pyramid_distance);
// Detector should not already have this class
std::string class_id;
fs << modalities[i]->name();
fs << "]"; // modalities
fs << "pyramid_levels" << pyramid_levels;
- fs << "pyramid_distance" << pyramid_distance;
fs << "template_pyramids" << "[";
for (size_t i = 0; i < tps.size(); ++i)
{
}
}
-
static const int T_DEFAULTS[] = {5, 8};
Ptr<Detector> getDefaultLINE()