Updated FLANN
author Marius Muja <no@email>
Sun, 19 Jun 2011 23:46:23 +0000 (23:46 +0000)
committer Marius Muja <no@email>
Sun, 19 Jun 2011 23:46:23 +0000 (23:46 +0000)
36 files changed:
modules/flann/include/opencv2/flann/all_indices.h
modules/flann/include/opencv2/flann/allocator.h
modules/flann/include/opencv2/flann/any.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/autotuned_index.h
modules/flann/include/opencv2/flann/composite_index.h
modules/flann/include/opencv2/flann/config.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/defines.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/dist.h
modules/flann/include/opencv2/flann/dynamic_bitset.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/flann.hpp
modules/flann/include/opencv2/flann/flann_base.hpp
modules/flann/include/opencv2/flann/general.h
modules/flann/include/opencv2/flann/ground_truth.h
modules/flann/include/opencv2/flann/hdf5.h
modules/flann/include/opencv2/flann/heap.h
modules/flann/include/opencv2/flann/hierarchical_clustering_index.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/index_testing.h
modules/flann/include/opencv2/flann/kdtree_index.h
modules/flann/include/opencv2/flann/kdtree_single_index.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/kmeans_index.h
modules/flann/include/opencv2/flann/linear_index.h
modules/flann/include/opencv2/flann/logger.h
modules/flann/include/opencv2/flann/lsh_index.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/lsh_table.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/matrix.h
modules/flann/include/opencv2/flann/nn_index.h
modules/flann/include/opencv2/flann/object_factory.h
modules/flann/include/opencv2/flann/params.h [new file with mode: 0644]
modules/flann/include/opencv2/flann/random.h
modules/flann/include/opencv2/flann/result_set.h
modules/flann/include/opencv2/flann/sampling.h
modules/flann/include/opencv2/flann/saving.h
modules/flann/include/opencv2/flann/simplex_downhill.h
modules/flann/include/opencv2/flann/timer.h
modules/flann/src/flann.cpp
modules/flann/src/precomp.hpp

index 898ac09..ff53fd8 100644 (file)
  *************************************************************************/
 
 
-#ifndef _OPENCV_ALL_INDICES_H_
-#define _OPENCV_ALL_INDICES_H_
+#ifndef OPENCV_FLANN_ALL_INDICES_H_
+#define OPENCV_FLANN_ALL_INDICES_H_
 
-#include "opencv2/flann/general.h"
+#include "general.h"
 
-#include "opencv2/flann/nn_index.h"
-#include "opencv2/flann/kdtree_index.h"
-#include "opencv2/flann/kmeans_index.h"
-#include "opencv2/flann/composite_index.h"
-#include "opencv2/flann/linear_index.h"
-#include "opencv2/flann/autotuned_index.h"
+#include "nn_index.h"
+#include "kdtree_index.h"
+#include "kdtree_single_index.h"
+#include "kmeans_index.h"
+#include "composite_index.h"
+#include "linear_index.h"
+#include "hierarchical_clustering_index.h"
+#include "lsh_index.h"
+#include "autotuned_index.h"
 
-namespace cvflann 
+
+namespace cvflann
+{
+
+template<typename KDTreeCapability, typename VectorSpace, typename Distance>
+struct index_creator
+{
+    static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+    {
+        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, "algorithm");
+
+        NNIndex<Distance>* nnIndex;
+        switch (index_type) {
+        case FLANN_INDEX_LINEAR:
+            nnIndex = new LinearIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_KDTREE_SINGLE:
+            nnIndex = new KDTreeSingleIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_KDTREE:
+            nnIndex = new KDTreeIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_KMEANS:
+            nnIndex = new KMeansIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_COMPOSITE:
+            nnIndex = new CompositeIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_AUTOTUNED:
+            nnIndex = new AutotunedIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_HIERARCHICAL:
+            nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_LSH:
+            nnIndex = new LshIndex<Distance>(dataset, params, distance);
+            break;
+        default:
+            throw FLANNException("Unknown index type");
+        }
+
+        return nnIndex;
+    }
+};
+
+template<typename VectorSpace, typename Distance>
+struct index_creator<False,VectorSpace,Distance>
 {
+    static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+    {
+        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, "algorithm");
 
-template<typename T>
-NNIndex<T>* create_index_by_type(const Matrix<T>& dataset, const IndexParams& params)
+        NNIndex<Distance>* nnIndex;
+        switch (index_type) {
+        case FLANN_INDEX_LINEAR:
+            nnIndex = new LinearIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_KMEANS:
+            nnIndex = new KMeansIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_HIERARCHICAL:
+            nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_LSH:
+            nnIndex = new LshIndex<Distance>(dataset, params, distance);
+            break;
+        default:
+            throw FLANNException("Unknown index type");
+        }
+
+        return nnIndex;
+    }
+};
+
+template<typename Distance>
+struct index_creator<False,False,Distance>
 {
-       flann_algorithm_t index_type = params.getIndexType();
-
-       NNIndex<T>* nnIndex;
-       switch (index_type) {
-       case FLANN_INDEX_LINEAR:
-               nnIndex = new LinearIndex<T>(dataset, (const LinearIndexParams&)params);
-               break;
-       case FLANN_INDEX_KDTREE:
-               nnIndex = new KDTreeIndex<T>(dataset, (const KDTreeIndexParams&)params);
-               break;
-       case FLANN_INDEX_KMEANS:
-               nnIndex = new KMeansIndex<T>(dataset, (const KMeansIndexParams&)params);
-               break;
-       case FLANN_INDEX_COMPOSITE:
-               nnIndex = new CompositeIndex<T>(dataset, (const CompositeIndexParams&) params);
-               break;
-       case FLANN_INDEX_AUTOTUNED:
-               nnIndex = new AutotunedIndex<T>(dataset, (const AutotunedIndexParams&) params);
-               break;
-       default:
-               throw FLANNException("Unknown index type");
-       }
-
-       return nnIndex;
+    static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+    {
+        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, "algorithm");
+
+        NNIndex<Distance>* nnIndex;
+        switch (index_type) {
+        case FLANN_INDEX_LINEAR:
+            nnIndex = new LinearIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_HIERARCHICAL:
+            nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);
+            break;
+        case FLANN_INDEX_LSH:
+            nnIndex = new LshIndex<Distance>(dataset, params, distance);
+            break;
+        default:
+            throw FLANNException("Unknown index type");
+        }
+
+        return nnIndex;
+    }
+};
+
+template<typename Distance>
+NNIndex<Distance>* create_index_by_type(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+{
+    return index_creator<typename Distance::is_kdtree_distance,
+                         typename Distance::is_vector_space_distance,
+                         Distance>::create(dataset, params,distance);
 }
 
-} //namespace cvflann
+}
 
-#endif /* _OPENCV_ALL_INDICES_H_ */
+#endif /* OPENCV_FLANN_ALL_INDICES_H_ */
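For context, a minimal sketch of how the reworked create_index_by_type() is called after this change. This is illustrative only and not part of the commit; it assumes the post-commit cvflann headers (dist.h providing an L2<float> functor with the is_kdtree_distance/is_vector_space_distance trait typedefs, and kdtree_index.h providing KDTreeIndexParams):

    #include "opencv2/flann/all_indices.h"
    #include "opencv2/flann/dist.h"

    using namespace cvflann;

    // Build a 4-tree randomized kd-tree index over a row-major float dataset.
    // index_creator<> restricts the reachable index types based on the distance
    // traits, so FLANN_INDEX_KDTREE is only offered for kd-tree capable distances.
    void build_example(float* data, size_t rows, size_t cols)
    {
        Matrix<float> dataset(data, rows, cols);
        IndexParams params = KDTreeIndexParams(4);  // sets params["algorithm"] = FLANN_INDEX_KDTREE
        NNIndex<L2<float> >* index = create_index_by_type<L2<float> >(dataset, params, L2<float>());
        index->buildIndex();
        delete index;
    }
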
index 0215ac6..6ca44fc 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_ALLOCATOR_H_
-#define _OPENCV_ALLOCATOR_H_
+#ifndef OPENCV_FLANN_ALLOCATOR_H_
+#define OPENCV_FLANN_ALLOCATOR_H_
 
 #include <stdlib.h>
 #include <stdio.h>
 
+
 namespace cvflann
 {
 
@@ -47,8 +48,8 @@ namespace cvflann
 template <typename T>
 T* allocate(size_t count = 1)
 {
-       T* mem = (T*) ::malloc(sizeof(T)*count);
-       return mem;
+    T* mem = (T*) ::malloc(sizeof(T)*count);
+    return mem;
 }
 
 
@@ -70,118 +71,118 @@ T* allocate(size_t count = 1)
 const size_t     WORDSIZE=16;
 const  size_t     BLOCKSIZE=8192;
 
-class CV_EXPORTS PooledAllocator
+class PooledAllocator
 {
-       /* We maintain memory alignment to word boundaries by requiring that all
-               allocations be in multiples of the machine wordsize.  */
-         /* Size of machine word in bytes.  Must be power of 2. */
-       /* Minimum number of bytes requested at a time from     the system.  Must be multiple of WORDSIZE. */
+    /* We maintain memory alignment to word boundaries by requiring that all
+        allocations be in multiples of the machine wordsize.  */
+    /* Size of machine word in bytes.  Must be power of 2. */
+    /* Minimum number of bytes requested at a time from        the system.  Must be multiple of WORDSIZE. */
 
 
-       int     remaining;  /* Number of bytes left in current block of storage. */
-       void*   base;     /* Pointer to base of current block of storage. */
-       void*   loc;      /* Current location in block to next allocate memory. */
-       int     blocksize;
+    int     remaining;  /* Number of bytes left in current block of storage. */
+    void*   base;     /* Pointer to base of current block of storage. */
+    void*   loc;      /* Current location in block to next allocate memory. */
+    int     blocksize;
 
 
 public:
-       int     usedMemory;
-       int     wastedMemory;
-
-       /**
-               Default constructor. Initializes a new pool.
-       */
-       PooledAllocator(int blocksize = BLOCKSIZE)
-       {
-       this->blocksize = blocksize;
-               remaining = 0;
-               base = NULL;
-
-               usedMemory = 0;
-               wastedMemory = 0;
-       }
-
-       /**
-        * Destructor. Frees all the memory allocated in this pool.
-        */
-       ~PooledAllocator()
-       {
-               void *prev;
-
-               while (base != NULL) {
-                       prev = *((void **) base);  /* Get pointer to prev block. */
-                       ::free(base);
-                       base = prev;
-               }
-       }
-
-       /**
-        * Returns a pointer to a piece of new memory of the given size in bytes
-        * allocated from the pool.
-        */
-       void* allocateBytes(int size)
-       {
-               int blocksize;
-
-               /* Round size up to a multiple of wordsize.  The following expression
-                       only works for WORDSIZE that is a power of 2, by masking last bits of
-                       incremented size to zero.
-               */
-               size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);
-
-               /* Check whether a new block must be allocated.  Note that the first word
-                       of a block is reserved for a pointer to the previous block.
-               */
-               if (size > remaining) {
-
-                       wastedMemory += remaining;
-
-               /* Allocate new storage. */
-                       blocksize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ?
-                                               size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE;
-
-                       // use the standard C malloc to allocate memory
-                       void* m = ::malloc(blocksize);
-                       if (!m) {
+    int     usedMemory;
+    int     wastedMemory;
+
+    /**
+        Default constructor. Initializes a new pool.
+     */
+    PooledAllocator(int blocksize = BLOCKSIZE)
+    {
+        this->blocksize = blocksize;
+        remaining = 0;
+        base = NULL;
+
+        usedMemory = 0;
+        wastedMemory = 0;
+    }
+
+    /**
+     * Destructor. Frees all the memory allocated in this pool.
+     */
+    ~PooledAllocator()
+    {
+        void* prev;
+
+        while (base != NULL) {
+            prev = *((void**) base); /* Get pointer to prev block. */
+            ::free(base);
+            base = prev;
+        }
+    }
+
+    /**
+     * Returns a pointer to a piece of new memory of the given size in bytes
+     * allocated from the pool.
+     */
+    void* allocateMemory(int size)
+    {
+        int blocksize;
+
+        /* Round size up to a multiple of wordsize.  The following expression
+            only works for WORDSIZE that is a power of 2, by masking last bits of
+            incremented size to zero.
+         */
+        size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);
+
+        /* Check whether a new block must be allocated.  Note that the first word
+            of a block is reserved for a pointer to the previous block.
+         */
+        if (size > remaining) {
+
+            wastedMemory += remaining;
+
+            /* Allocate new storage. */
+            blocksize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ?
+                        size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE;
+
+            // use the standard C malloc to allocate memory
+            void* m = ::malloc(blocksize);
+            if (!m) {
                 fprintf(stderr,"Failed to allocate memory.\n");
-                exit(1);
-                       }
-
-                       /* Fill first word of new block with pointer to previous block. */
-                       ((void **) m)[0] = base;
-                       base = m;
-
-                       int shift = 0;
-                       //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);
-
-                       remaining = blocksize - sizeof(void*) - shift;
-                       loc = ((char*)m + sizeof(void*) + shift);
-               }
-               void* rloc = loc;
-               loc = (char*)loc + size;
-               remaining -= size;
-
-               usedMemory += size;
-
-               return rloc;
-       }
-
-       /**
-        * Allocates (using this pool) a generic type T.
-        *
-        * Params:
-        *     count = number of instances to allocate.
-        * Returns: pointer (of type T*) to memory buffer
-        */
+                return NULL;
+            }
+
+            /* Fill first word of new block with pointer to previous block. */
+            ((void**) m)[0] = base;
+            base = m;
+
+            int shift = 0;
+            //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);
+
+            remaining = blocksize - sizeof(void*) - shift;
+            loc = ((char*)m + sizeof(void*) + shift);
+        }
+        void* rloc = loc;
+        loc = (char*)loc + size;
+        remaining -= size;
+
+        usedMemory += size;
+
+        return rloc;
+    }
+
+    /**
+     * Allocates (using this pool) a generic type T.
+     *
+     * Params:
+     *     count = number of instances to allocate.
+     * Returns: pointer (of type T*) to memory buffer
+     */
     template <typename T>
-       T* allocate(size_t count = 1)
-       {
-               T* mem = (T*) this->allocateBytes((int)(sizeof(T)*count));
-               return mem;
-       }
+    T* allocate(size_t count = 1)
+    {
+        T* mem = (T*) this->allocateMemory((int)(sizeof(T)*count));
+        return mem;
+    }
 
 };
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_ALLOCATOR_H_
+#endif //OPENCV_FLANN_ALLOCATOR_H_
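A short usage sketch of the PooledAllocator shown above (illustrative, not part of the commit; assumes only allocator.h):

    #include "opencv2/flann/allocator.h"

    struct Node { int index; float value; };

    void pool_example()
    {
        cvflann::PooledAllocator pool;         // carves memory out of BLOCKSIZE-byte blocks
        Node* one  = pool.allocate<Node>();    // single object
        Node* many = pool.allocate<Node>(100); // contiguous array of 100
        one->index = 0; one->value = 1.0f;
        many[99] = *one;
        // no per-object free: every block is released when `pool` is destroyed
    }
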
diff --git a/modules/flann/include/opencv2/flann/any.h b/modules/flann/include/opencv2/flann/any.h
new file mode 100644 (file)
index 0000000..aaa87df
--- /dev/null
@@ -0,0 +1,277 @@
+#ifndef OPENCV_FLANN_ANY_H_
+#define OPENCV_FLANN_ANY_H_
+/*
+ * (C) Copyright Christopher Diggins 2005-2011
+ * (C) Copyright Pablo Aguilar 2005
+ * (C) Copyright Kevlin Henney 2001
+ *
+ * Distributed under the Boost Software License, Version 1.0. (See
+ * accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt
+ *
+ * Adapted for FLANN by Marius Muja
+ */
+
+
+
+#include <stdexcept>
+#include <ostream>
+
+namespace cdiggins
+{
+
+namespace anyimpl
+{
+
+struct bad_any_cast
+{
+};
+
+struct empty_any
+{
+};
+
+struct base_any_policy
+{
+    virtual void static_delete(void** x) = 0;
+    virtual void copy_from_value(void const* src, void** dest) = 0;
+    virtual void clone(void* const* src, void** dest) = 0;
+    virtual void move(void* const* src, void** dest) = 0;
+    virtual void* get_value(void** src) = 0;
+    virtual size_t get_size() = 0;
+    virtual void print(std::ostream& out, void* const* src) = 0;
+};
+
+template<typename T>
+struct typed_base_any_policy : base_any_policy
+{
+    virtual size_t get_size() { return sizeof(T); }
+
+};
+
+template<typename T>
+struct small_any_policy : typed_base_any_policy<T>
+{
+    virtual void static_delete(void**) { }
+    virtual void copy_from_value(void const* src, void** dest)
+    {
+        new (dest) T(* reinterpret_cast<T const*>(src));
+    }
+    virtual void clone(void* const* src, void** dest) { *dest = *src; }
+    virtual void move(void* const* src, void** dest) { *dest = *src; }
+    virtual void* get_value(void** src) { return reinterpret_cast<void*>(src); }
+    virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(src); }
+};
+
+template<typename T>
+struct big_any_policy : typed_base_any_policy<T>
+{
+    virtual void static_delete(void** x)
+    {
+        if (* x) delete (* reinterpret_cast<T**>(x)); *x = NULL;
+    }
+    virtual void copy_from_value(void const* src, void** dest)
+    {
+        *dest = new T(*reinterpret_cast<T const*>(src));
+    }
+    virtual void clone(void* const* src, void** dest)
+    {
+        *dest = new T(**reinterpret_cast<T* const*>(src));
+    }
+    virtual void move(void* const* src, void** dest)
+    {
+        (*reinterpret_cast<T**>(dest))->~T();
+        **reinterpret_cast<T**>(dest) = **reinterpret_cast<T* const*>(src);
+    }
+    virtual void* get_value(void** src) { return *src; }
+    virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(*src); }
+};
+
+template<typename T>
+struct choose_policy
+{
+    typedef big_any_policy<T> type;
+};
+
+template<typename T>
+struct choose_policy<T*>
+{
+    typedef small_any_policy<T*> type;
+};
+
+struct any;
+
+/// Choosing the policy for an any type is illegal, but should never happen.
+/// This is designed to throw a compiler error.
+template<>
+struct choose_policy<any>
+{
+    typedef void type;
+};
+
+/// Specializations for small types.
+#define SMALL_POLICY(TYPE) \
+    template<> \
+    struct choose_policy<TYPE> { typedef small_any_policy<TYPE> type; \
+    };
+
+SMALL_POLICY(signed char);
+SMALL_POLICY(unsigned char);
+SMALL_POLICY(signed short);
+SMALL_POLICY(unsigned short);
+SMALL_POLICY(signed int);
+SMALL_POLICY(unsigned int);
+SMALL_POLICY(signed long);
+SMALL_POLICY(unsigned long);
+SMALL_POLICY(float);
+SMALL_POLICY(bool);
+
+#undef SMALL_POLICY
+
+/// This function will return a different policy for each type.
+template<typename T>
+base_any_policy* get_policy()
+{
+    static typename choose_policy<T>::type policy;
+    return &policy;
+}
+} // namespace anyimpl
+
+struct any
+{
+private:
+    // fields
+    anyimpl::base_any_policy* policy;
+    void* object;
+
+public:
+    /// Initializing constructor.
+    template <typename T>
+    any(const T& x)
+        : policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL)
+    {
+        assign(x);
+    }
+
+    /// Empty constructor.
+    any()
+        : policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL)
+    { }
+
+    /// Special initializing constructor for string literals.
+    any(const char* x)
+        : policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL)
+    {
+        assign(x);
+    }
+
+    /// Copy constructor.
+    any(const any& x)
+        : policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL)
+    {
+        assign(x);
+    }
+
+    /// Destructor.
+    ~any()
+    {
+        policy->static_delete(&object);
+    }
+
+    /// Assignment function from another any.
+    any& assign(const any& x)
+    {
+        reset();
+        policy = x.policy;
+        policy->clone(&x.object, &object);
+        return *this;
+    }
+
+    /// Assignment function.
+    template <typename T>
+    any& assign(const T& x)
+    {
+        reset();
+        policy = anyimpl::get_policy<T>();
+        policy->copy_from_value(&x, &object);
+        return *this;
+    }
+
+    /// Assignment operator.
+    template<typename T>
+    any& operator=(const T& x)
+    {
+        return assign(x);
+    }
+
+    /// Assignment operator, specialized for literal strings.
+    /// They have types like const char [6] which don't work as expected.
+    any& operator=(const char* x)
+    {
+        return assign(x);
+    }
+
+    /// Utility functions
+    any& swap(any& x)
+    {
+        std::swap(policy, x.policy);
+        std::swap(object, x.object);
+        return *this;
+    }
+
+    /// Cast operator. You can only cast to the original type.
+    template<typename T>
+    T& cast()
+    {
+        if (policy != anyimpl::get_policy<T>()) throw anyimpl::bad_any_cast();
+        T* r = reinterpret_cast<T*>(policy->get_value(&object));
+        return *r;
+    }
+
+    /// Cast operator. You can only cast to the original type.
+    template<typename T>
+    const T& cast() const
+    {
+        if (policy != anyimpl::get_policy<T>()) throw anyimpl::bad_any_cast();
+        T* r = reinterpret_cast<T*>(policy->get_value((void**)&object));
+        return *r;
+    }
+
+    /// Returns true if the any contains no value.
+    bool empty() const
+    {
+        return policy == anyimpl::get_policy<anyimpl::empty_any>();
+    }
+
+    /// Frees any allocated memory, and sets the value to NULL.
+    void reset()
+    {
+        policy->static_delete(&object);
+        policy = anyimpl::get_policy<anyimpl::empty_any>();
+    }
+
+    /// Returns true if the two types are the same.
+    bool compatible(const any& x) const
+    {
+        return policy == x.policy;
+    }
+
+    /// Returns true if the type is compatible with the policy
+    template<typename T>
+    bool has_type()
+    {
+        return policy == anyimpl::get_policy<T>();
+    }
+
+    friend std::ostream& operator <<(std::ostream& out, const any& any_val);
+};
+
+inline std::ostream& operator <<(std::ostream& out, const any& any_val)
+{
+    any_val.policy->print(out,&any_val.object);
+    return out;
+}
+
+}
+
+#endif // OPENCV_FLANN_ANY_H_
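The new cdiggins::any type backs the value type of the map-based IndexParams introduced elsewhere in this commit (see params.h in the file list). A minimal standalone sketch of its behaviour (illustrative, not part of the commit):

    #include "opencv2/flann/any.h"
    #include <cassert>
    #include <string>

    void any_example()
    {
        cdiggins::any a = 42;                  // small type: stored in-place (small_any_policy)
        assert(a.has_type<int>());
        assert(a.cast<int>() == 42);

        a = std::string("kdtree");             // big type: heap allocated (big_any_policy)
        assert(a.has_type<std::string>());

        try {
            a.cast<int>();                     // only the original type can be cast to
        }
        catch (cdiggins::anyimpl::bad_any_cast&) { /* expected */ }
    }
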
index 0e19f1d..6be0fd2 100644 (file)
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
-
-#ifndef _OPENCV_AUTOTUNEDINDEX_H_
-#define _OPENCV_AUTOTUNEDINDEX_H_
-
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/nn_index.h"
-#include "opencv2/flann/ground_truth.h"
-#include "opencv2/flann/index_testing.h"
-#include "opencv2/flann/sampling.h"
-#include "opencv2/flann/all_indices.h"
+#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_
+#define OPENCV_FLANN_AUTOTUNED_INDEX_H_
+
+#include "general.h"
+#include "nn_index.h"
+#include "ground_truth.h"
+#include "index_testing.h"
+#include "sampling.h"
+#include "kdtree_index.h"
+#include "kdtree_single_index.h"
+#include "kmeans_index.h"
+#include "composite_index.h"
+#include "linear_index.h"
+#include "logger.h"
 
 namespace cvflann
 {
 
-struct AutotunedIndexParams : public IndexParams {
-       AutotunedIndexParams( float target_precision_ = 0.8, float build_weight_ = 0.01,
-                       float memory_weight_ = 0, float sample_fraction_ = 0.1) :
-               IndexParams(FLANN_INDEX_AUTOTUNED),
-               target_precision(target_precision_),
-               build_weight(build_weight_),
-               memory_weight(memory_weight_),
-               sample_fraction(sample_fraction_) {};
-
-       float target_precision;    // precision desired (used for autotuning, -1 otherwise)
-       float build_weight;        // build tree time weighting factor
-       float memory_weight;       // index memory weighting factor
-    float sample_fraction;     // what fraction of the dataset to use for autotuning
-
-       void print() const
-       {
-               logger().info("Index type: %d\n",(int)algorithm);
-               logger().info("logger(). precision: %g\n", target_precision);
-               logger().info("Build weight: %g\n", build_weight);
-               logger().info("Memory weight: %g\n", memory_weight);
-               logger().info("Sample fraction: %g\n", sample_fraction);
-       }
+template<typename Distance>
+NNIndex<Distance>* create_index_by_type(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance);
+
+
+struct AutotunedIndexParams : public IndexParams
+{
+    AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, float memory_weight = 0, float sample_fraction = 0.1)
+    {
+        (*this)["algorithm"] = FLANN_INDEX_AUTOTUNED;
+        // precision desired (used for autotuning, -1 otherwise)
+        (*this)["target_precision"] = target_precision;
+        // build tree time weighting factor
+        (*this)["build_weight"] = build_weight;
+        // index memory weighting factor
+        (*this)["memory_weight"] = memory_weight;
+        // what fraction of the dataset to use for autotuning
+        (*this)["sample_fraction"] = sample_fraction;
+    }
 };
 
 
-template <typename ELEM_TYPE, typename DIST_TYPE = typename DistType<ELEM_TYPE>::type >
-class AutotunedIndex : public NNIndex<ELEM_TYPE>
+template <typename Distance>
+class AutotunedIndex : public NNIndex<Distance>
 {
-       NNIndex<ELEM_TYPE>* bestIndex;
+public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
 
-       IndexParams* bestParams;
-       SearchParams bestSearchParams;
+    AutotunedIndex(const Matrix<ElementType>& inputData, const IndexParams& params = AutotunedIndexParams(), Distance d = Distance()) :
+        dataset_(inputData), distance_(d)
+    {
+        target_precision_ = get_param(params, "target_precision",0.8f);
+        build_weight_ =  get_param(params,"build_weight", 0.01f);
+        memory_weight_ = get_param(params, "memory_weight", 0.0f);
+        sample_fraction_ = get_param(params,"sample_fraction", 0.1f);
+        bestIndex_ = NULL;
+    }
 
-    Matrix<ELEM_TYPE> sampledDataset;
-    Matrix<ELEM_TYPE> testDataset;
-    Matrix<int> gt_matches;
+    AutotunedIndex(const AutotunedIndex&);
+    AutotunedIndex& operator=(const AutotunedIndex&);
 
-    float speedup;
+    virtual ~AutotunedIndex()
+    {
+        if (bestIndex_ != NULL) {
+            delete bestIndex_;
+            bestIndex_ = NULL;
+        }
+    }
 
-       /**
-        * The dataset used by this index
-        */
-    const Matrix<ELEM_TYPE> dataset;
+    /**
+     *          Method responsible for building the index.
+     */
+    virtual void buildIndex()
+    {
+        bestParams_ = estimateBuildParams();
+        Logger::info("----------------------------------------------------\n");
+        Logger::info("Autotuned parameters:\n");
+        print_params(bestParams_);
+        Logger::info("----------------------------------------------------\n");
+
+        bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_);
+        bestIndex_->buildIndex();
+        speedup_ = estimateSearchParams(bestSearchParams_);
+        Logger::info("----------------------------------------------------\n");
+        Logger::info("Search parameters:\n");
+        print_params(bestSearchParams_);
+        Logger::info("----------------------------------------------------\n");
+    }
 
     /**
-     * Index parameters
+     *  Saves the index to a stream
+     */
+    virtual void saveIndex(FILE* stream)
+    {
+        save_value(stream, (int)bestIndex_->getType());
+        bestIndex_->saveIndex(stream);
+        save_value(stream, get_param<int>(bestSearchParams_, "checks"));
+    }
+
+    /**
+     *  Loads the index from a stream
      */
-    const AutotunedIndexParams& index_params;
+    virtual void loadIndex(FILE* stream)
+    {
+        int index_type;
 
-       AutotunedIndex& operator=(const AutotunedIndex&);
-       AutotunedIndex(const AutotunedIndex&);
+        load_value(stream, index_type);
+        IndexParams params;
+        params["algorithm"] = (flann_algorithm_t)index_type;
+        bestIndex_ = create_index_by_type<Distance>(dataset_, params, distance_);
+        bestIndex_->loadIndex(stream);
+        int checks;
+        load_value(stream, checks);
+        bestSearchParams_["checks"] = checks;
+    }
 
-public:
+    /**
+     *      Method that searches for nearest-neighbors
+     */
+    virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+    {
+        int checks = get_param(searchParams,"checks",FLANN_CHECKS_AUTOTUNED);
+        if (checks == FLANN_CHECKS_AUTOTUNED) {
+            bestIndex_->findNeighbors(result, vec, bestSearchParams_);
+        }
+        else {
+            bestIndex_->findNeighbors(result, vec, searchParams);
+        }
+    }
 
-    AutotunedIndex(const Matrix<ELEM_TYPE>& inputData, const AutotunedIndexParams& params = AutotunedIndexParams() ) :
-       dataset(inputData), index_params(params)
-       {
-        bestIndex = NULL;
-        bestParams = NULL;
-       }
 
-    virtual ~AutotunedIndex()
+    IndexParams getParameters() const
     {
-       if (bestIndex!=NULL) {
-               delete bestIndex;
-       }
-       if (bestParams!=NULL) {
-               delete bestParams;
-       }
-    };
+        return bestIndex_->getParameters();
+    }
+
+    SearchParams getSearchParameters() const
+    {
+        return bestSearchParams_;
+    }
+
+    float getSpeedup() const
+    {
+        return speedup_;
+    }
 
-    /**
-               Method responsible with building the index.
-       */
-       virtual void buildIndex()
-       {
-               bestParams = estimateBuildParams();
-               logger().info("----------------------------------------------------\n");
-               logger().info("Autotuned parameters:\n");
-               bestParams->print();
-               logger().info("----------------------------------------------------\n");
-       flann_algorithm_t index_type = bestParams->getIndexType();
-       switch (index_type) {
-       case FLANN_INDEX_LINEAR:
-               bestIndex = new LinearIndex<ELEM_TYPE>(dataset, (const LinearIndexParams&)*bestParams);
-               break;
-       case FLANN_INDEX_KDTREE:
-               bestIndex = new KDTreeIndex<ELEM_TYPE>(dataset, (const KDTreeIndexParams&)*bestParams);
-               break;
-       case FLANN_INDEX_KMEANS:
-               bestIndex = new KMeansIndex<ELEM_TYPE>(dataset, (const KMeansIndexParams&)*bestParams);
-               break;
-       default:
-               throw FLANNException("Unknown algorithm choosen by the autotuning, most likely a bug.");
-       }
-               bestIndex->buildIndex();
-               speedup = estimateSearchParams(bestSearchParams);
-       }
 
     /**
-        Saves the index to a stream
-    */
-    virtual void saveIndex(FILE* stream)
+     *      Number of features in this index.
+     */
+    virtual size_t size() const
     {
-       save_value(stream, (int)bestIndex->getType());
-       bestIndex->saveIndex(stream);
-       save_value(stream, bestSearchParams);
+        return bestIndex_->size();
     }
 
     /**
-        Loads the index from a stream
-    */
-    virtual void loadIndex(FILE* stream)
+     *  The length of each vector in this index.
+     */
+    virtual size_t veclen() const
     {
-       int index_type;
-       load_value(stream,index_type);
-       IndexParams* params = ParamsFactory_instance().create((flann_algorithm_t)index_type);
-       bestIndex = create_index_by_type(dataset, *params);
-       bestIndex->loadIndex(stream);
-       load_value(stream, bestSearchParams);
+        return bestIndex_->veclen();
     }
 
-       /**
-               Method that searches for nearest-neighbors
-       */
-       virtual void findNeighbors(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, const SearchParams& searchParams)
-       {
-               if (searchParams.checks==-2) {
-                       bestIndex->findNeighbors(result, vec, bestSearchParams);
-               }
-               else {
-                       bestIndex->findNeighbors(result, vec, searchParams);
-               }
-       }
-
-
-       const IndexParams* getParameters() const
-       {
-               return bestIndex->getParameters();
-       }
-
-
-       /**
-               Number of features in this index.
-       */
-       virtual size_t size() const
-       {
-               return bestIndex->size();
-       }
-
-       /**
-               The length of each vector in this index.
-       */
-       virtual size_t veclen() const
-       {
-               return bestIndex->veclen();
-       }
-
-       /**
-        The amount of memory (in bytes) this index uses.
-       */
-       virtual int usedMemory() const
-       {
-               return bestIndex->usedMemory();
-       }
+    /**
+     * The amount of memory (in bytes) this index uses.
+     */
+    virtual int usedMemory() const
+    {
+        return bestIndex_->usedMemory();
+    }
 
     /**
-    * Algorithm name
-    */
+     * Algorithm name
+     */
     virtual flann_algorithm_t getType() const
     {
-       return FLANN_INDEX_AUTOTUNED;
+        return FLANN_INDEX_AUTOTUNED;
     }
 
 private:
 
-    struct CostData {
+    struct CostData
+    {
         float searchTimeCost;
         float buildTimeCost;
-        float timeCost;
         float memoryCost;
         float totalCost;
+        IndexParams params;
     };
 
-    typedef std::pair<CostData, KDTreeIndexParams> KDTreeCostData;
-    typedef std::pair<CostData, KMeansIndexParams> KMeansCostData;
-
-
-    void evaluate_kmeans(CostData& cost, const KMeansIndexParams& kmeans_params)
+    void evaluate_kmeans(CostData& cost)
     {
         StartStopTimer t;
         int checks;
         const int nn = 1;
 
-        logger().info("KMeansTree using params: max_iterations=%d, branching=%d\n", kmeans_params.iterations, kmeans_params.branching);
-        KMeansIndex<ELEM_TYPE> kmeans(sampledDataset, kmeans_params);
+        Logger::info("KMeansTree using params: max_iterations=%d, branching=%d\n",
+                     get_param<int>(cost.params,"iterations"),
+                     get_param<int>(cost.params,"branching"));
+        KMeansIndex<Distance> kmeans(sampledDataset_, cost.params, distance_);
         // measure index build time
         t.start();
         kmeans.buildIndex();
@@ -244,25 +232,24 @@ private:
         float buildTime = (float)t.value;
 
         // measure search time
-        float searchTime = test_index_precision(kmeans, sampledDataset, testDataset, gt_matches, index_params.target_precision, checks, nn);;
+        float searchTime = test_index_precision(kmeans, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn);
 
-        float datasetMemory = (float)(sampledDataset.rows*sampledDataset.cols*sizeof(float));
-        cost.memoryCost = (kmeans.usedMemory()+datasetMemory)/datasetMemory;
+        float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float));
+        cost.memoryCost = (kmeans.usedMemory() + datasetMemory) / datasetMemory;
         cost.searchTimeCost = searchTime;
         cost.buildTimeCost = buildTime;
-        cost.timeCost = (buildTime*index_params.build_weight+searchTime);
-        logger().info("KMeansTree buildTime=%g, searchTime=%g, timeCost=%g, buildTimeFactor=%g\n",buildTime, searchTime, cost.timeCost, index_params.build_weight);
+        Logger::info("KMeansTree buildTime=%g, searchTime=%g, build_weight=%g\n", buildTime, searchTime, build_weight_);
     }
 
 
-     void evaluate_kdtree(CostData& cost, const KDTreeIndexParams& kdtree_params)
+    void evaluate_kdtree(CostData& cost)
     {
         StartStopTimer t;
         int checks;
         const int nn = 1;
 
-        logger().info("KDTree using params: trees=%d\n",kdtree_params.trees);
-        KDTreeIndex<ELEM_TYPE> kdtree(sampledDataset, kdtree_params);
+        Logger::info("KDTree using params: trees=%d\n", get_param<int>(cost.params,"trees"));
+        KDTreeIndex<Distance> kdtree(sampledDataset_, cost.params, distance_);
 
         t.start();
         kdtree.buildIndex();
@@ -270,267 +257,220 @@ private:
         float buildTime = (float)t.value;
 
         //measure search time
-        float searchTime = test_index_precision(kdtree, sampledDataset, testDataset, gt_matches, index_params.target_precision, checks, nn);
+        float searchTime = test_index_precision(kdtree, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn);
 
-        float datasetMemory = (float)(sampledDataset.rows*sampledDataset.cols*sizeof(float));
-        cost.memoryCost = (kdtree.usedMemory()+datasetMemory)/datasetMemory;
+        float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float));
+        cost.memoryCost = (kdtree.usedMemory() + datasetMemory) / datasetMemory;
         cost.searchTimeCost = searchTime;
         cost.buildTimeCost = buildTime;
-        cost.timeCost = (buildTime*index_params.build_weight+searchTime);
-        logger().info("KDTree buildTime=%g, searchTime=%g, timeCost=%g\n",buildTime, searchTime, cost.timeCost);
+        Logger::info("KDTree buildTime=%g, searchTime=%g\n", buildTime, searchTime);
     }
 
 
-//    struct KMeansSimpleDownhillFunctor {
-//
-//        Autotune& autotuner;
-//        KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {};
-//
-//        float operator()(int* params) {
-//
-//            float maxFloat = numeric_limits<float>::max();
-//
-//            if (params[0]<2) return maxFloat;
-//            if (params[1]<0) return maxFloat;
-//
-//            CostData c;
-//            c.params["algorithm"] = KMEANS;
-//            c.params["centers-init"] = CENTERS_RANDOM;
-//            c.params["branching"] = params[0];
-//            c.params["max-iterations"] = params[1];
-//
-//            autotuner.evaluate_kmeans(c);
-//
-//            return c.timeCost;
-//
-//        }
-//    };
-//
-//    struct KDTreeSimpleDownhillFunctor {
-//
-//        Autotune& autotuner;
-//        KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {};
-//
-//        float operator()(int* params) {
-//            float maxFloat = numeric_limits<float>::max();
-//
-//            if (params[0]<1) return maxFloat;
-//
-//            CostData c;
-//            c.params["algorithm"] = KDTREE;
-//            c.params["trees"] = params[0];
-//
-//            autotuner.evaluate_kdtree(c);
-//
-//            return c.timeCost;
-//
-//        }
-//    };
-
-
-
-    KMeansCostData optimizeKMeans()
+    //    struct KMeansSimpleDownhillFunctor {
+    //
+    //        Autotune& autotuner;
+    //        KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {};
+    //
+    //        float operator()(int* params) {
+    //
+    //            float maxFloat = numeric_limits<float>::max();
+    //
+    //            if (params[0]<2) return maxFloat;
+    //            if (params[1]<0) return maxFloat;
+    //
+    //            CostData c;
+    //            c.params["algorithm"] = KMEANS;
+    //            c.params["centers-init"] = CENTERS_RANDOM;
+    //            c.params["branching"] = params[0];
+    //            c.params["max-iterations"] = params[1];
+    //
+    //            autotuner.evaluate_kmeans(c);
+    //
+    //            return c.timeCost;
+    //
+    //        }
+    //    };
+    //
+    //    struct KDTreeSimpleDownhillFunctor {
+    //
+    //        Autotune& autotuner;
+    //        KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {};
+    //
+    //        float operator()(int* params) {
+    //            float maxFloat = numeric_limits<float>::max();
+    //
+    //            if (params[0]<1) return maxFloat;
+    //
+    //            CostData c;
+    //            c.params["algorithm"] = KDTREE;
+    //            c.params["trees"] = params[0];
+    //
+    //            autotuner.evaluate_kdtree(c);
+    //
+    //            return c.timeCost;
+    //
+    //        }
+    //    };
+
+
+
+    void optimizeKMeans(std::vector<CostData>& costs)
     {
-        logger().info("KMEANS, Step 1: Exploring parameter space\n");
+        Logger::info("KMEANS, Step 1: Exploring parameter space\n");
 
         // explore kmeans parameters space using combinations of the parameters below
         int maxIterations[] = { 1, 5, 10, 15 };
         int branchingFactors[] = { 16, 32, 64, 128, 256 };
 
-        int kmeansParamSpaceSize = ARRAY_LEN(maxIterations)*ARRAY_LEN(branchingFactors);
-
-        std::vector<KMeansCostData> kmeansCosts(kmeansParamSpaceSize);
-
-//        CostData* kmeansCosts = new CostData[kmeansParamSpaceSize];
+        int kmeansParamSpaceSize = FLANN_ARRAY_LEN(maxIterations) * FLANN_ARRAY_LEN(branchingFactors);
+        costs.reserve(costs.size() + kmeansParamSpaceSize);
 
         // evaluate kmeans for all parameter combinations
-        int cnt = 0;
-        for (size_t i=0; i<ARRAY_LEN(maxIterations); ++i) {
-            for (size_t j=0; j<ARRAY_LEN(branchingFactors); ++j) {
-
-               kmeansCosts[cnt].second.centers_init = FLANN_CENTERS_RANDOM;
-               kmeansCosts[cnt].second.iterations = maxIterations[i];
-               kmeansCosts[cnt].second.branching = branchingFactors[j];
-
-                evaluate_kmeans(kmeansCosts[cnt].first, kmeansCosts[cnt].second);
-
-                int k = cnt;
-                // order by time cost
-                while (k>0 && kmeansCosts[k].first.timeCost < kmeansCosts[k-1].first.timeCost) {
-                    swap(kmeansCosts[k],kmeansCosts[k-1]);
-                    --k;
-                }
-                ++cnt;
-            }
-        }
-
-//         logger().info("KMEANS, Step 2: simplex-downhill optimization\n");
-//
-//         const int n = 2;
-//         // choose initial simplex points as the best parameters so far
-//         int kmeansNMPoints[n*(n+1)];
-//         float kmeansVals[n+1];
-//         for (int i=0;i<n+1;++i) {
-//             kmeansNMPoints[i*n] = (int)kmeansCosts[i].params["branching"];
-//             kmeansNMPoints[i*n+1] = (int)kmeansCosts[i].params["max-iterations"];
-//             kmeansVals[i] = kmeansCosts[i].timeCost;
-//         }
-//         KMeansSimpleDownhillFunctor kmeans_cost_func(*this);
-//         // run optimization
-//         optimizeSimplexDownhill(kmeansNMPoints,n,kmeans_cost_func,kmeansVals);
-//         // store results
-//         for (int i=0;i<n+1;++i) {
-//             kmeansCosts[i].params["branching"] = kmeansNMPoints[i*2];
-//             kmeansCosts[i].params["max-iterations"] = kmeansNMPoints[i*2+1];
-//             kmeansCosts[i].timeCost = kmeansVals[i];
-//         }
-
-        float optTimeCost = kmeansCosts[0].first.timeCost;
-        // recompute total costs factoring in the memory costs
-        for (int i=0;i<kmeansParamSpaceSize;++i) {
-            kmeansCosts[i].first.totalCost = (kmeansCosts[i].first.timeCost/optTimeCost + index_params.memory_weight * kmeansCosts[i].first.memoryCost);
-
-            int k = i;
-            while (k>0 && kmeansCosts[k].first.totalCost < kmeansCosts[k-1].first.totalCost) {
-                swap(kmeansCosts[k],kmeansCosts[k-1]);
-                k--;
+        for (size_t i = 0; i < FLANN_ARRAY_LEN(maxIterations); ++i) {
+            for (size_t j = 0; j < FLANN_ARRAY_LEN(branchingFactors); ++j) {
+                CostData cost;
+                cost.params["algorithm"] = FLANN_INDEX_KMEANS;
+                cost.params["centers_init"] = FLANN_CENTERS_RANDOM;
+                cost.params["iterations"] = maxIterations[i];
+                cost.params["branching"] = branchingFactors[j];
+
+                evaluate_kmeans(cost);
+                costs.push_back(cost);
             }
         }
-        // display the costs obtained
-        for (int i=0;i<kmeansParamSpaceSize;++i) {
-            logger().info("KMeans, branching=%d, iterations=%d, time_cost=%g[%g] (build=%g, search=%g), memory_cost=%g, cost=%g\n",
-                kmeansCosts[i].second.branching, kmeansCosts[i].second.iterations,
-            kmeansCosts[i].first.timeCost,kmeansCosts[i].first.timeCost/optTimeCost,
-            kmeansCosts[i].first.buildTimeCost, kmeansCosts[i].first.searchTimeCost,
-            kmeansCosts[i].first.memoryCost,kmeansCosts[i].first.totalCost);
-        }
 
-        return kmeansCosts[0];
+        //         Logger::info("KMEANS, Step 2: simplex-downhill optimization\n");
+        //
+        //         const int n = 2;
+        //         // choose initial simplex points as the best parameters so far
+        //         int kmeansNMPoints[n*(n+1)];
+        //         float kmeansVals[n+1];
+        //         for (int i=0;i<n+1;++i) {
+        //             kmeansNMPoints[i*n] = (int)kmeansCosts[i].params["branching"];
+        //             kmeansNMPoints[i*n+1] = (int)kmeansCosts[i].params["max-iterations"];
+        //             kmeansVals[i] = kmeansCosts[i].timeCost;
+        //         }
+        //         KMeansSimpleDownhillFunctor kmeans_cost_func(*this);
+        //         // run optimization
+        //         optimizeSimplexDownhill(kmeansNMPoints,n,kmeans_cost_func,kmeansVals);
+        //         // store results
+        //         for (int i=0;i<n+1;++i) {
+        //             kmeansCosts[i].params["branching"] = kmeansNMPoints[i*2];
+        //             kmeansCosts[i].params["max-iterations"] = kmeansNMPoints[i*2+1];
+        //             kmeansCosts[i].timeCost = kmeansVals[i];
+        //         }
     }
 
 
-    KDTreeCostData optimizeKDTree()
+    void optimizeKDTree(std::vector<CostData>& costs)
     {
-
-        logger().info("KD-TREE, Step 1: Exploring parameter space\n");
+        Logger::info("KD-TREE, Step 1: Exploring parameter space\n");
 
         // explore kd-tree parameters space using the parameters below
         int testTrees[] = { 1, 4, 8, 16, 32 };
 
-        size_t kdtreeParamSpaceSize = ARRAY_LEN(testTrees);
-        std::vector<KDTreeCostData> kdtreeCosts(kdtreeParamSpaceSize);
-
         // evaluate kdtree for all parameter combinations
-        int cnt = 0;
-        for (size_t i=0; i<ARRAY_LEN(testTrees); ++i) {
-               kdtreeCosts[cnt].second.trees = testTrees[i];
-
-            evaluate_kdtree(kdtreeCosts[cnt].first, kdtreeCosts[cnt].second);
-
-            int k = cnt;
-            // order by time cost
-            while (k>0 && kdtreeCosts[k].first.timeCost < kdtreeCosts[k-1].first.timeCost) {
-                swap(kdtreeCosts[k],kdtreeCosts[k-1]);
-                --k;
-            }
-            ++cnt;
-        }
+        for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) {
+            CostData cost;
+            cost.params["trees"] = testTrees[i];
 
-//         logger().info("KD-TREE, Step 2: simplex-downhill optimization\n");
-//
-//         const int n = 1;
-//         // choose initial simplex points as the best parameters so far
-//         int kdtreeNMPoints[n*(n+1)];
-//         float kdtreeVals[n+1];
-//         for (int i=0;i<n+1;++i) {
-//             kdtreeNMPoints[i] = (int)kdtreeCosts[i].params["trees"];
-//             kdtreeVals[i] = kdtreeCosts[i].timeCost;
-//         }
-//         KDTreeSimpleDownhillFunctor kdtree_cost_func(*this);
-//         // run optimization
-//         optimizeSimplexDownhill(kdtreeNMPoints,n,kdtree_cost_func,kdtreeVals);
-//         // store results
-//         for (int i=0;i<n+1;++i) {
-//             kdtreeCosts[i].params["trees"] = kdtreeNMPoints[i];
-//             kdtreeCosts[i].timeCost = kdtreeVals[i];
-//         }
-
-        float optTimeCost = kdtreeCosts[0].first.timeCost;
-        // recompute costs for kd-tree factoring in memory cost
-        for (size_t i=0;i<kdtreeParamSpaceSize;++i) {
-            kdtreeCosts[i].first.totalCost = (kdtreeCosts[i].first.timeCost/optTimeCost + index_params.memory_weight * kdtreeCosts[i].first.memoryCost);
-
-            int k = (int)i;
-            while (k>0 && kdtreeCosts[k].first.totalCost < kdtreeCosts[k-1].first.totalCost) {
-                swap(kdtreeCosts[k],kdtreeCosts[k-1]);
-                k--;
-            }
-        }
-        // display costs obtained
-        for (size_t i=0;i<kdtreeParamSpaceSize;++i) {
-            logger().info("kd-tree, trees=%d, time_cost=%g[%g] (build=%g, search=%g), memory_cost=%g, cost=%g\n",
-            kdtreeCosts[i].second.trees,kdtreeCosts[i].first.timeCost,kdtreeCosts[i].first.timeCost/optTimeCost,
-            kdtreeCosts[i].first.buildTimeCost, kdtreeCosts[i].first.searchTimeCost,
-            kdtreeCosts[i].first.memoryCost,kdtreeCosts[i].first.totalCost);
+            evaluate_kdtree(cost);
+            costs.push_back(cost);
         }
 
-        return kdtreeCosts[0];
+        //         Logger::info("KD-TREE, Step 2: simplex-downhill optimization\n");
+        //
+        //         const int n = 1;
+        //         // choose initial simplex points as the best parameters so far
+        //         int kdtreeNMPoints[n*(n+1)];
+        //         float kdtreeVals[n+1];
+        //         for (int i=0;i<n+1;++i) {
+        //             kdtreeNMPoints[i] = (int)kdtreeCosts[i].params["trees"];
+        //             kdtreeVals[i] = kdtreeCosts[i].timeCost;
+        //         }
+        //         KDTreeSimpleDownhillFunctor kdtree_cost_func(*this);
+        //         // run optimization
+        //         optimizeSimplexDownhill(kdtreeNMPoints,n,kdtree_cost_func,kdtreeVals);
+        //         // store results
+        //         for (int i=0;i<n+1;++i) {
+        //             kdtreeCosts[i].params["trees"] = kdtreeNMPoints[i];
+        //             kdtreeCosts[i].timeCost = kdtreeVals[i];
+        //         }
     }
 
     /**
-        Chooses the best nearest-neighbor algorithm and estimates the optimal
-        parameters to use when building the index (for a given precision).
-        Returns a dictionary with the optimal parameters.
-    */
-    IndexParams* estimateBuildParams()
+     *  Chooses the best nearest-neighbor algorithm and estimates the optimal
+     *  parameters to use when building the index (for a given precision).
+     *  Returns a dictionary with the optimal parameters.
+     */
+    IndexParams estimateBuildParams()
     {
-        int sampleSize = int(index_params.sample_fraction*dataset.rows);
-        int testSampleSize = std::min(sampleSize/10, 1000);
+        std::vector<CostData> costs;
 
-        logger().info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d\n",dataset.rows, sampleSize, testSampleSize);
+        int sampleSize = int(sample_fraction_ * dataset_.rows);
+        int testSampleSize = std::min(sampleSize / 10, 1000);
+
+        Logger::info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d, target precision: %g\n", dataset_.rows, sampleSize, testSampleSize, target_precision_);
 
         // For a very small dataset, it makes no sense to build any fancy index, just
         // use linear search
-        if (testSampleSize<10) {
-            logger().info("Choosing linear, dataset too small\n");
-            return new LinearIndexParams();
+        if (testSampleSize < 10) {
+            Logger::info("Choosing linear, dataset too small\n");
+            return LinearIndexParams();
         }
 
         // We use a fraction of the original dataset to speedup the autotune algorithm
-        sampledDataset = random_sample(dataset,sampleSize);
+        sampledDataset_ = random_sample(dataset_, sampleSize);
         // We use a cross-validation approach, first we sample a testset from the dataset
-        testDataset = random_sample(sampledDataset,testSampleSize,true);
+        testDataset_ = random_sample(sampledDataset_, testSampleSize, true);
 
         // We compute the ground truth using linear search
-        logger().info("Computing ground truth... \n");
-        gt_matches = Matrix<int>(new int[testDataset.rows],(long)testDataset.rows, 1);
+        Logger::info("Computing ground truth... \n");
+        gt_matches_ = Matrix<int>(new int[testDataset_.rows], testDataset_.rows, 1);
         StartStopTimer t;
         t.start();
-        compute_ground_truth(sampledDataset, testDataset, gt_matches, 0);
+        compute_ground_truth<Distance>(sampledDataset_, testDataset_, gt_matches_, 0, distance_);
         t.stop();
-        float bestCost = (float)t.value;
-        IndexParams* bestParams = new LinearIndexParams();
+
+        CostData linear_cost;
+        linear_cost.searchTimeCost = (float)t.value;
+        linear_cost.buildTimeCost = 0;
+        linear_cost.memoryCost = 0;
+        linear_cost.params["algorithm"] = FLANN_INDEX_LINEAR;
+
+        costs.push_back(linear_cost);
 
         // Start parameter autotune process
-        logger().info("Autotuning parameters...\n");
+        Logger::info("Autotuning parameters...\n");
 
+        optimizeKMeans(costs);
+        optimizeKDTree(costs);
 
-        KMeansCostData kmeansCost = optimizeKMeans();
-        if (kmeansCost.first.totalCost<bestCost) {
-            bestParams = new KMeansIndexParams(kmeansCost.second);
-            bestCost = kmeansCost.first.totalCost;
+        float bestTimeCost = costs[0].searchTimeCost;
+        for (size_t i = 0; i < costs.size(); ++i) {
+            float timeCost = costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost;
+            if (timeCost < bestTimeCost) {
+                bestTimeCost = timeCost;
+            }
         }
 
-        KDTreeCostData kdtreeCost = optimizeKDTree();
-
-        if (kdtreeCost.first.totalCost<bestCost) {
-            bestParams = new KDTreeIndexParams(kdtreeCost.second);
-            bestCost = kdtreeCost.first.totalCost;
+        float bestCost = costs[0].searchTimeCost / bestTimeCost;
+        IndexParams bestParams = costs[0].params;
+        if (bestTimeCost > 0) {
+            for (size_t i = 0; i < costs.size(); ++i) {
+                float crtCost = (costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost) / bestTimeCost +
+                                memory_weight_ * costs[i].memoryCost;
+                if (crtCost < bestCost) {
+                    bestCost = crtCost;
+                    bestParams = costs[i].params;
+                }
+            }
         }
 
-        gt_matches.release();
-        sampledDataset.release();
-        testDataset.release();
+        delete[] gt_matches_.data;
+        delete[] testDataset_.data;
+        delete[] sampledDataset_.data;
 
         return bestParams;
     }
@@ -538,48 +478,48 @@ private:
 
 
     /**
-        Estimates the search time parameters needed to get the desired precision.
-        Precondition: the index is built
-        Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search.
-    */
+     *  Estimates the search time parameters needed to get the desired precision.
+     *  Precondition: the index is built
+     *  Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search.
+     */
     float estimateSearchParams(SearchParams& searchParams)
     {
         const int nn = 1;
         const size_t SAMPLE_COUNT = 1000;
 
-        assert(bestIndex!=NULL);   // must have a valid index
+        assert(bestIndex_ != NULL); // must have a valid index
 
         float speedup = 0;
 
-        int samples = (int)std::min(dataset.rows/10, SAMPLE_COUNT);
-        if (samples>0) {
-            Matrix<ELEM_TYPE> testDataset = random_sample(dataset,samples);
+        int samples = (int)std::min(dataset_.rows / 10, SAMPLE_COUNT);
+        if (samples > 0) {
+            Matrix<ElementType> testDataset = random_sample(dataset_, samples);
 
-            logger().info("Computing ground truth\n");
+            Logger::info("Computing ground truth\n");
 
             // we need to compute the ground truth first
-            Matrix<int> gt_matches(new int[testDataset.rows],(long)testDataset.rows,1);
+            Matrix<int> gt_matches(new int[testDataset.rows], testDataset.rows, 1);
             StartStopTimer t;
             t.start();
-            compute_ground_truth(dataset, testDataset, gt_matches,1);
+            compute_ground_truth<Distance>(dataset_, testDataset, gt_matches, 1, distance_);
             t.stop();
             float linear = (float)t.value;
 
             int checks;
-            logger().info("Estimating number of checks\n");
+            Logger::info("Estimating number of checks\n");
 
             float searchTime;
             float cb_index;
-            if (bestIndex->getType() == FLANN_INDEX_KMEANS) {
-                logger().info("KMeans algorithm, estimating cluster border factor\n");
-                KMeansIndex<ELEM_TYPE>* kmeans = (KMeansIndex<ELEM_TYPE>*)bestIndex;
+            if (bestIndex_->getType() == FLANN_INDEX_KMEANS) {
+                Logger::info("KMeans algorithm, estimating cluster border factor\n");
+                KMeansIndex<Distance>* kmeans = (KMeansIndex<Distance>*)bestIndex_;
                 float bestSearchTime = -1;
                 float best_cb_index = -1;
                 int best_checks = -1;
-                for (cb_index = 0;cb_index<1.1f; cb_index+=0.2f) {
+                for (cb_index = 0; cb_index < 1.1f; cb_index += 0.2f) {
                     kmeans->set_cb_index(cb_index);
-                    searchTime = test_index_precision(*kmeans, dataset, testDataset, gt_matches, index_params.target_precision, checks, nn, 1);
-                    if (searchTime<bestSearchTime || bestSearchTime == -1) {
+                    searchTime = test_index_precision(*kmeans, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1);
+                    if ((searchTime < bestSearchTime) || (bestSearchTime == -1)) {
                         bestSearchTime = searchTime;
                         best_cb_index = cb_index;
                         best_checks = checks;
@@ -590,26 +530,54 @@ private:
                 checks = best_checks;
 
                 kmeans->set_cb_index(best_cb_index);
-                logger().info("Optimum cb_index: %g\n",cb_index);
-                ((KMeansIndexParams*)bestParams)->cb_index = cb_index;
+                Logger::info("Optimum cb_index: %g\n", cb_index);
+                bestParams_["cb_index"] = cb_index;
             }
             else {
-                searchTime = test_index_precision(*bestIndex, dataset, testDataset, gt_matches, index_params.target_precision, checks, nn, 1);
+                searchTime = test_index_precision(*bestIndex_, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1);
             }
 
-            logger().info("Required number of checks: %d \n",checks);;
-            searchParams.checks = checks;
+            Logger::info("Required number of checks: %d \n", checks);
+            searchParams["checks"] = checks;
 
-            speedup = linear/searchTime;
+            speedup = linear / searchTime;
 
-            gt_matches.release();
+            delete[] gt_matches.data;
+            delete[] testDataset.data;
         }
 
         return speedup;
     }
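
[Editorial note, not part of the patch] To make the returned value concrete: the speedup is simply the linear-scan time over the sampled queries divided by the tuned index's search time at the requested precision. If the ground-truth linear pass over the (at most 1000) sampled queries takes 2.0 s and the index reaches the target precision on the same queries in 0.05 s with the estimated number of checks, that count is stored under "checks" and the function returns 2.0 / 0.05 = 40.
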
 
-};
+private:
+    NNIndex<Distance>* bestIndex_;
+
+    IndexParams bestParams_;
+    SearchParams bestSearchParams_;
 
-} // namespace cvflann
+    Matrix<ElementType> sampledDataset_;
+    Matrix<ElementType> testDataset_;
+    Matrix<int> gt_matches_;
+
+    float speedup_;
+
+    /**
+     * The dataset used by this index
+     */
+    const Matrix<ElementType> dataset_;
+
+    /**
+     * Index parameters
+     */
+    float target_precision_;
+    float build_weight_;
+    float memory_weight_;
+    float sample_fraction_;
+
+    Distance distance_;
+
+
+};
+}
 
-#endif /* _OPENCV_AUTOTUNEDINDEX_H_ */
+#endif /* OPENCV_FLANN_AUTOTUNED_INDEX_H_ */
index 7738bf6..527ca1a 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_COMPOSITETREE_H_
-#define _OPENCV_COMPOSITETREE_H_
+#ifndef OPENCV_FLANN_COMPOSITE_INDEX_H_
+#define OPENCV_FLANN_COMPOSITE_INDEX_H_
 
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/nn_index.h"
+#include "general.h"
+#include "nn_index.h"
+#include "kdtree_index.h"
+#include "kmeans_index.h"
 
 namespace cvflann
 {
 
-
-struct CompositeIndexParams : public IndexParams {
-       CompositeIndexParams(int trees_ = 4, int branching_ = 32, int iterations_ = 11,
-                       flann_centers_init_t centers_init_ = FLANN_CENTERS_RANDOM, float cb_index_ = 0.2 ) :
-               IndexParams(FLANN_INDEX_COMPOSITE),
-               trees(trees_),
-               branching(branching_),
-               iterations(iterations_),
-               centers_init(centers_init_),
-               cb_index(cb_index_) {};
-
-       int trees;                 // number of randomized trees to use (for kdtree)
-       int branching;             // branching factor (for kmeans tree)
-       int iterations;            // max iterations to perform in one kmeans clustering (kmeans tree)
-       flann_centers_init_t centers_init;          // algorithm used for picking the initial cluster centers for kmeans tree
-    float cb_index;            // cluster boundary index. Used when searching the kmeans tree
-
-       void print() const
-       {
-               logger().info("Index type: %d\n",(int)algorithm);
-               logger().info("Trees: %d\n", trees);
-               logger().info("Branching: %d\n", branching);
-               logger().info("Iterations: %d\n", iterations);
-               logger().info("Centres initialisation: %d\n", centers_init);
-               logger().info("Cluster boundary weight: %g\n", cb_index);
-       }
+/**
+ * Index parameters for the CompositeIndex.
+ */
+struct CompositeIndexParams : public IndexParams
+{
+    CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11,
+                         flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 )
+    {
+        (*this)["algorithm"] = FLANN_INDEX_KMEANS;
+        // number of randomized trees to use (for kdtree)
+        (*this)["trees"] = trees;
+        // branching factor
+        (*this)["branching"] = branching;
+        // max iterations to perform in one kmeans clustering (kmeans tree)
+        (*this)["iterations"] = iterations;
+        // algorithm used for picking the initial cluster centers for kmeans tree
+        (*this)["centers_init"] = centers_init;
+        // cluster boundary index. Used when searching the kmeans tree
+        (*this)["cb_index"] = cb_index;
+    }
 };
 
 
-
-template <typename ELEM_TYPE, typename DIST_TYPE = typename DistType<ELEM_TYPE>::type >
-class CompositeIndex : public NNIndex<ELEM_TYPE>
+/**
+ * This index builds a kd-tree index and a k-means index and performs nearest
+ * neighbour search in both indexes. This gives a slight boost in search performance
+ * as some of the neighbours that are missed by one index are found by the other.
+ */
+template <typename Distance>
+class CompositeIndex : public NNIndex<Distance>
 {
-       KMeansIndex<ELEM_TYPE, DIST_TYPE>* kmeans;
-       KDTreeIndex<ELEM_TYPE, DIST_TYPE>* kdtree;
-
-    const Matrix<ELEM_TYPE> dataset;
-
-    const IndexParams& index_params;
-
-       CompositeIndex& operator=(const CompositeIndex&);
-       CompositeIndex(const CompositeIndex&);
 public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
+
+    /**
+     * Index constructor
+     * @param inputData dataset containing the points to index
+     * @param params Index parameters
+     * @param d Distance functor
+     * @return
+     */
+    CompositeIndex(const Matrix<ElementType>& inputData, const IndexParams& params = CompositeIndexParams(),
+                   Distance d = Distance()) : index_params_(params)
+    {
+        kdtree_index_ = new KDTreeIndex<Distance>(inputData, params, d);
+        kmeans_index_ = new KMeansIndex<Distance>(inputData, params, d);
 
-       CompositeIndex(const Matrix<ELEM_TYPE>& inputData, const CompositeIndexParams& params = CompositeIndexParams() ) :
-               dataset(inputData), index_params(params)
-       {
-               KDTreeIndexParams kdtree_params(params.trees);
-               KMeansIndexParams kmeans_params(params.branching, params.iterations, params.centers_init, params.cb_index);
-
-               kdtree = new KDTreeIndex<ELEM_TYPE, DIST_TYPE>(inputData,kdtree_params);
-               kmeans = new KMeansIndex<ELEM_TYPE, DIST_TYPE>(inputData,kmeans_params);
-
-       }
+    }
 
-       virtual ~CompositeIndex()
-       {
-               delete kdtree;
-               delete kmeans;
-       }
+    CompositeIndex(const CompositeIndex&);
+    CompositeIndex& operator=(const CompositeIndex&);
 
+    virtual ~CompositeIndex()
+    {
+        delete kdtree_index_;
+        delete kmeans_index_;
+    }
 
+    /**
+     * @return The index type
+     */
     flann_algorithm_t getType() const
     {
         return FLANN_INDEX_COMPOSITE;
     }
 
-
+    /**
+     * @return Size of the index
+     */
     size_t size() const
-       {
-               return dataset.rows;
-       }
-
-       size_t veclen() const
-       {
-               return dataset.cols;
-       }
-
+    {
+        return kdtree_index_->size();
+    }
 
-       int usedMemory() const
-       {
-               return kmeans->usedMemory()+kdtree->usedMemory();
-       }
+    /**
+     * \returns The dimensionality of the features in this index.
+     */
+    size_t veclen() const
+    {
+        return kdtree_index_->veclen();
+    }
 
-       void buildIndex()
-       {
-               logger().info("Building kmeans tree...\n");
-               kmeans->buildIndex();
-               logger().info("Building kdtree tree...\n");
-               kdtree->buildIndex();
-       }
+    /**
+     * \returns The amount of memory (in bytes) used by the index.
+     */
+    int usedMemory() const
+    {
+        return kmeans_index_->usedMemory() + kdtree_index_->usedMemory();
+    }
 
+    /**
+     * \brief Builds the index
+     */
+    void buildIndex()
+    {
+        Logger::info("Building kmeans tree...\n");
+        kmeans_index_->buildIndex();
+        Logger::info("Building kdtree tree...\n");
+        kdtree_index_->buildIndex();
+    }
 
+    /**
+     * \brief Saves the index to a stream
+     * \param stream The stream to save the index to
+     */
     void saveIndex(FILE* stream)
     {
-       kmeans->saveIndex(stream);
-       kdtree->saveIndex(stream);
+        kmeans_index_->saveIndex(stream);
+        kdtree_index_->saveIndex(stream);
     }
 
-
+    /**
+     * \brief Loads the index from a stream
+     * \param stream The stream from which the index is loaded
+     */
     void loadIndex(FILE* stream)
     {
-       kmeans->loadIndex(stream);
-       kdtree->loadIndex(stream);
+        kmeans_index_->loadIndex(stream);
+        kdtree_index_->loadIndex(stream);
     }
 
-       void findNeighbors(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, const SearchParams& searchParams)
-       {
-               kmeans->findNeighbors(result,vec,searchParams);
-               kdtree->findNeighbors(result,vec,searchParams);
-       }
+    /**
+     * \returns The index parameters
+     */
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
+
+    /**
+     * \brief Method that searches for nearest-neighbours
+     */
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+    {
+        kmeans_index_->findNeighbors(result, vec, searchParams);
+        kdtree_index_->findNeighbors(result, vec, searchParams);
+    }
 
-       const IndexParams* getParameters() const
-       {
-               return &index_params;
-       }
+private:
+    /** The k-means index */
+    KMeansIndex<Distance>* kmeans_index_;
 
+    /** The kd-tree index */
+    KDTreeIndex<Distance>* kdtree_index_;
 
+    /** The index parameters */
+    const IndexParams index_params_;
 };
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_COMPOSITETREE_H_
+#endif //OPENCV_FLANN_COMPOSITE_INDEX_H_
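
[Editorial note, not part of the patch] A minimal usage sketch of the rewritten class; the float data pointer and its dimensions are assumed to come from the caller:

    #include "opencv2/flann/composite_index.h"

    using namespace cvflann;

    void buildCompositeExample(float* data, size_t rows, size_t cols)
    {
        // Matrix is a lightweight wrapper; it does not take ownership of the data
        Matrix<float> dataset(data, rows, cols);

        // 4 randomized kd-trees plus a kmeans tree with branching factor 32
        CompositeIndexParams params(4, 32, 11, FLANN_CENTERS_RANDOM, 0.2f);
        CompositeIndex<L2<float> > index(dataset, params);
        index.buildIndex();   // builds the kmeans tree first, then the kd-trees

        Logger::info("index uses %d bytes for %d points of dimension %d\n",
                     index.usedMemory(), (int)index.size(), (int)index.veclen());
    }
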
diff --git a/modules/flann/include/opencv2/flann/config.h b/modules/flann/include/opencv2/flann/config.h
new file mode 100644 (file)
index 0000000..ca6138d
--- /dev/null
@@ -0,0 +1,35 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_CONFIG_H_
+#define OPENCV_FLANN_CONFIG_H_
+
+#define FLANN_VERSION "1.6.10"
+
+#endif /* OPENCV_FLANN_CONFIG_H_ */
diff --git a/modules/flann/include/opencv2/flann/defines.h b/modules/flann/include/opencv2/flann/defines.h
new file mode 100644 (file)
index 0000000..d1a0af8
--- /dev/null
@@ -0,0 +1,160 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_DEFINES_H_
+#define OPENCV_FLANN_DEFINES_H_
+
+#include "config.h"
+
+#ifdef WIN32
+/* win32 dll export/import directives */
+ #ifdef FLANN_EXPORTS
+  #define FLANN_EXPORT __declspec(dllexport)
+ #elif defined(FLANN_STATIC)
+  #define FLANN_EXPORT
+ #else
+  #define FLANN_EXPORT __declspec(dllimport)
+ #endif
+#else
+/* unix needs nothing */
+ #define FLANN_EXPORT
+#endif
+
+
+#ifdef __GNUC__
+#define FLANN_DEPRECATED __attribute__ ((deprecated))
+#elif defined(_MSC_VER)
+#define FLANN_DEPRECATED  __declspec(deprecated)
+#else
+#pragma message("WARNING: You need to implement FLANN_DEPRECATED for this compiler")
+#define FLANN_DEPRECATED
+#endif
+
+
+#if __amd64__ || __x86_64__ || _WIN64 || _M_X64
+#define FLANN_PLATFORM_64_BIT
+#else
+#define FLANN_PLATFORM_32_BIT
+#endif
+
+
+#define FLANN_ARRAY_LEN(a) (sizeof(a)/sizeof(a[0]))
+
+/* Nearest neighbour index algorithms */
+enum flann_algorithm_t
+{
+    FLANN_INDEX_LINEAR = 0,
+    FLANN_INDEX_KDTREE = 1,
+    FLANN_INDEX_KMEANS = 2,
+    FLANN_INDEX_COMPOSITE = 3,
+    FLANN_INDEX_KDTREE_SINGLE = 4,
+    FLANN_INDEX_HIERARCHICAL = 5,
+    FLANN_INDEX_LSH = 6,
+    FLANN_INDEX_SAVED = 254,
+    FLANN_INDEX_AUTOTUNED = 255,
+
+    // deprecated constants, should use the FLANN_INDEX_* ones instead
+    LINEAR = 0,
+    KDTREE = 1,
+    KMEANS = 2,
+    COMPOSITE = 3,
+    KDTREE_SINGLE = 4,
+    SAVED = 254,
+    AUTOTUNED = 255
+};
+
+
+
+enum flann_centers_init_t
+{
+    FLANN_CENTERS_RANDOM = 0,
+    FLANN_CENTERS_GONZALES = 1,
+    FLANN_CENTERS_KMEANSPP = 2,
+
+    // deprecated constants, should use the FLANN_CENTERS_* ones instead
+    CENTERS_RANDOM = 0,
+    CENTERS_GONZALES = 1,
+    CENTERS_KMEANSPP = 2
+};
+
+enum flann_log_level_t
+{
+    FLANN_LOG_NONE = 0,
+    FLANN_LOG_FATAL = 1,
+    FLANN_LOG_ERROR = 2,
+    FLANN_LOG_WARN = 3,
+    FLANN_LOG_INFO = 4,
+};
+
+enum flann_distance_t
+{
+    FLANN_DIST_EUCLIDEAN = 1,
+    FLANN_DIST_L2 = 1,
+    FLANN_DIST_MANHATTAN = 2,
+    FLANN_DIST_L1 = 2,
+    FLANN_DIST_MINKOWSKI = 3,
+    FLANN_DIST_MAX   = 4,
+    FLANN_DIST_HIST_INTERSECT   = 5,
+    FLANN_DIST_HELLINGER = 6,
+    FLANN_DIST_CHI_SQUARE = 7,
+    FLANN_DIST_CS         = 7,
+    FLANN_DIST_KULLBACK_LEIBLER  = 8,
+    FLANN_DIST_KL                = 8,
+
+    // deprecated constants, should use the FLANN_DIST_* ones instead
+    EUCLIDEAN = 1,
+    MANHATTAN = 2,
+    MINKOWSKI = 3,
+    MAX_DIST   = 4,
+    HIST_INTERSECT   = 5,
+    HELLINGER = 6,
+    CS         = 7,
+    KL         = 8,
+    KULLBACK_LEIBLER  = 8
+};
+
+enum flann_datatype_t
+{
+    FLANN_INT8 = 0,
+    FLANN_INT16 = 1,
+    FLANN_INT32 = 2,
+    FLANN_INT64 = 3,
+    FLANN_UINT8 = 4,
+    FLANN_UINT16 = 5,
+    FLANN_UINT32 = 6,
+    FLANN_UINT64 = 7,
+    FLANN_FLOAT32 = 8,
+    FLANN_FLOAT64 = 9
+};
+
+const int FLANN_CHECKS_UNLIMITED = -1;
+const int FLANN_CHECKS_AUTOTUNED = -2;
+
+
+#endif /* OPENCV_FLANN_DEFINES_H_ */
index 2ddbee3..dcb4b5d 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_DIST_H_
-#define _OPENCV_DIST_H_
+#ifndef OPENCV_FLANN_DIST_H_
+#define OPENCV_FLANN_DIST_H_
 
 #include <cmath>
+#include <cstdlib>
+#include <string.h>
+#include <stdint.h>
+
+#include "defines.h"
 
-#include "opencv2/flann/general.h"
 
 namespace cvflann
 {
 
-/**
- * Distance function by default set to the custom distance
- * function. This can be set to a specific distance function
- * for further efficiency.
- */
-#define flann_dist custom_dist
-//#define flann_dist euclidean_dist
+template<typename T>
+inline T abs(T x) { return (x<0) ? -x : x; }
+
+template<>
+inline int abs<int>(int x) { return ::abs(x); }
+
+template<>
+inline float abs<float>(float x) { return fabsf(x); }
+
+template<>
+inline double abs<double>(double x) { return fabs(x); }
+
+template<>
+inline long double abs<long double>(long double x) { return fabsl(x); }
+
+
+template<typename T>
+struct Accumulator { typedef T Type; };
+template<>
+struct Accumulator<unsigned char>  { typedef float Type; };
+template<>
+struct Accumulator<unsigned short> { typedef float Type; };
+template<>
+struct Accumulator<unsigned int> { typedef float Type; };
+template<>
+struct Accumulator<char>   { typedef float Type; };
+template<>
+struct Accumulator<short>  { typedef float Type; };
+template<>
+struct Accumulator<int> { typedef float Type; };
+
+
+class True
+{
+};
+
+class False
+{
+};
 
 
 /**
- *  Compute the squared Euclidean distance between two vectors.
- *
- *     This is highly optimised, with loop unrolling, as it is one
- *     of the most expensive inner loops.
+ * Squared Euclidean distance functor.
  *
- *     The computation of squared root at the end is omitted for
- *     efficiency.
+ * This is the simpler, non-unrolled version, preferable for
+ * very low dimensionality data (e.g. 3D points).
  */
-template <typename Iterator1, typename Iterator2>
-double euclidean_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+template<class T>
+struct L2_Simple
 {
-       double distsq = acc;
-       double diff0, diff1, diff2, diff3;
-       Iterator1 lastgroup = last1 - 3;
-
-       /* Process 4 items with each loop for efficiency. */
-       while (first1 < lastgroup) {
-               diff0 = first1[0] - first2[0];
-               diff1 = first1[1] - first2[1];
-               diff2 = first1[2] - first2[2];
-               diff3 = first1[3] - first2[3];
-               distsq += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
-               first1 += 4;
-               first2 += 4;
-       }
-       /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
-       while (first1 < last1) {
-               diff0 = *first1++ - *first2++;
-               distsq += diff0 * diff0;
-       }
-       return distsq;
-}
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType diff;
+        for(size_t i = 0; i < size; ++i ) {
+            diff = *a++ - *b++;
+            result += diff*diff;
+        }
+        return result;
+    }
+
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        return (a-b)*(a-b);
+    }
+};
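
[Editorial note, not part of the patch] Every distance in this header follows the same functor interface: operator() takes two iterators and an element count and returns the accumulated distance (squared for the L2 variants; the square root is never taken). A short sketch:

    #include "opencv2/flann/dist.h"

    float squaredL2Demo()
    {
        float a[3] = { 1.0f, 2.0f, 3.0f };
        float b[3] = { 1.0f, 0.0f, 5.0f };
        cvflann::L2_Simple<float> dist;
        // (1-1)^2 + (2-0)^2 + (3-5)^2 = 8
        return dist(a, b, 3);
    }
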
 
-CV_EXPORTS double euclidean_dist(const unsigned char* first1, const unsigned char* last1, unsigned char* first2, double acc);
 
 
 /**
- *  Compute the Manhattan (L_1) distance between two vectors.
- *
- *     This is highly optimised, with loop unrolling, as it is one
- *     of the most expensive inner loops.
+ * Squared Euclidean distance functor, optimized version
  */
-template <typename Iterator1, typename Iterator2>
-double manhattan_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+template<class T>
+struct L2
 {
-       double distsq = acc;
-       double diff0, diff1, diff2, diff3;
-       Iterator1 lastgroup = last1 - 3;
-
-       /* Process 4 items with each loop for efficiency. */
-       while (first1 < lastgroup) {
-               diff0 = fabs(first1[0] - first2[0]);
-               diff1 = fabs(first1[1] - first2[1]);
-               diff2 = fabs(first1[2] - first2[2]);
-               diff3 = fabs(first1[3] - first2[3]);
-               distsq += diff0 + diff1 + diff2  + diff3;
-               first1 += 4;
-               first2 += 4;
-       }
-       /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
-       while (first1 < last1) {
-               diff0 = fabs(*first1++ - *first2++);
-               distsq += diff0;
-       }
-       return distsq;
-}
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    /**
+     *  Compute the squared Euclidean distance between two vectors.
+     *
+     * This is highly optimised, with loop unrolling, as it is one
+     * of the most expensive inner loops.
+     *
+     * The computation of squared root at the end is omitted for
+     * efficiency.
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType diff0, diff1, diff2, diff3;
+        Iterator1 last = a + size;
+        Iterator1 lastgroup = last - 3;
+
+        /* Process 4 items with each loop for efficiency. */
+        while (a < lastgroup) {
+            diff0 = a[0] - b[0];
+            diff1 = a[1] - b[1];
+            diff2 = a[2] - b[2];
+            diff3 = a[3] - b[3];
+            result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
+            a += 4;
+            b += 4;
+
+            if ((worst_dist>0)&&(result>worst_dist)) {
+                return result;
+            }
+        }
+        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
+        while (a < last) {
+            diff0 = *a++ - *b++;
+            result += diff0 * diff0;
+        }
+        return result;
+    }
+
+    /**
+     * Partial euclidean distance, using just one dimension. This is used by the
+     * kd-tree when computing partial distances while traversing the tree.
+     *
+     * Squared root is omitted for efficiency.
+     */
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        return (a-b)*(a-b);
+    }
+};
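
[Editorial note, not part of the patch] The worst_dist argument is what lets callers abandon a computation early: once the running sum exceeds the best distance seen so far, the exact value no longer matters (the check happens every four dimensions in the unrolled loop). A sketch of that pattern; the query/candidate layout is hypothetical:

    #include <limits>
    #include "opencv2/flann/dist.h"

    size_t nearestWithEarlyExit(const float* query, float* const* candidates,
                                size_t numCandidates, size_t dim)
    {
        cvflann::L2<float> dist;
        float best = std::numeric_limits<float>::max();
        size_t bestIdx = 0;
        for (size_t i = 0; i < numCandidates; ++i) {
            // passing the current best lets operator() return as soon as the
            // partial sum already exceeds it
            float d = dist(query, candidates[i], dim, best);
            if (d < best) { best = d; bestIdx = i; }
        }
        return bestIdx;
    }
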
 
 
-CV_EXPORTS int flann_minkowski_order();
-/**
- *  Compute the Minkowski (L_p) distance between two vectors.
- *
- *     This is highly optimised, with loop unrolling, as it is one
- *     of the most expensive inner loops.
- *
- *     The computation of squared root at the end is omitted for
- *     efficiency.
+/*
+ * Manhattan distance functor, optimized version
  */
-template <typename Iterator1, typename Iterator2>
-double minkowski_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+template<class T>
+struct L1
 {
-       double distsq = acc;
-       double diff0, diff1, diff2, diff3;
-       Iterator1 lastgroup = last1 - 3;
-
-       int p = flann_minkowski_order();
-
-       /* Process 4 items with each loop for efficiency. */
-       while (first1 < lastgroup) {
-               diff0 = fabs(first1[0] - first2[0]);
-               diff1 = fabs(first1[1] - first2[1]);
-               diff2 = fabs(first1[2] - first2[2]);
-               diff3 = fabs(first1[3] - first2[3]);
-               distsq += pow(diff0,p) + pow(diff1,p) + pow(diff2,p)  + pow(diff3,p);
-               first1 += 4;
-               first2 += 4;
-       }
-       /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
-       while (first1 < last1) {
-               diff0 = fabs(*first1++ - *first2++);
-               distsq += pow(diff0,p);
-       }
-       return distsq;
-}
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    /**
+     *  Compute the Manhattan (L_1) distance between two vectors.
+     *
+     * This is highly optimised, with loop unrolling, as it is one
+     * of the most expensive inner loops.
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType diff0, diff1, diff2, diff3;
+        Iterator1 last = a + size;
+        Iterator1 lastgroup = last - 3;
+
+        /* Process 4 items with each loop for efficiency. */
+        while (a < lastgroup) {
+            diff0 = abs(a[0] - b[0]);
+            diff1 = abs(a[1] - b[1]);
+            diff2 = abs(a[2] - b[2]);
+            diff3 = abs(a[3] - b[3]);
+            result += diff0 + diff1 + diff2 + diff3;
+            a += 4;
+            b += 4;
+
+            if ((worst_dist>0)&&(result>worst_dist)) {
+                return result;
+            }
+        }
+        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
+        while (a < last) {
+            diff0 = abs(*a++ - *b++);
+            result += diff0;
+        }
+        return result;
+    }
+
+    /**
+     * Partial distance, used by the kd-tree.
+     */
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        return abs(a-b);
+    }
+};
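
[Editorial note, not part of the patch] The accum_dist member above is the per-dimension hook the kd-tree uses when it updates a partial distance after crossing a splitting plane; summing it over all dimensions reproduces what operator() computes. A small sketch:

    #include "opencv2/flann/dist.h"

    void l1AccumDemo()
    {
        float a[4] = { 1.0f, 5.0f, 2.0f, 0.0f };
        float b[4] = { 4.0f, 5.0f, 0.0f, 1.0f };
        cvflann::L1<float> dist;

        float full = dist(a, b, 4);                    // |1-4| + |5-5| + |2-0| + |0-1| = 6
        float byDim = 0;
        for (int i = 0; i < 4; ++i) {
            byDim += dist.accum_dist(a[i], b[i], i);   // accumulates the same 6
        }
        // full == byDim for any dimension-wise additive ("is_kdtree_distance") functor
    }
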
+
 
 
-// L_infinity distance (NOT A VALID KD-TREE DISTANCE - NOT DIMENSIONWISE ADDITIVE)
-template <typename Iterator1, typename Iterator2>
-double max_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+template<class T>
+struct MinkowskiDistance
 {
-       double dist = acc;
-       Iterator1 lastgroup = last1 - 3;
-       double diff0, diff1, diff2, diff3;
-
-       /* Process 4 items with each loop for efficiency. */
-       while (first1 < lastgroup) {
-               diff0 = fabs(first1[0] - first2[0]);
-               diff1 = fabs(first1[1] - first2[1]);
-               diff2 = fabs(first1[2] - first2[2]);
-               diff3 = fabs(first1[3] - first2[3]);
-               if (diff0 > dist) dist = diff0;
-               if (diff1 > dist) dist = diff1;
-               if (diff2 > dist) dist = diff2;
-               if (diff3 > dist) dist = diff3;
-               first1 += 4;
-               first2 += 4;
-       }
-       /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
-       while (first1 < last1) {
-               diff0 = fabs(*first1++ - *first2++);
-               dist = (diff0 > dist) ? diff0 : dist;
-       }
-       return dist;
-}
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    int order;
+
+    MinkowskiDistance(int order_) : order(order_) {}
+
+    /**
+     *  Compute the Minkowski (L_p) distance between two vectors.
+     *
+     * This is highly optimised, with loop unrolling, as it is one
+     * of the most expensive inner loops.
+     *
+     * The computation of squared root at the end is omitted for
+     * efficiency.
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType diff0, diff1, diff2, diff3;
+        Iterator1 last = a + size;
+        Iterator1 lastgroup = last - 3;
+
+        /* Process 4 items with each loop for efficiency. */
+        while (a < lastgroup) {
+            diff0 = abs(a[0] - b[0]);
+            diff1 = abs(a[1] - b[1]);
+            diff2 = abs(a[2] - b[2]);
+            diff3 = abs(a[3] - b[3]);
+            result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order);
+            a += 4;
+            b += 4;
+
+            if ((worst_dist>0)&&(result>worst_dist)) {
+                return result;
+            }
+        }
+        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
+        while (a < last) {
+            diff0 = abs(*a++ - *b++);
+            result += pow(diff0,order);
+        }
+        return result;
+    }
+
+    /**
+     * Partial distance, used by the kd-tree.
+     */
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        return pow(static_cast<ResultType>(abs(a-b)),order);
+    }
+};
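
[Editorial note, not part of the patch] Unlike the functors above, this one carries state (the order p), so it is constructed with an explicit argument; the same pattern applies when passing a distance object into an index constructor, which otherwise default-constructs it. A short sketch:

    #include "opencv2/flann/dist.h"

    float minkowskiDemo()
    {
        float a[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
        float b[4] = { 1.0f, 2.0f, 0.0f, 0.0f };
        cvflann::MinkowskiDistance<float> dist(3);   // L_3 distance
        // |1|^3 + |2|^3 = 9; as with L2, the final root is omitted
        return dist(a, b, 4);
    }
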
 
 
-template <typename Iterator1, typename Iterator2>
-double hist_intersection_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2)
+
+template<class T>
+struct MaxDistance
 {
-       double kernel = 0;
-       Iterator1 lastgroup = last1 - 3;
-       double min0, min1, min2, min3;
-
-       /* Process 4 items with each loop for efficiency. */
-       while (first1 < lastgroup) {
-               min0 = first1[0] < first2[0] ? first1[0] : first2[0];
-               min1 = first1[1] < first2[1] ? first1[1] : first2[1];
-               min2 = first1[2] < first2[2] ? first1[2] : first2[2];
-               min3 = first1[3] < first2[3] ? first1[3] : first2[3];
-               kernel += min0 + min1 + min2 + min3;
-               first1 += 4;
-               first2 += 4;
-       }
-       /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
-       while (first1 < last1) {
-               min0 = first1[0] < first2[0] ? first1[0] : first2[0];
-               kernel += min0;
-               first1++;
-               first2++;
-       }
-       return kernel;
-}
+    typedef False is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    /**
+     *  Compute the max distance (L_infinity) between two vectors.
+     *
+     *  This distance is not a valid kdtree distance, it's not dimensionwise additive.
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType diff0, diff1, diff2, diff3;
+        Iterator1 last = a + size;
+        Iterator1 lastgroup = last - 3;
+
+        /* Process 4 items with each loop for efficiency. */
+        while (a < lastgroup) {
+            diff0 = abs(a[0] - b[0]);
+            diff1 = abs(a[1] - b[1]);
+            diff2 = abs(a[2] - b[2]);
+            diff3 = abs(a[3] - b[3]);
+            if (diff0>result) {result = diff0; }
+            if (diff1>result) {result = diff1; }
+            if (diff2>result) {result = diff2; }
+            if (diff3>result) {result = diff3; }
+            a += 4;
+            b += 4;
+
+            if ((worst_dist>0)&&(result>worst_dist)) {
+                return result;
+            }
+        }
+        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
+        while (a < last) {
+            diff0 = abs(*a++ - *b++);
+            result = (diff0>result) ? diff0 : result;
+        }
+        return result;
+    }
+
+    /* This distance functor is not dimension-wise additive, which
+     * makes it an invalid kd-tree distance, not implementing the accum_dist method */
 
-template <typename Iterator1, typename Iterator2>
-double hist_intersection_dist_sq(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Hamming distance functor - counts the bit differences between two strings
+ * (the bit count of A XOR B) using a per-byte lookup table - useful for the BRIEF descriptor.
+ */
+struct HammingLUT
 {
-       double dist_sq = acc - 2 * hist_intersection_kernel(first1, last1, first2);
-       while (first1 < last1) {
-               dist_sq += *first1 + *first2;
-               first1++;
-               first2++;
-       }
-       return dist_sq;
-}
+    typedef False is_kdtree_distance;
+    typedef False is_vector_space_distance;
+
+    typedef unsigned char ElementType;
+    typedef int ResultType;
+
+    /** this will count the bits in a ^ b
+     */
+    ResultType operator()(const unsigned char* a, const unsigned char* b, int size) const
+    {
+        ResultType result = 0;
+        for (int i = 0; i < size; i++) {
+            result += byteBitsLookUp(a[i] ^ b[i]);
+        }
+        return result;
+    }
+
+
+    /** \brief given a byte, count the bits using a look up table
+     *  \param b the byte to count bits.  The look up table has an entry for all
+     *  values of b, where that entry is the number of bits.
+     *  \return the number of bits in byte b
+     */
+    static unsigned char byteBitsLookUp(unsigned char b)
+    {
+        static const unsigned char table[256]  = {
+            /* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
+            /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
+            /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
+            /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4,
+            /* 10 */ 1, /* 11 */ 2, /* 12 */ 2, /* 13 */ 3,
+            /* 14 */ 2, /* 15 */ 3, /* 16 */ 3, /* 17 */ 4,
+            /* 18 */ 2, /* 19 */ 3, /* 1a */ 3, /* 1b */ 4,
+            /* 1c */ 3, /* 1d */ 4, /* 1e */ 4, /* 1f */ 5,
+            /* 20 */ 1, /* 21 */ 2, /* 22 */ 2, /* 23 */ 3,
+            /* 24 */ 2, /* 25 */ 3, /* 26 */ 3, /* 27 */ 4,
+            /* 28 */ 2, /* 29 */ 3, /* 2a */ 3, /* 2b */ 4,
+            /* 2c */ 3, /* 2d */ 4, /* 2e */ 4, /* 2f */ 5,
+            /* 30 */ 2, /* 31 */ 3, /* 32 */ 3, /* 33 */ 4,
+            /* 34 */ 3, /* 35 */ 4, /* 36 */ 4, /* 37 */ 5,
+            /* 38 */ 3, /* 39 */ 4, /* 3a */ 4, /* 3b */ 5,
+            /* 3c */ 4, /* 3d */ 5, /* 3e */ 5, /* 3f */ 6,
+            /* 40 */ 1, /* 41 */ 2, /* 42 */ 2, /* 43 */ 3,
+            /* 44 */ 2, /* 45 */ 3, /* 46 */ 3, /* 47 */ 4,
+            /* 48 */ 2, /* 49 */ 3, /* 4a */ 3, /* 4b */ 4,
+            /* 4c */ 3, /* 4d */ 4, /* 4e */ 4, /* 4f */ 5,
+            /* 50 */ 2, /* 51 */ 3, /* 52 */ 3, /* 53 */ 4,
+            /* 54 */ 3, /* 55 */ 4, /* 56 */ 4, /* 57 */ 5,
+            /* 58 */ 3, /* 59 */ 4, /* 5a */ 4, /* 5b */ 5,
+            /* 5c */ 4, /* 5d */ 5, /* 5e */ 5, /* 5f */ 6,
+            /* 60 */ 2, /* 61 */ 3, /* 62 */ 3, /* 63 */ 4,
+            /* 64 */ 3, /* 65 */ 4, /* 66 */ 4, /* 67 */ 5,
+            /* 68 */ 3, /* 69 */ 4, /* 6a */ 4, /* 6b */ 5,
+            /* 6c */ 4, /* 6d */ 5, /* 6e */ 5, /* 6f */ 6,
+            /* 70 */ 3, /* 71 */ 4, /* 72 */ 4, /* 73 */ 5,
+            /* 74 */ 4, /* 75 */ 5, /* 76 */ 5, /* 77 */ 6,
+            /* 78 */ 4, /* 79 */ 5, /* 7a */ 5, /* 7b */ 6,
+            /* 7c */ 5, /* 7d */ 6, /* 7e */ 6, /* 7f */ 7,
+            /* 80 */ 1, /* 81 */ 2, /* 82 */ 2, /* 83 */ 3,
+            /* 84 */ 2, /* 85 */ 3, /* 86 */ 3, /* 87 */ 4,
+            /* 88 */ 2, /* 89 */ 3, /* 8a */ 3, /* 8b */ 4,
+            /* 8c */ 3, /* 8d */ 4, /* 8e */ 4, /* 8f */ 5,
+            /* 90 */ 2, /* 91 */ 3, /* 92 */ 3, /* 93 */ 4,
+            /* 94 */ 3, /* 95 */ 4, /* 96 */ 4, /* 97 */ 5,
+            /* 98 */ 3, /* 99 */ 4, /* 9a */ 4, /* 9b */ 5,
+            /* 9c */ 4, /* 9d */ 5, /* 9e */ 5, /* 9f */ 6,
+            /* a0 */ 2, /* a1 */ 3, /* a2 */ 3, /* a3 */ 4,
+            /* a4 */ 3, /* a5 */ 4, /* a6 */ 4, /* a7 */ 5,
+            /* a8 */ 3, /* a9 */ 4, /* aa */ 4, /* ab */ 5,
+            /* ac */ 4, /* ad */ 5, /* ae */ 5, /* af */ 6,
+            /* b0 */ 3, /* b1 */ 4, /* b2 */ 4, /* b3 */ 5,
+            /* b4 */ 4, /* b5 */ 5, /* b6 */ 5, /* b7 */ 6,
+            /* b8 */ 4, /* b9 */ 5, /* ba */ 5, /* bb */ 6,
+            /* bc */ 5, /* bd */ 6, /* be */ 6, /* bf */ 7,
+            /* c0 */ 2, /* c1 */ 3, /* c2 */ 3, /* c3 */ 4,
+            /* c4 */ 3, /* c5 */ 4, /* c6 */ 4, /* c7 */ 5,
+            /* c8 */ 3, /* c9 */ 4, /* ca */ 4, /* cb */ 5,
+            /* cc */ 4, /* cd */ 5, /* ce */ 5, /* cf */ 6,
+            /* d0 */ 3, /* d1 */ 4, /* d2 */ 4, /* d3 */ 5,
+            /* d4 */ 4, /* d5 */ 5, /* d6 */ 5, /* d7 */ 6,
+            /* d8 */ 4, /* d9 */ 5, /* da */ 5, /* db */ 6,
+            /* dc */ 5, /* dd */ 6, /* de */ 6, /* df */ 7,
+            /* e0 */ 3, /* e1 */ 4, /* e2 */ 4, /* e3 */ 5,
+            /* e4 */ 4, /* e5 */ 5, /* e6 */ 5, /* e7 */ 6,
+            /* e8 */ 4, /* e9 */ 5, /* ea */ 5, /* eb */ 6,
+            /* ec */ 5, /* ed */ 6, /* ee */ 6, /* ef */ 7,
+            /* f0 */ 4, /* f1 */ 5, /* f2 */ 5, /* f3 */ 6,
+            /* f4 */ 5, /* f5 */ 6, /* f6 */ 6, /* f7 */ 7,
+            /* f8 */ 5, /* f9 */ 6, /* fa */ 6, /* fb */ 7,
+            /* fc */ 6, /* fd */ 7, /* fe */ 7, /* ff */ 8
+        };
+        return table[b];
+    }
+};
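
[Editorial note, not part of the patch] A quick sanity check of the table: 0xA5 is 10100101 in binary, so four bits are set, matching the /* a5 */ entry above.

    #include "opencv2/flann/dist.h"

    void lutDemo()
    {
        // 0xA5 is 10100101, so the lookup table reports 4 set bits
        int bitsInA5 = cvflann::HammingLUT::byteBitsLookUp(0xA5);   // == 4

        // the functor XORs byte-by-byte and sums the table entries
        unsigned char x = 0x0F, y = 0xFF;
        cvflann::HammingLUT lut;
        int d = lut(&x, &y, 1);   // 0x0F ^ 0xFF = 0xF0 -> 4 differing bits
    }
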
 
+/**
+ * Hamming distance functor (pop count between two binary vectors, i.e. xor them and count the number of bits set)
+ * That code was taken from brief.cpp in OpenCV
+ */
+template<class T>
+struct Hamming
+{
+    typedef False is_kdtree_distance;
+    typedef False is_vector_space_distance;
+
+
+    typedef T ElementType;
+    typedef int ResultType;
+
+    template<typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+    {
+        ResultType result = 0;
+#if __GNUC__
+#if ANDROID && HAVE_NEON
+        static uint64_t features = android_getCpuFeatures();
+        if ((features& ANDROID_CPU_ARM_FEATURE_NEON)) {
+            for (size_t i = 0; i < size; i += 16) {
+                uint8x16_t A_vec = vld1q_u8 (a + i);
+                uint8x16_t B_vec = vld1q_u8 (b + i);
+                //uint8x16_t veorq_u8 (uint8x16_t, uint8x16_t)
+                uint8x16_t AxorB = veorq_u8 (A_vec, B_vec);
+
+                uint8x16_t bitsSet = vcntq_u8 (AxorB);
+                //uint16x8_t vpadalq_u8 (uint16x8_t, uint8x16_t)
+                uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet);
+                uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8);
+
+                uint64x2_t bitSet2 = vpaddlq_u32 (bitSet4);
+                result += vgetq_lane_u64 (bitSet2,0);
+                result += vgetq_lane_u64 (bitSet2,1);
+            }
+        }
+        else
+#endif
+        //for portability just use unsigned long long -- and use __builtin_popcountll (see its docs)
+        typedef unsigned long long pop_t;
+        const size_t modulo = size % sizeof(pop_t);
+        const pop_t* a2 = reinterpret_cast<const pop_t*> (a);
+        const pop_t* b2 = reinterpret_cast<const pop_t*> (b);
+        const pop_t* a2_end = a2 + (size / sizeof(pop_t));
+
+        for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2));
+
+        if (modulo) {
+            //in the case where size is not dividable by sizeof(size_t)
+            //need to mask off the bits at the end
+            pop_t a_final = 0, b_final = 0;
+            memcpy(&a_final, a2, modulo);
+            memcpy(&b_final, b2, modulo);
+            result += __builtin_popcountll(a_final ^ b_final);
+        }
+#else
+        HammingLUT lut;
+        // size counts unsigned char elements, which is already the byte count the LUT expects
+        result = lut(reinterpret_cast<const unsigned char*> (a),
+                     reinterpret_cast<const unsigned char*> (b), size);
+#endif
+        return result;
+    }
+};
 
-// Hellinger distance
-template <typename Iterator1, typename Iterator2>
-double hellinger_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+template<typename T>
+struct Hamming2
 {
-       double distsq = acc;
-       double diff0, diff1, diff2, diff3;
-       Iterator1 lastgroup = last1 - 3;
-
-       /* Process 4 items with each loop for efficiency. */
-       while (first1 < lastgroup) {
-               diff0 = sqrt(first1[0]) - sqrt(first2[0]);
-               diff1 = sqrt(first1[1]) - sqrt(first2[1]);
-               diff2 = sqrt(first1[2]) - sqrt(first2[2]);
-               diff3 = sqrt(first1[3]) - sqrt(first2[3]);
-               distsq += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
-               first1 += 4;
-               first2 += 4;
-       }
-       /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
-       while (first1 < last1) {
-               diff0 = sqrt(*first1++) - sqrt(*first2++);
-               distsq += diff0 * diff0;
-       }
-       return distsq;
-}
+    typedef False is_kdtree_distance;
+    typedef False is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef int ResultType;
+
+    /** This is popcount_3() from:
+     * http://en.wikipedia.org/wiki/Hamming_weight */
+    unsigned int popcnt32(uint32_t n) const
+    {
+        n -= ((n >> 1) & 0x55555555);
+        n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
+        return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24;
+    }
+
+    unsigned int popcnt64(uint64_t n) const
+    {
+        n -= ((n >> 1) & 0x5555555555555555);
+        n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333);
+        return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56;
+    }
+
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+    {
+#ifdef FLANN_PLATFORM_64_BIT
+        const uint64_t* pa = reinterpret_cast<const uint64_t*>(a);
+        const uint64_t* pb = reinterpret_cast<const uint64_t*>(b);
+        ResultType result = 0;
+        size /= (sizeof(uint64_t)/sizeof(unsigned char));
+        for(size_t i = 0; i < size; ++i ) {
+            result += popcnt64(*pa ^ *pb);
+            ++pa;
+            ++pb;
+        }
+#else
+        const uint32_t* pa = reinterpret_cast<const uint32_t*>(a);
+        const uint32_t* pb = reinterpret_cast<const uint32_t*>(b);
+        ResultType result = 0;
+        size /= (sizeof(uint32_t)/sizeof(unsigned char));
+        for(size_t i = 0; i < size; ++i ) {
+            result += popcnt32(*pa ^ *pb);
+            ++pa;
+            ++pb;
+        }
+#endif
+        return result;
+    }
+};
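
[Editorial note, not part of the patch] A small sketch of the popcount path; the two 8-byte descriptors are hypothetical, and on a 64-bit platform they are handled with a single XOR and popcnt64:

    #include "opencv2/flann/dist.h"

    int hammingDemo()
    {
        unsigned char d1[8] = { 0xFF, 0x00, 0x0F, 0xF0, 0xAA, 0x55, 0x00, 0x01 };
        unsigned char d2[8] = { 0xFF, 0xFF, 0x0F, 0x0F, 0xAA, 0x55, 0x00, 0x00 };
        cvflann::Hamming2<unsigned char> hamming;
        // bytes 1, 3 and 7 differ: 8 + 8 + 1 = 17 differing bits
        return hamming(d1, d2, 8);
    }
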
 
 
-// chi-dsquare distance
-template <typename Iterator1, typename Iterator2>
-double chi_square_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T>
+struct HistIntersectionDistance
 {
-       double dist = acc;
-
-       while (first1 < last1) {
-               double sum = *first1 + *first2;
-               if (sum > 0) {
-                       double diff = *first1 - *first2;
-                       dist += diff * diff / sum;
-               }
-               first1++;
-               first2++;
-       }
-       return dist;
-}
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    /**
+     *  Compute the histogram intersection distance
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType min0, min1, min2, min3;
+        Iterator1 last = a + size;
+        Iterator1 lastgroup = last - 3;
+
+        /* Process 4 items with each loop for efficiency. */
+        while (a < lastgroup) {
+            min0 = a[0] < b[0] ? a[0] : b[0];
+            min1 = a[1] < b[1] ? a[1] : b[1];
+            min2 = a[2] < b[2] ? a[2] : b[2];
+            min3 = a[3] < b[3] ? a[3] : b[3];
+            result += min0 + min1 + min2 + min3;
+            a += 4;
+            b += 4;
+            if ((worst_dist>0)&&(result>worst_dist)) {
+                return result;
+            }
+        }
+        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
+        while (a < last) {
+            min0 = *a < *b ? *a : *b;
+            result += min0;
+            ++a;
+            ++b;
+        }
+        return result;
+    }
+
+    /**
+     * Partial distance, used by the kd-tree.
+     */
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        return a<b ? a : b;
+    }
+};
 
 
-// Kullback–Leibler divergence (NOT SYMMETRIC)
-template <typename Iterator1, typename Iterator2>
-double kl_divergence(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+
+template<class T>
+struct HellingerDistance
 {
-       double div = acc;
-
-       while (first1 < last1) {
-               if (*first2 != 0) {
-                       double ratio = *first1 / *first2;
-                       if (ratio > 0) {
-                               div += *first1 * log(ratio);
-                       }
-               }
-               first1++;
-               first2++;
-       }
-       return div;
-}
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    /**
+     *  Compute the Hellinger distance
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType diff0, diff1, diff2, diff3;
+        Iterator1 last = a + size;
+        Iterator1 lastgroup = last - 3;
+
+        /* Process 4 items with each loop for efficiency. */
+        while (a < lastgroup) {
+            diff0 = sqrt(static_cast<ResultType>(a[0])) - sqrt(static_cast<ResultType>(b[0]));
+            diff1 = sqrt(static_cast<ResultType>(a[1])) - sqrt(static_cast<ResultType>(b[1]));
+            diff2 = sqrt(static_cast<ResultType>(a[2])) - sqrt(static_cast<ResultType>(b[2]));
+            diff3 = sqrt(static_cast<ResultType>(a[3])) - sqrt(static_cast<ResultType>(b[3]));
+            result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
+            a += 4;
+            b += 4;
+        }
+        while (a < last) {
+            diff0 = sqrt(static_cast<ResultType>(*a++)) - sqrt(static_cast<ResultType>(*b++));
+            result += diff0 * diff0;
+        }
+        return result;
+    }
+
+    /**
+     * Partial distance, used by the kd-tree.
+     */
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        ResultType diff = sqrt(static_cast<ResultType>(a)) - sqrt(static_cast<ResultType>(b));
+        return diff * diff;
+    }
+};
 
 
+template<class T>
+struct ChiSquareDistance
+{
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    /**
+     *  Compute the chi-square distance
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+    {
+        ResultType result = ResultType();
+        ResultType sum, diff;
+        Iterator1 last = a + size;
+
+        while (a < last) {
+            sum = *a + *b;
+            if (sum>0) {
+                diff = *a - *b;
+                result += diff*diff/sum;
+            }
+            ++a;
+            ++b;
+
+            if ((worst_dist>0)&&(result>worst_dist)) {
+                return result;
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Partial distance, used by the kd-tree.
+     */
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        ResultType result = ResultType();
+        ResultType sum, diff;
+
+        sum = a+b;
+        if (sum>0) {
+            diff = a-b;
+            result = diff*diff/sum;
+        }
+        return result;
+    }
+};
 
 
-CV_EXPORTS flann_distance_t flann_distance_type();
-/**
- * Custom distance function. The distance computed is dependent on the value
- * of the 'flann_distance_type' global variable.
- *
- * If the last argument 'acc' is passed, the result is accumulated to the value
- * of this argument.
- */
-template <typename Iterator1, typename Iterator2>
-double custom_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
+template<class T>
+struct KL_Divergence
 {
-       switch (flann_distance_type()) {
-       case FLANN_DIST_EUCLIDEAN:
-               return euclidean_dist(first1, last1, first2, acc);
-       case FLANN_DIST_MANHATTAN:
-               return manhattan_dist(first1, last1, first2, acc);
-       case FLANN_DIST_MINKOWSKI:
-               return minkowski_dist(first1, last1, first2, acc);
-       case FLANN_DIST_MAX:
-               return max_dist(first1, last1, first2, acc);
-       case FLANN_DIST_HIST_INTERSECT:
-               return hist_intersection_dist_sq(first1, last1, first2, acc);
-       case FLANN_DIST_HELLINGER:
-               return hellinger_dist(first1, last1, first2, acc);
-       case FLANN_DIST_CS:
-               return chi_square_dist(first1, last1, first2, acc);
-       case FLANN_DIST_KL:
-               return kl_divergence(first1, last1, first2, acc);
-       default:
-               return euclidean_dist(first1, last1, first2, acc);
-       }
-}
+    typedef True is_kdtree_distance;
+    typedef True is_vector_space_distance;
+
+    typedef T ElementType;
+    typedef typename Accumulator<T>::Type ResultType;
+
+    /**
+     *  Compute the Kullback–Leibler divergence
+     */
+    template <typename Iterator1, typename Iterator2>
+    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+    {
+        ResultType result = ResultType();
+        Iterator1 last = a + size;
+
+        while (a < last) {
+            if (*a != 0) {
+                ResultType ratio = *a / *b;
+                if (ratio>0) {
+                    result += *a * log(ratio);
+                }
+            }
+            ++a;
+            ++b;
+
+            if ((worst_dist>0)&&(result>worst_dist)) {
+                return result;
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Partial distance, used by the kd-tree.
+     */
+    template <typename U, typename V>
+    inline ResultType accum_dist(const U& a, const V& b, int) const
+    {
+        ResultType result = ResultType();
+        ResultType ratio = a / b;
+        if (ratio>0) {
+            result = a * log(ratio);
+        }
+        return result;
+    }
+};
+
+
 
 /*
  * This is a "zero iterator". It basically behaves like a zero filled
@@ -333,28 +801,36 @@ double custom_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double a
  * zero-filled array.
  */
 template <typename T>
-struct ZeroIterator {
+struct ZeroIterator
+{
 
-       T operator*() {
-               return 0;
-       }
+    T operator*()
+    {
+        return 0;
+    }
 
-       T operator[](int) {
-               return 0;
-       }
+    T operator[](int)
+    {
+        return 0;
+    }
 
-       ZeroIterator<T>& operator ++(int) {
-               return *this;
-       }
+    const ZeroIterator<T>& operator ++()
+    {
+        return *this;
+    }
 
-       ZeroIterator<T>& operator+=(int) {
-               return *this;
-       }
+    ZeroIterator<T> operator ++(int)
+    {
+        return *this;
+    }
 
-};
+    ZeroIterator<T>& operator+=(int)
+    {
+        return *this;
+    }
 
-CV_EXPORTS ZeroIterator<float>& zero();
+};
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_DIST_H_
+#endif //OPENCV_FLANN_DIST_H_
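
[Editorial note, not part of the patch] One use of the zero iterator is computing a point's distance to the origin with any of the vector-space functors above, since it can stand in for the second argument. A short sketch:

    #include "opencv2/flann/dist.h"

    float squaredNormDemo()
    {
        float v[4] = { 1.0f, 2.0f, 2.0f, 0.0f };
        cvflann::ZeroIterator<float> zero;
        cvflann::L2_Simple<float> dist;
        // squared norm of v: 1 + 4 + 4 + 0 = 9
        return dist(v, zero, 4);
    }
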
diff --git a/modules/flann/include/opencv2/flann/dynamic_bitset.h b/modules/flann/include/opencv2/flann/dynamic_bitset.h
new file mode 100644 (file)
index 0000000..4fb8138
--- /dev/null
@@ -0,0 +1,152 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+/***********************************************************************
+ * Author: Vincent Rabaud
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_
+#define OPENCV_FLANN_DYNAMIC_BITSET_H_
+
+//#define FLANN_USE_BOOST 1
+#if FLANN_USE_BOOST
+#include <boost/dynamic_bitset.hpp>
+typedef boost::dynamic_bitset<> DynamicBitset;
+#else
+
+#include <limits.h>
+
+#include "dist.h"
+
+/** Class re-implementing the boost::dynamic_bitset functionality.
+ * It avoids depending on boost; it also does not do bound checks
+ * and has a way to reset a whole block for speed.
+ */
+class DynamicBitset
+{
+public:
+    /** default constructor
+     */
+    DynamicBitset()
+    {
+    }
+
+    /** only constructor we use in our code
+     * @param size the size of the bitset (in bits)
+     */
+    DynamicBitset(size_t size)
+    {
+        resize(size);
+        reset();
+    }
+
+    /** Sets all the bits to 0
+     */
+    void clear()
+    {
+        std::fill(bitset_.begin(), bitset_.end(), 0);
+    }
+
+    /** @brief checks if the bitset is empty
+     * @return true if the bitset is empty
+     */
+    bool empty() const
+    {
+        return bitset_.empty();
+    }
+
+    /** @brief set all the bits to 0
+     */
+    void reset()
+    {
+        std::fill(bitset_.begin(), bitset_.end(), 0);
+    }
+
+    /** @brief set one bit to 0
+     * @param index the index of the bit to reset
+     */
+    void reset(size_t index)
+    {
+        bitset_[index / cell_bit_size_] &= ~(size_t(1) << (index % cell_bit_size_));
+    }
+
+    /** @brief sets a specific bit to 0, possibly together with other bits in the same block
+     * This function is useful when resetting a given set of bits so that the
+     * whole bitset ends up being 0: in that case, we do not mind also
+     * resetting the other bits of the block to 0
+     * @param index the index of a bit in the block to reset
+     */
+    void reset_block(size_t index)
+    {
+        bitset_[index / cell_bit_size_] = 0;
+    }
+
+    /** @brief resize the bitset so that it contains at least size bits
+     * @param size the new size of the bitset, in bits
+     */
+    void resize(size_t size)
+    {
+        size_ = size;
+        bitset_.resize(size / cell_bit_size_ + 1);
+    }
+
+    /** @brief set a bit to true
+     * @param index the index of the bit to set to 1
+     */
+    void set(size_t index)
+    {
+        bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_);
+    }
+
+    /** @brief gives the number of contained bits
+     */
+    size_t size() const
+    {
+        return size_;
+    }
+
+    /** @brief check if a bit is set
+     * @param index the index of the bit to check
+     * @return true if the bit is set
+     */
+    bool test(size_t index) const
+    {
+        return (bool)(bitset_[index / cell_bit_size_] & (size_t(1) << (index % cell_bit_size_)));
+    }
+
+private:
+    std::vector<size_t> bitset_;
+    size_t size_;
+    static const unsigned int cell_bit_size_ = CHAR_BIT * sizeof(size_t);
+};
+
+#endif
+
+#endif // OPENCV_FLANN_DYNAMIC_BITSET_H_
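
A minimal usage sketch for the DynamicBitset class introduced above (the bit indices are arbitrary; in this version the class lives in the global namespace):

    #include "opencv2/flann/dynamic_bitset.h"

    void bitset_example()
    {
        DynamicBitset visited(1000);      // room for 1000 bits, all initially 0
        visited.set(42);
        bool seen = visited.test(42);     // true
        visited.reset(42);                // clear a single bit
        visited.reset_block(42);          // clear the whole block containing bit 42
        (void)seen;
    }
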
index 642b17b..2e05acb 100644 (file)
 
 #include "opencv2/flann/flann_base.hpp"
 
+namespace cvflann
+{
+    inline flann_distance_t& flann_distance_type_()
+    {
+        static flann_distance_t distance_type = FLANN_DIST_L2;
+        return distance_type;
+    }
+
+    FLANN_DEPRECATED inline void set_distance_type(flann_distance_t distance_type, int order  = 0)
+    {
+        flann_distance_type_() = distance_type;
+    }
+}
+
+
 namespace cv
 {
 namespace flann
@@ -61,132 +76,356 @@ template <> struct CvType<int> { static int type() { return CV_32S; } };
 template <> struct CvType<float> { static int type() { return CV_32F; } };
 template <> struct CvType<double> { static int type() { return CV_64F; } };
 
-    
+
+// bring the flann parameters into this namespace
 using ::cvflann::IndexParams;
 using ::cvflann::LinearIndexParams;
 using ::cvflann::KDTreeIndexParams;
 using ::cvflann::KMeansIndexParams;
 using ::cvflann::CompositeIndexParams;
+using ::cvflann::LshIndexParams;
+using ::cvflann::HierarchicalClusteringIndexParams;
 using ::cvflann::AutotunedIndexParams;
 using ::cvflann::SavedIndexParams;
-
 using ::cvflann::SearchParams;
 
+// bring the flann distances into this namespace
+using ::cvflann::L2_Simple;
+using ::cvflann::L2;
+using ::cvflann::L1;
+using ::cvflann::MinkowskiDistance;
+using ::cvflann::MaxDistance;
+using ::cvflann::HammingLUT;
+using ::cvflann::Hamming;
+using ::cvflann::Hamming2;
+using ::cvflann::HistIntersectionDistance;
+using ::cvflann::HellingerDistance;
+using ::cvflann::ChiSquareDistance;
+using ::cvflann::KL_Divergence;
 
-template <typename T>
-class CV_EXPORTS Index_ {
-       ::cvflann::Index<T>* nnIndex;
 
+template <typename Distance>
+class GenericIndex 
+{
 public:
+        typedef typename Distance::ElementType ElementType;
+        typedef typename Distance::ResultType DistanceType;
+
+        GenericIndex(const Mat& features, const IndexParams& params, Distance distance = Distance());
+
+        ~GenericIndex();
+
+        void knnSearch(const vector<ElementType>& query, vector<int>& indices, 
+                       vector<DistanceType>& dists, int knn, const SearchParams& params);
+        void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const SearchParams& params);
+
+        int radiusSearch(const vector<ElementType>& query, vector<int>& indices, 
+                         vector<DistanceType>& dists, DistanceType radius, const SearchParams& params);
+        int radiusSearch(const Mat& query, Mat& indices, Mat& dists, 
+                         DistanceType radius, const SearchParams& params);
+
+        void save(std::string filename) { nnIndex->save(filename); }
+
+        int veclen() const { return nnIndex->veclen(); }
+
+        int size() const { return nnIndex->size(); }
+
+        IndexParams getParameters() { return nnIndex->getParameters(); }
+
+        FLANN_DEPRECATED const IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); }
+
+private:
+        ::cvflann::Index<Distance>* nnIndex;
+};
+
+
+#define FLANN_DISTANCE_CHECK \
+    if ( ::cvflann::flann_distance_type_() != FLANN_DIST_L2) { \
+        printf("[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed "\
+        "the distance using cvflann::set_distance_type. This is no longer working as expected "\
+        "(cv::flann::Index always uses L2). You should create the index templated on the distance, "\
+        "for example for L1 distance use: GenericIndex< L1<float> > \n"); \
+    }
+    
+
+template <typename Distance>
+GenericIndex<Distance>::GenericIndex(const Mat& dataset, const IndexParams& params, Distance distance)
+{
+    CV_Assert(dataset.type() == CvType<ElementType>::type());
+    CV_Assert(dataset.isContinuous());
+    ::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);
+    
+    nnIndex = new ::cvflann::Index<Distance>(m_dataset, params, distance);
+    
+    FLANN_DISTANCE_CHECK
+    
+    nnIndex->buildIndex();
+}
+
+template <typename Distance>
+GenericIndex<Distance>::~GenericIndex()
+{
+    delete nnIndex;
+}
+
+template <typename Distance>
+void GenericIndex<Distance>::knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const SearchParams& searchParams)
+{
+    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
+
+    FLANN_DISTANCE_CHECK
+
+    nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
+}
+
+
+template <typename Distance>
+void GenericIndex<Distance>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const SearchParams& searchParams)
+{
+    CV_Assert(queries.type() == CvType<ElementType>::type());
+    CV_Assert(queries.isContinuous());
+    ::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);
+    
+    CV_Assert(indices.type() == CV_32S);
+    CV_Assert(indices.isContinuous());
+    ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
+    
+    CV_Assert(dists.type() == CvType<DistanceType>::type());
+    CV_Assert(dists.isContinuous());
+    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
+
+    FLANN_DISTANCE_CHECK
+    
+    nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
+}
+
+template <typename Distance>
+int GenericIndex<Distance>::radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const SearchParams& searchParams)
+{
+    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
+
+    FLANN_DISTANCE_CHECK
+    
+    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+}
+
+template <typename Distance>
+int GenericIndex<Distance>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const SearchParams& searchParams)
+{
+    CV_Assert(query.type() == CvType<ElementType>::type());
+    CV_Assert(query.isContinuous());
+    ::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);
+    
+    CV_Assert(indices.type() == CV_32S);
+    CV_Assert(indices.isContinuous());
+    ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
+    
+    CV_Assert(dists.type() == CvType<DistanceType>::type());
+    CV_Assert(dists.isContinuous());
+    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
+    
+    FLANN_DISTANCE_CHECK
+    
+    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+}
+
+
+typedef GenericIndex< L2<float> > Index;
+
+
+/**
+ * @deprecated Use GenericIndex class instead
+ */
+template <typename T>
+class FLANN_DEPRECATED Index_ {
+public:
+        typedef typename L2<T>::ElementType ElementType;
+        typedef typename L2<T>::ResultType DistanceType;
+
        Index_(const Mat& features, const IndexParams& params);
 
        ~Index_();
 
-       void knnSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, int knn, const SearchParams& params);
+       void knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const SearchParams& params);
        void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const SearchParams& params);
 
-       int radiusSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, float radius, const SearchParams& params);
-       int radiusSearch(const Mat& query, Mat& indices, Mat& dists, float radius, const SearchParams& params);
+       int radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const SearchParams& params);
+       int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const SearchParams& params);
+
+       void save(std::string filename) 
+        { 
+            if (nnIndex_L1) nnIndex_L1->save(filename);
+            if (nnIndex_L2) nnIndex_L2->save(filename);
+        }
 
-       void save(std::string filename) { nnIndex->save(filename); }
+       int veclen() const 
+       { 
+            if (nnIndex_L1) return nnIndex_L1->veclen();
+            if (nnIndex_L2) return nnIndex_L2->veclen();            
+        }
 
-       int veclen() const { return nnIndex->veclen(); }
+       int size() const 
+       { 
+            if (nnIndex_L1) return nnIndex_L1->size();
+            if (nnIndex_L2) return nnIndex_L2->size(); 
+        }
 
-       int size() const { return nnIndex->size(); }
+        IndexParams getParameters() 
+        { 
+            if (nnIndex_L1) return nnIndex_L1->getParameters();
+            if (nnIndex_L2) return nnIndex_L2->getParameters();
+            
+        }
 
-       const IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); }
+        FLANN_DEPRECATED const IndexParams* getIndexParameters() 
+        { 
+            if (nnIndex_L1) return nnIndex_L1->getIndexParameters();
+            if (nnIndex_L2) return nnIndex_L2->getIndexParameters(); 
+        }
 
+private:
+        // providing backwards compatibility for L2 and L1 distances (most common)
+        ::cvflann::Index< L2<ElementType> >* nnIndex_L2;
+        ::cvflann::Index< L1<ElementType> >* nnIndex_L1;
 };
 
 
 template <typename T>
 Index_<T>::Index_(const Mat& dataset, const IndexParams& params)
 {
-    CV_Assert(dataset.type() == CvType<T>::type());
+    printf("[WARNING] The cv::flann::Index_<T> class is deperecated, use cv::flann::GenericIndex<Distance> instead\n");
+    
+    CV_Assert(dataset.type() == CvType<ElementType>::type());
     CV_Assert(dataset.isContinuous());
-    ::cvflann::Matrix<T> m_dataset((T*)dataset.ptr<T>(0), dataset.rows, dataset.cols);
+    ::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);
     
-    nnIndex = new ::cvflann::Index<T>(m_dataset, params);
-    nnIndex->buildIndex();
+    if ( ::cvflann::flann_distance_type_() == FLANN_DIST_L2 ) {
+        nnIndex_L1 = NULL;
+        nnIndex_L2 = new ::cvflann::Index< L2<ElementType> >(m_dataset, params);
+    }
+    else if ( ::cvflann::flann_distance_type_() == FLANN_DIST_L1 ) {
+        nnIndex_L1 = new ::cvflann::Index< L1<ElementType> >(m_dataset, params);
+        nnIndex_L2 = NULL;        
+    }
+    else {
+        printf("[ERROR] cv::flann::Index_<T> only provides backwards compatibility for the L1 and L2 distances. "
+        "For other distance types you must use cv::flann::GenericIndex<Distance>\n");
+        CV_Assert(0);
+    }
+    if (nnIndex_L1) nnIndex_L1->buildIndex();
+    if (nnIndex_L2) nnIndex_L2->buildIndex();
 }
 
 template <typename T>
 Index_<T>::~Index_()
 {
-    delete nnIndex;
+    if (nnIndex_L1) delete nnIndex_L1;
+    if (nnIndex_L2) delete nnIndex_L2;
 }
 
 template <typename T>
-void Index_<T>::knnSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, int knn, const SearchParams& searchParams)
+void Index_<T>::knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const SearchParams& searchParams)
 {
-    ::cvflann::Matrix<T> m_query((T*)&query[0], 1, (int)query.size());
-    ::cvflann::Matrix<int> m_indices(&indices[0], 1, (int)indices.size());
-    ::cvflann::Matrix<float> m_dists(&dists[0], 1, (int)dists.size());
+    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
     
-    nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
+    if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
+    if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
 }
 
 
 template <typename T>
 void Index_<T>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const SearchParams& searchParams)
 {
-    CV_Assert(queries.type() == CvType<T>::type());
+    CV_Assert(queries.type() == CvType<ElementType>::type());
     CV_Assert(queries.isContinuous());
-    ::cvflann::Matrix<T> m_queries((T*)queries.ptr<T>(0), queries.rows, queries.cols);
+    ::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);
     
     CV_Assert(indices.type() == CV_32S);
     CV_Assert(indices.isContinuous());
     ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
     
-    CV_Assert(dists.type() == CV_32F);
+    CV_Assert(dists.type() == CvType<DistanceType>::type());
     CV_Assert(dists.isContinuous());
-    ::cvflann::Matrix<float> m_dists((float*)dists.ptr<float>(0), dists.rows, dists.cols);
-    
-    nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
+    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
+
+    if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
+    if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
 }
 
 template <typename T>
-int Index_<T>::radiusSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, float radius, const SearchParams& searchParams)
+int Index_<T>::radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const SearchParams& searchParams)
 {
-    ::cvflann::Matrix<T> m_query((T*)&query[0], 1, (int)query.size());
-    ::cvflann::Matrix<int> m_indices(&indices[0], 1, (int)indices.size());
-    ::cvflann::Matrix<float> m_dists(&dists[0], 1, (int)dists.size());
+    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
     
-    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+    if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+    if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
 }
 
 template <typename T>
-int Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, float radius, const SearchParams& searchParams)
+int Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const SearchParams& searchParams)
 {
-    CV_Assert(query.type() == CvType<T>::type());
+    CV_Assert(query.type() == CvType<ElementType>::type());
     CV_Assert(query.isContinuous());
-    ::cvflann::Matrix<T> m_query((T*)query.ptr<T>(0), query.rows, query.cols);
+    ::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);
     
     CV_Assert(indices.type() == CV_32S);
     CV_Assert(indices.isContinuous());
     ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
     
-    CV_Assert(dists.type() == CV_32F);
+    CV_Assert(dists.type() == CvType<DistanceType>::type());
     CV_Assert(dists.isContinuous());
-    ::cvflann::Matrix<float> m_dists((float*)dists.ptr<float>(0), dists.rows, dists.cols);
+    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
     
-    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+    if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+    if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
 }
 
-typedef Index_<float> Index;
 
-template <typename ELEM_TYPE, typename DIST_TYPE>
-int hierarchicalClustering(const Mat& features, Mat& centers, const KMeansIndexParams& params)
+template <typename Distance>
+int hierarchicalClustering(const Mat& features, Mat& centers, const KMeansIndexParams& params,
+                           Distance d = Distance())
 {
-    CV_Assert(features.type() == CvType<ELEM_TYPE>::type());
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
+    
+    CV_Assert(features.type() == CvType<ElementType>::type());
     CV_Assert(features.isContinuous());
-    ::cvflann::Matrix<ELEM_TYPE> m_features((ELEM_TYPE*)features.ptr<ELEM_TYPE>(0), features.rows, features.cols);
+    ::cvflann::Matrix<ElementType> m_features((ElementType*)features.ptr<ElementType>(0), features.rows, features.cols);
     
-    CV_Assert(centers.type() == CvType<DIST_TYPE>::type());
+    CV_Assert(centers.type() == CvType<DistanceType>::type());
     CV_Assert(centers.isContinuous());
-    ::cvflann::Matrix<DIST_TYPE> m_centers((DIST_TYPE*)centers.ptr<DIST_TYPE>(0), centers.rows, centers.cols);
-    
-    return ::cvflann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE>(m_features, m_centers, params);
+    ::cvflann::Matrix<DistanceType> m_centers((DistanceType*)centers.ptr<DistanceType>(0), centers.rows, centers.cols);
+
+    return ::cvflann::hierarchicalClustering<Distance>(m_features, m_centers, params, d);
+}
+
+
+template <typename ELEM_TYPE, typename DIST_TYPE>
+FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const KMeansIndexParams& params)
+{
+    printf("[WARNING] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> is deprecated, use "
+        "cv::flann::hierarchicalClustering<Distance> instead\n");
+        
+    if ( ::cvflann::flann_distance_type_() == FLANN_DIST_L2 ) {
+        return hierarchicalClustering< L2<ELEM_TYPE> >(features, centers, params);
+    }
+    else if ( ::cvflann::flann_distance_type_() == FLANN_DIST_L1 ) {
+        return hierarchicalClustering< L1<ELEM_TYPE> >(features, centers, params);
+    }
+    else {
+        printf("[ERROR] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> only provides backwards "
+        "compatibility for the L1 and L2 distances. "
+        "For other distance types you must use cv::flann::hierarchicalClustering<Distance>\n");
+        CV_Assert(0);
+    }
 }
 
 } } // namespace cv::flann
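
A minimal sketch of the intended use of the new distance-templated cv::flann::GenericIndex; the matrix sizes, index parameters and search parameters below are arbitrary examples:

    #include "opencv2/core/core.hpp"
    #include "opencv2/flann/flann.hpp"

    void generic_index_example()
    {
        cv::Mat features(1000, 32, CV_32F);
        cv::randu(features, cv::Scalar(0), cv::Scalar(1));

        // the index is templated on the distance, e.g. L1 instead of the former global setting
        cv::flann::GenericIndex< cv::flann::L1<float> > index(features,
                cv::flann::KDTreeIndexParams(4));

        cv::Mat queries(10, 32, CV_32F);
        cv::randu(queries, cv::Scalar(0), cv::Scalar(1));
        cv::Mat indices(10, 5, CV_32S), dists(10, 5, CV_32F);
        index.knnSearch(queries, indices, dists, 5, cv::flann::SearchParams(64));
    }
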
index 46143df..0426262 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_FLANN_BASE_HPP_
-#define _OPENCV_FLANN_BASE_HPP_
+#ifndef FLANN_BASE_HPP_
+#define FLANN_BASE_HPP_
 
 #include <vector>
 #include <string>
 #include <cassert>
 #include <cstdio>
 
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/matrix.h"
-#include "opencv2/flann/result_set.h"
-#include "opencv2/flann/index_testing.h"
-#include "opencv2/flann/object_factory.h"
-#include "opencv2/flann/saving.h"
+#include "general.h"
+#include "matrix.h"
+#include "params.h"
+#include "saving.h"
 
-#include "opencv2/flann/all_indices.h"
+#include "all_indices.h"
 
 namespace cvflann
 {
 
-
 /**
-Sets the log level used for all flann functions
-
-Params:
-    level = verbosity level
-*/
-CV_EXPORTS void log_verbosity(int level);
-
+ * Sets the log level used for all flann functions
+ * @param level Verbosity level
+ */
+inline void log_verbosity(int level)
+{
+    if (level >= 0) {
+        Logger::setLevel(level);
+    }
+}
 
 /**
- * Sets the distance type to use throughout FLANN.
- * If distance type specified is MINKOWSKI, the second argument
- * specifies which order the minkowski distance should have.
+ * (Deprecated) Index parameters for creating a saved index.
  */
-CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order);
-
-
-struct CV_EXPORTS SavedIndexParams : public IndexParams {
-       SavedIndexParams(std::string filename_) : IndexParams(FLANN_INDEX_SAVED), filename(filename_) {}
-
-       std::string filename;           // filename of the stored index
-
-       void print() const
-       {
-               logger().info("Index type: %d\n",(int)algorithm);
-               logger().info("Filename: %s\n", filename.c_str());
-       }
-};
-
-template<typename T>
-class CV_EXPORTS Index {
-       NNIndex<T>* nnIndex;
-    bool built;
-
-public:
-       Index(const Matrix<T>& features, const IndexParams& params);
-
-       ~Index();
-
-       void buildIndex();
-
-       void knnSearch(const Matrix<T>& queries, Matrix<int>& indices, Matrix<float>& dists, int knn, const SearchParams& params);
-
-       int radiusSearch(const Matrix<T>& query, Matrix<int>& indices, Matrix<float>& dists, float radius, const SearchParams& params);
-
-       void save(std::string filename);
-
-       int veclen() const;
-
-       int size() const;
-
-       NNIndex<T>* getIndex() { return nnIndex; }
-
-       const IndexParams* getIndexParameters() { return nnIndex->getParameters(); }
+struct SavedIndexParams : public IndexParams
+{
+    SavedIndexParams(std::string filename)
+    {
+        (* this)["algorithm"] = FLANN_INDEX_SAVED;
+        (*this)["filename"] = filename;
+    }
 };
 
 
-template<typename T>
-NNIndex<T>* load_saved_index(const Matrix<T>& dataset, const std::string& filename)
+template<typename Distance>
+NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>& dataset, const std::string& filename, Distance distance)
 {
-       FILE* fin = fopen(filename.c_str(), "rb");
-       if (fin==NULL) {
-               return NULL;
-       }
-       IndexHeader header = load_header(fin);
-       if (header.data_type!=Datatype<T>::type()) {
-               throw FLANNException("Datatype of saved index is different than of the one to be created.");
-       }
-       if (size_t(header.rows)!=dataset.rows || size_t(header.cols)!=dataset.cols) {
-               throw FLANNException("The index saved belongs to a different dataset");
-       }
-
-       IndexParams* params = ParamsFactory_instance().create(header.index_type);
-       NNIndex<T>* nnIndex = create_index_by_type(dataset, *params);
-       nnIndex->loadIndex(fin);
-       fclose(fin);
-
-       return nnIndex;
-}
+    typedef typename Distance::ElementType ElementType;
 
+    FILE* fin = fopen(filename.c_str(), "rb");
+    if (fin == NULL) {
+        return NULL;
+    }
+    IndexHeader header = load_header(fin);
+    if (header.data_type != Datatype<ElementType>::type()) {
+        throw FLANNException("Datatype of saved index is different than of the one to be created.");
+    }
+    if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) {
+        throw FLANNException("The index saved belongs to a different dataset");
+    }
 
-template<typename T>
-Index<T>::Index(const Matrix<T>& dataset, const IndexParams& params)
-{
-       flann_algorithm_t index_type = params.getIndexType();
-    built = false;
-
-       if (index_type==FLANN_INDEX_SAVED) {
-               nnIndex = load_saved_index(dataset, ((const SavedIndexParams&)params).filename);
-        built = true;
-       }
-       else {
-               nnIndex = create_index_by_type(dataset, params);
-       }
-}
+    IndexParams params;
+    params["algorithm"] = header.index_type;
+    NNIndex<Distance>* nnIndex = create_index_by_type<Distance>(dataset, params, distance);
+    nnIndex->loadIndex(fin);
+    fclose(fin);
 
-template<typename T>
-Index<T>::~Index()
-{
-       delete nnIndex;
+    return nnIndex;
 }
 
-template<typename T>
-void Index<T>::buildIndex()
-{
-       if (!built)     {
-               nnIndex->buildIndex();
-               built = true;
-       }
-}
 
-template<typename T>
-void Index<T>::knnSearch(const Matrix<T>& queries, Matrix<int>& indices, Matrix<float>& dists, int knn, const SearchParams& searchParams)
+template<typename Distance>
+class Index : public NNIndex<Distance>
 {
-    if (!built) {
-        throw FLANNException("You must build the index before searching.");
+public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
+
+    Index(const Matrix<ElementType>& features, const IndexParams& params, Distance distance = Distance() )
+        : index_params_(params)
+    {
+        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params,"algorithm");
+        loaded_ = false;
+
+        if (index_type == FLANN_INDEX_SAVED) {
+            nnIndex_ = load_saved_index<Distance>(features, get_param<std::string>(params,"filename"), distance);
+            loaded_ = true;
+        }
+        else {
+            nnIndex_ = create_index_by_type<Distance>(features, params, distance);
+        }
     }
-       assert(queries.cols==nnIndex->veclen());
-       assert(indices.rows>=queries.rows);
-       assert(dists.rows>=queries.rows);
-       assert(int(indices.cols)>=knn);
-       assert(int(dists.cols)>=knn);
 
-    KNNResultSet<T> resultSet(knn);
+    ~Index()
+    {
+        delete nnIndex_;
+    }
 
-    for (size_t i = 0; i < queries.rows; i++) {
-        T* target = queries[i];
-        resultSet.init(target, (int)queries.cols);
+    /**
+     * Builds the index.
+     */
+    void buildIndex()
+    {
+        if (!loaded_) {
+            nnIndex_->buildIndex();
+        }
+    }
 
-        nnIndex->findNeighbors(resultSet, target, searchParams);
+    void save(std::string filename)
+    {
+        FILE* fout = fopen(filename.c_str(), "wb");
+        if (fout == NULL) {
+            throw FLANNException("Cannot open file");
+        }
+        save_header(fout, *nnIndex_);
+        saveIndex(fout);
+        fclose(fout);
+    }
 
-        int* neighbors = resultSet.getNeighbors();
-        float* distances = resultSet.getDistances();
-        memcpy(indices[i], neighbors, knn*sizeof(int));
-        memcpy(dists[i], distances, knn*sizeof(float));
+    /**
+     * \brief Saves the index to a stream
+     * \param stream The stream to save the index to
+     */
+    virtual void saveIndex(FILE* stream)
+    {
+        nnIndex_->saveIndex(stream);
     }
-}
 
-template<typename T>
-int Index<T>::radiusSearch(const Matrix<T>& query, Matrix<int>& indices, Matrix<float>& dists, float radius, const SearchParams& searchParams)
-{
-    if (!built) {
-        throw FLANNException("You must build the index before searching.");
+    /**
+     * \brief Loads the index from a stream
+     * \param stream The stream from which the index is loaded
+     */
+    virtual void loadIndex(FILE* stream)
+    {
+        nnIndex_->loadIndex(stream);
     }
-       if (query.rows!=1) {
-           fprintf(stderr, "I can only search one feature at a time for range search\n");
-               return -1;
-       }
-       assert(query.cols==nnIndex->veclen());
 
-       RadiusResultSet<T> resultSet(radius);
-       resultSet.init(query.data, (int)query.cols);
-       nnIndex->findNeighbors(resultSet,query.data,searchParams);
+    /**
+     * \returns The dimensionality of the features in this index.
+     */
+    size_t veclen() const
+    {
+        return nnIndex_->veclen();
+    }
 
-       // TODO: optimise here
-       int* neighbors = resultSet.getNeighbors();
-       float* distances = resultSet.getDistances();
-       size_t count_nn = std::min(resultSet.size(), indices.cols);
+    /**
+     * \returns The number of features in this index.
+     */
+    size_t size() const
+    {
+        return nnIndex_->size();
+    }
 
-       assert (dists.cols>=count_nn);
+    /**
+     * \returns The index type (kdtree, kmeans,...)
+     */
+    flann_algorithm_t getType() const
+    {
+        return nnIndex_->getType();
+    }
 
-       for (size_t i=0;i<count_nn;++i) {
-               indices[0][i] = neighbors[i];
-               dists[0][i] = distances[i];
-       }
+    /**
+     * \returns The amount of memory (in bytes) used by the index.
+     */
+    virtual int usedMemory() const
+    {
+        return nnIndex_->usedMemory();
+    }
 
-       return (int)count_nn;
-}
 
+    /**
+     * \returns The index parameters
+     */
+    IndexParams getParameters() const
+    {
+        return nnIndex_->getParameters();
+    }
 
-template<typename T>
-void Index<T>::save(std::string filename)
-{
-       FILE* fout = fopen(filename.c_str(), "wb");
-       if (fout==NULL) {
-               throw FLANNException("Cannot open file");
-       }
-       save_header(fout, *nnIndex);
-       nnIndex->saveIndex(fout);
-       fclose(fout);
-}
+    /**
+     * \brief Perform k-nearest neighbor search
+     * \param[in] queries The query points for which to find the nearest neighbors
+     * \param[out] indices The indices of the nearest neighbors found
+     * \param[out] dists Distances to the nearest neighbors found
+     * \param[in] knn Number of nearest neighbors to return
+     * \param[in] params Search parameters
+     */
+    void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+    {
+        nnIndex_->knnSearch(queries, indices, dists, knn, params);
+    }
 
+    /**
+     * \brief Perform radius search
+     * \param[in] query The query point
+     * \param[out] indices The indices of the neighbors found within the given radius
+     * \param[out] dists The distances to the nearest neighbors found
+     * \param[in] radius The radius used for search
+     * \param[in] params Search parameters
+     * \returns Number of neighbors found
+     */
+    int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)
+    {
+        return nnIndex_->radiusSearch(query, indices, dists, radius, params);
+    }
 
-template<typename T>
-int Index<T>::size() const
-{
-       return nnIndex->size();
-}
+    /**
+     * \brief Method that searches for nearest-neighbours
+     */
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+    {
+        nnIndex_->findNeighbors(result, vec, searchParams);
+    }
 
-template<typename T>
-int Index<T>::veclen() const
-{
-       return nnIndex->veclen();
-}
+    /**
+     * \brief Returns actual index
+     */
+    FLANN_DEPRECATED NNIndex<Distance>* getIndex()
+    {
+        return nnIndex_;
+    }
+
+    /**
+     * \brief Returns index parameters.
+     * \deprecated use getParameters() instead.
+     */
+    FLANN_DEPRECATED  const IndexParams* getIndexParameters()
+    {
+        return &index_params_;
+    }
 
+private:
+    /** Pointer to actual index class */
+    NNIndex<Distance>* nnIndex_;
+    /** Indicates whether the index was loaded from a file */
+    bool loaded_;
+    /** Parameters passed to the index */
+    IndexParams index_params_;
+};
 
-template <typename ELEM_TYPE, typename DIST_TYPE>
-int hierarchicalClustering(const Matrix<ELEM_TYPE>& features, Matrix<DIST_TYPE>& centers, const KMeansIndexParams& params)
+/**
+ * Performs a hierarchical clustering of the points passed as argument and then takes a cut in the
+ * clustering tree to return a flat clustering.
+ * @param[in] points Points to be clustered
+ * @param centers The computed cluster centres. Matrix should be preallocated and centers.rows is the
+ *  number of clusters requested.
+ * @param params Clustering parameters (The same as for cvflann::KMeansIndex)
+ * @param d Distance to be used for clustering (eg: cvflann::L2)
+ * @return number of clusters computed (can be different from centers.rows and is the highest number
+ * of the form (branching-1)*K+1 smaller than centers.rows).
+ */
+template <typename Distance>
+int hierarchicalClustering(const Matrix<typename Distance::ElementType>& points, Matrix<typename Distance::ResultType>& centers,
+                           const KMeansIndexParams& params, Distance d = Distance())
 {
-    KMeansIndex<ELEM_TYPE, DIST_TYPE> kmeans(features, params);
-       kmeans.buildIndex();
+    KMeansIndex<Distance> kmeans(points, params, d);
+    kmeans.buildIndex();
 
     int clusterNum = kmeans.getClusterCenters(centers);
-       return clusterNum;
+    return clusterNum;
 }
 
-} // namespace cvflann
-#endif /* _OPENCV_FLANN_BASE_HPP_ */
+}
+#endif /* FLANN_BASE_HPP_ */
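
A minimal sketch of the rewritten cvflann-level front-end: building an index, saving it, reloading it through SavedIndexParams, and running the flat clustering helper. The matrices are assumed to be allocated by the caller and the file name is a placeholder:

    #include "opencv2/flann/flann_base.hpp"

    void flann_base_example(cvflann::Matrix<float>& data, cvflann::Matrix<float>& queries,
                            cvflann::Matrix<int>& indices, cvflann::Matrix<float>& dists)
    {
        using namespace cvflann;

        Index< L2<float> > index(data, KMeansIndexParams(32));
        index.buildIndex();
        index.knnSearch(queries, indices, dists, 3, SearchParams(128));
        index.save("index.saved");

        // reloading: buildIndex() becomes a no-op because the index was loaded from file
        Index< L2<float> > saved(data, SavedIndexParams("index.saved"));
        saved.buildIndex();

        // flat clustering obtained by cutting the hierarchical k-means tree
        Matrix<float> centers(new float[100 * data.cols], 100, data.cols);
        int found = hierarchicalClustering< L2<float> >(data, centers, KMeansIndexParams(32));
        (void)found;
        delete[] centers.data;
    }
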
index 880cc84..87e7e2f 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_GENERAL_H_
-#define _OPENCV_GENERAL_H_
-
-#ifdef __cplusplus
+#ifndef OPENCV_FLANN_GENERAL_H_
+#define OPENCV_FLANN_GENERAL_H_
 
+#include "defines.h"
 #include <stdexcept>
 #include <cassert>
-#include "opencv2/flann/object_factory.h"
-#include "opencv2/flann/logger.h"
-
-namespace cvflann {
-
-#undef ARRAY_LEN
-#define ARRAY_LEN(a) (sizeof(a)/sizeof(a[0]))
-
-/* Nearest neighbour index algorithms */
-enum flann_algorithm_t {
-       FLANN_INDEX_LINEAR = 0,
-       FLANN_INDEX_KDTREE = 1,
-       FLANN_INDEX_KMEANS = 2,
-       FLANN_INDEX_COMPOSITE = 3,
-       FLANN_INDEX_SAVED = 254,
-       FLANN_INDEX_AUTOTUNED = 255
-};
-
-enum flann_centers_init_t {
-       FLANN_CENTERS_RANDOM = 0,
-       FLANN_CENTERS_GONZALES = 1,
-       FLANN_CENTERS_KMEANSPP = 2
-};
-
-
-enum flann_distance_t {
-       FLANN_DIST_EUCLIDEAN = 1,
-       FLANN_DIST_L2 = 1,
-       FLANN_DIST_MANHATTAN = 2,
-       FLANN_DIST_L1 = 2,
-       FLANN_DIST_MINKOWSKI = 3,
-       FLANN_DIST_MAX       = 4,
-       FLANN_DIST_HIST_INTERSECT = 5,
-       FLANN_DIST_HELLINGER = 6,
-       FLANN_DIST_CHI_SQUARE = 7,
-       FLANN_DIST_CS         = 7,
-       FLANN_DIST_KULLBACK_LEIBLER        = 8,
-       FLANN_DIST_KL        = 8
-};
-
-enum flann_datatype_t {
-       FLANN_INT8 = 0,
-       FLANN_INT16 = 1,
-       FLANN_INT32 = 2,
-       FLANN_INT64 = 3,
-       FLANN_UINT8 = 4,
-       FLANN_UINT16 = 5,
-       FLANN_UINT32 = 6,
-       FLANN_UINT64 = 7,
-       FLANN_FLOAT32 = 8,
-       FLANN_FLOAT64 = 9
-};
-
-template <typename ELEM_TYPE>
-struct DistType
-{
-       typedef ELEM_TYPE type;
-};
 
-template <>
-struct DistType<unsigned char>
+namespace cvflann
 {
-       typedef float type;
-};
 
-template <>
-struct DistType<int>
+class FLANNException : public std::runtime_error
 {
-       typedef float type;
-};
-
-
-class FLANNException : public std::runtime_error {
- public:
-   FLANNException(const char* message) : std::runtime_error(message) { }
-
-   FLANNException(const std::string& message) : std::runtime_error(message) { }
- };
-
-
-struct CV_EXPORTS IndexParams {
-protected:
-       IndexParams(flann_algorithm_t algorithm_) : algorithm(algorithm_) {};
-       
 public:
-       virtual ~IndexParams() {}
-       virtual flann_algorithm_t getIndexType() const { return algorithm; }
-
-       virtual void print() const = 0;
-
-       flann_algorithm_t algorithm;
-};
-
-
-typedef ObjectFactory<IndexParams, flann_algorithm_t> ParamsFactory;
-CV_EXPORTS ParamsFactory& ParamsFactory_instance();
-
-struct CV_EXPORTS SearchParams {
-       SearchParams(int checks_ = 32) :
-               checks(checks_) {};
+    FLANNException(const char* message) : std::runtime_error(message) { }
 
-       int checks;
+    FLANNException(const std::string& message) : std::runtime_error(message) { }
 };
 
-} // namespace cvflann
+}
 
-#endif
 
-#endif  /* _OPENCV_GENERAL_H_ */
+#endif  /* OPENCV_FLANN_GENERAL_H_ */
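
With the enums moved to defines.h, general.h now only carries FLANNException; a small sketch of the typical way errors raised by the calls above (e.g. Index::save) can be handled:

    #include "opencv2/flann/general.h"
    #include <cstdio>

    void guarded_call_example()
    {
        try {
            throw cvflann::FLANNException("Cannot open file");  // as thrown by Index::save()
        }
        catch (const cvflann::FLANNException& e) {
            std::printf("FLANN error: %s\n", e.what());
        }
    }
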
index cb21324..8f1c698 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_GROUND_TRUTH_H_
-#define _OPENCV_GROUND_TRUTH_H_
+#ifndef OPENCV_FLANN_GROUND_TRUTH_H_
+#define OPENCV_FLANN_GROUND_TRUTH_H_
+
+#include "dist.h"
+#include "matrix.h"
 
-#include "opencv2/flann/dist.h"
-#include "opencv2/flann/matrix.h"
 
 namespace cvflann
 {
 
-template <typename T>
-void find_nearest(const Matrix<T>& dataset, T* query, int* matches, int nn, int skip = 0)
+template <typename Distance>
+void find_nearest(const Matrix<typename Distance::ElementType>& dataset, typename Distance::ElementType* query, int* matches, int nn,
+                  int skip = 0, Distance distance = Distance())
 {
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
     int n = nn + skip;
 
-    T* query_end = query + dataset.cols;
-
-    long* match = new long[n];
-    T* dists = new T[n];
+    int* match = new int[n];
+    DistanceType* dists = new DistanceType[n];
 
-    dists[0] = (float)flann_dist(query, query_end, dataset[0]);
+    dists[0] = distance(dataset[0], query, dataset.cols);
     match[0] = 0;
     int dcnt = 1;
 
-    for (size_t i=1;i<dataset.rows;++i) {
-        T tmp = (T)flann_dist(query, query_end, dataset[i]);
+    for (size_t i=1; i<dataset.rows; ++i) {
+        DistanceType tmp = distance(dataset[i], query, dataset.cols);
 
         if (dcnt<n) {
-            match[dcnt] = (long)i;
+            match[dcnt] = i;
             dists[dcnt++] = tmp;
         }
         else if (tmp < dists[dcnt-1]) {
             dists[dcnt-1] = tmp;
-            match[dcnt-1] = (long)i;
+            match[dcnt-1] = i;
         }
 
         int j = dcnt-1;
@@ -72,7 +74,7 @@ void find_nearest(const Matrix<T>& dataset, T* query, int* matches, int nn, int
         }
     }
 
-    for (int i=0;i<nn;++i) {
+    for (int i=0; i<nn; ++i) {
         matches[i] = match[i+skip];
     }
 
@@ -81,15 +83,16 @@ void find_nearest(const Matrix<T>& dataset, T* query, int* matches, int nn, int
 }
 
 
-template <typename T>
-void compute_ground_truth(const Matrix<T>& dataset, const Matrix<T>& testset, Matrix<int>& matches, int skip=0)
+template <typename Distance>
+void compute_ground_truth(const Matrix<typename Distance::ElementType>& dataset, const Matrix<typename Distance::ElementType>& testset, Matrix<int>& matches,
+                          int skip=0, Distance d = Distance())
 {
-    for (size_t i=0;i<testset.rows;++i) {
-        find_nearest(dataset, testset[i], matches[i], (int)matches.cols, skip);
+    for (size_t i=0; i<testset.rows; ++i) {
+        find_nearest<Distance>(dataset, testset[i], matches[i], (int)matches.cols, skip, d);
     }
 }
 
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_GROUND_TRUTH_H_
+#endif //OPENCV_FLANN_GROUND_TRUTH_H_
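
A minimal sketch of calling the re-templated compute_ground_truth; the matrices are assumed to be allocated by the caller, with matches.cols giving the number of neighbours requested:

    #include "opencv2/flann/ground_truth.h"

    void ground_truth_example(const cvflann::Matrix<float>& dataset,
                              const cvflann::Matrix<float>& testset,
                              cvflann::Matrix<int>& matches)
    {
        // exact nearest neighbours under L2, typically used to measure index precision
        cvflann::compute_ground_truth< cvflann::L2<float> >(dataset, testset, matches);
    }
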
index 727d5a1..ff92434 100644 (file)
  *************************************************************************/
 
 
-#ifndef _OPENCV_HDF5_H_
-#define _OPENCV_HDF5_H_
+#ifndef OPENCV_FLANN_HDF5_H_
+#define OPENCV_FLANN_HDF5_H_
 
-#include <H5Cpp.h>
+#include <hdf5.h>
 
-#include "opencv2/flann/matrix.h"
+#include "matrix.h"
 
 
+namespace cvflann
+{
 
-#ifndef H5_NO_NAMESPACE
-    using namespace H5;
-#endif
+namespace
+{
 
-namespace cvflann 
+template<typename T>
+hid_t get_hdf5_type()
 {
+    throw FLANNException("Unsupported type for IO operations");
+}
+
+template<>
+hid_t get_hdf5_type<char>() { return H5T_NATIVE_CHAR; }
+template<>
+hid_t get_hdf5_type<unsigned char>() { return H5T_NATIVE_UCHAR; }
+template<>
+hid_t get_hdf5_type<short int>() { return H5T_NATIVE_SHORT; }
+template<>
+hid_t get_hdf5_type<unsigned short int>() { return H5T_NATIVE_USHORT; }
+template<>
+hid_t get_hdf5_type<int>() { return H5T_NATIVE_INT; }
+template<>
+hid_t get_hdf5_type<unsigned int>() { return H5T_NATIVE_UINT; }
+template<>
+hid_t get_hdf5_type<long>() { return H5T_NATIVE_LONG; }
+template<>
+hid_t get_hdf5_type<unsigned long>() { return H5T_NATIVE_ULONG; }
+template<>
+hid_t get_hdf5_type<float>() { return H5T_NATIVE_FLOAT; }
+template<>
+hid_t get_hdf5_type<double>() { return H5T_NATIVE_DOUBLE; }
+template<>
+hid_t get_hdf5_type<long double>() { return H5T_NATIVE_LDOUBLE; }
+}
 
 
-namespace {
+#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y));
 
 template<typename T>
-PredType get_hdf5_type()
+void save_to_file(const cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name)
 {
-       throw FLANNException("Unsupported type for IO operations");
-}
 
-template<> PredType get_hdf5_type<char>() { return PredType::NATIVE_CHAR; }
-template<> PredType get_hdf5_type<unsigned char>() { return PredType::NATIVE_UCHAR; }
-template<> PredType get_hdf5_type<short int>() { return PredType::NATIVE_SHORT; }
-template<> PredType get_hdf5_type<unsigned short int>() { return PredType::NATIVE_USHORT; }
-template<> PredType get_hdf5_type<int>() { return PredType::NATIVE_INT; }
-template<> PredType get_hdf5_type<unsigned int>() { return PredType::NATIVE_UINT; }
-template<> PredType get_hdf5_type<long>() { return PredType::NATIVE_LONG; }
-template<> PredType get_hdf5_type<unsigned long>() { return PredType::NATIVE_ULONG; }
-template<> PredType get_hdf5_type<float>() { return PredType::NATIVE_FLOAT; }
-template<> PredType get_hdf5_type<double>() { return PredType::NATIVE_DOUBLE; }
-template<> PredType get_hdf5_type<long double>() { return PredType::NATIVE_LDOUBLE; }
+#if H5Eset_auto_vers == 2
+    H5Eset_auto( H5E_DEFAULT, NULL, NULL );
+#else
+    H5Eset_auto( NULL, NULL );
+#endif
+
+    herr_t status;
+    hid_t file_id;
+    file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
+    if (file_id < 0) {
+        file_id = H5Fcreate(filename.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+    }
+    CHECK_ERROR(file_id,"Error creating hdf5 file.");
+
+    hsize_t     dimsf[2];              // dataset dimensions
+    dimsf[0] = dataset.rows;
+    dimsf[1] = dataset.cols;
+
+    hid_t space_id = H5Screate_simple(2, dimsf, NULL);
+    hid_t memspace_id = H5Screate_simple(2, dimsf, NULL);
+
+    hid_t dataset_id;
+#if H5Dcreate_vers == 2
+    dataset_id = H5Dcreate2(file_id, name.c_str(), get_hdf5_type<T>(), space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+#else
+    dataset_id = H5Dcreate(file_id, name.c_str(), get_hdf5_type<T>(), space_id, H5P_DEFAULT);
+#endif
+
+    if (dataset_id<0) {
+#if H5Dopen_vers == 2
+        dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);
+#else
+        dataset_id = H5Dopen(file_id, name.c_str());
+#endif
+    }
+    CHECK_ERROR(dataset_id,"Error creating or opening dataset in file.");
+
+    status = H5Dwrite(dataset_id, get_hdf5_type<T>(), memspace_id, space_id, H5P_DEFAULT, dataset.data );
+    CHECK_ERROR(status, "Error writing to dataset");
+
+    H5Sclose(memspace_id);
+    H5Sclose(space_id);
+    H5Dclose(dataset_id);
+    H5Fclose(file_id);
 
 }
 
 
 template<typename T>
-void save_to_file(const cvflann::Matrix<T>& flann_dataset, const std::string& filename, const std::string& name)
+void load_from_file(cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name)
 {
-       // Try block to detect exceptions raised by any of the calls inside it
-       try
-       {
-               /*
-                * Turn off the auto-printing when failure occurs so that we can
-                * handle the errors appropriately
-                */
-               Exception::dontPrint();
-
-               /*
-                * Create a new file using H5F_ACC_TRUNC access,
-                * default file creation properties, and default file
-                * access properties.
-                */
-               H5File file( filename, H5F_ACC_TRUNC );
-
-               /*
-                * Define the size of the array and create the data space for fixed
-                * size dataset.
-                */
-               hsize_t     dimsf[2];              // dataset dimensions
-               dimsf[0] = flann_dataset.rows;
-               dimsf[1] = flann_dataset.cols;
-               DataSpace dataspace( 2, dimsf );
-
-               /*
-                * Create a new dataset within the file using defined dataspace and
-                * datatype and default dataset creation properties.
-                */
-               DataSet dataset = file.createDataSet( name, get_hdf5_type<T>(), dataspace );
-
-               /*
-                * Write the data to the dataset using default memory space, file
-                * space, and transfer properties.
-                */
-               dataset.write( flann_dataset.data, get_hdf5_type<T>() );
-       }  // end of try block
-       catch( H5::Exception& error )
-       {
-               error.printError();
-               throw FLANNException(error.getDetailMsg());
-       }
+    herr_t status;
+    hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
+    CHECK_ERROR(file_id,"Error opening hdf5 file.");
+
+    hid_t dataset_id;
+#if H5Dopen_vers == 2
+    dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);
+#else
+    dataset_id = H5Dopen(file_id, name.c_str());
+#endif
+    CHECK_ERROR(dataset_id,"Error opening dataset in file.");
+
+    hid_t space_id = H5Dget_space(dataset_id);
+
+    hsize_t dims_out[2];
+    H5Sget_simple_extent_dims(space_id, dims_out, NULL);
+
+    dataset = cvflann::Matrix<T>(new T[dims_out[0]*dims_out[1]], dims_out[0], dims_out[1]);
+
+    status = H5Dread(dataset_id, get_hdf5_type<T>(), H5S_ALL, H5S_ALL, H5P_DEFAULT, dataset[0]);
+    CHECK_ERROR(status, "Error reading dataset");
+
+    H5Sclose(space_id);
+    H5Dclose(dataset_id);
+    H5Fclose(file_id);
 }
 
 
+#ifdef HAVE_MPI
+
+namespace mpi
+{
+/**
+ * Loads the hyperslice corresponding to this processor from an HDF5 file.
+ * @param dataset Dataset where the data is loaded
+ * @param filename HDF5 file name
+ * @param name Name of dataset inside file
+ */
 template<typename T>
-void load_from_file(cvflann::Matrix<T>& flann_dataset, const std::string& filename, const std::string& name)
+void load_from_file(cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name)
 {
-       try
-       {
-               Exception::dontPrint();
-
-               H5File file( filename, H5F_ACC_RDONLY );
-               DataSet dataset = file.openDataSet( name );
-
-               /*
-                * Check the type used by the dataset matches
-                */
-               if ( !(dataset.getDataType()==get_hdf5_type<T>())) {
-                       throw FLANNException("Dataset matrix type does not match the type to be read.");
-               }
-
-               /*
-                * Get dataspace of the dataset.
-                */
-               DataSpace dataspace = dataset.getSpace();
-
-               /*
-                * Get the dimension size of each dimension in the dataspace and
-                * display them.
-                */
-               hsize_t dims_out[2];
-               dataspace.getSimpleExtentDims( dims_out, NULL);
-               
-               flann_dataset.rows = dims_out[0];
-               flann_dataset.cols = dims_out[1];
-               flann_dataset.data = new T[flann_dataset.rows*flann_dataset.cols];
-
-               dataset.read( flann_dataset.data, get_hdf5_type<T>() );
-       }  // end of try block
-       catch( H5::Exception &error )
-       {
-               error.printError();
-               throw FLANNException(error.getDetailMsg());
-       }
-}
+    MPI_Comm comm  = MPI_COMM_WORLD;
+    MPI_Info info  = MPI_INFO_NULL;
+
+    int mpi_size, mpi_rank;
+    MPI_Comm_size(comm, &mpi_size);
+    MPI_Comm_rank(comm, &mpi_rank);
+
+    herr_t status;
+
+    hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+    H5Pset_fapl_mpio(plist_id, comm, info);
+    hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id);
+    CHECK_ERROR(file_id,"Error opening hdf5 file.");
+    H5Pclose(plist_id);
+    hid_t dataset_id;
+#if H5Dopen_vers == 2
+    dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);
+#else
+    dataset_id = H5Dopen(file_id, name.c_str());
+#endif
+    CHECK_ERROR(dataset_id,"Error opening dataset in file.");
+
+    hid_t space_id = H5Dget_space(dataset_id);
+    hsize_t dims[2];
+    H5Sget_simple_extent_dims(space_id, dims, NULL);
 
+    hsize_t count[2];
+    hsize_t offset[2];
 
-} // namespace cvflann
+    hsize_t item_cnt = dims[0]/mpi_size+(dims[0]%mpi_size==0 ? 0 : 1);
+    hsize_t cnt = (mpi_rank<mpi_size-1 ? item_cnt : dims[0]-item_cnt*(mpi_size-1));
+
+    count[0] = cnt;
+    count[1] = dims[1];
+    offset[0] = mpi_rank*item_cnt;
+    offset[1] = 0;
+
+    hid_t memspace_id = H5Screate_simple(2,count,NULL);
+
+    H5Sselect_hyperslab(space_id, H5S_SELECT_SET, offset, NULL, count, NULL);
+
+    dataset.rows = count[0];
+    dataset.cols = count[1];
+    dataset.data = new T[dataset.rows*dataset.cols];
+
+    plist_id = H5Pcreate(H5P_DATASET_XFER);
+    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+    status = H5Dread(dataset_id, get_hdf5_type<T>(), memspace_id, space_id, plist_id, dataset.data);
+    CHECK_ERROR(status, "Error reading dataset");
+
+    H5Pclose(plist_id);
+    H5Sclose(space_id);
+    H5Sclose(memspace_id);
+    H5Dclose(dataset_id);
+    H5Fclose(file_id);
+}
+}
+#endif // HAVE_MPI
+} // namespace cvflann
 
-#endif /* _OPENCV_HDF5_H_ */
+#endif /* OPENCV_FLANN_HDF5_H_ */
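
A minimal sketch of the new C-API based HDF5 helpers; the file and dataset names are placeholders, and the loaded buffer is allocated by load_from_file with new[]:

    #include "opencv2/flann/hdf5.h"

    void hdf5_example(const cvflann::Matrix<float>& dataset)
    {
        cvflann::save_to_file(dataset, "data.h5", "dataset");

        cvflann::Matrix<float> loaded(NULL, 0, 0);
        cvflann::load_from_file(loaded, "data.h5", "dataset");
        // ... use loaded ...
        delete[] loaded.data;
    }
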
index 0e05451..a189717 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_HEAP_H_
-#define _OPENCV_HEAP_H_
-
+#ifndef OPENCV_FLANN_HEAP_H_
+#define OPENCV_FLANN_HEAP_H_
 
 #include <algorithm>
+#include <vector>
 
 namespace cvflann
 {
@@ -43,166 +43,123 @@ namespace cvflann
  * The priority queue is implemented with a heap.  A heap is a complete
  * (full) binary tree in which each parent is less than both of its
  * children, but the order of the children is unspecified.
- * Note that a heap uses 1-based indexing to allow for power-of-2
- * location of parents and children.  We ignore element 0 of Heap array.
  */
 template <typename T>
-class Heap {
+class Heap
+{
 
-       /**
-       * Storage array for the heap.
-       * Type T must be comparable.
-       */
-       T* heap;
+    /**
+     * Storage array for the heap.
+     * Type T must be comparable.
+     */
+    std::vector<T> heap;
     int length;
 
-       /**
-        * Number of element in the heap
-        */
-       int count;
+    /**
+     * Number of elements in the heap
+     */
+    int count;
 
 
 
 public:
-       /**
-        * Constructor.
-        *
-        * Params:
-        *     size = heap size
-        */
-
-       Heap(int size)
-       {
-        length = size+1;
-               heap = new T[length];  // heap uses 1-based indexing
-               count = 0;
-       }
-
-
-       /**
-        * Destructor.
-        *
-        */
-       ~Heap()
-       {
-               delete[] heap;
-       }
-
-       /**
-        *
-        * Returns: heap size
-        */
-       int size()
-       {
-               return count;
-       }
-
-       /**
-        * Tests if the heap is empty
-        *
-        * Returns: true is heap empty, false otherwise
-        */
-       bool empty()
-       {
-               return size()==0;
-       }
-
-       /**
-        * Clears the heap.
-        */
-       void clear()
-       {
-               count = 0;
-       }
-
-
-       /**
-        * Insert a new element in the heap.
-        *
-        * We select the next empty leaf node, and then keep moving any larger
-        * parents down until the right location is found to store this element.
-        *
-        * Params:
-        *     value = the new element to be inserted in the heap
-        */
-       void insert(T value)
-       {
-               /* If heap is full, then return without adding this element. */
-               if (count == length-1) {
-                       return;
-               }
-
-               int loc = ++(count);   /* Remember 1-based indexing. */
-
-               /* Keep moving parents down until a place is found for this node. */
-               int par = loc / 2;                 /* Location of parent. */
-               while (par > 0  && value < heap[par]) {
-                       heap[loc] = heap[par];     /* Move parent down to loc. */
-                       loc = par;
-                       par = loc / 2;
-               }
-               /* Insert the element at the determined location. */
-               heap[loc] = value;
-       }
-
-
-
-       /**
-        * Returns the node of minimum value from the heap (top of the heap).
-        *
-        * Params:
-        *     value = out parameter used to return the min element
-        * Returns: false if heap empty
-        */
-       bool popMin(T& value)
-       {
-               if (count == 0) {
-                       return false;
-               }
-
-               /* Switch first node with last. */
-        std::swap(heap[1],heap[count]);
-
-               count -= 1;
-               heapify(1);      /* Move new node 1 to right position. */
-
-               value = heap[count + 1];
-               return true;  /* Return old last node. */
-       }
-
-
-       /**
-        * Reorganizes the heap (a parent is smaller than its children)
-        * starting with a node.
-        *
-        * Params:
-        *     parent = node form which to start heap reorganization.
-        */
-       void heapify(int parent)
-       {
-               int minloc = parent;
-
-               /* Check the left child */
-               int left = 2 * parent;
-               if (left <= count && heap[left] < heap[parent]) {
-                       minloc = left;
-               }
-
-               /* Check the right child */
-               int right = left + 1;
-               if (right <= count && heap[right] < heap[minloc]) {
-                       minloc = right;
-               }
-
-               /* If a child was smaller, than swap parent with it and Heapify. */
-               if (minloc != parent) {
-            std::swap(heap[parent],heap[minloc]);
-                       heapify(minloc);
-               }
-       }
-
+    /**
+     * Constructor.
+     *
+     * Params:
+     *     size = heap size
+     */
+
+    Heap(int size)
+    {
+        length = size;
+        heap.reserve(length);
+        count = 0;
+    }
+
+    /**
+     *
+     * Returns: number of elements currently in the heap
+     */
+    int size()
+    {
+        return count;
+    }
+
+    /**
+     * Tests if the heap is empty
+     *
+     * Returns: true if the heap is empty, false otherwise
+     */
+    bool empty()
+    {
+        return size()==0;
+    }
+
+    /**
+     * Clears the heap.
+     */
+    void clear()
+    {
+        heap.clear();
+        count = 0;
+    }
+
+    /**
+     * Comparator that inverts operator< so that std::push_heap/std::pop_heap
+     * maintain a min-heap (the smallest element ends up at heap[0]).
+     */
+    struct CompareT
+    {
+        bool operator()(const T& t_1, const T& t_2) const
+        {
+            return !(t_1 < t_2);
+        }
+    };
+
+    /**
+     * Insert a new element in the heap.
+     *
+     * The element is appended to the storage vector and std::push_heap is used
+     * to sift it up until the min-heap property is restored.
+     *
+     * Params:
+     *     value = the new element to be inserted in the heap
+     */
+    void insert(T value)
+    {
+        /* If heap is full, then return without adding this element. */
+        if (count == length) {
+            return;
+        }
+
+        heap.push_back(value);
+        static CompareT compareT;
+        std::push_heap(heap.begin(), heap.end(), compareT);
+        ++count;
+    }
+
+
+
+    /**
+     * Returns the node of minimum value from the heap (top of the heap).
+     *
+     * Params:
+     *     value = out parameter used to return the min element
+     * Returns: false if heap empty
+     */
+    bool popMin(T& value)
+    {
+        if (count == 0) {
+            return false;
+        }
+
+        value = heap[0];
+        static CompareT compareT;
+        std::pop_heap(heap.begin(), heap.end(), compareT);
+        heap.pop_back();
+        --count;
+
+        return true;
+    }
 };
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_HEAP_H_
+#endif //OPENCV_FLANN_HEAP_H_
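
The reworked Heap<T> behaves as a bounded min-priority queue: insert() silently drops elements once the
capacity given to the constructor is reached, and popMin() always hands back the smallest remaining
element. A minimal usage sketch (illustration only, not part of the header above; the float
instantiation and the sample values are assumptions):

    #include <cstdio>
    #include "opencv2/flann/heap.h"

    int main()
    {
        cvflann::Heap<float> heap(4);                        // room for at most 4 elements
        float input[] = { 3.0f, 1.0f, 2.0f, 5.0f, 0.5f };
        for (int i = 0; i < 5; ++i) heap.insert(input[i]);   // the fifth insert is dropped (heap full)

        float v;
        while (heap.popMin(v)) printf("%g ", v);             // prints "1 2 3 5" in ascending order
        return 0;
    }
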
diff --git a/modules/flann/include/opencv2/flann/hierarchical_clustering_index.h b/modules/flann/include/opencv2/flann/hierarchical_clustering_index.h
new file mode 100644 (file)
index 0000000..64d7cf1
--- /dev/null
@@ -0,0 +1,717 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_
+#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_
+
+#include <algorithm>
+#include <string>
+#include <map>
+#include <cassert>
+#include <limits>
+#include <cmath>
+
+#include "general.h"
+#include "nn_index.h"
+#include "dist.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+
+
+namespace cvflann
+{
+
+struct HierarchicalClusteringIndexParams : public IndexParams
+{
+    HierarchicalClusteringIndexParams(int branching = 32,
+                                      flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM,
+                                      int trees = 4, int leaf_size = 100)
+    {
+        (*this)["algorithm"] = FLANN_INDEX_HIERARCHICAL;
+        // The branching factor used in the hierarchical clustering
+        (*this)["branching"] = branching;
+        // Algorithm used for picking the initial cluster centers
+        (*this)["centers_init"] = centers_init;
+        // number of parallel trees to build
+        (*this)["trees"] = trees;
+        // maximum leaf size
+        (*this)["leaf_size"] = leaf_size;
+    }
+};
+
+
+/**
+ * Hierarchical index
+ *
+ * Contains a tree constructed through a hierarchical clustering
+ * and other information for indexing a set of points for nearest-neighbour matching.
+ */
+template <typename Distance>
+class HierarchicalClusteringIndex : public NNIndex<Distance>
+{
+public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
+
+private:
+
+
+    typedef void (HierarchicalClusteringIndex::* centersAlgFunction)(int, int*, int, int*, int&);
+
+    /**
+     * The function used for choosing the cluster centers.
+     */
+    centersAlgFunction chooseCenters;
+
+
+
+    /**
+     * Chooses the initial centers in the k-means clustering in a random manner.
+     *
+     * Params:
+     *     k = number of centers to choose
+     *     indices = indices of the points from which to choose the centers
+     *     indices_length = length of the indices vector
+     *     centers = output array in which the chosen centers are stored
+     *     centers_length = output parameter, the number of centers actually chosen
+     *
+     */
+    void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length)
+    {
+        UniqueRandom r(indices_length);
+
+        int index;
+        for (index=0; index<k; ++index) {
+            bool duplicate = true;
+            int rnd;
+            while (duplicate) {
+                duplicate = false;
+                rnd = r.next();
+                if (rnd<0) {
+                    centers_length = index;
+                    return;
+                }
+
+                centers[index] = indices[rnd];
+
+                for (int j=0; j<index; ++j) {
+                    float sq = distance(dataset[centers[index]], dataset[centers[j]], dataset.cols);
+                    if (sq<1e-16) {
+                        duplicate = true;
+                    }
+                }
+            }
+        }
+
+        centers_length = index;
+    }
+
+
+    /**
+     * Chooses the initial centers in the k-means using Gonzales' algorithm
+     * so that the centers are spaced apart from each other.
+     *
+     * Params:
+     *     k = number of centers to choose
+     *     indices = indices of the points from which to choose the centers
+     *     indices_length = length of the indices vector
+     *     centers = output array in which the chosen centers are stored
+     *     centers_length = output parameter, the number of centers actually chosen
+     */
+    void chooseCentersGonzales(int k, int* indices, int indices_length, int* centers, int& centers_length)
+    {
+        int n = indices_length;
+
+        int rnd = rand_int(n);
+        assert(rnd >=0 && rnd < n);
+
+        centers[0] = indices[rnd];
+
+        int index;
+        for (index=1; index<k; ++index) {
+
+            int best_index = -1;
+            float best_val = 0;
+            for (int j=0; j<n; ++j) {
+                float dist = distance(dataset[centers[0]],dataset[indices[j]],dataset.cols);
+                for (int i=1; i<index; ++i) {
+                    float tmp_dist = distance(dataset[centers[i]],dataset[indices[j]],dataset.cols);
+                    if (tmp_dist<dist) {
+                        dist = tmp_dist;
+                    }
+                }
+                if (dist>best_val) {
+                    best_val = dist;
+                    best_index = j;
+                }
+            }
+            if (best_index!=-1) {
+                centers[index] = indices[best_index];
+            }
+            else {
+                break;
+            }
+        }
+        centers_length = index;
+    }
+
+
+    /**
+     * Chooses the initial centers in the k-means using the algorithm
+     * proposed in the KMeans++ paper:
+     * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding
+     *
+     * Implementation of this function was converted from the one provided in Arthur's code.
+     * Each new center is sampled with probability proportional to its squared
+     * distance to the closest center chosen so far.
+     *
+     * Params:
+     *     k = number of centers to choose
+     *     indices = indices of the points from which to choose the centers
+     *     indices_length = length of the indices vector
+     *     centers = output array in which the chosen centers are stored
+     *     centers_length = output parameter, the number of centers actually chosen
+     */
+    void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length)
+    {
+        int n = indices_length;
+
+        double currentPot = 0;
+        DistanceType* closestDistSq = new DistanceType[n];
+
+        // Choose one random center and set the closestDistSq values
+        int index = rand_int(n);
+        assert(index >=0 && index < n);
+        centers[0] = indices[index];
+
+        for (int i = 0; i < n; i++) {
+            closestDistSq[i] = distance(dataset[indices[i]], dataset[indices[index]], dataset.cols);
+            currentPot += closestDistSq[i];
+        }
+
+
+        const int numLocalTries = 1;
+
+        // Choose each center
+        int centerCount;
+        for (centerCount = 1; centerCount < k; centerCount++) {
+
+            // Repeat several trials
+            double bestNewPot = -1;
+            int bestNewIndex;
+            for (int localTrial = 0; localTrial < numLocalTries; localTrial++) {
+
+                // Choose our center - have to be slightly careful to return a valid answer even accounting
+                // for possible rounding errors
+                double randVal = rand_double(currentPot);
+                for (index = 0; index < n-1; index++) {
+                    if (randVal <= closestDistSq[index]) break;
+                    else randVal -= closestDistSq[index];
+                }
+
+                // Compute the new potential
+                double newPot = 0;
+                for (int i = 0; i < n; i++) newPot += std::min( distance(dataset[indices[i]], dataset[indices[index]], dataset.cols), closestDistSq[i] );
+
+                // Store the best result
+                if ((bestNewPot < 0)||(newPot < bestNewPot)) {
+                    bestNewPot = newPot;
+                    bestNewIndex = index;
+                }
+            }
+
+            // Add the appropriate center
+            centers[centerCount] = indices[bestNewIndex];
+            currentPot = bestNewPot;
+            for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance(dataset[indices[i]], dataset[indices[bestNewIndex]], dataset.cols), closestDistSq[i] );
+        }
+
+        centers_length = centerCount;
+
+        delete[] closestDistSq;
+    }
+
+
+public:
+
+
+    /**
+     * Index constructor
+     *
+     * Params:
+     *          inputData = dataset with the input features
+     *          params = parameters passed to the hierarchical k-means algorithm
+     */
+    HierarchicalClusteringIndex(const Matrix<ElementType>& inputData, const IndexParams& index_params = HierarchicalClusteringIndexParams(),
+                                Distance d = Distance())
+        : dataset(inputData), params(index_params), root(NULL), indices(NULL), distance(d)
+    {
+        memoryCounter = 0;
+
+        size_ = dataset.rows;
+        veclen_ = dataset.cols;
+
+        branching_ = get_param(params,"branching",32);
+        centers_init_ = get_param(params,"centers_init", FLANN_CENTERS_RANDOM);
+        trees_ = get_param(params,"trees",4);
+        leaf_size_ = get_param(params,"leaf_size",100);
+
+        if (centers_init_==FLANN_CENTERS_RANDOM) {
+            chooseCenters = &HierarchicalClusteringIndex::chooseCentersRandom;
+        }
+        else if (centers_init_==FLANN_CENTERS_GONZALES) {
+            chooseCenters = &HierarchicalClusteringIndex::chooseCentersGonzales;
+        }
+        else if (centers_init_==FLANN_CENTERS_KMEANSPP) {
+            chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp;
+        }
+        else {
+            throw FLANNException("Unknown algorithm for choosing initial centers.");
+        }
+
+        trees_ = get_param(params,"trees",4);
+        root = new NodePtr[trees_];
+        indices = new int*[trees_];
+    }
+
+    HierarchicalClusteringIndex(const HierarchicalClusteringIndex&);
+    HierarchicalClusteringIndex& operator=(const HierarchicalClusteringIndex&);
+
+    /**
+     * Index destructor.
+     *
+     * Release the memory used by the index.
+     */
+    virtual ~HierarchicalClusteringIndex()
+    {
+        if (indices!=NULL) {
+            delete[] indices;
+        }
+    }
+
+    /**
+     *  Returns size of index.
+     */
+    size_t size() const
+    {
+        return size_;
+    }
+
+    /**
+     * Returns the length of an index feature.
+     */
+    size_t veclen() const
+    {
+        return veclen_;
+    }
+
+
+    /**
+     * Computes the index memory usage
+     * Returns: memory used by the index
+     */
+    int usedMemory() const
+    {
+        return pool.usedMemory+pool.wastedMemory+memoryCounter;
+    }
+
+    /**
+     * Builds the index
+     */
+    void buildIndex()
+    {
+        if (branching_<2) {
+            throw FLANNException("Branching factor must be at least 2");
+        }
+        for (int i=0; i<trees_; ++i) {
+            indices[i] = new int[size_];
+            for (size_t j=0; j<size_; ++j) {
+                indices[i][j] = j;
+            }
+            root[i] = pool.allocate<Node>();
+            computeClustering(root[i], indices[i], size_, branching_,0);
+        }
+    }
+
+
+    flann_algorithm_t getType() const
+    {
+        return FLANN_INDEX_HIERARCHICAL;
+    }
+
+
+    void saveIndex(FILE* stream)
+    {
+        save_value(stream, branching_);
+        save_value(stream, trees_);
+        save_value(stream, centers_init_);
+        save_value(stream, leaf_size_);
+        save_value(stream, memoryCounter);
+        for (int i=0; i<trees_; ++i) {
+            save_value(stream, *indices[i], size_);
+            save_tree(stream, root[i], i);
+        }
+
+    }
+
+
+    void loadIndex(FILE* stream)
+    {
+        load_value(stream, branching_);
+        load_value(stream, trees_);
+        load_value(stream, centers_init_);
+        load_value(stream, leaf_size_);
+        load_value(stream, memoryCounter);
+        indices = new int*[trees_];
+        root = new NodePtr[trees_];
+        for (int i=0; i<trees_; ++i) {
+            indices[i] = new int[size_];
+            load_value(stream, *indices[i], size_);
+            load_tree(stream, root[i], i);
+        }
+
+        params["algorithm"] = getType();
+        params["branching"] = branching_;
+        params["trees"] = trees_;
+        params["centers_init"] = centers_init_;
+        params["leaf_size"] = leaf_size_;
+    }
+
+
+    /**
+     * Find set of nearest neighbors to vec. Their indices are stored inside
+     * the result object.
+     *
+     * Params:
+     *     result = the result object in which the indices of the nearest-neighbors are stored
+     *     vec = the vector for which to search the nearest neighbors
+     *     searchParams = parameters that influence the search algorithm (checks)
+     */
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+    {
+
+        int maxChecks = get_param(searchParams,"checks",32);
+
+        // Priority queue storing intermediate branches in the best-bin-first search
+        Heap<BranchSt>* heap = new Heap<BranchSt>(size_);
+
+        std::vector<bool> checked(size_,false);
+        int checks = 0;
+        for (int i=0; i<trees_; ++i) {
+            findNN(root[i], result, vec, checks, maxChecks, heap, checked);
+        }
+
+        BranchSt branch;
+        while (heap->popMin(branch) && (checks<maxChecks || !result.full())) {
+            NodePtr node = branch.node;
+            findNN(node, result, vec, checks, maxChecks, heap, checked);
+        }
+        assert(result.full());
+
+        delete heap;
+
+    }
+
+    IndexParams getParameters() const
+    {
+        return params;
+    }
+
+
+private:
+
+    /**
+     * Structure representing a node in the hierarchical k-means tree.
+     */
+    struct Node
+    {
+        /**
+         * The cluster center index
+         */
+        int pivot;
+        /**
+         * The cluster size (number of points in the cluster)
+         */
+        int size;
+        /**
+         * Child nodes (only for non-terminal nodes)
+         */
+        Node** childs;
+        /**
+         * Node points (only for terminal nodes)
+         */
+        int* indices;
+        /**
+         * Level
+         */
+        int level;
+    };
+    typedef Node* NodePtr;
+
+
+
+    /**
+     * Alias definition for a nicer syntax.
+     */
+    typedef BranchStruct<NodePtr, DistanceType> BranchSt;
+
+
+
+    void save_tree(FILE* stream, NodePtr node, int num)
+    {
+        save_value(stream, *node);
+        if (node->childs==NULL) {
+            int indices_offset = node->indices - indices[num];
+            save_value(stream, indices_offset);
+        }
+        else {
+            for(int i=0; i<branching_; ++i) {
+                save_tree(stream, node->childs[i], num);
+            }
+        }
+    }
+
+
+    void load_tree(FILE* stream, NodePtr& node, int num)
+    {
+        node = pool.allocate<Node>();
+        load_value(stream, *node);
+        if (node->childs==NULL) {
+            int indices_offset;
+            load_value(stream, indices_offset);
+            node->indices = indices[num] + indices_offset;
+        }
+        else {
+            node->childs = pool.allocate<NodePtr>(branching_);
+            for(int i=0; i<branching_; ++i) {
+                load_tree(stream, node->childs[i], num);
+            }
+        }
+    }
+
+
+
+
+    void computeLabels(int* indices, int indices_length,  int* centers, int centers_length, int* labels, DistanceType& cost)
+    {
+        cost = 0;
+        for (int i=0; i<indices_length; ++i) {
+            ElementType* point = dataset[indices[i]];
+            DistanceType dist = distance(point, dataset[centers[0]], veclen_);
+            labels[i] = 0;
+            for (int j=1; j<centers_length; ++j) {
+                DistanceType new_dist = distance(point, dataset[centers[j]], veclen_);
+                if (dist>new_dist) {
+                    labels[i] = j;
+                    dist = new_dist;
+                }
+            }
+            cost += dist;
+        }
+    }
+
+    /**
+     * The method responsible with actually doing the recursive hierarchical
+     * clustering
+     *
+     * Params:
+     *     node = the node to cluster
+     *     indices = indices of the points belonging to the current node
+     *     branching = the branching factor to use in the clustering
+     *
+     * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)
+     */
+    void computeClustering(NodePtr node, int* indices, int indices_length, int branching, int level)
+    {
+        node->size = indices_length;
+        node->level = level;
+
+        if (indices_length < leaf_size_) { // leaf node
+            node->indices = indices;
+            std::sort(node->indices,node->indices+indices_length);
+            node->childs = NULL;
+            return;
+        }
+
+        std::vector<int> centers(branching);
+        std::vector<int> labels(indices_length);
+
+        int centers_length;
+        (this->*chooseCenters)(branching, indices, indices_length, &centers[0], centers_length);
+
+        if (centers_length<branching) {
+            node->indices = indices;
+            std::sort(node->indices,node->indices+indices_length);
+            node->childs = NULL;
+            return;
+        }
+
+
+        //     assign points to clusters
+        DistanceType cost;
+        computeLabels(indices, indices_length, &centers[0], centers_length, &labels[0], cost);
+
+        node->childs = pool.allocate<NodePtr>(branching);
+        int start = 0;
+        int end = start;
+        for (int i=0; i<branching; ++i) {
+            for (int j=0; j<indices_length; ++j) {
+                if (labels[j]==i) {
+                    std::swap(indices[j],indices[end]);
+                    std::swap(labels[j],labels[end]);
+                    end++;
+                }
+            }
+
+            node->childs[i] = pool.allocate<Node>();
+            node->childs[i]->pivot = centers[i];
+            node->childs[i]->indices = NULL;
+            computeClustering(node->childs[i],indices+start, end-start, branching, level+1);
+            start=end;
+        }
+    }
+
+
+
+    /**
+     * Performs one descent in the hierarchical k-means tree. The branches not
+     * visited are stored in a priority queue.
+     *
+     * Params:
+     *      node = node to explore
+     *      result = container for the k-nearest neighbors found
+     *      vec = the query point
+     *      checks = how many points in the dataset have been checked so far
+     *      maxChecks = the maximum number of dataset points to check
+     */
+
+
+    void findNN(NodePtr node, ResultSet<DistanceType>& result, const ElementType* vec, int& checks, int maxChecks,
+                Heap<BranchSt>* heap, std::vector<bool>& checked)
+    {
+        if (node->childs==NULL) {
+            if (checks>=maxChecks) {
+                if (result.full()) return;
+            }
+            checks += node->size;
+            for (int i=0; i<node->size; ++i) {
+                int index = node->indices[i];
+                if (!checked[index]) {
+                    DistanceType dist = distance(dataset[index], vec, veclen_);
+                    result.addPoint(dist, index);
+                    checked[index] = true;
+                }
+            }
+        }
+        else {
+            DistanceType* domain_distances = new DistanceType[branching_];
+            int best_index = 0;
+            domain_distances[best_index] = distance(vec, dataset[node->childs[best_index]->pivot], veclen_);
+            for (int i=1; i<branching_; ++i) {
+                domain_distances[i] = distance(vec, dataset[node->childs[i]->pivot], veclen_);
+                if (domain_distances[i]<domain_distances[best_index]) {
+                    best_index = i;
+                }
+            }
+            for (int i=0; i<branching_; ++i) {
+                if (i!=best_index) {
+                    heap->insert(BranchSt(node->childs[i],domain_distances[i]));
+                }
+            }
+            delete[] domain_distances;
+            findNN(node->childs[best_index],result,vec, checks, maxChecks, heap, checked);
+        }
+    }
+
+private:
+
+
+    /**
+     * The dataset used by this index
+     */
+    const Matrix<ElementType> dataset;
+
+    /**
+     * Parameters used by this index
+     */
+    IndexParams params;
+
+
+    /**
+     * Number of features in the dataset.
+     */
+    size_t size_;
+
+    /**
+     * Length of each feature.
+     */
+    size_t veclen_;
+
+    /**
+     * The root node in the tree.
+     */
+    NodePtr* root;
+
+    /**
+     *  Array of indices to vectors in the dataset.
+     */
+    int** indices;
+
+
+    /**
+     * The distance
+     */
+    Distance distance;
+
+    /**
+     * Pooled memory allocator.
+     *
+     * Using a pooled memory allocator is more efficient
+     * than allocating memory directly when there is a large
+     * number of small memory allocations.
+     */
+    PooledAllocator pool;
+
+    /**
+     * Memory occupied by the index.
+     */
+    int memoryCounter;
+
+    /** index parameters */
+    int branching_;
+    int trees_;
+    flann_centers_init_t centers_init_;
+    int leaf_size_;
+
+
+};
+
+}
+
+#endif /* OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ */
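
The new HierarchicalClusteringIndex recursively splits the dataset into branching_ clusters per node
until a cluster falls below leaf_size_ points, and answers queries best-bin-first, keeping unexplored
branches in the Heap shown earlier. A minimal build-and-query sketch, assuming the L2<float> distance
functor from dist.h; the data pointers, sizes and parameter values below are illustrative only:

    #include "opencv2/flann/hierarchical_clustering_index.h"
    #include "opencv2/flann/dist.h"
    #include "opencv2/flann/result_set.h"

    using namespace cvflann;

    void knn_example(float* data, int rows, int cols, float* query)
    {
        Matrix<float> dataset(data, rows, cols);

        HierarchicalClusteringIndexParams params(32 /*branching*/, FLANN_CENTERS_RANDOM,
                                                 4 /*trees*/, 100 /*leaf_size*/);
        HierarchicalClusteringIndex< L2<float> > index(dataset, params);
        index.buildIndex();

        const int knn = 5;
        int indices[knn];
        float dists[knn];
        KNNResultSet<float> resultSet(knn);
        resultSet.init(indices, dists);        // same ResultSet API used by index_testing.h below

        index.findNeighbors(resultSet, query, SearchParams(64 /*checks*/));
        // indices[0..knn-1] now hold the (approximate) nearest neighbours of 'query'
    }
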
index 476c2c2..c98a14b 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_TESTING_H_
-#define _OPENCV_TESTING_H_
+#ifndef OPENCV_FLANN_INDEX_TESTING_H_
+#define OPENCV_FLANN_INDEX_TESTING_H_
 
 #include <cstring>
 #include <cassert>
+#include <cmath>
 
-#include "opencv2/flann/matrix.h"
-#include "opencv2/flann/nn_index.h"
-#include "opencv2/flann/result_set.h"
-#include "opencv2/flann/logger.h"
-#include "opencv2/flann/timer.h"
-
+#include "matrix.h"
+#include "nn_index.h"
+#include "result_set.h"
+#include "logger.h"
+#include "timer.h"
 
 
 namespace cvflann
 {
 
-CV_EXPORTS int countCorrectMatches(int* neighbors, int* groundTruth, int n);
+inline int countCorrectMatches(int* neighbors, int* groundTruth, int n)
+{
+    int count = 0;
+    for (int i=0; i<n; ++i) {
+        for (int k=0; k<n; ++k) {
+            if (neighbors[i]==groundTruth[k]) {
+                count++;
+                break;
+            }
+        }
+    }
+    return count;
+}
 
 
-template <typename ELEM_TYPE>
-float computeDistanceRaport(const Matrix<ELEM_TYPE>& inputData, ELEM_TYPE* target, int* neighbors, int* groundTruth, int veclen, int n)
+template <typename Distance>
+typename Distance::ResultType computeDistanceRaport(const Matrix<typename Distance::ElementType>& inputData, typename Distance::ElementType* target,
+                                                    int* neighbors, int* groundTruth, int veclen, int n, const Distance& distance)
 {
-       ELEM_TYPE* target_end = target + veclen;
-    float ret = 0;
-    for (int i=0;i<n;++i) {
-        float den = (float)flann_dist(target,target_end, inputData[groundTruth[i]]);
-        float num = (float)flann_dist(target,target_end, inputData[neighbors[i]]);
+    typedef typename Distance::ResultType DistanceType;
 
-        if (den==0 && num==0) {
+    DistanceType ret = 0;
+    for (int i=0; i<n; ++i) {
+        DistanceType den = distance(inputData[groundTruth[i]], target, veclen);
+        DistanceType num = distance(inputData[neighbors[i]], target, veclen);
+
+        if ((den==0)&&(num==0)) {
             ret += 1;
         }
         else {
@@ -68,20 +82,28 @@ float computeDistanceRaport(const Matrix<ELEM_TYPE>& inputData, ELEM_TYPE* targe
     return ret;
 }
 
-template <typename ELEM_TYPE>
-float search_with_ground_truth(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& inputData, const Matrix<ELEM_TYPE>& testData, const Matrix<int>& matches, int nn, int checks, float& time, float& dist, int skipMatches)
+template <typename Distance>
+float search_with_ground_truth(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
+                               const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches, int nn, int checks,
+                               float& time, typename Distance::ResultType& dist, const Distance& distance, int skipMatches)
 {
+    typedef typename Distance::ResultType DistanceType;
+
     if (matches.cols<size_t(nn)) {
-        logger().info("matches.cols=%d, nn=%d\n",matches.cols,nn);
+        Logger::info("matches.cols=%d, nn=%d\n",matches.cols,nn);
 
         throw FLANNException("Ground truth is not computed for as many neighbors as requested");
     }
 
-    KNNResultSet<ELEM_TYPE> resultSet(nn+skipMatches);
+    KNNResultSet<DistanceType> resultSet(nn+skipMatches);
     SearchParams searchParams(checks);
 
+    int* indices = new int[nn+skipMatches];
+    DistanceType* dists = new DistanceType[nn+skipMatches];
+    int* neighbors = indices + skipMatches;
+
     int correct = 0;
-    float distR = 0;
+    DistanceType distR = 0;
     StartStopTimer t;
     int repeats = 0;
     while (t.value<0.2) {
@@ -90,65 +112,69 @@ float search_with_ground_truth(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE
         correct = 0;
         distR = 0;
         for (size_t i = 0; i < testData.rows; i++) {
-            ELEM_TYPE* target = testData[i];
-            resultSet.init(target, (int)testData.cols);
-            index.findNeighbors(resultSet,target, searchParams);
-            int* neighbors = resultSet.getNeighbors();
-            neighbors = neighbors+skipMatches;
+            resultSet.init(indices, dists);
+            index.findNeighbors(resultSet, testData[i], searchParams);
 
             correct += countCorrectMatches(neighbors,matches[i], nn);
-            distR += computeDistanceRaport(inputData, target,neighbors,matches[i], (int)testData.cols, nn);
+            distR += computeDistanceRaport<Distance>(inputData, testData[i], neighbors, matches[i], testData.cols, nn, distance);
         }
         t.stop();
     }
-    time = (float)(t.value/repeats);
+    time = float(t.value/repeats);
 
+    delete[] indices;
+    delete[] dists;
 
     float precicion = (float)correct/(nn*testData.rows);
 
     dist = distR/(testData.rows*nn);
 
-    logger().info("%8d %10.4g %10.5g %10.5g %10.5g\n",
-            checks, precicion, time, 1000.0 * time / testData.rows, dist);
+    Logger::info("%8d %10.4g %10.5g %10.5g %10.5g\n",
+                 checks, precicion, time, 1000.0 * time / testData.rows, dist);
 
     return precicion;
 }
 
 
-template <typename ELEM_TYPE>
-float test_index_checks(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& inputData, const Matrix<ELEM_TYPE>& testData, const Matrix<int>& matches,
-            int checks, float& precision, int nn = 1, int skipMatches = 0)
+template <typename Distance>
+float test_index_checks(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
+                        const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,
+                        int checks, float& precision, const Distance& distance, int nn = 1, int skipMatches = 0)
 {
-    logger().info("  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\n");
-    logger().info("---------------------------------------------------------\n");
+    typedef typename Distance::ResultType DistanceType;
+
+    Logger::info("  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\n");
+    Logger::info("---------------------------------------------------------\n");
 
     float time = 0;
-    float dist = 0;
-    precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, skipMatches);
+    DistanceType dist = 0;
+    precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, distance, skipMatches);
 
     return time;
 }
 
-template <typename ELEM_TYPE>
-float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& inputData, const Matrix<ELEM_TYPE>& testData, const Matrix<int>& matches,
-             float precision, int& checks, int nn = 1, int skipMatches = 0)
+template <typename Distance>
+float test_index_precision(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
+                           const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,
+                           float precision, int& checks, const Distance& distance, int nn = 1, int skipMatches = 0)
 {
-       const float SEARCH_EPS = 0.001f;
+    typedef typename Distance::ResultType DistanceType;
+    const float SEARCH_EPS = 0.001f;
 
-    logger().info("  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\n");
-    logger().info("---------------------------------------------------------\n");
+    Logger::info("  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\n");
+    Logger::info("---------------------------------------------------------\n");
 
     int c2 = 1;
     float p2;
     int c1 = 1;
     float p1;
     float time;
-    float dist;
+    DistanceType dist;
 
-    p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, skipMatches);
+    p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
 
     if (p2>precision) {
-        logger().info("Got as close as I can\n");
+        Logger::info("Got as close as I can\n");
         checks = c2;
         return time;
     }
@@ -157,18 +183,18 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
         c1 = c2;
         p1 = p2;
         c2 *=2;
-        p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, skipMatches);
+        p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
     }
 
     int cx;
     float realPrecision;
     if (fabs(p2-precision)>SEARCH_EPS) {
-        logger().info("Start linear estimation\n");
+        Logger::info("Start linear estimation\n");
         // after we got to values in the vicinity of the desired precision
         // use linear approximation to get a better estimation
 
         cx = (c1+c2)/2;
-        realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, skipMatches);
+        realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
         while (fabs(realPrecision-precision)>SEARCH_EPS) {
 
             if (realPrecision<precision) {
@@ -179,17 +205,18 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
             }
             cx = (c1+c2)/2;
             if (cx==c1) {
-                logger().info("Got as close as I can\n");
+                Logger::info("Got as close as I can\n");
                 break;
             }
-            realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, skipMatches);
+            realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
         }
 
         c2 = cx;
         p2 = realPrecision;
 
-    } else {
-        logger().info("No need for linear estimation\n");
+    }
+    else {
+        Logger::info("No need for linear estimation\n");
         cx = c2;
         realPrecision = p2;
     }
@@ -199,11 +226,14 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
 }
 
 
-template <typename ELEM_TYPE>
-float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& inputData, const Matrix<ELEM_TYPE>& testData, const Matrix<int>& matches,
-                    float* precisions, int precisions_length, int nn = 1, int skipMatches = 0, float maxTime = 0)
+template <typename Distance>
+void test_index_precisions(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
+                           const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,
+                           float* precisions, int precisions_length, const Distance& distance, int nn = 1, int skipMatches = 0, float maxTime = 0)
 {
-       const float SEARCH_EPS = 0.001;
+    typedef typename Distance::ResultType DistanceType;
+
+    const float SEARCH_EPS = 0.001;
 
     // make sure precisions array is sorted
     std::sort(precisions, precisions+precisions_length);
@@ -211,8 +241,8 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
     int pindex = 0;
     float precision = precisions[pindex];
 
-    logger().info("  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist");
-    logger().info("---------------------------------------------------------");
+    Logger::info("  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\n");
+    Logger::info("---------------------------------------------------------\n");
 
     int c2 = 1;
     float p2;
@@ -221,9 +251,9 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
     float p1;
 
     float time;
-    float dist;
+    DistanceType dist;
 
-    p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, skipMatches);
+    p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
 
     // if precision for 1 run down the tree is already
     // better then some of the requested precisions, then
@@ -233,30 +263,30 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
     }
 
     if (pindex==precisions_length) {
-        logger().info("Got as close as I can\n");
-        return time;
+        Logger::info("Got as close as I can\n");
+        return;
     }
 
-    for (int i=pindex;i<precisions_length;++i) {
+    for (int i=pindex; i<precisions_length; ++i) {
 
         precision = precisions[i];
         while (p2<precision) {
             c1 = c2;
             p1 = p2;
             c2 *=2;
-            p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, skipMatches);
-            if (maxTime> 0 && time > maxTime && p2<precision) return time;
+            p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
+            if ((maxTime> 0)&&(time > maxTime)&&(p2<precision)) return;
         }
 
         int cx;
         float realPrecision;
         if (fabs(p2-precision)>SEARCH_EPS) {
-            logger().info("Start linear estimation\n");
+            Logger::info("Start linear estimation\n");
             // after we got to values in the vicinity of the desired precision
             // use linear approximation to get a better estimation
 
             cx = (c1+c2)/2;
-            realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, skipMatches);
+            realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
             while (fabs(realPrecision-precision)>SEARCH_EPS) {
 
                 if (realPrecision<precision) {
@@ -267,25 +297,25 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
                 }
                 cx = (c1+c2)/2;
                 if (cx==c1) {
-                    logger().info("Got as close as I can\n");
+                    Logger::info("Got as close as I can\n");
                     break;
                 }
-                realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, skipMatches);
+                realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
             }
 
             c2 = cx;
             p2 = realPrecision;
 
-        } else {
-            logger().info("No need for linear estimation\n");
+        }
+        else {
+            Logger::info("No need for linear estimation\n");
             cx = c2;
             realPrecision = p2;
         }
 
     }
-    return time;
 }
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_TESTING_H_
+#endif //OPENCV_FLANN_INDEX_TESTING_H_
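
All of these helpers funnel through search_with_ground_truth(), which repeatedly queries the index
against a precomputed ground-truth match matrix and logs precision, timing and the mean distance
ratio for a given checks budget; test_index_precision() then doubles and bisects the checks value
until the requested precision is reached. A sketch of driving it, assuming a KDTreeIndex built
elsewhere and ground truth obtained beforehand (for instance by a linear scan); the names and values
below are illustrative only:

    #include "opencv2/flann/kdtree_index.h"
    #include "opencv2/flann/index_testing.h"
    #include "opencv2/flann/dist.h"

    using namespace cvflann;

    // Tune the 'checks' parameter of an already built kd-tree index so that
    // 1-NN queries reach roughly 90% precision on the given query set.
    float tune_checks(KDTreeIndex< L2<float> >& index,
                      const Matrix<float>& data,
                      const Matrix<float>& queries,
                      const Matrix<int>& gt_matches,
                      int& checks)
    {
        L2<float> distance;
        float search_time = test_index_precision(index, data, queries, gt_matches,
                                                 0.9f /*precision*/, checks, distance,
                                                 1 /*nn*/, 0 /*skipMatches*/);
        return search_time;   // 'checks' now holds the tuned value
    }
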
index 7344165..d524779 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_KDTREE_H_
-#define _OPENCV_KDTREE_H_
+#ifndef OPENCV_FLANN_KDTREE_INDEX_H_
+#define OPENCV_FLANN_KDTREE_INDEX_H_
 
 #include <algorithm>
 #include <map>
 #include <cassert>
 #include <cstring>
 
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/nn_index.h"
-#include "opencv2/flann/matrix.h"
-#include "opencv2/flann/result_set.h"
-#include "opencv2/flann/heap.h"
-#include "opencv2/flann/allocator.h"
-#include "opencv2/flann/random.h"
-#include "opencv2/flann/saving.h"
+#include "general.h"
+#include "nn_index.h"
+#include "dynamic_bitset.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
 
 
 namespace cvflann
 {
 
-struct CV_EXPORTS KDTreeIndexParams : public IndexParams {
-       KDTreeIndexParams(int trees_ = 4) : IndexParams(FLANN_INDEX_KDTREE), trees(trees_) {};
-
-       int trees;                 // number of randomized trees to use (for kdtree)
-
-       void print() const
-       {
-               logger().info("Index type: %d\n",(int)algorithm);
-               logger().info("Trees: %d\n", trees);
-       }
-
+struct KDTreeIndexParams : public IndexParams
+{
+    KDTreeIndexParams(int trees = 4)
+    {
+        (*this)["algorithm"] = FLANN_INDEX_KDTREE;
+        (*this)["trees"] = trees;
+    }
 };
 
 
@@ -69,223 +66,126 @@ struct CV_EXPORTS KDTreeIndexParams : public IndexParams {
  * Contains the k-d trees and other information for indexing a set of points
  * for nearest-neighbor matching.
  */
-template <typename ELEM_TYPE, typename DIST_TYPE = typename DistType<ELEM_TYPE>::type >
-class KDTreeIndex : public NNIndex<ELEM_TYPE>
+template <typename Distance>
+class KDTreeIndex : public NNIndex<Distance>
 {
+public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
 
-       enum {
-               /**
-                * To improve efficiency, only SAMPLE_MEAN random values are used to
-                * compute the mean and variance at each level when building a tree.
-                * A value of 100 seems to perform as well as using all values.
-                */
-               SAMPLE_MEAN = 100,
-               /**
-                * Top random dimensions to consider
-                *
-                * When creating random trees, the dimension on which to subdivide is
-                * selected at random from among the top RAND_DIM dimensions with the
-                * highest variance.  A value of 5 works well.
-                */
-               RAND_DIM=5
-       };
-
-
-       /**
-        * Number of randomized trees that are used
-        */
-       int numTrees;
-
-       /**
-        *  Array of indices to vectors in the dataset.
-        */
-       int* vind;
-
-
-       /**
-        * The dataset used by this index
-        */
-       const Matrix<ELEM_TYPE> dataset;
-
-    const IndexParams& index_params;
-
-       size_t size_;
-       size_t veclen_;
-
-
-    DIST_TYPE* mean;
-    DIST_TYPE* var;
-
-
-       /*--------------------- Internal Data Structures --------------------------*/
-
-       /**
-        * A node of the binary k-d tree.
-        *
-        *  This is   All nodes that have vec[divfeat] < divval are placed in the
-        *   child1 subtree, else child2., A leaf node is indicated if both children are NULL.
-        */
-       struct TreeSt {
-               /**
-                * Index of the vector feature used for subdivision.
-                * If this is a leaf node (both children are NULL) then
-                * this holds vector index for this leaf.
-                */
-               int divfeat;
-               /**
-                * The value used for subdivision.
-                */
-               DIST_TYPE divval;
-               /**
-                * The child nodes.
-                */
-               TreeSt *child1, *child2;
-       };
-       typedef TreeSt* Tree;
 
     /**
-     * Array of k-d trees used to find neighbours.
+     * KDTree constructor
+     *
+     * Params:
+     *          inputData = dataset with the input features
+     *          params = parameters passed to the kdtree algorithm
      */
-    Tree* trees;
-    typedef BranchStruct<Tree> BranchSt;
-    typedef BranchSt* Branch;
+    KDTreeIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KDTreeIndexParams(),
+                Distance d = Distance() ) :
+        dataset_(inputData), index_params_(params), distance_(d)
+    {
+        size_ = dataset_.rows;
+        veclen_ = dataset_.cols;
+
+        trees_ = get_param(index_params_,"trees",4);
+        tree_roots_ = new NodePtr[trees_];
 
-       /**
-        * Pooled memory allocator.
-        *
-        * Using a pooled memory allocator is more efficient
-        * than allocating memory directly when there is a large
-        * number small of memory allocations.
-        */
-       PooledAllocator pool;
+        // Create a permutable array of indices to the input vectors.
+        vind_.resize(size_);
+        for (size_t i = 0; i < size_; ++i) {
+            vind_[i] = int(i);
+        }
 
+        mean_ = new DistanceType[veclen_];
+        var_ = new DistanceType[veclen_];
+    }
 
 
-public:
+    KDTreeIndex(const KDTreeIndex&);
+    KDTreeIndex& operator=(const KDTreeIndex&);
 
-    flann_algorithm_t getType() const
+    /**
+     * Standard destructor
+     */
+    ~KDTreeIndex()
     {
-        return FLANN_INDEX_KDTREE;
+        if (tree_roots_!=NULL) {
+            delete[] tree_roots_;
+        }
+        delete[] mean_;
+        delete[] var_;
     }
 
-       /**
-        * KDTree constructor
-        *
-        * Params:
-        *              inputData = dataset with the input features
-        *              params = parameters passed to the kdtree algorithm
-        */
-       KDTreeIndex(const Matrix<ELEM_TYPE>& inputData, const KDTreeIndexParams& params = KDTreeIndexParams() ) :
-               dataset(inputData), index_params(params)
-       {
-        size_ = dataset.rows;
-        veclen_ = dataset.cols;
-
-        numTrees = params.trees;
-        trees = new Tree[numTrees];
-
-               // get the parameters
-//        if (params.find("trees") != params.end()) {
-//             numTrees = (int)params["trees"];
-//             trees = new Tree[numTrees];
-//        }
-//        else {
-//             numTrees = -1;
-//             trees = NULL;
-//        }
-
-               // Create a permutable array of indices to the input vectors.
-               vind = new int[size_];
-               for (size_t i = 0; i < size_; i++) {
-                       vind[i] = (int)i;
-               }
-
-        mean = new DIST_TYPE[veclen_];
-        var = new DIST_TYPE[veclen_];
-       }
-
-       /**
-        * Standard destructor
-        */
-       ~KDTreeIndex()
-       {
-               delete[] vind;
-               if (trees!=NULL) {
-                       delete[] trees;
-               }
-               delete[] mean;
-        delete[] var;
-       }
-
-
-       /**
-        * Builds the index
-        */
-       void buildIndex()
-       {
-               /* Construct the randomized trees. */
-               for (int i = 0; i < numTrees; i++) {
-                       /* Randomize the order of vectors to allow for unbiased sampling. */
-                       for (int j = (int)size_; j > 0; --j) {
-                               int rnd = rand_int(j);
-                std::swap(vind[j-1], vind[rnd]);
-                       }
-                       trees[i] = divideTree(0, (int)size_ - 1);
-               }
-       }
+    /**
+     * Builds the index
+     */
+    void buildIndex()
+    {
+        /* Construct the randomized trees. */
+        for (int i = 0; i < trees_; i++) {
+            /* Randomize the order of vectors to allow for unbiased sampling. */
+            std::random_shuffle(vind_.begin(), vind_.end());
+            tree_roots_[i] = divideTree(vind_.data(), int(size_) );
+        }
+    }
 
 
+    flann_algorithm_t getType() const
+    {
+        return FLANN_INDEX_KDTREE;
+    }
+
 
     void saveIndex(FILE* stream)
     {
-       save_value(stream, numTrees);
-       for (int i=0;i<numTrees;++i) {
-               save_tree(stream, trees[i]);
-       }
+        save_value(stream, trees_);
+        for (int i=0; i<trees_; ++i) {
+            save_tree(stream, tree_roots_[i]);
+        }
     }
 
 
 
     void loadIndex(FILE* stream)
     {
-       load_value(stream, numTrees);
-
-       if (trees!=NULL) {
-               delete[] trees;
-       }
-       trees = new Tree[numTrees];
-       for (int i=0;i<numTrees;++i) {
-               load_tree(stream,trees[i]);
-       }
-    }
+        load_value(stream, trees_);
+        if (tree_roots_!=NULL) {
+            delete[] tree_roots_;
+        }
+        tree_roots_ = new NodePtr[trees_];
+        for (int i=0; i<trees_; ++i) {
+            load_tree(stream,tree_roots_[i]);
+        }
 
+        index_params_["algorithm"] = getType();
+        index_params_["trees"] = tree_roots_;
+    }
 
     /**
-    *  Returns size of index.
-    */
+     *  Returns size of index.
+     */
     size_t size() const
     {
         return size_;
     }
 
     /**
-    * Returns the length of an index feature.
-    */
+     * Returns the length of an index feature.
+     */
     size_t veclen() const
     {
         return veclen_;
     }
 
-
-       /**
-        * Computes the inde memory usage
-        * Returns: memory used by the index
-        */
-       int usedMemory() const
-       {
-               return  (int)(pool.usedMemory+pool.wastedMemory+dataset.rows*sizeof(int));   // pool memory and vind array memory
-       }
-
+    /**
+     * Computes the index memory usage
+     * Returns: memory used by the index
+     */
+    int usedMemory() const
+    {
+        return int(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int));  // pool memory and vind array memory
+    }
 
     /**
      * Find set of nearest neighbors to vec. Their indices are stored inside
@@ -296,326 +196,426 @@ public:
      *     vec = the vector for which to search the nearest neighbors
      *     maxCheck = the maximum number of restarts (in a best-bin-first manner)
      */
-    void findNeighbors(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, const SearchParams& searchParams)
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
     {
-        int maxChecks = searchParams.checks;
+        int maxChecks = get_param(searchParams,"checks", 32);
+        float epsError = 1+get_param(searchParams,"eps",0.0f);
 
-        if (maxChecks<0) {
-            getExactNeighbors(result, vec);
-        } else {
-            getNeighbors(result, vec, maxChecks);
+        if (maxChecks==FLANN_CHECKS_UNLIMITED) {
+            getExactNeighbors(result, vec, epsError);
+        }
+        else {
+            getNeighbors(result, vec, maxChecks, epsError);
         }
     }
 
-       const IndexParams* getParameters() const
-       {
-               return &index_params;
-       }
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
 
 private:
 
-       KDTreeIndex& operator=(const KDTreeIndex&);
-       KDTreeIndex(const KDTreeIndex&);
 
+    /*--------------------- Internal Data Structures --------------------------*/
+    struct Node
+    {
+        /**
+         * Dimension used for subdivision.
+         */
+        int divfeat;
+        /**
+         * The values used for subdivision.
+         */
+        DistanceType divval;
+        /**
+         * The child nodes.
+         */
+        Node* child1, * child2;
+    };
+    typedef Node* NodePtr;
+    typedef BranchStruct<NodePtr, DistanceType> BranchSt;
+    typedef BranchSt* Branch;
 
-    void save_tree(FILE* stream, Tree tree)
+
+
+    void save_tree(FILE* stream, NodePtr tree)
     {
-       save_value(stream, *tree);
-       if (tree->child1!=NULL) {
-               save_tree(stream, tree->child1);
-       }
-       if (tree->child2!=NULL) {
-               save_tree(stream, tree->child2);
-       }
+        save_value(stream, *tree);
+        if (tree->child1!=NULL) {
+            save_tree(stream, tree->child1);
+        }
+        if (tree->child2!=NULL) {
+            save_tree(stream, tree->child2);
+        }
     }
 
 
-    void load_tree(FILE* stream, Tree& tree)
+    void load_tree(FILE* stream, NodePtr& tree)
     {
-       tree = pool.allocate<TreeSt>();
-       load_value(stream, *tree);
-       if (tree->child1!=NULL) {
-               load_tree(stream, tree->child1);
-       }
-       if (tree->child2!=NULL) {
-               load_tree(stream, tree->child2);
-       }
+        tree = pool_.allocate<Node>();
+        load_value(stream, *tree);
+        if (tree->child1!=NULL) {
+            load_tree(stream, tree->child1);
+        }
+        if (tree->child2!=NULL) {
+            load_tree(stream, tree->child2);
+        }
     }
 
 
-       /**
-        * Create a tree node that subdivides the list of vecs from vind[first]
-        * to vind[last].  The routine is called recursively on each sublist.
-        * Place a pointer to this new tree node in the location pTree.
-        *
-        * Params: pTree = the new node to create
-        *                      first = index of the first vector
-        *                      last = index of the last vector
-        */
-       Tree divideTree(int first, int last)
-       {
-               Tree node = pool.allocate<TreeSt>(); // allocate memory
-
-               /* If only one exemplar remains, then make this a leaf node. */
-               if (first == last) {
-                       node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
-                       node->divfeat = vind[first];    /* Store index of this vec. */
-               }
-               else {
-                       chooseDivision(node, first, last);
-                       subdivide(node, first, last);
-               }
-
-               return node;
-       }
-
-
-       /**
-        * Choose which feature to use in order to subdivide this set of vectors.
-        * Make a random choice among those with the highest variance, and use
-        * its variance as the threshold value.
-        */
-       void chooseDivision(Tree node, int first, int last)
-       {
-        memset(mean,0,veclen_*sizeof(DIST_TYPE));
-        memset(var,0,veclen_*sizeof(DIST_TYPE));
-
-               /* Compute mean values.  Only the first SAMPLE_MEAN values need to be
-                       sampled to get a good estimate.
-               */
-               int end = std::min(first + SAMPLE_MEAN, last);
-               for (int j = first; j <= end; ++j) {
-                       ELEM_TYPE* v = dataset[vind[j]];
+    /**
+     * Create a tree node that subdivides the list of vectors passed to it.
+     * The routine is called recursively on each sublist.
+     *
+     * Params: ind = pointer to the indices of the vectors to subdivide
+     *         count = number of vectors to subdivide
+     * Returns: the newly created tree node
+     */
+    NodePtr divideTree(int* ind, int count)
+    {
+        NodePtr node = pool_.allocate<Node>(); // allocate memory
+
+        /* If too few exemplars remain, then make this a leaf node. */
+        if ( count == 1) {
+            node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
+            node->divfeat = *ind;    /* Store index of this vec. */
+        }
+        else {
+            int idx;
+            int cutfeat;
+            DistanceType cutval;
+            meanSplit(ind, count, idx, cutfeat, cutval);
+
+            node->divfeat = cutfeat;
+            node->divval = cutval;
+            node->child1 = divideTree(ind, idx);
+            node->child2 = divideTree(ind+idx, count-idx);
+        }
+
+        return node;
+    }
+
+
+    /**
+     * Choose which feature to use in order to subdivide this set of vectors.
+     * Make a random choice among those with the highest variance, and use
+     * its variance as the threshold value.
+     */
+    void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval)
+    {
+        memset(mean_,0,veclen_*sizeof(DistanceType));
+        memset(var_,0,veclen_*sizeof(DistanceType));
+
+        /* Compute mean values.  Only the first SAMPLE_MEAN values need to be
+            sampled to get a good estimate.
+         */
+        int cnt = std::min((int)SAMPLE_MEAN+1, count);
+        for (int j = 0; j < cnt; ++j) {
+            ElementType* v = dataset_[ind[j]];
             for (size_t k=0; k<veclen_; ++k) {
-                mean[k] += v[k];
+                mean_[k] += v[k];
             }
-               }
+        }
         for (size_t k=0; k<veclen_; ++k) {
-            mean[k] /= (end - first + 1);
+            mean_[k] /= cnt;
         }
 
-               /* Compute variances (no need to divide by count). */
-               for (int j = first; j <= end; ++j) {
-                       ELEM_TYPE* v = dataset[vind[j]];
+        /* Compute variances (no need to divide by count). */
+        for (int j = 0; j < cnt; ++j) {
+            ElementType* v = dataset_[ind[j]];
             for (size_t k=0; k<veclen_; ++k) {
-                DIST_TYPE dist = v[k] - mean[k];
-                var[k] += dist * dist;
+                DistanceType dist = v[k] - mean_[k];
+                var_[k] += dist * dist;
             }
-               }
-               /* Select one of the highest variance indices at random. */
-               node->divfeat = selectDivision(var);
-               node->divval = mean[node->divfeat];
-
-       }
-
-
-       /**
-        * Select the top RAND_DIM largest values from v and return the index of
-        * one of these selected at random.
-        */
-       int selectDivision(DIST_TYPE* v)
-       {
-               int num = 0;
-               int topind[RAND_DIM];
-
-               /* Create a list of the indices of the top RAND_DIM values. */
-               for (size_t i = 0; i < veclen_; ++i) {
-                       if (num < RAND_DIM  ||  v[i] > v[topind[num-1]]) {
-                               /* Put this element at end of topind. */
-                               if (num < RAND_DIM) {
-                                       topind[num++] = (int)i;            /* Add to list. */
-                               }
-                               else {
-                                       topind[num-1] = (int)i;         /* Replace last element. */
-                               }
-                               /* Bubble end value down to right location by repeated swapping. */
-                               int j = num - 1;
-                               while (j > 0  &&  v[topind[j]] > v[topind[j-1]]) {
+        }
+        /* Select one of the highest variance indices at random. */
+        cutfeat = selectDivision(var_);
+        cutval = mean_[cutfeat];
+
+        int lim1, lim2;
+        planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
+
+        if (lim1>count/2) index = lim1;
+        else if (lim2<count/2) index = lim2;
+        else index = count/2;
+
+        /* If either list is empty, it means that all remaining features
+         * are identical. Split in the middle to maintain a balanced tree.
+         */
+        if ((lim1==count)||(lim2==0)) index = count/2;
+    }
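+    /* A small worked example of the balancing rule above (numbers are
+     * hypothetical, illustration only): with count = 10, suppose planeSplit()
+     * returns lim1 = 7 and lim2 = 9, i.e. seven values fall strictly below
+     * cutval and two are equal to it.  Since lim1 > count/2 the split index
+     * becomes 7, so the equal-valued points go to the right subtree; only when
+     * lim1 <= count/2 <= lim2 is the perfectly balanced index count/2 used.
+     */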
+
+
+    /**
+     * Select the top RAND_DIM largest values from v and return the index of
+     * one of these selected at random.
+     */
+    int selectDivision(DistanceType* v)
+    {
+        int num = 0;
+        size_t topind[RAND_DIM];
+
+        /* Create a list of the indices of the top RAND_DIM values. */
+        for (size_t i = 0; i < veclen_; ++i) {
+            if ((num < RAND_DIM)||(v[i] > v[topind[num-1]])) {
+                /* Put this element at end of topind. */
+                if (num < RAND_DIM) {
+                    topind[num++] = i;            /* Add to list. */
+                }
+                else {
+                    topind[num-1] = i;         /* Replace last element. */
+                }
+                /* Bubble end value down to right location by repeated swapping. */
+                int j = num - 1;
+                while (j > 0  &&  v[topind[j]] > v[topind[j-1]]) {
                     std::swap(topind[j], topind[j-1]);
-                                       --j;
-                               }
-                       }
-               }
-               /* Select a random integer in range [0,num-1], and return that index. */
-               int rnd = rand_int(num);
-               return topind[rnd];
-       }
-
-
-       /**
-        *  Subdivide the list of exemplars using the feature and division
-        *  value given in this node.  Call divideTree recursively on each list.
-       */
-       void subdivide(Tree node, int first, int last)
-       {
-               /* Move vector indices for left subtree to front of list. */
-               int i = first;
-               int j = last;
-               while (i <= j) {
-                       int ind = vind[i];
-                       ELEM_TYPE val = dataset[ind][node->divfeat];
-                       if (val < node->divval) {
-                               ++i;
-                       } else {
-                               /* Move to end of list by swapping vind i and j. */
-                std::swap(vind[i], vind[j]);
-                               --j;
-                       }
-               }
-               /* If either list is empty, it means we have hit the unlikely case
-                       in which all remaining features are identical. Split in the middle
-            to maintain a balanced tree.
-               */
-               if ( (i == first) || (i == last+1)) {
-            i = (first+last+1)/2;
-               }
-
-               node->child1 = divideTree(first, i - 1);
-               node->child2 = divideTree(i, last);
-       }
-
-
-
-       /**
-        * Performs an exact nearest neighbor search. The exact search performs a full
-        * traversal of the tree.
-        */
-       void getExactNeighbors(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec)
-       {
-//             checkID -= 1;  /* Set a different unique ID for each search. */
-
-               if (numTrees > 1) {
+                    --j;
+                }
+            }
+        }
+        /* Select a random integer in range [0,num-1], and return that index. */
+        int rnd = rand_int(num);
+        return (int)topind[rnd];
+    }
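+    /* Illustration with hypothetical variances (not part of the index): for
+     * RAND_DIM = 5 and v = {0.1, 4.0, 2.5, 7.0, 0.3, 6.0, 5.5}, topind ends up
+     * holding the indices of the five largest values, {3, 5, 6, 1, 2}, and one
+     * of those dimensions is returned uniformly at random; this random
+     * tie-breaking is what makes the trees of the forest differ.
+     */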
+
+
+    /**
+     *  Subdivide the list of points by a plane perpendicular to the axis
+     *  corresponding to the 'cutfeat' dimension at the 'cutval' position.
+     *
+     *  On return:
+     *  dataset[ind[0..lim1-1]][cutfeat]<cutval
+     *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval
+     *  dataset[ind[lim2..count-1]][cutfeat]>cutval
+     */
+    void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
+    {
+        /* Move vector indices for left subtree to front of list. */
+        int left = 0;
+        int right = count-1;
+        for (;; ) {
+            while (left<=right && dataset_[ind[left]][cutfeat]<cutval) ++left;
+            while (left<=right && dataset_[ind[right]][cutfeat]>=cutval) --right;
+            if (left>right) break;
+            std::swap(ind[left], ind[right]); ++left; --right;
+        }
+        lim1 = left;
+        right = count-1;
+        for (;; ) {
+            while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left;
+            while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right;
+            if (left>right) break;
+            std::swap(ind[left], ind[right]); ++left; --right;
+        }
+        lim2 = left;
+    }
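+    /* Illustration of the planeSplit() post-condition (hypothetical values,
+     * not part of the index): if the values of dimension cutfeat over ind are
+     * {3, 7, 5, 5, 2, 9} and cutval = 5, the indices are permuted so the values
+     * read e.g. {3, 2, 5, 5, 7, 9}, giving lim1 = 2 (start of the == block)
+     * and lim2 = 4 (start of the > block).
+     */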
+
+    /**
+     * Performs an exact nearest neighbor search. The exact search performs a full
+     * traversal of the tree.
+     */
+    void getExactNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, float epsError)
+    {
+        //             checkID -= 1;  /* Set a different unique ID for each search. */
+
+        if (trees_ > 1) {
             fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search");
-               }
-               if (numTrees>0) {
-                       searchLevelExact(result, vec, trees[0], 0.0);
-               }
-               assert(result.full());
-       }
-
-       /**
-        * Performs the approximate nearest-neighbor search. The search is approximate
-        * because the tree traversal is abandoned after a given number of descends in
-        * the tree.
-        */
-       void getNeighbors(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, int maxCheck)
-       {
-               int i;
-               BranchSt branch;
-
-               int checkCount = 0;
-               Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
-        std::vector<bool> checked(size_,false);
-
-               /* Search once through each tree down to root. */
-               for (i = 0; i < numTrees; ++i) {
-                       searchLevel(result, vec, trees[i], 0.0, checkCount, maxCheck, heap, checked);
-               }
-
-               /* Keep searching other branches from heap until finished. */
-               while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
-                       searchLevel(result, vec, branch.node, branch.mindistsq, checkCount, maxCheck, heap, checked);
-               }
-
-               delete heap;
-
-               assert(result.full());
-       }
-
-
-       /**
-        *  Search starting from a given node of the tree.  Based on any mismatches at
-        *  higher levels, all exemplars below this level must have a distance of
-        *  at least "mindistsq".
-       */
-       void searchLevel(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, Tree node, float mindistsq, int& checkCount, int maxCheck,
-                       Heap<BranchSt>* heap, std::vector<bool>& checked)
-       {
-               if (result.worstDist()<mindistsq) {
-//                     printf("Ignoring branch, too far\n");
-                       return;
-               }
-
-               /* If this is a leaf node, then do check and return. */
-               if (node->child1 == NULL  &&  node->child2 == NULL) {
-
-                       /* Do not check same node more than once when searching multiple trees.
-                               Once a vector is checked, we set its location in vind to the
-                               current checkID.
-                       */
-                       if (checked[node->divfeat] == true || checkCount>=maxCheck) {
-                               if (result.full()) return;
-                       }
+        }
+        if (trees_>0) {
+            searchLevelExact(result, vec, tree_roots_[0], 0.0, epsError);
+        }
+        assert(result.full());
+    }
+
+    /**
+     * Performs the approximate nearest-neighbor search. The search is approximate
+     * because the tree traversal is abandoned after a given number of descends in
+     * the tree.
+     */
+    void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError)
+    {
+        int i;
+        BranchSt branch;
+
+        int checkCount = 0;
+        Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
+        DynamicBitset checked(size_);
+
+        /* Search once through each tree down to root. */
+        for (i = 0; i < trees_; ++i) {
+            searchLevel(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
+        }
+
+        /* Keep searching other branches from heap until finished. */
+        while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
+            searchLevel(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
+        }
+
+        delete heap;
+
+        assert(result.full());
+    }
+
+
+    /**
+     *  Search starting from a given node of the tree.  Based on any mismatches at
+     *  higher levels, all exemplars below this level must have a distance of
+     *  at least "mindistsq".
+     */
+    void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck,
+                     float epsError, Heap<BranchSt>* heap, DynamicBitset& checked)
+    {
+        if (result_set.worstDist()<mindist) {
+            //                 printf("Ignoring branch, too far\n");
+            return;
+        }
+
+        /* If this is a leaf node, then do check and return. */
+        if ((node->child1 == NULL)&&(node->child2 == NULL)) {
+            /*  Do not check same node more than once when searching multiple trees.
+                Once a vector is checked, its bit is set in the 'checked'
+                bitset.
+             */
+            int index = node->divfeat;
+            if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return;
+            checked.set(index);
             checkCount++;
-                       checked[node->divfeat] = true;
-
-                       result.addPoint(dataset[node->divfeat],node->divfeat);
-                       return;
-               }
-
-               /* Which child branch should be taken first? */
-               ELEM_TYPE val = vec[node->divfeat];
-               DIST_TYPE diff = val - node->divval;
-               Tree bestChild = (diff < 0) ? node->child1 : node->child2;
-               Tree otherChild = (diff < 0) ? node->child2 : node->child1;
-
-               /* Create a branch record for the branch not taken.  Add distance
-                       of this feature boundary (we don't attempt to correct for any
-                       use of this feature in a parent node, which is unlikely to
-                       happen and would have only a small effect).  Don't bother
-                       adding more branches to heap after halfway point, as cost of
-                       adding exceeds their value.
-               */
-
-               DIST_TYPE new_distsq = (DIST_TYPE)flann_dist(&val, &val+1, &node->divval, mindistsq);
-//             if (2 * checkCount < maxCheck  ||  !result.full()) {
-               if (new_distsq < result.worstDist() ||  !result.full()) {
-                       heap->insert( BranchSt::make_branch(otherChild, new_distsq) );
-               }
-
-               /* Call recursively to search next level down. */
-               searchLevel(result, vec, bestChild, mindistsq, checkCount, maxCheck, heap, checked);
-       }
-
-       /**
-        * Performs an exact search in the tree starting from a node.
-        */
-       void searchLevelExact(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, Tree node, float mindistsq)
-       {
-               if (mindistsq>result.worstDist()) {
-                       return;
-               }
-
-               /* If this is a leaf node, then do check and return. */
-               if (node->child1 == NULL  &&  node->child2 == NULL) {
-
-                       /* Do not check same node more than once when searching multiple trees.
-                               Once a vector is checked, we set its location in vind to the
-                               current checkID.
-                       */
-//                     if (vind[node->divfeat] == checkID)
-//                             return;
-//                     vind[node->divfeat] = checkID;
-
-                       result.addPoint(dataset[node->divfeat],node->divfeat);
-                       return;
-               }
-
-               /* Which child branch should be taken first? */
-               ELEM_TYPE val = vec[node->divfeat];
-               DIST_TYPE diff = val - node->divval;
-               Tree bestChild = (diff < 0) ? node->child1 : node->child2;
-               Tree otherChild = (diff < 0) ? node->child2 : node->child1;
-
-
-               /* Call recursively to search next level down. */
-               searchLevelExact(result, vec, bestChild, mindistsq);
-               DIST_TYPE new_distsq = (DIST_TYPE)flann_dist(&val, &val+1, &node->divval, mindistsq);
-               searchLevelExact(result, vec, otherChild, new_distsq);
-       }
-
-};   // class KDTree
-
-} // namespace cvflann
-
-#endif //_OPENCV_KDTREE_H_
+
+            DistanceType dist = distance_(dataset_[index], vec, veclen_);
+            result_set.addPoint(dist,index);
+
+            return;
+        }
+
+        /* Which child branch should be taken first? */
+        ElementType val = vec[node->divfeat];
+        DistanceType diff = val - node->divval;
+        NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
+        NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
+
+        /* Create a branch record for the branch not taken.  Add distance
+            of this feature boundary (we don't attempt to correct for any
+            use of this feature in a parent node, which is unlikely to
+            happen and would have only a small effect).  Don't bother
+            adding more branches to heap after halfway point, as cost of
+            adding exceeds their value.
+         */
+
+        DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
+        //             if (2 * checkCount < maxCheck  ||  !result.full()) {
+        if ((new_distsq*epsError < result_set.worstDist())||  !result_set.full()) {
+            heap->insert( BranchSt(otherChild, new_distsq) );
+        }
+
+        /* Call recursively to search next level down. */
+        searchLevel(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked);
+    }
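+    /* epsError is expected to be 1 plus the "eps" search parameter (eps = 0
+     * meaning no extra approximation).  A hypothetical illustration of the
+     * pruning test above: with epsError = 1.1, a branch whose lower bound is
+     * new_distsq = 2.4 is queued only if 2.4 * 1.1 = 2.64 is still below the
+     * current worst distance in the result set (or the result set is not yet
+     * full); with epsError = 1 this reduces to the plain best-bin-first
+     * comparison against the worst distance.
+     */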
+
+    /**
+     * Performs an exact search in the tree starting from a node.
+     */
+    void searchLevelExact(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError)
+    {
+        /* If this is a leaf node, then do check and return. */
+        if ((node->child1 == NULL)&&(node->child2 == NULL)) {
+            int index = node->divfeat;
+            DistanceType dist = distance_(dataset_[index], vec, veclen_);
+            result_set.addPoint(dist,index);
+            return;
+        }
+
+        /* Which child branch should be taken first? */
+        ElementType val = vec[node->divfeat];
+        DistanceType diff = val - node->divval;
+        NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
+        NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
+
+        /* Compute the lower bound for the branch not taken by adding the
+            distance to this feature boundary (we don't attempt to correct for
+            any use of this feature in a parent node, which is unlikely to
+            happen and would have only a small effect).  The other branch is
+            searched below only if this bound stays within the allowed
+            (1+eps) factor of the current worst distance.
+         */
+
+        DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
+
+        /* Call recursively to search next level down. */
+        searchLevelExact(result_set, vec, bestChild, mindist, epsError);
+
+        if (new_distsq*epsError<=result_set.worstDist()) {
+            searchLevelExact(result_set, vec, otherChild, new_distsq, epsError);
+        }
+    }
+
+
+private:
+
+    enum
+    {
+        /**
+         * To improve efficiency, only SAMPLE_MEAN random values are used to
+         * compute the mean and variance at each level when building a tree.
+         * A value of 100 seems to perform as well as using all values.
+         */
+        SAMPLE_MEAN = 100,
+        /**
+         * Top random dimensions to consider
+         *
+         * When creating random trees, the dimension on which to subdivide is
+         * selected at random from among the top RAND_DIM dimensions with the
+         * highest variance.  A value of 5 works well.
+         */
+        RAND_DIM=5
+    };
+
+
+    /**
+     * Number of randomized trees that are used
+     */
+    int trees_;
+
+    /**
+     *  Array of indices to vectors in the dataset.
+     */
+    std::vector<int> vind_;
+
+    /**
+     * The dataset used by this index
+     */
+    const Matrix<ElementType> dataset_;
+
+    IndexParams index_params_;
+
+    size_t size_;
+    size_t veclen_;
+
+
+    DistanceType* mean_;
+    DistanceType* var_;
+
+
+    /**
+     * Array of k-d trees used to find neighbours.
+     */
+    NodePtr* tree_roots_;
+
+    /**
+     * Pooled memory allocator.
+     *
+     * Using a pooled memory allocator is more efficient
+     * than allocating memory directly when there is a large
+     * number of small memory allocations.
+     */
+    PooledAllocator pool_;
+
+    Distance distance_;
+
+
+};   // class KDTreeIndex
+
+}
+
+#endif //OPENCV_FLANN_KDTREE_INDEX_H_
diff --git a/modules/flann/include/opencv2/flann/kdtree_single_index.h b/modules/flann/include/opencv2/flann/kdtree_single_index.h
new file mode 100644 (file)
index 0000000..f890af5
--- /dev/null
@@ -0,0 +1,642 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_
+#define OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_
+
+#include <algorithm>
+#include <map>
+#include <cassert>
+#include <cstring>
+
+#include "general.h"
+#include "nn_index.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+
+namespace cvflann
+{
+
+struct KDTreeSingleIndexParams : public IndexParams
+{
+    KDTreeSingleIndexParams(int leaf_max_size = 10, bool reorder = true, int dim = -1)
+    {
+        (*this)["algorithm"] = FLANN_INDEX_KDTREE_SINGLE;
+        (*this)["leaf_max_size"] = leaf_max_size;
+        (*this)["reorder"] = reorder;
+        (*this)["dim"] = dim;
+    }
+};
+
+
+/**
+ * Single kd-tree index
+ *
+ * Contains a single k-d tree and other information for indexing a set of points
+ * for nearest-neighbor matching.
+ */
+template <typename Distance>
+class KDTreeSingleIndex : public NNIndex<Distance>
+{
+public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
+
+
+    /**
+     * KDTree constructor
+     *
+     * Params:
+     *          inputData = dataset with the input features
+     *          params = parameters passed to the kdtree algorithm
+     */
+    KDTreeSingleIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KDTreeSingleIndexParams(),
+                      Distance d = Distance() ) :
+        dataset_(inputData), index_params_(params), distance_(d)
+    {
+        size_ = dataset_.rows;
+        dim_ = dataset_.cols;
+        int dim_param = get_param(params,"dim",-1);
+        if (dim_param>0) dim_ = dim_param;
+        leaf_max_size_ = get_param(params,"leaf_max_size",10);
+        reorder_ = get_param(params,"reorder",true);
+
+        // Create a permutable array of indices to the input vectors.
+        vind_.resize(size_);
+        for (size_t i = 0; i < size_; i++) {
+            vind_[i] = i;
+        }
+    }
+
+    KDTreeSingleIndex(const KDTreeSingleIndex&);
+    KDTreeSingleIndex& operator=(const KDTreeSingleIndex&);
+
+    /**
+     * Standard destructor
+     */
+    ~KDTreeSingleIndex()
+    {
+        if (reorder_) delete[] data_.data;
+    }
+
+    /**
+     * Builds the index
+     */
+    void buildIndex()
+    {
+        computeBoundingBox(root_bbox_);
+        root_node_ = divideTree(0, size_, root_bbox_ );   // construct the tree
+
+        if (reorder_) {
+            delete[] data_.data;
+            data_ = cvflann::Matrix<ElementType>(new ElementType[size_*dim_], size_, dim_);
+            for (size_t i=0; i<size_; ++i) {
+                for (size_t j=0; j<dim_; ++j) {
+                    data_[i][j] = dataset_[vind_[i]][j];
+                }
+            }
+        }
+        else {
+            data_ = dataset_;
+        }
+    }
+
+    flann_algorithm_t getType() const
+    {
+        return FLANN_INDEX_KDTREE_SINGLE;
+    }
+
+
+    void saveIndex(FILE* stream)
+    {
+        save_value(stream, size_);
+        save_value(stream, dim_);
+        save_value(stream, root_bbox_);
+        save_value(stream, reorder_);
+        save_value(stream, leaf_max_size_);
+        save_value(stream, vind_);
+        if (reorder_) {
+            save_value(stream, data_);
+        }
+        save_tree(stream, root_node_);
+    }
+
+
+    void loadIndex(FILE* stream)
+    {
+        load_value(stream, size_);
+        load_value(stream, dim_);
+        load_value(stream, root_bbox_);
+        load_value(stream, reorder_);
+        load_value(stream, leaf_max_size_);
+        load_value(stream, vind_);
+        if (reorder_) {
+            load_value(stream, data_);
+        }
+        else {
+            data_ = dataset_;
+        }
+        load_tree(stream, root_node_);
+
+
+        index_params_["algorithm"] = getType();
+        index_params_["leaf_max_size"] = leaf_max_size_;
+        index_params_["reorder"] = reorder_;
+    }
+
+    /**
+     *  Returns size of index.
+     */
+    size_t size() const
+    {
+        return size_;
+    }
+
+    /**
+     * Returns the length of an index feature.
+     */
+    size_t veclen() const
+    {
+        return dim_;
+    }
+
+    /**
+     * Computes the index memory usage
+     * Returns: memory used by the index
+     */
+    int usedMemory() const
+    {
+        return pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int);  // pool memory and vind array memory
+    }
+
+
+    /**
+     * \brief Perform k-nearest neighbor search
+     * \param[in] queries The query points for which to find the nearest neighbors
+     * \param[out] indices The indices of the nearest neighbors found
+     * \param[out] dists Distances to the nearest neighbors found
+     * \param[in] knn Number of nearest neighbors to return
+     * \param[in] params Search parameters
+     */
+    void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+    {
+        assert(queries.cols == veclen());
+        assert(indices.rows >= queries.rows);
+        assert(dists.rows >= queries.rows);
+        assert(int(indices.cols) >= knn);
+        assert(int(dists.cols) >= knn);
+
+        KNNSimpleResultSet<DistanceType> resultSet(knn);
+        for (size_t i = 0; i < queries.rows; i++) {
+            resultSet.init(indices[i], dists[i]);
+            findNeighbors(resultSet, queries[i], params);
+        }
+    }
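+    /* A minimal usage sketch (illustration only; it assumes an L2<float>
+     * distance functor from dist.h and the SearchParams defaults from
+     * params.h, and 'data'/'qdata' are row-major float arrays owned by the
+     * caller):
+     *
+     *     Matrix<float> dataset(data, rows, cols);
+     *     Matrix<float> queries(qdata, nq, cols);
+     *     KDTreeSingleIndex< L2<float> > index(dataset, KDTreeSingleIndexParams(10));
+     *     index.buildIndex();
+     *     Matrix<int> indices(new int[nq*knn], nq, knn);
+     *     Matrix<float> dists(new float[nq*knn], nq, knn);
+     *     index.knnSearch(queries, indices, dists, knn, SearchParams());
+     */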
+
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
+
+    /**
+     * Find set of nearest neighbors to vec. Their indices are stored inside
+     * the result object.
+     *
+     * Params:
+     *     result = the result object in which the indices of the nearest-neighbors are stored
+     *     vec = the vector for which to search the nearest neighbors
+     *     searchParams = search parameters (the "eps" entry controls the allowed approximation error)
+     */
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+    {
+        float epsError = 1+get_param(searchParams,"eps",0.0f);
+
+        std::vector<DistanceType> dists(dim_,0);
+        DistanceType distsq = computeInitialDistances(vec, dists);
+        searchLevel(result, vec, root_node_, distsq, dists, epsError);
+    }
+
+private:
+
+
+    /*--------------------- Internal Data Structures --------------------------*/
+    struct Node
+    {
+        union {
+            struct
+            {
+                /**
+                 * Indices of points in leaf node
+                 */
+                int left, right;
+            };
+            struct
+            {
+                /**
+                 * Dimension used for subdivision.
+                 */
+                int divfeat;
+                /**
+                 * The values used for subdivision.
+                 */
+                DistanceType divlow, divhigh;
+            };
+        };
+        /**
+         * The child nodes.
+         */
+        Node* child1, * child2;
+    };
+    typedef Node* NodePtr;
+
+
+    struct Interval
+    {
+        ElementType low, high;
+    };
+
+    typedef std::vector<Interval> BoundingBox;
+
+    typedef BranchStruct<NodePtr, DistanceType> BranchSt;
+    typedef BranchSt* Branch;
+
+
+
+
+    void save_tree(FILE* stream, NodePtr tree)
+    {
+        save_value(stream, *tree);
+        if (tree->child1!=NULL) {
+            save_tree(stream, tree->child1);
+        }
+        if (tree->child2!=NULL) {
+            save_tree(stream, tree->child2);
+        }
+    }
+
+
+    void load_tree(FILE* stream, NodePtr& tree)
+    {
+        tree = pool_.allocate<Node>();
+        load_value(stream, *tree);
+        if (tree->child1!=NULL) {
+            load_tree(stream, tree->child1);
+        }
+        if (tree->child2!=NULL) {
+            load_tree(stream, tree->child2);
+        }
+    }
+
+
+    void computeBoundingBox(BoundingBox& bbox)
+    {
+        bbox.resize(dim_);
+        for (size_t i=0; i<dim_; ++i) {
+            bbox[i].low = dataset_[0][i];
+            bbox[i].high = dataset_[0][i];
+        }
+        for (size_t k=1; k<dataset_.rows; ++k) {
+            for (size_t i=0; i<dim_; ++i) {
+                if (dataset_[k][i]<bbox[i].low) bbox[i].low = dataset_[k][i];
+                if (dataset_[k][i]>bbox[i].high) bbox[i].high = dataset_[k][i];
+            }
+        }
+    }
+
+
+    /**
+     * Create a tree node that subdivides the vectors from vind[left]
+     * to vind[right-1].  The routine is called recursively on each sublist.
+     *
+     * Params: left = index of the first vector
+     *         right = one past the index of the last vector
+     *         bbox = bounding box of the vectors in this range (updated on return)
+     */
+    NodePtr divideTree(int left, int right, BoundingBox& bbox)
+    {
+        NodePtr node = pool_.allocate<Node>(); // allocate memory
+
+        /* If too few exemplars remain, then make this a leaf node. */
+        if ( (right-left) <= leaf_max_size_) {
+            node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
+            node->left = left;
+            node->right = right;
+
+            // compute bounding-box of leaf points
+            for (size_t i=0; i<dim_; ++i) {
+                bbox[i].low = dataset_[vind_[left]][i];
+                bbox[i].high = dataset_[vind_[left]][i];
+            }
+            for (int k=left+1; k<right; ++k) {
+                for (size_t i=0; i<dim_; ++i) {
+                    if (bbox[i].low>dataset_[vind_[k]][i]) bbox[i].low=dataset_[vind_[k]][i];
+                    if (bbox[i].high<dataset_[vind_[k]][i]) bbox[i].high=dataset_[vind_[k]][i];
+                }
+            }
+        }
+        else {
+            int idx;
+            int cutfeat;
+            DistanceType cutval;
+            middleSplit_(&vind_[0]+left, right-left, idx, cutfeat, cutval, bbox);
+
+            node->divfeat = cutfeat;
+
+            BoundingBox left_bbox(bbox);
+            left_bbox[cutfeat].high = cutval;
+            node->child1 = divideTree(left, left+idx, left_bbox);
+
+            BoundingBox right_bbox(bbox);
+            right_bbox[cutfeat].low = cutval;
+            node->child2 = divideTree(left+idx, right, right_bbox);
+
+            node->divlow = left_bbox[cutfeat].high;
+            node->divhigh = right_bbox[cutfeat].low;
+
+            for (size_t i=0; i<dim_; ++i) {
+                bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);
+                bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);
+            }
+        }
+
+        return node;
+    }
+
+    void computeMinMax(int* ind, int count, int dim, ElementType& min_elem, ElementType& max_elem)
+    {
+        min_elem = dataset_[ind[0]][dim];
+        max_elem = dataset_[ind[0]][dim];
+        for (int i=1; i<count; ++i) {
+            ElementType val = dataset_[ind[i]][dim];
+            if (val<min_elem) min_elem = val;
+            if (val>max_elem) max_elem = val;
+        }
+    }
+
+    void middleSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
+    {
+        // find the largest span from the approximate bounding box
+        ElementType max_span = bbox[0].high-bbox[0].low;
+        cutfeat = 0;
+        cutval = (bbox[0].high+bbox[0].low)/2;
+        for (size_t i=1; i<dim_; ++i) {
+            ElementType span = bbox[i].high-bbox[i].low;
+            if (span>max_span) {
+                max_span = span;
+                cutfeat = i;
+                cutval = (bbox[i].high+bbox[i].low)/2;
+            }
+        }
+
+        // compute exact span on the found dimension
+        ElementType min_elem, max_elem;
+        computeMinMax(ind, count, cutfeat, min_elem, max_elem);
+        cutval = (min_elem+max_elem)/2;
+        max_span = max_elem - min_elem;
+
+        // check if a dimension of a largest span exists
+        size_t k = cutfeat;
+        for (size_t i=0; i<dim_; ++i) {
+            if (i==k) continue;
+            ElementType span = bbox[i].high-bbox[i].low;
+            if (span>max_span) {
+                computeMinMax(ind, count, i, min_elem, max_elem);
+                span = max_elem - min_elem;
+                if (span>max_span) {
+                    max_span = span;
+                    cutfeat = i;
+                    cutval = (min_elem+max_elem)/2;
+                }
+            }
+        }
+        int lim1, lim2;
+        planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
+
+        if (lim1>count/2) index = lim1;
+        else if (lim2<count/2) index = lim2;
+        else index = count/2;
+    }
+
+
+    void middleSplit_(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
+    {
+        const float EPS=0.00001f;
+        ElementType max_span = bbox[0].high-bbox[0].low;
+        for (size_t i=1; i<dim_; ++i) {
+            ElementType span = bbox[i].high-bbox[i].low;
+            if (span>max_span) {
+                max_span = span;
+            }
+        }
+        ElementType max_spread = -1;
+        cutfeat = 0;
+        for (size_t i=0; i<dim_; ++i) {
+            ElementType span = bbox[i].high-bbox[i].low;
+            if (span>(ElementType)((1-EPS)*max_span)) {
+                ElementType min_elem, max_elem;
+                computeMinMax(ind, count, (int)i, min_elem, max_elem);
+                ElementType spread = max_elem-min_elem;
+                if (spread>max_spread) {
+                    cutfeat = i;
+                    max_spread = spread;
+                }
+            }
+        }
+        // split in the middle
+        DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2;
+        ElementType min_elem, max_elem;
+        computeMinMax(ind, count, cutfeat, min_elem, max_elem);
+
+        if (split_val<min_elem) cutval = min_elem;
+        else if (split_val>max_elem) cutval = max_elem;
+        else cutval = split_val;
+
+        int lim1, lim2;
+        planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
+
+        if (lim1>count/2) index = lim1;
+        else if (lim2<count/2) index = lim2;
+        else index = count/2;
+    }
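+    /* A worked example of the clamping above (hypothetical values): if the
+     * node's bounding box spans [2, 10] on the chosen dimension, the initial
+     * split value is the midpoint 6; if the points reaching this node only
+     * cover [7, 9], the split value is clamped up to 7 so the cut stays inside
+     * the range actually occupied by the points.
+     */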
+
+
+    /**
+     *  Subdivide the list of points by a plane perpendicular to the axis
+     *  corresponding to the 'cutfeat' dimension at the 'cutval' position.
+     *
+     *  On return:
+     *  dataset[ind[0..lim1-1]][cutfeat]<cutval
+     *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval
+     *  dataset[ind[lim2..count-1]][cutfeat]>cutval
+     */
+    void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
+    {
+        /* Move vector indices for left subtree to front of list. */
+        int left = 0;
+        int right = count-1;
+        for (;; ) {
+            while (left<=right && dataset_[ind[left]][cutfeat]<cutval) ++left;
+            while (left<=right && dataset_[ind[right]][cutfeat]>=cutval) --right;
+            if (left>right) break;
+            std::swap(ind[left], ind[right]); ++left; --right;
+        }
+        /* If either list is empty, it means that all remaining features
+         * are identical. Split in the middle to maintain a balanced tree.
+         */
+        lim1 = left;
+        right = count-1;
+        for (;; ) {
+            while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left;
+            while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right;
+            if (left>right) break;
+            std::swap(ind[left], ind[right]); ++left; --right;
+        }
+        lim2 = left;
+    }
+
+    DistanceType computeInitialDistances(const ElementType* vec, std::vector<DistanceType>& dists)
+    {
+        DistanceType distsq = 0.0;
+
+        for (size_t i = 0; i < dim_; ++i) {
+            if (vec[i] < root_bbox_[i].low) {
+                dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].low, i);
+                distsq += dists[i];
+            }
+            if (vec[i] > root_bbox_[i].high) {
+                dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].high, i);
+                distsq += dists[i];
+            }
+        }
+
+        return distsq;
+    }
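+    /* Hypothetical example of the initial distance computation (assuming a
+     * squared-L2 accum_dist): if the query has value 1 on a dimension whose
+     * root bounding box is [4, 9], that dimension contributes
+     * dists[i] = (4 - 1)^2 = 9 to distsq; dimensions where the query already
+     * lies inside the box contribute nothing.
+     */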
+
+    /**
+     * Performs an exact search in the tree starting from a node.
+     */
+    void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq,
+                     std::vector<DistanceType>& dists, const float epsError)
+    {
+        /* If this is a leaf node, then do check and return. */
+        if ((node->child1 == NULL)&&(node->child2 == NULL)) {
+            DistanceType worst_dist = result_set.worstDist();
+            for (int i=node->left; i<node->right; ++i) {
+                int index = reorder_ ? i : vind_[i];
+                DistanceType dist = distance_(vec, data_[index], dim_, worst_dist);
+                if (dist<worst_dist) {
+                    result_set.addPoint(dist,vind_[i]);
+                }
+            }
+            return;
+        }
+
+        /* Which child branch should be taken first? */
+        int idx = node->divfeat;
+        ElementType val = vec[idx];
+        DistanceType diff1 = val - node->divlow;
+        DistanceType diff2 = val - node->divhigh;
+
+        NodePtr bestChild;
+        NodePtr otherChild;
+        DistanceType cut_dist;
+        if ((diff1+diff2)<0) {
+            bestChild = node->child1;
+            otherChild = node->child2;
+            cut_dist = distance_.accum_dist(val, node->divhigh, idx);
+        }
+        else {
+            bestChild = node->child2;
+            otherChild = node->child1;
+            cut_dist = distance_.accum_dist( val, node->divlow, idx);
+        }
+
+        /* Call recursively to search next level down. */
+        searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError);
+
+        DistanceType dst = dists[idx];
+        mindistsq = mindistsq + cut_dist - dst;
+        dists[idx] = cut_dist;
+        if (mindistsq*epsError<=result_set.worstDist()) {
+            searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError);
+        }
+        dists[idx] = dst;
+    }
+
+private:
+
+    /**
+     * The dataset used by this index
+     */
+    const Matrix<ElementType> dataset_;
+
+    IndexParams index_params_;
+
+    int leaf_max_size_;
+    bool reorder_;
+
+
+    /**
+     *  Array of indices to vectors in the dataset.
+     */
+    std::vector<int> vind_;
+
+    Matrix<ElementType> data_;
+
+    size_t size_;
+    size_t dim_;
+
+    /**
+     * The root node of the single k-d tree used to find neighbours.
+     */
+    NodePtr root_node_;
+
+    BoundingBox root_bbox_;
+
+    /**
+     * Pooled memory allocator.
+     *
+     * Using a pooled memory allocator is more efficient
+     * than allocating memory directly when there is a large
+     * number of small memory allocations.
+     */
+    PooledAllocator pool_;
+
+    Distance distance_;
+};   // class KDTreeSingleIndex
+
+}
+
+#endif //OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_
index 67abba5..624c27a 100644 (file)
@@ -28,8 +28,8 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_KMEANSTREE_H_
-#define _OPENCV_KMEANSTREE_H_
+#ifndef OPENCV_FLANN_KMEANS_INDEX_H_
+#define OPENCV_FLANN_KMEANS_INDEX_H_
 
 #include <algorithm>
 #include <string>
 #include <limits>
 #include <cmath>
 
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/nn_index.h"
-#include "opencv2/flann/matrix.h"
-#include "opencv2/flann/result_set.h"
-#include "opencv2/flann/heap.h"
-#include "opencv2/flann/allocator.h"
-#include "opencv2/flann/random.h"
+#include "general.h"
+#include "nn_index.h"
+#include "dist.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+#include "logger.h"
 
 
 namespace cvflann
 {
 
-struct CV_EXPORTS KMeansIndexParams : public IndexParams {
-       KMeansIndexParams(int branching_ = 32, int iterations_ = 11,
-                       flann_centers_init_t centers_init_ = FLANN_CENTERS_RANDOM, float cb_index_ = 0.2 ) :
-               IndexParams(FLANN_INDEX_KMEANS),
-               branching(branching_),
-               iterations(iterations_),
-               centers_init(centers_init_),
-               cb_index(cb_index_) {};
-
-       int branching;             // branching factor (for kmeans tree)
-       int iterations;            // max iterations to perform in one kmeans clustering (kmeans tree)
-       flann_centers_init_t centers_init;          // algorithm used for picking the initial cluster centers for kmeans tree
-    float cb_index;            // cluster boundary index. Used when searching the kmeans tree
-
-       void print() const
-       {
-               logger().info("Index type: %d\n",(int)algorithm);
-               logger().info("Branching: %d\n", branching);
-               logger().info("Iterations: %d\n", iterations);
-               logger().info("Centres initialisation: %d\n", centers_init);
-               logger().info("Cluster boundary weight: %g\n", cb_index);
-       }
-
+struct KMeansIndexParams : public IndexParams
+{
+    KMeansIndexParams(int branching = 32, int iterations = 11,
+                      flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 )
+    {
+        (*this)["algorithm"] = FLANN_INDEX_KMEANS;
+        // branching factor
+        (*this)["branching"] = branching;
+        // max iterations to perform in one kmeans clustering (kmeans tree)
+        (*this)["iterations"] = iterations;
+        // algorithm used for picking the initial cluster centers for kmeans tree
+        (*this)["centers_init"] = centers_init;
+        // cluster boundary index. Used when searching the kmeans tree
+        (*this)["cb_index"] = cb_index;
+    }
 };
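+/* The parameters are plain IndexParams map entries, so they can be read back
+ * with get_param(); a hedged sketch (illustration only):
+ *
+ *     KMeansIndexParams p(64, 15, FLANN_CENTERS_KMEANSPP, 0.2f);
+ *     int branching = get_param(p, "branching", 32);                           // 64
+ *     flann_centers_init_t init = get_param(p, "centers_init", FLANN_CENTERS_RANDOM);
+ */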
 
 
@@ -82,145 +77,40 @@ struct CV_EXPORTS KMeansIndexParams : public IndexParams {
  * Contains a tree constructed through a hierarchical kmeans clustering
  * and other information for indexing a set of points for nearest-neighbour matching.
  */
-template <typename ELEM_TYPE, typename DIST_TYPE = typename DistType<ELEM_TYPE>::type >
-class KMeansIndex : public NNIndex<ELEM_TYPE>
+template <typename Distance>
+class KMeansIndex : public NNIndex<Distance>
 {
+public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
 
-       /**
-        * The branching factor used in the hierarchical k-means clustering
-        */
-       int branching;
-
-       /**
-        * Maximum number of iterations to use when performing k-means
-        * clustering
-        */
-       int max_iter;
-
-     /**
-     * Cluster border index. This is used in the tree search phase when determining
-     * the closest cluster to explore next. A zero value takes into account only
-     * the cluster centres, a value greater then zero also take into account the size
-     * of the cluster.
-     */
-    float cb_index;
-
-       /**
-        * The dataset used by this index
-        */
-    const Matrix<ELEM_TYPE> dataset;
-
-    const IndexParams& index_params;
-
-    /**
-    * Number of features in the dataset.
-    */
-    size_t size_;
-
-    /**
-    * Length of each feature.
-    */
-    size_t veclen_;
-
-
-       /**
-        * Struture representing a node in the hierarchical k-means tree.
-        */
-       struct KMeansNodeSt     {
-               /**
-                * The cluster center.
-                */
-               DIST_TYPE* pivot;
-               /**
-                * The cluster radius.
-                */
-               DIST_TYPE radius;
-               /**
-                * The cluster mean radius.
-                */
-               DIST_TYPE mean_radius;
-               /**
-                * The cluster variance.
-                */
-               DIST_TYPE variance;
-               /**
-                * The cluster size (number of points in the cluster)
-                */
-               int size;
-               /**
-                * Child nodes (only for non-terminal nodes)
-                */
-               KMeansNodeSt** childs;
-               /**
-                * Node points (only for terminal nodes)
-                */
-               int* indices;
-               /**
-                * Level
-                */
-               int level;
-       };
-    typedef KMeansNodeSt* KMeansNode;
 
 
+    typedef void (KMeansIndex::* centersAlgFunction)(int, int*, int, int*, int&);
 
     /**
-     * Alias definition for a nicer syntax.
+     * The function used for choosing the cluster centers.
      */
-    typedef BranchStruct<KMeansNode> BranchSt;
-
-
-       /**
-        * The root node in the tree.
-        */
-       KMeansNode root;
-
-       /**
-        *  Array of indices to vectors in the dataset.
-        */
-       int* indices;
-
-
-       /**
-        * Pooled memory allocator.
-        *
-        * Using a pooled memory allocator is more efficient
-        * than allocating memory directly when there is a large
-        * number small of memory allocations.
-        */
-       PooledAllocator pool;
-
-       /**
-        * Memory occupied by the index.
-        */
-       int memoryCounter;
-
-
-       typedef void (KMeansIndex::*centersAlgFunction)(int, int*, int, int*, int&);
-
-    /**
-    * The function used for choosing the cluster centers.
-    */
     centersAlgFunction chooseCenters;
 
 
 
     /**
-    * Chooses the initial centers in the k-means clustering in a random manner.
-    *
-    * Params:
-    *     k = number of centers
-    *     vecs = the dataset of points
-    *     indices = indices in the dataset
-    *     indices_length = length of indices vector
-    *
-    */
+     * Chooses the initial centers in the k-means clustering in a random manner.
+     *
+     * Params:
+     *     k = number of centers
+     *     vecs = the dataset of points
+     *     indices = indices in the dataset
+     *     indices_length = length of indices vector
+     *
+     */
     void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length)
     {
         UniqueRandom r(indices_length);
 
         int index;
-        for (index=0;index<k;++index) {
+        for (index=0; index<k; ++index) {
             bool duplicate = true;
             int rnd;
             while (duplicate) {
@@ -233,8 +123,8 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
 
                 centers[index] = indices[rnd];
 
-                for (int j=0;j<index;++j) {
-                    float sq = (float)flann_dist(dataset[centers[index]],dataset[centers[index]]+dataset.cols,dataset[centers[j]]);
+                for (int j=0; j<index; ++j) {
+                    DistanceType sq = distance_(dataset_[centers[index]], dataset_[centers[j]], dataset_.cols);
                     if (sq<1e-16) {
                         duplicate = true;
                     }
@@ -247,15 +137,15 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
 
 
     /**
-    * Chooses the initial centers in the k-means using Gonzales' algorithm
-    * so that the centers are spaced apart from each other.
-    *
-    * Params:
-    *     k = number of centers
-    *     vecs = the dataset of points
-    *     indices = indices in the dataset
-    * Returns:
-    */
+     * Chooses the initial centers in the k-means using Gonzales' algorithm
+     * so that the centers are spaced apart from each other.
+     *
+     * Params:
+     *     k = number of centers
+     *     vecs = the dataset of points
+     *     indices = indices in the dataset
+     * Returns:
+     */
     void chooseCentersGonzales(int k, int* indices, int indices_length, int* centers, int& centers_length)
     {
         int n = indices_length;
@@ -269,11 +159,11 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
         for (index=1; index<k; ++index) {
 
             int best_index = -1;
-            float best_val = 0;
-            for (int j=0;j<n;++j) {
-                float dist = (float)flann_dist(dataset[centers[0]],dataset[centers[0]]+dataset.cols,dataset[indices[j]]);
-                for (int i=1;i<index;++i) {
-                        float tmp_dist = (float)flann_dist(dataset[centers[i]],dataset[centers[i]]+dataset.cols,dataset[indices[j]]);
+            DistanceType best_val = 0;
+            for (int j=0; j<n; ++j) {
+                DistanceType dist = distance_(dataset_[centers[0]],dataset_[indices[j]],dataset_.cols);
+                for (int i=1; i<index; ++i) {
+                    DistanceType tmp_dist = distance_(dataset_[centers[i]],dataset_[indices[j]],dataset_.cols);
                     if (tmp_dist<dist) {
                         dist = tmp_dist;
                     }
@@ -295,24 +185,24 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
 
 
     /**
-    * Chooses the initial centers in the k-means using the algorithm
-    * proposed in the KMeans++ paper:
-    * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding
-    *
-    * Implementation of this function was converted from the one provided in Arthur's code.
-    *
-    * Params:
-    *     k = number of centers
-    *     vecs = the dataset of points
-    *     indices = indices in the dataset
-    * Returns:
-    */
+     * Chooses the initial centers in the k-means using the algorithm
+     * proposed in the KMeans++ paper:
+     * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding
+     *
+     * Implementation of this function was converted from the one provided in Arthur's code.
+     *
+     * Params:
+     *     k = number of centers
+     *     vecs = the dataset of points
+     *     indices = indices in the dataset
+     * Returns:
+     */
     void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length)
     {
         int n = indices_length;
 
         double currentPot = 0;
-        double* closestDistSq = new double[n];
+        DistanceType* closestDistSq = new DistanceType[n];
 
         // Choose one random center and set the closestDistSq values
         int index = rand_int(n);
@@ -320,7 +210,7 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
         centers[0] = indices[index];
 
         for (int i = 0; i < n; i++) {
-            closestDistSq[i] = flann_dist(dataset[indices[i]], dataset[indices[i]] + dataset.cols, dataset[indices[index]]);
+            closestDistSq[i] = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols);
             currentPot += closestDistSq[i];
         }
 
@@ -338,21 +228,18 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
 
                 // Choose our center - have to be slightly careful to return a valid answer even accounting
                 // for possible rounding errors
-            double randVal = rand_double(currentPot);
+                double randVal = rand_double(currentPot);
                 for (index = 0; index < n-1; index++) {
-                    if (randVal <= closestDistSq[index])
-                        break;
-                    else
-                        randVal -= closestDistSq[index];
+                    if (randVal <= closestDistSq[index]) break;
+                    else randVal -= closestDistSq[index];
                 }
 
                 // Compute the new potential
                 double newPot = 0;
-                for (int i = 0; i < n; i++)
-                    newPot += std::min( flann_dist(dataset[indices[i]], dataset[indices[i]] + dataset.cols, dataset[indices[index]]), closestDistSq[i] );
+                for (int i = 0; i < n; i++) newPot += std::min( distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols), closestDistSq[i] );
 
                 // Store the best result
-                if (bestNewPot < 0 || newPot < bestNewPot) {
+                if ((bestNewPot < 0)||(newPot < bestNewPot)) {
                     bestNewPot = newPot;
                     bestNewIndex = index;
                 }
@@ -361,90 +248,93 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
             // Add the appropriate center
             centers[centerCount] = indices[bestNewIndex];
             currentPot = bestNewPot;
-            for (int i = 0; i < n; i++)
-                closestDistSq[i] = std::min( flann_dist(dataset[indices[i]], dataset[indices[i]]+dataset.cols, dataset[indices[bestNewIndex]]), closestDistSq[i] );
+            for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance_(dataset_[indices[i]], dataset_[indices[bestNewIndex]], dataset_.cols), closestDistSq[i] );
         }
 
         centers_length = centerCount;
 
-       delete[] closestDistSq;
+        delete[] closestDistSq;
     }
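+    /* The seeding above follows k-means++: after the first (uniformly random)
+     * center, each new center is drawn with probability proportional to the
+     * squared distance to its nearest already-chosen center, which currentPot
+     * and closestDistSq[] track incrementally.  A stripped-down sketch of the
+     * sampling step on plain values (illustration only, not the code above):
+     *
+     *     double pot = 0;                          // sum of dist2[0..n-1]
+     *     for (int i = 0; i < n; ++i) pot += dist2[i];
+     *     double r = rand_double(pot);             // uniform in [0, pot)
+     *     int next = 0;
+     *     while (next < n-1 && r > dist2[next]) r -= dist2[next++];
+     *     // 'next' becomes the new center; dist2[] and pot are then updated
+     */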
 
 
 
 public:
 
-
     flann_algorithm_t getType() const
     {
         return FLANN_INDEX_KMEANS;
     }
 
-       /**
-        * Index constructor
-        *
-        * Params:
-        *              inputData = dataset with the input features
-        *              params = parameters passed to the hierarchical k-means algorithm
-        */
-       KMeansIndex(const Matrix<ELEM_TYPE>& inputData, const KMeansIndexParams& params = KMeansIndexParams() )
-               : dataset(inputData), index_params(params), root(NULL), indices(NULL)
-       {
-               memoryCounter = 0;
-
-        size_ = dataset.rows;
-        veclen_ = dataset.cols;
-
-        branching = params.branching;
-        max_iter = params.iterations;
-        if (max_iter<0) {
-               max_iter = (std::numeric_limits<int>::max)();
+    /**
+     * Index constructor
+     *
+     * Params:
+     *          inputData = dataset with the input features
+     *          params = parameters passed to the hierarchical k-means algorithm
+     */
+    KMeansIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KMeansIndexParams(),
+                Distance d = Distance())
+        : dataset_(inputData), index_params_(params), root_(NULL), indices_(NULL), distance_(d)
+    {
+        memoryCounter_ = 0;
+
+        size_ = dataset_.rows;
+        veclen_ = dataset_.cols;
+
+        branching_ = get_param(params,"branching",32);
+        iterations_ = get_param(params,"iterations",11);
+        if (iterations_<0) {
+            iterations_ = (std::numeric_limits<int>::max)();
         }
-        flann_centers_init_t centersInit = params.centers_init;
+        centers_init_  = get_param(params,"centers_init",FLANN_CENTERS_RANDOM);
 
-        if (centersInit==FLANN_CENTERS_RANDOM) {
-               chooseCenters = &KMeansIndex::chooseCentersRandom;
+        if (centers_init_==FLANN_CENTERS_RANDOM) {
+            chooseCenters = &KMeansIndex::chooseCentersRandom;
+        }
+        else if (centers_init_==FLANN_CENTERS_GONZALES) {
+            chooseCenters = &KMeansIndex::chooseCentersGonzales;
+        }
+        else if (centers_init_==FLANN_CENTERS_KMEANSPP) {
+            chooseCenters = &KMeansIndex::chooseCentersKMeanspp;
         }
-        else if (centersInit==FLANN_CENTERS_GONZALES) {
-               chooseCenters = &KMeansIndex::chooseCentersGonzales;
+        else {
+            throw FLANNException("Unknown algorithm for choosing initial centers.");
         }
-        else if (centersInit==FLANN_CENTERS_KMEANSPP) {
-                       chooseCenters = &KMeansIndex::chooseCentersKMeanspp;
+        cb_index_ = 0.4f;
+
+    }
+
+
+    KMeansIndex(const KMeansIndex&);
+    KMeansIndex& operator=(const KMeansIndex&);
+
+
+    /**
+     * Index destructor.
+     *
+     * Release the memory used by the index.
+     */
+    virtual ~KMeansIndex()
+    {
+        if (root_ != NULL) {
+            free_centers(root_);
         }
-               else {
-                       throw FLANNException("Unknown algorithm for choosing initial centers.");
-               }
-        cb_index = 0.4f;
-
-       }
-
-
-       /**
-        * Index destructor.
-        *
-        * Release the memory used by the index.
-        */
-       virtual ~KMeansIndex()
-       {
-               if (root != NULL) {
-                       free_centers(root);
-               }
-        if (indices!=NULL) {
-                 delete[] indices;
+        if (indices_!=NULL) {
+            delete[] indices_;
         }
-       }
+    }
 
     /**
-    *  Returns size of index.
-    */
-       size_t size() const
+     *  Returns size of index.
+     */
+    size_t size() const
     {
         return size_;
     }
 
     /**
-    * Returns the length of an index feature.
-    */
+     * Returns the length of an index feature.
+     */
     size_t veclen() const
     {
         return veclen_;
@@ -453,67 +343,73 @@ public:
 
     void set_cb_index( float index)
     {
-        cb_index = index;
+        cb_index_ = index;
     }
 
+    /**
+     * Computes the index memory usage
+     * Returns: memory used by the index
+     */
+    int usedMemory() const
+    {
+        return pool_.usedMemory+pool_.wastedMemory+memoryCounter_;
+    }
 
-       /**
-        * Computes the inde memory usage
-        * Returns: memory used by the index
-        */
-       int usedMemory() const
-       {
-               return  pool.usedMemory+pool.wastedMemory+memoryCounter;
-       }
-
-       /**
-        * Builds the index
-        */
-       void buildIndex()
-       {
-               if (branching<2) {
-                       throw FLANNException("Branching factor must be at least 2");
-               }
+    /**
+     * Builds the index
+     */
+    void buildIndex()
+    {
+        if (branching_<2) {
+            throw FLANNException("Branching factor must be at least 2");
+        }
 
-               indices = new int[size_];
-               for (size_t i=0;i<size_;++i) {
-                       indices[i] = (int)i;
-               }
+        indices_ = new int[size_];
+        for (size_t i=0; i<size_; ++i) {
+            indices_[i] = int(i);
+        }
 
-               root = pool.allocate<KMeansNodeSt>();
-               computeNodeStatistics(root, indices, (int)size_);
-               computeClustering(root, indices, (int)size_, branching,0);
-       }
+        root_ = pool_.allocate<KMeansNode>();
+        computeNodeStatistics(root_, indices_, (int)size_);
+        computeClustering(root_, indices_, (int)size_, branching_,0);
+    }
 
 
     void saveIndex(FILE* stream)
     {
-       save_value(stream, branching);
-       save_value(stream, max_iter);
-       save_value(stream, memoryCounter);
-       save_value(stream, cb_index);
-       save_value(stream, *indices, (int)size_);
+        save_value(stream, branching_);
+        save_value(stream, iterations_);
+        save_value(stream, memoryCounter_);
+        save_value(stream, cb_index_);
+        save_value(stream, *indices_, (int)size_);
 
-               save_tree(stream, root);
+        save_tree(stream, root_);
     }
 
 
     void loadIndex(FILE* stream)
     {
-       load_value(stream, branching);
-       load_value(stream, max_iter);
-       load_value(stream, memoryCounter);
-       load_value(stream, cb_index);
-       if (indices!=NULL) {
-               delete[] indices;
-       }
-               indices = new int[size_];
-       load_value(stream, *indices, (int)size_);
-
-       if (root!=NULL) {
-               free_centers(root);
-       }
-               load_tree(stream, root);
+        load_value(stream, branching_);
+        load_value(stream, iterations_);
+        load_value(stream, memoryCounter_);
+        load_value(stream, cb_index_);
+        if (indices_!=NULL) {
+            delete[] indices_;
+        }
+        indices_ = new int[size_];
+        load_value(stream, *indices_, size_);
+
+        if (root_!=NULL) {
+            free_centers(root_);
+        }
+        load_tree(stream, root_);
+
+        index_params_["algorithm"] = getType();
+        index_params_["branching"] = branching_;
+        index_params_["iterations"] = iterations_;
+        index_params_["centers_init"] = centers_init_;
+        index_params_["cb_index"] = cb_index_;
+
     }
 
 
@@ -526,25 +422,24 @@ public:
      *     vec = the vector for which to search the nearest neighbors
      *     searchParams = parameters that influence the search algorithm (checks, cb_index)
      */
-    void findNeighbors(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, const SearchParams& searchParams)
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
     {
 
-        int maxChecks = searchParams.checks;
+        int maxChecks = get_param(searchParams,"checks",32);
 
-        if (maxChecks<0) {
-            findExactNN(root, result, vec);
+        if (maxChecks==FLANN_CHECKS_UNLIMITED) {
+            findExactNN(root_, result, vec);
         }
         else {
-             // Priority queue storing intermediate branches in the best-bin-first search
+            // Priority queue storing intermediate branches in the best-bin-first search
             Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
 
             int checks = 0;
-
-            findNN(root, result, vec, checks, maxChecks, heap);
+            findNN(root_, result, vec, checks, maxChecks, heap);
 
             BranchSt branch;
             while (heap->popMin(branch) && (checks<maxChecks || !result.full())) {
-                KMeansNode node = branch.node;
+                KMeansNodePtr node = branch.node;
                 findNN(node, result, vec, checks, maxChecks, heap);
             }
             assert(result.full());
@@ -554,7 +449,6 @@ public:
 
     }
 
-
     /**
      * Clustering function that takes a cut in the hierarchical k-means
     * tree and returns the cluster centers of that clustering.
@@ -562,332 +456,377 @@ public:
      *     numClusters = number of clusters to have in the clustering computed
      * Returns: number of cluster centers
      */
-    int getClusterCenters(Matrix<DIST_TYPE>& centers)
+    int getClusterCenters(Matrix<DistanceType>& centers)
     {
         int numClusters = centers.rows;
         if (numClusters<1) {
             throw FLANNException("Number of clusters must be at least 1");
         }
 
-        float variance;
-        KMeansNode* clusters = new KMeansNode[numClusters];
+        DistanceType variance;
+        KMeansNodePtr* clusters = new KMeansNodePtr[numClusters];
 
-        int clusterCount = getMinVarianceClusters(root, clusters, numClusters, variance);
+        int clusterCount = getMinVarianceClusters(root_, clusters, numClusters, variance);
 
-//         logger().info("Clusters requested: %d, returning %d\n",numClusters, clusterCount);
+        Logger::info("Clusters requested: %d, returning %d\n",numClusters, clusterCount);
 
-
-        for (int i=0;i<clusterCount;++i) {
-            DIST_TYPE* center = clusters[i]->pivot;
-            for (size_t j=0;j<veclen_;++j) {
+        for (int i=0; i<clusterCount; ++i) {
+            DistanceType* center = clusters[i]->pivot;
+            for (size_t j=0; j<veclen_; ++j) {
                 centers[i][j] = center[j];
             }
         }
-               delete[] clusters;
+        delete[] clusters;
 
         return clusterCount;
     }
 
-       const IndexParams* getParameters() const
-       {
-               return &index_params;
-       }
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
 
 
 private:
+    /**
+     * Structure representing a node in the hierarchical k-means tree.
+     */
+    struct KMeansNode
+    {
+        /**
+         * The cluster center.
+         */
+        DistanceType* pivot;
+        /**
+         * The cluster radius.
+         */
+        DistanceType radius;
+        /**
+         * The cluster mean radius.
+         */
+        DistanceType mean_radius;
+        /**
+         * The cluster variance.
+         */
+        DistanceType variance;
+        /**
+         * The cluster size (number of points in the cluster)
+         */
+        int size;
+        /**
+         * Child nodes (only for non-terminal nodes)
+         */
+        KMeansNode** childs;
+        /**
+         * Node points (only for terminal nodes)
+         */
+        int* indices;
+        /**
+         * Level
+         */
+        int level;
+    };
+    typedef KMeansNode* KMeansNodePtr;
 
-       KMeansIndex& operator=(const KMeansIndex&);
-       KMeansIndex(const KMeansIndex&);
+    /**
+     * Alias definition for a nicer syntax.
+     */
+    typedef BranchStruct<KMeansNodePtr, DistanceType> BranchSt;
 
-    void save_tree(FILE* stream, KMeansNode node)
+
+
+
+    void save_tree(FILE* stream, KMeansNodePtr node)
     {
-       save_value(stream, *node);
-       save_value(stream, *(node->pivot), (int)veclen_);
-       if (node->childs==NULL) {
-               int indices_offset = (int)(node->indices - indices);
-               save_value(stream, indices_offset);
-       }
-       else {
-               for(int i=0; i<branching; ++i) {
-                       save_tree(stream, node->childs[i]);
-               }
-       }
+        save_value(stream, *node);
+        save_value(stream, *(node->pivot), (int)veclen_);
+        if (node->childs==NULL) {
+            int indices_offset = (int)(node->indices - indices_);
+            save_value(stream, indices_offset);
+        }
+        else {
+            for(int i=0; i<branching_; ++i) {
+                save_tree(stream, node->childs[i]);
+            }
+        }
     }
 
 
-    void load_tree(FILE* stream, KMeansNode& node)
+    void load_tree(FILE* stream, KMeansNodePtr& node)
     {
-       node = pool.allocate<KMeansNodeSt>();
-       load_value(stream, *node);
-       node->pivot = new DIST_TYPE[veclen_];
-       load_value(stream, *(node->pivot), (int)veclen_);
-       if (node->childs==NULL) {
-               int indices_offset;
-               load_value(stream, indices_offset);
-               node->indices = indices + indices_offset;
-       }
-       else {
-               node->childs = pool.allocate<KMeansNode>(branching);
-               for(int i=0; i<branching; ++i) {
-                       load_tree(stream, node->childs[i]);
-               }
-       }
+        node = pool_.allocate<KMeansNode>();
+        load_value(stream, *node);
+        node->pivot = new DistanceType[veclen_];
+        load_value(stream, *(node->pivot), (int)veclen_);
+        if (node->childs==NULL) {
+            int indices_offset;
+            load_value(stream, indices_offset);
+            node->indices = indices_ + indices_offset;
+        }
+        else {
+            node->childs = pool_.allocate<KMeansNodePtr>(branching_);
+            for(int i=0; i<branching_; ++i) {
+                load_tree(stream, node->childs[i]);
+            }
+        }
     }
 
 
     /**
-    * Helper function
-    */
-    void free_centers(KMeansNode node)
+     * Helper function
+     */
+    void free_centers(KMeansNodePtr node)
     {
         delete[] node->pivot;
         if (node->childs!=NULL) {
-            for (int k=0;k<branching;++k) {
+            for (int k=0; k<branching_; ++k) {
                 free_centers(node->childs[k]);
             }
         }
     }
 
-       /**
-        * Computes the statistics of a node (mean, radius, variance).
-        *
-        * Params:
-        *     node = the node to use
-        *     indices = the indices of the points belonging to the node
-        */
-       void computeNodeStatistics(KMeansNode node, int* indices, int indices_length) {
-
-               double radius = 0;
-               double variance = 0;
-               DIST_TYPE* mean = new DIST_TYPE[veclen_];
-               memoryCounter += (int)(veclen_*sizeof(DIST_TYPE));
-
-        memset(mean,0,veclen_*sizeof(float));
-
-               for (size_t i=0;i<size_;++i) {
-                       ELEM_TYPE* vec = dataset[indices[i]];
-            for (size_t j=0;j<veclen_;++j) {
+    /**
+     * Computes the statistics of a node (mean, radius, variance).
+     *
+     * Params:
+     *     node = the node to use
+     *     indices = the indices of the points belonging to the node
+     */
+    void computeNodeStatistics(KMeansNodePtr node, int* indices, int indices_length)
+    {
+
+        DistanceType radius = 0;
+        DistanceType variance = 0;
+        DistanceType* mean = new DistanceType[veclen_];
+        memoryCounter_ += int(veclen_*sizeof(DistanceType));
+
+        memset(mean,0,veclen_*sizeof(DistanceType));
+
+        for (size_t i=0; i<size_; ++i) {
+            ElementType* vec = dataset_[indices[i]];
+            for (size_t j=0; j<veclen_; ++j) {
                 mean[j] += vec[j];
             }
-                       variance += flann_dist(vec,vec+veclen_,zero());
-               }
-               for (size_t j=0;j<veclen_;++j) {
-                       mean[j] /= size_;
-               }
-               variance /= size_;
-               variance -= flann_dist(mean,mean+veclen_,zero());
-
-               double tmp = 0;
-               for (int i=0;i<indices_length;++i) {
-                       tmp = flann_dist(mean, mean + veclen_, dataset[indices[i]]);
-                       if (tmp>radius) {
-                               radius = tmp;
-                       }
-               }
-
-               node->variance = (DIST_TYPE)variance;
-               node->radius = (DIST_TYPE)radius;
-               node->pivot = mean;
-       }
-
-
-       /**
-        * The method responsible with actually doing the recursive hierarchical
-        * clustering
-        *
-        * Params:
-        *     node = the node to cluster
-        *     indices = indices of the points belonging to the current node
-        *     branching = the branching factor to use in the clustering
-        *
-        * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)
-        */
-       void computeClustering(KMeansNode node, int* indices, int indices_length, int branching, int level)
-       {
-               node->size = indices_length;
-               node->level = level;
-
-               if (indices_length < branching) {
-                       node->indices = indices;
+            variance += distance_(vec, ZeroIterator<ElementType>(), veclen_);
+        }
+        for (size_t j=0; j<veclen_; ++j) {
+            mean[j] /= size_;
+        }
+        variance /= size_;
+        variance -= distance_(mean, ZeroIterator<ElementType>(), veclen_);
+
+        DistanceType tmp = 0;
+        for (int i=0; i<indices_length; ++i) {
+            tmp = distance_(mean, dataset_[indices[i]], veclen_);
+            if (tmp>radius) {
+                radius = tmp;
+            }
+        }
+
+        node->variance = variance;
+        node->radius = radius;
+        node->pivot = mean;
+    }
+
+
+    /**
+     * The method responsible with actually doing the recursive hierarchical
+     * clustering
+     *
+     * Params:
+     *     node = the node to cluster
+     *     indices = indices of the points belonging to the current node
+     *     branching = the branching factor to use in the clustering
+     *
+     * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)
+     */
+    void computeClustering(KMeansNodePtr node, int* indices, int indices_length, int branching, int level)
+    {
+        node->size = indices_length;
+        node->level = level;
+
+        if (indices_length < branching) {
+            node->indices = indices;
             std::sort(node->indices,node->indices+indices_length);
             node->childs = NULL;
-                       return;
-               }
+            return;
+        }
 
-               int* centers_idx = new int[branching];
+        int* centers_idx = new int[branching];
         int centers_length;
-               (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length);
+        (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length);
 
-               if (centers_length<branching) {
+        if (centers_length<branching) {
             node->indices = indices;
             std::sort(node->indices,node->indices+indices_length);
             node->childs = NULL;
-                       return;
-               }
+            delete [] centers_idx;
+            return;
+        }
 
 
-        Matrix<double> dcenters(new double[branching*veclen_],branching,(long)veclen_);
+        Matrix<double> dcenters(new double[branching*veclen_],branching,veclen_);
         for (int i=0; i<centers_length; ++i) {
-               ELEM_TYPE* vec = dataset[centers_idx[i]];
+            ElementType* vec = dataset_[centers_idx[i]];
             for (size_t k=0; k<veclen_; ++k) {
                 dcenters[i][k] = double(vec[k]);
             }
         }
-               delete[] centers_idx;
+        delete[] centers_idx;
 
-               float* radiuses = new float[branching];
-               int* count = new int[branching];
-        for (int i=0;i<branching;++i) {
+        DistanceType* radiuses = new DistanceType[branching];
+        int* count = new int[branching];
+        for (int i=0; i<branching; ++i) {
             radiuses[i] = 0;
             count[i] = 0;
         }
 
         //     assign points to clusters
-               int* belongs_to = new int[indices_length];
-               for (int i=0;i<indices_length;++i) {
-
-                       double sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]] + veclen_ ,dcenters[0]);
-                       belongs_to[i] = 0;
-                       for (int j=1;j<branching;++j) {
-                               double new_sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_, dcenters[j]);
-                               if (sq_dist>new_sq_dist) {
-                                       belongs_to[i] = j;
-                                       sq_dist = new_sq_dist;
-                               }
-                       }
+        int* belongs_to = new int[indices_length];
+        for (int i=0; i<indices_length; ++i) {
+
+            DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_);
+            belongs_to[i] = 0;
+            for (int j=1; j<branching; ++j) {
+                DistanceType new_sq_dist = distance_(dataset_[indices[i]], dcenters[j], veclen_);
+                if (sq_dist>new_sq_dist) {
+                    belongs_to[i] = j;
+                    sq_dist = new_sq_dist;
+                }
+            }
             if (sq_dist>radiuses[belongs_to[i]]) {
-                radiuses[belongs_to[i]] = (float)sq_dist;
+                radiuses[belongs_to[i]] = sq_dist;
             }
-                       count[belongs_to[i]]++;
-               }
+            count[belongs_to[i]]++;
+        }
 
-               bool converged = false;
-               int iteration = 0;
-               while (!converged && iteration<max_iter) {
-                       converged = true;
-                       iteration++;
+        bool converged = false;
+        int iteration = 0;
+        while (!converged && iteration<iterations_) {
+            converged = true;
+            iteration++;
 
-                       // compute the new cluster centers
-                       for (int i=0;i<branching;++i) {
+            // compute the new cluster centers
+            for (int i=0; i<branching; ++i) {
                 memset(dcenters[i],0,sizeof(double)*veclen_);
                 radiuses[i] = 0;
-                       }
-            for (int i=0;i<indices_length;++i) {
-                               ELEM_TYPE* vec = dataset[indices[i]];
-                               double* center = dcenters[belongs_to[i]];
-                               for (size_t k=0;k<veclen_;++k) {
-                                       center[k] += vec[k];
-                               }
-                       }
-                       for (int i=0;i<branching;++i) {
+            }
+            for (int i=0; i<indices_length; ++i) {
+                ElementType* vec = dataset_[indices[i]];
+                double* center = dcenters[belongs_to[i]];
+                for (size_t k=0; k<veclen_; ++k) {
+                    center[k] += vec[k];
+                }
+            }
+            for (int i=0; i<branching; ++i) {
                 int cnt = count[i];
-                for (size_t k=0;k<veclen_;++k) {
+                for (size_t k=0; k<veclen_; ++k) {
                     dcenters[i][k] /= cnt;
                 }
-                       }
-
-                       // reassign points to clusters
-                       for (int i=0;i<indices_length;++i) {
-                               float sq_dist = (float)flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_ ,dcenters[0]);
-                               int new_centroid = 0;
-                               for (int j=1;j<branching;++j) {
-                                       float new_sq_dist = (float)flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_,dcenters[j]);
-                                       if (sq_dist>new_sq_dist) {
-                                               new_centroid = j;
-                                               sq_dist = new_sq_dist;
-                                       }
-                               }
-                               if (sq_dist>radiuses[new_centroid]) {
-                                       radiuses[new_centroid] = sq_dist;
-                               }
-                               if (new_centroid != belongs_to[i]) {
-                                       count[belongs_to[i]]--;
-                                       count[new_centroid]++;
-                                       belongs_to[i] = new_centroid;
-
-                                       converged = false;
-                               }
-                       }
-
-                       for (int i=0;i<branching;++i) {
-                               // if one cluster converges to an empty cluster,
-                               // move an element into that cluster
-                               if (count[i]==0) {
-                                       int j = (i+1)%branching;
-                                       while (count[j]<=1) {
-                                               j = (j+1)%branching;
-                                       }
-
-                                       for (int k=0;k<indices_length;++k) {
-                                               if (belongs_to[k]==j) {
-                                                       belongs_to[k] = i;
-                                                       count[j]--;
-                                                       count[i]++;
-                                                       break;
-                                               }
-                                       }
-                                       converged = false;
-                               }
-                       }
-
-               }
-
-        DIST_TYPE** centers = new DIST_TYPE*[branching];
+            }
+
+            // reassign points to clusters
+            for (int i=0; i<indices_length; ++i) {
+                DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_);
+                int new_centroid = 0;
+                for (int j=1; j<branching; ++j) {
+                    DistanceType new_sq_dist = distance_(dataset_[indices[i]], dcenters[j], veclen_);
+                    if (sq_dist>new_sq_dist) {
+                        new_centroid = j;
+                        sq_dist = new_sq_dist;
+                    }
+                }
+                if (sq_dist>radiuses[new_centroid]) {
+                    radiuses[new_centroid] = sq_dist;
+                }
+                if (new_centroid != belongs_to[i]) {
+                    count[belongs_to[i]]--;
+                    count[new_centroid]++;
+                    belongs_to[i] = new_centroid;
+
+                    converged = false;
+                }
+            }
+
+            for (int i=0; i<branching; ++i) {
+                // if one cluster converges to an empty cluster,
+                // move an element into that cluster
+                if (count[i]==0) {
+                    int j = (i+1)%branching;
+                    while (count[j]<=1) {
+                        j = (j+1)%branching;
+                    }
+
+                    for (int k=0; k<indices_length; ++k) {
+                        if (belongs_to[k]==j) {
+                            belongs_to[k] = i;
+                            count[j]--;
+                            count[i]++;
+                            break;
+                        }
+                    }
+                    converged = false;
+                }
+            }
+
+        }
+
+        DistanceType** centers = new DistanceType*[branching];
 
         for (int i=0; i<branching; ++i) {
-                       centers[i] = new DIST_TYPE[veclen_];
-                       memoryCounter += (int)(veclen_*sizeof(DIST_TYPE));
+            centers[i] = new DistanceType[veclen_];
+            memoryCounter_ += veclen_*sizeof(DistanceType);
             for (size_t k=0; k<veclen_; ++k) {
-                centers[i][k] = (DIST_TYPE)dcenters[i][k];
+                centers[i][k] = (DistanceType)dcenters[i][k];
             }
-               }
-
-
-               // compute kmeans clustering for each of the resulting clusters
-               node->childs = pool.allocate<KMeansNode>(branching);
-               int start = 0;
-               int end = start;
-               for (int c=0;c<branching;++c) {
-                       int s = count[c];
-
-                       double variance = 0;
-                   double mean_radius =0;
-                       for (int i=0;i<indices_length;++i) {
-                               if (belongs_to[i]==c) {
-                                       double d = flann_dist(dataset[indices[i]],dataset[indices[i]]+veclen_,zero());
-                                       variance += d;
-                                       mean_radius += sqrt(d);
+        }
+
+
+        // compute kmeans clustering for each of the resulting clusters
+        node->childs = pool_.allocate<KMeansNodePtr>(branching);
+        int start = 0;
+        int end = start;
+        for (int c=0; c<branching; ++c) {
+            int s = count[c];
+
+            DistanceType variance = 0;
+            DistanceType mean_radius =0;
+            for (int i=0; i<indices_length; ++i) {
+                if (belongs_to[i]==c) {
+                    DistanceType d = distance_(dataset_[indices[i]], ZeroIterator<ElementType>(), veclen_);
+                    variance += d;
+                    mean_radius += sqrt(d);
                     std::swap(indices[i],indices[end]);
                     std::swap(belongs_to[i],belongs_to[end]);
-                                       end++;
-                               }
-                       }
-                       variance /= s;
-                       mean_radius /= s;
-                       variance -= flann_dist(centers[c],centers[c]+veclen_,zero());
-
-                       node->childs[c] = pool.allocate<KMeansNodeSt>();
-                       node->childs[c]->radius = radiuses[c];
-                       node->childs[c]->pivot = centers[c];
-                       node->childs[c]->variance = (float)variance;
-                       node->childs[c]->mean_radius = (float)mean_radius;
-                       node->childs[c]->indices = NULL;
-                       computeClustering(node->childs[c],indices+start, end-start, branching, level+1);
-                       start=end;
-               }
-
-               delete[] dcenters.data;
-               delete[] centers;
-               delete[] radiuses;
-               delete[] count;
-               delete[] belongs_to;
-       }
-
-
-
-       /**
-        * Performs one descent in the hierarchical k-means tree. The branches not
-        * visited are stored in a priority queue.
+                    end++;
+                }
+            }
+            variance /= s;
+            mean_radius /= s;
+            variance -= distance_(centers[c], ZeroIterator<ElementType>(), veclen_);
+
+            node->childs[c] = pool_.allocate<KMeansNode>();
+            node->childs[c]->radius = radiuses[c];
+            node->childs[c]->pivot = centers[c];
+            node->childs[c]->variance = variance;
+            node->childs[c]->mean_radius = mean_radius;
+            node->childs[c]->indices = NULL;
+            computeClustering(node->childs[c],indices+start, end-start, branching, level+1);
+            start=end;
+        }
+
+        delete[] dcenters.data;
+        delete[] centers;
+        delete[] radiuses;
+        delete[] count;
+        delete[] belongs_to;
+    }
+
+
+
+    /**
+     * Performs one descent in the hierarchical k-means tree. The branches not
+     * visited are stored in a priority queue.
      *
      * Params:
      *      node = node to explore
@@ -898,218 +837,279 @@ private:
      */
 
 
-       void findNN(KMeansNode node, ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, int& checks, int maxChecks,
-                       Heap<BranchSt>* heap)
-       {
-               // Ignore those clusters that are too far away
-               {
-                       DIST_TYPE bsq = (DIST_TYPE)flann_dist(vec, vec+veclen_, node->pivot);
-                       DIST_TYPE rsq = node->radius;
-                       DIST_TYPE wsq = result.worstDist();
-
-                       DIST_TYPE val = bsq-rsq-wsq;
-                       DIST_TYPE val2 = val*val-4*rsq*wsq;
-
-                       //if (val>0) {
-                       if (val>0 && val2>0) {
-                               return;
-                       }
-               }
+    void findNN(KMeansNodePtr node, ResultSet<DistanceType>& result, const ElementType* vec, int& checks, int maxChecks,
+                Heap<BranchSt>* heap)
+    {
+        // Ignore those clusters that are too far away
+        {
+            DistanceType bsq = distance_(vec, node->pivot, veclen_);
+            DistanceType rsq = node->radius;
+            DistanceType wsq = result.worstDist();
+
+            DistanceType val = bsq-rsq-wsq;
+            DistanceType val2 = val*val-4*rsq*wsq;
+
+            //if (val>0) {
+            if ((val>0)&&(val2>0)) {
+                return;
+            }
+        }
 
-               if (node->childs==NULL) {
+        if (node->childs==NULL) {
             if (checks>=maxChecks) {
                 if (result.full()) return;
             }
             checks += node->size;
-                       for (int i=0;i<node->size;++i) {
-                               result.addPoint(dataset[node->indices[i]], node->indices[i]);
-                       }
-               }
-               else {
-                       float* domain_distances = new float[branching];
-                       int closest_center = exploreNodeBranches(node, vec, domain_distances, heap);
-                       delete[] domain_distances;
-                       findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap);
-               }
-       }
-
-       /**
-        * Helper function that computes the nearest childs of a node to a given query point.
-        * Params:
-        *     node = the node
-        *     q = the query point
-        *     distances = array with the distances to each child node.
-        * Returns:
-        */
-       int exploreNodeBranches(KMeansNode node, const ELEM_TYPE* q, float* domain_distances, Heap<BranchSt>* heap)
-       {
-
-               int best_index = 0;
-               domain_distances[best_index] = (float)flann_dist(q,q+veclen_,node->childs[best_index]->pivot);
-               for (int i=1;i<branching;++i) {
-                       domain_distances[i] = (float)flann_dist(q,q+veclen_,node->childs[i]->pivot);
-                       if (domain_distances[i]<domain_distances[best_index]) {
-                               best_index = i;
-                       }
-               }
-
-//             float* best_center = node->childs[best_index]->pivot;
-               for (int i=0;i<branching;++i) {
-                       if (i != best_index) {
-                               domain_distances[i] -= cb_index*node->childs[i]->variance;
-
-//                             float dist_to_border = getDistanceToBorder(node.childs[i].pivot,best_center,q);
-//                             if (domain_distances[i]<dist_to_border) {
-//                                     domain_distances[i] = dist_to_border;
-//                             }
-                               heap->insert(BranchSt::make_branch(node->childs[i],domain_distances[i]));
-                       }
-               }
-
-               return best_index;
-       }
-
-
-       /**
-        * Function the performs exact nearest neighbor search by traversing the entire tree.
-        */
-       void findExactNN(KMeansNode node, ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec)
-       {
-               // Ignore those clusters that are too far away
-               {
-                       float bsq = (float)flann_dist(vec, vec+veclen_, node->pivot);
-                       float rsq = node->radius;
-                       float wsq = result.worstDist();
-
-                       float val = bsq-rsq-wsq;
-                       float val2 = val*val-4*rsq*wsq;
-
-       //              if (val>0) {
-                       if (val>0 && val2>0) {
-                               return;
-                       }
-               }
-
-
-               if (node->childs==NULL) {
-                       for (int i=0;i<node->size;++i) {
-                               result.addPoint(dataset[node->indices[i]], node->indices[i]);
-                       }
-               }
-               else {
-                       int* sort_indices = new int[branching];
-
-                       getCenterOrdering(node, vec, sort_indices);
-
-                       for (int i=0; i<branching; ++i) {
-                               findExactNN(node->childs[sort_indices[i]],result,vec);
-                       }
-
-                       delete[] sort_indices;
-               }
-       }
-
-
-       /**
-        * Helper function.
-        *
-        * I computes the order in which to traverse the child nodes of a particular node.
-        */
-       void getCenterOrdering(KMeansNode node, const ELEM_TYPE* q, int* sort_indices)
-       {
-               float* domain_distances = new float[branching];
-               for (int i=0;i<branching;++i) {
-                       float dist = (float)flann_dist(q, q+veclen_, node->childs[i]->pivot);
-
-                       int j=0;
-                       while (domain_distances[j]<dist && j<i) j++;
-                       for (int k=i;k>j;--k) {
-                               domain_distances[k] = domain_distances[k-1];
-                               sort_indices[k] = sort_indices[k-1];
-                       }
-                       domain_distances[j] = dist;
-                       sort_indices[j] = i;
-               }
-               delete[] domain_distances;
-       }
-
-       /**
-        * Method that computes the squared distance from the query point q
-        * from inside region with center c to the border between this
-        * region and the region with center p
-        */
-       float getDistanceToBorder(float* p, float* c, float* q)
-       {
-               float sum = 0;
-               float sum2 = 0;
-
-               for (int i=0;i<veclen_; ++i) {
-                       float t = c[i]-p[i];
-                       sum += t*(q[i]-(c[i]+p[i])/2);
-                       sum2 += t*t;
-               }
-
-               return sum*sum/sum2;
-       }
-
-
-       /**
-        * Helper function the descends in the hierarchical k-means tree by spliting those clusters that minimize
-        * the overall variance of the clustering.
-        * Params:
-        *     root = root node
-        *     clusters = array with clusters centers (return value)
-        *     varianceValue = variance of the clustering (return value)
-        * Returns:
-        */
-       int getMinVarianceClusters(KMeansNode root, KMeansNode* clusters, int clusters_length, float& varianceValue)
-       {
-               int clusterCount = 1;
-               clusters[0] = root;
-
-               float meanVariance = root->variance*root->size;
-
-               while (clusterCount<clusters_length) {
-                       float minVariance = (std::numeric_limits<float>::max)();
-                       int splitIndex = -1;
-
-                       for (int i=0;i<clusterCount;++i) {
-                               if (clusters[i]->childs != NULL) {
-
-                                       float variance = meanVariance - clusters[i]->variance*clusters[i]->size;
-
-                                       for (int j=0;j<branching;++j) {
-                                               variance += clusters[i]->childs[j]->variance*clusters[i]->childs[j]->size;
-                                       }
-                                       if (variance<minVariance) {
-                                               minVariance = variance;
-                                               splitIndex = i;
-                                       }
-                               }
-                       }
-
-                       if (splitIndex==-1) break;
-                       if ( (branching+clusterCount-1) > clusters_length) break;
-
-                       meanVariance = minVariance;
-
-                       // split node
-                       KMeansNode toSplit = clusters[splitIndex];
-                       clusters[splitIndex] = toSplit->childs[0];
-                       for (int i=1;i<branching;++i) {
-                               clusters[clusterCount++] = toSplit->childs[i];
-                       }
-               }
-
-               varianceValue = meanVariance/root->size;
-               return clusterCount;
-       }
-};
+            for (int i=0; i<node->size; ++i) {
+                int index = node->indices[i];
+                DistanceType dist = distance_(dataset_[index], vec, veclen_);
+                result.addPoint(dist, index);
+            }
+        }
+        else {
+            DistanceType* domain_distances = new DistanceType[branching_];
+            int closest_center = exploreNodeBranches(node, vec, domain_distances, heap);
+            delete[] domain_distances;
+            findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap);
+        }
+    }
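A note on the pruning test at the top of findNN(): assuming the distance functor returns squared values (as L2 does), so that bsq = b^2, rsq = r^2 and wsq = w^2, the cluster may be skipped when b > r + w. Squaring twice avoids any square roots:

    b > r + w  <=>  bsq - rsq - wsq > 2*r*w  (and the left side is positive)
               <=>  val > 0  and  val*val > 4*rsq*wsq

which is exactly the (val>0)&&(val2>0) condition above.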
+
+    /**
+     * Helper function that computes the nearest children of a node to a given query point.
+     * Params:
+     *     node = the node
+     *     q = the query point
+     *     domain_distances = array that receives the distance to each child node.
+     * Returns: the index of the nearest child node
+     */
+    int exploreNodeBranches(KMeansNodePtr node, const ElementType* q, DistanceType* domain_distances, Heap<BranchSt>* heap)
+    {
+
+        int best_index = 0;
+        domain_distances[best_index] = distance_(q, node->childs[best_index]->pivot, veclen_);
+        for (int i=1; i<branching_; ++i) {
+            domain_distances[i] = distance_(q, node->childs[i]->pivot, veclen_);
+            if (domain_distances[i]<domain_distances[best_index]) {
+                best_index = i;
+            }
+        }
+
+        //             float* best_center = node->childs[best_index]->pivot;
+        for (int i=0; i<branching_; ++i) {
+            if (i != best_index) {
+                domain_distances[i] -= cb_index_*node->childs[i]->variance;
+
+                //                             float dist_to_border = getDistanceToBorder(node.childs[i].pivot,best_center,q);
+                //                             if (domain_distances[i]<dist_to_border) {
+                //                                     domain_distances[i] = dist_to_border;
+                //                             }
+                heap->insert(BranchSt(node->childs[i],domain_distances[i]));
+            }
+        }
+
+        return best_index;
+    }
+
+
+    /**
+     * Function that performs exact nearest-neighbor search by traversing the entire tree.
+     */
+    void findExactNN(KMeansNodePtr node, ResultSet<DistanceType>& result, const ElementType* vec)
+    {
+        // Ignore those clusters that are too far away
+        {
+            DistanceType bsq = distance_(vec, node->pivot, veclen_);
+            DistanceType rsq = node->radius;
+            DistanceType wsq = result.worstDist();
+
+            DistanceType val = bsq-rsq-wsq;
+            DistanceType val2 = val*val-4*rsq*wsq;
+
+            //                  if (val>0) {
+            if ((val>0)&&(val2>0)) {
+                return;
+            }
+        }
+
+
+        if (node->childs==NULL) {
+            for (int i=0; i<node->size; ++i) {
+                int index = node->indices[i];
+                DistanceType dist = distance_(dataset_[index], vec, veclen_);
+                result.addPoint(dist, index);
+            }
+        }
+        else {
+            int* sort_indices = new int[branching_];
+
+            getCenterOrdering(node, vec, sort_indices);
+
+            for (int i=0; i<branching_; ++i) {
+                findExactNN(node->childs[sort_indices[i]],result,vec);
+            }
+
+            delete[] sort_indices;
+        }
+    }
+
+
+    /**
+     * Helper function.
+     *
+     * It computes the order in which to traverse the child nodes of a particular node.
+     */
+    void getCenterOrdering(KMeansNodePtr node, const ElementType* q, int* sort_indices)
+    {
+        DistanceType* domain_distances = new DistanceType[branching_];
+        for (int i=0; i<branching_; ++i) {
+            DistanceType dist = distance_(q, node->childs[i]->pivot, veclen_);
+
+            int j=0;
+            while (domain_distances[j]<dist && j<i) j++;
+            for (int k=i; k>j; --k) {
+                domain_distances[k] = domain_distances[k-1];
+                sort_indices[k] = sort_indices[k-1];
+            }
+            domain_distances[j] = dist;
+            sort_indices[j] = i;
+        }
+        delete[] domain_distances;
+    }
+
+    /**
+     * Method that computes the squared distance from the query point q,
+     * located inside the region with center c, to the border between this
+     * region and the region with center p.
+     */
+    DistanceType getDistanceToBorder(DistanceType* p, DistanceType* c, DistanceType* q)
+    {
+        DistanceType sum = 0;
+        DistanceType sum2 = 0;
+
+        for (int i=0; i<veclen_; ++i) {
+            DistanceType t = c[i]-p[i];
+            sum += t*(q[i]-(c[i]+p[i])/2);
+            sum2 += t*t;
+        }
+
+        return sum*sum/sum2;
+    }
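The formula above is a squared point-to-plane distance: the border between the region of c and the region of p is the perpendicular bisector of the segment cp, a hyperplane with normal t = c - p passing through the midpoint (c + p)/2. The distance from q to that plane is |t . (q - (c + p)/2)| / ||t||, so its square is sum*sum/sum2 with sum = t . (q - (c + p)/2) and sum2 = ||t||^2, which is what the function returns.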
+
+
+    /**
+     * Helper function that descends in the hierarchical k-means tree by splitting those clusters that minimize
+     * the overall variance of the clustering.
+     * Params:
+     *     root = root node
+     *     clusters = array with cluster centers (return value)
+     *     varianceValue = variance of the clustering (return value)
+     * Returns: number of cluster centers
+     */
+    int getMinVarianceClusters(KMeansNodePtr root, KMeansNodePtr* clusters, int clusters_length, DistanceType& varianceValue)
+    {
+        int clusterCount = 1;
+        clusters[0] = root;
+
+        DistanceType meanVariance = root->variance*root->size;
+
+        while (clusterCount<clusters_length) {
+            DistanceType minVariance = (std::numeric_limits<DistanceType>::max)();
+            int splitIndex = -1;
+
+            for (int i=0; i<clusterCount; ++i) {
+                if (clusters[i]->childs != NULL) {
+
+                    DistanceType variance = meanVariance - clusters[i]->variance*clusters[i]->size;
+
+                    for (int j=0; j<branching_; ++j) {
+                        variance += clusters[i]->childs[j]->variance*clusters[i]->childs[j]->size;
+                    }
+                    if (variance<minVariance) {
+                        minVariance = variance;
+                        splitIndex = i;
+                    }
+                }
+            }
+
+            if (splitIndex==-1) break;
+            if ( (branching_+clusterCount-1) > clusters_length) break;
+
+            meanVariance = minVariance;
+
+            // split node
+            KMeansNodePtr toSplit = clusters[splitIndex];
+            clusters[splitIndex] = toSplit->childs[0];
+            for (int i=1; i<branching_; ++i) {
+                clusters[clusterCount++] = toSplit->childs[i];
+            }
+        }
+
+        varianceValue = meanVariance/root->size;
+        return clusterCount;
+    }
 
+private:
+    /** The branching factor used in the hierarchical k-means clustering */
+    int branching_;
+
+    /** Maximum number of iterations to use when performing k-means clustering */
+    int iterations_;
+
+    /** Algorithm for choosing the cluster centers */
+    flann_centers_init_t centers_init_;
+
+    /**
+     * Cluster border index. This is used in the tree search phase when determining
+     * the closest cluster to explore next. A zero value takes into account only
+     * the cluster centres, a value greater than zero also takes into account the size
+     * of the cluster.
+     */
+    float cb_index_;
+
+    /**
+     * The dataset used by this index
+     */
+    const Matrix<ElementType> dataset_;
+
+    /** Index parameters */
+    IndexParams index_params_;
+
+    /**
+     * Number of features in the dataset.
+     */
+    size_t size_;
 
+    /**
+     * Length of each feature.
+     */
+    size_t veclen_;
+
+    /**
+     * The root node in the tree.
+     */
+    KMeansNodePtr root_;
+
+    /**
+     *  Array of indices to vectors in the dataset.
+     */
+    int* indices_;
 
-//register_index(KMEANS,KMeansTree)
+    /**
+     * The distance
+     */
+    Distance distance_;
+
+    /**
+     * Pooled memory allocator.
+     */
+    PooledAllocator pool_;
+
+    /**
+     * Memory occupied by the index.
+     */
+    int memoryCounter_;
+};
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_KMEANSTREE_H_
+#endif //OPENCV_FLANN_KMEANS_INDEX_H_
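To make the reworked interface above concrete, here is a minimal usage sketch pieced together from the constructor, buildIndex() and getClusterCenters(). The include path, the L2<float> functor and the KMeansIndexParams(branching, iterations, centers_init, cb_index) argument order are assumptions of this sketch, not something this diff guarantees:

    #include <cstdlib>
    #include <vector>
    #include "opencv2/flann/kmeans_index.h"   // assumed to pull in Matrix, dist.h, etc.

    int main()
    {
        const size_t rows = 1000, cols = 64;
        std::vector<float> feats(rows * cols);
        for (size_t i = 0; i < feats.size(); ++i) feats[i] = std::rand() / float(RAND_MAX);
        cvflann::Matrix<float> data(&feats[0], rows, cols);

        // Hierarchical k-means tree: branching 32, 11 iterations, k-means++ seeding, cb_index 0.4.
        cvflann::KMeansIndex<cvflann::L2<float> > index(
            data, cvflann::KMeansIndexParams(32, 11, cvflann::FLANN_CENTERS_KMEANSPP, 0.4f));
        index.buildIndex();

        // Cut the tree into at most 10 clusters and copy out their centers.
        std::vector<float> cdata(10 * cols);
        cvflann::Matrix<float> centers(&cdata[0], 10, cols);
        int found = index.getClusterCenters(centers);
        return found > 0 ? 0 : 1;
    }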
index 3a17ade..ecb99f2 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_LINEARSEARCH_H_
-#define _OPENCV_LINEARSEARCH_H_
-
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/nn_index.h"
+#ifndef OPENCV_FLANN_LINEAR_INDEX_H_
+#define OPENCV_FLANN_LINEAR_INDEX_H_
 
+#include "general.h"
+#include "nn_index.h"
 
 namespace cvflann
 {
 
-struct CV_EXPORTS LinearIndexParams : public IndexParams {
-       LinearIndexParams() : IndexParams(FLANN_INDEX_LINEAR) {};
-
-       void print() const
-       {
-               logger().info("Index type: %d\n",(int)algorithm);
-       }
+struct LinearIndexParams : public IndexParams
+{
+    LinearIndexParams()
+    {
+        (* this)["algorithm"] = FLANN_INDEX_LINEAR;
+    }
 };
 
-
-template <typename ELEM_TYPE, typename DIST_TYPE = typename DistType<ELEM_TYPE>::type >
-class LinearIndex : public NNIndex<ELEM_TYPE>
+template <typename Distance>
+class LinearIndex : public NNIndex<Distance>
 {
-       const Matrix<ELEM_TYPE> dataset;
-       const LinearIndexParams& index_params;
+public:
 
-       LinearIndex(const LinearIndex&);
-       LinearIndex& operator=(const LinearIndex&);
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
 
-public:
 
-       LinearIndex(const Matrix<ELEM_TYPE>& inputData, const LinearIndexParams& params = LinearIndexParams() ) :
-               dataset(inputData), index_params(params)
-       {
-       }
+    LinearIndex(const Matrix<ElementType>& inputData, const IndexParams& params = LinearIndexParams(),
+                Distance d = Distance()) :
+        dataset_(inputData), index_params_(params), distance_(d)
+    {
+    }
+
+    LinearIndex(const LinearIndex&);
+    LinearIndex& operator=(const LinearIndex&);
 
     flann_algorithm_t getType() const
     {
@@ -70,52 +69,64 @@ public:
     }
 
 
-       size_t size() const
-       {
-               return dataset.rows;
-       }
+    size_t size() const
+    {
+        return dataset_.rows;
+    }
 
-       size_t veclen() const
-       {
-               return dataset.cols;
-       }
+    size_t veclen() const
+    {
+        return dataset_.cols;
+    }
 
 
-       int usedMemory() const
-       {
-               return 0;
-       }
+    int usedMemory() const
+    {
+        return 0;
+    }
 
-       void buildIndex()
-       {
-               /* nothing to do here for linear search */
-       }
+    void buildIndex()
+    {
+        /* nothing to do here for linear search */
+    }
 
     void saveIndex(FILE*)
     {
-               /* nothing to do here for linear search */
+        /* nothing to do here for linear search */
     }
 
 
     void loadIndex(FILE*)
     {
-               /* nothing to do here for linear search */
+        /* nothing to do here for linear search */
+
+        index_params_["algorithm"] = getType();
+    }
+
+    void findNeighbors(ResultSet<DistanceType>& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/)
+    {
+        ElementType* data = dataset_.data;
+        for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) {
+            DistanceType dist = distance_(data, vec, dataset_.cols);
+            resultSet.addPoint(dist, i);
+        }
     }
 
-       void findNeighbors(ResultSet<ELEM_TYPE>& resultSet, const ELEM_TYPE*, const SearchParams&)
-       {
-               for (size_t i=0;i<dataset.rows;++i) {
-                       resultSet.addPoint(dataset[i],(int)i);
-               }
-       }
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
 
-       const IndexParams* getParameters() const
-       {
-               return &index_params;
-       }
+private:
+    /** The dataset */
+    const Matrix<ElementType> dataset_;
+    /** Index parameters */
+    IndexParams index_params_;
+    /** Index distance */
+    Distance distance_;
 
 };
 
-} // namespace cvflann
+}
 
-#endif // _OPENCV_LINEARSEARCH_H_
+#endif // OPENCV_FLANN_LINEAR_INDEX_H_
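The old constructor-style parameter structs with print() are gone: IndexParams is now a string-keyed map (see params.h), populated by small structs such as LinearIndexParams above and read back with get_param, the same helper the index constructors use. A short sketch of the new style; the include path and the "custom_knob" key are purely illustrative assumptions:

    #include "opencv2/flann/linear_index.h"   // assumed to bring in params.h via nn_index.h

    void params_demo()
    {
        cvflann::IndexParams p = cvflann::LinearIndexParams();   // {"algorithm": FLANN_INDEX_LINEAR}
        p["custom_knob"] = 0.5f;                                  // hypothetical entry, not a FLANN key

        // Typed lookups: a throwing form, and a form with a default for missing keys.
        cvflann::flann_algorithm_t algo =
            cvflann::get_param<cvflann::flann_algorithm_t>(p, "algorithm");
        float knob = cvflann::get_param(p, "custom_knob", 1.0f);
        (void)algo; (void)knob;
    }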
index 979756d..303f0c9 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_LOGGER_H_
-#define _OPENCV_LOGGER_H_
+#ifndef FLANN_LOGGER_H
+#define FLANN_LOGGER_H
 
-#include <cstdio>
+#include <stdio.h>
 #include <stdarg.h>
 
+#include "defines.h"
+
+
 namespace cvflann
 {
-    
-enum flann_log_level_t {
-    FLANN_LOG_NONE = 0,
-    FLANN_LOG_FATAL = 1,
-    FLANN_LOG_ERROR = 2,
-    FLANN_LOG_WARN = 3,
-    FLANN_LOG_INFO = 4
-};
-    
-class CV_EXPORTS Logger
-{
-    FILE* stream;
-    int logLevel;
 
-public:
-
-    Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {};
+class Logger
+{
+    Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {}
 
     ~Logger()
     {
-        if (stream!=NULL && stream!=stdout) {
+        if ((stream!=NULL)&&(stream!=stdout)) {
             fclose(stream);
         }
     }
 
-    void setDestination(const char* name)
+    static Logger& instance()
+    {
+        static Logger logger;
+        return logger;
+    }
+
+    void _setDestination(const char* name)
     {
         if (name==NULL) {
             stream = stdout;
@@ -74,23 +70,61 @@ public:
         }
     }
 
-    void setLevel(int level) { logLevel = level; }
-
-    int log(int level, const char* fmt, ...);
-
-    int log(int level, const char* fmt, va_list arglist);
+    int _log(int level, const char* fmt, va_list arglist)
+    {
+        if (level > logLevel ) return -1;
+        int ret = vfprintf(stream, fmt, arglist);
+        return ret;
+    }
 
-    int fatal(const char* fmt, ...);
+public:
+    /**
+     * Sets the logging level. All messages with lower priority will be ignored.
+     * @param level Logging level
+     */
+    static void setLevel(int level) { instance().logLevel = level; }
+
+    /**
+     * Sets the logging destination
+     * @param name Filename or NULL for console
+     */
+    static void setDestination(const char* name) { instance()._setDestination(name); }
+
+    /**
+     * Print log message
+     * @param level Log level
+     * @param fmt Message format
+     * @return the value returned by vfprintf, or -1 if the message level is above the current log level
+     */
+    static int log(int level, const char* fmt, ...)
+    {
+        va_list arglist;
+        va_start(arglist, fmt);
+        int ret = instance()._log(level,fmt,arglist);
+        va_end(arglist);
+        return ret;
+    }
 
-    int error(const char* fmt, ...);
+#define LOG_METHOD(NAME,LEVEL) \
+    static int NAME(const char* fmt, ...) \
+    { \
+        va_list ap; \
+        va_start(ap, fmt); \
+        int ret = instance()._log(LEVEL, fmt, ap); \
+        va_end(ap); \
+        return ret; \
+    }
 
-    int warn(const char* fmt, ...);
+    LOG_METHOD(fatal, FLANN_LOG_FATAL)
+    LOG_METHOD(error, FLANN_LOG_ERROR)
+    LOG_METHOD(warn, FLANN_LOG_WARN)
+    LOG_METHOD(info, FLANN_LOG_INFO)
 
-    int info(const char* fmt, ...);
+private:
+    FILE* stream;
+    int logLevel;
 };
 
-CV_EXPORTS Logger& logger();
-
-} // namespace cvflann
+}
 
-#endif //_OPENCV_LOGGER_H_
+#endif //FLANN_LOGGER_H
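Call sites no longer need the logger() accessor: the Logger above is a singleton reached through static methods, and the log-level constants now live in defines.h. A minimal sketch (the include path and the cvflann:: qualification of the level constant are assumptions):

    #include "opencv2/flann/logger.h"

    void logging_demo()
    {
        // Send messages to a file instead of stdout and raise the verbosity to INFO.
        cvflann::Logger::setDestination("flann.log");
        cvflann::Logger::setLevel(cvflann::FLANN_LOG_INFO);

        cvflann::Logger::info("Clusters requested: %d, returning %d\n", 10, 8);
        cvflann::Logger::warn("cb_index %g is outside [0, 1]\n", 1.5);
    }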
diff --git a/modules/flann/include/opencv2/flann/lsh_index.h b/modules/flann/include/opencv2/flann/lsh_index.h
new file mode 100644 (file)
index 0000000..a777990
--- /dev/null
@@ -0,0 +1,388 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+/***********************************************************************
+ * Author: Vincent Rabaud
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_LSH_INDEX_H_
+#define OPENCV_FLANN_LSH_INDEX_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <map>
+#include <vector>
+
+#include "general.h"
+#include "nn_index.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "lsh_table.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+
+namespace cvflann
+{
+
+struct LshIndexParams : public IndexParams
+{
+    LshIndexParams(unsigned int table_number, unsigned int key_size, unsigned int multi_probe_level)
+    {
+        (*this)["algorithm"] = FLANN_INDEX_LSH;
+        // The number of hash tables to use
+        (*this)["table_number"] = table_number;
+        // The length of the key in the hash tables
+        (*this)["key_size"] = key_size;
+        // Number of levels to use in multi-probe (0 for standard LSH)
+        (*this)["multi_probe_level"] = multi_probe_level;
+    }
+};
+
+/**
+ * Locality-sensitive hashing (LSH) index
+ *
+ * Contains the LSH hash tables and other information for indexing a set of
+ * points for nearest-neighbor matching.
+ */
+template<typename Distance>
+class LshIndex : public NNIndex<Distance>
+{
+public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
+
+    /** Constructor
+     * @param input_data dataset with the input features
+     * @param params parameters passed to the LSH algorithm
+     * @param d the distance used
+     */
+    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(),
+             Distance d = Distance()) :
+        dataset_(input_data), index_params_(params), distance_(d)
+    {
+        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
+        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
+        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);
+
+        feature_size_ = dataset_.cols;
+        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
+    }
+
+
+    /** Copying is not supported: these are declared but not defined. */
+    LshIndex(const LshIndex&);
+    LshIndex& operator=(const LshIndex&);
+
+    /**
+     * Builds the index
+     */
+    void buildIndex()
+    {
+        tables_.resize(table_number_);
+        for (unsigned int i = 0; i < table_number_; ++i) {
+            lsh::LshTable<ElementType>& table = tables_[i];
+            table = lsh::LshTable<ElementType>(feature_size_, key_size_);
+
+            // Add the features to the table
+            table.add(dataset_);
+        }
+    }
+
+    flann_algorithm_t getType() const
+    {
+        return FLANN_INDEX_LSH;
+    }
+
+
+    void saveIndex(FILE* stream)
+    {
+        save_value(stream,table_number_);
+        save_value(stream,key_size_);
+        save_value(stream,multi_probe_level_);
+        save_value(stream, dataset_);
+    }
+
+    void loadIndex(FILE* stream)
+    {
+        load_value(stream, table_number_);
+        load_value(stream, key_size_);
+        load_value(stream, multi_probe_level_);
+        load_value(stream, dataset_);
+        // Building the index is so fast we can afford not to store it
+        buildIndex();
+
+        index_params_["algorithm"] = getType();
+        index_params_["table_number"] = table_number_;
+        index_params_["key_size"] = key_size_;
+        index_params_["multi_probe_level"] = multi_probe_level_;
+    }
+
+    /**
+     *  Returns size of index.
+     */
+    size_t size() const
+    {
+        return dataset_.rows;
+    }
+
+    /**
+     * Returns the length of an index feature.
+     */
+    size_t veclen() const
+    {
+        return feature_size_;
+    }
+
+    /**
+     * Computes the index memory usage
+     * Returns: memory used by the index
+     */
+    int usedMemory() const
+    {
+        return dataset_.rows * sizeof(int);
+    }
+
+
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
+
+    /**
+     * \brief Perform k-nearest neighbor search
+     * \param[in] queries The query points for which to find the nearest neighbors
+     * \param[out] indices The indices of the nearest neighbors found
+     * \param[out] dists Distances to the nearest neighbors found
+     * \param[in] knn Number of nearest neighbors to return
+     * \param[in] params Search parameters
+     */
+    virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+    {
+        assert(queries.cols == veclen());
+        assert(indices.rows >= queries.rows);
+        assert(dists.rows >= queries.rows);
+        assert(int(indices.cols) >= knn);
+        assert(int(dists.cols) >= knn);
+
+
+        KNNUniqueResultSet<DistanceType> resultSet(knn);
+        for (size_t i = 0; i < queries.rows; i++) {
+            resultSet.clear();
+            findNeighbors(resultSet, queries[i], params);
+            if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn);
+            else resultSet.copy(indices[i], dists[i], knn);
+        }
+    }
+
+
+    /**
+     * Find set of nearest neighbors to vec. Their indices are stored inside
+     * the result object.
+     *
+     * Params:
+     *     result = the result object in which the indices of the nearest-neighbors are stored
+     *     vec = the vector for which to search the nearest neighbors
+     *     searchParams = search parameters (not used by the LSH index)
+     */
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/)
+    {
+        getNeighbors(vec, result);
+    }
+
+private:
+    /** Defines the comparator on score and index
+     */
+    typedef std::pair<float, unsigned int> ScoreIndexPair;
+    struct SortScoreIndexPairOnSecond
+    {
+        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
+        {
+            return left.second < right.second;
+        }
+    };
+
+    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
+     * @param key the key we build neighbors from
+     * @param lowest_index the lowest index of the bit set
+     * @param level the multi-probe level we are at
+     * @param xor_masks all the xor masks
+     */
+    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level,
+                       std::vector<lsh::BucketKey>& xor_masks)
+    {
+        xor_masks.push_back(key);
+        if (level == 0) return;
+        for (int index = lowest_index - 1; index >= 0; --index) {
+            // Create a new key
+            lsh::BucketKey new_key = key | (1 << index);
+            fill_xor_mask(new_key, index, level - 1, xor_masks);
+        }
+    }
+
+    /** Performs the approximate nearest-neighbor search.
+     * @param vec the feature to analyze
+     * @param do_radius flag indicating if we check the radius too
+     * @param radius the radius if it is a radius search
+     * @param do_k flag indicating if we limit the number of nn
+     * @param k_nn the number of nearest neighbors
+     * @param checked_average used for debugging
+     */
+    void getNeighbors(const ElementType* vec, bool do_radius, float radius, bool do_k, unsigned int k_nn,
+                      float& checked_average)
+    {
+        static std::vector<ScoreIndexPair> score_index_heap;
+
+        if (do_k) {
+            unsigned int worst_score = std::numeric_limits<unsigned int>::max();
+            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
+            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
+            for (; table != table_end; ++table) {
+                size_t key = table->getKey(vec);
+                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
+                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
+                for (; xor_mask != xor_mask_end; ++xor_mask) {
+                    size_t sub_key = key ^ (*xor_mask);
+                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
+                    if (bucket == 0) continue;
+
+                    // Go over each descriptor index
+                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
+                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
+                    DistanceType hamming_distance;
+
+                    // Process the rest of the candidates
+                    for (; training_index < last_training_index; ++training_index) {
+                        hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols);
+
+                        if (hamming_distance < worst_score) {
+                            // Insert the new element
+                            score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
+                            std::push_heap(score_index_heap.begin(), score_index_heap.end());
+
+                            if (score_index_heap.size() > (unsigned int)k_nn) {
+                                // Remove the highest distance value as we have too many elements
+                                std::pop_heap(score_index_heap.begin(), score_index_heap.end());
+                                score_index_heap.pop_back();
+                                // Keep track of the worst score
+                                worst_score = score_index_heap.front().first;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        else {
+            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
+            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
+            for (; table != table_end; ++table) {
+                size_t key = table->getKey(vec);
+                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
+                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
+                for (; xor_mask != xor_mask_end; ++xor_mask) {
+                    size_t sub_key = key ^ (*xor_mask);
+                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
+                    if (bucket == 0) continue;
+
+                    // Go over each descriptor index
+                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
+                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
+                    DistanceType hamming_distance;
+
+                    // Process the rest of the candidates
+                    for (; training_index < last_training_index; ++training_index) {
+                        // Compute the Hamming distance
+                        hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols);
+                        if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
+                    }
+                }
+            }
+        }
+    }
+
+    /** Performs the approximate nearest-neighbor search.
+     * This is a slower version than the above as it uses the ResultSet
+     * @param vec the feature to analyze
+     */
+    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result)
+    {
+        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
+        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
+        for (; table != table_end; ++table) {
+            size_t key = table->getKey(vec);
+            std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
+            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
+            for (; xor_mask != xor_mask_end; ++xor_mask) {
+                size_t sub_key = key ^ (*xor_mask);
+                const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
+                if (bucket == 0) continue;
+
+                // Go over each descriptor index
+                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
+                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
+                DistanceType hamming_distance;
+
+                // Process the rest of the candidates
+                for (; training_index < last_training_index; ++training_index) {
+                    // Compute the Hamming distance
+                    hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols);
+                    result.addPoint(hamming_distance, *training_index);
+                }
+            }
+        }
+    }
+
+    /** The different hash tables */
+    std::vector<lsh::LshTable<ElementType> > tables_;
+
+    /** The data the LSH tables where built from */
+    Matrix<ElementType> dataset_;
+
+    /** The size of the features (as ElementType[]) */
+    unsigned int feature_size_;
+
+    IndexParams index_params_;
+
+    /** table number */
+    unsigned int table_number_;
+    /** key size */
+    unsigned int key_size_;
+    /** How far should we look for neighbors in multi-probe LSH */
+    unsigned int multi_probe_level_;
+
+    /** The XOR masks to apply to a key to get the neighboring buckets */
+    std::vector<lsh::BucketKey> xor_masks_;
+
+    Distance distance_;
+};
+}
+
+#endif //OPENCV_FLANN_LSH_INDEX_H_
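
Usage sketch (not part of the patch): building an LshIndex over random binary descriptors with the LshIndexParams defined above. The ToyHamming functor is invented purely for this example; in practice a Hamming distance from dist.h would be used instead.

#include <cstdlib>
#include "opencv2/flann/lsh_index.h"

// Illustration-only distance functor (popcount of XOR), not the one from dist.h.
struct ToyHamming
{
    typedef unsigned char ElementType;
    typedef int ResultType;
    ResultType operator()(const unsigned char* a, const unsigned char* b, size_t n) const
    {
        int dist = 0;
        for (size_t i = 0; i < n; ++i) {
            unsigned char x = (unsigned char)(a[i] ^ b[i]);
            while (x) { dist += x & 1; x = (unsigned char)(x >> 1); }
        }
        return dist;
    }
};

int main()
{
    const size_t rows = 1000, cols = 32;                 // 1000 descriptors, 32 bytes each
    unsigned char* data = new unsigned char[rows*cols];
    for (size_t i = 0; i < rows*cols; ++i) data[i] = (unsigned char)(std::rand() & 255);

    cvflann::Matrix<unsigned char> dataset(data, rows, cols);

    // 12 hash tables, 20-bit keys, multi-probe level 2 (the parameters documented above)
    cvflann::LshIndex<ToyHamming> index(dataset, cvflann::LshIndexParams(12, 20, 2));
    index.buildIndex();

    // index.knnSearch(...) can now be called with query/indices/dists matrices
    delete[] data;
    return 0;
}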
diff --git a/modules/flann/include/opencv2/flann/lsh_table.h b/modules/flann/include/opencv2/flann/lsh_table.h
new file mode 100644 (file)
index 0000000..cf39993
--- /dev/null
@@ -0,0 +1,477 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+/***********************************************************************
+ * Author: Vincent Rabaud
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_LSH_TABLE_H_
+#define OPENCV_FLANN_LSH_TABLE_H_
+
+#include <algorithm>
+#include <iostream>
+#include <iomanip>
+#include <limits.h>
+// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP
+#if USE_UNORDERED_MAP
+#include <unordered_map>
+#else
+#include <map>
+#endif
+#include <math.h>
+#include <stdint.h>
+
+#include "dynamic_bitset.h"
+#include "matrix.h"
+
+namespace cvflann
+{
+
+namespace lsh
+{
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** What is stored in an LSH bucket
+ */
+typedef uint32_t FeatureIndex;
+/** The id from which we can get a bucket back in an LSH table
+ */
+typedef unsigned int BucketKey;
+
+/** A bucket in an LSH table
+ */
+typedef std::vector<FeatureIndex> Bucket;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** POD for stats about an LSH table
+ */
+struct LshStats
+{
+    std::vector<unsigned int> bucket_sizes_;
+    size_t n_buckets_;
+    size_t bucket_size_mean_;
+    size_t bucket_size_median_;
+    size_t bucket_size_min_;
+    size_t bucket_size_max_;
+    size_t bucket_size_std_dev;
+     * Each contained vector contains three values: the beginning and end of the size interval, and the number of elements in the bin
+     */
+    std::vector<std::vector<unsigned int> > size_histogram_;
+};
+
+/** Overload the << operator for LshStats
+ * @param out the streams
+ * @param stats the stats to display
+ * @return the streams
+ */
+inline std::ostream& operator <<(std::ostream& out, const LshStats& stats)
+{
+    size_t w = 20;
+    out << "Lsh Table Stats:\n" << std::setw(w) << std::setiosflags(std::ios::right) << "N buckets : "
+    << stats.n_buckets_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "mean size : "
+    << std::setiosflags(std::ios::left) << stats.bucket_size_mean_ << "\n" << std::setw(w)
+    << std::setiosflags(std::ios::right) << "median size : " << stats.bucket_size_median_ << "\n" << std::setw(w)
+    << std::setiosflags(std::ios::right) << "min size : " << std::setiosflags(std::ios::left)
+    << stats.bucket_size_min_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "max size : "
+    << std::setiosflags(std::ios::left) << stats.bucket_size_max_;
+
+    // Display the histogram
+    out << std::endl << std::setw(w) << std::setiosflags(std::ios::right) << "histogram : "
+    << std::setiosflags(std::ios::left);
+    for (std::vector<std::vector<unsigned int> >::const_iterator iterator = stats.size_histogram_.begin(), end =
+             stats.size_histogram_.end(); iterator != end; ++iterator) out << (*iterator)[0] << "-" << (*iterator)[1] << ": " << (*iterator)[2] << ",  ";
+
+    return out;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** LSH hash table. As its key is a sub-feature, and as that sub-feature is
+ * usually quite small, we keep the key as a contiguous memory array.
+ * The value is an index into the corpus of features (kept as an unsigned
+ * int purely for memory reasons; it could be a size_t)
+ */
+template<typename ElementType>
+class LshTable
+{
+public:
+    /** A container of all the feature indices. Optimized for space
+     */
+#if USE_UNORDERED_MAP
+    typedef std::unordered_map<BucketKey, Bucket> BucketsSpace;
+#else
+    typedef std::map<BucketKey, Bucket> BucketsSpace;
+#endif
+
+    /** A container of all the feature indices. Optimized for speed
+     */
+    typedef std::vector<Bucket> BucketsSpeed;
+
+    /** Default constructor
+     */
+    LshTable()
+    {
+    }
+
+    /** Constructor
+     * Creates the mask and allocates the memory
+     * @param feature_size is the size of the feature (considered as an ElementType[])
+     * @param key_size is the number of bits that are turned on in the feature
+     */
+    LshTable(unsigned int /*feature_size*/, unsigned int /*key_size*/)
+    {
+        std::cerr << "LSH is not implemented for that type" << std::endl;
+        throw;
+    }
+
+    /** Add a feature to the table
+     * @param value the value to store for that feature
+     * @param feature the feature itself
+     */
+    void add(unsigned int value, const ElementType* feature)
+    {
+        // Add the value to the corresponding bucket
+        BucketKey key = getKey(feature);
+
+        switch (speed_level_) {
+        case kArray:
+            // That means we get the buckets from an array
+            buckets_speed_[key].push_back(value);
+            break;
+        case kBitsetHash:
+            // That means we can check the bitset for the presence of a key
+            key_bitset_.set(key);
+            buckets_space_[key].push_back(value);
+            break;
+        case kHash:
+        {
+            // That means we have to check for the hash table for the presence of a key
+            buckets_space_[key].push_back(value);
+            break;
+        }
+        }
+    }
+
+    /** Add a set of features to the table
+     * @param dataset the values to store
+     */
+    void add(Matrix<ElementType> dataset)
+    {
+#if USE_UNORDERED_MAP
+        if (!use_speed_) buckets_space_.rehash((buckets_space_.size() + dataset.rows) * 1.2);
+#endif
+        // Add the features to the table
+        for (unsigned int i = 0; i < dataset.rows; ++i) add(i, dataset[i]);
+        // Now that the table is full, optimize it for speed/space
+        optimize();
+    }
+
+    /** Get a bucket given the key
+     * @param key
+     * @return
+     */
+    inline const Bucket* getBucketFromKey(BucketKey key) const
+    {
+        // Generate other buckets
+        switch (speed_level_) {
+        case kArray:
+            // That means we get the buckets from an array
+            return &buckets_speed_[key];
+            break;
+        case kBitsetHash:
+            // That means we can check the bitset for the presence of a key
+            if (key_bitset_.test(key)) return &buckets_space_.at(key);
+            else return 0;
+            break;
+        case kHash:
+        {
+            // That means we have to check for the hash table for the presence of a key
+            BucketsSpace::const_iterator bucket_it, bucket_end = buckets_space_.end();
+            bucket_it = buckets_space_.find(key);
+            // Stop here if that bucket does not exist
+            if (bucket_it == bucket_end) return 0;
+            else return &bucket_it->second;
+            break;
+        }
+        }
+        return 0;
+    }
+
+    /** Compute the sub-signature of a feature
+     */
+    size_t getKey(const ElementType* /*feature*/) const
+    {
+        std::cerr << "LSH is not implemented for that type" << std::endl;
+        throw;
+        return 1;
+    }
+
+    /** Get statistics about the table
+     * @return
+     */
+    LshStats getStats() const;
+
+private:
+    /** defines the speed of the implementation
+     * kArray uses a vector for storing data
+     * kBitsetHash uses a hash map but checks for the validity of a key with a bitset
+     * kHash uses a hash map only
+     */
+    enum SpeedLevel
+    {
+        kArray, kBitsetHash, kHash
+    };
+
+    /** Initialize some variables
+     */
+    void initialize(size_t key_size)
+    {
+        speed_level_ = kHash;
+        key_size_ = key_size;
+    }
+
+    /** Optimize the table for speed/space
+     */
+    void optimize()
+    {
+        // If we are already using the fast storage, no need to do anything
+        if (speed_level_ == kArray) return;
+
+        // Use an array if it will be more than half full
+        if (buckets_space_.size() > (unsigned int)((1 << key_size_) / 2)) {
+            speed_level_ = kArray;
+            // Fill the array version of it
+            buckets_speed_.resize(1 << key_size_);
+            for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) buckets_speed_[key_bucket->first] = key_bucket->second;
+
+            // Empty the hash table
+            buckets_space_.clear();
+            return;
+        }
+
+        // If the bitset is going to use less than 10% of the RAM of the hash map (at least 1 size_t for the key and two
+        // for the vector) or less than 512MB (key_size_ <= 32)
+        if (((std::max(buckets_space_.size(), buckets_speed_.size()) * CHAR_BIT * 3 * sizeof(BucketKey)) / 10
+             >= size_t(1 << key_size_)) || (key_size_ <= 32)) {
+            speed_level_ = kBitsetHash;
+            key_bitset_.resize(1 << key_size_);
+            key_bitset_.reset();
+            // Try with the BucketsSpace
+            for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) key_bitset_.set(key_bucket->first);
+        }
+        else {
+            speed_level_ = kHash;
+            key_bitset_.clear();
+        }
+    }
+
+    /** The vector of all the buckets if they are held for speed
+     */
+    BucketsSpeed buckets_speed_;
+
+    /** The hash table of all the buckets in case we cannot use the speed version
+     */
+    BucketsSpace buckets_space_;
+
+    /** What is used to store the data */
+    SpeedLevel speed_level_;
+
+    /** If the subkey is small enough, it will keep track of which subkeys are set through that bitset
+     * That is just a speedup so that we don't look in the hash table (which can be much slower than checking a bitset)
+     */
+    DynamicBitset key_bitset_;
+
+    /** The size of the sub-signature in bits
+     */
+    unsigned int key_size_;
+
+    // Members only used for the unsigned char specialization
+    /** The mask to apply to a feature to get the hash key
+     * Only used in the unsigned char case
+     */
+    std::vector<size_t> mask_;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Specialization for unsigned char
+
+template<>
+inline LshTable<unsigned char>::LshTable(unsigned int feature_size, unsigned int subsignature_size)
+{
+    initialize(subsignature_size);
+    // Allocate the mask
+    mask_ = std::vector<size_t>((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0);
+
+    // A bit brutal but fast to code
+    std::vector<size_t> indices(feature_size * CHAR_BIT);
+    for (size_t i = 0; i < feature_size * CHAR_BIT; ++i) indices[i] = i;
+    std::random_shuffle(indices.begin(), indices.end());
+
+    // Generate a random set of order of subsignature_size_ bits
+    for (unsigned int i = 0; i < key_size_; ++i) {
+        size_t index = indices[i];
+
+        // Set that bit in the mask
+        size_t divisor = CHAR_BIT * sizeof(size_t);
+        size_t idx = index / divisor; //pick the right size_t index
+        mask_[idx] |= size_t(1) << (index % divisor); //use modulo to find the bit offset
+    }
+
+    // Set to 1 if you want to display the mask for debug
+#if 0
+    {
+        size_t bcount = 0;
+        BOOST_FOREACH(size_t mask_block, mask_){
+            out << std::setw(sizeof(size_t) * CHAR_BIT / 4) << std::setfill('0') << std::hex << mask_block
+                << std::endl;
+            bcount += __builtin_popcountll(mask_block);
+        }
+        out << "bit count : " << std::dec << bcount << std::endl;
+        out << "mask size : " << mask_.size() << std::endl;
+        return out;
+    }
+#endif
+}
+
+/** Return the sub-signature of a feature
+ * @param feature the feature to analyze
+ */
+template<>
+inline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) const
+{
+    // no need to check if T is divisible by sizeof(size_t) like in the Hamming
+    // distance computation as we have a mask
+    const size_t* feature_block_ptr = reinterpret_cast<const size_t*> (feature);
+
+    // Figure out the subsignature of the feature
+    // Given the feature ABCDEF, and the mask 001011, the output will be
+    // 000CEF
+    size_t subsignature = 0;
+    size_t bit_index = 1;
+
+    for (std::vector<size_t>::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) {
+        // get the mask and signature blocks
+        size_t feature_block = *feature_block_ptr;
+        size_t mask_block = *pmask_block;
+        while (mask_block) {
+            // Get the lowest set bit in the mask block
+            size_t lowest_bit = mask_block & (-mask_block);
+            // Add it to the current subsignature if necessary
+            subsignature += (feature_block & lowest_bit) ? bit_index : 0;
+            // Reset the bit in the mask block
+            mask_block ^= lowest_bit;
+            // increment the bit index for the subsignature
+            bit_index <<= 1;
+        }
+        // Check the next feature block
+        ++feature_block_ptr;
+    }
+    return subsignature;
+}
+
+template<>
+inline LshStats LshTable<unsigned char>::getStats() const
+{
+    LshStats stats;
+    stats.bucket_size_mean_ = 0;
+    if ((buckets_speed_.empty()) && (buckets_space_.empty())) {
+        stats.n_buckets_ = 0;
+        stats.bucket_size_median_ = 0;
+        stats.bucket_size_min_ = 0;
+        stats.bucket_size_max_ = 0;
+        return stats;
+    }
+
+    if (!buckets_speed_.empty()) {
+        for (BucketsSpeed::const_iterator pbucket = buckets_speed_.begin(); pbucket != buckets_speed_.end(); ++pbucket) {
+            stats.bucket_sizes_.push_back(pbucket->size());
+            stats.bucket_size_mean_ += pbucket->size();
+        }
+        stats.bucket_size_mean_ /= buckets_speed_.size();
+        stats.n_buckets_ = buckets_speed_.size();
+    }
+    else {
+        for (BucketsSpace::const_iterator x = buckets_space_.begin(); x != buckets_space_.end(); ++x) {
+            stats.bucket_sizes_.push_back(x->second.size());
+            stats.bucket_size_mean_ += x->second.size();
+        }
+        stats.bucket_size_mean_ /= buckets_space_.size();
+        stats.n_buckets_ = buckets_space_.size();
+    }
+
+    std::sort(stats.bucket_sizes_.begin(), stats.bucket_sizes_.end());
+
+    //  BOOST_FOREACH(int size, stats.bucket_sizes_)
+    //          std::cout << size << " ";
+    //  std::cout << std::endl;
+    stats.bucket_size_median_ = stats.bucket_sizes_[stats.bucket_sizes_.size() / 2];
+    stats.bucket_size_min_ = stats.bucket_sizes_.front();
+    stats.bucket_size_max_ = stats.bucket_sizes_.back();
+
+    // TODO compute mean and std
+    /*float mean, stddev;
+       stats.bucket_size_mean_ = mean;
+       stats.bucket_size_std_dev = stddev;*/
+
+    // Include a histogram of the buckets
+    unsigned int bin_start = 0;
+    unsigned int bin_end = 20;
+    bool is_new_bin = true;
+    for (std::vector<unsigned int>::iterator iterator = stats.bucket_sizes_.begin(), end = stats.bucket_sizes_.end(); iterator
+         != end; )
+        if (*iterator < bin_end) {
+            if (is_new_bin) {
+                stats.size_histogram_.push_back(std::vector<unsigned int>(3, 0));
+                stats.size_histogram_.back()[0] = bin_start;
+                stats.size_histogram_.back()[1] = bin_end - 1;
+                is_new_bin = false;
+            }
+            ++stats.size_histogram_.back()[2];
+            ++iterator;
+        }
+        else {
+            bin_start += 20;
+            bin_end += 20;
+            is_new_bin = true;
+        }
+
+    return stats;
+}
+
+// End the two namespaces
+}
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif /* OPENCV_FLANN_LSH_TABLE_H_ */
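
A self-contained sketch (not part of the patch) of the bit-compaction technique used by LshTable<unsigned char>::getKey() above: bits selected by the mask are packed into the low bits of the key by repeatedly peeling off the lowest set bit of the mask.

#include <cstdio>

int main()
{
    unsigned int feature = 0xB5;   // 1011 0101
    unsigned int mask    = 0x29;   // 0010 1001 -> selects bits 0, 3 and 5
    unsigned int subsignature = 0;
    unsigned int bit_index = 1;

    while (mask) {
        unsigned int lowest_bit = mask & (~mask + 1u);   // lowest set bit of the mask
        if (feature & lowest_bit) subsignature += bit_index;
        mask ^= lowest_bit;   // clear that bit in the mask
        bit_index <<= 1;      // move to the next output bit
    }

    // bits 0, 3, 5 of 0xB5 are 1, 0, 1 -> key 0b101 = 5
    std::printf("subsignature = %u\n", subsignature);
    return 0;
}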
index 170dc6d..51b6c63 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_DATASET_H_
-#define _OPENCV_DATASET_H_
+#ifndef OPENCV_FLANN_DATASET_H_
+#define OPENCV_FLANN_DATASET_H_
 
 #include <stdio.h>
 
-#include "opencv2/flann/general.h"
+#include "general.h"
 
-
-namespace cvflann 
+namespace cvflann
 {
 
 /**
-* Class that implements a simple rectangular matrix stored in a memory buffer and
-* provides convenient matrix-like access using the [] operators.
-*/
+ * Class that implements a simple rectangular matrix stored in a memory buffer and
+ * provides convenient matrix-like access using the [] operators.
+ */
 template <typename T>
-class Matrix {
+class Matrix
+{
 public:
+    typedef T type;
+
     size_t rows;
     size_t cols;
+    size_t stride;
     T* data;
 
-    Matrix() : rows(0), cols(0), data(NULL)
+    Matrix() : rows(0), cols(0), stride(0), data(NULL)
     {
     }
 
-    Matrix(T* data_, long rows_, long cols_) :
-        rows(rows_), cols(cols_), data(data_)
-       {
-       }
+    Matrix(T* data_, size_t rows_, size_t cols_, size_t stride_ = 0) :
+        rows(rows_), cols(cols_),  stride(stride_), data(data_)
+    {
+        if (stride==0) stride = cols;
+    }
 
     /**
      * Convenience function for deallocating the storage data.
      */
-    void release()
+    FLANN_DEPRECATED void free()
     {
-        if (data!=NULL) delete[] data;
+        fprintf(stderr, "The cvflann::Matrix<T>::free() method is deprecated "
+                "and it does not do any memory deallocation any more.  You are "
+                "responsible for deallocating the matrix memory (by doing "
+                "'delete[] matrix.data' for example)");
     }
 
-       ~Matrix()
-       {
-       }
-
     /**
-    * Operator that return a (pointer to a) row of the data.
-    */
-    T* operator[](size_t index)
-    {
-        return data+index*cols;
-    }
-
+     * Operator that return a (pointer to a) row of the data.
+     */
     T* operator[](size_t index) const
     {
-        return data+index*cols;
+        return data+index*stride;
     }
 };
 
@@ -95,7 +93,7 @@ public:
     flann_datatype_t type;
 
     UntypedMatrix(void* data_, long rows_, long cols_) :
-        rows(rows_), cols(cols_), data(data_)
+        rows(rows_), cols(cols_), data(data_)
     {
     }
 
@@ -113,6 +111,6 @@ public:
 
 
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_DATASET_H_
+#endif //OPENCV_FLANN_DATASET_H_
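
A short sketch (not part of the patch) of the updated Matrix class: the new stride member (measured in elements) lets a Matrix act as a view over rows that are wider than cols. The buffer here is example data.

#include "opencv2/flann/matrix.h"

int main()
{
    float buffer[4 * 8];                            // 4 rows of 8 floats
    for (int i = 0; i < 4 * 8; ++i) buffer[i] = (float)i;

    // View only the first 5 columns of each row; the row stride stays 8.
    cvflann::Matrix<float> view(buffer, 4, 5, 8);

    float v = view[1][0];                           // == buffer[8] == 8.0f
    (void)v;
    return 0;
}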
index 081f9af..da4dd7f 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_NNINDEX_H_
-#define _OPENCV_NNINDEX_H_
+#ifndef FLANN_NNINDEX_H
+#define FLANN_NNINDEX_H
 
 #include <string>
 
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/matrix.h"
+#include "general.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "params.h"
 
 namespace cvflann
 {
 
-
-template <typename ELEM_TYPE>
-class ResultSet;
-
 /**
-* Nearest-neighbour index base class
-*/
-template <typename ELEM_TYPE>
+ * Nearest-neighbour index base class
+ */
+template <typename Distance>
 class NNIndex
 {
-public:
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
 
-       virtual ~NNIndex() {};
-
-       /**
-       Method responsible with building the index.
-       */
-       virtual void buildIndex() = 0;
-
-       /**
-       Saves the index to a stream
-       */
-       virtual void saveIndex(FILE* stream) = 0;
-
-       /**
-       Loads the index from a stream
-       */
-       virtual void loadIndex(FILE* stream) = 0;
-
-       /**
-       Method that searches for nearest-neighbors
-       */
-       virtual void findNeighbors(ResultSet<ELEM_TYPE>& result, const ELEM_TYPE* vec, const SearchParams& searchParams) = 0;
-
-       /**
-       Number of features in this index.
-       */
-       virtual size_t size() const = 0;
-
-       /**
-       The length of each vector in this index.
-       */
-       virtual size_t veclen() const = 0;
-
-       /**
-       The amount of memory (in bytes) this index uses.
-       */
-       virtual int usedMemory() const = 0;
-
-       /**
-       * Algorithm name
-       */
-       virtual flann_algorithm_t getType() const = 0;
-
-       /**
-        * Returns the parameters used for the index
-        */
-       virtual const IndexParams* getParameters() const = 0;
+public:
 
+    virtual ~NNIndex() {}
+
+    /**
+     * \brief Builds the index
+     */
+    virtual void buildIndex() = 0;
+
+    /**
+     * \brief Perform k-nearest neighbor search
+     * \param[in] queries The query points for which to find the nearest neighbors
+     * \param[out] indices The indices of the nearest neighbors found
+     * \param[out] dists Distances to the nearest neighbors found
+     * \param[in] knn Number of nearest neighbors to return
+     * \param[in] params Search parameters
+     */
+    virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+    {
+        assert(queries.cols == veclen());
+        assert(indices.rows >= queries.rows);
+        assert(dists.rows >= queries.rows);
+        assert(int(indices.cols) >= knn);
+        assert(int(dists.cols) >= knn);
+
+#if 0
+        KNNResultSet<DistanceType> resultSet(knn);
+        for (size_t i = 0; i < queries.rows; i++) {
+            resultSet.init(indices[i], dists[i]);
+            findNeighbors(resultSet, queries[i], params);
+        }
+#else
+        KNNUniqueResultSet<DistanceType> resultSet(knn);
+        for (size_t i = 0; i < queries.rows; i++) {
+            resultSet.clear();
+            findNeighbors(resultSet, queries[i], params);
+            if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn);
+            else resultSet.copy(indices[i], dists[i], knn);
+        }
+#endif
+    }
+
+    /**
+     * \brief Perform radius search
+     * \param[in] query The query point
+     * \param[out] indices The indices of the neighbors found within the given radius
+     * \param[out] dists The distances to the nearest neighbors found
+     * \param[in] radius The radius used for search
+     * \param[in] params Search parameters
+     * \returns Number of neighbors found
+     */
+    virtual int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)
+    {
+        if (query.rows != 1) {
+            fprintf(stderr, "I can only search one feature at a time for range search\n");
+            return -1;
+        }
+        assert(query.cols == veclen());
+        assert(indices.cols == dists.cols);
+
+        int n = 0;
+        int* indices_ptr = NULL;
+        DistanceType* dists_ptr = NULL;
+        if (indices.cols > 0) {
+            n = indices.cols;
+            indices_ptr = indices[0];
+            dists_ptr = dists[0];
+        }
+
+        RadiusUniqueResultSet<DistanceType> resultSet(radius);
+        resultSet.clear();
+        findNeighbors(resultSet, query[0], params);
+        if (n>0) {
+            if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices_ptr, dists_ptr, n);
+            else resultSet.copy(indices_ptr, dists_ptr, n);
+        }
+
+        return resultSet.size();
+    }
+
+    /**
+     * \brief Saves the index to a stream
+     * \param stream The stream to save the index to
+     */
+    virtual void saveIndex(FILE* stream) = 0;
+
+    /**
+     * \brief Loads the index from a stream
+     * \param stream The stream from which the index is loaded
+     */
+    virtual void loadIndex(FILE* stream) = 0;
+
+    /**
+     * \returns number of features in this index.
+     */
+    virtual size_t size() const = 0;
+
+    /**
+     * \returns The dimensionality of the features in this index.
+     */
+    virtual size_t veclen() const = 0;
+
+    /**
+     * \returns The amount of memory (in bytes) used by the index.
+     */
+    virtual int usedMemory() const = 0;
+
+    /**
+     * \returns The index type (kdtree, kmeans,...)
+     */
+    virtual flann_algorithm_t getType() const = 0;
+
+    /**
+     * \returns The index parameters
+     */
+    virtual IndexParams getParameters() const = 0;
+
+
+    /**
+     * \brief Method that searches for nearest-neighbours
+     */
+    virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) = 0;
 };
 
+}
 
-} // namespace cvflann
-
-#endif //_OPENCV_NNINDEX_H_
+#endif //FLANN_NNINDEX_H
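
A sketch (not part of the patch) of the calling convention expected by the new matrix-based knnSearch() above: one row per query and at least knn columns in the output matrices. Any concrete NNIndex<Distance> could be passed in; constructing one is out of scope here.

#include "opencv2/flann/nn_index.h"

template<typename Distance>
void run_knn(cvflann::NNIndex<Distance>& index,
             const cvflann::Matrix<typename Distance::ElementType>& queries, int knn)
{
    typedef typename Distance::ResultType DistanceType;

    // One row per query, knn columns; the caller owns the buffers.
    cvflann::Matrix<int> indices(new int[queries.rows * knn], queries.rows, knn);
    cvflann::Matrix<DistanceType> dists(new DistanceType[queries.rows * knn], queries.rows, knn);

    index.knnSearch(queries, indices, dists, knn, cvflann::SearchParams(32));

    // ... consume indices[i][j] / dists[i][j] here ...

    delete[] indices.data;
    delete[] dists.data;
}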
index 5c51e0d..7f971c5 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_OBJECT_FACTORY_H_
-#define _OPENCV_OBJECT_FACTORY_H_
+#ifndef OPENCV_FLANN_OBJECT_FACTORY_H_
+#define OPENCV_FLANN_OBJECT_FACTORY_H_
 
-#include "opencv2/core/types_c.h"
 #include <map>
 
 namespace cvflann
 {
 
-template<typename BaseClass, typename DerivedClass>
-BaseClass* createObject()
+class CreatorNotFound
 {
-       return new DerivedClass();
-}
+};
 
-template<typename BaseClass, typename UniqueIdType>
+template<typename BaseClass,
+         typename UniqueIdType,
+         typename ObjectCreator = BaseClass* (*)()>
 class ObjectFactory
 {
-       typedef BaseClass* (*CreateObjectFunc)();
-       std::map<UniqueIdType, CreateObjectFunc> object_registry;
+    typedef ObjectFactory<BaseClass,UniqueIdType,ObjectCreator> ThisClass;
+    typedef std::map<UniqueIdType, ObjectCreator> ObjectRegistry;
 
-       // singleton class, private constructor
-       //ObjectFactory() {};
+    // singleton class, private constructor
+    ObjectFactory() {}
 
 public:
-   typedef typename std::map<UniqueIdType, CreateObjectFunc>::iterator Iterator;
-
-
-   template<typename DerivedClass>
-   bool register_(UniqueIdType id)
-   {
-      if (object_registry.find(id) != object_registry.end())
-               return false;
-
-      object_registry[id] = &createObject<BaseClass, DerivedClass>;
-      return true;
-   }
-
-   bool unregister(UniqueIdType id)
-   {
-      return (object_registry.erase(id) == 1);
-   }
-
-   BaseClass* create(UniqueIdType id)
-   {
-      Iterator iter = object_registry.find(id);
-
-      if (iter == object_registry.end())
-         return NULL;
-
-      return ((*iter).second)();
-   }
-
-   /*static ObjectFactory<BaseClass,UniqueIdType>& instance()
-   {
-          static ObjectFactory<BaseClass,UniqueIdType> the_factory;
-          return the_factory;
-   }*/
 
+    bool subscribe(UniqueIdType id, ObjectCreator creator)
+    {
+        if (object_registry.find(id) != object_registry.end()) return false;
+
+        object_registry[id] = creator;
+        return true;
+    }
+
+    bool unregister(UniqueIdType id)
+    {
+        return object_registry.erase(id) == 1;
+    }
+
+    ObjectCreator create(UniqueIdType id)
+    {
+        typename ObjectRegistry::const_iterator iter = object_registry.find(id);
+
+        if (iter == object_registry.end()) {
+            throw CreatorNotFound();
+        }
+
+        return iter->second;
+    }
+
+    static ThisClass& instance()
+    {
+        static ThisClass the_factory;
+        return the_factory;
+    }
+private:
+    ObjectRegistry object_registry;
 };
 
-} // namespace cvflann
+}
 
-#endif /* OBJECT_FACTORY_H_ */
+#endif /* OPENCV_FLANN_OBJECT_FACTORY_H_ */
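
Usage sketch (not part of the patch) for the reworked ObjectFactory, which is now a singleton that stores arbitrary creator callables under a key. The Shape/Circle types and the make_circle creator are invented for illustration.

#include "opencv2/flann/object_factory.h"

struct Shape { virtual ~Shape() {} };
struct Circle : public Shape {};

Shape* make_circle() { return new Circle(); }

int main()
{
    typedef cvflann::ObjectFactory<Shape, int> ShapeFactory;

    ShapeFactory::instance().subscribe(1, &make_circle);

    // create() returns the registered creator; calling it yields the object.
    Shape* s = ShapeFactory::instance().create(1)();
    delete s;

    ShapeFactory::instance().unregister(1);
    return 0;
}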
diff --git a/modules/flann/include/opencv2/flann/params.h b/modules/flann/include/opencv2/flann/params.h
new file mode 100644 (file)
index 0000000..9f3a468
--- /dev/null
@@ -0,0 +1,97 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_PARAMS_H_
+#define OPENCV_FLANN_PARAMS_H_
+
+#include "any.h"
+#include "general.h"
+#include <iostream>
+#include <map>
+
+
+namespace cvflann
+{
+
+typedef cdiggins::any any;
+typedef std::map<std::string, any> IndexParams;
+
+struct SearchParams : public IndexParams
+{
+    SearchParams(int checks = 32, float eps = 0, bool sorted = true )
+    {
+        // how many leaves to visit when searching for neighbours (-1 for unlimited)
+        (*this)["checks"] = checks;
+        // search for eps-approximate neighbours (default: 0)
+        (*this)["eps"] = eps;
+        // only for radius search, require neighbours sorted by distance (default: true)
+        (*this)["sorted"] = sorted;
+    }
+};
+
+
+template<typename T>
+T get_param(const IndexParams& params, std::string name, const T& default_value)
+{
+    IndexParams::const_iterator it = params.find(name);
+    if (it != params.end()) {
+        return it->second.cast<T>();
+    }
+    else {
+        return default_value;
+    }
+}
+
+template<typename T>
+T get_param(const IndexParams& params, std::string name)
+{
+    IndexParams::const_iterator it = params.find(name);
+    if (it != params.end()) {
+        return it->second.cast<T>();
+    }
+    else {
+        throw FLANNException(std::string("Missing parameter '")+name+std::string("' in the parameters given"));
+    }
+}
+
+inline void print_params(const IndexParams& params)
+{
+    IndexParams::const_iterator it;
+
+    for(it=params.begin(); it!=params.end(); ++it) {
+        std::cout << it->first << " : " << it->second << std::endl;
+    }
+}
+
+
+
+}
+
+
+#endif /* OPENCV_FLANN_PARAMS_H_ */
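
A short sketch (not part of the patch) of the parameter machinery defined above: parameters are "any" values keyed by name, and get_param() either falls back to a supplied default or throws FLANNException when the key is missing.

#include "opencv2/flann/params.h"

int main()
{
    cvflann::SearchParams params(64 /*checks*/, 0.f /*eps*/, true /*sorted*/);
    params["custom_flag"] = 7;                                         // any value type can be stored

    int checks   = cvflann::get_param<int>(params, "checks");          // -> 64
    bool sorted  = cvflann::get_param<bool>(params, "sorted", false);  // stored value wins over the default
    int missing  = cvflann::get_param<int>(params, "not_there", -1);   // default is used

    cvflann::print_params(params);
    (void)checks; (void)sorted; (void)missing;
    return 0;
}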
index d29a123..b702807 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_RANDOM_H_
-#define _OPENCV_RANDOM_H_
+#ifndef FLANN_RANDOM_H
+#define FLANN_RANDOM_H
 
 #include <algorithm>
 #include <cstdlib>
-#include <cassert>
+#include <vector>
 
+#include "general.h"
 
 namespace cvflann
 {
 
 /**
  * Seeds the random number generator
+ *  @param seed Random seed
  */
-CV_EXPORTS void seed_random(unsigned int seed);
+inline void seed_random(unsigned int seed)
+{
+    srand(seed);
+}
 
 /*
  * Generates a random double value.
  */
-CV_EXPORTS double rand_double(double high = 1.0, double low=0);
+/**
+ * Generates a random double value.
+ * @param high Upper limit
+ * @param low Lower limit
+ * @return Random double value
+ */
+inline double rand_double(double high = 1.0, double low = 0)
+{
+    return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0)));
+}
 
-/*
+/**
  * Generates a random integer value.
+ * @param high Upper limit
+ * @param low Lower limit
+ * @return Random integer value
  */
-CV_EXPORTS int rand_int(int high = RAND_MAX, int low = 0);
-
+inline int rand_int(int high = RAND_MAX, int low = 0)
+{
+    return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0)));
+}
 
 /**
  * Random number generator that returns a distinct number from
  * the [0,n) interval each time.
- *
- * TODO: improve on this to use a generator function instead of an
- * array of randomly permuted numbers
  */
-class CV_EXPORTS UniqueRandom
+class UniqueRandom
 {
-       int* vals;
-    int size;
-       int counter;
+    std::vector<int> vals_;
+    int size_;
+    int counter_;
 
 public:
-       /**
-        * Constructor.
-        * Params:
-        *     n = the size of the interval from which to generate
-        *              random numbers.
-        */
-       UniqueRandom(int n) : vals(NULL) {
-               init(n);
-       }
-
-       ~UniqueRandom()
-       {
-               delete[] vals;
-       }
-
-       /**
-        * Initializes the number generator.
-        * Params:
-        *              n = the size of the interval from which to generate
-        *              random numbers.
-        */
-       void init(int n)
-       {
-       // create and initialize an array of size n
-               if (vals == NULL || n!=size) {
-            delete[] vals;
-               size = n;
-            vals = new int[size];
-       }
-       for(int i=0;i<size;++i) {
-                       vals[i] = i;
-               }
-
-               // shuffle the elements in the array
-        // Fisher-Yates shuffle
-               for (int i=size;i>0;--i) {
-//                     int rand = cast(int) (drand48() * n);
-                       int rnd = rand_int(i);
-                       assert(rnd >=0 && rnd < i);
-            std::swap(vals[i-1], vals[rnd]);
-               }
-
-               counter = 0;
-       }
-
-       /**
-        * Return a distinct random integer in greater or equal to 0 and less
-        * than 'n' on each call. It should be called maximum 'n' times.
-        * Returns: a random integer
-        */
-       int next() {
-               if (counter==size) {
-                       return -1;
-               } else {
-                       return vals[counter++];
-               }
-       }
+    /**
+     * Constructor.
+     * @param n Size of the interval from which to generate random numbers
+     */
+    UniqueRandom(int n)
+    {
+        init(n);
+    }
+
+    /**
+     * Initializes the number generator.
+     * @param n the size of the interval from which to generate random numbers.
+     */
+    void init(int n)
+    {
+        // create and initialize an array of size n
+        vals_.resize(n);
+        size_ = n;
+        for (int i = 0; i < size_; ++i) vals_[i] = i;
+
+        // shuffle the elements in the array
+        std::random_shuffle(vals_.begin(), vals_.end());
+
+        counter_ = 0;
+    }
+
+    /**
+     * Return a distinct random integer greater than or equal to 0 and less
+     * than 'n' on each call. It should be called at most 'n' times.
+     * Returns: a random integer
+     */
+    int next()
+    {
+        if (counter_ == size_) {
+            return -1;
+        }
+        else {
+            return vals_[counter_++];
+        }
+    }
 };
 
-} // namespace cvflann
+}
+
+#endif //FLANN_RANDOM_H
+
 
-#endif //_OPENCV_RANDOM_H_
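
A minimal sketch (not part of the patch) of the header-only random utilities above: UniqueRandom hands out every value in [0, n) exactly once, in shuffled order, then returns -1.

#include <cstdio>
#include "opencv2/flann/random.h"

int main()
{
    cvflann::seed_random(42);           // seeds rand(), used by rand_int()/rand_double()

    cvflann::UniqueRandom r(5);
    for (int i = 0; i < 6; ++i) {
        std::printf("%d ", r.next());   // five distinct values from 0..4, then -1
    }
    std::printf("\n");
    return 0;
}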
index 5b1a8e2..047466f 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_RESULTSET_H_
-#define _OPENCV_RESULTSET_H_
-
+#ifndef FLANN_RESULTSET_H
+#define FLANN_RESULTSET_H
 
 #include <algorithm>
+#include <cstring>
+#include <iostream>
 #include <limits>
+#include <set>
 #include <vector>
-#include "opencv2/flann/dist.h"
-
 
 namespace cvflann
 {
 
 /* This record represents a branch point when finding neighbors in
-       the tree.  It contains a record of the minimum distance to the query
-       point, as well as the node at which the search resumes.
-*/
+    the tree.  It contains a record of the minimum distance to the query
+    point, as well as the node at which the search resumes.
+ */
 
-template <typename T>
-struct BranchStruct {
-       T node;           /* Tree node at which search resumes */
-       float mindistsq;     /* Minimum distance to query for all nodes below. */
+template <typename T, typename DistanceType>
+struct BranchStruct
+{
+    T node;           /* Tree node at which search resumes */
+    DistanceType mindist;     /* Minimum distance to query for all nodes below. */
 
-       bool operator<(const BranchStruct<T>& rhs)
-       {
-        return mindistsq<rhs.mindistsq;
-       }
+    BranchStruct() {}
+    BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {}
 
-    static BranchStruct<T> make_branch(const T& aNode, float dist)
+    bool operator<(const BranchStruct<T, DistanceType>& rhs) const
     {
-        BranchStruct<T> branch;
-        branch.node = aNode;
-        branch.mindistsq = dist;
-        return branch;
+        return mindist<rhs.mindist;
     }
 };
 
 
+template <typename DistanceType>
+class ResultSet
+{
+public:
+    virtual ~ResultSet() {}
 
+    virtual bool full() const = 0;
 
+    virtual void addPoint(DistanceType dist, int index) = 0;
 
-template <typename ELEM_TYPE>
-class ResultSet
+    virtual DistanceType worstDist() const = 0;
+
+};
+
+/**
+ * KNNSimpleResultSet does not ensure that the elements it holds are unique.
+ * It is used in those cases where the nearest-neighbour algorithm used does not
+ * attempt to insert the same element multiple times.
+ */
+template <typename DistanceType>
+class KNNSimpleResultSet : public ResultSet<DistanceType>
 {
-protected:
+    int* indices;
+    DistanceType* dists;
+    int capacity;
+    int count;
+    DistanceType worst_distance_;
 
 public:
+    KNNSimpleResultSet(int capacity_) : capacity(capacity_), count(0)
+    {
+    }
 
-       virtual ~ResultSet() {};
+    void init(int* indices_, DistanceType* dists_)
+    {
+        indices = indices_;
+        dists = dists_;
+        count = 0;
+        worst_distance_ = (std::numeric_limits<DistanceType>::max)();
+        dists[capacity-1] = worst_distance_;
+    }
+
+    size_t size() const
+    {
+        return count;
+    }
+
+    bool full() const
+    {
+        return count == capacity;
+    }
+
+
+    void addPoint(DistanceType dist, int index)
+    {
+        if (dist >= worst_distance_) return;
+        int i;
+        for (i=count; i>0; --i) {
+#ifdef FLANN_FIRST_MATCH
+            if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) )
+#else
+            if (dists[i-1]>dist)
+#endif
+            {
+                if (i<capacity) {
+                    dists[i] = dists[i-1];
+                    indices[i] = indices[i-1];
+                }
+            }
+            else break;
+        }
+        if (count < capacity) ++count;
+        dists[i] = dist;
+        indices[i] = index;
+        worst_distance_ = dists[capacity-1];
+    }
+
+    DistanceType worstDist() const
+    {
+        return worst_distance_;
+    }
+};
 
-       virtual void init(const ELEM_TYPE* target_, int veclen_) = 0;
+/**
+ * K-Nearest neighbour result set. Ensures that the elements inserted are unique
+ */
+template <typename DistanceType>
+class KNNResultSet : public ResultSet<DistanceType>
+{
+    int* indices;
+    DistanceType* dists;
+    int capacity;
+    int count;
+    DistanceType worst_distance_;
 
-       virtual int* getNeighbors() = 0;
+public:
+    KNNResultSet(int capacity_) : capacity(capacity_), count(0)
+    {
+    }
 
-       virtual float* getDistances() = 0;
+    void init(int* indices_, DistanceType* dists_)
+    {
+        indices = indices_;
+        dists = dists_;
+        count = 0;
+        worst_distance_ = (std::numeric_limits<DistanceType>::max)();
+        dists[capacity-1] = worst_distance_;
+    }
 
-       virtual size_t size() const = 0;
+    size_t size() const
+    {
+        return count;
+    }
 
-       virtual bool full() const = 0;
+    bool full() const
+    {
+        return count == capacity;
+    }
 
-       virtual bool addPoint(const ELEM_TYPE* point, int index) = 0;
 
-       virtual float worstDist() const = 0;
+    void addPoint(DistanceType dist, int index)
+    {
+        if (dist >= worst_distance_) return;
+        int i;
+        for (i = count; i > 0; --i) {
+#ifdef FLANN_FIRST_MATCH
+            if ( (dists[i-1]<=dist) && ((dist!=dists[i-1])||(indices[i-1]<=index)) )
+#else
+            if (dists[i-1]<=dist)
+#endif
+            {
+                // Check for duplicate indices
+                int j = i - 1;
+                while ((j >= 0) && (dists[j] == dist)) {
+                    if (indices[j] == index) {
+                        return;
+                    }
+                    --j;
+                }
+                break;
+            }
+        }
+
+        if (count < capacity) ++count;
+        for (int j = count-1; j > i; --j) {
+            dists[j] = dists[j-1];
+            indices[j] = indices[j-1];
+        }
+        dists[i] = dist;
+        indices[i] = index;
+        worst_distance_ = dists[capacity-1];
+    }
 
+    DistanceType worstDist() const
+    {
+        return worst_distance_;
+    }
 };
 
 
-template <typename ELEM_TYPE>
-class KNNResultSet : public ResultSet<ELEM_TYPE>
+/**
+ * A result-set class used when performing a radius based search.
+ */
+template <typename DistanceType>
+class RadiusResultSet : public ResultSet<DistanceType>
 {
-       const ELEM_TYPE* target;
-       const ELEM_TYPE* target_end;
-    int veclen;
+    DistanceType radius;
+    int* indices;
+    DistanceType* dists;
+    size_t capacity;
+    size_t count;
 
-       int* indices;
-       float* dists;
-    int capacity;
+public:
+    RadiusResultSet(DistanceType radius_, int* indices_, DistanceType* dists_, int capacity_) :
+        radius(radius_), indices(indices_), dists(dists_), capacity(capacity_)
+    {
+        init();
+    }
 
-       int count;
+    ~RadiusResultSet()
+    {
+    }
 
-public:
-       KNNResultSet(int capacity_, ELEM_TYPE* target_ = NULL, int veclen_ = 0 ) :
-                       target(target_), veclen(veclen_), capacity(capacity_), count(0)
-       {
-               target_end = target + veclen;
-
-        indices = new int[capacity_];
-        dists = new float[capacity_];
-       }
-
-       ~KNNResultSet()
-       {
-               delete[] indices;
-               delete[] dists;
-       }
-
-       void init(const ELEM_TYPE* target_, int veclen_)
-       {
-        target = target_;
-        veclen = veclen_;
-        target_end = target + veclen;
+    void init()
+    {
         count = 0;
-       }
+    }
 
+    size_t size() const
+    {
+        return count;
+    }
 
-       int* getNeighbors()
-       {
-               return indices;
-       }
+    bool full() const
+    {
+        return true;
+    }
+
+    void addPoint(DistanceType dist, int index)
+    {
+        if (dist<radius) {
+            if ((capacity>0)&&(count < capacity)) {
+                dists[count] = dist;
+                indices[count] = index;
+            }
+            count++;
+        }
+    }
+
+    DistanceType worstDist() const
+    {
+        return radius;
+    }
+
+};
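Because count keeps growing even after the capacity is exhausted, one possible pattern (a sketch under assumed conventions, not prescribed by this patch) is a counting pass with capacity 0 followed by a collecting pass; run_search is a hypothetical callback that replays the same search into a result set:

#include <vector>
// assumes the result-set header above is already included

void radius_two_pass(void (*run_search)(cvflann::ResultSet<float>&), float radius,
                     std::vector<int>& indices, std::vector<float>& dists)
{
    // pass 1: capacity 0 and null buffers, so the result set only counts neighbors in range
    cvflann::RadiusResultSet<float> counter(radius, NULL, NULL, 0);
    run_search(counter);
    if (counter.size() == 0) return;

    // pass 2: buffers sized from the count, results actually stored
    indices.resize(counter.size());
    dists.resize(counter.size());
    cvflann::RadiusResultSet<float> collector(radius, &indices[0], &dists[0], (int)indices.size());
    run_search(collector);
}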
 
-    float* getDistances()
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** Base class for result sets that keep their neighbors unique.
+ * Candidates are stored as (distance, index) pairs in a single sorted std::set instead of the two parallel arrays used by KNNResultSet.
+ */
+template<typename DistanceType>
+class UniqueResultSet : public ResultSet<DistanceType>
+{
+public:
+    struct DistIndex
+    {
+        DistIndex(DistanceType dist, unsigned int index) :
+            dist_(dist), index_(index)
+        {
+        }
+        bool operator<(const DistIndex dist_index) const
+        {
+            return (dist_ < dist_index.dist_) || ((dist_ == dist_index.dist_) && index_ < dist_index.index_);
+        }
+        DistanceType dist_;
+        unsigned int index_;
+    };
+
+    /** Default constructor */
+    UniqueResultSet() :
+        worst_distance_(std::numeric_limits<DistanceType>::max())
     {
-        return dists;
     }
 
+    /** Check the status of the set
+     * @return true if we have k NN
+     */
+    inline bool full() const
+    {
+        return is_full_;
+    }
+
+    /** Remove all elements in the set
+     */
+    virtual void clear() = 0;
+
+    /** Copy the set to two C arrays
+     * @param indices pointer to a C array of indices
+     * @param dist pointer to a C array of distances
+     * @param n_neighbors the number of neighbors to copy
+     */
+    virtual void copy(int* indices, DistanceType* dist, int n_neighbors = -1) const
+    {
+        if (n_neighbors < 0) {
+            for (typename std::set<DistIndex>::const_iterator dist_index = dist_indices_.begin(), dist_index_end =
+                     dist_indices_.end(); dist_index != dist_index_end; ++dist_index, ++indices, ++dist) {
+                *indices = dist_index->index_;
+                *dist = dist_index->dist_;
+            }
+        }
+        else {
+            int i = 0;
+            for (typename std::set<DistIndex>::const_iterator dist_index = dist_indices_.begin(), dist_index_end =
+                     dist_indices_.end(); (dist_index != dist_index_end) && (i < n_neighbors); ++dist_index, ++indices, ++dist, ++i) {
+                *indices = dist_index->index_;
+                *dist = dist_index->dist_;
+            }
+        }
+    }
+
+    /** Copy the set to two C arrays but sort it according to the distance first
+     * @param indices pointer to a C array of indices
+     * @param dist pointer to a C array of distances
+     * @param n_neighbors the number of neighbors to copy
+     */
+    virtual void sortAndCopy(int* indices, DistanceType* dist, int n_neighbors = -1) const
+    {
+        copy(indices, dist, n_neighbors);
+    }
+
+    /** The number of neighbors in the set
+     * @return
+     */
     size_t size() const
     {
-       return count;
-    }
-
-       bool full() const
-       {
-               return count == capacity;
-       }
-
-
-       bool addPoint(const ELEM_TYPE* point, int index)
-       {
-               for (int i=0;i<count;++i) {
-                       if (indices[i]==index) return false;
-               }
-               float dist = (float)flann_dist(target, target_end, point);
-
-               if (count<capacity) {
-                       indices[count] = index;
-                       dists[count] = dist;
-                       ++count;
-               }
-               else if (dist < dists[count-1] || (dist == dists[count-1] && index < indices[count-1])) {
-//         else if (dist < dists[count-1]) {
-                       indices[count-1] = index;
-                       dists[count-1] = dist;
-               }
-               else {
-                       return false;
-               }
-
-               int i = count-1;
-               // bubble up
-               while (i>=1 && (dists[i]<dists[i-1] || (dists[i]==dists[i-1] && indices[i]<indices[i-1]) ) ) {
-//         while (i>=1 && (dists[i]<dists[i-1]) ) {
-            std::swap(indices[i],indices[i-1]);
-            std::swap(dists[i],dists[i-1]);
-                       i--;
-               }
-
-               return true;
-       }
-
-       float worstDist() const
-       {
-               return (count<capacity) ? (std::numeric_limits<float>::max)() : dists[count-1];
-       }
+        return dist_indices_.size();
+    }
+
+    /** The distance of the furthest neighbor
+     * If we don't have enough neighbors, it returns the max possible value
+     * @return
+     */
+    inline DistanceType worstDist() const
+    {
+        return worst_distance_;
+    }
+protected:
+    /** Flag to say if the set is full */
+    bool is_full_;
+
+    /** The worst distance found so far */
+    DistanceType worst_distance_;
+
+    /** The best candidates so far */
+    std::set<DistIndex> dist_indices_;
 };
 
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
-/**
- * A result-set class used when performing a radius based search.
+/** Class that holds the k nearest neighbors and guarantees they are unique.
+ * Candidates are kept in a single sorted std::set rather than the two parallel arrays used by KNNResultSet.
  */
-template <typename ELEM_TYPE>
-class RadiusResultSet : public ResultSet<ELEM_TYPE>
+template<typename DistanceType>
+class KNNUniqueResultSet : public UniqueResultSet<DistanceType>
 {
-       const ELEM_TYPE* target;
-       const ELEM_TYPE* target_end;
-    int veclen;
+public:
+    /** Constructor
+     * @param capacity the number of neighbors to store at max
+     */
+    KNNUniqueResultSet(unsigned int capacity) : capacity_(capacity)
+    {
+        this->is_full_ = false;
+        this->clear();
+    }
 
-       struct Item {
-               int index;
-               float dist;
+    /** Add a possible candidate to the best neighbors
+     * @param dist distance for that neighbor
+     * @param index index of that neighbor
+     */
+    inline void addPoint(DistanceType dist, int index)
+    {
+        // Don't do anything if we are worse than the worst
+        if (dist >= worst_distance_) return;
+        dist_indices_.insert(DistIndex(dist, index));
+
+        if (is_full_) {
+            if (dist_indices_.size() > capacity_) {
+                dist_indices_.erase(*dist_indices_.rbegin());
+                worst_distance_ = dist_indices_.rbegin()->dist_;
+            }
+        }
+        else if (dist_indices_.size() == capacity_) {
+            is_full_ = true;
+            worst_distance_ = dist_indices_.rbegin()->dist_;
+        }
+    }
 
-               bool operator<(Item rhs) {
-                       return dist<rhs.dist;
-               }
-       };
+    /** Remove all elements in the set
+     */
+    void clear()
+    {
+        dist_indices_.clear();
+        worst_distance_ = std::numeric_limits<DistanceType>::max();
+        is_full_ = false;
+    }
 
-    std::vector<Item> items;
-       float radius;
+protected:
+    typedef typename UniqueResultSet<DistanceType>::DistIndex DistIndex;
+    using UniqueResultSet<DistanceType>::is_full_;
+    using UniqueResultSet<DistanceType>::worst_distance_;
+    using UniqueResultSet<DistanceType>::dist_indices_;
 
-       bool sorted;
-       int* indices;
-       float* dists;
-       size_t count;
+    /** The number of neighbors to keep */
+    unsigned int capacity_;
+};
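A rough usage sketch with invented distances (not part of this patch): the set is filled through addPoint() and exported with copy():

// assumes the result-set header above is already included
void knn_unique_set_example()
{
    cvflann::KNNUniqueResultSet<float> resultSet(2);

    resultSet.addPoint(0.8f, 4);
    resultSet.addPoint(0.8f, 4);   // identical (distance, index) pair: the std::set keeps a single copy
    resultSet.addPoint(0.1f, 9);   // the set is now full, worst distance becomes 0.8
    resultSet.addPoint(0.5f, 2);   // better than the worst: index 4 is evicted

    int indices[2];
    float dists[2];
    resultSet.copy(indices, dists, 2);   // indices == {9, 2}, dists == {0.1, 0.5}
}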
 
-private:
-       void resize_vecs()
-       {
-               if (items.size()>count) {
-                       if (indices!=NULL) delete[] indices;
-                       if (dists!=NULL) delete[] dists;
-                       count = items.size();
-                       indices = new int[count];
-                       dists = new float[count];
-               }
-       }
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
+/** Class that holds the neighbors found within a given radius.
+ * Unlike RadiusResultSet it is not limited to a fixed number of neighbors.
+ */
+template<typename DistanceType>
+class RadiusUniqueResultSet : public UniqueResultSet<DistanceType>
+{
 public:
-       RadiusResultSet(float radius_) :
-               radius(radius_), indices(NULL), dists(NULL)
-       {
-               sorted = false;
-               items.reserve(16);
-               count = 0;
-       }
-
-       ~RadiusResultSet()
-       {
-               if (indices!=NULL) delete[] indices;
-               if (dists!=NULL) delete[] dists;
-       }
-
-       void init(const ELEM_TYPE* target_, int veclen_)
-       {
-        target = target_;
-        veclen = veclen_;
-        target_end = target + veclen;
-        items.clear();
-        sorted = false;
-       }
-
-       int* getNeighbors()
-       {
-               if (!sorted) {
-                       sorted = true;
-                       sort_heap(items.begin(), items.end());
-               }
-               resize_vecs();
-               for (size_t i=0;i<items.size();++i) {
-                       indices[i] = items[i].index;
-               }
-               return indices;
-       }
-
-    float* getDistances()
-    {
-               if (!sorted) {
-                       sorted = true;
-                       sort_heap(items.begin(), items.end());
-               }
-               resize_vecs();
-               for (size_t i=0;i<items.size();++i) {
-                       dists[i] = items[i].dist;
-               }
-        return dists;
+    /** Constructor
+     * @param radius the maximum distance allowed for a neighbor
+     */
+    RadiusUniqueResultSet(DistanceType radius) :
+        radius_(radius)
+    {
+        is_full_ = true;
     }
 
-    size_t size() const
+    /** Add a possible candidate to the best neighbors
+     * @param dist distance for that neighbor
+     * @param index index of that neighbor
+     */
+    void addPoint(DistanceType dist, int index)
+    {
+        if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index));
+    }
+
+    /** Remove all elements in the set
+     */
+    inline void clear()
+    {
+        dist_indices_.clear();
+    }
+
+
+    /** Check the status of the set
+     * @return always true, as a radius search has no fixed capacity
+     */
+    inline bool full() const
     {
-       return items.size();
+        return true;
     }
 
-       bool full() const
-       {
-               return true;
-       }
+    /** The worst (largest) distance a neighbor may have.
+     * For a radius search this is simply the search radius.
+     * @return
+     */
+    inline DistanceType worstDist() const
+    {
+        return radius_;
+    }
+private:
+    typedef typename UniqueResultSet<DistanceType>::DistIndex DistIndex;
+    using UniqueResultSet<DistanceType>::dist_indices_;
+    using UniqueResultSet<DistanceType>::is_full_;
 
-       bool addPoint(const ELEM_TYPE* point, int index)
-       {
-               Item it;
-               it.index = index;
-               it.dist = (float)flann_dist(target, target_end, point);
-               if (it.dist<=radius) {
-                       items.push_back(it);
-                       push_heap(items.begin(), items.end());
-            return true;
-               }
-        return false;
-       }
+    /** The furthest distance a neighbor can be */
+    DistanceType radius_;
+};
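A companion sketch (illustrative only): since this container grows with every in-radius point, the caller sizes its output from size() before exporting:

#include <vector>
// assumes the result-set header above is already included

void radius_unique_example()
{
    cvflann::RadiusUniqueResultSet<float> resultSet(0.5f);

    resultSet.addPoint(0.7f, 1);   // outside the radius: ignored
    resultSet.addPoint(0.3f, 6);
    resultSet.addPoint(0.2f, 4);

    std::vector<int> indices(resultSet.size());
    std::vector<float> dists(resultSet.size());
    resultSet.sortAndCopy(&indices[0], &dists[0]);   // indices == {4, 6}, dists == {0.2, 0.3}
}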
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** Class that holds the k nearest neighbors that also lie within a given radius
+ */
+template<typename DistanceType>
+class KNNRadiusUniqueResultSet : public KNNUniqueResultSet<DistanceType>
+{
+public:
+    /** Constructor
+     * @param capacity the maximum number of neighbors to store
+     * @param radius the maximum distance allowed for a neighbor
+     */
+    KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius)
+    {
+        this->capacity_ = capacity;
+        this->radius_ = radius;
+        this->dist_indices_.reserve(capacity_);
+        this->clear();
+    }
+
+    /** Remove all elements in the set
+     */
+    void clear()
+    {
+        dist_indices_.clear();
+        worst_distance_ = radius_;
+        is_full_ = false;
+    }
+private:
+    using KNNUniqueResultSet<DistanceType>::dist_indices_;
+    using KNNUniqueResultSet<DistanceType>::is_full_;
+    using KNNUniqueResultSet<DistanceType>::worst_distance_;
 
-       float worstDist() const
-       {
-               return radius;
-       }
+    /** The maximum number of neighbors to consider */
+    unsigned int capacity_;
 
+    /** The maximum distance of a neighbor */
+    DistanceType radius_;
 };
+}
 
-} // namespace cvflann
+#endif //FLANN_RESULTSET_H
 
-#endif //_OPENCV_RESULTSET_H_
index 95f6e15..fd65150 100644 (file)
  *************************************************************************/
 
 
-#ifndef _OPENCV_SAMPLING_H_
-#define _OPENCV_SAMPLING_H_
-
-
-#include "opencv2/flann/matrix.h"
-#include "opencv2/flann/random.h"
+#ifndef OPENCV_FLANN_SAMPLING_H_
+#define OPENCV_FLANN_SAMPLING_H_
 
+#include "matrix.h"
+#include "random.h"
 
 namespace cvflann
 {
@@ -41,54 +39,43 @@ namespace cvflann
 template<typename T>
 Matrix<T> random_sample(Matrix<T>& srcMatrix, long size, bool remove = false)
 {
-    UniqueRandom rand((int)srcMatrix.rows);
-    Matrix<T> newSet(new T[size * srcMatrix.cols], size, (long)srcMatrix.cols);
+    Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);
 
-    T *src,*dest;
-    for (long i=0;i<size;++i) {
-        long r = rand.next();
+    T* src,* dest;
+    for (long i=0; i<size; ++i) {
+        long r = rand_int(srcMatrix.rows-i);
         dest = newSet[i];
         src = srcMatrix[r];
-        for (size_t j=0;j<srcMatrix.cols;++j) {
-            dest[j] = src[j];
-        }
+        std::copy(src, src+srcMatrix.cols, dest);
         if (remove) {
-            dest = srcMatrix[srcMatrix.rows-i-1];
-            src = srcMatrix[r];
-            for (size_t j=0;j<srcMatrix.cols;++j) {
-                std::swap(*src,*dest);
-                src++;
-                dest++;
-            }
+            src = srcMatrix[srcMatrix.rows-i-1];
+            dest = srcMatrix[r];
+            std::copy(src, src+srcMatrix.cols, dest);
         }
     }
-
     if (remove) {
-       srcMatrix.rows -= size;
+        srcMatrix.rows -= size;
     }
-
     return newSet;
 }
 
 template<typename T>
 Matrix<T> random_sample(const Matrix<T>& srcMatrix, size_t size)
 {
-    UniqueRandom rand((int)srcMatrix.rows);
-    Matrix<T> newSet(new T[size * srcMatrix.cols], (long)size, (long)srcMatrix.cols);
+    UniqueRandom rand(srcMatrix.rows);
+    Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);
 
-    T *src,*dest;
-    for (size_t i=0;i<size;++i) {
+    T* src,* dest;
+    for (size_t i=0; i<size; ++i) {
         long r = rand.next();
         dest = newSet[i];
         src = srcMatrix[r];
-        for (size_t j=0;j<srcMatrix.cols;++j) {
-            dest[j] = src[j];
-        }
+        std::copy(src, src+srcMatrix.cols, dest);
     }
-
     return newSet;
 }
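A small calling sketch (not part of this patch); note that the returned matrix owns a freshly allocated buffer that the caller must release:

// assumes the sampling header above is already included
void sampling_example(const cvflann::Matrix<float>& dataset)
{
    // draw 10 random rows without modifying the source dataset
    cvflann::Matrix<float> sample = cvflann::random_sample(dataset, (size_t)10);

    // ... use the sample, e.g. to estimate index parameters ...

    delete[] sample.data;   // the sampled buffer belongs to the caller
}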
 
-} // namespace cvflann
+} // namespace
+
 
-#endif /* _OPENCV_SAMPLING_H_ */
+#endif /* OPENCV_FLANN_SAMPLING_H_ */
index 52116ec..08b9915 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_SAVING_H_
-#define _OPENCV_SAVING_H_
+#ifndef OPENCV_FLANN_SAVING_H_
+#define OPENCV_FLANN_SAVING_H_
 
-#include "opencv2/flann/general.h"
-#include "opencv2/flann/nn_index.h"
-#include <cstdio>
 #include <cstring>
+#include <vector>
+
+#include "general.h"
+#include "nn_index.h"
+
+#define FLANN_SIGNATURE "FLANN_INDEX"
 
 namespace cvflann
 {
-template <typename T> struct Datatype {};
-template<> struct Datatype<char> { static flann_datatype_t type() { return FLANN_INT8; } };
-template<> struct Datatype<short> { static flann_datatype_t type() { return FLANN_INT16; } };
-template<> struct Datatype<int> { static flann_datatype_t type() { return FLANN_INT32; } };
-template<> struct Datatype<unsigned char> { static flann_datatype_t type() { return FLANN_UINT8; } };
-template<> struct Datatype<unsigned short> { static flann_datatype_t type() { return FLANN_UINT16; } };
-template<> struct Datatype<unsigned int> { static flann_datatype_t type() { return FLANN_UINT32; } };
-template<> struct Datatype<float> { static flann_datatype_t type() { return FLANN_FLOAT32; } };
-template<> struct Datatype<double> { static flann_datatype_t type() { return FLANN_FLOAT64; } };
 
+template <typename T>
+struct Datatype {};
+template<>
+struct Datatype<char> { static flann_datatype_t type() { return FLANN_INT8; } };
+template<>
+struct Datatype<short> { static flann_datatype_t type() { return FLANN_INT16; } };
+template<>
+struct Datatype<int> { static flann_datatype_t type() { return FLANN_INT32; } };
+template<>
+struct Datatype<unsigned char> { static flann_datatype_t type() { return FLANN_UINT8; } };
+template<>
+struct Datatype<unsigned short> { static flann_datatype_t type() { return FLANN_UINT16; } };
+template<>
+struct Datatype<unsigned int> { static flann_datatype_t type() { return FLANN_UINT32; } };
+template<>
+struct Datatype<float> { static flann_datatype_t type() { return FLANN_FLOAT32; } };
+template<>
+struct Datatype<double> { static flann_datatype_t type() { return FLANN_FLOAT64; } };
 
-CV_EXPORTS const char* FLANN_SIGNATURE();
-CV_EXPORTS const char* FLANN_VERSION();
 
 /**
  * Structure representing the index header.
  */
-struct CV_EXPORTS IndexHeader
+struct IndexHeader
 {
-       char signature[16];
-       char version[16];
-       flann_datatype_t data_type;
-       flann_algorithm_t index_type;
-       int rows;
-       int cols;
+    char signature[16];
+    char version[16];
+    flann_datatype_t data_type;
+    flann_algorithm_t index_type;
+    size_t rows;
+    size_t cols;
 };
 
 /**
@@ -69,20 +79,20 @@ struct CV_EXPORTS IndexHeader
  * @param stream - Stream to save to
  * @param index - The index to save
  */
-template<typename ELEM_TYPE>
-void save_header(FILE* stream, const NNIndex<ELEM_TYPE>& index)
+template<typename Distance>
+void save_header(FILE* stream, const NNIndex<Distance>& index)
 {
-       IndexHeader header;
-       memset(header.signature, 0 , sizeof(header.signature));
-       strcpy(header.signature, FLANN_SIGNATURE());
-       memset(header.version, 0 , sizeof(header.version));
-       strcpy(header.version, FLANN_VERSION());
-       header.data_type = Datatype<ELEM_TYPE>::type();
-       header.index_type = index.getType();
-       header.rows = (int)index.size();
-       header.cols = index.veclen();
-
-       std::fwrite(&header, sizeof(header),1,stream);
+    IndexHeader header;
+    memset(header.signature, 0, sizeof(header.signature));
+    strcpy(header.signature, FLANN_SIGNATURE);
+    memset(header.version, 0, sizeof(header.version));
+    strcpy(header.version, FLANN_VERSION);
+    header.data_type = Datatype<typename Distance::ElementType>::type();
+    header.index_type = index.getType();
+    header.rows = index.size();
+    header.cols = index.veclen();
+
+    std::fwrite(&header, sizeof(header),1,stream);
 }
 
 
@@ -91,25 +101,84 @@ void save_header(FILE* stream, const NNIndex<ELEM_TYPE>& index)
  * @param stream - Stream to load from
  * @return Index header
  */
-CV_EXPORTS IndexHeader load_header(FILE* stream);
+inline IndexHeader load_header(FILE* stream)
+{
+    IndexHeader header;
+    int read_size = fread(&header,sizeof(header),1,stream);
+
+    if (read_size!=1) {
+        throw FLANNException("Invalid index file, cannot read");
+    }
+
+    if (strcmp(header.signature,FLANN_SIGNATURE)!=0) {
+        throw FLANNException("Invalid index file, wrong signature");
+    }
+
+    return header;
+
+}
 
 
 template<typename T>
-void save_value(FILE* stream, const T& value, int count = 1)
+void save_value(FILE* stream, const T& value, size_t count = 1)
 {
-       fwrite(&value, 1, sizeof(value)*count, stream);
+    fwrite(&value, sizeof(value),count, stream);
 }
 
+template<typename T>
+void save_value(FILE* stream, const cvflann::Matrix<T>& value)
+{
+    fwrite(&value, sizeof(value),1, stream);
+    fwrite(value.data, sizeof(T),value.rows*value.cols, stream);
+}
 
 template<typename T>
-void load_value(FILE* stream, T& value, int count = 1)
+void save_value(FILE* stream, const std::vector<T>& value)
 {
-       int read_cnt = (int)fread(&value, sizeof(value),count, stream);
-       if (read_cnt!=count) {
-               throw FLANNException("Cannot read from file");
-       }
+    size_t size = value.size();
+    fwrite(&size, sizeof(size_t), 1, stream);
+    fwrite(&value[0], sizeof(T), size, stream);
 }
 
-} // namespace cvflann
+template<typename T>
+void load_value(FILE* stream, T& value, size_t count = 1)
+{
+    size_t read_cnt = fread(&value, sizeof(value), count, stream);
+    if (read_cnt != count) {
+        throw FLANNException("Cannot read from file");
+    }
+}
+
+template<typename T>
+void load_value(FILE* stream, cvflann::Matrix<T>& value)
+{
+    size_t read_cnt = fread(&value, sizeof(value), 1, stream);
+    if (read_cnt != 1) {
+        throw FLANNException("Cannot read from file");
+    }
+    value.data = new T[value.rows*value.cols];
+    read_cnt = fread(value.data, sizeof(T), value.rows*value.cols, stream);
+    if (read_cnt != int(value.rows*value.cols)) {
+        throw FLANNException("Cannot read from file");
+    }
+}
+
+
+template<typename T>
+void load_value(FILE* stream, std::vector<T>& value)
+{
+    size_t size;
+    size_t read_cnt = fread(&size, sizeof(size_t), 1, stream);
+    if (read_cnt!=1) {
+        throw FLANNException("Cannot read from file");
+    }
+    value.resize(size);
+    read_cnt = fread(&value[0], sizeof(T), size, stream);
+    if (read_cnt!=int(size)) {
+        throw FLANNException("Cannot read from file");
+    }
+}
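A round-trip sketch for the vector overloads of save_value/load_value (the file name is hypothetical and error handling is omitted):

#include <cstdio>
#include <vector>
// assumes the saving header above is already included

void save_load_example()
{
    std::vector<int> labels(100, 1);

    FILE* out = fopen("index_extra.dat", "wb");   // hypothetical path
    cvflann::save_value(out, labels);             // writes the element count, then the data
    fclose(out);

    std::vector<int> restored;
    FILE* in = fopen("index_extra.dat", "rb");
    cvflann::load_value(in, restored);            // resizes the vector and fills it
    fclose(in);
}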
+
+}
 
-#endif /* _OPENCV_SAVING_H_ */
+#endif /* OPENCV_FLANN_SAVING_H_ */
index 21d3e94..145901a 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_SIMPLEX_DOWNHILL_H_
-#define _OPENCV_SIMPLEX_DOWNHILL_H_
+#ifndef OPENCV_FLANN_SIMPLEX_DOWNHILL_H_
+#define OPENCV_FLANN_SIMPLEX_DOWNHILL_H_
 
 namespace cvflann
 {
 
 /**
     Adds val to array vals (and point to array points) and keeping the arrays sorted by vals.
-*/
+ */
 template <typename T>
 void addValue(int pos, float val, float* vals, T* point, T* points, int n)
 {
     vals[pos] = val;
-    for (int i=0;i<n;++i) {
+    for (int i=0; i<n; ++i) {
         points[pos*n+i] = point[i];
     }
 
@@ -49,7 +49,7 @@ void addValue(int pos, float val, float* vals, T* point, T* points, int n)
     int j=pos;
     while (j>0 && vals[j]<vals[j-1]) {
         swap(vals[j],vals[j-1]);
-        for (int i=0;i<n;++i) {
+        for (int i=0; i<n; ++i) {
             swap(points[j*n+i],points[(j-1)*n+i]);
         }
         --j;
@@ -64,7 +64,7 @@ void addValue(int pos, float val, float* vals, T* point, T* points, int n)
                     vals is the cost function in the n+1 simplex points, if NULL it will be computed
 
     Postcondition: returns optimum value and points[0..n] are the optimum parameters
-*/
+ */
 template <typename T, typename F>
 float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
 {
@@ -84,7 +84,7 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
     if (vals == NULL) {
         ownVals = true;
         vals = new float[n+1];
-        for (int i=0;i<n+1;++i) {
+        for (int i=0; i<n+1; ++i) {
             float val = func(points+i*n);
             addValue(i, val, vals, points+i*n, points, n);
         }
@@ -96,18 +96,18 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
         if (iterations++ > MAX_ITERATIONS) break;
 
         // compute average of simplex points (except the highest point)
-        for (int j=0;j<n;++j) {
+        for (int j=0; j<n; ++j) {
             p_o[j] = 0;
-            for (int i=0;i<n;++i) {
+            for (int i=0; i<n; ++i) {
                 p_o[i] += points[j*n+i];
             }
         }
-        for (int i=0;i<n;++i) {
+        for (int i=0; i<n; ++i) {
             p_o[i] /= n;
         }
 
         bool converged = true;
-        for (int i=0;i<n;++i) {
+        for (int i=0; i<n; ++i) {
             if (p_o[i] != points[nn+i]) {
                 converged = false;
             }
@@ -115,15 +115,15 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
         if (converged) break;
 
         // trying a reflection
-        for (int i=0;i<n;++i) {
+        for (int i=0; i<n; ++i) {
             p_r[i] = p_o[i] + alpha*(p_o[i]-points[nn+i]);
         }
         float val_r = func(p_r);
 
-        if (val_r>=vals[0] && val_r<vals[n]) {
+        if ((val_r>=vals[0])&&(val_r<vals[n])) {
             // reflection between second highest and lowest
             // add it to the simplex
-            logger().info("Choosing reflection\n");
+            Logger::info("Choosing reflection\n");
             addValue(n, val_r,vals, p_r, points, n);
             continue;
         }
@@ -132,37 +132,37 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
             // value is smaller than the smallest value in the simplex
 
             // expand some more to see if it drops further
-            for (int i=0;i<n;++i) {
+            for (int i=0; i<n; ++i) {
                 p_e[i] = 2*p_r[i]-p_o[i];
             }
             float val_e = func(p_e);
 
             if (val_e<val_r) {
-                logger().info("Choosing reflection and expansion\n");
+                Logger::info("Choosing reflection and expansion\n");
                 addValue(n, val_e,vals,p_e,points,n);
             }
             else {
-                logger().info("Choosing reflection\n");
+                Logger::info("Choosing reflection\n");
                 addValue(n, val_r,vals,p_r,points,n);
             }
             continue;
         }
         if (val_r>=vals[n]) {
-            for (int i=0;i<n;++i) {
+            for (int i=0; i<n; ++i) {
                 p_e[i] = (p_o[i]+points[nn+i])/2;
             }
             float val_e = func(p_e);
 
             if (val_e<vals[n]) {
-                logger().info("Choosing contraction\n");
+                Logger::info("Choosing contraction\n");
                 addValue(n,val_e,vals,p_e,points,n);
                 continue;
             }
         }
         {
-          logger().info("Full contraction\n");
-            for (int j=1;j<=n;++j) {
-                for (int i=0;i<n;++i) {
+            Logger::info("Full contraction\n");
+            for (int j=1; j<=n; ++j) {
+                for (int i=0; i<n; ++i) {
                     points[j*n+i] = (points[j*n+i]+points[i])/2;
                 }
                 float val = func(points+j*n);
@@ -181,6 +181,6 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
     return bestVal;
 }
 
-} // namespace cvflann
+}
 
-#endif //_OPENCV_SIMPLEX_DOWNHILL_H_
+#endif //OPENCV_FLANN_SIMPLEX_DOWNHILL_H_
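For orientation, a small sketch of how the optimizer is driven (the quadratic cost and starting simplex are made up): points holds the n+1 simplex vertices row-major, and func evaluates one vertex:

// assumes the simplex_downhill header above is already included
struct Quadratic
{
    float operator()(float* p) const
    {
        float dx = p[0] - 3.0f;
        float dy = p[1] + 1.0f;
        return dx*dx + dy*dy;   // minimum near (3, -1)
    }
};

void simplex_example()
{
    // 3 vertices of a 2-D simplex, stored as one flat array
    float points[] = { 0.0f, 0.0f,
                       5.0f, 0.0f,
                       0.0f, 5.0f };
    float best = cvflann::optimizeSimplexDownhill(points, 2, Quadratic());
    (void)best;   // points[0], points[1] now hold the best parameters found
}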
index db32057..a06c5c0 100644 (file)
@@ -28,8 +28,8 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
-#ifndef _OPENCV_TIMER_H_
-#define _OPENCV_TIMER_H_
+#ifndef FLANN_TIMER_H
+#define FLANN_TIMER_H
 
 #include <time.h>
 
@@ -42,7 +42,7 @@ namespace cvflann
  *
  * Can be used to time portions of code.
  */
-class CV_EXPORTS StartStopTimer
+class StartStopTimer
 {
     clock_t startTime;
 
@@ -64,14 +64,16 @@ public:
     /**
      * Starts the timer.
      */
-    void start() {
+    void start()
+    {
         startTime = clock();
     }
 
     /**
      * Stops the timer and updates timer value.
      */
-    void stop() {
+    void stop()
+    {
         clock_t stopTime = clock();
         value += ( (double)stopTime - startTime) / CLOCKS_PER_SEC;
     }
@@ -79,12 +81,13 @@ public:
     /**
      * Resets the timer value to 0.
      */
-    void reset() {
+    void reset()
+    {
         value = 0;
     }
 
 };
 
-}// namespace cvflann
+}
 
-#endif // _OPENCV_TIMER_H_
+#endif // FLANN_TIMER_H
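A minimal timing sketch (the measured work is left abstract); value accumulates across start/stop pairs until reset() is called:

// assumes the timer header above is already included
void timer_example()
{
    cvflann::StartStopTimer timer;

    timer.start();
    // ... run the searches to be measured ...
    timer.stop();

    double seconds = timer.value;   // elapsed time in seconds so far
    (void)seconds;

    timer.reset();                  // start a fresh measurement
}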
index 30a82ea..77c439f 100644 (file)
 
 #include "precomp.hpp"
 
-namespace cvflann
-{
-// ----------------------- dist.cpp ---------------------------
-
-/** Global variable indicating the distance metric
- * to be used.
- */
-flann_distance_t flann_distance_type_ = FLANN_DIST_EUCLIDEAN;
-flann_distance_t flann_distance_type() { return flann_distance_type_; }
-
-/**
- * Zero iterator that emulates a zero feature.
- */
-ZeroIterator<float> zero_;
-ZeroIterator<float>& zero() { return zero_; }
-
-/**
- * Order of Minkowski distance to use.
- */
-int flann_minkowski_order_;
-int flann_minkowski_order() { return flann_minkowski_order_; }
-
-
-double euclidean_dist(const unsigned char* first1, const unsigned char* last1, unsigned char* first2, double acc)
-{
-       double distsq = acc;
-       double diff0, diff1, diff2, diff3;
-       const unsigned char* lastgroup = last1 - 3;
-
-       while (first1 < lastgroup) {
-               diff0 = first1[0] - first2[0];
-               diff1 = first1[1] - first2[1];
-               diff2 = first1[2] - first2[2];
-               diff3 = first1[3] - first2[3];
-               distsq += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
-               first1 += 4;
-               first2 += 4;
-       }
-       while (first1 < last1) {
-               diff0 = *first1++ - *first2++;
-               distsq += diff0 * diff0;
-       }
-       return distsq;
-}
-
-// ----------------------- index_testing.cpp ---------------------------
-
-int countCorrectMatches(int* neighbors, int* groundTruth, int n)
-{
-    int count = 0;
-    for (int i=0;i<n;++i) {
-        for (int k=0;k<n;++k) {
-            if (neighbors[i]==groundTruth[k]) {
-                count++;
-                break;
-            }
-        }
-    }
-    return count;
-}
-
-// ----------------------- logger.cpp ---------------------------
-
-Logger logger_;
-
-Logger& logger() { return logger_; }
-
-int Logger::log(int level, const char* fmt, ...)
-{
-    if (level > logLevel ) return -1;
-
-    int ret;
-    va_list arglist;
-    va_start(arglist, fmt);
-    ret = vfprintf(stream, fmt, arglist);
-    va_end(arglist);
-
-    return ret;
-}
-
-int Logger::log(int level, const char* fmt, va_list arglist)
-{
-    if (level > logLevel ) return -1;
-
-    int ret;
-    ret = vfprintf(stream, fmt, arglist);
-
-    return ret;
-}
-
-
-#define LOG_METHOD(NAME,LEVEL) \
-    int Logger::NAME(const char* fmt, ...) \
-    { \
-        int ret; \
-        va_list ap; \
-        va_start(ap, fmt); \
-        ret = log(LEVEL, fmt, ap); \
-        va_end(ap); \
-        return ret; \
-    }
-
-
-LOG_METHOD(fatal, FLANN_LOG_FATAL)
-LOG_METHOD(error, FLANN_LOG_ERROR)
-LOG_METHOD(warn, FLANN_LOG_WARN)
-LOG_METHOD(info, FLANN_LOG_INFO)
-
-// ----------------------- random.cpp ---------------------------
-
-void seed_random(unsigned int seed)
-{
-    srand(seed);
-}
-
-double rand_double(double high, double low)
-{
-    return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0)));
-}
-
-
-int rand_int(int high, int low)
-{
-    return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0)));
-}
-
-// ----------------------- saving.cpp ---------------------------
-
-const char FLANN_SIGNATURE_[] = "FLANN_INDEX";
-const char FLANN_VERSION_[] = "1.5.0";
-
-const char* FLANN_SIGNATURE() { return FLANN_SIGNATURE_; }
-const char* FLANN_VERSION() { return FLANN_VERSION_; }
-
-IndexHeader load_header(FILE* stream)
-{
-       IndexHeader header;
-       size_t read_size = fread(&header,sizeof(header),1,stream);
-
-       if (read_size!=1) {
-               throw FLANNException("Invalid index file, cannot read");
-       }
-
-       if (strcmp(header.signature,FLANN_SIGNATURE())!=0) {
-               throw FLANNException("Invalid index file, wrong signature");
-       }
-
-       return header;
-
-}
-
-// ----------------------- flann.cpp ---------------------------
-
-
-void log_verbosity(int level)
-{
-    if (level>=0) {
-        logger().setLevel(level);
-    }
-}
-
-void set_distance_type(flann_distance_t distance_type, int order)
-{
-       flann_distance_type_ = distance_type;
-       flann_minkowski_order_ = order;
-}
-
-
-static ParamsFactory the_factory;
-
-ParamsFactory& ParamsFactory_instance()
-{
-    return the_factory;
-}
-
-class StaticInit
-{
-public:
-       StaticInit()
-       {
-               ParamsFactory_instance().register_<LinearIndexParams>(FLANN_INDEX_LINEAR);
-               ParamsFactory_instance().register_<KDTreeIndexParams>(FLANN_INDEX_KDTREE);
-               ParamsFactory_instance().register_<KMeansIndexParams>(FLANN_INDEX_KMEANS);
-               ParamsFactory_instance().register_<CompositeIndexParams>(FLANN_INDEX_COMPOSITE);
-               ParamsFactory_instance().register_<AutotunedIndexParams>(FLANN_INDEX_AUTOTUNED);
-//             ParamsFactory::instance().register_<SavedIndexParams>(FLANN_INDEX_SAVED);
-       }
-};
-StaticInit __init;
-
-
-} // namespace cvflann
-
 
 
index 8c511ce..5b9d10f 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "opencv2/flann/dist.h"
 #include "opencv2/flann/index_testing.h"
+#include "opencv2/flann/params.h"
 #include "opencv2/flann/saving.h"
 #include "opencv2/flann/general.h"
 