Prepare Downhill Simplex for pull request
authorAlex Leontiev <alozz1991@gmail.com>
Thu, 1 Aug 2013 12:42:59 +0000 (20:42 +0800)
committerAlex Leontiev <alozz1991@gmail.com>
Fri, 30 Aug 2013 13:35:47 +0000 (21:35 +0800)
This is an implementation of the so-called downhill simplex method
(https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method)

Please let me know if you have any comments, whoever you may be.

modules/optim/doc/downhill_simplex_method.rst [new file with mode: 0644]
modules/optim/doc/optim.rst
modules/optim/include/opencv2/optim.hpp
modules/optim/src/debug.hpp [new file with mode: 0644]
modules/optim/src/lpsolver.cpp
modules/optim/src/simplex.cpp [new file with mode: 0644]
modules/optim/test/test_downhill_simplex.cpp [new file with mode: 0644]

diff --git a/modules/optim/doc/downhill_simplex_method.rst b/modules/optim/doc/downhill_simplex_method.rst
new file mode 100644 (file)
index 0000000..8bb9be8
--- /dev/null
@@ -0,0 +1,164 @@
+Downhill Simplex Method
+=======================
+
+.. highlight:: cpp
+
+optim::DownhillSolver
+---------------------------------
+
+.. ocv:class:: optim::DownhillSolver
+
+This class is used to perform the non-linear non-constrained *minimization* of a function defined on an *n*-dimensional Euclidean space,
+using the **Nelder-Mead method**, also known as the **downhill simplex method**. The basic idea of the method can be obtained from
+(`http://en.wikipedia.org/wiki/Nelder-Mead\_method <http://en.wikipedia.org/wiki/Nelder-Mead_method>`_). It should be noted that
+this method, although deterministic, is rather a heuristic and therefore may converge to a local minimum, not necessarily the global one.
+It is an iterative optimization technique which at each step uses information about the function values at only
+*n+1* points, arranged as a *simplex* in *n*-dimensional space (hence the second name of the method). At each step a new point is
+chosen at which to evaluate the function; the obtained value is compared with the previous ones and, based on this information, the
+simplex changes its shape, slowly moving towards the local minimum.
+
+The algorithm stops when the number of function evaluations exceeds ``termcrit.maxCount``, when the function values at the
+vertices of the simplex are within a ``termcrit.epsilon`` range of each other, or when the simplex becomes so small that it
+can be enclosed in a box with sides of length ``termcrit.epsilon``, whichever comes first, where ``termcrit.maxCount`` is a
+user-defined positive integer and ``termcrit.epsilon`` a user-defined positive real number.
+
+::
+
+    class CV_EXPORTS Solver : public Algorithm
+    {
+    public:
+        class CV_EXPORTS Function
+        {
+        public:
+           virtual ~Function() {}
+           //! x points to an array of ndim doubles (ndim = problem dimensionality)
+           virtual double calc(const double* x) const = 0;     
+        };
+
+        virtual Ptr<Function> getFunction() const = 0;
+        virtual void setFunction(const Ptr<Function>& f) = 0;
+
+        virtual TermCriteria getTermCriteria() const = 0;
+        virtual void setTermCriteria(const TermCriteria& termcrit) = 0;
+
+        // x contains the initial point before the call and the position of the minimum (if the algorithm
+        // converged) after it. x is assumed to be a row-vector or a column-vector (i.e. something that
+        // getMat() will return as such). *Its size should be consistent with the dimensionality data
+        // given previously, if any (otherwise, it determines the dimensionality).*
+        virtual double minimize(InputOutputArray x) = 0;
+    };
+
+    class CV_EXPORTS DownhillSolver : public Solver
+    {
+    public:
+        //! returns row-vector, even if the column-vector was given
+        virtual void getInitStep(OutputArray step) const=0;
+        //! This should be called at least once before the first call to minimize(). step is assumed to be a
+        //! row-vector or a column-vector (i.e. something that getMat() will return as such). *Its dimensionality
+        //! determines the dimensionality of the problem.*
+        virtual void setInitStep(InputArray step)=0;
+    };
+
+It should be noted that ``optim::DownhillSolver`` derives from the abstract interface ``optim::Solver``, which in
+turn derives from the ``Algorithm`` interface and is used to encapsulate the functionality common to all non-linear optimization
+algorithms in the ``optim`` module.
+
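+To give a feel for the intended workflow, here is a minimal usage sketch (the objective below is the Rosenbrock-type function also used in the module's tests; everything else follows the interface above)::
+
+    class Rosenbrock : public cv::optim::Solver::Function
+    {
+    public:
+        double calc(const double* x) const
+        {
+            return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0])+(1-x[0])*(1-x[0]);
+        }
+    };
+
+    // ...
+
+    cv::Ptr<cv::optim::Solver::Function> rosen(new Rosenbrock());
+    cv::Mat x    = (cv::Mat_<double>(1,2) << 0.0, 0.0); // starting point
+    cv::Mat step = (cv::Mat_<double>(2,1) << 0.5, 0.5); // initial spread of the simplex
+
+    cv::Ptr<cv::optim::DownhillSolver> solver = cv::optim::createDownhillSolver();
+    solver->setFunction(rosen);
+    solver->setInitStep(step);
+    double res = solver->minimize(x); // x now holds the point found, res the function value there
+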
+optim::DownhillSolver::getFunction
+--------------------------------------------
+
+Getter for the optimized function. The optimized function is represented by the ``Solver::Function`` interface, which requires
+derived classes to implement the sole method ``calc(const double*)`` for evaluating the function.
+
+.. ocv:function:: Ptr<Solver::Function> optim::DownhillSolver::getFunction()
+
+    :return: Smart pointer to an object that implements the ``Solver::Function`` interface; it represents the function being optimized. It may be empty if no function has been given so far.
+
+optim::DownhillSolver::setFunction
+-----------------------------------------------
+
+Setter for the optimized function. *It should be called at least once before the call to* ``DownhillSolver::minimize()``, as the
+default value is not usable.
+
+.. ocv:function:: void optim::DownhillSolver::setFunction(const Ptr<Solver::Function>& f)
+
+    :param f: The new function to optimize.
+
+optim::DownhillSolver::getTermCriteria
+----------------------------------------------------
+
+Getter for the previously set termination criteria of this algorithm.
+
+.. ocv:function:: TermCriteria optim::DownhillSolver::getTermCriteria()
+
+    :return: Deep copy of the termination criteria currently in use.
+
+optim::DownhillSolver::setTermCriteria
+------------------------------------------
+
+Set termination criteria for the downhill simplex method. Two things should be noted. First, it *is not necessary* to call this
+method before the first call to ``DownhillSolver::minimize()``, as the default value is sensible. Second, the method will raise an error
+if ``termcrit.type!=(TermCriteria::MAX_ITER+TermCriteria::EPS)``, ``termcrit.epsilon<=0`` or ``termcrit.maxCount<=0``. That is,
+both ``epsilon`` and ``maxCount`` should be set to positive values (real and integer, respectively); they represent the
+tolerance and the maximal number of function evaluations allowed.
+
+The algorithm stops when the number of function evaluations exceeds ``termcrit.maxCount``, when the function values at the
+vertices of the simplex are within a ``termcrit.epsilon`` range of each other, or when the simplex becomes so small that it
+can be enclosed in a box with sides of length ``termcrit.epsilon``, whichever comes first.
+
+.. ocv:function:: void optim::DownhillSolver::setTermCriteria(const TermCriteria& termcrit)
+
+    :param termcrit: Termination criteria to be used, represented as a ``TermCriteria`` structure (defined elsewhere in OpenCV). Note that it should satisfy ``(termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && termcrit.epsilon>0 && termcrit.maxCount>0)``, otherwise an error will be raised.
+
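+For instance, with ``solver`` being a ``Ptr<DownhillSolver>`` as in the usage sketch above, the following call allows at most 5000 function evaluations and uses a tolerance of ``1e-6`` (these numbers happen to coincide with the defaults used by ``createDownhillSolver()``, see below)::
+
+    solver->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 5000, 1e-6));
+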
+optim::DownhillSolver::getInitStep
+-----------------------------------
+
+Returns the initial step that will be used in the downhill simplex algorithm. See the description
+of the corresponding setter (below) for the meaning of this parameter.
+
+.. ocv:function:: void optim::DownhillSolver::getInitStep(OutputArray step)
+
+    :param step: Initial step that will be used in the algorithm. Note that although the corresponding setter accepts column-vectors as well as row-vectors, this method will always return a row-vector.
+
+optim::DownhillSolver::setInitStep
+----------------------------------
+
+Sets the initial step that will be used in the downhill simplex algorithm. The step, together with the initial point (given in ``DownhillSolver::minimize``),
+are the two *n*-dimensional vectors that are used to determine the shape of the initial simplex. Roughly speaking, the initial point determines the position
+of the simplex (it will become the simplex's centroid), while the step determines its spread (size in each dimension). To be more precise,
+if :math:`s,x_0\in\mathbb{R}^n` are the initial step and the initial point respectively, the vertices of the simplex will be
+:math:`v_0:=x_0-\frac{1}{2}s` and :math:`v_i:=x_0+\frac{1}{2}s_i` for :math:`i=1,2,\dots,n`, where :math:`s_i` denotes the projection of the
+initial step onto the *i*-th coordinate axis (i.e. the vector :math:`s_i:=e_i\left<e_i,s\right>`, where the :math:`e_i` form the canonical basis).
+
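+For example, in the plane (:math:`n=2`), the initial point :math:`x_0=(1,2)` and the initial step :math:`s=(0.5,0.5)` give the vertices
+:math:`v_0=(0.75,1.75)`, :math:`v_1=(1.25,2)` and :math:`v_2=(1,2.25)`, whose centroid is again :math:`x_0`.
+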
+.. ocv:function:: void optim::DownhillSolver::setInitStep(InputArray step)
+
+    :param step: Initial step that will be used in the algorithm. Roughly speaking, it determines the spread (size in each dimension) of the initial simplex.
+
+optim::DownhillSolver::minimize
+-----------------------------------
+
+The main method of the ``DownhillSolver``. It actually runs the algorithm and performs the minimization. The sole input parameter determines the
+centroid of the starting simplex (roughly speaking, it tells the algorithm where to start); all the other parameters (termination criteria, initial step,
+function to be minimized) are supposed to be set via the setters before this method is called, otherwise the default values (not always sensible) will be used.
+
+.. ocv:function:: double optim::DownhillSolver::minimize(InputOutputArray x)
+
+    :param x: The initial point, which will become the centroid of the initial simplex. After the algorithm terminates, it is set to the point where the algorithm stopped, i.e. the point of a possible minimum.
+
+    :return: The value of the function at the point found.
+
+optim::createDownhillSolver
+------------------------------------
+
+This function returns a reference to a ready-to-use ``DownhillSolver`` object. All the parameters are optional, so this procedure can be called
+even without parameters at all, in which case the default values will be used. Since the default value is sensible only for the termination criteria,
+``DownhillSolver::setFunction()`` and ``DownhillSolver::setInitStep()`` should be called on the obtained object if the respective parameters
+were not given to ``createDownhillSolver()``. Otherwise, the two ways (passing the parameters to ``createDownhillSolver()`` or omitting them and calling
+``DownhillSolver::setFunction()`` and ``DownhillSolver::setInitStep()``) are completely equivalent (and will raise the same errors in the same way,
+should invalid input be detected).
+
+.. ocv:function:: Ptr<optim::DownhillSolver> optim::createDownhillSolver(const Ptr<Solver::Function>& f, InputArray initStep, TermCriteria termcrit)
+
+    :param f: Pointer to the function that will be minimized, similar to the one you would submit via ``DownhillSolver::setFunction``.
+    :param initStep: Initial step that will be used to construct the initial simplex, similar to the one you would submit via ``DownhillSolver::setInitStep``.
+    :param termcrit: Termination criteria for the algorithm, similar to the one you would submit via ``DownhillSolver::setTermCriteria``.
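+
+For instance, with ``rosen`` and ``step`` prepared as in the usage sketch above and ``termcrit`` a valid ``TermCriteria``, the following two ways of configuring a solver are equivalent (a sketch only)::
+
+    // everything passed at construction time
+    cv::Ptr<cv::optim::DownhillSolver> solver1 = cv::optim::createDownhillSolver(rosen, step, termcrit);
+
+    // the same configuration done via the setters
+    cv::Ptr<cv::optim::DownhillSolver> solver2 = cv::optim::createDownhillSolver();
+    solver2->setFunction(rosen);
+    solver2->setInitStep(step);
+    solver2->setTermCriteria(termcrit);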
index bead212..cdbaeac 100644 (file)
@@ -8,3 +8,4 @@ optim. Generic numerical optimization
     :maxdepth: 2
 
     linear_programming
+    downhill_simplex_method
index 0bbedad..ad26a09 100644 (file)
 
 namespace cv{namespace optim
 {
+class CV_EXPORTS Solver : public Algorithm
+{
+public:
+    class CV_EXPORTS Function
+    {
+    public:
+       virtual ~Function() {}
+       //! x points to an array of ndim doubles (ndim = problem dimensionality)
+       virtual double calc(const double* x) const = 0;     
+    };
+
+    virtual Ptr<Function> getFunction() const = 0;
+    virtual void setFunction(const Ptr<Function>& f) = 0;
+
+    virtual TermCriteria getTermCriteria() const = 0;
+    virtual void setTermCriteria(const TermCriteria& termcrit) = 0;
+
+    // x contains the initial point before the call and the position of the minimum (if the algorithm
+    // converged) after it. x is assumed to be a row-vector or a column-vector (i.e. something that
+    // getMat() will return as such). *Its size should be consistent with the dimensionality data
+    // given previously, if any (otherwise, it determines the dimensionality).*
+    virtual double minimize(InputOutputArray x) = 0;
+};
+
+//! downhill simplex class
+class CV_EXPORTS DownhillSolver : public Solver
+{
+public:
+    //! returns row-vector, even if the column-vector was given
+    virtual void getInitStep(OutputArray step) const=0;
+    //! This should be called at least once before the first call to minimize(). step is assumed to be a
+    //! row-vector or a column-vector (i.e. something that getMat() will return as such). *Its dimensionality
+    //! determines the dimensionality of the problem.*
+    virtual void setInitStep(InputArray step)=0;
+};
+
+// Both minRange and minError are specified by termcrit.epsilon; in addition, the user may specify the maximal number of function evaluations via termcrit.maxCount.
+CV_EXPORTS_W Ptr<DownhillSolver> createDownhillSolver(const Ptr<Solver::Function>& f=Ptr<Solver::Function>(),
+        InputArray initStep=Mat_<double>(1,1,0.0),
+        TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));
+
 //!the return codes for solveLP() function
 enum
 {
diff --git a/modules/optim/src/debug.hpp b/modules/optim/src/debug.hpp
new file mode 100644 (file)
index 0000000..fe5d00e
--- /dev/null
@@ -0,0 +1,18 @@
+namespace cv{namespace optim{
+#ifdef ALEX_DEBUG
+#define dprintf(x) printf x
+static void print_matrix(const Mat& x){
+    printf("\ttype:%d vs %d,\tsize: %d-on-%d\n",x.type(),CV_64FC1,x.rows,x.cols);
+    for(int i=0;i<x.rows;i++){
+        printf("\t[");
+        for(int j=0;j<x.cols;j++){
+            printf("%g, ",x.at<double>(i,j));
+        }
+        printf("]\n");
+    }
+}
+#else
+#define dprintf(x)
+#define print_matrix(x)
+#endif
+}}
index 924b756..a046dda 100644 (file)
@@ -2,16 +2,12 @@
 #include <climits>
 #include <algorithm>
 #include <cstdarg>
+#include "debug.hpp"
 
 namespace cv{namespace optim{
 using std::vector;
 
 #ifdef ALEX_DEBUG
-#define dprintf(x) printf x
-static void print_matrix(const Mat& x){
-    print(x);
-    printf("\n");
-}
 static void print_simplex_state(const Mat& c,const Mat& b,double v,const std::vector<int> N,const std::vector<int> B){
     printf("\tprint simplex state\n");
 
@@ -32,8 +28,6 @@ static void print_simplex_state(const Mat& c,const Mat& b,double v,const std::ve
     printf("\n");
 }
 #else
-#define dprintf(x)
-#define print_matrix(x)
 #define print_simplex_state(c,b,v,N,B)
 #endif
 
diff --git a/modules/optim/src/simplex.cpp b/modules/optim/src/simplex.cpp
new file mode 100644 (file)
index 0000000..eeb23c5
--- /dev/null
@@ -0,0 +1,289 @@
+#include "precomp.hpp"
+#include "debug.hpp"
+
+namespace cv{namespace optim{
+
+    class DownhillSolverImpl : public DownhillSolver
+    {
+    public:
+        void getInitStep(OutputArray step) const;
+        void setInitStep(InputArray step);
+        Ptr<Function> getFunction() const;
+        void setFunction(const Ptr<Function>& f);
+        TermCriteria getTermCriteria() const;
+        DownhillSolverImpl();
+        void setTermCriteria(const TermCriteria& termcrit);
+        double minimize(InputOutputArray x);
+    protected:
+        Ptr<Solver::Function> _Function;
+        TermCriteria _termcrit;
+        Mat _step;
+    private:
+        inline void compute_coord_sum(Mat_<double>& points,Mat_<double>& coord_sum);
+        inline void create_initial_simplex(Mat_<double>& simplex,Mat& step);
+        inline double inner_downhill_simplex(cv::Mat_<double>& p,double MinRange,double MinError,int& nfunk,
+                const Ptr<Solver::Function>& f,int nmax);
+        inline double try_new_point(Mat_<double>& p,Mat_<double>& y,Mat_<double>& coord_sum,const Ptr<Solver::Function>& f,int ihi,
+                double fac,Mat_<double>& ptry);
+    };
+
+    double DownhillSolverImpl::try_new_point(
+        Mat_<double>& p, 
+        Mat_<double>& y,
+        Mat_<double>&  coord_sum, 
+        const Ptr<Solver::Function>& f,
+        int      ihi, 
+        double   fac,
+        Mat_<double>& ptry
+        )
+    {
+        int ndim=p.cols;
+        int j;
+        double fac1,fac2,ytry;
+        
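+        /* The trial point lies on the line through vertex ihi and the centroid of the
+           remaining vertices: ptry = (1-fac)*centroid + fac*p(ihi). The caller uses
+           fac=-1.0 (reflection), fac=2.0 (expansion) and fac=0.5 (contraction). */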
+        fac1=(1.0-fac)/ndim;
+        fac2=fac1-fac;
+        for (j=0;j<ndim;j++) 
+        {
+            ptry(j)=coord_sum(j)*fac1-p(ihi,j)*fac2;
+        }
+        ytry=f->calc((double*)ptry.data);
+        if (ytry < y(ihi)) 
+        {
+            y(ihi)=ytry;
+            for (j=0;j<ndim;j++) 
+            {
+                coord_sum(j) += ptry(j)-p(ihi,j);
+                p(ihi,j)=ptry(j);
+            }
+        }
+        
+        return ytry;
+    }
+
+    /*
+    Performs the actual minimization of Solver::Function f (after the initialization was done)
+
+    The matrix p, of size (ndim+1) x ndim, represents the ndim+1 vertices that
+    form a simplex - each row is an ndim-dimensional vector.
+    On output, nfunk gives the number of function evaluations taken.
+    */
+    double DownhillSolverImpl::inner_downhill_simplex( 
+        cv::Mat_<double>&   p, 
+        double     MinRange,
+        double     MinError,
+        int&       nfunk,
+        const Ptr<Solver::Function>& f,
+        int nmax
+        )
+    {
+        int ndim=p.cols;
+        double res;
+        int i,ihi,ilo,inhi,j,mpts=ndim+1;
+        double error, range,ysave,ytry;
+        Mat_<double> coord_sum(1,ndim,0.0),buf(1,ndim,0.0),y(1,ndim+1,0.0); // y holds the function values at the ndim+1 vertices
+
+        nfunk = 0;
+        
+        for(i=0;i<ndim+1;++i)
+        {
+            y(i) = f->calc(p[i]);
+        }
+
+        nfunk = ndim+1;
+        
+        compute_coord_sum(p,coord_sum);
+        
+        for (;;) 
+        {
+            ilo=0;
+            /*  find highest (worst), next-to-worst, and lowest
+                (best) points by going through all of them. */
+            ihi = y(0)>y(1) ? (inhi=1,0) : (inhi=0,1);
+            for (i=0;i<mpts;i++) 
+            {
+                if (y(i) <= y(ilo)) 
+                    ilo=i;
+                if (y(i) > y(ihi)) 
+                {
+                    inhi=ihi;
+                    ihi=i;
+                } 
+                else if (y(i) > y(inhi) && i != ihi) 
+                    inhi=i;
+            }
+
+            /* check stop criterion */
+            error=fabs(y(ihi)-y(ilo));
+            range=0;
+            for(i=0;i<ndim;++i)
+            {
+                double min = p(0,i);
+                double max = p(0,i);
+                double d;
+                for(j=1;j<=ndim;++j)
+                {
+                    if( min > p(j,i) ) min = p(j,i);
+                    if( max < p(j,i) ) max = p(j,i);
+                }
+                d = fabs(max-min);
+                if(range < d) range = d;
+            }
+
+            if(range <= MinRange || error <= MinError) 
+            { /* Put best point and value in first slot. */
+                std::swap(y(0),y(ilo));
+                for (i=0;i<ndim;i++) 
+                {
+                    std::swap(p(0,i),p(ilo,i));
+                }
+                break;
+            }
+
+            if (nfunk >= nmax){
+                dprintf(("nmax exceeded\n"));
+                return y(ilo);
+            }
+            nfunk += 2;
+            /*Begin a new iteration. First, reflect the worst point about the centroid of others */
+            ytry = try_new_point(p,y,coord_sum,f,ihi,-1.0,buf);
+            if (ytry <= y(ilo))
+            { /*If that's better than the best point, go twice as far in that direction*/
+                ytry = try_new_point(p,y,coord_sum,f,ihi,2.0,buf);
+            }
+            else if (ytry >= y(inhi)) 
+            {   /* The new point is worse than the second-highest, but better
+                  than the worst so do not go so far in that direction */
+                ysave = y(ihi);
+                ytry = try_new_point(p,y,coord_sum,f,ihi,0.5,buf);
+                if (ytry >= ysave) 
+                { /* Can't seem to improve things. Contract the simplex to good point
+               in hope to find a simplex landscape. */
+                    for (i=0;i<mpts;i++) 
+                    {
+                        if (i != ilo) 
+                        {
+                            for (j=0;j<ndim;j++)
+                            {
+                                p(i,j) = coord_sum(j) = 0.5*(p(i,j)+p(ilo,j));
+                            }
+                            y(i)=f->calc((double*)coord_sum.data);
+                        }
+                    }
+                    nfunk += ndim;
+                    compute_coord_sum(p,coord_sum);
+                }
+            } else --(nfunk); /* correct nfunk */
+            dprintf(("this is simplex on iteration %d\n",nfunk));
+            print_matrix(p);
+        } /* go to next iteration. */
+        res = y(0);
+
+        return res;
+    }
+
+    void DownhillSolverImpl::compute_coord_sum(Mat_<double>& points,Mat_<double>& coord_sum){
+        for (int j=0;j<points.cols;j++) {
+            double sum=0.0;
+            for (int i=0;i<points.rows;i++){
+                sum += points(i,j);
+            }
+            coord_sum(0,j)=sum;
+        }
+    }
+
+    void DownhillSolverImpl::create_initial_simplex(Mat_<double>& simplex,Mat& step){
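+        /* Vertex i (i=1..n) is vertex 0 shifted by +0.5*step along the (i-1)-th coordinate;
+           vertex 0 is then shifted by -0.5*step, so the original content of row 0 (the initial
+           point) becomes the centroid of the resulting simplex. */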
+        for(int i=1;i<=step.cols;++i)
+        {
+            simplex.row(0).copyTo(simplex.row(i));
+            simplex(i,i-1)+= 0.5*step.at<double>(0,i-1);
+        }
+        simplex.row(0) -= 0.5*step;
+
+        dprintf(("this is simplex\n"));
+        print_matrix(simplex);
+    }
+
+    double DownhillSolverImpl::minimize(InputOutputArray x){
+        dprintf(("hi from minimize\n"));
+        CV_Assert(_Function.empty()==false);
+        dprintf(("termcrit:\n\ttype: %d\n\tmaxCount: %d\n\tEPS: %g\n",_termcrit.type,_termcrit.maxCount,_termcrit.epsilon));
+        dprintf(("step\n"));
+        print_matrix(_step);
+
+        Mat x_mat=x.getMat();
+        CV_Assert(MIN(x_mat.rows,x_mat.cols)==1);
+        CV_Assert(MAX(x_mat.rows,x_mat.cols)==_step.cols);
+        CV_Assert(x_mat.type()==CV_64FC1);
+
+        Mat_<double> proxy_x;
+
+        if(x_mat.rows>1){
+            proxy_x=x_mat.t();
+        }else{
+            proxy_x=x_mat;
+        }
+
+        int count=0;
+        int ndim=_step.cols;
+        Mat_<double> simplex=Mat_<double>(ndim+1,ndim,0.0);
+
+        proxy_x.copyTo(simplex.row(0)); // seed row 0 of the simplex with the user-supplied initial point
+        create_initial_simplex(simplex,_step);
+        double res = inner_downhill_simplex(
+                simplex,_termcrit.epsilon, _termcrit.epsilon, count,_Function,_termcrit.maxCount);
+        simplex.row(0).copyTo(proxy_x);
+
+        dprintf(("%d iterations done\n",count));
+
+        if(x_mat.rows>1){
+            Mat(x_mat.rows, 1, CV_64F, (double*)proxy_x.data).copyTo(x);
+        }
+        return res;
+    }
+    DownhillSolverImpl::DownhillSolverImpl(){
+        _Function=Ptr<Function>();
+        _step=Mat_<double>();
+    }
+    Ptr<Solver::Function> DownhillSolverImpl::getFunction()const{
+        return _Function;
+    }
+    void DownhillSolverImpl::setFunction(const Ptr<Function>& f){
+        _Function=f;
+    }
+    TermCriteria DownhillSolverImpl::getTermCriteria()const{
+        return _termcrit;
+    }
+    void DownhillSolverImpl::setTermCriteria(const TermCriteria& termcrit){
+        CV_Assert(termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && termcrit.epsilon>0 && termcrit.maxCount>0);
+        _termcrit=termcrit;
+    }
+    // Both minRange and minError are specified by termcrit.epsilon; in addition, the user may specify the maximal number of function evaluations via termcrit.maxCount.
+    Ptr<DownhillSolver> createDownhillSolver(const Ptr<Solver::Function>& f, InputArray initStep, TermCriteria termcrit){
+        DownhillSolver *DS=new DownhillSolverImpl();
+        DS->setFunction(f);
+        DS->setInitStep(initStep);
+        DS->setTermCriteria(termcrit);
+        return Ptr<DownhillSolver>(DS);
+    }
+    void DownhillSolverImpl::getInitStep(OutputArray step)const{
+        step.create(1,_step.cols,CV_64FC1);
+        _step.copyTo(step);
+    }
+    void DownhillSolverImpl::setInitStep(InputArray step){
+        //set dimensionality and make a deep copy of step
+        Mat m=step.getMat();
+        dprintf(("m.cols=%d\nm.rows=%d\n",m.cols,m.rows));
+        CV_Assert(MIN(m.cols,m.rows)==1 && m.type()==CV_64FC1);
+        int ndim=MAX(m.cols,m.rows);
+        if(ndim!=_step.cols){
+            _step=Mat_<double>(1,ndim);
+        }
+        if(m.rows==1){
+            m.copyTo(_step);
+        }else{
+            Mat step_t=Mat_<double>(ndim,1,(double*)_step.data);
+            m.copyTo(step_t);
+        }
+    }
+}}
diff --git a/modules/optim/test/test_downhill_simplex.cpp b/modules/optim/test/test_downhill_simplex.cpp
new file mode 100644 (file)
index 0000000..cd2f099
--- /dev/null
@@ -0,0 +1,63 @@
+#include "test_precomp.hpp"
+#include <cstdlib>
+#include <cmath>
+#include <algorithm>
+
+static void mytest(cv::Ptr<cv::optim::DownhillSolver> solver,cv::Ptr<cv::optim::Solver::Function> ptr_F,cv::Mat& x,cv::Mat& step, 
+        cv::Mat& etalon_x,double etalon_res){
+    solver->setFunction(ptr_F);
+    int ndim=MAX(step.cols,step.rows);
+    solver->setInitStep(step);
+    cv::Mat returnedStep;
+    solver->getInitStep(returnedStep);
+    ASSERT_TRUE(returnedStep.rows==1 && returnedStep.cols==ndim);
+    ASSERT_TRUE(std::equal(step.begin<double>(),step.end<double>(),returnedStep.begin<double>()));
+    std::cout<<"step set:\n\t"<<step<<std::endl;
+    double res=solver->minimize(x);
+    std::cout<<"res:\n\t"<<res<<std::endl;
+    std::cout<<"x:\n\t"<<x<<std::endl;
+    std::cout<<"etalon_res:\n\t"<<etalon_res<<std::endl;
+    std::cout<<"etalon_x:\n\t"<<etalon_x<<std::endl;
+    double tol=solver->getTermCriteria().epsilon;
+    ASSERT_TRUE(std::abs(res-etalon_res)<tol);
+    /*for(cv::Mat_<double>::iterator it1=x.begin<double>(),it2=etalon_x.begin<double>();it1!=x.end<double>();it1++,it2++){
+        ASSERT_TRUE(std::abs((*it1)-(*it2))<tol);
+    }*/
+    std::cout<<"--------------------------\n";
+}
+
+class SphereF:public cv::optim::Solver::Function{
+public:
+    double calc(const double* x)const{
+        return x[0]*x[0]+x[1]*x[1];
+    }
+};
+class RosenbrockF:public cv::optim::Solver::Function{
+    double calc(const double* x)const{
+        return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0])+(1-x[0])*(1-x[0]);
+    }
+};
+
+TEST(Optim_Downhill, regression_basic){
+    cv::Ptr<cv::optim::DownhillSolver> solver=cv::optim::createDownhillSolver();
+#if 1
+    {
+        cv::Ptr<cv::optim::Solver::Function> ptr_F(new SphereF());
+        cv::Mat x=(cv::Mat_<double>(1,2)<<1.0,1.0),
+            step=(cv::Mat_<double>(2,1)<<-0.5,-0.5),
+            etalon_x=(cv::Mat_<double>(1,2)<<-0.0,0.0);
+        double etalon_res=0.0;
+        mytest(solver,ptr_F,x,step,etalon_x,etalon_res);
+    }
+#endif
+#if 1
+    {
+        cv::Ptr<cv::optim::Solver::Function> ptr_F(new RosenbrockF());
+        cv::Mat x=(cv::Mat_<double>(2,1)<<0.0,0.0),
+            step=(cv::Mat_<double>(2,1)<<0.5,+0.5),
+            etalon_x=(cv::Mat_<double>(2,1)<<1.0,1.0);
+        double etalon_res=0.0;
+        mytest(solver,ptr_F,x,step,etalon_x,etalon_res);
+    }
+#endif
+}