optimizerLM.h

#ifndef _DLR_OPTIMIZERLM_H_
#define _DLR_OPTIMIZERLM_H_

#include <utility>  // std::pair, used in the run() return type below.
#include <vector>
#include <dlrCommon/types.h>
#include <dlrOptimization/optimizer.h>
#include <dlrOptimization/optimizerLineSearch.h>
#include <dlrLinearAlgebra/linearAlgebra.h>
#include <dlrNumeric/utilities.h>

namespace dlr {

  namespace optimization {

    template <class Functor>
    class OptimizerLM
      : public Optimizer<Functor>
    {
    public:
      // Typedefs for convenience
      typedef typename Functor::argument_type argument_type;
      typedef typename Functor::result_type result_type;


      OptimizerLM();


      explicit OptimizerLM(const Functor& functor);


      OptimizerLM(const OptimizerLM& source);


      virtual
      ~OptimizerLM();

//     /**
//      * This method returns the number of function calls required to
//      * complete the previous minimization.  If the minimization
//      * parameter "restarts" is 0, there will be only one element in
//      * the returned vector.  If restarts is greater than 0, the first
//      * element of the return value will reflect the number of function
//      * calls in the initial minimization, and subsequent numbers will
//      * reflect the number of function calls in the following restarted
//      * minimizations.  If a valid minimization has not been performed
//      * since the last update to startPoint, parameters, etc., then the
//      * return value will be an empty vector.
//      *
//      * @return a vector of function call counts.
//      */
//     virtual std::vector<size_t>
//     getNumberOfFunctionCalls() {return this->m_functionCallCount;}


//     /**
//      * This method returns the number of gradient calls required to
//      * complete the previous minimization.  If the minimization
//      * parameter "restarts" is 0, there will be only one element in
//      * the returned vector.  If restarts is greater than 0, the first
//      * element of the return value will reflect the number of gradient
//      * calls in the initial minimization, and subsequent numbers will
//      * reflect the number of gradient calls in the following restarted
//      * minimizations.  If a valid minimization has not been performed
//      * since the last update to startPoint, parameters, etc., then the
//      * return value will be an empty vector.
//      *
//      * @return a vector of gradient call counts.
//      */
//     virtual std::vector<size_t>
//     getNumberOfGradientCalls() {return this->m_gradientCallCount;}

//     /**
//      * This method returns the number of iterations required to
//      * complete the previous minimization.  If the minimization
//      * parameter "restarts" is 0, there will be only one element in
//      * the returned vector.  If restarts is greater than 0, the first
//      * element of the return value will reflect the number of
//      * iterations in the initial minimization, and subsequent numbers
//      * will reflect the number of iterations in the following
//      * restarted minimizations.  If a valid minimization has not been
//      * performed since the last update to startPoint, parameters,
//      * etc., then the return value will be an empty vector.
//      *
//      * @return a vector of iteration counts.
//      */
//     virtual std::vector<size_t>
//     getNumberOfIterations() {return this->m_iterationCount;}


      virtual void
      setMinimumGradientMagnitude(double minimumGradientMagnitude);


      virtual void
      setParameters(double initialLambda = 1.0,
                    size_t maxIterations = 40,
                    double maxLambda = 1.0E7,
                    double minLambda = 1.0E-13,
                    double minError = 0.0,
                    double minimumGradientMagnitude = 1.0E-5,
                    double minDrop = 1.0E-4,
                    size_t strikes = 3,
                    int maxBackSteps = -1,
                    int verbosity = 0);


      virtual void
      setStartPoint(const typename Functor::argument_type& startPoint);


      virtual void
      setVerbosity(int verbosity) {m_verbosity = verbosity;}


      virtual OptimizerLM&
      operator=(const OptimizerLM& source);

    protected:

      double
      gradientConvergenceMetric(const argument_type& theta,
                                const result_type& value,
                                const argument_type& gradient);


      virtual
      std::pair<typename Functor::argument_type, typename Functor::result_type>
      run();


      inline virtual void
      verboseWrite(const char* message, int verbosity);


      template <class Type>
      inline void
      verboseWrite(const char* intro, const Type& subject, int verbosity);

      // Data members.
      double m_initialLambda;
      int m_maxBackSteps;
      size_t m_maxIterations;
      double m_maxLambda;
      double m_minDrop;
      double m_minError;
      double m_minGrad;
      double m_minLambda;
      argument_type m_startPoint;
      size_t m_strikes;
      int m_verbosity;

    }; // class OptimizerLM

  } // namespace optimization

} // namespace dlr
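
/* ======= Notes on the Functor template parameter (illustrative only) ======= */

// The requirements below are inferred from the calls made in run() later in
// this file.  "MyErrorFunctor" is a hypothetical name used only to sketch
// the expected interface; it is not part of this library, and the concrete
// argument/result types are up to the user.
//
//   class MyErrorFunctor {
//   public:
//     // Argument type of the error function.  run() expects it to be
//     // constructible from a size, and to support size() and operator[].
//     typedef Array1D<Float64> argument_type;
//
//     // Scalar error type.  run() compares, subtracts, and divides
//     // values of this type.
//     typedef Float64 result_type;
//
//     // Returns the error at theta.
//     result_type operator()(const argument_type& theta);
//
//     // Fills in the gradient (dEdX) and Hessian (d2EdX2) of the error
//     // at theta.
//     void computeGradientAndHessian(const argument_type& theta,
//                                    argument_type& dEdX,
//                                    Array2D<Float64>& d2EdX2);
//   };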


/* ======= Declarations to maintain compatibility with legacy code. ======= */

namespace dlr {

  using optimization::OptimizerLM;

} // namespace dlr
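
// A minimal configuration sketch, assuming a functor type "MyErrorFunctor"
// as outlined above and an initial guess "startPoint" of the functor's
// argument_type; both names are placeholders, not part of this library.
// The parameter values shown are the declared defaults.  The optimum itself
// is retrieved through the Optimizer<Functor> base interface declared in
// dlrOptimization/optimizer.h, which is not shown in this file.
//
//   MyErrorFunctor errorFunctor;
//   dlr::OptimizerLM<MyErrorFunctor> optimizer(errorFunctor);
//   optimizer.setStartPoint(startPoint);
//   optimizer.setParameters(1.0,     // initialLambda
//                           40,      // maxIterations
//                           1.0E7,   // maxLambda
//                           1.0E-13, // minLambda
//                           0.0,     // minError
//                           1.0E-5,  // minimumGradientMagnitude
//                           1.0E-4,  // minDrop
//                           3,       // strikes
//                           -1,      // maxBackSteps
//                           0);      // verbosity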


/*******************************************************************
 * Member function definitions follow.  This would be a .cpp file
 * if it weren't templated.
 *******************************************************************/

#include <math.h>
#include <algorithm>  // std::copy, used in run() below.
#include <cstdio>     // fprintf.
#include <iostream>   // std::cout, std::flush, std::endl.
#include <dlrNumeric/array1D.h>
#include <dlrNumeric/array2D.h>
#include <dlrNumeric/utilities.h>
#include <dlrOptimization/optimizerCommon.h>

namespace dlr {

  namespace optimization {

    template <class Functor>
    OptimizerLM<Functor>::
    OptimizerLM()
      : Optimizer<Functor>(),
        m_initialLambda(),
        m_maxBackSteps(),
        m_maxIterations(),
        m_maxLambda(),
        m_minDrop(),
        m_minError(),
        m_minGrad(),
        m_minLambda(),
        m_startPoint(),
        m_strikes(),
        m_verbosity()
    {
      this->setParameters();
    }


    template <class Functor>
    OptimizerLM<Functor>::
    OptimizerLM(const Functor& functor)
      : Optimizer<Functor>(functor),
        m_initialLambda(),
        m_maxBackSteps(),
        m_maxIterations(),
        m_maxLambda(),
        m_minDrop(),
        m_minError(),
        m_minGrad(),
        m_minLambda(),
        m_startPoint(),
        m_strikes(),
        m_verbosity()
    {
      this->setParameters();
    }


    template<class Functor>
    OptimizerLM<Functor>::
    OptimizerLM(const OptimizerLM& source)
      : Optimizer<Functor>(source),
        m_initialLambda(source.m_initialLambda),
        m_maxBackSteps(source.m_maxBackSteps),
        m_maxIterations(source.m_maxIterations),
        m_maxLambda(source.m_maxLambda),
        m_minDrop(source.m_minDrop),
        m_minError(source.m_minError),
        m_minGrad(source.m_minGrad),
        m_minLambda(source.m_minLambda),
        m_startPoint(source.m_startPoint.size()),
        m_strikes(source.m_strikes),
        m_verbosity(source.m_verbosity)
    {
      copyArgumentType(source.m_startPoint, this->m_startPoint);
    }


    template <class Functor>
    OptimizerLM<Functor>::
    ~OptimizerLM()
    {
      // Empty
    }


    // This method sets one of the termination criteria of the
    // optimization.  Note that the threshold is stored squared, so that
    // run() can compare it directly against the squared gradient
    // magnitude, dotArgumentType(dEdX, dEdX), without taking a square root.
    template<class Functor>
    void
    OptimizerLM<Functor>::
    setMinimumGradientMagnitude(double minimumGradientMagnitude)
    {
      this->m_minGrad = minimumGradientMagnitude * minimumGradientMagnitude;
    }


    template<class Functor>
    void
    OptimizerLM<Functor>::
    setParameters(double initialLambda,
                  size_t maxIterations,
                  double maxLambda,
                  double minLambda,
                  double minError,
                  double minimumGradientMagnitude,
                  double minDrop,
                  size_t strikes,
                  int maxBackSteps,
                  int verbosity)
    {
      // Copy input arguments.
      this->m_initialLambda = initialLambda;
      this->m_maxIterations = maxIterations;
      this->m_maxLambda = maxLambda;
      this->m_minLambda = minLambda;
      this->m_minError = minError;
      this->m_minGrad = minimumGradientMagnitude * minimumGradientMagnitude;
      this->m_minDrop = minDrop;
      this->m_strikes = strikes;
      this->m_maxBackSteps = maxBackSteps;
      this->m_verbosity = verbosity;

//     // Reset memory of previous minimization.
//     this->m_functionCallCount.clear();
//     this->m_gradientCallCount.clear();
//     this->m_iterationCount.clear();

      // We've changed the parameters, so we'll have to rerun the
      // optimization.  Indicate this by setting the inherited member
      // m_needsOptimization.
      Optimizer<Functor>::m_needsOptimization = true;
    }


    template<class Functor>
    void
    OptimizerLM<Functor>::
    setStartPoint(const typename Functor::argument_type& startPoint)
    {
      copyArgumentType(startPoint, this->m_startPoint);

//     // Reset memory of previous minimization.
//     this->m_functionCallCount.clear();
//     this->m_gradientCallCount.clear();
//     this->m_iterationCount.clear();

      // We've changed the start point, so we'll have to rerun the
      // optimization.  Indicate this by setting the inherited member
      // m_needsOptimization.
      Optimizer<Functor>::m_needsOptimization = true;
    }


    template<class Functor>
    OptimizerLM<Functor>&
    OptimizerLM<Functor>::
    operator=(const OptimizerLM<Functor>& source)
    {
      if(&source != this) {
        Optimizer<Functor>::operator=(source);
        m_initialLambda = source.m_initialLambda;
        m_maxBackSteps = source.m_maxBackSteps;
        m_maxIterations = source.m_maxIterations;
        m_maxLambda = source.m_maxLambda;
        m_minDrop = source.m_minDrop;
        m_minError = source.m_minError;
        m_minGrad = source.m_minGrad;
        m_minLambda = source.m_minLambda;
        copyArgumentType(source.m_startPoint, this->m_startPoint);
        m_strikes = source.m_strikes;
        m_verbosity = source.m_verbosity;
      }
      return *this;
    }


    // =============== Protected member functions below =============== //

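    // The run() method below implements the standard Levenberg-Marquardt
    // update.  At each iteration it solves the damped linear system
    //
    //     (d2EdX2 + lambda * I) * deltaX = dEdX
    //
    // and steps theta <- theta - deltaX.  lambda is divided by 10 after a
    // step that decreases the error (clamped below at minLambda) and
    // multiplied by 10 otherwise (bounded above by maxLambda), as
    // configured through setParameters().
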
    template <class Functor>
    std::pair<typename Functor::argument_type, typename Functor::result_type>
    OptimizerLM<Functor>::
    run()
    {
      // Check that we have a valid startPoint.
      if(this->m_startPoint.size() == 0) {
        DLR_THROW3(StateException, "OptimizerLM<Functor>::run()",
                   "startPoint has not been initialized.");
      }

      // Initialize working location so that we start at the right place.
      argument_type theta(this->m_startPoint.size());
      copyArgumentType(this->m_startPoint, theta);

      // Initialize variables relating to convergence and convergence
      // failure.
      size_t strikes = 0;
      int backtrackCount = 0;

      // Initialize variables which will take function return values and
      // derivatives.
      result_type errorValue;
      argument_type dEdX(theta.size());
      Array2D<Float64> d2EdX2(theta.size(), theta.size());

      // Initialize intermediate values used by the minimization.
      Array2D<Float64> BMatrix(theta.size(), theta.size());
      Array1D<Float64> deltaX(theta.size());
      argument_type xCond(theta.size());
      Array1D<result_type> errorHistory =
        zeros(m_maxIterations + 1, type_tag<result_type>());
      Float64 lambda = m_initialLambda;

      // Get initial value of error function.
      errorValue = this->m_functor(theta);
      errorHistory[0] = errorValue;
      this->verboseWrite("Error History:\n", errorHistory, 1);

      // Loop until termination.
      size_t iterationIndex = 0;
      for(; iterationIndex < m_maxIterations; ++iterationIndex) {

        // Compute gradient.
        this->m_functor.computeGradientAndHessian(theta, dEdX, d2EdX2);

        // Gradient almost zero?
        if(dotArgumentType(dEdX, dEdX) <= m_minGrad) {
          this->verboseWrite("Tiny gradient, terminating iteration.\n", 1);
          break;
        }

        // Adjust lambda.
        while(lambda <= m_maxLambda) {

          // Precondition the Hessian matrix.
          std::copy(d2EdX2.begin(), d2EdX2.end(), BMatrix.begin());
          for(size_t diagIndex = 0; diagIndex < theta.size(); ++diagIndex) {
            BMatrix(diagIndex, diagIndex) += lambda;
          }

          // Solve B * deltaX = dEdX'
          copyArgumentType(dEdX, deltaX);
          linearSolveInPlace(BMatrix, deltaX);

          // We have a new candidate location in the error space.
          for(size_t elementIndex = 0; elementIndex < theta.size();
              ++elementIndex) {
            xCond[elementIndex] = theta[elementIndex] - deltaX[elementIndex];
          }

          // Do we have a decrease in the error function at the candidate
          // location?
          errorValue = this->m_functor(xCond);
          if(errorValue < errorHistory[iterationIndex]) {
            // Yes. Go on to the next iteration.
            backtrackCount = 0;
            errorHistory[iterationIndex + 1] = errorValue;
            copyArgumentType(xCond, theta);
            lambda /= 10.0;
            if(lambda < m_minLambda) {
              lambda = m_minLambda;
            }
            this->verboseWrite("Lambda = ", lambda, 1);
            this->verboseWrite("Theta = ", theta, 2);
            break;
          } else {
            // Error did not decrease.  Try a bigger lambda.
            ++backtrackCount;
            lambda *= 10.0;
            if(lambda > m_maxLambda) {
              break;
            }
            this->verboseWrite("Lambda = ", lambda, 1);

            // Make sure we haven't exceeded the maxBackSteps
            // termination criterion.
            if(m_maxBackSteps >= 0
               && backtrackCount > m_maxBackSteps) {
              break;
            }

          }
        }

        if(m_verbosity >= 1) {
          std::cout << "Error History:\n" << errorHistory << std::endl;
        }
// Note(xxx):  Not sure if we still want this functionality.
//       if(m_iterationFunctorPtr != 0) {
//         m_iterationFunctorPtr(theta);
//       }

        // Test termination conditions.
        Float64 drop =
          (errorHistory[iterationIndex] - errorValue)
          / errorHistory[iterationIndex];
        if(drop < m_minDrop) {
          ++strikes;
          if(m_verbosity >= 2) {
            fprintf(stderr, "strikes = %d\n", static_cast<int>(strikes));
          }
        } else {
          strikes = 0;
        }

        if(lambda >= m_maxLambda
           || strikes == m_strikes
           || errorHistory[iterationIndex] <= m_minError
           || (m_maxBackSteps >= 0 && backtrackCount >= m_maxBackSteps)) {
          if(m_verbosity >= 1) {
            std::cout << "Stopping with lambda = " << lambda
                      << " (" << m_maxLambda << ")\n"
                      << "              strikes = " << strikes
                      << " (" << m_strikes << ")\n"
                      << "              error = " << errorHistory[iterationIndex]
                      << " (" << m_minError << ")\n"
                      << "              backTrackCount = " << backtrackCount
                      << " (" << m_maxBackSteps << ")" << std::endl;
          }
          break;
        }
      }
      return std::make_pair(theta, errorHistory[iterationIndex]);
    }


    template <class Functor>
    inline void
    OptimizerLM<Functor>::
    verboseWrite(const char* message, int verbosity)
    {
      if(verbosity <= this->m_verbosity) {
        std::cout << message << std::flush;
      }
    }


    template <class Functor> template <class Type>
    inline void
    OptimizerLM<Functor>::
    verboseWrite(const char* intro, const Type& subject, int verbosity)
    {
      if(verbosity <= this->m_verbosity) {
        std::cout << intro << subject << std::endl;
      }
    }

  } // namespace optimization

} // namespace dlr

#endif // #ifndef _DLR_OPTIMIZERLM_H_
