#ifndef _DLR_GRADIENTFUNCTION_H_
#define _DLR_GRADIENTFUNCTION_H_

#include <cstddef>
#include <functional>
// ValueException and DLR_THROW3 are assumed to come from the library's
// common exception header; adjust the path if your tree differs.
#include <dlrCommon/exception.h>

namespace dlr {

namespace optimization {

/**
 * This functor wraps another functor, forwarding calls to its
 * application operator and adding a gradient() member function that
 * estimates the gradient numerically by central differences.
 */
template <class Functor>
class GradientFunction
  : public std::unary_function<typename Functor::argument_type,
                               typename Functor::result_type>
{
public:
  /**
   * Constructor.
   *
   * @param functor The functor to be differentiated.
   * @param epsilon The step size for the finite difference
   * approximation.  Must be nonzero.
   */
  GradientFunction(const Functor& functor, double epsilon=1.0e-6) :
    m_functor(functor), m_epsilon(epsilon) {
    if(epsilon == 0.0) {
      DLR_THROW3(ValueException, "GradientFunction::GradientFunction()",
                 "Invalid value (0.0) for argument epsilon.");
    }
  }

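  // Note on choosing epsilon: for a central difference, a common rule
  // of thumb (see, e.g., Numerical Recipes) sets the step near the
  // cube root of machine precision -- about 6.0e-6 for double --
  // scaled by the magnitude of the corresponding element of theta.
  // The default of 1.0e-6 above is in this ballpark for arguments of
  // order one.
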
  /**
   * Destructor.
   */
  virtual ~GradientFunction() {}

  /**
   * Estimate the gradient of the wrapped functor at the specified
   * point, using central differences with step size epsilon.
   *
   * @param theta The point at which to estimate the gradient.
   * @return The estimated gradient at theta.
   */
  typename Functor::argument_type
  gradient(const typename Functor::argument_type& theta);

  /**
   * Evaluate the wrapped functor at the specified point.
   */
  typename Functor::result_type
  operator()(const typename Functor::argument_type& theta) {
    return m_functor(theta);
  }

private:
  Functor m_functor;
  double m_epsilon;

};
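
// A minimal usage sketch (illustrative only): MyCostFunction is a
// hypothetical functor whose argument_type is a numeric vector type
// supporting size() and operator[] (std::valarray<double>, say) and
// whose result_type is double.
//
//   MyCostFunction costFunction;
//   GradientFunction<MyCostFunction> gradientFunction(costFunction);
//   std::valarray<double> theta(3);          // theta == [0.0, 0.0, 0.0]
//   double value = gradientFunction(theta);  // forwards to costFunction
//   std::valarray<double> grad = gradientFunction.gradient(theta);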

} // namespace optimization

} // namespace dlr

namespace dlr {

// Make GradientFunction visible in namespace dlr as well.
using optimization::GradientFunction;

} // namespace dlr

// Definitions of template functions follow.

namespace dlr {

namespace optimization {

template <class Functor>
typename Functor::argument_type
GradientFunction<Functor>::
gradient(const typename Functor::argument_type& theta)
{
  typename Functor::argument_type thetaMinus(theta.size());
  typename Functor::argument_type thetaPlus(theta.size());
  typename Functor::argument_type result(theta.size());

  // Start with both probe points equal to theta.
  for(size_t index = 0; index < theta.size(); ++index) {
    thetaMinus[index] = theta[index];
    thetaPlus[index] = theta[index];
  }

  // Estimate each component of the gradient in turn.
  for(size_t index = 0; index < theta.size(); ++index) {
    // Perturb only the current element by +/- epsilon.
    thetaMinus[index] = theta[index] - m_epsilon;
    thetaPlus[index] = theta[index] + m_epsilon;

    // Apply the central difference formula.
    typename Functor::result_type valueMinus = m_functor(thetaMinus);
    typename Functor::result_type valuePlus = m_functor(thetaPlus);
    result[index] = (valuePlus - valueMinus) / (2.0 * m_epsilon);

    // Restore the perturbed element before moving on.
    thetaMinus[index] = theta[index];
    thetaPlus[index] = theta[index];
  }
  return result;
}
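
// For reference, the estimate computed above for each component is
//
//   dF/dtheta_i ~= (F(theta + epsilon * e_i) - F(theta - epsilon * e_i))
//                  / (2 * epsilon),
//
// where e_i is the i-th canonical basis vector.  The truncation error
// of this central difference is O(epsilon^2), compared with O(epsilon)
// for a one-sided difference, at the cost of two function evaluations
// per component.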

} // namespace optimization

} // namespace dlr

#endif // #ifndef _DLR_GRADIENTFUNCTION_H_