NLPInterfacePack_ExampleNLPObjGrad.cpp

// @HEADER
// ***********************************************************************
// 
// Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
//                  Copyright (2003) Sandia Corporation
// 
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
// 
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
// 
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
// 
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
// Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov)
// 
// ***********************************************************************
// @HEADER

#include <assert.h>

#include <stdexcept>
#include <limits>

#include "NLPInterfacePack_ExampleNLPObjGrad.hpp"
#include "ExampleNLPDirectRTOps.h"
#include "AbstractLinAlgPack_BasisSystemComposite.hpp"
#include "AbstractLinAlgPack_VectorMutable.hpp"
#include "AbstractLinAlgPack_VectorStdOps.hpp"
#include "AbstractLinAlgPack_VectorAuxiliaryOps.hpp"
#include "AbstractLinAlgPack_LinAlgOpPack.hpp"
#include "RTOpPack_RTOpC.hpp"
#include "Teuchos_dyn_cast.hpp"
#include "Teuchos_TestForException.hpp"
#include "Teuchos_AbstractFactoryStd.hpp"

namespace {

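// This file-scope RTOp object wraps the C reduction/transformation operator
// constructed by RTOp_TOp_explnlp2_c_eval_construct() (declared in
// ExampleNLPDirectRTOps.h), which evaluates the example NLP's equality
// constraints element-wise.  It is built once, at static initialization time,
// by the init_rtop_server_t object below and is then applied in
// ExampleNLPObjGrad::imp_calc_c().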
static RTOpPack::RTOpC explnlp2_c_eval_op;

class init_rtop_server_t {
public:
  init_rtop_server_t() {
    TEST_FOR_EXCEPT(0!=RTOp_TOp_explnlp2_c_eval_construct(&explnlp2_c_eval_op.op()));
  }
};
init_rtop_server_t  init_rtop_server;

} // end namespace

namespace NLPInterfacePack {

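// Summary of the example NLP implemented below (see the imp_calc_*()
// implementations for the defining formulas).  With n variables x and
// m = n/2 equality constraints, the problem is
//
//    min   f(x)   = (obj_scale/2) * sum( x(i)^2, for i = 1..n )
//    s.t.  c(x)(j) = x(j) * (x(m+j) - 1) - 10 * x(m+j) = 0, for j = 1..m
//          0.01 <= x(i) <= 20 on one half of the variables
//                             (only when has_bounds == true)
//
// where the dependent variables are x(var_dep()) and the independent
// variables are x(var_indep()).
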
ExampleNLPObjGrad::ExampleNLPObjGrad(
  const VectorSpace::space_ptr_t&  vec_space
  ,value_type                      xo
  ,bool                            has_bounds
  ,bool                            dep_bounded
  )
  :vec_space_(vec_space), vec_space_comp_(Teuchos::null)
  ,initialized_(false), obj_scale_(1.0)
  ,has_bounds_(has_bounds), force_xinit_in_bounds_(true), n_(2*vec_space->dim())
{
  namespace rcp = MemMngPack;

  // Assert the size of the NLP
  TEST_FOR_EXCEPTION(
    vec_space->dim() <= 0, std::logic_error
    ,"ExampleNLPObjGrad::ExampleNLPObjGrad(...) Error!" );

  // Setup the aggregate vector space object
  BasisSystemComposite::initialize_space_x(
    vec_space, vec_space, &var_dep_, &var_indep_, &vec_space_comp_ );

  // Set the initial starting point.
  xinit_ = vec_space_comp_->create_member();
  *xinit_ = xo;

  /*
    Setup the sparse bounds:

    xl(i) = 0.01  \
                    }  for i in bounded_rng
    xu(i) = 20    /
  */
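  // Which half of the variables receives these finite bounds is selected by
  // the dep_bounded constructor argument: either the dependent variables
  // (var_dep_) or the independent variables (var_indep_).  The other half is
  // left unbounded at +-NLP::infinite_bound().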

  xl_ = vec_space_comp_->create_member();
  xu_ = vec_space_comp_->create_member();

  if(has_bounds) {
    const Range1D
      bounded_rng   = ( dep_bounded ? var_dep_   : var_indep_ ),
      unbounded_rng = ( dep_bounded ? var_indep_ : var_dep_   );
    *xl_->sub_view(bounded_rng)   = 0.01;
    *xl_->sub_view(unbounded_rng) = -NLP::infinite_bound();
    *xu_->sub_view(bounded_rng)   = 20.0;
    *xu_->sub_view(unbounded_rng) = +NLP::infinite_bound();
  }
  else {
    *xl_ = -NLP::infinite_bound();
    *xu_ = +NLP::infinite_bound();
  }
}
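
// Illustrative usage sketch (not part of the original source; the object
// names below are hypothetical placeholders).  Given any concrete
// VectorSpace implementation `vec_space` of dimension nD, this NLP has
// n = 2*nD variables and m = nD equality constraints:
//
//   VectorSpace::space_ptr_t vec_space = ...;  // some concrete VectorSpace
//   ExampleNLPObjGrad nlp(
//     vec_space, 0.1 /* xo */, true /* has_bounds */, true /* dep_bounded */ );
//   nlp.initialize(true);
//   // nlp can now be queried directly, e.g. nlp.xinit(), nlp.xl(), nlp.xu(),
//   // nlp.space_x(), or handed to code expecting an NLPObjGrad object.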

// Overridden public members from NLP

void ExampleNLPObjGrad::initialize(bool test_setup)
{
  if( initialized_ ) {
    NLPObjGrad::initialize(test_setup);
    return;
  }

  AbstractLinAlgPack::force_in_bounds( *xl_, *xu_, xinit_.get() );

  NLPObjGrad::initialize(test_setup);

  initialized_ = true;
}

bool ExampleNLPObjGrad::is_initialized() const
{
  return initialized_;
}

size_type ExampleNLPObjGrad::n() const
{
  assert_is_initialized();
  return n_;
}

size_type ExampleNLPObjGrad::m() const
{
  assert_is_initialized();
  return n_ / 2;
}

NLP::vec_space_ptr_t ExampleNLPObjGrad::space_x() const
{
  return vec_space_comp_;
}

NLP::vec_space_ptr_t ExampleNLPObjGrad::space_c() const
{
  return vec_space_;
}

size_type ExampleNLPObjGrad::num_bounded_x() const
{
  return has_bounds_ ? n_/2 : 0;
}

void ExampleNLPObjGrad::force_xinit_in_bounds(bool force_xinit_in_bounds)
{
  force_xinit_in_bounds_ = force_xinit_in_bounds;
}

bool ExampleNLPObjGrad::force_xinit_in_bounds() const
{
  return force_xinit_in_bounds_;
}

const Vector& ExampleNLPObjGrad::xinit() const
{
  assert_is_initialized();
  return *xinit_;
}

const Vector& ExampleNLPObjGrad::xl() const
{
  assert_is_initialized();
  return *xl_;
}

const Vector& ExampleNLPObjGrad::xu() const
{
  assert_is_initialized();
  return *xu_;
}

value_type ExampleNLPObjGrad::max_var_bounds_viol() const
{
  return std::numeric_limits<value_type>::max(); // No limits on the bounds
}

void ExampleNLPObjGrad::scale_f( value_type scale_f )
{
  assert_is_initialized();
  obj_scale_ = scale_f;
}

value_type ExampleNLPObjGrad::scale_f() const
{
  assert_is_initialized();
  return obj_scale_;
}

void ExampleNLPObjGrad::report_final_solution(
  const Vector&    x
  ,const Vector*   lambda
  ,const Vector*   nu
  ,bool            optimal
  )
{
  assert_is_initialized();
  // Do what you want with the solution (or final values) here.
  // For this example we will just ignore it.
}

Range1D ExampleNLPObjGrad::var_dep() const
{
  return var_dep_;
}

Range1D ExampleNLPObjGrad::var_indep() const
{
  return var_indep_;
}

// Overridden protected members from NLP

void ExampleNLPObjGrad::imp_calc_f(const Vector& x, bool newx
  , const ZeroOrderInfo& zero_order_info) const
{
  using AbstractLinAlgPack::dot;
  assert_is_initialized();
  f(); // assert f is set
  TEST_FOR_EXCEPTION( n() != x.dim(), std::length_error, "ExampleNLPObjGrad::imp_calc_f(...)" );
  // f(x) = (obj_scale/2) * sum( x(i)^2, for i = 1..n )
  *zero_order_info.f = obj_scale_ / 2.0 * dot(x,x);
}

void ExampleNLPObjGrad::imp_calc_c(const Vector& x, bool newx
  , const ZeroOrderInfo& zero_order_info) const
{
  assert_is_initialized();
  const size_type n = this->n();
  TEST_FOR_EXCEPTION( n != x.dim(), std::length_error, "ExampleNLPObjGrad::imp_calc_c(...)" );

  // c(x)(j) = x(j) * (x(m+j) - 1) - 10 * x(m+j) = 0, for j = 1...m

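  // Evaluate c(x) by applying the file-scope RTOp operator to the dependent
  // sub-vector xD = x(var_dep()) and the independent sub-vector
  // xI = x(var_indep()), writing the result into *zero_order_info.c.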
  Vector::vec_ptr_t
    xD = x.sub_view(var_dep()),
    xI = x.sub_view(var_indep());

  const Vector*  vecs[]      = { xD.get(), xI.get() };
  VectorMutable* targ_vecs[] = { zero_order_info.c };
  AbstractLinAlgPack::apply_op(explnlp2_c_eval_op,2,vecs,1,targ_vecs,NULL);
}

void ExampleNLPObjGrad::imp_calc_h(
  const Vector& x, bool newx, const ZeroOrderInfo& zero_order_info) const
{
  TEST_FOR_EXCEPT(true); // Should never be called!
}

// Overridden protected members from NLPObjGrad

void ExampleNLPObjGrad::imp_calc_Gf(const Vector& x, bool newx
  , const ObjGradInfo& obj_grad_info) const
{
  assert_is_initialized();
  TEST_FOR_EXCEPTION( n() != x.dim(), std::length_error, "ExampleNLPObjGrad::imp_calc_Gf(...)" );
  // Gf = obj_scale * x, the gradient of f(x) = (obj_scale/2) * sum( x(i)^2 )
  LinAlgOpPack::V_StV(obj_grad_info.Gf,obj_scale_,x);
}

} // end namespace NLPInterfacePack
