MoochoPack_CheckConvergenceStd_Strategy.cpp

00001 // @HEADER
00002 // ***********************************************************************
00003 // 
00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
00005 //                  Copyright (2003) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // This library is free software; you can redistribute it and/or modify
00011 // it under the terms of the GNU Lesser General Public License as
00012 // published by the Free Software Foundation; either version 2.1 of the
00013 // License, or (at your option) any later version.
00014 //  
00015 // This library is distributed in the hope that it will be useful, but
00016 // WITHOUT ANY WARRANTY; without even the implied warranty of
00017 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018 // Lesser General Public License for more details.
00019 //  
00020 // You should have received a copy of the GNU Lesser General Public
00021 // License along with this library; if not, write to the Free Software
00022 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00023 // USA
00024 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 
00025 // 
00026 // ***********************************************************************
00027 // @HEADER
00028 
00029 #include <assert.h>
00030 
00031 #include <ostream>
00032 #include <limits>
00033 #include <sstream>
00034 
00035 #include "MoochoPack_CheckConvergenceStd_Strategy.hpp"
00036 #include "MoochoPack_NLPAlgoContainer.hpp"
00037 #include "MoochoPack_Exceptions.hpp"
00038 #include "MoochoPack_moocho_algo_conversion.hpp"
00039 #include "IterationPack_print_algorithm_step.hpp"
00040 #include "AbstractLinAlgPack_MatrixOpOut.hpp"
00041 #include "AbstractLinAlgPack_VectorMutable.hpp"
00042 #include "AbstractLinAlgPack_VectorAuxiliaryOps.hpp"
00043 #include "AbstractLinAlgPack_VectorStdOps.hpp"
00044 #include "AbstractLinAlgPack_VectorOut.hpp"
00045 #include "AbstractLinAlgPack_assert_print_nan_inf.hpp"
00046 #include "AbstractLinAlgPack_LinAlgOpPack.hpp"
00047 #include "Teuchos_dyn_cast.hpp"
00048 #include "Teuchos_TestForException.hpp"
00049 
00050 namespace MoochoPack {
00051 
00052 CheckConvergenceStd_Strategy::CheckConvergenceStd_Strategy(
00053   EOptErrorCheck         opt_error_check
00054   ,EScaleKKTErrorBy      scale_opt_error_by
00055   ,EScaleKKTErrorBy      scale_feas_error_by
00056   ,EScaleKKTErrorBy      scale_comp_error_by
00057   ,bool                  scale_opt_error_by_Gf
00058   )
00059   :
00060   CheckConvergence_Strategy(
00061     opt_error_check,
00062     scale_opt_error_by,
00063     scale_feas_error_by,
00064     scale_comp_error_by,
00065     scale_opt_error_by_Gf
00066     )
00067   {}
00068 
/** Check whether the current iterate satisfies all convergence criteria.
 *
 * Computes four error measures from the iteration state:
 *  - opt_err:  ||rGL_k||inf or ||GL_k||inf (selected by opt_error_check()),
 *              divided by opt_scale_factor = 1 + ||Gf_k||inf (optional)
 *  - feas_err: ||c_k||inf (0.0 when there are no equality constraints, m==0)
 *  - comp_err: combined complementarity error for the bound multipliers nu
 *  - step_err: max relative step of d_k w.r.t. x_k (0.0 if d_k not updated)
 * The first three are each divided by a configurable scaling factor, stored
 * back into the state's (opt|feas|comp)_kkt_err iteration quantities, and
 * then all four are compared against the algorithm container's tolerances.
 *
 * @param _algo  [in/out] The algorithm object (must really be an NLPAlgo).
 *
 * @return true if all four scaled error measures are below their tolerances.
 */
bool CheckConvergenceStd_Strategy::Converged(
  Algorithm& _algo
  )
  {
  using AbstractLinAlgPack::assert_print_nan_inf;
  using AbstractLinAlgPack::combined_nu_comp_err;
  
  NLPAlgo        &algo = rsqp_algo(_algo);
  NLPAlgoState   &s    = algo.rsqp_state();
  NLP            &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  // Problem dimensions: n = number of variables, m = number of equality
  // constraints, nb = number of variables with finite bounds.
  const size_type
    n  = nlp.n(),
    m  = nlp.m(),
    nb = nlp.num_bounded_x();

  // Get the iteration quantities
  IterQuantityAccess<value_type>
    &opt_kkt_err_iq  = s.opt_kkt_err(),
    &feas_kkt_err_iq = s.feas_kkt_err(),
      &comp_kkt_err_iq = s.comp_kkt_err();
  
  // Quantities that only exist when there are degrees of freedom (n > m)
  // or equality constraints (m > 0) are accessed through pointers that stay
  // NULL otherwise.
  IterQuantityAccess<VectorMutable>
    &x_iq       = s.x(),
    &d_iq       = s.d(),
    &Gf_iq      = s.Gf(),
    *c_iq       = m     ? &s.c()      : NULL,
    *rGL_iq     = n > m ? &s.rGL()    : NULL,
    *GL_iq      = n > m ? &s.GL()     : NULL,
    *nu_iq      = n > m ? &s.nu()     : NULL;

  // opt_err = (||rGL||inf or ||GL||) / (||Gf|| + scale_kkt_factor)
  value_type 
    norm_inf_Gf_k = 0.0,
    norm_inf_GLrGL_k = 0.0;

  // Only scale by ||Gf_k||inf when the option is on and Gf has actually
  // been computed this iteration; otherwise opt_scale_factor stays 1.0.
  if( n > m && scale_opt_error_by_Gf() && Gf_iq.updated_k(0) ) {
    assert_print_nan_inf(
      norm_inf_Gf_k = Gf_iq.get_k(0).norm_inf(),
      "||Gf_k||inf",true,&out
      );
  }

  // NOTE:
  // The strategy object CheckConvergenceIP_Strategy assumes
  // that this will always be the gradient of the lagrangian
  // of the original problem, not the gradient of the lagrangian
  // for psi. (don't use augmented nlp info here)
  if( n > m ) {
    if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR ) {
      assert_print_nan_inf( norm_inf_GLrGL_k = rGL_iq->get_k(0).norm_inf(),
                  "||rGL_k||inf",true,&out);
    }
    else {
      assert_print_nan_inf( norm_inf_GLrGL_k = GL_iq->get_k(0).norm_inf(),
                  "||GL_k||inf",true,&out);
    }
  }

  const value_type
    opt_scale_factor = 1.0 + norm_inf_Gf_k,
    opt_err = norm_inf_GLrGL_k / opt_scale_factor;
  
  // feas_err
  const value_type feas_err = ( ( m ? c_iq->get_k(0).norm_inf() : 0.0 ) );

  // comp_err
  value_type comp_err = 0.0;
  if ( n > m ) {
    if (nb > 0) {
      // Complementarity error over the bound multipliers nu_k:
      // max over i of nu(i)*(xu(i)-x(i)) and -nu(i)*(x(i)-xl(i)).
      comp_err = combined_nu_comp_err(nu_iq->get_k(0), x_iq.get_k(0), nlp.xl(), nlp.xu());
    }
    if(m) {
      assert_print_nan_inf( feas_err,"||c_k||inf",true,&out);
    }
  }

  // scaling factors
  const value_type 
    scale_opt_factor = CalculateScalingFactor(s, scale_opt_error_by()),
    scale_feas_factor = CalculateScalingFactor(s, scale_feas_error_by()),
    scale_comp_factor = CalculateScalingFactor(s, scale_comp_error_by());
  
  // kkt_err
  const value_type
    opt_kkt_err_k  = opt_err/scale_opt_factor,
     feas_kkt_err_k = feas_err/scale_feas_factor,
    comp_kkt_err_k = comp_err/scale_comp_factor;

  // update the iteration quantities
  if(n > m) opt_kkt_err_iq.set_k(0) = opt_kkt_err_k;
  feas_kkt_err_iq.set_k(0) = feas_kkt_err_k;
  comp_kkt_err_iq.set_k(0) = comp_kkt_err_k;

  // step_err = max( |d_k(i)| / max(1,|x_k(i)|) ) if a step d_k was computed
  // this iteration; 0.0 otherwise (e.g. on the very first iteration).
  value_type step_err = 0.0;
  if( d_iq.updated_k(0) ) {
    step_err = AbstractLinAlgPack::max_rel_step(x_iq.get_k(0),d_iq.get_k(0));
    assert_print_nan_inf( step_err,"max(d(i)/max(1,x(i)),i=1...n)",true,&out);
  }

  const value_type
    opt_tol   = algo.algo_cntr().opt_tol(),
    feas_tol  = algo.algo_cntr().feas_tol(),
    comp_tol  = algo.algo_cntr().comp_tol(),
    step_tol  = algo.algo_cntr().step_tol();
  
  // Converged only when ALL four measures are below their tolerances.
  const bool found_solution = 
    opt_kkt_err_k < opt_tol 
    && feas_kkt_err_k < feas_tol 
    && comp_kkt_err_k < comp_tol 
    && step_err < step_tol;
  
  // Journal the detailed error breakdown when verbose output is requested,
  // or (at basic output level) once a solution has been found.
  if( int(olevel) >= int(PRINT_ALGORITHM_STEPS) || (int(olevel) >= int(PRINT_BASIC_ALGORITHM_INFO) && found_solution) )
  {
    out 
      << "\nscale_opt_factor = " << scale_opt_factor
      << " (scale_opt_error_by = " << (scale_opt_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
                       : (scale_opt_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
                        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_feas_factor = " << scale_feas_factor
      << " (scale_feas_error_by = " << (scale_feas_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
                       : (scale_feas_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
                        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_comp_factor = " << scale_comp_factor
      << " (scale_comp_error_by = " << (scale_comp_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
                       : (scale_comp_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
                        : "SCALE_BY_NORM_2_X" ) ) << ")"
      << "\nopt_scale_factor = " << opt_scale_factor
      << " (scale_opt_error_by_Gf = " << (scale_opt_error_by_Gf()?"true":"false") << ")"
      << "\nopt_kkt_err_k    = " << opt_kkt_err_k << ( opt_kkt_err_k < opt_tol ? " < " : " > " )
      << "opt_tol  = " << opt_tol
      << "\nfeas_kkt_err_k   = " << feas_kkt_err_k << ( feas_kkt_err_k < feas_tol ? " < " : " > " )
      << "feas_tol = " << feas_tol
      << "\ncomp_kkt_err_k   = " << comp_kkt_err_k << ( comp_kkt_err_k < comp_tol ? " < " : " > " )
      << "comp_tol = " << comp_tol
      << "\nstep_err         = " << step_err << ( step_err < step_tol ? " < " : " > " )
      << "step_tol = " << step_tol
      << std::endl;
    }
  
  return found_solution;

  }
00218 
00219 void CheckConvergenceStd_Strategy::print_step( const Algorithm& _algo, std::ostream& out, const std::string& L ) const
00220   {
00221   out
00222     << L << "*** Check to see if the KKT error is small enough for convergence\n"
00223     << L << "if scale_(opt|feas|comp)_error_by == SCALE_BY_ONE then\n"
00224     << L << "    scale_(opt|feas|comp)_factor = 1.0\n"
00225     << L << "else if scale_(opt|feas|comp)_error_by == SCALE_BY_NORM_2_X then\n"
00226     << L << "    scale_(opt|feas|comp)_factor = 1.0 + norm_2(x_k)\n"
00227     << L << "else if scale_(opt|feas|comp)_error_by == SCALE_BY_NORM_INF_X then\n"
00228     << L << "    scale_(opt|feas|comp)_factor = 1.0 + norm_inf(x_k)\n"
00229     << L << "end\n"
00230     << L << "if scale_opt_error_by_Gf == true then\n"
00231     << L << "    opt_scale_factor = 1.0 + norm_inf(Gf_k)\n"
00232     << L << "else\n"
00233     << L << "    opt_scale_factor = 1.0\n"
00234     << L << "end\n";
00235   if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR )
00236     {
00237     out
00238       << L << "opt_err = norm_inf(rGL_k)/opt_scale_factor\n";
00239     }
00240   else
00241     {
00242     out
00243       << L << "opt_err = norm_inf(GL_k)/opt_scale_factor\n";
00244     }
00245 
00246   out
00247     << L << "feas_err = norm_inf(c_k)\n"
00248     << L << "comp_err = max(i, nu(i)*(xu(i)-x(i)), -nu(i)*(x(i)-xl(i)))\n"
00249     << L << "opt_kkt_err_k = opt_err/scale_opt_factor\n"
00250     << L << "feas_kkt_err_k = feas_err/scale_feas_factor\n"
00251     << L << "comp_kkt_err_k = feas_err/scale_comp_factor\n"
00252     << L << "if d_k is updated then\n"
00253     << L << "    step_err = max( |d_k(i)|/(1+|x_k(i)|), i=1..n )\n"
00254     << L << "else\n"
00255     << L << "    step_err = 0\n"
00256     << L << "end\n"
00257     << L << "if opt_kkt_err_k < opt_tol\n"
00258     << L << "       and feas_kkt_err_k < feas_tol\n"
00259     << L << "       and step_err < step_tol then\n"
00260     << L << "   report optimal x_k, lambda_k and nu_k to the nlp\n"
00261     << L << "   terminate, the solution has beed found!\n"
00262     << L << "end\n";
00263   }
00264 
00265 
00266 value_type CheckConvergenceStd_Strategy::CalculateScalingFactor( NLPAlgoState& state, EScaleKKTErrorBy scale_by ) const
00267   {
00268   // scale_kkt_factor
00269   value_type scale_factor = 1.0;
00270   switch(scale_by) 
00271     {
00272     case SCALE_BY_ONE:
00273       scale_factor = 1.0;
00274       break;
00275     case SCALE_BY_NORM_2_X:
00276       scale_factor = 1.0 + state.x().get_k(0).norm_2();
00277       break;
00278     case SCALE_BY_NORM_INF_X:
00279       scale_factor = 1.0 + state.x().get_k(0).norm_inf();
00280       break;
00281     default:
00282       TEST_FOR_EXCEPT(true);  // Should never be called
00283     }
00284 
00285   return scale_factor;
00286   }
00287 
00288 } // end namespace MoochoPack
00289 

Generated on Tue Jul 13 09:29:31 2010 for MoochoPack : Framework for Large-Scale Optimization Algorithms by  doxygen 1.4.7