MoochoPack_LineSearchWatchDog_Step.cpp

#if 0

// @HEADER
// ***********************************************************************
// 
// Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
//                  Copyright (2003) Sandia Corporation
// 
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
// 
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
// Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 
// 
// ***********************************************************************
// @HEADER

#include <ostream>
#include <typeinfo>

#include "MoochoPack_LineSearchWatchDog_Step.hpp"
#include "MoochoPack_MoochoAlgorithmStepNames.hpp"
#include "MoochoPack_moocho_algo_conversion.hpp"
#include "IterationPack_print_algorithm_step.hpp"
#include "ConstrainedOptPack_MeritFuncCalc1DQuadratic.hpp"
#include "ConstrainedOptPack_MeritFuncCalcNLP.hpp"
#include "ConstrainedOptPack_print_vector_change_stats.hpp"
#include "ConstrainedOptPack/src/VectorWithNorms.h"
#include "AbstractLinAlgPack/src/AbstractLinAlgPack_MatrixOp.hpp"
#include "DenseLinAlgPack_DVectorClass.hpp"
#include "DenseLinAlgPack_DVectorOp.hpp"
#include "DenseLinAlgPack_DVectorOut.hpp"
#include "DenseLinAlgPack_LinAlgOpPack.hpp"

namespace {
  const int NORMAL_LINE_SEARCH = -1;
}
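// watch_k_ encodes the watchdog state:
//   NORMAL_LINE_SEARCH (-1) : perform a standard line search every iteration
//   0 : zeroth watchdog iteration; test the full step for sufficient decrease
//   1 : first watchdog iteration; an increase in the merit function was
//       tolerated last iteration, so a line search is required now and the
//       saved point may have to be restored
//   2 : second watchdog iteration; force a line search, then reset to 0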

namespace LinAlgOpPack {
  using AbstractLinAlgPack::Vp_StMtV;
}

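// Constructor: eta is the fractional decrease used in the sufficient-decrease
// test phi_kp1 <= phi_k + eta * Dphi_k, while opt_kkt_err_threshold and
// feas_kkt_err_threshold give the KKT error levels below which the algorithm
// switches from the normal line search to the watchdog procedure.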
MoochoPack::LineSearchWatchDog_Step::LineSearchWatchDog_Step(
      const direct_line_search_ptr_t& direct_line_search
    , const merit_func_ptr_t&     merit_func
    , value_type            eta
    , value_type            opt_kkt_err_threshold
    , value_type            feas_kkt_err_threshold
    )
  :
      direct_line_search_(direct_line_search)
    , merit_func_(merit_func)
    , eta_(eta)
    , opt_kkt_err_threshold_(opt_kkt_err_threshold)
    , feas_kkt_err_threshold_(feas_kkt_err_threshold)
    , watch_k_(NORMAL_LINE_SEARCH)
{}

bool MoochoPack::LineSearchWatchDog_Step::do_step(Algorithm& _algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss)
{
  using DenseLinAlgPack::norm_inf;
  using DenseLinAlgPack::V_VpV;
  using DenseLinAlgPack::Vp_StV;
  using DenseLinAlgPack::Vt_S;

  using LinAlgOpPack::Vp_V;
  using LinAlgOpPack::V_MtV;

  using ConstrainedOptPack::print_vector_change_stats;

  NLPAlgo &algo = rsqp_algo(_algo);
  NLPAlgoState  &s    = algo.rsqp_state();
  NLP     &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();
  out << std::boolalpha;

  // print step header.
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  // /////////////////////////////////////////
  // Set references to iteration quantities
  //
  // Set k+1 first then go back to get k to ensure
  // we have backward storage.

  DVector
    &x_kp1 = s.x().set_k(+1).v();
  value_type
    &f_kp1 = s.f().set_k(+1);
  DVector
    &c_kp1 = s.c().set_k(+1).v();

  const value_type
    &f_k = s.f().get_k(0);
  const DVector
    &c_k = s.c().get_k(0).v();
  const DVector
    &x_k = s.x().get_k(0).v();
  const DVector
    &d_k = s.d().get_k(0).v();
  value_type
    &alpha_k = s.alpha().get_k(0);

  // /////////////////////////////////////
  // Compute Dphi_k, phi_kp1 and phi_k

  // Dphi_k
  const value_type
    Dphi_k = merit_func().deriv();
  if( Dphi_k >= 0 ) {
    throw LineSearchFailure( "LineSearchWatchDog_Step::do_step(...) : " 
      "Error, d_k is not a descent direction for the merit function " );
  }

  // phi_kp1
  value_type
    &phi_kp1 = s.phi().set_k(+1) = merit_func().value( f_kp1, c_kp1 );

  // Must compute phi(x) at the base point x_k since the penalty parameter may have changed.
  const value_type
    &phi_k = s.phi().set_k(0) = merit_func().value( f_k, c_k );

  // //////////////////////////////////////
  // Setup the calculation merit function

  // Here f_kp1, and c_kp1 are updated at the same time the
  // line search is being performed.
  nlp.set_f( &f_kp1 );
  nlp.set_c( &c_kp1 );
  MeritFuncCalcNLP
    phi_calc( &merit_func(), &nlp );

  // ////////////////////////////////
  // Use Watchdog near the solution

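  // The watchdog procedure tolerates a temporary increase in the merit
  // function so that full steps can be accepted near the solution (cf. the
  // watchdog technique of Chamberlain et al.).  It is activated only once
  // both the optimality and feasibility KKT errors fall below their
  // thresholds.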
  if( watch_k_ == NORMAL_LINE_SEARCH ) {
    const value_type
      opt_kkt_err_k = s.opt_kkt_err().get_k(0),
      feas_kkt_err_k  = s.feas_kkt_err().get_k(0);
    if( opt_kkt_err_k <= opt_kkt_err_threshold() && feas_kkt_err_k <= feas_kkt_err_threshold() ) {
      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        out << "\nopt_kkt_err_k = " << opt_kkt_err_k << " <= opt_kkt_err_threshold = "
            << opt_kkt_err_threshold() << std::endl
          << "\nfeas_kkt_err_k = " << feas_kkt_err_k << " <= feas_kkt_err_threshold = "
            << feas_kkt_err_threshold() << std::endl
          << "\nSwitching to watchdog linesearch ...\n";
      }
      watch_k_ = 0;
    }
  }

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\nTrial point:\n"
      << "phi_k   = " << phi_k << std::endl
      << "Dphi_k  = " << Dphi_k << std::endl
      << "phi_kp1 = " << phi_kp1 << std::endl;
  }

  bool  ls_success = true,
      step_return = true;

  switch( watch_k_ ) {
    case 0:
    {
      // Take a full step
      const value_type phi_cord = phi_k + eta() * Dphi_k;
      const bool accept_step = phi_kp1 <= phi_cord;
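      // phi_cord is the Armijo-type sufficient-decrease target for the full
      // step; the step passes outright only if phi_kp1 <= phi_cord, otherwise
      // the increase is tolerated under watchdog supervision below.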

      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        out << "\n*** Zeroth watchdog iteration:\n"
          << "\nphi_kp1 = " << phi_kp1 << ( accept_step ? " <= " : " > " )
            << "phi_k + eta * Dphi_k = " << phi_cord << std::endl;
      }

      if( phi_kp1 > phi_cord ) {
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "\nAccept this increase for now but watch out next iteration!\n";
        }
        // Save this initial point
        xo_   = x_k;
        fo_   = f_k;
        nrm_co_ = norm_inf( c_k );
        do_   = d_k;
        phio_ = phi_k;
        Dphio_  = Dphi_k;
        phiop1_ = phi_kp1;
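        // The saved quantities above allow the algorithm to return to this
        // point and redo the line search from x_k if the relaxed step does
        // not pay off within the next watchdog iteration.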
        // Skip the update of the penalty parameter
        const value_type mu_k = s.mu().get_k(0);
        s.mu().set_k(+1) = mu_k;
        // Move on to the next step in the watchdog procedure
        watch_k_ = 1;
      }
      else {
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "\nAll is good!\n";
        }
        // watch_k_ stays 0
      }
      step_return = true;
      break;
    }
    case 1:
    {
      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        out << "\n*** First watchdog iteration:\n"
          << "\nDo a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
      }
      // Now do a line search, but only require some type of reduction
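      // phi_calc_1d evaluates phi(x_k + alpha*d_k) along the search
      // direction, writing each trial point into x_kp1 as the line search
      // proceeds.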
      const DVectorSlice xd[2] = { x_k(), d_k() };
      MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
      ls_success = direct_line_search().do_line_search( phi_calc_1d, phi_k
        , &alpha_k, &phi_kp1
        , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ?
          &out : static_cast<std::ostream*>(0)  );

      // If the linesearch failed then the rest of the tests will catch this.

      value_type phi_cord = 0;
      bool test1 = false, test2 = false;

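      // test1: the current base point x_k already improves on the merit value
      //        saved at the start of the watchdog sequence.
      // test2: the line-search point satisfies the sufficient-decrease
      //        condition relative to the saved point (evaluated only when
      //        test1 fails, hence both flags are initialized to false above).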
      if(   ( test1 = ( phi_k <= phio_ ) )
        || ( test2 = phi_kp1 <= ( phi_cord = phio_ + eta() * Dphio_ ) )   )
      {
        // We will accept this step and move on.
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out
            << "\nphi_k = " << phi_k << ( test1 ? " <= " : " > " )
              << "phi_km1 = " << phio_ << std::endl
            << "phi_kp1 = " << phi_kp1 << ( test2 ? " <= " : " > " )
              << "phi_km1 + eta * Dphi_km1 = " << phi_cord << std::endl
00250             << "This is a sufficent reduction so reset watchdog.\n";
        }
        watch_k_ = 0;
        step_return = true;
      }
      else if ( ! ( test1 = ( phi_kp1 <= phio_ ) ) ) {
        // Even this reduction is no good!
        // Go back to original point and do a linesearch from there.
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out
            << "\nphi_kp1 = " << phi_kp1 << " > phi_km1 = " << phio_ << std::endl
00261             << "This is not good reduction in phi so do linesearch from x_km1\n"
00262             << "\n* Go back to x_km1: x_kp1 = x_k - alpha_k * d_k ...\n";
00263         }
00264 
00265         // Go back from x_k to x_km1 for iteration k:
00266         //
00267         // x_kp1 = x_km1
00268         // x_kp1 = x_k - alpha_km1 * d_km1
00269         //
00270         // A negative sign for alpha is an indication that we are backtracking.
00271         //
00272         s.alpha().set_k(0)      = -1.0;
00273         s.d().set_k(0).v()      = do_;
00274         s.f().set_k(+1)       = fo_;
00275 
00276         if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
00277           out << "Output iteration k ...\n"
00278             << "k = k+1\n";
00279         }
00280 
00281         // Output these iteration quantities
00282         algo.track().output_iteration( algo );  // k
00283         // Transition to iteration k+1
00284         s.next_iteration();
00285 
00286         // Take the step from x_k = x_km2 to x_kp1 for iteration k (k+1):
00287         //
00288         // x_kp1 = x_km2 + alpha_n * d_km2
00289         // x_kp1 = x_k   + alpha_n * d_km1
00290         // x_kp1 = x_n
00291         //        
00292         if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
00293           out << "\n* Take the step from x_k = x_km2 to x_kp1 for iteration k (k+1)\n"
00294             << "Find: x_kp1 = x_k + alpha_k * d_k = x_km2 + alpha_k * d_km2\n ...\n";
00295         }
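        // The references below rebind alpha_k, x_k, d_k, Dphi_k and phi_k
        // (shadowing those declared at the top of do_step()) to the data
        // saved when the watchdog sequence started, so that the line search
        // is performed from the original point xo_ along do_.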

        // alpha_k = 1.0
        value_type &alpha_k = s.alpha().set_k(0) = 1.0;

        // /////////////////////////////////////
        // Compute Dphi_k and phi_k

        // x_k
        const DVector &x_k                = xo_;

        // d_k
        const DVector &d_k = s.d().set_k(0).v()     = do_;

        // Dphi_k
        const value_type &Dphi_k            = Dphio_;

        // phi_k
        const value_type &phi_k = s.phi().set_k(0)    = phio_;

        // Here f_kp1, and c_kp1 are updated at the same time the
        // line search is being performed.
        algo.nlp().set_f( &s.f().set_k(+1) );
        algo.nlp().set_c( &s.c().set_k(+1).v() );
        phi_calc.set_nlp( algo.get_nlp() );

        // ////////////////////////////////////////
        // Compute x_kp1 and phi_kp1 for the full step

        // x_kp1 = x_k + alpha_k * d_k
        DVector &x_kp1 = s.x().set_k(+1).v();
        V_VpV( &x_kp1, x_k, d_k );

        // phi_kp1
        value_type &phi_kp1 = s.phi().set_k(+1)     = phiop1_;

        const DVectorSlice xd[2] = { x_k(), d_k() };
        MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
        ls_success = direct_line_search().do_line_search(
            phi_calc_1d, phi_k
          , &alpha_k, &phi_kp1
          , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ?
            &out : static_cast<std::ostream*>(0)  );

        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "\nOutput iteration k (k+1) ...\n"
            << "k = k+1 (k+2)\n"
            << "Reinitialize watchdog algorithm\n";
        }

        // Output these iteration quantities
        algo.track().output_iteration( algo );  // (k+1)
        // Transition to iteration k+1 (k+2)
        s.next_iteration();

        watch_k_ = 0; // Reinitialize the watchdog

        // Any update for k (k+2) should use the last updated value
        // which was for k-2 (k) since there is not much info for k-1 (k+1).
        // Be careful here and make sure this is square with other steps.

        algo.do_step_next( EvalNewPoint_name );
        step_return = false;  // Redirect control
      }
      else {
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out
            << "phi_kp1 = " << phi_kp1 << " <= phi_km1 = " << phio_ << std::endl
            << "\nAccept this step but do a linesearch next iteration!\n";
        }
        // Skip the update of the penalty parameter
        const value_type mu_k = s.mu().get_k(0);
        s.mu().set_k(+1) = mu_k;
        // Do the last stage of the watchdog procedure next iteration.
        watch_k_ = 2;
        step_return = true;
      }
      break;
    }
    case NORMAL_LINE_SEARCH:
    case 2:
    {
      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        if( watch_k_ == 2 ) {
          out << "\n*** Second watchdog iteration:\n"
            << "Do a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
        }
        else {
          out << "\n*** Normal linesearch:\n"
            << "Do a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
        }
      }

      const DVectorSlice xd[2] = { x_k(), d_k() };
      MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
      ls_success = direct_line_search().do_line_search( phi_calc_1d, phi_k
        , &alpha_k, &phi_kp1
        , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ?
          &out : static_cast<std::ostream*>(0)  );

      if( watch_k_ == 2 )
        watch_k_ = 0;

      step_return = true;
      break;
    }
    default:
      TEST_FOR_EXCEPT(true);  // Only local programming error
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\nalpha    = " << s.alpha().get_k(0) << "\n";
    out << "\nphi_kp1 = " << s.phi().get_k(+1) << "\n";
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nd_k = \n" << s.d().get_k(0)();
    out << "\nx_kp1 = \n" << s.x().get_k(+1)();
  }

  if( !ls_success )
    throw LineSearchFailure("LineSearchWatchDog_Step::do_step(): Line search failure");

  return step_return;

}

void MoochoPack::LineSearchWatchDog_Step::print_step( const Algorithm& algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss
  , std::ostream& out, const std::string& L ) const
{
  out << L << "*** Use the Watchdog linesearch when near solution.\n"
    << L << "default: opt_kkt_err_threshold = 0.0\n"
    << L << "         feas_kkt_err_threshold = 0.0\n"
    << L << "         eta = 1.0e-4\n"
    << L << "         watch_k = NORMAL_LINE_SEARCH\n"
    << L << "begin definition of NLP merit function phi.value(f(x),c(x)):\n";

  merit_func().print_merit_func( out, L + "    " );

  out << L << "end definition\n"
    << L << "Dphi_k = phi.deriv()\n"
    << L << "if Dphi_k >= 0 then\n"
    << L << "    throw line_search_failure\n"
    << L << "end\n"
00440     << L << "phi_kp1 = phi_k.value(f_kp1,c_kp1)\n"
00441     << L << "phi_k = phi.value(f_k,c_k)\n"
00442     << L << "if watch_k == NORMAL_LINE_SEARCH then\n"
00443     << L << "    if opt_kkt_err <= opt_kkt_err_threshold\n"
00444     << L << "      and feas_kkt_err <= feas_kkt_err_threshold then\n"
00445     << L << "        *** Start using watchdog from now on!\n"
00446     << L << "        watch_k = 0\n"
00447     << L << "    end\n"
00448     << L << "end\n"
00449     << L << "if watch_k == 0 then\n"
00450     << L << "    *** Zeroth watchdog iteration\n"
00451     << L << "    if phi_kp1 >= phi_k + eta * Dphi_k then\n"
00452     << L << "        *** Accept this increase for now but watch out next iteration!\n"
00453     << L << "        *** Save the first point\n"
00454     << L << "        xo     = x_k\n"
00455     << L << "        fo     = f_k\n"
00456     << L << "        nrm_co = norm_inf_c_k\n"
00457     << L << "        do     = d_k\n"
00458     << L << "        phio   = phi_k\n"
00459     << L << "        Dphio  = Dphi_k\n"
00460     << L << "        phiop1 = phi_kp1\n"
00461     << L << "        *** Skip the update of the penalty parameter next iteration.\n"
00462     << L << "        mu_kp1 = mu_k\n"
00463     << L << "        *** Continue with next step in watchdog\n"
00464     << L << "        watch_k = 1\n"
00465     << L << "    else\n"
00466     << L << "        *** This is a good step so take it!\n"
00467     << L << "    end\n"
00468     << L << "else if watch_k == 1 then\n"
00469     << L << "    *** First watchdog iteration\n"
00470     << L << "    Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
00471     << L << "        -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
00472     << L << "    if ( phi_k <= phio ) or ( phi_kp1 <= phio + eta * Dphio ) then\n"
00473     << L << "        *** We will accept this step and reinitialize the watchdog\n"
00474     << L << "        watch_k = 0\n"
00475     << L << "    else if ( phi_kp1 > phio ) then\n"
00476     << L << "        *** This reduction is no good!\n"
00477     << L << "        *** Go back from x_k to x_km1 for this iteration (k)\n"
00478     << L << "        alpha_k        = -1.0\n"
00479     << L << "        d_k            = do\n"
00480     << L << "        f_kp1          = fo\n"
00481     << L << "        Output this iteration (k)\n"
00482     << L << "        k = k+1\n"
00483     << L << "        *** Go from x_k = x_km2 to x_kp1 for this iteration (k+1)\n"
00484     << L << "        alpha_k        = 1\n"
00485     << L << "        x_k            = xo\n"
00486     << L << "        d_k            = do\n"
00487     << L << "        Dphi_k         = Dphio\n"
00488     << L << "        phi_k          = phio\n"
00489     << L << "        x_kp1          = x_k + d_k\n"
00490     << L << "        phi_kp1        = phiop1\n"
00491     << L << "        Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
00492     << L << "            -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
00493     << L << "        Output this iteration (k+1)\n"
00494     << L << "        k = k+1\n"
00495     << L << "        *** Any updates for k (k+2) should use the last updated value\n"
00496     << L << "        *** which was for k-2 (k) since there is not much info for k-1 (k+1).\n"
00497     << L << "        *** Be careful here and make sure this works with other steps.\n"
00498     << L << "        goto EvalNewPoint\n"
00499     << L << "    else\n"
00500     << L << "        *** Accept this reduction but do a linesearch next iteration!\n"
00501     << L << "        *** Skip the update of the penalty parameter next iteration.\n"
00502     << L << "        mu_kp1 = mu_k\n"
00503     << L << "        *** Continue with next step in watchdog\n"
00504     << L << "        watch_k = 2\n"
00505     << L << "    end\n"
00506     << L << "else if ( watch_k == 2 ) then\n"
00507     << L << "    *** Second watchdog iteration\n"
00508     << L << "    Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
00509     << L << "        -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
00510     << L << "    *** Reset the watchdog algorithm\n"
00511     << L << "    watch_k = 1\n"
00512     << L << "else if ( watch_k == NORMAL_LINE_SEARCH ) then\n"
00513     << L << "    Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
00514     << L << "        -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
00515     << L << "    begin direct line search : \""
00516         << typeName(direct_line_search()) << "\"\n";
00517 
00518   direct_line_search().print_algorithm( out, L + "    " );
00519 
00520   out
00521     << L << "    end direct line search\n"
00522     << L << "end\n"
00523     << L << "if maximum number of linesearch iterations are exceeded then\n"
00524     << L << "    throw line_search_failure\n"
00525     << L << "end\n";
00526 }
00527 
00528 #endif // 0
