MoochoPack_NLPAlgoConfigMamaJama.cpp

00001 // @HEADER
00002 // ***********************************************************************
00003 // 
00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
00005 //                  Copyright (2003) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // This library is free software; you can redistribute it and/or modify
00011 // it under the terms of the GNU Lesser General Public License as
00012 // published by the Free Software Foundation; either version 2.1 of the
00013 // License, or (at your option) any later version.
00014 //  
00015 // This library is distributed in the hope that it will be useful, but
00016 // WITHOUT ANY WARRANTY; without even the implied warranty of
00017 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018 // Lesser General Public License for more details.
00019 //  
00020 // You should have received a copy of the GNU Lesser General Public
00021 // License along with this library; if not, write to the Free Software
00022 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00023 // USA
00024 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 
00025 // 
00026 // ***********************************************************************
00027 // @HEADER
00028 
00029 #include <assert.h>
00030 
00031 #include <sstream>
00032 #include <typeinfo>
00033 #include <iostream>
00034 
00035 #include "MoochoPack_NLPAlgoConfigMamaJama.hpp"
00036 #include "MoochoPack_NLPAlgo.hpp"
00037 #include "MoochoPack_NLPAlgoContainer.hpp"
00038 #include "IterationPack_AlgorithmSetOptions.hpp"
00039 #include "AbstractLinAlgPack_MatrixSymPosDefCholFactor.hpp"                     // rHL 
00040 //#include "ConstrainedOptPack_MatrixSymPosDefInvCholFactor.hpp"    // .
00041 #include "ConstrainedOptPack_MatrixSymPosDefLBFGS.hpp"        // .
00042 //#include "ConstrainedOptPack_MatrixHessianSuperBasicInitDiagonal.hpp"  // | rHL (super basics)
00043 //#include "AbstractLinAlgPack/src/AbstractLinAlgPack_MatrixSymDiagStd.hpp"                          // |
00044 
00045 #include "ConstrainedOptPack_VariableBoundsTester.hpp"
00046 
00047 #include "NLPInterfacePack_NLPDirect.hpp"
00048 
00049 #include "NLPInterfacePack_CalcFiniteDiffProd.hpp"
00050 #include "NLPInterfacePack_NLPVarReductPerm.hpp"
00051 
00052 // line search
00053 #include "ConstrainedOptPack_DirectLineSearchArmQuad_Strategy.hpp"
00054 #include "ConstrainedOptPack_DirectLineSearchArmQuad_StrategySetOptions.hpp"
00055 #include "ConstrainedOptPack_MeritFuncNLPL1.hpp"
00056 #include "ConstrainedOptPack_MeritFuncNLPModL1.hpp"
00057 
00058 // Basis permutations and direct sparse solvers
00059 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00060 #include "ConstrainedOptPack_DecompositionSystemVarReductPerm.hpp"
00061 #endif
00062 
00063 #include "ConstrainedOptPack_QPSolverRelaxedTester.hpp"
00064 #include "ConstrainedOptPack_QPSolverRelaxedTesterSetOptions.hpp"
00065 #include "ConstrainedOptPack_QPSolverRelaxedQPSchur.hpp"
00066 #include "ConstrainedOptPack_QPSolverRelaxedQPSchurSetOptions.hpp"
00067 #include "ConstrainedOptPack_QPSchurInitKKTSystemHessianFull.hpp"
00068 //#include "ConstrainedOptPack_QPSchurInitKKTSystemHessianSuperBasic.hpp"
00069 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
00070 #include "ConstrainedOptPack_QPSolverRelaxedQPKWIK.hpp"
00071 #endif
00072 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
00073 //#include "ConstrainedOptPack_QPSolverRelaxedQPOPT.hpp"
00074 #endif
00075 
00076 #include "MoochoPack_MoochoAlgorithmStepNames.hpp"
00077 
00078 /*
00079 #include "MoochoPack_EvalNewPointStd_StepSetOptions.hpp"
00080 #include "MoochoPack_EvalNewPointTailoredApproach_StepSetOptions.hpp"
00081 #include "MoochoPack_EvalNewPointTailoredApproachCoordinate_Step.hpp"
00082 #include "MoochoPack_EvalNewPointTailoredApproachOrthogonal_Step.hpp"
00083 */
00084 
00085 #include "MoochoPack_ReducedGradientStd_Step.hpp"
00086 #include "MoochoPack_InitFinDiffReducedHessian_Step.hpp"
00087 #include "MoochoPack_InitFinDiffReducedHessian_StepSetOptions.hpp"
00088 #include "MoochoPack_ReducedHessianSerialization_Step.hpp"
00089 #include "MoochoPack_ReducedHessianSerialization_StepSetOptions.hpp"
00090 #include "MoochoPack_ReducedHessianSecantUpdateStd_Step.hpp"
00091 #include "MoochoPack_ReducedHessianSecantUpdateBFGSFull_Strategy.hpp"
00092 
00093 
00094 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_Strategy.hpp"
00095 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_StrategySetOptions.hpp"
00096 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_Strategy.hpp"
00097 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_StrategySetOptions.hpp"
00098 #include "MoochoPack_BFGSUpdate_Strategy.hpp"
00099 #include "MoochoPack_BFGSUpdate_StrategySetOptions.hpp"
00100 #include "MoochoPack_QuasiNormalStepStd_Step.hpp"
00101 #include "MoochoPack_CheckDescentQuasiNormalStep_Step.hpp"
00102 #include "MoochoPack_CheckDecompositionFromPy_Step.hpp"
00103 #include "MoochoPack_CheckDecompositionFromRPy_Step.hpp"
00104 #include "MoochoPack_TangentialStepWithoutBounds_Step.hpp"
00105 #include "MoochoPack_TangentialStepWithInequStd_Step.hpp"
00106 #include "MoochoPack_TangentialStepWithInequStd_StepSetOptions.hpp"
00107 #include "MoochoPack_SetDBoundsStd_AddedStep.hpp"
00108 #include "MoochoPack_QPFailureReinitReducedHessian_Step.hpp"
00109 #include "MoochoPack_CalcDFromYPYZPZ_Step.hpp"
00110 #include "MoochoPack_CalcDFromYPY_Step.hpp"
00111 #include "MoochoPack_CalcDFromZPZ_Step.hpp"
00112 #include "MoochoPack_LineSearchFailureNewDecompositionSelection_Step.hpp"
00113 #include "MoochoPack_LineSearchFilter_Step.hpp"
00114 #include "MoochoPack_LineSearchFilter_StepSetOptions.hpp"
00115 #include "MoochoPack_LineSearchFullStep_Step.hpp"
00116 #include "MoochoPack_LineSearchDirect_Step.hpp"
00117 #include "MoochoPack_LineSearchNLE_Step.hpp"
00118 //#include "MoochoPack_LineSearch2ndOrderCorrect_Step.hpp"
00119 //#include "MoochoPack_LineSearch2ndOrderCorrect_StepSetOptions.hpp"
00120 //#include "MoochoPack_FeasibilityStepReducedStd_Strategy.hpp"
00121 //#include "MoochoPack_FeasibilityStepReducedStd_StrategySetOptions.hpp"
00122 //#include "MoochoPack_QuasiRangeSpaceStepStd_Strategy.hpp"
00123 //#include "MoochoPack_QuasiRangeSpaceStepTailoredApproach_Strategy.hpp"
00124 //#include "MoochoPack_LineSearchWatchDog_Step.hpp"
00125 //#include "MoochoPack_LineSearchWatchDog_StepSetOptions.hpp"
00126 //#include "MoochoPack_LineSearchFullStepAfterKIter_Step.hpp"
00127 //#include "MoochoPack_CalcLambdaIndepStd_AddedStep.hpp"
00128 #include "MoochoPack_CalcReducedGradLagrangianStd_AddedStep.hpp"
00129 #include "MoochoPack_CheckConvergenceStd_AddedStep.hpp"
00130 #include "MoochoPack_CheckConvergenceStd_Strategy.hpp"
00131 #include "MoochoPack_CheckSkipBFGSUpdateStd_StepSetOptions.hpp"
00132 #include "MoochoPack_MeritFunc_DummyUpdate_Step.hpp"
00133 #include "MoochoPack_MeritFunc_PenaltyParamUpdate_AddedStepSetOptions.hpp"
00134 #include "MoochoPack_MeritFunc_PenaltyParamUpdateMultFree_AddedStep.hpp"
00135 //#include "MoochoPack_MeritFunc_PenaltyParamUpdateWithMult_AddedStep.hpp"
00136 //#include "MoochoPack_MeritFunc_PenaltyParamsUpdateWithMult_AddedStep.hpp"
00137 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStep.hpp"
00138 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStepSetOptions.hpp"
00139 //#include "MoochoPack_ActSetStats_AddedStep.hpp"
00140 //#include "MoochoPack_NumFixedDepIndep_AddedStep.hpp"
00141 
00142 #include "MoochoPack_act_set_stats.hpp"
00143 #include "MoochoPack_qp_solver_stats.hpp"
00144 #include "MoochoPack_quasi_newton_stats.hpp"
00145 
00146 //#include "AbstractLinAlgPack_sparse_bounds.hpp"
00147 
00148 // Misc utilities
00149 #include "Teuchos_AbstractFactoryStd.hpp"
00150 #include "Teuchos_dyn_cast.hpp"
00151 #include "ReleaseResource_ref_count_ptr.hpp"
00152 #include "Teuchos_TestForException.hpp"
00153 
00154 // Stuff to read in options
00155 #include "OptionsFromStreamPack_StringToIntMap.hpp"
00156 #include "OptionsFromStreamPack_StringToBool.hpp"
00157 
00158 // Stuff for exact reduced hessian
00159 //#include "MoochoPack_ReducedHessianExactStd_Step.hpp"
00160 //#include "MoochoPack_CrossTermExactStd_Step.hpp"
00161 //#include "MoochoPack_DampenCrossTermStd_Step.hpp"
00162 
namespace {
  // Sentinel treated as "infinite": the CheckDecompositionFromPy/RPy steps are
  // only created when cov_.max_basis_cond_change_frac_ is strictly below this
  // value (see the check-and-change-decomposition setup in config_algo_cntr()).
  const double INF_BASIS_COND_CHANGE_FRAC      = 1e+20;
}
00166 
00167 namespace MoochoPack {
00168 
00169 //
00170 // Here is where we define the default values for the algorithm.  These
00171 // should agree with what are in the Moocho.opt.NLPAlgoConfigMamaJama file.
00172 //
00173 NLPAlgoConfigMamaJama::SOptionValues::SOptionValues()
00174   :max_basis_cond_change_frac_(-1.0)
00175   ,exact_reduced_hessian_(false)
00176   ,quasi_newton_(QN_AUTO)
00177   ,num_lbfgs_updates_stored_(-1)
00178   ,lbfgs_auto_scaling_(true)
00179   ,hessian_initialization_(INIT_HESS_AUTO)
00180   ,qp_solver_type_(QP_AUTO)
00181   ,reinit_hessian_on_qp_fail_(true)
00182   ,line_search_method_(LINE_SEARCH_AUTO)
00183   ,merit_function_type_(MERIT_FUNC_AUTO)
00184   ,l1_penalty_param_update_(L1_PENALTY_PARAM_AUTO)
00185   ,full_steps_after_k_(-1)
00186 {}
00187 
// Default construction leaves options_ unset; config_algo_cntr() then warns
// that no OptionsFromStream object was set and uses a default set of options.
NLPAlgoConfigMamaJama::NLPAlgoConfigMamaJama()
{}
00190 
// Nothing to release explicitly; members such as the options_ smart pointer
// clean themselves up.
NLPAlgoConfigMamaJama::~NLPAlgoConfigMamaJama()
{}
00193 
00194 // overridden from NLPAlgoConfig
00195 
00196 void NLPAlgoConfigMamaJama::set_options( const options_ptr_t& options )
00197 {
00198   options_ = options;
00199   decomp_sys_step_builder_.set_options(options);
00200 }
00201 
// Return the options object previously passed to set_options() (the smart
// pointer may be unset/NULL if no options were ever given).
const NLPAlgoConfig::options_ptr_t&
NLPAlgoConfigMamaJama::get_options() const
{
  return options_;
}
00207 
00208 void NLPAlgoConfigMamaJama::config_algo_cntr(
00209   NLPAlgoContainer   *algo_cntr
00210   ,std::ostream       *trase_out
00211   )
00212 {
00213   using Teuchos::RefCountPtr;
00214   using Teuchos::dyn_cast;
00215 
00216   if(trase_out) {
00217     *trase_out
00218       << std::endl
00219       << "*****************************************************************\n"
00220       << "*** NLPAlgoConfigMamaJama Configuration                       ***\n"
00221       << "***                                                           ***\n"
00222       << "*** Here, summary information about how the algorithm is      ***\n"
00223       << "*** configured is printed so that the user can see how the    ***\n"
00224       << "*** properties of the NLP and the set options influence       ***\n"
00225       << "*** how an algorithm is configured.                           ***\n"
00226       << "*****************************************************************\n";
00227   }
00228 
00229   // ////////////////////////////////////////////////////////////
00230   // A. ???
00231 
00232   // /////////////////////////////////////////////////////////////////////////
00233   // B. Create an algo object, give to algo_cntr, then give algo_cntr to algo
00234 
00235   if(trase_out)
00236     *trase_out << "\n*** Creating the NLPAlgo algo object ...\n";
00237 
00238   typedef Teuchos::RefCountPtr<NLPAlgo> algo_ptr_t;
00239   algo_ptr_t algo = Teuchos::rcp(new NLPAlgo);
00240   TEST_FOR_EXCEPTION(!algo.get(),std::runtime_error,"Error!");
00241   if(options_.get()) {
00242     IterationPack::AlgorithmSetOptions
00243       opt_setter( algo.get() );
00244     opt_setter.set_options( *options_ );
00245   }
00246   algo_cntr->set_algo(algo);
00247   algo->set_algo_cntr(algo_cntr);
00248   
00249   // /////////////////////////////////////////////
00250   // C. Configure algo
00251 
00252   // /////////////////////////////////////////////////////
00253   // C.0 Set the nlp and track objects
00254 
00255   if(trase_out)
00256     *trase_out << "\n*** Setting the NLP and track objects to the algo object ...\n";
00257 
00258   algo->set_nlp( algo_cntr->get_nlp().get() );
00259   algo->set_track( algo_cntr->get_track() );
00260 
00261   // ////////////////////////////////////////////////
00262   // Determine what the options are:
00263 
00264   // Readin the options
00265   if(options_.get()) {
00266     readin_options( *options_, &uov_, trase_out );
00267   }
00268   else {
00269     if(trase_out) {
00270       *trase_out
00271         << "\n*** Warning, no OptionsFromStream object was set so a default set"
00272           " of options will be used!\n";
00273     }
00274   } 
00275 
00276   NLP &nlp = algo->nlp();
00277   nlp.initialize(algo->algo_cntr().check_results());
00278   // Get the dimensions of the NLP
00279   const size_type
00280     n   = nlp.n(),
00281     m   = nlp.m(),
00282     r   = m, // ToDo: Compute this for real!
00283     //dof = n - r,
00284     nb  = nlp.num_bounded_x();
00285 
00286   // Process the NLP
00287   NLPFirstOrder    *nlp_foi = NULL;
00288   NLPSecondOrder   *nlp_soi = NULL;
00289   NLPDirect        *nlp_fod = NULL;
00290   bool             tailored_approach = false;
00291   decomp_sys_step_builder_.process_nlp_and_options(
00292     trase_out, nlp
00293     ,&nlp_foi, &nlp_soi, &nlp_fod, &tailored_approach
00294     );
00295 
00296   const int max_dof_quasi_newton_dense
00297     = decomp_sys_step_builder_.current_option_values().max_dof_quasi_newton_dense_;
00298 
00299   // //////////////////////////////////////////////////////
00300   // C.1.  Sort out the options
00301 
00302   if(trase_out)
00303     *trase_out
00304       << "\n*** Sorting out some of the options given input options ...\n";
00305 
00306   if( m ) {
00307     if( tailored_approach ) {
00308       // Change the options for the tailored approach. 
00309       if(trase_out) {
00310         *trase_out
00311           << "\nThis is a tailored approach NLP (NLPDirect) which forces the following options:\n"
00312           << "merit_function_type         = L1;\n"
00313           << "l1_penalty_parameter_update = MULT_FREE;\n"
00314           << "null_space_matrix           = EXPLICIT;\n"
00315           ;
00316       }
00317       cov_.merit_function_type_
00318         = MERIT_FUNC_L1;
00319       cov_.l1_penalty_param_update_
00320         = L1_PENALTY_PARAM_MULT_FREE;
00321       decomp_sys_step_builder_.current_option_values().null_space_matrix_type_
00322         = DecompositionSystemStateStepBuilderStd::NULL_SPACE_MATRIX_EXPLICIT;
00323     }
00324     
00325     if( !tailored_approach && uov_.merit_function_type_ != MERIT_FUNC_L1  ) {
00326       if(trase_out) {
00327         *trase_out
00328           << "\nThe only merit function currently supported is:\n"
00329           << "merit_function_type         = L1;\n"
00330           ;
00331       }
00332       cov_.merit_function_type_   = MERIT_FUNC_L1;
00333     }
00334   }
00335   else {
00336     if( uov_.line_search_method_ == LINE_SEARCH_NONE ) {
00337       if(trase_out) {
00338         *trase_out
00339           << "\nThere are no equality constraints (m == 0) and line_search_method==NONE so set the following options:\n"
00340           << "line_search_method          = NONE;\n"
00341           << "merit_function_type         = L1;\n"
00342           ;
00343       }
00344       cov_.line_search_method_       = LINE_SEARCH_NONE;
00345       cov_.merit_function_type_      = MERIT_FUNC_L1;
00346     }
00347     else {
00348       if(trase_out) {
00349         *trase_out
00350           << "\nThere are no equality constraints (m == 0) and line_search_method==AUTO so set the following options:\n"
00351           << "line_search_method          = DIRECT;\n"
00352           << "merit_function_type         = L1;\n"
00353           ;
00354       }
00355       cov_.line_search_method_       = LINE_SEARCH_DIRECT;
00356       cov_.merit_function_type_      = MERIT_FUNC_L1;
00357     }
00358   }
00359 
00360   // Decide what type of quasi-newton update to use
00361   switch( uov_.quasi_newton_ ) {
00362     case QN_AUTO: {
00363       if(trase_out)
00364         *trase_out
00365           << "\nquasi_newton == AUTO:"
00366           << "\nnlp.num_bounded_x() == " << nlp.num_bounded_x() << ":\n";
00367       if( n - r > max_dof_quasi_newton_dense ) {
00368         if(trase_out)
00369           *trase_out
00370             << "n-r = " << n-r << " > max_dof_quasi_newton_dense = "
00371             << max_dof_quasi_newton_dense <<  ":\n"
00372             << "setting quasi_newton == LBFGS\n";
00373         cov_.quasi_newton_ = QN_LBFGS;
00374       }
00375       else {
00376         if(trase_out)
00377           *trase_out
00378             << "n-r = " << n-r << " <= max_dof_quasi_newton_dense = "
00379             << max_dof_quasi_newton_dense << ":\n"
00380             << "setting quasi_newton == BFGS\n";
00381         cov_.quasi_newton_ = QN_BFGS;
00382       }
00383       break;
00384     }
00385     case QN_BFGS:
00386     case QN_PBFGS:
00387     case QN_LBFGS:
00388     case QN_LPBFGS:
00389       cov_.quasi_newton_ = uov_.quasi_newton_;
00390       break;
00391       default:
00392       TEST_FOR_EXCEPT(true); // Invalid option!
00393   }
00394 
00395   if( uov_.qp_solver_type_ == QP_AUTO && nb == 0 ) {
00396     cov_.qp_solver_type_ = QP_AUTO;
00397   }
00398 
00399   // ToDo: Sort out the rest of the options!
00400 
00401   // Set the default options that where not already set yet
00402   set_default_options(uov_,&cov_,trase_out);
00403 
00404   // ToDo: Implement the 2nd order correction linesearch
00405   if( cov_.line_search_method_ == LINE_SEARCH_2ND_ORDER_CORRECT ) {
00406     if(trase_out)
00407       *trase_out <<
00408         "\nline_search_method == 2ND_ORDER_CORRECT:\n"
00409         "Sorry, the second order corrrection linesearch is not updated yet!\n"
00410         "setting line_search_method = FILTER ...\n";
00411     cov_.line_search_method_ = LINE_SEARCH_FILTER;
00412   }
00413   if( cov_.line_search_method_ == LINE_SEARCH_WATCHDOG ) {
00414     if(trase_out)
00415       *trase_out <<
00416         "\nline_search_method ==WATCHDOG:\n"
00417         "Sorry, the watchdog linesearch is not updated yet!\n"
00418         "setting line_search_method = DIRECT ...\n";
00419     cov_.line_search_method_ = LINE_SEARCH_DIRECT;
00420   }
00421   
00422   // Adjust the Quasi-Newton if QPKWIK is used!
00423   if( cov_.qp_solver_type_ == QP_QPKWIK && nb ) {
00424     if(trase_out)
00425       *trase_out
00426         << "\nqp_solver == QPKWIK and nlp.num_bounded_x() == " << nb << " > 0:\n"
00427         << "Setting quasi_newton == BFGS...\n";
00428     cov_.quasi_newton_ = QN_BFGS;
00429   }
00430 
00431   // /////////////////////////////////////////////////////
00432   // C.1. Create the decomposition system object
00433 
00434   typedef RefCountPtr<DecompositionSystem> decomp_sys_ptr_t;
00435   decomp_sys_ptr_t decomp_sys;
00436   if(
00437 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00438     true
00439 #else
00440     n > m
00441 #endif
00442     )
00443   {
00444     decomp_sys_step_builder_.create_decomp_sys(
00445       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach
00446       ,&decomp_sys
00447       );
00448   }
00449 
00450 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00451   RefCountPtr<DecompositionSystemVarReductPerm>
00452     decomp_sys_perm = Teuchos::rcp_dynamic_cast<DecompositionSystemVarReductPerm>(decomp_sys);
00453 #endif
00454 
00455   // /////////////////////////////////////////////////////
00456   // C.2. Create and set the state object
00457 
00458   if(trase_out)
00459     *trase_out
00460       << "\n*** Creating the state object and setting up iteration quantity objects ...\n";
00461 
00462   {
00463     //
00464     // Create the state object with the vector spaces
00465     //
00466 
00467     typedef RefCountPtr<NLPAlgoState>   state_ptr_t;
00468     state_ptr_t
00469       state = Teuchos::rcp(
00470         new NLPAlgoState(
00471           decomp_sys
00472           ,nlp.space_x()
00473           ,nlp.space_c()
00474           ,( m
00475              ? ( tailored_approach
00476                ? ( nlp_fod->var_dep().size() 
00477                  ? nlp.space_x()->sub_space(nlp_fod->var_dep())->clone()
00478                  : Teuchos::null )
00479                : decomp_sys->space_range() // could be NULL for BasisSystemPerm
00480                )
00481              : Teuchos::null
00482             )
00483           ,( m
00484              ? ( tailored_approach
00485                ?( nlp_fod->var_indep().size()
00486                 ? nlp.space_x()->sub_space(nlp_fod->var_indep())->clone()
00487                 : Teuchos::null )
00488                : decomp_sys->space_null() // could be NULL for BasisSystemPerm
00489                )
00490              : nlp.space_x()
00491             )
00492           )
00493         );
00494         
00495     //
00496     // Set the iteration quantities for the NLP matrix objects
00497     //
00498 
00499     decomp_sys_step_builder_.add_iter_quantities(
00500       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
00501       ,state
00502       );
00503 
00504     // Add reduced Hessian
00505 
00506     if( !cov_.exact_reduced_hessian_ ) {
00507       RefCountPtr<Teuchos::AbstractFactory<MatrixSymOp> >
00508         abstract_factory_rHL = Teuchos::null;
00509       // Only maintain the orginal matrix if we have inequality constraints and therefore will be
00510       // needing a QP solver (which may be QPSchur which needs an accurate original matrix for
00511       // iterative refinement).
00512       const bool
00513         maintain_original = nb;
00514       // Maintain the factor if a QP solver is needed, QPSchur is being used, or we are checking
00515       // results
00516       const bool
00517         maintain_inverse = ( (!nb && m==r) || cov_.qp_solver_type_==QP_QPSCHUR
00518                    || algo->algo_cntr().check_results() );
00519       switch( cov_.quasi_newton_ ) {
00520         case QN_BFGS:
00521           abstract_factory_rHL = Teuchos::rcp(
00522             new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefCholFactor,MatrixSymPosDefCholFactor::PostMod>(
00523               MatrixSymPosDefCholFactor::PostMod(
00524                 maintain_original      // maintain_original
00525                 ,maintain_inverse      // maintain_factor
00526                 ,true                  // allow_factor      (always!)
00527                 )
00528               )
00529             );
00530           break;
00531         case QN_LBFGS:
00532           abstract_factory_rHL = Teuchos::rcp(
00533             new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefLBFGS,MatrixSymPosDefLBFGS::PostMod>(
00534               MatrixSymPosDefLBFGS::PostMod(
00535                 cov_.num_lbfgs_updates_stored_  //
00536                 ,maintain_original              // maintain_original
00537                 ,maintain_inverse               // maintain_inverse
00538                 ,cov_.lbfgs_auto_scaling_       // 
00539                 )
00540               )
00541             );
00542           break;
00543         default:
00544           TEST_FOR_EXCEPT(true); // Should not be called for now!
00545       }
00546       
00547       state->set_iter_quant(
00548         rHL_name
00549         ,Teuchos::rcp(
00550           new IterQuantityAccessContiguous<MatrixSymOp>(
00551             1
00552             ,rHL_name
00553             ,abstract_factory_rHL
00554             )
00555           )
00556         );
00557     }
00558     else {
00559       TEST_FOR_EXCEPT(true); // ToDo: Add rHL for an exact reduced Hessian!
00560     }
00561     
00562     //
00563     // Set the NLP merit function 
00564     //
00565 
00566     if( cov_.line_search_method_ != LINE_SEARCH_NONE 
00567        && cov_.line_search_method_ != LINE_SEARCH_FILTER) {
00568       RefCountPtr<Teuchos::AbstractFactory<MeritFuncNLP> >
00569         merit_func_factory = Teuchos::null;
00570       switch( cov_.merit_function_type_ ) {
00571         case MERIT_FUNC_L1:
00572           merit_func_factory = Teuchos::rcp(
00573             new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPL1>());
00574           break;
00575         case MERIT_FUNC_MOD_L1:
00576         case MERIT_FUNC_MOD_L1_INCR:
00577           merit_func_factory = Teuchos::rcp(
00578             new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPModL1>());
00579           break;
00580         default:
00581           TEST_FOR_EXCEPT(true);  // local programming error
00582       }
00583       state->set_iter_quant(
00584         merit_func_nlp_name
00585         ,Teuchos::rcp(
00586           new IterQuantityAccessContiguous<MeritFuncNLP>(
00587             1
00588             ,merit_func_nlp_name
00589             ,merit_func_factory
00590             )
00591           )
00592         );
00593     }
00594 
00595     if (cov_.line_search_method_ == LINE_SEARCH_FILTER)
00596       {
00597         // Add the filter iteration quantity
00598         state->set_iter_quant(
00599         FILTER_IQ_STRING
00600         ,Teuchos::rcp(
00601              new IterQuantityAccessContiguous<Filter_T>(1,FILTER_IQ_STRING)
00602         )
00603         );
00604       }
00605 
00606     if( nb ) {
00607       // Add bounds on d
00608       state->set_iter_quant(
00609         dl_name
00610         ,Teuchos::rcp(
00611           new IterQuantityAccessContiguous<VectorMutable>(
00612             2, dl_name
00613             , nlp.space_x() ) )
00614         );
00615       state->set_iter_quant(
00616         du_name
00617         ,Teuchos::rcp(
00618           new IterQuantityAccessContiguous<VectorMutable>(
00619             2, du_name
00620             , nlp.space_x() ) )
00621         );
00622       // Add active-set iteration quantity
00623       state->set_iter_quant(
00624         act_set_stats_name
00625         ,Teuchos::rcp(
00626           new IterQuantityAccessContiguous<ActSetStats>( 1, act_set_stats_name ) )
00627         );
00628       // Add QP solver stats iteration quantity
00629       state->set_iter_quant(
00630         qp_solver_stats_name
00631         ,Teuchos::rcp(
00632           new IterQuantityAccessContiguous<QPSolverStats>( 1, qp_solver_stats_name ) )
00633         );
00634     }
00635 
00636     //
00637     // Resize the number of storage locations (these can be changed later).
00638     //
00639     // Also, touch all of the value_type, index_type and vector iteration quantities
00640     // that we know about so that when state.dump_iter_quant() is called, all of the
00641     // iteration quantities will be included.
00642     //
00643     
00644     typedef IterQuantityAccessContiguous<value_type>            IQ_scalar_cngs;
00645     typedef IterQuantityAccessContiguous<VectorMutable>   IQ_vector_cngs;
00646 
00647     dyn_cast<IQ_vector_cngs>(state->x()).resize(2);
00648     dyn_cast<IQ_scalar_cngs>(state->f()).resize(2);
00649     if(m) dyn_cast<IQ_vector_cngs>(state->c()).resize(2);
00650     state->Gf();
00651     if(m && nlp_foi) state->Gc();
00652 
00653     if( m
00654 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00655        && decomp_sys_perm.get() == NULL
00656 #endif
00657       ) state->py();
00658     if(m) dyn_cast<IQ_vector_cngs>(state->Ypy()).resize(2);
00659     if( m
00660 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00661       && decomp_sys_perm.get() == NULL
00662 #endif
00663       ) state->pz();
00664     if(m) dyn_cast<IQ_vector_cngs>(state->Zpz()).resize(2);
00665     dyn_cast<IQ_vector_cngs>(state->d()).resize(2);
00666 
00667     if( n > m ) {
00668       dyn_cast<IQ_vector_cngs>(state->rGf()).resize(2);
00669       state->w();
00670       state->zeta();
00671       state->qp_grad();
00672     }
00673     state->eta();
00674     
00675     dyn_cast<IQ_scalar_cngs>(state->alpha()).resize(2);
00676     dyn_cast<IQ_scalar_cngs>(state->mu()).resize(2);
00677     dyn_cast<IQ_scalar_cngs>(state->phi()).resize(2);
00678 
00679     dyn_cast<IQ_scalar_cngs>(state->opt_kkt_err()).resize(2);
00680     dyn_cast<IQ_scalar_cngs>(state->feas_kkt_err()).resize(2);
00681     if( n > m ) {
00682       dyn_cast<IQ_vector_cngs>(state->rGL()).resize(2);
00683     }
00684     if(m) dyn_cast<IQ_vector_cngs>(state->lambda()).resize(2);
00685     dyn_cast<IQ_vector_cngs>(state->nu()).resize(2);
00686 
00687     // Set the state object
00688     algo->set_state( state );
00689   }
00690 
00691   // /////////////////////////////////////////////////////
00692   // C.3  Create and set the step objects
00693 
00694   if(trase_out)
00695     *trase_out << "\n*** Creating and setting the step objects ...\n";
00696 
00697 //  typedef RefCountPtr<QPSolverRelaxed> qp_solver_ptr_t;
00698 //  qp_solver_ptr_t qp_solver;
00699 //  typedef ConstrainedOptPack::QPSolverRelaxedTester QPSolverRelaxedTester;
00700 //  typedef RefCountPtr<QPSolverRelaxedTester> qp_tester_ptr_t;
00701 //  qp_tester_ptr_t qp_tester;
00702 //  typedef RefCountPtr<FeasibilityStepReducedStd_Strategy> feasibility_step_strategy_ptr_t;
00703 //  feasibility_step_strategy_ptr_t  feasibility_step_strategy;
00704 
00705   {
00706 
00707     //
00708     // Create some standard step objects that will be used by many different
00709     // specific algorithms
00710     //
00711     
00712     typedef RefCountPtr<AlgorithmStep>   algo_step_ptr_t;
00713 
00714     // Create the EvalNewPoint step and associated objects
00715     algo_step_ptr_t                                    eval_new_point_step           = Teuchos::null;
00716     RefCountPtr<CalcFiniteDiffProd>                  calc_fd_prod                  = Teuchos::null;
00717     RefCountPtr<VariableBoundsTester>                bounds_tester                 = Teuchos::null;
00718     RefCountPtr<NewDecompositionSelection_Strategy>  new_decomp_selection_strategy = Teuchos::null;
00719     decomp_sys_step_builder_.create_eval_new_point(
00720       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
00721       ,&eval_new_point_step, &calc_fd_prod, &bounds_tester, &new_decomp_selection_strategy
00722       );
00723 
00724     // QuasiNormal_Step
00725     algo_step_ptr_t    quansi_normal_step_step = Teuchos::null;
00726     if( !tailored_approach ) {
00727       quansi_normal_step_step = Teuchos::rcp(new QuasiNormalStepStd_Step());
00728     }
00729 
00730     // Check and change decomposition 
00731     algo_step_ptr_t    check_decomp_from_py_step  = Teuchos::null;
00732     algo_step_ptr_t    check_decomp_from_Rpy_step = Teuchos::null;
00733     if( new_decomp_selection_strategy.get() && cov_.max_basis_cond_change_frac_ < INF_BASIS_COND_CHANGE_FRAC ) {
00734       check_decomp_from_py_step = Teuchos::rcp(
00735         new CheckDecompositionFromPy_Step(
00736           new_decomp_selection_strategy
00737           ,cov_.max_basis_cond_change_frac_
00738           ) );
00739       check_decomp_from_Rpy_step = Teuchos::rcp(
00740         new CheckDecompositionFromRPy_Step(
00741           new_decomp_selection_strategy
00742           ,cov_.max_basis_cond_change_frac_
00743           ) );
00744     }
00745 
00746     // CheckDescentQuasiNormalStep
00747     algo_step_ptr_t    check_descent_quansi_normal_step_step = Teuchos::null;
00748     if( algo->algo_cntr().check_results() ) {
00749       check_descent_quansi_normal_step_step = Teuchos::rcp(new CheckDescentQuasiNormalStep_Step(calc_fd_prod));
00750     }
00751 
00752     // ReducedGradient_Step
00753     algo_step_ptr_t    reduced_gradient_step = Teuchos::null;
00754     if( !tailored_approach ) {
00755       reduced_gradient_step = Teuchos::rcp(new ReducedGradientStd_Step());
00756     }
00757 
00758     // CheckSkipBFGSUpdate
00759     algo_step_ptr_t    check_skip_bfgs_update_step = Teuchos::null;
00760     if(!cov_.exact_reduced_hessian_) {
00761       RefCountPtr<CheckSkipBFGSUpdateStd_Step>
00762         step = Teuchos::rcp(new CheckSkipBFGSUpdateStd_Step());
00763       if(options_.get()) {
00764         CheckSkipBFGSUpdateStd_StepSetOptions
00765           opt_setter( step.get() );
00766         opt_setter.set_options( *options_ );
00767       }
00768       check_skip_bfgs_update_step = step;
00769     }
00770 
00771     // ReducedHessian_Step
00772     algo_step_ptr_t    reduced_hessian_step = Teuchos::null;
00773     {
00774       // Get the strategy object that will perform the actual secant update.
00775       RefCountPtr<ReducedHessianSecantUpdate_Strategy>
00776         secant_update_strategy = Teuchos::null;
00777       switch( cov_.quasi_newton_ )
00778       {
00779           case QN_BFGS:
00780           case QN_PBFGS:
00781           case QN_LBFGS:
00782           case QN_LPBFGS:
00783         {
00784           // create and setup the actual BFGS strategy object
00785           typedef RefCountPtr<BFGSUpdate_Strategy> bfgs_strategy_ptr_t;
00786           bfgs_strategy_ptr_t
00787             bfgs_strategy = Teuchos::rcp(new BFGSUpdate_Strategy);
00788           if(options_.get()) { 
00789             BFGSUpdate_StrategySetOptions
00790               opt_setter( bfgs_strategy.get() );
00791             opt_setter.set_options( *options_ );
00792           }
00793           switch( cov_.quasi_newton_ ) {
00794               case QN_BFGS:
00795               case QN_LBFGS:
00796             {
00797               secant_update_strategy = Teuchos::rcp(new ReducedHessianSecantUpdateBFGSFull_Strategy(bfgs_strategy));
00798               break;
00799             }
00800               case QN_PBFGS:
00801               case QN_LPBFGS:
00802             {
00803               TEST_FOR_EXCEPTION(
00804                 true, std::logic_error
00805                 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
00806                 "The quansi_newton options of PBFGS and LPBFGS have not been updated yet!" );
00807               break;
00808             }
00809           }
00810           break;
00811         }
00812         default:
00813           TEST_FOR_EXCEPT(true);
00814       }
00815       
00816       // Finally build the step object
00817       reduced_hessian_step = Teuchos::rcp(
00818         new ReducedHessianSecantUpdateStd_Step( secant_update_strategy ) );
00819       // Add the QuasiNewtonStats iteration quantity
00820       algo->state().set_iter_quant(
00821         quasi_newton_stats_name
00822         ,Teuchos::rcp(new IterQuantityAccessContiguous<QuasiNewtonStats>(
00823           1
00824           ,quasi_newton_stats_name
00825 #ifdef _MIPS_CXX
00826           ,Teuchos::RefCountPtr<Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats> >(
00827             new Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats>())
00828 #endif
00829           )
00830         ));
00831     }
00832 
00833     // Initialization of the Reduced Hessian
00834     algo_step_ptr_t  init_red_hess_step = Teuchos::null;
00835     if(
00836       cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_IDENTITY
00837       || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL
00838       || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS
00839       )
00840     {
00841       // InitFinDiffReducedHessian_Step
00842       Algorithm::poss_type poss;
00843       assert(poss = algo->get_step_poss( ReducedHessian_name ) );
00844       InitFinDiffReducedHessian_Step::EInitializationMethod
00845         init_hess;
00846       switch( cov_.hessian_initialization_ ) {
00847         case INIT_HESS_FIN_DIFF_SCALE_IDENTITY:
00848           init_hess = InitFinDiffReducedHessian_Step::SCALE_IDENTITY;
00849           break;
00850         case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL:
00851           init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL;
00852           break;
00853         case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS:
00854           init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL_ABS;
00855           break;
00856         default:
00857           TEST_FOR_EXCEPT(true);  // only local programming error?
00858       }
00859       Teuchos::RefCountPtr<InitFinDiffReducedHessian_Step>
00860         _init_red_hess_step = Teuchos::rcp(new InitFinDiffReducedHessian_Step(init_hess));
00861       if(options_.get()) {
00862         InitFinDiffReducedHessian_StepSetOptions
00863           opt_setter( _init_red_hess_step.get() );
00864         opt_setter.set_options( *options_ );
00865       }
00866       init_red_hess_step = _init_red_hess_step;
00867     }
00868     else if(cov_.hessian_initialization_==INIT_HESS_SERIALIZE ) {
00869       Teuchos::RefCountPtr<ReducedHessianSerialization_Step>
00870         _init_red_hess_step = Teuchos::rcp(new ReducedHessianSerialization_Step());
00871       if(options_.get()) {
00872         ReducedHessianSerialization_StepSetOptions
00873           opt_setter( _init_red_hess_step.get() );
00874         opt_setter.set_options( *options_ );
00875       }
00876       init_red_hess_step = _init_red_hess_step;
00877     }
00878     
00879     // NullSpace_Step
00880     algo_step_ptr_t    set_d_bounds_step    = Teuchos::null;
00881     algo_step_ptr_t    tangential_step_step = Teuchos::null;
00882     if( nb == 0 ) {
00883       tangential_step_step = Teuchos::rcp(new TangentialStepWithoutBounds_Step());
00884     }
00885     else {
00886       // Step object that sets bounds for QP subproblem
00887       set_d_bounds_step = Teuchos::rcp(new SetDBoundsStd_AddedStep());
00888       // QP Solver object
00889       Teuchos::RefCountPtr<QPSolverRelaxed>  qp_solver = Teuchos::null;
00890       switch( cov_.qp_solver_type_ ) {
00891         case QP_QPSCHUR: {
00892           using ConstrainedOptPack::QPSolverRelaxedQPSchur;
00893           using ConstrainedOptPack::QPSolverRelaxedQPSchurSetOptions;
00894           using ConstrainedOptPack::QPSchurInitKKTSystemHessianFull;
00895 //          using ConstrainedOptPack::QPSchurInitKKTSystemHessianSuperBasic;
00896           Teuchos::RefCountPtr<ConstrainedOptPack::QPSolverRelaxedQPSchur::InitKKTSystem>
00897             init_kkt_sys = Teuchos::null;
00898           switch( cov_.quasi_newton_ ) {
00899             case QN_BFGS:
00900             case QN_LBFGS:
00901               init_kkt_sys = Teuchos::rcp(new QPSchurInitKKTSystemHessianFull());
00902               break;
00903             case QN_PBFGS:
00904             case QN_LPBFGS:
00905               TEST_FOR_EXCEPTION(true,std::logic_error,"Error! PBFGS and LPBFGS are not updated yet!");
00906 /*
00907               init_kkt_sys = new QPSchurInitKKTSystemHessianSuperBasic();
00908 */
00909               break;
00910             default:
00911               TEST_FOR_EXCEPT(true);
00912           }
00913           Teuchos::RefCountPtr<QPSolverRelaxedQPSchur>
00914             _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPSchur(init_kkt_sys));
00915           if(options_.get()) {
00916             QPSolverRelaxedQPSchurSetOptions
00917               qp_options_setter(_qp_solver.get());
00918             qp_options_setter.set_options( *options_ );
00919           }
00920           qp_solver = _qp_solver; // give ownership to delete!
00921           break;
00922         }
00923         case QP_QPKWIK: {
00924 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
00925           using ConstrainedOptPack::QPSolverRelaxedQPKWIK;
00926           Teuchos::RefCountPtr<QPSolverRelaxedQPKWIK>
00927             _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPKWIK());
00928           qp_solver = _qp_solver; // give ownership to delete!
00929 #else
00930           TEST_FOR_EXCEPTION(
00931             true,std::logic_error,"Error! QPKWIK interface is not supported since "
00932             "CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK is not defined!");
00933 #endif
00934           break;
00935         }
00936         case QP_QPOPT: {
00937           TEST_FOR_EXCEPTION(true,std::logic_error,"Error! QPOPT interface is not updated yet!");
00938 /*
00939 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
00940           using ConstrainedOptPack::QPSolverRelaxedQPOPT;
00941           QPSolverRelaxedQPOPT
00942             *_qp_solver = new QPSolverRelaxedQPOPT();
00943           qp_solver = _qp_solver; // give ownership to delete!
00944 #else
00945           throw std::logic_error(
00946             "NLPAlgoConfigMamaJama::config_algo_cntr(...) : QPOPT is not supported,"
00947             " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
00948 #endif
00949 */
00950           break;
00951         }
00952         default:
00953           TEST_FOR_EXCEPT(true);
00954       }
00955       // QP solver tester
00956       Teuchos::RefCountPtr<QPSolverRelaxedTester> 
00957         qp_solver_tester = Teuchos::rcp(new QPSolverRelaxedTester());
00958       if(options_.get()) {
00959         QPSolverRelaxedTesterSetOptions
00960           opt_setter( qp_solver_tester.get() );
00961         opt_setter.set_options( *options_ );
00962       }
00963       // The null-space step
00964       Teuchos::RefCountPtr<TangentialStepWithInequStd_Step>
00965         tangential_step_with_inequ_step = Teuchos::rcp(
00966           new TangentialStepWithInequStd_Step(
00967             qp_solver, qp_solver_tester ) );
00968       if(options_.get()) {
00969         TangentialStepWithInequStd_StepSetOptions
00970           opt_setter( tangential_step_with_inequ_step.get() );
00971         opt_setter.set_options( *options_ );
00972       }
00973       tangential_step_step = tangential_step_with_inequ_step;
00974       // Step for reinitialization reduced Hessian on QP failure
00975       tangential_step_step = Teuchos::rcp(
00976         new QPFailureReinitReducedHessian_Step(tangential_step_step)
00977         );
00978     }
00979 
00980     // CalcDFromYPYZPZ_Step
00981     algo_step_ptr_t    calc_d_from_Ypy_Zpy_step = Teuchos::null;
00982     {
00983       calc_d_from_Ypy_Zpy_step = Teuchos::rcp(new CalcDFromYPYZPZ_Step());
00984     }
00985 
00986     // CalcReducedGradLagrangianStd_AddedStep
00987     algo_step_ptr_t    calc_reduced_grad_lagr_step = Teuchos::null;
00988     {
00989       calc_reduced_grad_lagr_step = Teuchos::rcp(
00990         new CalcReducedGradLagrangianStd_AddedStep() );
00991     }
00992 
00993     // CheckConvergence_Step
00994     algo_step_ptr_t    check_convergence_step = Teuchos::null;
00995       {
00996       // Create the strategy object
00997       RefCountPtr<CheckConvergenceStd_Strategy>
00998         check_convergence_strategy = Teuchos::rcp(new CheckConvergenceStd_Strategy());
00999 
01000       if(options_.get()) 
01001         {
01002         CheckConvergence_StrategySetOptions
01003           opt_setter( check_convergence_strategy.get() );
01004         opt_setter.set_options( *options_ );
01005         }
01006       
01007       RefCountPtr<CheckConvergenceStd_AddedStep>
01008         _check_convergence_step = Teuchos::rcp(new CheckConvergenceStd_AddedStep(check_convergence_strategy));
01009       
01010       check_convergence_step = _check_convergence_step;
01011       }
01012 
01013     // MeritFuncPenaltyParamUpdate_Step
01014     algo_step_ptr_t    merit_func_penalty_param_update_step = Teuchos::null;
01015     if( cov_.line_search_method_ == LINE_SEARCH_FILTER ) {
01016       // We don't need to update a penalty parameter for the filter method :-)
01017     }
01018     else if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
01019       RefCountPtr<MeritFunc_PenaltyParamUpdate_AddedStep>
01020         param_update_step = Teuchos::null;
01021       switch( cov_.merit_function_type_ ) {
01022         case MERIT_FUNC_L1: {
01023           switch(cov_.l1_penalty_param_update_) {
01024             case L1_PENALTY_PARAM_WITH_MULT:
01025 //              param_update_step
01026 //                = Teuchos::rcp(new  MeritFunc_PenaltyParamUpdateWithMult_AddedStep());
01027               TEST_FOR_EXCEPTION(
01028                 true, std::logic_error
01029                 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01030                 "The l1_penalty_parameter_update option of MULT_FREE has not been updated yet!" );
01031               break;
01032             case L1_PENALTY_PARAM_MULT_FREE:
01033               param_update_step
01034                 = Teuchos::rcp(new  MeritFunc_PenaltyParamUpdateMultFree_AddedStep());
01035               break;
01036             default:
01037               TEST_FOR_EXCEPT(true);
01038           }
01039           break;
01040         }
01041         case MERIT_FUNC_MOD_L1:
01042         case MERIT_FUNC_MOD_L1_INCR:
01043 //          param_update_step = new  MeritFunc_PenaltyParamsUpdateWithMult_AddedStep(
01044 //                      Teuchos::rcp_implicit_cast<MeritFuncNLP>(merit_func) );
01045           TEST_FOR_EXCEPTION(
01046             true, std::logic_error
01047             ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01048             "The merit_function_type options of MODIFIED_L1 and MODIFIED_L1_INCR have not been updated yet!" );
01049           break;
01050         default:
01051           TEST_FOR_EXCEPT(true);  // local programming error
01052       }
01053       if(options_.get()) {
01054         MeritFunc_PenaltyParamUpdate_AddedStepSetOptions
01055           ppu_options_setter( param_update_step.get() );
01056         ppu_options_setter.set_options( *options_ );
01057       }
01058       merit_func_penalty_param_update_step = param_update_step;
01059     }
01060 
01061     // LineSearchFull_Step
01062     algo_step_ptr_t    line_search_full_step_step = Teuchos::null;
01063     {
01064       line_search_full_step_step = Teuchos::rcp(new LineSearchFullStep_Step(bounds_tester));
01065     }
01066 
01067     // LineSearch_Step
01068     algo_step_ptr_t    line_search_step = Teuchos::null;
01069     if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
01070       RefCountPtr<DirectLineSearchArmQuad_Strategy>
01071         direct_line_search = Teuchos::rcp(new  DirectLineSearchArmQuad_Strategy());
01072       if(options_.get()) {
01073         ConstrainedOptPack::DirectLineSearchArmQuad_StrategySetOptions
01074           ls_options_setter( direct_line_search.get(), "DirectLineSearchArmQuadSQPStep" );
01075         ls_options_setter.set_options( *options_ );
01076       }
01077       if( n > m ) {
01078         switch( cov_.line_search_method_ ) {
01079           case LINE_SEARCH_DIRECT: {
01080             line_search_step = Teuchos::rcp(new LineSearchDirect_Step(direct_line_search));
01081             break;
01082           }
01083           case LINE_SEARCH_2ND_ORDER_CORRECT: {
01084             TEST_FOR_EXCEPTION(
01085               true, std::logic_error
01086               ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01087               "The line_search_method option of 2ND_ORDER_CORRECT has not been updated yet!" );
01088             break;
01089           }
01090           case LINE_SEARCH_WATCHDOG: {
01091             TEST_FOR_EXCEPTION(
01092               true, std::logic_error
01093               ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01094               "The line_search_method option of WATCHDOG has not been updated yet!" );
01095             break;
01096           }
01097           case LINE_SEARCH_FILTER: {
01098             Teuchos::RefCountPtr<LineSearchFilter_Step> 
01099               line_search_filter_step = Teuchos::rcp(
01100                 new LineSearchFilter_Step(algo_cntr->get_nlp())
01101                 );
01102             
01103             if(options_.get()) 
01104             {
01105               LineSearchFilter_StepSetOptions options_setter(line_search_filter_step.get());
01106               options_setter.set_options(*options_);
01107             }
01108 
01109             line_search_step = line_search_filter_step;         
01110             break;
01111           }
01112           case LINE_SEARCH_AUTO:
01113           case LINE_SEARCH_NONE:
01114           default:
01115             TEST_FOR_EXCEPT(true); // Should not get here!
01116         }
01117       }
01118       else {
01119         line_search_step = Teuchos::rcp( new LineSearchNLE_Step(direct_line_search) );
01120       }
01121     }
01122     
01123     // LineSearchFailure
01124     if( new_decomp_selection_strategy.get() ) {
01125       line_search_step = Teuchos::rcp(
01126         new LineSearchFailureNewDecompositionSelection_Step(
01127           line_search_step
01128           ,new_decomp_selection_strategy
01129           )
01130         );
01131     }
01132   
01133     //
01134     // Create the algorithm depending on the type of NLP we are trying to solve.
01135     //
01136 
01137     if( m == 0 ) {
01138 
01139       if( nb == 0 ) {
01140         //
01141         // Unconstrained NLP (m == 0, num_bounded_x == 0)
01142         //
01143         if(trase_out)
01144           *trase_out 
01145             << "\nConfiguring an algorithm for an unconstrained "
01146             << "NLP (m == 0, num_bounded_x == 0) ...\n";
01147       }
01148       else {
01149         //
01150         // Simple bound constrained NLP (m == 0, num_bounded_x > 0)
01151         //
01152         if(trase_out)
01153           *trase_out 
01154             << "\nConfiguring an algorithm for a simple bound constrained "
01155             << "NLP (m == 0, num_bounded_x > 0) ...\n";
01156       }
01157 
01158       int step_num       = 0;
01159   
01160       // EvalNewPoint
01161       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01162       
01163       // ReducedGradient
01164       algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
01165 
01166       if( nb == 0 ) {
01167         
01168         // CalcReducedGradLagrangian
01169         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01170 
01171         // CheckConvergence
01172         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01173 
01174       }
01175 
01176       // ReducedHessian
01177       algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
01178 
01179       // (.-1) Initialize reduced Hessian
01180       if(init_red_hess_step.get()) {
01181         algo->insert_assoc_step(
01182           step_num, IterationPack::PRE_STEP, 1
01183           ,"ReducedHessianInitialization"
01184           ,init_red_hess_step
01185           );
01186       }
01187       
01188       // TangentialStep
01189       algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
01190       if( nb > 0 ) {
01191         // SetDBoundsStd
01192         algo->insert_assoc_step(
01193           step_num
01194           ,IterationPack::PRE_STEP
01195           ,1
01196           ,"SetDBoundsStd"
01197           ,set_d_bounds_step
01198           );
01199       }
01200 
01201       // CalcDFromZPZ
01202       algo->insert_step( ++step_num, "CalcDFromZpz", Teuchos::rcp(new CalcDFromZPZ_Step()) );
01203 
01204       if( nb > 0 ) {
01205         
01206         // CalcReducedGradLagrangian
01207         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01208 
01209         // CheckConvergence
01210         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01211 
01212       }
01213 
01214       // LineSearch
01215       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01216         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01217       }
01218       else {
01219         // Main line search step
01220         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01221         // Insert presteps
01222         Algorithm::poss_type
01223           pre_step_i = 0;
01224         // (.-?) LineSearchFullStep
01225         algo->insert_assoc_step(
01226           step_num
01227           ,IterationPack::PRE_STEP
01228           ,++pre_step_i
01229           ,"LineSearchFullStep"
01230           ,line_search_full_step_step
01231           );
01232         // (.-?) MeritFunc_DummyUpdate
01233         algo->insert_assoc_step(
01234           step_num
01235           ,IterationPack::PRE_STEP
01236           ,++pre_step_i
01237           ,"MeritFunc_DummyUpdate"
01238           ,Teuchos::rcp(new MeritFunc_DummyUpdate_Step())
01239           );
01240       }
01241       
01242     }
01243     else if( n == m ) {
01244       //
01245       // System of Nonlinear equations (n == m)
01246       //
01247       if(trase_out)
01248         *trase_out 
01249           << "\nConfiguring an algorithm for a system of nonlinear equations "
01250           << "NLP (n == m) ...\n";
01251       
01252       if(algo->state().get_iter_quant_id(merit_func_nlp_name)!=IterationPack::AlgorithmState::DOES_NOT_EXIST)
01253         algo->state().erase_iter_quant(merit_func_nlp_name);
01254 
01255       int step_num       = 0;
01256       int assoc_step_num = 0;
01257   
01258       // EvalNewPoint
01259       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01260       if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
01261       {
01262         algo->insert_assoc_step(
01263           step_num
01264           ,IterationPack::POST_STEP
01265           ,1
01266           ,"CheckDescentQuasiNormalStep"
01267           ,check_descent_quansi_normal_step_step
01268           );
01269       }
01270       
01271       // QuasiNormalStep
01272       if( !tailored_approach ) {
01273         algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
01274         assoc_step_num = 0;
01275         if( check_decomp_from_py_step.get() )
01276           algo->insert_assoc_step(
01277             step_num
01278             ,IterationPack::POST_STEP
01279             ,++assoc_step_num
01280             ,"CheckDecompositionFromPy"
01281             ,check_decomp_from_py_step
01282             );
01283         if( check_decomp_from_Rpy_step.get() )
01284           algo->insert_assoc_step(
01285             step_num
01286             ,IterationPack::POST_STEP
01287             ,++assoc_step_num
01288             ,"CheckDecompositionFromRPy"
01289             ,check_decomp_from_Rpy_step
01290             );
01291         if( check_descent_quansi_normal_step_step.get() )
01292           algo->insert_assoc_step(
01293             step_num
01294             ,IterationPack::POST_STEP
01295             ,++assoc_step_num
01296             ,"CheckDescentQuasiNormalStep"
01297             ,check_descent_quansi_normal_step_step
01298             );
01299       }
01300 
01301       // CheckConvergence
01302       algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01303 
01304       // CalcDFromYPY
01305       algo->insert_step( ++step_num, "CalcDFromYpy", Teuchos::rcp(new CalcDFromYPY_Step()) );
01306       
01307       // LineSearch
01308       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01309         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01310       }
01311       else {
01312         // Main line search step
01313         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01314         // Insert presteps
01315         Algorithm::poss_type
01316           pre_step_i = 0;
01317         // (.-?) LineSearchFullStep
01318         algo->insert_assoc_step(
01319           step_num
01320           ,IterationPack::PRE_STEP
01321           ,++pre_step_i
01322           ,"LineSearchFullStep"
01323           ,line_search_full_step_step
01324           );
01325       }
01326 
01327     }
01328     else if ( m > 0 || nb > 0 ) {
01329       //
01330       // General nonlinear NLP ( m > 0 )
01331       //
01332       if( nb == 0 ) {
01333         //
01334         // Nonlinear equality constrained NLP ( m > 0 && num_bounded_x == 0 )
01335         //
01336         if(trase_out)
01337           *trase_out 
01338             << "\nConfiguring an algorithm for a nonlinear equality constrained "
01339             << "NLP ( m > 0 && num_bounded_x == 0) ...\n";
01340       }
01341       else {
01342         //
01343         // Nonlinear inequality constrained NLP ( num_bounded_x > 0 )
01344         //
01345         if(trase_out)
01346           *trase_out 
01347             << "\nConfiguring an algorithm for a nonlinear generally constrained "
01348             << "NLP ( num_bounded_x > 0 ) ...\n";
01349       }
01350 
01351       int step_num       = 0;
01352       int assoc_step_num = 0;
01353   
01354       // EvalNewPoint
01355       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01356       if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
01357       {
01358         algo->insert_assoc_step(
01359           step_num
01360           ,IterationPack::POST_STEP
01361           ,1
01362           ,"CheckDescentQuasiNormalStep"
01363           ,check_descent_quansi_normal_step_step
01364           );
01365       }
01366       
01367       // QuasiNormalStep
01368       if( !tailored_approach ) {
01369         algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
01370         assoc_step_num = 0;
01371         if( check_decomp_from_py_step.get() )
01372           algo->insert_assoc_step(
01373             step_num
01374             ,IterationPack::POST_STEP
01375             ,++assoc_step_num
01376             ,"CheckDecompositionFromPy"
01377             ,check_decomp_from_py_step
01378             );
01379         if( check_decomp_from_Rpy_step.get() )
01380           algo->insert_assoc_step(
01381             step_num
01382             ,IterationPack::POST_STEP
01383             ,++assoc_step_num
01384             ,"CheckDecompositionFromRPy"
01385             ,check_decomp_from_Rpy_step
01386             );
01387         if( check_descent_quansi_normal_step_step.get() )
01388           algo->insert_assoc_step(
01389             step_num
01390             ,IterationPack::POST_STEP
01391             ,++assoc_step_num
01392             ,"CheckDescentQuasiNormalStep"
01393             ,check_descent_quansi_normal_step_step
01394             );
01395       }
01396 
01397       // ReducedGradient
01398       if( !tailored_approach ) {
01399         algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
01400       }
01401 
01402       if( nb == 0 ) {
01403 
01404         // CalcReducedGradLagrangian
01405         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01406 
01407         // CalcLagrangeMultDecomposed
01408         // Compute these here so that in case we converge we can report them
01409         if( !tailored_approach ) {
01410           // ToDo: Insert this step
01411         }
01412 
01413         // CheckConvergence
01414         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01415       }
01416 
01417       // ReducedHessian
01418       algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
01419 
01420       // (.-1) Initialize reduced Hessian
01421       if(init_red_hess_step.get()) {
01422         algo->insert_assoc_step(
01423           step_num, IterationPack::PRE_STEP, 1
01424           ,"ReducedHessianInitialization"
01425           ,init_red_hess_step
01426           );
01427       }
01428 
01429       // (.-1) CheckSkipBFGSUpdate
01430       algo->insert_assoc_step(
01431         step_num
01432         ,IterationPack::PRE_STEP
01433         ,1
01434         ,CheckSkipBFGSUpdate_name
01435         ,check_skip_bfgs_update_step
01436         );
01437 
01438       // TangentialStep
01439       algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
01440       if( nb > 0 ) {
01441         // SetDBoundsStd
01442         algo->insert_assoc_step(
01443           step_num
01444           ,IterationPack::PRE_STEP
01445           ,1
01446           ,"SetDBoundsStd"
01447           ,set_d_bounds_step
01448           );
01449       }
01450 
01451       // CalcDFromYPYZPZ
01452       algo->insert_step( ++step_num, CalcDFromYPYZPZ_name, calc_d_from_Ypy_Zpy_step );
01453       
01454       if( nb > 0 ) {
01455 
01456         // CalcReducedGradLagrangian
01457         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01458 
01459         // CalcLagrangeMultDecomposed
01460         // Compute these here so that in case we converge we can report them
01461         if( !tailored_approach ) {
01462           // ToDo: Insert this step
01463         }
01464 
01465         // CheckConvergence
01466         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01467       }
01468 
01469       // LineSearch
01470       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01471         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01472       }
01473       else {
01474         // Main line search step
01475         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01476         // Insert presteps
01477         Algorithm::poss_type
01478           pre_step_i = 0;
01479         // (.-?) LineSearchFullStep
01480         algo->insert_assoc_step(
01481           step_num
01482           ,IterationPack::PRE_STEP
01483           ,++pre_step_i
01484           ,"LineSearchFullStep"
01485           ,line_search_full_step_step
01486           );
01487         // (.-?) MeritFunc_PenaltyPramUpdate
01488         if(merit_func_penalty_param_update_step.get()) {
01489           algo->insert_assoc_step(
01490             step_num
01491             ,IterationPack::PRE_STEP
01492             ,++pre_step_i
01493             ,"MeritFunc_PenaltyParamUpdate"
01494             ,merit_func_penalty_param_update_step
01495             );
01496         }
01497       }
01498 
01499     }
01500     else {
01501       TEST_FOR_EXCEPT(true); // Error, this should not ever be called!
01502     }
01503   }
01504   
01505 }
01506 
01507 void NLPAlgoConfigMamaJama::init_algo(NLPAlgoInterface* _algo)
01508 {
01509   using Teuchos::dyn_cast;
01510 
01511   TEST_FOR_EXCEPTION(
01512     _algo == NULL, std::invalid_argument
01513     ,"NLPAlgoConfigMamaJama::init_algo(_algo) : Error, "
01514     "_algo can not be NULL" );
01515 
01516   NLPAlgo              &algo    = dyn_cast<NLPAlgo>(*_algo);
01517   NLPAlgoState         &state   = algo.rsqp_state();
01518   NLP                  &nlp     = algo.nlp();
01519 
01520   algo.max_iter( algo.algo_cntr().max_iter() );
01521   algo.max_run_time( algo.algo_cntr().max_run_time() );
01522 
01523   // Reset the iteration count to zero
01524   state.k(0);
01525 
01526   // Get organized output of vectors and matrices even if setw is not used by Step objects.
01527   algo.track().journal_out()
01528     << std::setprecision(algo.algo_cntr().journal_print_digits())
01529     << std::scientific;
01530 
01531   // set the first step
01532   algo.do_step_first(1);
01533 
01534   // The rest of the algorithm should initialize itself
01535 }
01536 
01537 // private
01538 
01539 void NLPAlgoConfigMamaJama::readin_options(
01540     const OptionsFromStreamPack::OptionsFromStream     &options
01541   , SOptionValues                                      *ov
01542   , std::ostream                                       *trase_out
01543   )
01544 {
01545   namespace ofsp = OptionsFromStreamPack;
01546   using   ofsp::OptionsFromStream;
01547   typedef   OptionsFromStream::options_group_t    options_group_t;
01548   using   ofsp::StringToIntMap;
01549   using   ofsp::StringToBool;
01550 
01551   assert(ov); // only a local class error
01552 
01553   // Get the options group for "NLPAlgoConfigMamaJama"
01554   const std::string opt_grp_name = "NLPAlgoConfigMamaJama";
01555   const OptionsFromStream::options_group_t optgrp = options.options_group( opt_grp_name );
01556   if( OptionsFromStream::options_group_exists( optgrp ) ) {
01557 
01558     // Define map for options group "MamaJama".
01559     const int num_opts = 11;
01560     enum EMamaJama {
01561       MAX_BASIS_COND_CHANGE_FRAC
01562       ,EXACT_REDUCED_HESSIAN
01563       ,QUASI_NEWTON
01564       ,NUM_LBFGS_UPDATES_STORED
01565       ,LBFGS_AUTO_SCALING
01566       ,HESSIAN_INITIALIZATION
01567       ,QP_SOLVER
01568       ,REINIT_HESSIAN_ON_QP_FAIL
01569       ,LINE_SEARCH_METHOD
01570       ,MERIT_FUNCTION_TYPE
01571       ,L1_PENALTY_PARAM_UPDATE
01572     };
01573     const char* SMamaJama[num_opts] = {
01574       "max_basis_cond_change_frac"
01575       ,"exact_reduced_hessian"
01576       ,"quasi_newton"
01577       ,"num_lbfgs_updates_stored"
01578       ,"lbfgs_auto_scaling"
01579       ,"hessian_initialization"
01580       ,"qp_solver"
01581       ,"reinit_hessian_on_qp_fail"
01582       ,"line_search_method"
01583       ,"merit_function_type"
01584       ,"l1_penalty_parameter_update"
01585     };
01586     StringToIntMap  mama_jama_map(  opt_grp_name, num_opts, SMamaJama );
01587 
01588     options_group_t::const_iterator itr = optgrp.begin();
01589     for( ; itr != optgrp.end(); ++itr ) {
01590       switch( (EMamaJama)mama_jama_map( ofsp::option_name(itr) ) ) {
01591         case MAX_BASIS_COND_CHANGE_FRAC:
01592           ov->max_basis_cond_change_frac_ = ::atof( ofsp::option_value(itr).c_str() );
01593           break;
01594         case EXACT_REDUCED_HESSIAN:
01595           ov->exact_reduced_hessian_ = StringToBool( "exact_reduced_hessian", ofsp::option_value(itr).c_str() );
01596           break;
01597         case QUASI_NEWTON:
01598         {
01599           const std::string &opt_val = ofsp::option_value(itr);
01600           if( opt_val == "AUTO" )
01601             ov->quasi_newton_ = QN_AUTO;
01602           else if( opt_val == "BFGS" )
01603             ov->quasi_newton_ = QN_BFGS;
01604           else if( opt_val == "PBFGS" )
01605             ov->quasi_newton_ = QN_PBFGS;
01606           else if( opt_val == "LBFGS" )
01607             ov->quasi_newton_ = QN_LBFGS;
01608           else if( opt_val == "LPBFGS" )
01609             ov->quasi_newton_ = QN_LPBFGS;
01610           else
01611             TEST_FOR_EXCEPTION(
01612               true, std::invalid_argument
01613               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01614               "Error, incorrect value for \"quasi_newton\" "
01615               ", Only options of BFGS, PBFGS"
01616               ", LBFGS, LPBFGS and AUTO are avalible."
01617               );
01618           break;
01619         }
01620         case NUM_LBFGS_UPDATES_STORED:
01621           ov->num_lbfgs_updates_stored_ = ::atoi( ofsp::option_value(itr).c_str() );
01622           break;
01623         case LBFGS_AUTO_SCALING:
01624           ov->lbfgs_auto_scaling_
01625             = StringToBool( "lbfgs_auto_scaling", ofsp::option_value(itr).c_str() );
01626           break;
01627         case HESSIAN_INITIALIZATION:
01628         {
01629           const std::string &opt_val = ofsp::option_value(itr);
01630           if( opt_val == "IDENTITY" )
01631             ov->hessian_initialization_ = INIT_HESS_IDENTITY;
01632           else if( opt_val == "FINITE_DIFF_SCALE_IDENTITY" )
01633             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_IDENTITY;
01634           else if( opt_val == "FINITE_DIFF_DIAGONAL" )
01635             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL;
01636           else if( opt_val == "FINITE_DIFF_DIAGONAL_ABS" )
01637             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
01638           else if( opt_val == "AUTO" )
01639             ov->hessian_initialization_ = INIT_HESS_AUTO;
01640           else if( opt_val == "SERIALIZE" )
01641             ov->hessian_initialization_ = INIT_HESS_SERIALIZE;
01642           else
01643             TEST_FOR_EXCEPTION(
01644               true, std::invalid_argument
01645               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01646               "Error, incorrect value for \"hessian_initialization\" "
01647               ", Only options of IDENTITY, SERIALIZE, FINITE_DIFF_SCALE_IDENTITY,"
01648               " FINITE_DIFF_DIAGONAL, FINITE_DIFF_DIAGONAL_ABS and AUTO"
01649               " are available"  );
01650           break;
01651         }
01652         case QP_SOLVER:
01653         {
01654           const std::string &qp_solver = ofsp::option_value(itr);
01655           if( qp_solver == "AUTO" ) {
01656             ov->qp_solver_type_ = QP_AUTO;
01657           } else if( qp_solver == "QPSOL" ) {
01658             ov->qp_solver_type_ = QP_QPSOL;
01659           } else if( qp_solver == "QPOPT" ) {
01660 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
01661             ov->qp_solver_type_ = QP_QPOPT;
01662 #else
01663             TEST_FOR_EXCEPTION(
01664               true, std::invalid_argument
01665               ,"NLPAlgoConfigMamaJama::readin_options(...) : QPOPT is not supported,"
01666               " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
01667 #endif
01668           } else if( qp_solver == "QPKWIK" ) {
01669 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
01670             ov->qp_solver_type_ = QP_QPKWIK;
01671 #else
01672             TEST_FOR_EXCEPTION(
01673               true, std::invalid_argument
01674               ,"NLPAlgoConfigMamaJama::readin_options(...) : QPKWIK is not supported,"
01675               " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK!" );
01676 #endif
01677           } else if( qp_solver == "QPSCHUR" ) {
01678             ov->qp_solver_type_ = QP_QPSCHUR;
01679           } else {
01680             TEST_FOR_EXCEPTION(
01681               true, std::invalid_argument
01682               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01683               "Error, incorrect value for \"qp_solver\" "
01684               "Only qp solvers QPOPT, QPSOL, QPKWIK, QPSCHUR and AUTO are avalible."  );
01685           }
01686           break;
01687         }
01688         case REINIT_HESSIAN_ON_QP_FAIL:
01689           ov->reinit_hessian_on_qp_fail_ = StringToBool( "reinit_hessian_on_qp_fail", ofsp::option_value(itr).c_str() );
01690           break;
01691         case LINE_SEARCH_METHOD:
01692         {
01693           const std::string &option = ofsp::option_value(itr);
01694           if( option == "NONE" ) {
01695             ov->line_search_method_ = LINE_SEARCH_NONE;
01696           } else if( option == "DIRECT" ) {
01697             ov->line_search_method_ = LINE_SEARCH_DIRECT;
01698           } else if( option == "2ND_ORDER_CORRECT" ) {
01699             ov->line_search_method_ = LINE_SEARCH_2ND_ORDER_CORRECT;
01700           } else if( option == "WATCHDOG" ) {
01701             ov->line_search_method_ = LINE_SEARCH_WATCHDOG;
01702           } else if( option == "AUTO" ) {
01703             ov->line_search_method_ = LINE_SEARCH_AUTO;
01704           } else if( option == "FILTER" ) {
01705             ov->line_search_method_ = LINE_SEARCH_FILTER;
01706           } else {
01707             TEST_FOR_EXCEPTION(
01708               true, std::invalid_argument
01709               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01710               "Error, incorrect value for \"line_search_method\".\n"
01711               "Only the options NONE, DIRECT, 2ND_ORDER_CORRECT, FILTER, WATCHDOG "
01712               "and AUTO are avalible." );
01713           }
01714           break;
01715         }
01716         case MERIT_FUNCTION_TYPE:
01717         {
01718           const std::string &option = ofsp::option_value(itr);
01719           if( option == "L1" )
01720             ov->merit_function_type_ = MERIT_FUNC_L1;
01721           else if( option == "MODIFIED_L1" )
01722             ov->merit_function_type_ = MERIT_FUNC_MOD_L1;
01723           else if( option == "MODIFIED_L1_INCR" )
01724             ov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
01725           else if( option == "AUTO" )
01726             ov->merit_function_type_ = MERIT_FUNC_AUTO;
01727           else
01728             TEST_FOR_EXCEPTION(
01729               true, std::invalid_argument
01730               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01731               "Error, incorrect value for \"merit_function_type\".\n"
01732               "Only the options L1, MODIFIED_L1, MODIFIED_L1_INCR "
01733               "and AUTO are avalible." );
01734           break;
01735         }
01736         case L1_PENALTY_PARAM_UPDATE:
01737         {
01738           const std::string &option = ofsp::option_value(itr);
01739           if( option == "WITH_MULT" )
01740             ov->l1_penalty_param_update_
01741               = L1_PENALTY_PARAM_WITH_MULT;
01742           else if( option == "MULT_FREE" )
01743             ov->l1_penalty_param_update_
01744               = L1_PENALTY_PARAM_MULT_FREE;
01745           else if( option == "AUTO" )
01746             ov->l1_penalty_param_update_
01747               = L1_PENALTY_PARAM_AUTO;
01748           else
01749             TEST_FOR_EXCEPTION(
01750               true, std::invalid_argument
01751               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01752               "Error, incorrect value for \"l1_penalty_param_update\".\n"
01753               "Only the options WITH_MULT, MULT_FREE and AUTO"
01754               "are avalible."  );
01755           break;
01756         }
01757         default:
01758           TEST_FOR_EXCEPT(true);  // this would be a local programming error only.
01759       }
01760     }
01761   }
01762   else {
01763     if(trase_out)
01764       *trase_out
01765         << "\n\n*** Warning!  The options group \"NLPAlgoConfigMamaJama\" was not found.\n"
01766         << "Using a default set of options instead ... \n";
01767   }
01768 }
01769 
01770 //
01771 // This is where some of the default options are set and the user is alerted to what their
01772 // value is.
01773 //
// Resolve final ("current") option values *cov from the user-supplied values
// uov, substituting hard-coded defaults for anything left in its AUTO /
// unset state.  Each decision is journaled to *trase_out (when non-NULL).
//
// @param uov        [in]  Option values as read from the user (readin_options).
// @param cov        [out] Resolved option values actually used by the configuration.
// @param trase_out  [in/out] Optional trace stream for reporting chosen defaults (may be NULL).
void NLPAlgoConfigMamaJama::set_default_options( 
  const SOptionValues     &uov
  ,SOptionValues          *cov
  ,std::ostream           *trase_out
  )
{
  if(trase_out)
    *trase_out
      << "\n*** Setting option defaults for options not set by the user or determined some other way ...\n";

  // max_basis_cond_change_frac: negative means "unset"; default is 1e+4.
  // NOTE(review): unlike most options below, the else branch here copies
  // uov unconditionally (no "else if cov still unset" guard), so a cov value
  // set some other way is overwritten — confirm this asymmetry is intended.
  if( cov->max_basis_cond_change_frac_ < 0.0 &&  uov.max_basis_cond_change_frac_ < 0.0 ) {
    if(trase_out)
      *trase_out
        << "\nmax_basis_cond_change_frac < 0 : setting max_basis_cond_change_frac = 1e+4 \n";
    cov->max_basis_cond_change_frac_ = 1e+4;
  }
  else {
    cov->max_basis_cond_change_frac_ = uov.max_basis_cond_change_frac_;
  }
  cov->exact_reduced_hessian_ = uov.exact_reduced_hessian_;
  // quasi_newton: AUTO resolves to BFGS.
  if( cov->quasi_newton_ == QN_AUTO && uov.quasi_newton_ == QN_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nquasi_newton == AUTO: setting quasi_newton = BFGS\n";
    cov->quasi_newton_ = QN_BFGS;
  }
  else if(cov->quasi_newton_ == QN_AUTO) {
    cov->quasi_newton_ = uov.quasi_newton_;
  }
  // num_lbfgs_updates_stored: negative means "unset"; default is 10.
  if( cov->num_lbfgs_updates_stored_ < 0 && uov.num_lbfgs_updates_stored_ < 0 ) {
    if(trase_out)
      *trase_out
        << "\nnum_lbfgs_updates_stored < 0 : setting num_lbfgs_updates_stored = 10\n";
    cov->num_lbfgs_updates_stored_ = 10;
  }
  else if(cov->num_lbfgs_updates_stored_ < 0) {
    cov->num_lbfgs_updates_stored_ = uov.num_lbfgs_updates_stored_;
  }
  cov->lbfgs_auto_scaling_ = uov.lbfgs_auto_scaling_;
  // hessian_initialization: AUTO resolves to IDENTITY (a finite-difference
  // based default was tried previously and is kept below, commented out).
  if( cov->hessian_initialization_ == INIT_HESS_AUTO && uov.hessian_initialization_ == INIT_HESS_AUTO ) {
    if(trase_out)
       *trase_out
        << "\nhessian_initialization == AUTO: setting hessian_initialization = IDENTITY\n";
    cov->hessian_initialization_ = INIT_HESS_IDENTITY;
/*
    if(trase_out)
      *trase_out
        << "\nhessian_initialization == AUTO: setting hessian_initialization = FINITE_DIFF_DIAGONAL_ABS\n";
    cov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
*/
  }
  else if(cov->hessian_initialization_ == INIT_HESS_AUTO) {
    cov->hessian_initialization_ = uov.hessian_initialization_;
  }
  // qp_solver: AUTO resolves to QPKWIK when compiled in, otherwise QPSCHUR.
  if( cov->qp_solver_type_ == QP_AUTO && uov.qp_solver_type_ == QP_AUTO ) {
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
    if(trase_out)
      *trase_out
        << "\nqp_solver_type == AUTO: setting qp_solver_type = QPKWIK\n";
    cov->qp_solver_type_ = QP_QPKWIK;
#else
    if(trase_out)
      *trase_out
        << "\nqp_solver_type == AUTO: setting qp_solver_type = QPSCHUR\n";
    cov->qp_solver_type_ = QP_QPSCHUR;
#endif
  }
  else if(cov->qp_solver_type_ == QP_AUTO) {
    cov->qp_solver_type_ = uov.qp_solver_type_;
  }
  cov->reinit_hessian_on_qp_fail_ = uov.reinit_hessian_on_qp_fail_;
  // line_search_method: AUTO resolves to FILTER.
  if( cov->line_search_method_ == LINE_SEARCH_AUTO && uov.line_search_method_ == LINE_SEARCH_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nline_search_method == AUTO: setting line_search_method = FILTER\n";
    cov->line_search_method_ = LINE_SEARCH_FILTER;
  }
  else if(cov->line_search_method_ == LINE_SEARCH_AUTO) {
    cov->line_search_method_ = uov.line_search_method_;
  }
  // merit_function_type: AUTO resolves to MODIFIED_L1_INCR.
  if( cov->merit_function_type_ == MERIT_FUNC_AUTO && uov.merit_function_type_ == MERIT_FUNC_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nmerit_function_type == AUTO: setting merit_function_type = MODIFIED_L1_INCR\n";
    cov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
  }
  else if(cov->merit_function_type_ == MERIT_FUNC_AUTO) {
    cov->merit_function_type_ = uov.merit_function_type_;
  }
  // l1_penalty_param_update: AUTO resolves to MULT_FREE.
  if( cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO && uov.l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nl1_penalty_param_update == AUTO: setting l1_penalty_param_update = MULT_FREE\n";
    cov->l1_penalty_param_update_ = L1_PENALTY_PARAM_MULT_FREE;
  }
  else if(cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO) {
    cov->l1_penalty_param_update_ = uov.l1_penalty_param_update_;
  }
  // full_steps_after_k: negative means the line search is never turned off.
  // NOTE(review): when both are negative, cov is deliberately left as-is (no
  // assignment); when either is non-negative, uov is copied unconditionally
  // (plain else, no guard) — same asymmetry as max_basis_cond_change_frac.
  if( cov->full_steps_after_k_ < 0 && uov.full_steps_after_k_ < 0 ) {
    if(trase_out)
      *trase_out
        << "\nfull_steps_after_k < 0 : the line search will never be turned off after so many iterations\n";
  }
  else {
    cov->full_steps_after_k_ = uov.full_steps_after_k_;
  }
  if(trase_out)
    *trase_out
      << "\n*** End setting default options\n";
}
01884 
01885 } // end namespace MoochoPack

Generated on Thu Sep 18 12:34:28 2008 for MoochoPack : Framework for Large-Scale Optimization Algorithms by doxygen 1.3.9.1