MOOCHO (Single Doxygen Collection) Version of the Day
MoochoPack_NLPAlgoConfigMamaJama.cpp
Go to the documentation of this file.
00001 // @HEADER
00002 // ***********************************************************************
00003 // 
00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
00005 //                  Copyright (2003) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // This library is free software; you can redistribute it and/or modify
00011 // it under the terms of the GNU Lesser General Public License as
00012 // published by the Free Software Foundation; either version 2.1 of the
00013 // License, or (at your option) any later version.
00014 //  
00015 // This library is distributed in the hope that it will be useful, but
00016 // WITHOUT ANY WARRANTY; without even the implied warranty of
00017 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018 // Lesser General Public License for more details.
00019 //  
00020 // You should have received a copy of the GNU Lesser General Public
00021 // License along with this library; if not, write to the Free Software
00022 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00023 // USA
00024 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 
00025 // 
00026 // ***********************************************************************
00027 // @HEADER
00028 
00029 #include <assert.h>
00030 
00031 #include <sstream>
00032 #include <typeinfo>
00033 #include <iostream>
00034 
00035 #include "MoochoPack_NLPAlgoConfigMamaJama.hpp"
00036 #include "MoochoPack_NLPAlgo.hpp"
00037 #include "MoochoPack_NLPAlgoContainer.hpp"
00038 #include "IterationPack_AlgorithmSetOptions.hpp"
00039 #include "AbstractLinAlgPack_MatrixSymPosDefCholFactor.hpp"                     // rHL 
00040 //#include "ConstrainedOptPack_MatrixSymPosDefInvCholFactor.hpp"    // .
00041 #include "ConstrainedOptPack_MatrixSymPosDefLBFGS.hpp"        // .
//#include "ConstrainedOptPack_MatrixHessianSuperBasicInitDiagonal.hpp"  // | rHL (super basics)
00043 //#include "AbstractLinAlgPack/src/AbstractLinAlgPack_MatrixSymDiagStd.hpp"                          // |
00044 
00045 #include "ConstrainedOptPack_VariableBoundsTester.hpp"
00046 
00047 #include "NLPInterfacePack_NLPDirect.hpp"
00048 
00049 #include "NLPInterfacePack_CalcFiniteDiffProd.hpp"
00050 #include "NLPInterfacePack_NLPVarReductPerm.hpp"
00051 
00052 // line search
00053 #include "ConstrainedOptPack_DirectLineSearchArmQuad_Strategy.hpp"
00054 #include "ConstrainedOptPack_DirectLineSearchArmQuad_StrategySetOptions.hpp"
00055 #include "ConstrainedOptPack_MeritFuncNLPL1.hpp"
00056 #include "ConstrainedOptPack_MeritFuncNLPModL1.hpp"
00057 
00058 // Basis permutations and direct sparse solvers
00059 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00060 #include "ConstrainedOptPack_DecompositionSystemVarReductPerm.hpp"
00061 #endif
00062 
00063 #include "ConstrainedOptPack_QPSolverRelaxedTester.hpp"
00064 #include "ConstrainedOptPack_QPSolverRelaxedTesterSetOptions.hpp"
00065 #include "ConstrainedOptPack_QPSolverRelaxedQPSchur.hpp"
00066 #include "ConstrainedOptPack_QPSolverRelaxedQPSchurSetOptions.hpp"
00067 #include "ConstrainedOptPack_QPSchurInitKKTSystemHessianFull.hpp"
00068 //#include "ConstrainedOptPack_QPSchurInitKKTSystemHessianSuperBasic.hpp"
00069 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
00070 #include "ConstrainedOptPack_QPSolverRelaxedQPKWIK.hpp"
00071 #endif
00072 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
00073 //#include "ConstrainedOptPack_QPSolverRelaxedQPOPT.hpp"
00074 #endif
00075 
00076 #include "MoochoPack_MoochoAlgorithmStepNames.hpp"
00077 
00078 /*
00079 #include "MoochoPack_EvalNewPointStd_StepSetOptions.hpp"
00080 #include "MoochoPack_EvalNewPointTailoredApproach_StepSetOptions.hpp"
00081 #include "MoochoPack_EvalNewPointTailoredApproachCoordinate_Step.hpp"
00082 #include "MoochoPack_EvalNewPointTailoredApproachOrthogonal_Step.hpp"
00083 */
00084 
00085 #include "MoochoPack_ReducedGradientStd_Step.hpp"
00086 #include "MoochoPack_InitFinDiffReducedHessian_Step.hpp"
00087 #include "MoochoPack_InitFinDiffReducedHessian_StepSetOptions.hpp"
00088 #include "MoochoPack_ReducedHessianSerialization_Step.hpp"
00089 #include "MoochoPack_ReducedHessianSerialization_StepSetOptions.hpp"
00090 #include "MoochoPack_ReducedHessianSecantUpdateStd_Step.hpp"
00091 #include "MoochoPack_ReducedHessianSecantUpdateBFGSFull_Strategy.hpp"
00092 
00093 
00094 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_Strategy.hpp"
00095 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_StrategySetOptions.hpp"
00096 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_Strategy.hpp"
00097 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_StrategySetOptions.hpp"
00098 #include "MoochoPack_BFGSUpdate_Strategy.hpp"
00099 #include "MoochoPack_BFGSUpdate_StrategySetOptions.hpp"
00100 #include "MoochoPack_QuasiNormalStepStd_Step.hpp"
00101 #include "MoochoPack_CheckDescentQuasiNormalStep_Step.hpp"
00102 #include "MoochoPack_CheckDecompositionFromPy_Step.hpp"
00103 #include "MoochoPack_CheckDecompositionFromRPy_Step.hpp"
00104 #include "MoochoPack_TangentialStepWithoutBounds_Step.hpp"
00105 #include "MoochoPack_TangentialStepWithInequStd_Step.hpp"
00106 #include "MoochoPack_TangentialStepWithInequStd_StepSetOptions.hpp"
00107 #include "MoochoPack_SetDBoundsStd_AddedStep.hpp"
00108 #include "MoochoPack_QPFailureReinitReducedHessian_Step.hpp"
00109 #include "MoochoPack_CalcDFromYPYZPZ_Step.hpp"
00110 #include "MoochoPack_CalcDFromYPY_Step.hpp"
00111 #include "MoochoPack_CalcDFromZPZ_Step.hpp"
00112 #include "MoochoPack_LineSearchFailureNewDecompositionSelection_Step.hpp"
00113 #include "MoochoPack_LineSearchFilter_Step.hpp"
00114 #include "MoochoPack_LineSearchFilter_StepSetOptions.hpp"
00115 #include "MoochoPack_LineSearchFullStep_Step.hpp"
00116 #include "MoochoPack_LineSearchDirect_Step.hpp"
00117 #include "MoochoPack_LineSearchNLE_Step.hpp"
00118 //#include "MoochoPack_LineSearch2ndOrderCorrect_Step.hpp"
00119 //#include "MoochoPack_LineSearch2ndOrderCorrect_StepSetOptions.hpp"
00120 //#include "MoochoPack_FeasibilityStepReducedStd_Strategy.hpp"
00121 //#include "MoochoPack_FeasibilityStepReducedStd_StrategySetOptions.hpp"
00122 //#include "MoochoPack_QuasiRangeSpaceStepStd_Strategy.hpp"
00123 //#include "MoochoPack_QuasiRangeSpaceStepTailoredApproach_Strategy.hpp"
00124 //#include "MoochoPack_LineSearchWatchDog_Step.hpp"
00125 //#include "MoochoPack_LineSearchWatchDog_StepSetOptions.hpp"
00126 //#include "MoochoPack_LineSearchFullStepAfterKIter_Step.hpp"
00127 //#include "MoochoPack_CalcLambdaIndepStd_AddedStep.hpp"
00128 #include "MoochoPack_CalcReducedGradLagrangianStd_AddedStep.hpp"
00129 #include "MoochoPack_CheckConvergenceStd_AddedStep.hpp"
00130 #include "MoochoPack_CheckConvergenceStd_Strategy.hpp"
00131 #include "MoochoPack_CheckSkipBFGSUpdateStd_StepSetOptions.hpp"
00132 #include "MoochoPack_MeritFunc_DummyUpdate_Step.hpp"
00133 #include "MoochoPack_MeritFunc_PenaltyParamUpdate_AddedStepSetOptions.hpp"
00134 #include "MoochoPack_MeritFunc_PenaltyParamUpdateMultFree_AddedStep.hpp"
00135 //#include "MoochoPack_MeritFunc_PenaltyParamUpdateWithMult_AddedStep.hpp"
00136 //#include "MoochoPack_MeritFunc_PenaltyParamsUpdateWithMult_AddedStep.hpp"
00137 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStep.hpp"
00138 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStepSetOptions.hpp"
00139 //#include "MoochoPack_ActSetStats_AddedStep.hpp"
00140 //#include "MoochoPack_NumFixedDepIndep_AddedStep.hpp"
00141 
00142 #include "MoochoPack_act_set_stats.hpp"
00143 #include "MoochoPack_qp_solver_stats.hpp"
00144 #include "MoochoPack_quasi_newton_stats.hpp"
00145 
00146 //#include "AbstractLinAlgPack_sparse_bounds.hpp"
00147 
00148 // Misc utilities
00149 #include "Teuchos_AbstractFactoryStd.hpp"
00150 #include "Teuchos_dyn_cast.hpp"
00151 #include "ReleaseResource_ref_count_ptr.hpp"
00152 #include "Teuchos_TestForException.hpp"
00153 
00154 // Stuff to read in options
00155 #include "OptionsFromStreamPack_StringToIntMap.hpp"
00156 #include "OptionsFromStreamPack_StringToBool.hpp"
00157 
00158 // Stuff for exact reduced hessian
00159 //#include "MoochoPack_ReducedHessianExactStd_Step.hpp"
00160 //#include "MoochoPack_CrossTermExactStd_Step.hpp"
00161 //#include "MoochoPack_DampenCrossTermStd_Step.hpp"
00162 
namespace {
  // "Effectively infinite" threshold for the max_basis_cond_change_frac
  // option: the CheckDecompositionFromPy/RPy steps are only added when the
  // configured max_basis_cond_change_frac_ is below this value (see the
  // comparison in config_algo_cntr()).
  const double INF_BASIS_COND_CHANGE_FRAC      = 1e+20;
}
00166 
00167 namespace MoochoPack {
00168 
00169 //
00170 // Here is where we define the default values for the algorithm.  These
00171 // should agree with what are in the Moocho.opt.NLPAlgoConfigMamaJama file.
00172 //
// Default-construct the option values.  The sentinel values used here
// (negative numbers and the *_AUTO enum values) mean "not specified by the
// user"; they are resolved to concrete choices later when
// set_default_options() is called during config_algo_cntr().
NLPAlgoConfigMamaJama::SOptionValues::SOptionValues()
  :max_basis_cond_change_frac_(-1.0)                 // < 0 == not specified
  ,exact_reduced_hessian_(false)                     // use quasi-Newton approximation by default
  ,quasi_newton_(QN_AUTO)                            // choose BFGS vs LBFGS automatically
  ,num_lbfgs_updates_stored_(-1)                     // < 0 == not specified
  ,lbfgs_auto_scaling_(true)
  ,hessian_initialization_(INIT_HESS_AUTO)
  ,qp_solver_type_(QP_AUTO)                          // choose the QP solver automatically
  ,reinit_hessian_on_qp_fail_(true)
  ,line_search_method_(LINE_SEARCH_AUTO)             // resolved based on NLP properties
  ,merit_function_type_(MERIT_FUNC_AUTO)             // resolved based on NLP properties
  ,l1_penalty_param_update_(L1_PENALTY_PARAM_AUTO)
  ,full_steps_after_k_(-1)                           // < 0 == not specified
  ,max_pz_norm_(-1.0)                                // < 0 == not specified
  ,num_pz_damp_iters_(0)
{}
00189 
// Default constructor: nothing to do beyond default construction of the
// members; options are supplied later through set_options().
NLPAlgoConfigMamaJama::NLPAlgoConfigMamaJama()
{}
00192 
// Destructor: nothing to release explicitly.
NLPAlgoConfigMamaJama::~NLPAlgoConfigMamaJama()
{}
00195 
00196 // overridden from NLPAlgoConfig
00197 
00198 void NLPAlgoConfigMamaJama::set_options( const options_ptr_t& options )
00199 {
00200   options_ = options;
00201   decomp_sys_step_builder_.set_options(options);
00202 }
00203 
const NLPAlgoConfig::options_ptr_t&
NLPAlgoConfigMamaJama::get_options() const
{
  // Return the options object last given to set_options(); may hold NULL if
  // no options were ever set (callers guard with options_.get()).
  return options_;
}
00209 
00210 void NLPAlgoConfigMamaJama::config_algo_cntr(
00211   NLPAlgoContainer   *algo_cntr
00212   ,std::ostream       *trase_out
00213   )
00214 {
00215   using Teuchos::RCP;
00216   using Teuchos::dyn_cast;
00217 
00218   if(trase_out) {
00219     *trase_out
00220       << std::endl
00221       << "*****************************************************************\n"
00222       << "*** NLPAlgoConfigMamaJama Configuration                       ***\n"
00223       << "***                                                           ***\n"
00224       << "*** Here, summary information about how the algorithm is      ***\n"
00225       << "*** configured is printed so that the user can see how the    ***\n"
00226       << "*** properties of the NLP and the set options influence       ***\n"
00227       << "*** how an algorithm is configured.                           ***\n"
00228       << "*****************************************************************\n";
00229   }
00230 
00231   // ////////////////////////////////////////////////////////////
00232   // A. ???
00233 
00234   // /////////////////////////////////////////////////////////////////////////
00235   // B. Create an algo object, give to algo_cntr, then give algo_cntr to algo
00236 
00237   if(trase_out)
00238     *trase_out << "\n*** Creating the NLPAlgo algo object ...\n";
00239 
00240   typedef Teuchos::RCP<NLPAlgo> algo_ptr_t;
00241   algo_ptr_t algo = Teuchos::rcp(new NLPAlgo);
00242   TEST_FOR_EXCEPTION(!algo.get(),std::runtime_error,"Error!");
00243   if(options_.get()) {
00244     IterationPack::AlgorithmSetOptions
00245       opt_setter( algo.get() );
00246     opt_setter.set_options( *options_ );
00247   }
00248   algo_cntr->set_algo(algo);
00249   algo->set_algo_cntr(algo_cntr);
00250   
00251   // /////////////////////////////////////////////
00252   // C. Configure algo
00253 
00254   // /////////////////////////////////////////////////////
00255   // C.0 Set the nlp and track objects
00256 
00257   if(trase_out)
00258     *trase_out << "\n*** Setting the NLP and track objects to the algo object ...\n";
00259 
00260   algo->set_nlp( algo_cntr->get_nlp().get() );
00261   algo->set_track( algo_cntr->get_track() );
00262 
00263   // ////////////////////////////////////////////////
00264   // Determine what the options are:
00265 
00266   // Readin the options
00267   if(options_.get()) {
00268     readin_options( *options_, &uov_, trase_out );
00269   }
00270   else {
00271     if(trase_out) {
00272       *trase_out
00273         << "\n*** Warning, no OptionsFromStream object was set so a default set"
00274           " of options will be used!\n";
00275     }
00276   } 
00277 
00278   NLP &nlp = algo->nlp();
00279   nlp.initialize(algo->algo_cntr().check_results());
00280   // Get the dimensions of the NLP
00281   const size_type
00282     n   = nlp.n(),
00283     m   = nlp.m(),
00284     r   = m, // ToDo: Compute this for real!
00285     //dof = n - r,
00286     nb  = nlp.num_bounded_x();
00287 
00288   // Process the NLP
00289   NLPFirstOrder    *nlp_foi = NULL;
00290   NLPSecondOrder   *nlp_soi = NULL;
00291   NLPDirect        *nlp_fod = NULL;
00292   bool             tailored_approach = false;
00293   decomp_sys_step_builder_.process_nlp_and_options(
00294     trase_out, nlp
00295     ,&nlp_foi, &nlp_soi, &nlp_fod, &tailored_approach
00296     );
00297 
00298   const int max_dof_quasi_newton_dense
00299     = decomp_sys_step_builder_.current_option_values().max_dof_quasi_newton_dense_;
00300 
00301   // //////////////////////////////////////////////////////
00302   // C.1.  Sort out the options
00303 
00304   if(trase_out)
00305     *trase_out
00306       << "\n*** Sorting out some of the options given input options ...\n";
00307 
00308   if( m ) {
00309     if( tailored_approach ) {
00310       // Change the options for the tailored approach. 
00311       if(trase_out) {
00312         *trase_out
00313           << "\nThis is a tailored approach NLP (NLPDirect) which forces the following options:\n"
00314           << "merit_function_type         = L1;\n"
00315           << "l1_penalty_parameter_update = MULT_FREE;\n"
00316           << "null_space_matrix           = EXPLICIT;\n"
00317           ;
00318       }
00319       cov_.merit_function_type_
00320         = MERIT_FUNC_L1;
00321       cov_.l1_penalty_param_update_
00322         = L1_PENALTY_PARAM_MULT_FREE;
00323       decomp_sys_step_builder_.current_option_values().null_space_matrix_type_
00324         = DecompositionSystemStateStepBuilderStd::NULL_SPACE_MATRIX_EXPLICIT;
00325     }
00326     
00327     if( !tailored_approach && uov_.merit_function_type_ != MERIT_FUNC_L1  ) {
00328       if(trase_out) {
00329         *trase_out
00330           << "\nThe only merit function currently supported is:\n"
00331           << "merit_function_type         = L1;\n"
00332           ;
00333       }
00334       cov_.merit_function_type_   = MERIT_FUNC_L1;
00335     }
00336   }
00337   else {
00338     if( uov_.line_search_method_ == LINE_SEARCH_NONE ) {
00339       if(trase_out) {
00340         *trase_out
00341           << "\nThere are no equality constraints (m == 0) and line_search_method==NONE so set the following options:\n"
00342           << "line_search_method          = NONE;\n"
00343           << "merit_function_type         = L1;\n"
00344           ;
00345       }
00346       cov_.line_search_method_       = LINE_SEARCH_NONE;
00347       cov_.merit_function_type_      = MERIT_FUNC_L1;
00348     }
00349     else {
00350       if(trase_out) {
00351         *trase_out
00352           << "\nThere are no equality constraints (m == 0) and line_search_method==AUTO so set the following options:\n"
00353           << "line_search_method          = DIRECT;\n"
00354           << "merit_function_type         = L1;\n"
00355           ;
00356       }
00357       cov_.line_search_method_       = LINE_SEARCH_DIRECT;
00358       cov_.merit_function_type_      = MERIT_FUNC_L1;
00359     }
00360   }
00361 
00362   // Decide what type of quasi-newton update to use
00363   switch( uov_.quasi_newton_ ) {
00364     case QN_AUTO: {
00365       if(trase_out)
00366         *trase_out
00367           << "\nquasi_newton == AUTO:"
00368           << "\nnlp.num_bounded_x() == " << nlp.num_bounded_x() << ":\n";
00369       if( n - r > max_dof_quasi_newton_dense ) {
00370         if(trase_out)
00371           *trase_out
00372             << "n-r = " << n-r << " > max_dof_quasi_newton_dense = "
00373             << max_dof_quasi_newton_dense <<  ":\n"
00374             << "setting quasi_newton == LBFGS\n";
00375         cov_.quasi_newton_ = QN_LBFGS;
00376       }
00377       else {
00378         if(trase_out)
00379           *trase_out
00380             << "n-r = " << n-r << " <= max_dof_quasi_newton_dense = "
00381             << max_dof_quasi_newton_dense << ":\n"
00382             << "setting quasi_newton == BFGS\n";
00383         cov_.quasi_newton_ = QN_BFGS;
00384       }
00385       break;
00386     }
00387     case QN_BFGS:
00388     case QN_PBFGS:
00389     case QN_LBFGS:
00390     case QN_LPBFGS:
00391       cov_.quasi_newton_ = uov_.quasi_newton_;
00392       break;
00393       default:
00394       TEST_FOR_EXCEPT(true); // Invalid option!
00395   }
00396 
00397   if( uov_.qp_solver_type_ == QP_AUTO && nb == 0 ) {
00398     cov_.qp_solver_type_ = QP_AUTO;
00399   }
00400 
00401   // ToDo: Sort out the rest of the options!
00402 
00403   // Set the default options that where not already set yet
00404   set_default_options(uov_,&cov_,trase_out);
00405 
00406   // ToDo: Implement the 2nd order correction linesearch
00407   if( cov_.line_search_method_ == LINE_SEARCH_2ND_ORDER_CORRECT ) {
00408     if(trase_out)
00409       *trase_out <<
00410         "\nline_search_method == 2ND_ORDER_CORRECT:\n"
00411         "Sorry, the second order corrrection linesearch is not updated yet!\n"
00412         "setting line_search_method = FILTER ...\n";
00413     cov_.line_search_method_ = LINE_SEARCH_FILTER;
00414   }
00415   if( cov_.line_search_method_ == LINE_SEARCH_WATCHDOG ) {
00416     if(trase_out)
00417       *trase_out <<
00418         "\nline_search_method ==WATCHDOG:\n"
00419         "Sorry, the watchdog linesearch is not updated yet!\n"
00420         "setting line_search_method = DIRECT ...\n";
00421     cov_.line_search_method_ = LINE_SEARCH_DIRECT;
00422   }
00423   
00424   // Adjust the Quasi-Newton if QPKWIK is used!
00425   if( cov_.qp_solver_type_ == QP_QPKWIK && nb ) {
00426     if(trase_out)
00427       *trase_out
00428         << "\nqp_solver == QPKWIK and nlp.num_bounded_x() == " << nb << " > 0:\n"
00429         << "Setting quasi_newton == BFGS...\n";
00430     cov_.quasi_newton_ = QN_BFGS;
00431   }
00432 
00433   // /////////////////////////////////////////////////////
00434   // C.1. Create the decomposition system object
00435 
00436   typedef RCP<DecompositionSystem> decomp_sys_ptr_t;
00437   decomp_sys_ptr_t decomp_sys;
00438   if(
00439 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00440     true
00441 #else
00442     n > m
00443 #endif
00444     )
00445   {
00446     decomp_sys_step_builder_.create_decomp_sys(
00447       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach
00448       ,&decomp_sys
00449       );
00450   }
00451 
00452 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00453   RCP<DecompositionSystemVarReductPerm>
00454     decomp_sys_perm = Teuchos::rcp_dynamic_cast<DecompositionSystemVarReductPerm>(decomp_sys);
00455 #endif
00456 
00457   // /////////////////////////////////////////////////////
00458   // C.2. Create and set the state object
00459 
00460   if(trase_out)
00461     *trase_out
00462       << "\n*** Creating the state object and setting up iteration quantity objects ...\n";
00463 
00464   {
00465     //
00466     // Create the state object with the vector spaces
00467     //
00468 
00469     typedef RCP<NLPAlgoState>   state_ptr_t;
00470     state_ptr_t
00471       state = Teuchos::rcp(
00472         new NLPAlgoState(
00473           decomp_sys
00474           ,nlp.space_x()
00475           ,nlp.space_c()
00476           ,( m
00477              ? ( tailored_approach
00478                ? ( nlp_fod->var_dep().size() 
00479                  ? nlp.space_x()->sub_space(nlp_fod->var_dep())->clone()
00480                  : Teuchos::null )
00481                : decomp_sys->space_range() // could be NULL for BasisSystemPerm
00482                )
00483              : Teuchos::null
00484             )
00485           ,( m
00486              ? ( tailored_approach
00487                ?( nlp_fod->var_indep().size()
00488                 ? nlp.space_x()->sub_space(nlp_fod->var_indep())->clone()
00489                 : Teuchos::null )
00490                : decomp_sys->space_null() // could be NULL for BasisSystemPerm
00491                )
00492              : nlp.space_x()
00493             )
00494           )
00495         );
00496         
00497     //
00498     // Set the iteration quantities for the NLP matrix objects
00499     //
00500 
00501     decomp_sys_step_builder_.add_iter_quantities(
00502       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
00503       ,state
00504       );
00505 
00506     // Add reduced Hessian
00507 
00508     if( !cov_.exact_reduced_hessian_ ) {
00509       RCP<Teuchos::AbstractFactory<MatrixSymOp> >
00510         abstract_factory_rHL = Teuchos::null;
00511       // Only maintain the orginal matrix if we have inequality constraints and therefore will be
00512       // needing a QP solver (which may be QPSchur which needs an accurate original matrix for
00513       // iterative refinement).
00514       const bool
00515         maintain_original = nb;
00516       // Maintain the factor if a QP solver is needed, QPSchur is being used, or we are checking
00517       // results
00518       const bool
00519         maintain_inverse = ( (!nb && m==r) || cov_.qp_solver_type_==QP_QPSCHUR
00520                    || algo->algo_cntr().check_results() );
00521       switch( cov_.quasi_newton_ ) {
00522         case QN_BFGS:
00523           abstract_factory_rHL = Teuchos::rcp(
00524             new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefCholFactor,MatrixSymPosDefCholFactor::PostMod>(
00525               MatrixSymPosDefCholFactor::PostMod(
00526                 maintain_original      // maintain_original
00527                 ,maintain_inverse      // maintain_factor
00528                 ,true                  // allow_factor      (always!)
00529                 )
00530               )
00531             );
00532           break;
00533         case QN_LBFGS:
00534           abstract_factory_rHL = Teuchos::rcp(
00535             new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefLBFGS,MatrixSymPosDefLBFGS::PostMod>(
00536               MatrixSymPosDefLBFGS::PostMod(
00537                 cov_.num_lbfgs_updates_stored_  //
00538                 ,maintain_original              // maintain_original
00539                 ,maintain_inverse               // maintain_inverse
00540                 ,cov_.lbfgs_auto_scaling_       // 
00541                 )
00542               )
00543             );
00544           break;
00545         default:
00546           TEST_FOR_EXCEPT(true); // Should not be called for now!
00547       }
00548       
00549       state->set_iter_quant(
00550         rHL_name
00551         ,Teuchos::rcp(
00552           new IterQuantityAccessContiguous<MatrixSymOp>(
00553             1
00554             ,rHL_name
00555             ,abstract_factory_rHL
00556             )
00557           )
00558         );
00559     }
00560     else {
00561       TEST_FOR_EXCEPT(true); // ToDo: Add rHL for an exact reduced Hessian!
00562     }
00563     
00564     //
00565     // Set the NLP merit function 
00566     //
00567 
00568     if( cov_.line_search_method_ != LINE_SEARCH_NONE 
00569        && cov_.line_search_method_ != LINE_SEARCH_FILTER) {
00570       RCP<Teuchos::AbstractFactory<MeritFuncNLP> >
00571         merit_func_factory = Teuchos::null;
00572       switch( cov_.merit_function_type_ ) {
00573         case MERIT_FUNC_L1:
00574           merit_func_factory = Teuchos::rcp(
00575             new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPL1>());
00576           break;
00577         case MERIT_FUNC_MOD_L1:
00578         case MERIT_FUNC_MOD_L1_INCR:
00579           merit_func_factory = Teuchos::rcp(
00580             new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPModL1>());
00581           break;
00582         default:
00583           TEST_FOR_EXCEPT(true);  // local programming error
00584       }
00585       state->set_iter_quant(
00586         merit_func_nlp_name
00587         ,Teuchos::rcp(
00588           new IterQuantityAccessContiguous<MeritFuncNLP>(
00589             1
00590             ,merit_func_nlp_name
00591             ,merit_func_factory
00592             )
00593           )
00594         );
00595     }
00596 
00597     if (cov_.line_search_method_ == LINE_SEARCH_FILTER)
00598       {
00599         // Add the filter iteration quantity
00600         state->set_iter_quant(
00601         FILTER_IQ_STRING
00602         ,Teuchos::rcp(
00603              new IterQuantityAccessContiguous<Filter_T>(1,FILTER_IQ_STRING)
00604         )
00605         );
00606       }
00607 
00608     if( nb ) {
00609       // Add bounds on d
00610       state->set_iter_quant(
00611         dl_name
00612         ,Teuchos::rcp(
00613           new IterQuantityAccessContiguous<VectorMutable>(
00614             2, dl_name
00615             , nlp.space_x() ) )
00616         );
00617       state->set_iter_quant(
00618         du_name
00619         ,Teuchos::rcp(
00620           new IterQuantityAccessContiguous<VectorMutable>(
00621             2, du_name
00622             , nlp.space_x() ) )
00623         );
00624       // Add active-set iteration quantity
00625       state->set_iter_quant(
00626         act_set_stats_name
00627         ,Teuchos::rcp(
00628           new IterQuantityAccessContiguous<ActSetStats>( 1, act_set_stats_name ) )
00629         );
00630       // Add QP solver stats iteration quantity
00631       state->set_iter_quant(
00632         qp_solver_stats_name
00633         ,Teuchos::rcp(
00634           new IterQuantityAccessContiguous<QPSolverStats>( 1, qp_solver_stats_name ) )
00635         );
00636     }
00637 
00638     //
00639     // Resize the number of storage locations (these can be changed later).
00640     //
00641     // Also, touch all of the value_type, index_type and vector iteration quantities
00642     // that we know about so that when state.dump_iter_quant() is called, all of the
00643     // iteration quantities will be included.
00644     //
00645     
00646     typedef IterQuantityAccessContiguous<value_type>            IQ_scalar_cngs;
00647     typedef IterQuantityAccessContiguous<VectorMutable>   IQ_vector_cngs;
00648 
00649     dyn_cast<IQ_vector_cngs>(state->x()).resize(2);
00650     dyn_cast<IQ_scalar_cngs>(state->f()).resize(2);
00651     if(m) dyn_cast<IQ_vector_cngs>(state->c()).resize(2);
00652     state->Gf();
00653     if(m && nlp_foi) state->Gc();
00654 
00655     if( m
00656 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00657        && decomp_sys_perm.get() == NULL
00658 #endif
00659       ) state->py();
00660     if(m) dyn_cast<IQ_vector_cngs>(state->Ypy()).resize(2);
00661     if( m
00662 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00663       && decomp_sys_perm.get() == NULL
00664 #endif
00665       ) state->pz();
00666     if(m) dyn_cast<IQ_vector_cngs>(state->Zpz()).resize(2);
00667     dyn_cast<IQ_vector_cngs>(state->d()).resize(2);
00668 
00669     if( n > m ) {
00670       dyn_cast<IQ_vector_cngs>(state->rGf()).resize(2);
00671       state->w();
00672       state->zeta();
00673       state->qp_grad();
00674     }
00675     state->eta();
00676     
00677     dyn_cast<IQ_scalar_cngs>(state->alpha()).resize(2);
00678     dyn_cast<IQ_scalar_cngs>(state->mu()).resize(2);
00679     dyn_cast<IQ_scalar_cngs>(state->phi()).resize(2);
00680 
00681     dyn_cast<IQ_scalar_cngs>(state->opt_kkt_err()).resize(2);
00682     dyn_cast<IQ_scalar_cngs>(state->feas_kkt_err()).resize(2);
00683     if( n > m ) {
00684       dyn_cast<IQ_vector_cngs>(state->rGL()).resize(2);
00685     }
00686     if(m) dyn_cast<IQ_vector_cngs>(state->lambda()).resize(2);
00687     dyn_cast<IQ_vector_cngs>(state->nu()).resize(2);
00688 
00689     // Set the state object
00690     algo->set_state( state );
00691   }
00692 
00693   // /////////////////////////////////////////////////////
00694   // C.3  Create and set the step objects
00695 
00696   if(trase_out)
00697     *trase_out << "\n*** Creating and setting the step objects ...\n";
00698 
00699 //  typedef RCP<QPSolverRelaxed> qp_solver_ptr_t;
00700 //  qp_solver_ptr_t qp_solver;
00701 //  typedef ConstrainedOptPack::QPSolverRelaxedTester QPSolverRelaxedTester;
00702 //  typedef RCP<QPSolverRelaxedTester> qp_tester_ptr_t;
00703 //  qp_tester_ptr_t qp_tester;
00704 //  typedef RCP<FeasibilityStepReducedStd_Strategy> feasibility_step_strategy_ptr_t;
00705 //  feasibility_step_strategy_ptr_t  feasibility_step_strategy;
00706 
00707   {
00708 
00709     //
00710     // Create some standard step objects that will be used by many different
00711     // specific algorithms
00712     //
00713     
00714     typedef RCP<AlgorithmStep>   algo_step_ptr_t;
00715 
00716     // Create the EvalNewPoint step and associated objects
00717     algo_step_ptr_t                                    eval_new_point_step           = Teuchos::null;
00718     RCP<CalcFiniteDiffProd>                  calc_fd_prod                  = Teuchos::null;
00719     RCP<VariableBoundsTester>                bounds_tester                 = Teuchos::null;
00720     RCP<NewDecompositionSelection_Strategy>  new_decomp_selection_strategy = Teuchos::null;
00721     decomp_sys_step_builder_.create_eval_new_point(
00722       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
00723       ,&eval_new_point_step, &calc_fd_prod, &bounds_tester, &new_decomp_selection_strategy
00724       );
00725 
00726     // QuasiNormal_Step
00727     algo_step_ptr_t    quansi_normal_step_step = Teuchos::null;
00728     if( !tailored_approach ) {
00729       quansi_normal_step_step = Teuchos::rcp(new QuasiNormalStepStd_Step());
00730     }
00731 
00732     // Check and change decomposition 
00733     algo_step_ptr_t    check_decomp_from_py_step  = Teuchos::null;
00734     algo_step_ptr_t    check_decomp_from_Rpy_step = Teuchos::null;
00735     if( new_decomp_selection_strategy.get() && cov_.max_basis_cond_change_frac_ < INF_BASIS_COND_CHANGE_FRAC ) {
00736       check_decomp_from_py_step = Teuchos::rcp(
00737         new CheckDecompositionFromPy_Step(
00738           new_decomp_selection_strategy
00739           ,cov_.max_basis_cond_change_frac_
00740           ) );
00741       check_decomp_from_Rpy_step = Teuchos::rcp(
00742         new CheckDecompositionFromRPy_Step(
00743           new_decomp_selection_strategy
00744           ,cov_.max_basis_cond_change_frac_
00745           ) );
00746     }
00747 
00748     // CheckDescentQuasiNormalStep
00749     algo_step_ptr_t    check_descent_quansi_normal_step_step = Teuchos::null;
00750     if( algo->algo_cntr().check_results() ) {
00751       check_descent_quansi_normal_step_step = Teuchos::rcp(new CheckDescentQuasiNormalStep_Step(calc_fd_prod));
00752     }
00753 
00754     // ReducedGradient_Step
00755     algo_step_ptr_t    reduced_gradient_step = Teuchos::null;
00756     if( !tailored_approach ) {
00757       reduced_gradient_step = Teuchos::rcp(new ReducedGradientStd_Step());
00758     }
00759 
00760     // CheckSkipBFGSUpdate
00761     algo_step_ptr_t    check_skip_bfgs_update_step = Teuchos::null;
00762     if(!cov_.exact_reduced_hessian_) {
00763       RCP<CheckSkipBFGSUpdateStd_Step>
00764         step = Teuchos::rcp(new CheckSkipBFGSUpdateStd_Step());
00765       if(options_.get()) {
00766         CheckSkipBFGSUpdateStd_StepSetOptions
00767           opt_setter( step.get() );
00768         opt_setter.set_options( *options_ );
00769       }
00770       check_skip_bfgs_update_step = step;
00771     }
00772 
00773     // ReducedHessian_Step
00774     algo_step_ptr_t    reduced_hessian_step = Teuchos::null;
00775     {
00776       // Get the strategy object that will perform the actual secant update.
00777       RCP<ReducedHessianSecantUpdate_Strategy>
00778         secant_update_strategy = Teuchos::null;
00779       switch( cov_.quasi_newton_ )
00780       {
00781           case QN_BFGS:
00782           case QN_PBFGS:
00783           case QN_LBFGS:
00784           case QN_LPBFGS:
00785         {
00786           // create and setup the actual BFGS strategy object
00787           typedef RCP<BFGSUpdate_Strategy> bfgs_strategy_ptr_t;
00788           bfgs_strategy_ptr_t
00789             bfgs_strategy = Teuchos::rcp(new BFGSUpdate_Strategy);
00790           if(options_.get()) { 
00791             BFGSUpdate_StrategySetOptions
00792               opt_setter( bfgs_strategy.get() );
00793             opt_setter.set_options( *options_ );
00794           }
00795           switch( cov_.quasi_newton_ ) {
00796               case QN_BFGS:
00797               case QN_LBFGS:
00798             {
00799               secant_update_strategy = Teuchos::rcp(new ReducedHessianSecantUpdateBFGSFull_Strategy(bfgs_strategy));
00800               break;
00801             }
00802               case QN_PBFGS:
00803               case QN_LPBFGS:
00804             {
00805               TEST_FOR_EXCEPTION(
00806                 true, std::logic_error
00807                 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
00808                 "The quansi_newton options of PBFGS and LPBFGS have not been updated yet!" );
00809               break;
00810             }
00811           }
00812           break;
00813         }
00814         default:
00815           TEST_FOR_EXCEPT(true);
00816       }
00817       
00818       // Finally build the step object
00819       reduced_hessian_step = Teuchos::rcp(
00820         new ReducedHessianSecantUpdateStd_Step( secant_update_strategy ) );
00821       // Add the QuasiNewtonStats iteration quantity
00822       algo->state().set_iter_quant(
00823         quasi_newton_stats_name
00824         ,Teuchos::rcp(new IterQuantityAccessContiguous<QuasiNewtonStats>(
00825           1
00826           ,quasi_newton_stats_name
00827 #ifdef _MIPS_CXX
00828           ,Teuchos::RCP<Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats> >(
00829             new Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats>())
00830 #endif
00831           )
00832         ));
00833     }
00834 
00835     // Initialization of the Reduced Hessian
00836     algo_step_ptr_t  init_red_hess_step = Teuchos::null;
00837     if(
00838       cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_IDENTITY
00839       || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL
00840       || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS
00841       )
00842     {
00843       // InitFinDiffReducedHessian_Step
00844       Algorithm::poss_type poss;
00845       TEST_FOR_EXCEPT( !( poss = algo->get_step_poss( ReducedHessian_name )  ) );
00846       InitFinDiffReducedHessian_Step::EInitializationMethod
00847         init_hess;
00848       switch( cov_.hessian_initialization_ ) {
00849         case INIT_HESS_FIN_DIFF_SCALE_IDENTITY:
00850           init_hess = InitFinDiffReducedHessian_Step::SCALE_IDENTITY;
00851           break;
00852         case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL:
00853           init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL;
00854           break;
00855         case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS:
00856           init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL_ABS;
00857           break;
00858         default:
00859           TEST_FOR_EXCEPT(true);  // only local programming error?
00860       }
00861       Teuchos::RCP<InitFinDiffReducedHessian_Step>
00862         _init_red_hess_step = Teuchos::rcp(new InitFinDiffReducedHessian_Step(init_hess));
00863       if(options_.get()) {
00864         InitFinDiffReducedHessian_StepSetOptions
00865           opt_setter( _init_red_hess_step.get() );
00866         opt_setter.set_options( *options_ );
00867       }
00868       init_red_hess_step = _init_red_hess_step;
00869     }
00870     else if(cov_.hessian_initialization_==INIT_HESS_SERIALIZE ) {
00871       Teuchos::RCP<ReducedHessianSerialization_Step>
00872         _init_red_hess_step = Teuchos::rcp(new ReducedHessianSerialization_Step());
00873       if(options_.get()) {
00874         ReducedHessianSerialization_StepSetOptions
00875           opt_setter( _init_red_hess_step.get() );
00876         opt_setter.set_options( *options_ );
00877       }
00878       init_red_hess_step = _init_red_hess_step;
00879     }
00880     
00881     // NullSpace_Step
00882     algo_step_ptr_t    set_d_bounds_step    = Teuchos::null;
00883     algo_step_ptr_t    tangential_step_step = Teuchos::null;
00884     if( nb == 0 ) {
00885       Teuchos::RCP<TangentialStepWithoutBounds_Step>
00886         tangental_step_output_bounds_step
00887         = Teuchos::rcp(new TangentialStepWithoutBounds_Step());
00888       tangental_step_output_bounds_step->max_pz_norm(cov_.max_pz_norm_);
00889       tangental_step_output_bounds_step->num_pz_damp_iters(cov_.num_pz_damp_iters_);
00890       tangential_step_step = tangental_step_output_bounds_step;
00891     }
00892     else {
00893       // Step object that sets bounds for QP subproblem
00894       set_d_bounds_step = Teuchos::rcp(new SetDBoundsStd_AddedStep());
00895       // QP Solver object
00896       Teuchos::RCP<QPSolverRelaxed>  qp_solver = Teuchos::null;
00897       switch( cov_.qp_solver_type_ ) {
00898         case QP_QPSCHUR: {
00899           using ConstrainedOptPack::QPSolverRelaxedQPSchur;
00900           using ConstrainedOptPack::QPSolverRelaxedQPSchurSetOptions;
00901           using ConstrainedOptPack::QPSchurInitKKTSystemHessianFull;
00902 //          using ConstrainedOptPack::QPSchurInitKKTSystemHessianSuperBasic;
00903           Teuchos::RCP<ConstrainedOptPack::QPSolverRelaxedQPSchur::InitKKTSystem>
00904             init_kkt_sys = Teuchos::null;
00905           switch( cov_.quasi_newton_ ) {
00906             case QN_BFGS:
00907             case QN_LBFGS:
00908               init_kkt_sys = Teuchos::rcp(new QPSchurInitKKTSystemHessianFull());
00909               break;
00910             case QN_PBFGS:
00911             case QN_LPBFGS:
00912               TEST_FOR_EXCEPTION(true,std::logic_error,"Error! PBFGS and LPBFGS are not updated yet!");
00913 /*
00914               init_kkt_sys = new QPSchurInitKKTSystemHessianSuperBasic();
00915 */
00916               break;
00917             default:
00918               TEST_FOR_EXCEPT(true);
00919           }
00920           Teuchos::RCP<QPSolverRelaxedQPSchur>
00921             _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPSchur(init_kkt_sys));
00922           if(options_.get()) {
00923             QPSolverRelaxedQPSchurSetOptions
00924               qp_options_setter(_qp_solver.get());
00925             qp_options_setter.set_options( *options_ );
00926           }
00927           qp_solver = _qp_solver; // give ownership to delete!
00928           break;
00929         }
00930         case QP_QPKWIK: {
00931 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
00932           using ConstrainedOptPack::QPSolverRelaxedQPKWIK;
00933           Teuchos::RCP<QPSolverRelaxedQPKWIK>
00934             _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPKWIK());
00935           qp_solver = _qp_solver; // give ownership to delete!
00936 #else
00937           TEST_FOR_EXCEPTION(
00938             true,std::logic_error,"Error! QPKWIK interface is not supported since "
00939             "CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK is not defined!");
00940 #endif
00941           break;
00942         }
00943         case QP_QPOPT: {
00944           TEST_FOR_EXCEPTION(true,std::logic_error,"Error! QPOPT interface is not updated yet!");
00945 /*
00946 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
00947           using ConstrainedOptPack::QPSolverRelaxedQPOPT;
00948           QPSolverRelaxedQPOPT
00949             *_qp_solver = new QPSolverRelaxedQPOPT();
00950           qp_solver = _qp_solver; // give ownership to delete!
00951 #else
00952           throw std::logic_error(
00953             "NLPAlgoConfigMamaJama::config_algo_cntr(...) : QPOPT is not supported,"
00954             " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
00955 #endif
00956 */
00957           break;
00958         }
00959         default:
00960           TEST_FOR_EXCEPT(true);
00961       }
00962       // QP solver tester
00963       Teuchos::RCP<QPSolverRelaxedTester> 
00964         qp_solver_tester = Teuchos::rcp(new QPSolverRelaxedTester());
00965       if(options_.get()) {
00966         QPSolverRelaxedTesterSetOptions
00967           opt_setter( qp_solver_tester.get() );
00968         opt_setter.set_options( *options_ );
00969       }
00970       // The null-space step
00971       Teuchos::RCP<TangentialStepWithInequStd_Step>
00972         tangential_step_with_inequ_step = Teuchos::rcp(
00973           new TangentialStepWithInequStd_Step(
00974             qp_solver, qp_solver_tester ) );
00975       if(options_.get()) {
00976         TangentialStepWithInequStd_StepSetOptions
00977           opt_setter( tangential_step_with_inequ_step.get() );
00978         opt_setter.set_options( *options_ );
00979       }
00980       tangential_step_step = tangential_step_with_inequ_step;
00981       // Step for reinitialization reduced Hessian on QP failure
00982       tangential_step_step = Teuchos::rcp(
00983         new QPFailureReinitReducedHessian_Step(tangential_step_step)
00984         );
00985     }
00986 
00987     // CalcDFromYPYZPZ_Step
00988     algo_step_ptr_t    calc_d_from_Ypy_Zpy_step = Teuchos::null;
00989     {
00990       calc_d_from_Ypy_Zpy_step = Teuchos::rcp(new CalcDFromYPYZPZ_Step());
00991     }
00992 
00993     // CalcReducedGradLagrangianStd_AddedStep
00994     algo_step_ptr_t    calc_reduced_grad_lagr_step = Teuchos::null;
00995     {
00996       calc_reduced_grad_lagr_step = Teuchos::rcp(
00997         new CalcReducedGradLagrangianStd_AddedStep() );
00998     }
00999 
01000     // CheckConvergence_Step
01001     algo_step_ptr_t    check_convergence_step = Teuchos::null;
01002       {
01003       // Create the strategy object
01004       RCP<CheckConvergenceStd_Strategy>
01005         check_convergence_strategy = Teuchos::rcp(new CheckConvergenceStd_Strategy());
01006 
01007       if(options_.get()) 
01008         {
01009         CheckConvergence_StrategySetOptions
01010           opt_setter( check_convergence_strategy.get() );
01011         opt_setter.set_options( *options_ );
01012         }
01013       
01014       RCP<CheckConvergenceStd_AddedStep>
01015         _check_convergence_step = Teuchos::rcp(new CheckConvergenceStd_AddedStep(check_convergence_strategy));
01016       
01017       check_convergence_step = _check_convergence_step;
01018       }
01019 
01020     // MeritFuncPenaltyParamUpdate_Step
01021     algo_step_ptr_t    merit_func_penalty_param_update_step = Teuchos::null;
01022     if( cov_.line_search_method_ == LINE_SEARCH_FILTER ) {
01023       // We don't need to update a penalty parameter for the filter method :-)
01024     }
01025     else if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
01026       RCP<MeritFunc_PenaltyParamUpdate_AddedStep>
01027         param_update_step = Teuchos::null;
01028       switch( cov_.merit_function_type_ ) {
01029         case MERIT_FUNC_L1: {
01030           switch(cov_.l1_penalty_param_update_) {
01031             case L1_PENALTY_PARAM_WITH_MULT:
01032 //              param_update_step
01033 //                = Teuchos::rcp(new  MeritFunc_PenaltyParamUpdateWithMult_AddedStep());
01034               TEST_FOR_EXCEPTION(
01035                 true, std::logic_error
01036                 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01037                 "The l1_penalty_parameter_update option of MULT_FREE has not been updated yet!" );
01038               break;
01039             case L1_PENALTY_PARAM_MULT_FREE:
01040               param_update_step
01041                 = Teuchos::rcp(new  MeritFunc_PenaltyParamUpdateMultFree_AddedStep());
01042               break;
01043             default:
01044               TEST_FOR_EXCEPT(true);
01045           }
01046           break;
01047         }
01048         case MERIT_FUNC_MOD_L1:
01049         case MERIT_FUNC_MOD_L1_INCR:
01050 //          param_update_step = new  MeritFunc_PenaltyParamsUpdateWithMult_AddedStep(
01051 //                      Teuchos::rcp_implicit_cast<MeritFuncNLP>(merit_func) );
01052           TEST_FOR_EXCEPTION(
01053             true, std::logic_error
01054             ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01055             "The merit_function_type options of MODIFIED_L1 and MODIFIED_L1_INCR have not been updated yet!" );
01056           break;
01057         default:
01058           TEST_FOR_EXCEPT(true);  // local programming error
01059       }
01060       if(options_.get()) {
01061         MeritFunc_PenaltyParamUpdate_AddedStepSetOptions
01062           ppu_options_setter( param_update_step.get() );
01063         ppu_options_setter.set_options( *options_ );
01064       }
01065       merit_func_penalty_param_update_step = param_update_step;
01066     }
01067 
01068     // LineSearchFull_Step
01069     algo_step_ptr_t    line_search_full_step_step = Teuchos::null;
01070     {
01071       line_search_full_step_step = Teuchos::rcp(new LineSearchFullStep_Step(bounds_tester));
01072     }
01073 
01074     // LineSearch_Step
01075     algo_step_ptr_t    line_search_step = Teuchos::null;
01076     if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
01077       RCP<DirectLineSearchArmQuad_Strategy>
01078         direct_line_search = Teuchos::rcp(new  DirectLineSearchArmQuad_Strategy());
01079       if(options_.get()) {
01080         ConstrainedOptPack::DirectLineSearchArmQuad_StrategySetOptions
01081           ls_options_setter( direct_line_search.get(), "DirectLineSearchArmQuadSQPStep" );
01082         ls_options_setter.set_options( *options_ );
01083       }
01084       if( n > m ) {
01085         switch( cov_.line_search_method_ ) {
01086           case LINE_SEARCH_DIRECT: {
01087             line_search_step = Teuchos::rcp(new LineSearchDirect_Step(direct_line_search));
01088             break;
01089           }
01090           case LINE_SEARCH_2ND_ORDER_CORRECT: {
01091             TEST_FOR_EXCEPTION(
01092               true, std::logic_error
01093               ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01094               "The line_search_method option of 2ND_ORDER_CORRECT has not been updated yet!" );
01095             break;
01096           }
01097           case LINE_SEARCH_WATCHDOG: {
01098             TEST_FOR_EXCEPTION(
01099               true, std::logic_error
01100               ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01101               "The line_search_method option of WATCHDOG has not been updated yet!" );
01102             break;
01103           }
01104           case LINE_SEARCH_FILTER: {
01105             Teuchos::RCP<LineSearchFilter_Step> 
01106               line_search_filter_step = Teuchos::rcp(
01107                 new LineSearchFilter_Step(algo_cntr->get_nlp())
01108                 );
01109             
01110             if(options_.get()) 
01111             {
01112               LineSearchFilter_StepSetOptions options_setter(line_search_filter_step.get());
01113               options_setter.set_options(*options_);
01114             }
01115 
01116             line_search_step = line_search_filter_step;         
01117             break;
01118           }
01119           case LINE_SEARCH_AUTO:
01120           case LINE_SEARCH_NONE:
01121           default:
01122             TEST_FOR_EXCEPT(true); // Should not get here!
01123         }
01124       }
01125       else {
01126         line_search_step = Teuchos::rcp( new LineSearchNLE_Step(direct_line_search) );
01127       }
01128     }
01129     
01130     // LineSearchFailure
01131     if( new_decomp_selection_strategy.get() ) {
01132       line_search_step = Teuchos::rcp(
01133         new LineSearchFailureNewDecompositionSelection_Step(
01134           line_search_step
01135           ,new_decomp_selection_strategy
01136           )
01137         );
01138     }
01139   
01140     //
01141     // Create the algorithm depending on the type of NLP we are trying to solve.
01142     //
01143 
01144     if( m == 0 ) {
01145 
01146       if( nb == 0 ) {
01147         //
01148         // Unconstrained NLP (m == 0, num_bounded_x == 0)
01149         //
01150         if(trase_out)
01151           *trase_out 
01152             << "\nConfiguring an algorithm for an unconstrained "
01153             << "NLP (m == 0, num_bounded_x == 0) ...\n";
01154       }
01155       else {
01156         //
01157         // Simple bound constrained NLP (m == 0, num_bounded_x > 0)
01158         //
01159         if(trase_out)
01160           *trase_out 
01161             << "\nConfiguring an algorithm for a simple bound constrained "
01162             << "NLP (m == 0, num_bounded_x > 0) ...\n";
01163       }
01164 
01165       int step_num       = 0;
01166   
01167       // EvalNewPoint
01168       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01169       
01170       // ReducedGradient
01171       algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
01172 
01173       if( nb == 0 ) {
01174         
01175         // CalcReducedGradLagrangian
01176         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01177 
01178         // CheckConvergence
01179         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01180 
01181       }
01182 
01183       // ReducedHessian
01184       algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
01185 
01186       // (.-1) Initialize reduced Hessian
01187       if(init_red_hess_step.get()) {
01188         algo->insert_assoc_step(
01189           step_num, IterationPack::PRE_STEP, 1
01190           ,"ReducedHessianInitialization"
01191           ,init_red_hess_step
01192           );
01193       }
01194       
01195       // TangentialStep
01196       algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
01197       if( nb > 0 ) {
01198         // SetDBoundsStd
01199         algo->insert_assoc_step(
01200           step_num
01201           ,IterationPack::PRE_STEP
01202           ,1
01203           ,"SetDBoundsStd"
01204           ,set_d_bounds_step
01205           );
01206       }
01207 
01208       // CalcDFromZPZ
01209       algo->insert_step( ++step_num, "CalcDFromZpz", Teuchos::rcp(new CalcDFromZPZ_Step()) );
01210 
01211       if( nb > 0 ) {
01212         
01213         // CalcReducedGradLagrangian
01214         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01215 
01216         // CheckConvergence
01217         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01218 
01219       }
01220 
01221       // LineSearch
01222       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01223         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01224       }
01225       else {
01226         // Main line search step
01227         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01228         // Insert presteps
01229         Algorithm::poss_type
01230           pre_step_i = 0;
01231         // (.-?) LineSearchFullStep
01232         algo->insert_assoc_step(
01233           step_num
01234           ,IterationPack::PRE_STEP
01235           ,++pre_step_i
01236           ,"LineSearchFullStep"
01237           ,line_search_full_step_step
01238           );
01239         // (.-?) MeritFunc_DummyUpdate
01240         algo->insert_assoc_step(
01241           step_num
01242           ,IterationPack::PRE_STEP
01243           ,++pre_step_i
01244           ,"MeritFunc_DummyUpdate"
01245           ,Teuchos::rcp(new MeritFunc_DummyUpdate_Step())
01246           );
01247       }
01248       
01249     }
01250     else if( n == m ) {
01251       //
01252       // System of Nonlinear equations (n == m)
01253       //
01254       if(trase_out)
01255         *trase_out 
01256           << "\nConfiguring an algorithm for a system of nonlinear equations "
01257           << "NLP (n == m) ...\n";
01258       
01259       if(algo->state().get_iter_quant_id(merit_func_nlp_name)!=IterationPack::AlgorithmState::DOES_NOT_EXIST)
01260         algo->state().erase_iter_quant(merit_func_nlp_name);
01261 
01262       int step_num       = 0;
01263       int assoc_step_num = 0;
01264   
01265       // EvalNewPoint
01266       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01267       if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
01268       {
01269         algo->insert_assoc_step(
01270           step_num
01271           ,IterationPack::POST_STEP
01272           ,1
01273           ,"CheckDescentQuasiNormalStep"
01274           ,check_descent_quansi_normal_step_step
01275           );
01276       }
01277       
01278       // QuasiNormalStep
01279       if( !tailored_approach ) {
01280         algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
01281         assoc_step_num = 0;
01282         if( check_decomp_from_py_step.get() )
01283           algo->insert_assoc_step(
01284             step_num
01285             ,IterationPack::POST_STEP
01286             ,++assoc_step_num
01287             ,"CheckDecompositionFromPy"
01288             ,check_decomp_from_py_step
01289             );
01290         if( check_decomp_from_Rpy_step.get() )
01291           algo->insert_assoc_step(
01292             step_num
01293             ,IterationPack::POST_STEP
01294             ,++assoc_step_num
01295             ,"CheckDecompositionFromRPy"
01296             ,check_decomp_from_Rpy_step
01297             );
01298         if( check_descent_quansi_normal_step_step.get() )
01299           algo->insert_assoc_step(
01300             step_num
01301             ,IterationPack::POST_STEP
01302             ,++assoc_step_num
01303             ,"CheckDescentQuasiNormalStep"
01304             ,check_descent_quansi_normal_step_step
01305             );
01306       }
01307 
01308       // CheckConvergence
01309       algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01310 
01311       // CalcDFromYPY
01312       algo->insert_step( ++step_num, "CalcDFromYpy", Teuchos::rcp(new CalcDFromYPY_Step()) );
01313       
01314       // LineSearch
01315       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01316         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01317       }
01318       else {
01319         // Main line search step
01320         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01321         // Insert presteps
01322         Algorithm::poss_type
01323           pre_step_i = 0;
01324         // (.-?) LineSearchFullStep
01325         algo->insert_assoc_step(
01326           step_num
01327           ,IterationPack::PRE_STEP
01328           ,++pre_step_i
01329           ,"LineSearchFullStep"
01330           ,line_search_full_step_step
01331           );
01332       }
01333 
01334     }
01335     else if ( m > 0 || nb > 0 ) {
01336       //
01337       // General nonlinear NLP ( m > 0 )
01338       //
01339       if( nb == 0 ) {
01340         //
01341         // Nonlinear equality constrained NLP ( m > 0 && num_bounded_x == 0 )
01342         //
01343         if(trase_out)
01344           *trase_out 
01345             << "\nConfiguring an algorithm for a nonlinear equality constrained "
01346             << "NLP ( m > 0 && num_bounded_x == 0) ...\n";
01347       }
01348       else {
01349         //
01350         // Nonlinear inequality constrained NLP ( num_bounded_x > 0 )
01351         //
01352         if(trase_out)
01353           *trase_out 
01354             << "\nConfiguring an algorithm for a nonlinear generally constrained "
01355             << "NLP ( num_bounded_x > 0 ) ...\n";
01356       }
01357 
01358       int step_num       = 0;
01359       int assoc_step_num = 0;
01360   
01361       // EvalNewPoint
01362       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01363       if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
01364       {
01365         algo->insert_assoc_step(
01366           step_num
01367           ,IterationPack::POST_STEP
01368           ,1
01369           ,"CheckDescentQuasiNormalStep"
01370           ,check_descent_quansi_normal_step_step
01371           );
01372       }
01373       
01374       // QuasiNormalStep
01375       if( !tailored_approach ) {
01376         algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
01377         assoc_step_num = 0;
01378         if( check_decomp_from_py_step.get() )
01379           algo->insert_assoc_step(
01380             step_num
01381             ,IterationPack::POST_STEP
01382             ,++assoc_step_num
01383             ,"CheckDecompositionFromPy"
01384             ,check_decomp_from_py_step
01385             );
01386         if( check_decomp_from_Rpy_step.get() )
01387           algo->insert_assoc_step(
01388             step_num
01389             ,IterationPack::POST_STEP
01390             ,++assoc_step_num
01391             ,"CheckDecompositionFromRPy"
01392             ,check_decomp_from_Rpy_step
01393             );
01394         if( check_descent_quansi_normal_step_step.get() )
01395           algo->insert_assoc_step(
01396             step_num
01397             ,IterationPack::POST_STEP
01398             ,++assoc_step_num
01399             ,"CheckDescentQuasiNormalStep"
01400             ,check_descent_quansi_normal_step_step
01401             );
01402       }
01403 
01404       // ReducedGradient
01405       if( !tailored_approach ) {
01406         algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
01407       }
01408 
01409       if( nb == 0 ) {
01410 
01411         // CalcReducedGradLagrangian
01412         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01413 
01414         // CalcLagrangeMultDecomposed
01415         // Compute these here so that in case we converge we can report them
01416         if( !tailored_approach ) {
01417           // ToDo: Insert this step
01418         }
01419 
01420         // CheckConvergence
01421         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01422       }
01423 
01424       // ReducedHessian
01425       algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
01426 
01427       // (.-1) Initialize reduced Hessian
01428       if(init_red_hess_step.get()) {
01429         algo->insert_assoc_step(
01430           step_num, IterationPack::PRE_STEP, 1
01431           ,"ReducedHessianInitialization"
01432           ,init_red_hess_step
01433           );
01434       }
01435 
01436       // (.-1) CheckSkipBFGSUpdate
01437       algo->insert_assoc_step(
01438         step_num
01439         ,IterationPack::PRE_STEP
01440         ,1
01441         ,CheckSkipBFGSUpdate_name
01442         ,check_skip_bfgs_update_step
01443         );
01444 
01445       // TangentialStep
01446       algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
01447       if( nb > 0 ) {
01448         // SetDBoundsStd
01449         algo->insert_assoc_step(
01450           step_num
01451           ,IterationPack::PRE_STEP
01452           ,1
01453           ,"SetDBoundsStd"
01454           ,set_d_bounds_step
01455           );
01456       }
01457 
01458       // CalcDFromYPYZPZ
01459       algo->insert_step( ++step_num, CalcDFromYPYZPZ_name, calc_d_from_Ypy_Zpy_step );
01460       
01461       if( nb > 0 ) {
01462 
01463         // CalcReducedGradLagrangian
01464         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01465 
01466         // CalcLagrangeMultDecomposed
01467         // Compute these here so that in case we converge we can report them
01468         if( !tailored_approach ) {
01469           // ToDo: Insert this step
01470         }
01471 
01472         // CheckConvergence
01473         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01474       }
01475 
01476       // LineSearch
01477       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01478         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01479       }
01480       else {
01481         // Main line search step
01482         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01483         // Insert presteps
01484         Algorithm::poss_type
01485           pre_step_i = 0;
01486         // (.-?) LineSearchFullStep
01487         algo->insert_assoc_step(
01488           step_num
01489           ,IterationPack::PRE_STEP
01490           ,++pre_step_i
01491           ,"LineSearchFullStep"
01492           ,line_search_full_step_step
01493           );
01494         // (.-?) MeritFunc_PenaltyPramUpdate
01495         if(merit_func_penalty_param_update_step.get()) {
01496           algo->insert_assoc_step(
01497             step_num
01498             ,IterationPack::PRE_STEP
01499             ,++pre_step_i
01500             ,"MeritFunc_PenaltyParamUpdate"
01501             ,merit_func_penalty_param_update_step
01502             );
01503         }
01504       }
01505 
01506     }
01507     else {
01508       TEST_FOR_EXCEPT(true); // Error, this should not ever be called!
01509     }
01510   }
01511   
01512 }
01513 
01514 void NLPAlgoConfigMamaJama::init_algo(NLPAlgoInterface* _algo)
01515 {
01516   using Teuchos::dyn_cast;
01517 
01518   TEST_FOR_EXCEPTION(
01519     _algo == NULL, std::invalid_argument
01520     ,"NLPAlgoConfigMamaJama::init_algo(_algo) : Error, "
01521     "_algo can not be NULL" );
01522 
01523   NLPAlgo              &algo    = dyn_cast<NLPAlgo>(*_algo);
01524   NLPAlgoState         &state   = algo.rsqp_state();
01525   NLP                  &nlp     = algo.nlp();
01526 
01527   algo.max_iter( algo.algo_cntr().max_iter() );
01528   algo.max_run_time( algo.algo_cntr().max_run_time() );
01529 
01530   // Reset the iteration count to zero
01531   state.k(0);
01532 
01533   // Get organized output of vectors and matrices even if setw is not used by Step objects.
01534   algo.track().journal_out()
01535     << std::setprecision(algo.algo_cntr().journal_print_digits())
01536     << std::scientific;
01537 
01538   // set the first step
01539   algo.do_step_first(1);
01540 
01541   // The rest of the algorithm should initialize itself
01542 }
01543 
01544 // private
01545 
01546 void NLPAlgoConfigMamaJama::readin_options(
01547     const OptionsFromStreamPack::OptionsFromStream     &options
01548   , SOptionValues                                      *ov
01549   , std::ostream                                       *trase_out
01550   )
01551 {
01552   namespace ofsp = OptionsFromStreamPack;
01553   using   ofsp::OptionsFromStream;
01554   typedef   OptionsFromStream::options_group_t    options_group_t;
01555   using   ofsp::StringToIntMap;
01556   using   ofsp::StringToBool;
01557 
01558   TEST_FOR_EXCEPT( !( ov ) ); // only a local class error
01559 
01560   // Get the options group for "NLPAlgoConfigMamaJama"
01561   const std::string opt_grp_name = "NLPAlgoConfigMamaJama";
01562   const OptionsFromStream::options_group_t optgrp = options.options_group( opt_grp_name );
01563   if( OptionsFromStream::options_group_exists( optgrp ) ) {
01564 
01565     // Define map for options group "MamaJama".
01566     const int num_opts = 13;
01567     enum EMamaJama {
01568       MAX_BASIS_COND_CHANGE_FRAC
01569       ,EXACT_REDUCED_HESSIAN
01570       ,QUASI_NEWTON
01571       ,NUM_LBFGS_UPDATES_STORED
01572       ,LBFGS_AUTO_SCALING
01573       ,HESSIAN_INITIALIZATION
01574       ,QP_SOLVER
01575       ,REINIT_HESSIAN_ON_QP_FAIL
01576       ,LINE_SEARCH_METHOD
01577       ,MERIT_FUNCTION_TYPE
01578       ,L1_PENALTY_PARAM_UPDATE
01579       ,MAX_PZ_NORM
01580       ,NUM_PZ_DAMP_ITERS
01581     };
01582     const char* SMamaJama[num_opts] = {
01583       "max_basis_cond_change_frac"
01584       ,"exact_reduced_hessian"
01585       ,"quasi_newton"
01586       ,"num_lbfgs_updates_stored"
01587       ,"lbfgs_auto_scaling"
01588       ,"hessian_initialization"
01589       ,"qp_solver"
01590       ,"reinit_hessian_on_qp_fail"
01591       ,"line_search_method"
01592       ,"merit_function_type"
01593       ,"l1_penalty_parameter_update"
01594       ,"max_pz_norm"
01595       ,"num_pz_damp_iters"
01596     };
01597     StringToIntMap  mama_jama_map(  opt_grp_name, num_opts, SMamaJama );
01598 
01599     options_group_t::const_iterator itr = optgrp.begin();
01600     for( ; itr != optgrp.end(); ++itr ) {
01601       switch( (EMamaJama)mama_jama_map( ofsp::option_name(itr) ) ) {
01602         case MAX_BASIS_COND_CHANGE_FRAC:
01603           ov->max_basis_cond_change_frac_ = std::atof( ofsp::option_value(itr).c_str() );
01604           break;
01605         case EXACT_REDUCED_HESSIAN:
01606           ov->exact_reduced_hessian_ = StringToBool( "exact_reduced_hessian", ofsp::option_value(itr).c_str() );
01607           break;
01608         case QUASI_NEWTON:
01609         {
01610           const std::string &opt_val = ofsp::option_value(itr);
01611           if( opt_val == "AUTO" )
01612             ov->quasi_newton_ = QN_AUTO;
01613           else if( opt_val == "BFGS" )
01614             ov->quasi_newton_ = QN_BFGS;
01615           else if( opt_val == "PBFGS" )
01616             ov->quasi_newton_ = QN_PBFGS;
01617           else if( opt_val == "LBFGS" )
01618             ov->quasi_newton_ = QN_LBFGS;
01619           else if( opt_val == "LPBFGS" )
01620             ov->quasi_newton_ = QN_LPBFGS;
01621           else
01622             TEST_FOR_EXCEPTION(
01623               true, std::invalid_argument
01624               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01625               "Error, incorrect value for \"quasi_newton\" "
01626               ", Only options of BFGS, PBFGS"
01627               ", LBFGS, LPBFGS and AUTO are avalible."
01628               );
01629           break;
01630         }
01631         case NUM_LBFGS_UPDATES_STORED:
01632           ov->num_lbfgs_updates_stored_ = std::atoi( ofsp::option_value(itr).c_str() );
01633           break;
01634         case LBFGS_AUTO_SCALING:
01635           ov->lbfgs_auto_scaling_
01636             = StringToBool( "lbfgs_auto_scaling", ofsp::option_value(itr).c_str() );
01637           break;
01638         case HESSIAN_INITIALIZATION:
01639         {
01640           const std::string &opt_val = ofsp::option_value(itr);
01641           if( opt_val == "IDENTITY" )
01642             ov->hessian_initialization_ = INIT_HESS_IDENTITY;
01643           else if( opt_val == "FINITE_DIFF_SCALE_IDENTITY" )
01644             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_IDENTITY;
01645           else if( opt_val == "FINITE_DIFF_DIAGONAL" )
01646             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL;
01647           else if( opt_val == "FINITE_DIFF_DIAGONAL_ABS" )
01648             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
01649           else if( opt_val == "AUTO" )
01650             ov->hessian_initialization_ = INIT_HESS_AUTO;
01651           else if( opt_val == "SERIALIZE" )
01652             ov->hessian_initialization_ = INIT_HESS_SERIALIZE;
01653           else
01654             TEST_FOR_EXCEPTION(
01655               true, std::invalid_argument
01656               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01657               "Error, incorrect value for \"hessian_initialization\" "
01658               ", Only options of IDENTITY, SERIALIZE, FINITE_DIFF_SCALE_IDENTITY,"
01659               " FINITE_DIFF_DIAGONAL, FINITE_DIFF_DIAGONAL_ABS and AUTO"
01660               " are available"  );
01661           break;
01662         }
01663         case QP_SOLVER:
01664         {
01665           const std::string &qp_solver = ofsp::option_value(itr);
01666           if( qp_solver == "AUTO" ) {
01667             ov->qp_solver_type_ = QP_AUTO;
01668           } else if( qp_solver == "QPSOL" ) {
01669             ov->qp_solver_type_ = QP_QPSOL;
01670           } else if( qp_solver == "QPOPT" ) {
01671 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
01672             ov->qp_solver_type_ = QP_QPOPT;
01673 #else
01674             TEST_FOR_EXCEPTION(
01675               true, std::invalid_argument
01676               ,"NLPAlgoConfigMamaJama::readin_options(...) : QPOPT is not supported,"
01677               " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
01678 #endif
01679           } else if( qp_solver == "QPKWIK" ) {
01680 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
01681             ov->qp_solver_type_ = QP_QPKWIK;
01682 #else
01683             TEST_FOR_EXCEPTION(
01684               true, std::invalid_argument
01685               ,"NLPAlgoConfigMamaJama::readin_options(...) : QPKWIK is not supported,"
01686               " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK!" );
01687 #endif
01688           } else if( qp_solver == "QPSCHUR" ) {
01689             ov->qp_solver_type_ = QP_QPSCHUR;
01690           } else {
01691             TEST_FOR_EXCEPTION(
01692               true, std::invalid_argument
01693               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01694               "Error, incorrect value for \"qp_solver\" "
01695               "Only qp solvers QPOPT, QPSOL, QPKWIK, QPSCHUR and AUTO are avalible."  );
01696           }
01697           break;
01698         }
01699         case REINIT_HESSIAN_ON_QP_FAIL:
01700           ov->reinit_hessian_on_qp_fail_ = StringToBool( "reinit_hessian_on_qp_fail", ofsp::option_value(itr).c_str() );
01701           break;
01702         case LINE_SEARCH_METHOD:
01703         {
01704           const std::string &option = ofsp::option_value(itr);
01705           if( option == "NONE" ) {
01706             ov->line_search_method_ = LINE_SEARCH_NONE;
01707           } else if( option == "DIRECT" ) {
01708             ov->line_search_method_ = LINE_SEARCH_DIRECT;
01709           } else if( option == "2ND_ORDER_CORRECT" ) {
01710             ov->line_search_method_ = LINE_SEARCH_2ND_ORDER_CORRECT;
01711           } else if( option == "WATCHDOG" ) {
01712             ov->line_search_method_ = LINE_SEARCH_WATCHDOG;
01713           } else if( option == "AUTO" ) {
01714             ov->line_search_method_ = LINE_SEARCH_AUTO;
01715           } else if( option == "FILTER" ) {
01716             ov->line_search_method_ = LINE_SEARCH_FILTER;
01717           } else {
01718             TEST_FOR_EXCEPTION(
01719               true, std::invalid_argument
01720               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01721               "Error, incorrect value for \"line_search_method\".\n"
01722               "Only the options NONE, DIRECT, 2ND_ORDER_CORRECT, FILTER, WATCHDOG "
01723               "and AUTO are avalible." );
01724           }
01725           break;
01726         }
01727         case MERIT_FUNCTION_TYPE:
01728         {
01729           const std::string &option = ofsp::option_value(itr);
01730           if( option == "L1" )
01731             ov->merit_function_type_ = MERIT_FUNC_L1;
01732           else if( option == "MODIFIED_L1" )
01733             ov->merit_function_type_ = MERIT_FUNC_MOD_L1;
01734           else if( option == "MODIFIED_L1_INCR" )
01735             ov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
01736           else if( option == "AUTO" )
01737             ov->merit_function_type_ = MERIT_FUNC_AUTO;
01738           else
01739             TEST_FOR_EXCEPTION(
01740               true, std::invalid_argument
01741               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01742               "Error, incorrect value for \"merit_function_type\".\n"
01743               "Only the options L1, MODIFIED_L1, MODIFIED_L1_INCR "
01744               "and AUTO are avalible." );
01745           break;
01746         }
01747         case L1_PENALTY_PARAM_UPDATE:
01748         {
01749           const std::string &option = ofsp::option_value(itr);
01750           if( option == "WITH_MULT" )
01751             ov->l1_penalty_param_update_
01752               = L1_PENALTY_PARAM_WITH_MULT;
01753           else if( option == "MULT_FREE" )
01754             ov->l1_penalty_param_update_
01755               = L1_PENALTY_PARAM_MULT_FREE;
01756           else if( option == "AUTO" )
01757             ov->l1_penalty_param_update_
01758               = L1_PENALTY_PARAM_AUTO;
01759           else
01760             TEST_FOR_EXCEPTION(
01761               true, std::invalid_argument
01762               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01763               "Error, incorrect value for \"l1_penalty_param_update\".\n"
01764               "Only the options WITH_MULT, MULT_FREE and AUTO"
01765               "are avalible."  );
01766           break;
01767         }
01768         case MAX_PZ_NORM: {
01769           const std::string &option = ofsp::option_value(itr);
01770           ov->max_pz_norm_ = std::atof(option.c_str());
01771           break;
01772         }
01773         case NUM_PZ_DAMP_ITERS: {
01774           const std::string &option = ofsp::option_value(itr);
01775           ov->num_pz_damp_iters_ = std::atoi(option.c_str());
01776           break;
01777         }
01778 
01779         default:
01780           TEST_FOR_EXCEPT(true);  // this would be a local programming error only.
01781       }
01782     }
01783   }
01784   else {
01785     if(trase_out)
01786       *trase_out
01787         << "\n\n*** Warning!  The options group \"NLPAlgoConfigMamaJama\" was not found.\n"
01788         << "Using a default set of options instead ... \n";
01789   }
01790 }
01791 
01792 //
01793 // This is where some of the default options are set and the user is alerted to what their
01794 // value is.
01795 //
void NLPAlgoConfigMamaJama::set_default_options( 
  const SOptionValues     &uov
  ,SOptionValues          *cov
  ,std::ostream           *trase_out
  )
{
  // Announce the default-setting pass in the journal (if one is attached).
  if(trase_out)
    *trase_out
      << "\n*** Setting option defaults for options not set by the user or determined some other way ...\n";

  // For each option: if BOTH the computed value (cov) and the user value
  // (uov) are still in their unset/AUTO state, install a hard-coded default
  // (and log it); otherwise propagate the user's value into cov.  Note that
  // most numeric/enum options guard the else-branch with
  // "else if(cov->... == AUTO/< 0)" so an already-determined cov is kept,
  // but a few (this one and full_steps_after_k_ below) copy uov
  // unconditionally.
  if( cov->max_basis_cond_change_frac_ < 0.0 &&  uov.max_basis_cond_change_frac_ < 0.0 ) {
    if(trase_out)
      *trase_out
        << "\nmax_basis_cond_change_frac < 0 : setting max_basis_cond_change_frac = 1e+4 \n";
    cov->max_basis_cond_change_frac_ = 1e+4;
  }
  else {
    cov->max_basis_cond_change_frac_ = uov.max_basis_cond_change_frac_;
  }
  cov->exact_reduced_hessian_ = uov.exact_reduced_hessian_;
  // Quasi-Newton update type: AUTO resolves to dense BFGS.
  if( cov->quasi_newton_ == QN_AUTO && uov.quasi_newton_ == QN_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nquasi_newton == AUTO: setting quasi_newton = BFGS\n";
    cov->quasi_newton_ = QN_BFGS;
  }
  else if(cov->quasi_newton_ == QN_AUTO) {
    cov->quasi_newton_ = uov.quasi_newton_;
  }
  // Limited-memory BFGS history length: unset (< 0) defaults to 10 updates.
  if( cov->num_lbfgs_updates_stored_ < 0 && uov.num_lbfgs_updates_stored_ < 0 ) {
    if(trase_out)
      *trase_out
        << "\nnum_lbfgs_updates_stored < 0 : setting num_lbfgs_updates_stored = 10\n";
    cov->num_lbfgs_updates_stored_ = 10;
  }
  else if(cov->num_lbfgs_updates_stored_ < 0) {
    cov->num_lbfgs_updates_stored_ = uov.num_lbfgs_updates_stored_;
  }
  cov->lbfgs_auto_scaling_ = uov.lbfgs_auto_scaling_;
  // Reduced-Hessian initialization: AUTO resolves to IDENTITY (the
  // commented-out alternative below once selected a finite-difference
  // scaled diagonal instead).
  if( cov->hessian_initialization_ == INIT_HESS_AUTO && uov.hessian_initialization_ == INIT_HESS_AUTO ) {
    if(trase_out)
       *trase_out
        << "\nhessian_initialization == AUTO: setting hessian_initialization = IDENTITY\n";
    cov->hessian_initialization_ = INIT_HESS_IDENTITY;
/*
    if(trase_out)
      *trase_out
        << "\nhessian_initialization == AUTO: setting hessian_initialization = FINITE_DIFF_DIAGONAL_ABS\n";
    cov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
*/
  }
  else if(cov->hessian_initialization_ == INIT_HESS_AUTO) {
    cov->hessian_initialization_ = uov.hessian_initialization_;
  }
  // QP solver: AUTO prefers QPKWIK when it was compiled in, else QPSCHUR.
  if( cov->qp_solver_type_ == QP_AUTO && uov.qp_solver_type_ == QP_AUTO ) {
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
    if(trase_out)
      *trase_out
        << "\nqp_solver_type == AUTO: setting qp_solver_type = QPKWIK\n";
    cov->qp_solver_type_ = QP_QPKWIK;
#else
    if(trase_out)
      *trase_out
        << "\nqp_solver_type == AUTO: setting qp_solver_type = QPSCHUR\n";
    cov->qp_solver_type_ = QP_QPSCHUR;
#endif
  }
  else if(cov->qp_solver_type_ == QP_AUTO) {
    cov->qp_solver_type_ = uov.qp_solver_type_;
  }
  cov->reinit_hessian_on_qp_fail_ = uov.reinit_hessian_on_qp_fail_;
  // Globalization strategy: AUTO resolves to the filter line search.
  if( cov->line_search_method_ == LINE_SEARCH_AUTO && uov.line_search_method_ == LINE_SEARCH_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nline_search_method == AUTO: setting line_search_method = FILTER\n";
    cov->line_search_method_ = LINE_SEARCH_FILTER;
  }
  else if(cov->line_search_method_ == LINE_SEARCH_AUTO) {
    cov->line_search_method_ = uov.line_search_method_;
  }
  // Merit function for merit-based line searches: AUTO resolves to the
  // modified L1 with incrementally updated penalty parameters.
  if( cov->merit_function_type_ == MERIT_FUNC_AUTO && uov.merit_function_type_ == MERIT_FUNC_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nmerit_function_type == AUTO: setting merit_function_type = MODIFIED_L1_INCR\n";
    cov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
  }
  else if(cov->merit_function_type_ == MERIT_FUNC_AUTO) {
    cov->merit_function_type_ = uov.merit_function_type_;
  }
  // L1 penalty parameter update rule: AUTO resolves to MULT_FREE.
  if( cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO && uov.l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nl1_penalty_param_update == AUTO: setting l1_penalty_param_update = MULT_FREE\n";
    cov->l1_penalty_param_update_ = L1_PENALTY_PARAM_MULT_FREE;
  }
  else if(cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO) {
    cov->l1_penalty_param_update_ = uov.l1_penalty_param_update_;
  }
  // full_steps_after_k < 0 means "never turn the line search off"; no value
  // is written in that case.  NOTE(review): unlike the guarded options above,
  // the else-branch copies uov unconditionally, so a cov value that was
  // already >= 0 would be overwritten by a negative uov value here — confirm
  // this asymmetry is intentional before changing it.
  if( cov->full_steps_after_k_ < 0 && uov.full_steps_after_k_ < 0 ) {
    if(trase_out)
      *trase_out
        << "\nfull_steps_after_k < 0 : the line search will never be turned off after so many iterations\n";
  }
  else {
    cov->full_steps_after_k_ = uov.full_steps_after_k_;
  }
  // These two have no AUTO sentinel; the user's values are always copied.
  cov->max_pz_norm_ = uov.max_pz_norm_;
  cov->num_pz_damp_iters_ = uov.num_pz_damp_iters_;
  if(trase_out)
    *trase_out
      << "\n*** End setting default options\n";
}
01908 
01909 } // end namespace MoochoPack
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines