MoochoPack : Framework for Large-Scale Optimization Algorithms Version of the Day
MoochoPack_NLPAlgoConfigMamaJama.cpp
00001 // @HEADER
00002 // ***********************************************************************
00003 // 
00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
00005 //                  Copyright (2003) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // Redistribution and use in source and binary forms, with or without
00011 // modification, are permitted provided that the following conditions are
00012 // met:
00013 //
00014 // 1. Redistributions of source code must retain the above copyright
00015 // notice, this list of conditions and the following disclaimer.
00016 //
00017 // 2. Redistributions in binary form must reproduce the above copyright
00018 // notice, this list of conditions and the following disclaimer in the
00019 // documentation and/or other materials provided with the distribution.
00020 //
00021 // 3. Neither the name of the Corporation nor the names of the
00022 // contributors may be used to endorse or promote products derived from
00023 // this software without specific prior written permission.
00024 //
00025 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
00026 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
00027 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
00028 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
00029 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
00030 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
00031 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00032 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00033 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00034 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00035 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00036 //
00037 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 
00038 // 
00039 // ***********************************************************************
00040 // @HEADER
00041 
00042 #include <assert.h>
00043 
00044 #include <sstream>
00045 #include <typeinfo>
00046 #include <iostream>
00047 
00048 #include "MoochoPack_NLPAlgoConfigMamaJama.hpp"
00049 #include "MoochoPack_NLPAlgo.hpp"
00050 #include "MoochoPack_NLPAlgoContainer.hpp"
00051 #include "IterationPack_AlgorithmSetOptions.hpp"
00052 #include "AbstractLinAlgPack_MatrixSymPosDefCholFactor.hpp"                     // rHL 
00053 //#include "ConstrainedOptPack_MatrixSymPosDefInvCholFactor.hpp"    // .
00054 #include "ConstrainedOptPack_MatrixSymPosDefLBFGS.hpp"        // .
00055 //#include "ConstrainedOptPack_MatrixHessianSuperBasicInitDiagonal.hpp"  // | rHL (super basics)
00056 //#include "AbstractLinAlgPack/src/AbstractLinAlgPack_MatrixSymDiagStd.hpp"                          // |
00057 
00058 #include "ConstrainedOptPack_VariableBoundsTester.hpp"
00059 
00060 #include "NLPInterfacePack_NLPDirect.hpp"
00061 
00062 #include "NLPInterfacePack_CalcFiniteDiffProd.hpp"
00063 #include "NLPInterfacePack_NLPVarReductPerm.hpp"
00064 
00065 // line search
00066 #include "ConstrainedOptPack_DirectLineSearchArmQuad_Strategy.hpp"
00067 #include "ConstrainedOptPack_DirectLineSearchArmQuad_StrategySetOptions.hpp"
00068 #include "ConstrainedOptPack_MeritFuncNLPL1.hpp"
00069 #include "ConstrainedOptPack_MeritFuncNLPModL1.hpp"
00070 
00071 // Basis permutations and direct sparse solvers
00072 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00073 #include "ConstrainedOptPack_DecompositionSystemVarReductPerm.hpp"
00074 #endif
00075 
00076 #include "ConstrainedOptPack_QPSolverRelaxedTester.hpp"
00077 #include "ConstrainedOptPack_QPSolverRelaxedTesterSetOptions.hpp"
00078 #include "ConstrainedOptPack_QPSolverRelaxedQPSchur.hpp"
00079 #include "ConstrainedOptPack_QPSolverRelaxedQPSchurSetOptions.hpp"
00080 #include "ConstrainedOptPack_QPSchurInitKKTSystemHessianFull.hpp"
00081 //#include "ConstrainedOptPack_QPSchurInitKKTSystemHessianSuperBasic.hpp"
00082 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
00083 #include "ConstrainedOptPack_QPSolverRelaxedQPKWIK.hpp"
00084 #endif
00085 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
00086 //#include "ConstrainedOptPack_QPSolverRelaxedQPOPT.hpp"
00087 #endif
00088 
00089 #include "MoochoPack_MoochoAlgorithmStepNames.hpp"
00090 
00091 /*
00092 #include "MoochoPack_EvalNewPointStd_StepSetOptions.hpp"
00093 #include "MoochoPack_EvalNewPointTailoredApproach_StepSetOptions.hpp"
00094 #include "MoochoPack_EvalNewPointTailoredApproachCoordinate_Step.hpp"
00095 #include "MoochoPack_EvalNewPointTailoredApproachOrthogonal_Step.hpp"
00096 */
00097 
00098 #include "MoochoPack_ReducedGradientStd_Step.hpp"
00099 #include "MoochoPack_InitFinDiffReducedHessian_Step.hpp"
00100 #include "MoochoPack_InitFinDiffReducedHessian_StepSetOptions.hpp"
00101 #include "MoochoPack_ReducedHessianSerialization_Step.hpp"
00102 #include "MoochoPack_ReducedHessianSerialization_StepSetOptions.hpp"
00103 #include "MoochoPack_ReducedHessianSecantUpdateStd_Step.hpp"
00104 #include "MoochoPack_ReducedHessianSecantUpdateBFGSFull_Strategy.hpp"
00105 
00106 
00107 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_Strategy.hpp"
00108 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_StrategySetOptions.hpp"
00109 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_Strategy.hpp"
00110 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_StrategySetOptions.hpp"
00111 #include "MoochoPack_BFGSUpdate_Strategy.hpp"
00112 #include "MoochoPack_BFGSUpdate_StrategySetOptions.hpp"
00113 #include "MoochoPack_QuasiNormalStepStd_Step.hpp"
00114 #include "MoochoPack_CheckDescentQuasiNormalStep_Step.hpp"
00115 #include "MoochoPack_CheckDecompositionFromPy_Step.hpp"
00116 #include "MoochoPack_CheckDecompositionFromRPy_Step.hpp"
00117 #include "MoochoPack_TangentialStepWithoutBounds_Step.hpp"
00118 #include "MoochoPack_TangentialStepWithInequStd_Step.hpp"
00119 #include "MoochoPack_TangentialStepWithInequStd_StepSetOptions.hpp"
00120 #include "MoochoPack_SetDBoundsStd_AddedStep.hpp"
00121 #include "MoochoPack_QPFailureReinitReducedHessian_Step.hpp"
00122 #include "MoochoPack_CalcDFromYPYZPZ_Step.hpp"
00123 #include "MoochoPack_CalcDFromYPY_Step.hpp"
00124 #include "MoochoPack_CalcDFromZPZ_Step.hpp"
00125 #include "MoochoPack_LineSearchFailureNewDecompositionSelection_Step.hpp"
00126 #include "MoochoPack_LineSearchFilter_Step.hpp"
00127 #include "MoochoPack_LineSearchFilter_StepSetOptions.hpp"
00128 #include "MoochoPack_LineSearchFullStep_Step.hpp"
00129 #include "MoochoPack_LineSearchDirect_Step.hpp"
00130 #include "MoochoPack_LineSearchNLE_Step.hpp"
00131 //#include "MoochoPack_LineSearch2ndOrderCorrect_Step.hpp"
00132 //#include "MoochoPack_LineSearch2ndOrderCorrect_StepSetOptions.hpp"
00133 //#include "MoochoPack_FeasibilityStepReducedStd_Strategy.hpp"
00134 //#include "MoochoPack_FeasibilityStepReducedStd_StrategySetOptions.hpp"
00135 //#include "MoochoPack_QuasiRangeSpaceStepStd_Strategy.hpp"
00136 //#include "MoochoPack_QuasiRangeSpaceStepTailoredApproach_Strategy.hpp"
00137 //#include "MoochoPack_LineSearchWatchDog_Step.hpp"
00138 //#include "MoochoPack_LineSearchWatchDog_StepSetOptions.hpp"
00139 //#include "MoochoPack_LineSearchFullStepAfterKIter_Step.hpp"
00140 //#include "MoochoPack_CalcLambdaIndepStd_AddedStep.hpp"
00141 #include "MoochoPack_CalcReducedGradLagrangianStd_AddedStep.hpp"
00142 #include "MoochoPack_CheckConvergenceStd_AddedStep.hpp"
00143 #include "MoochoPack_CheckConvergenceStd_Strategy.hpp"
00144 #include "MoochoPack_CheckSkipBFGSUpdateStd_StepSetOptions.hpp"
00145 #include "MoochoPack_MeritFunc_DummyUpdate_Step.hpp"
00146 #include "MoochoPack_MeritFunc_PenaltyParamUpdate_AddedStepSetOptions.hpp"
00147 #include "MoochoPack_MeritFunc_PenaltyParamUpdateMultFree_AddedStep.hpp"
00148 //#include "MoochoPack_MeritFunc_PenaltyParamUpdateWithMult_AddedStep.hpp"
00149 //#include "MoochoPack_MeritFunc_PenaltyParamsUpdateWithMult_AddedStep.hpp"
00150 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStep.hpp"
00151 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStepSetOptions.hpp"
00152 //#include "MoochoPack_ActSetStats_AddedStep.hpp"
00153 //#include "MoochoPack_NumFixedDepIndep_AddedStep.hpp"
00154 
00155 #include "MoochoPack_act_set_stats.hpp"
00156 #include "MoochoPack_qp_solver_stats.hpp"
00157 #include "MoochoPack_quasi_newton_stats.hpp"
00158 
00159 //#include "AbstractLinAlgPack_sparse_bounds.hpp"
00160 
00161 // Misc utilities
00162 #include "Teuchos_AbstractFactoryStd.hpp"
00163 #include "Teuchos_dyn_cast.hpp"
00164 #include "ReleaseResource_ref_count_ptr.hpp"
00165 #include "Teuchos_Assert.hpp"
00166 
00167 // Stuff to read in options
00168 #include "OptionsFromStreamPack_StringToIntMap.hpp"
00169 #include "OptionsFromStreamPack_StringToBool.hpp"
00170 
00171 // Stuff for exact reduced hessian
00172 //#include "MoochoPack_ReducedHessianExactStd_Step.hpp"
00173 //#include "MoochoPack_CrossTermExactStd_Step.hpp"
00174 //#include "MoochoPack_DampenCrossTermStd_Step.hpp"
00175 
namespace {
  // Sentinel: if the user's max_basis_cond_change_frac option resolves to a
  // value at or above this, the check-decomposition steps are not added at
  // all (see the "Check and change decomposition" section below).
  const double INF_BASIS_COND_CHANGE_FRAC      = 1e+20;
}
00179 
00180 namespace MoochoPack {
00181 
00182 //
00183 // Here is where we define the default values for the algorithm.  These
00184 // should agree with what are in the Moocho.opt.NLPAlgoConfigMamaJama file.
00185 //
// Default-construct the option values.
//
// Convention: negative numeric values and *_AUTO enum values mean
// "not set by the user" -- set_default_options() (and the option
// sorting logic in config_algo_cntr()) resolves them to concrete
// values later.  These defaults should agree with what are in the
// Moocho.opt.NLPAlgoConfigMamaJama file.
NLPAlgoConfigMamaJama::SOptionValues::SOptionValues()
  :max_basis_cond_change_frac_(-1.0)              // < 0 => not set; resolved later
  ,exact_reduced_hessian_(false)                  // default to quasi-Newton rHL
  ,quasi_newton_(QN_AUTO)                         // chosen from n-r vs. dense limit
  ,num_lbfgs_updates_stored_(-1)                  // < 0 => not set; resolved later
  ,lbfgs_auto_scaling_(true)
  ,hessian_initialization_(INIT_HESS_AUTO)
  ,qp_solver_type_(QP_AUTO)                       // depends on bounds / build config
  ,reinit_hessian_on_qp_fail_(true)
  ,line_search_method_(LINE_SEARCH_AUTO)          // depends on m (num equalities)
  ,merit_function_type_(MERIT_FUNC_AUTO)
  ,l1_penalty_param_update_(L1_PENALTY_PARAM_AUTO)
  ,full_steps_after_k_(-1)                        // < 0 => feature disabled/not set
  ,max_pz_norm_(-1.0)                             // < 0 => no damping bound set
  ,num_pz_damp_iters_(0)
{}
00202 
// Default constructor: all configuration state (options_, uov_, cov_,
// decomp_sys_step_builder_) is default-initialized by its own constructor.
NLPAlgoConfigMamaJama::NLPAlgoConfigMamaJama()
{}
00205 
// Destructor: nothing to release explicitly; all members are RCP-managed
// or value types and clean up themselves.
NLPAlgoConfigMamaJama::~NLPAlgoConfigMamaJama()
{}
00208 
00209 // overridden from NLPAlgoConfig
00210 
00211 void NLPAlgoConfigMamaJama::set_options( const options_ptr_t& options )
00212 {
00213   options_ = options;
00214   decomp_sys_step_builder_.set_options(options);
00215 }
00216 
// Return the options object last given to set_options() (may be
// Teuchos::null if set_options() was never called or was given null).
const NLPAlgoConfig::options_ptr_t&
NLPAlgoConfigMamaJama::get_options() const
{
  return options_;
}
00222 
00223 void NLPAlgoConfigMamaJama::config_algo_cntr(
00224   NLPAlgoContainer   *algo_cntr
00225   ,std::ostream       *trase_out
00226   )
00227 {
00228   using Teuchos::RCP;
00229   using Teuchos::dyn_cast;
00230 
00231   if(trase_out) {
00232     *trase_out
00233       << std::endl
00234       << "*****************************************************************\n"
00235       << "*** NLPAlgoConfigMamaJama Configuration                       ***\n"
00236       << "***                                                           ***\n"
00237       << "*** Here, summary information about how the algorithm is      ***\n"
00238       << "*** configured is printed so that the user can see how the    ***\n"
00239       << "*** properties of the NLP and the set options influence       ***\n"
00240       << "*** how an algorithm is configured.                           ***\n"
00241       << "*****************************************************************\n";
00242   }
00243 
00244   // ////////////////////////////////////////////////////////////
00245   // A. ???
00246 
00247   // /////////////////////////////////////////////////////////////////////////
00248   // B. Create an algo object, give to algo_cntr, then give algo_cntr to algo
00249 
00250   if(trase_out)
00251     *trase_out << "\n*** Creating the NLPAlgo algo object ...\n";
00252 
00253   typedef Teuchos::RCP<NLPAlgo> algo_ptr_t;
00254   algo_ptr_t algo = Teuchos::rcp(new NLPAlgo);
00255   TEUCHOS_TEST_FOR_EXCEPTION(!algo.get(),std::runtime_error,"Error!");
00256   if(options_.get()) {
00257     IterationPack::AlgorithmSetOptions
00258       opt_setter( algo.get() );
00259     opt_setter.set_options( *options_ );
00260   }
00261   algo_cntr->set_algo(algo);
00262   algo->set_algo_cntr(algo_cntr);
00263   
00264   // /////////////////////////////////////////////
00265   // C. Configure algo
00266 
00267   // /////////////////////////////////////////////////////
00268   // C.0 Set the nlp and track objects
00269 
00270   if(trase_out)
00271     *trase_out << "\n*** Setting the NLP and track objects to the algo object ...\n";
00272 
00273   algo->set_nlp( algo_cntr->get_nlp().get() );
00274   algo->set_track( algo_cntr->get_track() );
00275 
00276   // ////////////////////////////////////////////////
00277   // Determine what the options are:
00278 
00279   // Readin the options
00280   if(options_.get()) {
00281     readin_options( *options_, &uov_, trase_out );
00282   }
00283   else {
00284     if(trase_out) {
00285       *trase_out
00286         << "\n*** Warning, no OptionsFromStream object was set so a default set"
00287           " of options will be used!\n";
00288     }
00289   } 
00290 
00291   NLP &nlp = algo->nlp();
00292   nlp.initialize(algo->algo_cntr().check_results());
00293   // Get the dimensions of the NLP
00294   const size_type
00295     n   = nlp.n(),
00296     m   = nlp.m(),
00297     r   = m, // ToDo: Compute this for real!
00298     //dof = n - r,
00299     nb  = nlp.num_bounded_x();
00300 
00301   // Process the NLP
00302   NLPFirstOrder    *nlp_foi = NULL;
00303   NLPSecondOrder   *nlp_soi = NULL;
00304   NLPDirect        *nlp_fod = NULL;
00305   bool             tailored_approach = false;
00306   decomp_sys_step_builder_.process_nlp_and_options(
00307     trase_out, nlp
00308     ,&nlp_foi, &nlp_soi, &nlp_fod, &tailored_approach
00309     );
00310 
00311   const int max_dof_quasi_newton_dense
00312     = decomp_sys_step_builder_.current_option_values().max_dof_quasi_newton_dense_;
00313 
00314   // //////////////////////////////////////////////////////
00315   // C.1.  Sort out the options
00316 
00317   if(trase_out)
00318     *trase_out
00319       << "\n*** Sorting out some of the options given input options ...\n";
00320 
00321   if( m ) {
00322     if( tailored_approach ) {
00323       // Change the options for the tailored approach. 
00324       if(trase_out) {
00325         *trase_out
00326           << "\nThis is a tailored approach NLP (NLPDirect) which forces the following options:\n"
00327           << "merit_function_type         = L1;\n"
00328           << "l1_penalty_parameter_update = MULT_FREE;\n"
00329           << "null_space_matrix           = EXPLICIT;\n"
00330           ;
00331       }
00332       cov_.merit_function_type_
00333         = MERIT_FUNC_L1;
00334       cov_.l1_penalty_param_update_
00335         = L1_PENALTY_PARAM_MULT_FREE;
00336       decomp_sys_step_builder_.current_option_values().null_space_matrix_type_
00337         = DecompositionSystemStateStepBuilderStd::NULL_SPACE_MATRIX_EXPLICIT;
00338     }
00339     
00340     if( !tailored_approach && uov_.merit_function_type_ != MERIT_FUNC_L1  ) {
00341       if(trase_out) {
00342         *trase_out
00343           << "\nThe only merit function currently supported is:\n"
00344           << "merit_function_type         = L1;\n"
00345           ;
00346       }
00347       cov_.merit_function_type_   = MERIT_FUNC_L1;
00348     }
00349   }
00350   else {
00351     if( uov_.line_search_method_ == LINE_SEARCH_NONE ) {
00352       if(trase_out) {
00353         *trase_out
00354           << "\nThere are no equality constraints (m == 0) and line_search_method==NONE so set the following options:\n"
00355           << "line_search_method          = NONE;\n"
00356           << "merit_function_type         = L1;\n"
00357           ;
00358       }
00359       cov_.line_search_method_       = LINE_SEARCH_NONE;
00360       cov_.merit_function_type_      = MERIT_FUNC_L1;
00361     }
00362     else {
00363       if(trase_out) {
00364         *trase_out
00365           << "\nThere are no equality constraints (m == 0) and line_search_method==AUTO so set the following options:\n"
00366           << "line_search_method          = DIRECT;\n"
00367           << "merit_function_type         = L1;\n"
00368           ;
00369       }
00370       cov_.line_search_method_       = LINE_SEARCH_DIRECT;
00371       cov_.merit_function_type_      = MERIT_FUNC_L1;
00372     }
00373   }
00374 
00375   // Decide what type of quasi-newton update to use
00376   switch( uov_.quasi_newton_ ) {
00377     case QN_AUTO: {
00378       if(trase_out)
00379         *trase_out
00380           << "\nquasi_newton == AUTO:"
00381           << "\nnlp.num_bounded_x() == " << nlp.num_bounded_x() << ":\n";
00382       if( n - r > max_dof_quasi_newton_dense ) {
00383         if(trase_out)
00384           *trase_out
00385             << "n-r = " << n-r << " > max_dof_quasi_newton_dense = "
00386             << max_dof_quasi_newton_dense <<  ":\n"
00387             << "setting quasi_newton == LBFGS\n";
00388         cov_.quasi_newton_ = QN_LBFGS;
00389       }
00390       else {
00391         if(trase_out)
00392           *trase_out
00393             << "n-r = " << n-r << " <= max_dof_quasi_newton_dense = "
00394             << max_dof_quasi_newton_dense << ":\n"
00395             << "setting quasi_newton == BFGS\n";
00396         cov_.quasi_newton_ = QN_BFGS;
00397       }
00398       break;
00399     }
00400     case QN_BFGS:
00401     case QN_PBFGS:
00402     case QN_LBFGS:
00403     case QN_LPBFGS:
00404       cov_.quasi_newton_ = uov_.quasi_newton_;
00405       break;
00406       default:
00407       TEUCHOS_TEST_FOR_EXCEPT(true); // Invalid option!
00408   }
00409 
00410   if( uov_.qp_solver_type_ == QP_AUTO && nb == 0 ) {
00411     cov_.qp_solver_type_ = QP_AUTO;
00412   }
00413 
00414   // ToDo: Sort out the rest of the options!
00415 
00416   // Set the default options that where not already set yet
00417   set_default_options(uov_,&cov_,trase_out);
00418 
00419   // ToDo: Implement the 2nd order correction linesearch
00420   if( cov_.line_search_method_ == LINE_SEARCH_2ND_ORDER_CORRECT ) {
00421     if(trase_out)
00422       *trase_out <<
00423         "\nline_search_method == 2ND_ORDER_CORRECT:\n"
00424         "Sorry, the second order corrrection linesearch is not updated yet!\n"
00425         "setting line_search_method = FILTER ...\n";
00426     cov_.line_search_method_ = LINE_SEARCH_FILTER;
00427   }
00428   if( cov_.line_search_method_ == LINE_SEARCH_WATCHDOG ) {
00429     if(trase_out)
00430       *trase_out <<
00431         "\nline_search_method ==WATCHDOG:\n"
00432         "Sorry, the watchdog linesearch is not updated yet!\n"
00433         "setting line_search_method = DIRECT ...\n";
00434     cov_.line_search_method_ = LINE_SEARCH_DIRECT;
00435   }
00436   
00437   // Adjust the Quasi-Newton if QPKWIK is used!
00438   if( cov_.qp_solver_type_ == QP_QPKWIK && nb ) {
00439     if(trase_out)
00440       *trase_out
00441         << "\nqp_solver == QPKWIK and nlp.num_bounded_x() == " << nb << " > 0:\n"
00442         << "Setting quasi_newton == BFGS...\n";
00443     cov_.quasi_newton_ = QN_BFGS;
00444   }
00445 
00446   // /////////////////////////////////////////////////////
00447   // C.1. Create the decomposition system object
00448 
00449   typedef RCP<DecompositionSystem> decomp_sys_ptr_t;
00450   decomp_sys_ptr_t decomp_sys;
00451   if(
00452 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00453     true
00454 #else
00455     n > m
00456 #endif
00457     )
00458   {
00459     decomp_sys_step_builder_.create_decomp_sys(
00460       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach
00461       ,&decomp_sys
00462       );
00463   }
00464 
00465 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00466   RCP<DecompositionSystemVarReductPerm>
00467     decomp_sys_perm = Teuchos::rcp_dynamic_cast<DecompositionSystemVarReductPerm>(decomp_sys);
00468 #endif
00469 
00470   // /////////////////////////////////////////////////////
00471   // C.2. Create and set the state object
00472 
00473   if(trase_out)
00474     *trase_out
00475       << "\n*** Creating the state object and setting up iteration quantity objects ...\n";
00476 
00477   {
00478     //
00479     // Create the state object with the vector spaces
00480     //
00481 
00482     typedef RCP<NLPAlgoState>   state_ptr_t;
00483     state_ptr_t
00484       state = Teuchos::rcp(
00485         new NLPAlgoState(
00486           decomp_sys
00487           ,nlp.space_x()
00488           ,nlp.space_c()
00489           ,( m
00490              ? ( tailored_approach
00491                ? ( nlp_fod->var_dep().size() 
00492                  ? nlp.space_x()->sub_space(nlp_fod->var_dep())->clone()
00493                  : Teuchos::null )
00494                : decomp_sys->space_range() // could be NULL for BasisSystemPerm
00495                )
00496              : Teuchos::null
00497             )
00498           ,( m
00499              ? ( tailored_approach
00500                ?( nlp_fod->var_indep().size()
00501                 ? nlp.space_x()->sub_space(nlp_fod->var_indep())->clone()
00502                 : Teuchos::null )
00503                : decomp_sys->space_null() // could be NULL for BasisSystemPerm
00504                )
00505              : nlp.space_x()
00506             )
00507           )
00508         );
00509         
00510     //
00511     // Set the iteration quantities for the NLP matrix objects
00512     //
00513 
00514     decomp_sys_step_builder_.add_iter_quantities(
00515       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
00516       ,state
00517       );
00518 
00519     // Add reduced Hessian
00520 
00521     if( !cov_.exact_reduced_hessian_ ) {
00522       RCP<Teuchos::AbstractFactory<MatrixSymOp> >
00523         abstract_factory_rHL = Teuchos::null;
00524       // Only maintain the orginal matrix if we have inequality constraints and therefore will be
00525       // needing a QP solver (which may be QPSchur which needs an accurate original matrix for
00526       // iterative refinement).
00527       const bool
00528         maintain_original = nb;
00529       // Maintain the factor if a QP solver is needed, QPSchur is being used, or we are checking
00530       // results
00531       const bool
00532         maintain_inverse = ( (!nb && m==r) || cov_.qp_solver_type_==QP_QPSCHUR
00533                    || algo->algo_cntr().check_results() );
00534       switch( cov_.quasi_newton_ ) {
00535         case QN_BFGS:
00536           abstract_factory_rHL = Teuchos::rcp(
00537             new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefCholFactor,MatrixSymPosDefCholFactor::PostMod>(
00538               MatrixSymPosDefCholFactor::PostMod(
00539                 maintain_original      // maintain_original
00540                 ,maintain_inverse      // maintain_factor
00541                 ,true                  // allow_factor      (always!)
00542                 )
00543               )
00544             );
00545           break;
00546         case QN_LBFGS:
00547           abstract_factory_rHL = Teuchos::rcp(
00548             new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefLBFGS,MatrixSymPosDefLBFGS::PostMod>(
00549               MatrixSymPosDefLBFGS::PostMod(
00550                 cov_.num_lbfgs_updates_stored_  //
00551                 ,maintain_original              // maintain_original
00552                 ,maintain_inverse               // maintain_inverse
00553                 ,cov_.lbfgs_auto_scaling_       // 
00554                 )
00555               )
00556             );
00557           break;
00558         default:
00559           TEUCHOS_TEST_FOR_EXCEPT(true); // Should not be called for now!
00560       }
00561       
00562       state->set_iter_quant(
00563         rHL_name
00564         ,Teuchos::rcp(
00565           new IterQuantityAccessContiguous<MatrixSymOp>(
00566             1
00567             ,rHL_name
00568             ,abstract_factory_rHL
00569             )
00570           )
00571         );
00572     }
00573     else {
00574       TEUCHOS_TEST_FOR_EXCEPT(true); // ToDo: Add rHL for an exact reduced Hessian!
00575     }
00576     
00577     //
00578     // Set the NLP merit function 
00579     //
00580 
00581     if( cov_.line_search_method_ != LINE_SEARCH_NONE 
00582        && cov_.line_search_method_ != LINE_SEARCH_FILTER) {
00583       RCP<Teuchos::AbstractFactory<MeritFuncNLP> >
00584         merit_func_factory = Teuchos::null;
00585       switch( cov_.merit_function_type_ ) {
00586         case MERIT_FUNC_L1:
00587           merit_func_factory = Teuchos::rcp(
00588             new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPL1>());
00589           break;
00590         case MERIT_FUNC_MOD_L1:
00591         case MERIT_FUNC_MOD_L1_INCR:
00592           merit_func_factory = Teuchos::rcp(
00593             new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPModL1>());
00594           break;
00595         default:
00596           TEUCHOS_TEST_FOR_EXCEPT(true);  // local programming error
00597       }
00598       state->set_iter_quant(
00599         merit_func_nlp_name
00600         ,Teuchos::rcp(
00601           new IterQuantityAccessContiguous<MeritFuncNLP>(
00602             1
00603             ,merit_func_nlp_name
00604             ,merit_func_factory
00605             )
00606           )
00607         );
00608     }
00609 
00610     if (cov_.line_search_method_ == LINE_SEARCH_FILTER)
00611       {
00612         // Add the filter iteration quantity
00613         state->set_iter_quant(
00614         FILTER_IQ_STRING
00615         ,Teuchos::rcp(
00616              new IterQuantityAccessContiguous<Filter_T>(1,FILTER_IQ_STRING)
00617         )
00618         );
00619       }
00620 
00621     if( nb ) {
00622       // Add bounds on d
00623       state->set_iter_quant(
00624         dl_name
00625         ,Teuchos::rcp(
00626           new IterQuantityAccessContiguous<VectorMutable>(
00627             2, dl_name
00628             , nlp.space_x() ) )
00629         );
00630       state->set_iter_quant(
00631         du_name
00632         ,Teuchos::rcp(
00633           new IterQuantityAccessContiguous<VectorMutable>(
00634             2, du_name
00635             , nlp.space_x() ) )
00636         );
00637       // Add active-set iteration quantity
00638       state->set_iter_quant(
00639         act_set_stats_name
00640         ,Teuchos::rcp(
00641           new IterQuantityAccessContiguous<ActSetStats>( 1, act_set_stats_name ) )
00642         );
00643       // Add QP solver stats iteration quantity
00644       state->set_iter_quant(
00645         qp_solver_stats_name
00646         ,Teuchos::rcp(
00647           new IterQuantityAccessContiguous<QPSolverStats>( 1, qp_solver_stats_name ) )
00648         );
00649     }
00650 
00651     //
00652     // Resize the number of storage locations (these can be changed later).
00653     //
00654     // Also, touch all of the value_type, index_type and vector iteration quantities
00655     // that we know about so that when state.dump_iter_quant() is called, all of the
00656     // iteration quantities will be included.
00657     //
00658     
00659     typedef IterQuantityAccessContiguous<value_type>            IQ_scalar_cngs;
00660     typedef IterQuantityAccessContiguous<VectorMutable>   IQ_vector_cngs;
00661 
00662     dyn_cast<IQ_vector_cngs>(state->x()).resize(2);
00663     dyn_cast<IQ_scalar_cngs>(state->f()).resize(2);
00664     if(m) dyn_cast<IQ_vector_cngs>(state->c()).resize(2);
00665     state->Gf();
00666     if(m && nlp_foi) state->Gc();
00667 
00668     if( m
00669 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00670        && decomp_sys_perm.get() == NULL
00671 #endif
00672       ) state->py();
00673     if(m) dyn_cast<IQ_vector_cngs>(state->Ypy()).resize(2);
00674     if( m
00675 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
00676       && decomp_sys_perm.get() == NULL
00677 #endif
00678       ) state->pz();
00679     if(m) dyn_cast<IQ_vector_cngs>(state->Zpz()).resize(2);
00680     dyn_cast<IQ_vector_cngs>(state->d()).resize(2);
00681 
00682     if( n > m ) {
00683       dyn_cast<IQ_vector_cngs>(state->rGf()).resize(2);
00684       state->w();
00685       state->zeta();
00686       state->qp_grad();
00687     }
00688     state->eta();
00689     
00690     dyn_cast<IQ_scalar_cngs>(state->alpha()).resize(2);
00691     dyn_cast<IQ_scalar_cngs>(state->mu()).resize(2);
00692     dyn_cast<IQ_scalar_cngs>(state->phi()).resize(2);
00693 
00694     dyn_cast<IQ_scalar_cngs>(state->opt_kkt_err()).resize(2);
00695     dyn_cast<IQ_scalar_cngs>(state->feas_kkt_err()).resize(2);
00696     if( n > m ) {
00697       dyn_cast<IQ_vector_cngs>(state->rGL()).resize(2);
00698     }
00699     if(m) dyn_cast<IQ_vector_cngs>(state->lambda()).resize(2);
00700     dyn_cast<IQ_vector_cngs>(state->nu()).resize(2);
00701 
00702     // Set the state object
00703     algo->set_state( state );
00704   }
00705 
00706   // /////////////////////////////////////////////////////
00707   // C.3  Create and set the step objects
00708 
00709   if(trase_out)
00710     *trase_out << "\n*** Creating and setting the step objects ...\n";
00711 
00712 //  typedef RCP<QPSolverRelaxed> qp_solver_ptr_t;
00713 //  qp_solver_ptr_t qp_solver;
00714 //  typedef ConstrainedOptPack::QPSolverRelaxedTester QPSolverRelaxedTester;
00715 //  typedef RCP<QPSolverRelaxedTester> qp_tester_ptr_t;
00716 //  qp_tester_ptr_t qp_tester;
00717 //  typedef RCP<FeasibilityStepReducedStd_Strategy> feasibility_step_strategy_ptr_t;
00718 //  feasibility_step_strategy_ptr_t  feasibility_step_strategy;
00719 
00720   {
00721 
00722     //
00723     // Create some standard step objects that will be used by many different
00724     // specific algorithms
00725     //
00726     
00727     typedef RCP<AlgorithmStep>   algo_step_ptr_t;
00728 
00729     // Create the EvalNewPoint step and associated objects
00730     algo_step_ptr_t                                    eval_new_point_step           = Teuchos::null;
00731     RCP<CalcFiniteDiffProd>                  calc_fd_prod                  = Teuchos::null;
00732     RCP<VariableBoundsTester>                bounds_tester                 = Teuchos::null;
00733     RCP<NewDecompositionSelection_Strategy>  new_decomp_selection_strategy = Teuchos::null;
00734     decomp_sys_step_builder_.create_eval_new_point(
00735       trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
00736       ,&eval_new_point_step, &calc_fd_prod, &bounds_tester, &new_decomp_selection_strategy
00737       );
00738 
00739     // QuasiNormal_Step
00740     algo_step_ptr_t    quansi_normal_step_step = Teuchos::null;
00741     if( !tailored_approach ) {
00742       quansi_normal_step_step = Teuchos::rcp(new QuasiNormalStepStd_Step());
00743     }
00744 
00745     // Check and change decomposition 
00746     algo_step_ptr_t    check_decomp_from_py_step  = Teuchos::null;
00747     algo_step_ptr_t    check_decomp_from_Rpy_step = Teuchos::null;
00748     if( new_decomp_selection_strategy.get() && cov_.max_basis_cond_change_frac_ < INF_BASIS_COND_CHANGE_FRAC ) {
00749       check_decomp_from_py_step = Teuchos::rcp(
00750         new CheckDecompositionFromPy_Step(
00751           new_decomp_selection_strategy
00752           ,cov_.max_basis_cond_change_frac_
00753           ) );
00754       check_decomp_from_Rpy_step = Teuchos::rcp(
00755         new CheckDecompositionFromRPy_Step(
00756           new_decomp_selection_strategy
00757           ,cov_.max_basis_cond_change_frac_
00758           ) );
00759     }
00760 
00761     // CheckDescentQuasiNormalStep
00762     algo_step_ptr_t    check_descent_quansi_normal_step_step = Teuchos::null;
00763     if( algo->algo_cntr().check_results() ) {
00764       check_descent_quansi_normal_step_step = Teuchos::rcp(new CheckDescentQuasiNormalStep_Step(calc_fd_prod));
00765     }
00766 
00767     // ReducedGradient_Step
00768     algo_step_ptr_t    reduced_gradient_step = Teuchos::null;
00769     if( !tailored_approach ) {
00770       reduced_gradient_step = Teuchos::rcp(new ReducedGradientStd_Step());
00771     }
00772 
00773     // CheckSkipBFGSUpdate
00774     algo_step_ptr_t    check_skip_bfgs_update_step = Teuchos::null;
00775     if(!cov_.exact_reduced_hessian_) {
00776       RCP<CheckSkipBFGSUpdateStd_Step>
00777         step = Teuchos::rcp(new CheckSkipBFGSUpdateStd_Step());
00778       if(options_.get()) {
00779         CheckSkipBFGSUpdateStd_StepSetOptions
00780           opt_setter( step.get() );
00781         opt_setter.set_options( *options_ );
00782       }
00783       check_skip_bfgs_update_step = step;
00784     }
00785 
00786     // ReducedHessian_Step
00787     algo_step_ptr_t    reduced_hessian_step = Teuchos::null;
00788     {
00789       // Get the strategy object that will perform the actual secant update.
00790       RCP<ReducedHessianSecantUpdate_Strategy>
00791         secant_update_strategy = Teuchos::null;
00792       switch( cov_.quasi_newton_ )
00793       {
00794           case QN_BFGS:
00795           case QN_PBFGS:
00796           case QN_LBFGS:
00797           case QN_LPBFGS:
00798         {
00799           // create and setup the actual BFGS strategy object
00800           typedef RCP<BFGSUpdate_Strategy> bfgs_strategy_ptr_t;
00801           bfgs_strategy_ptr_t
00802             bfgs_strategy = Teuchos::rcp(new BFGSUpdate_Strategy);
00803           if(options_.get()) { 
00804             BFGSUpdate_StrategySetOptions
00805               opt_setter( bfgs_strategy.get() );
00806             opt_setter.set_options( *options_ );
00807           }
00808           switch( cov_.quasi_newton_ ) {
00809               case QN_BFGS:
00810               case QN_LBFGS:
00811             {
00812               secant_update_strategy = Teuchos::rcp(new ReducedHessianSecantUpdateBFGSFull_Strategy(bfgs_strategy));
00813               break;
00814             }
00815               case QN_PBFGS:
00816               case QN_LPBFGS:
00817             {
00818               TEUCHOS_TEST_FOR_EXCEPTION(
00819                 true, std::logic_error
00820                 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
00821                 "The quansi_newton options of PBFGS and LPBFGS have not been updated yet!" );
00822               break;
00823             }
00824           }
00825           break;
00826         }
00827         default:
00828           TEUCHOS_TEST_FOR_EXCEPT(true);
00829       }
00830       
00831       // Finally build the step object
00832       reduced_hessian_step = Teuchos::rcp(
00833         new ReducedHessianSecantUpdateStd_Step( secant_update_strategy ) );
00834       // Add the QuasiNewtonStats iteration quantity
00835       algo->state().set_iter_quant(
00836         quasi_newton_stats_name
00837         ,Teuchos::rcp(new IterQuantityAccessContiguous<QuasiNewtonStats>(
00838           1
00839           ,quasi_newton_stats_name
00840 #ifdef _MIPS_CXX
00841           ,Teuchos::RCP<Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats> >(
00842             new Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats>())
00843 #endif
00844           )
00845         ));
00846     }
00847 
00848     // Initialization of the Reduced Hessian
00849     algo_step_ptr_t  init_red_hess_step = Teuchos::null;
00850     if(
00851       cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_IDENTITY
00852       || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL
00853       || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS
00854       )
00855     {
00856       // InitFinDiffReducedHessian_Step
00857       Algorithm::poss_type poss;
00858       TEUCHOS_TEST_FOR_EXCEPT( !( poss = algo->get_step_poss( ReducedHessian_name )  ) );
00859       InitFinDiffReducedHessian_Step::EInitializationMethod
00860         init_hess;
00861       switch( cov_.hessian_initialization_ ) {
00862         case INIT_HESS_FIN_DIFF_SCALE_IDENTITY:
00863           init_hess = InitFinDiffReducedHessian_Step::SCALE_IDENTITY;
00864           break;
00865         case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL:
00866           init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL;
00867           break;
00868         case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS:
00869           init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL_ABS;
00870           break;
00871         default:
00872           TEUCHOS_TEST_FOR_EXCEPT(true);  // only local programming error?
00873       }
00874       Teuchos::RCP<InitFinDiffReducedHessian_Step>
00875         _init_red_hess_step = Teuchos::rcp(new InitFinDiffReducedHessian_Step(init_hess));
00876       if(options_.get()) {
00877         InitFinDiffReducedHessian_StepSetOptions
00878           opt_setter( _init_red_hess_step.get() );
00879         opt_setter.set_options( *options_ );
00880       }
00881       init_red_hess_step = _init_red_hess_step;
00882     }
00883     else if(cov_.hessian_initialization_==INIT_HESS_SERIALIZE ) {
00884       Teuchos::RCP<ReducedHessianSerialization_Step>
00885         _init_red_hess_step = Teuchos::rcp(new ReducedHessianSerialization_Step());
00886       if(options_.get()) {
00887         ReducedHessianSerialization_StepSetOptions
00888           opt_setter( _init_red_hess_step.get() );
00889         opt_setter.set_options( *options_ );
00890       }
00891       init_red_hess_step = _init_red_hess_step;
00892     }
00893     
00894     // NullSpace_Step
00895     algo_step_ptr_t    set_d_bounds_step    = Teuchos::null;
00896     algo_step_ptr_t    tangential_step_step = Teuchos::null;
00897     if( nb == 0 ) {
00898       Teuchos::RCP<TangentialStepWithoutBounds_Step>
00899         tangental_step_output_bounds_step
00900         = Teuchos::rcp(new TangentialStepWithoutBounds_Step());
00901       tangental_step_output_bounds_step->max_pz_norm(cov_.max_pz_norm_);
00902       tangental_step_output_bounds_step->num_pz_damp_iters(cov_.num_pz_damp_iters_);
00903       tangential_step_step = tangental_step_output_bounds_step;
00904     }
00905     else {
00906       // Step object that sets bounds for QP subproblem
00907       set_d_bounds_step = Teuchos::rcp(new SetDBoundsStd_AddedStep());
00908       // QP Solver object
00909       Teuchos::RCP<QPSolverRelaxed>  qp_solver = Teuchos::null;
00910       switch( cov_.qp_solver_type_ ) {
00911         case QP_QPSCHUR: {
00912           using ConstrainedOptPack::QPSolverRelaxedQPSchur;
00913           using ConstrainedOptPack::QPSolverRelaxedQPSchurSetOptions;
00914           using ConstrainedOptPack::QPSchurInitKKTSystemHessianFull;
00915 //          using ConstrainedOptPack::QPSchurInitKKTSystemHessianSuperBasic;
00916           Teuchos::RCP<ConstrainedOptPack::QPSolverRelaxedQPSchur::InitKKTSystem>
00917             init_kkt_sys = Teuchos::null;
00918           switch( cov_.quasi_newton_ ) {
00919             case QN_BFGS:
00920             case QN_LBFGS:
00921               init_kkt_sys = Teuchos::rcp(new QPSchurInitKKTSystemHessianFull());
00922               break;
00923             case QN_PBFGS:
00924             case QN_LPBFGS:
00925               TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"Error! PBFGS and LPBFGS are not updated yet!");
00926 /*
00927               init_kkt_sys = new QPSchurInitKKTSystemHessianSuperBasic();
00928 */
00929               break;
00930             default:
00931               TEUCHOS_TEST_FOR_EXCEPT(true);
00932           }
00933           Teuchos::RCP<QPSolverRelaxedQPSchur>
00934             _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPSchur(init_kkt_sys));
00935           if(options_.get()) {
00936             QPSolverRelaxedQPSchurSetOptions
00937               qp_options_setter(_qp_solver.get());
00938             qp_options_setter.set_options( *options_ );
00939           }
00940           qp_solver = _qp_solver; // give ownership to delete!
00941           break;
00942         }
00943         case QP_QPKWIK: {
00944 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
00945           using ConstrainedOptPack::QPSolverRelaxedQPKWIK;
00946           Teuchos::RCP<QPSolverRelaxedQPKWIK>
00947             _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPKWIK());
00948           qp_solver = _qp_solver; // give ownership to delete!
00949 #else
00950           TEUCHOS_TEST_FOR_EXCEPTION(
00951             true,std::logic_error,"Error! QPKWIK interface is not supported since "
00952             "CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK is not defined!");
00953 #endif
00954           break;
00955         }
00956         case QP_QPOPT: {
00957           TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"Error! QPOPT interface is not updated yet!");
00958 /*
00959 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
00960           using ConstrainedOptPack::QPSolverRelaxedQPOPT;
00961           QPSolverRelaxedQPOPT
00962             *_qp_solver = new QPSolverRelaxedQPOPT();
00963           qp_solver = _qp_solver; // give ownership to delete!
00964 #else
00965           throw std::logic_error(
00966             "NLPAlgoConfigMamaJama::config_algo_cntr(...) : QPOPT is not supported,"
00967             " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
00968 #endif
00969 */
00970           break;
00971         }
00972         default:
00973           TEUCHOS_TEST_FOR_EXCEPT(true);
00974       }
00975       // QP solver tester
00976       Teuchos::RCP<QPSolverRelaxedTester> 
00977         qp_solver_tester = Teuchos::rcp(new QPSolverRelaxedTester());
00978       if(options_.get()) {
00979         QPSolverRelaxedTesterSetOptions
00980           opt_setter( qp_solver_tester.get() );
00981         opt_setter.set_options( *options_ );
00982       }
00983       // The null-space step
00984       Teuchos::RCP<TangentialStepWithInequStd_Step>
00985         tangential_step_with_inequ_step = Teuchos::rcp(
00986           new TangentialStepWithInequStd_Step(
00987             qp_solver, qp_solver_tester ) );
00988       if(options_.get()) {
00989         TangentialStepWithInequStd_StepSetOptions
00990           opt_setter( tangential_step_with_inequ_step.get() );
00991         opt_setter.set_options( *options_ );
00992       }
00993       tangential_step_step = tangential_step_with_inequ_step;
00994       // Step for reinitialization reduced Hessian on QP failure
00995       tangential_step_step = Teuchos::rcp(
00996         new QPFailureReinitReducedHessian_Step(tangential_step_step)
00997         );
00998     }
00999 
01000     // CalcDFromYPYZPZ_Step
01001     algo_step_ptr_t    calc_d_from_Ypy_Zpy_step = Teuchos::null;
01002     {
01003       calc_d_from_Ypy_Zpy_step = Teuchos::rcp(new CalcDFromYPYZPZ_Step());
01004     }
01005 
01006     // CalcReducedGradLagrangianStd_AddedStep
01007     algo_step_ptr_t    calc_reduced_grad_lagr_step = Teuchos::null;
01008     {
01009       calc_reduced_grad_lagr_step = Teuchos::rcp(
01010         new CalcReducedGradLagrangianStd_AddedStep() );
01011     }
01012 
01013     // CheckConvergence_Step
01014     algo_step_ptr_t    check_convergence_step = Teuchos::null;
01015       {
01016       // Create the strategy object
01017       RCP<CheckConvergenceStd_Strategy>
01018         check_convergence_strategy = Teuchos::rcp(new CheckConvergenceStd_Strategy());
01019 
01020       if(options_.get()) 
01021         {
01022         CheckConvergence_StrategySetOptions
01023           opt_setter( check_convergence_strategy.get() );
01024         opt_setter.set_options( *options_ );
01025         }
01026       
01027       RCP<CheckConvergenceStd_AddedStep>
01028         _check_convergence_step = Teuchos::rcp(new CheckConvergenceStd_AddedStep(check_convergence_strategy));
01029       
01030       check_convergence_step = _check_convergence_step;
01031       }
01032 
01033     // MeritFuncPenaltyParamUpdate_Step
01034     algo_step_ptr_t    merit_func_penalty_param_update_step = Teuchos::null;
01035     if( cov_.line_search_method_ == LINE_SEARCH_FILTER ) {
01036       // We don't need to update a penalty parameter for the filter method :-)
01037     }
01038     else if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
01039       RCP<MeritFunc_PenaltyParamUpdate_AddedStep>
01040         param_update_step = Teuchos::null;
01041       switch( cov_.merit_function_type_ ) {
01042         case MERIT_FUNC_L1: {
01043           switch(cov_.l1_penalty_param_update_) {
01044             case L1_PENALTY_PARAM_WITH_MULT:
01045 //              param_update_step
01046 //                = Teuchos::rcp(new  MeritFunc_PenaltyParamUpdateWithMult_AddedStep());
01047               TEUCHOS_TEST_FOR_EXCEPTION(
01048                 true, std::logic_error
01049                 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01050                 "The l1_penalty_parameter_update option of MULT_FREE has not been updated yet!" );
01051               break;
01052             case L1_PENALTY_PARAM_MULT_FREE:
01053               param_update_step
01054                 = Teuchos::rcp(new  MeritFunc_PenaltyParamUpdateMultFree_AddedStep());
01055               break;
01056             default:
01057               TEUCHOS_TEST_FOR_EXCEPT(true);
01058           }
01059           break;
01060         }
01061         case MERIT_FUNC_MOD_L1:
01062         case MERIT_FUNC_MOD_L1_INCR:
01063 //          param_update_step = new  MeritFunc_PenaltyParamsUpdateWithMult_AddedStep(
01064 //                      Teuchos::rcp_implicit_cast<MeritFuncNLP>(merit_func) );
01065           TEUCHOS_TEST_FOR_EXCEPTION(
01066             true, std::logic_error
01067             ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01068             "The merit_function_type options of MODIFIED_L1 and MODIFIED_L1_INCR have not been updated yet!" );
01069           break;
01070         default:
01071           TEUCHOS_TEST_FOR_EXCEPT(true);  // local programming error
01072       }
01073       if(options_.get()) {
01074         MeritFunc_PenaltyParamUpdate_AddedStepSetOptions
01075           ppu_options_setter( param_update_step.get() );
01076         ppu_options_setter.set_options( *options_ );
01077       }
01078       merit_func_penalty_param_update_step = param_update_step;
01079     }
01080 
01081     // LineSearchFull_Step
01082     algo_step_ptr_t    line_search_full_step_step = Teuchos::null;
01083     {
01084       line_search_full_step_step = Teuchos::rcp(new LineSearchFullStep_Step(bounds_tester));
01085     }
01086 
01087     // LineSearch_Step
01088     algo_step_ptr_t    line_search_step = Teuchos::null;
01089     if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
01090       RCP<DirectLineSearchArmQuad_Strategy>
01091         direct_line_search = Teuchos::rcp(new  DirectLineSearchArmQuad_Strategy());
01092       if(options_.get()) {
01093         ConstrainedOptPack::DirectLineSearchArmQuad_StrategySetOptions
01094           ls_options_setter( direct_line_search.get(), "DirectLineSearchArmQuadSQPStep" );
01095         ls_options_setter.set_options( *options_ );
01096       }
01097       if( n > m ) {
01098         switch( cov_.line_search_method_ ) {
01099           case LINE_SEARCH_DIRECT: {
01100             line_search_step = Teuchos::rcp(new LineSearchDirect_Step(direct_line_search));
01101             break;
01102           }
01103           case LINE_SEARCH_2ND_ORDER_CORRECT: {
01104             TEUCHOS_TEST_FOR_EXCEPTION(
01105               true, std::logic_error
01106               ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01107               "The line_search_method option of 2ND_ORDER_CORRECT has not been updated yet!" );
01108             break;
01109           }
01110           case LINE_SEARCH_WATCHDOG: {
01111             TEUCHOS_TEST_FOR_EXCEPTION(
01112               true, std::logic_error
01113               ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
01114               "The line_search_method option of WATCHDOG has not been updated yet!" );
01115             break;
01116           }
01117           case LINE_SEARCH_FILTER: {
01118             Teuchos::RCP<LineSearchFilter_Step> 
01119               line_search_filter_step = Teuchos::rcp(
01120                 new LineSearchFilter_Step(algo_cntr->get_nlp())
01121                 );
01122             
01123             if(options_.get()) 
01124             {
01125               LineSearchFilter_StepSetOptions options_setter(line_search_filter_step.get());
01126               options_setter.set_options(*options_);
01127             }
01128 
01129             line_search_step = line_search_filter_step;         
01130             break;
01131           }
01132           case LINE_SEARCH_AUTO:
01133           case LINE_SEARCH_NONE:
01134           default:
01135             TEUCHOS_TEST_FOR_EXCEPT(true); // Should not get here!
01136         }
01137       }
01138       else {
01139         line_search_step = Teuchos::rcp( new LineSearchNLE_Step(direct_line_search) );
01140       }
01141     }
01142     
01143     // LineSearchFailure
01144     if( new_decomp_selection_strategy.get() ) {
01145       line_search_step = Teuchos::rcp(
01146         new LineSearchFailureNewDecompositionSelection_Step(
01147           line_search_step
01148           ,new_decomp_selection_strategy
01149           )
01150         );
01151     }
01152   
01153     //
01154     // Create the algorithm depending on the type of NLP we are trying to solve.
01155     //
01156 
01157     if( m == 0 ) {
01158 
01159       if( nb == 0 ) {
01160         //
01161         // Unconstrained NLP (m == 0, num_bounded_x == 0)
01162         //
01163         if(trase_out)
01164           *trase_out 
01165             << "\nConfiguring an algorithm for an unconstrained "
01166             << "NLP (m == 0, num_bounded_x == 0) ...\n";
01167       }
01168       else {
01169         //
01170         // Simple bound constrained NLP (m == 0, num_bounded_x > 0)
01171         //
01172         if(trase_out)
01173           *trase_out 
01174             << "\nConfiguring an algorithm for a simple bound constrained "
01175             << "NLP (m == 0, num_bounded_x > 0) ...\n";
01176       }
01177 
01178       int step_num       = 0;
01179   
01180       // EvalNewPoint
01181       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01182       
01183       // ReducedGradient
01184       algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
01185 
01186       if( nb == 0 ) {
01187         
01188         // CalcReducedGradLagrangian
01189         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01190 
01191         // CheckConvergence
01192         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01193 
01194       }
01195 
01196       // ReducedHessian
01197       algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
01198 
01199       // (.-1) Initialize reduced Hessian
01200       if(init_red_hess_step.get()) {
01201         algo->insert_assoc_step(
01202           step_num, IterationPack::PRE_STEP, 1
01203           ,"ReducedHessianInitialization"
01204           ,init_red_hess_step
01205           );
01206       }
01207       
01208       // TangentialStep
01209       algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
01210       if( nb > 0 ) {
01211         // SetDBoundsStd
01212         algo->insert_assoc_step(
01213           step_num
01214           ,IterationPack::PRE_STEP
01215           ,1
01216           ,"SetDBoundsStd"
01217           ,set_d_bounds_step
01218           );
01219       }
01220 
01221       // CalcDFromZPZ
01222       algo->insert_step( ++step_num, "CalcDFromZpz", Teuchos::rcp(new CalcDFromZPZ_Step()) );
01223 
01224       if( nb > 0 ) {
01225         
01226         // CalcReducedGradLagrangian
01227         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01228 
01229         // CheckConvergence
01230         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01231 
01232       }
01233 
01234       // LineSearch
01235       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01236         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01237       }
01238       else {
01239         // Main line search step
01240         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01241         // Insert presteps
01242         Algorithm::poss_type
01243           pre_step_i = 0;
01244         // (.-?) LineSearchFullStep
01245         algo->insert_assoc_step(
01246           step_num
01247           ,IterationPack::PRE_STEP
01248           ,++pre_step_i
01249           ,"LineSearchFullStep"
01250           ,line_search_full_step_step
01251           );
01252         // (.-?) MeritFunc_DummyUpdate
01253         algo->insert_assoc_step(
01254           step_num
01255           ,IterationPack::PRE_STEP
01256           ,++pre_step_i
01257           ,"MeritFunc_DummyUpdate"
01258           ,Teuchos::rcp(new MeritFunc_DummyUpdate_Step())
01259           );
01260       }
01261       
01262     }
01263     else if( n == m ) {
01264       //
01265       // System of Nonlinear equations (n == m)
01266       //
01267       if(trase_out)
01268         *trase_out 
01269           << "\nConfiguring an algorithm for a system of nonlinear equations "
01270           << "NLP (n == m) ...\n";
01271       
01272       if(algo->state().get_iter_quant_id(merit_func_nlp_name)!=IterationPack::AlgorithmState::DOES_NOT_EXIST)
01273         algo->state().erase_iter_quant(merit_func_nlp_name);
01274 
01275       int step_num       = 0;
01276       int assoc_step_num = 0;
01277   
01278       // EvalNewPoint
01279       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01280       if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
01281       {
01282         algo->insert_assoc_step(
01283           step_num
01284           ,IterationPack::POST_STEP
01285           ,1
01286           ,"CheckDescentQuasiNormalStep"
01287           ,check_descent_quansi_normal_step_step
01288           );
01289       }
01290       
01291       // QuasiNormalStep
01292       if( !tailored_approach ) {
01293         algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
01294         assoc_step_num = 0;
01295         if( check_decomp_from_py_step.get() )
01296           algo->insert_assoc_step(
01297             step_num
01298             ,IterationPack::POST_STEP
01299             ,++assoc_step_num
01300             ,"CheckDecompositionFromPy"
01301             ,check_decomp_from_py_step
01302             );
01303         if( check_decomp_from_Rpy_step.get() )
01304           algo->insert_assoc_step(
01305             step_num
01306             ,IterationPack::POST_STEP
01307             ,++assoc_step_num
01308             ,"CheckDecompositionFromRPy"
01309             ,check_decomp_from_Rpy_step
01310             );
01311         if( check_descent_quansi_normal_step_step.get() )
01312           algo->insert_assoc_step(
01313             step_num
01314             ,IterationPack::POST_STEP
01315             ,++assoc_step_num
01316             ,"CheckDescentQuasiNormalStep"
01317             ,check_descent_quansi_normal_step_step
01318             );
01319       }
01320 
01321       // CheckConvergence
01322       algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01323 
01324       // CalcDFromYPY
01325       algo->insert_step( ++step_num, "CalcDFromYpy", Teuchos::rcp(new CalcDFromYPY_Step()) );
01326       
01327       // LineSearch
01328       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01329         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01330       }
01331       else {
01332         // Main line search step
01333         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01334         // Insert presteps
01335         Algorithm::poss_type
01336           pre_step_i = 0;
01337         // (.-?) LineSearchFullStep
01338         algo->insert_assoc_step(
01339           step_num
01340           ,IterationPack::PRE_STEP
01341           ,++pre_step_i
01342           ,"LineSearchFullStep"
01343           ,line_search_full_step_step
01344           );
01345       }
01346 
01347     }
01348     else if ( m > 0 || nb > 0 ) {
01349       //
01350       // General nonlinear NLP ( m > 0 || nb > 0 )
01351       //
01352       if( nb == 0 ) {
01353         //
01354         // Nonlinear equality constrained NLP ( m > 0 && num_bounded_x == 0 )
01355         //
01356         if(trase_out)
01357           *trase_out 
01358             << "\nConfiguring an algorithm for a nonlinear equality constrained "
01359             << "NLP ( m > 0 && num_bounded_x == 0) ...\n";
01360       }
01361       else {
01362         //
01363         // Nonlinear inequality constrained NLP ( num_bounded_x > 0 )
01364         //
01365         if(trase_out)
01366           *trase_out 
01367             << "\nConfiguring an algorithm for a nonlinear generally constrained "
01368             << "NLP ( num_bounded_x > 0 ) ...\n";
01369       }
01370 
01371       int step_num       = 0;
01372       int assoc_step_num = 0;
01373   
01374       // EvalNewPoint
01375       algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
01376       if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
01377       {
01378         algo->insert_assoc_step(
01379           step_num
01380           ,IterationPack::POST_STEP
01381           ,1
01382           ,"CheckDescentQuasiNormalStep"
01383           ,check_descent_quansi_normal_step_step
01384           );
01385       }
01386       
01387       // QuasiNormalStep
01388       if( !tailored_approach ) {
01389         algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
01390         assoc_step_num = 0;
01391         if( check_decomp_from_py_step.get() )
01392           algo->insert_assoc_step(
01393             step_num
01394             ,IterationPack::POST_STEP
01395             ,++assoc_step_num
01396             ,"CheckDecompositionFromPy"
01397             ,check_decomp_from_py_step
01398             );
01399         if( check_decomp_from_Rpy_step.get() )
01400           algo->insert_assoc_step(
01401             step_num
01402             ,IterationPack::POST_STEP
01403             ,++assoc_step_num
01404             ,"CheckDecompositionFromRPy"
01405             ,check_decomp_from_Rpy_step
01406             );
01407         if( check_descent_quansi_normal_step_step.get() )
01408           algo->insert_assoc_step(
01409             step_num
01410             ,IterationPack::POST_STEP
01411             ,++assoc_step_num
01412             ,"CheckDescentQuasiNormalStep"
01413             ,check_descent_quansi_normal_step_step
01414             );
01415       }
01416 
01417       // ReducedGradient
01418       if( !tailored_approach ) {
01419         algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
01420       }
01421 
01422       if( nb == 0 ) {
01423 
01424         // CalcReducedGradLagrangian
01425         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01426 
01427         // CalcLagrangeMultDecomposed
01428         // Compute these here so that in case we converge we can report them
01429         if( !tailored_approach ) {
01430           // ToDo: Insert this step
01431         }
01432 
01433         // CheckConvergence
01434         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01435       }
01436 
01437       // ReducedHessian
01438       algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
01439 
01440       // (.-1) Initialize reduced Hessian
01441       if(init_red_hess_step.get()) {
01442         algo->insert_assoc_step(
01443           step_num, IterationPack::PRE_STEP, 1
01444           ,"ReducedHessianInitialization"
01445           ,init_red_hess_step
01446           );
01447       }
01448 
01449       // (.-1) CheckSkipBFGSUpdate
01450       algo->insert_assoc_step(
01451         step_num
01452         ,IterationPack::PRE_STEP
01453         ,1
01454         ,CheckSkipBFGSUpdate_name
01455         ,check_skip_bfgs_update_step
01456         );
01457 
01458       // TangentialStep
01459       algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
01460       if( nb > 0 ) {
01461         // SetDBoundsStd
01462         algo->insert_assoc_step(
01463           step_num
01464           ,IterationPack::PRE_STEP
01465           ,1
01466           ,"SetDBoundsStd"
01467           ,set_d_bounds_step
01468           );
01469       }
01470 
01471       // CalcDFromYPYZPZ
01472       algo->insert_step( ++step_num, CalcDFromYPYZPZ_name, calc_d_from_Ypy_Zpy_step );
01473       
01474       if( nb > 0 ) {
01475 
01476         // CalcReducedGradLagrangian
01477         algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
01478 
01479         // CalcLagrangeMultDecomposed
01480         // Compute these here so that in case we converge we can report them
01481         if( !tailored_approach ) {
01482           // ToDo: Insert this step
01483         }
01484 
01485         // CheckConvergence
01486         algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
01487       }
01488 
01489       // LineSearch
01490       if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
01491         algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
01492       }
01493       else {
01494         // Main line search step
01495         algo->insert_step( ++step_num, LineSearch_name, line_search_step );
01496         // Insert presteps
01497         Algorithm::poss_type
01498           pre_step_i = 0;
01499         // (.-?) LineSearchFullStep
01500         algo->insert_assoc_step(
01501           step_num
01502           ,IterationPack::PRE_STEP
01503           ,++pre_step_i
01504           ,"LineSearchFullStep"
01505           ,line_search_full_step_step
01506           );
01507         // (.-?) MeritFunc_PenaltyParamUpdate
01508         if(merit_func_penalty_param_update_step.get()) {
01509           algo->insert_assoc_step(
01510             step_num
01511             ,IterationPack::PRE_STEP
01512             ,++pre_step_i
01513             ,"MeritFunc_PenaltyParamUpdate"
01514             ,merit_func_penalty_param_update_step
01515             );
01516         }
01517       }
01518 
01519     }
01520     else {
01521       TEUCHOS_TEST_FOR_EXCEPT(true); // Error, this should not ever be called!
01522     }
01523   }
01524   
01525 }
01526 
01527 void NLPAlgoConfigMamaJama::init_algo(NLPAlgoInterface* _algo)
01528 {
01529   using Teuchos::dyn_cast;
01530 
01531   TEUCHOS_TEST_FOR_EXCEPTION(
01532     _algo == NULL, std::invalid_argument
01533     ,"NLPAlgoConfigMamaJama::init_algo(_algo) : Error, "
01534     "_algo can not be NULL" );
01535 
01536   NLPAlgo              &algo    = dyn_cast<NLPAlgo>(*_algo);
01537   NLPAlgoState         &state   = algo.rsqp_state();
01538   NLP                  &nlp     = algo.nlp();
01539 
01540   algo.max_iter( algo.algo_cntr().max_iter() );
01541   algo.max_run_time( algo.algo_cntr().max_run_time() );
01542 
01543   // Reset the iteration count to zero
01544   state.k(0);
01545 
01546   // Get organized output of vectors and matrices even if setw is not used by Step objects.
01547   algo.track().journal_out()
01548     << std::setprecision(algo.algo_cntr().journal_print_digits())
01549     << std::scientific;
01550 
01551   // set the first step
01552   algo.do_step_first(1);
01553 
01554   // The rest of the algorithm should initialize itself
01555 }
01556 
01557 // private
01558 
01559 void NLPAlgoConfigMamaJama::readin_options(
01560     const OptionsFromStreamPack::OptionsFromStream     &options
01561   , SOptionValues                                      *ov
01562   , std::ostream                                       *trase_out
01563   )
01564 {
01565   namespace ofsp = OptionsFromStreamPack;
01566   using   ofsp::OptionsFromStream;
01567   typedef   OptionsFromStream::options_group_t    options_group_t;
01568   using   ofsp::StringToIntMap;
01569   using   ofsp::StringToBool;
01570 
01571   TEUCHOS_TEST_FOR_EXCEPT( !( ov ) ); // only a local class error
01572 
01573   // Get the options group for "NLPAlgoConfigMamaJama"
01574   const std::string opt_grp_name = "NLPAlgoConfigMamaJama";
01575   const OptionsFromStream::options_group_t optgrp = options.options_group( opt_grp_name );
01576   if( OptionsFromStream::options_group_exists( optgrp ) ) {
01577 
01578     // Define map for options group "MamaJama".
01579     const int num_opts = 13;
01580     enum EMamaJama {
01581       MAX_BASIS_COND_CHANGE_FRAC
01582       ,EXACT_REDUCED_HESSIAN
01583       ,QUASI_NEWTON
01584       ,NUM_LBFGS_UPDATES_STORED
01585       ,LBFGS_AUTO_SCALING
01586       ,HESSIAN_INITIALIZATION
01587       ,QP_SOLVER
01588       ,REINIT_HESSIAN_ON_QP_FAIL
01589       ,LINE_SEARCH_METHOD
01590       ,MERIT_FUNCTION_TYPE
01591       ,L1_PENALTY_PARAM_UPDATE
01592       ,MAX_PZ_NORM
01593       ,NUM_PZ_DAMP_ITERS
01594     };
01595     const char* SMamaJama[num_opts] = {
01596       "max_basis_cond_change_frac"
01597       ,"exact_reduced_hessian"
01598       ,"quasi_newton"
01599       ,"num_lbfgs_updates_stored"
01600       ,"lbfgs_auto_scaling"
01601       ,"hessian_initialization"
01602       ,"qp_solver"
01603       ,"reinit_hessian_on_qp_fail"
01604       ,"line_search_method"
01605       ,"merit_function_type"
01606       ,"l1_penalty_parameter_update"
01607       ,"max_pz_norm"
01608       ,"num_pz_damp_iters"
01609     };
01610     StringToIntMap  mama_jama_map(  opt_grp_name, num_opts, SMamaJama );
01611 
01612     options_group_t::const_iterator itr = optgrp.begin();
01613     for( ; itr != optgrp.end(); ++itr ) {
01614       switch( (EMamaJama)mama_jama_map( ofsp::option_name(itr) ) ) {
01615         case MAX_BASIS_COND_CHANGE_FRAC:
01616           ov->max_basis_cond_change_frac_ = std::atof( ofsp::option_value(itr).c_str() );
01617           break;
01618         case EXACT_REDUCED_HESSIAN:
01619           ov->exact_reduced_hessian_ = StringToBool( "exact_reduced_hessian", ofsp::option_value(itr).c_str() );
01620           break;
01621         case QUASI_NEWTON:
01622         {
01623           const std::string &opt_val = ofsp::option_value(itr);
01624           if( opt_val == "AUTO" )
01625             ov->quasi_newton_ = QN_AUTO;
01626           else if( opt_val == "BFGS" )
01627             ov->quasi_newton_ = QN_BFGS;
01628           else if( opt_val == "PBFGS" )
01629             ov->quasi_newton_ = QN_PBFGS;
01630           else if( opt_val == "LBFGS" )
01631             ov->quasi_newton_ = QN_LBFGS;
01632           else if( opt_val == "LPBFGS" )
01633             ov->quasi_newton_ = QN_LPBFGS;
01634           else
01635             TEUCHOS_TEST_FOR_EXCEPTION(
01636               true, std::invalid_argument
01637               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01638               "Error, incorrect value for \"quasi_newton\" "
01639               ", Only options of BFGS, PBFGS"
01640               ", LBFGS, LPBFGS and AUTO are avalible."
01641               );
01642           break;
01643         }
01644         case NUM_LBFGS_UPDATES_STORED:
01645           ov->num_lbfgs_updates_stored_ = std::atoi( ofsp::option_value(itr).c_str() );
01646           break;
01647         case LBFGS_AUTO_SCALING:
01648           ov->lbfgs_auto_scaling_
01649             = StringToBool( "lbfgs_auto_scaling", ofsp::option_value(itr).c_str() );
01650           break;
01651         case HESSIAN_INITIALIZATION:
01652         {
01653           const std::string &opt_val = ofsp::option_value(itr);
01654           if( opt_val == "IDENTITY" )
01655             ov->hessian_initialization_ = INIT_HESS_IDENTITY;
01656           else if( opt_val == "FINITE_DIFF_SCALE_IDENTITY" )
01657             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_IDENTITY;
01658           else if( opt_val == "FINITE_DIFF_DIAGONAL" )
01659             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL;
01660           else if( opt_val == "FINITE_DIFF_DIAGONAL_ABS" )
01661             ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
01662           else if( opt_val == "AUTO" )
01663             ov->hessian_initialization_ = INIT_HESS_AUTO;
01664           else if( opt_val == "SERIALIZE" )
01665             ov->hessian_initialization_ = INIT_HESS_SERIALIZE;
01666           else
01667             TEUCHOS_TEST_FOR_EXCEPTION(
01668               true, std::invalid_argument
01669               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01670               "Error, incorrect value for \"hessian_initialization\" "
01671               ", Only options of IDENTITY, SERIALIZE, FINITE_DIFF_SCALE_IDENTITY,"
01672               " FINITE_DIFF_DIAGONAL, FINITE_DIFF_DIAGONAL_ABS and AUTO"
01673               " are available"  );
01674           break;
01675         }
01676         case QP_SOLVER:
01677         {
01678           const std::string &qp_solver = ofsp::option_value(itr);
01679           if( qp_solver == "AUTO" ) {
01680             ov->qp_solver_type_ = QP_AUTO;
01681           } else if( qp_solver == "QPSOL" ) {
01682             ov->qp_solver_type_ = QP_QPSOL;
01683           } else if( qp_solver == "QPOPT" ) {
01684 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
01685             ov->qp_solver_type_ = QP_QPOPT;
01686 #else
01687             TEUCHOS_TEST_FOR_EXCEPTION(
01688               true, std::invalid_argument
01689               ,"NLPAlgoConfigMamaJama::readin_options(...) : QPOPT is not supported,"
01690               " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
01691 #endif
01692           } else if( qp_solver == "QPKWIK" ) {
01693 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
01694             ov->qp_solver_type_ = QP_QPKWIK;
01695 #else
01696             TEUCHOS_TEST_FOR_EXCEPTION(
01697               true, std::invalid_argument
01698               ,"NLPAlgoConfigMamaJama::readin_options(...) : QPKWIK is not supported,"
01699               " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK!" );
01700 #endif
01701           } else if( qp_solver == "QPSCHUR" ) {
01702             ov->qp_solver_type_ = QP_QPSCHUR;
01703           } else {
01704             TEUCHOS_TEST_FOR_EXCEPTION(
01705               true, std::invalid_argument
01706               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01707               "Error, incorrect value for \"qp_solver\" "
01708               "Only qp solvers QPOPT, QPSOL, QPKWIK, QPSCHUR and AUTO are avalible."  );
01709           }
01710           break;
01711         }
01712         case REINIT_HESSIAN_ON_QP_FAIL:
01713           ov->reinit_hessian_on_qp_fail_ = StringToBool( "reinit_hessian_on_qp_fail", ofsp::option_value(itr).c_str() );
01714           break;
01715         case LINE_SEARCH_METHOD:
01716         {
01717           const std::string &option = ofsp::option_value(itr);
01718           if( option == "NONE" ) {
01719             ov->line_search_method_ = LINE_SEARCH_NONE;
01720           } else if( option == "DIRECT" ) {
01721             ov->line_search_method_ = LINE_SEARCH_DIRECT;
01722           } else if( option == "2ND_ORDER_CORRECT" ) {
01723             ov->line_search_method_ = LINE_SEARCH_2ND_ORDER_CORRECT;
01724           } else if( option == "WATCHDOG" ) {
01725             ov->line_search_method_ = LINE_SEARCH_WATCHDOG;
01726           } else if( option == "AUTO" ) {
01727             ov->line_search_method_ = LINE_SEARCH_AUTO;
01728           } else if( option == "FILTER" ) {
01729             ov->line_search_method_ = LINE_SEARCH_FILTER;
01730           } else {
01731             TEUCHOS_TEST_FOR_EXCEPTION(
01732               true, std::invalid_argument
01733               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01734               "Error, incorrect value for \"line_search_method\".\n"
01735               "Only the options NONE, DIRECT, 2ND_ORDER_CORRECT, FILTER, WATCHDOG "
01736               "and AUTO are avalible." );
01737           }
01738           break;
01739         }
01740         case MERIT_FUNCTION_TYPE:
01741         {
01742           const std::string &option = ofsp::option_value(itr);
01743           if( option == "L1" )
01744             ov->merit_function_type_ = MERIT_FUNC_L1;
01745           else if( option == "MODIFIED_L1" )
01746             ov->merit_function_type_ = MERIT_FUNC_MOD_L1;
01747           else if( option == "MODIFIED_L1_INCR" )
01748             ov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
01749           else if( option == "AUTO" )
01750             ov->merit_function_type_ = MERIT_FUNC_AUTO;
01751           else
01752             TEUCHOS_TEST_FOR_EXCEPTION(
01753               true, std::invalid_argument
01754               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01755               "Error, incorrect value for \"merit_function_type\".\n"
01756               "Only the options L1, MODIFIED_L1, MODIFIED_L1_INCR "
01757               "and AUTO are avalible." );
01758           break;
01759         }
01760         case L1_PENALTY_PARAM_UPDATE:
01761         {
01762           const std::string &option = ofsp::option_value(itr);
01763           if( option == "WITH_MULT" )
01764             ov->l1_penalty_param_update_
01765               = L1_PENALTY_PARAM_WITH_MULT;
01766           else if( option == "MULT_FREE" )
01767             ov->l1_penalty_param_update_
01768               = L1_PENALTY_PARAM_MULT_FREE;
01769           else if( option == "AUTO" )
01770             ov->l1_penalty_param_update_
01771               = L1_PENALTY_PARAM_AUTO;
01772           else
01773             TEUCHOS_TEST_FOR_EXCEPTION(
01774               true, std::invalid_argument
01775               ,"NLPAlgoConfigMamaJama::readin_options(...) : "
01776               "Error, incorrect value for \"l1_penalty_param_update\".\n"
01777               "Only the options WITH_MULT, MULT_FREE and AUTO"
01778               "are avalible."  );
01779           break;
01780         }
01781         case MAX_PZ_NORM: {
01782           const std::string &option = ofsp::option_value(itr);
01783           ov->max_pz_norm_ = std::atof(option.c_str());
01784           break;
01785         }
01786         case NUM_PZ_DAMP_ITERS: {
01787           const std::string &option = ofsp::option_value(itr);
01788           ov->num_pz_damp_iters_ = std::atoi(option.c_str());
01789           break;
01790         }
01791 
01792         default:
01793           TEUCHOS_TEST_FOR_EXCEPT(true);  // this would be a local programming error only.
01794       }
01795     }
01796   }
01797   else {
01798     if(trase_out)
01799       *trase_out
01800         << "\n\n*** Warning!  The options group \"NLPAlgoConfigMamaJama\" was not found.\n"
01801         << "Using a default set of options instead ... \n";
01802   }
01803 }
01804 
01805 //
01806 // This is where some of the default options are set and the user is alerted to what their
01807 // value is.
01808 //
01809 void NLPAlgoConfigMamaJama::set_default_options( 
01810   const SOptionValues     &uov
01811   ,SOptionValues          *cov
01812   ,std::ostream           *trase_out
01813   )
01814 {
01815   if(trase_out)
01816     *trase_out
01817       << "\n*** Setting option defaults for options not set by the user or determined some other way ...\n";
01818 
01819   if( cov->max_basis_cond_change_frac_ < 0.0 &&  uov.max_basis_cond_change_frac_ < 0.0 ) {
01820     if(trase_out)
01821       *trase_out
01822         << "\nmax_basis_cond_change_frac < 0 : setting max_basis_cond_change_frac = 1e+4 \n";
01823     cov->max_basis_cond_change_frac_ = 1e+4;
01824   }
01825   else {
01826     cov->max_basis_cond_change_frac_ = uov.max_basis_cond_change_frac_;
01827   }
01828   cov->exact_reduced_hessian_ = uov.exact_reduced_hessian_;
01829   if( cov->quasi_newton_ == QN_AUTO && uov.quasi_newton_ == QN_AUTO ) {
01830     if(trase_out)
01831       *trase_out
01832         << "\nquasi_newton == AUTO: setting quasi_newton = BFGS\n";
01833     cov->quasi_newton_ = QN_BFGS;
01834   }
01835   else if(cov->quasi_newton_ == QN_AUTO) {
01836     cov->quasi_newton_ = uov.quasi_newton_;
01837   }
01838   if( cov->num_lbfgs_updates_stored_ < 0 && uov.num_lbfgs_updates_stored_ < 0 ) {
01839     if(trase_out)
01840       *trase_out
01841         << "\nnum_lbfgs_updates_stored < 0 : setting num_lbfgs_updates_stored = 10\n";
01842     cov->num_lbfgs_updates_stored_ = 10;
01843   }
01844   else if(cov->num_lbfgs_updates_stored_ < 0) {
01845     cov->num_lbfgs_updates_stored_ = uov.num_lbfgs_updates_stored_;
01846   }
01847   cov->lbfgs_auto_scaling_ = uov.lbfgs_auto_scaling_;
01848   if( cov->hessian_initialization_ == INIT_HESS_AUTO && uov.hessian_initialization_ == INIT_HESS_AUTO ) {
01849     if(trase_out)
01850        *trase_out
01851         << "\nhessian_initialization == AUTO: setting hessian_initialization = IDENTITY\n";
01852     cov->hessian_initialization_ = INIT_HESS_IDENTITY;
01853 /*
01854     if(trase_out)
01855       *trase_out
01856         << "\nhessian_initialization == AUTO: setting hessian_initialization = FINITE_DIFF_DIAGONAL_ABS\n";
01857     cov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
01858 */
01859   }
01860   else if(cov->hessian_initialization_ == INIT_HESS_AUTO) {
01861     cov->hessian_initialization_ = uov.hessian_initialization_;
01862   }
01863   if( cov->qp_solver_type_ == QP_AUTO && uov.qp_solver_type_ == QP_AUTO ) {
01864 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
01865     if(trase_out)
01866       *trase_out
01867         << "\nqp_solver_type == AUTO: setting qp_solver_type = QPKWIK\n";
01868     cov->qp_solver_type_ = QP_QPKWIK;
01869 #else
01870     if(trase_out)
01871       *trase_out
01872         << "\nqp_solver_type == AUTO: setting qp_solver_type = QPSCHUR\n";
01873     cov->qp_solver_type_ = QP_QPSCHUR;
01874 #endif
01875   }
01876   else if(cov->qp_solver_type_ == QP_AUTO) {
01877     cov->qp_solver_type_ = uov.qp_solver_type_;
01878   }
01879   cov->reinit_hessian_on_qp_fail_ = uov.reinit_hessian_on_qp_fail_;
01880   if( cov->line_search_method_ == LINE_SEARCH_AUTO && uov.line_search_method_ == LINE_SEARCH_AUTO ) {
01881     if(trase_out)
01882       *trase_out
01883         << "\nline_search_method == AUTO: setting line_search_method = FILTER\n";
01884     cov->line_search_method_ = LINE_SEARCH_FILTER;
01885   }
01886   else if(cov->line_search_method_ == LINE_SEARCH_AUTO) {
01887     cov->line_search_method_ = uov.line_search_method_;
01888   }
01889   if( cov->merit_function_type_ == MERIT_FUNC_AUTO && uov.merit_function_type_ == MERIT_FUNC_AUTO ) {
01890     if(trase_out)
01891       *trase_out
01892         << "\nmerit_function_type == AUTO: setting merit_function_type = MODIFIED_L1_INCR\n";
01893     cov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
01894   }
01895   else if(cov->merit_function_type_ == MERIT_FUNC_AUTO) {
01896     cov->merit_function_type_ = uov.merit_function_type_;
01897   }
01898   if( cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO && uov.l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO ) {
01899     if(trase_out)
01900       *trase_out
01901         << "\nl1_penalty_param_update == AUTO: setting l1_penalty_param_update = MULT_FREE\n";
01902     cov->l1_penalty_param_update_ = L1_PENALTY_PARAM_MULT_FREE;
01903   }
01904   else if(cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO) {
01905     cov->l1_penalty_param_update_ = uov.l1_penalty_param_update_;
01906   }
01907   if( cov->full_steps_after_k_ < 0 && uov.full_steps_after_k_ < 0 ) {
01908     if(trase_out)
01909       *trase_out
01910         << "\nfull_steps_after_k < 0 : the line search will never be turned off after so many iterations\n";
01911   }
01912   else {
01913     cov->full_steps_after_k_ = uov.full_steps_after_k_;
01914   }
01915   cov->max_pz_norm_ = uov.max_pz_norm_;
01916   cov->num_pz_damp_iters_ = uov.num_pz_damp_iters_;
01917   if(trase_out)
01918     *trase_out
01919       << "\n*** End setting default options\n";
01920 }
01921 
01922 } // end namespace MoochoPack
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends