NLPInterfacePack_NLPFirstDerivTester.cpp

Go to the documentation of this file.
00001 // @HEADER
00002 // ***********************************************************************
00003 // 
00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
00005 //                  Copyright (2003) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // This library is free software; you can redistribute it and/or modify
00011 // it under the terms of the GNU Lesser General Public License as
00012 // published by the Free Software Foundation; either version 2.1 of the
00013 // License, or (at your option) any later version.
00014 //  
00015 // This library is distributed in the hope that it will be useful, but
00016 // WITHOUT ANY WARRANTY; without even the implied warranty of
00017 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018 // Lesser General Public License for more details.
00019 //  
00020 // You should have received a copy of the GNU Lesser General Public
00021 // License along with this library; if not, write to the Free Software
00022 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00023 // USA
00024 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 
00025 // 
00026 // ***********************************************************************
00027 // @HEADER
00028 
00029 #include <assert.h>
00030 #include <math.h>
00031 
00032 #include <iomanip>
00033 #include <sstream>
00034 #include <limits>
00035 
00036 #include "NLPInterfacePack_NLPFirstDerivTester.hpp"
00037 #include "NLPInterfacePack_NLPFirstOrder.hpp"
00038 #include "AbstractLinAlgPack_MatrixOp.hpp"
00039 #include "AbstractLinAlgPack_VectorMutable.hpp"
00040 #include "AbstractLinAlgPack_VectorOut.hpp"
00041 #include "AbstractLinAlgPack_VectorSpace.hpp"
00042 #include "AbstractLinAlgPack_VectorStdOps.hpp"
00043 #include "AbstractLinAlgPack_LinAlgOpPack.hpp"
00044 #include "AbstractLinAlgPack_EtaVector.hpp"
00045 #include "AbstractLinAlgPack_assert_print_nan_inf.hpp"
00046 #include "TestingHelperPack_update_success.hpp"
00047 #include "Teuchos_TestForException.hpp"
00048 
namespace {

/// Return the larger of two values.
///
/// File-local helper kept in an anonymous namespace so it does not leak
/// out of this translation unit.
template< class T >
inline
T my_max( const T& v1, const T& v2 )
{
  if( v1 > v2 )
    return v1;
  return v2;
}

} // end namespace
00054 
00055 namespace NLPInterfacePack {
00056 
00057 NLPFirstDerivTester::NLPFirstDerivTester(
00058   const calc_fd_prod_ptr_t  &calc_fd_prod
00059   ,ETestingMethod           fd_testing_method
00060   ,size_type                num_fd_directions
00061   ,value_type               warning_tol
00062   ,value_type               error_tol
00063   )
00064   :calc_fd_prod_(calc_fd_prod)
00065   ,fd_testing_method_(fd_testing_method)
00066   ,num_fd_directions_(num_fd_directions)
00067   ,warning_tol_(warning_tol)
00068   ,error_tol_(error_tol)
00069 {}
00070 
00071 bool NLPFirstDerivTester::finite_diff_check(
00072   NLP               *nlp
00073   ,const Vector     &xo
00074   ,const Vector     *xl
00075   ,const Vector     *xu
00076   ,const MatrixOp   *Gc
00077   ,const Vector     *Gf
00078   ,bool             print_all_warnings
00079   ,std::ostream     *out
00080   ) const
00081 {
00082   namespace rcp = MemMngPack;
00083   using AbstractLinAlgPack::assert_print_nan_inf;
00084 
00085   const size_type
00086     //n  = nlp->n(),
00087     m  = nlp->m();
00088 
00089   // ///////////////////////////////////
00090   // Validate the input
00091 
00092   TEST_FOR_EXCEPTION(
00093     !m && Gc, std::invalid_argument
00094     ,"NLPFirstDerivTester::finite_diff_check(...) : "
00095     "Error, Gc must be NULL if m == 0" );
00096 
00097   assert_print_nan_inf(xo, "xo",true,out);
00098   if(Gf)
00099     assert_print_nan_inf(*Gf, "Gf",true,out); 
00100 
00101   bool success = true;
00102 
00103   try {
00104 
00105   switch(fd_testing_method_) {
00106     case FD_COMPUTE_ALL:
00107       return fd_check_all(nlp,xo,xl,xu,Gc,Gf,print_all_warnings,out);
00108     case FD_DIRECTIONAL:
00109       return fd_directional_check(nlp,xo,xl,xu,Gc,Gf,print_all_warnings,out);
00110     default:
00111       TEST_FOR_EXCEPT(true);
00112   }
00113 
00114   } // end try
00115   catch( const AbstractLinAlgPack::NaNInfException& except ) {
00116     if(out)
00117       *out
00118         << "Error: found a NaN or Inf.  Stoping tests!\n";
00119     success = false;
00120   }
00121   
00122   return success; // will never be executed
00123 }
00124 
00125 // private
00126 
00127 bool NLPFirstDerivTester::fd_check_all(
00128   NLP               *nlp
00129   ,const Vector     &xo
00130   ,const Vector     *xl
00131   ,const Vector     *xu
00132   ,const MatrixOp   *Gc
00133   ,const Vector     *Gf
00134   ,bool             print_all_warnings
00135   ,std::ostream     *out
00136   ) const
00137 {
00138   using std::setw;
00139   using std::endl;
00140   using std::right;
00141 
00142   namespace rcp = MemMngPack;
00143   using AbstractLinAlgPack::sum;
00144   using AbstractLinAlgPack::dot;
00145   using AbstractLinAlgPack::Vp_StV;
00146   using AbstractLinAlgPack::assert_print_nan_inf;
00147   using AbstractLinAlgPack::random_vector;
00148   using LinAlgOpPack::V_StV;
00149   using LinAlgOpPack::V_MtV;
00150 
00151    using TestingHelperPack::update_success;
00152 
00153   bool success = true;
00154 
00155   const size_type
00156     n  = nlp->n();
00157   //m  = nlp->m();
00158 
00159   // //////////////////////////////////////////////
00160   // Validate the input
00161 
00162   NLP::vec_space_ptr_t
00163     space_x = nlp->space_x(),
00164     space_c = nlp->space_c();
00165 
00166   const CalcFiniteDiffProd
00167     &fd_deriv_prod = this->calc_fd_prod();
00168 
00169   const value_type
00170     //rand_y_l = -1.0, rand_y_u = 1.0,
00171     small_num = ::pow(std::numeric_limits<value_type>::epsilon(),0.25);
00172 
00173   if(out)
00174     *out
00175       << "\nComparing Gf and/or Gc with finite-difference values "
00176         "FDGf and/or FDGc one variable i at a time ...\n";
00177 
00178   value_type  max_Gf_warning_viol = 0.0;
00179   int         num_Gf_warning_viol = 0;
00180   value_type  max_Gc_warning_viol = 0.0;
00181   int         num_Gc_warning_viol = 0;
00182 
00183   VectorSpace::vec_mut_ptr_t
00184     e_i       = space_x->create_member(),
00185     Gc_i      = ( Gc ? space_c->create_member()  : Teuchos::null ),
00186     FDGc_i    = ( Gc ? space_c->create_member()  : Teuchos::null );
00187   *e_i = 0.0;
00188 
00189   for( size_type i = 1; i <= n; ++ i ) {
00190     EtaVector eta_i(i,n);
00191     e_i->set_ele(i,1.0);
00192     // Compute exact??? values
00193     value_type
00194       Gf_i = Gf ? Gf->get_ele(i) : 0.0;
00195     if(Gc)
00196       V_MtV( Gc_i.get(), *Gc, BLAS_Cpp::trans, eta_i() );
00197     // Compute finite difference values
00198     value_type
00199       FDGf_i;
00200     const bool preformed_fd = fd_deriv_prod.calc_deriv_product(
00201       xo,xl,xu
00202       ,*e_i
00203       ,NULL // fo
00204       ,NULL // co
00205       ,true // check_nan_inf
00206       ,nlp
00207       ,Gf ? &FDGf_i : NULL
00208       ,Gc ? FDGc_i.get() : NULL
00209       ,out
00210       );
00211     if( !preformed_fd ) {
00212       if(out)
00213         *out
00214           << "\nError, the finite difference computation was not preformed due to cramped bounds\n"
00215           << "Finite difference test failed!\n" << endl;
00216       return false;
00217     }
00218     
00219     // Compare the quantities
00220     // Gf
00221     assert_print_nan_inf(FDGf_i, "FDGf'*e_i",true,out);
00222     const value_type
00223       Gf_err = ::fabs( Gf_i - FDGf_i ) / ( ::fabs(Gf_i) + ::fabs(FDGf_i) + small_num );
00224     if(out)
00225       *out
00226         << "\nrel_err(Gf("<<i<<"),FDGf'*e("<<i<<")) = "
00227         << "rel_err(" << Gf_i << "," << FDGf_i << ") = "
00228         << Gf_err << endl;
00229     if( Gf_err >= warning_tol() ) {
00230       max_Gf_warning_viol = my_max( max_Gf_warning_viol, Gf_err );
00231       ++num_Gf_warning_viol;
00232     }
00233     if( Gf_err >= error_tol() ) {
00234       if(out)
00235         *out
00236           << "\nError, exceeded Gf_error_tol = " << error_tol() << endl
00237           << "Stoping the tests!\n";
00238       if(print_all_warnings)
00239         *out
00240           << "\ne_i =\n"     << *e_i
00241           << "\nGf_i =\n"    << Gf_i << std::endl
00242           << "\nFDGf_i =\n"  << FDGf_i << std::endl;
00243       update_success( false, &success );
00244       return false;
00245     }
00246     // Gc
00247     if(Gc) {
00248       const value_type
00249         sum_Gc_i   = sum(*Gc_i),
00250         sum_FDGc_i = sum(*FDGc_i);
00251       assert_print_nan_inf(sum_FDGc_i, "sum(FDGc'*e_i)",true,out);
00252       const value_type
00253         calc_err = ::fabs( ( sum_Gc_i - sum_FDGc_i )
00254                    /( ::fabs(sum_Gc_i) + ::fabs(sum_FDGc_i) + small_num ) );
00255       if(out)
00256         *out
00257           << "\nrel_err(sum(Gc'*e("<<i<<")),sum(FDGc'*e("<<i<<"))) = "
00258           << "rel_err(" << sum_Gc_i << "," << sum_FDGc_i << ") = "
00259           << calc_err << endl;
00260       if( calc_err >= warning_tol() ) {
00261         max_Gc_warning_viol = my_max( max_Gc_warning_viol, calc_err );
00262         ++num_Gc_warning_viol;
00263       }
00264       if( calc_err >= error_tol() ) {
00265         if(out)
00266           *out
00267             << "\nError, rel_err(sum(Gc'*e("<<i<<")),sum(FDGc'*e("<<i<<"))) = "
00268             << "rel_err(" << sum_Gc_i << "," << sum_FDGc_i << ") = "
00269             << calc_err << endl
00270             << "exceeded error_tol = " << error_tol() << endl
00271             << "Stoping the tests!\n";
00272         if(print_all_warnings)
00273           *out << "\ne_i =\n"     << *e_i
00274              << "\nGc_i =\n"    << *Gc_i
00275              << "\nFDGc_i =\n"  << *FDGc_i;
00276         update_success( false, &success );
00277         return false;
00278       }
00279       if( calc_err >= warning_tol() ) {
00280         if(out)
00281           *out
00282             << "\nWarning, rel_err(sum(Gc'*e("<<i<<")),sum(FDGc'*e("<<i<<"))) = "
00283             << "rel_err(" << sum_Gc_i << "," << sum_FDGc_i << ") = "
00284             << calc_err << endl
00285             << "exceeded warning_tol = " << warning_tol() << endl;
00286       }
00287     }
00288     e_i->set_ele(i,0.0);
00289   }
00290   if(out && num_Gf_warning_viol)
00291     *out
00292       << "\nFor Gf, there were " << num_Gf_warning_viol << " warning tolerance "
00293       << "violations out of num_fd_directions = " << num_fd_directions()
00294       << " computations of FDGf'*e(i)\n"
00295       << "and the maximum violation was " << max_Gf_warning_viol
00296       << " > Gf_waring_tol = " << warning_tol() << endl;
00297   if(out && num_Gc_warning_viol)
00298     *out
00299       << "\nFor Gc, there were " << num_Gc_warning_viol << " warning tolerance "
00300       << "violations out of num_fd_directions = " << num_fd_directions()
00301       << " computations of FDGc'*e(i)\n"
00302       << "and the maximum violation was " << max_Gc_warning_viol
00303       << " > Gc_waring_tol = " << warning_tol() << endl;
00304   if(out)
00305     *out
00306       << "\nCongradulations!  All of the computed errors were within the specified error tolerance!\n";
00307 
00308   return true;
00309 
00310 }
00311 
00312 bool NLPFirstDerivTester::fd_directional_check(
00313   NLP               *nlp
00314   ,const Vector     &xo
00315   ,const Vector     *xl
00316   ,const Vector     *xu
00317   ,const MatrixOp   *Gc
00318   ,const Vector     *Gf
00319   ,bool             print_all_warnings
00320   ,std::ostream     *out
00321   ) const
00322 {
00323   using std::setw;
00324   using std::endl;
00325   using std::right;
00326 
00327   namespace rcp = MemMngPack;
00328   using AbstractLinAlgPack::sum;
00329   using AbstractLinAlgPack::dot;
00330   using AbstractLinAlgPack::Vp_StV;
00331   using AbstractLinAlgPack::assert_print_nan_inf;
00332   using AbstractLinAlgPack::random_vector;
00333   using LinAlgOpPack::V_StV;
00334   using LinAlgOpPack::V_MtV;
00335 
00336    using TestingHelperPack::update_success;
00337 
00338   bool success = true;
00339 
00340   //const size_type
00341   //n  = nlp->n(),
00342   //m  = nlp->m();
00343 
00344   // //////////////////////////////////////////////
00345   // Validate the input
00346 
00347   NLP::vec_space_ptr_t
00348     space_x = nlp->space_x(),
00349     space_c = nlp->space_c();
00350 
00351   const CalcFiniteDiffProd
00352     &fd_deriv_prod = this->calc_fd_prod();
00353 
00354   const value_type
00355     rand_y_l = -1.0, rand_y_u = 1.0,
00356     small_num = ::pow(std::numeric_limits<value_type>::epsilon(),0.25);
00357 
00358   if(out)
00359     *out
00360       << "\nComparing directional products Gf'*y and/or Gc'*y with finite difference values "
00361         " FDGf'*y and/or FDGc'*y for random y's ...\n";
00362 
00363   value_type  max_Gf_warning_viol = 0.0;
00364   int         num_Gf_warning_viol = 0;
00365 
00366   VectorSpace::vec_mut_ptr_t
00367     y         = space_x->create_member(),
00368     Gc_prod   = ( Gc ? space_c->create_member()  : Teuchos::null ),
00369     FDGc_prod = ( Gc ? space_c->create_member()  : Teuchos::null );
00370 
00371   const int num_fd_directions_used = ( num_fd_directions() > 0 ? num_fd_directions() : 1 );
00372 
00373   for( int direc_i = 1; direc_i <= num_fd_directions_used; ++direc_i ) {
00374     if( num_fd_directions() > 0 ) {
00375       random_vector( rand_y_l, rand_y_u, y.get() );
00376       if(out)
00377         *out
00378           << "\n****"
00379           << "\n**** Random directional vector " << direc_i << " ( ||y||_1 / n = "
00380           << (y->norm_1() / y->dim()) << " )"
00381           << "\n***\n";
00382     }
00383     else {
00384       *y = 1.0;
00385       if(out)
00386         *out
00387           << "\n****"
00388           << "\n**** Ones vector y ( ||y||_1 / n = "<<(y->norm_1()/y->dim())<<" )"
00389           << "\n***\n";
00390     }
00391     // Compute exact??? values
00392     value_type
00393       Gf_y = Gf ? dot( *Gf, *y ) : 0.0;
00394     if(Gc)
00395       V_MtV( Gc_prod.get(), *Gc, BLAS_Cpp::trans, *y );
00396     // Compute finite difference values
00397     value_type
00398       FDGf_y;
00399     const bool preformed_fd = fd_deriv_prod.calc_deriv_product(
00400       xo,xl,xu
00401       ,*y
00402       ,NULL // fo
00403       ,NULL // co
00404       ,true // check_nan_inf
00405       ,nlp
00406       ,Gf ? &FDGf_y : NULL
00407       ,Gc ? FDGc_prod.get() : NULL
00408       ,out
00409       );
00410     if( !preformed_fd ) {
00411       if(out)
00412         *out
00413           << "\nError, the finite difference computation was not preformed due to cramped bounds\n"
00414           << "Finite difference test failed!\n" << endl;
00415       return false;
00416     }
00417     
00418     // Compare the quantities
00419     // Gf
00420     assert_print_nan_inf(FDGf_y, "FDGf'*y",true,out);
00421     const value_type
00422       Gf_err = ::fabs( Gf_y - FDGf_y ) / ( ::fabs(Gf_y) + ::fabs(FDGf_y) + small_num );
00423     if(out)
00424       *out
00425         << "\nrel_err(Gf'*y,FDGf'*y) = "
00426         << "rel_err(" << Gf_y << "," << FDGf_y << ") = "
00427         << Gf_err << endl;
00428     if( Gf_err >= warning_tol() ) {
00429       max_Gf_warning_viol = my_max( max_Gf_warning_viol, Gf_err );
00430       ++num_Gf_warning_viol;
00431     }
00432     if( Gf_err >= error_tol() ) {
00433       if(out)
00434         *out
00435           << "\nError, exceeded Gf_error_tol = " << error_tol() << endl
00436           << "Stoping the tests!\n";
00437       return false;
00438     }
00439     // Gc
00440     if(Gc) {
00441       const value_type
00442         sum_Gc_prod   = sum(*Gc_prod),
00443         sum_FDGc_prod = sum(*FDGc_prod);
00444       assert_print_nan_inf(sum_FDGc_prod, "sum(FDGc'*y)",true,out);
00445       const value_type
00446         calc_err = ::fabs( ( sum_Gc_prod - sum_FDGc_prod )
00447                    /( ::fabs(sum_Gc_prod) + ::fabs(sum_FDGc_prod) + small_num ) );
00448       if(out)
00449         *out
00450           << "\nrel_err(sum(Gc'*y),sum(FDGc'*y)) = "
00451           << "rel_err(" << sum_Gc_prod << "," << sum_FDGc_prod << ") = "
00452           << calc_err << endl;
00453       if( calc_err >= error_tol() ) {
00454         if(out)
00455           *out
00456             << "\nError, rel_err(sum(Gc'*y),sum(FDGc'*y)) = "
00457             << "rel_err(" << sum_Gc_prod << "," << sum_FDGc_prod << ") = "
00458             << calc_err << endl
00459             << "exceeded error_tol = " << error_tol() << endl
00460             << "Stoping the tests!\n";
00461         if(print_all_warnings)
00462           *out << "\ny =\n"          << *y
00463              << "\nGc_prod =\n"    << *Gc_prod
00464              << "\nFDGc_prod =\n"  << *FDGc_prod;
00465         update_success( false, &success );
00466         return false;
00467       }
00468       if( calc_err >= warning_tol() ) {
00469         if(out)
00470           *out
00471             << "\nWarning, rel_err(sum(Gc'*y),sum(FDGc'*y)) = "
00472             << "rel_err(" << sum_Gc_prod << "," << sum_FDGc_prod << ") = "
00473             << calc_err << endl
00474             << "exceeded warning_tol = " << warning_tol() << endl;
00475       }
00476     }
00477   }
00478   if(out && num_Gf_warning_viol)
00479     *out
00480       << "\nFor Gf, there were " << num_Gf_warning_viol << " warning tolerance "
00481       << "violations out of num_fd_directions = " << num_fd_directions()
00482       << " computations of FDGf'*y\n"
00483       << "and the maximum violation was " << max_Gf_warning_viol
00484       << " > Gf_waring_tol = " << warning_tol() << endl;
00485 
00486   if(out)
00487     *out
00488       << "\nCongradulations!  All of the computed errors were within the specified error tolerance!\n";
00489 
00490   return true;
00491 }
00492 
00493 } // end namespace NLPInterfacePack
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines
Generated on Wed Apr 13 10:11:00 2011 for MOOCHO (Single Doxygen Collection) by  doxygen 1.6.3