NLPInterfacePack_CalcFiniteDiffProd.cpp

Go to the documentation of this file.
00001 // @HEADER
00002 // ***********************************************************************
00003 // 
00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
00005 //                  Copyright (2003) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // This library is free software; you can redistribute it and/or modify
00011 // it under the terms of the GNU Lesser General Public License as
00012 // published by the Free Software Foundation; either version 2.1 of the
00013 // License, or (at your option) any later version.
00014 //  
00015 // This library is distributed in the hope that it will be useful, but
00016 // WITHOUT ANY WARRANTY; without even the implied warranty of
00017 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018 // Lesser General Public License for more details.
00019 //  
00020 // You should have received a copy of the GNU Lesser General Public
00021 // License along with this library; if not, write to the Free Software
00022 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00023 // USA
00024 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 
00025 // 
00026 // ***********************************************************************
00027 // @HEADER
00028 
00029 #include <assert.h>
00030 #include <math.h>
00031 
00032 #include <typeinfo>
00033 #include <iomanip>
00034 #include <sstream>
00035 #include <limits>
00036 
00037 #include "NLPInterfacePack_CalcFiniteDiffProd.hpp"
00038 #include "NLPInterfacePack_NLP.hpp"
00039 #include "AbstractLinAlgPack_VectorSpace.hpp"
00040 #include "AbstractLinAlgPack_VectorOut.hpp"
00041 #include "AbstractLinAlgPack_VectorMutable.hpp"
00042 #include "AbstractLinAlgPack_LinAlgOpPack.hpp"
00043 #include "AbstractLinAlgPack_assert_print_nan_inf.hpp"
00044 #include "AbstractLinAlgPack_VectorAuxiliaryOps.hpp"
00045 #include "Teuchos_FancyOStream.hpp"
00046 #include "Teuchos_TestForException.hpp"
00047 
00048 namespace NLPInterfacePack {
00049 
// Constructor: stores the finite-difference configuration options verbatim.
//
// fd_method_order   : which FD stencil to use (first/second/fourth order,
//                     one-sided or central, or auto-selected).
// fd_step_select    : whether step sizes are absolute or relative to ||xo||inf.
// fd_step_size      : default step size; a negative value means "pick an
//                     optimal step automatically" (see calc_deriv_product()).
// fd_step_size_min  : minimum allowable step; negative means derive from uh.
// fd_step_size_f/_c : per-quantity overrides for f and c; negative means
//                     "use fd_step_size".
CalcFiniteDiffProd::CalcFiniteDiffProd(
  EFDMethodOrder              fd_method_order
  ,EFDStepSelect              fd_step_select
  ,value_type                 fd_step_size
  ,value_type                 fd_step_size_min
  ,value_type                 fd_step_size_f
  ,value_type                 fd_step_size_c
  )
  :fd_method_order_(fd_method_order)
  ,fd_step_select_(fd_step_select)
  ,fd_step_size_(fd_step_size)
  ,fd_step_size_min_(fd_step_size_min)
  ,fd_step_size_f_(fd_step_size_f)
  ,fd_step_size_c_(fd_step_size_c)
{}
00065 
// Compute the finite-difference directional-derivative products Gf'*v (into
// *Gf_prod, if non-NULL) and/or Gc'*v (into *Gc_prod, if non-NULL) at the
// base point xo, perturbing along v with the configured FD method order and
// step sizes.
//
// xo            : base point (checked for nan/inf).
// xl, xu        : optional variable bounds (both NULL or both non-NULL);
//                 when given, the step is clipped so every stencil point
//                 stays within the bounds relaxed by max_var_bounds_viol().
// v             : perturbation direction.
// fo, co        : optional precomputed f(xo) and c(xo); when supplied, the
//                 zero-offset stencil evaluation reuses them instead of
//                 calling the NLP.
// check_nan_inf : if true, each computed f and c is checked for nan/inf.
// nlp           : the NLP used for evaluations; its f/c storage pointers are
//                 temporarily redirected and restored before returning (also
//                 on exception).
// out_arg/trace/dump_all : optional diagnostic output and verbosity.
//
// Returns false (without computing anything) if the bounds leave less room
// than the minimum allowable step; true otherwise.
bool CalcFiniteDiffProd::calc_deriv_product(
  const Vector       &xo
  ,const Vector      *xl
  ,const Vector      *xu
  ,const Vector      &v
  ,const value_type  *fo
  ,const Vector      *co
  ,bool              check_nan_inf
  ,NLP               *nlp
  ,value_type        *Gf_prod
  ,VectorMutable     *Gc_prod
  ,std::ostream      *out_arg
  ,bool              trace
  ,bool              dump_all
  ) const
{

  using std::setw;
  using std::endl;
  using std::right;

  using BLAS_Cpp::rows;
  using BLAS_Cpp::cols;

  typedef VectorSpace::vec_mut_ptr_t  vec_mut_ptr_t;
  using AbstractLinAlgPack::Vt_S;
  using AbstractLinAlgPack::Vp_StV;
  using AbstractLinAlgPack::max_near_feas_step;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using LinAlgOpPack::V_StV;

  // Wrap the raw (non-owned) output stream so nested output is tabbed.
  Teuchos::RefCountPtr<Teuchos::FancyOStream>
    out = Teuchos::getFancyOStream(Teuchos::rcp(out_arg,false));
  Teuchos::OSTab tab(out);

  //
  // The gradient of the constraints is defined as the matrix Gc as:
  //
  // Gc= [ Gc1, Gc2, ..., Gcm ]
  //
  //     [  dc1/dx(1)    dc2/dx(1)  ...    dcm/dx(1)  ]
  //     [  dc1/dx(2)    dc2/dx(2)  ...    dcm/dx(2)  ]
  // Gc= [  .            .          ...    .          ]
  //     [  dc1/dx(n)    dc2/dx(n)  ...    dcm/dx(n)  ]
  //
  //     [  (dc/dx(1))'  ]
  //     [  (dc/dx(2))'  ]
  // Gc= [  .            ]
  //     [  (dc/dx(n))'  ]
  //
  // The gradient of the objective function is defined as the
  // vector Gf as:
  //
  //     [  (df/dx(1))'  ]
  //     [  (df/dx(2))'  ]
  // Gf= [  .            ]
  //     [  (df/dx(n))'  ]
  //
  // To illustrate the theory behind this implementation consider
  // the generic multi-variable function g(x) <: R^n -> R.  Now let's
  // consider we have the base point xo and the vector v to
  // perturb g(x) along.  First form the function g(xo+a*v) and then
  // let's compute dg/da at a = 0:
  //
  // (1) d(g(xo+a*v))/d(a) at a = 0
  //         = sum( dg/dx(i) * dx(i)/da, i = 1...n)
  //         = sum( dg/dx(i) * v(i), i = 1...n)
  //         = Gg'*v
  //
  // Now we can approximate (1) using central differences as:
  //
  // (2) d(g(xo+a*v))/d(a) at a = 0
  //          \approx ( g(xo+h*v) - g(xo-h*v) ) / (2*h)
  //
  // If we equate (1) and (2) we have the approximation:
  //
  // (3) Gg' * v \approx ( g(xo+h*v) - g(xo-h*v) ) / (2*h)
  //
  // It should be clear how this applies to computing Gf'*v and Gc'*v.
  //

  const size_type
    n = nlp->n(),
    m = nlp->m();

  const value_type
    max_bnd_viol = nlp->max_var_bounds_viol();

  // /////////////////////////////////////////
  // Validate the input

  TEST_FOR_EXCEPTION(
    m==0 && Gc_prod, std::invalid_argument
    ,"CalcFiniteDiffProd::calc_deriv(...) : "
    "Error, if nlp->m() == 0, then Gc_prod must equal NULL" );
  TEST_FOR_EXCEPTION(
    Gc_prod && !Gc_prod->space().is_compatible(*nlp->space_c())
    ,std::invalid_argument
    ,"CalcFiniteDiffProd::calc_deriv(...) : "
    "Error, Gc_prod (type \' "<<typeid(*Gc_prod).name()<<"\' "
    "is not compatible with the NLP" );
  TEST_FOR_EXCEPTION(
    (xl && !xu) || (!xl && xu), std::invalid_argument
    ,"CalcFiniteDiffProd::calc_deriv(...) : "
    "Error, both xl = "<<xl<<" and xu = "<<xu
    <<" must be NULL or not NULL" );

  assert_print_nan_inf(xo,"xo",true,out.get());

  // Announce (when tracing) which FD method is configured.
  switch(this->fd_method_order()) {
    case FD_ORDER_ONE:
      if(out.get()&&trace) *out<<"\nUsing one-sided, first-order finite differences ...\n";
      break;
    case FD_ORDER_TWO:
      if(out.get()&&trace) *out<<"\nUsing one-sided, second-order finite differences ...\n";
      break;
    case FD_ORDER_TWO_CENTRAL:
      if(out.get()&&trace) *out<<"\nUsing second-order central finite differences ...\n";
      break;
    case FD_ORDER_TWO_AUTO:
      if(out.get()&&trace) *out<<"\nUsing auto selection of some second-order finite difference method ...\n";
      break;
    case FD_ORDER_FOUR:
      if(out.get()&&trace) *out<<"\nUsing one-sided, fourth-order finite differences ...\n";
      break;
    case FD_ORDER_FOUR_CENTRAL:
      if(out.get()&&trace) *out<<"\nUsing fourth-order central finite differences ...\n";
      break;
    case FD_ORDER_FOUR_AUTO:
      if(out.get()&&trace) *out<<"\nUsing auto selection of some fourth-order finite difference method ...\n";
      break;
    default:
      TEST_FOR_EXCEPT(true); // Should not get here!
  }

  // ////////////////////////
  // Find the step size

  //
  // Get defaults for the optimal step sizes:
  // eps^(1/2) for first order, eps^(1/4) for second order and eps^(1/8)
  // for fourth order (each is sqrt of the previous).
  //

  const value_type
    sqrt_epsilon = ::pow(std::numeric_limits<value_type>::epsilon(),1.0/2.0),
    u_optimal_1  = sqrt_epsilon,
    u_optimal_2  = ::pow(sqrt_epsilon,1.0/2.0),
    u_optimal_4  = ::pow(sqrt_epsilon,1.0/4.0),
    xo_norm_inf  = xo.norm_inf();

  value_type
    uh_opt = 0.0;
  // Relative steps are scaled by (||xo||inf + 1) so they stay meaningful
  // near xo = 0.
  switch(this->fd_method_order()) {
    case FD_ORDER_ONE:
      uh_opt = u_optimal_1 * ( fd_step_select() == FD_STEP_ABSOLUTE ? 1.0 : xo_norm_inf + 1.0 );
      break;
    case FD_ORDER_TWO:
    case FD_ORDER_TWO_CENTRAL:
    case FD_ORDER_TWO_AUTO:
      uh_opt = u_optimal_2 * ( fd_step_select() == FD_STEP_ABSOLUTE ? 1.0 : xo_norm_inf + 1.0 );
      break;
    case FD_ORDER_FOUR:
    case FD_ORDER_FOUR_CENTRAL:
    case FD_ORDER_FOUR_AUTO:
      uh_opt = u_optimal_4 * ( fd_step_select() == FD_STEP_ABSOLUTE ? 1.0 : xo_norm_inf + 1.0 );
      break;
    default:
      TEST_FOR_EXCEPT(true); // Should not get here!
  }

  if(out.get()&&trace) *out<<"\nDefault optimal step length uh_opt = " << uh_opt << " ...\n";

  //
  // Set the step sizes used.  A negative user-set step means "use the
  // optimal default"; relative user-set steps are scaled by (||xo||inf + 1).
  //

  value_type
    uh      = this->fd_step_size(),
    uh_f    = this->fd_step_size_f(),
    uh_c    = this->fd_step_size_c(),
    uh_min  = this->fd_step_size_min();

  // uh
  if( uh < 0 )
    uh = uh_opt;
  else if(fd_step_select() == FD_STEP_RELATIVE)
    uh *= (xo_norm_inf + 1.0);
  // uh_f
  if( uh_f < 0 )
    uh_f = uh;
  else if(fd_step_select() == FD_STEP_RELATIVE)
    uh_f *= (xo_norm_inf + 1.0);
  // uh_c
  if( uh_c < 0 )
    uh_c = uh;
  else if(fd_step_select() == FD_STEP_RELATIVE)
    uh_c *= (xo_norm_inf + 1.0);

  if(out.get()&&trace) *out<<"\nIndividual step sizes initally set: uh="<<uh<<",uh_f="<<uh_f<<",uh_c="<<uh_c<<"\n";

  //
  // Determine the maximum step size that can be used and
  // still stay in the relaxed bounds.
  //
  // ToDo: Consider cramped bounds, one sided differences!
  //

  value_type max_u_feas = std::numeric_limits<value_type>::max();
  if( xl ) {
    // u_pn.first/.second are the largest negative/positive steps along v
    // that keep xo + u*v within the bounds relaxed by max_bnd_viol.
    std::pair<value_type,value_type>
      u_pn
      = max_near_feas_step(
        xo
        ,v
        ,*xl
        ,*xu
        ,max_bnd_viol
        );
    // Keep the direction (sign) with the larger magnitude of feasible step.
    if( u_pn.first < -u_pn.second )
      max_u_feas = u_pn.first;
    else
      max_u_feas = u_pn.second;
    const value_type abs_max_u_feas = ::fabs(max_u_feas);
    if( abs_max_u_feas < uh ) {
      if( abs_max_u_feas < uh_min ) {
        // Not enough room between the bounds to take even the minimum step.
        if(out.get())
          *out
            << "Warning, the size of the maximum finite difference step length\n"
            << "that does not violate the relaxed variable bounds uh = "
            << max_u_feas << " is less than the mimimum allowable step length\n"
            << "uh_min = " << uh_min << " and the finite difference "
            << "derivatives are not computed!\n";
        return false;
      }
      if(out.get())
        *out
          << "Warning, the size of the maximum finite difference step length\n"
          << "that does not violate the relaxed variable bounds uh = "
          << max_u_feas << " is less than the desired step length\n"
          << "uh = " << uh << " and the finite difference "
          << "derivatives may be much less accurate!\n";
    }
  }

  //
  // Set the actual method being used: the AUTO selections currently always
  // resolve to the central-difference variant (other enum values pass
  // through this switch unchanged on purpose).
  //
  // ToDo: Consider cramped bounds and method order.
  //

  EFDMethodOrder  fd_method_order = this->fd_method_order();
  switch(fd_method_order) {
    case FD_ORDER_TWO_AUTO:
      fd_method_order = FD_ORDER_TWO_CENTRAL;
      break;
    case FD_ORDER_FOUR_AUTO:
      fd_method_order = FD_ORDER_FOUR_CENTRAL;
      break;
  }

  // Compute the actual individual step size so as to stay in bounds.
  // num_u_i is the largest |uh_i| multiplier in the chosen stencil (see the
  // per-eval switch below), so dividing by it guarantees the farthest
  // stencil point xo + num_u_i*uh*v remains feasible.
  const value_type
    abs_max_u_feas = ::fabs(max_u_feas);
  value_type
     num_u_i = 0;
  switch(fd_method_order) {
    case FD_ORDER_ONE:
      num_u_i = 1.0;
      break;
    case FD_ORDER_TWO:
      num_u_i = 2.0;
      break;
    case FD_ORDER_TWO_CENTRAL:
      num_u_i = 1.0;
      break;
    case FD_ORDER_FOUR:
      num_u_i = 4.0;
      break;
    case FD_ORDER_FOUR_CENTRAL:
      num_u_i = 2.0;
      break;
    default:
      TEST_FOR_EXCEPT(true); // Should not get here!
  }

  uh   = ( abs_max_u_feas/num_u_i < uh   ? max_u_feas/num_u_i : uh   ); // This can be a negative number!
  uh_f = ( abs_max_u_feas/num_u_i < uh_f ? max_u_feas/num_u_i : uh_f ); //""
  uh_c = ( abs_max_u_feas/num_u_i < uh_c ? max_u_feas/num_u_i : uh_c ); //""

  if( uh_min < 0 ) {
    uh_min = uh / 100.0;
  }

  if(out.get()&&trace) *out<<"\nIndividual step sizes to fit in bounds: uh="<<uh<<",uh_f="<<uh_f<<",uh_c="<<uh_c<<"\n";

  //
  // Remember some stuff: the NLP's current f/c storage pointers are saved so
  // they can be restored before returning (also on exception, see catch).
  //

  value_type        *f_saved = NULL;
  VectorMutable     *c_saved = NULL;

  f_saved = nlp->get_f();
  if(m)  c_saved = nlp->get_c();

  // Only set and later restored when an output stream was supplied.
  int p_saved;
  if(out.get())
    p_saved = out->precision();

  // ///////////////////////////////////////////////
  // Compute the directional derivatives

  try {

  value_type
    f;
  vec_mut_ptr_t
    x = nlp->space_x()->create_member();
  vec_mut_ptr_t
    c = m  && Gc_prod ? nlp->space_c()->create_member() : Teuchos::null;

  // Set the quanitities used to compute with

  nlp->set_f(&f);
  if(m)  nlp->set_c( c.get() );

  const int dbl_p = 15;
  if(out.get())
    *out << std::setprecision(dbl_p);

  //
  // Compute the weighted sum of the terms.  num_evals is the number of
  // stencil evaluations and dwgt is the common denominator of the stencil
  // weights (e.g. central: (g(+h)-g(-h))/(2*h) => dwgt = 2).
  //

  int          num_evals  = 0;
  value_type   dwgt       = 0.0;
  switch(fd_method_order) {
    case FD_ORDER_ONE: // may only need one eval if f(xo) etc is passed in
      num_evals = 2;
      dwgt      = 1.0;
      break;
    case FD_ORDER_TWO: // may only need two evals if c(xo) etc is passed in
      num_evals = 3;
      dwgt      = 2.0;
      break;
    case FD_ORDER_TWO_CENTRAL:
      num_evals = 2;
      dwgt      = 2.0;
      break;
    case FD_ORDER_FOUR:
      num_evals = 5;
      dwgt      = 12.0;
      break;
    case FD_ORDER_FOUR_CENTRAL:
      // NOTE(review): this stencil only has 4 points (eval_i = 1..4 below);
      // with num_evals = 5 the fifth pass runs with uh_i = wgt_i = 0, which
      // does not change the result but can cost one extra NLP evaluation at
      // xo when fo/co are not supplied.  Consider num_evals = 4 — confirm.
      num_evals = 5;
      dwgt      = 12.0;
      break;
    default:
      TEST_FOR_EXCEPT(true); // Should not get here!
  }
  if(Gc_prod) *Gc_prod = 0.0;
  if(Gf_prod) *Gf_prod = 0.0;
  for( int eval_i = 1; eval_i <= num_evals; ++eval_i ) {
    // Set the step multiplier uh_i and the stencil weight wgt_i for this
    // evaluation point (standard one-sided/central FD coefficient tables,
    // with denominator dwgt applied after the loop).
    value_type
      uh_i   = 0.0,
      wgt_i  = 0.0;
    switch(fd_method_order) {
      case FD_ORDER_ONE: { // ( -g(xo) + g(xo+h) ) / h
        switch(eval_i) {
          case 1:
            uh_i  = +0.0;
            wgt_i = -1.0;
            break;
          case 2:
            uh_i  = +1.0;
            wgt_i = +1.0;
            break;
        }
        break;
      }
      case FD_ORDER_TWO: { // ( -3g(xo) + 4g(xo+h) - g(xo+2h) ) / (2h)
        switch(eval_i) {
          case 1:
            uh_i  = +0.0;
            wgt_i = -3.0;
            break;
          case 2:
            uh_i  = +1.0;
            wgt_i = +4.0;
            break;
          case 3:
            uh_i  = +2.0;
            wgt_i = -1.0;
            break;
        }
        break;
      }
      case FD_ORDER_TWO_CENTRAL: { // ( -g(xo-h) + g(xo+h) ) / (2h)
        switch(eval_i) {
          case 1:
            uh_i  = -1.0;
            wgt_i = -1.0;
            break;
          case 2:
            uh_i  = +1.0;
            wgt_i = +1.0;
            break;
        }
        break;
      }
      case FD_ORDER_FOUR: { // ( -25g(xo) +48g(xo+h) -36g(xo+2h) +16g(xo+3h) -3g(xo+4h) ) / (12h)
        switch(eval_i) {
          case 1:
            uh_i  = +0.0;
            wgt_i = -25.0;
            break;
          case 2:
            uh_i  = +1.0;
            wgt_i = +48.0;
            break;
          case 3:
            uh_i  = +2.0;
            wgt_i = -36.0;
            break;
          case 4:
            uh_i  = +3.0;
            wgt_i = +16.0;
            break;
          case 5:
            uh_i  = +4.0;
            wgt_i = -3.0;
            break;
        }
        break;
      }
      case FD_ORDER_FOUR_CENTRAL: { // ( g(xo-2h) -8g(xo-h) +8g(xo+h) -g(xo+2h) ) / (12h)
        switch(eval_i) {
          case 1:
            uh_i  = -2.0;
            wgt_i = +1.0;
            break;
          case 2:
            uh_i  = -1.0;
            wgt_i = -8.0;
            break;
          case 3:
            uh_i  = +1.0;
            wgt_i = +8.0;
            break;
          case 4:
            uh_i  = +2.0;
            wgt_i = -1.0;
            break;
        }
        break;
      }
    }

    if(out.get()&&dump_all) {
      *out<<"\nxo =\n" << xo;
      *out<<"\nv =\n" << v;
      if(fo) *out << "\nfo = " << *fo << "\n";
      if(co) *out << "\nco =\n" << *co;
    }
    // Compute the weighted term and add it to the sum.  new_point tracks
    // whether x has changed since the NLP last evaluated at it, so a shared
    // evaluation point between c and f can be reused (calc_f(x,false)).
    bool new_point = true;
    if(Gc_prod) {
      if( co && uh_i == 0.0 ) {
        // Reuse the supplied base value c(xo) instead of re-evaluating.
        if(out.get()&&trace) *out<<"\nBase c = co ...\n";
        *c = *co;
      }
      else {
        if( new_point || uh_c != uh ) {
          *x = xo; Vp_StV( x.get(), uh_i * uh_c, v ); // x = xo + uh_i*uh_c*v
        }
        if(out.get()&&trace) *out<<"\nComputing c = c(xo+"<<(uh_i*uh_c)<<"*v) ...\n";
        if(out.get()&&dump_all) *out<<"\nxo+"<<(uh_i*uh_c)<<"*v =\n" << *x;
        nlp->calc_c(*x,new_point);
      }
      new_point = false;
      if(out.get() && dump_all) *out << "\nc =\n" << *c;
      if(check_nan_inf)
        assert_print_nan_inf(*c,"c(xo+u*v)",true,out.get());
      if(out.get()&&trace) *out<<"\nGc_prod += "<<wgt_i<<"*c ...\n";
      Vp_StV( Gc_prod, wgt_i, *c );
      if(out.get() && dump_all) *out<<"\nGc_prod =\n" << *Gc_prod;
    }

    if(Gf_prod) {
      if( fo && uh_i == 0.0 ) {
        // Reuse the supplied base value f(xo) instead of re-evaluating.
        if(out.get()&&trace) *out<<"\nBase f = fo ...\n";
        f = *fo;
      }
      else {
        // NOTE(review): when Gc_prod was handled above, x holds
        // xo + uh_i*uh_c*v; this condition compares uh_f against uh rather
        // than uh_c, so x might not be recomputed when uh_f == uh != uh_c.
        // Looks suspicious — confirm intended behavior.
        if( new_point || uh_f != uh ) {
          *x = xo; Vp_StV( x.get(), uh_i * uh_f, v ); // x = xo + uh_i*uh_f*v
          new_point = true;
        }
        if(out.get()&&trace) *out<<"\nComputing f = f(xo+"<<(uh_i*uh_f)<<"*v) ...\n";
        if(out.get() && dump_all) *out<<"\nxo+"<<(uh_i*uh_f)<<"*v =\n" << *x;
        nlp->calc_f(*x,new_point);
      }
      new_point = false;
      if(out.get() && dump_all) *out<<"\nf = " << f << "\n";
      if(check_nan_inf)
        assert_print_nan_inf(f,"f(xo+u*v)",true,out.get());
      if(out.get()&&trace) *out<<"\nGf_prod += "<<wgt_i<<"*f ...\n";
      *Gf_prod += wgt_i * f;
      if(out.get() && dump_all) *out<<"\nGf_prod = " << *Gf_prod << "\n";
    }

  }

  //
  // Multiply by the scaling factor 1/(dwgt*h) to finish the FD formula.
  //

  if(Gc_prod) {
    if(out.get()&&trace) *out<<"\nGc_prod *= "<<(1.0 / (dwgt * uh_c))<<" ...\n";
    Vt_S( Gc_prod, 1.0 / (dwgt * uh_c) );
    if(out.get() && dump_all)
      *out<<"\nFinal Gc_prod =\n" << *Gc_prod;
  }

  if(Gf_prod) {
    if(out.get()&&trace) *out<<"\nGf_prod *= "<<(1.0 / (dwgt * uh_f))<<" ...\n";
    *Gf_prod *= ( 1.0 / (dwgt * uh_f) );
    if(out.get() && dump_all)
      *out<<"\nFinal Gf_prod = " << *Gf_prod << "\n";
  }

  }  // end try
  catch(...) {
    // Restore the NLP's storage pointers and stream precision before
    // re-throwing so the caller sees the NLP in its original state.
    nlp->set_f( f_saved );
    if(m)  nlp->set_c( c_saved );
    if(out.get())
      *out << std::setprecision(p_saved);
    throw;
  }

  // Normal-path restoration of the NLP storage pointers and precision.
  nlp->set_f( f_saved );
  if(m)  nlp->set_c( c_saved );
  if(out.get())
    *out << std::setprecision(p_saved);

  return true;
}
00613 
00614 }  // end namespace NLPInterfacePack

Generated on Thu Sep 18 12:35:18 2008 for MOOCHO (Single Doxygen Collection) by doxygen 1.3.9.1