// ml_preconditioner.cpp

//@HEADER
// ************************************************************************
// 
//               ML: A Multilevel Preconditioner Package
//                 Copyright (2002) Sandia Corporation
// 
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
// 
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
//  
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//  
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
// Questions? Contact Michael A. Heroux (maherou@sandia.gov) 
// 
// ************************************************************************
//@HEADER

// The goal of this example is to present the basic usage of
// the ML_Epetra::MultiLevelPreconditioner class.
// The example builds a simple matrix and solves the corresponding
// linear system using AztecOO with ML as a preconditioner. It finally
// checks the accuracy of the computed solution.
//
// The problem should converge as follows:
//
// proc       iterations       condition number
//   1             14               1.78
//   2             15               2.39
//   4             15               2.20

// \author Marzio Sala, SNL 9214
//
// \date Last modified on 19-Jan-05

#include "ml_include.h"

// The C++ interface of ML (more precisely,
// ML_Epetra::MultiLevelPreconditioner) requires Trilinos to be
// configured with --enable-epetra --enable-teuchos. This example also
// requires --enable-triutils (for the definition of the linear system)
// and --enable-aztecoo (to solve the linear system).

#if defined(HAVE_ML_EPETRA) && defined(HAVE_ML_TEUCHOS) && defined(HAVE_ML_TRIUTILS) && defined(HAVE_ML_AZTECOO)

// epetra objects
#ifdef HAVE_MPI
#include "mpi.h"
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include "Epetra_Map.h"
#include "Epetra_Vector.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_LinearProblem.h"
// required to build the example matrix
#include "Trilinos_Util_CrsMatrixGallery.h"
// required by the linear system solver
#include "AztecOO.h"
// required by ML
#include "ml_MultiLevelPreconditioner.h"

using namespace Teuchos;
using namespace Trilinos_Util;

// ============== //
// example driver //
// ============== //

int main(int argc, char *argv[])
{
  
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Create the linear problem using the class
  // `Trilinos_Util::CrsMatrixGallery'.
  //
  // Several matrix examples are supported; please refer to the
  // Trilinos tutorial for more details.
  // Most of the examples using the ML_Epetra::MultiLevelPreconditioner
  // class are based on Epetra_CrsMatrix. Example
  // `ml/examples/MatrixFormats/ml_EpetraVbr.cpp' shows how to define an
  // Epetra_VbrMatrix.

  int i;
  if (argc > 1)
    i = (int) strtol(argv[1],NULL,10);
  else
    i = 10000;

  int NumProc = Comm.NumProc();
  
  // `laplace_2d' is a symmetric matrix; an example of a non-symmetric
  // matrix is `recirc_2d' (advection-diffusion in a box, with
  // recirculating flow), which is the one used here. In both cases,
  // the global number of nodes must be a square number.
  CrsMatrixGallery Gallery("recirc_2d", Comm);
  Gallery.Set("problem_size", i);
  
  // The following methods of CrsMatrixGallery are used to get pointers
  // to internally stored Epetra_RowMatrix and Epetra_LinearProblem.
  Epetra_RowMatrix*     A       = Gallery.GetMatrix();
  Epetra_LinearProblem* Problem = Gallery.GetLinearProblem();

  // As we wish to use AztecOO, we need to construct a solver object 
  // for this problem
  AztecOO solver(*Problem);

  // =========================== begin of ML part ===========================
  
  // create a parameter list for ML options
  ParameterList MLList;

  // Options and parameters arrays that control the behavior of the outer
  // AztecOO Krylov method.
  int *options    = new int[AZ_OPTIONS_SIZE];
  double *params  = new double[AZ_PARAMS_SIZE];

  // Sets default parameters for classic smoothed aggregation. After this
  // call, MLList contains the default values for the ML parameters,
  // as required by typical smoothed aggregation for symmetric systems.
  // Other sets of parameters are available for non-symmetric systems
  // ("DD" and "DD-ML"), and for the Maxwell equations ("maxwell").
  ML_Epetra::SetDefaults("SA",MLList,options,params);
  
  // Overwrite some parameters. Please refer to the user's guide
  // for more information. Some of the parameters do not differ from
  // their default values, and are reported here only for the sake of
  // clarity.
  
  // output level, 0 being silent and 10 verbose
  MLList.set("output", 10);
  // maximum number of levels
  MLList.set("max levels",5);
  // set finest level to 0 (with "increasing", level indices grow from fine to coarse)
  MLList.set("increasing or decreasing","increasing");

  // use the Uncoupled scheme to create the aggregates
  MLList.set("aggregation: type", "Uncoupled");

  // smoother is symmetric Gauss-Seidel. Example file 
  // `ml/examples/TwoLevelDD/ml_2level_DD.cpp' shows how to use
  // AZTEC's preconditioners as smoothers
  MLList.set("smoother: type","symmetric Gauss-Seidel");

  // use both pre and post smoothing
  MLList.set("smoother: pre or post", "both");

#ifdef HAVE_ML_AMESOS
  // solve with serial direct solver KLU
  MLList.set("coarse: type","Amesos-KLU");
#else
  // this branch is for testing purposes only; you should use
  // a direct solver for the coarse problem (either Amesos, or the SuperLU/
  // SuperLU_DIST interface of ML)
  MLList.set("aggregation: type", "MIS");
  MLList.set("smoother: type","Jacobi");
  MLList.set("coarse: type","Jacobi");
#endif
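
  // The size at which coarsening stops can also be controlled via the
  // "coarse: max size" option; the commented-out line below is a sketch
  // with an arbitrary value and is not required by this example.
  //
  //   MLList.set("coarse: max size", 128);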

  // this enables repartitioning of coarse level multigrid operators in parallel
#if defined(HAVE_ML_PARMETIS_3x)
  MLList.set("repartition: enable",1);
  MLList.set("repartition: min per proc",500);
  MLList.set("repartition: partitioner","ParMETIS");
#endif

  // Create the preconditioning object. We suggest using `new' and
  // `delete' because the destructor contains some calls to MPI (as
  // required by ML and possibly Amesos). This is an issue only if the
  // destructor is called **after** MPI_Finalize().
  ML_Epetra::MultiLevelPreconditioner* MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // report unused parameters on process 0 (pass -1 to print on all
  // processes)
  MLPrec->PrintUnused(0);

  // ML allows the user to cheaply recompute the preconditioner. You can
  // simply uncomment the following line:
  //
  // MLPrec->ReComputePreconditioner();
  //
  // It is assumed that the linear system matrix has different values, but
  // **exactly** the same structure and layout. The code rebuilds the
  // hierarchy and re-sets up the smoothers and the coarse solver using
  // information already available on the hierarchy. Particular care is
  // required when using ReComputePreconditioner() with a nonzero
  // threshold.
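  //
  // A typical usage pattern would look like the commented-out sketch
  // below; it assumes the entries of *A have been updated in place
  // without changing the sparsity pattern.
  //
  //   // ... modify the values stored in *A ...
  //   // MLPrec->ReComputePreconditioner();
  //   // solver.Iterate(500, 1e-12);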

  // =========================== end of ML part =============================
  
  // tell AztecOO to use the ML preconditioner, specify the solver
  // and the output frequency, then solve with a maximum of 500 iterations
  // and a tolerance of 1e-12 (see AztecOO's user guide for more details)
  
  solver.SetPrecOperator(MLPrec);
  solver.SetAztecOption(AZ_solver, AZ_gmres);
  solver.SetAztecOption(AZ_output, 32);
  solver.Iterate(500, 1e-12);
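
  // For illustration, query the convergence history from AztecOO
  // through its NumIters() and TrueResidual() accessors; this print is
  // optional and not required for the checks below.
  if (Comm.MyPID() == 0)
    std::cout << "AztecOO iterations: " << solver.NumIters()
              << ", true residual: " << solver.TrueResidual() << std::endl;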

  // destroy the preconditioner
  delete MLPrec;

  delete [] options;
  delete [] params;
  
  // compute the true residual and the error w.r.t. the exact solution

  double residual, diff;
  Gallery.ComputeResidual(&residual);
  Gallery.ComputeDiffBetweenStartingAndExactSolutions(&diff);
  
  if( Comm.MyPID()==0 ) {
    std::cout << "||b-Ax||_2 = " << residual << std::endl;
    std::cout << "||x_exact - x||_2 = " << diff << std::endl;
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  // For testing purposes
#if defined(HAVE_ML_PARMETIS_3x)
  // If repartitioning, check for convergence in parallel or serial
  if (residual > 1e-5) {
    exit(EXIT_FAILURE);
  }
#else
  // If no repartitioning, check for convergence only in serial
  if ( (NumProc == 1) && (residual > 1e-5) ) {
    exit(EXIT_FAILURE);
  }
#endif


  exit(EXIT_SUCCESS);
 
}

#else

#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_MPI
#include "mpi.h"
#endif

int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  puts("Please configure ML with:");
  puts("--enable-epetra");
  puts("--enable-teuchos");
  puts("--enable-aztecoo");
  puts("--enable-triutils");

#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  
  exit(EXIT_SUCCESS);
}

#endif /* #if defined(HAVE_ML_EPETRA) && defined(HAVE_ML_TEUCHOS) && defined(HAVE_ML_TRIUTILS) && defined(HAVE_ML_AZTECOO) */
