example_AmesosFactory_HB.cpp

// @HEADER
// ***********************************************************************
// 
//            Amesos: An Interface to Direct Solvers
//                 Copyright (2004) Sandia Corporation
// 
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
// 
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
//  
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//  
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
// Questions? Contact Michael A. Heroux (maherou@sandia.gov) 
// 
// ***********************************************************************
// @HEADER
#include "Amesos_ConfigDefs.h"

// This example needs Galeri to read the linear system from a
// Harwell/Boeing (HB) file. You must have configured Trilinos
// with --enable-galeri in order to compile this example.

#ifdef HAVE_MPI
#include "mpi.h"
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include "Amesos.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_Import.h"
#include "Epetra_Export.h"
#include "Epetra_Map.h"
#include "Epetra_MultiVector.h"
#include "Epetra_Vector.h"
#include "Epetra_LinearProblem.h"
#include "Galeri_ReadHB.h"

// ==================== //
// M A I N  D R I V E R //
// ==================== //
//
// This example will:
// 1.- read a Harwell/Boeing (HB) matrix from file;
// 2.- redistribute the linear system matrix to the
//     available processes;
// 3.- set up the LHS/RHS if not present;
// 4.- create an Amesos_BaseSolver object;
// 5.- solve the linear problem.
//
// This example can be run with any number of processes.
//
// Author: Marzio Sala, ETHZ/COLAB
// Last modified: Oct-05
int main(int argc, char *argv[]) 
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  std::string matrix_file;
  std::string solver_type;

  if (argc > 1)
    matrix_file = argv[1]; // read it from the command line
  else
    matrix_file = "662_bus.rsa"; // file containing the HB matrix

  if (argc > 2)
    solver_type = argv[2]; // read it from the command line
  else
    solver_type = "Klu"; // default
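
  // Other Amesos solver names include, e.g., "Lapack", "Superlu",
  // "Superludist", "Umfpack", and "Mumps"; which of them are usable
  // depends on how Trilinos was configured. Availability can be
  // checked at run time with Amesos::Query(); a sketch:
  //
  //   Amesos Factory;
  //   if (!Factory.Query(solver_type))
  //     solver_type = "Klu"; // Klu is distributed with Amesos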

  if (Comm.MyPID() == 0)
    std::cout << "Reading matrix `" << matrix_file << "'" << std::endl;
  
  // ================= //
  // reading HB matrix //
  // ================= //
  
  // HB files store serial matrices. Hence, only process 0 reads
  // the matrix (and, if present, the solution and the RHS). The
  // matrix is then redistributed using Epetra capabilities.
  // All variables that begin with "read" refer to the
  // HB data read by process 0.
  Epetra_Map* readMap;
  Epetra_CrsMatrix* readA; 
  Epetra_Vector* readx; 
  Epetra_Vector* readb;
  Epetra_Vector* readxexact;
  
  try 
  {
    Galeri::ReadHB(matrix_file.c_str(), Comm, readMap,
                   readA, readx, readb, readxexact);
  }
  catch(...)
  {
    std::cout << "Caught exception, maybe the file name is incorrect" << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    // exit with success not to break the test harness
    exit(EXIT_SUCCESS);
  }

  // Create a uniformly distributed map.
  // Note that a linear map is used for simplicity only!
  // Amesos (through Epetra) can support *any* map.
  Epetra_Map* mapPtr = 0;
  
  if(readMap->GlobalIndicesInt())
    mapPtr = new Epetra_Map((int) readMap->NumGlobalElements(), 0, Comm);
  else if(readMap->GlobalIndicesLongLong())
    mapPtr = new Epetra_Map(readMap->NumGlobalElements(), 0, Comm);
  else
    assert(false);
  
  Epetra_Map& map = *mapPtr;
  
  // Create the distributed matrix, based on Map.
  Epetra_CrsMatrix A(Copy, map, 0);

  const Epetra_Map& OriginalMap = readA->RowMatrixRowMap();
  assert (OriginalMap.SameAs(*readMap)); 
  Epetra_Export exporter(OriginalMap, map);

  Epetra_Vector x(map);          // distributed solution
  Epetra_Vector b(map);          // distributed rhs
  Epetra_Vector xexact(map);     // distributed exact solution

  // Export the data defined on process 0 to the distributed objects.
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);
  A.Export(*readA, exporter, Add);
  A.FillComplete();
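
  // Note on the "Add" combine mode: every entry of the source objects
  // lives on process 0 and the target map has no overlap, so each
  // exported value is simply added to a zero entry on its owning
  // process, i.e., the data is moved exactly once.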

  // Free the serial objects read by process 0; only the
  // distributed copies are needed from now on.
  delete readMap;
  delete readA; 
  delete readx; 
  delete readb;
  delete readxexact;
  delete mapPtr;

  // Create an Epetra linear problem, containing the matrix A,
  // the solution x and the RHS b.
  Epetra_LinearProblem Problem(&A,&x,&b);
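
  // The problem can also be assembled piecewise; a minimal sketch
  // using the Epetra_LinearProblem setters:
  //
  //   Epetra_LinearProblem Problem2;
  //   Problem2.SetOperator(&A);
  //   Problem2.SetLHS(&x);
  //   Problem2.SetRHS(&b);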
  
  // ======================================================= //
  // B E G I N N I N G   O F   T H E   A M E S O S   P A R T //
  // ======================================================= //

  if (!Comm.MyPID()) 
    std::cout << "Calling Amesos..." << std::endl;

  // For comments on the commands in this section, please
  // see file example_AmesosFactory.cpp.
  
  Amesos_BaseSolver* Solver = 0;
  Amesos Factory;
  
  Solver = Factory.Create(solver_type,Problem);

  // Factory.Create() returns 0 if the requested solver
  // is not available.
  if (Solver == 0) {
    std::cerr << "Selected solver (" << solver_type << ") is not available" << std::endl;
    // return success not to break the test harness even if
    // the solver is not available
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return(EXIT_SUCCESS);
  }
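
  // Alternatively, availability could be tested before calling
  // Create(); a sketch using Amesos::Query():
  //
  //   if (Factory.Query(solver_type))
  //     Solver = Factory.Create(solver_type, Problem);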

  // Call Solve() to compute the solution; it also performs the
  // symbolic and numeric factorizations if they have not been
  // invoked already.
  Solver->Solve();
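
  // The three phases can also be invoked one by one, which is useful
  // when the same pattern (or the same matrix) is reused; a sketch:
  //
  //   Solver->SymbolicFactorization(); // depends on the pattern only
  //   Solver->NumericFactorization();  // repeat when the values change
  //   Solver->Solve();                 // repeat for each new RHS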

  // Print out solver status and timings, and get the
  // timings into a parameter list.
  Solver->PrintStatus();
  Solver->PrintTiming();

  Teuchos::ParameterList TimingsList;
  Solver->GetTiming( TimingsList );
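
  // Output verbosity can also be driven through the solver's parameter
  // list; a sketch, assuming the standard Amesos parameter names:
  //
  //   Teuchos::ParameterList List;
  //   List.set("PrintTiming", true);
  //   List.set("PrintStatus", true);
  //   Solver->SetParameters(List); // typically set before factorization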

  // You can find out how much time was spent in ...
  double sfact_time, nfact_time, solve_time;
  double mtx_conv_time, mtx_redist_time, vec_redist_time;

  // 1) The symbolic factorization
  //    (parameter doesn't always exist)
  sfact_time = TimingsList.get( "Total symbolic factorization time", 0.0 );

  // 2) The numeric factorization
  //    (always exists if NumericFactorization() is called)
  nfact_time = Teuchos::getParameter<double>( TimingsList, "Total numeric factorization time" );

  // 3) Solving the linear system
  //    (always exists if Solve() is called)
  solve_time = Teuchos::getParameter<double>( TimingsList, "Total solve time" );

  // 4) Converting the matrix to the format accepted by the solver
  //    (the exact parameter name may depend on the solver, so a
  //    default is used in case it is absent)
  mtx_conv_time = TimingsList.get( "Total matrix conversion time", 0.0 );

  // 5) Redistributing the matrix, at each solve, to the format accepted by the solver
  mtx_redist_time = TimingsList.get( "Total matrix redistribution time", 0.0 );

  // 6) Redistributing the vector, at each solve, to the format accepted by the solver
  vec_redist_time = TimingsList.get( "Total vector redistribution time", 0.0 );

  // =========================================== //
  // E N D   O F   T H E   A M E S O S   P A R T //
  // =========================================== //

  // Compute ||b - A x||_2

  double residual;

  Epetra_Vector Ax(b.Map());
  A.Multiply(false, x, Ax);      // Ax = A * x
  Ax.Update(1.0, b, -1.0);       // Ax = b - A * x
  Ax.Norm2(&residual);
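
  // A scale-independent variant (a sketch, not part of the check
  // below): divide by ||b||_2 to obtain the relative residual.
  //
  //   double bnorm;
  //   b.Norm2(&bnorm);
  //   if (bnorm > 0.0)
  //     residual /= bnorm; // would turn the test below into a relative one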

  if (!Comm.MyPID()) 
    std::cout << "After Amesos solution, ||b - A * x||_2 = " << residual << std::endl;

  // Delete the Solver before calling MPI_Finalize(),
  // because its destructor may make MPI calls.
  delete Solver;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (residual > 1e-5)
    return(EXIT_FAILURE);

  return(EXIT_SUCCESS);

} // end of main()