graphedge_weights.cpp

This example shows how to influence Isorropia's graph partitioning with edge weights: a small matrix is assembled across three MPI processes, graph edge weights are supplied through a CostDescriber, the problem is repartitioned (with Zoltan, when available), and balance/cut metrics are reported before and after.

//@HEADER
// ************************************************************************
//
//               Isorropia: Partitioning and Load Balancing Package
//                 Copyright (2006) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
//
// ************************************************************************
//@HEADER

//--------------------------------------------------------------------
//This file is a self-contained example of creating an Epetra_RowMatrix
//object, and using Isorropia to create a rebalanced copy of it.
//Furthermore, graph-edge weights are used to influence the repartitioning.
//--------------------------------------------------------------------

//Include Isorropia_Exception.hpp because the Isorropia calls below
//can potentially throw exceptions.
#include <Isorropia_Exception.hpp>

//The Isorropia symbols being demonstrated are declared
//in these headers:
#include <Isorropia_Epetra.hpp>
#include <Isorropia_EpetraCostDescriber.hpp>
#include <Isorropia_EpetraRedistributor.hpp>
#include <Isorropia_EpetraPartitioner.hpp>

//Teuchos and standard-library symbols used directly below:
#include <Teuchos_RCP.hpp>
#include <Teuchos_ParameterList.hpp>
#include <iostream>

#ifdef HAVE_MPI
#include <mpi.h>
#endif

#ifdef HAVE_EPETRA
#ifdef HAVE_MPI
#include <Epetra_MpiComm.h>
#else
#include <Epetra_SerialComm.h>
#endif
#include <Epetra_Map.h>
#include <Epetra_Vector.h>
#include <Epetra_CrsMatrix.h>
#include <Epetra_FECrsMatrix.h>
#endif

#include "ispatest_lbeval_utils.hpp"

int main(int argc, char** argv) {
#if defined(HAVE_MPI) && defined(HAVE_EPETRA)

  int numProcs = 1;
  int localProc = 0;

  //first, set up our MPI environment...
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &localProc);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

// This program can only run on 3 processors, to keep the example
// simple.
//
  if (numProcs != 3) {
    std::cout << "num-procs="<<numProcs<<". This program can only "
      << "run on 3 procs. Exiting."<<std::endl;
    MPI_Finalize();
    return(0);
  }

//Consider the following mesh of 4 2-D quad elements:
//
//  *-------*-------*
// 8|      7|      6|
//  |  E2   |  E3   |
//  *-------*-------*
// 3|      2|      5|
//  |  E0   |  E1   |
//  *-------*-------*
// 0       1       4
//
// Node-ids are to the lower-left of each node (*).
//
// Mimicking a finite-element application, we will say that
// each node has 1 scalar degree-of-freedom, and assemble
// a matrix which would have 9 global rows and columns.
//
// Each processor will have 3 rows. We'll set up a strange
// initial map, where nodes are distributed as follows:
//
// proc 0: nodes 0, 3, 8
// proc 1: nodes 1, 2, 7
// proc 2: nodes 4, 5, 6
//
// After we assemble our matrix, we'll create another matrix
// and populate it with graph edge weights such that the
// partitioner repartitions the problem so that nodes are
// laid out as follows:
//
// proc 0: nodes 0, 1, 4
// proc 1: nodes 3, 2, 5
// proc 2: nodes 8, 7, 6
//

  int nodesPerElem = 4;
  int global_n = 9;

  //First, set up the initial map:

  std::vector<int> mynodes(3);
  if (localProc == 0) {
    mynodes[0] = 0; mynodes[1] = 3; mynodes[2] = 8;
  }
  if (localProc == 1) {
    mynodes[0] = 1; mynodes[1] = 2; mynodes[2] = 7;
  }
  if (localProc == 2) {
    mynodes[0] = 4; mynodes[1] = 5; mynodes[2] = 6;
  }

  Epetra_MpiComm comm(MPI_COMM_WORLD);
  Epetra_Map origmap(global_n, 3, &mynodes[0], 0, comm);
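  //Epetra_Map arguments: number of global IDs, number of local IDs,
  //the list of local-to-global IDs, the index base, and the communicator.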

  Teuchos::RCP<Epetra_FECrsMatrix> matrix =
    Teuchos::rcp(new Epetra_FECrsMatrix(Copy, origmap, 0));

  //We'll assemble elements E0 and E1 on proc 0,
  //               element E2 on proc 1,
  //               element E3 on proc 2.

  std::vector<int> indices(nodesPerElem);
  std::vector<double> coefs(nodesPerElem*nodesPerElem,2.0);
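  //The element-matrix coefficients are dummy values (all 2.0); only the
  //resulting sparsity pattern matters for the partitioning.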

  if (localProc == 0) {
    //element E0:
    indices[0] = 0; indices[1] = 1; indices[2] = 2; indices[3] = 3;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);

    //element E1:
    indices[0] = 1; indices[1] = 4; indices[2] = 5; indices[3] = 2;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }
  else if (localProc == 1) {
    //element E2:
    indices[0] = 3; indices[1] = 2; indices[2] = 7; indices[3] = 8;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }
  else { //localProc==2
    //element E3:
    indices[0] = 2; indices[1] = 5; indices[2] = 6; indices[3] = 7;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }

  int err = matrix->GlobalAssemble();
  if (err != 0) {
    std::cout << "err="<<err<<" returned from matrix->GlobalAssemble()"
      << std::endl;
  }

//  std::cout << "matrix: " << std::endl;
//  std::cout << *matrix << std::endl;

  //We'll need a Teuchos::ParameterList object to pass to the
  //Isorropia::Epetra::Partitioner class.
  Teuchos::ParameterList paramlist;

#ifdef HAVE_ISORROPIA_ZOLTAN
  // If Zoltan is available, we'll specify that the Zoltan package be
  // used for the partitioning operation, by creating a parameter
  // sublist named "Zoltan".
  // In the sublist, we'll set parameters that we want sent to Zoltan.

  Teuchos::ParameterList& sublist = paramlist.sublist("Zoltan");
  sublist.set("LB_METHOD", "GRAPH");
  sublist.set("GRAPH_PACKAGE", "PHG");

  //sublist.set("DEBUG_LEVEL", "1");  // Zoltan will print out parameters
  //sublist.set("DEBUG_LEVEL", "5");  // proc 0 will trace Zoltan calls
  //sublist.set("DEBUG_MEMORY", "2"); // Zoltan will trace alloc & free

#else
  // If Zoltan is not available, a simple linear partitioner will be
  // used to partition such that the number of nonzeros is equal (or
  // close to equal) on each processor. No parameter is necessary to
  // specify this.
#endif


  Teuchos::RCP<Isorropia::Epetra::CostDescriber> costs =
    Teuchos::rcp(new Isorropia::Epetra::CostDescriber);

  //Next create a matrix which is a copy of the matrix we just
  //assembled, but we'll replace the values with graph edge weights.

  Teuchos::RCP<Epetra_FECrsMatrix> ge_weights =
    Teuchos::rcp(new Epetra_FECrsMatrix(*matrix));

  Teuchos::RCP<Epetra_CrsMatrix> crs_ge_weights;
  crs_ge_weights = ge_weights;
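  //(The RCP to the derived Epetra_FECrsMatrix converts implicitly to an
  //RCP to its Epetra_CrsMatrix base class.)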

  //Fill the matrix with a "default" weight of 1.0.
  crs_ge_weights->PutScalar(1.0);

  //Now we'll put a "large" weight on edges that connect nodes
  //0 and 1, 1 and 4,
  //3 and 2, 2 and 5,
  //8 and 7, 7 and 6.

  double weight = 500.0;
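  //Cutting a weight-500 edge is 500 times as expensive as cutting a
  //default-weight edge, so the partitioner will strongly prefer to keep
  //each of the node-pairs listed above together on one process.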

  if (localProc == 0) {
    //row 0, edge 1
    indices[0] = 1;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(0, 1, &coefs[0], &indices[0]);

    //row 3, edge 2
    indices[0] = 2;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(3, 1, &coefs[0], &indices[0]);

    //row 8, edge 7
    indices[0] = 7;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(8, 1, &coefs[0], &indices[0]);
  }

  if (localProc == 1) {
    //row 1, edges 0 and 4
    indices[0] = 0; indices[1] = 4;
    coefs[0] = weight; coefs[1] = weight;
    crs_ge_weights->ReplaceGlobalValues(1, 2, &coefs[0], &indices[0]);

    //row 2, edges 3 and 5
    indices[0] = 3; indices[1] = 5;
    coefs[0] = weight; coefs[1] = weight;
    crs_ge_weights->ReplaceGlobalValues(2, 2, &coefs[0], &indices[0]);

    //row 7, edges 6 and 8
    indices[0] = 6; indices[1] = 8;
    coefs[0] = weight; coefs[1] = weight;
    crs_ge_weights->ReplaceGlobalValues(7, 2, &coefs[0], &indices[0]);
  }

  if (localProc == 2) {
    //row 4, edge 1
    indices[0] = 1;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(4, 1, &coefs[0], &indices[0]);

    //row 5, edge 2
    indices[0] = 2;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(5, 1, &coefs[0], &indices[0]);

    //row 6, edge 7
    indices[0] = 7;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(6, 1, &coefs[0], &indices[0]);
  }

// std::cout << "crs_ge_weights: " << std::endl
//       << *crs_ge_weights << std::endl;

  //Now give the graph edge weights to the CostDescriber:
  costs->setGraphEdgeWeights(crs_ge_weights);
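  //(A CostDescriber can also carry vertex weights and hypergraph edge
  //weights; this example only supplies graph edge weights.)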

  Teuchos::RCP<const Epetra_RowMatrix> rowmatrix;
  rowmatrix = matrix;

  //Now create the partitioner object from the matrix, the costs, and
  //the parameter list...
  Teuchos::RCP<Isorropia::Epetra::Partitioner> partitioner =
    Teuchos::rcp(new Isorropia::Epetra::Partitioner(rowmatrix, costs, paramlist));
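  //(By default, the Partitioner computes the new partitioning right away,
  //in its constructor.)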

  //Next create a Redistributor object and use it to create a
  //repartitioned copy of the matrix.

  Isorropia::Epetra::Redistributor rd(partitioner);

  Teuchos::RCP<Epetra_CrsMatrix> bal_matrix;

  //Use a try-catch block because Isorropia will throw an exception
  //if it encounters an error.

  if (localProc == 0) {
    std::cout << " calling Isorropia::Epetra::Redistributor::redistribute..."
        << std::endl;
  }

  try {
    bal_matrix = rd.redistribute(*rowmatrix);
  }
  catch(std::exception& exc) {
    std::cout << "graphedge_weights example: Isorropia::Epetra::Redistributor "
         << "threw exception '" << exc.what() << "' on proc "
         << localProc << std::endl;
    MPI_Finalize();
    return(-1);
  }

  // Results

  double goalWeight = 1.0 / (double)numProcs;
  double bal0, bal1, cutn0, cutn1, cutl0, cutl1, cutWgt0, cutWgt1;
  int numCuts0, numCuts1;
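  //Roughly: bal* measures process weight relative to the ideal share
  //goalWeight, numCuts*/cutWgt* are the count and total weight of cut
  //graph edges, and cutn*/cutl* are hypergraph-style cut metrics.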

  // Balance and cut quality before partitioning

  ispatest::compute_graph_metrics(*rowmatrix, *costs, goalWeight,
                     bal0, numCuts0, cutWgt0, cutn0, cutl0);

  // Balance and cut quality after partitioning

  Teuchos::RCP<Epetra_CrsMatrix> new_weights = rd.redistribute(*crs_ge_weights);
  Isorropia::Epetra::CostDescriber new_costs;
  new_costs.setGraphEdgeWeights(new_weights);

  ispatest::compute_graph_metrics(*bal_matrix, new_costs, goalWeight,
                     bal1, numCuts1, cutWgt1, cutn1, cutl1);

  if (localProc == 0){
    std::cout << "Before partitioning: Number of cuts " << numCuts0 << " Cut weight " << cutWgt0 << std::endl;
    std::cout << "                     Balance " << bal0 << " cutN " << cutn0 << " cutL " << cutl0;
    std::cout << std::endl;

    std::cout << "After partitioning:  Number of cuts " << numCuts1 << " Cut weight " << cutWgt1 << std::endl;
    std::cout << "                     Balance " << bal1 << " cutN " << cutn1 << " cutL " << cutl1;
    std::cout << std::endl;
  }
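
  //As an extra sanity check, each process can list the global rows it owns
  //after redistribution; with the edge weights above, the layout should
  //match the target described near the top of this file.
  const Epetra_Map& newRowMap = bal_matrix->RowMap();
  for (int i = 0; i < newRowMap.NumMyElements(); ++i) {
    std::cout << "proc " << localProc << " now owns row "
              << newRowMap.GID(i) << std::endl;
  }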

  MPI_Finalize();

#else
  std::cout << "graphedge_weights: must have both MPI and EPETRA. Make sure "
    << "Trilinos is configured with --enable-mpi and --enable-epetra."
    << std::endl;
#endif

  return(0);
}