test/Directory/cxx_main.cpp

Go to the documentation of this file.
00001 //@HEADER
00002 // ************************************************************************
00003 // 
00004 //               Epetra: Linear Algebra Services Package 
00005 //                 Copyright (2001) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // This library is free software; you can redistribute it and/or modify
00011 // it under the terms of the GNU Lesser General Public License as
00012 // published by the Free Software Foundation; either version 2.1 of the
00013 // License, or (at your option) any later version.
00014 //  
00015 // This library is distributed in the hope that it will be useful, but
00016 // WITHOUT ANY WARRANTY; without even the implied warranty of
00017 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018 // Lesser General Public License for more details.
00019 //  
00020 // You should have received a copy of the GNU Lesser General Public
00021 // License along with this library; if not, write to the Free Software
00022 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00023 // USA
00024 // Questions? Contact Michael A. Heroux (maherou@sandia.gov) 
00025 // 
00026 // ************************************************************************
00027 //@HEADER
00028 
00029 // Epetra_BlockMap Test routine
00030 
#include "Epetra_Time.h"
#include "Epetra_BlockMap.h"
#include "Epetra_Map.h"
#ifdef EPETRA_MPI
#include "Epetra_MpiComm.h"
#include <mpi.h>
#endif
#include "Epetra_SerialComm.h"
#include "Epetra_Util.h"
#include "../epetra_test_err.h"
#include "Epetra_Version.h"
#include "Epetra_Directory.h"
#include <iostream>
#include <vector>
00043 
00044 int directory_test_1(Epetra_Comm& Comm);
00045 int directory_test_2(Epetra_Comm& Comm);
00046 int directory_test_3(Epetra_Comm& Comm);
00047 int directory_test_4(Epetra_Comm& Comm);
00048 int directory_test_5(Epetra_Comm& Comm);
00049 
00050 int main(int argc, char *argv[]) {
00051   bool verbose = false;
00052   // Check if we should print results to standard out
00053   if (argc > 1) {
00054     if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
00055       verbose = true;
00056     }
00057   }
00058 
00059   int returnierr = 0;
00060 
00061 #ifdef EPETRA_MPI
00062 
00063   // Initialize MPI
00064   MPI_Init(&argc,&argv);
00065   Epetra_MpiComm Comm(MPI_COMM_WORLD);
00066 #else
00067   Epetra_SerialComm Comm;
00068 #endif
00069 
00070   if (!verbose) {
00071     Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
00072   }
00073   int MyPID = Comm.MyPID();
00074 
00075   int verbose_int = verbose ? 1 : 0;
00076   Comm.Broadcast(&verbose_int, 1, 0);
00077   verbose = verbose_int==1 ? true : false;
00078 
00079   if (verbose && MyPID==0)
00080     cout << Epetra_Version() << endl << endl;
00081 
00082   EPETRA_TEST_ERR( directory_test_1(Comm), returnierr );
00083 
00084   EPETRA_TEST_ERR( directory_test_2(Comm), returnierr );
00085 
00086   EPETRA_TEST_ERR( directory_test_3(Comm), returnierr );
00087 
00088   EPETRA_TEST_ERR( directory_test_4(Comm), returnierr );
00089 
00090   EPETRA_TEST_ERR( directory_test_5(Comm), returnierr );
00091 
00092 #ifdef EPETRA_MPI
00093   MPI_Finalize();
00094 #endif
00095 
00096   if (MyPID == 0) {
00097     if (returnierr == 0) {
00098       cout << "Epetra_Directory tests passed."<<endl;
00099     }
00100     else {
00101       cout << "Epetra_Directory tests failed."<<endl;
00102     }
00103   }
00104 
00105   return returnierr;
00106 }
00107 
00108 int directory_test_1(Epetra_Comm& Comm)
00109 {
00110   //set up a map with arbitrary distribution of IDs, but with unique
00111   //processor ID ownership (i.e., each ID only appears on 1 processor)
00112 
00113   int myPID = Comm.MyPID();
00114   int numProcs = Comm.NumProc();
00115 
00116   if (numProcs < 2) return(0);
00117 
00118   int myFirstID = (myPID+1)*(myPID+1);
00119   int myNumIDs = 3+myPID;
00120 
00121   int* myIDs = new int[myNumIDs];
00122   int i;
00123   for(i=0; i<myNumIDs; ++i) {
00124     myIDs[i] = myFirstID+i;
00125   }
00126 
00127   Epetra_BlockMap blkmap(-1, myNumIDs, myIDs, 1, 0, Comm);
00128 
00129   Epetra_Directory* directory = Comm.CreateDirectory(blkmap);
00130 
00131   int proc = myPID+1;
00132   if (proc >= numProcs) proc = 0;
00133 
00134   int procNumIDs = 3+proc;
00135   int procFirstID = (proc+1)*(proc+1);
00136   int procLastID = procFirstID+procNumIDs - 1;
00137 
00138   int queryProc1 = -1;
00139   int queryProc2 = -1;
00140 
00141   int err = directory->GetDirectoryEntries(blkmap, 1, &procFirstID,
00142              &queryProc1, NULL, NULL);
00143   err += directory->GetDirectoryEntries(blkmap, 1, &procLastID,
00144           &queryProc2, NULL, NULL);
00145   delete directory;
00146   delete [] myIDs;
00147 
00148   if (queryProc1 != proc || queryProc2 != proc) {
00149     return(-1);
00150   }
00151 
00152   return(0);
00153 }
00154 
00155 int directory_test_2(Epetra_Comm& Comm)
00156 {
00157   //set up a Epetra_BlockMap with arbitrary distribution of IDs, but with unique
00158   //processor ID ownership (i.e., each ID only appears on 1 processor)
00159   //
00160   //the thing that makes this Epetra_BlockMap nasty is that higher-numbered
00161   //processors own lower IDs.
00162 
00163   int myPID = Comm.MyPID();
00164   int numProcs = Comm.NumProc();
00165 
00166   if (numProcs < 2) return(0);
00167 
00168   int myFirstID = (numProcs-myPID)*(numProcs-myPID);
00169   int myNumIDs = 3;
00170 
00171   int* myIDs = new int[myNumIDs];
00172   int i;
00173   for(i=0; i<myNumIDs; ++i) {
00174     myIDs[i] = myFirstID+i;
00175   }
00176 
00177   Epetra_BlockMap blkmap(-1, myNumIDs, myIDs, 1, 0, Comm);
00178 
00179   Epetra_Directory* directory = Comm.CreateDirectory(blkmap);
00180 
00181   int proc = myPID+1;
00182   if (proc >= numProcs) proc = 0;
00183 
00184   int procNumIDs = 3;
00185   int procFirstID = (numProcs-proc)*(numProcs-proc);
00186   int procLastID = procFirstID+procNumIDs - 1;
00187 
00188   int queryProc1 = -1;
00189   int queryProc2 = -1;
00190 
00191   int err = directory->GetDirectoryEntries(blkmap, 1, &procFirstID,
00192              &queryProc1, NULL, NULL);
00193   err += directory->GetDirectoryEntries(blkmap, 1, &procLastID,
00194           &queryProc2, NULL, NULL);
00195   delete directory;
00196   delete [] myIDs;
00197 
00198   if (queryProc1 != proc || queryProc2 != proc) {
00199     return(-1);
00200   }
00201 
00202   return(0);
00203 }
00204 
00205 int directory_test_3(Epetra_Comm& Comm)
00206 {
00207   //set up a map with arbitrary distribution of IDs, including non-unique
00208   //processor ID ownership (i.e., some IDs appear on more than 1 processor)
00209 
00210   int myPID = Comm.MyPID();
00211   int numProcs = Comm.NumProc();
00212 
00213   if (numProcs < 2) return(0);
00214 
00215   int myFirstID = (myPID+1)*(myPID+1);
00216   int myNumIDs = 4;
00217 
00218   int* myIDs = new int[myNumIDs];
00219   int i;
00220   for(i=0; i<myNumIDs-1; ++i) {
00221     myIDs[i] = myFirstID+i;
00222   }
00223 
00224   int nextProc = myPID+1;
00225   if (nextProc >= numProcs) nextProc = 0;
00226 
00227   int nextProcFirstID = (nextProc+1)*(nextProc+1);
00228   myIDs[myNumIDs-1] = nextProcFirstID;
00229 
00230   Epetra_BlockMap blkmap(-1, myNumIDs, myIDs, 1, 0, Comm);
00231 
00232   Epetra_Directory* directory = Comm.CreateDirectory(blkmap);
00233 
00234   bool uniqueGIDs = directory->GIDsAllUniquelyOwned();
00235 
00236   delete directory;
00237   delete [] myIDs;
00238 
00239   if (uniqueGIDs) {
00240     return(-1);
00241   }
00242 
00243   return(0);
00244 }
00245 
00246 int directory_test_4(Epetra_Comm& Comm)
00247 {
00248   int myPID = Comm.MyPID();
00249   int numProcs = Comm.NumProc();
00250 
00251   if (numProcs < 2) return(0);
00252 
00253   //Set up a map with overlapping ranges of GIDs.
00254   int num = 5;
00255   int numMyGIDs = 2*num;
00256   int myFirstGID = myPID*num;
00257 
00258   int* myGIDs = new int[numMyGIDs];
00259 
00260   for(int i=0; i<numMyGIDs; ++i) {
00261     myGIDs[i] = myFirstGID+i;
00262   }
00263 
00264   Epetra_Map overlappingmap(-1, numMyGIDs, myGIDs, 0, Comm);
00265 
00266   delete [] myGIDs;
00267 
00268   int numGlobal0 = overlappingmap.NumGlobalElements();
00269 
00270   Epetra_Map uniquemap1 =
00271     Epetra_Util::Create_OneToOne_Map(overlappingmap);
00272 
00273   bool use_high_sharing_proc = true;
00274 
00275   Epetra_Map uniquemap2 =
00276     Epetra_Util::Create_OneToOne_Map(overlappingmap, use_high_sharing_proc);
00277 
00278   int numGlobal1 = uniquemap1.NumGlobalElements();
00279   int numGlobal2 = uniquemap2.NumGlobalElements();
00280 
00281   //The two one-to-one maps should have the same number of global elems.
00282   if (numGlobal1 != numGlobal2) {
00283     return(-1);
00284   }
00285 
00286   //The number of global elems should be greater in the original map
00287   //than in the one-to-one map.
00288   if (numGlobal0 <= numGlobal1) {
00289     return(-2);
00290   }
00291 
00292   int numLocal1 = uniquemap1.NumMyElements();
00293   int numLocal2 = uniquemap2.NumMyElements();
00294 
00295   //If this is proc 0 or proc numProcs-1, then the number of
00296   //local elements should be different in the two one-to-one maps.
00297   if ((myPID==0 || myPID==numProcs-1) && numLocal1 == numLocal2) {
00298     return(-3);
00299   }
00300 
00301   return(0);
00302 }
00303 
00304 int directory_test_5(Epetra_Comm& Comm)
00305 {
00306   int myPID = Comm.MyPID();
00307   int numProcs = Comm.NumProc();
00308 
00309   if (numProcs < 2) return(0);
00310 
00311   //Set up a map with overlapping ranges of GIDs.
00312   int num = 5;
00313   int numMyGIDs = 2*num;
00314   int myFirstGID = myPID*num;
00315 
00316   int* myGIDs = new int[numMyGIDs];
00317   int* sizes = new int[numMyGIDs];
00318 
00319   for(int i=0; i<numMyGIDs; ++i) {
00320     myGIDs[i] = myFirstGID+i;
00321     sizes[i] = myFirstGID+i+1;
00322   }
00323 
00324   Epetra_BlockMap overlappingmap(-1, numMyGIDs, myGIDs, sizes, 0, Comm);
00325 
00326   delete [] myGIDs;
00327   delete [] sizes;
00328 
00329   int numGlobal0 = overlappingmap.NumGlobalElements();
00330 
00331   Epetra_BlockMap uniquemap1 =
00332     Epetra_Util::Create_OneToOne_BlockMap(overlappingmap);
00333 
00334   bool use_high_sharing_proc = true;
00335 
00336   Epetra_BlockMap uniquemap2 =
00337     Epetra_Util::Create_OneToOne_BlockMap(overlappingmap, use_high_sharing_proc);
00338 
00339   int numGlobal1 = uniquemap1.NumGlobalElements();
00340   int numGlobal2 = uniquemap2.NumGlobalElements();
00341 
00342   //The two one-to-one maps should have the same number of global elems.
00343   if (numGlobal1 != numGlobal2) {
00344     return(-1);
00345   }
00346 
00347   //The number of global elems should be greater in the original map
00348   //than in the one-to-one map.
00349   if (numGlobal0 <= numGlobal1) {
00350     return(-2);
00351   }
00352 
00353   int numLocal1 = uniquemap1.NumMyElements();
00354   int numLocal2 = uniquemap2.NumMyElements();
00355 
00356   //If this is proc 0 or proc numProcs-1, then the number of
00357   //local elements should be different in the two one-to-one maps.
00358   if ((myPID==0 || myPID==numProcs-1) && numLocal1 == numLocal2) {
00359     return(-3);
00360   }
00361 
00362   return(0);
00363 }

Generated on Thu Sep 18 12:37:56 2008 for Epetra Package Browser (Single Doxygen Collection) by doxygen 1.3.9.1