FEI Version of the Day
fei_Vector_core.cpp
00001 /*
00002 // @HEADER
00003 // ************************************************************************
00004 //             FEI: Finite Element Interface to Linear Solvers
00005 //                  Copyright (2005) Sandia Corporation.
00006 //
00007 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the
00008 // U.S. Government retains certain rights in this software.
00009 //
00010 // Redistribution and use in source and binary forms, with or without
00011 // modification, are permitted provided that the following conditions are
00012 // met:
00013 //
00014 // 1. Redistributions of source code must retain the above copyright
00015 // notice, this list of conditions and the following disclaimer.
00016 //
00017 // 2. Redistributions in binary form must reproduce the above copyright
00018 // notice, this list of conditions and the following disclaimer in the
00019 // documentation and/or other materials provided with the distribution.
00020 //
00021 // 3. Neither the name of the Corporation nor the names of the
00022 // contributors may be used to endorse or promote products derived from
00023 // this software without specific prior written permission.
00024 //
00025 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
00026 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
00027 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
00028 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
00029 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
00030 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
00031 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00032 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00033 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00034 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00035 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00036 //
00037 // Questions? Contact Alan Williams (william@sandia.gov) 
00038 //
00039 // ************************************************************************
00040 // @HEADER
00041 */
00042 
00043 
00044 #include "fei_fstream.hpp"
00045 
00046 #include "fei_Vector_core.hpp"
00047 #include "fei_VectorSpace.hpp"
00048 #include "fei_CSVec.hpp"
00049 #include "snl_fei_RecordCollection.hpp"
00050 #include "fei_TemplateUtils.hpp"
00051 #include "fei_impl_utils.hpp"
00052 
00053 #include <fstream>
00054 #include <sstream>
00055 #undef fei_file
00056 #define fei_file "fei_Vector_core.cpp"
00057 
00058 #include "fei_ErrMacros.hpp"
00059 
00060 fei::Vector_core::Vector_core(fei::SharedPtr<fei::VectorSpace> vecSpace,
00061                               int numLocalEqns)
00062   : eqnComm_(),
00063     vecSpace_(vecSpace),
00064     comm_(vecSpace->getCommunicator()),
00065     firstLocalOffset_(0),
00066     lastLocalOffset_(0),
00067     numLocal_(0),
00068     work_indices_(),
00069     work_indices2_(),
00070     haveFEVector_(false),
00071     remotelyOwned_(),
00072     sendProcs_(),
00073     recvProcs_(),
00074     recv_sizes_(),
00075     sendRecvProcsNeedUpdated_(true),
00076     overlapAlreadySet_(false),
00077     dbgprefix_("Vcore: ")
00078 {
00079   eqnComm_.reset(new fei::EqnComm(comm_,numLocalEqns));
00080   remotelyOwned_.resize(fei::numProcs(comm_));
00081   for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
00082     remotelyOwned_[i] = new CSVec;
00083   }
00084 
00085   const std::vector<int>& offsets = eqnComm_->getGlobalOffsets();
00086   firstLocalOffset_ = offsets[fei::localProc(comm_)];
00087   lastLocalOffset_ = offsets[fei::localProc(comm_)+1] - 1;
00088   numLocal_ = lastLocalOffset_ - firstLocalOffset_ + 1;
00089 
00090   if (output_level_ >= fei::BRIEF_LOGS && output_stream_ != NULL) {
00091     FEI_OSTREAM& os = *output_stream_;
00092     os<<dbgprefix_<<" ctor firstLocal="<<firstLocalOffset_<<", lastLocal="
00093      <<lastLocalOffset_<<FEI_ENDL;
00094   }
00095 }
00096 
00097 fei::Vector_core::~Vector_core()
00098 {
00099   for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
00100     delete remotelyOwned_[i];
00101   }
00102 }
00103 
00104 void fei::Vector_core::setOverlap(int numRemoteEqns,
00105                                   const int* remoteEqns)
00106 {
00107   if (numRemoteEqns == 0 && remoteEqns == NULL) {
00108     if (overlapAlreadySet_) return;
00109   }
00110 
00111   if (output_level_ >= fei::BRIEF_LOGS && output_stream_ != NULL) {
00112     FEI_OSTREAM& os = *output_stream_;
00113     os << dbgprefix_<<"setOverlap"<<FEI_ENDL;
00114   }
00115 
00116   int local_proc = fei::localProc(comm_);
00117 
00118   if (numRemoteEqns != 0 && remoteEqns != NULL) {
00119     for(int i=0; i<numRemoteEqns; ++i) {
00120       int proc = eqnComm_->getOwnerProc(remoteEqns[i]);
00121       if (proc == local_proc) continue;
00122 
00123       fei::add_entry(*remotelyOwned_[proc], remoteEqns[i], 0.0);
00124     }
00125   }
00126   else {
00127     std::vector<int> eqns;
00128     vecSpace_->getIndices_SharedAndOwned(eqns);
00129 
00130     for(size_t i=0; i<eqns.size(); ++i) {
00131       int proc = eqnComm_->getOwnerProc(eqns[i]);
00132       if (proc == local_proc) continue;
00133 
00134       fei::add_entry(*remotelyOwned_[proc], eqns[i], 0.0);
00135     }
00136   }
00137 
00138   overlapAlreadySet_ = true;
00139 }
00140 
//Pull coefficients of remotely-owned equations from their owning procs into
//this proc's remotelyOwned_ vectors (the reverse communication direction of
//gatherFromOverlap). Returns 0 on success, non-zero on a data-copy error.
int fei::Vector_core::scatterToOverlap()
{
  //Nothing to do in serial, or when the data lives in an FE-vector.
  if (fei::numProcs(comm_) == 1 || haveFEVector()) {
    return(0);
  }

#ifndef FEI_SER
  if (!overlapAlreadySet_) {
    setOverlap();
  }

  //...and now the overlap is whatever is in our remotelyOwned_ vectors.

//  if (sendRecvProcsNeedUpdated_) {
    //first find out which procs we'll be receiving from.
    //(These are the procs for which we hold non-empty remotelyOwned_ vecs.)
    std::vector<int> recvProcs;
    for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
      if ((int)i == fei::localProc(comm_)) continue;
      if (remotelyOwned_[i]->size() == 0) continue;

      recvProcs.push_back((int)i);
    }

    //find out the send-procs.
    std::vector<int> sendProcs;
    fei::mirrorProcs(comm_, recvProcs, sendProcs);
    sendRecvProcsNeedUpdated_ = false;
//  }

  //declare arrays to send from, and corresponding sizes
  std::vector<std::vector<int> > send_ints(sendProcs.size());
  std::vector<std::vector<double> > send_doubles(sendProcs.size());
  std::vector<int> send_sizes(sendProcs.size());

  //NOTE(review): &mpiReqs[0]/&mpiStatuses[0] are taken below even when the
  //vectors may be empty (no send/recv procs) — technically UB on an empty
  //std::vector; confirm whether that situation can occur here.
  std::vector<MPI_Request> mpiReqs(sendProcs.size()+recvProcs.size());
  std::vector<MPI_Status> mpiStatuses(sendProcs.size()+recvProcs.size());
  int tag1 = 11111;
  int tag2 = 11112;

  //first, the procs we're going to send to, have to let us know
  //how much data we're supposed to send. So we have to receive
  //sizes and then indices from the "send"-procs.
  for(unsigned i=0; i<sendProcs.size(); ++i) {
    MPI_Irecv(&send_sizes[i], 1, MPI_INT, sendProcs[i],
              tag1, comm_, &mpiReqs[i]);
  }

  //now we'll send the sizes of our remotely-owned data to the
  //procs that we will be receiving the data from, and also the
  //indices that we want to receive.
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    int proc = recvProcs[i];

    int size = remotelyOwned_[proc]->size();
    MPI_Send(&size, 1, MPI_INT, proc, tag1, comm_);
  }

  MPI_Waitall(sendProcs.size(), &mpiReqs[0], &mpiStatuses[0]);

  //now resize our send_ints and send_doubles arrays, and post the recvs
  //for indices that we're supposed to pack.
  //(The request slots mpiReqs[0..sendProcs.size()) are reused here, which
  //is safe because the Waitall above completed the earlier requests.)
  for(unsigned i=0; i<sendProcs.size(); ++i) {
    int proc = sendProcs[i];
    int size = send_sizes[i];
    send_ints[i].resize(size);
    MPI_Irecv(&(send_ints[i][0]), size, MPI_INT, proc, tag1,
              comm_, &mpiReqs[i]);
    send_doubles[i].resize(size);
  }

  //now send the indices that we want to receive data for.
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    int proc = recvProcs[i];
    int size = remotelyOwned_[proc]->size();
    int* indices = &(remotelyOwned_[proc]->indices())[0];
    MPI_Send(indices, size, MPI_INT, proc, tag1, comm_);
  }

  MPI_Waitall(sendProcs.size(), &mpiReqs[0], &mpiStatuses[0]);

  //now post our recvs.
  //(Again reusing the leading request slots; tag2 distinguishes the
  //coefficient messages from the index/size messages on tag1.)
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    int proc = recvProcs[i];
    int size = remotelyOwned_[proc]->size();
    double* coefs = &(remotelyOwned_[proc]->coefs())[0];
    MPI_Irecv(coefs, size, MPI_DOUBLE, proc, tag2, comm_, &mpiReqs[i]);
  }

  //now pack and send the coefs that the other procs need from us.
  for(unsigned i=0; i<sendProcs.size(); ++i) {
    int proc = sendProcs[i];

    int num = send_sizes[i];
    //copyOutOfUnderlyingVector fetches the locally-owned coefficients for
    //the indices the remote proc asked for.
    int err = copyOutOfUnderlyingVector(num, &(send_ints[i][0]),
                                        &(send_doubles[i][0]), 0);
    if (err != 0) {
      FEI_COUT << "fei::Vector_core::scatterToOverlap ERROR getting data to send."<<FEI_ENDL;
      return(err);
    }

    MPI_Send(&(send_doubles[i][0]), num, MPI_DOUBLE, proc,
             tag2, comm_);
  }

  MPI_Waitall(recvProcs.size(), &mpiReqs[0], &mpiStatuses[0]);

#endif  //#ifndef FEI_SER

  return(0);
}
00251 
00252 int fei::Vector_core::copyOut(int numValues,
00253           const int* indices,
00254           double* values,
00255           int vectorIndex) const
00256 {
00257   const std::vector<CSVec*>& remote = remotelyOwned();
00258 
00259   for(int i=0; i<numValues; ++i) {
00260     int ind = indices[i];
00261 
00262     int local = ind - firstLocalOffset_;
00263     if (local < 0 || local >= numLocal_) {
00264       if (ind < 0) {
00265   continue;
00266       }
00267 
00268       int proc = eqnComm_->getOwnerProc(ind);
00269 
00270       int insertPoint = -1;
00271       int idx = fei::binarySearch(ind, remote[proc]->indices(), insertPoint);
00272       if (idx < 0) {
00273   fei::console_out() << "fei::Vector_core::copyOut: proc " << fei::localProc(comm_)
00274        << ", index " << ind << " not in remotelyOwned_ vec object for proc "
00275             <<proc<<FEI_ENDL;
00276   ERReturn(-1);
00277       }
00278       else {
00279   values[i] = remote[proc]->coefs()[idx];
00280       }
00281     }
00282     else {
00283       CHK_ERR( copyOutOfUnderlyingVector(1, &ind, &(values[i]), vectorIndex) );
00284     }
00285   }
00286 
00287   return(0);
00288 }
00289 
00290 int fei::Vector_core::giveToVector(int numValues,
00291                const int* indices,
00292                const double* values,
00293                bool sumInto,
00294                int vectorIndex)
00295 {
00296   std::vector<CSVec*>& remote = remotelyOwned();
00297 
00298   for(int i=0; i<numValues; ++i) {
00299     int ind = indices[i];
00300     double val = values[i];
00301 
00302     if (ind < 0) {
00303 //      throw std::runtime_error("negative index not allowed");
00304       //preservation of existing behavior: certain Sierra scenarios
00305       //involve passing negative indices for positions that should be
00306       //ignored... so we'll continue rather than throwing.
00307       continue;
00308     }
00309     int local = ind - firstLocalOffset_;
00310     if (local < 0 || local >= numLocal_) {
00311       int proc = eqnComm_->getOwnerProc(ind);
00312       if (output_level_ >= fei::BRIEF_LOGS && output_stream_ != NULL) {
00313         FEI_OSTREAM& os = *output_stream_;
00314         os << dbgprefix_<<"giveToVector remote["<<proc<<"]("
00315          <<ind<<","<<val<<")"<<FEI_ENDL;
00316       }
00317 
00318       if (sumInto) {
00319   fei::add_entry( *remote[proc], ind, val);
00320       }
00321       else {
00322   fei::put_entry( *remote[proc], ind, val);
00323       }
00324     }
00325     else {
00326       int err = giveToUnderlyingVector(1, &ind, &val, sumInto, vectorIndex);
00327       if (err != 0) {
00328   fei::console_out() << "giveToVector sumIn ERROR, ind: " << ind
00329        << ", val: " << val << FEI_ENDL;
00330   ERReturn(-1);
00331       }
00332     }
00333   }
00334 
00335   return(0);
00336 }
00337 
00338 int fei::Vector_core::assembleFieldData(int fieldID,
00339               int idType,
00340               int numIDs,
00341               const int* IDs,
00342               const double* data,
00343               bool sumInto,
00344               int vectorIndex)
00345 {
00346   if (vecSpace_.get() == NULL) ERReturn(-1);
00347 
00348   int fieldSize = vecSpace_->getFieldSize(fieldID);
00349 
00350   work_indices_.resize(numIDs*fieldSize);
00351   int* indicesPtr = &work_indices_[0];
00352 
00353   CHK_ERR( vecSpace_->getGlobalIndices(numIDs, IDs, idType, fieldID,
00354           indicesPtr) );
00355 
00356   CHK_ERR( giveToVector(numIDs*fieldSize, indicesPtr, data, sumInto, vectorIndex) );
00357 
00358   return(0);
00359 }
00360 
00361 void pack_send_buffers(const std::vector<int>& sendProcs,
00362                        const std::vector<fei::CSVec*>& remotelyOwned,
00363                        std::vector<std::vector<char> >& send_chars)
00364 {
00365   for(size_t i=0; i<sendProcs.size(); ++i) {
00366     int proc = sendProcs[i];
00367     fei::impl_utils::pack_indices_coefs(remotelyOwned[proc]->indices(),
00368                        remotelyOwned[proc]->coefs(), send_chars[i]);
00369 
00370     fei::set_values(*remotelyOwned[proc], 0.0);
00371   }
00372 }
00373 
//Send this proc's cached remotely-owned contributions to their owning procs,
//and store the contributions received from other procs into the underlying
//vector (summed if 'accumulate' is true, overwritten otherwise).
//Returns 0 on success, non-zero if storing received data fails.
int fei::Vector_core::gatherFromOverlap(bool accumulate)
{
  //Nothing to communicate in serial, or when the data lives in an FE-vector.
  if (fei::numProcs(comm_) == 1 || haveFEVector()) {
    return(0);
  }

//int localproc = fei::localProc(comm_);
//std::ostringstream osstr;
//osstr << "proc."<<localproc;
//std::string str=osstr.str();
//static std::ofstream ofs(str.c_str());

#ifndef FEI_SER
  //declare arrays to hold the buffers we'll be receiving.
  std::vector<std::vector<char> > recv_chars;
  std::vector<std::vector<char> > send_chars(sendProcs_.size());

  std::vector<MPI_Request> mpiReqs;
  std::vector<MPI_Request> mpiReqs2;
  std::vector<MPI_Status> mpiStatuses;
  int tag1 = 11111;

  //The send/recv proc lists are computed once and cached; they are only
  //rebuilt after something invalidates them (sendRecvProcsNeedUpdated_).
  if (sendRecvProcsNeedUpdated_) {
    //first create the list of procs we'll be sending to.
    for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
      if ((int)i == fei::localProc(comm_)) continue;
      if (remotelyOwned_[i]->size() == 0) continue;

      sendProcs_.push_back(i);
    }

    //Merge in the vector-space's send-procs, keeping sendProcs_ sorted and
    //free of duplicates (insertion-merge of two lists).
    std::vector<int> tmpSendProcs;
    vecSpace_->getSendProcs(tmpSendProcs);
    for(size_t i=0; i<tmpSendProcs.size(); ++i) {
      bool found = false;
      for(size_t j=0; j<sendProcs_.size(); ++j) {
        if (sendProcs_[j] == tmpSendProcs[i]) {
          found = true;
          break;
        }
        if (sendProcs_[j] > tmpSendProcs[i]) {
          sendProcs_.insert(sendProcs_.begin()+j, tmpSendProcs[i]);
          found = true;
          break;
        }
      }
      if (!found) sendProcs_.push_back(tmpSendProcs[i]);
    }

    //recvProcs_ is the mirror image: the procs that will send to us.
    recvProcs_.clear();
    fei::mirrorProcs(comm_, sendProcs_, recvProcs_);
    sendRecvProcsNeedUpdated_ = false;
  }

//  int num_remote_sends_needed = 0;
//  for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
//    if ((int)i==fei::localProc(comm_)) continue;
//    if (remotelyOwned_[i]->size() > 0) num_remote_sends_needed++;
//  }
//  if (num_remote_sends_needed != (int)sendProcs_.size()) {
//    std::cout << "proc " << fei::localProc(comm_)<<", num_remote_sends_needed="<<num_remote_sends_needed<<", sendProcs_.size()="<<sendProcs_.size()<<std::endl;
//  }

  send_chars.resize(sendProcs_.size());

  //Pack the cached contributions into char buffers (this also zeros the
  //remotelyOwned_ coefficients — see pack_send_buffers).
  pack_send_buffers(sendProcs_, remotelyOwned_, send_chars);

  recv_sizes_.resize(recvProcs_.size());
  mpiReqs.resize(recvProcs_.size());
  mpiStatuses.resize(recvProcs_.size());
  mpiReqs2.resize(recvProcs_.size());
  mpiStatuses.resize(recvProcs_.size());
  recv_chars.resize(recvProcs_.size());


  //post the recvs for the sizes.
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    int proc = recvProcs_[i];
    MPI_Irecv(&recv_sizes_[i], 1, MPI_INT, proc,
              tag1, comm_, &mpiReqs[i]);
  }

  //send the sizes of data we'll be sending.
  for(unsigned i=0; i<sendProcs_.size(); ++i) {
    int proc = sendProcs_[i];
    int size = send_chars[i].size();
//std::cout<<"proc "<<fei::localProc(comm_)<<" sending "<<size<<" chars ("<<remotelyOwned_[proc]->indices().size()<<" indices/coefs) to proc "<<proc<<" accum="<<accumulate<<std::endl;
    MPI_Send(&size, 1, MPI_INT, proc, tag1, comm_);
  }

  //now post the recvs for the data.
  //As each size-recv completes (Waitany), the buffer for that proc is
  //resized and the matching data-recv is posted in mpiReqs2.
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    int index;
    MPI_Status status;
    MPI_Waitany(mpiReqs.size(), &mpiReqs[0], &index, &status);

    recv_chars[index].resize(recv_sizes_[index]);
    MPI_Irecv(&(recv_chars[index][0]), recv_sizes_[index], MPI_CHAR, recvProcs_[index],
              tag1, comm_, &mpiReqs2[index]);
  }

  //now send the outgoing data.
  for(size_t i=0; i<sendProcs_.size(); ++i) {
    int proc = sendProcs_[i];
    int size = send_chars[i].size();
    MPI_Send(&(send_chars[i][0]), size, MPI_CHAR, proc, tag1, comm_);
  }

  //Wait for all data-recvs to complete before unpacking.
  std::vector<int> indices;
  std::vector<double> coefs;
  int numRecvProcs = recvProcs_.size();
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    int index;
    MPI_Status status;
    MPI_Waitany(numRecvProcs, &mpiReqs2[0], &index, &status);
  }

  //now store the data we've received.
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    fei::impl_utils::unpack_indices_coefs(recv_chars[i], indices, coefs);
    int num = indices.size();
    if (num == 0) continue;
//ofs<<"proc "<<fei::localProc(comm_)<<" recvd "<<num<<"inds/coefs from proc "<<recvProcs_[i]<<" accum="<<accumulate<<std::endl;
//for(int ii=0; ii<num;++ii)ofs<<"   "<<indices[ii]<<" "<<coefs[ii]<<std::endl;
    int err = giveToUnderlyingVector(num, &(indices[0]),
                                     &(coefs[0]), accumulate, 0);
    if (err != 0) {
      FEI_COUT << "fei::Vector_core::gatherFromOverlap ERROR storing recvd data" << FEI_ENDL;
      return(err);
    }
  }

#endif  //#ifndef FEI_SER

  return(0);
}
00510 
//Copy the coefficients for field 'fieldID' on the given IDs out of the
//vector into 'data'. Uses the FE-vector access path when haveFEVector_ is
//set, otherwise maps the IDs to global indices and delegates to copyOut.
//Returns 0 on success, non-zero otherwise.
int fei::Vector_core::copyOutFieldData(int fieldID,
             int idType,
             int numIDs,
             const int* IDs,
             double* data,
             int vectorIndex)
{
  if (vecSpace_.get() == NULL) ERReturn(-1);

  int fieldSize = vecSpace_->getFieldSize(fieldID);

  if (haveFEVector_) {
    //FE-vector path: resolve each ID to its record, then read the field's
    //dofs one at a time via copyOut_FE(nodeNumber, dofOffset).
    snl_fei::RecordCollection* collection = NULL;
    CHK_ERR( vecSpace_->getRecordCollection(idType, collection) );
    int nodeNumber;
    int dofOffset;
    int foffset;
    std::vector<int>& eqnNums = vecSpace_->getEqnNumbers();
    int* vspcEqnPtr = eqnNums.size() > 0 ? &eqnNums[0] : NULL;

    int offset = 0;
    for(int i=0; i<numIDs; ++i) {
      fei::Record<int>* node = collection->getRecordWithID(IDs[i]);
      if (node == NULL) {
        ERReturn(-1);
      }

      nodeNumber = node->getNumber();
      int* eqnNumbers = vspcEqnPtr+node->getOffsetIntoEqnNumbers();
      //NOTE(review): getFieldEqnOffset's return/status is ignored here —
      //confirm that fieldID is always present in the record's field-mask.
      node->getFieldMask()->getFieldEqnOffset(fieldID, foffset);
      //dofOffset: the field's first eqn relative to the record's first eqn.
      dofOffset = eqnNumbers[foffset] - eqnNumbers[0];
      for(int j=0; j<fieldSize; ++j) {
  CHK_ERR( copyOut_FE(nodeNumber, dofOffset+j, data[offset++]));
      }
    }
  }
  else {
    //NOTE(review): resized to twice the needed length — presumably scratch
    //space required by getGlobalIndices; confirm against its contract.
    work_indices_.resize(numIDs*fieldSize*2);
    int* indicesPtr = &work_indices_[0];

    CHK_ERR( vecSpace_->getGlobalIndices(numIDs, IDs, idType,
           fieldID, indicesPtr) );

    CHK_ERR( copyOut(numIDs*fieldSize, indicesPtr, data) );
  }

  return(0);
}
00559 
00560 int fei::Vector_core::writeToFile(const char* filename,
00561                                     bool matrixMarketFormat)
00562 {
00563   int numProcs = fei::numProcs(comm_);
00564   int localProc =fei::localProc(comm_);
00565 
00566   double coef;
00567 
00568   static char mmbanner[] = "%%MatrixMarket matrix array real general";
00569 
00570   for(int p=0; p<numProcs; ++p) {
00571     fei::Barrier(comm_);
00572     if (p != localProc) continue;
00573 
00574     FEI_OFSTREAM* outFile = NULL;
00575     if (p==0) {
00576       outFile = new FEI_OFSTREAM(filename, IOS_OUT);
00577       FEI_OFSTREAM& ofref = *outFile;
00578       if (matrixMarketFormat) {
00579         ofref << mmbanner << FEI_ENDL;
00580         ofref << eqnComm_->getGlobalOffsets()[numProcs] << " 1" << FEI_ENDL;
00581       }
00582       else {
00583         ofref << eqnComm_->getGlobalOffsets()[numProcs] << FEI_ENDL;
00584       }
00585     }
00586     else outFile = new FEI_OFSTREAM(filename, IOS_APP);
00587     FEI_OFSTREAM& ofref = *outFile;
00588     ofref.setf(IOS_SCIENTIFIC, IOS_FLOATFIELD);
00589     ofref.precision(13);
00590 
00591     for(int i=firstLocalOffset_; i<=lastLocalOffset_; ++i) {
00592       CHK_ERR( copyOut(1, &i, &coef) );
00593       if (matrixMarketFormat) {
00594         ofref << " " << coef << FEI_ENDL;
00595       }
00596       else {
00597         ofref << i << " " << coef << FEI_ENDL;
00598       }
00599     }
00600     
00601     delete outFile;
00602   } 
00603     
00604   return(0);
00605 }
00606 
00607 int fei::Vector_core::writeToStream(FEI_OSTREAM& ostrm,
00608           bool matrixMarketFormat)
00609 {
00610   int numProcs = fei::numProcs(comm_);
00611   int local_proc =fei::localProc(comm_);
00612 
00613   double coef;
00614 
00615   static char mmbanner[] = "%%MatrixMarket matrix array real general";
00616 
00617   IOS_FMTFLAGS oldf = ostrm.setf(IOS_SCIENTIFIC, IOS_FLOATFIELD);
00618   ostrm.precision(13);
00619 
00620   for(int proc=0; proc<numProcs; ++proc) {
00621     fei::Barrier(comm_);
00622     if (proc != local_proc) continue;
00623 
00624     if (proc==0) {
00625       if (matrixMarketFormat) {
00626   ostrm << mmbanner << FEI_ENDL;
00627   ostrm << eqnComm_->getGlobalOffsets()[numProcs] << " 1" << FEI_ENDL;
00628       }
00629       else {
00630   ostrm << eqnComm_->getGlobalOffsets()[numProcs] << FEI_ENDL;
00631       }
00632     }
00633 
00634     for(int p=0; p<local_proc; ++p) {
00635       for(size_t ii=0; ii<remotelyOwned_[p]->size(); ++ii) {
00636   if (matrixMarketFormat) {
00637     ostrm << " " << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
00638   }
00639   else {
00640     ostrm << " " << remotelyOwned_[p]->indices()[ii] << " "
00641     << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
00642   }
00643       }
00644     }
00645 
00646     for(int i=firstLocalOffset_; i<=lastLocalOffset_; ++i) {
00647       CHK_ERR( copyOut(1, &i, &coef) );
00648       if (matrixMarketFormat) {
00649   ostrm << " " << coef << FEI_ENDL;
00650       }
00651       else {
00652   ostrm << " " << i << " " << coef << FEI_ENDL;
00653       }
00654     }
00655 
00656     for(int p=local_proc+1; p<numProcs; ++p) {
00657       for(size_t ii=0; ii<remotelyOwned_[p]->size(); ++ii) {
00658   if (matrixMarketFormat) {
00659     ostrm << " " << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
00660   }
00661   else {
00662     ostrm << " " << remotelyOwned_[p]->indices()[ii] << " "
00663     << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
00664   }
00665       }
00666     }
00667   }
00668 
00669   ostrm.setf(oldf, IOS_FLOATFIELD);
00670 
00671   return(0);
00672 }
00673 
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Friends