[MPICH] MPI-IO, vector datatype

Rajeev Thakur thakur at mcs.anl.gov
Thu May 3 12:05:35 CDT 2007


The Write and Read calls should use nints as the count instead of
bufsize; the count argument is in elements of the given datatype
(MPI_INT here), not bytes. (I don't know if that alone will solve the
problem.)
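
A minimal sketch of the change, against the program quoted below:

    f.Write(obuf, nints, MPI_INT, status);   // nints = 32 elements here
    ...
    f.Read(ibuf, nints, MPI_INT, status);

With bufsize (128) as the count, each process transfers 128 ints
through a view that maps only nints = 32 of them; I believe the excess
spills into later repetitions of the filetype and collides with the
other rank's blocks, which would explain the mismatches below.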

Rajeev 

> -----Original Message-----
> From: owner-mpich-discuss at mcs.anl.gov 
> [mailto:owner-mpich-discuss at mcs.anl.gov] On Behalf Of Russell 
> L. Carter
> Sent: Wednesday, May 02, 2007 5:21 PM
> To: mpich-discuss at mcs.anl.gov
> Subject: [MPICH] MPI-IO, vector datatype
> 
> Hi folks,
> I'm having some cognition problems understanding how the vector
> datatype works with read_all/write_all.  (I.e., I'm likely being
> stupid.)  At any rate, I've appended a short program, derived from the
> UsingMPI2/moreio/read_all.c example, that generates the following
> output.  I expected no output lines beginning with
> 'myrank, i, obuf[i]...'.  Any clues on where I'm being clueless?
> 
> Many thanks,
> Russell
> 
> 
>   mpiexec -n 2 /home/rcarter/mpibin/rwall
> Starting rwall.
> Starting rwall.
> process 0 wrote 128 ints
> process 1 wrote 128 ints
> process 0 read 128 ints
> process 1 read 128 ints
> myrank, i, obuf[i], ibuf[i]: 0 8 8 0
> myrank, i, obuf[i], ibuf[i]: 0 9 9 2
> myrank, i, obuf[i], ibuf[i]: 0 10 10 4
> myrank, i, obuf[i], ibuf[i]: 0 11 11 6
> myrank, i, obuf[i], ibuf[i]: 0 12 12 8
> myrank, i, obuf[i], ibuf[i]: 0 13 13 10
> myrank, i, obuf[i], ibuf[i]: 0 14 14 12
> myrank, i, obuf[i], ibuf[i]: 0 15 15 14
> myrank, i, obuf[i], ibuf[i]: 0 17 17 18
> myrank, i, obuf[i], ibuf[i]: 0 18 18 20
> myrank, i, obuf[i], ibuf[i]: 0 19 19 22
> myrank, i, obuf[i], ibuf[i]: 0 20 20 24
> myrank, i, obuf[i], ibuf[i]: 0 21 21 26
> myrank, i, obuf[i], ibuf[i]: 0 22 22 28
> myrank, i, obuf[i], ibuf[i]: 0 23 23 30
> myrank, i, obuf[i], ibuf[i]: 0 24 24 32
> myrank, i, obuf[i], ibuf[i]: 0 25 25 34
> myrank, i, obuf[i], ibuf[i]: 0 26 26 36
> myrank, i, obuf[i], ibuf[i]: 0 27 27 38
> myrank, i, obuf[i], ibuf[i]: 0 28 28 40
> myrank, i, obuf[i], ibuf[i]: 0 29 29 42
> myrank, i, obuf[i], ibuf[i]: 0 30 30 44
> myrank, i, obuf[i], ibuf[i]: 0 31 31 46
> rwall end.
> rwall end.
> 
> Here's the program:
> 
> #include "mpi.h"
> #include <iostream>
> using namespace std;
> 
> struct tester
> {
>      tester()
>          : myrank(MPI::COMM_WORLD.Get_rank()),
>            nprocs(MPI::COMM_WORLD.Get_size()),
>            bufsize(FILESIZE/nprocs), nints(bufsize/sizeof(int)),
>            filetype(MPI::INT),
>            fname("pvfs2:/mnt/pvfs/tst/testfile")
>      {
>          std::ios::sync_with_stdio(false);
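>          // nints/INTS_PER_BLK blocks of INTS_PER_BLK ints each, with a
>          // stride of INTS_PER_BLK*nprocs ints, so the ranks' blocks
>          // interleave in the file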
>          filetype.Create_vector(nints/INTS_PER_BLK, INTS_PER_BLK,
>                                 INTS_PER_BLK * nprocs);
>          filetype.Commit();
>          obuf = new int[bufsize];
>          ibuf = new int[bufsize];
>      }
> 
>      ~tester()
>      {
>          delete[] obuf;
>          delete[] ibuf;
>      }
> 
>      void write()
>      {
>          for (int i = 0; i < nints; ++i) {
>              obuf[i] = (myrank + 1) * i;
>          }
>          MPI::File f = MPI::File::Open(MPI::COMM_WORLD, fname.c_str(),
>                                        MPI_MODE_CREATE | MPI_MODE_WRONLY,
>                                        MPI::INFO_NULL);
>          f.Set_view(INTS_PER_BLK*sizeof(int)*myrank, MPI_INT,
>                     filetype, "native", MPI_INFO_NULL);
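>          // NB: the count argument is in elements of MPI_INT, not bytes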
>          f.Write(obuf, bufsize, MPI_INT, status);
>          cerr << "process " << myrank << " wrote "
>               << status.Get_count(MPI_INT) << " ints" << endl;
>          f.Close();
>      }
> 
>      void read()
>      {
>          MPI::File f = MPI::File::Open(MPI::COMM_WORLD, fname.c_str(),
>                                        MPI_MODE_RDONLY, MPI::INFO_NULL);
>          f.Set_view(INTS_PER_BLK*sizeof(int)*myrank, MPI_INT,
>                     filetype, "native", MPI_INFO_NULL);
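>          // NB: count is again in MPI_INT elements, not bytes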
>          f.Read(ibuf, bufsize, MPI_INT, status);
>          f.Close();
>          cerr << "process " << myrank << " read "
>               << status.Get_count(MPI_INT) << " ints" << endl;
>          for (int i = 0; i < nints; ++i) {
>              if (obuf[i] != ibuf[i]) {
>                  cerr << "myrank, i, obuf[i], ibuf[i]: " << myrank << " "
>                       << i << " " << obuf[i] << " " << ibuf[i] << endl;
>              }
>          }
>      }
> 
> private:
>      static const int FILESIZE = 256,  INTS_PER_BLK = 8;
>      int myrank, nprocs, bufsize, nints, *obuf, *ibuf;
>      MPI::Datatype filetype;
>      string fname;
>      MPI::Status status;
> };
> 
> 
> int main()
> {
>      cerr << "Starting rwall.\n";
>      try {
>          MPI::Init();
>          tester t;
>          t.write();
>          t.read();
>          MPI::Finalize();
>      } catch (exception &e) {
>          cerr << "\nCaught exception: " << e.what() << endl;
>          return -1;
>      } catch (MPI::Exception& e) {
>          cerr << "\nError:\n" << e.Get_error_string();
>          return -2;
>      }
>      cerr << "rwall end.\n";
>      return 0;
> }
> 
> 
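PS: For reference, with FILESIZE = 256 bytes (64 ints), INTS_PER_BLK = 8,
and two processes, the view the program sets up interleaves 8-int blocks
between the ranks:

    int offsets:  0..7    8..15   16..23  24..31  32..39  40..47  48..55  56..63
    owner:        rank 0  rank 1  rank 0  rank 1  rank 0  rank 1  rank 0  rank 1

Each process therefore sees exactly nints = 32 ints through its view,
which is why nints (elements), not bufsize (bytes), is the count to pass
to Write and Read.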



