[MOAB-dev] Fw: nightly test

Jiangtao Hu jiangtao_ma at yahoo.com
Wed Mar 18 08:40:11 CDT 2009


In case you did update and found it's not working...
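
In case it helps, the sequence that worked for me was roughly the
following, run from the directory whose configure.in was changed (the
path used to restore the old configure.in is just a placeholder for
whatever backup copy you kept):

  cp /path/to/old/configure.in .   # put the old configure.in back
  make clean                       # clean up using the old build setup
  rm configure.in                  # remove it again
  autoreconf -fi                   # regenerate configure et al.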

Jane


--- On Wed, 3/18/09, Jiangtao Hu <jiangtao_ma at yahoo.com> wrote:

> From: Jiangtao Hu <jiangtao_ma at yahoo.com>
> Subject: nightly test
> To: "kraftche at cae.wisc.edu" <kraftche at cae.wisc.edu>
> Date: Wednesday, March 18, 2009, 9:38 AM
> Hi, Jason
> 
> FYI. Yesterday, when I first updated the source, I got the
> same error messages that the nightly tests gave. What I did
> was copy the old configure.in back into the corresponding
> directory, run "make clean", then remove configure.in again
> and run "autoreconf -fi"; after that it worked fine.
> 
> Jane
> 
> 
> --- On Tue, 3/17/09, kraftche at cae.wisc.edu <kraftche at cae.wisc.edu> wrote:
> 
> > From: kraftche at cae.wisc.edu <kraftche at cae.wisc.edu>
> > Subject: [MOAB-dev] r2724 - MOAB/trunk/parallel
> > To: moab-dev at mcs.anl.gov
> > Date: Tuesday, March 17, 2009, 4:22 PM
> > Author: kraftche
> > Date: 2009-03-17 15:22:58 -0500 (Tue, 17 Mar 2009)
> > New Revision: 2724
> > 
> > Modified:
> >    MOAB/trunk/parallel/MBParallelComm.cpp
> >    MOAB/trunk/parallel/MBParallelComm.hpp
> > Log:
> > add function 'exchange_all_shared_handles' to
> > assist in testing and debugging
> > 
> > Modified: MOAB/trunk/parallel/MBParallelComm.cpp
> > ===================================================================
> > --- MOAB/trunk/parallel/MBParallelComm.cpp	2009-03-17 19:18:45 UTC (rev 2723)
> > +++ MOAB/trunk/parallel/MBParallelComm.cpp	2009-03-17 20:22:58 UTC (rev 2724)
> > @@ -4492,8 +4492,114 @@
> >    remote_handles[num_part_ids_out-1] = entity;
> >    return result;
> >  }
> > + 
> >  
> > +MBErrorCode MBParallelComm::exchange_all_shared_handles( shared_entity_map& result )
> > +{
> > +  MBErrorCode rval;
> > +  int ierr;
> > +  const int tag = 0x4A41534E;
> > +  const MPI_Comm comm = procConfig.proc_comm();
> >  
> > +  std::set<unsigned int> exch_procs;
> > +  rval = get_comm_procs(exch_procs);  
> > +  if (MB_SUCCESS != rval)
> > +    return rval;
> > +  const int num_proc = exch_procs.size();
> > +  std::vector<MPI_Request> send_req(num_proc), recv_req(num_proc);
> > +  const std::vector<int> procs( exch_procs.begin(), exch_procs.end() );
> > +  
> > +    // get all shared entities
> > +  MBRange all_shared;
> > +  MBTag pstatus = pstatus_tag();
> > +  for (MBEntityType type = MBVERTEX; type < MBENTITYSET; ++type) {
> > +    rval = get_moab()->get_entities_by_type_and_tag( 0, type, &pstatus, 0, 1, all_shared );
> > +    if (MB_SUCCESS != rval)
> > +      return rval;
> > +  }
> > +
> > +    // build up send buffers
> > +  shared_entity_map send_data;
> > +  int ent_procs[MAX_SHARING_PROCS];
> > +  MBEntityHandle handles[MAX_SHARING_PROCS];
> > +  int num_sharing;
> > +  SharedEntityData tmp;
> > +  for (MBRange::iterator i = all_shared.begin(); i != all_shared.end(); ++i) {
> > +    tmp.remote = *i; // swap local/remote so they're correct on the remote proc.
> > +    rval = get_owner( *i, tmp.owner );
> > +    if (MB_SUCCESS != rval)
> > +      return rval;
> > +    
> > +    rval = get_sharing_parts( *i, ent_procs, num_sharing, handles );
> > +    for (int j = 0; j < num_sharing; ++j) {
> > +      if (ent_procs[j] == (int)proc_config().proc_rank())
> > +        continue;
> > +      tmp.local = handles[j];
> > +      send_data[ent_procs[j]].push_back( tmp );
> > +    }
> > +  }
> > +
> > +    // set up to receive sizes
> > +  std::vector<int> sizes_send(num_proc), sizes_recv(num_proc);
> > +  for (int i = 0; i < num_proc; ++i) {
> > +    ierr = MPI_Irecv( &sizes_recv[i], 1, MPI_INT, procs[i], tag, comm, &recv_req[i] );
> > +    if (ierr) 
> > +      return MB_FILE_WRITE_ERROR;
> > +  }
> > +  
> > +    // send sizes
> > +  for (int i = 0; i < num_proc; ++i) {
> > +    sizes_send[i] = send_data[procs[i]].size();
> > +    ierr = MPI_Isend( &sizes_send[i], 1, MPI_INT, procs[i], tag, comm, &send_req[i] );
> > +    if (ierr) 
> > +      return MB_FILE_WRITE_ERROR;
> > +  }
> > +  
> > +    // receive sizes
> > +  std::vector<MPI_Status> stat(num_proc);
> > +  ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
> > +  if (ierr)
> > +    return MB_FILE_WRITE_ERROR;
> > +  
> > +    // wait until all sizes are sent (clean up pending req's)
> > +  ierr = MPI_Waitall( num_proc, &send_req[0], &stat[0] );
> > +  if (ierr)
> > +    return MB_FILE_WRITE_ERROR;
> > +  
> > +    // set up to receive data
> > +  for (int i = 0; i < num_proc; ++i) {
> > +    result[procs[i]].resize( sizes_recv[i] );
> > +    ierr = MPI_Irecv( &result[procs[i]][0], 
> > +                      sizeof(SharedEntityData)*sizes_recv[i], 
> > +                      MPI_UNSIGNED_CHAR, 
> > +                      procs[i], tag, comm, &recv_req[i] );
> > +    if (ierr) 
> > +      return MB_FILE_WRITE_ERROR;
> > +  }
> > +  
> > +    // send data
> > +  for (int i = 0; i < num_proc; ++i) {
> > +    ierr = MPI_Isend( &send_data[procs[i]][0], 
> > +                      sizeof(SharedEntityData)*sizes_send[i], 
> > +                      MPI_UNSIGNED_CHAR, 
> > +                      procs[i], tag, comm, &send_req[i] );
> > +    if (ierr) 
> > +      return MB_FILE_WRITE_ERROR;
> > +  }
> > +  
> > +    // receive data
> > +  ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
> > +  if (ierr)
> > +    return MB_FILE_WRITE_ERROR;
> > +  
> > +    // wait until everything is sent to release send buffers
> > +  ierr = MPI_Waitall( num_proc, &send_req[0], &stat[0] );
> > +  if (ierr)
> > +    return MB_FILE_WRITE_ERROR;
> > +  
> > +  return MB_SUCCESS;
> > +}
> > +
> >  #ifdef TEST_PARALLELCOMM
> >  
> >  #include <iostream>
> > 
> > Modified: MOAB/trunk/parallel/MBParallelComm.hpp
> > ===================================================================
> > --- MOAB/trunk/parallel/MBParallelComm.hpp	2009-03-17 19:18:45 UTC (rev 2723)
> > +++ MOAB/trunk/parallel/MBParallelComm.hpp	2009-03-17 20:22:58 UTC (rev 2724)
> > @@ -32,8 +32,37 @@
> >  #include <map>
> >  #include <set>
> >  #include "math.h"
> > +
> > +#ifdef SEEK_SET
> > +#  define SEEK_SET_OLD SEEK_SET
> > +#  undef SEEK_SET
> > +#endif
> > +#ifdef SEEK_CUR
> > +#  define SEEK_CUR_OLD SEEK_CUR
> > +#  undef SEEK_CUR
> > +#endif
> > +#ifdef SEEK_END
> > +#  define SEEK_END_OLD SEEK_END
> > +#  undef SEEK_END
> > +#endif
> >  #include "mpi.h"
> > +#ifdef SEEK_SET_OLD
> > +#  undef SEEK_SET
> > +#  define SEEK_SET SEEK_SET_OLD
> > +#  undef SEEK_SET_OLD
> > +#endif
> > +#ifdef SEEK_CUR_OLD
> > +#  undef SEEK_CUR
> > +#  define SEEK_CUR SEEK_CUR_OLD
> > +#  undef SEEK_CUR_OLD
> > +#endif
> > +#ifdef SEEK_END_OLD
> > +#  undef SEEK_END
> > +#  define SEEK_END SEEK_END_OLD
> > +#  undef SEEK_END_OLD
> > +#endif
> >  
> > +
> >  extern "C" {
> >    struct tuple_list;
> >  }
> > @@ -670,8 +699,25 @@
> >    MBErrorCode update_iface_sets(MBRange &sent_ents,
> >                                  std::vector<MBEntityHandle> &remote_handles, 
> >                                  int from_proc);
> > -
> > +  
> >  public:  
> > +  struct SharedEntityData {
> > +    MBEntityHandle local;
> > +    MBEntityHandle remote;
> > +    int owner;
> > +  };
> > +  typedef std::vector< SharedEntityData > shared_entity_vec;
> > +  //! Map indexed by processor ID and containing, for each processor ID,
> > +  //! a list of <local,remote> handle pairs, where the local handle is
> > +  //! the handle on this processor and the remote handle is the handle on
> > +  //! the processor ID indicated by the map index.
> > +  typedef std::map< int, shared_entity_vec > shared_entity_map;
> > +  //! Every processor sends shared entity handle data to every other processor
> > +  //! that it shares entities with.  Passed back map is all received data,
> > +  //! indexed by processor ID. This function is intended to be used for 
> > +  //! debugging.
> > +  MBErrorCode exchange_all_shared_handles( shared_entity_map& result );
> > +  
> >      //! replace handles in from_vec with corresponding handles on
> >      //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
> >      //! if no remote handle and new_ents is non-null, substitute
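
In case anyone wants to poke at the new call for debugging, here is a
rough sketch of how it might be driven. The MBParallelComm construction
and the "mb" MBInterface pointer are my assumptions, not part of the
commit, so adjust to however you already set up the parallel comm object:

  #include <iostream>
  #include "MBParallelComm.hpp"

  void dump_shared_handles( MBInterface* mb )
  {
      // assumes the usual constructor taking the MOAB instance
    MBParallelComm pcomm( mb );

    MBParallelComm::shared_entity_map handles;
    MBErrorCode rval = pcomm.exchange_all_shared_handles( handles );
    if (MB_SUCCESS != rval)
      return;

      // one map entry per sharing processor; each entry holds the
      // <local,remote> handle pairs plus the owning processor, with
      // 'local' being the handle on this processor.
    for (MBParallelComm::shared_entity_map::const_iterator p = handles.begin();
         p != handles.end(); ++p)
      std::cout << "proc " << p->first << ": " << p->second.size()
                << " shared entities" << std::endl;
  }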



