[MOAB-dev] commit/MOAB: 13 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Tue Jul 30 03:40:27 CDT 2013
13 new commits in MOAB:
https://bitbucket.org/fathomteam/moab/commits/1babfc769551/
Changeset: 1babfc769551
Branch: None
User: vijaysm
Date: 2013-07-16 21:06:19
Summary: Removing an unnecessary itaps_cast.
Affected #: 1 file
diff --git a/itaps/imesh/iMeshP_MOAB.cpp b/itaps/imesh/iMeshP_MOAB.cpp
index d67dfc9..ab45811 100644
--- a/itaps/imesh/iMeshP_MOAB.cpp
+++ b/itaps/imesh/iMeshP_MOAB.cpp
@@ -16,7 +16,7 @@
#include <assert.h>
#include <sstream>
-#ifdef USE_MPI
+#ifdef USE_MPI
#include "moab_mpi.h"
#endif
@@ -35,8 +35,8 @@ using namespace moab;
// if no template specializtion, disable some type checking
template <typename T, typename S> inline
T itaps_cast( S handle )
-{
- assert(sizeof(S) >= sizeof(T));
+{
+ assert(sizeof(S) >= sizeof(T));
return reinterpret_cast<T>(handle);
}
#else
@@ -139,8 +139,8 @@ iBase_TagHandle itaps_cast<iBase_TagHandle,Tag>( Tag h )
// Need a different function name for Tag because (currently)
// both Tag and iBase_EntityHandle are void**.
iBase_TagHandle itaps_tag_cast( Tag t )
-{
- assert(sizeof(iBase_TagHandle) >= sizeof(Tag));
+{
+ assert(sizeof(iBase_TagHandle) >= sizeof(Tag));
return reinterpret_cast<iBase_TagHandle>(t);
}
@@ -155,7 +155,7 @@ iBase_TagHandle itaps_tag_cast( Tag t )
static inline ErrorCode get_entities( Interface* iface,
EntityHandle set,
- int type, int topology,
+ int type, int topology,
Range& entities )
{
if (topology != iMesh_ALL_TOPOLOGIES)
@@ -169,12 +169,12 @@ static inline ErrorCode get_entities( Interface* iface,
static inline ErrorCode remove_not_owned( ParallelComm* pcomm, Range& ents )
{
ErrorCode rval;
-
+
std::vector<unsigned char> pstatus(ents.size());
rval = pcomm->get_moab()->tag_get_data(pcomm->pstatus_tag(), ents, &pstatus[0]);
if (MB_SUCCESS != rval)
return rval;
-
+
Range::iterator i = ents.begin();
std::vector<unsigned char>::const_iterator j;
for (j = pstatus.begin(); j != pstatus.end(); ++j) {
@@ -183,7 +183,7 @@ static inline ErrorCode remove_not_owned( ParallelComm* pcomm, Range& ents )
else
++i;
}
-
+
return MB_SUCCESS;
}
@@ -191,17 +191,17 @@ static inline ErrorCode count_owned( ParallelComm* pcomm, const Range& ents, int
{
ErrorCode rval;
n = 0;
-
+
std::vector<unsigned char> pstatus(ents.size());
rval = pcomm->get_moab()->tag_get_data(pcomm->pstatus_tag(), ents, &pstatus[0]);
if (MB_SUCCESS != rval)
return rval;
-
+
std::vector<unsigned char>::const_iterator j;
for (j = pstatus.begin(); j != pstatus.end(); ++j)
if (!(*j & PSTATUS_NOT_OWNED))
++n;
-
+
return MB_SUCCESS;
}
@@ -214,7 +214,7 @@ static void set_intersection_query( iMesh_Instance instance,
int* err )
{
ErrorCode rval;
-
+
if (!set1) {
rval = get_entities( MOABI, itaps_cast<EntityHandle>(set2), type, topo, result );
CHKERR(rval,"Invalid Part handle");
@@ -231,7 +231,7 @@ static void set_intersection_query( iMesh_Instance instance,
CHKERR(rval,"Invalid set handle");
result.merge( intersect( r1, r2) );
}
-
+
RETURN (iBase_SUCCESS);
}
@@ -245,24 +245,22 @@ static ErrorCode get_boundary_entities( ParallelComm* pcomm,
Range& entities_out)
{
int* adj_part_id_ptr = (adj_part_id == iMeshP_ALL_PARTS) ? 0 : &adj_part_id;
-
+
Range iface_sets;
- ErrorCode rval = pcomm->get_interface_sets(
- itaps_cast<EntityHandle>(part_handle),
- iface_sets, adj_part_id_ptr );
+ ErrorCode rval = pcomm->get_interface_sets( part_handle, iface_sets, adj_part_id_ptr );
if (MB_SUCCESS != rval)
return rval;
-
+
for (Range::iterator i = iface_sets.begin(); i != iface_sets.end(); ++i) {
rval = get_entities( pcomm->get_moab(), *i, entity_type, entity_topology, entities_out );
if (MB_SUCCESS != rval)
return rval;
}
-
+
return MB_SUCCESS;
}
-class PartBoundaryIter : public MBRangeIter
+class PartBoundaryIter : public MBRangeIter
{
private:
ParallelComm* pComm;
@@ -281,14 +279,14 @@ class PartBoundaryIter : public MBRangeIter
virtual ErrorCode reset( Interface* ) {
iterData.clear();
- ErrorCode result = get_boundary_entities( pComm, entSet, entType, entTopo,
+ ErrorCode result = get_boundary_entities( pComm, entSet, entType, entTopo,
adjPart, iterData );
iterPos = iterData.begin();
return result;
}
};
-template <class Container>
+template <class Container>
class SetIntersectIter : public MBIter<Container>
{
private:
@@ -323,19 +321,19 @@ class SetIntersectIter : public MBIter<Container>
list.resize(w);
return MB_SUCCESS;
}
-
- virtual ErrorCode reset(Interface* mb)
+
+ virtual ErrorCode reset(Interface* mb)
{
ErrorCode result = MBIter<Container>::reset(mb);
if (MB_SUCCESS != result)
return result;
-
+
result = intersect_with_set( mb, MBIter<Container>::iterData );
MBIter<Container>::iterPos = MBIter<Container>::iterData.begin();
return result;
}
};
-
+
/********************* iMeshP API **************************/
@@ -352,12 +350,12 @@ void iMeshP_createPartitionAll( iMesh_Instance instance,
*partition_handle = 0;
Tag prtn_tag;
- ErrorCode rval = MOABI->tag_get_handle( PARALLEL_PARITIONING_TAG_NAME,
- 1, MB_TYPE_INTEGER,
- prtn_tag,
- MB_TAG_SPARSE|MB_TAG_CREAT );
+ ErrorCode rval = MOABI->tag_get_handle( PARALLEL_PARITIONING_TAG_NAME,
+ 1, MB_TYPE_INTEGER,
+ prtn_tag,
+ MB_TAG_SPARSE|MB_TAG_CREAT );
CHKERR(rval,"tag creation failed");
-
+
EntityHandle handle;
rval = MOABI->create_meshset( MESHSET_SET, handle ); CHKERR(rval,"set creation failed");
ParallelComm* pcomm = ParallelComm::get_pcomm( MOABI, handle, &communicator );
@@ -365,7 +363,7 @@ void iMeshP_createPartitionAll( iMesh_Instance instance,
MOABI->delete_entities( &handle, 1 );
RETURN(iBase_FAILURE);
}
-
+
*partition_handle = itaps_cast<iMeshP_PartitionHandle>(handle);
RETURN (iBase_SUCCESS);
}
@@ -389,7 +387,7 @@ void iMeshP_getPartIdFromPartHandle( iMesh_Instance instance,
int *err )
{
int junk1 = 1, junk2;
- iMeshP_getPartIdsFromPartHandlesArr( instance, partition_handle, &part_handle, 1,
+ iMeshP_getPartIdsFromPartHandlesArr( instance, partition_handle, &part_handle, 1,
&part_id, &junk1, &junk2, err );
}
@@ -400,7 +398,7 @@ void iMeshP_getPartHandleFromPartId( iMesh_Instance instance,
int *err )
{
int junk1 = 1, junk2;
- iMeshP_getPartHandlesFromPartsIdsArr( instance, partition_handle, &part_id, 1,
+ iMeshP_getPartHandlesFromPartsIdsArr( instance, partition_handle, &part_id, 1,
&part_handle, &junk1, &junk2, err );
}
@@ -481,7 +479,7 @@ void iMeshP_getNumPartitions( iMesh_Instance instance,
std::vector<ParallelComm*> pcomms;
ErrorCode rval = ParallelComm::get_all_pcomm( MOABI, pcomms );
CHKERR(rval,"Internal error retreiving PComms");
-
+
std::vector<ParallelComm*>::iterator i;
*num_partitions_out = 0;
for (i = pcomms.begin(); i != pcomms.end(); ++i)
@@ -493,25 +491,25 @@ void iMeshP_getNumPartitions( iMesh_Instance instance,
void iMeshP_getPartitions( iMesh_Instance instance,
iMeshP_PartitionHandle **partition_handle,
- int *partition_handle_allocated,
- int *partition_handle_size,
+ int *partition_handle_allocated,
+ int *partition_handle_size,
int *err )
{
std::vector<ParallelComm*> pcomms;
ErrorCode rval = ParallelComm::get_all_pcomm( MOABI, pcomms );
CHKERR(rval,"Internal error retreiving PComms");
-
+
std::vector<ParallelComm*>::iterator i;
int count = 0;
for (i = pcomms.begin(); i != pcomms.end(); ++i)
if ((*i)->get_partitioning())
++count;
ALLOC_CHECK_ARRAY_NOFAIL( partition_handle, count );
-
+
*partition_handle_size = 0;
for (i = pcomms.begin(); i != pcomms.end(); ++i)
if ((*i)->get_partitioning())
- (*partition_handle)[(*partition_handle_size)++]
+ (*partition_handle)[(*partition_handle_size)++]
= itaps_cast<iMeshP_PartitionHandle>((*i)->get_partitioning());
RETURN (iBase_SUCCESS );
@@ -519,13 +517,13 @@ void iMeshP_getPartitions( iMesh_Instance instance,
void iMeshP_getNumGlobalParts( iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
- int *num_global_part,
+ int *num_global_part,
int *err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
ErrorCode rval = pcomm->get_global_part_count( *num_global_part );
CHKERR (rval,"PComm::get_global_part_count failed");
RETURN(iBase_SUCCESS);
@@ -533,13 +531,13 @@ void iMeshP_getNumGlobalParts( iMesh_Instance instance,
void iMeshP_getNumLocalParts(iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
- int *num_local_part,
+ int *num_local_part,
int *err)
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
*num_local_part = pcomm->partition_sets().size();
RETURN (iBase_SUCCESS);
}
@@ -554,7 +552,7 @@ void iMeshP_getLocalParts( iMesh_Instance instance,
ParallelComm* pcomm = PCOMM;
if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
RANGE_TO_ITAPS_ARRAY( pcomm->partition_sets(), part_handles );
RETURN (iBase_SUCCESS);
}
@@ -574,15 +572,15 @@ void iMeshP_getRankOfPartArr( iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
const iMeshP_Part *part_ids,
const int part_ids_size,
- int **rank,
- int *rank_allocated,
+ int **rank,
+ int *rank_allocated,
int *rank_size,
int *err )
{
ParallelComm* pcomm = PCOMM;
if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
ALLOC_CHECK_ARRAY( rank, part_ids_size );
ErrorCode rval = MB_SUCCESS;
for (int i = 0; i < part_ids_size; ++i) {
@@ -596,14 +594,14 @@ void iMeshP_getRankOfPartArr( iMesh_Instance instance,
void iMeshP_getNumOfTypeAll( iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
const iBase_EntitySetHandle entity_set_handle,
- const int entity_type,
- int *num_type,
+ const int entity_type,
+ int *num_type,
int *err )
{
ParallelComm* pcomm = PCOMM;
if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
Range entities;
ErrorCode rval = get_entities( MOABI,
itaps_cast<EntityHandle>(entity_set_handle),
@@ -613,13 +611,13 @@ void iMeshP_getNumOfTypeAll( iMesh_Instance instance,
int count = 0;
if (MB_SUCCESS == rval)
rval = count_owned( pcomm, entities, count );
-
+
int vals[2] = { count, rval }, sums[2];
int ierr = MPI_Allreduce( vals, sums, 2, MPI_INT, MPI_SUM, pcomm->proc_config().proc_comm() );
assert(iBase_SUCCESS == 0);
if (ierr || sums[1])
RETURN (iBase_FAILURE);
-
+
*num_type = sums[0];
RETURN (iBase_SUCCESS);
}
@@ -627,14 +625,14 @@ void iMeshP_getNumOfTypeAll( iMesh_Instance instance,
void iMeshP_getNumOfTopoAll( iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
const iBase_EntitySetHandle entity_set_handle,
- const int entity_topology,
- int *num_topo,
+ const int entity_topology,
+ int *num_topo,
int *err )
{
ParallelComm* pcomm = PCOMM;
if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
Range entities;
ErrorCode rval = get_entities( MOABI,
itaps_cast<EntityHandle>(entity_set_handle),
@@ -644,13 +642,13 @@ void iMeshP_getNumOfTopoAll( iMesh_Instance instance,
int count = 0;
if (MB_SUCCESS == rval)
rval = count_owned( pcomm, entities, count );
-
+
int vals[2] = { count, rval }, sums[2];
int ierr = MPI_Allreduce( vals, sums, 2, MPI_INT, MPI_SUM, pcomm->proc_config().proc_comm() );
assert(iBase_SUCCESS == 0);
if (ierr || sums[1])
RETURN (iBase_FAILURE);
-
+
*num_topo = sums[0];
RETURN (iBase_SUCCESS);
}
@@ -663,7 +661,7 @@ void iMeshP_createPart( iMesh_Instance instance,
ParallelComm* pcomm = PCOMM;
if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
EntityHandle h;
ErrorCode rval = pcomm->create_part( h );
CHKERR(rval,"Part creation failed");
@@ -678,7 +676,7 @@ void iMeshP_destroyPart( iMesh_Instance instance,
ParallelComm* pcomm = PCOMM;
if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
ErrorCode rval = pcomm->destroy_part( itaps_cast<EntityHandle>(part_handle) );
CHKERR(rval,"Part destruction failed");
RETURN(iBase_SUCCESS);
@@ -709,20 +707,20 @@ void iMeshP_getNumPartNborsArr( iMesh_Instance instance,
int *err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
ALLOC_CHECK_ARRAY( num_part_nbors, part_handles_size );
-
+
int n, neighbors[MAX_SHARING_PROCS];
ErrorCode rval;
for (int i = 0; i < part_handles_size; ++i) {
EntityHandle h = itaps_cast<EntityHandle>(part_handles[i]);
- rval = pcomm->get_part_neighbor_ids( h, neighbors, n );
+ rval = pcomm->get_part_neighbor_ids( h, neighbors, n );
CHKERR(rval,"error getting neighbor ids");
(*num_part_nbors)[i] = n;
}
-
+
KEEP_ARRAY(num_part_nbors);
RETURN(iBase_SUCCESS);
}
@@ -739,10 +737,10 @@ void iMeshP_getPartNbors( iMesh_Instance instance,
int *err )
{
int junk1 = 1, junk2 = 1;
- iMeshP_getPartNborsArr( instance, partition_handle,
- &part_handle, 1, entity_type,
+ iMeshP_getPartNborsArr( instance, partition_handle,
+ &part_handle, 1, entity_type,
&num_part_nbors, &junk1, &junk2,
- nbor_part_ids, nbor_part_ids_allocated,
+ nbor_part_ids, nbor_part_ids_allocated,
nbor_part_ids_size, err );
}
@@ -757,39 +755,39 @@ void iMeshP_getPartNborsArr( iMesh_Instance instance,
iMeshP_Part **nbor_part_ids,
int *nbor_part_ids_allocated,
int *nbor_part_ids_size,
- int *err )
+ int *err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
ALLOC_CHECK_ARRAY( num_part_nbors, part_handles_size );
-
+
std::vector<int> all_neighbors;
int n, pnbor[MAX_SHARING_PROCS];
ErrorCode rval;
for (int i = 0; i < part_handles_size; ++i) {
EntityHandle h = itaps_cast<EntityHandle>(part_handles[i]);
- rval = pcomm->get_part_neighbor_ids( h, pnbor, n );
+ rval = pcomm->get_part_neighbor_ids( h, pnbor, n );
CHKERR(rval,"error getting neighbor ids");
(*num_part_nbors)[i] = n;
std::copy( pnbor, pnbor+n, std::back_inserter(all_neighbors) );
}
-
+
ALLOC_CHECK_ARRAY_NOFAIL( nbor_part_ids, all_neighbors.size() );
memcpy( *nbor_part_ids, &all_neighbors[0], sizeof(int)*all_neighbors.size() );
-
+
KEEP_ARRAY(num_part_nbors);
RETURN(iBase_SUCCESS);
}
void iMeshP_getNumPartBdryEnts( iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
- const iMeshP_PartHandle part_handle,
- const int entity_type,
- const int entity_topology,
- const iMeshP_Part target_part_id,
- int *num_entities,
+ const iMeshP_PartHandle part_handle,
+ const int entity_type,
+ const int entity_topology,
+ const iMeshP_Part target_part_id,
+ int *num_entities,
int *err )
{
Range entities;
@@ -857,10 +855,10 @@ void iMeshP_initPartBdryEntArrIter( iMesh_Instance instance,
iBase_EntityArrIterator* entity_iterator,
int* err )
{
- *entity_iterator = new PartBoundaryIter( PCOMM,
- itaps_cast<EntityHandle>(part_handle),
+ *entity_iterator = new PartBoundaryIter( PCOMM,
+ itaps_cast<EntityHandle>(part_handle),
(iBase_EntityType)entity_type,
- (iMesh_EntityTopology)entity_topology,
+ (iMesh_EntityTopology)entity_topology,
nbor_part_id,
array_size );
ErrorCode result = (*entity_iterator)->reset( MOABI );
@@ -881,7 +879,7 @@ void iMeshP_getNumOfType( iMesh_Instance instance,
int *err )
{
Range r;
- set_intersection_query( instance, part_handle, entity_set_handle,
+ set_intersection_query( instance, part_handle, entity_set_handle,
entity_type, iMesh_ALL_TOPOLOGIES, r, err );
*num_type = r.size();
}
@@ -895,7 +893,7 @@ void iMeshP_getNumOfTopo( iMesh_Instance instance,
int *err )
{
Range r;
- set_intersection_query( instance, part_handle, entity_set_handle,
+ set_intersection_query( instance, part_handle, entity_set_handle,
iBase_ALL_TYPES, entity_topology, r, err );
*num_topo = r.size();
}
@@ -926,10 +924,10 @@ void iMeshP_getAdjEntIndices(iMesh_Instance instance,
const int allocated_offset = (*offset_allocated == 0);
// get source entities
- iMeshP_getEntities( instance,
+ iMeshP_getEntities( instance,
partition, part,
entity_set_handle,
- entity_type_requestor,
+ entity_type_requestor,
entity_topology_requestor,
entity_handles,
entity_handles_allocated,
@@ -960,7 +958,7 @@ void iMeshP_getAdjEntIndices(iMesh_Instance instance,
*adj_entity_indices_size = size;
if (allocated_indices) {
*adj_entity_indices = (int*)malloc(sizeof(iBase_EntityHandle)*size);
- if (!*adj_entity_indices)
+ if (!*adj_entity_indices)
*err = iBase_MEMORY_ALLOCATION_FAILED;
else
*adj_entity_indices_allocated = size;
@@ -985,7 +983,7 @@ void iMeshP_getAdjEntIndices(iMesh_Instance instance,
// Now create an array of unique sorted handles from all_adj_handles.
// We need to create a copy because we still need all_adj_handles. We
- // will eventually need to copy the resulting unique list into
+ // will eventually need to copy the resulting unique list into
// adj_entity_handles, so if adj_entity_handles is already allocated and
// of sufficient size, use it rather than allocating another temporary.
iBase_EntityHandle* unique_adj = 0;
@@ -1011,7 +1009,7 @@ void iMeshP_getAdjEntIndices(iMesh_Instance instance,
else
*adj_entity_handles_allocated = *adj_entity_handles_size;
}
- else if (*adj_entity_handles_allocated < *adj_entity_handles_size)
+ else if (*adj_entity_handles_allocated < *adj_entity_handles_size)
*err = iBase_BAD_ARRAY_DIMENSION;
if (iBase_SUCCESS != *err) {
free( unique_adj );
@@ -1041,7 +1039,7 @@ void iMeshP_getAdjEntIndices(iMesh_Instance instance,
// convert from adjacency list to indices into unique_adj
for (int i = 0; i < *adj_entity_indices_size; ++i)
- (*adj_entity_indices)[i] = std::lower_bound( unique_adj,
+ (*adj_entity_indices)[i] = std::lower_bound( unique_adj,
unique_adj + *adj_entity_handles_size, all_adj_handles[i] ) - unique_adj;
free( all_adj_handles );
}
@@ -1058,11 +1056,11 @@ void iMeshP_getEntities( iMesh_Instance instance,
int *err )
{
Range r;
- set_intersection_query( instance, part_handle, entity_set_handle,
+ set_intersection_query( instance, part_handle, entity_set_handle,
entity_type, entity_topology, r, err );
if (iBase_SUCCESS != *err)
return;
-
+
RANGE_TO_ITAPS_ARRAY( r, entity_handles );
RETURN(iBase_SUCCESS);
}
@@ -1092,7 +1090,7 @@ void iMeshP_getAdjEntities( iMesh_Instance instance,
r, err );
if (iBase_SUCCESS != *err)
return;
-
+
// count adjacencies
std::vector<EntityHandle> tmp_storage;
int num_adj = 0;
@@ -1111,14 +1109,14 @@ void iMeshP_getAdjEntities( iMesh_Instance instance,
num_adj += num_conn;
}
}
-
+
// get adjacencies
ALLOC_CHECK_ARRAY( adj_entity_handles, num_adj );
ALLOC_CHECK_ARRAY( offset, r.size() );
int arr_pos = 0;
int* offset_iter = *offset;
for (Range::iterator i = r.begin(); i != r.end(); ++i) {
- *offset_iter = arr_pos;
+ *offset_iter = arr_pos;
++offset_iter;
tmp_storage.clear();
@@ -1131,15 +1129,15 @@ void iMeshP_getAdjEntities( iMesh_Instance instance,
}
// get in_entity_set
- iMesh_isEntArrContained( instance,
- entity_set_handle,
+ iMesh_isEntArrContained( instance,
+ entity_set_handle,
*adj_entity_handles,
*adj_entity_handles_size,
in_entity_set,
in_entity_set_allocated,
in_entity_set_size,
err );
-
+
if (iBase_SUCCESS == *err) {
KEEP_ARRAY(adj_entity_handles);
KEEP_ARRAY(offset);
@@ -1155,9 +1153,9 @@ void iMeshP_initEntIter( iMesh_Instance instance,
iBase_EntityIterator* entity_iterator,
int *err )
{
- iMeshP_initEntArrIter( instance,
- partition_handle,
- part_handle,
+ iMeshP_initEntArrIter( instance,
+ partition_handle,
+ part_handle,
entity_set_handle,
requested_entity_type,
requested_entity_topology,
@@ -1177,13 +1175,13 @@ void iMeshP_initEntArrIter( iMesh_Instance instance,
int *err )
{
if (!entity_set_handle || entity_set_handle == part_handle) {
- iMesh_initEntArrIter( instance,
- part_handle,
- requested_entity_type,
+ iMesh_initEntArrIter( instance,
+ part_handle,
+ requested_entity_type,
requested_entity_topology,
requested_array_size,
0, // TODO: update this function for "resilient" arg
- entArr_iterator,
+ entArr_iterator,
err );
}
else {
@@ -1192,16 +1190,16 @@ void iMeshP_initEntArrIter( iMesh_Instance instance,
CHKERR(result,"Invalid entity set handle");
if (flags & MESHSET_ORDERED)
*entArr_iterator = new SetIntersectIter< std::vector<EntityHandle> >
- ( (iBase_EntityType)requested_entity_type,
- (iMesh_EntityTopology)requested_entity_topology,
- itaps_cast<EntityHandle>(entity_set_handle),
+ ( (iBase_EntityType)requested_entity_type,
+ (iMesh_EntityTopology)requested_entity_topology,
+ itaps_cast<EntityHandle>(entity_set_handle),
itaps_cast<EntityHandle>(part_handle),
requested_array_size );
else
*entArr_iterator = new SetIntersectIter< Range >
- ( (iBase_EntityType)requested_entity_type,
- (iMesh_EntityTopology)requested_entity_topology,
- itaps_cast<EntityHandle>(entity_set_handle),
+ ( (iBase_EntityType)requested_entity_type,
+ (iMesh_EntityTopology)requested_entity_topology,
+ itaps_cast<EntityHandle>(entity_set_handle),
itaps_cast<EntityHandle>(part_handle),
requested_array_size );
result = (*entArr_iterator)->reset( MOABI );
@@ -1209,7 +1207,7 @@ void iMeshP_initEntArrIter( iMesh_Instance instance,
delete *entArr_iterator;
CHKERR(result, "iMesh_initEntArrIter: ERROR getting entities of proper type or topology." );
RETURN(iBase_SUCCESS);
- }
+ }
}
void iMeshP_getEntOwnerPart( iMesh_Instance instance,
@@ -1217,7 +1215,7 @@ void iMeshP_getEntOwnerPart( iMesh_Instance instance,
const iBase_EntityHandle entity_handle,
iMeshP_Part *part_id,
int* err )
-{
+{
int junk1 = 1, junk2 = 1;
iMeshP_getEntOwnerPartArr( instance, partition_handle, &entity_handle, 1,
&part_id, &junk1, &junk2, err );
@@ -1233,9 +1231,9 @@ void iMeshP_getEntOwnerPartArr( iMesh_Instance instance,
int* err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
int id;
ALLOC_CHECK_ARRAY( part_ids, entity_handles_size );
ErrorCode rval = MB_SUCCESS;
@@ -1248,7 +1246,7 @@ void iMeshP_getEntOwnerPartArr( iMesh_Instance instance,
KEEP_ARRAY(part_ids);
RETURN(iBase_SUCCESS);
}
-
+
void iMeshP_isEntOwner( iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
const iMeshP_PartHandle part_handle,
@@ -1275,33 +1273,33 @@ void iMeshP_isEntOwnerArr( iMesh_Instance instance,
{
ErrorCode rval;
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
-
+
int id;
rval = pcomm->get_part_id( itaps_cast<EntityHandle>(part_handle), id );
CHKERR(rval,"error getting part id");
-
+
ALLOC_CHECK_ARRAY( is_owner, entity_handles_size );
*is_owner_size = entity_handles_size;
-
+
int owner;
for (int i = 0; i < entity_handles_size; ++i) {
rval = pcomm->get_owner( itaps_cast<EntityHandle>(entity_handles[i]), owner );
CHKERR(rval,"error getting owner");
(*is_owner)[i] = (owner == id);
}
-
+
KEEP_ARRAY(is_owner);
RETURN(iBase_SUCCESS);
}
void iMeshP_getEntStatus(iMesh_Instance instance,
/*in*/ const iMeshP_PartitionHandle partition_handle,
- /*in*/ const iMeshP_PartHandle part_handle,
- /*in*/ const iBase_EntityHandle entity_handle,
+ /*in*/ const iMeshP_PartHandle part_handle,
+ /*in*/ const iBase_EntityHandle entity_handle,
/*out*/ int* par_status, // Values=INTERNAL,BOUNDARY,GHOST
- int *err)
+ int *err)
{
int junk1 = 1, junk2 = 1;
iMeshP_getEntStatusArr( instance, partition_handle,
@@ -1312,30 +1310,30 @@ void iMeshP_getEntStatus(iMesh_Instance instance,
void iMeshP_getEntStatusArr(iMesh_Instance instance,
/*in*/ const iMeshP_PartitionHandle partition_handle,
- /*in*/ const iMeshP_PartHandle /*part_handle*/,
- /*in*/ const iBase_EntityHandle *entity_handles,
- /*in*/ const int entity_handles_size,
+ /*in*/ const iMeshP_PartHandle /*part_handle*/,
+ /*in*/ const iBase_EntityHandle *entity_handles,
+ /*in*/ const int entity_handles_size,
/*inout*/ int** par_status, // Values=INTERNAL,BOUNDARY,GHOST
- /*inout*/ int* par_status_allocated,
- /*inout*/ int* par_status_size,
- int *err)
+ /*inout*/ int* par_status_allocated,
+ /*inout*/ int* par_status_size,
+ int *err)
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
std::vector<unsigned char> pstatus(entity_handles_size);
- ErrorCode result = MOABI->tag_get_data(pcomm->pstatus_tag(),
- itaps_cast<const EntityHandle*>(entity_handles),
+ ErrorCode result = MOABI->tag_get_data(pcomm->pstatus_tag(),
+ itaps_cast<const EntityHandle*>(entity_handles),
entity_handles_size,
- &pstatus[0]);
+ &pstatus[0]);
CHKERR(result,"error getting pstatus_tag");
ALLOC_CHECK_ARRAY( par_status, entity_handles_size );
for (int i = 0; i < entity_handles_size; i++) {
- if (!pstatus[i])
+ if (!pstatus[i])
(*par_status)[i] = iMeshP_INTERNAL;
- else if (pstatus[i] & PSTATUS_GHOST)
+ else if (pstatus[i] & PSTATUS_GHOST)
(*par_status)[i] = iMeshP_GHOST;
else if (pstatus[i] & PSTATUS_INTERFACE)
(*par_status)[i] = iMeshP_BOUNDARY;
@@ -1352,11 +1350,11 @@ void iMeshP_getNumCopies( iMesh_Instance instance,
int *err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
int ids[MAX_SHARING_PROCS];
- ErrorCode rval = pcomm->get_sharing_parts(
+ ErrorCode rval = pcomm->get_sharing_parts(
itaps_cast<EntityHandle>(entity_handle),
ids, *num_copies_ent );
CHKERR(rval,"ParallelComm::get_sharing_parts failed");
@@ -1372,13 +1370,13 @@ void iMeshP_getCopyParts( iMesh_Instance instance,
int *err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
int ids[MAX_SHARING_PROCS], num_ids;
- ErrorCode rval = pcomm->get_sharing_parts(
+ ErrorCode rval = pcomm->get_sharing_parts(
itaps_cast<EntityHandle>(entity_handle),
- ids, num_ids );
+ ids, num_ids );
CHKERR(rval,"ParallelComm::get_sharing_parts failed");
ALLOC_CHECK_ARRAY_NOFAIL( part_ids, num_ids );
std::copy( ids, ids+num_ids, *part_ids );
@@ -1388,25 +1386,25 @@ void iMeshP_getCopyParts( iMesh_Instance instance,
void iMeshP_getCopies( iMesh_Instance instance,
- const iMeshP_PartitionHandle partition_handle,
- const iBase_EntityHandle entity_handle,
- iMeshP_Part **part_ids,
- int *part_ids_allocated,
- int *part_ids_size,
- iBase_EntityHandle **copies_entity_handles,
- int *copies_entity_handles_allocated,
- int *copies_entity_handles_size,
+ const iMeshP_PartitionHandle partition_handle,
+ const iBase_EntityHandle entity_handle,
+ iMeshP_Part **part_ids,
+ int *part_ids_allocated,
+ int *part_ids_size,
+ iBase_EntityHandle **copies_entity_handles,
+ int *copies_entity_handles_allocated,
+ int *copies_entity_handles_size,
int *err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
int ids[MAX_SHARING_PROCS], num_ids;
EntityHandle handles[MAX_SHARING_PROCS];
- ErrorCode rval = pcomm->get_sharing_parts(
+ ErrorCode rval = pcomm->get_sharing_parts(
itaps_cast<EntityHandle>(entity_handle),
- ids, num_ids, handles );
+ ids, num_ids, handles );
CHKERR(rval,"ParallelComm::get_sharing_parts failed");
ALLOC_CHECK_ARRAY_NOFAIL( part_ids, num_ids );
ALLOC_CHECK_ARRAY_NOFAIL( copies_entity_handles, num_ids );
@@ -1425,19 +1423,19 @@ void iMeshP_getCopyOnPart( iMesh_Instance instance,
int *err )
{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
int ids[MAX_SHARING_PROCS], num_ids;
EntityHandle handles[MAX_SHARING_PROCS];
- ErrorCode rval = pcomm->get_sharing_parts(
+ ErrorCode rval = pcomm->get_sharing_parts(
itaps_cast<EntityHandle>(entity_handle),
- ids, num_ids, handles );
+ ids, num_ids, handles );
CHKERR(rval,"ParallelComm::get_sharing_parts failed");
int idx = std::find( ids, ids+num_ids, part_id ) - ids;
if (idx == num_ids)
RETURN (iBase_FAILURE);
-
+
*copy_entity_handle = itaps_cast<iBase_EntityHandle>(handles[idx]);
RETURN (iBase_SUCCESS);
}
@@ -1449,16 +1447,16 @@ void iMeshP_getOwnerCopy( iMesh_Instance instance,
iMeshP_Part *owner_part_id,
iBase_EntityHandle *owner_entity_handle,
int *err )
-{
+{
ParallelComm* pcomm = PCOMM;
- if (!pcomm)
+ if (!pcomm)
ERROR (iBase_FAILURE,"No PComm");
int id;
EntityHandle h;
- ErrorCode rval = pcomm->get_owning_part(
+ ErrorCode rval = pcomm->get_owning_part(
itaps_cast<EntityHandle>(entity_handle),
- id, &h );
+ id, &h );
CHKERR(rval,"Failed to get owner");
*owner_part_id = id;
*owner_entity_handle = itaps_cast<iBase_EntityHandle>(h);
@@ -1538,7 +1536,7 @@ void iMeshP_exchEntArrToPartsAll( iMesh_Instance instance,
rval = pcomm->get_part_owner(target_part_ids[i], target_p);
CHKERR(rval,"ParallelComm::get_part_owner failed");
- std::vector<unsigned int>::iterator vit =
+ std::vector<unsigned int>::iterator vit =
std::find(exchange_procs.begin(), exchange_procs.end(), target_p);
if (vit == exchange_procs.end()) {
ind = exchange_procs.size();
@@ -1546,14 +1544,14 @@ void iMeshP_exchEntArrToPartsAll( iMesh_Instance instance,
exchange_ents.push_back(new Range);
}
else ind = vit - exchange_procs.begin();
-
+
exchange_ents[ind]->insert(itaps_cast<EntityHandle>(entity_handles[i]));
}
-
+
std::vector<MPI_Request> recv_ent_reqs, recv_remoteh_reqs;
rval = pcomm->exchange_owned_meshs(exchange_procs, exchange_ents,
recv_ent_reqs, recv_remoteh_reqs,
- true);
+ true);
CHKERR(rval,"ParallelComm::exchange_owned_meshs failed");
// delete exchange list
@@ -1610,13 +1608,13 @@ void iMeshP_syncMeshAll( iMesh_Instance instance,
{
FIXME; // for now we only sync vertex coordinates
// need to update ParallelComm::update_shared_mesh to fix this
-
+
ParallelComm* pcomm = PCOMM;
ErrorCode rval = pcomm->update_shared_mesh();
CHKERR(rval,"update failed");
RETURN (iBase_SUCCESS);
}
-
+
void iMeshP_pushTags( iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
iBase_TagHandle source_tag,
@@ -1631,23 +1629,23 @@ void iMeshP_pushTags( iMesh_Instance instance,
types.first = types.second = mb_topology_table[entity_topo];
else if (entity_type != iBase_ALL_TYPES)
types = CN::TypeDimensionMap[entity_type];
- else {
- types.first = MBVERTEX;
- types.second = MBENTITYSET;
- --types.second;
+ else {
+ types.first = MBVERTEX;
+ types.second = MBENTITYSET;
+ --types.second;
}
-
+
std::vector<Tag> src_tags(1, itaps_cast<Tag>(source_tag));
std::vector<Tag> dst_tags(1, itaps_cast<Tag>(dest_tag));
-
+
ErrorCode rval;
Range entities;
for (EntityType t = types.first; t <= types.second; ++t) {
- rval = MOABI->get_entities_by_type_and_tag( 0, t, &src_tags[0], 0, 1,
+ rval = MOABI->get_entities_by_type_and_tag( 0, t, &src_tags[0], 0, 1,
entities, Interface::UNION );
CHKERR(rval,"error getting entities to push");
}
-
+
rval = pcomm->exchange_tags( src_tags, dst_tags, entities );
CHKERR(rval,"tag data communication failed");
RETURN (iBase_SUCCESS);
@@ -1665,7 +1663,7 @@ void iMeshP_pushTagsEnt( iMesh_Instance instance,
Range range;
const EntityHandle* ents = itaps_cast<const EntityHandle*>(entities);
std::copy( ents, ents+entities_size, range_inserter(range) );
-
+
std::vector<Tag> src_tags(1, itaps_cast<Tag>(source_tag));
std::vector<Tag> dst_tags(1, itaps_cast<Tag>(dest_tag));
ParallelComm* pcomm = PCOMM;
@@ -1705,7 +1703,7 @@ void iMeshP_createGhostEntsAll( iMesh_Instance instance,
if (include_copies) {
FIXME; RETURN(iBase_NOT_SUPPORTED);
}
-
+
ParallelComm* pcomm = PCOMM;
ErrorCode rval;
if (iBase_ALL_TYPES == ghost_dim) ghost_dim = -1;
@@ -1743,7 +1741,7 @@ static bool append_option( std::string& opt,
// options can't have a separator in them; XXX work around this
return iBase_INVALID_ARGUMENT;
}
-
+
// search for the required option
std::string search(&sep,1);
search += option;
@@ -1755,10 +1753,10 @@ static bool append_option( std::string& opt,
break;
i = end;
}
-
+
// if string already contained the option, just return
if (i != std::string::npos) return false;
-
+
opt += search;
if (default_value) {
opt += "=";
@@ -1788,7 +1786,7 @@ void iMeshP_loadAll( iMesh_Instance instance,
rval = MOABI->create_meshset( MESHSET_SET, partitioning );
CHKERR(rval,"failed to create meshset");
}
-
+
// get ParallelComm for partition
MPI_Comm default_comm = MPI_COMM_WORLD;
ParallelComm* pcomm = ParallelComm::get_pcomm( MOABI, partitioning, &default_comm );
@@ -1807,11 +1805,11 @@ void iMeshP_loadAll( iMesh_Instance instance,
id << pcomm->get_id();
append_option( opt, "moab:PCOMM", id.str().c_str() );
}
-
+
// load the file
iMesh_load( instance, entity_set_handle, name, opt.c_str(), err, name_len, opt.length() );
if (*err) return;
-
+
rval = pcomm->collective_sync_partition();
CHKERR(rval,"collective sync failed");
RETURN(iBase_SUCCESS);
@@ -1829,7 +1827,7 @@ void iMeshP_saveAll( iMesh_Instance instance,
EntityHandle set;
set = entity_set_handle ? itaps_cast<EntityHandle>(entity_set_handle)
: itaps_cast<EntityHandle>(partition);
- iMesh_save( instance, itaps_cast<iBase_EntitySetHandle>(set),
+ iMesh_save( instance, itaps_cast<iBase_EntitySetHandle>(set),
name, options, err, name_len, options_len );
}
@@ -1837,42 +1835,42 @@ void iMeshP_saveAll( iMesh_Instance instance,
-// Map from processes to parts:
+// Map from processes to parts:
// Given a partition handle and a process rank,
// return the part handles owned by the process.
// COMMUNICATION: None++.
void iMeshP_getPartsOnRank(iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
/*in*/ const int /*rank*/,
- /*inout*/ iMeshP_PartHandle **part_handles,
- /*inout*/ int *part_handles_allocated,
- /*out*/ int *part_handles_size,
- int *err)
+ /*inout*/ iMeshP_PartHandle **part_handles,
+ /*inout*/ int *part_handles_allocated,
+ /*out*/ int *part_handles_size,
+ int *err)
{
EntityHandle p = itaps_cast<EntityHandle>(partition_handle);
ParallelComm *pc = ParallelComm::get_pcomm(MOABI, p);
if (!pc) RETURN(iBase_ERROR_MAP[MB_FAILURE]);
Range part_sets;
-
+
ALLOC_CHECK_ARRAY_NOFAIL( part_handles, pc->partition_sets().size() );
Range::iterator rit;
int i;
- for (i = 0, rit = pc->partition_sets().begin();
+ for (i = 0, rit = pc->partition_sets().begin();
rit != pc->partition_sets().end(); rit++, i++)
(*part_handles)[i] = itaps_cast<iMeshP_PartHandle>(*rit);
-
+
RETURN(iBase_SUCCESS);
}
-
+
void iMeshP_getPartsArrOnRank(iMesh_Instance instance,
const iMeshP_PartitionHandle partition_handle,
/*in*/ const int *rank,
/*in*/ const int rank_size,
- /*inout*/ iMeshP_PartHandle **part_handles,
- /*inout*/ int *part_handles_allocated,
- /*out*/ int *part_handles_size,
- int *err)
+ /*inout*/ iMeshP_PartHandle **part_handles,
+ /*inout*/ int *part_handles_allocated,
+ /*out*/ int *part_handles_size,
+ int *err)
{
EntityHandle p = itaps_cast<EntityHandle>(partition_handle);
ParallelComm *pc = ParallelComm::get_pcomm(MOABI, p);
@@ -1881,7 +1879,7 @@ void iMeshP_saveAll( iMesh_Instance instance,
if (rank[0] != (int)pc->proc_config().proc_rank() || rank_size > 1) {
RETURN(iBase_ERROR_MAP[MB_NOT_IMPLEMENTED]);
}
-
+
iMeshP_getPartsOnRank(instance, partition_handle, rank[0],
part_handles, part_handles_allocated, part_handles_size,
err);
@@ -1901,7 +1899,7 @@ void iMeshP_assignGlobalIds(
const int largest_dim_only,
const int parallel,
const int owned_only,
- int *err)
+ int *err)
{
ErrorCode rval;
@@ -1911,7 +1909,7 @@ void iMeshP_assignGlobalIds(
rval = MB_FAILURE;
CHKERR(rval,"failed to get partition set");
}
-
+
EntityHandle this_mb_set = itaps_cast<EntityHandle>(this_set);
// get ParallelComm for partition
@@ -1922,15 +1920,15 @@ void iMeshP_assignGlobalIds(
}
rval = pcomm->assign_global_ids(this_mb_set, dimension, start_id, largest_dim_only, parallel, owned_only);
-
+
RETURN(rval);
}
-
+
void iMeshP_getCommunicator(
iMesh_Instance instance,
int *fcomm,
MPI_Comm *ccomm,
- int *err)
+ int *err)
{
*ccomm = MPI_Comm_f2c(*fcomm);
RETURN(iBase_SUCCESS);
https://bitbucket.org/fathomteam/moab/commits/3b58c039facd/
Changeset: 3b58c039facd
Branch: None
User: vijaysm
Date: 2013-07-16 21:06:19
Summary: Removing a redundant Internals.hpp include
Affected #: 1 file
diff --git a/test/range_test.cpp b/test/range_test.cpp
index 3d2547e..42e6fa7 100644
--- a/test/range_test.cpp
+++ b/test/range_test.cpp
@@ -1,5 +1,4 @@
#include "moab/Range.hpp"
-#include "Internals.hpp"
#include "TestUtil.hpp"
using namespace moab;
https://bitbucket.org/fathomteam/moab/commits/e7ac38e67d02/
Changeset: e7ac38e67d02
Branch: None
User: vijaysm
Date: 2013-07-16 21:06:19
Summary: Adding all the default tools to MOAB CMakeLists.
Affected #: 2 files
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 56e115c..cb2439d 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -7,8 +7,12 @@
option ( MOAB_BUILD_MBPERF "Build the MOAB performance tool?" ON )
option ( MOAB_BUILD_QVDUAL "Build the qvdual application?" OFF )
option ( MOAB_BUILD_MBSIZE "Build the mbsize tool?" ON )
+ option ( MOAB_BUILD_MBMEM "Build the mbmem tool?" ON )
option ( MOAB_BUILD_MBSKIN "Build the mbskin tool?" ON )
+ option ( MOAB_BUILD_MBDEPTH "Build the mbdepth tool?" ON )
option ( MOAB_BUILD_MBTAGPROP "Build the mbtagprop tool?" ON )
+ option ( MOAB_BUILD_MBGSETS "Build the mbgsets tool?" ON )
+ option ( MOAB_BUILD_MBCONVERT "Build the mbconvert tool?" ON )
option ( MOAB_BUILD_SPHEREDECOMP "Build the sphere decomposition tool?" ON )
option ( MOAB_BUILD_MBSURFPLOT "Build the mbsurfplot application?" ON )
option ( MOAB_BUILD_MBZOLTAN "Build the mbzoltan partitioner?" OFF )
@@ -21,8 +25,82 @@
# target_link_libraries( MOAB MOABrefiner )
endif ( MOAB_USE_MPI AND MPI_FOUND )
+ include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${MOAB_SOURCE_DIR}/src
+ ${MOAB_SOURCE_DIR}/src/parallel
+ )
+
+# target_link_libraries( mbcoupler MOAB iMesh )
+# if ( MOAB_USE_MPI )
+# target_link_libraries( mbcoupler MOABpar )
+# endif ( MOAB_USE_MPI )
+ # MBSIZE
+ if ( MOAB_BUILD_MBSIZE )
+ add_executable( mbsize size.cpp measure.cpp)
+ set_source_files_properties( size.cpp measure.cpp COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbsize MOAB )
+ endif ( MOAB_BUILD_MBSIZE )
+
+ # MBMEM
+ if ( MOAB_BUILD_MBMEM )
+ add_executable( mbmem mbmem.cpp)
+ set_source_files_properties( mbmem.cpp COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbmem MOAB )
+ endif ( MOAB_BUILD_MBMEM )
+
+ # MBCONVERT
+ if ( MOAB_BUILD_MBCONVERT )
+ add_executable( mbconvert convert.cpp)
+ set_source_files_properties( convert.cpp COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbconvert MOAB )
+ endif ( MOAB_BUILD_MBCONVERT )
+
+ # MBMEM
+ if ( MOAB_BUILD_MBDEPTH )
+ add_executable( mbdepth depth.cpp)
+ set_source_files_properties( depth.cpp COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbdepth MOAB )
+ endif ( MOAB_BUILD_MBDEPTH )
+
+ # MBSKIN
+ if ( MOAB_BUILD_MBSKIN )
+ add_executable( mbskin skin.cpp)
+ set_source_files_properties( skin.cpp COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbskin MOAB )
+ endif ( MOAB_BUILD_MBSKIN )
+
+ # MBSURFPLOT
+ if ( MOAB_BUILD_MBSURFPLOT )
+ add_executable( mbsurfplot surfplot.cpp )
+ set_source_files_properties( surfplot.cpp COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbsurfplot MOAB )
+ endif ( MOAB_BUILD_MBSURFPLOT )
+
+ # MBTAGPROP
+ if ( MOAB_BUILD_MBTAGPROP )
+ add_executable( mbtagprop parse.cpp propagate_tags.cpp )
+ set_source_files_properties( parse.cpp propagate_tags.cpp COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbtagprop MOAB )
+ endif ( MOAB_BUILD_MBTAGPROP )
+
+ # MBGSETS
+ if ( MOAB_BUILD_MBGSETS )
+ add_executable( mbgsets gsets.cc )
+ set_source_files_properties( gsets.cc COMPILE_FLAGS "${MOAB_DEFINES}" )
+ target_link_libraries( mbgsets MOAB )
+ endif ( MOAB_BUILD_MBGSETS )
+
+ # CGM
+ if ( MOAB_BUILD_DAGMC )
+ add_subdirectory( dagmc )
+ endif ( MOAB_BUILD_DAGMC )
# Zoltan
+ if ( MOAB_BUILD_MBZOLTAN )
+ add_subdirectory( mbzoltan )
+ endif ( MOAB_BUILD_MBZOLTAN )
+
# Chaco
# QVDual
diff --git a/tools/mbcoupler/CMakeLists.txt b/tools/mbcoupler/CMakeLists.txt
index 9c4279d..f7a440b 100644
--- a/tools/mbcoupler/CMakeLists.txt
+++ b/tools/mbcoupler/CMakeLists.txt
@@ -1,10 +1,10 @@
include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}
${MOAB_SOURCE_DIR}/src
${MOAB_SOURCE_DIR}/src/parallel
${MOAB_SOURCE_DIR}/src/moab/point_locater/lotte
${MOAB_SOURCE_DIR}/itaps
${MOAB_SOURCE_DIR}/itaps/imesh
- ${MOAB_SOURCE_DIR}/tools/mbcoupler
)
set( MBCOUPLER_SRCS
https://bitbucket.org/fathomteam/moab/commits/50de785d4e1d/
Changeset: 50de785d4e1d
Branch: None
User: vijaysm
Date: 2013-07-16 21:06:19
Summary: Adding more tests - Need to look at Makefile.am to see how to run each test with default params.
Affected #: 1 file
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index e6c2d94..2cbea6c 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,6 +1,7 @@
add_subdirectory(io)
include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}
${MOAB_SOURCE_DIR}/src
${MOAB_SOURCE_DIR}/src/parallel
)
@@ -40,6 +41,17 @@
target_link_libraries( bsp_tree_test MOAB )
add_test( TestBSPTree ${EXECUTABLE_OUTPUT_PATH}/bsp_tree_test )
+ add_executable( geomutiltests GeomUtilTests.cpp )
+ set_source_files_properties( GeomUtilTests.cpp
+ COMPILE_FLAGS "-DTEST ${MOAB_DEFINES}" )
+ target_link_libraries( geomutiltests MOAB )
+ add_test( TestGeomUtil ${EXECUTABLE_OUTPUT_PATH}/geomutiltests )
+
+ add_executable( range_test range_test.cpp )
+ set_source_files_properties( range_test.cpp
+ COMPILE_FLAGS "-DTEST ${MOAB_DEFINES}" )
+ target_link_libraries( range_test MOAB )
+ add_test( TestRange ${EXECUTABLE_OUTPUT_PATH}/range_test )
if ( MOAB_USE_MPI AND MPI_FOUND )
@@ -77,6 +89,14 @@ if ( MOAB_USE_MPI AND MPI_FOUND )
${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} 2 ${MPIEXEC_PREFLAGS}
${EXECUTABLE_OUTPUT_PATH}/parallel_unit_tests ${MPIEXEC_POSTFLAGS} ${MOAB_SOURCE_DIR}/parallel/ptest.cub )
set_source_files_properties( parallel/parallel_unit_tests.cpp
- COMPILE_FLAGS "-DIS_BUILDING_MB ${MOAB_DEFINES}" )
+ COMPILE_FLAGS "-DTEST ${MOAB_DEFINES}" )
+
+ add_executable ( pcomm_serial parallel/pcomm_serial.cpp )
+ target_link_libraries( pcomm_serial MOAB )
+ add_test( TestPCommSerial
+ ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} 1 ${MPIEXEC_PREFLAGS}
+ ${EXECUTABLE_OUTPUT_PATH}/pcomm_serial ${MPIEXEC_POSTFLAGS} ${MOAB_SOURCE_DIR}/parallel/ptest.cub )
+ set_source_files_properties( parallel/pcomm_serial.cpp
+ COMPILE_FLAGS "-DTEST ${MOAB_DEFINES}" )
endif ( MOAB_USE_MPI AND MPI_FOUND )
https://bitbucket.org/fathomteam/moab/commits/120c3a37f327/
Changeset: 120c3a37f327
Branch: None
User: vijaysm
Date: 2013-07-16 21:06:20
Summary: More fixes for src, tools and test after further testing.
Affected #: 7 files
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 007d501..6b18435 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,3 +1,9 @@
+# check if we are using MPI - reset compilers accordingly
+if ( MOAB_USE_MPI )
+ SET(CMAKE_CXX_COMPILER ${MPI_CXX_COMPILER})
+ SET(CMAKE_C_COMPILER ${MPI_C_COMPILER})
+endif ( MOAB_USE_MPI )
+
project( MOAB )
cmake_minimum_required( VERSION 2.4 )
diff --git a/itaps/imesh/CMakeLists.txt b/itaps/imesh/CMakeLists.txt
index 5bba3f0..e98c0b1 100644
--- a/itaps/imesh/CMakeLists.txt
+++ b/itaps/imesh/CMakeLists.txt
@@ -35,11 +35,21 @@ set ( MOAB_IMESH_SRCS
${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h )
include_directories(
+ ${MOAB_BINARY_DIR}
+ ${MOAB_BINARY_DIR}/src
+ ${MOAB_BINARY_DIR}/src/parallel
${MOAB_SOURCE_DIR}/src
${MOAB_SOURCE_DIR}/itaps
${MOAB_SOURCE_DIR}/itaps/imesh
)
+if ( MOAB_USE_HDF5 AND HDF5_FOUND )
+ include_directories(
+ ${HDF5_INCLUDE_DIRECTORIES}
+ ${MOAB_SOURCE_DIR}/src/io/mhdf/include
+ )
+endif ( MOAB_USE_HDF5 AND HDF5_FOUND )
+
if ( MOAB_USE_MPI AND MPI_FOUND )
LIST ( APPEND MOAB_IMESH_SRCS
iMeshP_MOAB.cpp
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c5467e6..5733795 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -94,9 +94,12 @@
ReaderWriterSet.cpp
)
include_directories(
+ ${MOAB_BINARY_DIR}
${MOAB_SOURCE_DIR}/src
+ ${MOAB_BINARY_DIR}/src
${MOAB_SOURCE_DIR}/src/io
${MOAB_SOURCE_DIR}/src/parallel
+ ${MOAB_BINARY_DIR}/src/parallel
${MOAB_SOURCE_DIR}/src/moab/point_locater/lotte
)
@@ -112,6 +115,7 @@
io/NCHelperEuler.cpp
io/NCHelperFV.cpp
io/NCHelperHOMME.cpp
+ io/NCHelperMPAS.cpp
SpectralMeshTool.cpp
)
include_directories(
@@ -119,6 +123,7 @@
)
endif ( NetCDF_FOUND )
+ MESSAGE("Adding HDF5 includes: ${HDF5_FOUND}")
if ( HDF5_FOUND )
set ( MOAB_DEFINES "${MOAB_DEFINES} -DHDF5_FILE" )
check_function_exists( H5Pset_fapl_mpio MOAB_HDF_HAVE_PARALLEL )
@@ -131,6 +136,7 @@
io/WriteHDF5.cpp
)
+ MESSAGE("Adding HDF5 includes: ${HDF5_INCLUDE_DIRECTORIES}")
include_directories(
${HDF5_INCLUDE_DIRECTORIES}
io/mhdf/include
@@ -173,4 +179,6 @@
autoconf_header( ${MOAB_SOURCE_DIR}/src/moab/Version.h.in ${MOAB_BINARY_DIR}/src/moab/Version.h )
autoconf_header( ${MOAB_SOURCE_DIR}/src/moab/EntityHandle.hpp.in ${MOAB_BINARY_DIR}/src/moab/EntityHandle.hpp )
+ autoconf_header( ${MOAB_SOURCE_DIR}/src/parallel/moab_mpi_config.h.in ${MOAB_BINARY_DIR}/src/parallel/moab_mpi_config.h )
+ autoconf_header( ${MOAB_SOURCE_DIR}/src/FCDefs.h.in ${MOAB_BINARY_DIR}/MOAB_FCDefs.h )
file( WRITE ${MOAB_BINARY_DIR}/src/MBCN_protos.h "" )
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 2cbea6c..4689130 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,8 +1,11 @@
add_subdirectory(io)
include_directories(
+ ${MOAB_BINARY_DIR}
+ ${MOAB_BINARY_DIR}/src
${CMAKE_CURRENT_SOURCE_DIR}
${MOAB_SOURCE_DIR}/src
+ ${MOAB_BINARY_DIR}/src/parallel
${MOAB_SOURCE_DIR}/src/parallel
)
@@ -73,14 +76,15 @@ if ( MOAB_USE_MPI AND MPI_FOUND )
${EXECUTABLE_OUTPUT_PATH}/mbparallelcomm_test ${MPIEXEC_POSTFLAGS} -3 ${MOAB_SOURCE_DIR}/parallel/ptest.cub )
if ( MOAB_USE_HDF )
- add_executable( mhdf_parallel parallel/mhdf_parallel.c )
include_directories(
+ ${HDF5_INCLUDE_DIR}
${MOAB_SOURCE_DIR}/src/io/mhdf/include
)
+ add_executable( mhdf_parallel parallel/mhdf_parallel.c )
target_link_libraries( mhdf_parallel MOAB MOABpar mhdf )
add_test( TestMHDFParallel ${EXECUTABLE_OUTPUT_PATH}/mhdf_parallel )
set_source_files_properties( parallel/mhdf_parallel.c
- COMPILE_FLAGS "-DIS_BUILDING_MB ${MOAB_DEFINES}" )
+ COMPILE_FLAGS "-DTEST ${MOAB_DEFINES}" )
endif ( MOAB_USE_HDF )
add_executable ( parallel_unit_tests parallel/parallel_unit_tests.cpp )
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index cb2439d..fd85330 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -21,20 +21,17 @@
if ( MOAB_USE_MPI AND MPI_FOUND )
add_subdirectory( refiner )
-# include_directories( ${MOAB_SOURCE_DIR}/refiner )
-# target_link_libraries( MOAB MOABrefiner )
endif ( MOAB_USE_MPI AND MPI_FOUND )
include_directories(
- ${CMAKE_CURRENT_SOURCE_DIR}
+ ${MOAB_BINARY_DIR}
+ ${MOAB_BINARY_DIR}/src
+ ${MOAB_BINARY_DIR}/src/parallel
${MOAB_SOURCE_DIR}/src
${MOAB_SOURCE_DIR}/src/parallel
+ ${CMAKE_CURRENT_SOURCE_DIR}
)
-# target_link_libraries( mbcoupler MOAB iMesh )
-# if ( MOAB_USE_MPI )
-# target_link_libraries( mbcoupler MOABpar )
-# endif ( MOAB_USE_MPI )
# MBSIZE
if ( MOAB_BUILD_MBSIZE )
add_executable( mbsize size.cpp measure.cpp)
diff --git a/tools/mbcoupler/CMakeLists.txt b/tools/mbcoupler/CMakeLists.txt
index f7a440b..da316c8 100644
--- a/tools/mbcoupler/CMakeLists.txt
+++ b/tools/mbcoupler/CMakeLists.txt
@@ -5,6 +5,9 @@ include_directories(
${MOAB_SOURCE_DIR}/src/moab/point_locater/lotte
${MOAB_SOURCE_DIR}/itaps
${MOAB_SOURCE_DIR}/itaps/imesh
+ ${MOAB_BINARY_DIR}
+ ${MOAB_BINARY_DIR}/src
+ ${MOAB_BINARY_DIR}/src/parallel
)
set( MBCOUPLER_SRCS
diff --git a/tools/refiner/CMakeLists.txt b/tools/refiner/CMakeLists.txt
index 230b34c..459e6f6 100644
--- a/tools/refiner/CMakeLists.txt
+++ b/tools/refiner/CMakeLists.txt
@@ -12,8 +12,12 @@ set ( MOAB_REFINER_SRCS
)
include_directories(
+ ${MOAB_BINARY_DIR}
+ ${MOAB_BINARY_DIR}/src
+ ${MOAB_BINARY_DIR}/src/parallel
${MOAB_SOURCE_DIR}/src
${MOAB_SOURCE_DIR}/src/parallel
+ ${CMAKE_CURRENT_SOURCE_DIR}
)
set_source_files_properties(
https://bitbucket.org/fathomteam/moab/commits/eb2ec27c4193/
Changeset: eb2ec27c4193
Branch: None
User: vijaysm
Date: 2013-07-16 21:09:28
Summary: Correct the HDF5_INCLUDE_DIR instead of HDF5_INCLUDE_DIRECTORIES.
Affected #: 2 files
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 5733795..280b203 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -123,7 +123,6 @@
)
endif ( NetCDF_FOUND )
- MESSAGE("Adding HDF5 includes: ${HDF5_FOUND}")
if ( HDF5_FOUND )
set ( MOAB_DEFINES "${MOAB_DEFINES} -DHDF5_FILE" )
check_function_exists( H5Pset_fapl_mpio MOAB_HDF_HAVE_PARALLEL )
@@ -136,9 +135,8 @@
io/WriteHDF5.cpp
)
- MESSAGE("Adding HDF5 includes: ${HDF5_INCLUDE_DIRECTORIES}")
include_directories(
- ${HDF5_INCLUDE_DIRECTORIES}
+ ${HDF5_INCLUDE_DIR}
io/mhdf/include
)
add_subdirectory( io/mhdf )
diff --git a/src/io/mhdf/CMakeLists.txt b/src/io/mhdf/CMakeLists.txt
index dadd650..5aeee9b 100644
--- a/src/io/mhdf/CMakeLists.txt
+++ b/src/io/mhdf/CMakeLists.txt
@@ -1,7 +1,7 @@
project(mhdf)
include_directories(
- ${HDF5_INCLUDE_DIRECTORIES}
+ ${HDF5_INCLUDE_DIR}
)
set ( mhdf_LIB_SRCS
https://bitbucket.org/fathomteam/moab/commits/8e84acd21a22/
Changeset: 8e84acd21a22
Branch: None
User: vijaysm
Date: 2013-07-19 05:38:06
Summary: Merge remote-tracking branch 'origin/master'
Affected #: 0 files
https://bitbucket.org/fathomteam/moab/commits/f88e972dafb9/
Changeset: f88e972dafb9
Branch: None
User: vijaysm
Date: 2013-07-19 05:50:35
Summary: Merge branch 'upmaster'
Affected #: 5 files
diff --git a/doc/DG/figure1.jpg b/doc/DG/figure1.jpg
new file mode 100644
index 0000000..e102a97
Binary files /dev/null and b/doc/DG/figure1.jpg differ
diff --git a/doc/DG/figure2.jpg b/doc/DG/figure2.jpg
new file mode 100644
index 0000000..7e8b199
Binary files /dev/null and b/doc/DG/figure2.jpg differ
diff --git a/doc/DG/figure3.jpg b/doc/DG/figure3.jpg
new file mode 100644
index 0000000..8810bfc
Binary files /dev/null and b/doc/DG/figure3.jpg differ
diff --git a/doc/DG/moabDG.h b/doc/DG/moabDG.h
index 1724436..51281b1 100644
--- a/doc/DG/moabDG.h
+++ b/doc/DG/moabDG.h
@@ -1,20 +1,20 @@
-/*! \page developerguide Developer's Guide (MOAB 4.6)
-
- \subpage dg-contents
-
- \subpage dg-figures
-
+/*! \page developerguide Developer's Guide (MOAB 4.6)
+
+ \subpage dg-contents
+
+ \subpage dg-figures
+
*/
-
-/*! \page dg-figures List of Figures
-
- \ref figure1
-
- \ref figure2
-
- \ref figure3
+
+/*! \page dg-figures List of Figures
+
+ \ref figure1
+
+ \ref figure2
+
+ \ref figure3
*/
-
+
/*! \page dg-contents Table of Contents
@@ -29,6 +29,10 @@
\section sequence EntitySequence & SequenceData
\subsection figure1 Figure 1: EntitySequences For One SequenceData
+ <img src="../../DG/figure1.jpg">
+
+ \ref dg-figures "List of Figures"
+
The <I>SequenceData</I> class manages as set of arrays of per-entity values. Each
<I>SequenceData</I> has a start and end handle denoting the block of entities for which
the arrays contain data. The arrays managed by a <I>SequenceData</I> instance are
@@ -104,6 +108,9 @@ enforces the following four rules on its contained data:
.
\subsection figure2 Figure 2: SequenceManager and Related Classes
+ <img src="../../DG/figure2.jpg">
+
+ \ref dg-figures "List of Figures"
The first three rules are required for the validity of the data model. The
fourth rule avoids unnecessary inefficiency. It is implemented by merging such
@@ -153,111 +160,114 @@ such as allocating the correct <I>EntitySequence</I> subtype for a given <I>Enti
<sup>2</sup>Given rule four for the data managed by a <I>TypeSequenceManager</I>, any <I>SequenceData</I> for which all handles are allocated will be referenced by exactly one <I>EntitySequence</I>.
\ref dg-contents "Top"
-
- \section s-mesh Structured Mesh
-
-Structured mesh storage is implemented using subclasses of <I>SequenceData</I>:
-<I>ScdElementData</I> and <I>ScdVertexData</I>. The <I>StructuredElementSeq</I> class is
-used to access the structured element connectivity. A standard <I>VertexSequence</I>
-instance is used to access the ScdVertexData because the vertex data storage
-is the same as for unstructured mesh.
-
- \ref dg-contents "Top"
-
- \section sets Entity Sets
-
-- MeshSetSequence
-
-The <I>MeshSetSequence</I> class is the same as most other subclasses of <I>EntitySequence</I>
-in that it utilizes SequenceData to store its data. A single array in the <I>SequenceData</I>
-is used to store instances of the MeshSet class, one per allocated <I>EntityHandle</I>.
-<I>SequenceData</I> allocates all of its managed arrays using malloc and free as
-simple arrays of bytes. <I>MeshSetSequence</I> does in-place construction and de-
-struction of <I>MeshSet</I> instances within that array. This is similar to what is
-done by <I>std::vector</I> and other container classes that may own more storage
-than is required at a given time for contained objects.
-
-- MeshSet
-
- \subsection figure3 Figure 3: SequenceManager and Related Classes
-
-The <I>MeshSet</I> class is used to represent a single entity set instance in MOAB.
-The class is optimized to minimize storage (further possible improvements in
-storage size are discussed later.)
-
-Figure 3 shows the memory layout of an instance of the <I>MeshSet</I> class.
-The flags member holds the set creation bit flags: <I>MESHSET_TRACK_OWNER</I>,
-<I>MESHSET_SET</I>, and <I>MESHSET_ORDERED</I>. The presence of the <I>MESHSET_TRACK_OWNER</I>
-indicates that reverse links from the contained entities back to the owning set
-should be maintained in the adjacency list of each entity. The <I>MESHSET_SET</I>
-and <I>MESHSET_ORDERED</I> bits are mutually exclusive, and as such most code only
-tests for the <I>MESHSET_ORDERED</I>, meaning that in practice the <I>MESHSET_SET</I> bit is
-ignored. <I>MESHSET_ORDERED</I> indicates that the set may contain duplicate handles
-and that the order that the handles are added to the set should be preserved.
-In practice, such sets are stored as a simple list of handles. <I>MESHSET_SET</I> (or in
-practice, the lack of <I>MESHSET_ORDERED</I>) indicates that the order of the handles
-need not be preserved and that the set may not contain duplicate handles. Such
-sets are stored in a sorted range-compacted format similar to that of the Range
-class.
-
-The memory for storing contents, parents, and children are each handled in
-the same way. The data in the class is composed of a 2-bit ‘size’ field and two
-values, where the two values may either be two handles or two pointers. The size
-bit-fields are grouped together to reduce the required amount of memory. If the
-numerical value of the 2-bit size field is 0 then the corresponding list is empty.
-If the 2-bit size field is either 1 or 2, then the contents of the corresponding list
-are stored directly in the corresponding two data fields of the MeshSet object.
-If the 2-bit size field has a value of 3 (11 binary), then the corresponding two
-data fields store the begin and end pointers of an external array of handles.
-The number of handles in the external array can be obtained by taking the
-difference of the start and end pointers. Note that unlike <I>std::vector</I>, we
-do not store both an allocated and used size. We store only the ‘used’ size
-and call std::realloc whenever the used size is modified, thus we rely on the
-std::malloc implementation in the standard C library to track ‘allocated’ size
-for us. In practice this performs well but does not return memory to the ‘system’
-when lists shrink (unless they shrink to zero). This overall scheme could exhibit
-poor performance if the size of one of the data lists in the set frequently changes
-between less than two and more than two handles, as this will result in frequent
-releasing and re-allocating of the memory for the corresponding array.
-
-If the <I>MESHSET_ORDERED</I> flag is not present, then the set contents list (parent
-and child lists are unaffected) is stored in a range-compacted format. In this
-format the number of handles stored in the array is always a multiple of two.
-Each consecutive pair of handles indicate the start and end, inclusive, of a range
-of handles contained in the set. All such handle range pairs are stored in sorted
-order and do not overlap. Nor is the end handle of one range ever one less than
-the start handle of the next. All such ‘adjacent’ range pairs are merged into a
-single pair. The code for insertion and removal of handles from range-formatted
-set content lists is fairly complex. The implementation will guarantee that a
-given call to insert entities into a range or remove entities from a range is never
-worse than O(ln n) + O(m + n), where ‘n’ is the number of handles to insert
-and ‘m’ is the number of handles already contained in the set. So it is generally
-much more efficient to build Ranges of handles to insert (and remove) and call
-MOAB to insert (or remove) the entire list at once rather than making may
-calls to insert (or remove) one or a few handles from the contents of a set.
-The set storage could probably be further minimized by allowing up to six
-handles in one of the lists to be elided. That is, as there are six potential ‘slots’
-in the MeshSet object then if two of the lists are empty it should be possible to store up to six values of the remaining list directly in the MeshSet object.
-However, the additional runtime cost of such complexity could easily outweigh
-any storage advantage. Further investigation into this has not been done because
-the primary motivation for the storage optimization was to support binary trees.
-
-Another possible optimization of storage would be to remove the <I>MeshSet</I>
-object entirely and instead store the data in a ‘blocked’ format. The corre-
-sponding <I>SequenceData</I> would contain four arrays: flags, parents, children, and
-contents instead of a single array of <I>MeshSet</I> objects. If this were done then
-no storage need ever be allocated for parent or child links if none of the sets
-in a <I>SequenceData</I> has parent or child links. The effectiveness of the storage
-reduction would depend greatly on how sets get grouped into <I>SequenceDatas</I>.
-This alternate storage scheme might also allow for better cache utilization as it
-would group like data together. It is often the case that application code that
-is querying the contents of one set will query the contents of many but never
-query the parents or children of any set. Or that an application will query only
-parent or child links of a set without every querying other set properties. The
-downside of this solution is that it makes the implementation a little less mod-
-ular and maintainable because the existing logic contained in the <I>MeshSet</I> class
-would need to be spread throughout the <I>MeshSetSequence</I> class.
-
- \ref dg-contents "Top"
+
+ \section s-mesh Structured Mesh
+
+Structured mesh storage is implemented using subclasses of <I>SequenceData</I>:
+<I>ScdElementData</I> and <I>ScdVertexData</I>. The <I>StructuredElementSeq</I> class is
+used to access the structured element connectivity. A standard <I>VertexSequence</I>
+instance is used to access the ScdVertexData because the vertex data storage
+is the same as for unstructured mesh.
+
+ \ref dg-contents "Top"
+
+ \section sets Entity Sets
+
+- MeshSetSequence
+
+The <I>MeshSetSequence</I> class is the same as most other subclasses of <I>EntitySequence</I>
+in that it utilizes SequenceData to store its data. A single array in the <I>SequenceData</I>
+is used to store instances of the MeshSet class, one per allocated <I>EntityHandle</I>.
+<I>SequenceData</I> allocates all of its managed arrays using malloc and free as
+simple arrays of bytes. <I>MeshSetSequence</I> does in-place construction and de-
+struction of <I>MeshSet</I> instances within that array. This is similar to what is
+done by <I>std::vector</I> and other container classes that may own more storage
+than is required at a given time for contained objects.
+
+- MeshSet
+
+ \subsection figure3 Figure 3: SequenceManager and Related Classes
+ <img src="../../DG/figure3.jpg">
+
+ \ref dg-figures "List of Figures"
+
+The <I>MeshSet</I> class is used to represent a single entity set instance in MOAB.
+The class is optimized to minimize storage (further possible improvements in
+storage size are discussed later.)
+
+Figure 3 shows the memory layout of an instance of the <I>MeshSet</I> class.
+The flags member holds the set creation bit flags: <I>MESHSET_TRACK_OWNER</I>,
+<I>MESHSET_SET</I>, and <I>MESHSET_ORDERED</I>. The presence of the <I>MESHSET_TRACK_OWNER</I>
+indicates that reverse links from the contained entities back to the owning set
+should be maintained in the adjacency list of each entity. The <I>MESHSET_SET</I>
+and <I>MESHSET_ORDERED</I> bits are mutually exclusive, and as such most code only
+tests for the <I>MESHSET_ORDERED</I>, meaning that in practice the <I>MESHSET_SET</I> bit is
+ignored. <I>MESHSET_ORDERED</I> indicates that the set may contain duplicate handles
+and that the order that the handles are added to the set should be preserved.
+In practice, such sets are stored as a simple list of handles. <I>MESHSET_SET</I> (or in
+practice, the lack of <I>MESHSET_ORDERED</I>) indicates that the order of the handles
+need not be preserved and that the set may not contain duplicate handles. Such
+sets are stored in a sorted range-compacted format similar to that of the Range
+class.
+
+The memory for storing contents, parents, and children are each handled in
+the same way. The data in the class is composed of a 2-bit ‘size’ field and two
+values, where the two values may either be two handles or two pointers. The size
+bit-fields are grouped together to reduce the required amount of memory. If the
+numerical value of the 2-bit size field is 0 then the corresponding list is empty.
+If the 2-bit size field is either 1 or 2, then the contents of the corresponding list
+are stored directly in the corresponding two data fields of the MeshSet object.
+If the 2-bit size field has a value of 3 (11 binary), then the corresponding two
+data fields store the begin and end pointers of an external array of handles.
+The number of handles in the external array can be obtained by taking the
+difference of the start and end pointers. Note that unlike <I>std::vector</I>, we
+do not store both an allocated and used size. We store only the ‘used’ size
+and call std::realloc whenever the used size is modified, thus we rely on the
+std::malloc implementation in the standard C library to track ‘allocated’ size
+for us. In practice this performs well but does not return memory to the ‘system’
+when lists shrink (unless they shrink to zero). This overall scheme could exhibit
+poor performance if the size of one of the data lists in the set frequently changes
+between less than two and more than two handles, as this will result in frequent
+releasing and re-allocating of the memory for the corresponding array.
+
+If the <I>MESHSET_ORDERED</I> flag is not present, then the set contents list (parent
+and child lists are unaffected) is stored in a range-compacted format. In this
+format the number of handles stored in the array is always a multiple of two.
+Each consecutive pair of handles indicate the start and end, inclusive, of a range
+of handles contained in the set. All such handle range pairs are stored in sorted
+order and do not overlap. Nor is the end handle of one range ever one less than
+the start handle of the next. All such ‘adjacent’ range pairs are merged into a
+single pair. The code for insertion and removal of handles from range-formatted
+set content lists is fairly complex. The implementation will guarantee that a
+given call to insert entities into a range or remove entities from a range is never
+worse than O(ln n) + O(m + n), where ‘n’ is the number of handles to insert
+and ‘m’ is the number of handles already contained in the set. So it is generally
+much more efficient to build Ranges of handles to insert (and remove) and call
+MOAB to insert (or remove) the entire list at once rather than making may
+calls to insert (or remove) one or a few handles from the contents of a set.
+The set storage could probably be further minimized by allowing up to six
+handles in one of the lists to be elided. That is, as there are six potential ‘slots’
+in the MeshSet object then if two of the lists are empty it should be possible to store up to six values of the remaining list directly in the MeshSet object.
+However, the additional runtime cost of such complexity could easily outweigh
+any storage advantage. Further investigation into this has not been done because
+the primary motivation for the storage optimization was to support binary trees.
+
+Another possible optimization of storage would be to remove the <I>MeshSet</I>
+object entirely and instead store the data in a ‘blocked’ format. The corre-
+sponding <I>SequenceData</I> would contain four arrays: flags, parents, children, and
+contents instead of a single array of <I>MeshSet</I> objects. If this were done then
+no storage need ever be allocated for parent or child links if none of the sets
+in a <I>SequenceData</I> has parent or child links. The effectiveness of the storage
+reduction would depend greatly on how sets get grouped into <I>SequenceDatas</I>.
+This alternate storage scheme might also allow for better cache utilization as it
+would group like data together. It is often the case that application code that
+is querying the contents of one set will query the contents of many but never
+query the parents or children of any set. Or that an application will query only
+parent or child links of a set without every querying other set properties. The
+downside of this solution is that it makes the implementation a little less mod-
+ular and maintainable because the existing logic contained in the <I>MeshSet</I> class
+would need to be spread throughout the <I>MeshSetSequence</I> class.
+
+ \ref dg-contents "Top"
*/
-
+
diff --git a/test/io/read_nc.cpp b/test/io/read_nc.cpp
index 558958c..caff6a4 100644
--- a/test/io/read_nc.cpp
+++ b/test/io/read_nc.cpp
@@ -13,6 +13,7 @@ static const char example_fv[] = "/io/fv26x46x72.t.3.nc";
#ifdef USE_MPI
#include "moab_mpi.h"
+#include "moab/ParallelComm.hpp"
#endif
// CAM-EUL
@@ -40,7 +41,7 @@ int main(int argc, char* argv[])
if (fail)
return 1;
#else
- argv[0] = argv[argc - argc]; // to remove the warnings in serial mode about unused variables
+ argv[0] = argv[argc - argc]; // To remove the warnings in serial mode about unused variables
#endif
result += RUN_TEST(test_read_eul_all);
@@ -58,7 +59,8 @@ int main(int argc, char* argv[])
#ifdef USE_MPI
fail = MPI_Finalize();
- if (fail) return 1;
+ if (fail)
+ return 1;
#endif
return result;
@@ -76,7 +78,7 @@ void test_read_eul_all()
rval = mb.load_file(example_eul, NULL, opts.c_str());
CHECK_ERR(rval);
- // check for proper tags
+ // Check for proper tags
Tag Ttag0, Ttag1, coordTag;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_ERR(rval);
@@ -100,13 +102,58 @@ void test_read_eul_onevar()
rval = mb.load_file(example_eul, NULL, opts.c_str());
CHECK_ERR(rval);
- // check for proper tags
+ // Check for proper tags
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_ERR(rval);
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_ERR(rval);
+
+ // Check values of tag T0 (first level) at some strategically chosen places below
+ int rank = 0;
+ int procs = 1;
+#ifdef USE_MPI
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ rank = pcomm->proc_config().proc_rank();
+ procs = pcomm->proc_config().proc_size();
+#endif
+
+ const double eps = 0.0001;
+ double val[4 * 26];
+
+ if (1 == procs) {
+ Range global_quads;
+ rval = mb.get_entities_by_type(0, MBQUAD, global_quads);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)4608, global_quads.size());
+
+ EntityHandle gloabl_quad_ents[] = {global_quads[0], global_quads[4559], global_quads[48], global_quads[4607]};
+ rval = mb.tag_get_data(Ttag0, &gloabl_quad_ents[0], 4, val);
+
+ CHECK_REAL_EQUAL(252.8529, val[0], eps); // First global quad
+ CHECK_REAL_EQUAL(205.3905, val[26], eps); // 4660th global quad
+ CHECK_REAL_EQUAL(252.7116, val[52], eps); // 49th global quad
+ CHECK_REAL_EQUAL(200.6828, val[78], eps); // Last global quad
+ }
+ else if (2 == procs) {
+ Range local_quads;
+ rval = mb.get_entities_by_type(0, MBQUAD, local_quads);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)2304, local_quads.size());
+
+ EntityHandle local_quad_ents[] = {local_quads[0], local_quads[2303]};
+ rval = mb.tag_get_data(Ttag0, &local_quad_ents[0], 2, val);
+
+ if (0 == rank) {
+ CHECK_REAL_EQUAL(252.8529, val[0], eps); // First local quad, first global quad
+ CHECK_REAL_EQUAL(205.3905, val[26], eps); // Last local quad, 4660th global quad
+ }
+ else if (1 == rank) {
+ CHECK_REAL_EQUAL(252.7116, val[0], eps); // First local quad, 49th global quad
+ CHECK_REAL_EQUAL(200.6828, val[26], eps); // Last local quad, last global quad
+ }
+ }
}
void test_read_eul_onetimestep()
@@ -121,7 +168,7 @@ void test_read_eul_onetimestep()
rval = mb.load_file(example_eul, NULL, opts.c_str());
CHECK_ERR(rval);
- // check for proper tags
+ // Check for proper tags
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
@@ -135,7 +182,7 @@ void test_read_eul_nomesh()
Core moab;
Interface& mb = moab;
- // need a set for nomesh to work right
+ // Need a set for nomesh to work right
EntityHandle set;
ErrorCode rval = mb.create_meshset(MESHSET_SET, set);
CHECK_ERR(rval);
@@ -148,7 +195,7 @@ void test_read_eul_nomesh()
rval = mb.load_file(example_eul, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_ERR(rval);
@@ -156,12 +203,12 @@ void test_read_eul_nomesh()
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
- // now read 2nd timestep with nomesh option
+ // Now read 2nd timestep with nomesh option
opts = orig + std::string(";VARIABLE=T;TIMESTEP=1;NOMESH");
rval = mb.load_file(example_eul, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_ERR(rval);
}
@@ -171,7 +218,7 @@ void test_read_eul_novars()
Core moab;
Interface& mb = moab;
- // need a set for nomesh to work right
+ // Need a set for nomesh to work right
EntityHandle set;
ErrorCode rval = mb.create_meshset(MESHSET_SET, set);
CHECK_ERR(rval);
@@ -188,7 +235,7 @@ void test_read_eul_novars()
rval = mb.load_file(example_eul, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
@@ -203,12 +250,12 @@ void test_read_eul_novars()
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
- // now read 2nd timestep with nomesh option
+ // Now read 2nd timestep with nomesh option
opts = orig + std::string(";VARIABLE=T;TIMESTEP=1;NOMESH");
rval = mb.load_file(example_eul, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_ERR(rval);
}
@@ -225,7 +272,7 @@ void test_read_fv_all()
rval = mb.load_file(example_fv, NULL, opts.c_str());
CHECK_ERR(rval);
- // check for proper tags
+ // Check for proper tags
Tag Ttag0, Ttag1, coordTag;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_ERR(rval);
@@ -233,7 +280,7 @@ void test_read_fv_all()
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_ERR(rval);
- rval=mb.tag_get_handle("COORDS", 3, MB_TYPE_DOUBLE, coordTag);
+ rval = mb.tag_get_handle("COORDS", 3, MB_TYPE_DOUBLE, coordTag);
CHECK_ERR(rval);
}
@@ -249,7 +296,7 @@ void test_read_fv_onevar()
rval = mb.load_file(example_fv, NULL, opts.c_str());
CHECK_ERR(rval);
- // check for proper tags
+ // Check for proper tags
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_ERR(rval);
@@ -270,7 +317,7 @@ void test_read_fv_onetimestep()
rval = mb.load_file(example_fv, NULL, opts.c_str());
CHECK_ERR(rval);
- // check for proper tags
+ // Check for proper tags
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
@@ -284,7 +331,7 @@ void test_read_fv_nomesh()
Core moab;
Interface& mb = moab;
- // need a set for nomesh to work right
+ // Need a set for nomesh to work right
EntityHandle set;
ErrorCode rval = mb.create_meshset(MESHSET_SET, set);
CHECK_ERR(rval);
@@ -297,7 +344,7 @@ void test_read_fv_nomesh()
rval = mb.load_file(example_fv, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_ERR(rval);
@@ -305,12 +352,12 @@ void test_read_fv_nomesh()
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
- // now read 2nd timestep with nomesh option
+ // Now read 2nd timestep with nomesh option
opts = orig + std::string(";VARIABLE=T;TIMESTEP=1;NOMESH");
rval = mb.load_file(example_fv, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_ERR(rval);
}
@@ -320,7 +367,7 @@ void test_read_fv_novars()
Core moab;
Interface& mb = moab;
- // need a set for nomesh to work right
+ // Need a set for nomesh to work right
EntityHandle set;
ErrorCode rval = mb.create_meshset(MESHSET_SET, set);
CHECK_ERR(rval);
@@ -337,7 +384,7 @@ void test_read_fv_novars()
rval = mb.load_file(example_fv, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
Tag Ttag0, Ttag1;
rval = mb.tag_get_handle("T0", 26, MB_TYPE_DOUBLE, Ttag0);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
@@ -352,12 +399,12 @@ void test_read_fv_novars()
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
- // now read 2nd timestep with nomesh option
+ // Now read 2nd timestep with nomesh option
opts = orig + std::string(";VARIABLE=T;TIMESTEP=1;NOMESH");
rval = mb.load_file(example_fv, &set, opts.c_str());
CHECK_ERR(rval);
- // check for proper tag
+ // Check for proper tag
rval = mb.tag_get_handle("T1", 26, MB_TYPE_DOUBLE, Ttag1);
CHECK_ERR(rval);
}
@@ -365,7 +412,7 @@ void test_read_fv_novars()
ErrorCode get_options(std::string& opts)
{
#ifdef USE_MPI
- // use parallel options
+ // Use parallel options
opts = std::string(";;PARALLEL=READ_PART;PARTITION_METHOD=SQIJ");
return MB_SUCCESS;
#else
https://bitbucket.org/fathomteam/moab/commits/a8d44de03339/
Changeset: a8d44de03339
Branch: None
User: vijaysm
Date: 2013-07-28 21:14:31
Summary: Merge remote-tracking branch 'upstream/master'
Affected #: 38 files
diff --git a/MeshFiles/unittest/125hex.g b/MeshFiles/unittest/125hex.g
index 6cf0433..2042583 100644
Binary files a/MeshFiles/unittest/125hex.g and b/MeshFiles/unittest/125hex.g differ
diff --git a/MeshFiles/unittest/1hex.g b/MeshFiles/unittest/1hex.g
index 3dcd658..0affc6e 100644
Binary files a/MeshFiles/unittest/1hex.g and b/MeshFiles/unittest/1hex.g differ
diff --git a/MeshFiles/unittest/1khex.g b/MeshFiles/unittest/1khex.g
index e6c9417..6b0a8fc 100644
Binary files a/MeshFiles/unittest/1khex.g and b/MeshFiles/unittest/1khex.g differ
diff --git a/MeshFiles/unittest/64bricks_512hex_256part.h5m b/MeshFiles/unittest/64bricks_512hex_256part.h5m
new file mode 100644
index 0000000..d7df13c
Binary files /dev/null and b/MeshFiles/unittest/64bricks_512hex_256part.h5m differ
diff --git a/configure.ac b/configure.ac
index b642189..24f425b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -451,13 +451,18 @@ AC_SUBST(PNETCDF_LIBS)
# Documentation
#################################################################################
AC_ARG_ENABLE([docs],
-[AC_HELP_STRING([--enable-docs],[Specify directory where Doxygen program is installed])
-AC_HELP_STRING([--disable-docs],[Do not generate API documentation (default)])],
- [ENABLE_DOCS="$enableval"],[ENABLE_DOCS=no] )
-if test "x$ENABLE_DOCS" != "xno"; then
- AC_PATH_PROG( [DOXYGEN], [doxygen], [no] )
+[AC_HELP_STRING([--enable-docs],[indicate to check for doxygen installation])],
+ [ENABLE_DOCS=yes],[ENABLE_DOCS=no] )
+AC_ARG_WITH([doxygen],
+[AC_HELP_STRING([--with-doxygen=DIR], [Specify directory where Doxygen program is installed. By default, it is /usr/bin])],
+[WITH_DOXYGEN="$withval"], [WITH_DOXYGEN=no])
+
+if test "x$WITH_DOXYGEN" != "xno"; then
+ AC_PATH_PROGS( [DOXYGEN], [doxygen], [no],[$WITH_DOXYGEN] )
+else
+ AC_PATH_PROGS( [DOXYGEN], [doxygen], [no],[$PATH])
fi
-if test "x$ENABLE_DOCS" != "xno"; then
+if test "x$ENABLE_DOCS" = "xyes"; then
if test "x$DOXYGEN" = "xno"; then
AC_MSG_ERROR("Doxygen executable not found.")
fi
@@ -1207,7 +1212,6 @@ AC_CONFIG_FILES([Makefile
tools/vtkMOABReaderNew/CMakeLists.txt
doc/Makefile
doc/user.dox
- doc/dev.dox
doc/config.tex
MeshFiles/Makefile
MeshFiles/unittest/Makefile
diff --git a/doc/DG/styleguide.h b/doc/DG/styleguide.h
new file mode 100644
index 0000000..3a13e45
--- /dev/null
+++ b/doc/DG/styleguide.h
@@ -0,0 +1,91 @@
+/*!
+ \page styleguide Coding Style Guide
+Code developed in %MOAB should follow the coding styles described here. Any deviations from this style
+guide will result in severe berating and other verbal abuse.
+
+\section dirstructure MOAB Directory Structure
+%MOAB source code is organized in the following directory structure: \n
+ - doc: Documentation is put here, along with the input file for Doxygen. Most %MOAB documentation is doxygen-processed.
+ - examples: Examples of %MOAB usage, both small and large. These programs are not meant to be used as unit tests, but
+ rather as further documentation on %MOAB usage.
+ - src : Mesh related source codes. It includes:
+ - io: %MOAB Input/Output classes.
+ - moab: %MOAB core classes.
+ - lotte: Computational Meshing basics.
+ - parallel: Parallel mesh computation, i/o data processing methods.
+ - test: All unit test programs should go below this directory.
+ Please put the unit tests into their related subdirectories based on the test's
+ purpose if possible.
+If you're designing a new class or other code for %MOAB and are not sure where to put it, try to find something similar
+and put it there. Otherwise, email the %MOAB email list for pointers. <em> In general, you should not need to create new
+subdirectories in the %MOAB source code, except when implementing a new algorithm with more than about 2 files.</em>
+
+\section sourcestyle Source Code Style and Best Practices
+%MOAB code should abide by the following general rules:
+ - Names:
+ - Class names should be in the CamelBack style, e.g. EdgeMesh or VertexMesher.
+ - Class member variables should be camelBack, e.g. EdgeMesh::schemeType; each member variable, e.g. int memberVariable,
+ should have set/get functions void member_variable(int newval) and int member_variable(), respectively.
+ - Enumeration values should be all capitalized, with underscores avoided if possible (the enumeration name indicates
+ the general purpose of the enumeration, so e.g. we use EQUAL, not EQUAL_MESH)
+ - Source code should not contain tabs or MS-DOS newlines; tabs and other indentations should be set to a width of 2 spaces.
+ For general tips on how to set your editor for this, see the %MOAB-dev discussion starting with <a href="https://lists.mcs.anl.gov/mailman/private/moab-dev/2011/000519.html">this message</a>.
+ - Each class header should be fully commented; that includes:
+ - A \\file comment block at the top of the file; DO NOT include things like Author and Date blocks; this stuff is available
+ from subversion if we really need to know.
+ - A \\class comment block, formatted like those in the %MOAB core classes. THE FIELDS AFTER THE CLASS NAME ARE VERY IMPORTANT,
+ as they tell developers how to include the class declaration in their code. This information goes into the "Detailed
+ Description" portion of the class documentation. This block should include any features or limitations of the class.
+ Eventually, we'll impose some standard boilerplate that each meshing class should use.
+ Until then, please keep this block to around a paragraph.
+ - Each function in both the public and private interfaces should be commented, INCLUDING ANY ARGUMENTS AND RETURN VALUES.
+ See the %MOAB classes for examples of how to format these comments. As a rule of thumb, your code should run through
+ Doxygen without generating any warnings; in fact, Doxygen is sometimes helpful at pointing out inconsistencies in your
+ class declaration.
+ - Developers should avoid using \#include in header files, as they propagate dependencies more widely than necessary. The only
+ cases where other includes are needed are to import the declaration for a parent class, and to declare types used as
+ non-pointer and non-reference function arguments. In most cases, a forward-declaration statement (e.g. 'class MKCore')
+ will suffice.
+ - Naming classes and other top-level constructs:
+ - No names should be added to the global namespace. Everything should be
+ in the MOAB namespace. An exception can be made for names with a static
+ scope declared in a .cpp file, but class member functions never have a
+ static scope.
+ - Names should be kept as private as possible. If declaring a struct or
+ utility class that is used internally by some other class, consider
+ defining it in the .cpp file of the main class or a separate header
+ only included in that .cpp file and using (if necessary) only forward
+ declarations (e.g. \c struct \c Point3D;) in the header file used
+ by other code. If that is not possible, then consider nesting the
+ definitions such that the scope of the name is limited to that of the
+ class using it.
+ - Any names introduced into the top-level MOAB namespace should be
+ sufficiently unique to avoid conflicts with other code. If you must
+ introduce a class to the top-level MOAB namespace, don't choose
+ an overly generic name like \c Point3D .
+ - Constants and Macros
+ - Don't use a pre-processor macro where a const variable or an inline or
+ template function will suffice.
+ There is almost never a benefit to the former over the latter with modern
+ compilers. Further, using macros bypasses typechecking that the compiler
+ would otherwise do for you and if used in headers, introduce names into
+ the global rather than MOAB namespace.
+ - Don't define constants that are already provided by standard libraries.
+ For example, use \c M_PI as defined in \c math.h rather than defining
+ your own constant.
+\section commits Making Repository Commits
+As a general rule, developers should update frequently, and commit changes often. However, the repository should always remain
+in a state where the code can be compiled. Most of the time, the code should also successfully execute "make check" run from the
+top-level directory. If you commit code that violates this principle, it should be your first priority to return the repository
+code to a compilable state, and your second priority to make sure "make check" runs without errors.
+
+Commits to the repository should also come with a non-trivial, useful, non-verbose log message. Oftentimes the best way to generate
+this message is to run 'commit -a', and include a comment on
+each file that changed, then Ctrl+O to write out, followed by 'Enter' and Ctrl+X. Many times it is helpful to state that 'make check runs successfully' at the end of the log message.
+Although it would be possible and many software projects do it, we prefer not to force successful execution of the test suite
+before every commit. Developers should make every effort to avoid having to impose this constraint, by running a make check
+before every commit.
+
+Top: \ref index
+
+ */
diff --git a/doc/MetaData/metadata.h b/doc/MetaData/metadata.h
index 5f53ecc..e02c112 100644
--- a/doc/MetaData/metadata.h
+++ b/doc/MetaData/metadata.h
@@ -9,23 +9,23 @@
/*! \page md-contents Table of Contents
- \ref meta-introduction
+ \ref meta-introduction
- \ref meta-conventions
+ \ref meta-conventions
- \ref meta-options
+ \ref meta-options
- \ref meta-references
+ \ref meta-references
- \ref appendixA
+ \ref appendixA
- \ref appendixB
+ \ref appendixB
- \ref appendixC
+ \ref appendixC
- \ref appendixD
+ \ref appendixD
- \ref appendixE
+ \ref appendixE
\section meta-introduction Introduction
@@ -37,7 +37,7 @@ Several specific tools are often used in concert with MOAB and bear special ment
The MOAB data model consists of the following basic types:
- <B>Entity</B>: The basic elements of topology, e.g. vertex, edge, triangle, tetrahedron, etc. MOAB represents all types in the finite element zoo, plus polygons and polyhedra.
-- <B>Entity Set</B>: An arbitrary collection of entities and other sets. Sets can have parent/child relations with other sets, and these relations are distinct from “contains” relations.
+- <B>Entity %Set</B>: An arbitrary collection of entities and other sets. Sets can have parent/child relations with other sets, and these relations are distinct from “contains” relations.
- <B>Interface</B>: The interface object through which other entities are accessed, in the sense of object-oriented-programming. iMesh refers to the interface as the “root” set.
- <B>Tag</B>: A piece of data that can be assigned a distinct value to each entity and entity set, and to the interface itself. Tags have a prescribed name, size in bytes, and data type; allowed data types are integer, double, entity handle, and byte or opaque.
.
@@ -100,7 +100,7 @@ In the geometric model, each FACE is bounded by zero or more EDGEs; other topolo
Geometric entities are sometimes assigned to application-specific groups. These groups are represented using entity sets, tagged with a “GROUP” tag whose value equals the group id. Group sets are “set”-type, and are not tracking sets. These sets contain the sets corresponding to geometric entities contained in the groups in the geometric model, as well as any mesh entities assigned to the group.
-<H3> Sense </H3>
+- <B> Sense </B>
A geometric face has a natural orientation, indicated by the direction of the normal to the face; similarly, edges have a natural orientation determined by the direction of the tangent. When faces bound regions, or edges bound faces, they do so with a sense; if a region includes a face with forward sense, that means the face's natural normal direction points out of the volume. If a face includes an edge with forward sense, that means that if one moves along the edge in the direction of its tangent, the material of the face is on the left hand side. The sense of a face (edge) with respect to a region (face) it bounds is stored using tags on the face (edge).
@@ -194,10 +194,16 @@ By default, all field data stored with the mesh is read with the mesh, and store
Indicates that no mesh should be read from the file. This option is used in conjunction with the “variable=” option, to read variables and assign them as tags to a previously-read mesh. If this option is used, applications should pass an entity set to the read function, which should contain the mesh previously read from the file.
-<H3>timestep=<step_number>[, ...] </H3>
+<H3>timestep=\<step_number\>[, ...] </H3>
Read the time step(s) with the specified step number(s), for the specified variable(s). Tag names for the variable(s) will be formed by appending the time step number to the variable name. Multiple time step numbers can be specified, separated from each other by commas.
+<H3>timeval=\<time_value\>[, ...]</H3>
+
+Read the time step number whose time value is equal to or greater than the specified time value, for the
+specified variable(s). Tag names for the variable(s) will be formed by appending the time step number
+to the variable name. Multiple time step values can be specified, separated from each other by commas.
+
\ref md-contents "Top"
\section meta-references References
@@ -246,19 +252,19 @@ CATEGORY/C*32.
GEOM_SENSE_2/EH[2],
GEOM_SENSE_N_ENTS/EH*N,
GEOM_SENSE_N_SENSES/I*N</td>
-<td>Sets contain mesh owned by that entity; parent/child links to bounded/bounding entities in geometric model</td>
+<td>%Sets contain mesh owned by that entity; parent/child links to bounded/bounding entities in geometric model</td></tr><tr><td>Material type</td><td>S</td><td>MATERIAL_SET/I</td>
-<td>Set contains entities or sets assigned a common material type</td>
+<td>%Set contains entities or sets assigned a common material type</td></tr><tr><td>Boundary condition</td><td>S</td><td>DIRICHLET_SET/I, NEUMANN_SET/I</td>
-<td>Set contains entities or sets assigned a particular boundary condition; neumann sets usually contain edges (2D) or faces (3D)</td>
+<td>%Set contains entities or sets assigned a particular boundary condition; neumann sets usually contain edges (2D) or faces (3D)</td></tr><tr><td>Parallel mesh constructs</td>
@@ -393,6 +399,36 @@ GEOM_SENSE_N_SENSES/I*N</td><td>E,S</td><td>Rank of other processor sharing this entity/set </td></tr>
+<tr>
+<td>__PARALLEL_SHARED_HANDLES</td>
+<td>H*NP</td>
+<td>E,S</td>
+<td>Handles of this entity/set on sharing processors </td>
+</tr>
+<tr>
+<td>__PARALLEL_SHARED_PROCS</td>
+<td>I*NP</td>
+<td>E,S</td>
+<td>Ranks of other processors sharing this entity/set </td>
+</tr>
+<tr>
+<td>__PARALLEL_STATUS</td>
+<td>C*1</td>
+<td>E,S</td>
+<td>Bit-field indicating various parallel information </td>
+</tr>
+<tr>
+<td>SPECTRAL_ORDER</td>
+<td>I</td>
+<td>S</td>
+<td> Order of a spectral mesh </td>
+</tr>
+<tr>
+<td>SPECTRAL_VERTICES</td>
+<td>H*(O+1)^d</td>
+<td>E</td>
+<td> Vertices comprising a spectral element, ordered lexicographically; here, O=value of SPECTRAL_ORDER tag. </td>
+</tr></table>
\ref md-contents "Top"
@@ -402,7 +438,7 @@ GEOM_SENSE_N_SENSES/I*N</td>
\subsection table3 Table 3: Translation between CCMIO options and MOAB tags.
<Table border="1"><tr>
-<th> Set Type</th>
+<th> %Set Type</th><th>CCMIO Construct</th><th>MOAB Tag Name, Type</th></tr>
@@ -576,6 +612,11 @@ size (for details on the partitioning method used, see the src/io/ReadNC.cpp sou
Mesh is put into the entity set provided to the load_file function. This entity set is also annotated with
various tags representing information read from the file. These tags are described in Table 5.
+Reading unstructured NC files in the HOMME format is also supported. Currently a trivial
+element-based partition is the only option for parallel reading. As the data is unstructured, it is necessary to have a connectivity file to define the vertex adjacencies. The default convention is to have a file called HommeMapping.nc in the same directory as the variable data file. If this convention is not followed, the connectivity file can be specified with the option -O CONN=”/path/to/connectivity.nc”. An example of mbconvert using the parallel read capability is shown below:
+
+<B> mpiexec -np 2 tools/mbconvert -O TRIVIAL_PARTITION -O DEBUG_IO=1 -o DEBUG_IO=9 -o PARALLEL=WRITE_PART /nfs2/hayes6/meshlab/homme_data/camrun.cam2.h0.0000-01-01-16200.nc output.h5m </B>
+
Several other things to note about reading climate data files into MOAB:
- Time-dependent variables: MOAB currently has no mechanism for time-dependent tags. Therefore, time-dependent variables are represented using one tag per timestep, with the tag name set as the variable name plus the timestep index. Thus, the first few timesteps for the variable TEMPERATURE would be represented in tags named TEMPERATURE0, TEMPERATURE1, etc.
- Cell- and face-centered variables: The climate data reader currently does not do cell- and face-
diff --git a/doc/dev.dox.in b/doc/dev.dox.in
deleted file mode 100644
index ecc27fa..0000000
--- a/doc/dev.dox.in
+++ /dev/null
@@ -1,842 +0,0 @@
- # Doxyfile 1.2.11.1
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
-#
-# All text after a hash (#) is considered a comment and will be ignored
-# The format is:
-# TAG = value [value, ...]
-# For lists items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
-
-#---------------------------------------------------------------------------
-# General configuration options
-#---------------------------------------------------------------------------
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
-# by quotes) that should identify the project.
-
-PROJECT_NAME = moab
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = dev
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Brazilian, Chinese, Croatian, Czech, Danish, Dutch, Finnish, French,
-# German, Hungarian, Italian, Japanese, Korean, Norwegian, Polish,
-# Portuguese, Romanian, Russian, Slovak, Slovene, Spanish and Swedish.
-
-OUTPUT_LANGUAGE = English
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = YES
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC = YES
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these class will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC = YES
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. It is allowed to use relative paths in the argument list.
-
-STRIP_FROM_PATH =
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a class diagram (in Html and LaTeX) for classes with base or
-# super classes. Setting the tag to NO turns the diagrams off.
-
-CLASS_DIAGRAMS = YES
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-
-SOURCE_BROWSER = YES
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = YES
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C and C++ comments will always remain visible.
-
-STRIP_CODE_COMMENTS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower case letters. If set to YES upper case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# users are adviced to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful is your file systems
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES = YES
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like the Qt-style comments (thus requiring an
-# explict @brief command for a brief description.
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# reimplements.
-
-INHERIT_DOCS = YES
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST = YES
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES =
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or define consist of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and defines in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C.
-# For instance some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = NO
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text.
-
-WARN_FORMAT =
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT = @top_srcdir@/src \
- @top_srcdir@/tools \
- @top_srcdir@/itaps \
- @top_srcdir@/examples
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-FILE_PATTERNS = *.cpp *.hpp *.h *.dox
-#FILE_PATTERNS = *.dox
-
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE = YES
-
-# The EXCLUDE tag can be used to specify files and/or directories that should
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-
-EXCLUDE =
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-
-EXCLUDE_PATTERNS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS =
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter><input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-
-INPUT_FILTER =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse.
-
-FILTER_SOURCE_FILES = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = YES
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT =
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header.
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet
-
-HTML_STYLESHEET =
-
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
-# files or namespaces will be aligned in HTML using tables. If set to
-# NO a bullet list will be used.
-
-HTML_ALIGN_MEMBERS = YES
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the Html help documentation and to the tree view.
-
-TOC_EXPAND = NO
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
-# top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it.
-
-DISABLE_INDEX = NO
-
-# This tag can be used to set the number of enum values (range [1..20])
-# that doxygen will group on one line in the generated HTML documentation.
-
-ENUM_VALUES_PER_LINE = 4
-
-# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
-# generated containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript and frames is required (for instance Netscape 4.0+
-# or Internet explorer 4.0+).
-
-GENERATE_TREEVIEW = YES
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH = 250
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT =
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, a4wide, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE = letter
-
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = NO
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX = NO
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimised for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT =
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assigments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT =
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION =
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_XML = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_PREDEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# in the INCLUDE_PATH (see below) will be search if a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed.
-
-PREDEFINED =
-
-# If the MACRO_EXPANSION and EXPAND_PREDEF_ONLY tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all function-like macros that are alone
-# on a line and do not end with a semicolon. Such function macros are typically
-# used for boiler-plate code, and will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::addtions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES tag can be used to specify one or more tagfiles.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT = NO
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# the CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = YES
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found on the path.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS =
-
-# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
-# (in pixels) of the graphs generated by dot. If a graph becomes larger than
-# this value, doxygen will try to truncate the graph, so that it fits within
-# the specified constraint. Beware that most browsers cannot cope with very
-# large images.
-
-MAX_DOT_GRAPH_WIDTH = 1024
-
-# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height
-# (in pixels) of the graphs generated by dot. If a graph becomes larger than
-# this value, doxygen will try to truncate the graph, so that it fits within
-# the specified constraint. Beware that most browsers cannot cope with very
-# large images.
-
-MAX_DOT_GRAPH_HEIGHT = 1024
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermedate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP = YES
-
-#---------------------------------------------------------------------------
-# Configuration::addtions related to the search engine
-#---------------------------------------------------------------------------
-
-# The SEARCHENGINE tag specifies whether or not a search engine should be
-# used. If set to NO the values of all tags below this one will be ignored.
-
-SEARCHENGINE = NO
-
-# The CGI_NAME tag should be the name of the CGI script that
-# starts the search engine (doxysearch) with the correct parameters.
-# A script with this name will be generated by doxygen.
-
-CGI_NAME =
-
-# The CGI_URL tag should be the absolute URL to the directory where the
-# cgi binaries are located. See the documentation of your http daemon for
-# details.
-
-CGI_URL =
-
-# The DOC_URL tag should be the absolute URL to the directory where the
-# documentation is located. If left blank the absolute path to the
-# documentation, with file:// prepended to it, will be used.
-
-DOC_URL =
-
-# The DOC_ABSPATH tag should be the absolute path to the directory where the
-# documentation is located. If left blank the directory on the local machine
-# will be used.
-
-DOC_ABSPATH =
-
-# The BIN_ABSPATH tag must point to the directory where the doxysearch binary
-# is installed.
-
-BIN_ABSPATH =
-
-# The EXT_DOC_PATHS tag can be used to specify one or more paths to
-# documentation generated for other projects. This allows doxysearch to search
-# the documentation for these projects as well.
-
-EXT_DOC_PATHS =
diff --git a/doc/user.dox.in b/doc/user.dox.in
index b9f7e0c..adb22ef 100644
--- a/doc/user.dox.in
+++ b/doc/user.dox.in
@@ -30,7 +30,7 @@ PROJECT_NUMBER =
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.
-OUTPUT_DIRECTORY = user
+OUTPUT_DIRECTORY =
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
diff --git a/examples/GetEntities.cpp b/examples/GetEntities.cpp
new file mode 100644
index 0000000..6c6a956
--- /dev/null
+++ b/examples/GetEntities.cpp
@@ -0,0 +1,60 @@
+/** @example GetEntities.cpp
+ * Description: Get entities and report non-vertex entity connectivity and vertex adjacencies.\n
+ * This example shows how to get connectivity and adjacencies.\n
+ *
+ * To run: ./GetEntities [meshfile]\n
+ * (default values can run if users don't specify a mesh file)
+ */
+
+#include "moab/Core.hpp"
+#include "moab/Range.hpp"
+#include "moab/CN.hpp"
+#include <iostream>
+
+using namespace moab;
+using namespace std;
+
+#ifndef MESH_DIR
+#define MESH_DIR "."
+#endif
+
+string test_file_name = string(MESH_DIR) + string("/1hex.g");
+
+int main(int argc, char **argv) {
+
+ if (argc > 1){
+ //user has input a mesh file
+ test_file_name = argv[1];
+ }
+ // instantiate & load a mesh from a file
+ Core *mb = new Core();
+ ErrorCode rval = mb->load_mesh(test_file_name.c_str());
+ if (MB_SUCCESS != rval) return 1;
+
+ Range ents;
+
+ // get all entities in the database
+ rval = mb->get_entities_by_handle(0, ents);
+ if (MB_SUCCESS != rval) return 1;
+
+ for (Range::iterator it = ents.begin(); it != ents.end(); it++) {
+ if (MBVERTEX == mb->type_from_handle(*it)) {
+ Range adjs;
+ rval = mb->get_adjacencies(&(*it), 1, 3, false, adjs);
+ if (MB_SUCCESS != rval) return 1;
+ cout << "Vertex " << mb->id_from_handle(*it) << " adjacencies:" << endl;
+ adjs.print();
+ }
+ else if (mb->type_from_handle(*it) < MBENTITYSET) {
+ const EntityHandle *connect;
+ int num_connect;
+ rval = mb->get_connectivity(*it, connect, num_connect);
+ if (MB_SUCCESS != rval) return 1;
+ cout << CN::EntityTypeName(mb->type_from_handle(*it)) << " " << mb->id_from_handle(*it) << " vertex connectivity is: ";
+ for (int i = 0; i < num_connect; i++) cout << mb->id_from_handle(connect[i]) << " ";
+ cout << endl;
+ }
+ }
+
+ return 0;
+}
diff --git a/examples/HelloMOAB.cpp b/examples/HelloMOAB.cpp
index ebab971..6129233 100644
--- a/examples/HelloMOAB.cpp
+++ b/examples/HelloMOAB.cpp
@@ -2,7 +2,7 @@
* Description: read a mesh, get the entities.\n
* HelloMOAB is a simple test file which is used to read meshes from VTK file and test how many entities there are.\n
*
- * To run: ./HelloMOAB <meshfile>\n
+ * To run: ./HelloMOAB [meshfile]\n
* (default values can run if users don't specify a mesh file)
*/
@@ -14,6 +14,10 @@
using namespace moab;
using namespace std;
+#ifndef MESH_DIR
+#define MESH_DIR "."
+#endif
+
string test_file_name = string(MESH_DIR) + string("/3k-tri-sphere.vtk");
int main( int argc, char** argv )
@@ -27,38 +31,32 @@ int main( int argc, char** argv )
}
//load the mesh from vtk file
ErrorCode rval = iface->load_mesh( test_file_name.c_str() );
- assert( rval == MB_SUCCESS);
+ assert(rval == MB_SUCCESS);
- //get verts entities
+ // get verts entities, by type
Range verts;
rval = iface->get_entities_by_type(0, MBVERTEX, verts);
- assert( rval == MB_SUCCESS);
- //get edge entities
+ assert(rval == MB_SUCCESS);
+ //get edge entities, by type
Range edges;
rval = iface->get_entities_by_type(0, MBEDGE, edges);
assert(rval == MB_SUCCESS);
- //get triangular entities
- Range tri;
- rval = iface->get_entities_by_type(0, MBTRI, tri);
- assert( rval == MB_SUCCESS);
-
- //get quad entities
- Range quads;
- rval = iface->get_entities_by_type(0, MBQUAD, quads);
+ // get faces, by dimension, so we stay generic to entity type
+ Range faces;
+ rval = iface->get_entities_by_dimension(0, 2, faces);
assert(rval == MB_SUCCESS);
- //get hex entities
- Range hex;
- rval = iface->get_entities_by_type(0, MBHEX, hex);
+ //get regions, by dimension, so we stay generic to entity type
+ Range elems;
+ rval = iface->get_entities_by_dimension(0, 3, elems);
assert(rval == MB_SUCCESS);
//output the number of entities
cout << "Number of vertices is " << verts.size() << endl;
cout << "Number of edges is " << edges.size() << endl;
- cout << "Number of triangular faces is " << tri.size() << endl;
- cout << "Number of quad faces is " << quads.size() << endl;
- cout << "Number of hex is " << hex.size() << endl;
+ cout << "Number of faces is " << faces.size() << endl;
+ cout << "Number of elements is " << elems.size() << endl;
return 0;
}
diff --git a/examples/HelloMoabPar.cpp b/examples/HelloMoabPar.cpp
deleted file mode 100644
index a5a71aa..0000000
--- a/examples/HelloMoabPar.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-/** @example HelloMoabPar.cpp \n
- * \brief Read mesh into MOAB in parallel \n
- * This example shows the simplest way of telling MOAB to read in parallel.
- *
- * -# Initialize MPI and get the rank and number of processors \n
- * -# Process arguments (file name and options for parallel read) \n
- * -# Initialize MOAB \n
- * -# Load a partitioned file in parallel; \n
- * -# retrieve shared entities on each processor \n
- * -# Filter owned entities among shared ones on each processor \n
- * -# Exchange ghost layers, and repeat the reports \n
- *
- * <b>To compile</b>: \n
- * make HelloMoabPar MOAB_DIR=<installdir> \n
- * <b>To run</b>: mpiexec -np 4 HelloMoabPar \n
- * (depending on your configuration, LD_LIBRARY_PATH may need to contain <hdf5>/lib folder)
- *
- */
-
-#include "moab/ParallelComm.hpp"
-#include "MBParallelConventions.h"
-#include "moab/Core.hpp"
-#include <iostream>
-
-using namespace moab;
-
-int main(int argc, char **argv)
-{
- MPI_Init(&argc, &argv);
-
- int nprocs, rank;
- MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
- std::string filename;
- std::string options;
- if (3 != argc)
- {
- if (rank == 0)
- {
- std::cout << "Usage: " << argv[0] << " <filename><options (separated by;)>\n ";
- }
- /* this file has a partition with 4 parts */
- filename = "../MeshFiles/unittest/disk.h5m";
- /* Options for reading
- * - read in parallel
- * - use PARALLEL_PARTITION tag
- * - resolve shared entities after reading
- */
- options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
- }
- else
- {
- filename = argv[1];
- options = argv[2];
- }
- if (rank == 0)
- std::cout << "reading file " << filename << "\n with options:" << options <<
- "\n on " << nprocs << " processors\n";
-
- // get MOAB instance and read the file with the specified options
- Interface *mbImpl = new Core;
- if (NULL == mbImpl) return 1;
- ErrorCode rval = mbImpl->load_file(filename.c_str(), 0, options.c_str());
- if (rval != MB_SUCCESS) return 1;
-
- // get the ParallelComm instance
- ParallelComm* pcomm = ParallelComm::get_pcomm(mbImpl, 0);
- MPI_Comm comm = pcomm->comm();
- if (0 == pcomm) return 1;
-
- // get all shared entities with other processors
- Range shared_ents;
- rval = pcomm->get_shared_entities(-1, // -1 means all other processors Range &shared_ents,
- shared_ents);
- if (rval != MB_SUCCESS) return 1;
- /* Among shared entities, get those owned by the current processor
- * For this, use a filter operation;
- * Each shared entity is owned by exactly one processor;
- * An entity could be simply-shared (with exactly one other processor) or
- * multi-shared.
- */
- Range owned_entities;
- rval = pcomm->filter_pstatus(shared_ents, // pass entities that we want to filter
- PSTATUS_NOT_OWNED, // status we are looking for
- PSTATUS_NOT, // operation applied ; so it will return owned entities (!not_owned = owned)
- -1, // this means all processors
- &owned_entities);
- if (rval != MB_SUCCESS) return 1;
- unsigned int nums[4]={0}; // to store the owned entities per dimension
- for (int i=0; i<3; i++)
- {
- nums[i]=(int)owned_entities.num_of_dimension(i);
- }
- int * rbuf;
- if (rank==0)
- rbuf = (int *)malloc(nprocs*4*sizeof(int));
- MPI_Gather( nums, 4, MPI_INT, rbuf, 4, MPI_INT, 0, comm);
- // print the stats gathered:
- if (rank == 0)
- {
- for (int i=0; i<nprocs; i++)
- {
- std::cout << " shared, owned entities on proc " << i << " :" << rbuf[4*i] << " verts, " <<
- rbuf[4*i+1] << " edges, " << rbuf[4*i+2] << " faces\n";
- }
-
- }
-
- /*
- * Now exchange 1 layer of ghost elements, using vertices as bridge
- * we could have done this as part of reading process, by passing an extra read option
- * ";PARALLEL_GHOSTS=2.0.1.0"
- */
- rval = pcomm->exchange_ghost_cells(2, // int ghost_dim,
- 0, // int bridge_dim,
- 1, //int num_layers,
- 0, //int addl_ents,
- true); // bool store_remote_handles);
- if (rval != MB_SUCCESS) return 1;
-
- // repeat the reports, after ghost exchange
- shared_ents.clear();
- owned_entities.clear();
- rval = pcomm->get_shared_entities(-1, // -1 means all other processors Range &shared_ents,
- shared_ents);
- if (rval != MB_SUCCESS) return 1;
- rval = pcomm->filter_pstatus(shared_ents,
- PSTATUS_NOT_OWNED,
- PSTATUS_NOT,
- -1,
- &owned_entities);
- if (rval != MB_SUCCESS) return 1;
-
- // find out how many shared entities of each dimension are owned on this processor
- for (int i=0; i<3; i++)
- nums[i]=(int)owned_entities.num_of_dimension(i);
-
- // gather the statistics on processor 0
- MPI_Gather( nums, 4, MPI_INT, rbuf, 4, MPI_INT, 0, comm);
- if (rank == 0)
- {
- std::cout << " \n\n After exchanging one ghost layer: \n";
- for (int i=0; i<nprocs; i++)
- {
- std::cout << " shared, owned entities on proc " << i << " :" << rbuf[4*i] << " verts, " <<
- rbuf[4*i+1] << " edges, " << rbuf[4*i+2] << " faces\n";
- }
- free(rbuf);
- }
- MPI_Finalize();
-
- return 0;
-}
diff --git a/examples/HelloParMOAB.cpp b/examples/HelloParMOAB.cpp
new file mode 100644
index 0000000..75daeda
--- /dev/null
+++ b/examples/HelloParMOAB.cpp
@@ -0,0 +1,103 @@
+/** @example HelloParMOAB.cpp \n
+ * \brief Read mesh into MOAB and resolve/exchange/report shared and ghosted entities \n
+ * <b>To run</b>: mpiexec -np 4 HelloParMOAB [filename]\n
+ *
+ */
+
+#include "moab/ParallelComm.hpp"
+#include "MBParallelConventions.h"
+#include "moab/Core.hpp"
+#include <iostream>
+
+using namespace moab;
+using namespace std;
+
+string test_file_name = string(MESH_DIR) + string("/64bricks_512hex_256part.h5m");
+
+int main(int argc, char **argv)
+{
+ MPI_Init(&argc, &argv);
+
+ string options;
+
+ // need option handling here for input filename
+ if (argc > 1){
+ //user has input a mesh file
+ test_file_name = argv[1];
+ }
+
+ options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
+
+ // get MOAB instance and read the file with the specified options
+ Interface *mb = new Core;
+ if (NULL == mb) return 1;
+ // get the ParallelComm instance
+ ParallelComm* pcomm = new ParallelComm(mb, MPI_COMM_WORLD);
+ int nprocs = pcomm->proc_config().proc_size(), rank = pcomm->proc_config().proc_rank();
+ MPI_Comm comm = pcomm->proc_config().proc_comm();
+
+ if (rank == 0)
+ cout << "Reading file " << test_file_name << "\n with options: " << options << endl
+ << " on " << nprocs << " processors\n";
+
+ ErrorCode rval = mb->load_file(test_file_name.c_str(), 0, options.c_str());
+ if (rval != MB_SUCCESS) return 1;
+
+ Range shared_ents;
+ // get entities shared with all other processors
+ rval = pcomm->get_shared_entities(-1, shared_ents);
+ if (rval != MB_SUCCESS) return 1;
+
+ // filter shared entities with not not_owned, which means owned
+ Range owned_entities;
+ rval = pcomm->filter_pstatus(shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities);
+ if (rval != MB_SUCCESS) return 1;
+
+ unsigned int nums[4]={0}; // to store the owned entities per dimension
+ for (int i=0; i<4; i++) nums[i]=(int)owned_entities.num_of_dimension(i);
+ vector<int> rbuf(nprocs*4, 0);
+ MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, MPI_COMM_WORLD);
+ // print the stats gathered:
+ if (rank == 0) {
+ for (int i=0; i<nprocs; i++)
+ cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
+ rbuf[4*i+1] << " edges, " << rbuf[4*i+2] << " faces, " << rbuf[4*i+3] << " elements" << endl;
+ }
+
+ // Now exchange 1 layer of ghost elements, using vertices as bridge
+ // (we could have done this as part of reading process, using the PARALLEL_GHOSTS read option)
+ rval = pcomm->exchange_ghost_cells(3, // int ghost_dim,
+ 0, // int bridge_dim,
+ 1, //int num_layers,
+ 0, //int addl_ents,
+ true); // bool store_remote_handles);
+ if (rval != MB_SUCCESS) return 1;
+
+ // repeat the reports, after ghost exchange
+ shared_ents.clear();
+ owned_entities.clear();
+ rval = pcomm->get_shared_entities(-1, shared_ents);
+ if (rval != MB_SUCCESS) return 1;
+ rval = pcomm->filter_pstatus(shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities);
+ if (rval != MB_SUCCESS) return 1;
+
+ // find out how many shared entities of each dimension are owned on this processor
+ for (int i=0; i<4; i++)
+ nums[i]=(int)owned_entities.num_of_dimension(i);
+
+ // gather the statistics on processor 0
+ MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
+ if (rank == 0)
+ {
+ cout << " \n\n After exchanging one ghost layer: \n";
+ for (int i=0; i<nprocs; i++)
+ {
+ cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
+ rbuf[4*i+1] << " edges, " << rbuf[4*i+2] << " faces, " << rbuf[4*i+3] << " elements" << endl;
+ }
+ }
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/examples/SetsNTags.cpp b/examples/SetsNTags.cpp
new file mode 100644
index 0000000..e2071c1
--- /dev/null
+++ b/examples/SetsNTags.cpp
@@ -0,0 +1,77 @@
+/** @example SetsNTags.cpp
+ * Description: Get the sets representing materials and Dirichlet/Neumann boundary conditions and list their contents.\n
+ * This example shows how to get entity sets, and tags on those sets.
+ *
+ * To run: ./SetsNTags [meshfile]\n
+ * (default values can run if users don't specify a mesh file)
+ */
+
+#include "moab/Core.hpp"
+#include "moab/Interface.hpp"
+#include "moab/Range.hpp"
+#include "MBTagConventions.hpp"
+
+#include <iostream>
+
+using namespace moab;
+using namespace std;
+
+#ifndef MESH_DIR
+#define MESH_DIR "."
+#endif
+
+string test_file_name = string(MESH_DIR) + string("/1hex.g");
+
+// tag names for these conventional tags come from MBTagConventions.hpp
+const char *tag_nms[] = {MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, NEUMANN_SET_TAG_NAME};
+
+int main(int argc, char **argv) {
+ // get the material set tag handle
+ Tag mtag;
+ ErrorCode rval;
+ Range sets, set_ents;
+
+ // instantiate & load a file
+ Interface *mb = new Core();
+
+ // need option handling here for input filename
+ if (argc > 1){
+ //user has input a mesh file
+ test_file_name = argv[1];
+ }
+
+ rval = mb->load_file(test_file_name.c_str());
+ if (MB_SUCCESS != rval) return 1;
+
+ // loop over set types
+ for (int i = 0; i < 3; i++) {
+ // get the tag handle for this tag name; tag should already exist (it was created during file read)
+ rval = mb->tag_get_handle(tag_nms[i], 1, MB_TYPE_INTEGER, mtag);
+ if (MB_SUCCESS != rval) return 1;
+
+ // get all the sets having that tag (with any value for that tag)
+ sets.clear();
+ rval = mb->get_entities_by_type_and_tag(0, MBENTITYSET, &mtag, NULL, 1, sets);
+ if (MB_SUCCESS != rval) return 1;
+
+ // iterate over each set, getting the entities and printing them
+ Range::iterator set_it;
+ for (set_it = sets.begin(); set_it != sets.end(); set_it++) {
+ // get the id for this set
+ int set_id;
+ rval = mb->tag_get_data(mtag, &(*set_it), 1, &set_id);
+ if (MB_SUCCESS != rval) return 1;
+
+ // get the entities in the set, recursively
+ rval = mb->get_entities_by_handle(*set_it, set_ents, true);
+ if (MB_SUCCESS != rval) return 1;
+
+ cout << tag_nms[i] << " " << set_id << " has "
+ << set_ents.size() << " entities:" << endl;
+ set_ents.print(" ");
+ set_ents.clear();
+ }
+ }
+
+ delete mb;
+}
diff --git a/examples/makefile b/examples/makefile
index 3402f96..d07b675 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -7,21 +7,25 @@ include ${MOAB_DIR}/lib/iMesh-Defs.inc
# MESH_DIR is the top-level MOAB source directory, used to locate mesh files that come with MOAB source
MESH_DIR="../MeshFiles/unittest"
-StructuredMeshSimple : StructuredMeshSimple.o
- ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+EXAMPLES = HelloMOAB GetEntities SetsNTags StructuredMeshSimple DirectAccessWithHoles DirectAccessNoHoles
+PAREXAMPLES = HelloParMOAB ReduceExchangeTags
+F90EXAMPLES = DirectAccessNoHolesF90
+EXOIIEXAMPLES = TestExodusII
+
+default: ${EXAMPLES}
HelloMOAB : HelloMOAB.o
${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
-ReduceExchangeTags : ReduceExchangeTags.o
+GetEntities: GetEntities.o
${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
-HelloMoabPar: HelloMoabPar.o
- ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
-
-TestExodusII: TestExodusII.o
+SetsNTags: SetsNTags.o
${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+StructuredMeshSimple : StructuredMeshSimple.o
+ ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+
DirectAccessWithHoles: DirectAccessWithHoles.o
${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
@@ -31,6 +35,18 @@ DirectAccessNoHoles: DirectAccessNoHoles.o
DirectAccessNoHolesF90: DirectAccessNoHolesF90.o
${MOAB_CXX} -o $@ $< ${IMESH_LIBS}
+ReduceExchangeTags : ReduceExchangeTags.o
+ ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+
+HelloParMOAB: HelloParMOAB.o
+ ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+
+TestExodusII: TestExodusII.o
+ ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+
+clean:
+ rm -rf *.o ${EXAMPLES} ${PAREXAMPLES} ${EXOIIEXAMPLES}
+
.cpp.o :
${MOAB_CXX} ${MOAB_CXXFLAGS} ${MOAB_INCLUDES} -DMESH_DIR=\"${MESH_DIR}\" -c $<
diff --git a/examples/old/SetsNTags.cpp b/examples/old/SetsNTags.cpp
deleted file mode 100644
index 9930891..0000000
--- a/examples/old/SetsNTags.cpp
+++ /dev/null
@@ -1,87 +0,0 @@
-#include "moab/Core.hpp"
-#include "moab/Interface.hpp"
-#include "moab/Range.hpp"
-
-#ifdef USE_MPI
-#include "moab_mpi.h"
-#endif
-
-#include <iostream>
-
-int main(int argc, char **argv) {
- // get the material set tag handle
- moab::Tag mtag;
- moab::ErrorCode rval;
- const char *tag_nms[] = {"MATERIAL_SET", "DIRICHLET_SET", "NEUMANN_SET"};
- moab::Range sets, set_ents;
-
- // instantiate & load a file
- moab::Interface *mb = new moab::Core();
- const char *par_opt = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_SHARED_ENTS;SETS=SETS";
-
- bool parallel = false;
- if (argc > 2 && !strcmp(argv[1], "-p")) parallel = true;
- else if (argc == 1) {
- std::cout << "Usage: " << argv[0] << "[-p] <filename>" << std::endl;
- return 0;
- }
-
- if (parallel)
- rval = mb->load_file(argv[argc-1], 0, par_opt);
- else
- rval = mb->load_file(argv[argc-1]);
- if (moab::MB_SUCCESS != rval) return 1;
-
- // loop over set types
- for (int i = 0; i < 3; i++) {
- rval = mb->tag_get_handle(tag_nms[i], 1, moab::MB_TYPE_INTEGER, mtag);
- if (moab::MB_SUCCESS != rval) return 1;
-
- // get all the sets of that type in the mesh
- sets.clear();
- rval = mb->get_entities_by_type_and_tag(0, moab::MBENTITYSET, &mtag,
- NULL, 1, sets);
- if (moab::MB_SUCCESS != rval) return 1;
-
- // iterate over each set, getting entities
- moab::Range::iterator set_it;
- for (set_it = sets.begin(); set_it != sets.end(); set_it++) {
- moab::EntityHandle this_set = *set_it;
-
- // get the id for this set
- int set_id;
- rval = mb->tag_get_data(mtag, &this_set, 1, &set_id);
- if (moab::MB_SUCCESS != rval) return 1;
-
- // get the entities in the set, recursively
- rval = mb->get_entities_by_handle(this_set, set_ents, true);
- if (moab::MB_SUCCESS != rval) return 1;
-
- std::cout << tag_nms[i] << " " << set_id << " has "
- << set_ents.size() << " entities:" << std::endl;
- set_ents.print(" ");
- set_ents.clear();
- }
- }
-
- // do the same for all sets
- sets.clear();
- rval = mb->get_entities_by_type(0, moab::MBENTITYSET, sets);
- if (moab::MB_SUCCESS != rval) return 1;
-
- // print the sets
- rval = mb->list_entities(sets);
- if (moab::MB_SUCCESS != rval) return 1;
-
- rval = mb->list_entities(NULL, 1);
-
-#ifdef USE_MPI
- if (parallel) {
- MPI_Barrier(MPI_COMM_WORLD);
- std::cout << std::flush;
- std::cerr << std::flush;
- }
-#endif
-
- delete mb;
-}
diff --git a/src/AdaptiveKDTree.cpp b/src/AdaptiveKDTree.cpp
index 82c06e5..253440b 100644
--- a/src/AdaptiveKDTree.cpp
+++ b/src/AdaptiveKDTree.cpp
@@ -2033,7 +2033,10 @@ ErrorCode AdaptiveKDTree::ray_intersect_triangles( EntityHandle root,
}
tris_out[w] = *iter;
dists_out[w] = tri_t;
- ray_end = dists_out.back();
+ if (tris_out.size() >= (unsigned)max_ints)
+ // when we have already reached the max intersection points, we can safely reset
+ // ray_end, because we will accept new points only "closer" than the last one
+ ray_end = dists_out.back();
}
}
}
diff --git a/src/Core.cpp b/src/Core.cpp
index b556858..57ff342 100644
--- a/src/Core.cpp
+++ b/src/Core.cpp
@@ -609,6 +609,21 @@ ErrorCode Core::serial_load_file( const char* file_name,
Range new_ents;
get_entities_by_handle( 0, new_ents );
new_ents = subtract( new_ents, initial_ents );
+
+ // Check if gather set exists
+ EntityHandle gather_set;
+ rval = mMBReadUtil->get_gather_set(gather_set);
+ if (MB_SUCCESS == rval) {
+ // Exclude gather set itself
+ new_ents.erase(gather_set);
+
+ // Exclude gather set entities
+ Range gather_ents;
+ rval = get_entities_by_handle(gather_set, gather_ents);
+ if (MB_SUCCESS == rval)
+ new_ents = subtract(new_ents, gather_ents);
+ }
+
rval = add_entities( *file_set, new_ents );
}
diff --git a/src/ReadUtil.cpp b/src/ReadUtil.cpp
index 11320ac..7aeb00c 100644
--- a/src/ReadUtil.cpp
+++ b/src/ReadUtil.cpp
@@ -46,7 +46,6 @@ ErrorCode ReadUtil::get_node_coords(
std::vector<double*>& arrays,
int sequence_size)
{
-
ErrorCode error;
EntitySequence* seq = 0;
@@ -89,7 +88,6 @@ ErrorCode ReadUtil::get_element_connect(
EntityHandle*& array,
int sequence_size)
{
-
ErrorCode error;
EntitySequence* seq;
@@ -122,7 +120,6 @@ ErrorCode ReadUtil::get_element_connect(
* static_cast<ElementSequence*>(seq)->nodes_per_element();
return error;
-
}
ErrorCode ReadUtil::create_entity_sets( EntityID num_sets,
@@ -153,7 +150,6 @@ ErrorCode ReadUtil::create_entity_sets( EntityID num_sets,
return MB_SUCCESS;
}
-
ErrorCode ReadUtil::update_adjacencies(
const EntityHandle start_handle,
const int number_elements,
@@ -178,8 +174,6 @@ ErrorCode ReadUtil::update_adjacencies(
return MB_SUCCESS;
}
-
-
ErrorCode ReadUtil::report_error( const std::string& error )
{
if(mError)
@@ -188,7 +182,6 @@ ErrorCode ReadUtil::report_error( const std::string& error )
return MB_FAILURE;
}
-
ErrorCode ReadUtil::report_error( const char* error, ... )
{
va_list args;
@@ -410,4 +403,45 @@ ErrorCode ReadUtil::assign_ids( Tag id_tag,
return MB_SUCCESS;
}
+ErrorCode ReadUtil::create_gather_set(EntityHandle& gather_set)
+{
+ ErrorCode rval = mMB->create_meshset(MESHSET_SET, gather_set);
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ Tag gather_set_tag;
+ rval = mMB->tag_get_handle("GATHER_SET", 1, MB_TYPE_INTEGER, gather_set_tag, MB_TAG_CREAT | MB_TAG_SPARSE);
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ int gather_val = 1;
+ rval = mMB->tag_set_data(gather_set_tag, &gather_set, 1, &gather_val);
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ return MB_SUCCESS;
+}
+
+ErrorCode ReadUtil::get_gather_set(EntityHandle& gather_set)
+{
+ Tag gather_set_tag;
+ ErrorCode rval = mMB->tag_get_handle("GATHER_SET", 1, MB_TYPE_INTEGER, gather_set_tag, MB_TAG_SPARSE);
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ int gather_val = 1;
+ void* vals[] = {&gather_val};
+ Range gather_sets;
+ rval = mMB->get_entities_by_type_and_tag(0, MBENTITYSET, &gather_set_tag, vals, 1, gather_sets);
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ if (gather_sets.empty())
+ return MB_ENTITY_NOT_FOUND;
+
+ gather_set = gather_sets[0];
+
+ return MB_SUCCESS;
+}
+
} // namespace moab
diff --git a/src/ReadUtil.hpp b/src/ReadUtil.hpp
index 796543b..0f33987 100644
--- a/src/ReadUtil.hpp
+++ b/src/ReadUtil.hpp
@@ -13,7 +13,6 @@
*
*/
-
#ifndef MB_READ_UTIL_HPP
#define MB_READ_UTIL_HPP
@@ -106,8 +105,15 @@ public:
ErrorCode assign_ids( Tag id_tag, const Range& ents, int start = 0 );
+
ErrorCode assign_ids( Tag id_tag, const EntityHandle* ents,
size_t num_ents, int start = 0 );
+
+ //! Create a new gather set with tag GATHER_SET
+ ErrorCode create_gather_set(EntityHandle& gather_set);
+
+ //! Get entity handle of an existing gather set
+ ErrorCode get_gather_set(EntityHandle& gather_set);
};
} // namespace moab
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/fathomteam/moab/commits/450fe76ddbff/
Changeset: 450fe76ddbff
Branch: None
User: vijaysm
Date: 2013-07-30 10:12:16
Summary: More fixes for src, tools and test after further testing.
Affected #: 1 file
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 280b203..b1dee1e 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -123,6 +123,7 @@
)
endif ( NetCDF_FOUND )
+ MESSAGE("Adding HDF5 includes: ${HDF5_FOUND}")
if ( HDF5_FOUND )
set ( MOAB_DEFINES "${MOAB_DEFINES} -DHDF5_FILE" )
check_function_exists( H5Pset_fapl_mpio MOAB_HDF_HAVE_PARALLEL )
@@ -135,6 +136,7 @@
io/WriteHDF5.cpp
)
+ MESSAGE("Adding HDF5 includes: ${HDF5_INCLUDE_DIRECTORIES}")
include_directories(
${HDF5_INCLUDE_DIR}
io/mhdf/include
https://bitbucket.org/fathomteam/moab/commits/1a7ee64efc6b/
Changeset: 1a7ee64efc6b
Branch: None
User: vijaysm
Date: 2013-07-30 10:12:16
Summary: Correct the HDF5_INCLUDE_DIR instead of HDF5_INCLUDE_DIRECTORIES.
Affected #: 1 file
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index b1dee1e..280b203 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -123,7 +123,6 @@
)
endif ( NetCDF_FOUND )
- MESSAGE("Adding HDF5 includes: ${HDF5_FOUND}")
if ( HDF5_FOUND )
set ( MOAB_DEFINES "${MOAB_DEFINES} -DHDF5_FILE" )
check_function_exists( H5Pset_fapl_mpio MOAB_HDF_HAVE_PARALLEL )
@@ -136,7 +135,6 @@
io/WriteHDF5.cpp
)
- MESSAGE("Adding HDF5 includes: ${HDF5_INCLUDE_DIRECTORIES}")
include_directories(
${HDF5_INCLUDE_DIR}
io/mhdf/include
https://bitbucket.org/fathomteam/moab/commits/5fc68bbd403d/
Changeset: 5fc68bbd403d
Branch: None
User: vijaysm
Date: 2013-07-30 10:15:51
Summary: Merged fathomteam/moab into master
Affected #: 12 files
diff --git a/MeshFiles/unittest/surfrandomtris-4part.h5m b/MeshFiles/unittest/surfrandomtris-4part.h5m
new file mode 100644
index 0000000..1ca984a
Binary files /dev/null and b/MeshFiles/unittest/surfrandomtris-4part.h5m differ
diff --git a/doc/MOAB-UG.doc b/doc/MOAB-UG.doc
index 281d619..5cf591c 100644
Binary files a/doc/MOAB-UG.doc and b/doc/MOAB-UG.doc differ
diff --git a/doc/README b/doc/README
new file mode 100644
index 0000000..76ed8b5
--- /dev/null
+++ b/doc/README
@@ -0,0 +1,21 @@
+How to generate doxygen document for MOAB in Linux:
+
+- Install doxygen, and include the doxygen installation path to PATH.
+
+- After checking out moab from bitbucket, do following under moab directory:
+
+ autoreconf -fi
+ ./configure [options] --enable-docs --with-doxygen=DIR
+ make
+ make check
+ cd doc
+ doxygen user.dox
+
+(--ebable-docs indicates to check for doxygen installation under your given --with-doxygen directory or, if --with-doxygen is not given, under your PATH. If doxygen execuable is not found, it'll report error and exit configure.
+
+If you are sure doxygen is installed and put into PATH, you don't need to add --enable-docs or --with-doxygen, going to doc directory and directly applying "doxygen user.dox" should generate all the documents.)
+
+- After doxygen is done, you should be able to open web browser, and view MOAB document at file://your_moab_dir/doc/html/index.html .
+
+
+
diff --git a/doc/UG/moabUG.h b/doc/UG/moabUG.h
index f3f186d..c5231fa 100644
--- a/doc/UG/moabUG.h
+++ b/doc/UG/moabUG.h
@@ -500,8 +500,8 @@ Applications calling the reader interface functions directly can specify the all
The reader interface consists of the following functions:
-- get_node_arrays: Given the number of vertices requested, the number of geometric dimensions, and a requested start id, allocates a block of vertex handles and returns pointers to coordinate arrays in memory, along with the actual start handle for that block of vertices.
-- get_element_array: Given the number of elements requested, the number of vertices per element, the element type and the requested start id, allocates the block of elements, and returns a pointer to the connectivity array for those elements and the actual start handle for that block. The number of vertices per element is necessary because those elements may include higher-order nodes, and MOAB stores these as part of the normal connectivity array.
+- get_node_coords: Given the number of vertices requested, the number of geometric dimensions, and a requested start id, allocates a block of vertex handles and returns pointers to coordinate arrays in memory, along with the actual start handle for that block of vertices.
+- get_element_connect: Given the number of elements requested, the number of vertices per element, the element type and the requested start id, allocates the block of elements, and returns a pointer to the connectivity array for those elements and the actual start handle for that block. The number of vertices per element is necessary because those elements may include higher-order nodes, and MOAB stores these as part of the normal connectivity array.
- update_adjacencies: This function takes the start handle for a block of elements and the connectivity of those elements, and updates adjacencies for those elements. Which adjacencies are updated depends on the options set in AEntityFactory.
.
@@ -515,12 +515,12 @@ ErrorCode rval = moab->get_interface("ReadUtilIface", &iface);
// allocate a block of vertex handles and read xyz’s into them
std::vector<double*> arrays;
EntityHandle startv, *starth;
-rval = iface->get_node_arrays(3, num_nodes, 0, startv, arrays);
+rval = iface->get_node_coords(3, num_nodes, 0, startv, arrays);
for (int i = 0; i < num_nodes; i++)
infile >> arrays[0][i] >> arrays[1][i] >> arrays[2][i];
// allocate block of hex handles and read connectivity into them
-rval = iface->get_element_array(num_hexes, 8, MBHEX, 0, starth);
+rval = iface->get_element_connect(num_hexes, 8, MBHEX, 0, starth);
for (int i = 0; i < 8*num_hexes; i++)
infile >> starth[i];
@@ -531,8 +531,8 @@ for (int i = 0; i < 8*num_hexes; i++)
The writer interface, declared in WriteUtilIface, provides functions that support writing vertex coordinates and element connectivity to storage locations input by the application. Assembling these data is a common task for writing mesh, and can be non-trivial when exporting only subsets of a mesh. The writer interface declares the following functions:
-- get_node_arrays: Given already-allocated memory and the number of vertices and dimensions, and a range of vertices, this function writes vertex coordinates to that memory. If a tag is input, that tag is also written with integer vertex ids, starting with 1, corresponding to the order the vertices appear in that sequence (these ids are used to write the connectivity array in the form of vertex indices).
-- get_element_array: Given a range of elements and the tag holding vertex ids, and a pointer to memory, the connectivity of the specified elements are written to that memory, in terms of the indices referenced by the specified tag. Again, the number of vertices per element is input, to allow the direct output of higher-order vertices.
+- get_node_coords: Given already-allocated memory and the number of vertices and dimensions, and a range of vertices, this function writes vertex coordinates to that memory. If a tag is input, that tag is also written with integer vertex ids, starting with 1, corresponding to the order the vertices appear in that sequence (these ids are used to write the connectivity array in the form of vertex indices).
+- get_element_connect: Given a range of elements and the tag holding vertex ids, and a pointer to memory, the connectivity of the specified elements are written to that memory, in terms of the indices referenced by the specified tag. Again, the number of vertices per element is input, to allow the direct output of higher-order vertices.
- gather_nodes_from_elements: Given a range of elements, this function returns the range of vertices used by those elements. If a bit-type tag is input, vertices returned are also marked with 0x1 using that tag. If no tag is input, the implementation of this function uses its own bit tag for marking, to avoid using an n2 algorithm for gathering vertices.
- reorder: Given a permutation vector, this function reorders the connectivity for entities with specified type and number of vertices per entity to match that permutation. This function is needed for writing connectivity into numbering systems other than that used internally in MOAB.
.
@@ -557,11 +557,11 @@ iface->assign_ids(verts, 0, 1);
// allocate space for coordinates & write them
std::vector<double*> arrays(3);
for (int i = 0; i < 3; i++) arrays[i] = new double[verts.size()];
-iface->get_node_arrays(3, verts.size(), verts, 0, 1, arrays);
+iface->get_node_coords(3, verts.size(), verts, 0, 1, arrays);
// put connect’y in array, in the form of indices into vertex array
std::vector<int> conn(8*hexes.size());
-iface->get_element_array(hexes.size(), 8, 0, hexes, 0, 1, &conn[0]);
+iface->get_element_connect(hexes.size(), 8, 0, hexes, 0, 1, &conn[0]);
\endcode
\ref contents
diff --git a/examples/LloydRelaxation.cpp b/examples/LloydRelaxation.cpp
new file mode 100644
index 0000000..0cf9d1e
--- /dev/null
+++ b/examples/LloydRelaxation.cpp
@@ -0,0 +1,219 @@
+/** @example LloydRelaxation.cpp \n
+ * \brief Perform Lloyd relaxation on a mesh and its dual \n
+ * <b>To run</b>: mpiexec -np <np> LloydRelaxation [filename]\n
+ *
+ * Briefly, Lloyd relaxation is a technique to smooth out a mesh. The centroid of each cell is computed from its
+ * vertex positions, then vertices are placed at the average of their connected cells' centroids.
+ *
+ * In the parallel algorithm, an extra ghost layer of cells is exchanged. This allows us to compute the centroids
+ * for boundary cells on each processor where they appear; this eliminates the need for one round of data exchange
+ * (for those centroids) between processors. New vertex positions must be sent from owning processors to processors
+ * sharing those vertices. Convergence is measured as the maximum distance moved by any vertex.
+ *
+ * In this implementation, a fixed number of iterations is performed. The final mesh is output to 'lloydfinal.h5m'
+ * in the current directory (H5M format must be used since the file is written in parallel).
+ */
+
+#include "moab/ParallelComm.hpp"
+#include "MBParallelConventions.h"
+#include "moab/Core.hpp"
+#include "moab/Skinner.hpp"
+#include "moab/CN.hpp"
+#include "moab/CartVect.hpp"
+#include <iostream>
+#include <sstream>
+
+using namespace moab;
+using namespace std;
+
+string test_file_name = string(MESH_DIR) + string("/surfrandomtris-4part.h5m");
+
+#define RC if (MB_SUCCESS != rval) return rval
+
+ErrorCode perform_lloyd_relaxation(ParallelComm *pc, Range &verts, Range &cells, Tag fixed,
+ int num_its, int report_its);
+
+int main(int argc, char **argv)
+{
+ int num_its = 10;
+ int report_its = 1;
+
+ MPI_Init(&argc, &argv);
+
+ // need option handling here for input filename
+ if (argc > 1){
+ //user has input a mesh file
+ test_file_name = argv[1];
+ }
+
+ // get MOAB and ParallelComm instances
+ Interface *mb = new Core;
+ if (NULL == mb) return 1;
+ // get the ParallelComm instance
+ ParallelComm* pcomm = new ParallelComm(mb, MPI_COMM_WORLD);
+ int nprocs = pcomm->size();
+
+ string options;
+ if (nprocs > 1) // if reading in parallel, need to tell it how
+ options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS;PARALLEL_GHOSTS=2.0.1;DEBUG_IO=0;DEBUG_PIO=0";
+
+ // read the file
+ ErrorCode rval = mb->load_file(test_file_name.c_str(), 0, options.c_str()); RC;
+
+ // make tag to specify fixed vertices, since it's input to the algorithm; use a default value of non-fixed
+ // so we only need to set the fixed tag for skin vertices
+ Tag fixed;
+ int def_val = 0;
+ rval = mb->tag_get_handle("fixed", 1, MB_TYPE_INTEGER, fixed, MB_TAG_CREAT | MB_TAG_DENSE, &def_val); RC;
+
+ // get all vertices and faces
+ Range verts, faces, skin_verts;
+ rval = mb->get_entities_by_type(0, MBVERTEX, verts); RC;
+ rval = mb->get_entities_by_dimension(0, 2, faces); RC;
+
+ // get the skin vertices of those faces and mark them as fixed; we don't want to fix the vertices on a
+ // part boundary, but since we exchanged a layer of ghost faces, those vertices aren't on the skin locally
+ // ok to mark non-owned skin vertices too, I won't move those anyway
+ // use MOAB's skinner class to find the skin
+ Skinner skinner(mb);
+ rval = skinner.find_skin(faces, true, skin_verts); RC; // 'true' param indicates we want vertices back, not faces
+
+ std::vector<int> fix_tag(skin_verts.size(), 1); // initialized to 1 to indicate fixed
+ rval = mb->tag_set_data(fixed, skin_verts, &fix_tag[0]); RC;
+
+ // now perform the Lloyd relaxation
+ rval = perform_lloyd_relaxation(pcomm, verts, faces, fixed, num_its, report_its); RC;
+
+ // delete fixed tag, since we created it here
+ rval = mb->tag_delete(fixed); RC;
+
+ // output file, using parallel write
+ rval = mb->write_file("lloydfinal.h5m", NULL, "PARALLEL=WRITE_PART"); RC;
+
+ // delete MOAB instance
+ delete mb;
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+ErrorCode perform_lloyd_relaxation(ParallelComm *pcomm, Range &verts, Range &faces, Tag fixed,
+ int num_its, int report_its)
+{
+ ErrorCode rval;
+ Interface *mb = pcomm->get_moab();
+ int nprocs = pcomm->size();
+
+ // perform Lloyd relaxation:
+ // 1. setup: set vertex centroids from vertex coords; filter to owned verts; get fixed tags
+
+ // get all verts coords into tag; don't need to worry about filtering out fixed verts,
+ // we'll just be setting to their fixed coords
+ std::vector<double> vcentroids(3*verts.size());
+ rval = mb->get_coords(verts, &vcentroids[0]); RC;
+
+ Tag centroid;
+ rval = mb->tag_get_handle("centroid", 3, MB_TYPE_DOUBLE, centroid, MB_TAG_CREAT | MB_TAG_DENSE); RC;
+ rval = mb->tag_set_data(centroid, verts, &vcentroids[0]); RC;
+
+ // filter verts down to owned ones and get fixed tag for them
+ Range owned_verts, shared_owned_verts;
+ if (nprocs > 1) {
+ rval = pcomm->filter_pstatus(verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_verts);
+ if (rval != MB_SUCCESS) return rval;
+ }
+ else
+ owned_verts = verts;
+ std::vector<int> fix_tag(owned_verts.size());
+ rval = mb->tag_get_data(fixed, owned_verts, &fix_tag[0]); RC;
+
+ // now fill vcentroids array with positions of just owned vertices, since those are the ones
+ // we're actually computing
+ vcentroids.resize(3*owned_verts.size());
+ rval = mb->tag_get_data(centroid, owned_verts, &vcentroids[0]); RC;
+
+ // get shared owned verts, for exchanging tags
+ rval = pcomm->get_shared_entities(-1, shared_owned_verts, 0, false, true); RC;
+ // workaround: if no shared owned verts, put a non-shared one in the list, to prevent exchanging tags
+ // for all shared entities
+ if (shared_owned_verts.empty()) shared_owned_verts.insert(*verts.begin());
+
+ // some declarations for later iterations
+ std::vector<double> fcentroids(3*faces.size()); // fcentroids for face centroids
+ std::vector<double> ctag(3*CN::MAX_NODES_PER_ELEMENT); // temporary coordinate storage for verts bounding a face
+ const EntityHandle *conn; // const ptr & size to face connectivity
+ int nconn;
+ Range::iterator fit, vit; // for iterating over faces, verts
+ int f, v; // for indexing into centroid vectors
+ std::vector<EntityHandle> adj_faces; // used in vertex iteration
+
+ // 2. for num_its iterations:
+ for (int nit = 0; nit < num_its; nit++) {
+
+ double mxdelta = 0.0;
+
+ // 2a. foreach face: centroid = sum(vertex centroids)/num_verts_in_cell
+ for (fit = faces.begin(), f = 0; fit != faces.end(); fit++, f++) {
+ // get verts for this face
+ rval = mb->get_connectivity(*fit, conn, nconn); RC;
+ // get centroid tags for those verts
+ rval = mb->tag_get_data(centroid, conn, nconn, &ctag[0]); RC;
+ fcentroids[3*f+0] = fcentroids[3*f+1] = fcentroids[3*f+2] = 0.0;
+ for (v = 0; v < nconn; v++) {
+ fcentroids[3*f+0] += ctag[3*v+0];
+ fcentroids[3*f+1] += ctag[3*v+1];
+ fcentroids[3*f+2] += ctag[3*v+2];
+ }
+ for (v = 0; v < 3; v++) fcentroids[3*f+v] /= nconn;
+ }
+ rval = mb->tag_set_data(centroid, faces, &fcentroids[0]); RC;
+
+ // 2b. foreach owned vertex:
+ for (vit = owned_verts.begin(), v = 0; vit != owned_verts.end(); vit++, v++) {
+ // if !fixed
+ if (fix_tag[v]) continue;
+ // vertex centroid = sum(cell centroids)/ncells
+ adj_faces.clear();
+ rval = mb->get_adjacencies(&(*vit), 1, 2, false, adj_faces); RC;
+ rval = mb->tag_get_data(centroid, &adj_faces[0], adj_faces.size(), &fcentroids[0]); RC;
+ double vnew[] = {0.0, 0.0, 0.0};
+ for (f = 0; f < (int)adj_faces.size(); f++) {
+ vnew[0] += fcentroids[3*f+0];
+ vnew[1] += fcentroids[3*f+1];
+ vnew[2] += fcentroids[3*f+2];
+ }
+ for (f = 0; f < 3; f++) vnew[f] /= adj_faces.size();
+ double delta = (CartVect(vnew)-CartVect(&vcentroids[3*v])).length();
+ mxdelta = std::max(delta, mxdelta);
+ for (f = 0; f < 3; f++) vcentroids[3*v+f] = vnew[f];
+ }
+
+ // set the centroid tag; having them only in vcentroids array isn't enough, as vertex centroids are
+ // accessed randomly in loop over faces
+ rval = mb->tag_set_data(centroid, owned_verts, &vcentroids[0]); RC;
+
+ // 2c. exchange tags on owned verts
+ if (nprocs > 1) {
+ rval = pcomm->exchange_tags(centroid, shared_owned_verts); RC;
+ }
+
+
+ if (!(nit%report_its)) {
+ // global reduce for maximum delta, then report it
+ double global_max = mxdelta;
+ if (nprocs > 1)
+ MPI_Reduce(&mxdelta, &global_max, 1, MPI_DOUBLE, MPI_MAX, 0, pcomm->comm());
+ if (1 == nprocs || !pcomm->rank())
+ cout << "Max delta = " << global_max << endl;
+ }
+ }
+
+ // write the tag back onto vertex coordinates
+ rval = mb->set_coords(owned_verts, &vcentroids[0]); RC;
+
+ // delete the centroid tag, since we don't need it anymore
+ rval = mb->tag_delete(centroid); RC;
+
+ return MB_SUCCESS;
+}
diff --git a/examples/makefile b/examples/makefile
index d07b675..3b848d2 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -8,7 +8,7 @@ include ${MOAB_DIR}/lib/iMesh-Defs.inc
MESH_DIR="../MeshFiles/unittest"
EXAMPLES = HelloMOAB GetEntities SetsNTags StructuredMeshSimple DirectAccessWithHoles DirectAccessNoHoles
-PAREXAMPLES = HelloParMOAB ReduceExchangeTags
+PAREXAMPLES = HelloParMOAB ReduceExchangeTags LloydRelaxation
F90EXAMPLES = DirectAccessNoHolesF90
EXOIIEXAMPLES = TestExodusII
@@ -23,6 +23,9 @@ GetEntities: GetEntities.o
SetsNTags: SetsNTags.o
${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+LloydRelaxation: LloydRelaxation.o
+ ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+
StructuredMeshSimple : StructuredMeshSimple.o
${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
diff --git a/src/io/NCHelper.cpp b/src/io/NCHelper.cpp
index 7d1a6c3..d28c824 100644
--- a/src/io/NCHelper.cpp
+++ b/src/io/NCHelper.cpp
@@ -83,12 +83,12 @@ ErrorCode NCHelper::read_variable_to_set_allocate(std::vector<ReadNC::VarData>&
{
if (vdatas[i].varDims.size() != 1)
{
- vdatas[i].readDims[t].push_back(tstep_nums[t]);
+ vdatas[i].readStarts[t].push_back(tstep_nums[t]);
vdatas[i].readCounts[t].push_back(1);
}
else
{
- vdatas[i].readDims[t].push_back(0);
+ vdatas[i].readStarts[t].push_back(0);
vdatas[i].readCounts[t].push_back(tstep_nums.size());
}
}
@@ -96,14 +96,14 @@ ErrorCode NCHelper::read_variable_to_set_allocate(std::vector<ReadNC::VarData>&
// Set up other dimensions and counts
if (vdatas[i].varDims.empty()) {
// Scalar variable
- vdatas[i].readDims[t].push_back(0);
+ vdatas[i].readStarts[t].push_back(0);
vdatas[i].readCounts[t].push_back(1);
}
else {
for (unsigned int idx = 0; idx != vdatas[i].varDims.size(); idx++){
if (tDim != vdatas[i].varDims[idx]){
// Push other variable dimensions, except time, which was already pushed
- vdatas[i].readDims[t].push_back(0);
+ vdatas[i].readStarts[t].push_back(0);
vdatas[i].readCounts[t].push_back(dimVals[vdatas[i].varDims[idx]]);
}
}
@@ -158,28 +158,28 @@ ErrorCode NCHelper::read_variable_to_set(EntityHandle file_set, std::vector<Read
switch (vdatas[i].varDataType) {
case NC_BYTE:
case NC_CHAR:
- success = NCFUNCAG(_vara_text)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_text)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
(char*) data NCREQ);
ERRORS(success, "Failed to read char data.");
break;
case NC_DOUBLE:
- success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
(double*) data NCREQ);
ERRORS(success, "Failed to read double data.");
break;
case NC_FLOAT: {
- success = NCFUNCAG(_vara_float)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_float)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
(float*) data NCREQ);
ERRORS(success, "Failed to read float data.");
break;
}
case NC_INT:
- success = NCFUNCAG(_vara_int)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_int)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
(int*) data NCREQ);
ERRORS(success, "Failed to read int data.");
break;
case NC_SHORT:
- success = NCFUNCAG(_vara_short)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_short)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
(short*) data NCREQ);
ERRORS(success, "Failed to read short data.");
break;
@@ -545,7 +545,7 @@ ErrorCode ScdNCHelper::read_scd_variable_setup(std::vector<std::string>& var_nam
for (unsigned int i = 0; i < vdatas.size(); i++) {
vdatas[i].varTags.resize(tstep_nums.size(), 0);
vdatas[i].varDatas.resize(tstep_nums.size());
- vdatas[i].readDims.resize(tstep_nums.size());
+ vdatas[i].readStarts.resize(tstep_nums.size());
vdatas[i].readCounts.resize(tstep_nums.size());
}
for (unsigned int i = 0; i < vsetdatas.size(); i++) {
@@ -553,13 +553,13 @@ ErrorCode ScdNCHelper::read_scd_variable_setup(std::vector<std::string>& var_nam
&& (vsetdatas[i].varDims.size() != 1)) {
vsetdatas[i].varTags.resize(tstep_nums.size(), 0);
vsetdatas[i].varDatas.resize(tstep_nums.size());
- vsetdatas[i].readDims.resize(tstep_nums.size());
+ vsetdatas[i].readStarts.resize(tstep_nums.size());
vsetdatas[i].readCounts.resize(tstep_nums.size());
}
else {
vsetdatas[i].varTags.resize(1, 0);
vsetdatas[i].varDatas.resize(1);
- vsetdatas[i].readDims.resize(1);
+ vsetdatas[i].readStarts.resize(1);
vsetdatas[i].readCounts.resize(1);
}
}
@@ -647,12 +647,12 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
// Set up the dimensions and counts
// First time
- vdatas[i].readDims[t].push_back(tstep_nums[t]);
+ vdatas[i].readStarts[t].push_back(tstep_nums[t]);
vdatas[i].readCounts[t].push_back(1);
// then z/y/x
if (vdatas[i].numLev != 1) {
- vdatas[i].readDims[t].push_back(0);
+ vdatas[i].readStarts[t].push_back(0);
vdatas[i].readCounts[t].push_back(vdatas[i].numLev);
}
@@ -660,11 +660,11 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
case ReadNC::ENTLOCVERT:
// vertices
// only structured mesh has j parameter that multiplies i to get total # vertices
- vdatas[i].readDims[t].push_back(lDims[1]);
+ vdatas[i].readStarts[t].push_back(lDims[1]);
vdatas[i].readCounts[t].push_back(lDims[4] - lDims[1] + 1);
- vdatas[i].readDims[t].push_back(lDims[0]);
+ vdatas[i].readStarts[t].push_back(lDims[0]);
vdatas[i].readCounts[t].push_back(lDims[3] - lDims[0] + 1);
- assert(vdatas[i].readDims[t].size() == vdatas[i].varDims.size());
+ assert(vdatas[i].readStarts[t].size() == vdatas[i].varDims.size());
range = &verts;
break;
case ReadNC::ENTLOCNSEDGE:
@@ -675,11 +675,11 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
break;
case ReadNC::ENTLOCFACE:
// faces
- vdatas[i].readDims[t].push_back(lCDims[1]);
- vdatas[i].readDims[t].push_back(lCDims[0]);
+ vdatas[i].readStarts[t].push_back(lCDims[1]);
+ vdatas[i].readStarts[t].push_back(lCDims[0]);
vdatas[i].readCounts[t].push_back(lCDims[4] - lCDims[1] + 1);
vdatas[i].readCounts[t].push_back(lCDims[3] - lCDims[0] + 1);
- assert(vdatas[i].readDims[t].size() == vdatas[i].varDims.size());
+ assert(vdatas[i].readStarts[t].size() == vdatas[i].varDims.size());
#ifdef USE_MPI
range = &faces_owned;
#else
@@ -737,7 +737,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
case NC_BYTE:
case NC_CHAR: {
std::vector<char> tmpchardata(sz);
- success = NCFUNCAG(_vara_text)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_text)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpchardata[0] NCREQ);
if (vdatas[i].numLev != 1)
// switch from k varying slowest to k varying fastest
@@ -751,7 +751,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
}
case NC_DOUBLE: {
std::vector<double> tmpdoubledata(sz);
- success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpdoubledata[0] NCREQ);
if (vdatas[i].numLev != 1)
// switch from k varying slowest to k varying fastest
@@ -765,7 +765,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
}
case NC_FLOAT: {
std::vector<float> tmpfloatdata(sz);
- success = NCFUNCAG(_vara_float)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_float)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpfloatdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
@@ -779,7 +779,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
}
case NC_INT: {
std::vector<int> tmpintdata(sz);
- success = NCFUNCAG(_vara_int)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_int)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpintdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
@@ -793,7 +793,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
}
case NC_SHORT: {
std::vector<short> tmpshortdata(sz);
- success = NCFUNCAG(_vara_short)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_short)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpshortdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
diff --git a/src/io/NCHelperHOMME.cpp b/src/io/NCHelperHOMME.cpp
index d6659eb..058dfaf 100644
--- a/src/io/NCHelperHOMME.cpp
+++ b/src/io/NCHelperHOMME.cpp
@@ -582,7 +582,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_setup(std::vector<std::string>& var_n
for (unsigned int i = 0; i < vdatas.size(); i++) {
vdatas[i].varTags.resize(tstep_nums.size(), 0);
vdatas[i].varDatas.resize(tstep_nums.size());
- vdatas[i].readDims.resize(tstep_nums.size());
+ vdatas[i].readStarts.resize(tstep_nums.size());
vdatas[i].readCounts.resize(tstep_nums.size());
}
for (unsigned int i = 0; i < vsetdatas.size(); i++) {
@@ -590,13 +590,13 @@ ErrorCode NCHelperHOMME::read_ucd_variable_setup(std::vector<std::string>& var_n
&& (vsetdatas[i].varDims.size() != 1)) {
vsetdatas[i].varTags.resize(tstep_nums.size(), 0);
vsetdatas[i].varDatas.resize(tstep_nums.size());
- vsetdatas[i].readDims.resize(tstep_nums.size());
+ vsetdatas[i].readStarts.resize(tstep_nums.size());
vsetdatas[i].readCounts.resize(tstep_nums.size());
}
else {
vsetdatas[i].varTags.resize(1, 0);
vsetdatas[i].varDatas.resize(1);
- vsetdatas[i].readDims.resize(1);
+ vsetdatas[i].readStarts.resize(1);
vsetdatas[i].readCounts.resize(1);
}
}
@@ -657,12 +657,12 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_allocate(EntityHandle file_
// Set up the dimensions and counts
// First: time
- vdatas[i].readDims[t].push_back(tstep_nums[t]);
+ vdatas[i].readStarts[t].push_back(tstep_nums[t]);
vdatas[i].readCounts[t].push_back(1);
// Next: numLev
if (vdatas[i].numLev != 1) {
- vdatas[i].readDims[t].push_back(0);
+ vdatas[i].readStarts[t].push_back(0);
vdatas[i].readCounts[t].push_back(vdatas[i].numLev);
}
@@ -672,9 +672,9 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_allocate(EntityHandle file_
// vertices
// we will start from the first localGid, actually; we will reset that
// later on, anyway, in a loop
- vdatas[i].readDims[t].push_back(localGid[0] - 1);
+ vdatas[i].readStarts[t].push_back(localGid[0] - 1);
vdatas[i].readCounts[t].push_back(localGid.size());
- assert(vdatas[i].readDims[t].size() == vdatas[i].varDims.size());
+ assert(vdatas[i].readStarts[t].size() == vdatas[i].varDims.size());
range = &verts;
break;
default:
@@ -744,7 +744,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
// localGid range;
// basically, we have to give a different point
// for data to start, for every subrange :(
- size_t nbDims = vdatas[i].readDims[t].size();
+ size_t nbDims = vdatas[i].readStarts[t].size();
// assume that the last dimension is for the ncol,
// node varying variable
@@ -755,13 +755,13 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // inclusive
- vdatas[i].readDims[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readStarts[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 1] = (NCDF_SIZE) (endh - starth + 1);
// do a partial read, in each subrange
// wait outside this loop
success = NCFUNCAG2(_vara_double)(_fileId, vdatas[i].varId,
- &(vdatas[i].readDims[t][0]), &(vdatas[i].readCounts[t][0]),
+ &(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpdoubledata[indexInDoubleArray]) NCREQ2);
ERRORS(success, "Failed to read double data in loop");
// we need to increment the index in double array for the
@@ -791,7 +791,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
// localGid range;
// basically, we have to give a different point
// for data to start, for every subrange :(
- size_t nbDims = vdatas[i].readDims[t].size();
+ size_t nbDims = vdatas[i].readStarts[t].size();
// assume that the last dimension is for the ncol,
// node varying variable
@@ -802,13 +802,13 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // inclusive
- vdatas[i].readDims[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readStarts[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 1] = (NCDF_SIZE) (endh - starth + 1);
// do a partial read, in each subrange
// wait outside this loop
success = NCFUNCAG2(_vara_float)(_fileId, vdatas[i].varId,
- &(vdatas[i].readDims[t][0]), &(vdatas[i].readCounts[t][0]),
+ &(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpfloatdata[indexInFloatArray]) NCREQ2);
ERRORS(success, "Failed to read float data in loop");
// we need to increment the index in float array for the
@@ -889,7 +889,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
case NC_BYTE:
case NC_CHAR: {
std::vector<char> tmpchardata(sz);
- success = NCFUNCAG(_vara_text)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_text)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpchardata[0] NCREQ);
if (vdatas[i].numLev != 1)
// switch from k varying slowest to k varying fastest
@@ -910,7 +910,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
// localGid range;
// basically, we have to give a different point
// for data to start, for every subrange :(
- size_t nbDims = vdatas[i].readDims[t].size();
+ size_t nbDims = vdatas[i].readStarts[t].size();
// Assume that the last dimension is for the ncol
size_t indexInDoubleArray = 0;
@@ -920,11 +920,11 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // Inclusive
- vdatas[i].readDims[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readStarts[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 1] = (NCDF_SIZE) (endh - starth + 1);
success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId,
- &(vdatas[i].readDims[t][0]), &(vdatas[i].readCounts[t][0]),
+ &(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpdoubledata[indexInDoubleArray]) NCREQ);
ERRORS(success, "Failed to read float data in loop");
// We need to increment the index in double array for the
@@ -951,7 +951,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
// localGid range;
// basically, we have to give a different point
// for data to start, for every subrange :(
- size_t nbDims = vdatas[i].readDims[t].size();
+ size_t nbDims = vdatas[i].readStarts[t].size();
// Assume that the last dimension is for the ncol
size_t indexInFloatArray = 0;
@@ -961,11 +961,11 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // Inclusive
- vdatas[i].readDims[t][nbDims-1] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readStarts[t][nbDims-1] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims-1] = (NCDF_SIZE) (endh - starth + 1);
success = NCFUNCAG(_vara_float)(_fileId, vdatas[i].varId,
- &(vdatas[i].readDims[t][0]), &(vdatas[i].readCounts[t][0]),
+ &(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpfloatdata[indexInFloatArray]) NCREQ);
ERRORS(success, "Failed to read float data in loop");
// We need to increment the index in float array for the
@@ -986,7 +986,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
}
case NC_INT: {
std::vector<int> tmpintdata(sz);
- success = NCFUNCAG(_vara_int)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_int)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpintdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
@@ -1000,7 +1000,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
}
case NC_SHORT: {
std::vector<short> tmpshortdata(sz);
- success = NCFUNCAG(_vara_short)(_fileId, vdatas[i].varId, &vdatas[i].readDims[t][0], &vdatas[i].readCounts[t][0],
+ success = NCFUNCAG(_vara_short)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpshortdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
diff --git a/src/io/NCHelperMPAS.cpp b/src/io/NCHelperMPAS.cpp
index 24b7241..3fa43c0 100644
--- a/src/io/NCHelperMPAS.cpp
+++ b/src/io/NCHelperMPAS.cpp
@@ -155,10 +155,10 @@ ErrorCode NCHelperMPAS::init_mesh_vals(const FileOptions& opts, EntityHandle fil
int verticesOnEdgeVarId;
int success = NCFUNC(inq_varid)(_fileId, "verticesOnEdge", &verticesOnEdgeVarId);
ERRORS(success, "Failed to get variable id of verticesOnEdge.");
- NCDF_SIZE tmp_dims[2] = {0, 0};
+ NCDF_SIZE tmp_starts[2] = {0, 0};
NCDF_SIZE tmp_counts[2] = {static_cast<size_t>(nEdges), 2};
verticesOnEdge.resize(nEdges * 2);
- success = NCFUNCAG(_vara_int)(_fileId, verticesOnEdgeVarId, tmp_dims, tmp_counts, &verticesOnEdge[0] NCREQ);
+ success = NCFUNCAG(_vara_int)(_fileId, verticesOnEdgeVarId, tmp_starts, tmp_counts, &verticesOnEdge[0] NCREQ);
ERRORS(success, "Failed to read variable values of verticesOnEdge.");
// Determine the entity location type of a variable
@@ -559,7 +559,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_setup(std::vector<std::string>& var_na
for (unsigned int i = 0; i < vdatas.size(); i++) {
vdatas[i].varTags.resize(tstep_nums.size(), 0);
vdatas[i].varDatas.resize(tstep_nums.size());
- vdatas[i].readDims.resize(tstep_nums.size());
+ vdatas[i].readStarts.resize(tstep_nums.size());
vdatas[i].readCounts.resize(tstep_nums.size());
}
for (unsigned int i = 0; i < vsetdatas.size(); i++) {
@@ -567,13 +567,13 @@ ErrorCode NCHelperMPAS::read_ucd_variable_setup(std::vector<std::string>& var_na
&& (vsetdatas[i].varDims.size() != 1)) {
vsetdatas[i].varTags.resize(tstep_nums.size(), 0);
vsetdatas[i].varDatas.resize(tstep_nums.size());
- vsetdatas[i].readDims.resize(tstep_nums.size());
+ vsetdatas[i].readStarts.resize(tstep_nums.size());
vsetdatas[i].readCounts.resize(tstep_nums.size());
}
else {
vsetdatas[i].varTags.resize(1, 0);
vsetdatas[i].varDatas.resize(1);
- vsetdatas[i].readDims.resize(1);
+ vsetdatas[i].readStarts.resize(1);
vsetdatas[i].readCounts.resize(1);
}
}
@@ -655,26 +655,26 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_allocate(EntityHandle file_s
// Set up the dimensions and counts
// First: Time
- vdatas[i].readDims[t].push_back(tstep_nums[t]);
+ vdatas[i].readStarts[t].push_back(tstep_nums[t]);
vdatas[i].readCounts[t].push_back(1);
// Next: nCells or nEdges or nVertices
switch (vdatas[i].entLoc) {
case ReadNC::ENTLOCVERT:
// vertices
- vdatas[i].readDims[t].push_back(localGidVerts[0] - 1);
+ vdatas[i].readStarts[t].push_back(localGidVerts[0] - 1);
vdatas[i].readCounts[t].push_back(nLocalVertices);
range = &verts;
break;
case ReadNC::ENTLOCFACE:
// faces
- vdatas[i].readDims[t].push_back(localGidCells[0] - 1);
+ vdatas[i].readStarts[t].push_back(localGidCells[0] - 1);
vdatas[i].readCounts[t].push_back(nLocalCells);
range = &facesOwned;
break;
case ReadNC::ENTLOCEDGE:
// edges
- vdatas[i].readDims[t].push_back(localGidEdges[0] - 1);
+ vdatas[i].readStarts[t].push_back(localGidEdges[0] - 1);
vdatas[i].readCounts[t].push_back(nLocalEdges);
range = &edges;
break;
@@ -684,9 +684,9 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_allocate(EntityHandle file_s
}
// Last, numLev, even if it is 1
- vdatas[i].readDims[t].push_back(0);
+ vdatas[i].readStarts[t].push_back(0);
vdatas[i].readCounts[t].push_back(vdatas[i].numLev);
- assert(vdatas[i].readDims[t].size() == vdatas[i].varDims.size());
+ assert(vdatas[i].readStarts[t].size() == vdatas[i].varDims.size());
// Get ptr to tag space
if (vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1)
@@ -766,7 +766,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_async(EntityHandle file_set,
// localGid range;
// basically, we have to give a different point
// for data to start, for every subrange :(
- size_t nbDims = vdatas[i].readDims[t].size();
+ size_t nbDims = vdatas[i].readStarts[t].size();
// Assume that the last dimension is for the nVertLevels
size_t indexInDoubleArray = 0;
@@ -776,13 +776,13 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_async(EntityHandle file_set,
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // inclusive
- vdatas[i].readDims[t][nbDims - 2] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readStarts[t][nbDims - 2] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 2] = (NCDF_SIZE) (endh - starth + 1);
// do a partial read, in each subrange
// wait outside this loop
success = NCFUNCAG2(_vara_double)(_fileId, vdatas[i].varId,
- &(vdatas[i].readDims[t][0]), &(vdatas[i].readCounts[t][0]),
+ &(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpdoubledata[indexInDoubleArray]) NCREQ2);
ERRORS(success, "Failed to read double data in loop");
// we need to increment the index in double array for the
@@ -909,7 +909,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset(EntityHandle file_set, std::
// localGid range;
// basically, we have to give a different point
// for data to start, for every subrange :(
- size_t nbDims = vdatas[i].readDims[t].size();
+ size_t nbDims = vdatas[i].readStarts[t].size();
// Assume that the last dimension is for the nVertLevels
size_t indexInDoubleArray = 0;
@@ -919,11 +919,11 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset(EntityHandle file_set, std::
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // Inclusive
- vdatas[i].readDims[t][nbDims - 2] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readStarts[t][nbDims - 2] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 2] = (NCDF_SIZE) (endh - starth + 1);
success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId,
- &(vdatas[i].readDims[t][0]), &(vdatas[i].readCounts[t][0]),
+ &(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpdoubledata[indexInDoubleArray]) NCREQ);
ERRORS(success, "Failed to read double data in loop");
// We need to increment the index in double array for the
diff --git a/src/io/ReadNC.hpp b/src/io/ReadNC.hpp
index abe690f..4efabab 100644
--- a/src/io/ReadNC.hpp
+++ b/src/io/ReadNC.hpp
@@ -127,7 +127,7 @@ private:
std::string varName;
std::vector<Tag> varTags; // One tag for each time step, varTags[t]
std::vector<void*> varDatas;
- std::vector<std::vector<NCDF_SIZE> > readDims; // Start value for this [t][dim]
+ std::vector<std::vector<NCDF_SIZE> > readStarts; // Start value for this [t][dim]
std::vector<std::vector<NCDF_SIZE> > readCounts; // Number of data values for this [t][dim]
int entLoc;
int numLev;
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 348bad0..90bab23 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -6894,12 +6894,10 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
// take all shared entities if incoming list is empty
Range entities;
- if (entities_in.empty()) {
+ if (entities_in.empty())
std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(entities));
- }
- else
- entities = entities_in;
-
+ else entities = entities_in;
+
int dum_ack_buff;
for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
@@ -6910,7 +6908,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
result = filter_pstatus(tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit);
RRA("Failed pstatus AND check.");
- // remote nonowned entities
+ // remove nonowned entities
if (!tag_ents.empty()) {
result = filter_pstatus(tag_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
RRA("Failed pstatus NOT check.");
@@ -6998,7 +6996,10 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
assert(src_tags.size() == dst_tags.size());
if (src_tags != dst_tags) {
std::vector<unsigned char> data;
- Range owned_ents(entities_in);
+ Range owned_ents;
+ if (entities_in.empty())
+ std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(entities));
+ else owned_ents = entities_in;
result = filter_pstatus(owned_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
RRA("Failure to get subset of owned entities");
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index c558334..55bdd93 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -291,26 +291,38 @@ namespace moab {
/** \brief Exchange tags for all shared and ghosted entities
* This function should be called collectively over the communicator for this ParallelComm.
* If this version is called, all ghosted/shared entities should have a value for this
- * tag (or the tag should have a default value).
- * \param tags Vector of tag handles to be exchanged
+ * tag (or the tag should have a default value). If the entities vector is empty, all shared entities
+ * participate in the exchange. If a proc has no owned entities this function must still be called
+ * since it is collective.
+ * \param src_tags Vector of tag handles to be exchanged
+ * \param dst_tags Tag handles to store the tags on the non-owning procs
+ * \param entities Entities for which tags are exchanged
*/
ErrorCode exchange_tags( const std::vector<Tag> &src_tags,
- const std::vector<Tag> &dst_tags,
- const Range &entities);
+ const std::vector<Tag> &dst_tags,
+ const Range &entities);
/** \brief Exchange tags for all shared and ghosted entities
- * This function should be called collectively over the communicator for this ParallelComm
+ * This function should be called collectively over the communicator for this ParallelComm.
+ * If the entities vector is empty, all shared entities
+ * participate in the exchange. If a proc has no owned entities this function must still be called
+ * since it is collective.
* \param tag_name Name of tag to be exchanged
+ * \param entities Entities for which tags are exchanged
*/
ErrorCode exchange_tags( const char *tag_name,
- const Range &entities);
+ const Range &entities);
/** \brief Exchange tags for all shared and ghosted entities
- * This function should be called collectively over the communicator for this ParallelComm
+ * This function should be called collectively over the communicator for this ParallelComm.
+ * If the entities vector is empty, all shared entities
+ * participate in the exchange. If a proc has no owned entities this function must still be called
+ * since it is collective.
* \param tagh Handle of tag to be exchanged
+ * \param entities Entities for which tags are exchanged
*/
ErrorCode exchange_tags( Tag tagh,
- const Range &entities);
+ const Range &entities);
/** \brief Perform data reduction operation for all shared and ghosted entities
* This function should be called collectively over the communicator for this ParallelComm.
https://bitbucket.org/fathomteam/moab/commits/09c51ab3b008/
Changeset: 09c51ab3b008
Branch: master
User: vijaysm
Date: 2013-07-30 10:16:13
Summary: Merge branch 'master' of bitbucket.org:vijaysm/moab
Affected #: 0 files
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the moab-dev
mailing list