[MOAB-dev] r3089 - in MOAB/trunk: . parallel
kraftche at cae.wisc.edu
Mon Aug 3 15:52:53 CDT 2009
Author: kraftche
Date: 2009-08-03 15:52:53 -0500 (Mon, 03 Aug 2009)
New Revision: 3089
Modified:
MOAB/trunk/WriteHDF5.cpp
MOAB/trunk/parallel/WriteHDF5Parallel.cpp
Log:
more debug output, disable debug output, fix warning
Modified: MOAB/trunk/WriteHDF5.cpp
===================================================================
--- MOAB/trunk/WriteHDF5.cpp 2009-08-03 20:38:34 UTC (rev 3088)
+++ MOAB/trunk/WriteHDF5.cpp 2009-08-03 20:52:53 UTC (rev 3089)
@@ -80,9 +80,6 @@
#endif
-//#define TPRINT(A)
-#define TPRINT(A) tprint( (A) )
-
#ifdef VALGRIND
# include <valgrind/memcheck.h>
#else
@@ -345,6 +342,7 @@
void WriteHDF5::tprint( const char* fmt, ... )
{
+#ifdef DEBUG
static const clock_t t0 = clock();
va_list args;
va_start(args, fmt);
@@ -353,6 +351,7 @@
vsnprintf( buffer+n, sizeof(buffer)-n, fmt, args );
fputs( buffer, stderr );
va_end(args);
+#endif
}
@@ -449,7 +448,7 @@
if (MB_SUCCESS != init())
return MB_FAILURE;
-TPRINT("Gathering Mesh\n");
+tprint("Gathering Mesh\n");
// Gather mesh to export
exportList.clear();
@@ -470,7 +469,7 @@
//if (nodeSet.range.size() == 0)
// return MB_ENTITY_NOT_FOUND;
-TPRINT("Checking ID space\n");
+tprint("Checking ID space\n");
// Make sure ID space is sufficient
elem_count = nodeSet.range.size() + setSet.range.size();
@@ -483,7 +482,7 @@
return MB_FAILURE;
}
-TPRINT( "Creating File\n" );
+tprint( "Creating File\n" );
// Figure out the dimension in which to write the mesh.
int mesh_dim;
@@ -510,7 +509,7 @@
if (MB_SUCCESS != result)
return result;
-TPRINT("Writing Nodes.");
+tprint("Writing Nodes.");
// Write node coordinates
if (!nodeSet.range.empty()) {
@@ -519,7 +518,7 @@
return result;
}
-TPRINT("Writing connectivity.");
+tprint("Writing connectivity.");
// Write element connectivity
for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) {
@@ -528,14 +527,14 @@
return result;
}
-TPRINT("Writing sets.");
+tprint("Writing sets.");
// Write meshsets
result = write_sets();
if (MB_SUCCESS != result)
return result;
-TPRINT("Writing adjacencies.");
+tprint("Writing adjacencies.");
// Write adjacencies
// Tim says don't save node adjacencies!
@@ -550,7 +549,7 @@
return result;
}
-TPRINT("Writing tags.");
+tprint("Writing tags.");
// Write tags
@@ -764,6 +763,7 @@
assert( nodeSet.max_num_ents >= remaining );
num_writes = (nodeSet.max_num_ents+chunk_size-1) / chunk_size;
}
+ long remaining_writes = num_writes;
long offset = nodeSet.offset;
MBRange::const_iterator iter = nodeSet.range.begin();
@@ -787,21 +787,23 @@
memset( buffer, 0, count * sizeof(double) );
}
- TPRINT(" writing node chunk");
+ tprint(" writing %c node chunk %ld of %ld, %ld values at %ld ",
+ (char)('x'+d), num_writes - remaining_writes, num_writes, count, offset );
mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, node_table);
}
iter = end;
offset += count;
- --num_writes;
+ --remaining_writes;
}
// Do empty writes if necessary for parallel collective IO
- while (num_writes--) {
+ while (remaining_writes--) {
assert(writeProp != H5P_DEFAULT);
for (int d = 0; d < dim; ++d) {
- TPRINT(" writing empty node chunk.");
+ tprint(" writing (empty) %c node chunk %ld of %ld.",
+ (char)('x'+d), num_writes - remaining_writes, num_writes );
mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
CHK_MHDF_ERR_1(status, node_table);
}
@@ -842,6 +844,7 @@
assert( elems.max_num_ents >= remaining );
num_writes = (elems.max_num_ents+chunk_size-1) / chunk_size;
}
+ long remaining_writes = num_writes;
MBRange::iterator iter = elems.range.begin();
while (remaining)
@@ -861,19 +864,21 @@
if (0 == (buffer[i] = idMap.find( buffer[i] )))
return MB_FAILURE;
- TPRINT(" writing connectivity chunk");
+ tprint(" writing node connectivity %ld of %ld, %ld values at %ld ",
+ num_writes - remaining_writes, num_writes, count, offset );
mhdf_writeConnectivityWithOpt( elem_table, offset, count,
id_type, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, elem_table);
offset += count;
- --num_writes;
+ --remaining_writes;
}
// Do empty writes if necessary for parallel collective IO
- while (num_writes--) {
+ while (remaining_writes--) {
assert(writeProp != H5P_DEFAULT);
- TPRINT(" writing empty connectivity chunk.");
+ tprint(" writing (empty) connectivity chunk %ld of %ld.",
+ num_writes - remaining_writes, num_writes );
mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
CHK_MHDF_ERR_1(status, elem_table);
}
@@ -955,7 +960,7 @@
if (id_list.size() + count > buffer_size) {
// buffer is full, flush it
- TPRINT(" writing parent/child link chunk");
+ tprint(" writing parent/child link chunk");
mhdf_writeSetParentsChildren( table, offset, count, id_type, buffer, &status );
CHK_MHDF_ERR_1(status, table);
offset += count;
@@ -965,7 +970,7 @@
// If id_list still doesn't fit in empty buffer, write it
// directly rather than trying to buffer it
if (id_list.size() > buffer_size) {
- TPRINT(" writing parent/child link chunk");
+ tprint(" writing parent/child link chunk");
mhdf_writeSetParentsChildren( table, offset, id_list.size(), id_type, &id_list[0], &status );
CHK_MHDF_ERR_1(status, table);
offset += id_list.size();
@@ -978,7 +983,7 @@
}
if (count) {
- TPRINT(" writing final parent/child link chunk");
+ tprint(" writing final parent/child link chunk");
mhdf_writeSetParentsChildren( table, offset, count, id_type, buffer, &status );
CHK_MHDF_ERR_1(status, table);
}
@@ -1037,7 +1042,6 @@
MBRange set_contents;
MBRange::const_iterator iter = sets.begin();
- const MBRange::const_iterator end = sets.end();
long set_offset = setSet.offset;
long content_offset = setContentsOffset;
long child_offset = setChildrenOffset;
@@ -1096,7 +1100,7 @@
if (id_list.size())
{
if (data_count + id_list.size() > content_chunk_size) {
- TPRINT(" writing set content chunk");
+ tprint(" writing set content chunk");
// If there isn't enough space remaining in the buffer,
// flush the buffer.
mhdf_writeSetData( content_table,
@@ -1114,7 +1118,7 @@
// the size of id_list is bigger than the entire buffer,
// write id_list directly.
if (id_list.size() > content_chunk_size) {
- TPRINT(" writing set content chunk");
+ tprint(" writing set content chunk");
mhdf_writeSetData( content_table,
content_buffer_offset,
id_list.size(),
@@ -1134,7 +1138,7 @@
}
}
- TPRINT(" writing set description chunk.");
+ tprint(" writing set description chunk.");
mhdf_writeSetMeta( set_table, set_offset, count, H5T_NATIVE_LONG,
buffer, &status );
CHK_MHDF_ERR_2C(status, set_table, writeSetContents, content_table );
@@ -1142,7 +1146,7 @@
}
if (data_count) {
- TPRINT(" writing final set content chunk");
+ tprint(" writing final set content chunk");
mhdf_writeSetData( content_table,
content_buffer_offset,
data_count,
@@ -1449,7 +1453,7 @@
// If buffer is full, flush it
if (count + adj_list.size() + 2 > (unsigned long)chunk_size)
{
- TPRINT(" writing adjacency chunk.");
+ tprint(" writing adjacency chunk.");
mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, table);
VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
@@ -1468,7 +1472,7 @@
if (count)
{
- TPRINT(" writing final adjacency chunk.");
+ tprint(" writing final adjacency chunk.");
mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, table);
@@ -1481,7 +1485,7 @@
while (num_writes > 0) {
--num_writes;
assert(writeProp != H5P_DEFAULT);
- TPRINT(" writing empty adjacency chunk.");
+ tprint(" writing empty adjacency chunk.");
mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
CHK_MHDF_ERR_1(status, table );
}
@@ -1722,7 +1726,7 @@
CHK_MB_ERR_0( rval );
// write the data
- TPRINT(" writing sparse tag entity chunk.");
+ tprint(" writing sparse tag entity chunk.");
mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type,
id_buffer, writeProp, &status );
CHK_MHDF_ERR_0( status );
@@ -1734,7 +1738,7 @@
// Do empty writes if necessary for parallel collective IO
while (num_writes--) {
assert(writeProp != H5P_DEFAULT);
- TPRINT(" writing empty sparse tag entity chunk.");
+ tprint(" writing empty sparse tag entity chunk.");
mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type,
0, writeProp, &status );
CHK_MHDF_ERR_0( status );
@@ -1859,7 +1863,7 @@
count * mb_size / sizeof(MBEntityHandle) );
// write the data
- TPRINT(" writing sparse tag value chunk.");
+ tprint(" writing sparse tag value chunk.");
mhdf_writeSparseTagValuesWithOpt( tables[1], offset, count,
value_type, tag_buffer, writeProp, &status );
if (mhdf_isError(&status) && value_type && value_type != id_type)
@@ -1873,7 +1877,7 @@
// Do empty writes if necessary for parallel collective IO
while (num_writes--) {
assert(writeProp != H5P_DEFAULT);
- TPRINT(" writing empty sparse tag value chunk.");
+ tprint(" writing empty sparse tag value chunk.");
mhdf_writeSparseTagValuesWithOpt( tables[1], offset, 0,
value_type, 0, writeProp, &status );
CHK_MHDF_ERR_0( status );
@@ -1997,7 +2001,7 @@
if (bytes + size > data_buffer_size) {
// write out tag data buffer
if (bytes) { // bytes might be zero if tag value is larger than buffer
- TPRINT(" writing var-length sparse tag value chunk.");
+ tprint(" writing var-length sparse tag value chunk.");
mhdf_writeSparseTagValues( tables[1], data_offset,
bytes / type_size,
hdf_type, data_buffer,
@@ -2018,7 +2022,7 @@
&tmp_storage[0], tmp_storage.size() );
ptr = &tmp_storage[0];
}
- TPRINT(" writing var-length sparse tag value chunk.");
+ tprint(" writing var-length sparse tag value chunk.");
mhdf_writeSparseTagValues( tables[1], data_offset,
size / type_size, hdf_type, ptr,
&status );
@@ -2038,7 +2042,7 @@
}
// write offsets
- TPRINT(" writing var-length sparse tag index chunk.");
+ tprint(" writing var-length sparse tag index chunk.");
mhdf_writeSparseTagIndices( tables[2], offset_offset, count,
H5T_NATIVE_LONG, offset_buffer,
&status );
@@ -2050,7 +2054,7 @@
// flush data buffer
if (bytes) {
// write out tag data buffer
- TPRINT(" writing final var-length sparse tag value chunk.");
+ tprint(" writing final var-length sparse tag value chunk.");
mhdf_writeSparseTagValues( tables[1], data_offset, bytes / type_size,
hdf_type, data_buffer, &status );
CHK_MHDF_ERR_2(status, tables + 1);
@@ -2093,7 +2097,7 @@
}
mhdf_Status status;
- TPRINT(" writing QA history.");
+ tprint(" writing QA history.");
mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
CHK_MHDF_ERR_0(status);
Modified: MOAB/trunk/parallel/WriteHDF5Parallel.cpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.cpp 2009-08-03 20:38:34 UTC (rev 3088)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.cpp 2009-08-03 20:52:53 UTC (rev 3089)
@@ -1,5 +1,6 @@
#undef DEBUG
+#undef TIME_DEBUG
#include <stdio.h>
#include <stdarg.h>
@@ -71,8 +72,6 @@
VALGRIND_MAKE_MEM_UNDEFINED( &v[0], v.size() * sizeof(T) );
}
-//#define TPRINT(A)
-#define TPRINT(A) tprint( (A) )
#ifdef DEBUG
# define START_SERIAL \
@@ -404,12 +403,12 @@
pcommAllocated = true;
}
-TPRINT("Gathering interface meshes");
+tprint("Gathering interface meshes");
rval = gather_interface_meshes();
if (MB_SUCCESS != rval) return rval;
/**************** get tag names for sets likely to be shared ***********/
-TPRINT("Getting shared entity sets");
+tprint("Getting shared entity sets");
rval = get_sharedset_tags();
if (MB_SUCCESS != rval) return rval;
@@ -424,7 +423,7 @@
for (MBEntityType i = MBEDGE; i < MBENTITYSET; ++i)
type_names[i] = MBCN::EntityTypeName( i );
-TPRINT("call mhdf_createFile");
+tprint("call mhdf_createFile");
filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, &status );
if (!filePtr)
{
@@ -432,7 +431,7 @@
return MB_FAILURE;
}
-TPRINT("call write_qa");
+tprint("call write_qa");
rval = write_qa( qa_records );
if (MB_SUCCESS != rval) return rval;
}
@@ -440,36 +439,36 @@
/**************** Create node coordinate table ***************/
-TPRINT("creating node table");
+tprint("creating node table");
rval = create_node_table( dimension );
if (MB_SUCCESS != rval) return rval;
/**************** Create element tables ***************/
-TPRINT("negotiating element types");
+tprint("negotiating element types");
rval = negotiate_type_list();
if (MB_SUCCESS != rval) return rval;
-TPRINT("creating element table");
+tprint("creating element table");
rval = create_element_tables();
if (MB_SUCCESS != rval) return rval;
/*************** Exchange file IDs *****************/
-TPRINT("communicating file ids");
+tprint("communicating file ids");
rval = exchange_file_ids();
if (MB_SUCCESS != rval) return rval;
/**************** Create adjacency tables *********************/
-TPRINT("creating adjacency table");
+tprint("creating adjacency table");
rval = create_adjacency_tables();
if (MB_SUCCESS != rval) return rval;
/**************** Create meshset tables *********************/
-TPRINT("creating meshset table");
+tprint("creating meshset table");
rval = create_meshset_tables();
if (MB_SUCCESS != rval) return rval;
@@ -525,7 +524,7 @@
// Populate proc_tag_offsets on root processor with the values from
// tag_counts on each processor.
-TPRINT("communicating tag metadata");
+tprint("communicating tag metadata");
printdebug("Exchanging tag data for %d tags.\n", num_tags);
std::vector<unsigned long> proc_tag_offsets(2*num_tags*myPcomm->proc_config().proc_size());
VALGRIND_CHECK_MEM_IS_DEFINED( &tag_counts[0], 2*num_tags*sizeof(long) );
@@ -626,7 +625,7 @@
mhdf_closeFile( filePtr, &status );
}
-TPRINT("(re)opening file in parallel mode");
+tprint("(re)opening file in parallel mode");
unsigned long junk;
hid_t hdf_opt = H5Pcreate( H5P_FILE_ACCESS );
H5Pset_fapl_mpio( hdf_opt, myPcomm->proc_config().proc_comm(), MPI_INFO_NULL );
@@ -638,7 +637,7 @@
return MB_FAILURE;
}
-TPRINT("Exiting parallel_create_file");
+tprint("Exiting parallel_create_file");
return MB_SUCCESS;
}
@@ -1880,8 +1879,8 @@
{
//char buffer[256];
//sprintf(buffer, "write_shared_set_descriptions( %u )", (unsigned)parallelSets.size() );
-//TPRINT( buffer );
-TPRINT( "write_shared_set_descriptions" );
+//tprint( buffer );
+tprint( "write_shared_set_descriptions" );
const id_t start_id = setSet.first_id;
MBErrorCode rval;
@@ -1913,7 +1912,7 @@
CHECK_HDF(status);
}
-TPRINT( "finished write_shared_set_descriptions" );
+tprint( "finished write_shared_set_descriptions" );
return MB_SUCCESS;
}
@@ -1921,7 +1920,7 @@
MBErrorCode WriteHDF5Parallel::write_shared_set_contents( hid_t table )
{
-TPRINT( "write_shared_set_contents" );
+tprint( "write_shared_set_contents" );
MBErrorCode rval;
mhdf_Status status;
@@ -1952,14 +1951,14 @@
}
-TPRINT( "finished write_shared_set_contents" );
+tprint( "finished write_shared_set_contents" );
return MB_SUCCESS;
}
MBErrorCode WriteHDF5Parallel::write_shared_set_children( hid_t table )
{
-TPRINT( "write_shared_set_children" );
+tprint( "write_shared_set_children" );
MBErrorCode rval;
mhdf_Status status;
@@ -1990,14 +1989,14 @@
}
}
-TPRINT( "finished write_shared_set_children" );
+tprint( "finished write_shared_set_children" );
return MB_SUCCESS;
}
MBErrorCode WriteHDF5Parallel::write_shared_set_parents( hid_t table )
{
-TPRINT( "write_shared_set_parents" );
+tprint( "write_shared_set_parents" );
MBErrorCode rval;
mhdf_Status status;
@@ -2028,7 +2027,7 @@
}
}
-TPRINT( "finished write_shared_set_parents" );
+tprint( "finished write_shared_set_parents" );
return MB_SUCCESS;
}
@@ -2042,6 +2041,7 @@
void WriteHDF5Parallel::tprint( const char* fmt, ... )
{
+#ifdef TIME_DEBUG
static const double t0 = MPI_Wtime();
va_list args;
va_start(args, fmt);
@@ -2052,6 +2052,7 @@
vsnprintf( buffer+n, sizeof(buffer)-n, fmt, args );
fputs( buffer, stderr );
va_end(args);
+#endif
}
class TagNameCompare {
More information about the moab-dev
mailing list