[MOAB-dev] r3088 - in MOAB/trunk: . parallel
kraftche at cae.wisc.edu
Mon Aug 3 15:38:35 CDT 2009
Author: kraftche
Date: 2009-08-03 15:38:34 -0500 (Mon, 03 Aug 2009)
New Revision: 3088
Modified:
MOAB/trunk/WriteHDF5.cpp
MOAB/trunk/WriteHDF5.hpp
MOAB/trunk/parallel/WriteHDF5Parallel.cpp
MOAB/trunk/parallel/WriteHDF5Parallel.hpp
Log:
more/better debugging output
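As the diff below shows, the compile-time DEBUGOUT/TPRINT debugging macros are replaced by a virtual tprint() method on WriteHDF5, overridden in WriteHDF5Parallel, which prefixes each message with the elapsed time since the first call (plus the MPI rank in the parallel writer) and prints it to stderr. A minimal usage sketch follows; the message text and counter variables here are illustrative only and not part of this commit:

    // TPRINT(msg) expands to tprint(msg); virtual dispatch selects the
    // WriteHDF5Parallel override during parallel writes, which also
    // prints the MPI rank in front of the elapsed time.
    TPRINT("Writing Nodes.");

    // tprint() itself is printf-style (note the GCC format attribute added
    // in the headers), so chunk progress could also be reported directly,
    // e.g. with hypothetical counters:
    tprint(" writing connectivity chunk %d of %d", chunk, num_chunks);
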
Modified: MOAB/trunk/WriteHDF5.cpp
===================================================================
--- MOAB/trunk/WriteHDF5.cpp 2009-07-31 19:20:03 UTC (rev 3087)
+++ MOAB/trunk/WriteHDF5.cpp 2009-08-03 20:38:34 UTC (rev 3088)
@@ -37,6 +37,7 @@
#include <time.h>
#include <stdlib.h>
#include <string.h>
+#include <stdarg.h>
#include <limits>
#include <cstdio>
#include <iostream>
@@ -79,6 +80,9 @@
#endif
+//#define TPRINT(A)
+#define TPRINT(A) tprint( (A) )
+
#ifdef VALGRIND
# include <valgrind/memcheck.h>
#else
@@ -338,8 +342,21 @@
idMap.clear();
return MB_SUCCESS;
}
-
+void WriteHDF5::tprint( const char* fmt, ... )
+{
+ static const clock_t t0 = clock();
+ va_list args;
+ va_start(args, fmt);
+ char buffer[128];
+ size_t n = snprintf( buffer, sizeof(buffer), "%6.2f: \n", (double)(clock()-t0)/CLOCKS_PER_SEC );
+ vsnprintf( buffer+n, sizeof(buffer)-n, fmt, args );
+ fputs( buffer, stderr );
+ va_end(args);
+}
+
+
+
WriteHDF5::~WriteHDF5()
{
if (!writeUtil) // init() failed.
@@ -432,7 +449,7 @@
if (MB_SUCCESS != init())
return MB_FAILURE;
-DEBUGOUT("Gathering Mesh\n");
+TPRINT("Gathering Mesh\n");
// Gather mesh to export
exportList.clear();
@@ -453,7 +470,7 @@
//if (nodeSet.range.size() == 0)
// return MB_ENTITY_NOT_FOUND;
-DEBUGOUT("Checking ID space\n");
+TPRINT("Checking ID space\n");
// Make sure ID space is sufficient
elem_count = nodeSet.range.size() + setSet.range.size();
@@ -466,7 +483,7 @@
return MB_FAILURE;
}
-DEBUGOUT( "Creating File\n" );
+TPRINT( "Creating File\n" );
// Figure out the dimension in which to write the mesh.
int mesh_dim;
@@ -493,7 +510,7 @@
if (MB_SUCCESS != result)
return result;
-DEBUGOUT("Writing Nodes.\n");
+TPRINT("Writing Nodes.");
// Write node coordinates
if (!nodeSet.range.empty()) {
@@ -502,7 +519,7 @@
return result;
}
-DEBUGOUT("Writing connectivity.\n");
+TPRINT("Writing connectivity.");
// Write element connectivity
for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) {
@@ -511,14 +528,14 @@
return result;
}
-DEBUGOUT("Writing sets.\n");
+TPRINT("Writing sets.");
// Write meshsets
result = write_sets();
if (MB_SUCCESS != result)
return result;
-DEBUGOUT("Writing adjacencies.\n");
+TPRINT("Writing adjacencies.");
// Write adjacencies
// Tim says don't save node adjacencies!
@@ -533,7 +550,7 @@
return result;
}
-DEBUGOUT("Writing tags.\n");
+TPRINT("Writing tags.");
// Write tags
@@ -770,6 +787,7 @@
memset( buffer, 0, count * sizeof(double) );
}
+ TPRINT(" writing node chunk");
mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, node_table);
}
@@ -783,6 +801,7 @@
while (num_writes--) {
assert(writeProp != H5P_DEFAULT);
for (int d = 0; d < dim; ++d) {
+ TPRINT(" writing empty node chunk.");
mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
CHK_MHDF_ERR_1(status, node_table);
}
@@ -842,6 +861,7 @@
if (0 == (buffer[i] = idMap.find( buffer[i] )))
return MB_FAILURE;
+ TPRINT(" writing connectivity chunk");
mhdf_writeConnectivityWithOpt( elem_table, offset, count,
id_type, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, elem_table);
@@ -853,6 +873,7 @@
// Do empty writes if necessary for parallel collective IO
while (num_writes--) {
assert(writeProp != H5P_DEFAULT);
+ TPRINT(" writing empty connectivity chunk.");
mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
CHK_MHDF_ERR_1(status, elem_table);
}
@@ -934,6 +955,7 @@
if (id_list.size() + count > buffer_size) {
// buffer is full, flush it
+ TPRINT(" writing parent/child link chunk");
mhdf_writeSetParentsChildren( table, offset, count, id_type, buffer, &status );
CHK_MHDF_ERR_1(status, table);
offset += count;
@@ -943,6 +965,7 @@
// If id_list still doesn't fit in the empty buffer, write it
// directly rather than trying to buffer it
if (id_list.size() > buffer_size) {
+ TPRINT(" writing parent/child link chunk");
mhdf_writeSetParentsChildren( table, offset, id_list.size(), id_type, &id_list[0], &status );
CHK_MHDF_ERR_1(status, table);
offset += id_list.size();
@@ -955,6 +978,7 @@
}
if (count) {
+ TPRINT(" writing final parent/child link chunk");
mhdf_writeSetParentsChildren( table, offset, count, id_type, buffer, &status );
CHK_MHDF_ERR_1(status, table);
}
@@ -1072,6 +1096,7 @@
if (id_list.size())
{
if (data_count + id_list.size() > content_chunk_size) {
+ TPRINT(" writing set content chunk");
// If there isn't enough space remaining in the buffer,
// flush the buffer.
mhdf_writeSetData( content_table,
@@ -1089,6 +1114,7 @@
// the size of id_list is bigger than the entire buffer,
// write id_list directly.
if (id_list.size() > content_chunk_size) {
+ TPRINT(" writing set content chunk");
mhdf_writeSetData( content_table,
content_buffer_offset,
id_list.size(),
@@ -1108,6 +1134,7 @@
}
}
+ TPRINT(" writing set description chunk.");
mhdf_writeSetMeta( set_table, set_offset, count, H5T_NATIVE_LONG,
buffer, &status );
CHK_MHDF_ERR_2C(status, set_table, writeSetContents, content_table );
@@ -1115,6 +1142,7 @@
}
if (data_count) {
+ TPRINT(" writing final set content chunk");
mhdf_writeSetData( content_table,
content_buffer_offset,
data_count,
@@ -1421,6 +1449,7 @@
// If buffer is full, flush it
if (count + adj_list.size() + 2 > (unsigned long)chunk_size)
{
+ TPRINT(" writing adjacency chunk.");
mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, table);
VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
@@ -1439,6 +1468,7 @@
if (count)
{
+ TPRINT(" writing final adjacency chunk.");
mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
CHK_MHDF_ERR_1(status, table);
@@ -1451,6 +1481,7 @@
while (num_writes > 0) {
--num_writes;
assert(writeProp != H5P_DEFAULT);
+ TPRINT(" writing empty adjacency chunk.");
mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
CHK_MHDF_ERR_1(status, table );
}
@@ -1691,6 +1722,7 @@
CHK_MB_ERR_0( rval );
// write the data
+ TPRINT(" writing sparse tag entity chunk.");
mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type,
id_buffer, writeProp, &status );
CHK_MHDF_ERR_0( status );
@@ -1702,6 +1734,7 @@
// Do empty writes if necessary for parallel collective IO
while (num_writes--) {
assert(writeProp != H5P_DEFAULT);
+ TPRINT(" writing empty sparse tag entity chunk.");
mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type,
0, writeProp, &status );
CHK_MHDF_ERR_0( status );
@@ -1826,6 +1859,7 @@
count * mb_size / sizeof(MBEntityHandle) );
// write the data
+ TPRINT(" writing sparse tag value chunk.");
mhdf_writeSparseTagValuesWithOpt( tables[1], offset, count,
value_type, tag_buffer, writeProp, &status );
if (mhdf_isError(&status) && value_type && value_type != id_type)
@@ -1839,6 +1873,7 @@
// Do empty writes if necessary for parallel collective IO
while (num_writes--) {
assert(writeProp != H5P_DEFAULT);
+ TPRINT(" writing empty sparse tag value chunk.");
mhdf_writeSparseTagValuesWithOpt( tables[1], offset, 0,
value_type, 0, writeProp, &status );
CHK_MHDF_ERR_0( status );
@@ -1962,6 +1997,7 @@
if (bytes + size > data_buffer_size) {
// write out tag data buffer
if (bytes) { // bytes might be zero if tag value is larger than buffer
+ TPRINT(" writing var-length sparse tag value chunk.");
mhdf_writeSparseTagValues( tables[1], data_offset,
bytes / type_size,
hdf_type, data_buffer,
@@ -1982,6 +2018,7 @@
&tmp_storage[0], tmp_storage.size() );
ptr = &tmp_storage[0];
}
+ TPRINT(" writing var-length sparse tag value chunk.");
mhdf_writeSparseTagValues( tables[1], data_offset,
size / type_size, hdf_type, ptr,
&status );
@@ -2001,6 +2038,7 @@
}
// write offsets
+ TPRINT(" writing var-length sparse tag index chunk.");
mhdf_writeSparseTagIndices( tables[2], offset_offset, count,
H5T_NATIVE_LONG, offset_buffer,
&status );
@@ -2012,6 +2050,7 @@
// flush data buffer
if (bytes) {
// write out tag data buffer
+ TPRINT(" writing final var-length sparse tag value chunk.");
mhdf_writeSparseTagValues( tables[1], data_offset, bytes / type_size,
hdf_type, data_buffer, &status );
CHK_MHDF_ERR_2(status, tables + 1);
@@ -2054,6 +2093,7 @@
}
mhdf_Status status;
+ TPRINT(" writing QA history.");
mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
CHK_MHDF_ERR_0(status);
Modified: MOAB/trunk/WriteHDF5.hpp
===================================================================
--- MOAB/trunk/WriteHDF5.hpp 2009-07-31 19:20:03 UTC (rev 3087)
+++ MOAB/trunk/WriteHDF5.hpp 2009-08-03 20:38:34 UTC (rev 3088)
@@ -106,6 +106,11 @@
virtual MBErrorCode write_shared_set_parents( hid_t )
{ return MB_SUCCESS;}
virtual MBErrorCode write_finished();
+ virtual void tprint( const char* fmt, ... )
+#ifdef __GNUC__
+__attribute__((format(printf,2,3)))
+#endif
+ ;
//! Gather tags
Modified: MOAB/trunk/parallel/WriteHDF5Parallel.cpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.cpp 2009-07-31 19:20:03 UTC (rev 3087)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.cpp 2009-08-03 20:38:34 UTC (rev 3088)
@@ -1,10 +1,8 @@
#undef DEBUG
-#ifdef DEBUG
-# include <stdio.h>
-# include <stdarg.h>
-#endif
+#include <stdio.h>
+#include <stdarg.h>
#include <stdio.h>
#include <time.h>
@@ -73,16 +71,8 @@
VALGRIND_MAKE_MEM_UNDEFINED( &v[0], v.size() * sizeof(T) );
}
-#define TPRINT(A)
-//#define TPRINT(A) tprint( (A) )
-static void tprint(const char* A)
-{
- int rank;
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- char buffer[128];
- sprintf(buffer,"%02d: %6.2f: %s\n", rank, (double)clock()/CLOCKS_PER_SEC, A);
- fputs( buffer, stderr );
-}
+//#define TPRINT(A)
+#define TPRINT(A) tprint( (A) )
#ifdef DEBUG
# define START_SERIAL \
@@ -648,6 +638,7 @@
return MB_FAILURE;
}
+TPRINT("Exiting parallel_create_file");
return MB_SUCCESS;
}
@@ -2049,6 +2040,19 @@
return WriteHDF5::write_finished();
}
+void WriteHDF5Parallel::tprint( const char* fmt, ... )
+{
+ static const double t0 = MPI_Wtime();
+ va_list args;
+ va_start(args, fmt);
+ int rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ char buffer[128];
+ size_t n = snprintf( buffer, sizeof(buffer), "%02d: %6.2f: \n", rank, MPI_Wtime()-t0 );
+ vsnprintf( buffer+n, sizeof(buffer)-n, fmt, args );
+ fputs( buffer, stderr );
+ va_end(args);
+}
class TagNameCompare {
MBInterface* iFace;
Modified: MOAB/trunk/parallel/WriteHDF5Parallel.hpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.hpp 2009-07-31 19:20:03 UTC (rev 3087)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.hpp 2009-08-03 20:38:34 UTC (rev 3088)
@@ -195,6 +195,12 @@
//! Virtual function overridden from WriteHDF5.
//! Release memory by clearing member lists.
MBErrorCode write_finished();
+
+ virtual void tprint( const char* fmt, ... )
+#ifdef __GNUC__
+__attribute__((format(printf,2,3)))
+#endif
+ ;
//! Remove any remote mesh entities from the passed range.
void remove_remote_entities( MBEntityHandle relative, MBRange& range );