[MOAB-dev] r2993 - MOAB/trunk

kraftche at cae.wisc.edu
Fri Jul 10 15:09:37 CDT 2009


Author: kraftche
Date: 2009-07-10 15:09:37 -0500 (Fri, 10 Jul 2009)
New Revision: 2993

Modified:
   MOAB/trunk/ReadHDF5.cpp
Log:
fix a rather serious bug when reading files containing a large number of sets

Modified: MOAB/trunk/ReadHDF5.cpp
===================================================================
--- MOAB/trunk/ReadHDF5.cpp	2009-07-10 17:20:31 UTC (rev 2992)
+++ MOAB/trunk/ReadHDF5.cpp	2009-07-10 20:09:37 UTC (rev 2993)
@@ -1868,6 +1868,14 @@
   mhdf_Status status;
   if (file_ids.empty())
     return MB_SUCCESS;
+
+// If doing a full read, we should end up reading the entire offset
+// column in order
+#ifndef NDEBUG
+  const bool full_read = (file_ids.front() == (MBEntityHandle)start_id) 
+                      && (file_ids.size() == (size_t)entity_count);
+  int offset_idx = 0;
+#endif
     
     // things will get messed up if this isn't true
   assert( ranged_ids_in.subtract( file_ids ).empty() );
@@ -1900,14 +1908,26 @@
     sets.erase( sets.begin(), sets.begin() + count );
 
     if (start == start_id) {
+#ifndef NDEBUG
+      assert(!offset_idx);
+      offset_idx += count;
+#endif
       offset_buffer[0] = -1;
       tool.read_indices( 0, count, offset_buffer + 1, status );
     }
     else if (prev_start + prev_count == start) {
+#ifndef NDEBUG
+      assert(!full_read || offset_idx == start - start_id);
+      offset_idx = start - start_id + count;
+#endif
       offset_buffer[0] = offset_buffer[prev_count];
       tool.read_indices( start - start_id, count, offset_buffer + 1, status );
     }
     else {
+#ifndef NDEBUG
+      assert(!full_read);
+      offset_idx = start - start_id + count;
+#endif
       tool.read_indices( start - start_id - 1, count+1, offset_buffer, status );
     }
     if (is_error(status))
@@ -1958,7 +1978,12 @@
           return error(MB_FAILURE);
         MBEntityHandle* content_iter = content_buffer;
         for (long i = 0; i < read_count; ++i) {
-          long content_count = offset_buffer[i+1] - offset_buffer[i];
+#ifndef NDEBUG
+          size_t exp_off = file_id - start_id; // the offset we think we are at
+          size_t act_off = offset_idx - count + offset + i;
+          assert( exp_off == act_off );
+#endif
+          long content_count = offset_buffer[offset+i+1] - offset_buffer[offset+i];
           bool ranged = !ranged_ids.empty() && ((long)ranged_ids.front() == file_id);
           rval = tool.store_data( h, file_id, content_iter, content_count, ranged );
           if (MB_SUCCESS != rval)
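
For anyone reading the patch without the surrounding code: the substantive change is in
the last hunk. When the contents of a chunk of sets have to be processed in more than one
pass (which only happens for files with a large number of sets), each set's content count
must be taken from the offset column at the current position,
offset_buffer[offset+i+1] - offset_buffer[offset+i]; the old code indexed from the start
of the buffer, so every pass after the first computed its counts from the wrong offsets.
The new NDEBUG-only bookkeeping (full_read, offset_idx) just asserts that a full read
walks the offset column in order. Below is a minimal standalone sketch of the indexing
pattern, simplified so that the whole offset column sits in one vector (the real code
reads it into offset_buffer piecewise); it is not MOAB code and the names are made up.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Prefix-sum "offset column": set k owns entries [offsets[k], offsets[k+1]).
  const std::vector<long> offsets = {0, 2, 5, 6, 10, 12};
  const std::size_t num_sets = offsets.size() - 1;
  const std::size_t chunk = 2;   // pretend the buffer only holds two sets per pass

  std::vector<long> counts(num_sets);
  for (std::size_t base = 0; base < num_sets; base += chunk) {
    const std::size_t n = std::min(chunk, num_sets - base);
    for (std::size_t i = 0; i < n; ++i) {
      // Correct: index the offset column at the chunk's position (base + i).
      counts[base + i] = offsets[base + i + 1] - offsets[base + i];
      // The bug the patch fixes was the equivalent of using
      // offsets[i + 1] - offsets[i] here, which reuses the first chunk's
      // offsets for every later chunk.
    }
  }

  for (std::size_t k = 0; k < num_sets; ++k)
    std::printf("set %zu: %ld entries\n", k, counts[k]);
  return 0;
}

With chunk-relative indexing every pass sees its own slice of the prefix sums, which is
what the added "offset+" in the patched line restores.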


