[MOAB-dev] commit/MOAB: 2 new changesets
commits-noreply at bitbucket.org
Mon Aug 5 14:08:57 CDT 2013
2 new commits in MOAB:
https://bitbucket.org/fathomteam/moab/commits/27011d8e9d16/
Changeset: 27011d8e9d16
Branch: None
User: danwu
Date: 2013-08-05 21:08:27
Summary: Some minor code changes: remove localGid from the ReadNC class and let the NCHelperHOMME class use UcdNCHelper::localGidVerts instead; move kji_to_jik() to ScdNCHelper and kji_to_jik_stride() to UcdNCHelper; use the precomputed VarData::sz instead of recalculating the size when reading variables.
Affected #: 5 files
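
For reference, a minimal self-contained sketch of the k-slowest to k-fastest transpose performed by the kji_to_jik() helper that this changeset moves into ScdNCHelper. The small driver, dimension sizes, and values are illustrative only and are not part of the commit:

#include <cstddef>
#include <iostream>
#include <vector>

// Transpose data stored with k varying slowest ([k][j][i] layout) into
// j varying slowest and k varying fastest ([j][i][k] layout), mirroring
// the template helper shown in the NCHelper.hpp hunk below.
template <typename T>
void kji_to_jik(std::size_t ni, std::size_t nj, std::size_t nk, T* dest, const T* source)
{
  const std::size_t nik = ni * nk, nij = ni * nj;
  for (std::size_t j = 0; j != nj; j++)
    for (std::size_t i = 0; i != ni; i++)
      for (std::size_t k = 0; k != nk; k++)
        dest[j*nik + i*nk + k] = source[k*nij + j*ni + i];
}

int main()
{
  const std::size_t ni = 2, nj = 1, nk = 3;     // tiny illustrative sizes
  std::vector<double> src = {0, 1, 2, 3, 4, 5}; // laid out as [k][j][i]
  std::vector<double> dst(src.size());
  kji_to_jik(ni, nj, nk, dst.data(), src.data());
  for (double v : dst)
    std::cout << v << ' ';                      // prints: 0 2 4 1 3 5, i.e. [j][i][k] order
  std::cout << '\n';
  return 0;
}
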
diff --git a/src/io/NCHelper.cpp b/src/io/NCHelper.cpp
index d28c824..b6158a9 100644
--- a/src/io/NCHelper.cpp
+++ b/src/io/NCHelper.cpp
@@ -67,18 +67,18 @@ ErrorCode NCHelper::read_variable_to_set_allocate(std::vector<ReadNC::VarData>&
for (unsigned int t = 0; t < tstep_nums.size(); t++) {
dbgOut.tprintf(2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
- // get the tag to read into
+ // Get the tag to read into
if (!vdatas[i].varTags[t]) {
rval = _readNC->get_tag_to_set(vdatas[i], tstep_nums[t], vdatas[i].varTags[t]);
ERRORR(rval, "Trouble getting tag.");
}
- // assume point-based values for now?
+ // Assume point-based values for now?
if (-1 == tDim || dimVals[tDim] <= (int) t)
ERRORR(MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number.");
- // set up the dimensions and counts
- // first variable dimension is time, if it exists
+ // Set up the dimensions and counts
+ // First variable dimension is time, if it exists
if (vdatas[i].has_t)
{
if (vdatas[i].varDims.size() != 1)
@@ -146,12 +146,12 @@ ErrorCode NCHelper::read_variable_to_set(EntityHandle file_set, std::vector<Read
ErrorCode rval = read_variable_to_set_allocate(vdatas, tstep_nums);
ERRORR(rval, "Trouble allocating read variables to set.");
- // finally, read into that space
+ // Finally, read into that space
int success;
std::vector<int> requests(vdatas.size() * tstep_nums.size()), statuss(vdatas.size() * tstep_nums.size());
for (unsigned int i = 0; i < vdatas.size(); i++) {
if (dummyVarNames.find(vdatas[i].varName) != dummyVarNames.end() )
- continue;// this is a dummy one, we don't have it; we created it for the dummy tag
+ continue; // This is a dummy one, we don't have it; we created it for the dummy tag
for (unsigned int t = 0; t < tstep_nums.size(); t++) {
void* data = vdatas[i].varDatas[t];
@@ -209,7 +209,7 @@ ErrorCode NCHelper::read_variable_to_set(EntityHandle file_set, std::vector<Read
break;
}
}
- // debug output, if requested
+ // Debug output, if requested
if (1 == dbgOut.get_verbosity()) {
dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
for (unsigned int i = 1; i < vdatas.size(); i++)
@@ -436,7 +436,7 @@ ErrorCode ScdNCHelper::create_mesh(ScdInterface* scdi, const FileOptions& opts,
dbgOut.tprintf(1, "Vertex gids %d-%d\n", vmin, vmax);
#endif
- // add elements to the range passed in
+ // Add elements to the range passed in
faces.insert(scd_box->start_element(), scd_box->start_element() + scd_box->num_elements() - 1);
if (2 <= dbgOut.get_verbosity()) {
@@ -467,7 +467,7 @@ ErrorCode ScdNCHelper::read_variables(EntityHandle file_set, std::vector<std::st
ErrorCode rval = read_scd_variable_setup(var_names, tstep_nums, vdatas, vsetdatas);
ERRORR(rval, "Trouble setting up read variable.");
- // create COORDS tag for quads
+ // Create COORDS tag for quads
rval = _readNC->create_quad_coordinate_tag(file_set);
ERRORR(rval, "Trouble creating coordinate tags to entities quads");
@@ -537,7 +537,7 @@ ErrorCode ScdNCHelper::read_scd_variable_setup(std::vector<std::string>& var_nam
}
if (tstep_nums.empty() && -1 != tMin) {
- // no timesteps input, get them all
+ // No timesteps input, get them all
for (int i = tMin; i <= tMax; i++)
tstep_nums.push_back(i);
}
@@ -586,7 +586,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
Range* range = NULL;
- // get vertices in set
+ // Get vertices in set
Range verts;
rval = mbImpl->get_entities_by_dimension(file_set, 0, verts);
ERRORR(rval, "Trouble getting vertices in set.");
@@ -612,7 +612,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
ERRORR(rval, "Trouble getting owned faces in set.");
}
else
- faces_owned = faces; // not running in parallel, but still with MPI
+ faces_owned = faces; // Not running in parallel, but still with MPI
#endif
for (unsigned int i = 0; i < vdatas.size(); i++) {
@@ -646,20 +646,21 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
}
// Set up the dimensions and counts
- // First time
+ // First: time
vdatas[i].readStarts[t].push_back(tstep_nums[t]);
vdatas[i].readCounts[t].push_back(1);
- // then z/y/x
+ // Next: numLev
if (vdatas[i].numLev != 1) {
vdatas[i].readStarts[t].push_back(0);
vdatas[i].readCounts[t].push_back(vdatas[i].numLev);
}
+ // Finally: y and x
switch (vdatas[i].entLoc) {
case ReadNC::ENTLOCVERT:
- // vertices
- // only structured mesh has j parameter that multiplies i to get total # vertices
+ // Vertices
+ // Only structured mesh has j parameter that multiplies i to get total # vertices
vdatas[i].readStarts[t].push_back(lDims[1]);
vdatas[i].readCounts[t].push_back(lDims[4] - lDims[1] + 1);
vdatas[i].readStarts[t].push_back(lDims[0]);
@@ -674,7 +675,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
ERRORR(MB_FAILURE, "Reading edge data not implemented yet.");
break;
case ReadNC::ENTLOCFACE:
- // faces
+ // Faces
vdatas[i].readStarts[t].push_back(lCDims[1]);
vdatas[i].readStarts[t].push_back(lCDims[0]);
vdatas[i].readCounts[t].push_back(lCDims[4] - lCDims[1] + 1);
@@ -687,7 +688,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset_allocate(EntityHandle file_se
#endif
break;
case ReadNC::ENTLOCSET:
- // set
+ // Set
break;
default:
ERRORR(MB_FAILURE, "Unrecognized entity location type.");
@@ -724,10 +725,9 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
int success;
std::vector<int> requests(vdatas.size() * tstep_nums.size()), statuss(vdatas.size() * tstep_nums.size());
for (unsigned int i = 0; i < vdatas.size(); i++) {
+ std::size_t sz = vdatas[i].sz;
+
for (unsigned int t = 0; t < tstep_nums.size(); t++) {
- std::size_t sz = 1;
- for (std::size_t idx = 0; idx != vdatas[i].readCounts[t].size(); idx++)
- sz *= vdatas[i].readCounts[t][idx];
void* data = vdatas[i].varDatas[t];
size_t ni = vdatas[i].readCounts[t][2];
size_t nj = vdatas[i].readCounts[t][3];
@@ -740,8 +740,8 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
success = NCFUNCAG(_vara_text)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpchardata[0] NCREQ);
if (vdatas[i].numLev != 1)
- // switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik(ni, nj, nk, data, &tmpchardata[0]);
+ // Switch from k varying slowest to k varying fastest
+ success = kji_to_jik(ni, nj, nk, data, &tmpchardata[0]);
else {
for (std::size_t idx = 0; idx != tmpchardata.size(); idx++)
((char*) data)[idx] = tmpchardata[idx];
@@ -754,8 +754,8 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId, &vdatas[i].readStarts[t][0], &vdatas[i].readCounts[t][0],
&tmpdoubledata[0] NCREQ);
if (vdatas[i].numLev != 1)
- // switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik(ni, nj, nk, data, &tmpdoubledata[0]);
+ // Switch from k varying slowest to k varying fastest
+ success = kji_to_jik(ni, nj, nk, data, &tmpdoubledata[0]);
else {
for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
((double*) data)[idx] = tmpdoubledata[idx];
@@ -769,7 +769,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
&tmpfloatdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik(ni, nj, nk, data, &tmpfloatdata[0]);
+ success = kji_to_jik(ni, nj, nk, data, &tmpfloatdata[0]);
else {
for (std::size_t idx = 0; idx != tmpfloatdata.size(); idx++)
((float*) data)[idx] = tmpfloatdata[idx];
@@ -783,7 +783,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
&tmpintdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik(ni, nj, nk, data, &tmpintdata[0]);
+ success = kji_to_jik(ni, nj, nk, data, &tmpintdata[0]);
else {
for (std::size_t idx = 0; idx != tmpintdata.size(); idx++)
((int*) data)[idx] = tmpintdata[idx];
@@ -797,7 +797,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
&tmpshortdata[0] NCREQ);
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik(ni, nj, nk, data, &tmpshortdata[0]);
+ success = kji_to_jik(ni, nj, nk, data, &tmpshortdata[0]);
else {
for (std::size_t idx = 0; idx != tmpshortdata.size(); idx++)
((short*) data)[idx] = tmpshortdata[idx];
@@ -827,7 +827,7 @@ ErrorCode ScdNCHelper::read_scd_variable_to_nonset(EntityHandle file_set, std::v
rval = tmp_rval;
}
}
- // debug output, if requested
+ // Debug output, if requested
if (1 == dbgOut.get_verbosity()) {
dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
for (unsigned int i = 1; i < vdatas.size(); i++)
@@ -853,11 +853,10 @@ ErrorCode UcdNCHelper::read_variables(EntityHandle file_set, std::vector<std::st
if (!vdatas.empty()) {
#ifdef PNETCDF_FILE
- // in serial, we will use the old read, everything is contiguous
- // in parallel, we will use async read in pnetcdf
- // the other mechanism is not working, forget about it
+ // With pnetcdf support, we will use async read
rval = read_ucd_variable_to_nonset_async(file_set, vdatas, tstep_nums);
#else
+ // Without pnetcdf support, we will use old read
rval = read_ucd_variable_to_nonset(file_set, vdatas, tstep_nums);
#endif
ERRORR(rval, "Trouble read variables to entities verts/edges/faces.");
diff --git a/src/io/NCHelper.hpp b/src/io/NCHelper.hpp
index 0ac9b0c..f59663e 100644
--- a/src/io/NCHelper.hpp
+++ b/src/io/NCHelper.hpp
@@ -72,6 +72,17 @@ private:
std::vector<int>& tstep_nums);
ErrorCode read_scd_variable_to_nonset(EntityHandle file_set, std::vector<ReadNC::VarData>& vdatas,
std::vector<int>& tstep_nums);
+
+ template <typename T> ErrorCode kji_to_jik(size_t ni, size_t nj, size_t nk, void* dest, T* source)
+ {
+ size_t nik = ni * nk, nij = ni * nj;
+ T* tmp_data = reinterpret_cast<T*>(dest);
+ for (std::size_t j = 0; j != nj; j++)
+ for (std::size_t i = 0; i != ni; i++)
+ for (std::size_t k = 0; k != nk; k++)
+ tmp_data[j*nik + i*nk + k] = source[k*nij + j*ni + i];
+ return MB_SUCCESS;
+ }
};
//! Child helper class for ucd mesh, e.g. CAM_SE (HOMME) or MPAS
@@ -107,6 +118,27 @@ private:
#endif
protected:
+ //! This version takes as input the moab range, from which we actually need just the
+ //! size of each sequence, for a proper transpose of the data
+ template <typename T> ErrorCode kji_to_jik_stride(size_t , size_t nj, size_t nk, void* dest, T* source, Range& localGid)
+ {
+ std::size_t idxInSource = 0; // Position of the start of the stride
+ // For each subrange, we will transpose a matrix of size
+ // subrange*nj*nk (subrange takes the role of ni)
+ T* tmp_data = reinterpret_cast<T*>(dest);
+ for (Range::pair_iterator pair_iter = localGid.pair_begin();
+ pair_iter != localGid.pair_end(); ++pair_iter) {
+ std::size_t size_range = pair_iter->second - pair_iter->first + 1;
+ std::size_t nik = size_range * nk, nij = size_range * nj;
+ for (std::size_t j = 0; j != nj; j++)
+ for (std::size_t i = 0; i != size_range; i++)
+ for (std::size_t k = 0; k != nk; k++)
+ tmp_data[idxInSource + j*nik + i*nk + k] = source[idxInSource + k*nij + j*size_range + i];
+ idxInSource += (size_range*nj*nk);
+ }
+ return MB_SUCCESS;
+ }
+
//! Dimension numbers for nCells, nEdges, nVertices, nLevels
int cDim, eDim, vDim, levDim;
diff --git a/src/io/NCHelperHOMME.cpp b/src/io/NCHelperHOMME.cpp
index 16a2348..535d7c3 100644
--- a/src/io/NCHelperHOMME.cpp
+++ b/src/io/NCHelperHOMME.cpp
@@ -104,7 +104,7 @@ ErrorCode NCHelperHOMME::init_mesh_vals(const FileOptions& opts, EntityHandle fi
if (-1 == gDims[0])
return MB_FAILURE;
- // set j coordinate to the number of quads
+ // Set j coordinate to the number of quads
gDims[1] = gDims[0];
gDims[4] = gDims[3] - 2;
@@ -117,7 +117,7 @@ ErrorCode NCHelperHOMME::init_mesh_vals(const FileOptions& opts, EntityHandle fi
if (-1 == gDims[2])
return MB_FAILURE;
- // read coordinate data
+ // Read coordinate data
std::map<std::string, ReadNC::VarData>::iterator vmit;
if (gDims[0] != -1) {
if ((vmit = varInfo.find("lon")) != varInfo.end() && (*vmit).second.varDims.size() == 1) {
@@ -129,7 +129,7 @@ ErrorCode NCHelperHOMME::init_mesh_vals(const FileOptions& opts, EntityHandle fi
}
}
- // store lat values in jlVals parameterized by j
+ // Store lat values in jlVals parameterized by j
if (gDims[1] != -1) {
if ((vmit = varInfo.find("lat")) != varInfo.end() && (*vmit).second.varDims.size() == 1) {
rval = _readNC->read_coordinate("lat", gDims[0], gDims[3], jlVals);
@@ -171,7 +171,7 @@ ErrorCode NCHelperHOMME::init_mesh_vals(const FileOptions& opts, EntityHandle fi
}
}
- // determine the entity location type of a variable
+ // Determine the entity location type of a variable
std::map<std::string, ReadNC::VarData>::iterator mit;
for (mit = varInfo.begin(); mit != varInfo.end(); ++mit) {
ReadNC::VarData& vd = (*mit).second;
@@ -182,9 +182,9 @@ ErrorCode NCHelperHOMME::init_mesh_vals(const FileOptions& opts, EntityHandle fi
std::copy(gDims, gDims + 6, lDims);
- // don't read coordinates of columns until we actually create the mesh
+ // Don't read coordinates of columns until we actually create the mesh
- // hack: create dummy tags, if needed, for variables like ncol and nbnd
+ // Hack: create dummy tags, if needed, for variables like ncol and nbnd
// with no corresponding variables
_readNC->init_dims_with_no_cvars_info();
@@ -200,12 +200,11 @@ ErrorCode NCHelperHOMME::check_existing_mesh(EntityHandle tmp_set)
Interface*& mbImpl = _readNC->mbImpl;
Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
bool& noMesh = _readNC->noMesh;
- Range& localGid = _readNC->localGid;
- if (noMesh && localGid.empty()) {
- // We need to populate localGid range with the gids of vertices from the tmp_set
- // localGid is important in reading the variable data into the nodes
- // also, for our purposes, localGid is truly the GLOBAL_ID tag data, not other
+ if (noMesh && localGidVerts.empty()) {
+ // We need to populate localGidVerts range with the gids of vertices from the tmp_set
+ // localGidVerts is important in reading the variable data into the nodes
+ // also, for our purposes, localGidVerts is truly the GLOBAL_ID tag data, not other
// file_id tags that could get passed around in other scenarios for parallel reading
// for nodal_partition, this local gid is easier, should be initialized with only
// the owned nodes
@@ -225,7 +224,7 @@ ErrorCode NCHelperHOMME::check_existing_mesh(EntityHandle tmp_set)
return rval;
// This will do a smart copy
- std::copy(gids.begin(), gids.end(), range_inserter(localGid));
+ std::copy(gids.begin(), gids.end(), range_inserter(localGidVerts));
}
}
@@ -246,17 +245,16 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
DebugOutput& dbgOut = _readNC->dbgOut;
bool& isParallel = _readNC->isParallel;
- Range& localGid = _readNC->localGid;
bool& spectralMesh = _readNC->spectralMesh;
int& gatherSetRank = _readNC->gatherSetRank;
- // need to get/read connectivity data before creating elements
+ // Need to get/read connectivity data before creating elements
std::string conn_fname;
- // try to open the connectivity file through CONN option, if used
+ // Try to open the connectivity file through CONN option, if used
ErrorCode rval = opts.get_str_option("CONN", conn_fname);
if (MB_SUCCESS != rval) {
- // default convention for reading HOMME is a file HommeMapping.nc in same dir as data file
+ // Default convention for reading HOMME is a file HommeMapping.nc in same dir as data file
conn_fname = std::string(fileName);
size_t idx = conn_fname.find_last_of("/");
if (idx != std::string::npos)
@@ -298,7 +296,7 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
conn_fname.c_str(), fileName.c_str(), gDims[3] - gDims[0] + 1, conn_vals[0]);
}
- // read connectivity into temporary variable
+ // Read connectivity into temporary variable
int num_fine_quads, num_coarse_quads, start_idx;
std::vector<std::string>::iterator vit;
int idx;
@@ -311,7 +309,7 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
}
int num_quads = conn_vals[idx];
- // get the connectivity into tmp_conn2 and permute into tmp_conn
+ // Get the connectivity into tmp_conn2 and permute into tmp_conn
int cornerVarId;
success = NCFUNC(inq_varid)(connectId, "element_corners", &cornerVarId);
ERRORS(success, "Failed to get variable id.");
@@ -321,7 +319,7 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
ERRORS(success, "Failed to get temporary connectivity.");
success = NCFUNC(close)(connectId);
ERRORS(success, "Failed on close.");
- // permute the connectivity
+ // Permute the connectivity
for (int i = 0; i < num_quads; i++) {
tmp_conn[4 * i] = tmp_conn2[i];
tmp_conn[4 * i + 1] = tmp_conn2[i + 1 * num_quads];
@@ -335,7 +333,7 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
if (rank == gatherSetRank)
create_gathers = true;
- // compute the number of local quads, accounting for coarse or fine representation
+ // Compute the number of local quads, accounting for coarse or fine representation
// spectral_unit is the # fine quads per coarse quad, or spectralOrder^2
int spectral_unit = (spectralMesh ? _spectralOrder * _spectralOrder : 1);
// num_coarse_quads is the number of quads instantiated in MOAB; if !spectralMesh, num_coarse_quads = num_fine_quads
@@ -350,12 +348,12 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
// num_fine_quads is the number of quads in the connectivity list in HommeMapping file assigned to this proc
num_fine_quads = spectral_unit * num_coarse_quads;
- // now create num_coarse_quads
+ // Now create num_coarse_quads
EntityHandle *conn_arr;
EntityHandle start_vertex;
Range tmp_range;
- // read connectivity into that space
+ // Read connectivity into that space
EntityHandle *sv_ptr = NULL, start_quad;
SpectralMeshTool smt(mbImpl, _spectralOrder);
if (!spectralMesh) {
@@ -366,10 +364,10 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
ERRORR(rval, "Failed to create quads.");
tmp_range.insert(start_quad, start_quad + num_coarse_quads - 1);
std::copy(&tmp_conn[start_idx], &tmp_conn[start_idx + 4 * num_fine_quads], conn_arr);
- std::copy(conn_arr, conn_arr + 4 * num_fine_quads, range_inserter(localGid));
+ std::copy(conn_arr, conn_arr + 4 * num_fine_quads, range_inserter(localGidVerts));
}
else {
- rval = smt.create_spectral_elems(&tmp_conn[0], num_fine_quads, 2, tmp_range, start_idx, &localGid);
+ rval = smt.create_spectral_elems(&tmp_conn[0], num_fine_quads, 2, tmp_range, start_idx, &localGidVerts);
ERRORR(rval, "Failed to create spectral elements.");
int count, v_per_e;
rval = mbImpl->connect_iterate(tmp_range.begin(), tmp_range.end(), conn_arr, v_per_e, count);
@@ -379,22 +377,22 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
ERRORR(rval, "Failed to get fine connectivity of spectral elements.");
}
- // on this proc, I get columns lDims[1]..lDims[4], inclusive; need to find which vertices those correspond to
- unsigned int num_local_verts = localGid.size();
+ // On this proc, I get columns lDims[1]..lDims[4], inclusive; need to find which vertices those correspond to
+ unsigned int num_local_verts = localGidVerts.size();
unsigned int num_total_verts = gDims[3] - gDims[0] + 1;
- // create vertices
+ // Create vertices
std::vector<double*> arrays;
rval = _readNC->readMeshIface->get_node_coords(3, num_local_verts, 0, start_vertex, arrays,
// might have to create gather mesh later
(create_gathers ? num_local_verts+num_total_verts : num_local_verts));
ERRORR(rval, "Couldn't create vertices in ucd mesh.");
- // set vertex coordinates
+ // Set vertex coordinates
Range::iterator rit;
double *xptr = arrays[0], *yptr = arrays[1], *zptr = arrays[2];
int i;
- for (i = 0, rit = localGid.begin(); i < (int)num_local_verts; i++, ++rit) {
+ for (i = 0, rit = localGidVerts.begin(); i < (int)num_local_verts; i++, ++rit) {
assert(*rit < ilVals.size() + 1);
xptr[i] = ilVals[(*rit) - 1]; // lon
yptr[i] = jlVals[(*rit) - 1]; // lat
@@ -413,7 +411,7 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
zptr[i] = rad * zmult;
}
- // get ptr to gid memory for vertices
+ // Get ptr to gid memory for vertices
Range vert_range(start_vertex, start_vertex + num_local_verts - 1);
void* data;
int count;
@@ -421,23 +419,23 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
ERRORR(rval, "Failed to get tag iterator.");
assert(count == (int) num_local_verts);
int* gid_data = (int*) data;
- std::copy(localGid.begin(), localGid.end(), gid_data);
- // duplicate global id data, which will be used to resolve sharing
+ std::copy(localGidVerts.begin(), localGidVerts.end(), gid_data);
+ // Duplicate global id data, which will be used to resolve sharing
if (mpFileIdTag) {
rval = mbImpl->tag_iterate(*mpFileIdTag, vert_range.begin(), vert_range.end(), count, data);
ERRORR(rval, "Failed to get tag iterator on file id tag.");
assert(count == (int) num_local_verts);
gid_data = (int*) data;
- std::copy(localGid.begin(), localGid.end(), gid_data);
+ std::copy(localGidVerts.begin(), localGidVerts.end(), gid_data);
}
- // create map from file ids to vertex handles, used later to set connectivity
+ // Create map from file ids to vertex handles, used later to set connectivity
std::map<EntityHandle, EntityHandle> vert_handles;
- for (rit = localGid.begin(), i = 0; rit != localGid.end(); ++rit, i++) {
+ for (rit = localGidVerts.begin(), i = 0; rit != localGidVerts.end(); ++rit, i++) {
vert_handles[*rit] = start_vertex + i;
}
- // compute proper handles in connectivity using offset
+ // Compute proper handles in connectivity using offset
for (int q = 0; q < 4 * num_coarse_quads; q++) {
conn_arr[q] = vert_handles[conn_arr[q]];
assert(conn_arr[q]);
@@ -450,13 +448,13 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
}
}
- // add new vertices and elements to the set
+ // Add new vertices and elements to the set
faces.merge(tmp_range);
tmp_range.insert(start_vertex, start_vertex + num_local_verts - 1);
rval = mbImpl->add_entities(file_set, tmp_range);
ERRORR(rval, "Couldn't add new vertices and quads/hexes to file set.");
- // mark the set with the spectral order
+ // Mark the set with the spectral order
Tag sporder;
rval = mbImpl->tag_get_handle("SPECTRAL_ORDER", 1, MB_TYPE_INTEGER, sporder, MB_TAG_CREAT | MB_TAG_SPARSE);
ERRORR(rval, "Couldn't create spectral order tag.");
@@ -468,9 +466,9 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
rval = _readNC->readMeshIface->create_gather_set(gather_set);
ERRORR(rval, "Trouble creating gather set.");
- // create vertices
+ // Create vertices
arrays.clear();
- // don't need to specify allocation number here, because we know enough verts were created before
+ // Don't need to specify allocation number here, because we know enough verts were created before
rval = _readNC->readMeshIface->get_node_coords(3, num_total_verts, 0, start_vertex, arrays);
ERRORR(rval, "Couldn't create vertices in ucd mesh for gather set.");
@@ -486,7 +484,7 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
zptr[i] = rad * zmult;
}
- // get ptr to gid memory for vertices
+ // Get ptr to gid memory for vertices
Range gather_verts(start_vertex, start_vertex + num_total_verts - 1);
rval = mbImpl->tag_iterate(mGlobalIdTag, gather_verts.begin(), gather_verts.end(), count, data);
ERRORR(rval, "Failed to get tag iterator.");
@@ -494,7 +492,7 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
gid_data = (int*) data;
for (int j = 1; j <= (int) num_total_verts; j++)
gid_data[j - 1] = j;
- // set the file id tag too, it should be bigger something not interfering with global id
+ // Set the file id tag too, it should be bigger something not interfering with global id
if (mpFileIdTag) {
rval = mbImpl->tag_iterate(*mpFileIdTag, gather_verts.begin(), gather_verts.end(), count, data);
ERRORR(rval, "Failed to get tag iterator in file id tag.");
@@ -507,9 +505,9 @@ ErrorCode NCHelperHOMME::create_mesh(ScdInterface* scdi, const FileOptions& opts
rval = mbImpl->add_entities(gather_set, gather_verts);
ERRORR(rval, "Couldn't add vertices to gather set.");
- // create quads
+ // Create quads
Range gather_quads;
- // don't need to specify allocation number here, because we know enough quads were created before
+ // Don't need to specify allocation number here, because we know enough quads were created before
rval = _readNC->readMeshIface->get_element_connect(num_quads, 4,
MBQUAD, 0, start_quad, conn_arr);
ERRORR(rval, "Failed to create quads.");
@@ -568,7 +566,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_setup(std::vector<std::string>& var_n
}
if (tstep_nums.empty() && -1 != tMin) {
- // no timesteps input, get them all
+ // No timesteps input, get them all
for (int i = tMin; i <= tMax; i++)
tstep_nums.push_back(i);
}
@@ -606,7 +604,6 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_allocate(EntityHandle file_
std::vector<int>& dimVals = _readNC->dimVals;
int& tDim = _readNC->tDim;
DebugOutput& dbgOut = _readNC->dbgOut;
- Range& localGid = _readNC->localGid;
ErrorCode rval = MB_SUCCESS;
@@ -663,11 +660,11 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_allocate(EntityHandle file_
// Finally: nVertices
switch (vdatas[i].entLoc) {
case ReadNC::ENTLOCVERT:
- // vertices
- // we will start from the first localGid, actually; we will reset that
+ // Vertices
+ // We will start from the first localGidVerts, actually; we will reset that
// later on, anyway, in a loop
- vdatas[i].readStarts[t].push_back(localGid[0] - 1);
- vdatas[i].readCounts[t].push_back(localGid.size());
+ vdatas[i].readStarts[t].push_back(localGidVerts[0] - 1);
+ vdatas[i].readCounts[t].push_back(localGidVerts.size());
assert(vdatas[i].readStarts[t].size() == vdatas[i].varDims.size());
range = &verts;
break;
@@ -699,23 +696,20 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_allocate(EntityHandle file_
ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set, std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
{
DebugOutput& dbgOut = _readNC->dbgOut;
- Range& localGid = _readNC->localGid;
ErrorCode rval = read_ucd_variable_to_nonset_allocate(file_set, vdatas, tstep_nums);
ERRORR(rval, "Trouble allocating read variables.");
// Finally, read into that space
int success;
- // MPI_offset or size_t?
+
for (unsigned int i = 0; i < vdatas.size(); i++) {
- for (unsigned int t = 0; t < tstep_nums.size(); t++) {
- std::size_t sz = vdatas[i].numLev * vdatas[i].readCounts[t][2];
- if (sz <= 0)
- continue; // nothing to read, why worry?
+ std::size_t sz = vdatas[i].sz;
- // we will synchronize all these reads with the other processors,
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ // We will synchronize all these reads with the other processors,
// so the wait will be inside this double loop; is it too much?
- size_t nb_reads = localGid.psize();
+ size_t nb_reads = localGidVerts.psize();
std::vector<int> requests(nb_reads), statuss(nb_reads);
size_t idxReq = 0;
void* data = vdatas[i].varDatas[t];
@@ -730,12 +724,12 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
break;
}
case NC_DOUBLE: {
- // copy from float case
+ // Copy from float case
std::vector<double> tmpdoubledata(sz);
- // in the case of ucd mesh, and on multiple proc,
+ // In the case of ucd mesh, and on multiple proc,
// we need to read as many times as subranges we have in the
- // localGid range;
+ // localGidVerts range;
// basically, we have to give a different point
// for data to start, for every subrange :(
size_t nbDims = vdatas[i].readStarts[t].size();
@@ -744,32 +738,32 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
size_t indexInDoubleArray = 0;
size_t ic = 0;
- for (Range::pair_iterator pair_iter = localGid.pair_begin();
- pair_iter != localGid.pair_end();
+ for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
+ pair_iter != localGidVerts.pair_end();
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // inclusive
vdatas[i].readStarts[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 1] = (NCDF_SIZE) (endh - starth + 1);
- // do a partial read, in each subrange
+ // Do a partial read, in each subrange
// wait outside this loop
success = NCFUNCAG2(_vara_double)(_fileId, vdatas[i].varId,
&(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpdoubledata[indexInDoubleArray]) NCREQ2);
ERRORS(success, "Failed to read double data in loop");
- // we need to increment the index in double array for the
+ // We need to increment the index in double array for the
// next subrange
indexInDoubleArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
}
- assert(ic == localGid.psize());
+ assert(ic == localGidVerts.psize());
success = ncmpi_wait_all(_fileId, requests.size(), &requests[0], &statuss[0]);
ERRORS(success, "Failed on wait_all.");
if (vdatas[i].numLev != 1)
// switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik_stride(ni, nj, nk, data, &tmpdoubledata[0]);
+ success = kji_to_jik_stride(ni, nj, nk, data, &tmpdoubledata[0], localGidVerts);
else {
for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
((double*) data)[idx] = tmpdoubledata[idx];
@@ -780,43 +774,42 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
case NC_FLOAT: {
std::vector<float> tmpfloatdata(sz);
- // in the case of ucd mesh, and on multiple proc,
+ // In the case of ucd mesh, and on multiple proc,
// we need to read as many times as subranges we have in the
- // localGid range;
+ // localGidVerts range;
// basically, we have to give a different point
// for data to start, for every subrange :(
size_t nbDims = vdatas[i].readStarts[t].size();
- // assume that the last dimension is for the ncol,
- // node varying variable
+ // Assume that the last dimension is for the ncol, number of vertices
size_t indexInFloatArray = 0;
size_t ic = 0;
- for (Range::pair_iterator pair_iter = localGid.pair_begin();
- pair_iter != localGid.pair_end();
+ for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
+ pair_iter != localGidVerts.pair_end();
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // inclusive
vdatas[i].readStarts[t][nbDims - 1] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 1] = (NCDF_SIZE) (endh - starth + 1);
- // do a partial read, in each subrange
+ // Do a partial read, in each subrange
// wait outside this loop
success = NCFUNCAG2(_vara_float)(_fileId, vdatas[i].varId,
&(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpfloatdata[indexInFloatArray]) NCREQ2);
ERRORS(success, "Failed to read float data in loop");
- // we need to increment the index in float array for the
+ // We need to increment the index in float array for the
// next subrange
indexInFloatArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
}
- assert(ic == localGid.psize());
+ assert(ic == localGidVerts.psize());
success = ncmpi_wait_all(_fileId, requests.size(), &requests[0], &statuss[0]);
ERRORS(success, "Failed on wait_all.");
if (vdatas[i].numLev != 1)
- // switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik_stride(ni, nj, nk, data, &tmpfloatdata[0]);
+ // Switch from k varying slowest to k varying fastest
+ success = kji_to_jik_stride(ni, nj, nk, data, &tmpfloatdata[0], localGidVerts);
else {
for (std::size_t idx = 0; idx != tmpfloatdata.size(); idx++)
((float*) data)[idx] = tmpfloatdata[idx];
@@ -849,7 +842,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
rval = tmp_rval;
}
}
- // debug output, if requested
+ // Debug output, if requested
if (1 == dbgOut.get_verbosity()) {
dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
for (unsigned int i = 1; i < vdatas.size(); i++)
@@ -863,7 +856,6 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset_async(EntityHandle file_set
ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
{
DebugOutput& dbgOut = _readNC->dbgOut;
- Range& localGid = _readNC->localGid;
ErrorCode rval = read_ucd_variable_to_nonset_allocate(file_set, vdatas, tstep_nums);
ERRORR(rval, "Trouble allocating read variables.");
@@ -872,8 +864,9 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
int success;
std::vector<int> requests(vdatas.size() * tstep_nums.size()), statuss(vdatas.size() * tstep_nums.size());
for (unsigned int i = 0; i < vdatas.size(); i++) {
+ std::size_t sz = vdatas[i].sz;
+
for (unsigned int t = 0; t < tstep_nums.size(); t++) {
- std::size_t sz = vdatas[i].numLev * vdatas[i].readCounts[t][2];
void* data = vdatas[i].varDatas[t];
size_t ni = vdatas[i].readCounts[t][2];
size_t nj = 1; // For HOMME, nj holds # quads, so here should set to 1
@@ -889,18 +882,18 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
// Copy from float case
std::vector<double> tmpdoubledata(sz);
- // in the case of ucd mesh, and on multiple proc,
+ // In the case of ucd mesh, and on multiple proc,
// we need to read as many times as subranges we have in the
- // localGid range;
+ // localGidVerts range;
// basically, we have to give a different point
// for data to start, for every subrange :(
size_t nbDims = vdatas[i].readStarts[t].size();
- // Assume that the last dimension is for the ncol
+ // Assume that the last dimension is for the ncol, number of vertices
size_t indexInDoubleArray = 0;
size_t ic = 0;
- for (Range::pair_iterator pair_iter = localGid.pair_begin();
- pair_iter != localGid.pair_end();
+ for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
+ pair_iter != localGidVerts.pair_end();
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // Inclusive
@@ -915,11 +908,11 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
// next subrange
indexInDoubleArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
}
- assert(ic == localGid.psize());
+ assert(ic == localGidVerts.psize());
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik_stride(ni, nj, nk, data, &tmpdoubledata[0]);
+ success = kji_to_jik_stride(ni, nj, nk, data, &tmpdoubledata[0], localGidVerts);
else {
for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
((double*) data)[idx] = tmpdoubledata[idx];
@@ -930,9 +923,9 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
case NC_FLOAT: {
std::vector<float> tmpfloatdata(sz);
- // in the case of ucd mesh, and on multiple proc,
+ // In the case of ucd mesh, and on multiple proc,
// we need to read as many times as subranges we have in the
- // localGid range;
+ // localGidVerts range;
// basically, we have to give a different point
// for data to start, for every subrange :(
size_t nbDims = vdatas[i].readStarts[t].size();
@@ -940,8 +933,8 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
// Assume that the last dimension is for the ncol
size_t indexInFloatArray = 0;
size_t ic = 0;
- for (Range::pair_iterator pair_iter = localGid.pair_begin();
- pair_iter != localGid.pair_end();
+ for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
+ pair_iter != localGidVerts.pair_end();
pair_iter++, ic++) {
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second; // Inclusive
@@ -956,11 +949,11 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
// next subrange
indexInFloatArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
}
- assert(ic == localGid.psize());
+ assert(ic == localGidVerts.psize());
if (vdatas[i].numLev != 1)
// Switch from k varying slowest to k varying fastest
- success = _readNC->kji_to_jik_stride(ni, nj, nk, data, &tmpfloatdata[0]);
+ success = kji_to_jik_stride(ni, nj, nk, data, &tmpfloatdata[0], localGidVerts);
else {
for (std::size_t idx = 0; idx != tmpfloatdata.size(); idx++)
((float*) data)[idx] = tmpfloatdata[idx];
@@ -998,7 +991,7 @@ ErrorCode NCHelperHOMME::read_ucd_variable_to_nonset(EntityHandle file_set, std:
rval = tmp_rval;
}
}
- // debug output, if requested
+ // Debug output, if requested
if (1 == dbgOut.get_verbosity()) {
dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
for (unsigned int i = 1; i < vdatas.size(); i++)
diff --git a/src/io/NCHelperMPAS.cpp b/src/io/NCHelperMPAS.cpp
index 469a89d..e9004ad 100644
--- a/src/io/NCHelperMPAS.cpp
+++ b/src/io/NCHelperMPAS.cpp
@@ -542,7 +542,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_setup(std::vector<std::string>& var_na
}
if (tstep_nums.empty() && -1 != tMin) {
- // no timesteps input, get them all
+ // No timesteps input, get them all
for (int i = tMin; i <= tMax; i++)
tstep_nums.push_back(i);
}
@@ -652,19 +652,19 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_allocate(EntityHandle file_s
// Next: nCells or nEdges or nVertices
switch (vdatas[i].entLoc) {
case ReadNC::ENTLOCVERT:
- // vertices
+ // Vertices
vdatas[i].readStarts[t].push_back(localGidVerts[0] - 1);
vdatas[i].readCounts[t].push_back(nLocalVertices);
range = &verts;
break;
case ReadNC::ENTLOCFACE:
- // faces
+ // Faces
vdatas[i].readStarts[t].push_back(localGidCells[0] - 1);
vdatas[i].readCounts[t].push_back(nLocalCells);
range = &facesOwned;
break;
case ReadNC::ENTLOCEDGE:
- // edges
+ // Edges
vdatas[i].readStarts[t].push_back(localGidEdges[0] - 1);
vdatas[i].readCounts[t].push_back(nLocalEdges);
range = &edges;
@@ -715,7 +715,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_async(EntityHandle file_set,
// Finally, read into that space
int success;
Range* pLocalGid = NULL;
- // MPI_offset or size_t?
+
for (unsigned int i = 0; i < vdatas.size(); i++) {
switch (vdatas[i].entLoc) {
case ReadNC::ENTLOCVERT:
@@ -732,12 +732,10 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_async(EntityHandle file_set,
break;
}
- for (unsigned int t = 0; t < tstep_nums.size(); t++) {
- std::size_t sz = 1;
- for (std::size_t idx = 0; idx != vdatas[i].readCounts[t].size(); ++idx)
- sz *= vdatas[i].readCounts[t][idx];
+ std::size_t sz = vdatas[i].sz;
- // we will synchronize all these reads with the other processors,
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ // We will synchronize all these reads with the other processors,
// so the wait will be inside this double loop; is it too much?
size_t nb_reads = pLocalGid->psize();
std::vector<int> requests(nb_reads), statuss(nb_reads);
@@ -752,7 +750,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_async(EntityHandle file_set,
case NC_DOUBLE: {
std::vector<double> tmpdoubledata(sz);
- // in the case of ucd mesh, and on multiple proc,
+ // In the case of ucd mesh, and on multiple proc,
// we need to read as many times as subranges we have in the
// localGid range;
// basically, we have to give a different point
@@ -770,13 +768,13 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_async(EntityHandle file_set,
vdatas[i].readStarts[t][nbDims - 2] = (NCDF_SIZE) (starth - 1);
vdatas[i].readCounts[t][nbDims - 2] = (NCDF_SIZE) (endh - starth + 1);
- // do a partial read, in each subrange
+ // Do a partial read, in each subrange
// wait outside this loop
success = NCFUNCAG2(_vara_double)(_fileId, vdatas[i].varId,
&(vdatas[i].readStarts[t][0]), &(vdatas[i].readCounts[t][0]),
&(tmpdoubledata[indexInDoubleArray]) NCREQ2);
ERRORS(success, "Failed to read double data in loop");
- // we need to increment the index in double array for the
+ // We need to increment the index in double array for the
// next subrange
indexInDoubleArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
}
@@ -842,7 +840,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset_async(EntityHandle file_set,
rval = tmp_rval;
}
}
- // debug output, if requested
+ // Debug output, if requested
if (1 == dbgOut.get_verbosity()) {
dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
for (unsigned int i = 1; i < vdatas.size(); i++)
@@ -864,7 +862,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset(EntityHandle file_set, std::
// Finally, read into that space
int success;
Range* pLocalGid = NULL;
- // MPI_offset or size_t?
+
std::vector<int> requests(vdatas.size() * tstep_nums.size()), statuss(vdatas.size() * tstep_nums.size());
for (unsigned int i = 0; i < vdatas.size(); i++) {
switch (vdatas[i].entLoc) {
@@ -882,9 +880,9 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset(EntityHandle file_set, std::
break;
}
- for (unsigned int t = 0; t < tstep_nums.size(); t++) {
- std::size_t sz = vdatas[i].numLev * vdatas[i].readCounts[t][1];
+ std::size_t sz = vdatas[i].sz;
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
switch (vdatas[i].varDataType) {
case NC_BYTE:
case NC_CHAR: {
@@ -895,7 +893,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset(EntityHandle file_set, std::
// Copy from float case
std::vector<double> tmpdoubledata(sz);
- // in the case of ucd mesh, and on multiple proc,
+ // In the case of ucd mesh, and on multiple proc,
// we need to read as many times as subranges we have in the
// localGid range;
// basically, we have to give a different point
@@ -986,7 +984,7 @@ ErrorCode NCHelperMPAS::read_ucd_variable_to_nonset(EntityHandle file_set, std::
}
}
- // debug output, if requested
+ // Debug output, if requested
if (1 == dbgOut.get_verbosity()) {
dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
for (unsigned int i = 1; i < vdatas.size(); i++)
diff --git a/src/io/ReadNC.hpp b/src/io/ReadNC.hpp
index 4efabab..882fd98 100644
--- a/src/io/ReadNC.hpp
+++ b/src/io/ReadNC.hpp
@@ -189,38 +189,6 @@ private:
//! coordinate variables - this info is used for creating tags
void init_dims_with_no_cvars_info();
- template <typename T> ErrorCode kji_to_jik(size_t ni, size_t nj, size_t nk, void* dest, T* source)
- {
- size_t nik = ni * nk, nij = ni * nj;
- T* tmp_data = reinterpret_cast<T*>(dest);
- for (std::size_t j = 0; j != nj; j++)
- for (std::size_t i = 0; i != ni; i++)
- for (std::size_t k = 0; k != nk; k++)
- tmp_data[j*nik + i*nk + k] = source[k*nij + j*ni + i];
- return MB_SUCCESS;
- }
-
- //! this version takes as input the moab range, from which we actually need just the
- //! size of each sequence, for a proper transpose of the data
- //! we read one time step, one variable at a time, usually, so we will
- template <typename T> ErrorCode kji_to_jik_stride(size_t , size_t nj, size_t nk, void* dest, T* source)
- {
- std::size_t idxInSource = 0; // position of the start of the stride
- // for each subrange, we will transpose a matrix of size subrange*nj*nk (subrange takes
- // the role of ni)
- T* tmp_data = reinterpret_cast<T*>(dest);
- for (Range::pair_iterator pair_iter = localGid.pair_begin();
- pair_iter != localGid.pair_end(); ++pair_iter) {
- std::size_t size_range = pair_iter->second - pair_iter->first + 1;
- std::size_t nik = size_range * nk, nij = size_range * nj;
- for (std::size_t j = 0; j != nj; j++)
- for (std::size_t i = 0; i != size_range; i++)
- for (std::size_t k = 0; k != nk; k++)
- tmp_data[idxInSource + j*nik + i*nk + k] = source[idxInSource + k*nij + j*size_range + i];
- idxInSource += (size_range*nj*nk);
- }
- return MB_SUCCESS;
- }
//------------member variables ------------//
//! interface instance
@@ -286,9 +254,6 @@ private:
//! global id tag is preserved, and is needed later on.
const Tag* mpFileIdTag;
- //! offset of first vertex id
- //int vertexOffset;
-
//! debug stuff
DebugOutput dbgOut;
@@ -298,9 +263,6 @@ private:
//! partitioning method
int partMethod;
- //! used only by ucd mesh, e.g. HOMME grid
- Range localGid;
-
//! whether mesh is locally periodic in i or j
int locallyPeriodic[2];
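
As a companion to the hunks above, here is a minimal self-contained sketch of the strided transpose performed by kji_to_jik_stride(), which this changeset moves into UcdNCHelper. The (first, last) pairs below are a stand-in for iterating the pairs of a moab::Range of local global IDs, and the driver values are illustrative only:

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// For each contiguous subrange (of size size_range, playing the role of ni),
// transpose a size_range x nj x nk block from [k][j][i] order to [j][i][k] order.
template <typename T>
void kji_to_jik_stride(std::size_t nj, std::size_t nk, T* dest, const T* source,
                       const std::vector<std::pair<std::size_t, std::size_t> >& subranges)
{
  std::size_t idxInSource = 0; // position of the start of the current stride
  for (std::size_t r = 0; r != subranges.size(); r++) {
    const std::size_t size_range = subranges[r].second - subranges[r].first + 1;
    const std::size_t nik = size_range * nk, nij = size_range * nj;
    for (std::size_t j = 0; j != nj; j++)
      for (std::size_t i = 0; i != size_range; i++)
        for (std::size_t k = 0; k != nk; k++)
          dest[idxInSource + j*nik + i*nk + k] = source[idxInSource + k*nij + j*size_range + i];
    idxInSource += size_range * nj * nk;
  }
}

int main()
{
  // Two subranges of sizes 2 and 1, with nj = 1 and nk = 2 (values are arbitrary).
  std::vector<std::pair<std::size_t, std::size_t> > subranges;
  subranges.push_back(std::make_pair(1, 2)); // subrange of size 2
  subranges.push_back(std::make_pair(5, 5)); // subrange of size 1
  std::vector<double> src = {0, 1, 2, 3, 4, 5};
  std::vector<double> dst(src.size());
  kji_to_jik_stride(1, 2, dst.data(), src.data(), subranges);
  for (double v : dst)
    std::cout << v << ' '; // prints: 0 2 1 3 4 5
  std::cout << '\n';
  return 0;
}
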
https://bitbucket.org/fathomteam/moab/commits/921f811a0726/
Changeset: 921f811a0726
Branch: master
User: danwu
Date: 2013-08-05 21:08:34
Summary: Merge branch 'master' of https://bitbucket.org/fathomteam/moab
Affected #: 2 files
diff --git a/itaps/iBase.h b/itaps/iBase.h
index 933b9d6..e61f92d 100644
--- a/itaps/iBase.h
+++ b/itaps/iBase.h
@@ -269,8 +269,6 @@ enum iBase_TagValueType {
*
* \subpage imeshp
*
- * \subpage igeom
- *
* \subpage error
*
* \subpage trio
diff --git a/src/moab/Interface.hpp b/src/moab/Interface.hpp
index 1decd7a..a3f7b15 100644
--- a/src/moab/Interface.hpp
+++ b/src/moab/Interface.hpp
@@ -25,9 +25,17 @@
* MOAB's API is documented in the moab::Interface class. Questions and comments should be sent to moab-dev
* _at_ mcs.anl.gov.
*
+ * \ref h5mmain "H5M File Format API"
+ *
* \ref userguide "User's Guide (MOAB 4.6)"
*
* \ref developerguide "Developer's Guide (MOAB 4.6)"
+ *
+ * \ref styleguide "Coding Style Guide"
+ *
+ * \ref metadata "I/O and Meta-Data Storage Conventions in MOAB"
+ *
+ * \ref The "ITAPS Interfaces"
*/
#ifndef MOAB_INTERFACE_HPP
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.