[MOAB-dev] commit/MOAB: 7 new changesets
commits-noreply at bitbucket.org
Wed May 7 08:07:37 CDT 2014
7 new commits in MOAB:
https://bitbucket.org/fathomteam/moab/commits/af6caf79820a/
Changeset: af6caf79820a
Branch: None
User: jeffdaily
Date: 2014-05-06 23:05:04
Summary: first go at converting MPAS reader to GCRM
Affected #: 5 files
diff --git a/MeshFiles/unittest/io/gcrm_r3.nc b/MeshFiles/unittest/io/gcrm_r3.nc
new file mode 100644
index 0000000..6d1b8d2
Binary files /dev/null and b/MeshFiles/unittest/io/gcrm_r3.nc differ
diff --git a/src/io/NCHelperGCRM.cpp b/src/io/NCHelperGCRM.cpp
new file mode 100644
index 0000000..e85f051
--- /dev/null
+++ b/src/io/NCHelperGCRM.cpp
@@ -0,0 +1,1664 @@
+#include "NCHelperGCRM.hpp"
+#include "moab/ReadUtilIface.hpp"
+#include "moab/FileOptions.hpp"
+#include "moab/SpectralMeshTool.hpp"
+#include "MBTagConventions.hpp"
+
+#ifdef HAVE_ZOLTAN
+#include "MBZoltan.hpp"
+#endif
+
+#include <cmath>
+
+#define ERRORR(rval, str) \
+ if (MB_SUCCESS != rval) {_readNC->readMeshIface->report_error("Line %d: %s", __LINE__, str); return rval;}
+
+#define ERRORS(err, str) \
+ if (err) {_readNC->readMeshIface->report_error("Line %d: %s", __LINE__, str); return MB_FAILURE;}
+
+namespace moab {
+
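+// GCRM uses a geodesic grid that is mostly hexagonal (with a small number of
+// pentagons), so a cell is assumed to have at most 6 edges by default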
+const int DEFAULT_MAX_EDGES_PER_CELL = 6;
+
+NCHelperGCRM::NCHelperGCRM(ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet)
+: UcdNCHelper(readNC, fileId, opts, fileSet)
+, maxEdgesPerCell(DEFAULT_MAX_EDGES_PER_CELL)
+, numCellGroups(0)
+, createGatherSet(false)
+{
+ // Ignore variables containing topological information
+ ignoredVarNames.insert("grid");
+ ignoredVarNames.insert("cell_corners");
+ ignoredVarNames.insert("cell_edges");
+ ignoredVarNames.insert("edge_corners");
+ ignoredVarNames.insert("cell_neighbors");
+}
+
+bool NCHelperGCRM::can_read_file(ReadNC* readNC)
+{
+ std::vector<std::string>& dimNames = readNC->dimNames;
+
+ // If the dimension name "cells" exists, then this should be the GCRM grid
+ if (std::find(dimNames.begin(), dimNames.end(), std::string("cells")) != dimNames.end())
+ return true;
+
+ return false;
+}
+
+ErrorCode NCHelperGCRM::init_mesh_vals()
+{
+ std::vector<std::string>& dimNames = _readNC->dimNames;
+ std::vector<int>& dimLens = _readNC->dimLens;
+ std::map<std::string, ReadNC::VarData>& varInfo = _readNC->varInfo;
+
+ ErrorCode rval;
+ unsigned int idx;
+ std::vector<std::string>::iterator vit;
+
+ // Look for time dimension
+ if ((vit = std::find(dimNames.begin(), dimNames.end(), "Time")) != dimNames.end())
+ idx = vit - dimNames.begin();
+ else if ((vit = std::find(dimNames.begin(), dimNames.end(), "time")) != dimNames.end())
+ idx = vit - dimNames.begin();
+ else {
+ ERRORR(MB_FAILURE, "Couldn't find 'Time' or 'time' dimension.");
+ }
+ tDim = idx;
+ nTimeSteps = dimLens[idx];
+
+ // Get number of cells
+ if ((vit = std::find(dimNames.begin(), dimNames.end(), "cells")) != dimNames.end())
+ idx = vit - dimNames.begin();
+ else {
+ ERRORR(MB_FAILURE, "Couldn't find 'cells' dimension.");
+ }
+ cDim = idx;
+ nCells = dimLens[idx];
+
+ // Get number of edges
+ if ((vit = std::find(dimNames.begin(), dimNames.end(), "edges")) != dimNames.end())
+ idx = vit - dimNames.begin();
+ else {
+ ERRORR(MB_FAILURE, "Couldn't find 'edges' dimension.");
+ }
+ eDim = idx;
+ nEdges = dimLens[idx];
+
+ // Get number of vertices
+ vDim = -1;
+ if ((vit = std::find(dimNames.begin(), dimNames.end(), "corners")) != dimNames.end())
+ idx = vit - dimNames.begin();
+ else {
+ ERRORR(MB_FAILURE, "Couldn't find 'corners' dimension.");
+ }
+ vDim = idx;
+ nVertices = dimLens[idx];
+
+ // Get number of vertex levels
+ if ((vit = std::find(dimNames.begin(), dimNames.end(), "layers")) != dimNames.end())
+ idx = vit - dimNames.begin();
+ else {
+ ERRORR(MB_FAILURE, "Couldn't find 'layers' dimension.");
+ }
+ levDim = idx;
+ nLevels = dimLens[idx];
+
+ // Dimension numbers for other optional levels
+ std::vector<unsigned int> opt_lev_dims;
+
+ // Get number of interface levels
+ if ((vit = std::find(dimNames.begin(), dimNames.end(), "interfaces")) != dimNames.end()) {
+ idx = vit - dimNames.begin();
+ opt_lev_dims.push_back(idx);
+ }
+
+ std::map<std::string, ReadNC::VarData>::iterator vmit;
+
+ // Store time coordinate values in tVals
+ if (nTimeSteps > 0) {
+ if ((vmit = varInfo.find("Time")) != varInfo.end()) {
+ rval = read_coordinate("Time", 0, nTimeSteps - 1, tVals);
+ ERRORR(rval, "Trouble reading 'Time' variable.");
+ }
+ else if ((vmit = varInfo.find("time")) != varInfo.end()) {
+ rval = read_coordinate("time", 0, nTimeSteps - 1, tVals);
+ ERRORR(rval, "Trouble reading 'time' variable.");
+ }
+ else {
+ // If time variable does not exist, set dummy values to tVals
+ for (int t = 0; t < nTimeSteps; t++)
+ tVals.push_back((double)t);
+ }
+ }
+
+ // For each variable, determine the entity location type and number of levels
+ for (vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit) {
+ ReadNC::VarData& vd = (*vmit).second;
+
+ vd.entLoc = ReadNC::ENTLOCSET;
+ if (std::find(vd.varDims.begin(), vd.varDims.end(), tDim) != vd.varDims.end()) {
+ if (std::find(vd.varDims.begin(), vd.varDims.end(), vDim) != vd.varDims.end())
+ vd.entLoc = ReadNC::ENTLOCVERT;
+ else if (std::find(vd.varDims.begin(), vd.varDims.end(), eDim) != vd.varDims.end())
+ vd.entLoc = ReadNC::ENTLOCEDGE;
+ else if (std::find(vd.varDims.begin(), vd.varDims.end(), cDim) != vd.varDims.end())
+ vd.entLoc = ReadNC::ENTLOCFACE;
+ }
+
+ vd.numLev = 1;
+ if (std::find(vd.varDims.begin(), vd.varDims.end(), levDim) != vd.varDims.end())
+ vd.numLev = nLevels;
+ else {
+ // If layers dimension is not found, try other optional levels such as interfaces
+ for (unsigned int i = 0; i < opt_lev_dims.size(); i++) {
+ if (std::find(vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i]) != vd.varDims.end()) {
+ vd.numLev = dimLens[opt_lev_dims[i]];
+ break;
+ }
+ }
+ }
+ }
+
+ // Hack: create dummy variables for dimensions (like nCells) with no corresponding coordinate variables
+ rval = create_dummy_variables();
+ ERRORR(rval, "Failed to create dummy variables.");
+
+ return MB_SUCCESS;
+}
+
+// When the noMesh option is used on this read, the old ReadNC class instance from the last read can go
+// out of scope (and be deleted). That old instance initialized some variables properly when the mesh was
+// created, but those values are now lost. The new instance (which will not create the mesh, since noMesh
+// is set) has to restore them based on the existing mesh from the last read
+ErrorCode NCHelperGCRM::check_existing_mesh()
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
+ bool& noMesh = _readNC->noMesh;
+
+ if (noMesh) {
+ ErrorCode rval;
+
+ // Restore numCellGroups
+ if (0 == numCellGroups) {
+ Tag numCellGroupsTag;
+ rval = mbImpl->tag_get_handle("__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag);
+ if (MB_SUCCESS == rval)
+ rval = mbImpl->tag_get_data(numCellGroupsTag, &_fileSet, 1, &numCellGroups);
+ }
+
+ if (localGidVerts.empty()) {
+ // Get all vertices from tmp_set (it is the input set in no_mesh scenario)
+ Range local_verts;
+ rval = mbImpl->get_entities_by_dimension(_fileSet, 0, local_verts);
+ if (MB_FAILURE == rval)
+ return rval;
+
+ if (!local_verts.empty()) {
+ std::vector<int> gids(local_verts.size());
+
+ // !IMPORTANT : this has to be the GLOBAL_ID tag
+ rval = mbImpl->tag_get_data(mGlobalIdTag, local_verts, &gids[0]);
+ if (MB_FAILURE == rval)
+ return rval;
+
+ // Restore localGidVerts
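+ // (the reverse copy inserts larger ids first, which the code elsewhere notes is
+ // more efficient for Range insertion)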
+ std::copy(gids.rbegin(), gids.rend(), range_inserter(localGidVerts));
+ nLocalVertices = localGidVerts.size();
+ }
+ }
+
+ if (localGidEdges.empty()) {
+ // Get all edges from _fileSet (it is the input set in no_mesh scenario)
+ Range local_edges;
+ rval = mbImpl->get_entities_by_dimension(_fileSet, 1, local_edges);
+ if (MB_FAILURE == rval)
+ return rval;
+
+ if (!local_edges.empty()) {
+ std::vector<int> gids(local_edges.size());
+
+ // !IMPORTANT : this has to be the GLOBAL_ID tag
+ rval = mbImpl->tag_get_data(mGlobalIdTag, local_edges, &gids[0]);
+ if (MB_FAILURE == rval)
+ return rval;
+
+ // Restore localGidEdges
+ std::copy(gids.rbegin(), gids.rend(), range_inserter(localGidEdges));
+ nLocalEdges = localGidEdges.size();
+ }
+ }
+
+ if (localGidCells.empty()) {
+ // Get all cells from tmp_set (it is the input set in no_mesh scenario)
+ Range local_cells;
+ rval = mbImpl->get_entities_by_dimension(_fileSet, 2, local_cells);
+ if (MB_FAILURE == rval)
+ return rval;
+
+ if (!local_cells.empty()) {
+ std::vector<int> gids(local_cells.size());
+
+ // !IMPORTANT : this has to be the GLOBAL_ID tag
+ rval = mbImpl->tag_get_data(mGlobalIdTag, local_cells, &gids[0]);
+ if (MB_FAILURE == rval)
+ return rval;
+
+ // Restore localGidCells
+ std::copy(gids.rbegin(), gids.rend(), range_inserter(localGidCells));
+ nLocalCells = localGidCells.size();
+
+ if (numCellGroups > 1) {
+ // Restore cellHandleToGlobalID map
+ Range::const_iterator rit;
+ int i;
+ for (rit = local_cells.begin(), i = 0; rit != local_cells.end(); ++rit, i++)
+ cellHandleToGlobalID[*rit] = gids[i];
+ }
+ }
+ }
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_mesh(Range& faces)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ int& gatherSetRank = _readNC->gatherSetRank;
+ bool& noMixedElements = _readNC->noMixedElements;
+ bool& noEdges = _readNC->noEdges;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+
+ int rank = 0;
+ int procs = 1;
+#ifdef USE_MPI
+ bool& isParallel = _readNC->isParallel;
+ if (isParallel) {
+ ParallelComm*& myPcomm = _readNC->myPcomm;
+ rank = myPcomm->proc_config().proc_rank();
+ procs = myPcomm->proc_config().proc_size();
+ }
+#endif
+
+ // Need to know whether we'll be creating gather mesh
+ if (rank == gatherSetRank)
+ createGatherSet = true;
+
+ if (procs >= 2) {
+ // Compute the number of local cells on this proc
+ nLocalCells = int(std::floor(1.0 * nCells / procs));
+
+ // The starting global cell index in the GCRM file for this proc
+ int start_cell_idx = rank * nLocalCells;
+
+ // Number of extra cells after equal split over procs
+ int iextra = nCells % procs;
+
+ // Allocate extra cells over procs
+ if (rank < iextra)
+ nLocalCells++;
+ start_cell_idx += std::min(rank, iextra);
+
+ start_cell_idx++; // 0 based -> 1 based
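+ // Illustrative example (numbers not from this file): with nCells = 10 and procs = 3,
+ // each proc starts with floor(10/3) = 3 cells and iextra = 1; rank 0 then owns
+ // cells 1-4, rank 1 owns cells 5-7 and rank 2 owns cells 8-10 (1 based)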
+
+ // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
+ ErrorCode rval = redistribute_local_cells(start_cell_idx);
+ ERRORR(rval, "Failed to redistribute local cells after trivial partition.");
+ }
+ else {
+ nLocalCells = nCells;
+ localGidCells.insert(1, nLocalCells);
+ }
+ dbgOut.tprintf(1, " localGidCells.psize() = %d\n", (int)localGidCells.psize());
+ dbgOut.tprintf(1, " localGidCells.size() = %d\n", (int)localGidCells.size());
+
+ // Read vertices on each local cell, to get localGidVerts and cell connectivity later
+ int verticesOnCellVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "cell_corners", &verticesOnCellVarId);
+ ERRORS(success, "Failed to get variable id of cell_corners.");
+ std::vector<int> vertices_on_local_cells(nLocalCells * maxEdgesPerCell);
+ dbgOut.tprintf(1, " nLocalCells = %d\n", (int)nLocalCells);
+ dbgOut.tprintf(1, " maxEdgesPerCell = %d\n", (int)maxEdgesPerCell);
+ dbgOut.tprintf(1, " vertices_on_local_cells.size() = %d\n", (int)vertices_on_local_cells.size());
+#ifdef PNETCDF_FILE
+ size_t nb_reads = localGidCells.psize();
+ std::vector<int> requests(nb_reads);
+ std::vector<int> statuss(nb_reads);
+ size_t idxReq = 0;
+#endif
+ size_t indexInArray = 0;
+ for (Range::pair_iterator pair_iter = localGidCells.pair_begin();
+ pair_iter != localGidCells.pair_end();
+ pair_iter++) {
+ EntityHandle starth = pair_iter->first;
+ EntityHandle endh = pair_iter->second;
+ dbgOut.tprintf(1, " cell_corners starth = %d\n", (int)starth);
+ dbgOut.tprintf(1, " cell_corners endh = %d\n", (int)endh);
+ NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
+ NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1),
+ static_cast<NCDF_SIZE>(maxEdgesPerCell)};
+
+ // Do a partial read in each subrange
+#ifdef PNETCDF_FILE
+ success = NCFUNCREQG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts,
+ &(vertices_on_local_cells[indexInArray]), &requests[idxReq++]);
+#else
+ success = NCFUNCAG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts,
+ &(vertices_on_local_cells[indexInArray]));
+#endif
+ ERRORS(success, "Failed to read cell_corners data in a loop");
+
+ // Increment the index for next subrange
+ indexInArray += (endh - starth + 1) * maxEdgesPerCell;
+ }
+
+#ifdef PNETCDF_FILE
+ // Wait outside the loop
+ success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
+ ERRORS(success, "Failed on wait_all.");
+#endif
+
+ // GCRM is 0 based, convert vertex indices from 0 to 1 based
+ for (std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++) {
+ vertices_on_local_cells[idx] += 1;
+ }
+
+ // Create local vertices
+ EntityHandle start_vertex;
+ ErrorCode rval = create_local_vertices(vertices_on_local_cells, start_vertex);
+ ERRORR(rval, "Failed to create local vertices for GCRM mesh.");
+
+ // Create local edges (unless NO_EDGES read option is set)
+ if (!noEdges) {
+ rval = create_local_edges(start_vertex);
+ ERRORR(rval, "Failed to create local edges for GCRM mesh.");
+ }
+
+ // Create local cells, either unpadded or padded
+ if (noMixedElements) {
+ rval = create_padded_local_cells(vertices_on_local_cells, start_vertex, faces);
+ ERRORR(rval, "Failed to create padded local cells for GCRM mesh.");
+ }
+ else {
+ rval = create_local_cells(vertices_on_local_cells, start_vertex, faces);
+ ERRORR(rval, "Failed to create local cells for GCRM mesh.");
+ }
+
+ // Set tag for numCellGroups
+ Tag numCellGroupsTag = 0;
+ rval = mbImpl->tag_get_handle("__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag, MB_TAG_SPARSE | MB_TAG_CREAT);
+ ERRORR(rval, "Failed to get __NUM_CELL_GROUPS tag.");
+ rval = mbImpl->tag_set_data(numCellGroupsTag, &_fileSet, 1, &numCellGroups);
+ ERRORR(rval, "Failed to set data for __NUM_CELL_GROUPS tag.");
+
+ if (createGatherSet) {
+ EntityHandle gather_set;
+ rval = _readNC->readMeshIface->create_gather_set(gather_set);
+ ERRORR(rval, "Failed to create gather set.");
+
+ // Create gather set vertices
+ EntityHandle start_gather_set_vertex;
+ rval = create_gather_set_vertices(gather_set, start_gather_set_vertex);
+ ERRORR(rval, "Failed to create gather set vertices for GCRM mesh");
+
+ // Create gather set edges (unless NO_EDGES read option is set)
+ if (!noEdges) {
+ rval = create_gather_set_edges(gather_set, start_gather_set_vertex);
+ ERRORR(rval, "Failed to create gather set edges for GCRM mesh.");
+ }
+
+ // Create gather set cells, either unpadded or padded
+ if (noMixedElements) {
+ rval = create_padded_gather_set_cells(gather_set, start_gather_set_vertex);
+ ERRORR(rval, "Failed to create padded gather set cells for GCRM mesh.");
+ }
+ else {
+ rval = create_gather_set_cells(gather_set, start_gather_set_vertex);
+ ERRORR(rval, "Failed to create gather set cells for GCRM mesh.");
+ }
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_allocate(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ std::vector<int>& dimLens = _readNC->dimLens;
+ bool& noEdges = _readNC->noEdges;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+
+ ErrorCode rval = MB_SUCCESS;
+
+ Range* range = NULL;
+
+ // Get vertices in set
+ Range verts;
+ rval = mbImpl->get_entities_by_dimension(_fileSet, 0, verts);
+ ERRORR(rval, "Trouble getting vertices in set.");
+ assert("Should only have a single vertex subrange, since they were read in one shot" &&
+ verts.psize() == 1);
+
+ // Get edges in set
+ Range edges;
+ rval = mbImpl->get_entities_by_dimension(_fileSet, 1, edges);
+ ERRORR(rval, "Trouble getting edges in set.");
+
+ // Get faces in set
+ Range faces;
+ rval = mbImpl->get_entities_by_dimension(_fileSet, 2, faces);
+ ERRORR(rval, "Trouble getting faces in set.");
+ // Note, for GCRM faces.psize() can be more than 1
+
+#ifdef USE_MPI
+ bool& isParallel = _readNC->isParallel;
+ if (isParallel) {
+ ParallelComm*& myPcomm = _readNC->myPcomm;
+ rval = myPcomm->filter_pstatus(faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned);
+ ERRORR(rval, "Trouble getting owned faces in set.");
+ }
+ else
+ facesOwned = faces; // not running in parallel, but still with MPI
+#else
+ facesOwned = faces;
+#endif
+
+ for (unsigned int i = 0; i < vdatas.size(); i++) {
+ // Skip edge variables, if specified by the read options
+ if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
+ continue;
+
+ // Support non-set variables with 3 dimensions like (Time, nCells, nVertLevels), or
+ // 2 dimensions like (Time, nCells)
+ assert(3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size());
+
+ // For a non-set variable, time should be the first dimension
+ assert(tDim == vdatas[i].varDims[0]);
+
+ // Set up readStarts and readCounts
+ vdatas[i].readStarts.resize(3);
+ vdatas[i].readCounts.resize(3);
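+ // (the three entries follow the variable's netCDF dimension order:
+ // Time, then the entity dimension, then the level dimension)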
+
+ // First: Time
+ vdatas[i].readStarts[0] = 0; // This value is timestep dependent, will be set later
+ vdatas[i].readCounts[0] = 1;
+
+ // Next: nVertices / nCells / nEdges
+ switch (vdatas[i].entLoc) {
+ case ReadNC::ENTLOCVERT:
+ // Vertices
+ // Start from the first localGidVerts
+ // Actually, this will be reset later on in a loop
+ vdatas[i].readStarts[1] = localGidVerts[0] - 1;
+ vdatas[i].readCounts[1] = nLocalVertices;
+ range = &verts;
+ break;
+ case ReadNC::ENTLOCFACE:
+ // Faces
+ // Start from the first localGidCells
+ // Actually, this will be reset later on in a loop
+ vdatas[i].readStarts[1] = localGidCells[0] - 1;
+ vdatas[i].readCounts[1] = nLocalCells;
+ range = &facesOwned;
+ break;
+ case ReadNC::ENTLOCEDGE:
+ // Edges
+ // Start from the first localGidEdges
+ // Actually, this will be reset later on in a loop
+ vdatas[i].readStarts[1] = localGidEdges[0] - 1;
+ vdatas[i].readCounts[1] = nLocalEdges;
+ range = &edges;
+ break;
+ default:
+ ERRORR(MB_FAILURE, "Unexpected entity location type for GCRM non-set variable.");
+ break;
+ }
+
+ // Finally: nVertLevels or other optional levels, it is possible that there
+ // is no level dimension for this non-set variable
+ vdatas[i].readStarts[2] = 0;
+ vdatas[i].readCounts[2] = vdatas[i].numLev;
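+ // (if this variable has no level dimension, numLev is 1, so this count is simply 1)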
+
+ // Get variable size
+ vdatas[i].sz = 1;
+ for (std::size_t idx = 0; idx != 3; idx++)
+ vdatas[i].sz *= vdatas[i].readCounts[idx];
+
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ dbgOut.tprintf(2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
+
+ if (tstep_nums[t] >= dimLens[tDim]) {
+ ERRORR(MB_INDEX_OUT_OF_RANGE, "Wrong value for a timestep number.");
+ }
+
+ // Get the tag to read into
+ if (!vdatas[i].varTags[t]) {
+ rval = get_tag_to_nonset(vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev);
+ ERRORR(rval, "Trouble getting tag.");
+ }
+
+ // Get ptr to tag space
+ if (vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1) {
+ // For a cell variable that is NOT on one contiguous chunk of faces, defer its tag space allocation
+ vdatas[i].varDatas[t] = NULL;
+ }
+ else {
+ assert(1 == range->psize());
+ void* data;
+ int count;
+ rval = mbImpl->tag_iterate(vdatas[i].varTags[t], range->begin(), range->end(), count, data);
+ ERRORR(rval, "Failed to iterate tag.");
+ assert((unsigned)count == range->size());
+ vdatas[i].varDatas[t] = data;
+ }
+ }
+ }
+
+ return rval;
+}
+
+#ifdef PNETCDF_FILE
+ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_async(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ bool& noEdges = _readNC->noEdges;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+
+ ErrorCode rval = read_ucd_variable_to_nonset_allocate(vdatas, tstep_nums);
+ ERRORR(rval, "Trouble allocating read variables.");
+
+ // Finally, read into that space
+ int success;
+ Range* pLocalGid = NULL;
+
+ for (unsigned int i = 0; i < vdatas.size(); i++) {
+ // Skip edge variables, if specified by the read options
+ if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
+ continue;
+
+ switch (vdatas[i].entLoc) {
+ case ReadNC::ENTLOCVERT:
+ pLocalGid = &localGidVerts;
+ break;
+ case ReadNC::ENTLOCFACE:
+ pLocalGid = &localGidCells;
+ break;
+ case ReadNC::ENTLOCEDGE:
+ pLocalGid = &localGidEdges;
+ break;
+ default:
+ ERRORR(MB_FAILURE, "Unrecognized entity location type.");
+ break;
+ }
+
+ std::size_t sz = vdatas[i].sz;
+
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ // We will synchronize all these reads with the other processors,
+ // so the wait will be inside this double loop; is it too much?
+ size_t nb_reads = pLocalGid->psize();
+ std::vector<int> requests(nb_reads), statuss(nb_reads);
+ size_t idxReq = 0;
+
+ // Set readStart for each timestep along time dimension
+ vdatas[i].readStarts[0] = tstep_nums[t];
+
+ switch (vdatas[i].varDataType) {
+ case NC_BYTE:
+ case NC_CHAR: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ case NC_DOUBLE: {
+ std::vector<double> tmpdoubledata(sz);
+
+ // For a ucd mesh on multiple procs, we need to read as many times as there are
+ // subranges in the localGid range; each subrange needs a different starting
+ // offset into the data array
+
+ size_t indexInDoubleArray = 0;
+ size_t ic = 0;
+ for (Range::pair_iterator pair_iter = pLocalGid->pair_begin();
+ pair_iter != pLocalGid->pair_end();
+ pair_iter++, ic++) {
+ EntityHandle starth = pair_iter->first;
+ EntityHandle endh = pair_iter->second; // inclusive
+ vdatas[i].readStarts[1] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readCounts[1] = (NCDF_SIZE) (endh - starth + 1);
+
+ // Do a partial read, in each subrange
+ // wait outside this loop
+ success = NCFUNCREQG(_vara_double)(_fileId, vdatas[i].varId,
+ &(vdatas[i].readStarts[0]), &(vdatas[i].readCounts[0]),
+ &(tmpdoubledata[indexInDoubleArray]), &requests[idxReq++]);
+ ERRORS(success, "Failed to read double data in loop");
+ // We need to increment the index in double array for the
+ // next subrange
+ indexInDoubleArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
+ }
+ assert(ic == pLocalGid->psize());
+
+ success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
+ ERRORS(success, "Failed on wait_all.");
+
+ if (vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1) {
+ // For a cell variable that is NOT on one contiguous chunk of faces, allocate tag space for
+ // each cell group, and utilize cellHandleToGlobalID map to read tag data
+ Range::iterator iter = facesOwned.begin();
+ while (iter != facesOwned.end()) {
+ int count;
+ void* ptr;
+ rval = mbImpl->tag_iterate(vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr);
+ ERRORR(rval, "Failed to iterate tag on owned faces.");
+
+ for (int j = 0; j < count; j++) {
+ int global_cell_idx = cellHandleToGlobalID[*(iter + j)]; // Global cell index, 1 based
+ int local_cell_idx = localGidCells.index(global_cell_idx); // Local cell index, 0 based
+ assert(local_cell_idx != -1);
+ for (int level = 0; level < vdatas[i].numLev; level++)
+ ((double*) ptr)[j * vdatas[i].numLev + level] = tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
+ }
+
+ iter += count;
+ }
+ }
+ else {
+ void* data = vdatas[i].varDatas[t];
+ for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
+ ((double*) data)[idx] = tmpdoubledata[idx];
+ }
+
+ break;
+ }
+ case NC_FLOAT: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ case NC_INT: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ case NC_SHORT: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ default:
+ success = 1;
+ }
+
+ if (success)
+ ERRORR(MB_FAILURE, "Trouble reading variable.");
+ }
+ }
+
+ for (unsigned int i = 0; i < vdatas.size(); i++) {
+ if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
+ continue;
+
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ dbgOut.tprintf(2, "Converting variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
+ ErrorCode tmp_rval = convert_variable(vdatas[i], t);
+ if (MB_SUCCESS != tmp_rval)
+ rval = tmp_rval;
+ }
+ }
+ // Debug output, if requested
+ if (1 == dbgOut.get_verbosity()) {
+ dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
+ for (unsigned int i = 1; i < vdatas.size(); i++)
+ dbgOut.printf(1, ", %s ", vdatas[i].varName.c_str());
+ dbgOut.tprintf(1, "\n");
+ }
+
+ return rval;
+}
+#else
+ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ bool& noEdges = _readNC->noEdges;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+
+ ErrorCode rval = read_ucd_variable_to_nonset_allocate(vdatas, tstep_nums);
+ ERRORR(rval, "Trouble allocating read variables.");
+
+ // Finally, read into that space
+ int success;
+ Range* pLocalGid = NULL;
+
+ for (unsigned int i = 0; i < vdatas.size(); i++) {
+ // Skip edge variables, if specified by the read options
+ if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
+ continue;
+
+ switch (vdatas[i].entLoc) {
+ case ReadNC::ENTLOCVERT:
+ pLocalGid = &localGidVerts;
+ break;
+ case ReadNC::ENTLOCFACE:
+ pLocalGid = &localGidCells;
+ break;
+ case ReadNC::ENTLOCEDGE:
+ pLocalGid = &localGidEdges;
+ break;
+ default:
+ ERRORR(MB_FAILURE, "Unrecognized entity location type.");
+ break;
+ }
+
+ std::size_t sz = vdatas[i].sz;
+
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ // Set readStart for each timestep along time dimension
+ vdatas[i].readStarts[0] = tstep_nums[t];
+
+ switch (vdatas[i].varDataType) {
+ case NC_BYTE:
+ case NC_CHAR: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ case NC_DOUBLE: {
+ std::vector<double> tmpdoubledata(sz);
+
+ // For a ucd mesh on multiple procs, we need to read as many times as there are
+ // subranges in the localGid range; each subrange needs a different starting
+ // offset into the data array
+ size_t indexInDoubleArray = 0;
+ size_t ic = 0;
+ for (Range::pair_iterator pair_iter = pLocalGid->pair_begin();
+ pair_iter != pLocalGid->pair_end();
+ pair_iter++, ic++) {
+ EntityHandle starth = pair_iter->first;
+ EntityHandle endh = pair_iter->second; // Inclusive
+ vdatas[i].readStarts[1] = (NCDF_SIZE) (starth - 1);
+ vdatas[i].readCounts[1] = (NCDF_SIZE) (endh - starth + 1);
+
+ success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId,
+ &(vdatas[i].readStarts[0]), &(vdatas[i].readCounts[0]),
+ &(tmpdoubledata[indexInDoubleArray]));
+ ERRORS(success, "Failed to read double data in loop");
+ // We need to increment the index in double array for the
+ // next subrange
+ indexInDoubleArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
+ }
+ assert(ic == pLocalGid->psize());
+
+ if (vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1) {
+ // For a cell variable that is NOT on one contiguous chunk of faces, allocate tag space for
+ // each cell group, and utilize cellHandleToGlobalID map to read tag data
+ Range::iterator iter = facesOwned.begin();
+ while (iter != facesOwned.end()) {
+ int count;
+ void* ptr;
+ rval = mbImpl->tag_iterate(vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr);
+ ERRORR(rval, "Failed to iterate tag on owned faces.");
+
+ for (int j = 0; j < count; j++) {
+ int global_cell_idx = cellHandleToGlobalID[*(iter + j)]; // Global cell index, 1 based
+ int local_cell_idx = localGidCells.index(global_cell_idx); // Local cell index, 0 based
+ assert(local_cell_idx != -1);
+ for (int level = 0; level < vdatas[i].numLev; level++)
+ ((double*) ptr)[j * vdatas[i].numLev + level] = tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
+ }
+
+ iter += count;
+ }
+ }
+ else {
+ void* data = vdatas[i].varDatas[t];
+ for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
+ ((double*) data)[idx] = tmpdoubledata[idx];
+ }
+
+ break;
+ }
+ case NC_FLOAT: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ case NC_INT: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ case NC_SHORT: {
+ ERRORR(MB_FAILURE, "not implemented");
+ break;
+ }
+ default:
+ success = 1;
+ }
+
+ if (success)
+ ERRORR(MB_FAILURE, "Trouble reading variable.");
+ }
+ }
+
+ for (unsigned int i = 0; i < vdatas.size(); i++) {
+ if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
+ continue;
+
+ for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ dbgOut.tprintf(2, "Converting variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
+ ErrorCode tmp_rval = convert_variable(vdatas[i], t);
+ if (MB_SUCCESS != tmp_rval)
+ rval = tmp_rval;
+ }
+ }
+
+ // Debug output, if requested
+ if (1 == dbgOut.get_verbosity()) {
+ dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
+ for (unsigned int i = 1; i < vdatas.size(); i++)
+ dbgOut.printf(1, ", %s ", vdatas[i].varName.c_str());
+ dbgOut.tprintf(1, "\n");
+ }
+
+ return rval;
+}
+#endif
+
+ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
+{
+ // If possible, apply Zoltan partition
+ if (_readNC->partMethod == ScdParData::RCBZOLTAN) {
+#if defined(USE_MPI) && defined(HAVE_ZOLTAN)
+ // Read x coordinates of cell centers
+ int xCellVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "xCell", &xCellVarId);
+ ERRORS(success, "Failed to get variable id of xCell.");
+ std::vector<double> xCell(nLocalCells);
+ NCDF_SIZE read_start = static_cast<NCDF_SIZE>(start_cell_idx - 1);
+ NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nLocalCells);
+ success = NCFUNCAG(_vara_double)(_fileId, xCellVarId, &read_start, &read_count, &xCell[0]);
+ ERRORS(success, "Failed to read xCell data.");
+
+ // Read y coordinates of cell centers
+ int yCellVarId;
+ success = NCFUNC(inq_varid)(_fileId, "yCell", &yCellVarId);
+ ERRORS(success, "Failed to get variable id of yCell.");
+ std::vector<double> yCell(nLocalCells);
+ success = NCFUNCAG(_vara_double)(_fileId, yCellVarId, &read_start, &read_count, &yCell[0]);
+ ERRORS(success, "Failed to read yCell data.");
+
+ // Read z coordinates of cell centers
+ int zCellVarId;
+ success = NCFUNC(inq_varid)(_fileId, "zCell", &zCellVarId);
+ ERRORS(success, "Failed to get variable id of zCell.");
+ std::vector<double> zCell(nLocalCells);
+ success = NCFUNCAG(_vara_double)(_fileId, zCellVarId, &read_start, &read_count, &zCell[0]);
+ ERRORS(success, "Failed to read zCell data.");
+
+ // Zoltan partition using RCB; more study may be needed to determine which
+ // partition method works best
+ Interface*& mbImpl = _readNC->mbImpl;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+ MBZoltan* mbZTool = new MBZoltan(mbImpl, false, 0, NULL);
+ ErrorCode rval = mbZTool->repartition(xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells);
+ delete mbZTool;
+ ERRORR(rval, "Error in Zoltan partitioning.");
+
+ dbgOut.tprintf(1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize());
+ dbgOut.tprintf(1, " localGidCells.size() = %d\n", (int)localGidCells.size());
+
+ // This is important: local cells are now redistributed, so nLocalCells might be different!
+ nLocalCells = localGidCells.size();
+
+ return MB_SUCCESS;
+#endif
+ }
+
+ // By default, apply trivial partition
+ localGidCells.insert(start_cell_idx, start_cell_idx + nLocalCells - 1);
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_local_vertices(const std::vector<int>& vertices_on_local_cells, EntityHandle& start_vertex)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
+ const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+ std::map<std::string, ReadNC::VarData>& varInfo = _readNC->varInfo;
+
+ // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell connectivity later)
+ std::vector<int> vertices_on_local_cells_sorted(vertices_on_local_cells);
+ std::sort(vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end());
+ std::copy(vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(), range_inserter(localGidVerts));
+ nLocalVertices = localGidVerts.size();
+
+ dbgOut.tprintf(1, " localGidVerts.psize() = %d\n", (int)localGidVerts.psize());
+ dbgOut.tprintf(1, " localGidVerts.size() = %d\n", (int)localGidVerts.size());
+
+ // Create local vertices
+ std::vector<double*> arrays;
+ ErrorCode rval = _readNC->readMeshIface->get_node_coords(3, nLocalVertices, 0, start_vertex, arrays,
+ // Might have to create gather mesh later
+ (createGatherSet ? nLocalVertices + nVertices : nLocalVertices));
+ ERRORR(rval, "Failed to create local vertices.");
+
+ // Add local vertices to the file set
+ Range local_verts_range(start_vertex, start_vertex + nLocalVertices - 1);
+ rval = _readNC->mbImpl->add_entities(_fileSet, local_verts_range);
+ ERRORR(rval, "Failed to add local vertices to the file set.");
+
+ // Get ptr to GID memory for local vertices
+ int count = 0;
+ void* data = NULL;
+ rval = mbImpl->tag_iterate(mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data);
+ ERRORR(rval, "Failed to iterate global id tag on local vertices.");
+ assert(count == nLocalVertices);
+ int* gid_data = (int*) data;
+ std::copy(localGidVerts.begin(), localGidVerts.end(), gid_data);
+
+ // Duplicate GID data, which will be used to resolve sharing
+ if (mpFileIdTag) {
+ rval = mbImpl->tag_iterate(*mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data);
+ ERRORR(rval, "Failed to iterate file id tag on local vertices.");
+ assert(count == nLocalVertices);
+ gid_data = (int*) data;
+ std::copy(localGidVerts.begin(), localGidVerts.end(), gid_data);
+ }
+
+#ifdef PNETCDF_FILE
+ size_t nb_reads = localGidVerts.psize();
+ std::vector<int> requests(nb_reads);
+ std::vector<int> statuss(nb_reads);
+ size_t idxReq = 0;
+#endif
+
+ // Store lev values in levVals
+ std::map<std::string, ReadNC::VarData>::iterator vmit;
+ if ((vmit = varInfo.find("layers")) != varInfo.end() && (*vmit).second.varDims.size() == 1) {
+ rval = read_coordinate("layers", 0, nLevels - 1, levVals);
+ ERRORR(rval, "Trouble reading 'layers' variable.");
+ }
+ else if ((vmit = varInfo.find("interfaces")) != varInfo.end() && (*vmit).second.varDims.size() == 1) {
+ rval = read_coordinate("interfaces", 0, nLevels - 1, levVals);
+ ERRORR(rval, "Trouble reading 'interfaces' variable.");
+ }
+ else {
+ ERRORR(MB_FAILURE, "Couldn't find 'layers' or 'interfaces' variable.");
+ }
+
+ {
+ // Decide whether down is positive
+ char posval[10] = {0};
+ int success = NCFUNC(get_att_text)(_fileId, (*vmit).second.varId, "positive", posval);
+ if (0 == success && !strncmp(posval, "down", 4)) {
+ for (std::vector<double>::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit)
+ (*dvit) *= -1.0;
+ }
+ }
+
+ // Read x coordinates for local vertices
+ double* xptr = arrays[0];
+ int xVertexVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "grid_corner_lon", &xVertexVarId);
+ ERRORS(success, "Failed to get variable id of grid_corner_lon.");
+ size_t indexInArray = 0;
+ for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
+ pair_iter != localGidVerts.pair_end();
+ pair_iter++) {
+ EntityHandle starth = pair_iter->first;
+ EntityHandle endh = pair_iter->second;
+ NCDF_SIZE read_start = (NCDF_SIZE) (starth - 1);
+ NCDF_SIZE read_count = (NCDF_SIZE) (endh - starth + 1);
+
+ // Do a partial read in each subrange
+#ifdef PNETCDF_FILE
+ success = NCFUNCREQG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count,
+ &(xptr[indexInArray]), &requests[idxReq++]);
+#else
+ success = NCFUNCAG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count,
+ &(xptr[indexInArray]));
+#endif
+ ERRORS(success, "Failed to read grid_corner_lon data in a loop");
+
+ // Increment the index for next subrange
+ indexInArray += (endh - starth + 1);
+ }
+
+#ifdef PNETCDF_FILE
+ // Wait outside the loop
+ success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
+ ERRORS(success, "Failed on wait_all.");
+#endif
+
+ // Read y coordinates for local vertices
+ double* yptr = arrays[1];
+ int yVertexVarId;
+ success = NCFUNC(inq_varid)(_fileId, "grid_corner_lat", &yVertexVarId);
+ ERRORS(success, "Failed to get variable id of grid_corner_lat.");
+#ifdef PNETCDF_FILE
+ idxReq = 0;
+#endif
+ indexInArray = 0;
+ for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
+ pair_iter != localGidVerts.pair_end();
+ pair_iter++) {
+ EntityHandle starth = pair_iter->first;
+ EntityHandle endh = pair_iter->second;
+ NCDF_SIZE read_start = (NCDF_SIZE) (starth - 1);
+ NCDF_SIZE read_count = (NCDF_SIZE) (endh - starth + 1);
+
+ // Do a partial read in each subrange
+#ifdef PNETCDF_FILE
+ success = NCFUNCREQG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count,
+ &(yptr[indexInArray]), &requests[idxReq++]);
+#else
+ success = NCFUNCAG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count,
+ &(yptr[indexInArray]));
+#endif
+ ERRORS(success, "Failed to read grid_corner_lat data in a loop");
+
+ // Increment the index for next subrange
+ indexInArray += (endh - starth + 1);
+ }
+
+#ifdef PNETCDF_FILE
+ // Wait outside the loop
+ success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
+ ERRORS(success, "Failed on wait_all.");
+#endif
+
+ // Convert lon/lat/rad to x/y/z
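+ // (standard spherical-to-Cartesian mapping with lon/lat in degrees:
+ // x = r*cos(lat)*cos(lon), y = r*cos(lat)*sin(lon), z = r*sin(lat))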
+ double* zptr = arrays[2];
+ const double pideg = acos(-1.0) / 180.0;
+ double rad = 8000.0 + levVals[0];
+ for (int i = 0; i < nLocalVertices; i++) {
+ double cosphi = cos(pideg * yptr[i]);
+ double zmult = sin(pideg * yptr[i]);
+ double xmult = cosphi * cos(xptr[i] * pideg);
+ double ymult = cosphi * sin(xptr[i] * pideg);
+ xptr[i] = rad * xmult;
+ yptr[i] = rad * ymult;
+ zptr[i] = rad * zmult;
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+
+ // Read edges on each local cell, to get localGidEdges
+ int edgesOnCellVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "cell_edges", &edgesOnCellVarId);
+ ERRORS(success, "Failed to get variable id of cell_edges.");
+
+ std::vector<int> edges_on_local_cells(nLocalCells * maxEdgesPerCell);
+ dbgOut.tprintf(1, " edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size());
+
+#ifdef PNETCDF_FILE
+ size_t nb_reads = localGidCells.psize();
+ std::vector<int> requests(nb_reads);
+ std::vector<int> statuss(nb_reads);
+ size_t idxReq = 0;
+#endif
+ size_t indexInArray = 0;
+ for (Range::pair_iterator pair_iter = localGidCells.pair_begin();
+ pair_iter != localGidCells.pair_end();
+ pair_iter++) {
+ EntityHandle starth = pair_iter->first;
+ EntityHandle endh = pair_iter->second;
+ dbgOut.tprintf(1, " starth = %d\n", (int)starth);
+ dbgOut.tprintf(1, " endh = %d\n", (int)endh);
+ NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
+ NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), static_cast<NCDF_SIZE>(maxEdgesPerCell)};
+
+ // Do a partial read in each subrange
+#ifdef PNETCDF_FILE
+ success = NCFUNCREQG(_vara_int)(_fileId, edgesOnCellVarId, read_starts, read_counts,
+ &(edges_on_local_cells[indexInArray]), &requests[idxReq++]);
+#else
+ success = NCFUNCAG(_vara_int)(_fileId, edgesOnCellVarId, read_starts, read_counts,
+ &(edges_on_local_cells[indexInArray]));
+#endif
+ ERRORS(success, "Failed to read cell_edges data in a loop");
+
+ // Increment the index for next subrange
+ indexInArray += (endh - starth + 1) * maxEdgesPerCell;
+ }
+
+#ifdef PNETCDF_FILE
+ // Wait outside the loop
+ success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
+ ERRORS(success, "Failed on wait_all.");
+#endif
+
+ // GCRM is 0 based, convert edge indices from 0 to 1 based
+ for (std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++) {
+ edges_on_local_cells[idx] += 1;
+ }
+
+ // Collect local edges
+ std::sort(edges_on_local_cells.begin(), edges_on_local_cells.end());
+ std::copy(edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter(localGidEdges));
+ nLocalEdges = localGidEdges.size();
+
+ dbgOut.tprintf(1, " localGidEdges.psize() = %d\n", (int)localGidEdges.psize());
+ dbgOut.tprintf(1, " localGidEdges.size() = %d\n", (int)localGidEdges.size());
+
+ // Create local edges
+ EntityHandle start_edge;
+ EntityHandle* conn_arr_edges = NULL;
+ ErrorCode rval = _readNC->readMeshIface->get_element_connect(nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
+ // Might have to create gather mesh later
+ (createGatherSet ? nLocalEdges + nEdges : nLocalEdges));
+ ERRORR(rval, "Failed to create edges.");
+
+ // Add local edges to the file set
+ Range local_edges_range(start_edge, start_edge + nLocalEdges - 1);
+ rval = _readNC->mbImpl->add_entities(_fileSet, local_edges_range);
+ ERRORR(rval, "Failed to add local edges to the file set.");
+
+ // Get ptr to GID memory for edges
+ int count = 0;
+ void* data = NULL;
+ rval = mbImpl->tag_iterate(mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data);
+ ERRORR(rval, "Failed to iterate global id tag on local edges.");
+ assert(count == nLocalEdges);
+ int* gid_data = (int*) data;
+ std::copy(localGidEdges.begin(), localGidEdges.end(), gid_data);
+
+ int verticesOnEdgeVarId;
+
+ // Read vertices on each local edge, to get edge connectivity
+ success = NCFUNC(inq_varid)(_fileId, "edge_corners", &verticesOnEdgeVarId);
+ ERRORS(success, "Failed to get variable id of edge_corners.");
+ // Utilize the memory storage pointed by conn_arr_edges
+ int* vertices_on_local_edges = (int*) conn_arr_edges;
+#ifdef PNETCDF_FILE
+ nb_reads = localGidEdges.psize();
+ requests.resize(nb_reads);
+ statuss.resize(nb_reads);
+ idxReq = 0;
+#endif
+ indexInArray = 0;
+ for (Range::pair_iterator pair_iter = localGidEdges.pair_begin();
+ pair_iter != localGidEdges.pair_end();
+ pair_iter++) {
+ EntityHandle starth = pair_iter->first;
+ EntityHandle endh = pair_iter->second;
+ NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
+ NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), 2};
+
+ // Do a partial read in each subrange
+#ifdef PNETCDF_FILE
+ success = NCFUNCREQG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts,
+ &(vertices_on_local_edges[indexInArray]), &requests[idxReq++]);
+#else
+ success = NCFUNCAG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts,
+ &(vertices_on_local_edges[indexInArray]));
+#endif
+ ERRORS(success, "Failed to read edge_corners data in a loop");
+
+ // Increment the index for next subrange
+ indexInArray += (endh - starth + 1) * 2;
+ }
+
+#ifdef PNETCDF_FILE
+ // Wait outside the loop
+ success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
+ ERRORS(success, "Failed on wait_all.");
+#endif
+
+ // GCRM is 0 based, convert vertex indices from 0 to 1 based
+ // (2 vertices per edge, so nLocalEdges * 2 entries in total)
+ for (int idx = 0; idx < nLocalEdges * 2; idx++) {
+ vertices_on_local_edges[idx] += 1;
+ }
+
+ // Populate connectivity data for local edges
+ // Convert in-place from int (stored in the first half) to EntityHandle
+ // Reading backward is the trick
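+ // (assuming an EntityHandle is twice as wide as an int, the handle written at slot k
+ // only overwrites int slots 2*k and 2*k + 1, so back-to-front traversal never clobbers
+ // an int that still needs to be read)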
+ for (int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert--) {
+ int global_vert_idx = vertices_on_local_edges[edge_vert]; // Global vertex index, 1 based
+ int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
+ assert(local_vert_idx != -1);
+ conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_local_cells(const std::vector<int>& vertices_on_local_cells,
+ EntityHandle start_vertex, Range& faces)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
+
+ // Divide local cells into groups based on the number of edges
+ Range local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
+ // Insert larger values before smaller ones to increase efficiency
+ for (int i = nLocalCells - 1; i >= 0; i--) {
+ int num_edges = DEFAULT_MAX_EDGES_PER_CELL;
+ local_cells_with_n_edges[num_edges].insert(localGidCells[i]); // Global cell index
+ }
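+ // Note: in this first GCRM pass every cell is put in the DEFAULT_MAX_EDGES_PER_CELL
+ // group above, so only one non-empty group is expected below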
+
+ std::vector<int> num_edges_on_cell_groups;
+ for (int i = 3; i <= maxEdgesPerCell; i++) {
+ if (local_cells_with_n_edges[i].size() > 0)
+ num_edges_on_cell_groups.push_back(i);
+ }
+ numCellGroups = num_edges_on_cell_groups.size();
+
+ EntityHandle* conn_arr_local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
+ for (int i = 0; i < numCellGroups; i++) {
+ int num_edges_per_cell = num_edges_on_cell_groups[i];
+ int num_group_cells = (int)local_cells_with_n_edges[num_edges_per_cell].size();
+
+ // Create local cells for each non-empty cell group
+ EntityHandle start_element;
+ ErrorCode rval = _readNC->readMeshIface->get_element_connect(num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
+ conn_arr_local_cells_with_n_edges[num_edges_per_cell], num_group_cells);
+ ERRORR(rval, "Failed to create cells");
+ faces.insert(start_element, start_element + num_group_cells - 1);
+
+ // Add local cells to the file set
+ Range local_cells_range(start_element, start_element + num_group_cells - 1);
+ rval = _readNC->mbImpl->add_entities(_fileSet, local_cells_range);
+ ERRORR(rval, "Failed to add local cells to the file set.");
+
+ // Get ptr to gid memory for local cells
+ int count = 0;
+ void* data = NULL;
+ rval = mbImpl->tag_iterate(mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data);
+ ERRORR(rval, "Failed to iterate global id tag on local cells.");
+ assert(count == num_group_cells);
+ int* gid_data = (int*) data;
+ std::copy(local_cells_with_n_edges[num_edges_per_cell].begin(), local_cells_with_n_edges[num_edges_per_cell].end(), gid_data);
+
+ // Set connectivity array with proper local vertices handles
+ for (int j = 0; j < num_group_cells; j++) {
+ EntityHandle global_cell_idx = local_cells_with_n_edges[num_edges_per_cell][j]; // Global cell index, 1 based
+ int local_cell_idx = localGidCells.index(global_cell_idx); // Local cell index, 0 based
+ assert(local_cell_idx != -1);
+
+ if (numCellGroups > 1) {
+ // Populate cellHandleToGlobalID map to read cell variables
+ cellHandleToGlobalID[start_element + j] = global_cell_idx;
+ }
+
+ for (int k = 0; k < num_edges_per_cell; k++) {
+ EntityHandle global_vert_idx = vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + k]; // Global vertex index, 1 based
+ int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
+ assert(local_vert_idx != -1);
+ conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
+ start_vertex + local_vert_idx;
+ }
+ }
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_padded_local_cells(const std::vector<int>& vertices_on_local_cells,
+ EntityHandle start_vertex, Range& faces)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
+
+ // Only one group of cells (each cell is represented by a polygon with maxEdgesPerCell edges)
+ numCellGroups = 1;
+
+ // Create cells for this cell group
+ EntityHandle start_element;
+ EntityHandle* conn_arr_local_cells = NULL;
+ ErrorCode rval = _readNC->readMeshIface->get_element_connect(nLocalCells, maxEdgesPerCell, MBPOLYGON, 0, start_element, conn_arr_local_cells,
+ // Might have to create gather mesh later
+ (createGatherSet ? nLocalCells + nCells : nLocalCells));
+ ERRORR(rval, "Failed to create cells.");
+ faces.insert(start_element, start_element + nLocalCells - 1);
+
+ // Add local cells to the file set
+ Range local_cells_range(start_element, start_element + nLocalCells - 1);
+ rval = _readNC->mbImpl->add_entities(_fileSet, local_cells_range);
+ ERRORR(rval, "Failed to add local cells to the file set.");
+
+ // Get ptr to GID memory for local cells
+ int count = 0;
+ void* data = NULL;
+ rval = mbImpl->tag_iterate(mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data);
+ ERRORR(rval, "Failed to iterate global id tag on local cells.");
+ assert(count == nLocalCells);
+ int* gid_data = (int*) data;
+ std::copy(localGidCells.begin(), localGidCells.end(), gid_data);
+
+ // Set connectivity array with proper local vertices handles
+ // The vertices_on_local_cells array was already corrected to have the last vertices padded,
+ // so no extra checks are needed here
+ for (int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++) {
+ for (int i = 0; i < maxEdgesPerCell; i++) {
+ EntityHandle global_vert_idx = vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + i]; // Global vertex index, 1 based
+ int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
+ assert(local_vert_idx != -1);
+ conn_arr_local_cells[local_cell_idx * maxEdgesPerCell + i] = start_vertex + local_vert_idx;
+ }
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_gather_set_vertices(EntityHandle gather_set, EntityHandle& gather_set_start_vertex)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+ Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
+ const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
+
+ // Create gather set vertices
+ std::vector<double*> arrays;
+ // Don't need to specify allocation number here, because we know enough vertices were created before
+ ErrorCode rval = _readNC->readMeshIface->get_node_coords(3, nVertices, 0, gather_set_start_vertex, arrays);
+ ERRORR(rval, "Failed to create vertices.");
+
+ // Add vertices to the gather set
+ Range gather_set_verts_range(gather_set_start_vertex, gather_set_start_vertex + nVertices - 1);
+ rval = mbImpl->add_entities(gather_set, gather_set_verts_range);
+ ERRORR(rval, "Failed to add vertices to the gather set.");
+
+ // Read x coordinates for gather set vertices
+ double* xptr = arrays[0];
+ int xVertexVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "xVertex", &xVertexVarId);
+ ERRORS(success, "Failed to get variable id of xVertex.");
+ NCDF_SIZE read_start = 0;
+ NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nVertices);
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count, xptr);
+ ERRORS(success, "Failed to read xVertex data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count, xptr);
+ ERRORS(success, "Failed to read xVertex data.");
+#endif
+
+ // Read y coordinates for gather set vertices
+ double* yptr = arrays[1];
+ int yVertexVarId;
+ success = NCFUNC(inq_varid)(_fileId, "yVertex", &yVertexVarId);
+ ERRORS(success, "Failed to get variable id of yVertex.");
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count, yptr);
+ ERRORS(success, "Failed to read yVertex data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count, yptr);
+ ERRORS(success, "Failed to read yVertex data.");
+#endif
+
+ // Read z coordinates for gather set vertices
+ double* zptr = arrays[2];
+ int zVertexVarId;
+ success = NCFUNC(inq_varid)(_fileId, "zVertex", &zVertexVarId);
+ ERRORS(success, "Failed to get variable id of zVertex.");
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_double)(_fileId, zVertexVarId, &read_start, &read_count, zptr);
+ ERRORS(success, "Failed to read zVertex data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_double)(_fileId, zVertexVarId, &read_start, &read_count, zptr);
+ ERRORS(success, "Failed to read zVertex data.");
+#endif
+
+ // Get ptr to GID memory for gather set vertices
+ int count = 0;
+ void* data = NULL;
+ rval = mbImpl->tag_iterate(mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data);
+ ERRORR(rval, "Failed to iterate global id tag on gather set vertices.");
+ assert(count == nVertices);
+ int* gid_data = (int*) data;
+ for (int j = 1; j <= nVertices; j++)
+ gid_data[j - 1] = j;
+
+ // Set the file id tag too; its values should be bigger than the global ids, so the two do not interfere
+ if (mpFileIdTag) {
+ rval = mbImpl->tag_iterate(*mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data);
+ ERRORR(rval, "Failed to iterate file id tag on gather set vertices.");
+ assert(count == nVertices);
+ gid_data = (int*) data;
+ for (int j = 1; j <= nVertices; j++)
+ gid_data[j - 1] = nVertices + j; // Bigger than global id tag
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_gather_set_edges(EntityHandle gather_set, EntityHandle gather_set_start_vertex)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+
+ // Create gather set edges
+ EntityHandle start_edge;
+ EntityHandle* conn_arr_gather_set_edges = NULL;
+ // Don't need to specify allocation number here, because we know enough edges were created before
+ ErrorCode rval = _readNC->readMeshIface->get_element_connect(nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges);
+ ERRORR(rval, "Failed to create edges.");
+
+ // Add edges to the gather set
+ Range gather_set_edges_range(start_edge, start_edge + nEdges - 1);
+ rval = mbImpl->add_entities(gather_set, gather_set_edges_range);
+ ERRORR(rval, "Failed to add edges to the gather set.");
+
+ // Read vertices on each edge
+ int verticesOnEdgeVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "edge_corners", &verticesOnEdgeVarId);
+ ERRORS(success, "Failed to get variable id of edge_corners.");
+ // Utilize the memory storage pointed by conn_arr_gather_set_edges
+ int* vertices_on_gather_set_edges = (int*) conn_arr_gather_set_edges;
+ NCDF_SIZE read_starts[2] = {0, 0};
+ NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nEdges), 2};
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges);
+ ERRORS(success, "Failed to read edge_corners data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges);
+ ERRORS(success, "Failed to read edge_corners data.");
+#endif
+
+ // Populate connectivity data for gather set edges
+ // Convert in-place from int (stored in the first half) to EntityHandle
+ // Reading backward is the trick
+ for (int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert--) {
+ int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert]; // Global vertex index, 1 based
+ gather_set_vert_idx--; // 1 based -> 0 based
+ // Connectivity array is shifted by where the gather set vertices start
+ conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
+ }
+
+ return MB_SUCCESS;
+}
+
+ErrorCode NCHelperGCRM::create_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+
+ // Read number of edges on each gather set cell
+ int nEdgesOnCellVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "nEdgesOnCell", &nEdgesOnCellVarId);
+ ERRORS(success, "Failed to get variable id of nEdgesOnCell.");
+ std::vector<int> num_edges_on_gather_set_cells(nCells);
+ NCDF_SIZE read_start = 0;
+ NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nCells);
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
+ ERRORS(success, "Failed to read nEdgesOnCell data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
+ ERRORS(success, "Failed to read nEdgesOnCell data.");
+#endif
+
+ // Read vertices on each gather set cell (connectivity)
+ int verticesOnCellVarId;
+ success = NCFUNC(inq_varid)(_fileId, "verticesOnCell", &verticesOnCellVarId);
+ ERRORS(success, "Failed to get variable id of verticesOnCell.");
+ std::vector<int> vertices_on_gather_set_cells(nCells * maxEdgesPerCell);
+ NCDF_SIZE read_starts[2] = {0, 0};
+ NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nCells), static_cast<NCDF_SIZE>(maxEdgesPerCell)};
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, &vertices_on_gather_set_cells[0]);
+ ERRORS(success, "Failed to read verticesOnCell data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, &vertices_on_gather_set_cells[0]);
+ ERRORS(success, "Failed to read verticesOnCell data.");
+#endif
+
+ // Divide gather set cells into groups based on the number of edges
+ Range gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
+ // Insert larger values before smaller ones; Range insertion is more efficient in that order
+ for (int i = nCells - 1; i >= 0; i--) {
+ int num_edges = num_edges_on_gather_set_cells[i];
+ gather_set_cells_with_n_edges[num_edges].insert(i + 1); // 0 based -> 1 based
+ }
+
+ // Create gather set cells
+ EntityHandle* conn_arr_gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
+ for (int num_edges_per_cell = 3; num_edges_per_cell <= maxEdgesPerCell; num_edges_per_cell++) {
+ int num_group_cells = gather_set_cells_with_n_edges[num_edges_per_cell].size();
+ if (num_group_cells > 0) {
+ EntityHandle start_element;
+ ErrorCode rval = _readNC->readMeshIface->get_element_connect(num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
+ conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell], num_group_cells);
+ ERRORR(rval, "Failed to create cells.");
+
+ // Add cells to the gather set
+ Range gather_set_cells_range(start_element, start_element + num_group_cells - 1);
+ rval = mbImpl->add_entities(gather_set, gather_set_cells_range);
+ ERRORR(rval, "Failed to add cells to the gather set.");
+
+ for (int j = 0; j < num_group_cells; j++) {
+ int gather_set_cell_idx = gather_set_cells_with_n_edges[num_edges_per_cell][j]; // Global cell index, 1 based
+ gather_set_cell_idx--; // 1 based -> 0 based
+
+ for (int k = 0; k < num_edges_per_cell; k++) {
+ EntityHandle gather_set_vert_idx = vertices_on_gather_set_cells[gather_set_cell_idx * maxEdgesPerCell + k]; // Global vertex index, 1 based
+ gather_set_vert_idx--; // 1 based -> 0 based
+
+ // Connectivity array is shifted by where the gather set vertices start
+ conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
+ gather_set_start_vertex + gather_set_vert_idx;
+ }
+ }
+ }
+ }
+
+ return MB_SUCCESS;
+}
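
A tiny standalone sketch, separate from the patch, of the grouping step above: cell IDs are bucketed by their edge count so that one fixed-width connectivity sequence can be created per group. Plain std::set stands in for moab::Range here, and the edge counts are invented sample data.

#include <cstdio>
#include <set>
#include <vector>

int main()
{
  const int max_edges = 6;
  const int sample[8] = {6, 5, 6, 6, 5, 6, 6, 6}; // hypothetical per-cell edge counts for 8 cells
  std::vector<int> num_edges_on_cells(sample, sample + 8);

  std::set<int> cells_with_n_edges[max_edges + 1];
  // Insert from the back, mirroring the reader's larger-before-smaller Range insertion
  for (int i = (int)num_edges_on_cells.size() - 1; i >= 0; i--)
    cells_with_n_edges[num_edges_on_cells[i]].insert(i + 1); // 0-based index -> 1-based cell ID

  for (int ne = 3; ne <= max_edges; ne++)
    if (!cells_with_n_edges[ne].empty())
      std::printf("%d cells with %d edges\n", (int)cells_with_n_edges[ne].size(), ne);
  return 0;
}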
+
+ErrorCode NCHelperGCRM::create_padded_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex)
+{
+ Interface*& mbImpl = _readNC->mbImpl;
+
+ // Read number of edges on each gather set cell
+ int nEdgesOnCellVarId;
+ int success = NCFUNC(inq_varid)(_fileId, "nEdgesOnCell", &nEdgesOnCellVarId);
+ ERRORS(success, "Failed to get variable id of nEdgesOnCell.");
+ std::vector<int> num_edges_on_gather_set_cells(nCells);
+ NCDF_SIZE read_start = 0;
+ NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nCells);
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
+ ERRORS(success, "Failed to read nEdgesOnCell data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
+ ERRORS(success, "Failed to read nEdgesOnCell data.");
+#endif
+
+ // Create gather set cells
+ EntityHandle start_element;
+ EntityHandle* conn_arr_gather_set_cells = NULL;
+ // Don't need to specify allocation number here, because we know enough cells were created before
+ ErrorCode rval = _readNC->readMeshIface->get_element_connect(nCells, maxEdgesPerCell, MBPOLYGON, 0, start_element, conn_arr_gather_set_cells);
+ ERRORR(rval, "Failed to create cells.");
+
+ // Add cells to the gather set
+ Range gather_set_cells_range(start_element, start_element + nCells - 1);
+ rval = mbImpl->add_entities(gather_set, gather_set_cells_range);
+ ERRORR(rval, "Failed to add cells to the gather set.");
+
+ // Read vertices on each gather set cell (connectivity)
+ int verticesOnCellVarId;
+ success = NCFUNC(inq_varid)(_fileId, "verticesOnCell", &verticesOnCellVarId);
+ ERRORS(success, "Failed to get variable id of verticesOnCell.");
+ // Reuse the memory storage pointed to by conn_arr_gather_set_cells
+ int* vertices_on_gather_set_cells = (int*) conn_arr_gather_set_cells;
+ NCDF_SIZE read_starts[2] = {0, 0};
+ NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nCells), static_cast<NCDF_SIZE>(maxEdgesPerCell)};
+#ifdef PNETCDF_FILE
+ // Enter independent I/O mode, since this read is only for the gather processor
+ success = NCFUNC(begin_indep_data)(_fileId);
+ ERRORS(success, "Failed to begin independent I/O mode.");
+ success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells);
+ ERRORS(success, "Failed to read verticesOnCell data.");
+ success = NCFUNC(end_indep_data)(_fileId);
+ ERRORS(success, "Failed to end independent I/O mode.");
+#else
+ success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells);
+ ERRORS(success, "Failed to read verticesOnCell data.");
+#endif
+
+ // Correct the gather set cell vertices array the same way as the local cell vertices array:
+ // fill the padded slots with the last vertex of the corresponding cell
+ for (int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++) {
+ int num_edges = num_edges_on_gather_set_cells[gather_set_cell_idx];
+ int idx_in_gather_set_vert_arr = gather_set_cell_idx * maxEdgesPerCell;
+ int last_vert_idx = vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + num_edges - 1];
+ for (int i = num_edges; i < maxEdgesPerCell; i++)
+ vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + i] = last_vert_idx;
+ }
+
+ // Populate connectivity data for gather set cells
+ // Convert in place from int (stored in the first half of the buffer) to EntityHandle
+ // Iterating backward ensures each int is read before its slot is overwritten
+ for (int cell_vert = nCells * maxEdgesPerCell - 1; cell_vert >= 0; cell_vert--) {
+ int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert]; // Global vertex index, 1 based
+ gather_set_vert_idx--; // 1 based -> 0 based
+ // Connectivity array is shifted by where the gather set vertices start
+ conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
+ }
+
+ return MB_SUCCESS;
+}
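
A standalone sketch, separate from the patch, of the padding rule applied above: a cell with fewer than maxEdgesPerCell vertices has its connectivity widened by repeating its last vertex, so a pentagon [1 2 3 4 5] stored at width 6 becomes [1 2 3 4 5 5]. The width and indices below are example values.

#include <cassert>
#include <vector>

int main()
{
  const int max_edges_per_cell = 6;           // padded connectivity width (example value)
  const int pentagon[5] = {1, 2, 3, 4, 5};    // 1-based vertex indices of one pentagon
  std::vector<int> conn(pentagon, pentagon + 5);

  int last_vert = conn.back();                // copy the last vertex, then pad with it
  conn.resize(max_edges_per_cell, last_vert); // [1 2 3 4 5] -> [1 2 3 4 5 5]

  assert(conn.size() == 6 && conn[5] == 5);
  return 0;
}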
+
+} // namespace moab
diff --git a/src/io/NCHelperGCRM.hpp b/src/io/NCHelperGCRM.hpp
new file mode 100644
index 0000000..3c445f3
--- /dev/null
+++ b/src/io/NCHelperGCRM.hpp
@@ -0,0 +1,85 @@
+//-------------------------------------------------------------------------
+// Filename : NCHelperGCRM.hpp
+//
+// Purpose : Climate NC file helper for GCRM grid
+//
+// Creator : Danqing Wu
+//-------------------------------------------------------------------------
+
+#ifndef NCHELPERGCRM_HPP
+#define NCHELPERGCRM_HPP
+
+#include "NCHelper.hpp"
+
+namespace moab {
+
+//! Child helper class for GCRM grid
+class NCHelperGCRM : public UcdNCHelper
+{
+public:
+ NCHelperGCRM(ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet);
+ static bool can_read_file(ReadNC* readNC);
+
+private:
+ //! Implementation of NCHelper::init_mesh_vals()
+ virtual ErrorCode init_mesh_vals();
+ //! Implementation of NCHelper::check_existing_mesh()
+ virtual ErrorCode check_existing_mesh();
+ //! Implementation of NCHelper::create_mesh()
+ virtual ErrorCode create_mesh(Range& faces);
+ //! Implementation of NCHelper::get_mesh_type_name()
+ virtual std::string get_mesh_type_name() { return "GCRM"; }
+
+ //! Implementation of UcdNCHelper::read_ucd_variable_to_nonset_allocate()
+ virtual ErrorCode read_ucd_variable_to_nonset_allocate(std::vector<ReadNC::VarData>& vdatas,
+ std::vector<int>& tstep_nums);
+#ifdef PNETCDF_FILE
+ //! Implementation of UcdNCHelper::read_ucd_variable_to_nonset_async()
+ virtual ErrorCode read_ucd_variable_to_nonset_async(std::vector<ReadNC::VarData>& vdatas,
+ std::vector<int>& tstep_nums);
+#else
+ //! Implementation of UcdNCHelper::read_ucd_variable_to_nonset()
+ virtual ErrorCode read_ucd_variable_to_nonset(std::vector<ReadNC::VarData>& vdatas,
+ std::vector<int>& tstep_nums);
+#endif
+
+ //! Redistribute local cells after the trivial partition (e.g. with a Zoltan partition, if applicable)
+ ErrorCode redistribute_local_cells(int start_cell_index);
+
+ //! Create local vertices
+ ErrorCode create_local_vertices(const std::vector<int>& vertices_on_local_cells, EntityHandle& start_vertex);
+
+ //! Create local edges (optional)
+ ErrorCode create_local_edges(EntityHandle start_vertex);
+
+ //! Create local cells without padding (cells are divided into groups based on the number of edges)
+ ErrorCode create_local_cells(const std::vector<int>& vertices_on_local_cells,
+ EntityHandle start_vertex, Range& faces);
+
+ //! Create local cells with padding (padded cells will have the same number of edges)
+ ErrorCode create_padded_local_cells(const std::vector<int>& vertices_on_local_cells,
+ EntityHandle start_vertex, Range& faces);
+
+ //! Create gather set vertices
+ ErrorCode create_gather_set_vertices(EntityHandle gather_set, EntityHandle& gather_set_start_vertex);
+
+ //! Create gather set edges (optional)
+ ErrorCode create_gather_set_edges(EntityHandle gather_set, EntityHandle gather_set_start_vertex);
+
+ //! Create gather set cells without padding (cells are divided into groups based on the number of edges)
+ ErrorCode create_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex);
+
+ //! Create gather set cells with padding (padded cells will have the same number of edges)
+ ErrorCode create_padded_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex);
+
+private:
+ int maxEdgesPerCell;
+ int numCellGroups;
+ bool createGatherSet;
+ std::map<EntityHandle, int> cellHandleToGlobalID;
+ Range facesOwned;
+};
+
+} // namespace moab
+
+#endif
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/fathomteam/moab/commits/db359ddf9574/
Changeset: db359ddf9574
Branch: None
User: iulian07
Date: 2014-05-07 03:58:37
Summary: some changes for Jeff's fork
add a small test, and also write a VTK file as part of the read-no-vars test
Affected #: 6 files
diff --git a/src/io/Makefile.am b/src/io/Makefile.am
index 1269523..aeb5d6a 100644
--- a/src/io/Makefile.am
+++ b/src/io/Makefile.am
@@ -23,11 +23,13 @@ if NETCDF_FILE
NCHelperFV.cpp NCHelperFV.hpp \
NCHelperHOMME.cpp NCHelperHOMME.hpp \
NCHelperMPAS.cpp NCHelperMPAS.hpp \
+ NCHelperGCRM.cpp NCHelperGCRM.hpp \
NCWriteHelper.cpp NCWriteHelper.hpp \
NCWriteEuler.cpp NCWriteEuler.hpp \
NCWriteFV.cpp NCWriteFV.hpp \
NCWriteHOMME.cpp NCWriteHOMME.hpp \
NCWriteMPAS.cpp NCWriteMPAS.hpp \
+ NCWriteGCRM.cpp NCWriteGCRM.hpp \
ReadGCRM.cpp ReadGCRM.hpp
else
MOAB_NETCDF_SRCS =
diff --git a/src/io/NCHelper.cpp b/src/io/NCHelper.cpp
index 0e970d9..7f062b6 100644
--- a/src/io/NCHelper.cpp
+++ b/src/io/NCHelper.cpp
@@ -3,6 +3,7 @@
#include "NCHelperFV.hpp"
#include "NCHelperHOMME.hpp"
#include "NCHelperMPAS.hpp"
+#include "NCHelperGCRM.hpp"
#include <sstream>
@@ -51,6 +52,9 @@ NCHelper* NCHelper::get_nc_helper(ReadNC* readNC, int fileId, const FileOptions&
// For a HOMME connectivity file, there might be no CF convention
else if (NCHelperHOMME::can_read_file(readNC, fileId))
return new (std::nothrow) NCHelperHOMME(readNC, fileId, opts, fileSet);
+ // gcrm reader
+ else if (NCHelperGCRM::can_read_file(readNC))
+ return new (std::nothrow) NCHelperGCRM(readNC, fileId, opts, fileSet);
}
// Unknown NetCDF grid (will fill this in later for POP, CICE and CLM)
diff --git a/src/io/ReadNC.hpp b/src/io/ReadNC.hpp
index 67e2aa3..80fb02a 100644
--- a/src/io/ReadNC.hpp
+++ b/src/io/ReadNC.hpp
@@ -67,6 +67,7 @@ class ReadNC : public ReaderIface
friend class NCHelperFV;
friend class NCHelperHOMME;
friend class NCHelperMPAS;
+ friend class NCHelperGCRM;
public:
diff --git a/src/io/WriteNC.hpp b/src/io/WriteNC.hpp
index 60c295e..3db9dc2 100644
--- a/src/io/WriteNC.hpp
+++ b/src/io/WriteNC.hpp
@@ -75,6 +75,7 @@ class WriteNC : public WriterIface
friend class NCWriteFV;
friend class NCWriteHOMME;
friend class NCWriteMPAS;
+ friend class NCWriteGCRM;
public:
//! Factory method
diff --git a/test/io/Makefile.am b/test/io/Makefile.am
index b33eed8..cbc1b84 100644
--- a/test/io/Makefile.am
+++ b/test/io/Makefile.am
@@ -12,7 +12,7 @@ AM_CPPFLAGS += -DSRCDIR=$(srcdir) \
if NETCDF_FILE
EXODUS_TEST = exodus_test
- NC_TEST = read_nc read_ucd_nc read_mpas_nc write_nc
+ NC_TEST = read_nc read_ucd_nc read_mpas_nc write_nc read_gcrm_nc
else
EXODUS_TEST =
NC_TEST =
@@ -67,6 +67,7 @@ nastran_test_SOURCES = $(srcdir)/../TestUtil.hpp nastran_test.cc
read_nc_SOURCES = $(srcdir)/../TestUtil.hpp read_nc.cpp
read_ucd_nc_SOURCES = $(srcdir)/../TestUtil.hpp read_ucd_nc.cpp
read_mpas_nc_SOURCES = $(srcdir)/../TestUtil.hpp read_mpas_nc.cpp
+read_gcrm_nc_SOURCES = $(srcdir)/../TestUtil.hpp read_gcrm_nc.cpp
write_nc_SOURCES = $(srcdir)/../TestUtil.hpp write_nc.cpp
ideas_test_SOURCES = $(srcdir)/../TestUtil.hpp ideas_test.cc
stl_test_SOURCES = $(srcdir)/../TestUtil.hpp stl_test.cc
diff --git a/test/io/read_gcrm_nc.cpp b/test/io/read_gcrm_nc.cpp
new file mode 100644
index 0000000..831358d
--- /dev/null
+++ b/test/io/read_gcrm_nc.cpp
@@ -0,0 +1,548 @@
+#include "TestUtil.hpp"
+#include "moab/Core.hpp"
+#include "moab/ReadUtilIface.hpp"
+#include "MBTagConventions.hpp"
+
+using namespace moab;
+
+#ifdef MESHDIR
+static const char example[] = STRINGIFY(MESHDIR) "/io/gcrm_r3.nc";
+#else
+static const char example[] = "/io/gcrm_r3.nc";
+#endif
+
+#ifdef USE_MPI
+#include "moab_mpi.h"
+#include "moab/ParallelComm.hpp"
+#endif
+
+void test_read_all();
+void test_read_onevar();
+void test_read_onetimestep();
+void test_read_nomesh();
+void test_read_novars();
+void test_read_no_mixed_elements(); // Test read option NO_MIXED_ELEMENTS
+void test_read_no_edges(); // Test read option NO_EDGES
+void test_gather_onevar(); // Test gather set with one variable
+
+void get_options(std::string& opts);
+
+const double eps = 1e-20;
+
+int main(int argc, char* argv[])
+{
+ int result = 0;
+
+#ifdef USE_MPI
+ int fail = MPI_Init(&argc, &argv);
+ if (fail)
+ return 1;
+#else
+ argv[0] = argv[argc - argc]; // Suppress unused-variable warnings about argc/argv in serial builds
+#endif
+
+ //result += RUN_TEST(test_read_all);
+ //result += RUN_TEST(test_read_onevar);
+ //result += RUN_TEST(test_read_onetimestep);
+ //result += RUN_TEST(test_read_nomesh);
+ result += RUN_TEST(test_read_novars);
+ //result += RUN_TEST(test_read_no_mixed_elements);
+ //result += RUN_TEST(test_read_no_edges);
+ //result += RUN_TEST(test_gather_onevar);
+
+#ifdef USE_MPI
+ fail = MPI_Finalize();
+ if (fail)
+ return 1;
+#endif
+
+ return result;
+}
+
+void test_read_all()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ std::string opts;
+ get_options(opts);
+
+ // Read mesh and read all variables at all timesteps
+ ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
+ CHECK_ERR(rval);
+
+ int procs = 1;
+#ifdef USE_MPI
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ procs = pcomm->proc_config().proc_size();
+#endif
+
+ // 'make check' runs this test on one processor
+ if (1 == procs) {
+ // For each tag, check two values
+ double val[2];
+
+ // Check tags for vertex variable vorticity
+ Tag vorticity_tag0, vorticity_tag1;
+ rval = mb.tag_get_handle("vorticity0", 1, MB_TYPE_DOUBLE, vorticity_tag0);
+ CHECK_ERR(rval);
+ rval = mb.tag_get_handle("vorticity1", 1, MB_TYPE_DOUBLE, vorticity_tag1);
+ CHECK_ERR(rval);
+
+ // Get vertices (1280 vertices)
+ Range verts;
+ rval = mb.get_entities_by_type(0, MBVERTEX, verts);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)1280, verts.size());
+ CHECK_EQUAL((size_t)1, verts.psize());
+
+ // Check vorticity tag values on first two vertices
+ EntityHandle vert_ents[] = {verts[0], verts[1]};
+ rval = mb.tag_get_data(vorticity_tag0, vert_ents, 2, val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(1.1, val[0], eps);
+ CHECK_REAL_EQUAL(1.2, val[1], eps);
+ rval = mb.tag_get_data(vorticity_tag1, vert_ents, 2, val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(2.1, val[0], eps);
+ CHECK_REAL_EQUAL(2.2, val[1], eps);
+
+ // Check tags for edge variable u
+ Tag u_tag0, u_tag1;
+ rval = mb.tag_get_handle("u0", 1, MB_TYPE_DOUBLE, u_tag0);
+ CHECK_ERR(rval);
+ rval = mb.tag_get_handle("u1", 1, MB_TYPE_DOUBLE, u_tag1);
+ CHECK_ERR(rval);
+
+ // Get edges (1920 edges)
+ Range edges;
+ rval = mb.get_entities_by_type(0, MBEDGE, edges);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)1920, edges.size());
+ CHECK_EQUAL((size_t)1, edges.psize());
+
+ // Check u tag values on two specified edges
+ EntityHandle edge_ents[] = {edges[5], edges[6]};
+ rval = mb.tag_get_data(u_tag0, edge_ents, 2, val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(1.113138721544778, val[0], eps);
+ CHECK_REAL_EQUAL(-1.113138721930009, val[1], eps);
+ rval = mb.tag_get_data(u_tag1, edge_ents, 2, val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(2.113138721544778, val[0], eps);
+ CHECK_REAL_EQUAL(-2.113138721930009, val[1], eps);
+
+ // Check tags for cell variable ke
+ Tag ke_tag0, ke_tag1;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+ CHECK_ERR(rval);
+ rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+ CHECK_ERR(rval);
+
+ // Get cells (12 pentagons and 630 hexagons)
+ Range cells;
+ rval = mb.get_entities_by_type(0, MBPOLYGON, cells);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)642, cells.size());
+#ifdef USE_MPI
+ // If MOAB is compiled parallel, the requested sequence sizes are increased
+ // by a factor of 1.5 to allow for ghosts. This will introduce a gap
+ // between the two face sequences.
+ CHECK_EQUAL((size_t)2, cells.psize());
+#else
+ CHECK_EQUAL((size_t)1, cells.psize());
+#endif
+
+ // Check ke tag values on first pentagon and first hexagon
+ EntityHandle cell_ents[] = {cells[0], cells[12]};
+ rval = mb.tag_get_data(ke_tag0, cell_ents, 2, val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(15.001, val[0], eps);
+ CHECK_REAL_EQUAL(16.013, val[1], eps);
+ rval = mb.tag_get_data(ke_tag1, cell_ents, 2, val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(25.001, val[0], eps);
+ CHECK_REAL_EQUAL(26.013, val[1], eps);
+ }
+}
+
+void test_read_onevar()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ std::string opts;
+ get_options(opts);
+
+ // Read mesh and read cell variable ke at all timesteps
+ opts += ";VARIABLE=ke";
+ ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
+ CHECK_ERR(rval);
+
+ int procs = 1;
+#ifdef USE_MPI
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ procs = pcomm->proc_config().proc_size();
+#endif
+
+ // 'make check' runs this test on one processor
+ if (1 == procs) {
+ // Check ke tags
+ Tag ke_tag0, ke_tag1;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+ CHECK_ERR(rval);
+ rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+ CHECK_ERR(rval);
+
+ // Get cells (12 pentagons and 630 hexagons)
+ Range cells;
+ rval = mb.get_entities_by_type(0, MBPOLYGON, cells);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)642, cells.size());
+
+#ifdef USE_MPI
+ // If MOAB is compiled parallel, the requested sequence sizes are increased
+ // by a factor of 1.5 to allow for ghosts. This will introduce a gap
+ // between the two face sequences.
+ CHECK_EQUAL((size_t)2, cells.psize());
+#else
+ CHECK_EQUAL((size_t)1, cells.psize());
+#endif
+
+ // Check ke tag values on 4 cells: first pentagon, last pentagon,
+ // first hexagon, and last hexagon
+ EntityHandle cell_ents[] = {cells[0], cells[11], cells[12], cells[641]};
+ double ke0_val[4];
+ rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
+ double ke1_val[4];
+ rval = mb.tag_get_data(ke_tag1, cell_ents, 4, ke1_val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
+ CHECK_REAL_EQUAL(25.012, ke1_val[1], eps);
+ CHECK_REAL_EQUAL(26.013, ke1_val[2], eps);
+ CHECK_REAL_EQUAL(26.642, ke1_val[3], eps);
+ }
+}
+
+void test_read_onetimestep()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ std::string opts;
+ get_options(opts);
+
+ // Read mesh and read all variables at 2nd timestep
+ opts += ";TIMESTEP=1";
+ ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
+ CHECK_ERR(rval);
+
+ // Check vorticity tags
+ Tag vorticity_tag0, vorticity_tag1;
+ rval = mb.tag_get_handle("vorticity0", 1, MB_TYPE_DOUBLE, vorticity_tag0);
+ // Tag vorticity0 should not exist
+ CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
+ rval = mb.tag_get_handle("vorticity1", 1, MB_TYPE_DOUBLE, vorticity_tag1);
+ CHECK_ERR(rval);
+}
+
+void test_read_nomesh()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ // Need a file set for nomesh to work right
+ EntityHandle file_set;
+ ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
+ CHECK_ERR(rval);
+
+ std::string orig, opts;
+ get_options(orig);
+
+ // Read mesh and read all variables at 1st timestep
+ opts = orig + ";TIMESTEP=0";
+ rval = mb.load_file(example, &file_set, opts.c_str());
+ CHECK_ERR(rval);
+
+ // Check u tags
+ Tag u_tag0, u_tag1;
+ rval = mb.tag_get_handle("u0", 1, MB_TYPE_DOUBLE, u_tag0);
+ CHECK_ERR(rval);
+ // Tag u1 should not exist
+ rval = mb.tag_get_handle("u1", 1, MB_TYPE_DOUBLE, u_tag1);
+ CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
+
+ // Read all variables at 2nd timestep; no need to read mesh again
+ opts = orig + ";TIMESTEP=1;NOMESH";
+ rval = mb.load_file(example, &file_set, opts.c_str());
+ CHECK_ERR(rval);
+
+ // Check tag u1 again
+ rval = mb.tag_get_handle("u1", 1, MB_TYPE_DOUBLE, u_tag1);
+ // Tag u1 should exist at this time
+ CHECK_ERR(rval);
+}
+
+void test_read_novars()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ // Need a file set for nomesh to work right
+ EntityHandle file_set;
+ ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
+ CHECK_ERR(rval);
+
+ std::string orig, opts;
+ get_options(orig);
+ CHECK_ERR(rval);
+
+ // Read header info only, no mesh, no variables
+ opts = orig + ";NOMESH;VARIABLE=";
+ rval = mb.load_file(example, &file_set, opts.c_str());
+ CHECK_ERR(rval);
+
+ // Read mesh, but still no variables
+ opts = orig + ";VARIABLE=;TIMESTEP=0;DEBUG_IO=2";
+ rval = mb.load_file(example, &file_set, opts.c_str());
+ CHECK_ERR(rval);
+
+ rval = mb.write_file("gcrm.vtk");
+ CHECK_ERR(rval);
+#if 0
+ // Check ke tags
+ Tag ke_tag0, ke_tag1;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+ // Tag ke0 should not exist
+ CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
+ rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+ // Tag ke1 should not exist
+ CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
+
+ // Read ke at 1st timestep, no need to read mesh
+ opts = orig + ";VARIABLE=ke;TIMESTEP=0;NOMESH";
+ rval = mb.load_file(example, &file_set, opts.c_str());
+ CHECK_ERR(rval);
+
+ // Check ke tags again
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+ // Tag ke0 should exist at this time
+ CHECK_ERR(rval);
+ // Tag ke1 should still not exist
+ rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+ CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
+
+ // Read ke at 2nd timestep, no need to read mesh
+ opts = orig + ";VARIABLE=ke;TIMESTEP=1;NOMESH";
+ rval = mb.load_file(example, &file_set, opts.c_str());
+ CHECK_ERR(rval);
+
+ // Check tag ke1 again
+ rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+ // Tag ke1 should exist at this time
+ CHECK_ERR(rval);
+
+ int procs = 1;
+#ifdef USE_MPI
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ procs = pcomm->proc_config().proc_size();
+#endif
+
+ // 'make check' runs this test on one processor
+ if (1 == procs) {
+ // Get cells (12 pentagons and 630 hexagons)
+ Range cells;
+ rval = mb.get_entities_by_type(file_set, MBPOLYGON, cells);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)642, cells.size());
+
+#ifdef USE_MPI
+ // If MOAB is compiled parallel, the requested sequence sizes are increased
+ // by a factor of 1.5 to allow for ghosts. This will introduce a gap
+ // between the two face sequences.
+ CHECK_EQUAL((size_t)2, cells.psize());
+#else
+ CHECK_EQUAL((size_t)1, cells.psize());
+#endif
+
+ // Check ke tag values on 4 cells: first pentagon, last pentagon,
+ // first hexagon, and last hexagon
+ EntityHandle cell_ents[] = {cells[0], cells[11], cells[12], cells[641]};
+ double ke0_val[4];
+ rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
+ double ke1_val[4];
+ rval = mb.tag_get_data(ke_tag1, cell_ents, 4, ke1_val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
+ CHECK_REAL_EQUAL(25.012, ke1_val[1], eps);
+ CHECK_REAL_EQUAL(26.013, ke1_val[2], eps);
+ CHECK_REAL_EQUAL(26.642, ke1_val[3], eps);
+ }
+#endif
+}
+
+void test_read_no_mixed_elements()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ std::string opts;
+ get_options(opts);
+
+ // Read mesh with no mixed elements and read all variables at all timesteps
+ opts += ";NO_MIXED_ELEMENTS";
+ ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
+ CHECK_ERR(rval);
+
+ int procs = 1;
+#ifdef USE_MPI
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ procs = pcomm->proc_config().proc_size();
+#endif
+
+ // 'make check' runs this test on one processor
+ if (1 == procs) {
+ // Check ke tags
+ Tag ke_tag0, ke_tag1;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+ CHECK_ERR(rval);
+ rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+ CHECK_ERR(rval);
+
+ // Get cells (12 pentagons and 630 hexagons)
+ Range cells;
+ rval = mb.get_entities_by_type(0, MBPOLYGON, cells);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)642, cells.size());
+ // Only one group of cells (pentagons are padded to hexagons,
+ // e.g. connectivity [1 2 3 4 5] => [1 2 3 4 5 5])
+ CHECK_EQUAL((size_t)1, cells.psize());
+
+ // Check ke tag values on 4 cells: first pentagon, last pentagon,
+ // first hexagon, and last hexagon
+ EntityHandle cell_ents[] = {cells[0], cells[11], cells[12], cells[641]};
+ double ke0_val[4];
+ rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
+ double ke1_val[4];
+ rval = mb.tag_get_data(ke_tag1, cell_ents, 4, ke1_val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
+ CHECK_REAL_EQUAL(25.012, ke1_val[1], eps);
+ CHECK_REAL_EQUAL(26.013, ke1_val[2], eps);
+ CHECK_REAL_EQUAL(26.642, ke1_val[3], eps);
+ }
+}
+
+void test_read_no_edges()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ std::string opts;
+ get_options(opts);
+
+ opts += ";NO_EDGES;VARIABLE=";
+ ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
+ CHECK_ERR(rval);
+
+ // Get edges
+ Range edges;
+ rval = mb.get_entities_by_type(0, MBEDGE, edges);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)0, edges.size());
+}
+
+void test_gather_onevar()
+{
+ Core moab;
+ Interface& mb = moab;
+
+ EntityHandle file_set;
+ ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
+ CHECK_ERR(rval);
+
+ std::string opts;
+ get_options(opts);
+
+ // Read cell variable ke and create gather set on processor 0
+ opts += ";VARIABLE=ke;GATHER_SET=0";
+ rval = mb.load_file(example, &file_set, opts.c_str());
+ CHECK_ERR(rval);
+
+#ifdef USE_MPI
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ int rank = pcomm->proc_config().proc_rank();
+
+ Range cells, cells_owned;
+ rval = mb.get_entities_by_type(file_set, MBPOLYGON, cells);
+ CHECK_ERR(rval);
+
+ // Get local owned cells
+ rval = pcomm->filter_pstatus(cells, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &cells_owned);
+ CHECK_ERR(rval);
+
+ EntityHandle gather_set = 0;
+ if (0 == rank) {
+ // Get gather set
+ ReadUtilIface* readUtilIface;
+ mb.query_interface(readUtilIface);
+ rval = readUtilIface->get_gather_set(gather_set);
+ CHECK_ERR(rval);
+ assert(gather_set != 0);
+ }
+
+ Tag ke_tag0, gid_tag;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0, MB_TAG_DENSE);
+ CHECK_ERR(rval);
+
+ rval = mb.tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE);
+ CHECK_ERR(rval);
+
+ pcomm->gather_data(cells_owned, ke_tag0, gid_tag, gather_set, 0);
+
+ if (0 == rank) {
+ // Get gather set cells
+ Range gather_set_cells;
+ rval = mb.get_entities_by_type(gather_set, MBPOLYGON, gather_set_cells);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)642, gather_set_cells.size());
+ CHECK_EQUAL((size_t)2, gather_set_cells.psize());
+
+ // Check ke0 tag values on 4 gather set cells: first pentagon, last pentagon,
+ // first hexagon, and last hexagon
+ double ke0_val[4];
+ EntityHandle cell_ents[] = {gather_set_cells[0], gather_set_cells[11],
+ gather_set_cells[12], gather_set_cells[641]};
+ rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
+ }
+#endif
+}
+
+void get_options(std::string& opts)
+{
+#ifdef USE_MPI
+ // Use parallel options
+ opts = std::string(";;PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL");
+#else
+ opts = std::string(";;");
+#endif
+}
https://bitbucket.org/fathomteam/moab/commits/61d3bf312cdb/
Changeset: 61d3bf312cdb
Branch: None
User: iulian07
Date: 2014-05-07 03:59:49
Summary: Merge branch 'master' of https://bitbucket.org/fathomteam/moab
Affected #: 1 file
diff --git a/src/io/NCHelper.cpp b/src/io/NCHelper.cpp
index 7f062b6..57f4792 100644
--- a/src/io/NCHelper.cpp
+++ b/src/io/NCHelper.cpp
@@ -331,6 +331,8 @@ ErrorCode NCHelper::read_variable_setup(std::vector<std::string>& var_names, std
std::vector<ReadNC::VarData>& vdatas, std::vector<ReadNC::VarData>& vsetdatas)
{
std::map<std::string, ReadNC::VarData>& varInfo = _readNC->varInfo;
+ std::vector<std::string>& dimNames = _readNC->dimNames;
+
std::map<std::string, ReadNC::VarData>::iterator mit;
// If empty read them all (except ignored variables)
@@ -345,7 +347,6 @@ ErrorCode NCHelper::read_variable_setup(std::vector<std::string>& var_names, std
continue;
// Dimension variables were read before creating conventional tags
- std::vector<std::string>& dimNames = _readNC->dimNames;
if (std::find(dimNames.begin(), dimNames.end(), vd.varName) != dimNames.end())
continue;
@@ -392,16 +393,14 @@ ErrorCode NCHelper::read_variable_setup(std::vector<std::string>& var_names, std
for (unsigned int i = 0; i < vsetdatas.size(); i++) {
if ((std::find(vsetdatas[i].varDims.begin(), vsetdatas[i].varDims.end(), tDim) != vsetdatas[i].varDims.end())
- && (vsetdatas[i].varDims.size() > 1)) {
- // Set variables with timesteps: time is the first dimension, followed
- // by other dimensions, e.g. xtime(Time, StrLen)
+ && (vsetdatas[i].varName != dimNames[tDim])) {
+ // Set variables with timesteps: e.g. xtime(Time) or xtime(Time, StrLen)
vsetdatas[i].varTags.resize(tstep_nums.size(), 0);
vsetdatas[i].varDatas.resize(tstep_nums.size());
vsetdatas[i].has_tsteps = true;
}
else {
- // Set variables without timesteps: no time dimension, or time is the only
- // dimension, e.g. lev(lev), xtime(Time)
+ // Set variables without timesteps: no time dimension, or time itself
vsetdatas[i].varTags.resize(1, 0);
vsetdatas[i].varDatas.resize(1);
vsetdatas[i].has_tsteps = false;
@@ -427,7 +426,7 @@ ErrorCode NCHelper::read_variable_to_set(std::vector<ReadNC::VarData>& vdatas, s
for (unsigned int t = 0; t < tstep_nums.size(); t++) {
void* data = vdatas[i].varDatas[t];
- // Set variables with timesteps, e.g. xtime(Time, StrLen)
+ // Set variables with timesteps, e.g. xtime(Time) or xtime(Time, StrLen)
if (vdatas[i].has_tsteps) {
// Set readStart for each timestep along time dimension
vdatas[i].readStarts[0] = tstep_nums[t];
@@ -494,7 +493,7 @@ ErrorCode NCHelper::read_variable_to_set(std::vector<ReadNC::VarData>& vdatas, s
}
vdatas[i].varDatas[t] = NULL;
- // Loop continues only for set variables with timesteps, e.g. xtime(Time, StrLen)
+ // Loop continues only for set variables with timesteps, e.g. xtime(Time) or xtime(Time, StrLen)
if (!vdatas[i].has_tsteps)
break;
}
@@ -885,7 +884,7 @@ ErrorCode NCHelper::read_variable_to_set_allocate(std::vector<ReadNC::VarData>&
rval = MB_FAILURE;
}
- // Loop continues only for set variables with timesteps, e.g. xtime(Time, StrLen)
+ // Loop continues only for set variables with timesteps, e.g. xtime(Time) or xtime(Time, StrLen)
if (!vdatas[i].has_tsteps)
break;
}
@@ -1030,6 +1029,10 @@ ErrorCode ScdNCHelper::create_mesh(Range& faces)
Range edges;
mbImpl->get_adjacencies(faces, 1, true, edges, Interface::UNION);
+ // Create COORDS tag for quads
+ rval = create_quad_coordinate_tag();
+ ERRORR(rval, "Trouble creating coordinate tags to entities quads");
+
return MB_SUCCESS;
}
@@ -1041,10 +1044,6 @@ ErrorCode ScdNCHelper::read_variables(std::vector<std::string>& var_names, std::
ErrorCode rval = read_variable_setup(var_names, tstep_nums, vdatas, vsetdatas);
ERRORR(rval, "Trouble setting up read variable.");
- // Create COORDS tag for quads
- rval = create_quad_coordinate_tag();
- ERRORR(rval, "Trouble creating coordinate tags to entities quads");
-
if (!vsetdatas.empty()) {
rval = read_variable_to_set(vsetdatas, tstep_nums);
ERRORR(rval, "Trouble read variables to set.");
@@ -1327,8 +1326,7 @@ ErrorCode ScdNCHelper::create_quad_coordinate_tag() {
ERRORR(rval, "Trouble getting owned QUAD entity.");
numOwnedEnts = ents_owned.size();
}
- else
- {
+ else {
numOwnedEnts = ents.size();
ents_owned = ents;
}
https://bitbucket.org/fathomteam/moab/commits/132fc09f6579/
Changeset: 132fc09f6579
Branch: None
User: iulian07
Date: 2014-05-07 05:02:08
Summary: more changes in Jeff's fork
the biggest problem was the radians/degrees conversion:
the lat/lon values were already in radians, so no conversion was necessary
Affected #: 6 files
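
For reference, a standalone sketch (not part of the changeset) of the conversion this summary describes: with lon/lat already in radians, the Cartesian coordinates come straight from cos/sin with no pi/180 factor. The radius and angles below are arbitrary sample values, not what the reader computes.

#include <cmath>
#include <cstdio>

int main()
{
  const double rad = 8000.0;      // sample sphere radius
  const double lon = 0.5;         // longitude, already in radians
  const double lat = 1.0;         // latitude, already in radians

  double cosphi = std::cos(lat);
  double x = rad * cosphi * std::cos(lon);
  double y = rad * cosphi * std::sin(lon);
  double z = rad * std::sin(lat);

  std::printf("x=%g y=%g z=%g\n", x, y, z);
  return 0;
}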
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 40f7a59..5b5cd8f 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -73,7 +73,6 @@
io/GmshUtil.cpp
io/ReadABAQUS.cpp
io/ReadGmsh.cpp
- io/ReadGCRM.cpp
io/ReadIDEAS.cpp
io/ReadMCNP5.cpp
io/ReadNASTRAN.cpp
@@ -132,6 +131,7 @@
io/NCHelperFV.cpp
io/NCHelperHOMME.cpp
io/NCHelperMPAS.cpp
+ io/NCHelperGCRM.cpp
SpectralMeshTool.cpp
)
include_directories(
diff --git a/src/ReaderWriterSet.cpp b/src/ReaderWriterSet.cpp
index 1f254b0..47c4c2b 100644
--- a/src/ReaderWriterSet.cpp
+++ b/src/ReaderWriterSet.cpp
@@ -51,13 +51,11 @@
# include "WriteNC.hpp"
# include "WriteSLAC.hpp"
# include "ReadNC.hpp"
-# include "ReadGCRM.hpp"
#endif
// 2nd include of ReadNC in case we have pnetcdf and not netcdf
#ifdef PNETCDF_FILE
# include "ReadNC.hpp"
-# include "ReadGCRM.hpp"
#endif
#ifdef CGNS_FILE
@@ -105,7 +103,6 @@ ReaderWriterSet::ReaderWriterSet( Core* mdb, Error* handler )
#ifdef NETCDF_FILE
const char* exo_sufxs[] = { "exo", "exoII", "exo2", "g", "gen", NULL };
register_factory( ReadNCDF::factory, WriteNCDF::factory, "Exodus II", exo_sufxs, "EXODUS" );
- register_factory( ReadGCRM::factory, NULL, "GCRM NC", "nc", "GCRM" );
register_factory( ReadNC::factory, WriteNC::factory, "Climate NC", "nc", "NC" );
#endif
diff --git a/src/io/Makefile.am b/src/io/Makefile.am
index aeb5d6a..b8d2ff2 100644
--- a/src/io/Makefile.am
+++ b/src/io/Makefile.am
@@ -29,8 +29,7 @@ if NETCDF_FILE
NCWriteFV.cpp NCWriteFV.hpp \
NCWriteHOMME.cpp NCWriteHOMME.hpp \
NCWriteMPAS.cpp NCWriteMPAS.hpp \
- NCWriteGCRM.cpp NCWriteGCRM.hpp \
- ReadGCRM.cpp ReadGCRM.hpp
+ NCWriteGCRM.cpp NCWriteGCRM.hpp
else
MOAB_NETCDF_SRCS =
endif
@@ -48,8 +47,7 @@ if !NETCDF_FILE
NCWriteEuler.cpp NCWriteEuler.hpp \
NCWriteFV.cpp NCWriteFV.hpp \
NCWriteHOMME.cpp NCWriteHOMME.hpp \
- NCWriteMPAS.cpp NCWriteMPAS.hpp \
- ReadGCRM.cpp ReadGCRM.hpp
+ NCWriteMPAS.cpp NCWriteMPAS.hpp
endif
endif
diff --git a/src/io/NCHelperGCRM.cpp b/src/io/NCHelperGCRM.cpp
index e85f051..2fdb606 100644
--- a/src/io/NCHelperGCRM.cpp
+++ b/src/io/NCHelperGCRM.cpp
@@ -28,7 +28,7 @@ NCHelperGCRM::NCHelperGCRM(ReadNC* readNC, int fileId, const FileOptions& opts,
{
// Ignore variables containing topological information
ignoredVarNames.insert("grid");
- ignoredVarNames.insert("cell_corners");
+ // ignoredVarNames.insert("cell_corners"); this is actually needed
ignoredVarNames.insert("cell_edges");
ignoredVarNames.insert("edge_corners");
ignoredVarNames.insert("cell_neighbors");
@@ -1070,13 +1070,13 @@ ErrorCode NCHelperGCRM::create_local_vertices(const std::vector<int>& vertices_o
// Convert lon/lat/rad to x/y/z
double* zptr = arrays[2];
- const double pideg = acos(-1.0) / 180.0;
+ //const double pideg = acos(-1.0) / 180.0;
double rad = 8000.0 + levVals[0];
for (int i = 0; i < nLocalVertices; i++) {
- double cosphi = cos(pideg * yptr[i]);
- double zmult = sin(pideg * yptr[i]);
- double xmult = cosphi * cos(xptr[i] * pideg);
- double ymult = cosphi * sin(xptr[i] * pideg);
+ double cosphi = cos(yptr[i]);
+ double zmult = sin(yptr[i]);
+ double xmult = cosphi * cos(xptr[i]);
+ double ymult = cosphi * sin(xptr[i]);
xptr[i] = rad * xmult;
yptr[i] = rad * ymult;
zptr[i] = rad * zmult;
diff --git a/src/io/ReadGCRM.cpp b/src/io/ReadGCRM.cpp
deleted file mode 100644
index 0b86bd6..0000000
--- a/src/io/ReadGCRM.cpp
+++ /dev/null
@@ -1,693 +0,0 @@
-#include "ReadGCRM.hpp"
-
-#include <algorithm>
-#include <assert.h>
-#include <stdio.h>
-#include <string.h>
-#include <cmath>
-#include <cstdlib>
-#include <iostream>
-#include <sstream>
-#include <map>
-#include <dirent.h>
-
-#include "moab/Core.hpp"
-#include "moab/ReaderIface.hpp"
-#include "moab/ReadUtilIface.hpp"
-#include "MBTagConventions.hpp"
-#include "moab/FileOptions.hpp"
-
-#define ERRORR(rval, str) \
- if (MB_SUCCESS != rval) {readMeshIface->report_error("%s", str); return rval;}
-
-#define ERRORS(err, str) \
- if (err) {readMeshIface->report_error("%s", str); return MB_FAILURE;}
-
-namespace moab
-{
-
-ReaderIface* ReadGCRM::factory(Interface* iface) {
- return new ReadGCRM(iface);
-}
-
-ReadGCRM::ReadGCRM(Interface* impl) :
- CPU_WORD_SIZE(-1), IO_WORD_SIZE(-1), mbImpl(impl), fileId(-1), tMin(-1), tMax(-1), tDim(-1),
- numUnLim(-1), mCurrentMeshHandle(0), startVertex(0), startElem(0), mGlobalIdTag(0), max_line_length(-1),
- max_str_length(-1), vertexOffset(0), dbgOut(stderr), partMethod(-1), isParallel(false)
-
-#ifdef USE_MPI
-, myPcomm(NULL)
-#endif
-{
- assert(impl != NULL);
- impl->query_interface(readMeshIface);
-}
-
-ReadGCRM::~ReadGCRM()
-{
- if (readMeshIface) {
- mbImpl->release_interface(readMeshIface);
- readMeshIface = 0;
- }
-}
-
-void ReadGCRM::reset() {
- CPU_WORD_SIZE = -1;
- IO_WORD_SIZE = -1;
- fileId = -1;
- tMin = tMax = -1;
- numUnLim = -1;
- mCurrentMeshHandle = 0;
- startVertex = startElem = 0;
- mGlobalIdTag = 0;
- max_line_length = -1;
- max_str_length = -1;
- vertexOffset = 0;
- dbgOut = stderr;
- mCurrentMeshHandle = 0;
- vertexOffset = 0;
-
-#ifdef USE_MPI
- myPcomm = NULL;
-#endif
-}
-
-ErrorCode ReadGCRM::load_file( const char* /* file_name */,
- const EntityHandle* /* file_set*/,
- const FileOptions& /* opts */,
- const SubsetList* /* subset_list */,
- const Tag* /*file_id_tag*/)
-{
- // guarantee failure for now
- return MB_FAILURE;
-
- /* VSM: Temporarily commenting out all the relevant code until the above guaranteed failure is fixed */
- /*
- if (subset_list) {
- // see src/moab/ReaderIface.hpp, definition of SubsetList struct; this basically specifies
- // an integer tag and tag values for sets to read on this proc, or a part number and total # parts
- // for reading a trivial partition of entities
- }
-
- // save filename to member variable so we don't need to pass as an argument
- // to called functions
- fileName = file_name;
-
- // process options; see src/FileOptions.hpp for API for FileOptions class, and doc/metadata_info.doc for
- // a description of various options used by some of the readers in MOAB
- ErrorCode result = process_options(opts);
- if (MB_SUCCESS != result) {
- readMeshIface->report_error( "%s: problem reading options\n", fileName);
- return result;
- }
-
- // Open file; filePtr is member of ReadGCRM, change to whatever mechanism is used to identify file
- FILE* filePtr = fopen( fileName, "r" );
- if (!filePtr)
- {
- readMeshIface->report_error( "%s: fopen returned error.\n", fileName);
- return MB_FILE_DOES_NOT_EXIST;
- }
-
- // read number of verts, elements, sets
- long num_verts = 0, num_elems = 0, num_sets = 0;
-
- // read_ents keeps a running set of entities read from this file, including vertices, elements, and sets;
- // these will get added to file_set (if input) at the end of the read
- Range read_ents;
-
- // start_vertex is passed back so we know how to convert indices from the file into vertex handles; most
- // of the time this is done by adding start_vertex to the (0-based) index; if the index is 1-based, you also
- // need to subtract one; see read_elements for details
- EntityHandle start_vertex;
- result = read_vertices(num_verts, start_vertex, read_ents);
- if (MB_SUCCESS != result) return result;
-
- // create/read elements; this template assumes that all elements are the same type, so can be read in a single
- // call to read_elements, and kept track of with a single start_elem handle. If there are more entity types,
- // might have to keep these start handles in an array/vector. start_elem is only really needed if you're reading
- // sets later, and need to convert some file-based index to an entity handle
- EntityHandle start_elem;
- result = read_elements(num_elems, start_vertex, start_elem, read_ents);
- if (MB_SUCCESS != result) return result;
-
- // read/create entity sets; typically these sets have some tag identifying what they're for, see doc/metadata_info.doc
- // for examples of different kinds of sets and how they're marked
- result = create_sets(num_sets, start_vertex, num_verts, start_elem, num_elems, read_ents);
- if (MB_SUCCESS != result) return result;
-
- // finally, add all read_ents into the file set, if one was input
- if (file_set && *file_set) {
- result = mbImpl->add_entities(*file_set, read_ents);
- if (MB_SUCCESS != result) return result;
- }
-
- return result;
- */
-}
-
-ErrorCode ReadGCRM::read_vertices(int num_verts, EntityHandle &start_vertex, Range &read_ents)
-{
- // allocate nodes; these are allocated in one shot, get contiguous handles starting with start_handle,
- // and the reader is passed back double*'s pointing to MOAB's native storage for vertex coordinates
- // for those verts
- std::vector<double*> coord_arrays;
- ErrorCode result = readMeshIface->get_node_coords( 3, num_verts, 1, start_vertex, coord_arrays );
- if (MB_SUCCESS != result) {
- readMeshIface->report_error("%s: Trouble reading vertices\n", fileName);
- return result;
- }
-
- // fill in vertex coordinate arrays
- double *x = coord_arrays[0],
- *y = coord_arrays[1],
- *z = coord_arrays[2];
- for(long i = 0; i < num_verts; ++i) {
- // empty statement to avoid compiler warning
- if (x || y || z) {}
- // read x/y/z
- }
-
- if (num_verts) read_ents.insert(start_vertex, start_vertex + num_verts - 1);
-
- return result;
-}
-
-//! read/create elements
-ErrorCode ReadGCRM::read_elements(int num_elems, EntityHandle start_vertex,
- EntityHandle &start_elem, Range &read_ents)
-{
- // get the entity type being read
- EntityType ent_type = MBHEX;
-
- // get the number of vertices per entity
- int verts_per_elem = 8;
-
- // Create the element sequence; passes back a pointer to the internal storage for connectivity and the
- // starting entity handle
- EntityHandle* conn_array;
- ErrorCode result = readMeshIface->get_element_connect( num_elems, verts_per_elem, ent_type,
- 1, start_elem, conn_array );
- if (MB_SUCCESS != result) {
- readMeshIface->report_error("%s: Trouble reading elements\n", fileName);
- return result;
- }
-
- // read connectivity into conn_array directly
- for (long i = 0; i < num_elems; i++) {
- // read connectivity
- }
-
- // convert file-based connectivity indices to vertex handles in-place; be careful, if indices are smaller than handles,
- // need to do from the end of the list so we don't overwrite data
- //
- // here, we assume indices are smaller than handles, just for demonstration; create an integer-type pointer to connectivity
- // initialized to same start of connectivity array
- int *ind_array = reinterpret_cast<int*>(conn_array);
- // OFFSET is value of first vertex index in file; most files are 1-based, but some might be 0-based
- int OFFSET = 1;
- for (long i = num_elems*verts_per_elem-1; i >= 0; i--) {
- conn_array[i] = ind_array[i] + start_vertex + OFFSET;
-
- // this assert assumes last handle in read_ents is highest vertex handle in this file
- assert(conn_array[i] >= start_vertex && conn_array[i] <= *read_ents.rbegin());
- }
-
- // notify MOAB of the new elements
- result = readMeshIface->update_adjacencies(start_elem, num_elems, verts_per_elem, conn_array);
- if (MB_SUCCESS != result) return result;
-
- // add elements to read_ents
- if (num_elems) read_ents.insert(start_elem, start_elem+num_elems-1);
-
- return MB_SUCCESS;
-}
-
-//! read/create sets
- ErrorCode ReadGCRM::create_sets(int num_sets, EntityHandle /*start_vertex*/, int /*num_verts*/,
- EntityHandle /*start_elem*/, int /*num_elems*/, Range &read_ents)
-{
- ErrorCode result = MB_SUCCESS;
- EntityHandle this_set;
-
- for (int i = 0; i < num_sets; i++) {
- // create set
- result = mbImpl->create_meshset(MESHSET_SET, this_set);
- if (MB_SUCCESS != result) {
- readMeshIface->report_error("%s: Trouble creating set.\n", fileName);
- return result;
- }
-
- Range set_ents;
- // read/compute what's in this set; REMEMBER TO CONVERT THESE TO MOAB HANDLES
-
- // add them to the set
- result = mbImpl->add_entities(this_set, set_ents);
- if (MB_SUCCESS != result) {
- readMeshIface->report_error("%s: Trouble putting entities in set.\n", fileName);
- return result;
- }
-
- // add the new set to read_ents
- read_ents.insert(this_set);
- }
-
- return MB_SUCCESS;
-}
-
-ErrorCode ReadGCRM::process_options(const FileOptions &opts)
-{
- // mark all options seen, to avoid compile warning on unused variable
- opts.mark_all_seen();
- return MB_SUCCESS;
-}
-
-ErrorCode ReadGCRM::read_header() {
- CPU_WORD_SIZE = sizeof(double);
- IO_WORD_SIZE = sizeof(double);
-
- dbgOut.tprint(1, "Reading header...\n");
-
- // get the global attributes
- int numgatts;
- int success;
- success = NCFUNC(inq_natts )(fileId, &numgatts);
- ERRORS(success, "Couldn't get number of global attributes.");
-
- // read attributes into globalAtts
- ErrorCode result = get_attributes(NC_GLOBAL, numgatts, globalAtts);
- ERRORR(result, "Getting attributes.");
- dbgOut.tprintf(1, "Read %u attributes\n", (unsigned int) globalAtts.size());
-
- // read in dimensions into dimVals
- result = get_dimensions(fileId, dimNames, dimVals);
- ERRORR(result, "Getting dimensions.");
- dbgOut.tprintf(1, "Read %u dimensions\n", (unsigned int) dimVals.size());
-
- // read in variables into varInfo
- result = get_variables();
- ERRORR(result, "Getting variables.");
- dbgOut.tprintf(1, "Read %u variables\n", (unsigned int) varInfo.size());
-
- return MB_SUCCESS;
-}
-
-ErrorCode ReadGCRM::get_attributes(int var_id, int num_atts, std::map<std::string, AttData> &atts, const char *prefix) {
-
- char dum_name[120];
-
- for (int i = 0; i < num_atts; i++) {
- // get the name
- int success = NCFUNC(inq_attname)(fileId, var_id, i, dum_name);
- ERRORS(success, "Trouble getting attribute name.");
-
- AttData &data = atts[std::string(dum_name)];
- data.attName = std::string(dum_name);
- success = NCFUNC(inq_att)(fileId, var_id, dum_name, &data.attDataType, &data.attLen);
- ERRORS(success, "Trouble getting attribute info.");
- data.attVarId = var_id;
-
- dbgOut.tprintf(2, "%sAttribute %s: length=%u, varId=%d, type=%d\n", (prefix ? prefix : ""), data.attName.c_str(),
- (unsigned int) data.attLen, data.attVarId, data.attDataType);
- }
-
- return MB_SUCCESS;
-}
-
-ErrorCode ReadGCRM::get_dimensions(int file_id, std::vector<std::string> &dim_names, std::vector<int> &dim_vals) {
- // get the number of dimensions
- int num_dims;
- int success = NCFUNC(inq_ndims)(file_id, &num_dims);
- ERRORS(success, "Trouble getting number of dimensions.");
-
- if (num_dims > NC_MAX_DIMS) {
- readMeshIface->report_error("ReadGCRM: File contains %d dims but NetCDF library supports only %d\n", num_dims, (int) NC_MAX_DIMS);
- return MB_FAILURE;
- }
-
- char dim_name[NC_MAX_NAME + 1];
- NCDF_SIZE dum_len;
- dim_names.resize(num_dims);
- dim_vals.resize(num_dims);
-
- for (int i = 0; i < num_dims; i++) {
- success = NCFUNC(inq_dim)(file_id, i, dim_name, &dum_len);
- ERRORS(success, "Trouble getting dimension info.");
-
- dim_vals[i] = dum_len;
- dim_names[i] = std::string(dim_name);
-
- dbgOut.tprintf(2, "Dimension %s, length=%u\n", dim_name, (unsigned int) dum_len);
- }
-
- return MB_SUCCESS;
-}
-
-ErrorCode ReadGCRM::get_variables() {
- // first cache the number of time steps
- std::vector<std::string>::iterator vit = std::find(dimNames.begin(), dimNames.end(), "time");
- if (vit == dimNames.end())
- vit = std::find(dimNames.begin(), dimNames.end(), "t");
-
- int ntimes = 0;
- if (vit != dimNames.end())
- ntimes = dimVals[vit - dimNames.begin()];
- if (!ntimes)
- ntimes = 1;
-
- // get the number of variables
- int num_vars;
- int success = NCFUNC(inq_nvars)(fileId, &num_vars);
- ERRORS(success, "Trouble getting number of variables.");
-
- if (num_vars > NC_MAX_VARS) {
- readMeshIface->report_error("ReadGCRM: File contains %d vars but NetCDF library supports only %d\n", num_vars, (int) NC_MAX_VARS);
- return MB_FAILURE;
- }
-
- char var_name[NC_MAX_NAME + 1];
- int var_ndims;
-
- for (int i = 0; i < num_vars; i++) {
- // get the name first, so we can allocate a map iterate for this var
- success = NCFUNC(inq_varname )(fileId, i, var_name);
- ERRORS(success, "Trouble getting var name.");
- VarData &data = varInfo[std::string(var_name)];
- data.varName = std::string(var_name);
- data.varId = i;
- data.varTags.resize(ntimes, 0);
-
- // get the data type
- success = NCFUNC(inq_vartype)(fileId, i, &data.varDataType);
- ERRORS(success, "Trouble getting variable data type.");
-
- // get the number of dimensions, then the dimensions
- success = NCFUNC(inq_varndims)(fileId, i, &var_ndims);
- ERRORS(success, "Trouble getting number of dims of a variable.");
- data.varDims.resize(var_ndims);
-
- success = NCFUNC(inq_vardimid)(fileId, i, &data.varDims[0]);
- ERRORS(success, "Trouble getting variable dimensions.");
-
- // finally, get the number of attributes, then the attributes
- success = NCFUNC(inq_varnatts)(fileId, i, &data.numAtts);
- ERRORS(success, "Trouble getting number of dims of a variable.");
-
- // print debug info here so attribute info comes afterwards
- dbgOut.tprintf(2, "Variable %s: Id=%d, numAtts=%d, datatype=%d, num_dims=%u\n", data.varName.c_str(), data.varId, data.numAtts,
- data.varDataType, (unsigned int) data.varDims.size());
-
- ErrorCode rval = get_attributes(i, data.numAtts, data.varAtts, " ");
- ERRORR(rval, "Trouble getting attributes for a variable.");
-
- }
-
- return MB_SUCCESS;
-}
-
-ErrorCode ReadGCRM::read_tag_values(const char*, const char*, const FileOptions&, std::vector<int>&, const SubsetList*) {
- return MB_FAILURE;
-}
-
-ErrorCode ReadGCRM::create_tags(ScdInterface *scdi, EntityHandle file_set, const std::vector<int>& tstep_nums) {
- ErrorCode rval;
- std::string tag_name;
-
- // <__NUM_DIMS>
- Tag numDimsTag = 0;
- tag_name = "__NUM_DIMS";
- int numDims = dimNames.size();
- rval = mbImpl->tag_get_handle(tag_name.c_str(), 1, MB_TYPE_INTEGER, numDimsTag, MB_TAG_SPARSE | MB_TAG_CREAT);
- ERRORR(rval, "Trouble creating __NUM_DIMS tag.");
- rval = mbImpl->tag_set_data(numDimsTag, &file_set, 1, &numDims);
- ERRORR(rval, "Trouble setting data for __NUM_DIMS tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- // <__NUM_VARS>
- Tag numVarsTag = 0;
- tag_name = "__NUM_VARS";
- int numVars = varInfo.size();
- rval = mbImpl->tag_get_handle(tag_name.c_str(), 1, MB_TYPE_INTEGER, numVarsTag, MB_TAG_SPARSE | MB_TAG_CREAT);
- ERRORR(rval, "Trouble creating __NUM_VARS tag.");
- rval = mbImpl->tag_set_data(numVarsTag, &file_set, 1, &numVars);
- ERRORR(rval, "Trouble setting data for __NUM_VARS tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- // <__DIM_NAMES>
- Tag dimNamesTag = 0;
- tag_name = "__DIM_NAMES";
- std::string dimnames;
- unsigned int dimNamesSz = dimNames.size();
- for (unsigned int i = 0; i != dimNamesSz; ++i) {
- dimnames.append(dimNames[i]);
- dimnames.push_back('\0');
- }
- int dimnamesSz = dimnames.size();
- rval = mbImpl->tag_get_handle(tag_name.c_str(), 0, MB_TYPE_OPAQUE, dimNamesTag, MB_TAG_CREAT | MB_TAG_SPARSE | MB_TAG_VARLEN);
- ERRORR(rval, "Trouble creating __DIM_NAMES tag.");
- const void* ptr = dimnames.c_str();
- rval = mbImpl->tag_set_by_ptr(dimNamesTag, &file_set, 1, &ptr, &dimnamesSz);
- ERRORR(rval, "Trouble setting data for __DIM_NAMES tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- // <__VAR_NAMES>
- Tag varNamesTag = 0;
- tag_name = "__VAR_NAMES";
- std::string varnames;
- std::map<std::string, VarData>::iterator mapIter;
- for (mapIter = varInfo.begin(); mapIter != varInfo.end(); ++mapIter) {
- varnames.append(mapIter->first);
- varnames.push_back('\0');
- }
- int varnamesSz = varnames.size();
- rval = mbImpl->tag_get_handle(tag_name.c_str(), 0, MB_TYPE_OPAQUE, varNamesTag, MB_TAG_CREAT | MB_TAG_SPARSE | MB_TAG_VARLEN);
- ERRORR(rval, "Trouble creating __VAR_NAMES tag.");
- ptr = varnames.c_str();
- rval = mbImpl->tag_set_by_ptr(varNamesTag, &file_set, 1, &ptr, &varnamesSz);
- ERRORR(rval, "Trouble setting data for __VAR_NAMES tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- // __<dim_name>_LOC_MINMAX
- for (unsigned int i = 0; i != dimNamesSz; ++i) {
- if (dimNames[i] == "time") {
- std::stringstream ss_tag_name;
- ss_tag_name << "__" << dimNames[i] << "_LOC_MINMAX";
- tag_name = ss_tag_name.str();
- Tag tagh = 0;
- std::vector<int> val(2, 0);
- val[0] = tMin;
- val[1] = tMax;
- rval = mbImpl->tag_get_handle(tag_name.c_str(), 2, MB_TYPE_INTEGER, tagh, MB_TAG_SPARSE | MB_TAG_CREAT);
- ERRORR(rval, "Trouble creating __<dim_name>_LOC_MINMAX tag.");
- rval = mbImpl->tag_set_data(tagh, &file_set, 1, &val[0]);
- ERRORR(rval, "Trouble setting data for __<dim_name>_LOC_MINMAX tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
- }
- }
-
- // __<dim_name>_LOC_VALS
- for (unsigned int i = 0; i != dimNamesSz; ++i) {
- if (dimNames[i] != "time")
- continue;
- std::vector<int> val;
- if (!tstep_nums.empty())
- val = tstep_nums;
- else {
- val.resize(tVals.size());
- for (unsigned int j = 0; j != tVals.size(); ++j)
- val[j] = j;
- }
- Tag tagh = 0;
- std::stringstream ss_tag_name;
- ss_tag_name << "__" << dimNames[i] << "_LOC_VALS";
- tag_name = ss_tag_name.str();
- rval = mbImpl->tag_get_handle(tag_name.c_str(), val.size(), MB_TYPE_INTEGER, tagh, MB_TAG_SPARSE | MB_TAG_CREAT);
- ERRORR(rval, "Trouble creating __<dim_name>_LOC_VALS tag.");
- rval = mbImpl->tag_set_data(tagh, &file_set, 1, &val[0]);
- ERRORR(rval, "Trouble setting data for __<dim_name>_LOC_VALS tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
- }
-
- // __<var_name>_DIMS
- for (mapIter = varInfo.begin(); mapIter != varInfo.end(); ++mapIter) {
- Tag varNamesDimsTag = 0;
- std::stringstream ss_tag_name;
- ss_tag_name << "__" << mapIter->first << "_DIMS";
- tag_name = ss_tag_name.str();
- unsigned int varDimSz = varInfo[mapIter->first].varDims.size();
- if (varDimSz == 0)
- continue;
- varInfo[mapIter->first].varTags.resize(varDimSz, 0);
- for (unsigned int i = 0; i != varDimSz; ++i) {
- Tag tmptag = 0;
- std::string tmptagname = dimNames[varInfo[mapIter->first].varDims[i]];
- mbImpl->tag_get_handle(tmptagname.c_str(), 0, MB_TYPE_OPAQUE, tmptag, MB_TAG_ANY);
- varInfo[mapIter->first].varTags[i] = tmptag;
- }
- rval = mbImpl->tag_get_handle(tag_name.c_str(), varDimSz, MB_TYPE_HANDLE, varNamesDimsTag, MB_TAG_SPARSE | MB_TAG_CREAT);
- ERRORR(rval, "Trouble creating __<var_name>_DIMS tag.");
- rval = mbImpl->tag_set_data(varNamesDimsTag, &file_set, 1, &(varInfo[mapIter->first].varTags[0]));
- ERRORR(rval, "Trouble setting data for __<var_name>_DIMS tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
- }
-
- // <PARTITION_METHOD>
- Tag part_tag = scdi->part_method_tag();
- if (!part_tag)
- ERRORR(MB_FAILURE, "Trouble getting partition method tag.");
- rval = mbImpl->tag_set_data(part_tag, &file_set, 1, &partMethod);
- ERRORR(rval, "Trouble setting data for PARTITION_METHOD tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- // <__GLOBAL_ATTRIBS>
- tag_name = "__GLOBAL_ATTRIBS";
- Tag globalAttTag = 0;
- rval = mbImpl->tag_get_handle(tag_name.c_str(), 0, MB_TYPE_OPAQUE, globalAttTag, MB_TAG_CREAT | MB_TAG_SPARSE | MB_TAG_VARLEN);
- ERRORR(rval, "Trouble creating __GLOBAL_ATTRIBS tag.");
- std::string gattVal;
- std::vector<int> gattLen;
- rval = create_attrib_string(globalAtts, gattVal, gattLen);
- ERRORR(rval, "Trouble creating attribute strings.");
- const void* gattptr = gattVal.c_str();
- int globalAttSz = gattVal.size();
- rval = mbImpl->tag_set_by_ptr(globalAttTag, &file_set, 1, &gattptr, &globalAttSz);
- ERRORR(rval, "Trouble setting data for __GLOBAL_ATTRIBS tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- // <__GLOBAL_ATTRIBS_LEN>
- tag_name = "__GLOBAL_ATTRIBS_LEN";
- Tag globalAttLenTag = 0;
- if (gattLen.size() == 0)
- gattLen.push_back(0);
- rval = mbImpl->tag_get_handle(tag_name.c_str(), gattLen.size(), MB_TYPE_INTEGER, globalAttLenTag, MB_TAG_SPARSE | MB_TAG_CREAT);
- ERRORR(rval, "Trouble creating __GLOBAL_ATTRIBS_LEN tag.");
- rval = mbImpl->tag_set_data(globalAttLenTag, &file_set, 1, &gattLen[0]);
- ERRORR(rval, "Trouble setting data for __GLOBAL_ATTRIBS_LEN tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- // __<var_name>_ATTRIBS and __<var_name>_ATTRIBS_LEN
- for (mapIter = varInfo.begin(); mapIter != varInfo.end(); ++mapIter) {
- std::stringstream ssTagName;
- ssTagName << "__" << mapIter->first << "_ATTRIBS";
- tag_name = ssTagName.str();
- Tag varAttTag = 0;
- rval = mbImpl->tag_get_handle(tag_name.c_str(), 0, MB_TYPE_OPAQUE, varAttTag, MB_TAG_CREAT | MB_TAG_SPARSE | MB_TAG_VARLEN);
- ERRORR(rval, "Trouble creating __<var_name>_ATTRIBS tag.");
- std::string varAttVal;
- std::vector<int> varAttLen;
- rval = create_attrib_string(mapIter->second.varAtts, varAttVal, varAttLen);
- ERRORR(rval, "Trouble creating attribute strings.");
- const void* varAttPtr = varAttVal.c_str();
- int varAttSz = varAttVal.size();
- rval = mbImpl->tag_set_by_ptr(varAttTag, &file_set, 1, &varAttPtr, &varAttSz);
- ERRORR(rval, "Trouble setting data for __<var_name>_ATTRIBS tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
- if (varAttLen.size() == 0)
- varAttLen.push_back(0);
- ssTagName << "_LEN";
- tag_name = ssTagName.str();
- Tag varAttLenTag = 0;
- rval = mbImpl->tag_get_handle(tag_name.c_str(), varAttLen.size(), MB_TYPE_INTEGER, varAttLenTag, MB_TAG_SPARSE | MB_TAG_CREAT);
- ERRORR(rval, "Trouble creating __<var_name>_ATTRIBS_LEN tag.");
- rval = mbImpl->tag_set_data(varAttLenTag, &file_set, 1, &varAttLen[0]);
- ERRORR(rval, "Trouble setting data for __<var_name>_ATTRIBS_LEN tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
- }
-
- // <__VAR_NAMES_LOCATIONS>
- tag_name = "__VAR_NAMES_LOCATIONS";
- Tag varNamesLocsTag = 0;
- std::vector<int> varNamesLocs(varInfo.size());
- rval = mbImpl->tag_get_handle(tag_name.c_str(), varNamesLocs.size(), MB_TYPE_INTEGER, varNamesLocsTag, MB_TAG_CREAT
- | MB_TAG_SPARSE);
- ERRORR(rval, "Trouble creating __VAR_NAMES_LOCATIONS tag.");
- for (mapIter = varInfo.begin(); mapIter != varInfo.end(); ++mapIter) {
- varNamesLocs[std::distance(varInfo.begin(), mapIter)] = mapIter->second.entLoc;
- }
- rval = mbImpl->tag_set_data(varNamesLocsTag, &file_set, 1, &varNamesLocs[0]);
- ERRORR(rval, "Trouble setting data for __VAR_NAMES_LOCATIONS tag.");
- if (MB_SUCCESS == rval)
- dbgOut.tprintf(2, "Tag created for variable %s\n", tag_name.c_str());
-
- return MB_SUCCESS;
-}
-
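
For reference, the conventional tags created above pack lists of names ('\0'-separated) into a single variable-length, sparse, opaque tag attached to the file set. A minimal standalone sketch of that pattern using the same MOAB calls, with made-up names and error checking trimmed:

#include "moab/Core.hpp"

#include <string>

using namespace moab;

int main()
{
  Core moab_core;
  Interface& mb = moab_core;

  EntityHandle file_set;
  mb.create_meshset(MESHSET_SET, file_set);

  // Pack a few names, each terminated by '\0', into one buffer.
  const char* names[] = {"cells", "edges", "corners"};
  std::string packed;
  for (int i = 0; i < 3; i++) {
    packed.append(names[i]);
    packed.push_back('\0');
  }

  // Variable-length, sparse, opaque tag; the length is supplied per entity.
  Tag names_tag = 0;
  mb.tag_get_handle("__DIM_NAMES_SKETCH", 0, MB_TYPE_OPAQUE, names_tag,
                    MB_TAG_CREAT | MB_TAG_SPARSE | MB_TAG_VARLEN);

  const void* ptr = packed.c_str();
  int len = (int)packed.size();
  mb.tag_set_by_ptr(names_tag, &file_set, 1, &ptr, &len);

  // Read it back the same way.
  const void* data = NULL;
  int data_len = 0;
  mb.tag_get_by_ptr(names_tag, &file_set, 1, &data, &data_len);

  return 0;
}
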
-ErrorCode ReadGCRM::create_attrib_string(const std::map<std::string, AttData>& attMap, std::string& attVal, std::vector<int>& attLen) {
- int success;
- std::stringstream ssAtt;
- unsigned int sz = 0;
- std::map<std::string, AttData>::const_iterator attIt = attMap.begin();
- for (; attIt != attMap.end(); ++attIt) {
- ssAtt << attIt->second.attName;
- ssAtt << '\0';
- void* attData = NULL;
- switch (attIt->second.attDataType) {
- case NC_BYTE:
- case NC_CHAR:
- sz = attIt->second.attLen;
- attData = (char *) malloc(sz);
- success = NCFUNC(get_att_text)(fileId, attIt->second.attVarId, attIt->second.attName.c_str(), (char*) attData);
- ERRORS(success, "Failed to read attribute char data.")
- ;
- ssAtt << "char;";
- break;
- case NC_DOUBLE:
- sz = attIt->second.attLen * sizeof(double);
- attData = (double *) malloc(sz);
- success = NCFUNC(get_att_double)(fileId, attIt->second.attVarId, attIt->second.attName.c_str(), (double*) attData);
- ERRORS(success, "Failed to read attribute double data.")
- ;
- ssAtt << "double;";
- break;
- case NC_FLOAT:
- sz = attIt->second.attLen * sizeof(float);
- attData = (float *) malloc(sz);
- success = NCFUNC(get_att_float)(fileId, attIt->second.attVarId, attIt->second.attName.c_str(), (float*) attData);
- ERRORS(success, "Failed to read attribute float data.")
- ;
- ssAtt << "float;";
- break;
- case NC_INT:
- sz = attIt->second.attLen * sizeof(int);
- attData = (int *) malloc(sz);
- success = NCFUNC(get_att_int)(fileId, attIt->second.attVarId, attIt->second.attName.c_str(), (int*) attData);
- ERRORS(success, "Failed to read attribute int data.")
- ;
- ssAtt << "int;";
- break;
- case NC_SHORT:
- sz = attIt->second.attLen * sizeof(short);
- attData = (short *) malloc(sz);
- success = NCFUNC(get_att_short)(fileId, attIt->second.attVarId, attIt->second.attName.c_str(), (short*) attData);
- ERRORS(success, "Failed to read attribute short data.")
- ;
- ssAtt << "short;";
- break;
- default:
- success = 1;
- }
- char* tmpc = (char *) attData;
- for (unsigned int counter = 0; counter != sz; ++counter)
- ssAtt << tmpc[counter];
- free(attData);
- ssAtt << ';';
- attLen.push_back(ssAtt.str().size() - 1);
- }
- attVal = ssAtt.str();
-
- return MB_SUCCESS;
-}
-
-} // namespace moab
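
For reference, create_attrib_string above serializes each attribute as its name, a '\0', the type name followed by ';', the raw attribute bytes, and a closing ';', with attLen recording the end position of each record. A small standalone sketch of that packing for character attributes only, with made-up contents and no NetCDF calls involved:

#include <map>
#include <sstream>
#include <string>
#include <vector>

// Pack attributes as: name '\0' type ';' value ';'  and record end offsets,
// mirroring the convention used by the reader above.
void pack_attributes(const std::map<std::string, std::string>& atts,
                     std::string& attVal, std::vector<int>& attLen)
{
  std::stringstream ss;
  for (std::map<std::string, std::string>::const_iterator it = atts.begin();
       it != atts.end(); ++it) {
    ss << it->first;
    ss << '\0';
    ss << "char;";   // only character data in this sketch
    ss << it->second;
    ss << ';';
    attLen.push_back((int)ss.str().size() - 1);
  }
  attVal = ss.str();
}

int main()
{
  std::map<std::string, std::string> atts;
  atts["units"] = "m";
  atts["long_name"] = "kinetic energy";

  std::string attVal;
  std::vector<int> attLen;
  pack_attributes(atts, attVal, attLen);
  return 0;
}
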
diff --git a/src/io/ReadGCRM.hpp b/src/io/ReadGCRM.hpp
deleted file mode 100644
index d8a1fb1..0000000
--- a/src/io/ReadGCRM.hpp
+++ /dev/null
@@ -1,268 +0,0 @@
-#ifndef READGCRM_HPP
-#define READGCRM_HPP
-
-#ifndef IS_BUILDING_MB
-//#error "ReadNC.hpp isn't supposed to be included into an application"
-#endif
-
-#include <vector>
-#include <map>
-#include <string>
-
-#include "moab/Forward.hpp"
-#include "moab/ReaderIface.hpp"
-#include "moab/Range.hpp"
-#include "moab/ScdInterface.hpp"
-#include "DebugOutput.hpp"
-
-#ifdef USE_MPI
-# include "moab_mpi.h"
-# include "moab/ParallelComm.hpp"
-#endif
-
-#ifdef PNETCDF_FILE
-# include "pnetcdf.h"
-# define NCFUNC(func) ncmpi_ ## func
-# define NCFUNCA(func) ncmpi_ ## func ## _all
-// keep it this way , introduce another macro, used so far only for ucd mesh
-//# define NCASYNCH
-# ifdef NCASYNCH
-# define NCREQ , &requests[j]
-# define NCFUNCAG(func) ncmpi_iget ## func
-# define NCWAIT
-# else
-# define NCREQ2 , &requests[idxReq++]
-# define NCFUNCAG2(func) ncmpi_iget ## func
-# define NCREQ
-# define NCFUNCAG(func) ncmpi_get ## func ## _all
-# endif
-# define NCDF_SIZE MPI_Offset
-# define NCDF_DIFF MPI_Offset
-#else
-# include "netcdf.h"
-#define NCREQ
-#define NCGET get
-# define NCFUNC(func) nc_ ## func
-# define NCFUNCA(func) nc_ ## func
-# define NCFUNCAG(func) nc_get ## func
-# define NCDF_SIZE size_t
-# define NCDF_DIFF ptrdiff_t
-#endif
-
-namespace moab {
-
-class ReadUtilIface;
-
-/**
- * \brief GCRM for implementing new file readers in MOAB
- * This class is a GCRM for writing new file readers in MOAB. This shows how to efficiently create
- * vertices and elements using the ReadUtilIface class, and to translate indices in connectivity lists
- * into vertex handles created during the read.
- *
- * After writing the new reader class, you should also modify src/ReaderWriterSet.cpp, to register the
- * new reader along with the file extensions that it reads. This will turn on automatic creating of this
- * reader based on file extension, which is done in Core::serial_load_file.
- */
-class ReadGCRM : public ReaderIface
-{
-
-public:
-
- //! factory method
- static ReaderIface* factory( Interface* );
-
- virtual ErrorCode load_file( const char* file_name,
- const EntityHandle* file_set,
- const FileOptions& opts,
- const SubsetList* subset_list = 0,
- const Tag* file_id_tag = 0 );
-
- virtual ErrorCode read_tag_values( const char* file_name,
- const char* tag_name,
- const FileOptions& opts,
- std::vector<int>& tag_values_out,
- const SubsetList* subset_list = 0 );
-
- //! Constructor
- ReadGCRM(Interface* impl = NULL);
-
- //! Destructor
- virtual ~ReadGCRM();
-
- // ENTLOCNSEDGE for north/south edge
- // ENTLOCWEEDGE for west/east edge
- enum EntityLocation {ENTLOCNODE=0, ENTLOCNSEDGE, ENTLOCEWEDGE, ENTLOCQUAD, ENTLOCSET};
-
-private:
-
- /** \brief Read vertex data and create vertices in MOAB database
- * \param num_verts Number of vertices to be read
- * \param start_vertex Starting vertex handle; used later to offset connectivity indices
- * \param read_ents Range storing all entities read from this file
- */
- ErrorCode read_vertices(int num_verts, EntityHandle &start_vertex, Range &read_ents);
-
- /** \brief Read element data and create elements in MOAB database
- * \param num_elems Number of elements to be read
- * \param start_vertex Starting vertex handle; used to offset connectivity indices
- * \param start_elem Starting element handle; may be used later to offset set entities
- * \param read_ents Range storing all entities read from this file
- */
- ErrorCode read_elements(int num_elems, EntityHandle start_vertex,
- EntityHandle &start_elem, Range &read_ents);
-
- /** \brief Read entity set data and create/populate sets in MOAB database
- * \param num_sets Number of sets to be read
- * \param start_vertex Starting vertex handle
- * \param num_verts Total number of vertices read from file
- * \param start_elem Starting element handle
- * \param num_elems Total number of elements read from file
- * \param read_ents Range storing all entities read from this file
- */
- ErrorCode create_sets(int num_sets, EntityHandle start_vertex, int num_verts,
- EntityHandle start_elem, int num_elems, Range &read_ents);
-
-
- /** \brief Process options passed into the reader
- * \param opts Options passed into this read
- */
- ErrorCode process_options(const FileOptions &opts);
-
- void reset();
-
- //! read the header information
- ErrorCode read_header();
-
- class AttData
- {
- public:
- AttData() : attId(-1), attLen(0), attVarId(-2) {}
- int attId;
- NCDF_SIZE attLen;
- int attVarId;
- nc_type attDataType;
- std::string attName;
- };
-
- class VarData
- {
- public:
- VarData() : varId(-1), numAtts(-1), read(false), entLoc(ENTLOCSET), numLev(1), sz(0), has_t(false) {}
- int varId;
- int numAtts;
- nc_type varDataType;
- std::vector<int> varDims; // the dimension indices making up this multi-dimensional variable
- std::map<std::string,AttData> varAtts;
- std::string varName;
- bool read;
- std::vector<Tag> varTags; // one tag for each timestep, varTags[t]
- std::vector<void*> varDatas;
- std::vector<std::vector<NCDF_SIZE> > readDims; // start value for this [t][dim]
- std::vector<std::vector<NCDF_SIZE> > readCounts; // number of data values for this [t][dim]
- int entLoc;
- int numLev;
- int sz;
- bool has_t;
- };
-
- //! get all global attributes in the file
- ErrorCode get_attributes(int var_id, int num_atts, std::map<std::string,AttData> &atts,
- const char *prefix="");
-
- //! get all dimensions in the file
- ErrorCode get_dimensions(int file_id, std::vector<std::string> &dim_names, std::vector<int> &dim_vals);
-
- //! get the variable names and other info defined for this file
- ErrorCode get_variables();
-
- //! number of dimensions in this nc file
- unsigned int number_dimensions();
-
- ErrorCode get_tag(VarData &var_data, int tstep_num, Tag &tagh, int num_lev);
-
- //! create a character string attString of attMap. with '\0'
- //! terminating each attribute name, ';' separating the data type
- //! and value, and ';' separating one name/data type/value from
- //! the next'. attLen stores the end postion for each name/data
- //! type/ value.
- ErrorCode create_attrib_string(const std::map<std::string, AttData>& attMap,
- std::string& attString,
- std::vector<int>& attLen);
-
- ErrorCode create_tags(ScdInterface *scdi, EntityHandle file_set, const std::vector<int>& tstep_nums);
-
- ReadUtilIface* readMeshIface;
-
- int CPU_WORD_SIZE;
- int IO_WORD_SIZE;
-
- //! interface instance
- Interface* mbImpl;
-
- const char *fileName;
-
- //! file numbers assigned by netcdf
- int fileId, connectId;
-
- //! dimensions
- std::vector<std::string> dimNames;
- std::vector<int> dimVals;
-
- //! global attribs
- std::map<std::string,AttData> globalAtts;
-
- //! variable info
- std::map<std::string,VarData> varInfo;
-
- int tMin, tMax;
-
- //! values for t
- std::vector<double> tVals;
-
- //! dimension numbers for i, j, t
- int tDim;
-
- //! number of the dimension of unlimited dimension, if any
- int numUnLim;
-
- //! Meshset Handle for the mesh that is currently being read
- EntityHandle mCurrentMeshHandle;
-
- //! starting vertex and element handles for this read
- EntityHandle startVertex, startElem;
-
- //! Cached tags for reading. Note that all these tags are defined when the
- //! core is initialized.
- Tag mGlobalIdTag;
-
- int max_line_length, max_str_length;
-
- //! range of entities in initial mesh, before this read
- Range initRange;
-
- //! offset of first vertex id
- int vertexOffset;
-
- //! debug stuff
- DebugOutput dbgOut;
-
- //! partitioning method
- int partMethod;
-
- //! are we reading in parallel?
- bool isParallel;
-
-#ifdef USE_MPI
- ParallelComm *myPcomm;
-#endif
-
-};
-
-inline unsigned int ReadGCRM::number_dimensions()
-{
- return dimVals.size();
-}
-
-} // namespace moab
-
-#endif
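
The class comment above mentions translating connectivity indices from the file into vertex handles created during the read; with contiguously created vertices this is just an offset from the starting handle. A minimal sketch with a stand-in handle type and made-up values (not the MOAB implementation itself):

#include <vector>

typedef unsigned long EntityHandle; // stand-in for moab::EntityHandle

int main()
{
  // Handles of contiguously created vertices start at some value...
  const EntityHandle start_vertex = 100;

  // ...and the file stores 1-based vertex indices in its connectivity lists.
  int file_connectivity[] = {1, 2, 3, 4};

  std::vector<EntityHandle> conn(4);
  for (int i = 0; i < 4; ++i)
    conn[i] = start_vertex + file_connectivity[i] - 1; // index -> handle

  return 0;
}
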
https://bitbucket.org/fathomteam/moab/commits/fad5de56dfee/
Changeset: fad5de56dfee
Branch: None
User: iulian07
Date: 2014-05-07 05:28:46
Summary: fix edge indexing error for GCRM
Correct all edge indices, not just half of them.
Affected #: 1 file
diff --git a/src/io/NCHelperGCRM.cpp b/src/io/NCHelperGCRM.cpp
index 2fdb606..b7774e6 100644
--- a/src/io/NCHelperGCRM.cpp
+++ b/src/io/NCHelperGCRM.cpp
@@ -1214,7 +1214,7 @@ ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
#endif
// GCRM is 0 based, convert edge indices from 0 to 1 based
- for (int idx = 0; idx < nLocalEdges; idx++) {
+ for (int idx = 0; idx < nLocalEdges*2; idx++) {
vertices_on_local_edges[idx] += 1;
}
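
vertices_on_local_edges stores two vertex indices per edge in one flat array, so the 0-to-1-based conversion has to touch nLocalEdges*2 entries; the old bound covered only half of them. A tiny standalone illustration with made-up indices:

#include <vector>

int main()
{
  const int nLocalEdges = 3;

  // Two 0-based vertex indices per edge, stored contiguously.
  int raw[] = {0, 1, 1, 2, 2, 0};
  std::vector<int> vertices_on_local_edges(raw, raw + nLocalEdges * 2);

  // GCRM indices are 0 based; every one of the nLocalEdges*2 entries
  // must be converted to 1 based, not just the first nLocalEdges.
  for (int idx = 0; idx < nLocalEdges * 2; idx++)
    vertices_on_local_edges[idx] += 1;

  return 0;
}
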
https://bitbucket.org/fathomteam/moab/commits/4a2930f18f90/
Changeset: 4a2930f18f90
Branch: None
User: iulian07
Date: 2014-05-07 14:44:54
Summary: add a parallel read test for GCRM
So far this is only a trivial-partition test, with no variable tests;
more are needed.
Affected #: 4 files
diff --git a/src/io/NCHelperGCRM.cpp b/src/io/NCHelperGCRM.cpp
index b7774e6..1ea1841 100644
--- a/src/io/NCHelperGCRM.cpp
+++ b/src/io/NCHelperGCRM.cpp
@@ -610,7 +610,8 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_async(std::vector<ReadNC::Va
ERRORR(MB_FAILURE, "not implemented");
break;
}
- case NC_DOUBLE: {
+ case NC_DOUBLE:
+ case NC_FLOAT: {
std::vector<double> tmpdoubledata(sz);
// In the case of ucd mesh, and on multiple proc,
@@ -673,10 +674,7 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_async(std::vector<ReadNC::Va
break;
}
- case NC_FLOAT: {
- ERRORR(MB_FAILURE, "not implemented");
- break;
- }
+
case NC_INT: {
ERRORR(MB_FAILURE, "not implemented");
break;
@@ -698,12 +696,12 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_async(std::vector<ReadNC::Va
if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
continue;
- for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ /*for (unsigned int t = 0; t < tstep_nums.size(); t++) {
dbgOut.tprintf(2, "Converting variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
ErrorCode tmp_rval = convert_variable(vdatas[i], t);
if (MB_SUCCESS != tmp_rval)
rval = tmp_rval;
- }
+ }*/
}
// Debug output, if requested
if (1 == dbgOut.get_verbosity()) {
@@ -761,7 +759,8 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset(std::vector<ReadNC::VarData>
ERRORR(MB_FAILURE, "not implemented");
break;
}
- case NC_DOUBLE: {
+ case NC_DOUBLE:
+ case NC_FLOAT: {
std::vector<double> tmpdoubledata(sz);
// In the case of ucd mesh, and on multiple proc,
@@ -818,10 +817,6 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset(std::vector<ReadNC::VarData>
break;
}
- case NC_FLOAT: {
- ERRORR(MB_FAILURE, "not implemented");
- break;
- }
case NC_INT: {
ERRORR(MB_FAILURE, "not implemented");
break;
@@ -843,12 +838,12 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset(std::vector<ReadNC::VarData>
if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
continue;
- for (unsigned int t = 0; t < tstep_nums.size(); t++) {
+ /* for (unsigned int t = 0; t < tstep_nums.size(); t++) {
dbgOut.tprintf(2, "Converting variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
ErrorCode tmp_rval = convert_variable(vdatas[i], t);
if (MB_SUCCESS != tmp_rval)
rval = tmp_rval;
- }
+ }*/
}
// Debug output, if requested
@@ -1296,6 +1291,20 @@ ErrorCode NCHelperGCRM::create_local_cells(const std::vector<int>& vertices_on_l
conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
start_vertex + local_vert_idx;
}
+ // make sure that if some nodes are repeated, they are at the end of the connectivity array
+ // so, pentagons as hexagons should have a connectivity like 123455 and not 122345
+ EntityHandle *pvertex= &(conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell ]);
+ for (int k = 0; k < num_edges_per_cell-2; k++)
+ {
+ if( *(pvertex+k) == *(pvertex+k+1) )
+ {
+ // shift the connectivity
+ for (int kk=k+1; kk<num_edges_per_cell-2; kk++)
+ {
+ *(pvertex+kk)=*(pvertex+kk+1);
+ }
+ }
+ }
}
}
diff --git a/test/io/read_gcrm_nc.cpp b/test/io/read_gcrm_nc.cpp
index 831358d..d38ca2c 100644
--- a/test/io/read_gcrm_nc.cpp
+++ b/test/io/read_gcrm_nc.cpp
@@ -41,11 +41,11 @@ int main(int argc, char* argv[])
argv[0] = argv[argc - argc]; // To remove the warnings in serial mode about unused variables
#endif
- //result += RUN_TEST(test_read_all);
+ result += RUN_TEST(test_read_all);
//result += RUN_TEST(test_read_onevar);
//result += RUN_TEST(test_read_onetimestep);
//result += RUN_TEST(test_read_nomesh);
- result += RUN_TEST(test_read_novars);
+ //result += RUN_TEST(test_read_novars);
//result += RUN_TEST(test_read_no_mixed_elements);
//result += RUN_TEST(test_read_no_edges);
//result += RUN_TEST(test_gather_onevar);
@@ -66,11 +66,14 @@ void test_read_all()
std::string opts;
get_options(opts);
+ opts+=";DEBUG_IO=2";
// Read mesh and read all variables at all timesteps
- ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
+ ErrorCode rval = mb.load_file(example, 0, opts.c_str());
CHECK_ERR(rval);
+ mb.write_file("gcrm.h5m");
+#if 0
int procs = 1;
#ifdef USE_MPI
ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
@@ -164,6 +167,7 @@ void test_read_all()
CHECK_REAL_EQUAL(25.001, val[0], eps);
CHECK_REAL_EQUAL(26.013, val[1], eps);
}
+#endif
}
void test_read_onevar()
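
For reference, the re-enabled test_read_all above boils down to a serial load of the GCRM file with DEBUG_IO=2 followed by an HDF5 dump. A minimal standalone sketch of that sequence, assuming gcrm_r3.nc sits in the working directory:

#include "moab/Core.hpp"

#include <string>

using namespace moab;

int main()
{
  Core moab_core;
  Interface& mb = moab_core;

  // DEBUG_IO=2 turns on verbose reader output, as in the test above.
  std::string opts = "DEBUG_IO=2";
  ErrorCode rval = mb.load_file("gcrm_r3.nc", 0, opts.c_str());
  if (MB_SUCCESS != rval)
    return 1;

  // Dump what was read to HDF5 for inspection, as the test does.
  rval = mb.write_file("gcrm.h5m");
  return (MB_SUCCESS == rval) ? 0 : 1;
}
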
diff --git a/test/parallel/Makefile.am b/test/parallel/Makefile.am
index c0824c9..4c147ac 100644
--- a/test/parallel/Makefile.am
+++ b/test/parallel/Makefile.am
@@ -48,7 +48,7 @@ else
endif
if NETCDF_FILE
- NETCDF_TESTS = scdpart read_nc_par ucdtrvpart mpastrvpart
+ NETCDF_TESTS = scdpart read_nc_par ucdtrvpart mpastrvpart gcrm_par
else
NETCDF_TESTS =
endif
@@ -92,6 +92,7 @@ scdpart_SOURCES = scdpart.cpp
read_nc_par_SOURCES = ../io/read_nc.cpp
ucdtrvpart_SOURCES = ucdtrvpart.cpp
mpastrvpart_SOURCES = mpastrvpart.cpp
+gcrm_par_SOURCES = gcrm_par.cpp
write_nc_par_SOURCES = ../io/write_nc.cpp
par_spatial_locator_test_SOURCES = par_spatial_locator_test.cpp
diff --git a/test/parallel/gcrm_par.cpp b/test/parallel/gcrm_par.cpp
new file mode 100644
index 0000000..e32912f
--- /dev/null
+++ b/test/parallel/gcrm_par.cpp
@@ -0,0 +1,625 @@
+#include "TestUtil.hpp"
+#include "moab/Core.hpp"
+#include "moab/ParallelComm.hpp"
+#include "moab/ProgOptions.hpp"
+#include "MBParallelConventions.h"
+#include "moab/ReadUtilIface.hpp"
+#include "MBTagConventions.hpp"
+
+#include <sstream>
+
+using namespace moab;
+
+#ifdef MESHDIR
+static const char example[] = STRINGIFY(MESHDIR) "/io/gcrm_r3.nc";
+#endif
+
+void test_read_onevar_trivial();
+void test_read_onevar_trivial_no_mixed_elements();
+#if defined(USE_MPI) && defined(HAVE_ZOLTAN)
+void test_read_onevar_rcbzoltan();
+void test_read_onevar_rcbzoltan_no_mixed_elements();
+#endif
+
+void test_read_mesh_parallel_trivial();
+void test_read_mesh_parallel_trivial_no_mixed_elements();
+#if defined(USE_MPI) && defined(HAVE_ZOLTAN)
+void test_read_mesh_parallel_rcbzoltan();
+void test_read_mesh_parallel_rcbzoltan_no_mixed_elements();
+#endif
+
+void test_gather_onevar_on_rank0();
+void test_gather_onevar_on_rank1();
+
+void test_multiple_loads_of_same_file();
+void test_multiple_loads_of_same_file_no_mixed_elements();
+
+// Helper functions
+void read_one_cell_var(bool rcbzoltan, bool no_mixed_elements);
+void read_mesh_parallel(bool rcbzoltan, bool no_mixed_elements);
+void gather_one_cell_var(int gather_set_rank);
+void multiple_loads_of_same_file(bool no_mixed_elements);
+
+std::string read_options;
+const double eps = 1e-20;
+
+int main(int argc, char* argv[])
+{
+ MPI_Init(&argc, &argv);
+ int result = 0;
+
+ //result += RUN_TEST(test_read_onevar_trivial);
+ //result += RUN_TEST(test_read_onevar_trivial_no_mixed_elements);
+#if defined(USE_MPI) && defined(HAVE_ZOLTAN)
+ //result += RUN_TEST(test_read_onevar_rcbzoltan);
+ //result += RUN_TEST(test_read_onevar_rcbzoltan_no_mixed_elements);
+#endif
+
+ result += RUN_TEST(test_read_mesh_parallel_trivial);
+ //result += RUN_TEST(test_read_mesh_parallel_trivial_no_mixed_elements);
+#if defined(USE_MPI) && defined(HAVE_ZOLTAN)
+ //result += RUN_TEST(test_read_mesh_parallel_rcbzoltan);
+ //result += RUN_TEST(test_read_mesh_parallel_rcbzoltan_no_mixed_elements);
+#endif
+
+ //result += RUN_TEST(test_gather_onevar_on_rank0);
+ //result += RUN_TEST(test_gather_onevar_on_rank1);
+
+ //result += RUN_TEST(test_multiple_loads_of_same_file);
+ //result += RUN_TEST(test_multiple_loads_of_same_file_no_mixed_elements);
+
+ MPI_Finalize();
+ return result;
+}
+
+void test_read_onevar_trivial()
+{
+ read_one_cell_var(false, false);
+}
+
+void test_read_onevar_trivial_no_mixed_elements()
+{
+ read_one_cell_var(false, true);
+}
+
+void test_read_onevar_rcbzoltan()
+{
+ read_one_cell_var(true, false);
+}
+
+void test_read_onevar_rcbzoltan_no_mixed_elements()
+{
+ read_one_cell_var(true, true);
+}
+
+void test_read_mesh_parallel_trivial()
+{
+ read_mesh_parallel(false, false);
+}
+
+void test_read_mesh_parallel_trivial_no_mixed_elements()
+{
+ read_mesh_parallel(false, true);
+}
+
+void test_read_mesh_parallel_rcbzoltan()
+{
+ read_mesh_parallel(true, false);
+}
+
+void test_read_mesh_parallel_rcbzoltan_no_mixed_elements()
+{
+ read_mesh_parallel(true, true);
+}
+
+void test_gather_onevar_on_rank0()
+{
+ gather_one_cell_var(0);
+}
+
+void test_gather_onevar_on_rank1()
+{
+ gather_one_cell_var(1);
+}
+
+void test_multiple_loads_of_same_file()
+{
+ multiple_loads_of_same_file(false);
+}
+
+void test_multiple_loads_of_same_file_no_mixed_elements()
+{
+ multiple_loads_of_same_file(true);
+}
+
+// Helper functions
+void read_one_cell_var(bool rcbzoltan, bool no_mixed_elements)
+{
+ Core moab;
+ Interface& mb = moab;
+
+ read_options = "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL;NO_EDGES;VARIABLE=ke";
+ if (rcbzoltan)
+ read_options = "PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;NO_EDGES;VARIABLE=ke";
+
+ if (no_mixed_elements)
+ read_options += ";NO_MIXED_ELEMENTS";
+
+ ErrorCode rval = mb.load_file(example, NULL, read_options.c_str());
+ CHECK_ERR(rval);
+
+ // Get local edges
+ Range local_edges;
+ rval = mb.get_entities_by_type(0, MBEDGE, local_edges);
+ CHECK_ERR(rval);
+ CHECK_EQUAL((size_t)0, local_edges.size());
+
+ // Get local cells
+ Range local_cells;
+ rval = mb.get_entities_by_type(0, MBPOLYGON, local_cells);
+ CHECK_ERR(rval);
+
+ Tag gid_tag;
+ rval = mb.tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE);
+ CHECK_ERR(rval);
+
+ std::vector<int> gids(local_cells.size());
+ rval = mb.tag_get_data(gid_tag, local_cells, &gids[0]);
+ Range local_cell_gids;
+ std::copy(gids.rbegin(), gids.rend(), range_inserter(local_cell_gids));
+
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ int procs = pcomm->proc_config().proc_size();
+ int rank = pcomm->proc_config().proc_rank();
+
+ // Make check runs this test on two processors
+ if (2 == procs) {
+ CHECK_EQUAL((size_t)321, local_cells.size());
+ CHECK_EQUAL((size_t)321, local_cell_gids.size());
+
+ // Check tag for cell variable ke at timestep 0
+ Tag ke_tag0;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+ CHECK_ERR(rval);
+
+ // Check tag for cell variable ke at timestep 1
+ Tag ke_tag1;
+ rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+ CHECK_ERR(rval);
+
+ // Get ke0 and ke1 tag values on 3 local cells
+ EntityHandle cell_ents[] = {local_cells[0], local_cells[160], local_cells[320]};
+ double ke0_val[3];
+ rval = mb.tag_get_data(ke_tag0, cell_ents, 3, ke0_val);
+ CHECK_ERR(rval);
+ double ke1_val[3];
+ rval = mb.tag_get_data(ke_tag1, cell_ents, 3, ke1_val);
+ CHECK_ERR(rval);
+
+ if (rcbzoltan) {
+ if (no_mixed_elements)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ else
+ CHECK_EQUAL((size_t)2, local_cells.psize());
+
+ CHECK_EQUAL((size_t)37, local_cell_gids.psize());
+
+ if (0 == rank) {
+ CHECK_EQUAL(1, (int)local_cell_gids[0]);
+ CHECK_EQUAL(284, (int)local_cell_gids[160]);
+ CHECK_EQUAL(630, (int)local_cell_gids[320]);
+
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(16.284, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.630, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
+ CHECK_REAL_EQUAL(26.284, ke1_val[1], eps);
+ CHECK_REAL_EQUAL(26.630, ke1_val[2], eps);
+ }
+ else if (1 == rank) {
+ CHECK_EQUAL(4, (int)local_cell_gids[0]);
+ CHECK_EQUAL(341, (int)local_cell_gids[160]);
+ CHECK_EQUAL(642, (int)local_cell_gids[320]);
+
+ CHECK_REAL_EQUAL(15.004, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(16.341, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(25.004, ke1_val[0], eps);
+ CHECK_REAL_EQUAL(26.341, ke1_val[1], eps);
+ CHECK_REAL_EQUAL(26.642, ke1_val[2], eps);
+ }
+ }
+ else {
+ CHECK_EQUAL((size_t)1, local_cell_gids.psize());
+
+ if (0 == rank) {
+ if (no_mixed_elements)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ else
+ CHECK_EQUAL((size_t)2, local_cells.psize());
+ CHECK_EQUAL(1, (int)local_cell_gids[0]);
+ CHECK_EQUAL(161, (int)local_cell_gids[160]);
+ CHECK_EQUAL(321, (int)local_cell_gids[320]);
+
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(16.161, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.321, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
+ CHECK_REAL_EQUAL(26.161, ke1_val[1], eps);
+ CHECK_REAL_EQUAL(26.321, ke1_val[2], eps);
+ }
+ else if (1 == rank) {
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ CHECK_EQUAL(322, (int)local_cell_gids[0]);
+ CHECK_EQUAL(482, (int)local_cell_gids[160]);
+ CHECK_EQUAL(642, (int)local_cell_gids[320]);
+
+ CHECK_REAL_EQUAL(16.322, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(16.482, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(26.322, ke1_val[0], eps);
+ CHECK_REAL_EQUAL(26.482, ke1_val[1], eps);
+ CHECK_REAL_EQUAL(26.642, ke1_val[2], eps);
+ }
+ }
+ }
+}
+
+void read_mesh_parallel(bool rcbzoltan, bool no_mixed_elements)
+{
+ Core moab;
+ Interface& mb = moab;
+
+ read_options = "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=";
+ if (rcbzoltan)
+ read_options = "PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=";
+
+ if (no_mixed_elements)
+ read_options += ";NO_MIXED_ELEMENTS";
+
+ ErrorCode rval = mb.load_file(example, NULL, read_options.c_str());
+ CHECK_ERR(rval);
+
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ int procs = pcomm->proc_config().proc_size();
+ int rank = pcomm->proc_config().proc_rank();
+
+ rval = pcomm->check_all_shared_handles();
+ CHECK_ERR(rval);
+
+ // Get local vertices
+ Range local_verts;
+ rval = mb.get_entities_by_type(0, MBVERTEX, local_verts);
+ CHECK_ERR(rval);
+
+ int verts_num = local_verts.size();
+#if 0
+ if (2 == procs) {
+ if (rcbzoltan) {
+ if (0 == rank)
+ CHECK_EQUAL(685, verts_num);
+ else if (1 == rank)
+ CHECK_EQUAL(685, verts_num); // Not owned vertices included
+ }
+ else {
+ if (0 == rank)
+ CHECK_EQUAL(1120, verts_num);
+ else if (1 == rank)
+ CHECK_EQUAL(1122, verts_num); // Not owned vertices included
+ }
+ }
+
+ rval = pcomm->filter_pstatus(local_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT);
+ CHECK_ERR(rval);
+
+ verts_num = local_verts.size();
+ if (2 == procs) {
+ if (rcbzoltan) {
+ if (0 == rank)
+ CHECK_EQUAL(685, verts_num);
+ else if (1 == rank)
+ CHECK_EQUAL(595, verts_num); // Not owned vertices excluded
+ }
+ else {
+ if (0 == rank)
+ CHECK_EQUAL(1120, verts_num);
+ else if (1 == rank)
+ CHECK_EQUAL(160, verts_num); // Not owned vertices excluded
+ }
+ }
+
+ // Get local edges
+ Range local_edges;
+ rval = mb.get_entities_by_type(0, MBEDGE, local_edges);
+ CHECK_ERR(rval);
+
+ int edges_num = local_edges.size();
+ if (2 == procs) {
+ if (rcbzoltan) {
+ if (0 == rank)
+ CHECK_EQUAL(1005, edges_num);
+ else if (1 == rank)
+ CHECK_EQUAL(1005, edges_num); // Not owned edges included
+ }
+ else {
+ if (0 == rank)
+ CHECK_EQUAL(1438, edges_num);
+ else if (1 == rank)
+ CHECK_EQUAL(1444, edges_num); // Not owned edges included
+ }
+ }
+
+ rval = pcomm->filter_pstatus(local_edges, PSTATUS_NOT_OWNED, PSTATUS_NOT);
+ CHECK_ERR(rval);
+
+ edges_num = local_edges.size();
+ if (2 == procs) {
+ if (rcbzoltan) {
+ if (0 == rank)
+ CHECK_EQUAL(1005, edges_num);
+ else if (1 == rank)
+ CHECK_EQUAL(915, edges_num); // Not owned edges excluded
+ }
+ else {
+ if (0 == rank)
+ CHECK_EQUAL(1438, edges_num);
+ else if (1 == rank)
+ CHECK_EQUAL(482, edges_num); // Not owned edges excluded
+ }
+ }
+
+ // Get local cells
+ Range local_cells;
+ rval = mb.get_entities_by_type(0, MBPOLYGON, local_cells);
+ CHECK_ERR(rval);
+
+ int cells_num = local_cells.size();
+ if (2 == procs) {
+ CHECK_EQUAL(321, cells_num);
+
+ if (rcbzoltan) {
+ if (no_mixed_elements)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ else
+ CHECK_EQUAL((size_t)2, local_cells.psize());
+ }
+ else {
+ if (0 == rank) {
+ if (no_mixed_elements)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ else
+ CHECK_EQUAL((size_t)2, local_cells.psize());
+ }
+ else if (1 == rank)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ }
+ }
+
+ rval = pcomm->filter_pstatus(local_cells, PSTATUS_NOT_OWNED, PSTATUS_NOT);
+ CHECK_ERR(rval);
+
+ cells_num = local_cells.size();
+ if (2 == procs) {
+ CHECK_EQUAL(321, cells_num);
+
+ if (rcbzoltan) {
+ if (no_mixed_elements)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ else
+ CHECK_EQUAL((size_t)2, local_cells.psize());
+ }
+ else {
+ if (0 == rank) {
+ if (no_mixed_elements)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ else
+ CHECK_EQUAL((size_t)2, local_cells.psize());
+ }
+ else if (1 == rank)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ }
+ }
+
+ std::cout << "proc: " << rank << " verts:" << verts_num << "\n";
+
+ int total_verts_num;
+ MPI_Reduce(&verts_num, &total_verts_num, 1, MPI_INTEGER, MPI_SUM, 0, pcomm->proc_config().proc_comm());
+ if (0 == rank) {
+ std::cout << "total vertices: " << total_verts_num << "\n";
+ CHECK_EQUAL(1280, total_verts_num);
+ }
+
+ std::cout << "proc: " << rank << " edges:" << edges_num << "\n";
+
+ int total_edges_num;
+ MPI_Reduce(&edges_num, &total_edges_num, 1, MPI_INTEGER, MPI_SUM, 0, pcomm->proc_config().proc_comm());
+ if (0 == rank) {
+ std::cout << "total edges: " << total_edges_num << "\n";
+ CHECK_EQUAL(1920, total_edges_num);
+ }
+
+ std::cout << "proc: " << rank << " cells:" << cells_num << "\n";
+
+ int total_cells_num;
+ MPI_Reduce(&cells_num, &total_cells_num, 1, MPI_INTEGER, MPI_SUM, 0, pcomm->proc_config().proc_comm());
+ if (0 == rank) {
+ std::cout << "total cells: " << total_cells_num << "\n";
+ CHECK_EQUAL(642, total_cells_num);
+ }
+#endif
+#ifdef HDF5_PARALLEL
+ std::string write_options("PARALLEL=WRITE_PART;");
+
+ std::string output_file = "test_gcrm";
+ if (rcbzoltan)
+ output_file += "_rcbzoltan";
+ if (no_mixed_elements)
+ output_file += "_no_mixed_elements";
+ output_file += ".h5m";
+
+ mb.write_file(output_file.c_str(), NULL, write_options.c_str());
+#endif
+}
+
+void gather_one_cell_var(int gather_set_rank)
+{
+ Core moab;
+ Interface& mb = moab;
+
+ EntityHandle file_set;
+ ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
+ CHECK_ERR(rval);
+
+ read_options = "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL;PARALLEL_RESOLVE_SHARED_ENTS";
+ std::ostringstream gather_set_option;
+ gather_set_option << ";GATHER_SET=" << gather_set_rank;
+ read_options += gather_set_option.str();
+
+ rval = mb.load_file(example, &file_set, read_options.c_str());
+ CHECK_ERR(rval);
+
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ int procs = pcomm->proc_config().proc_size();
+ int rank = pcomm->proc_config().proc_rank();
+
+ // Make sure gather_set_rank is valid
+ if (gather_set_rank < 0 || gather_set_rank >= procs)
+ return;
+
+ Range cells, cells_owned;
+ rval = mb.get_entities_by_type(file_set, MBPOLYGON, cells);
+ CHECK_ERR(rval);
+
+ // Get local owned cells
+ rval = pcomm->filter_pstatus(cells, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &cells_owned);
+ CHECK_ERR(rval);
+
+ EntityHandle gather_set = 0;
+ if (gather_set_rank == rank) {
+ // Get gather set
+ ReadUtilIface* readUtilIface;
+ mb.query_interface(readUtilIface);
+ rval = readUtilIface->get_gather_set(gather_set);
+ CHECK_ERR(rval);
+ assert(gather_set != 0);
+ }
+
+ Tag ke_tag0, gid_tag;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0, MB_TAG_DENSE);
+ CHECK_ERR(rval);
+
+ rval = mb.tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE);
+ CHECK_ERR(rval);
+
+ pcomm->gather_data(cells_owned, ke_tag0, gid_tag, gather_set, gather_set_rank);
+
+ if (gather_set_rank == rank) {
+ // Get gather set cells
+ Range gather_set_cells;
+ rval = mb.get_entities_by_type(gather_set, MBPOLYGON, gather_set_cells);
+ CHECK_EQUAL((size_t)642, gather_set_cells.size());
+ CHECK_EQUAL((size_t)2, gather_set_cells.psize());
+
+ // Check ke0 tag values on 4 gather set cells: first pentagon, last pentagon,
+ // first hexagon and last hexagon
+ EntityHandle cell_ents[] = {gather_set_cells[0], gather_set_cells[11],
+ gather_set_cells[12], gather_set_cells[641]};
+ double ke0_val[4];
+ rval = mb.tag_get_data(ke_tag0, &cell_ents[0], 4, ke0_val);
+
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
+ }
+}
+
+void multiple_loads_of_same_file(bool no_mixed_elements)
+{
+ Core moab;
+ Interface& mb = moab;
+
+ // Need a file set for nomesh to work right
+ EntityHandle file_set;
+ ErrorCode rval;
+ rval = mb.create_meshset(MESHSET_SET, file_set);
+ CHECK_ERR(rval);
+
+ // Read first only header information, no mesh, no variable
+ read_options = "PARALLEL=READ_PART;PARTITION;NOMESH;VARIABLE=;PARTITION_METHOD=TRIVIAL";
+ if (no_mixed_elements)
+ read_options += ";NO_MIXED_ELEMENTS";
+
+ rval = mb.load_file(example, &file_set, read_options.c_str());
+ CHECK_ERR(rval);
+
+ // Create mesh, no variable
+ read_options = "PARALLEL=READ_PART;PARTITION;PARALLEL_RESOLVE_SHARED_ENTS;PARTITION_METHOD=TRIVIAL;VARIABLE=";
+ if (no_mixed_elements)
+ read_options += ";NO_MIXED_ELEMENTS";
+
+ rval = mb.load_file(example, &file_set, read_options.c_str());
+ CHECK_ERR(rval);
+
+ // Read variable ke at timestep 0, no mesh
+ read_options = "PARALLEL=READ_PART;PARTITION;PARTITION_METHOD=TRIVIAL;NOMESH;VARIABLE=ke;TIMESTEP=0";
+ if (no_mixed_elements)
+ read_options += ";NO_MIXED_ELEMENTS";
+
+ rval = mb.load_file(example, &file_set, read_options.c_str());
+ CHECK_ERR(rval);
+
+ Range local_verts;
+ rval = mb.get_entities_by_type(file_set, MBVERTEX, local_verts);
+ CHECK_ERR(rval);
+
+ Range local_edges;
+ rval = mb.get_entities_by_type(file_set, MBEDGE, local_edges);
+ CHECK_ERR(rval);
+
+ Range local_cells;
+ rval = mb.get_entities_by_type(file_set, MBPOLYGON, local_cells);
+ CHECK_ERR(rval);
+
+ ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+ int procs = pcomm->proc_config().proc_size();
+ int rank = pcomm->proc_config().proc_rank();
+
+ // Make check runs this test on two processors
+ if (2 == procs) {
+ CHECK_EQUAL((size_t)321, local_cells.size());
+
+ // Check tag for cell variable ke at timestep 0
+ Tag ke_tag0;
+ rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+ CHECK_ERR(rval);
+
+ // Get ke0 tag values on 3 local cells
+ EntityHandle cell_ents[] = {local_cells[0], local_cells[160], local_cells[320]};
+ double ke0_val[3];
+ rval = mb.tag_get_data(ke_tag0, cell_ents, 3, ke0_val);
+ CHECK_ERR(rval);
+
+ if (0 == rank) {
+ CHECK_EQUAL((size_t)1120, local_verts.size());
+ CHECK_EQUAL((size_t)1438, local_edges.size());
+ if (no_mixed_elements)
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+ else
+ CHECK_EQUAL((size_t)2, local_cells.psize());
+
+ CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(16.161, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.321, ke0_val[2], eps);
+ }
+ else if (1 == rank) {
+ CHECK_EQUAL((size_t)1122, local_verts.size());
+ CHECK_EQUAL((size_t)1444, local_edges.size());
+ CHECK_EQUAL((size_t)1, local_cells.psize());
+
+ CHECK_REAL_EQUAL(16.322, ke0_val[0], eps);
+ CHECK_REAL_EQUAL(16.482, ke0_val[1], eps);
+ CHECK_REAL_EQUAL(16.642, ke0_val[2], eps);
+ }
+ }
+}
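
The helpers above exercise the reader's parallel options. A condensed standalone sketch of the trivial-partition read they use, with the same option string; the file path and the per-rank printout are illustrative only:

#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"

#include <iostream>

using namespace moab;

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  {
    Core moab_core;
    Interface& mb = moab_core;

    // Trivial-partition parallel read of the mesh only (no variables),
    // as in read_mesh_parallel() above.
    const char* opts =
      "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=";
    ErrorCode rval = mb.load_file("gcrm_r3.nc", NULL, opts);
    if (MB_SUCCESS != rval) {
      MPI_Finalize();
      return 1;
    }

    // Each rank reports how many polygons it ended up with.
    Range local_cells;
    mb.get_entities_by_type(0, MBPOLYGON, local_cells);
    ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
    std::cout << "rank " << pcomm->proc_config().proc_rank()
              << " cells: " << local_cells.size() << "\n";
  }
  MPI_Finalize();
  return 0;
}
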
https://bitbucket.org/fathomteam/moab/commits/fee40f824965/
Changeset: fee40f824965
Branch: master
User: iulian07
Date: 2014-05-07 15:00:12
Summary: add a Zoltan partition test for GCRM
The resulting partition looks odd; something is wrong.
Affected #: 2 files
diff --git a/src/io/NCHelperGCRM.cpp b/src/io/NCHelperGCRM.cpp
index 1ea1841..d420f33 100644
--- a/src/io/NCHelperGCRM.cpp
+++ b/src/io/NCHelperGCRM.cpp
@@ -863,10 +863,11 @@ ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
// If possible, apply Zoltan partition
if (_readNC->partMethod == ScdParData::RCBZOLTAN) {
#if defined(USE_MPI) && defined(HAVE_ZOLTAN)
- // Read x coordinates of cell centers
+ // Read lat/lon coordinates of cell centers
+ // then convert them to Cartesian coordinates and use those as input to the Zoltan partition
int xCellVarId;
- int success = NCFUNC(inq_varid)(_fileId, "xCell", &xCellVarId);
- ERRORS(success, "Failed to get variable id of xCell.");
+ int success = NCFUNC(inq_varid)(_fileId, "grid_center_lat", &xCellVarId);
+ ERRORS(success, "Failed to get variable id of grid_center_lat.");
std::vector<double> xCell(nLocalCells);
NCDF_SIZE read_start = static_cast<NCDF_SIZE>(start_cell_idx - 1);
NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nLocalCells);
@@ -875,20 +876,26 @@ ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
// Read y coordinates of cell centers
int yCellVarId;
- success = NCFUNC(inq_varid)(_fileId, "yCell", &yCellVarId);
- ERRORS(success, "Failed to get variable id of yCell.");
+ success = NCFUNC(inq_varid)(_fileId, "grid_center_lon", &yCellVarId);
+ ERRORS(success, "Failed to get variable id of grid_center_lon.");
std::vector<double> yCell(nLocalCells);
success = NCFUNCAG(_vara_double)(_fileId, yCellVarId, &read_start, &read_count, &yCell[0]);
ERRORS(success, "Failed to read yCell data.");
- // Read z coordinates of cell centers
- int zCellVarId;
- success = NCFUNC(inq_varid)(_fileId, "zCell", &zCellVarId);
- ERRORS(success, "Failed to get variable id of zCell.");
std::vector<double> zCell(nLocalCells);
- success = NCFUNCAG(_vara_double)(_fileId, zCellVarId, &read_start, &read_count, &zCell[0]);
- ERRORS(success, "Failed to read zCell data.");
-
+ // convert to xyz cartesian coordinates
+
+ double rad=8000; // this is just approx
+ for (int i=0; i<nLocalCells; i++)
+ {
+ double cosphi = cos(yCell[i]);
+ double zmult = sin(yCell[i]);
+ double xmult = cosphi * cos(xCell[i]);
+ double ymult = cosphi * sin(xCell[i]);
+ xCell[i] = rad * xmult;
+ yCell[i] = rad * ymult;
+ zCell[i] = rad * zmult;
+ }
// Zoltan partition using RCB; maybe more studies would be good, as to which partition
// is better
Interface*& mbImpl = _readNC->mbImpl;
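
The new code above turns the cell-center angles into Cartesian coordinates before handing them to Zoltan RCB. For reference, the standard conversion from (lon, lat) in radians to xyz on a sphere looks like the sketch below; the radius and sample values are made up and this is not lifted verbatim from the hunk:

#include <cmath>
#include <cstdio>

// Standard conversion of geographic (lon, lat), in radians, to Cartesian
// coordinates on a sphere of radius rad.
void lonlat_to_xyz(double lon, double lat, double rad,
                   double& x, double& y, double& z)
{
  const double cosphi = std::cos(lat);
  x = rad * cosphi * std::cos(lon);
  y = rad * cosphi * std::sin(lon);
  z = rad * std::sin(lat);
}

int main()
{
  double x, y, z;
  lonlat_to_xyz(0.5, 0.25, 8000.0, x, y, z); // made-up cell center and radius
  std::printf("%f %f %f\n", x, y, z);
  return 0;
}
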
diff --git a/test/parallel/gcrm_par.cpp b/test/parallel/gcrm_par.cpp
index e32912f..e9ff71d 100644
--- a/test/parallel/gcrm_par.cpp
+++ b/test/parallel/gcrm_par.cpp
@@ -58,7 +58,7 @@ int main(int argc, char* argv[])
result += RUN_TEST(test_read_mesh_parallel_trivial);
//result += RUN_TEST(test_read_mesh_parallel_trivial_no_mixed_elements);
#if defined(USE_MPI) && defined(HAVE_ZOLTAN)
- //result += RUN_TEST(test_read_mesh_parallel_rcbzoltan);
+ result += RUN_TEST(test_read_mesh_parallel_rcbzoltan);
//result += RUN_TEST(test_read_mesh_parallel_rcbzoltan_no_mixed_elements);
#endif
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.