#include <Cell.h>
Cell() = default

Cell(const Cell &) = default

std::pair<double, double> get_coordinate_range(enum Axis) const

void initialize(size_t i, size_t j, std::shared_ptr<UnitCell> unit_cell)

std::shared_ptr<UnitCell> unit() const
    Provide access to the UnitCell that this cell uses.

std::shared_ptr<Ioss::Region> region() const
    Provide access to the Ioss::Region in the unit_cell that this cell uses.

bool has_neighbor_i() const
    True if this cell has a neighbor to its "left" (lower i).

bool has_neighbor_j() const
    True if this cell has a neighbor "below it" (lower j).

bool has_neighbor(enum Loc loc) const

bool processor_boundary(enum Loc loc) const

size_t added_node_count(enum Mode mode, bool equivalence_nodes) const

size_t processor_boundary_node_count() const

template<typename INT> void populate_node_communication_map(const std::vector<INT> &node_map, std::vector<INT> &nodes, std::vector<INT> &procs) const

std::array<int, 9> categorize_processor_boundary_nodes(int rank) const

int rank(enum Loc loc) const
    The MPI rank that this cell, or the neighboring cells, will be on in a parallel run.

void set_rank(enum Loc loc, int my_rank)
    The MPI rank that this cell will be on in a parallel run.

std::vector<int> categorize_nodes(enum Mode mode) const

template<typename INT> std::vector<INT> generate_node_map(Mode mode, bool equivalance_nodes, INT) const

template<typename INT> void populate_neighbor(Loc location, const std::vector<INT> &map, const Cell &neighbor) const

std::shared_ptr<UnitCell> m_unitCell
    The UnitCell that occupies this location in the grid / lattice.

std::array<int, 9> m_ranks {{0, -1, -1, -1, -1, -1, -1, -1, -1}}
|
◆ Cell() [1/2]

Cell::Cell()

default

◆ Cell() [2/2]

Cell::Cell(const Cell &)

default
◆ added_node_count()
size_t Cell::added_node_count(enum Mode mode, bool equivalence_nodes) const

Number of nodes that will be added to the global node count when this cell is added to the grid; accounts for coincident nodes if this cell has neighbor(s).
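The accounting below is a sketch, not the library's implementation: it illustrates the inclusion-exclusion idea behind "accounts for coincident nodes", using hypothetical per-face node counts (min_i_face_nodes, min_j_face_nodes, corner_line_nodes) as inputs.

#include <cstddef>

// Sketch only: hypothetical inputs, not Cell's real data members.
std::size_t sketch_added_node_count(std::size_t total_nodes,
                                    std::size_t min_i_face_nodes,
                                    std::size_t min_j_face_nodes,
                                    std::size_t corner_line_nodes,
                                    bool has_neighbor_i, bool has_neighbor_j)
{
  std::size_t added = total_nodes;
  if (has_neighbor_i) {
    added -= min_i_face_nodes; // min_I face nodes already created by the "left" neighbor
  }
  if (has_neighbor_j) {
    added -= min_j_face_nodes; // min_J face nodes already created by the "below" neighbor
  }
  if (has_neighbor_i && has_neighbor_j) {
    added += corner_line_nodes; // the shared min_I-min_J line was subtracted twice
  }
  return added;
}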
◆ categorize_nodes()
std::vector<int> Cell::categorize_nodes(enum Mode mode) const

Create a vector of node_count length which has the following values:
- 0: Node that is not shared with any "lower" neighbors.
- 1: Node on the min_I face.
- 2: Node on the min_J face.
- 3: Node on the min_I-min_J line.

If mode == PROCESSOR, then modify due to processor boundaries...
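As a hedged illustration of how the returned vector might be consumed (the categories vector below is filled with made-up values; the real one comes from categorize_nodes()):

#include <array>
#include <cstddef>
#include <vector>

int main()
{
  // Stand-in for the vector returned by Cell::categorize_nodes(); values made up.
  std::vector<int> categories{0, 0, 1, 2, 3, 0, 1, 2};

  // Tally nodes per category: [0] not shared, [1] min_I face,
  // [2] min_J face, [3] min_I-min_J line.
  std::array<std::size_t, 4> counts{};
  for (int cat : categories) {
    counts[static_cast<std::size_t>(cat)]++;
  }

  // With both "lower" neighbors present, only category-0 nodes are new;
  // the rest coincide with nodes the neighbors already generated.
  std::size_t new_nodes = counts[0];
  (void)new_nodes;
  return 0;
}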
◆ categorize_processor_boundary_nodes()
std::array<int, 9> Cell::categorize_processor_boundary_nodes(int rank) const

Returns a std::array<int, 9> which categorizes whether the cells at each location are on the same rank as rank. A value of 1 means the cell at that location is on the same rank; a value of 0 means it is on a different rank. Used to determine which nodes have already been accounted for on this rank and which nodes this cell will add to the processor-local count.
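A minimal sketch of the mask described above, assuming an m_ranks-style array whose index 0 is this cell; this is an illustration, not the member's actual implementation:

#include <array>
#include <cstddef>

// Sketch only: `ranks` plays the role of Cell::m_ranks (index 0 = this cell).
std::array<int, 9> sketch_same_rank_mask(const std::array<int, 9> &ranks, int rank)
{
  std::array<int, 9> mask{};
  for (std::size_t i = 0; i < ranks.size(); ++i) {
    mask[i] = (ranks[i] == rank) ? 1 : 0; // 1: same rank, 0: different (or absent, -1)
  }
  return mask;
}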
◆ generate_node_map()
template<typename INT>
std::vector<INT> Cell::generate_node_map(Mode mode, bool equivalance_nodes, INT) const
◆ get_coordinate_range()
std::pair<double, double> Cell::get_coordinate_range(enum Axis axis) const
◆ has_neighbor()
bool Cell::has_neighbor(enum Loc loc) const

inline

Return neighbor information for each possible direction. This is only valid after grid.decompose() has been called.
◆ has_neighbor_i()
bool Cell::has_neighbor_i() const

inline

True if this cell has a neighbor to its "left" (lower i).
◆ has_neighbor_j()
bool Cell::has_neighbor_j() const

inline

True if this cell has a neighbor "below it" (lower j).
◆ initialize()
void Cell::initialize(size_t i, size_t j, std::shared_ptr<UnitCell> unit_cell)
◆ populate_neighbor()
template<typename INT>
void Cell::populate_neighbor(Loc location, const std::vector<INT> &map, const Cell &neighbor) const
◆ populate_node_communication_map()
template<typename INT>
void Cell::populate_node_communication_map(const std::vector<INT> &node_map, std::vector<INT> &nodes, std::vector<INT> &procs) const
◆ processor_boundary()
bool Cell::processor_boundary(enum Loc loc) const

inline

True if this cell has a processor boundary in the specified direction. Note that the cell cannot compute this itself; it is "told" this during decomposition of the owning Grid. There is a processor boundary if the rank of the cell at that location is different from the rank of this cell...
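A sketch of that rank comparison, using plain integers instead of the Cell/Loc types and assuming, as in the m_ranks initializer, that a missing neighbor is represented by -1:

// Sketch only; the real method works from the Loc-indexed m_ranks array.
bool sketch_processor_boundary(int my_rank, int neighbor_rank)
{
  // A boundary exists only when a neighbor is present (-1 means no neighbor)
  // and that neighbor was assigned a different rank during decomposition.
  return neighbor_rank >= 0 && neighbor_rank != my_rank;
}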
◆ processor_boundary_node_count()
size_t Cell::processor_boundary_node_count() const

Number of nodes that this cell adds to the processor-boundary node count. Assumes that cells are processed in "order", so this accounts for corner nodes shared with another cell... Note that a node shared by more than one processor (e.g., a corner node) counts for each processor it is shared with.
◆ rank()
int Cell::rank(enum Loc loc) const

inline

The MPI rank that this cell, or the neighboring cells, will be on in a parallel run.
◆ region()
std::shared_ptr<Ioss::Region> Cell::region() const

inline

Provide access to the Ioss::Region in the unit_cell that this cell uses.
◆ set_rank()
void Cell::set_rank(enum Loc loc, int my_rank)

inline

The MPI rank that this cell will be on in a parallel run.
◆ unit()
std::shared_ptr<UnitCell> Cell::unit() const

inline

Provide access to the UnitCell that this cell uses.
◆ m_communicationNodeCount
size_t Cell::m_communicationNodeCount {0}
mutable |
The number of node/proc pairs that this cell adds to the communication node map.
◆ m_communicationNodeOffset
size_t Cell::m_communicationNodeOffset {0}

mutable

The offset into the communication node output array for this cell in the file associated with the rank that this cell is on. Set by handle_communications() in Grid.C.
◆ m_globalElementIdOffset
std::map<std::string, size_t> Cell::m_globalElementIdOffset
◆ m_globalNodeIdOffset
int64_t Cell::m_globalNodeIdOffset {0}
◆ m_i
The i location of this entry in the grid.
◆ m_j
The j location of this entry in the grid.
◆ m_localElementIdOffset
std::map<std::string, size_t> Cell::m_localElementIdOffset
◆ m_localNodeIdOffset
int64_t Cell::m_localNodeIdOffset {0}
◆ m_localSurfaceOffset
std::map<std::string, size_t> Cell::m_localSurfaceOffset

For each surface/sideset, this is the offset into the output element/face lists for this cell's data. 0-based. Indexed by surface name.
◆ m_offX
double Cell::m_offX {0.0}

The offset that must be added to the x coordinates of the UnitCell to place it in the correct global location of the output mesh.
◆ m_offY
double Cell::m_offY {0.0}

The offset that must be added to the y coordinates of the UnitCell to place it in the correct global location of the output mesh.
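A minimal sketch of how these offsets are intended to be applied, assuming (for the example only) that the coordinates are available as separate x and y vectors:

#include <vector>

// Sketch only: the x/y vectors are a hypothetical coordinate layout.
void sketch_apply_cell_offsets(std::vector<double> &x, std::vector<double> &y,
                               double off_x, double off_y)
{
  for (double &xi : x) {
    xi += off_x; // shift to this cell's global i-position (m_offX)
  }
  for (double &yi : y) {
    yi += off_y; // shift to this cell's global j-position (m_offY)
  }
}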
◆ m_ranks
std::array<int, 9> Cell::m_ranks {{0, -1, -1, -1, -1, -1, -1, -1, -1}}

private

The MPI ranks of all surrounding cells, stored in the order:

    6 7 8        TL  T  TR
    4 0 5   ==   L   C  R
    1 2 3        BL  B  BR
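For illustration, a small lookup that spells out this index order; the label strings are just the abbreviations from the diagram above, not names used by the library:

#include <array>
#include <cstddef>

// Illustrative labels for the documented m_ranks index layout.
const char *ranks_index_label(std::size_t index)
{
  static const std::array<const char *, 9> labels{{
      "C",  // 0: this cell (center)
      "BL", // 1: below-left
      "B",  // 2: below
      "BR", // 3: below-right
      "L",  // 4: left
      "R",  // 5: right
      "TL", // 6: top-left
      "T",  // 7: top
      "TR"  // 8: top-right
  }};
  return index < labels.size() ? labels[index] : "?";
}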
◆ m_unitCell
std::shared_ptr<UnitCell> Cell::m_unitCell

private

The UnitCell that occupies this location in the grid / lattice.
◆ min_I_nodes
std::vector<int64_t> Cell::min_I_nodes

mutable

A vector containing the global node ids of the nodes on the min_I face of this unit cell. These nodes were generated by the "left" (lower i) neighbor. Once this cell uses this information, it can clear out the vector.
◆ min_J_nodes
std::vector<int64_t> Cell::min_J_nodes

mutable

A vector containing the global node ids of the nodes on the min_J face of this unit cell. These nodes were generated by the "below" (lower j) neighbor. Once this cell uses this information, it can clear out the vector.
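A sketch of the hand-off that min_I_nodes and min_J_nodes support: the lower neighbor fills the vector with the global ids it generated for the shared face, this cell pairs them with its own face nodes, then clears the vector. The local_face_nodes list and local_to_global map are hypothetical names for the example, not members of Cell:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

// Sketch only: local_face_nodes and local_to_global are hypothetical names.
void sketch_consume_shared_face(std::vector<std::int64_t> &shared_face_ids,
                                const std::vector<std::int64_t> &local_face_nodes,
                                std::map<std::int64_t, std::int64_t> &local_to_global)
{
  // Pair each local node on the shared face with the global id the
  // neighbor already assigned to the coincident node.
  std::size_t n = std::min(shared_face_ids.size(), local_face_nodes.size());
  for (std::size_t i = 0; i < n; ++i) {
    local_to_global[local_face_nodes[i]] = shared_face_ids[i];
  }
  // Once used, the information is no longer needed; release the storage.
  shared_face_ids.clear();
  shared_face_ids.shrink_to_fit();
}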
The documentation for this class was generated from the following files: