Namespaces
internal
Typedefs
using | BufferSystem = GenericBufferSystem< RecvBuffer, SendBuffer > |
using | OpenMPBufferSystem = GenericOpenMPBufferSystem< RecvBuffer, SendBuffer > |
using | RecvBuffer = GenericRecvBuffer<> |
using | SendBuffer = GenericSendBuffer<> |
Enumerations
enum | Operation { MIN, MAX, SUM, PRODUCT, LOGICAL_AND, BITWISE_AND, LOGICAL_OR, BITWISE_OR, LOGICAL_XOR, BITWISE_XOR } |
enum | SetOperation { INTERSECTION, UNION } |
Functions
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buffer, const blockforest::BlockID &id) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buffer, blockforest::BlockID &id) |
template<typename T , typename G , typename MT > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const Matrix3< MT > &m) |
template<typename T , typename MT > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, Matrix3< MT > &m) |
template<typename T , typename G , typename VT > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const math::Quaternion< VT > &quat) |
template<typename T , typename VT > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, math::Quaternion< VT > &quat) |
template<typename T , typename G , typename V > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const math::Rot3< V > &obj) |
template<typename T , typename V > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, math::Rot3< V > &objparam) |
template<typename T , typename G , typename VT > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const Vector2< VT > &vec) |
template<typename T , typename VT > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, Vector2< VT > &vec) |
template<typename T , typename G , typename VT > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const Vector3< VT > &vec) |
template<typename T , typename VT > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, Vector3< VT > &vec) |
template<typename T > | |
void | broadcastObject (T &object, int senderRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Broadcasts an arbitrarily sized object from one process to all other processes. More... | |
template<typename T , typename G , typename T1 , typename T2 > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::pair< T1, T2 > &pair) |
template<typename T , typename T1 , typename T2 > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::pair< T1, T2 > &pair) |
template<typename T , typename G , typename Cont > | |
void | sendNonResizableContainer (GenericSendBuffer< T, G > &buf, const Cont &container) |
template<typename T , typename Cont > | |
void | recvNonResizableContainer (GenericRecvBuffer< T > &buf, Cont &container) |
template<typename T , typename G , typename CT , std::size_t N> | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::array< CT, N > &array) |
template<typename T , typename CT , std::size_t N> | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::array< CT, N > &array) |
template<typename T , typename G , typename Cont > | |
void | sendContainer (GenericSendBuffer< T, G > &buf, const Cont &container) |
template<typename T , typename Cont > | |
void | recvContainer (GenericRecvBuffer< T > &buf, Cont &container) |
template<typename T , typename G , typename CT , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::vector< CT, CA > &c) |
template<typename T , typename CT , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::vector< CT, CA > &c) |
template<typename T , typename G , typename CT > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::basic_string< CT > &c) |
template<typename T , typename CT > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::basic_string< CT > &c) |
template<typename T , typename G , typename CT , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::deque< CT, CA > &c) |
template<typename T , typename CT , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::deque< CT, CA > &c) |
template<typename T , typename G , typename CT , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::list< CT, CA > &c) |
template<typename T , typename CT , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::list< CT, CA > &c) |
template<typename T , typename G > | |
GenericSendBuffer< T, G > & | packBoolVectorWithoutSize (GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools) |
template<typename T > | |
GenericRecvBuffer< T > & | unpackBoolVectorWithoutSize (GenericRecvBuffer< T > &buf, std::vector< bool > &bools, size_t size) |
template<typename T , typename G > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools) |
template<typename T > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::vector< bool > &bools) |
template<typename T , typename G , typename Cont > | |
void | sendAssocContainer (GenericSendBuffer< T, G > &buf, const Cont &container) |
template<typename T , typename Cont > | |
void | recvAssocContainer (GenericRecvBuffer< T > &buf, Cont &container) |
template<typename T , typename Cont > | |
void | recvMap (GenericRecvBuffer< T > &buf, Cont &container) |
template<typename T , typename G , typename CK , typename CC , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::set< CK, CC, CA > &c) |
template<typename T , typename CK , typename CC , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::set< CK, CC, CA > &c) |
template<typename T , typename G , typename CK , typename CC , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::unordered_set< CK, CC, CA > &c) |
template<typename T , typename CK , typename CC , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::unordered_set< CK, CC, CA > &c) |
template<typename T , typename G , typename CK , typename CC , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::multiset< CK, CC, CA > &c) |
template<typename T , typename CK , typename CC , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::multiset< CK, CC, CA > &c) |
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::map< CK, CT, CC, CA > &c) |
template<typename T , typename CK , typename CT , typename CC , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::map< CK, CT, CC, CA > &c) |
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::unordered_map< CK, CT, CC, CA > &c) |
template<typename T , typename CK , typename CT , typename CC , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::unordered_map< CK, CT, CC, CA > &c) |
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA > | |
GenericSendBuffer< T, G > & | operator<< (GenericSendBuffer< T, G > &buf, const std::multimap< CK, CT, CC, CA > &c) |
template<typename T , typename CK , typename CT , typename CC , typename CA > | |
GenericRecvBuffer< T > & | operator>> (GenericRecvBuffer< T > &buf, std::multimap< CK, CT, CC, CA > &c) |
SendBuffer & | operator<< (SendBuffer &buf, const RandomUUID &uuid) |
RecvBuffer & | operator>> (RecvBuffer &buf, RandomUUID &uuid) |
template<typename T > | |
std::vector< T > | gather (T value, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Gathers values from MPI processes and stores them into a std::vector. More... | |
template<typename T > | |
std::vector< T > | allGather (T value, MPI_Comm comm=MPI_COMM_WORLD) |
Gathers values from MPI processes and stores them into a std::vector on all processes. More... | |
template<> | |
std::vector< std::string > | gatherv (const std::vector< std::string > &values, int recvRank, MPI_Comm comm) |
template<> | |
std::vector< std::string > | allGatherv (const std::vector< std::string > &values, MPI_Comm comm) |
void | gathervBuffer (const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, int targetRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Gathers the buffer content on a single target process. More... | |
void | allGathervBuffer (const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, MPI_Comm comm=MPI_COMM_WORLD) |
Almost identical to mpi::gathervBuffer; the only difference is that the result is stored on every process. More... | |
template<typename T > | |
std::vector< T > | gatherv (const std::vector< T > &values, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Gathers values from MPI processes and stores them into a std::vector. More... | |
template<typename T > | |
std::vector< T > | allGatherv (const std::vector< T > &values, MPI_Comm comm=MPI_COMM_WORLD) |
Gathers values from MPI processes and stores them into a std::vector on all processes. More... | |
int | translateRank (const MPI_Comm srcComm, const MPI_Comm destComm, const int srcRank) |
This function maps the rank in one communicator to the rank in another communicator. More... | |
std::vector< int > | translateRank (const MPI_Comm srcComm, const MPI_Comm destComm, const std::vector< int > &srcRank) |
This function converts an array of ranks in one communicator to an array of ranks in another communicator. More... | |
void | writeMPIIO (const std::string &file, SendBuffer &buffer) |
Writes contents of local buffer to a single binary file via MPIIO. More... | |
void | readMPIIO (const std::string &file, RecvBuffer &buffer) |
Counterpart to writeMPIIO - has to be called with exactly the same process distribution. More... | |
static void | customTerminateHandler () |
Terminate Handler that calls MPI_Abort instead of std::abort. More... | |
void | writeMPITextFile (const std::string &filename, const std::string &processLocalPart, const MPI_Comm comm) |
Writes file using MPI IO with each process providing a part of it. More... | |
MPI_Op | toMPI_Op (Operation operation) |
template<typename T > | |
void | reduceInplace (T &value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a value over all processes in-place. More... | |
void | reduceInplace (bool &value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a boolean value over all processes in-place. More... | |
template<typename T > | |
T | reduce (const T value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a value over all processes. More... | |
bool | reduce (const bool value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a boolean value over all processes. More... | |
template<typename T > | |
void | reduceInplace (std::vector< T > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces values in a std::vector<T> over all processes in-place. More... | |
void | reduceInplace (std::vector< bool > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces boolean values in a std::vector<bool> over all processes in-place. More... | |
template<typename T > | |
void | reduceInplace (math::Vector3< T > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces values in a math::Vector3<T> over all processes in-place. More... | |
void | reduceInplace (math::Vector3< bool > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces booleans in a math::Vector3 over all processes in-place. More... | |
template<typename T > | |
math::Vector3< T > | reduce (const math::Vector3< T > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces values in a math::Vector3<T> over all processes. More... | |
math::Vector3< bool > | reduce (const math::Vector3< bool > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces boolean values in a Vector3 over all processes. More... | |
template<typename T > | |
T | allReduce (const T &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a value over all processes. More... | |
bool | allReduce (const bool value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a boolean value over all processes. More... | |
template<typename T > | |
void | allReduceInplace (T &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a value over all processes in-place. More... | |
void | allReduceInplace (bool &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces a boolean value over all processes in-place. More... | |
template<typename T > | |
void | allReduceInplace (std::vector< T > &values, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces values in a std::vector<T> over all processes in-place. More... | |
void | allReduceInplace (std::vector< bool > &bools, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces values in a std::vector<bool> over all processes in-place. More... | |
template<typename T > | |
void | allReduceInplace (math::Vector3< T > &values, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces values in math::Vector3<T> over all processes in-place. More... | |
void | allReduceInplace (math::Vector3< bool > &bools, Operation operation, MPI_Comm comm=MPI_COMM_WORLD) |
Reduces boolean values in math::Vector3 over all processes in-place. More... | |
template<typename T > | |
std::vector< T > | allReduceSet (std::vector< T > values, SetOperation op, MPI_Comm mpiCommunicator=MPI_COMM_WORLD, int mpiTag=0) |
Reduces a set of values on all processes without using global mpi communication. More... | |
template<typename T , typename G , typename E > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::Set< E > &set) |
template<typename T , typename E > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::Set< E > &set) |
template<typename T , typename G , typename GE > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::uid::UID< GE > &uid) |
template<typename T , typename GE > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::uid::UID< GE > &uid) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buffer, const BlockDataID &id) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buffer, BlockDataID &id) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buffer, const ConstBlockDataID &id) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buffer, ConstBlockDataID &id) |
template<typename T , typename G , typename CM , bool CO, typename FM , int EQU> | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm) |
template<typename T , typename CM , bool CO, typename FM , int EQU> | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::data::ContactHistory &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::data::ContactHistory &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const walberla::mesa_pd::data::particle_flags::FlagT &flags) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, walberla::mesa_pd::data::particle_flags::FlagT &flags) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ContactHistoryNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ContactHistoryNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ForceTorqueNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ForceTorqueNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::HeatFluxNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::HeatFluxNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::HydrodynamicForceTorqueNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::HydrodynamicForceTorqueNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::NewGhostParticleNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::NewGhostParticleNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::NumContactNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::NumContactNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleCopyNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleCopyNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleGhostCopyNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleGhostCopyNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleMigrationNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleMigrationNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemoteMigrationNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemoteMigrationNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalInformationNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalInformationNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleUpdateNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleUpdateNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityCorrectionNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityCorrectionNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityUpdateNotification &obj) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityUpdateNotification::Parameters &objparam) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const std::shared_ptr< mesa_pd::data::BaseShape > &bs) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, std::shared_ptr< mesa_pd::data::BaseShape > &bs) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::BaseHandle &handle) |
template<typename T , typename HandleT > | |
mpi::GenericRecvBuffer< T > & | unpackOpenMeshHandle (mpi::GenericRecvBuffer< T > &buf, HandleT &handle) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::VertexHandle &handle) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::FaceHandle &handle) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::HalfedgeHandle &handle) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::EdgeHandle &handle) |
template<typename T , typename G , typename Scalar , int DIM> | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::VectorT< Scalar, DIM > &v) |
template<typename T , typename Scalar , int DIM> | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::VectorT< Scalar, DIM > &v) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const pe::debug::BodyData &bd) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, pe::debug::BodyData &bd) |
template<typename T , typename G > | |
mpi::GenericSendBuffer< T, G > & | operator<< (mpi::GenericSendBuffer< T, G > &buf, const pe::Owner &owner) |
template<typename T > | |
mpi::GenericRecvBuffer< T > & | operator>> (mpi::GenericRecvBuffer< T > &buf, pe::Owner &owner) |
Variables
const uint_t | BUFFER_DEBUG_OVERHEAD = 0 |
using walberla::mpi::BufferSystem = GenericBufferSystem< RecvBuffer, SendBuffer >
using walberla::mpi::OpenMPBufferSystem = GenericOpenMPBufferSystem< RecvBuffer, SendBuffer >
using walberla::mpi::RecvBuffer = GenericRecvBuffer<>
using walberla::mpi::SendBuffer = GenericSendBuffer<>
std::vector<T> walberla::mpi::allGather( T value, MPI_Comm comm = MPI_COMM_WORLD )
Gathers values from MPI processes and stores them into a std::vector on all processes.
T has to be a native MPI_Datatype
value | The value gathered from the process |
comm | The MPI communicator used for communication |
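A minimal usage sketch (assuming MPI has already been initialized, e.g. through walberla::Environment):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   // Every process contributes its rank; afterwards every process holds
   // the vector {0, 1, ..., numProcesses-1}.
   std::vector< int > ranks = walberla::mpi::allGather( rank );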
std::vector< std::string > walberla::mpi::allGatherv( const std::vector< std::string > &values, MPI_Comm comm )
std::vector<T> walberla::mpi::allGatherv( const std::vector< T > &values, MPI_Comm comm = MPI_COMM_WORLD )
Gathers values from MPI processes and stores them into a std::vector on all processes.
T has to be a native MPI_Datatype
values | The values gathered from the process |
comm | The MPI communicator used for communication |
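A sketch of gathering differently sized per-process contributions (assuming an initialized MPI environment; the values are made up):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   // Process r contributes r+1 entries; the results are concatenated in
   // rank order on every process.
   std::vector< double > local( static_cast< std::size_t >( rank + 1 ), 1.0 );
   std::vector< double > all = walberla::mpi::allGatherv( local );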
void walberla::mpi::allGathervBuffer( const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, MPI_Comm comm )
Almost identical to mpi::gathervBuffer; the only difference is that the result is stored on every process.
inline bool walberla::mpi::allReduce( const bool value, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a boolean value over all processes.
Overload for boolean values. The operation has to be one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR.
value | The boolean value to be reduced |
operation | The operation to be performed |
comm | The MPI communicator used for communication |
T walberla::mpi::allReduce( const T &value, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a value over all processes.
T has to be an integer or floating point value
value | The value to be reduced |
operation | The operation to be performed |
comm | The MPI communicator used for communication |
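For example, computing a global maximum that every process receives (the local value is made up):

   double localError = 1.0e-3; // made-up per-process value
   double maxError   = walberla::mpi::allReduce( localError, walberla::mpi::MAX );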
inline void walberla::mpi::allReduceInplace( bool &value, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a boolean value over all processes in-place.
Overload for boolean values.
value | The boolean value to be reduced |
operation | The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR) |
comm | The MPI communicator used for communication |
inline void walberla::mpi::allReduceInplace( math::Vector3< bool > &bools, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces boolean values in math::Vector3 over all processes in-place.
Overload for boolean values.
values | The values to be reduced |
operation | The operation to be performed |
comm | The MPI communicator used for communication |
void walberla::mpi::allReduceInplace( math::Vector3< T > &values, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces values in math::Vector3<T> over all processes in-place.
T has to be an integer or floating point value
values | The values to be reduced |
operation | The operation to be performed |
comm | The MPI communicator used for communication |
inline void walberla::mpi::allReduceInplace( std::vector< bool > &bools, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces values in a std::vector<bool> over all processes in-place.
Specialization of allReduceInplace<T>
bools | The boolean values to be reduced |
operation | The operation to be performed (one of BITWISE_AND, BITWISE_OR or BITWISE_XOR) |
comm | The MPI communicator used for communication |
void walberla::mpi::allReduceInplace( std::vector< T > &values, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces values in a std::vector<T> over all processes in-place.
T has to be an integer or floating point value
values | The values to be reduced |
operation | The operation to be performed |
comm | The MPI communicator used for communication |
void walberla::mpi::allReduceInplace( T &value, Operation operation, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a value over all processes in-place.
T has to be an integer or floating point value
value | The value to be reduced |
operation | The operation to be performed |
comm | The MPI communicator used for communication |
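A sketch of an element-wise global sum (the histogram contents are made up):

   // Every process fills its local histogram; after the call each process
   // holds the element-wise sum over all processes.
   std::vector< int > histogram( 64, 1 ); // made-up local counts
   walberla::mpi::allReduceInplace( histogram, walberla::mpi::SUM );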
std::vector<T> walberla::mpi::allReduceSet( std::vector< T > values, SetOperation op, MPI_Comm mpiCommunicator = MPI_COMM_WORLD, int mpiTag = 0 )
Reduces a set of values on all processes without using global MPI communication.
The algorithm performs log(n) communication steps, where n is the number of processes in mpiCommunicator. The returned vector is a sorted set of unique values.
values | The local input values. Duplicates will internally be removed. Values have to be buffer packable and sortable. |
op | The operation to be performed on the set: intersection or union. |
mpiCommunicator | MPI communicator used for the reduction. |
mpiTag | MPI tag used for the reduction. |
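A sketch computing the union of per-process ID sets (the values are made up):

   std::vector< walberla::uint_t > localIds = { 3, 7, 42 }; // made-up local IDs
   std::vector< walberla::uint_t > globalIds =
      walberla::mpi::allReduceSet( localIds, walberla::mpi::UNION );
   // globalIds is a sorted, duplicate-free set, identical on every process.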
void walberla::mpi::broadcastObject( T &object, int senderRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Broadcasts an arbitrarily sized object from one process to all other processes.
T has to be able to be packed into SendBuffer and unpacked from RecvBuffer.
object | The object to be broadcasted |
senderRank | The rank of the process sending the object |
comm | The MPI communicator used for communication |
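A sketch broadcasting a std::string, which is buffer-packable via the operators listed above (the content is made up):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   std::string config;
   if( rank == 0 )
      config = "omega 1.8"; // only the sender needs the actual content
   walberla::mpi::broadcastObject( config ); // senderRank defaults to 0
   // config now equals "omega 1.8" on every process.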
static void walberla::mpi::customTerminateHandler()
Terminate Handler that calls MPI_Abort instead of std::abort.
The terminate handler is called when an exception is not caught and the program has to be aborted. The standard terminate handler prints exception.what() and calls std::abort(). When MPI is initialized, we overwrite the terminate handler so that MPI_Abort is called instead.
std::vector<T> walberla::mpi::gather( T value, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Gathers values from MPI processes and stores them into a std::vector.
T has to be a native MPI_Datatype
value | The value gathered from the process |
recvRank | The rank of the process receiving the gathered information |
comm | The MPI communicator used for communication |
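A sketch gathering one value per process on rank 0 (presumably the returned vector is only filled on recvRank):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   std::vector< int > ranks = walberla::mpi::gather( rank, 0 );
   if( rank == 0 )
   {
      // ranks holds {0, 1, ..., numProcesses-1}
   }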
std::vector< std::string > walberla::mpi::gatherv( const std::vector< std::string > &values, int recvRank, MPI_Comm comm )
std::vector<T> walberla::mpi::gatherv( const std::vector< T > &values, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Gathers values from MPI processes and stores them into a std::vector.
T has to be a native MPI_Datatype
values | The values gathered from the process |
recvRank | The rank of the process receiving the gathered information |
comm | The MPI communicator used for communication |
void walberla::mpi::gathervBuffer( const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, int targetRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Gathers the buffer content on a single target process.
[in] | sendBuffer | sendBuffer with (possibly) different size on each process |
[out] | recvBuffer | recvBuffer, which is left unchanged on all processes except targetRank; on targetRank, recvBuffer holds the gathered result |
[in] | targetRank | rank of the process where data is gathered |
[in] | comm | mpi communicator to use |
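A sketch of gathering heterogeneous per-process payloads (the payload is made up; unpacking must mirror the packing order):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   walberla::mpi::SendBuffer sendBuffer;
   sendBuffer << rank << std::string( "status ok" ); // arbitrary per-process payload
   walberla::mpi::RecvBuffer recvBuffer;
   walberla::mpi::gathervBuffer( sendBuffer, recvBuffer, 0 );
   // On rank 0, recvBuffer now holds all payloads in rank order; unpack them
   // with a matching sequence of operator>> calls.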
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::array< CT, N > &array )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::basic_string< CT > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::deque< CT, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::list< CT, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::map< CK, CT, CC, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::multimap< CK, CT, CC, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::multiset< CK, CC, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::pair< T1, T2 > &pair )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::set< CK, CC, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::unordered_map< CK, CT, CC, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::unordered_set< CK, CC, CA > &c )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools )
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &buf, const std::vector< CT, CA > &c )
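These operators pack a container's size followed by its elements, so the matching operator>> restores it on the receiving side. A local round-trip sketch (assuming RecvBuffer can be constructed from a SendBuffer; in a real run the bytes would travel via MPI):

   walberla::mpi::SendBuffer sb;
   std::map< int, std::string > out = { { 1, "one" }, { 2, "two" } };
   sb << out;
   walberla::mpi::RecvBuffer rb( sb );
   std::map< int, std::string > in;
   rb >> in; // in == out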
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const math::Quaternion< VT > &quat )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const math::Rot3< V > &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const Matrix3< MT > &m )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ContactHistoryNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::data::ContactHistory &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ForceTorqueNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::HeatFluxNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::HydrodynamicForceTorqueNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::NewGhostParticleNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::NumContactNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleCopyNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleGhostCopyNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleMigrationNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemoteMigrationNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalInformationNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleUpdateNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityCorrectionNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityUpdateNotification &obj )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::BaseHandle &handle )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::VectorT< Scalar, DIM > &v )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const pe::debug::BodyData &bd )
inline mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const pe::Owner &owner )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const std::shared_ptr< mesa_pd::data::BaseShape > &bs )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const Vector2< VT > &vec )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const Vector3< VT > &vec )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buf, const walberla::mesa_pd::data::particle_flags::FlagT &flags )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buffer, const BlockDataID &id )
inline mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buffer, const blockforest::BlockID &id )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buffer, const ConstBlockDataID &id )
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buffer, const walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm )
inline mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buffer, const walberla::Set< E > &set )
inline mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &buffer, const walberla::uid::UID< GE > &uid )
inline SendBuffer& walberla::mpi::operator<< ( SendBuffer &buf, const RandomUUID &uuid )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::array< CT, N > &array )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::basic_string< CT > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::deque< CT, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::list< CT, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::map< CK, CT, CC, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::multimap< CK, CT, CC, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::multiset< CK, CC, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::pair< T1, T2 > &pair )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::set< CK, CC, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::unordered_map< CK, CT, CC, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::unordered_set< CK, CC, CA > &c )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::vector< bool > &bools )
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &buf, std::vector< CT, CA > &c )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, math::Quaternion< VT > &quat )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, math::Rot3< V > &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, Matrix3< MT > &m )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ContactHistoryNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::data::ContactHistory &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ForceTorqueNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::HeatFluxNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::HydrodynamicForceTorqueNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::NewGhostParticleNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::NumContactNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleCopyNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleGhostCopyNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleMigrationNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemoteMigrationNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalInformationNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleUpdateNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityCorrectionNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityUpdateNotification::Parameters &objparam )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, OpenMesh::EdgeHandle &handle )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, OpenMesh::FaceHandle &handle )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, OpenMesh::HalfedgeHandle &handle )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, OpenMesh::VectorT< Scalar, DIM > &v )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, OpenMesh::VertexHandle &handle )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, pe::debug::BodyData &bd )
inline mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, pe::Owner &owner )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, std::shared_ptr< mesa_pd::data::BaseShape > &bs )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, Vector2< VT > &vec )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, Vector3< VT > &vec )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buf, walberla::mesa_pd::data::particle_flags::FlagT &flags )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buffer, BlockDataID &id )
inline mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buffer, blockforest::BlockID &id )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buffer, ConstBlockDataID &id )
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buffer, walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm )
inline mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buffer, walberla::Set< E > &set )
inline mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &buffer, walberla::uid::UID< GE > &uid )
inline RecvBuffer& walberla::mpi::operator>> ( RecvBuffer &buf, RandomUUID &uuid )
GenericSendBuffer<T,G>& walberla::mpi::packBoolVectorWithoutSize ( GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools )
void walberla::mpi::readMPIIO ( const std::string &file, RecvBuffer &buffer )
Counterpart to writeMPIIO - has to be called with exactly the same process distribution.
Reads the local part of the data into a buffer.
void walberla::mpi::recvAssocContainer ( GenericRecvBuffer< T > &buf, Cont &container )
void walberla::mpi::recvContainer ( GenericRecvBuffer< T > &buf, Cont &container )
void walberla::mpi::recvMap ( GenericRecvBuffer< T > &buf, Cont &container )
void walberla::mpi::recvNonResizableContainer ( GenericRecvBuffer< T > &buf, Cont &container )
inline bool walberla::mpi::reduce( const bool value, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a boolean value over all processes.
value | The boolean value to be reduced |
operation | The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR) |
recvRank | The rank of the process receiving the reduced value |
comm | The MPI communicator used for communication |
inline math::Vector3<bool> walberla::mpi::reduce( const math::Vector3< bool > &values, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces boolean values in a Vector3 over all processes.
values | The boolean values to be reduced |
operation | The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR) |
recvRank | The rank of the process receiving the reduced value |
comm | The MPI communicator used for communication |
math::Vector3<T> walberla::mpi::reduce( const math::Vector3< T > &values, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces values in a math::Vector3<T> over all processes.
T has to be an integer or floating point value
values | The values to be reduced |
operation | The operation to be performed |
recvRank | The rank of the process receiving the reduced value |
comm | The MPI communicator used for communication |
T walberla::mpi::reduce( const T value, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a value over all processes.
T has to be an integer or floating point value
value | The value to be reduced |
operation | The operation to be performed |
recvRank | The rank of the process receiving the reduced value |
comm | The MPI communicator used for communication |
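A sketch of a global sum delivered only to rank 0 (the local value is made up):

   double localMass = 1.0; // made-up per-process contribution
   double totalMass = walberla::mpi::reduce( localMass, walberla::mpi::SUM, 0 );
   // totalMass holds the global sum on rank 0 only.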
inline void walberla::mpi::reduceInplace( bool &value, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a boolean value over all processes in-place.
value | The boolean value to be reduced |
operation | The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR) |
recvRank | The rank of the process receiving the reduced value |
comm | The MPI communicator used for communication |
inline void walberla::mpi::reduceInplace( math::Vector3< bool > &values, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces booleans in a math::Vector3 over all processes in-place.
Overload for boolean values.
values | The values to be reduced |
operation | The operation to be performed |
recvRank | The rank of the process receiving the reduced values |
comm | The MPI communicator used for communication |
void walberla::mpi::reduceInplace( math::Vector3< T > &values, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces values in a math::Vector3<T> over all processes in-place.
T has to be an integer or floating point value
values | The values to be reduced |
operation | The operation to be performed |
recvRank | The rank of the process receiving the reduced values |
comm | The MPI communicator used for communication |
inline void walberla::mpi::reduceInplace( std::vector< bool > &values, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces boolean values in a std::vector<bool> over all processes in-place.
Specialization of reduceInplace<T>
values | The boolean values to be reduced |
operation | The operation to be performed (one of BITWISE_AND, BITWISE_OR or BITWISE_XOR) |
recvRank | The rank of the process receiving the reduced values |
comm | The MPI communicator used for communication |
void walberla::mpi::reduceInplace( std::vector< T > &values, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces values in a std::vector<T> over all processes in-place.
T has to be an integer or floating point value
values | The values to be reduced |
operation | The operation to be performed |
recvRank | The rank of the process receiving the reduced values |
comm | The MPI communicator used for communication |
void walberla::mpi::reduceInplace( T &value, Operation operation, int recvRank = 0, MPI_Comm comm = MPI_COMM_WORLD )
Reduces a value over all processes in-place.
T has to be an integer or floating point value
value | The value to be reduced |
operation | The operation to be performed |
recvRank | The rank of the process receiving the reduced value |
comm | The MPI communicator used for communication |
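A sketch reducing a scalar in-place towards rank 0 (the local value is made up):

   double residual = 0.5; // made-up per-process value
   walberla::mpi::reduceInplace( residual, walberla::mpi::MAX, 0 );
   // On rank 0, residual now holds the global maximum.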
void walberla::mpi::sendAssocContainer ( GenericSendBuffer< T, G > &buf, const Cont &container )
void walberla::mpi::sendContainer ( GenericSendBuffer< T, G > &buf, const Cont &container )
void walberla::mpi::sendNonResizableContainer ( GenericSendBuffer< T, G > &buf, const Cont &container )
inline MPI_Op walberla::mpi::toMPI_Op( Operation operation )
int walberla::mpi::translateRank( const MPI_Comm srcComm, const MPI_Comm destComm, const int srcRank )
This function maps the rank in one communicator to the rank in another communicator.
srcComm | source communicator |
destComm | destination communicator |
srcRank | rank in the source communicator |
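A sketch mapping a world rank into a sub-communicator created with MPI_Comm_split (error handling omitted):

   int worldRank;
   MPI_Comm_rank( MPI_COMM_WORLD, &worldRank );
   MPI_Comm subComm;
   MPI_Comm_split( MPI_COMM_WORLD, worldRank % 2, worldRank, &subComm );
   int subRank = walberla::mpi::translateRank( MPI_COMM_WORLD, subComm, worldRank );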
std::vector< int > walberla::mpi::translateRank( const MPI_Comm srcComm, const MPI_Comm destComm, const std::vector< int > &srcRank )
This function converts an array of ranks in one communicator to an array of ranks in another communicator.
srcComm | source communicator |
destComm | destination communicator |
srcRank | source ranks |
GenericRecvBuffer<T>& walberla::mpi::unpackBoolVectorWithoutSize ( GenericRecvBuffer< T > &buf, std::vector< bool > &bools, size_t size )
mpi::GenericRecvBuffer<T>& walberla::mpi::unpackOpenMeshHandle ( mpi::GenericRecvBuffer< T > &buf, HandleT &handle )
void walberla::mpi::writeMPIIO( const std::string &file, SendBuffer &buffer )
Writes contents of local buffer to a single binary file via MPIIO.
Has to be called by all processes
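A checkpoint-style round trip (write and read must use the same process distribution; the file name and payload are made up):

   walberla::mpi::SendBuffer out;
   out << 42 << 3.14; // process-local data
   walberla::mpi::writeMPIIO( "checkpoint.bin", out );

   walberla::mpi::RecvBuffer in;
   walberla::mpi::readMPIIO( "checkpoint.bin", in );
   int i; double d;
   in >> i >> d; // unpack in the same order as packed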
void walberla::mpi::writeMPITextFile( const std::string &filename, const std::string &processLocalPart, const MPI_Comm comm )
Writes file using MPI IO with each process providing a part of it.
This method has to be called collectively by all processes in comm. The file will be assembled in the order of the ranks of the calling processes.
filename | The name of the file to be written |
processLocalPart | The part of the file belonging to the calling process (size may differ among processes) |
comm | The MPI communicator used for communication |
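A sketch where every process contributes one line (the file name is made up):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   std::ostringstream oss;
   oss << "process " << rank << " finished\n";
   walberla::mpi::writeMPITextFile( "run.log", oss.str(), MPI_COMM_WORLD );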
const uint_t walberla::mpi::BUFFER_DEBUG_OVERHEAD = 0 |