walberla::mpi Namespace Reference

Namespaces

 internal
 

Classes

struct  BufferSizeTrait
 
struct  BufferSizeTrait< BlockDataID >
 
struct  BufferSizeTrait< blockforest::BlockID >
 
struct  BufferSizeTrait< bool >
 
struct  BufferSizeTrait< Cell >
 
struct  BufferSizeTrait< CellInterval >
 
struct  BufferSizeTrait< char >
 
struct  BufferSizeTrait< ConstBlockDataID >
 
struct  BufferSizeTrait< double >
 
struct  BufferSizeTrait< float >
 
struct  BufferSizeTrait< free_surface::bubble_model::Bubble >
 
struct  BufferSizeTrait< free_surface::bubble_model::DistanceInfo >
 
struct  BufferSizeTrait< int >
 
struct  BufferSizeTrait< long >
 
struct  BufferSizeTrait< long long >
 
struct  BufferSizeTrait< math::Rot3< V > >
 
struct  BufferSizeTrait< mesa_pd::ForceTorqueNotification >
 
struct  BufferSizeTrait< mesa_pd::HeatFluxNotification >
 
struct  BufferSizeTrait< mesa_pd::HydrodynamicForceTorqueNotification >
 
struct  BufferSizeTrait< mesa_pd::NumContactNotification >
 
struct  BufferSizeTrait< mesa_pd::ParticleMigrationNotification >
 
struct  BufferSizeTrait< mesa_pd::ParticleRemoteMigrationNotification >
 
struct  BufferSizeTrait< mesa_pd::ParticleRemovalNotification >
 
struct  BufferSizeTrait< mesa_pd::VelocityCorrectionNotification >
 
struct  BufferSizeTrait< mesa_pd::VelocityUpdateNotification >
 
struct  BufferSizeTrait< OpenMesh::EdgeHandle >
 
struct  BufferSizeTrait< OpenMesh::FaceHandle >
 
struct  BufferSizeTrait< OpenMesh::HalfedgeHandle >
 
struct  BufferSizeTrait< OpenMesh::VectorT< Scalar, DIM > >
 
struct  BufferSizeTrait< OpenMesh::VertexHandle >
 
struct  BufferSizeTrait< pe::Owner >
 
struct  BufferSizeTrait< RandomUUID >
 
struct  BufferSizeTrait< short >
 
struct  BufferSizeTrait< signed char >
 
struct  BufferSizeTrait< std::array< T, N > >
 
struct  BufferSizeTrait< std::basic_string< T > >
 
struct  BufferSizeTrait< std::deque< T, A > >
 
struct  BufferSizeTrait< std::list< T, A > >
 
struct  BufferSizeTrait< std::map< K, T, C, A > >
 
struct  BufferSizeTrait< std::multimap< K, T, C, A > >
 
struct  BufferSizeTrait< std::multiset< T, C, A > >
 
struct  BufferSizeTrait< std::pair< T1, T2 > >
 
struct  BufferSizeTrait< std::set< T, C, A > >
 
struct  BufferSizeTrait< std::unordered_map< K, T, C, A > >
 
struct  BufferSizeTrait< std::unordered_set< T, C, A > >
 
struct  BufferSizeTrait< std::vector< T, A > >
 
struct  BufferSizeTrait< T, typename std::enable_if< std::is_enum< T >::value >::type >
 
struct  BufferSizeTrait< unsigned char >
 
struct  BufferSizeTrait< unsigned int >
 
struct  BufferSizeTrait< unsigned long >
 
struct  BufferSizeTrait< unsigned long long >
 
struct  BufferSizeTrait< unsigned short >
 
struct  BufferSizeTrait< walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > >
 
struct  BufferSizeTrait< walberla::math::GenericAABB< T > >
 
struct  BufferSizeTrait< walberla::math::Matrix3< VT > >
 
struct  BufferSizeTrait< walberla::math::Quaternion< VT > >
 
struct  BufferSizeTrait< walberla::math::Vector2< VT > >
 
struct  BufferSizeTrait< walberla::math::Vector3< VT > >
 
struct  BufferSizeTrait< walberla::mesa_pd::data::particle_flags::FlagT >
 
struct  BufferSizeTrait< walberla::pe::debug::BodyData >
 
struct  BufferSizeTrait< walberla::Set< T > >
 
struct  BufferSizeTrait< walberla::uid::UID< GE > >
 
class  Datatype
 RAII class for MPI data types that commits and frees them. More...
 
class  Environment
 RAII Object to initialize and finalize MPI. More...
 
class  GenericBufferSystem
 Manages MPI Communication with a set of known communication partners. More...
 
class  GenericOpenMPBufferSystem
 Wrapper around BufferSystem for OpenMP parallel MPI communication. More...
 
class  GenericRecvBuffer
 Implementation of an MPI receive buffer. More...
 
class  GenericSendBuffer
 Implementation of an MPI send buffer. More...
 
class  MPIManager
 Encapsulates MPI Rank/Communicator information. More...
 
class  TokenizedScope
 Object that starts tokenizing in constructor and ends tokenizing when going out of scope. More...
 
class  Tokenizing
 MPI tokenizing ensures that not more than N processes execute the same code portion simultaneously. More...
 

Typedefs

using BufferSystem = GenericBufferSystem< RecvBuffer, SendBuffer >
 
using OpenMPBufferSystem = GenericOpenMPBufferSystem< RecvBuffer, SendBuffer >
 
using RecvBuffer = GenericRecvBuffer<>
 
using SendBuffer = GenericSendBuffer<>
 

Enumerations

enum  Operation {
  MIN, MAX, SUM, PRODUCT,
  LOGICAL_AND, BITWISE_AND, LOGICAL_OR, BITWISE_OR,
  LOGICAL_XOR, BITWISE_XOR
}
 
enum  SetOperation { INTERSECTION, UNION }
 

Functions

template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const blockforest::BlockID &id)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, blockforest::BlockID &id)
 
template<typename T , typename G , typename MT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const Matrix3< MT > &m)
 
template<typename T , typename MT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, Matrix3< MT > &m)
 
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const math::Quaternion< VT > &quat)
 
template<typename T , typename VT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, math::Quaternion< VT > &quat)
 
template<typename T , typename G , typename V >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const math::Rot3< V > &obj)
 
template<typename T , typename V >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, math::Rot3< V > &objparam)
 
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const Vector2< VT > &vec)
 
template<typename T , typename VT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, Vector2< VT > &vec)
 
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const Vector3< VT > &vec)
 
template<typename T , typename VT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, Vector3< VT > &vec)
 
template<typename T >
void broadcastObject (T &object, int senderRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Broadcasts an arbitrarily sized object from one process to all other processes. More...
 
template<typename T , typename G , typename T1 , typename T2 >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::pair< T1, T2 > &pair)
 
template<typename T , typename T1 , typename T2 >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::pair< T1, T2 > &pair)
 
template<typename T , typename G , typename Cont >
void sendNonResizableContainer (GenericSendBuffer< T, G > &buf, const Cont &container)
 
template<typename T , typename Cont >
void recvNonResizableContainer (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename G , typename CT , std::size_t N>
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::array< CT, N > &array)
 
template<typename T , typename CT , std::size_t N>
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::array< CT, N > &array)
 
template<typename T , typename G , typename Cont >
void sendContainer (GenericSendBuffer< T, G > &buf, const Cont &container)
 
template<typename T , typename Cont >
void recvContainer (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::vector< CT, CA > &c)
 
template<typename T , typename CT , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::vector< CT, CA > &c)
 
template<typename T , typename G , typename CT >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::basic_string< CT > &c)
 
template<typename T , typename CT >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::basic_string< CT > &c)
 
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::deque< CT, CA > &c)
 
template<typename T , typename CT , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::deque< CT, CA > &c)
 
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::list< CT, CA > &c)
 
template<typename T , typename CT , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::list< CT, CA > &c)
 
template<typename T , typename G >
GenericSendBuffer< T, G > & packBoolVectorWithoutSize (GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools)
 
template<typename T >
GenericRecvBuffer< T > & unpackBoolVectorWithoutSize (GenericRecvBuffer< T > &buf, std::vector< bool > &bools, size_t size)
 
template<typename T , typename G >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools)
 
template<typename T >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::vector< bool > &bools)
 
template<typename T , typename G , typename Cont >
void sendAssocContainer (GenericSendBuffer< T, G > &buf, const Cont &container)
 
template<typename T , typename Cont >
void recvAssocContainer (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename Cont >
void recvMap (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::set< CK, CC, CA > &c)
 
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::set< CK, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::unordered_set< CK, CC, CA > &c)
 
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::unordered_set< CK, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::multiset< CK, CC, CA > &c)
 
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::multiset< CK, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::map< CK, CT, CC, CA > &c)
 
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::map< CK, CT, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::unordered_map< CK, CT, CC, CA > &c)
 
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::unordered_map< CK, CT, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::multimap< CK, CT, CC, CA > &c)
 
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::multimap< CK, CT, CC, CA > &c)
 
SendBuffer & operator<< (SendBuffer &buf, const RandomUUID &uuid)
 
RecvBuffer & operator>> (RecvBuffer &buf, RandomUUID &uuid)
 
template<typename T >
std::vector< T > gather (T value, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector. More...
 
template<typename T >
std::vector< T > allGather (T value, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector on all processes. More...
 
template<>
std::vector< std::string > gatherv (const std::vector< std::string > &values, int recvRank, MPI_Comm comm)
 
template<>
std::vector< std::string > allGatherv (const std::vector< std::string > &values, MPI_Comm comm)
 
void gathervBuffer (const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, int targetRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers the buffer content on a single target process. More...
 
void allGathervBuffer (const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, MPI_Comm comm=MPI_COMM_WORLD)
 Almost identical to mpi::gathervBuffer; the only difference is that the result is stored on every process. More...
 
template<typename T >
std::vector< T > gatherv (const std::vector< T > &values, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector. More...
 
template<typename T >
std::vector< T > allGatherv (const std::vector< T > &values, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector on all processes. More...
 
int translateRank (const MPI_Comm srcComm, const MPI_Comm destComm, const int srcRank)
 This function maps the rank in one communicator to the rank in another communicator. More...
 
std::vector< int > translateRank (const MPI_Comm srcComm, const MPI_Comm destComm, const std::vector< int > &srcRank)
 This function converts an array of ranks in one communicator to an array of ranks in another communicator. More...
 
void writeMPIIO (const std::string &file, SendBuffer &buffer)
 Writes contents of local buffer to a single binary file via MPIIO. More...
 
void readMPIIO (const std::string &file, RecvBuffer &buffer)
 Counterpart to writeMPIIO - has to be called with exactly the same process distribution. More...
 
static void customTerminateHandler ()
 Terminate Handler that calls MPI_Abort instead of std::abort. More...
 
void writeMPITextFile (const std::string &filename, const std::string &processLocalPart, const MPI_Comm comm)
 Writes file using MPI IO with each process providing a part of it. More...
 
MPI_Op toMPI_Op (Operation operation)
 
template<typename T >
void reduceInplace (T &value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes in-place. More...
 
void reduceInplace (bool &value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes in-place. More...
 
template<typename T >
T reduce (const T value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes. More...
 
bool reduce (const bool value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes. More...
 
template<typename T >
void reduceInplace (std::vector< T > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a std::vector<T> over all processes in-place. More...
 
void reduceInplace (std::vector< bool > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces boolean values in a std::vector<bool> over all processes in-place. More...
 
template<typename T >
void reduceInplace (math::Vector3< T > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a math::Vector3<T> over all processes in-place. More...
 
void reduceInplace (math::Vector3< bool > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces booleans in a math::Vector3 over all processes in-place. More...
 
template<typename T >
math::Vector3< T > reduce (const math::Vector3< T > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a math::Vector3<T> over all processes. More...
 
math::Vector3< bool > reduce (const math::Vector3< bool > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces boolean values in a Vector3 over all processes. More...
 
template<typename T >
T allReduce (const T &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes. More...
 
bool allReduce (const bool value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes. More...
 
template<typename T >
void allReduceInplace (T &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes in-place. More...
 
void allReduceInplace (bool &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes in-place. More...
 
template<typename T >
void allReduceInplace (std::vector< T > &values, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a std::vector<T> over all processes in-place. More...
 
void allReduceInplace (std::vector< bool > &bools, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a std::vector<bool> over all processes in-place. More...
 
template<typename T >
void allReduceInplace (math::Vector3< T > &values, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in math::Vector3<T> over all processes in-place. More...
 
void allReduceInplace (math::Vector3< bool > &bools, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces boolean values in math::Vector3 over all processes in-place. More...
 
template<typename T >
std::vector< T > allReduceSet (std::vector< T > values, SetOperation op, MPI_Comm mpiCommunicator=MPI_COMM_WORLD, int mpiTag=0)
 Reduces a set of values on all processes without using global mpi communication. More...
 
template<typename T , typename G , typename E >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::Set< E > &set)
 
template<typename T , typename E >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::Set< E > &set)
 
template<typename T , typename G , typename GE >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::uid::UID< GE > &uid)
 
template<typename T , typename GE >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::uid::UID< GE > &uid)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const BlockDataID &id)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, BlockDataID &id)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const ConstBlockDataID &id)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, ConstBlockDataID &id)
 
template<typename T , typename G , typename CM , bool CO, typename FM , int EQU>
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm)
 
template<typename T , typename CM , bool CO, typename FM , int EQU>
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::data::ContactHistory &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::data::ContactHistory &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const walberla::mesa_pd::data::particle_flags::FlagT &flags)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, walberla::mesa_pd::data::particle_flags::FlagT &flags)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ContactHistoryNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ContactHistoryNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ForceTorqueNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ForceTorqueNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::HeatFluxNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::HeatFluxNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::HydrodynamicForceTorqueNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::HydrodynamicForceTorqueNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::NewGhostParticleNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::NewGhostParticleNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::NumContactNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::NumContactNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleCopyNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleCopyNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleGhostCopyNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleGhostCopyNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleMigrationNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleMigrationNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemoteMigrationNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemoteMigrationNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalInformationNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalInformationNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleUpdateNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleUpdateNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityCorrectionNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityCorrectionNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityUpdateNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityUpdateNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const std::shared_ptr< mesa_pd::data::BaseShape > &bs)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, std::shared_ptr< mesa_pd::data::BaseShape > &bs)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::BaseHandle &handle)
 
template<typename T , typename HandleT >
mpi::GenericRecvBuffer< T > & unpackOpenMeshHandle (mpi::GenericRecvBuffer< T > &buf, HandleT &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::VertexHandle &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::FaceHandle &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::HalfedgeHandle &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::EdgeHandle &handle)
 
template<typename T , typename G , typename Scalar , int DIM>
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::VectorT< Scalar, DIM > &v)
 
template<typename T , typename Scalar , int DIM>
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::VectorT< Scalar, DIM > &v)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const pe::debug::BodyData &bd)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, pe::debug::BodyData &bd)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const pe::Owner &owner)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, pe::Owner &owner)
 

Variables

const uint_t BUFFER_DEBUG_OVERHEAD = 0
 

Typedef Documentation

◆ BufferSystem

◆ OpenMPBufferSystem

◆ RecvBuffer

◆ SendBuffer

Enumeration Type Documentation

◆ Operation

Enumerator
MIN 
MAX 
SUM 
PRODUCT 
LOGICAL_AND 
BITWISE_AND 
LOGICAL_OR 
BITWISE_OR 
LOGICAL_XOR 
BITWISE_XOR 

◆ SetOperation

Enumerator
INTERSECTION 
UNION 

Function Documentation

◆ allGather()

template<typename T >
std::vector<T> walberla::mpi::allGather ( T  value,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector on all processes.

T has to be a native MPI_Datatype

Parameters
value    The value gathered from the process
comm     The MPI communicator used for communication
Returns
A std::vector with the result
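
A minimal usage sketch (illustrative only; assumes MPI has been initialized, e.g. via mpi::Environment):

   // every process contributes its world rank; afterwards every process
   // holds the vector [0, 1, ..., numProcesses-1]
   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   std::vector< int > ranks = walberla::mpi::allGather( rank );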

◆ allGatherv() [1/2]

template<>
std::vector< std::string > walberla::mpi::allGatherv ( const std::vector< std::string > &  values,
MPI_Comm  comm 
)

◆ allGatherv() [2/2]

template<typename T >
std::vector<T> walberla::mpi::allGatherv ( const std::vector< T > &  values,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector on all processes.

T has to be a native MPI_Datatype

Parameters
values    The values gathered from the process
comm      The MPI communicator used for communication
Returns
A std::vector with the result
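
A usage sketch (illustrative; the local vector is an assumed, process-dependent input):

   // each process may contribute a vector of different length; the result is the
   // concatenation of all local vectors in rank order, available on every process
   std::vector< double > localValues = /* values owned by this process */ {};
   std::vector< double > allValues   = walberla::mpi::allGatherv( localValues );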

◆ allGathervBuffer()

void walberla::mpi::allGathervBuffer ( const mpi::SendBuffer &  sendBuffer,
mpi::RecvBuffer &  recvBuffer,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Almost identical to mpi::gathervBuffer; the only difference is that the result is stored on every process.
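
A usage sketch (illustrative; the packed content is an assumption, any type with a matching operator<< / operator>> overload works):

   walberla::mpi::SendBuffer sendBuffer;
   sendBuffer << std::string( "data from one process" );   // pack via the operator<< overloads of this namespace

   walberla::mpi::RecvBuffer recvBuffer;
   walberla::mpi::allGathervBuffer( sendBuffer, recvBuffer );

   // recvBuffer now holds the packed content of all processes, sorted by rank
   int numProcesses;
   MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
   for( int i = 0; i < numProcesses; ++i )
   {
      std::string msg;
      recvBuffer >> msg;   // unpack one entry per rank, in rank order
   }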

◆ allReduce() [1/2]

bool walberla::mpi::allReduce ( const bool  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes.

Parameters
value        The boolean value to be reduced
operation    The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
comm         The MPI communicator used for communication
Returns
The reduced boolean value, available on every process

◆ allReduce() [2/2]

template<typename T >
T walberla::mpi::allReduce ( const T &  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes.

T has to be an integer or floating point value

Parameters
value        The value to be reduced
operation    The operation to be performed
comm         The MPI communicator used for communication
Returns
The reduced value, available on every process
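
A usage sketch (localMax is an illustrative per-process value):

   double localMax  = /* maximum computed on this process */ 0.0;
   double globalMax = walberla::mpi::allReduce( localMax, walberla::mpi::MAX );
   // globalMax holds the same reduced value on every process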

◆ allReduceInplace() [1/6]

void walberla::mpi::allReduceInplace ( bool &  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes in-place.

Parameters
value        The boolean value to be reduced
operation    The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
comm         The MPI communicator used for communication

◆ allReduceInplace() [2/6]

void walberla::mpi::allReduceInplace ( math::Vector3< bool > &  bools,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces boolean values in math::Vector3 over all processes in-place.

Parameters
bools        The boolean values to be reduced
operation    The operation to be performed
comm         The MPI communicator used for communication

◆ allReduceInplace() [3/6]

template<typename T >
void walberla::mpi::allReduceInplace ( math::Vector3< T > &  values,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces values in math::Vector3<T> over all processes in-place.

T has to be an integer or floating point value

Parameters
values       The values to be reduced
operation    The operation to be performed
comm         The MPI communicator used for communication

◆ allReduceInplace() [4/6]

void walberla::mpi::allReduceInplace ( std::vector< bool > &  bools,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces values in a std::vector<bool> over all processes in-place.

Specialization of allReduceInplace<T>

Parameters
bools        The boolean values to be reduced
operation    The operation to be performed (one of BITWISE_AND, BITWISE_OR or BITWISE_XOR)
comm         The MPI communicator used for communication

◆ allReduceInplace() [5/6]

template<typename T >
void walberla::mpi::allReduceInplace ( std::vector< T > &  values,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces values in a std::vector<T> over all processes in-place.

T has to be an integer or floating point value

Parameters
values       The values to be reduced
operation    The operation to be performed
comm         The MPI communicator used for communication

◆ allReduceInplace() [6/6]

template<typename T >
void walberla::mpi::allReduceInplace ( T &  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes in-place.

T has to be an integer or floating point value

Parameters
value        The value to be reduced
operation    The operation to be performed
comm         The MPI communicator used for communication
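
A usage sketch (localSum is an illustrative per-process value):

   double localSum = /* partial sum computed on this process */ 0.0;
   walberla::mpi::allReduceInplace( localSum, walberla::mpi::SUM );
   // localSum now holds the global sum on every process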

◆ allReduceSet()

template<typename T >
std::vector<T> walberla::mpi::allReduceSet ( std::vector< T >  values,
SetOperation  op,
MPI_Comm  mpiCommunicator = MPI_COMM_WORLD,
int  mpiTag = 0 
)

Reduces a set of values on all processes without using global mpi communication.

The algorithm performs log(n) communication steps, where n is the number of processes in mpiCommunicator. The returned vector is a sorted set of unique values.

Parameters
values             The local input values. Duplicates are removed internally. Values have to be buffer packable and sortable.
op                 The operation to be performed on the set: intersection or union.
mpiCommunicator    The MPI communicator used for the reduction.
mpiTag             The MPI tag used for the reduction.
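
A usage sketch (the id values are illustrative):

   // ids known to this process; other processes may hold overlapping sets
   std::vector< int > localIds = { 1, 4, 7 };
   std::vector< int > commonIds = walberla::mpi::allReduceSet( localIds, walberla::mpi::INTERSECTION );
   // commonIds is a sorted set of the ids that are present on every process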

◆ broadcastObject()

template<typename T >
void walberla::mpi::broadcastObject ( T &  object,
int  senderRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Broadcasts an arbitrarily sized object from one process to all other processes.

T has to be able to be packed into SendBuffer and unpacked from RecvBuffer.

Parameters
object        The object to be broadcast
senderRank    The rank of the process sending the object
comm          The MPI communicator used for communication
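
A usage sketch (the vector content is illustrative; assumes MPI has been initialized):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );

   std::vector< double > parameters;
   if( rank == 0 )
      parameters = { 0.1, 0.2, 0.3 };              // only the sender fills the object

   walberla::mpi::broadcastObject( parameters );   // senderRank defaults to 0
   // every process now holds { 0.1, 0.2, 0.3 }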

◆ customTerminateHandler()

static void walberla::mpi::customTerminateHandler ( )
static

Terminate Handler that calls MPI_Abort instead of std::abort.

The terminate handler is called when an exception is not caught and the program has to be aborted. The standard terminate handler prints exception.what() and calls std::abort(). When MPI was initialized, we override the terminate handler to call MPI_Abort instead.

◆ gather()

template<typename T >
std::vector<T> walberla::mpi::gather ( T  value,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector.

T has to be a native MPI_Datatype

Parameters
value       The value gathered from the process
recvRank    The rank of the process receiving the gathered information
comm        The MPI communicator used for communication
Returns
A std::vector with the result on recvRank, an empty vector on all other ranks
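
A usage sketch (localResidual is an illustrative per-process value):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );

   double localResidual = /* value computed on this process */ 0.0;
   std::vector< double > residuals = walberla::mpi::gather( localResidual );   // recvRank defaults to 0
   if( rank == 0 )
   {
      // residuals.size() equals the number of processes; all other ranks received an empty vector
   }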

◆ gatherv() [1/2]

template<>
std::vector< std::string > walberla::mpi::gatherv ( const std::vector< std::string > &  values,
int  recvRank,
MPI_Comm  comm 
)

◆ gatherv() [2/2]

template<typename T >
std::vector<T> walberla::mpi::gatherv ( const std::vector< T > &  values,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector.

T has to be a native MPI_Datatype

Parameters
values      The values gathered from the process
recvRank    The rank of the process receiving the gathered information
comm        The MPI communicator used for communication
Returns
A std::vector with the result on recvRank, an empty vector on all other ranks
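
A usage sketch (the local vector is an illustrative, process-dependent input):

   // the local vectors may have different sizes on different processes
   std::vector< int > localIds = /* ids owned by this process */ {};
   std::vector< int > allIds   = walberla::mpi::gatherv( localIds );   // concatenated in rank order on rank 0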

◆ gathervBuffer()

void walberla::mpi::gathervBuffer ( const mpi::SendBuffer &  sendBuffer,
mpi::RecvBuffer &  recvBuffer,
int  targetRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers the buffer content on a single target process.

  • every process holds one mpi::SendBuffer ( can have different size on each process )
  • the buffer contents are gathered on process with targetRank
  • buffer contents are sorted by rank and stored consecutively in a mpi::RecvBuffer
Parameters
[in]    sendBuffer    send buffer with (possibly) different size on each process
[out]   recvBuffer    left unchanged on all processes except targetRank; on targetRank it holds the gathered result
[in]    targetRank    rank of the process where the data is gathered
[in]    comm          MPI communicator to use
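
A usage sketch (the packed content is illustrative):

   walberla::mpi::SendBuffer sendBuffer;
   sendBuffer << std::string( "per-process log line" );      // sizes may differ between processes

   walberla::mpi::RecvBuffer recvBuffer;
   walberla::mpi::gathervBuffer( sendBuffer, recvBuffer );   // gathered on targetRank 0 (the default)

   // only on rank 0: recvBuffer contains the packed content of all processes, sorted by rank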

◆ operator<<() [1/47]

template<typename T , typename G , typename CT , std::size_t N>
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::array< CT, N > &  array 
)

◆ operator<<() [2/47]

template<typename T , typename G , typename CT >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::basic_string< CT > &  c 
)

◆ operator<<() [3/47]

template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::deque< CT, CA > &  c 
)

◆ operator<<() [4/47]

template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::list< CT, CA > &  c 
)

◆ operator<<() [5/47]

template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::map< CK, CT, CC, CA > &  c 
)

◆ operator<<() [6/47]

template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::multimap< CK, CT, CC, CA > &  c 
)

◆ operator<<() [7/47]

template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::multiset< CK, CC, CA > &  c 
)

◆ operator<<() [8/47]

template<typename T , typename G , typename T1 , typename T2 >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::pair< T1, T2 > &  pair 
)

◆ operator<<() [9/47]

template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::set< CK, CC, CA > &  c 
)

◆ operator<<() [10/47]

template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::unordered_map< CK, CT, CC, CA > &  c 
)

◆ operator<<() [11/47]

template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::unordered_set< CK, CC, CA > &  c 
)

◆ operator<<() [12/47]

template<typename T , typename G >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::vector< bool > &  bools 
)

◆ operator<<() [13/47]

template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::vector< CT, CA > &  c 
)

◆ operator<<() [14/47]

template<typename T , typename G , typename VT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const math::Quaternion< VT > &  quat 
)

◆ operator<<() [15/47]

template<typename T , typename G , typename V >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const math::Rot3< V > &  obj 
)

◆ operator<<() [16/47]

template<typename T , typename G , typename MT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const Matrix3< MT > &  m 
)

◆ operator<<() [17/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ContactHistoryNotification obj 
)

◆ operator<<() [18/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::data::ContactHistory obj 
)

◆ operator<<() [19/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ForceTorqueNotification obj 
)

◆ operator<<() [20/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::HeatFluxNotification obj 
)

◆ operator<<() [21/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::HydrodynamicForceTorqueNotification obj 
)

◆ operator<<() [22/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::NewGhostParticleNotification obj 
)

◆ operator<<() [23/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::NumContactNotification obj 
)

◆ operator<<() [24/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleCopyNotification obj 
)

◆ operator<<() [25/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleGhostCopyNotification obj 
)

◆ operator<<() [26/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleMigrationNotification obj 
)

◆ operator<<() [27/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleRemoteMigrationNotification obj 
)

◆ operator<<() [28/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleRemovalInformationNotification obj 
)

◆ operator<<() [29/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleRemovalNotification obj 
)

◆ operator<<() [30/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleUpdateNotification obj 
)

◆ operator<<() [31/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::VelocityCorrectionNotification obj 
)

◆ operator<<() [32/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::VelocityUpdateNotification obj 
)

◆ operator<<() [33/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const OpenMesh::BaseHandle &  handle 
)

◆ operator<<() [34/47]

template<typename T , typename G , typename Scalar , int DIM>
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const OpenMesh::VectorT< Scalar, DIM > &  v 
)

◆ operator<<() [35/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const pe::debug::BodyData bd 
)

◆ operator<<() [36/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const pe::Owner owner 
)
inline

◆ operator<<() [37/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const std::shared_ptr< mesa_pd::data::BaseShape > &  bs 
)

◆ operator<<() [38/47]

template<typename T , typename G , typename VT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const Vector2< VT > &  vec 
)

◆ operator<<() [39/47]

template<typename T , typename G , typename VT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const Vector3< VT > &  vec 
)

◆ operator<<() [40/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const walberla::mesa_pd::data::particle_flags::FlagT flags 
)

◆ operator<<() [41/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const BlockDataID id 
)

◆ operator<<() [42/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const blockforest::BlockID id 
)
inline

◆ operator<<() [43/47]

template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const ConstBlockDataID id 
)

◆ operator<<() [44/47]

template<typename T , typename G , typename CM , bool CO, typename FM , int EQU>
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &  lm 
)

◆ operator<<() [45/47]

template<typename T , typename G , typename E >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const walberla::Set< E > &  set 
)
inline

◆ operator<<() [46/47]

template<typename T , typename G , typename GE >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const walberla::uid::UID< GE > &  uid 
)
inline

◆ operator<<() [47/47]

SendBuffer& walberla::mpi::operator<< ( SendBuffer &  buf,
const RandomUUID &  uuid 
)
inline

◆ operator>>() [1/50]

template<typename T , typename CT , std::size_t N>
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::array< CT, N > &  array 
)

◆ operator>>() [2/50]

template<typename T , typename CT >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::basic_string< CT > &  c 
)

◆ operator>>() [3/50]

template<typename T , typename CT , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::deque< CT, CA > &  c 
)

◆ operator>>() [4/50]

template<typename T , typename CT , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::list< CT, CA > &  c 
)

◆ operator>>() [5/50]

template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::map< CK, CT, CC, CA > &  c 
)

◆ operator>>() [6/50]

template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::multimap< CK, CT, CC, CA > &  c 
)

◆ operator>>() [7/50]

template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::multiset< CK, CC, CA > &  c 
)

◆ operator>>() [8/50]

template<typename T , typename T1 , typename T2 >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::pair< T1, T2 > &  pair 
)

◆ operator>>() [9/50]

template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::set< CK, CC, CA > &  c 
)

◆ operator>>() [10/50]

template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::unordered_map< CK, CT, CC, CA > &  c 
)

◆ operator>>() [11/50]

template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::unordered_set< CK, CC, CA > &  c 
)

◆ operator>>() [12/50]

template<typename T >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::vector< bool > &  bools 
)

◆ operator>>() [13/50]

template<typename T , typename CT , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::vector< CT, CA > &  c 
)

◆ operator>>() [14/50]

template<typename T , typename VT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
math::Quaternion< VT > &  quat 
)

◆ operator>>() [15/50]

template<typename T , typename V >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
math::Rot3< V > &  objparam 
)

◆ operator>>() [16/50]

template<typename T , typename MT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
Matrix3< MT > &  m 
)

◆ operator>>() [17/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ContactHistoryNotification::Parameters objparam 
)

◆ operator>>() [18/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::data::ContactHistory objparam 
)

◆ operator>>() [19/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ForceTorqueNotification::Parameters objparam 
)

◆ operator>>() [20/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::HeatFluxNotification::Parameters objparam 
)

◆ operator>>() [21/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::HydrodynamicForceTorqueNotification::Parameters objparam 
)

◆ operator>>() [22/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::NewGhostParticleNotification::Parameters objparam 
)

◆ operator>>() [23/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::NumContactNotification::Parameters objparam 
)

◆ operator>>() [24/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleCopyNotification::Parameters objparam 
)

◆ operator>>() [25/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleGhostCopyNotification::Parameters objparam 
)

◆ operator>>() [26/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleMigrationNotification::Parameters objparam 
)

◆ operator>>() [27/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleRemoteMigrationNotification::Parameters objparam 
)

◆ operator>>() [28/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleRemovalInformationNotification::Parameters objparam 
)

◆ operator>>() [29/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleRemovalNotification::Parameters objparam 
)

◆ operator>>() [30/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleUpdateNotification::Parameters objparam 
)

◆ operator>>() [31/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::VelocityCorrectionNotification::Parameters objparam 
)

◆ operator>>() [32/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::VelocityUpdateNotification::Parameters objparam 
)

◆ operator>>() [33/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::EdgeHandle &  handle 
)

◆ operator>>() [34/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::FaceHandle &  handle 
)

◆ operator>>() [35/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::HalfedgeHandle &  handle 
)

◆ operator>>() [36/50]

template<typename T , typename Scalar , int DIM>
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::VectorT< Scalar, DIM > &  v 
)

◆ operator>>() [37/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::VertexHandle &  handle 
)

◆ operator>>() [38/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
pe::debug::BodyData bd 
)

◆ operator>>() [39/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
pe::Owner owner 
)
inline

◆ operator>>() [40/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
std::shared_ptr< mesa_pd::data::BaseShape > &  bs 
)

◆ operator>>() [41/50]

template<typename T , typename VT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
Vector2< VT > &  vec 
)

◆ operator>>() [42/50]

template<typename T , typename VT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
Vector3< VT > &  vec 
)

◆ operator>>() [43/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
walberla::mesa_pd::data::particle_flags::FlagT flags 
)

◆ operator>>() [44/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
BlockDataID id 
)

◆ operator>>() [45/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
blockforest::BlockID id 
)
inline

◆ operator>>() [46/50]

template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
ConstBlockDataID id 
)

◆ operator>>() [47/50]

template<typename T , typename CM , bool CO, typename FM , int EQU>
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &  lm 
)

◆ operator>>() [48/50]

template<typename T , typename E >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
walberla::Set< E > &  set 
)
inline

◆ operator>>() [49/50]

template<typename T , typename GE >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
walberla::uid::UID< GE > &  uid 
)
inline

◆ operator>>() [50/50]

RecvBuffer& walberla::mpi::operator>> ( RecvBuffer &  buf,
RandomUUID &  uuid 
)
inline

◆ packBoolVectorWithoutSize()

template<typename T , typename G >
GenericSendBuffer<T,G>& walberla::mpi::packBoolVectorWithoutSize ( GenericSendBuffer< T, G > &  buf,
const std::vector< bool > &  bools 
)

◆ readMPIIO()

void walberla::mpi::readMPIIO ( const std::string &  file,
RecvBuffer &  buffer 
)

Counterpart to writeMPIIO - has to be called with exactly the same process distribution.

Reads the local part of the data into the buffer.

◆ recvAssocContainer()

template<typename T , typename Cont >
void walberla::mpi::recvAssocContainer ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)

◆ recvContainer()

template<typename T , typename Cont >
void walberla::mpi::recvContainer ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)

◆ recvMap()

template<typename T , typename Cont >
void walberla::mpi::recvMap ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)

◆ recvNonResizableContainer()

template<typename T , typename Cont >
void walberla::mpi::recvNonResizableContainer ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)

◆ reduce() [1/4]

bool walberla::mpi::reduce ( const bool  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes.

Parameters
value        The boolean value to be reduced
operation    The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
recvRank     The rank of the process receiving the reduced value
comm         The MPI communicator used for communication
Returns
The reduced boolean value on recvRank, false on all other ranks.

◆ reduce() [2/4]

math::Vector3<bool> walberla::mpi::reduce ( const math::Vector3< bool > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces boolean values in a Vector3 over all processes.

Parameters
values       The boolean values to be reduced
operation    The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
recvRank     The rank of the process receiving the reduced values
comm         The MPI communicator used for communication
Returns
The reduced boolean values on recvRank, false on all other ranks.

◆ reduce() [3/4]

template<typename T >
math::Vector3<T> walberla::mpi::reduce ( const math::Vector3< T > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces values in a math::Vector3<T> over all processes.

T has to be an integer or floating point value

Parameters
values       The values to be reduced
operation    The operation to be performed
recvRank     The rank of the process receiving the reduced values
comm         The MPI communicator used for communication
Returns
The reduced values on recvRank, 0 on all other ranks.

◆ reduce() [4/4]

template<typename T >
T walberla::mpi::reduce ( const T  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes.

T has to be an integer or floating point value

Parameters
value        The value to be reduced
operation    The operation to be performed
recvRank     The rank of the process receiving the reduced value
comm         The MPI communicator used for communication
Returns
The reduced value on recvRank, 0 on all other ranks.
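
A usage sketch (localError is an illustrative per-process value):

   double localError = /* error computed on this process */ 0.0;
   double maxError   = walberla::mpi::reduce( localError, walberla::mpi::MAX );   // recvRank defaults to 0
   // maxError is valid on rank 0 only; all other ranks receive 0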

◆ reduceInplace() [1/6]

void walberla::mpi::reduceInplace ( bool &  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes in-place.

Parameters
value        The boolean value to be reduced
operation    The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
recvRank     The rank of the process receiving the reduced value
comm         The MPI communicator used for communication

◆ reduceInplace() [2/6]

void walberla::mpi::reduceInplace ( math::Vector3< bool > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces booleans in a math::Vector3 over all processes in-place.

Parameters
values       The boolean values to be reduced
operation    The operation to be performed
recvRank     The rank of the process receiving the reduced values
comm         The MPI communicator used for communication

◆ reduceInplace() [3/6]

template<typename T >
void walberla::mpi::reduceInplace ( math::Vector3< T > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces values in a math::Vector3<T> over all processes in-place.

T has to be an integer or floating point value

Parameters
values       The values to be reduced
operation    The operation to be performed
recvRank     The rank of the process receiving the reduced values
comm         The MPI communicator used for communication

◆ reduceInplace() [4/6]

void walberla::mpi::reduceInplace ( std::vector< bool > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces boolean values in a std::vector<bool> over all processes in-place.

Specialization of reduceInplace<T>

Parameters
values       The boolean values to be reduced
operation    The operation to be performed (one of BITWISE_AND, BITWISE_OR or BITWISE_XOR)
recvRank     The rank of the process receiving the reduced values
comm         The MPI communicator used for communication

◆ reduceInplace() [5/6]

template<typename T >
void walberla::mpi::reduceInplace ( std::vector< T > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces values in a std::vector<T> over all processes in-place.

T has to be an integer or floating point value

Parameters
values       The values to be reduced
operation    The operation to be performed
recvRank     The rank of the process receiving the reduced values
comm         The MPI communicator used for communication

◆ reduceInplace() [6/6]

template<typename T >
void walberla::mpi::reduceInplace ( T &  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes in-place.

T has to be an integer or floating point value

Parameters
value        The value to be reduced
operation    The operation to be performed
recvRank     The rank of the process receiving the reduced value
comm         The MPI communicator used for communication
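
A usage sketch (localSum is an illustrative per-process value):

   double localSum = /* partial sum computed on this process */ 0.0;
   walberla::mpi::reduceInplace( localSum, walberla::mpi::SUM );   // recvRank defaults to 0
   // on rank 0, localSum now holds the global sum; on the other ranks it does not hold the global result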

◆ sendAssocContainer()

template<typename T , typename G , typename Cont >
void walberla::mpi::sendAssocContainer ( GenericSendBuffer< T, G > &  buf,
const Cont &  container 
)

◆ sendContainer()

template<typename T , typename G , typename Cont >
void walberla::mpi::sendContainer ( GenericSendBuffer< T, G > &  buf,
const Cont &  container 
)

◆ sendNonResizableContainer()

template<typename T , typename G , typename Cont >
void walberla::mpi::sendNonResizableContainer ( GenericSendBuffer< T, G > &  buf,
const Cont &  container 
)

◆ toMPI_Op()

MPI_Op walberla::mpi::toMPI_Op ( Operation  operation)
inline

◆ translateRank() [1/2]

int walberla::mpi::translateRank ( const MPI_Comm  srcComm,
const MPI_Comm  destComm,
const int  srcRank 
)

This function maps the rank in one communicator to the rank in another communicator.

Parameters
srcComm     source communicator
destComm    destination communicator
srcRank     rank in the source communicator
Returns
rank in the destination communicator, or -1 if not available
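
A usage sketch (the sub-communicator is an illustrative assumption, created here with plain MPI calls):

   int worldRank;
   MPI_Comm_rank( MPI_COMM_WORLD, &worldRank );

   // hypothetical sub-communicator containing every second process of MPI_COMM_WORLD
   MPI_Comm subComm;
   MPI_Comm_split( MPI_COMM_WORLD, worldRank % 2, worldRank, &subComm );

   // world rank of the process that is rank 0 in subComm (-1 if that rank did not exist in the destination communicator)
   int worldRankOfSubRoot = walberla::mpi::translateRank( subComm, MPI_COMM_WORLD, 0 );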

◆ translateRank() [2/2]

std::vector< int > walberla::mpi::translateRank ( const MPI_Comm  srcComm,
const MPI_Comm  destComm,
const std::vector< int > &  srcRank 
)

This function converts an array of ranks in one communicator to an array of ranks in another communicator.

Parameters
srcComm     source communicator
destComm    destination communicator
srcRank     source ranks
Returns
converted ranks, -1 for ranks that are not available

◆ unpackBoolVectorWithoutSize()

template<typename T >
GenericRecvBuffer<T>& walberla::mpi::unpackBoolVectorWithoutSize ( GenericRecvBuffer< T > &  buf,
std::vector< bool > &  bools,
size_t  size 
)

◆ unpackOpenMeshHandle()

template<typename T , typename HandleT >
mpi::GenericRecvBuffer<T>& walberla::mpi::unpackOpenMeshHandle ( mpi::GenericRecvBuffer< T > &  buf,
HandleT &  handle 
)

◆ writeMPIIO()

void walberla::mpi::writeMPIIO ( const std::string &  file,
SendBuffer &  buffer 
)

Writes contents of local buffer to a single binary file via MPIIO.

Has to be called collectively by all processes.
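
A checkpoint-style usage sketch (localState is an illustrative, buffer-packable object such as a std::vector<double>):

   std::vector< double > localState = /* data owned by this process */ {};

   // write: every process packs its local data and all processes call writeMPIIO collectively
   walberla::mpi::SendBuffer sendBuffer;
   sendBuffer << localState;
   walberla::mpi::writeMPIIO( "checkpoint.bin", sendBuffer );

   // read back later, with exactly the same process distribution
   walberla::mpi::RecvBuffer recvBuffer;
   walberla::mpi::readMPIIO( "checkpoint.bin", recvBuffer );
   recvBuffer >> localState;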

◆ writeMPITextFile()

void walberla::mpi::writeMPITextFile ( const std::string &  filename,
const std::string &  processLocalPart,
const MPI_Comm  comm 
)

Writes file using MPI IO with each process providing a part of it.

This method has to be called collectively by all processes in comm. The file is assembled in the order of the ranks of the calling processes.

Parameters
filename            The name of the file to be written
processLocalPart    The part of the file belonging to the calling process (size may differ among processes)
comm                The MPI communicator used for communication
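
A usage sketch (the file name and message are illustrative; assumes <sstream> is included):

   int rank;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );

   std::ostringstream oss;
   oss << "process " << rank << " finished its part\n";

   // collective call: the file is assembled in rank order
   walberla::mpi::writeMPITextFile( "status.txt", oss.str(), MPI_COMM_WORLD );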

Variable Documentation

◆ BUFFER_DEBUG_OVERHEAD

const uint_t walberla::mpi::BUFFER_DEBUG_OVERHEAD = 0