walberla::mpi Namespace Reference

Namespaces

 internal
 

Classes

struct  BufferSizeTrait
 
struct  BufferSizeTrait< BlockDataID >
 
struct  BufferSizeTrait< blockforest::BlockID >
 
struct  BufferSizeTrait< bool >
 
struct  BufferSizeTrait< Cell >
 
struct  BufferSizeTrait< CellInterval >
 
struct  BufferSizeTrait< char >
 
struct  BufferSizeTrait< ConstBlockDataID >
 
struct  BufferSizeTrait< double >
 
struct  BufferSizeTrait< float >
 
struct  BufferSizeTrait< int >
 
struct  BufferSizeTrait< long >
 
struct  BufferSizeTrait< long long >
 
struct  BufferSizeTrait< math::Rot3< V > >
 
struct  BufferSizeTrait< mesa_pd::ForceTorqueNotification >
 
struct  BufferSizeTrait< mesa_pd::HeatFluxNotification >
 
struct  BufferSizeTrait< mesa_pd::ParticleMigrationNotification >
 
struct  BufferSizeTrait< mesa_pd::ParticleRemoteMigrationNotification >
 
struct  BufferSizeTrait< mesa_pd::ParticleRemovalNotification >
 
struct  BufferSizeTrait< mesa_pd::VelocityCorrectionNotification >
 
struct  BufferSizeTrait< mesa_pd::VelocityUpdateNotification >
 
struct  BufferSizeTrait< OpenMesh::EdgeHandle >
 
struct  BufferSizeTrait< OpenMesh::FaceHandle >
 
struct  BufferSizeTrait< OpenMesh::HalfedgeHandle >
 
struct  BufferSizeTrait< OpenMesh::VectorT< Scalar, DIM > >
 
struct  BufferSizeTrait< OpenMesh::VertexHandle >
 
struct  BufferSizeTrait< pe::Owner >
 
struct  BufferSizeTrait< RandomUUID >
 
struct  BufferSizeTrait< short >
 
struct  BufferSizeTrait< signed char >
 
struct  BufferSizeTrait< std::array< T, N > >
 
struct  BufferSizeTrait< std::basic_string< T > >
 
struct  BufferSizeTrait< std::deque< T, A > >
 
struct  BufferSizeTrait< std::list< T, A > >
 
struct  BufferSizeTrait< std::map< K, T, C, A > >
 
struct  BufferSizeTrait< std::multimap< K, T, C, A > >
 
struct  BufferSizeTrait< std::multiset< T, C, A > >
 
struct  BufferSizeTrait< std::pair< T1, T2 > >
 
struct  BufferSizeTrait< std::set< T, C, A > >
 
struct  BufferSizeTrait< std::unordered_map< K, T, C, A > >
 
struct  BufferSizeTrait< std::unordered_set< T, C, A > >
 
struct  BufferSizeTrait< std::vector< T, A > >
 
struct  BufferSizeTrait< T, typename std::enable_if< std::is_enum< T >::value >::type >
 
struct  BufferSizeTrait< unsigned char >
 
struct  BufferSizeTrait< unsigned int >
 
struct  BufferSizeTrait< unsigned long >
 
struct  BufferSizeTrait< unsigned long long >
 
struct  BufferSizeTrait< unsigned short >
 
struct  BufferSizeTrait< walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > >
 
struct  BufferSizeTrait< walberla::math::GenericAABB< T > >
 
struct  BufferSizeTrait< walberla::math::Matrix3< VT > >
 
struct  BufferSizeTrait< walberla::math::Quaternion< VT > >
 
struct  BufferSizeTrait< walberla::math::Vector2< VT > >
 
struct  BufferSizeTrait< walberla::math::Vector3< VT > >
 
struct  BufferSizeTrait< walberla::mesa_pd::data::particle_flags::FlagT >
 
struct  BufferSizeTrait< walberla::pe::debug::BodyData >
 
struct  BufferSizeTrait< walberla::Set< T > >
 
struct  BufferSizeTrait< walberla::uid::UID< GE > >
 
class  Datatype
 RAII class for MPI data types that commits and frees them. More...
 
class  Environment
 RAII Object to initialize and finalize MPI. More...
 
class  GenericBufferSystem
 Manages MPI Communication with a set of known communication partners. More...
 
class  GenericOpenMPBufferSystem
 Wrapper around BufferSystem for OpenMP parallel MPI communication. More...
 
class  GenericRecvBuffer
 Implementation of a MPI receive buffer. More...
 
class  GenericSendBuffer
 Implementation of a MPI send buffer. More...
 
class  MPIManager
 Encapsulates MPI Rank/Communicator information. More...
 
class  TokenizedScope
 Object that starts tokenizing in constructor and ends tokenizing when going out of scope. More...
 
class  Tokenizing
 MPI tokenizing ensures that not more than N processes execute the same code portion simultaneously. More...
 

Typedefs

typedef GenericBufferSystem< RecvBuffer, SendBuffer > BufferSystem
 
typedef GenericOpenMPBufferSystem< RecvBuffer, SendBuffer > OpenMPBufferSystem
 
typedef GenericRecvBuffer RecvBuffer
 
typedef GenericSendBuffer SendBuffer
 

Enumerations

enum  Operation {
  MIN, MAX, SUM, PRODUCT,
  LOGICAL_AND, BITWISE_AND, LOGICAL_OR, BITWISE_OR,
  LOGICAL_XOR, BITWISE_XOR
}
 
enum  SetOperation { INTERSECTION, UNION }
 

Functions

template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const blockforest::BlockID &id)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, blockforest::BlockID &id)
 
template<typename T , typename G , typename MT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const Matrix3< MT > &m)
 
template<typename T , typename MT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, Matrix3< MT > &m)
 
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const math::Quaternion< VT > &quat)
 
template<typename T , typename VT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, math::Quaternion< VT > &quat)
 
template<typename T , typename G , typename V >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const math::Rot3< V > &obj)
 
template<typename T , typename V >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, math::Rot3< V > &objparam)
 
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const Vector2< VT > &vec)
 
template<typename T , typename VT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, Vector2< VT > &vec)
 
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const Vector3< VT > &vec)
 
template<typename T , typename VT >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, Vector3< VT > &vec)
 
template<typename T >
void broadcastObject (T &object, int senderRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Broadcasts an arbitrary sized object from one process to all other processes. More...
 
template<typename T , typename G , typename T1 , typename T2 >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::pair< T1, T2 > &pair)
 
template<typename T , typename T1 , typename T2 >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::pair< T1, T2 > &pair)
 
template<typename T , typename G , typename Cont >
void sendNonResizableContainer (GenericSendBuffer< T, G > &buf, const Cont &container)
 
template<typename T , typename Cont >
void recvNonResizableContainer (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename G , typename CT , std::size_t N>
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::array< CT, N > &array)
 
template<typename T , typename CT , std::size_t N>
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::array< CT, N > &array)
 
template<typename T , typename G , typename Cont >
void sendContainer (GenericSendBuffer< T, G > &buf, const Cont &container)
 
template<typename T , typename Cont >
void recvContainer (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::vector< CT, CA > &c)
 
template<typename T , typename CT , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::vector< CT, CA > &c)
 
template<typename T , typename G , typename CT >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::basic_string< CT > &c)
 
template<typename T , typename CT >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::basic_string< CT > &c)
 
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::deque< CT, CA > &c)
 
template<typename T , typename CT , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::deque< CT, CA > &c)
 
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::list< CT, CA > &c)
 
template<typename T , typename CT , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::list< CT, CA > &c)
 
template<typename T , typename G >
GenericSendBuffer< T, G > & packBoolVectorWithoutSize (GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools)
 
template<typename T >
GenericRecvBuffer< T > & unpackBoolVectorWithoutSize (GenericRecvBuffer< T > &buf, std::vector< bool > &bools, size_t size)
 
template<typename T , typename G >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::vector< bool > &bools)
 
template<typename T >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::vector< bool > &bools)
 
template<typename T , typename G , typename Cont >
void sendAssocContainer (GenericSendBuffer< T, G > &buf, const Cont &container)
 
template<typename T , typename Cont >
void recvAssocContainer (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename Cont >
void recvMap (GenericRecvBuffer< T > &buf, Cont &container)
 
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::set< CK, CC, CA > &c)
 
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::set< CK, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::unordered_set< CK, CC, CA > &c)
 
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::unordered_set< CK, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::multiset< CK, CC, CA > &c)
 
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::multiset< CK, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::map< CK, CT, CC, CA > &c)
 
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::map< CK, CT, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::unordered_map< CK, CT, CC, CA > &c)
 
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::unordered_map< CK, CT, CC, CA > &c)
 
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const std::multimap< CK, CT, CC, CA > &c)
 
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, std::multimap< CK, CT, CC, CA > &c)
 
template<typename T , typename G , typename OT >
GenericSendBuffer< T, G > & operator<< (GenericSendBuffer< T, G > &buf, const walberla::optional< OT > &o)
 
template<typename T , typename OT >
GenericRecvBuffer< T > & operator>> (GenericRecvBuffer< T > &buf, walberla::optional< OT > &o)
 
SendBuffer & operator<< (SendBuffer &buf, const RandomUUID &uuid)
 
RecvBuffer & operator>> (RecvBuffer &buf, RandomUUID &uuid)
 
template<typename T >
std::vector< T > gather (T value, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector. More...
 
template<typename T >
std::vector< T > allGather (T value, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector on all Processes. More...
 
template<>
std::vector< std::string > gatherv (const std::vector< std::string > &values, int recvRank, MPI_Comm comm)
 
template<>
std::vector< std::string > allGatherv (const std::vector< std::string > &values, MPI_Comm comm)
 
void gathervBuffer (const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, int targetRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers the buffer content on a single target process. More...
 
void allGathervBuffer (const mpi::SendBuffer &sendBuffer, mpi::RecvBuffer &recvBuffer, MPI_Comm comm=MPI_COMM_WORLD)
 Almost identical to mpi::gathervBuffer, the only difference: The result is stored on every process. More...
 
template<typename T >
std::vector< T > gatherv (const std::vector< T > &values, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector. More...
 
template<typename T >
std::vector< T > allGatherv (const std::vector< T > &values, MPI_Comm comm=MPI_COMM_WORLD)
 Gathers values from MPI processes and stores them into a std::vector on all Processes. More...
 
int translateRank (const MPI_Comm srcComm, const MPI_Comm destComm, const int srcRank)
 This functions maps the rank in one communicator to the rank in another communicator. More...
 
std::vector< int > translateRank (const MPI_Comm srcComm, const MPI_Comm destComm, const std::vector< int > &srcRank)
 This functions converts a array of ranks in one communicator to an array of ranks in another communicator. More...
 
void writeMPIIO (const std::string &file, SendBuffer &buffer)
 Writes contents of local buffer to a single binary file via MPIIO. More...
 
void readMPIIO (const std::string &file, RecvBuffer &buffer)
 Counterpart to writeMPIIO - has to be called with exactly the same process distribution. More...
 
static void customTerminateHandler ()
 Terminate Handler that calls MPI_Abort instead of std::abort. More...
 
void writeMPITextFile (const std::string &filename, const std::string &processLocalPart, const MPI_Comm comm)
 Writes file using MPI IO with each process providing a part of it. More...
 
MPI_Op toMPI_Op (Operation operation)
 
template<typename T >
void reduceInplace (T &value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes in-place. More...
 
void reduceInplace (bool &value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes in-place. More...
 
template<typename T >
T reduce (const T value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes. More...
 
bool reduce (const bool value, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes. More...
 
template<typename T >
void reduceInplace (std::vector< T > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a std::vector<T> over all processes in-place. More...
 
void reduceInplace (std::vector< bool > &values, Operation operation, int recvRank=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces boolean values in a std::vector<bool> over all processes in-place. More...
 
template<typename T >
T allReduce (const T &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes. More...
 
bool allReduce (const bool value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes. More...
 
template<typename T >
void allReduceInplace (T &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a value over all processes in-place. More...
 
void allReduceInplace (bool &value, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces a boolean value over all processes in-place. More...
 
template<typename T >
void allReduceInplace (std::vector< T > &values, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a std::vector<T> over all processes in-place. More...
 
void allReduceInplace (std::vector< bool > &bools, Operation operation, MPI_Comm comm=MPI_COMM_WORLD)
 Reduces values in a std::vector<bool> over all processes in-place. More...
 
template<typename T >
std::vector< T > allReduceSet (std::vector< T > values, SetOperation op, MPI_Comm mpiCommunicator=MPI_COMM_WORLD, int mpiTag=0)
 Reduces a set of values on all processes without using global mpi communication. More...
 
template<typename T , typename G , typename E >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::Set< E > &set)
 
template<typename T , typename E >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::Set< E > &set)
 
template<typename T , typename G , typename GE >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::uid::UID< GE > &uid)
 
template<typename T , typename GE >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::uid::UID< GE > &uid)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const BlockDataID &id)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, BlockDataID &id)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const ConstBlockDataID &id)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, ConstBlockDataID &id)
 
template<typename T , typename G , typename CM , bool CO, typename FM , int EQU>
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buffer, const walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm)
 
template<typename T , typename CM , bool CO, typename FM , int EQU>
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buffer, walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &lm)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::data::ContactHistory &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::data::ContactHistory &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const walberla::mesa_pd::data::particle_flags::FlagT &flags)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, walberla::mesa_pd::data::particle_flags::FlagT &flags)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ContactHistoryNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ContactHistoryNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ForceTorqueNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ForceTorqueNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::HeatFluxNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::HeatFluxNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::NewGhostParticleNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::NewGhostParticleNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleCopyNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleCopyNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleMigrationNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleMigrationNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemoteMigrationNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemoteMigrationNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalInformationNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalInformationNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleRemovalNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleRemovalNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::ParticleUpdateNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::ParticleUpdateNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityCorrectionNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityCorrectionNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const mesa_pd::VelocityUpdateNotification &obj)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, mesa_pd::VelocityUpdateNotification::Parameters &objparam)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::BaseHandle &handle)
 
template<typename T , typename HandleT >
mpi::GenericRecvBuffer< T > & unpackOpenMeshHandle (mpi::GenericRecvBuffer< T > &buf, HandleT &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::VertexHandle &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::FaceHandle &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::HalfedgeHandle &handle)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::EdgeHandle &handle)
 
template<typename T , typename G , typename Scalar , int DIM>
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const OpenMesh::VectorT< Scalar, DIM > &v)
 
template<typename T , typename Scalar , int DIM>
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, OpenMesh::VectorT< Scalar, DIM > &v)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const pe::debug::BodyData &bd)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, pe::debug::BodyData &bd)
 
template<typename T , typename G >
mpi::GenericSendBuffer< T, G > & operator<< (mpi::GenericSendBuffer< T, G > &buf, const pe::Owner &owner)
 
template<typename T >
mpi::GenericRecvBuffer< T > & operator>> (mpi::GenericRecvBuffer< T > &buf, pe::Owner &owner)
 

Variables

const uint_t BUFFER_DEBUG_OVERHEAD = 0
 

Typedef Documentation

Enumeration Type Documentation

Enumerator
MIN 
MAX 
SUM 
PRODUCT 
LOGICAL_AND 
BITWISE_AND 
LOGICAL_OR 
BITWISE_OR 
LOGICAL_XOR 
BITWISE_XOR 
Enumerator
INTERSECTION 
UNION 

Function Documentation

template<typename T >
std::vector<T> walberla::mpi::allGather ( T  value,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector on all Processes.

T has to be a native MPI_Datatype

Parameters
value  The value gathered from the process
comm   The MPI communicator used for communication
Returns
A std::vector with the result
template<>
std::vector< std::string > walberla::mpi::allGatherv ( const std::vector< std::string > &  values,
MPI_Comm  comm 
)
template<typename T >
std::vector<T> walberla::mpi::allGatherv ( const std::vector< T > &  values,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector on all Processes.

T has to be a native MPI_Datatype

Parameters
values  The values gathered from the process
comm    The MPI communicator used for communication
Returns
A std::vector with the result
void walberla::mpi::allGathervBuffer ( const mpi::SendBuffer &  sendBuffer,
mpi::RecvBuffer &  recvBuffer,
MPI_Comm  comm 
)

Almost identical to mpi::gathervBuffer, the only difference: The result is stored on every process.

template<typename T >
T walberla::mpi::allReduce ( const T &  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes.

T has to be an integer or floating point value

Parameters
value      The value to be reduced
operation  The operation to be performed
comm       The MPI communicator used for communication
Returns
The reduced value on all processes.
bool walberla::mpi::allReduce ( const bool  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes.

T has to be a boolean value

Parameters
value      The boolean value to be reduced
operation  The operation to be performed
comm       The MPI communicator used for communication
Returns
The reduced value on all processes.
template<typename T >
void walberla::mpi::allReduceInplace ( T &  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes in-place.

T has to be an integer or floating point value

Parameters
value      The value to be reduced
operation  The operation to be performed
comm       The MPI communicator used for communication
void walberla::mpi::allReduceInplace ( bool &  value,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes in-place.

T has to be a boolean value

Parameters
value      The boolean value to be reduced
operation  The operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
comm       The MPI communicator used for communication
template<typename T >
void walberla::mpi::allReduceInplace ( std::vector< T > &  values,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces values in a std::vector<T> over all processes in-place.

T has to be an integer or floating point value

Parameters
values     The values to be reduced
operation  The operation to be performed
comm       The MPI communicator used for communication
void walberla::mpi::allReduceInplace ( std::vector< bool > &  bools,
Operation  operation,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces values in a std::vector<bool> over all processes in-place.

Specialization of allReduceInplace<T>

Parameters
bools      The boolean values to be reduced
operation  The operation to be performed (one of BITWISE_AND, BITWISE_OR or BITWISE_XOR)
comm       The MPI communicator used for communication
template<typename T >
std::vector<T> walberla::mpi::allReduceSet ( std::vector< T >  values,
SetOperation  op,
MPI_Comm  mpiCommunicator = MPI_COMM_WORLD,
int  mpiTag = 0 
)

Reduces a set of values on all processes without using global mpi communication.

The algorithm performs log(n) communication steps, where n is the number of processes in mpiCommunicator. The returned vector is a sorted set of unique values.

Parameters
values           The local input values. Duplicates will internally be removed. Values have to be buffer packable and sortable.
op               The operation to be performed on the set: intersection or union.
mpiCommunicator  MPI communicator used for the reduction.
mpiTag           MPI tag used for the reduction.
template<typename T >
void walberla::mpi::broadcastObject ( T &  object,
int  senderRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Broadcasts an arbitrary sized object from one process to all other processes.

T has to be able to be packed into SendBuffer and unpacked from RecvBuffer.

Parameters
object      The object to be broadcast
senderRank  The rank of the process sending the object
comm        The MPI communicator used for communication
static void walberla::mpi::customTerminateHandler ( )
static

Terminate Handler that calls MPI_Abort instead of std::abort.

Terminate handler is called when an exception is not caught and the program has to be aborted The standard terminate handler prints exception.what() and calls std::abort(). We overwrite the terminate handler when MPI was initialized, to call MPI_Abort in this case.

template<typename T >
std::vector<T> walberla::mpi::gather ( T  value,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector.

T has to be a native MPI_Datatype

Parameters
value     The value gathered from the process
recvRank  The rank of the process receiving the gathered information
comm      The MPI communicator used for communication
Returns
A std::vector with the result on recvRank, else an empty vector
template<>
std::vector< std::string > walberla::mpi::gatherv ( const std::vector< std::string > &  values,
int  recvRank,
MPI_Comm  comm 
)
template<typename T >
std::vector<T> walberla::mpi::gatherv ( const std::vector< T > &  values,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers values from MPI processes and stores them into a std::vector.

T has to be a native MPI_Datatype

Parameters
values    The values gathered from the process
recvRank  The rank of the process receiving the gathered information
comm      The MPI communicator used for communication
Returns
A std::vector with the result on recvRank, else an empty vector
void walberla::mpi::gathervBuffer ( const mpi::SendBuffer &  sendBuffer,
mpi::RecvBuffer &  recvBuffer,
int  targetRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gathers the buffer content on a single target process.

  • every process holds one mpi::SendBuffer ( can have different size on each process )
  • the buffer contents are gathered on process with targetRank
  • buffer contents are sorted by rank and stored consecutively in a mpi::RecvBuffer
Parameters
sendBuffer  [in]  sendBuffer with (possibly) different size on each process
recvBuffer  [out] recvBuffer which is left unchanged on all processes but targetRank; on targetRank recvBuffer holds the gathered result
targetRank  [in]  rank of the process where data is gathered
comm        [in]  mpi communicator to use
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const walberla::mesa_pd::data::particle_flags::FlagT flags 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const pe::Owner owner 
)
inline
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::HeatFluxNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ForceTorqueNotification obj 
)
template<typename T , typename G , typename T1 , typename T2 >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::pair< T1, T2 > &  pair 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ContactHistoryNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::VelocityCorrectionNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleRemovalNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleRemoteMigrationNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::VelocityUpdateNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleMigrationNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const pe::debug::BodyData bd 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleRemovalInformationNotification obj 
)
template<typename T , typename G , typename V >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const math::Rot3< V > &  obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::data::ContactHistory obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleUpdateNotification obj 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::NewGhostParticleNotification obj 
)
template<typename T , typename G , typename Scalar , int DIM>
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const OpenMesh::VectorT< Scalar, DIM > &  v 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const BlockDataID &  id 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const mesa_pd::ParticleCopyNotification obj 
)
template<typename T , typename G , typename CT , std::size_t N>
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::array< CT, N > &  array 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const blockforest::BlockID id 
)
inline
template<typename T , typename G , typename CM , bool CO, typename FM , int EQU>
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &  lm 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const ConstBlockDataID &  id 
)
template<typename T , typename G , typename GE >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const walberla::uid::UID< GE > &  uid 
)
inline
template<typename T , typename G , typename E >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buffer,
const walberla::Set< E > &  set 
)
inline
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::vector< CT, CA > &  c 
)
template<typename T , typename G , typename CT >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::basic_string< CT > &  c 
)
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::deque< CT, CA > &  c 
)
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const math::Quaternion< VT > &  quat 
)
template<typename T , typename G , typename CT , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::list< CT, CA > &  c 
)
template<typename T , typename G >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::vector< bool > &  bools 
)
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::set< CK, CC, CA > &  c 
)
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::unordered_set< CK, CC, CA > &  c 
)
template<typename T , typename G , typename CK , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::multiset< CK, CC, CA > &  c 
)
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::map< CK, CT, CC, CA > &  c 
)
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::unordered_map< CK, CT, CC, CA > &  c 
)
template<typename T , typename G , typename CK , typename CT , typename CC , typename CA >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const std::multimap< CK, CT, CC, CA > &  c 
)
template<typename T , typename G , typename OT >
GenericSendBuffer<T,G>& walberla::mpi::operator<< ( GenericSendBuffer< T, G > &  buf,
const walberla::optional< OT > &  o 
)
template<typename T , typename G >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const OpenMesh::BaseHandle &  handle 
)
SendBuffer& walberla::mpi::operator<< ( SendBuffer buf,
const RandomUUID uuid 
)
inline
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const Vector2< VT > &  vec 
)
template<typename T , typename G , typename MT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const Matrix3< MT > &  m 
)
template<typename T , typename G , typename VT >
mpi::GenericSendBuffer<T,G>& walberla::mpi::operator<< ( mpi::GenericSendBuffer< T, G > &  buf,
const Vector3< VT > &  vec 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::VertexHandle &  handle 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::FaceHandle &  handle 
)
template<typename T , typename T1 , typename T2 >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::pair< T1, T2 > &  pair 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::HalfedgeHandle &  handle 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::EdgeHandle &  handle 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleRemovalNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
pe::Owner owner 
)
inline
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleRemoteMigrationNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
pe::debug::BodyData bd 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleMigrationNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleRemovalInformationNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::NewGhostParticleNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleUpdateNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::HeatFluxNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ContactHistoryNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::VelocityCorrectionNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
walberla::mesa_pd::data::particle_flags::FlagT flags 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ForceTorqueNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::data::ContactHistory objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::VelocityUpdateNotification::Parameters objparam 
)
template<typename T , typename CT , std::size_t N>
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::array< CT, N > &  array 
)
template<typename T , typename Scalar , int DIM>
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
OpenMesh::VectorT< Scalar, DIM > &  v 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
BlockDataID &  id 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
mesa_pd::ParticleCopyNotification::Parameters objparam 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
ConstBlockDataID &  id 
)
template<typename T , typename CM , bool CO, typename FM , int EQU>
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
walberla::lbm::LatticeModelBase< CM, CO, FM, EQU > &  lm 
)
template<typename T , typename V >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
math::Rot3< V > &  objparam 
)
template<typename T , typename CT , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::vector< CT, CA > &  c 
)
template<typename T , typename CT >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::basic_string< CT > &  c 
)
template<typename T , typename CT , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::deque< CT, CA > &  c 
)
template<typename T , typename CT , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::list< CT, CA > &  c 
)
template<typename T , typename E >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
walberla::Set< E > &  set 
)
inline
template<typename T >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::vector< bool > &  bools 
)
template<typename T , typename GE >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
walberla::uid::UID< GE > &  uid 
)
inline
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::set< CK, CC, CA > &  c 
)
template<typename T >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buffer,
blockforest::BlockID id 
)
inline
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::unordered_set< CK, CC, CA > &  c 
)
template<typename T , typename CK , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::multiset< CK, CC, CA > &  c 
)
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::map< CK, CT, CC, CA > &  c 
)
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::unordered_map< CK, CT, CC, CA > &  c 
)
template<typename T , typename CK , typename CT , typename CC , typename CA >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
std::multimap< CK, CT, CC, CA > &  c 
)
template<typename T , typename OT >
GenericRecvBuffer<T>& walberla::mpi::operator>> ( GenericRecvBuffer< T > &  buf,
walberla::optional< OT > &  o 
)
RecvBuffer& walberla::mpi::operator>> ( RecvBuffer buf,
RandomUUID uuid 
)
inline
template<typename T , typename VT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
math::Quaternion< VT > &  quat 
)
template<typename T , typename VT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
Vector2< VT > &  vec 
)
template<typename T , typename MT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
Matrix3< MT > &  m 
)
template<typename T , typename VT >
mpi::GenericRecvBuffer<T>& walberla::mpi::operator>> ( mpi::GenericRecvBuffer< T > &  buf,
Vector3< VT > &  vec 
)
template<typename T , typename G >
GenericSendBuffer<T,G>& walberla::mpi::packBoolVectorWithoutSize ( GenericSendBuffer< T, G > &  buf,
const std::vector< bool > &  bools 
)
void walberla::mpi::readMPIIO ( const std::string &  file,
RecvBuffer buffer 
)

Counterpart to writeMPIIO - has to be called with exactly the same process distribution.

Reads local part of the data into a buffer

template<typename T , typename Cont >
void walberla::mpi::recvAssocContainer ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)
template<typename T , typename Cont >
void walberla::mpi::recvContainer ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)
template<typename T , typename Cont >
void walberla::mpi::recvMap ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)
template<typename T , typename Cont >
void walberla::mpi::recvNonResizableContainer ( GenericRecvBuffer< T > &  buf,
Cont &  container 
)
template<typename T >
T walberla::mpi::reduce ( const T  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes.

T has to be an integer or floating point value

Parameters
valueThe value to be reduced
operationThe operation to be performed
recvRankThe rank of the process receiving the reduced value
commThe MPI communicator used for communication
Returns
The reduced value on recvRank, 0 on all other ranks.
bool walberla::mpi::reduce ( const bool  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes.

Parameters
valueThe boolean value to be reduced
operationThe operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
recvRankThe rank of the process receiving the reduced value
commThe MPI communicator used for communication
Returns
The reduced boolean value on recvRank, false on all other ranks.
template<typename T >
void walberla::mpi::reduceInplace ( T &  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces a value over all processes in-place.

T has to be an integer or floating point value

Parameters
valueThe value to be reduced
operationThe operation to be performed
recvRankThe rank of the process receiving the reduced value
commThe MPI communicator used for communication
void walberla::mpi::reduceInplace ( bool &  value,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces a boolean value over all processes in-place.

Parameters
valueThe boolean value to be reduced
operationThe operation to be performed (one of LOGICAL_AND, LOGICAL_OR or LOGICAL_XOR)
recvRankThe rank of the process receiving the reduced value
commThe MPI communicator used for communication
template<typename T >
void walberla::mpi::reduceInplace ( std::vector< T > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduces values in a std::vector<T> over all processes in-place.

T has to be an integer or floating point value

Parameters
valuesThe values to be reduced
operationThe operation to be performed
recvRankThe rank of the process receiving the reduced values
commThe MPI communicator used for communication
void walberla::mpi::reduceInplace ( std::vector< bool > &  values,
Operation  operation,
int  recvRank = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Reduces boolean values in a std::vector<bool> over all processes in-place.

Specialization of reduceInplace<T>

Parameters
valuesThe boolean values to be reduced
operationThe operation to be performed (one of BITWISE_AND, BITWISE_OR or BITWISE_XOR)
recvRankThe rank of the process receiving the reduced values
commThe MPI communicator used for communication
template<typename T , typename G , typename Cont >
void walberla::mpi::sendAssocContainer ( GenericSendBuffer< T, G > &  buf,
const Cont &  container 
)
template<typename T , typename G , typename Cont >
void walberla::mpi::sendContainer ( GenericSendBuffer< T, G > &  buf,
const Cont &  container 
)
template<typename T , typename G , typename Cont >
void walberla::mpi::sendNonResizableContainer ( GenericSendBuffer< T, G > &  buf,
const Cont &  container 
)
MPI_Op walberla::mpi::toMPI_Op ( Operation  operation)
inline
int walberla::mpi::translateRank ( const MPI_Comm  srcComm,
const MPI_Comm  destComm,
const int  srcRank 
)

This function maps the rank in one communicator to the rank in another communicator.

Parameters
srcCommsource communicator
destCommdestination communicator
srcRankrank in the source communicator
Returns
rank in the destination communicator or -1 if not available
std::vector< int > walberla::mpi::translateRank ( const MPI_Comm  srcComm,
const MPI_Comm  destComm,
const std::vector< int > &  srcRank 
)

This function converts an array of ranks in one communicator to an array of ranks in another communicator.

Parameters
srcCommsource communicator
destCommdestination communicator
srcRanksource ranks
Returns
converted ranks, -1 if not available
template<typename T >
GenericRecvBuffer<T>& walberla::mpi::unpackBoolVectorWithoutSize ( GenericRecvBuffer< T > &  buf,
std::vector< bool > &  bools,
size_t  size 
)
template<typename T , typename HandleT >
mpi::GenericRecvBuffer<T>& walberla::mpi::unpackOpenMeshHandle ( mpi::GenericRecvBuffer< T > &  buf,
HandleT &  handle 
)
void walberla::mpi::writeMPIIO ( const std::string &  file,
SendBuffer buffer 
)

Writes contents of local buffer to a single binary file via MPIIO.

Has to be called by all processes

void walberla::mpi::writeMPITextFile ( const std::string &  filename,
const std::string &  processLocalPart,
const MPI_Comm  comm 
)

Writes file using MPI IO with each process providing a part of it.

This method has to be called collectively by all the processes in comm. The file will be assembled in the order of the ranks of the calling processes.

Parameters
filenameThe name of the file to be written
processLocalPartThe part of the file belonging to the calling process (size may differ among processes)

Variable Documentation

const uint_t walberla::mpi::BUFFER_DEBUG_OVERHEAD = 0