#ifndef COLLECTIVECOMMUNICATION_H_
#define COLLECTIVECOMMUNICATION_H_

#include <mpi.h>

#include "utils/Logger.h"
#include "CollectiveCommBase.h"
#include "CollectiveCommunicationInterface.h"
#include "utils/mardyn_assert.h"

// If 1, all appended values are reduced in one collective call on an agglomerated MPI
// datatype; if 0, one collective call is issued per appended value.
#define ENABLE_AGGLOMERATED_REDUCE 1
//! This class is used to transfer several values of different types with a single command.
class CollectiveCommunication : public CollectiveCommBase, public CollectiveCommunicationInterface {
public:
    //! allocate memory for the values to be sent, initialize counters
    virtual void init(MPI_Comm communicator, int numValues, int key = 0) override {
        // ...
    }
    // ...

    // appendInt/appendUnsLong/appendFloat/appendDouble/appendLongDouble each forward the
    // value to CollectiveCommBase and record the matching MPI datatype alongside it:
    _types.push_back(MPI_UNSIGNED_LONG);   // in appendUnsLong()
    _types.push_back(MPI_FLOAT);           // in appendFloat()
    _types.push_back(MPI_DOUBLE);          // in appendDouble()
    _types.push_back(MPI_LONG_DOUBLE);     // in appendLongDouble()
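
    // A minimal usage sketch (illustration only, not part of this header). It assumes that
    // matching getters such as getInt() and getDouble() are inherited from CollectiveCommBase
    // and that values are read back in the order in which they were appended:
    //
    //   CollectiveCommunication cc;
    //   cc.init(comm, 2);                  // two values will be communicated over comm
    //   cc.appendInt(localMoleculeCount);  // hypothetical per-rank quantities
    //   cc.appendDouble(localUpot);
    //   cc.allreduceSum();                 // element-wise global sums
    //   int globalMoleculeCount = cc.getInt();
    //   double globalUpot = cc.getDouble();
    //   cc.finalize();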
    // ...

    void allreduceCustom(ReduceType type) override {
        Log::global_log->debug() << "CollectiveCommunication: custom Allreduce" << std::endl;
#if ENABLE_AGGLOMERATED_REDUCE
        // ...
        MPI_Op agglomeratedTypeAddOperator;
        const int commutative = 1;
        switch (type) {
        case ReduceType::SUM:
            MPI_CHECK(MPI_Op_create((MPI_User_function *) CollectiveCommunication::add,
                    commutative, &agglomeratedTypeAddOperator));
            break;
        case ReduceType::MAX:
            MPI_CHECK(MPI_Op_create((MPI_User_function *) CollectiveCommunication::max,
                    commutative, &agglomeratedTypeAddOperator));
            break;
        case ReduceType::MIN:
            MPI_CHECK(MPI_Op_create((MPI_User_function *) CollectiveCommunication::min,
                    commutative, &agglomeratedTypeAddOperator));
            break;
        default:
            Log::global_log->error() << "invalid reducetype, aborting." << std::endl;
            // ...
        }
        // ... in-place MPI_Allreduce on the agglomerated datatype with the custom operator ...
        MPI_CHECK(MPI_Op_free(&agglomeratedTypeAddOperator));
        // ...
#else
        for (unsigned int i = 0; i < _types.size(); i++) {
            MPI_Op op = MPI_NO_OP;
            switch (type) {
            case ReduceType::SUM: op = MPI_SUM; break;
            case ReduceType::MIN: op = MPI_MIN; break;
            case ReduceType::MAX: op = MPI_MAX; break;
            default:
                Log::global_log->error() << "invalid reducetype, aborting." << std::endl;
                // ...
            }
            // ... per-entry in-place MPI_Allreduce of _values[i] with _types[i] and op ...
        }
#endif
    }

    // ...
    //! Performs a scan (sum)
    void scanSum() override {
#if ENABLE_AGGLOMERATED_REDUCE
        // ...
        MPI_Op agglomeratedTypeAddOperator;
        const int commutative = 1;
        MPI_CHECK(MPI_Op_create((MPI_User_function *) CollectiveCommunication::add,
                commutative, &agglomeratedTypeAddOperator));
        // ... in-place MPI_Scan on the agglomerated datatype ...
        MPI_CHECK(MPI_Op_free(&agglomeratedTypeAddOperator));
#else
        for (unsigned int i = 0; i < _types.size(); i++) {
            // ... per-entry in-place MPI_Scan of _values[i] with _types[i] and MPI_SUM ...
        }
#endif
    }
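
    // scanSum() differs from allreduceSum() in that MPI_Scan yields a prefix sum: rank r ends
    // up with the element-wise sum over ranks 0..r rather than over all ranks. A minimal
    // sketch of the underlying call for a single unsigned long (illustration only; comm and
    // localCount are placeholders):
    //
    //   unsigned long prefix = localCount;
    //   MPI_Scan(MPI_IN_PLACE, &prefix, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
    //   // prefix now holds the inclusive prefix sum over ranks 0..r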
    // ...

    //! defines an MPI datatype which can be used to transfer a CollectiveCommunication object
    void setMPIType() {
        int numblocks = _values.size();
        std::vector<int> blocklengths(numblocks);
        std::vector<MPI_Aint> disps(numblocks);
        for (int i = 0; i < numblocks; i++) {
            // ... each block has length 1; the displacements advance by sizeof(valType) ...
        }
        MPI_Datatype * startOfTypes = &(_types[0]);
#if MPI_VERSION >= 2 && MPI_SUBVERSION >= 0
        MPI_CHECK(MPI_Type_create_struct(numblocks, blocklengths.data(), disps.data(),
                startOfTypes, &_agglomeratedType));
#else
        MPI_CHECK(MPI_Type_struct(numblocks, blocklengths.data(), disps.data(), startOfTypes,
                &_agglomeratedType));
#endif
        // ... commit _agglomeratedType ...
    }
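
    // For illustration: if one int and one double have been appended, setMPIType() builds a
    // struct datatype roughly equivalent to the following sketch (the fixed stride of
    // sizeof(valType) between entries is an assumption based on how _values stores its entries):
    //
    //   int          blocklengths[2] = { 1, 1 };
    //   MPI_Aint     disps[2]        = { 0, sizeof(valType) };
    //   MPI_Datatype types[2]        = { MPI_INT, MPI_DOUBLE };
    //   MPI_Datatype agglomerated;
    //   MPI_Type_create_struct(2, blocklengths, disps, types, &agglomerated);
    //   MPI_Type_commit(&agglomerated);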

    //! method used by MPI to add variables of this type
    static void add(valType *invec, valType *inoutvec, int *, MPI_Datatype *dtype) {
        int numints;
        int numaddr;
        int numtypes;
        int combiner;
        MPI_CHECK(MPI_Type_get_envelope(*dtype, &numints, &numaddr, &numtypes, &combiner));

        std::vector<int> arrayInts(numints);
        std::vector<MPI_Aint> arrayAddr(numaddr);
        std::vector<MPI_Datatype> arrayTypes(numtypes);

        MPI_CHECK(MPI_Type_get_contents(*dtype, numints, numaddr, numtypes,
                arrayInts.data(), arrayAddr.data(), arrayTypes.data()));

        for (int i = 0; i < numtypes; i++) {
            if (arrayTypes[i] == MPI_INT) {
                inoutvec[i].v_int += invec[i].v_int;
            } else if (arrayTypes[i] == MPI_UNSIGNED_LONG) {
                inoutvec[i].v_unsLong += invec[i].v_unsLong;
            } else if (arrayTypes[i] == MPI_FLOAT) {
                inoutvec[i].v_float += invec[i].v_float;
            } else if (arrayTypes[i] == MPI_DOUBLE) {
                inoutvec[i].v_double += invec[i].v_double;
            } else if (arrayTypes[i] == MPI_LONG_DOUBLE) {
                inoutvec[i].v_longDouble += invec[i].v_longDouble;
            }
        }
    }
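
    // The dtype argument is what makes one reduction function work for any agglomerated
    // layout: MPI passes the derived datatype of the reduced elements to the user function,
    // and MPI_Type_get_contents recovers the per-entry MPI types, so the matching valType
    // member can be combined. A reduced sketch of how such an operator is registered and used
    // (illustration only; values, agglomeratedType and comm stand for the members used above):
    //
    //   MPI_Op sumOp;
    //   MPI_Op_create((MPI_User_function *) CollectiveCommunication::add, 1 /*commutative*/, &sumOp);
    //   MPI_Allreduce(MPI_IN_PLACE, values, 1, agglomeratedType, sumOp, comm);
    //   MPI_Op_free(&sumOp);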

    //! method used by MPI to take the element-wise maximum of variables of this type
    static void max(valType *invec, valType *inoutvec, int *, MPI_Datatype *dtype) {
        int numints;
        int numaddr;
        int numtypes;
        int combiner;
        MPI_CHECK(MPI_Type_get_envelope(*dtype, &numints, &numaddr, &numtypes, &combiner));

        std::vector<int> arrayInts(numints);
        std::vector<MPI_Aint> arrayAddr(numaddr);
        std::vector<MPI_Datatype> arrayTypes(numtypes);

        MPI_CHECK(MPI_Type_get_contents(*dtype, numints, numaddr, numtypes,
                arrayInts.data(), arrayAddr.data(), arrayTypes.data()));

        for (int i = 0; i < numtypes; i++) {
            if (arrayTypes[i] == MPI_INT) {
                inoutvec[i].v_int = std::max(inoutvec[i].v_int, invec[i].v_int);
            } else if (arrayTypes[i] == MPI_UNSIGNED_LONG) {
                inoutvec[i].v_unsLong = std::max(inoutvec[i].v_unsLong, invec[i].v_unsLong);
            } else if (arrayTypes[i] == MPI_FLOAT) {
                inoutvec[i].v_float = std::max(inoutvec[i].v_float, invec[i].v_float);
            } else if (arrayTypes[i] == MPI_DOUBLE) {
                inoutvec[i].v_double = std::max(inoutvec[i].v_double, invec[i].v_double);
            } else if (arrayTypes[i] == MPI_LONG_DOUBLE) {
                inoutvec[i].v_longDouble = std::max(inoutvec[i].v_longDouble, invec[i].v_longDouble);
            }
        }
    }

    //! method used by MPI to take the element-wise minimum of variables of this type
    static void min(valType *invec, valType *inoutvec, int *, MPI_Datatype *dtype) {
        int numints;
        int numaddr;
        int numtypes;
        int combiner;
        MPI_CHECK(MPI_Type_get_envelope(*dtype, &numints, &numaddr, &numtypes, &combiner));

        std::vector<int> arrayInts(numints);
        std::vector<MPI_Aint> arrayAddr(numaddr);
        std::vector<MPI_Datatype> arrayTypes(numtypes);

        MPI_CHECK(MPI_Type_get_contents(*dtype, numints, numaddr, numtypes,
                arrayInts.data(), arrayAddr.data(), arrayTypes.data()));

        for (int i = 0; i < numtypes; i++) {
            if (arrayTypes[i] == MPI_INT) {
                inoutvec[i].v_int = std::min(inoutvec[i].v_int, invec[i].v_int);
            } else if (arrayTypes[i] == MPI_UNSIGNED_LONG) {
                inoutvec[i].v_unsLong = std::min(inoutvec[i].v_unsLong, invec[i].v_unsLong);
            } else if (arrayTypes[i] == MPI_FLOAT) {
                inoutvec[i].v_float = std::min(inoutvec[i].v_float, invec[i].v_float);
            } else if (arrayTypes[i] == MPI_DOUBLE) {
                inoutvec[i].v_double = std::min(inoutvec[i].v_double, invec[i].v_double);
            } else if (arrayTypes[i] == MPI_LONG_DOUBLE) {
                inoutvec[i].v_longDouble = std::min(inoutvec[i].v_longDouble, invec[i].v_longDouble);
            }
        }
    }

    //! Vector of the corresponding MPI types for the values stored in _values.
    std::vector<MPI_Datatype> _types;

    //! Agglomerated MPI datatype built from _types by setMPIType().
    MPI_Datatype _agglomeratedType;

    //! Communicator to be used by the communication commands.
    MPI_Comm _communicator;
};

#endif /* COLLECTIVECOMMUNICATION_H_ */
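
// A self-contained sketch of the mechanism used above, written independently of the ls1-mardyn
// classes (the union Val, all variable names and the two-value layout are made up for this
// example). One int and one double are agglomerated into a struct datatype, a custom sum
// operator is registered with MPI_Op_create, and both values are reduced with a single
// MPI_Allreduce, mirroring allreduceCustom() with ENABLE_AGGLOMERATED_REDUCE.

#include <cstdio>
#include <vector>

#include <mpi.h>

union Val {
    int v_int;
    double v_double;
};

// User-defined reduction: query the struct datatype to find out which member of Val is active
// for each entry, then add the matching members (same idea as CollectiveCommunication::add).
static void addVals(void *invoid, void *inoutvoid, int * /*len*/, MPI_Datatype *dtype) {
    Val *in = static_cast<Val *>(invoid);
    Val *inout = static_cast<Val *>(inoutvoid);

    int numints, numaddr, numtypes, combiner;
    MPI_Type_get_envelope(*dtype, &numints, &numaddr, &numtypes, &combiner);

    std::vector<int> ints(numints);
    std::vector<MPI_Aint> addrs(numaddr);
    std::vector<MPI_Datatype> types(numtypes);
    MPI_Type_get_contents(*dtype, numints, numaddr, numtypes, ints.data(), addrs.data(), types.data());

    for (int i = 0; i < numtypes; i++) {
        if (types[i] == MPI_INT) {
            inout[i].v_int += in[i].v_int;
        } else if (types[i] == MPI_DOUBLE) {
            inout[i].v_double += in[i].v_double;
        }
    }
}

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Two heterogeneous values per rank, stored back-to-back like _values in the class above.
    Val values[2];
    values[0].v_int = rank;          // e.g. a local count
    values[1].v_double = 0.5 * rank; // e.g. a local energy

    // Describe the layout: one int at offset 0, one double at offset sizeof(Val).
    int blocklengths[2] = { 1, 1 };
    MPI_Aint disps[2] = { 0, sizeof(Val) };
    MPI_Datatype types[2] = { MPI_INT, MPI_DOUBLE };
    MPI_Datatype agglomerated;
    MPI_Type_create_struct(2, blocklengths, disps, types, &agglomerated);
    MPI_Type_commit(&agglomerated);

    // Register the custom operator and reduce both values in one collective call.
    MPI_Op sumOp;
    MPI_Op_create(addVals, 1 /*commutative*/, &sumOp);
    MPI_Allreduce(MPI_IN_PLACE, values, 1, agglomerated, sumOp, MPI_COMM_WORLD);

    if (rank == 0) {
        std::printf("sum of ranks = %d, sum of energies = %f\n", values[0].v_int, values[1].v_double);
    }

    MPI_Op_free(&sumOp);
    MPI_Type_free(&agglomerated);
    MPI_Finalize();
    return 0;
}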