#ifndef VCLUSTER_BASE_HPP_
#define VCLUSTER_BASE_HPP_

#include "config.h"
#include <mpi.h>
#include "MPI_wrapper/MPI_util.hpp"
#include "Vector/map_vector.hpp"
#include "MPI_wrapper/MPI_IallreduceW.hpp"
#include "MPI_wrapper/MPI_IrecvW.hpp"
#include "MPI_wrapper/MPI_IsendW.hpp"
#include "MPI_wrapper/MPI_IAllGather.hpp"
#include "MPI_wrapper/MPI_IBcastW.hpp"
#include <exception>
#ifdef DEBUG
#include "util/check_no_pointers.hpp"
#include "util/util_debug.hpp"
#endif
#include "util/Vcluster_log.hpp"
#include "memory/BHeapMemory.hpp"
#include "Packer_Unpacker/has_max_prop.hpp"
#include "data_type/aggregate.hpp"

#ifdef HAVE_PETSC
#include <petscvec.h>
#endif

#define MSG_LENGTH 1024
#define MSG_SEND_RECV 1025
#define SEND_SPARSE 4096
#define NONE 1
#define NEED_ALL_SIZE 2

#define SERIVCE_MESSAGE_TAG 16384
#define SEND_RECV_BASE 8192
#define GATHER_BASE 24576

#define RECEIVE_KNOWN 4
#define KNOWN_ELEMENT_OR_BYTE 8

// number of vcluster instances
extern size_t n_vcluster;
// Global MPI initialization
extern bool global_mpi_init;
// initialization flag
extern bool ofp_initialized;
extern size_t tot_sent;
extern size_t tot_recv;

///////////////////// Post functions /////////////

template<typename T> void assign(T * ptr1, T * ptr2)
{
	*ptr1 = *ptr2;
}


//! temporary buffer for reductions
union red
{
	//! char
	char c;
	//! unsigned char
	unsigned char uc;
	//! short
	short s;
	//! unsigned short
	unsigned short us;
	//! integer
	int i;
	//! unsigned integer
	unsigned int ui;
	//! float
	float f;
	//! double
	double d;
};

/*! \brief This class virtualizes the cluster of PCs as a set of processes that communicate
 *
 * At the moment it offers an MPI-like, but more type-aware and simpler, interface.
 * It also provides more complex communication functionality like **Dynamic Sparse Data Exchange**.
 *
 * At present VCluster exposes computation-driven parallelism (MPI-like), with a plan to extend it to
 * communication-driven parallelism:
 *
 * * In computation-driven parallelism, the program computes and then communicates with the other processors
 *
 * * In communication-driven parallelism (Charm++ or HPX), the program receives messages, and these received
 *   messages trigger computation
 *
 * ### An example of sending and receiving plain buffers
 * \snippet VCluster_unit_test_util.hpp Send and receive plain buffer data
 * ### An example of sending vectors of primitives (T=float,double,long int,...)
 * \snippet VCluster_unit_test_util.hpp Sending and receiving primitives
 * ### An example of sending vectors of complex objects
 * \snippet VCluster_unit_test_util.hpp Send and receive vectors of complex
 * ### An example of gathering numbers from all processors
 * \snippet VCluster_unit_test_util.hpp allGather numbers
 *
 */

class Vcluster_base
{
	//! log file
	Vcluster_log log;

	//! NBX has a potential pitfall that must be addressed:
	//! NBX sends all the messages and probes for incoming messages;
	//! if there is an incoming message, it receives it, producing
	//! an acknowledgement notification on the sending processor.
	//! When all its sends have been acknowledged, a processor calls MPI_Ibarrier;
	//! when all the processors have called MPI_Ibarrier, all the sends have been received.
	//! While the processors are waiting for the MPI_Ibarrier to complete, all of them
	//! are still probing for incoming messages. Unfortunately some processors
	//! can quit the MPI_Ibarrier before others, which means that some
	//! processors can exit the probing status before others; these processors can in theory
	//! start new communications while the other processors are still probing, producing
	//! a wrong send/recv association. To
	//! resolve this problem an incremental NBX_cnt is used as the message TAG, to distinguish
	//! messages coming from other sends or subsequent NBX procedures
	size_t NBX_cnt;

	//! temporary vector used for meta-communication
	//! ( or meta-data sent before the real communication )
	openfpm::vector<size_t> proc_com;

	//! vector that contains the scatter map (it is basically an array of ones)
	openfpm::vector<int> map_scatter;

	//! vector of MPI requests
	openfpm::vector<MPI_Request> req;

	//! vector of MPI status
	openfpm::vector<MPI_Status> stat;

	//! vector of functions to execute after all the requests have been performed
	std::vector<int> post_exe;

	// Object array


	// Single objects

	//! number of processes
	int m_size;
	//! actual rank
	int m_rank;

	//! number of processing units per process
	int numPE = 1;

	/*! This buffer is a temporary buffer for reductions
	 *
	 * MPI_Iallreduce does not accept the send and recv buffers being the same;
	 * r is used to overcome this problem (it is given as the second parameter),
	 * and after the execution the data is copied back
	 *
	 */
	std::vector<red> r;

	//! vector of pointers to the send buffers
	openfpm::vector<void *> ptr_send;

	//! vector of the sizes of the send buffers
	openfpm::vector<size_t> sz_send;

	//! barrier request
	MPI_Request bar_req;

	//! barrier status
	MPI_Status bar_stat;

	//! disable operator=
	Vcluster_base & operator=(const Vcluster_base &)	{return *this;};

	//! disable copy constructor
	Vcluster_base(const Vcluster_base &)
	:NBX_cnt(0)
	{};

protected:

	//! Receive buffers
	openfpm::vector<BHeapMemory> recv_buf;

public:

	// Finalize the MPI program
	~Vcluster_base()
	{
#ifdef SE_CLASS2
		check_delete(this);
#endif
		n_vcluster--;

		// if there are no other vcluster instances finalize
		if (n_vcluster == 0)
		{
			int already_finalised;

			MPI_Finalized(&already_finalised);
			if (!already_finalised)
			{
				if (MPI_Finalize() != 0)
				{
					std::cerr << __FILE__ << ":" << __LINE__  << " MPI_Finalize FAILED \n";
				}
			}
		}
	}

	/*! \brief Virtual cluster constructor
	 *
	 * \param argc pointer to arguments counts passed to the program
	 * \param argv pointer to arguments vector passed to the program
	 *
	 */
	Vcluster_base(int *argc, char ***argv)
	:NBX_cnt(0)
	{
#ifdef SE_CLASS2
		check_new(this,8,VCLUSTER_EVENT,PRJ_VCLUSTER);
#endif

		n_vcluster++;

		int already_initialised;
		MPI_Initialized(&already_initialised);

		// Check if MPI is already initialized
		if (!already_initialised)
		{

			MPI_Init(argc,argv);
		}

		// Get the total number of processes
		// and the rank of this process

		MPI_Comm_size(MPI_COMM_WORLD, &m_size);
		MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);

#ifdef SE_CLASS2
			process_v_cl = m_rank;
#endif

		// create and fill the map scatter with ones
		map_scatter.resize(m_size);

		for (size_t i = 0 ; i < map_scatter.size() ; i++)
		{
			map_scatter.get(i) = 1;
		}

		// open the log file
		log.openLog(m_rank);

		// Initialize bar_req
		bar_req = MPI_Request();
		bar_stat = MPI_Status();
	}

#ifdef SE_CLASS1

	/*! \brief Check for wrong types
	 *
	 * In general we do not know whether a type T makes sense to be sent or not, but if it has pointers
	 * inside it does not. This function checks whether the basic type T has a method called noPointers,
	 * which in general reports whether T internally has pointers. If T has pointers an error
	 * is printed; if T does not have the method a WARNING is printed
	 *
	 * \tparam T type to check
	 *
	 */
	template<typename T> void checkType()
	{
		// if T is a primitive like int, long int, float, double, ... it makes sense
		// (pointers, l-references and r-references are not fundamentals)
		if (std::is_fundamental<T>::value == true)
			return;

		// if it is a pointer it makes no sense
		if (std::is_pointer<T>::value == true)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " is a pointer, sending pointer values makes no sense\n";

		// if it is an l-value reference it makes no sense
		if (std::is_lvalue_reference<T>::value == true)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " is an l-value reference, sending l-value references makes no sense\n";

		// if it is an r-value reference it makes no sense
		if (std::is_rvalue_reference<T>::value == true)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " is an r-value reference, sending r-value references makes no sense\n";

		// ... if not, check that T has a method called noPointers
		switch (check_no_pointers<T>::value())
		{
			case PNP::UNKNOWN:
			{
				std::cerr << "Warning: " << __FILE__ << ":" << __LINE__ << " impossible to check the type " << demangle(typeid(T).name()) << " please consider to add a static method \"static bool noPointers()\" \n" ;
				break;
			}
			case PNP::POINTERS:
			{
				std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " has pointers inside, sending pointers values has no sense\n";
				break;
			}
			default:
			{

			}
		}
	}
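
	/* A minimal sketch (illustrative, not part of this header) of a type that
	 * cooperates with checkType(): a POD aggregate exposing the static
	 * noPointers() method mentioned above:
	 *
	 *   struct my_particle
	 *   {
	 *       float pos[3];
	 *       float mass;
	 *
	 *       // reports that the type contains no pointers, so raw sends are safe
	 *       static bool noPointers() {return true;}
	 *   };
	 *
	 * With this method defined, checkType<my_particle>() should take neither the
	 * UNKNOWN nor the POINTERS branch, and therefore prints nothing.
	 */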

#endif

	/*! \brief Get the MPI_Communicator (or processor group) this VCluster is using
	 *
	 * \return MPI communicator
	 *
	 */
	MPI_Comm getMPIComm()
	{
		return MPI_COMM_WORLD;
	}

	/*! \brief Get the total number of processors
	 *
	 * \return the total number of processors
	 *
	 */
	size_t getProcessingUnits()
	{
		return m_size*numPE;
	}

	/*! \brief Get the total number of processors
	 *
	 * It is the same as getProcessingUnits()
	 *
	 * \see getProcessingUnits()
	 *
	 * \return the total number of processors
	 *
	 */
	size_t size()
	{
		return this->m_size*numPE;
	}

	/*! \brief Get the process unit id
	 *
	 * \return the process ID
	 *
	 */
	size_t getProcessUnitID()
	{
		return m_rank;
	}

	/*! \brief Get the process unit id
	 *
	 * It is the same as getProcessUnitID()
	 *
	 * \see getProcessUnitID()
	 *
	 * \return the process ID
	 *
	 */
	size_t rank()
	{
		return m_rank;
	}


	/*! \brief Sum the numbers across all processors and get the result
	 *
	 * \param num to reduce, input and output
	 *
	 */

	template<typename T> void sum(T & num)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		// reduce over MPI

		// Create one request
		req.add();

		// reduce
		MPI_IallreduceW<T>::reduce(num,MPI_SUM,req.last());
	}
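
	/* A minimal usage sketch for the asynchronous reductions (assuming `v_cl` is an
	 * initialized instance of this class; the variable names are illustrative):
	 *
	 *   float local = 1.0f;
	 *   v_cl.sum(local);    // enqueue the non-blocking reduction request
	 *   v_cl.execute();     // wait for completion; `local` now holds the global sum
	 *
	 * max() and min() below follow the same enqueue-then-execute() pattern.
	 */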

	/*! \brief Get the maximum number across all processors (or reduction with infinity norm)
	 *
	 * \param num to reduce
	 *
	 */
	template<typename T> void max(T & num)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif
		// reduce over MPI

		// Create one request
		req.add();

		// reduce
		MPI_IallreduceW<T>::reduce(num,MPI_MAX,req.last());
	}

	/*! \brief Get the minimum number across all processors (or reduction with infinity norm)
	 *
	 * \param num to reduce
	 *
	 */

	template<typename T> void min(T & num)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif
		// reduce over MPI

		// Create one request
		req.add();

		// reduce
		MPI_IallreduceW<T>::reduce(num,MPI_MIN,req.last());
	}

	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function. In this particular case the receiver knows from which processors it is going
	 * to receive.
	 *
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 messages of size 100 bytes to processor 1
	 * * 1 message of size 50 bytes to processor 6
	 * * 1 message of size 48 bytes to processor 7
	 * * 1 message of size 70 bytes to processor 8
	 *
	 *
	 * \param prc list of processors with which it should communicate
	 *        [1,1,6,7,8]
	 *
	 * \param data data to send for each processor; each element contains a pointer to some type T,
	 *        and this type T must have a method size() that returns the size of the data-structure
	 *
	 * \param prc_recv list of processors from which data is received
	 *
	 * \param recv_sz for each processor, the size of the data to receive
	 *
	 * \param msg_alloc This is a call-back whose purpose is to allocate space
	 *        for the incoming message and give back a valid pointer; supposing that this call-back has been triggered by
	 *        the processor of id 5 that wants to communicate with me a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters, in order:
	 *        * message size required to receive the message [100]
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id [5]
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *           every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, NONE (ignored at the moment)
	 *
	 */
	template<typename T> void sendrecvMultipleMessagesNBX(openfpm::vector< size_t > & prc,
			                                              openfpm::vector< T > & data,
														  openfpm::vector< size_t > prc_recv,
														  openfpm::vector< size_t > & recv_sz ,
														  void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,void *),
														  void * ptr_arg,
														  long int opt=NONE)
	{
		// Post the sends, then allocate the receive buffers via msg_alloc and post the receives

		for (size_t i = 0 ; i < prc.size() ; i++)
			send(prc.get(i),SEND_SPARSE + NBX_cnt,data.get(i).getPointer(),data.get(i).size());

		for (size_t i = 0 ; i < prc_recv.size() ; i++)
		{
			void * ptr_recv = msg_alloc(recv_sz.get(i),0,0,prc_recv.get(i),i,ptr_arg);

			recv(prc_recv.get(i),SEND_SPARSE + NBX_cnt,ptr_recv,recv_sz.get(i));
		}

		execute();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;
	}
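
	/* A usage sketch for the known-receiver variant above (the instance name `v_cl`,
	 * the buffer strategy and the ranks are illustrative assumptions, not part of
	 * this header):
	 *
	 *   // call-back: give NBX a place to store the message with request id `ri`
	 *   static void * msg_alloc(size_t msg_sz, size_t total_sz, size_t total_p,
	 *                           size_t proc, size_t ri, void * ptr_arg)
	 *   {
	 *       std::vector<std::vector<char>> & bufs = *(std::vector<std::vector<char>> *)ptr_arg;
	 *       bufs[ri].resize(msg_sz);
	 *       return bufs[ri].data();
	 *   }
	 *
	 *   openfpm::vector<size_t> prc;                  // destination ranks, e.g. [1,6]
	 *   openfpm::vector<openfpm::vector<char>> data;  // one payload per destination
	 *   openfpm::vector<size_t> prc_recv;             // known sender ranks
	 *   openfpm::vector<size_t> recv_sz;              // known size for each sender
	 *   std::vector<std::vector<char>> bufs(prc_recv.size());
	 *
	 *   v_cl.sendrecvMultipleMessagesNBX(prc,data,prc_recv,recv_sz,msg_alloc,&bufs);
	 */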

	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function. In this particular case the receiver knows from which processors it is going
	 * to receive.
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 messages of size 100 bytes to processor 1
	 * * 1 message of size 50 bytes to processor 6
	 * * 1 message of size 48 bytes to processor 7
	 * * 1 message of size 70 bytes to processor 8
	 *
	 * \param n_send number of sends for this processor [4]
	 *
	 * \param sz the array containing the size of the message for each processor
	 *        (zeros must not be present) [100,100,50,48,70]
	 *
	 * \param prc list of processors with which it should communicate
	 *        [1,1,6,7,8]
	 *
	 * \param ptr array that contains the pointers to the messages to send
	 *
	 * \param n_recv number of messages to receive
	 *
	 * \param prc_recv list of processors from which data is received
	 *
	 * \param sz_recv for each processor, the size of the data to receive
	 *
	 * \param msg_alloc This is a call-back whose purpose is to allocate space
	 *        for the incoming message and give back a valid pointer; supposing that this call-back has been triggered by
	 *        the processor of id 5 that wants to communicate with me a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters, in order:
	 *        * message size required to receive the message [100]
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id [5]
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *           every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, NONE (ignored at the moment)
	 *
	 */
	void sendrecvMultipleMessagesNBX(size_t n_send , size_t sz[], size_t prc[] , void * ptr[], size_t n_recv, size_t prc_recv[] , size_t sz_recv[] ,void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,void *), void * ptr_arg, long int opt=NONE)
	{
		// Post the sends, then allocate the receive buffers via msg_alloc and post the receives

		for (size_t i = 0 ; i < n_send ; i++)
			send(prc[i],SEND_SPARSE + NBX_cnt,ptr[i],sz[i]);

		for (size_t i = 0 ; i < n_recv ; i++)
		{
			void * ptr_recv = msg_alloc(sz_recv[i],0,0,prc_recv[i],i,ptr_arg);

			recv(prc_recv[i],SEND_SPARSE + NBX_cnt,ptr_recv,sz_recv[i]);
		}

		execute();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;
	}

	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 vectors of 100 integers to processor 1
	 * * 1 vector of 50 integers to processor 6
	 * * 1 vector of 48 integers to processor 7
	 * * 1 vector of 70 integers to processor 8
	 *
	 * \param prc list of processors you should communicate with [1,1,6,7,8]
	 *
	 * \param data vector containing the data to send [v=vector<vector<int>>, v.size()=4, T=vector<int>]; T at the moment
	 *          is only tested for vectors of 0 or more generic elements (without pointers)
	 *
	 * \param msg_alloc This is a call-back whose purpose is to allocate space
	 *        for the incoming messages and give back a valid pointer; supposing that this call-back has been triggered by
	 *        the processor of id 5 that wants to communicate with me a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters, in order:
	 *        * message size required to receive the message (100)
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id (5)
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *           every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, only NONE supported
	 *
	 */
	template<typename T> void sendrecvMultipleMessagesNBX(openfpm::vector< size_t > & prc, openfpm::vector< T > & data, void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,void *), void * ptr_arg, long int opt=NONE)
	{
#ifdef SE_CLASS1
		checkType<typename T::value_type>();
#endif
		// resize the pointer list
		ptr_send.resize(prc.size());
		sz_send.resize(prc.size());

		for (size_t i = 0 ; i < prc.size() ; i++)
		{
			ptr_send.get(i) = data.get(i).getPointer();
			sz_send.get(i) = data.get(i).size() * sizeof(typename T::value_type);
		}

		sendrecvMultipleMessagesNBX(prc.size(),(size_t *)sz_send.getPointer(),(size_t *)prc.getPointer(),(void **)ptr_send.getPointer(),msg_alloc,ptr_arg,opt);
	}



	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 messages of size 100 bytes to processor 1
	 * * 1 message of size 50 bytes to processor 6
	 * * 1 message of size 48 bytes to processor 7
	 * * 1 message of size 70 bytes to processor 8
	 *
	 * \param n_send number of sends for this processor [4]
	 *
	 * \param prc list of processors with which it should communicate
	 *        [1,1,6,7,8]
	 *
	 * \param sz the array containing the size of the message for each processor
	 *        (zeros must not be present) [100,100,50,48,70]
	 *
	 * \param ptr array that contains the pointers to the messages to send
	 *
	 * \param msg_alloc This is a call-back whose purpose is to allocate space
	 *        for the incoming message and give back a valid pointer; supposing that this call-back has been triggered by
	 *        the processor of id 5 that wants to communicate with me a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters, in order:
	 *        * message size required to receive the message [100]
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id [5]
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *           every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, NONE (ignored at the moment)
	 *
	 */
	void sendrecvMultipleMessagesNBX(size_t n_send , size_t sz[], size_t prc[] , void * ptr[], void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,void *), void * ptr_arg, long int opt = NONE)
	{
		if (stat.size() != 0 || req.size() != 0)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " this function must be called when no other requests are in progress. Please remember that if you use function like max(),sum(),send(),recv() check that you did not miss to call the function execute() \n";


		stat.clear();
		req.clear();
		// Do MPI_Issend

		for (size_t i = 0 ; i < n_send ; i++)
		{
			if (sz[i] != 0)
			{
				req.add();

#ifdef SE_CLASS2
				check_valid(ptr[i],sz[i]);
#endif

				tot_sent += sz[i];
				MPI_SAFE_CALL(MPI_Issend(ptr[i], sz[i], MPI_BYTE, prc[i], SEND_SPARSE + NBX_cnt, MPI_COMM_WORLD,&req.last()));
				log.logSend(prc[i]);
			}
		}

		size_t rid = 0;
		int flag = false;

		bool reached_bar_req = false;

		log.start(10);

		// Wait until all the sends are acknowledged
		do
		{

			// flag that notifies that this processor reached the barrier
			// Barrier request

			MPI_Status stat_t;
			int stat = false;
			MPI_SAFE_CALL(MPI_Iprobe(MPI_ANY_SOURCE,SEND_SPARSE + NBX_cnt,MPI_COMM_WORLD,&stat,&stat_t));

			// If I have an incoming message and it is related to this NBX communication
			if (stat == true)
			{
				// Get the message size
				int msize;
				MPI_SAFE_CALL(MPI_Get_count(&stat_t,MPI_BYTE,&msize));

				// Get the pointer to receive the message
				void * ptr = msg_alloc(msize,0,0,stat_t.MPI_SOURCE,rid,ptr_arg);

				// Log the receiving request
				log.logRecv(stat_t);

				rid++;

				// Check the pointer
#ifdef SE_CLASS2
				check_valid(ptr,msize);
#endif
				tot_recv += msize;
				MPI_SAFE_CALL(MPI_Recv(ptr,msize,MPI_BYTE,stat_t.MPI_SOURCE,SEND_SPARSE+NBX_cnt,MPI_COMM_WORLD,&stat_t));

#ifdef SE_CLASS2
				check_valid(ptr,msize);
#endif
			}

			// Check the status of all the MPI_Issend and call the barrier if finished

			if (reached_bar_req == false)
			{
				int flag = false;
				if (req.size() != 0)
				{MPI_SAFE_CALL(MPI_Testall(req.size(),&req.get(0),&flag,MPI_STATUSES_IGNORE));}
				else
					flag = true;

				// If all the sends have been completed
				if (flag == true)
				{MPI_SAFE_CALL(MPI_Ibarrier(MPI_COMM_WORLD,&bar_req));reached_bar_req = true;}
			}

			// Check if all processors reached the async barrier
			if (reached_bar_req)
			{MPI_SAFE_CALL(MPI_Test(&bar_req,&flag,&bar_stat));}

			// produce a report if communication gets stuck
			log.NBXreport(NBX_cnt,req,reached_bar_req,bar_stat);

		} while (flag == false);

		// Remove the executed requests

		req.clear();
		stat.clear();
		log.clear();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;
	}
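
	/* A usage sketch for the NBX primitive above, where receivers do not know in
	 * advance who will contact them (the buffer handling and the payload pointers
	 * buf_a/buf_b are illustrative assumptions):
	 *
	 *   static void * msg_alloc(size_t msg_sz, size_t total_sz, size_t total_p,
	 *                           size_t proc, size_t ri, void * ptr_arg)
	 *   {
	 *       std::vector<std::vector<char>> & bufs = *(std::vector<std::vector<char>> *)ptr_arg;
	 *       bufs.emplace_back(msg_sz);   // one buffer per incoming message
	 *       return bufs.back().data();
	 *   }
	 *
	 *   size_t sz[2]  = {100,50};        // message sizes in bytes
	 *   size_t prc[2] = {1,6};           // destination ranks
	 *   void * ptr[2] = {buf_a,buf_b};   // payload pointers prepared by the caller
	 *   std::vector<std::vector<char>> recv_bufs;
	 *
	 *   v_cl.sendrecvMultipleMessagesNBX(2,sz,prc,ptr,msg_alloc,&recv_bufs);
	 */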

	/*! \brief Send data to a processor
	 *
	 * \warning In order to avoid deadlocks every send must be coupled with a recv;
	 *          in case you want to send data without knowledge on the other side,
	 *          consider using sendRecvMultipleMessages
	 *
	 * \warning the operation is asynchronous; execute must be called to ensure it is executed
	 *
	 * \see sendRecvMultipleMessages
	 *
	 * \param proc processor id
	 * \param tag id
	 * \param mem buffer with the data to send
	 * \param sz size
	 *
	 * \return true if it succeeds, false otherwise
	 *
	 */
	bool send(size_t proc, size_t tag, const void * mem, size_t sz)
	{
		// send over MPI

		// Create one request
		req.add();

		// send
		MPI_IsendWB::send(proc,SEND_RECV_BASE + tag,mem,sz,req.last());

		return true;
	}


	/*! \brief Send data to a processor
	 *
	 * \warning In order to avoid deadlocks every send must be coupled with a recv;
	 *          in case you want to send data without knowledge on the other side,
	 *          consider using sendRecvMultipleMessages
	 *
	 * \warning the operation is asynchronous; execute must be called to ensure it is executed
	 *
	 * \see sendRecvMultipleMessages
	 *
	 * \param proc processor id
	 * \param tag id
	 * \param v buffer to send
	 *
	 * \return true if it succeeds, false otherwise
	 *
	 */
	template<typename T, typename Mem, typename gr> bool send(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		// send over MPI

		// Create one request
		req.add();

		// send
		MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last());

		return true;
	}

	/*! \brief Recv data from a processor
	 *
	 * \warning In order to avoid deadlocks every recv must be coupled with a send;
	 *          in case you want to send data without knowledge on the other side,
	 *          consider using sendrecvMultipleMessagesNBX
	 *
	 * \warning the operation is asynchronous; execute must be called to ensure it is executed
	 *
	 * \see sendrecvMultipleMessagesNBX
	 *
	 * \param proc processor id
	 * \param tag id
	 * \param v buffer to receive into
	 * \param sz size of the buffer
	 *
	 * \return true if it succeeds, false otherwise
	 *
	 */
	bool recv(size_t proc, size_t tag, void * v, size_t sz)
	{
		// recv over MPI

		// Create one request
		req.add();

		// receive
		MPI_IrecvWB::recv(proc,SEND_RECV_BASE + tag,v,sz,req.last());

		return true;
	}
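
	/* A minimal sketch of a coupled send/recv pair between ranks 0 and 1 (the tag
	 * and sizes are illustrative); both calls only enqueue requests, and execute()
	 * completes them:
	 *
	 *   char out[64], in[64];
	 *   if (v_cl.getProcessUnitID() == 0)
	 *   {v_cl.send(1,0,out,64); v_cl.recv(1,0,in,64);}
	 *   else if (v_cl.getProcessUnitID() == 1)
	 *   {v_cl.send(0,0,out,64); v_cl.recv(0,0,in,64);}
	 *   v_cl.execute();
	 */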

    /*! \brief Recv data from a processor
     *
     * \warning In order to avoid deadlocks every recv must be coupled with a send;
     *          in case you want to send data without knowledge on the other side,
     *          consider using sendrecvMultipleMessagesNBX
     *
     * \warning the operation is asynchronous; execute must be called to ensure it is executed
     *
     * \see sendrecvMultipleMessagesNBX
     *
     * \param proc processor id
     * \param tag id
     * \param v vector to receive into
     *
     * \return true if it succeeds, false otherwise
     *
     */
    template<typename T, typename Mem, typename gr> bool recv(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
    {
#ifdef SE_CLASS1
            checkType<T>();
#endif

            // recv over MPI

            // Create one request
            req.add();

            // receive
            MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last());

            return true;
    }

	/*! \brief Gather the data from all processors
	 *
	 * send a primitive data T and receive the same primitive T from all the other processors
	 *
	 * \warning the operation is asynchronous; execute must be called to ensure it is executed
	 *
	 * \param v vector to receive into (automatically resized)
	 * \param send data to send
	 *
	 * \return true if it succeeds, false otherwise
	 *
	 */
	template<typename T, typename Mem, typename gr> bool allGather(T & send, openfpm::vector<T,Mem,gr> & v)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		// Create one request
		req.add();

		// Number of processors
		v.resize(getProcessingUnits());

		// gather
		MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last());

		return true;
	}
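
	/* A minimal usage sketch for allGather (assuming an initialized instance `v_cl`):
	 *
	 *   size_t id = v_cl.getProcessUnitID();
	 *   openfpm::vector<size_t> ids;   // resized internally to getProcessingUnits()
	 *   v_cl.allGather(id,ids);
	 *   v_cl.execute();                // ids.get(i) now holds the rank of processor i
	 */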

	/*! \brief Broadcast the data to all processors
	 *
	 * broadcast a vector of primitives.
	 *
	 * \warning the operation is asynchronous; execute must be called to ensure the operation is executed
	 *
	 * \warning the non-root processors must resize the vector to the exact receive size. This means that
	 *          each processor must know a priori the receiving size
	 *
	 * \param v vector to send in the case of the root processor and vector where to receive in the case of
	 *          non-root
	 * \param root processor (who broadcasts)
	 *
	 * \return true if it succeeds, false otherwise
	 *
	 */
	template<typename T, typename Mem, typename gr> bool Bcast(openfpm::vector<T,Mem,gr> & v, size_t root)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		// Create one request
		req.add();

		// broadcast
		MPI_IBcastW<T>::bcast(root,v,req.last());

		return true;
	}
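
	/* A minimal usage sketch for Bcast; the non-root processors must pre-size the
	 * vector (here every processor is assumed to know the size, 10):
	 *
	 *   openfpm::vector<float> v;
	 *   v.resize(10);                       // root fills it, non-root reserves space
	 *   if (v_cl.getProcessUnitID() == 0)
	 *   {for (size_t i = 0 ; i < v.size() ; i++) {v.get(i) = i;}}
	 *   v_cl.Bcast(v,0);
	 *   v_cl.execute();                     // every processor now holds the same values
	 */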

	/*! \brief Execute all the requests
	 *
	 */
	void execute()
	{
		// if req == 0 return
		if (req.size() == 0)
			return;

		// Wait for all the requests
		stat.resize(req.size());
		MPI_SAFE_CALL(MPI_Waitall(req.size(),&req.get(0),&stat.get(0)));

		// Remove the executed requests and statuses
		req.clear();
		stat.clear();
	}

	/*! \brief Release the buffer used for communication
	 *
	 *
	 */
	void clear()
	{
		recv_buf.clear();
	}
};
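
/* A compact end-to-end sketch of how this class is typically driven (the instance
 * name and the values are illustrative; applications usually go through the derived
 * Vcluster interface rather than constructing Vcluster_base directly):
 *
 *   Vcluster_base v_cl(&argc,&argv);   // initializes MPI once, globally
 *
 *   float f = v_cl.rank();
 *   v_cl.sum(f);                       // enqueue the reduction
 *   v_cl.execute();                    // complete it; f = sum of all the ranks
 *
 *   v_cl.clear();                      // release the receive buffers
 */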




#endif