#ifndef VCLUSTER_BASE_HPP_
#define VCLUSTER_BASE_HPP_

#include "config.h"
#include <mpi.h>
#include <mpi-ext.h>
#include "MPI_wrapper/MPI_util.hpp"
#include "Vector/map_vector.hpp"
#include "MPI_wrapper/MPI_IallreduceW.hpp"
#include "MPI_wrapper/MPI_IrecvW.hpp"
#include "MPI_wrapper/MPI_IsendW.hpp"
#include "MPI_wrapper/MPI_IAllGather.hpp"
#include "MPI_wrapper/MPI_IBcastW.hpp"
#include <exception>
#ifdef DEBUG
#include "util/check_no_pointers.hpp"
#include "util/util_debug.hpp"
#endif
#include "util/Vcluster_log.hpp"
#include "memory/BHeapMemory.hpp"
#include "Packer_Unpacker/has_max_prop.hpp"
#include "data_type/aggregate.hpp"
#if defined(CUDA_GPU) && defined(__NVCC__)
#include "util/cuda/moderngpu/launch_box.hxx"
#endif

#ifdef HAVE_PETSC
#include <petscvec.h>
#endif

#define MSG_LENGTH 1024
#define MSG_SEND_RECV 1025
#define SEND_SPARSE 4096
#define NONE 1
#define NEED_ALL_SIZE 2

#define SERIVCE_MESSAGE_TAG 16384
#define SEND_RECV_BASE 8192
#define GATHER_BASE 24576

#define RECEIVE_KNOWN 4
#define KNOWN_ELEMENT_OR_BYTE 8
#define MPI_GPU_DIRECT 16

// number of vcluster instances
extern size_t n_vcluster;
// Global MPI initialization
extern bool global_mpi_init;
// initialization flag
extern bool ofp_initialized;
// total number of bytes sent and received
extern size_t tot_sent;
extern size_t tot_recv;

///////////////////// Post functions /////////////

template<typename T> void assign(T * ptr1, T * ptr2)
{
	*ptr1 = *ptr2;
}


//! temporary buffer for reductions
union red
{
	//! char
	char c;
	//! unsigned char
	unsigned char uc;
	//! signed short
	short s;
	//! unsigned short
	unsigned short us;
	//! integer
	int i;
	//! unsigned integer
	unsigned int ui;
	//! float
	float f;
	//! double
	double d;
};

/*! \brief This class virtualizes a cluster of PCs as a set of processes that communicate
 *
 * At the moment it exposes an MPI-like interface that is more type aware and simpler to use.
 * It also provides more complex communication functionality like **Dynamic Sparse Data Exchange**
 *
 * Currently VCluster exposes a computation-driven parallelism (MPI-like), with a plan to extend it to
 * communication-driven parallelism
 *
 * * In computation-driven parallelism, the program computes and then communicates with the other processors
 *
 * * In communication-driven parallelism (Charm++ or HPX), the program receives messages, and these received
 *   messages trigger computation
 *
 * ### An example of sending and receiving plain buffers
 * \snippet VCluster_unit_test_util.hpp Send and receive plain buffer data
 * ### An example of sending vectors of primitives (T=float,double,long int,...)
 * \snippet VCluster_unit_test_util.hpp Sending and receiving primitives
 * ### An example of sending vectors of complex objects
 * \snippet VCluster_unit_test_util.hpp Send and receive vectors of complex
 * ### An example of gathering numbers from all processors
 * \snippet VCluster_unit_test_util.hpp allGather numbers
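 *
 * ### A minimal usage sketch (an illustrative addition, assuming openfpm_init()/create_vcluster() from OpenFPM)
 * \code
 * #include <iostream>
 * #include "VCluster/VCluster.hpp"
 *
 * int main(int argc, char* argv[])
 * {
 *     openfpm_init(&argc,&argv);
 *     Vcluster<> & v_cl = create_vcluster();
 *
 *     std::cout << "rank " << v_cl.rank() << " of " << v_cl.size() << "\n";
 *
 *     openfpm_finalize();
 * }
 * \endcode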
 *
 */
template<typename InternalMemory>
class Vcluster_base
{
	//! log file
	Vcluster_log log;

	//! NBX has a potential pitfall that must be addressed:
	//! NBX sends all the messages and probes for incoming messages;
	//! if there is an incoming message it receives it, producing
	//! an acknowledgement notification on the sending processor.
	//! When all its sends have been acknowledged, the processor calls MPI_Ibarrier;
	//! when all the processors have called MPI_Ibarrier, all sends have been received.
	//! While the processors are waiting for the MPI_Ibarrier to complete, all processors
	//! are still probing for incoming messages. Unfortunately some processors
	//! can quit the MPI_Ibarrier before others, which means that some
	//! processors can exit the probing status before others; these processors can in theory
	//! start new communications while the other processors are still in probing status, producing
	//! a wrong send/recv association. To
	//! resolve this problem an incremental NBX_cnt is used as message TAG to distinguish whether
	//! messages come from other sends or subsequent NBX procedures
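	//! (concretely, in the implementation below, NBX round k uses the tag range
	//! [SEND_SPARSE + k*131072, SEND_SPARSE + (k+1)*131072) for its messages)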
	size_t NBX_cnt;

	//! temporary vector used for meta-communication
	//! ( or meta-data before the real communication )
	openfpm::vector<size_t> proc_com;

	//! vector that contains the scatter map (it is basically an array of ones)
	openfpm::vector<int> map_scatter;

	//! vector of MPI requests
	openfpm::vector<MPI_Request> req;

	//! vector of MPI statuses
	openfpm::vector<MPI_Status> stat;

	//! vector of functions to execute after all the requests have been performed
	std::vector<int> post_exe;

#if defined(CUDA_GPU) && defined(__NVCC__)

	//! standard context for mgpu
	mgpu::standard_context_t * context;

#else

	void * context = NULL;

#endif

	// Object array


	// Single objects

	//! number of processes
	int m_size;
	//! rank of this process
	int m_rank;

	//! number of processing units per process
	int numPE = 1;

	/*! This buffer is a temporary buffer for reductions
	 *
	 * MPI_Iallreduce does not accept the send and receive buffers being the same;
	 * r is used to overcome this problem (it is given as the second parameter),
	 * and after the execution the data is copied back
	 *
	 */
	std::vector<red> r;

	//! vector of pointers to the send buffers
	openfpm::vector<void *> ptr_send;

	//! vector of the sizes of the send buffers
	openfpm::vector<size_t> sz_send;

	//! barrier request
	MPI_Request bar_req;

	//! barrier status
	MPI_Status bar_stat;

	//! disable operator=
	Vcluster_base & operator=(const Vcluster_base &)	{return *this;};

	//! disable copy constructor
	Vcluster_base(const Vcluster_base &)
	:NBX_cnt(0)
	{};

protected:

	//! Receive buffers
	openfpm::vector<BMemory<InternalMemory>> recv_buf;

	//! tags of the received messages
	openfpm::vector<size_t> tags;

public:

	// Finalize the MPI program
	~Vcluster_base()
	{
#ifdef SE_CLASS2
		check_delete(this);
#endif
		n_vcluster--;

		// if there are no other vcluster instances finalize
		if (n_vcluster == 0)
		{
			int already_finalised;

			MPI_Finalized(&already_finalised);
			if (!already_finalised)
			{
				if (MPI_Finalize() != 0)
				{
					std::cerr << __FILE__ << ":" << __LINE__  << " MPI_Finalize FAILED \n";
				}
			}
		}

#if defined(CUDA_GPU) && defined(__NVCC__)

		delete context;

#endif
	}

	/*! \brief Virtual cluster constructor
	 *
	 * \param argc pointer to the argument count passed to the program
	 * \param argv pointer to the argument vector passed to the program
	 *
	 */
	Vcluster_base(int *argc, char ***argv)
	:NBX_cnt(0)
	{
#ifdef SE_CLASS2
		check_new(this,8,VCLUSTER_EVENT,PRJ_VCLUSTER);
#endif

		n_vcluster++;

		int already_initialised;
		MPI_Initialized(&already_initialised);

		// Check if MPI is already initialized
		if (!already_initialised)
		{

			MPI_Init(argc,argv);
		}

		// Get the total number of processes
		// and the rank of this process

		MPI_Comm_size(MPI_COMM_WORLD, &m_size);
		MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);

#ifdef SE_CLASS2
			process_v_cl = m_rank;
#endif

		// create the scatter map and fill it with ones
		map_scatter.resize(m_size);

		for (size_t i = 0 ; i < map_scatter.size() ; i++)
		{
			map_scatter.get(i) = 1;
		}

		// open the log file
		log.openLog(m_rank);

		// Initialize bar_req
		bar_req = MPI_Request();
		bar_stat = MPI_Status();

#if defined(CUDA_GPU) && defined(__NVCC__)

		context = new mgpu::standard_context_t();

#endif
	}

#ifdef SE_CLASS1

	/*! \brief Check for wrong types
	 *
	 * In general we do not know whether a type T makes sense to be sent, but if it has
	 * pointers inside it does not. This function checks whether the basic type T has a
	 * method called noPointers, which reports whether T internally contains pointers.
	 * If T has pointers an error is printed; if T does not have the method a WARNING
	 * is printed.
	 *
	 * \tparam T type to check
	 *
	 */
	template<typename T> void checkType()
	{
		// if T is a primitive like int, long int, float, double, ... it makes sense
		// (pointers, l-references and r-references are not fundamentals)
		if (std::is_fundamental<T>::value == true)
			return;

		// if it is a pointer it makes no sense
		if (std::is_pointer<T>::value == true)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " is a pointer, sending pointer values makes no sense\n";

		// if it is an l-value reference it makes no sense
		if (std::is_lvalue_reference<T>::value == true)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " is an l-value reference, sending references makes no sense\n";

		// if it is an r-value reference it makes no sense
		if (std::is_rvalue_reference<T>::value == true)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " is an r-value reference, sending references makes no sense\n";

		// ... if not, check that T has a method called noPointers
		switch (check_no_pointers<T>::value())
		{
			case PNP::UNKNOWN:
			{
				std::cerr << "Warning: " << __FILE__ << ":" << __LINE__ << " impossible to check the type " << demangle(typeid(T).name()) << " please consider to add a static method \"static bool noPointers()\" \n" ;
				break;
			}
			case PNP::POINTERS:
			{
				std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " the type " << demangle(typeid(T).name()) << " has pointers inside, sending pointers values has no sense\n";
				break;
			}
			default:
			{

			}
		}
	}

#endif

#if defined(CUDA_GPU) && defined(__NVCC__)

	/*! \brief If NVIDIA CUDA is activated, return an mgpu context
	 *
	 *
	 */
	mgpu::standard_context_t & getmgpuContext()
	{
		if (context == NULL)
		{
			std::cout << __FILE__ << ":" << __LINE__ << " error: it seems that the modern gpu context is not initialized."
					                                    " Either a compatible working cuda device has not been found, or openfpm_init has been called in a file that was not compiled with NVCC" << std::endl;
		}

		return *context;
	}

#endif

	/*! \brief Get the MPI_Communicator (or processor group) this VCluster is using
	 *
	 * \return MPI communicator
	 *
	 */
	MPI_Comm getMPIComm()
	{
		return MPI_COMM_WORLD;
	}

	/*! \brief Get the total number of processors
	 *
	 * \return the total number of processors
	 *
	 */
	size_t getProcessingUnits()
	{
		return m_size*numPE;
	}

	/*! \brief Get the total number of processors
	 *
	 * It is the same as getProcessingUnits()
	 *
	 * \see getProcessingUnits()
	 *
	 * \return the total number of processors
	 *
	 */
	size_t size()
	{
		return this->m_size*numPE;
	}

	/*! \brief Get the process unit id
	 *
	 * \return the process ID
	 *
	 */
	size_t getProcessUnitID()
	{
		return m_rank;
	}

	/*! \brief Get the process unit id
	 *
	 * It is the same as getProcessUnitID()
	 *
	 * \see getProcessUnitID()
	 *
	 * \return the process ID
	 *
	 */
	size_t rank()
	{
		return m_rank;
	}


	/*! \brief Sum the numbers across all processors and get the result
	 *
	 * \param num to reduce, input and output
	 *
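	 * A minimal sketch (an illustrative addition; v_cl is assumed to be the VCluster instance):
	 * \code
	 * float num = 1.0f;
	 * v_cl.sum(num);    // queues an asynchronous MPI_Iallreduce
	 * v_cl.execute();   // after execute(), num holds the sum over all processors
	 * \endcode
	 *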
	 */

	template<typename T> void sum(T & num)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		// reduce over MPI

		// Create one request
		req.add();

		// reduce
		MPI_IallreduceW<T>::reduce(num,MPI_SUM,req.last());
	}

	/*! \brief Get the maximum number across all processors (or reduction with infinity norm)
	 *
	 * \param num to reduce
	 *
	 */
	template<typename T> void max(T & num)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif
		// reduce over MPI

		// Create one request
		req.add();

		// reduce
		MPI_IallreduceW<T>::reduce(num,MPI_MAX,req.last());
	}

	/*! \brief Get the minimum number across all processors (or reduction with infinity norm)
	 *
	 * \param num to reduce
	 *
	 */

	template<typename T> void min(T & num)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif
		// reduce over MPI

		// Create one request
		req.add();

		// reduce
		MPI_IallreduceW<T>::reduce(num,MPI_MIN,req.last());
	}

	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function. In this particular case the receiver knows from which processors it is going
	 * to receive.
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 messages of size 100 bytes to processor 1
	 * * 1 message of size 50 bytes to processor 6
	 * * 1 message of size 48 bytes to processor 7
	 * * 1 message of size 70 bytes to processor 8
	 *
	 * \param prc list of processors with which it should communicate
	 *        [1,1,6,7,8]
	 *
	 * \param data data to send for each processor; each element contains a pointer to some type T,
	 *        and this type T must have a method size() that returns the size of the data-structure
	 *
	 * \param prc_recv list of processors from which data is received
	 *
	 * \param recv_sz for each processor indicates the size of the data received
	 *
	 * \param msg_alloc call-back with the purpose of allocating space
	 *        for the incoming message and giving back a valid pointer. Supposing that this call-back has been triggered by
	 *        the processor of id 5, which wants to communicate a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters,
	 *        in order:
	 *        * message size required to receive the message [100]
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id [5]
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *          every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, NONE (ignored at the moment)
	 *
	 */
	template<typename T> void sendrecvMultipleMessagesNBX(openfpm::vector< size_t > & prc,
			                                              openfpm::vector< T > & data,
														  openfpm::vector< size_t > prc_recv,
														  openfpm::vector< size_t > & recv_sz ,
														  void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,size_t,void *),
														  void * ptr_arg,
														  long int opt=NONE)
	{
		// Allocate the buffers

		for (size_t i = 0 ; i < prc.size() ; i++)
		{send(prc.get(i),SEND_SPARSE + NBX_cnt,data.get(i).getPointer(),data.get(i).size());}

		for (size_t i = 0 ; i < prc_recv.size() ; i++)
		{
			void * ptr_recv = msg_alloc(recv_sz.get(i),0,0,prc_recv.get(i),i,SEND_SPARSE + NBX_cnt,ptr_arg);

			recv(prc_recv.get(i),SEND_SPARSE + NBX_cnt,ptr_recv,recv_sz.get(i));
		}

		execute();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;
	}


	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 vectors of 100 integers to processor 1
	 * * 1 vector of 50 integers to processor 6
	 * * 1 vector of 48 integers to processor 7
	 * * 1 vector of 70 integers to processor 8
	 *
	 * \param prc list of processors you should communicate with [1,1,6,7,8]
	 *
	 * \param data vector containing the data to send [v=vector<vector<int>>, v.size()=4, T=vector<int>]; T at the moment
	 *          is only tested for vectors of 0 or more generic elements (without pointers)
	 *
	 * \param msg_alloc call-back with the purpose of allocating space
	 *        for the incoming messages and giving back a valid pointer. Supposing that this call-back has been triggered by
	 *        the processor of id 5, which wants to communicate a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters,
	 *        in order:
	 *        * message size required to receive the message (100)
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id (5)
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *          every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the call-back function specified
	 *
	 * \param opt options, only NONE supported
	 *
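	 * A usage sketch (an illustrative addition; msg_alloc is a user call-back like the one
	 * sketched in the raw-pointer overload further below, and v_cl is the VCluster instance):
	 * \code
	 * openfpm::vector<size_t> prc;
	 * openfpm::vector<openfpm::vector<int>> data;
	 * prc.add(1); data.add(); data.last().resize(100);   // 100 integers to processor 1
	 * prc.add(6); data.add(); data.last().resize(50);    // 50 integers to processor 6
	 *
	 * std::vector<std::vector<char>> recv_storage;
	 * v_cl.sendrecvMultipleMessagesNBX(prc,data,msg_alloc,&recv_storage);
	 * \endcode
	 *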
	 */
	template<typename T>
	void sendrecvMultipleMessagesNBX(openfpm::vector< size_t > & prc,
									 openfpm::vector< T > & data,
									 void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,size_t,void *),
									 void * ptr_arg, long int opt=NONE)
	{
#ifdef SE_CLASS1
		checkType<typename T::value_type>();
#endif
		// resize the pointer list
		ptr_send.resize(prc.size());
		sz_send.resize(prc.size());

		for (size_t i = 0 ; i < prc.size() ; i++)
		{
			ptr_send.get(i) = data.get(i).getPointer();
			sz_send.get(i) = data.get(i).size() * sizeof(typename T::value_type);
		}

		sendrecvMultipleMessagesNBX(prc.size(),(size_t *)sz_send.getPointer(),(size_t *)prc.getPointer(),(void **)ptr_send.getPointer(),msg_alloc,ptr_arg,opt);
	}

	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function. In this particular case the receiver knows from which processors it is going
	 * to receive.
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 messages of size 100 bytes to processor 1
	 * * 1 message of size 50 bytes to processor 6
	 * * 1 message of size 48 bytes to processor 7
	 * * 1 message of size 70 bytes to processor 8
	 *
	 * \param n_send number of sends for this processor [4]
	 *
	 * \param prc list of processors with which it should communicate
	 *        [1,1,6,7,8]
	 *
	 * \param sz array containing the size of the message for each processor
	 *        (zeros must not be present) [100,100,50,48,70]
	 *
	 * \param ptr array containing the pointers to the messages to send
	 *
	 * \param n_recv number of receives for this processor
	 *
	 * \param prc_recv list of processors from which data is received
	 *
	 * \param sz_recv for each processor the size of the data to receive
	 *
	 * \param msg_alloc call-back with the purpose of allocating space
	 *        for the incoming message and giving back a valid pointer. Supposing that this call-back has been triggered by
	 *        the processor of id 5, which wants to communicate a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters,
	 *        in order:
	 *        * message size required to receive the message [100]
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id [5]
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *          every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, NONE (ignored at the moment)
	 *
	 */
	void sendrecvMultipleMessagesNBX(size_t n_send , size_t sz[],
			                         size_t prc[] , void * ptr[],
			                         size_t n_recv, size_t prc_recv[] ,
			                         size_t sz_recv[] ,void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t, size_t,void *),
			                         void * ptr_arg, long int opt=NONE)
	{
		// Allocate the buffers

		for (size_t i = 0 ; i < n_send ; i++)
			send(prc[i],SEND_SPARSE + NBX_cnt,ptr[i],sz[i]);

		for (size_t i = 0 ; i < n_recv ; i++)
		{
			void * ptr_recv = msg_alloc(sz_recv[i],0,0,prc_recv[i],i,SEND_SPARSE + NBX_cnt,ptr_arg);

			recv(prc_recv[i],SEND_SPARSE + NBX_cnt,ptr_recv,sz_recv[i]);
		}

		execute();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;
	}

	//! temporary buffer for the receive sizes, used by the NBX call where the receive size is unknown
	openfpm::vector<size_t> sz_recv_tmp;

	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function. In this particular case the receiver knows from which processors it is going
	 * to receive, but does not know the size.
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 messages of size 100 bytes to processor 1
	 * * 1 message of size 50 bytes to processor 6
	 * * 1 message of size 48 bytes to processor 7
	 * * 1 message of size 70 bytes to processor 8
	 *
	 * \param n_send number of sends for this processor [4]
	 *
	 * \param prc list of processors with which it should communicate
	 *        [1,1,6,7,8]
	 *
	 * \param sz array containing the size of the message for each processor
	 *        (zeros must not be present) [100,100,50,48,70]
	 *
	 * \param ptr array containing the pointers to the messages to send
	 *
	 * \param n_recv number of receives for this processor
	 *
	 * \param prc_recv list of processors from which data is received
	 *
	 * \param msg_alloc call-back with the purpose of allocating space
	 *        for the incoming message and giving back a valid pointer. Supposing that this call-back has been triggered by
	 *        the processor of id 5, which wants to communicate a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters,
	 *        in order:
	 *        * message size required to receive the message [100]
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id [5]
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *          every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, NONE (ignored at the moment)
	 *
	 */
	void sendrecvMultipleMessagesNBX(size_t n_send , size_t sz[], size_t prc[] ,
									 void * ptr[], size_t n_recv, size_t prc_recv[] ,
									 void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,size_t,void *),
									 void * ptr_arg, long int opt=NONE)
	{
		sz_recv_tmp.resize(n_recv);

		// First we understand the receive size for each processor

		for (size_t i = 0 ; i < n_send ; i++)
		{send(prc[i],SEND_SPARSE + NBX_cnt,&sz[i],sizeof(size_t));}

		for (size_t i = 0 ; i < n_recv ; i++)
		{recv(prc_recv[i],SEND_SPARSE + NBX_cnt,&sz_recv_tmp.get(i),sizeof(size_t));}

		execute();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;

		// Allocate the buffers

		for (size_t i = 0 ; i < n_send ; i++)
		{send(prc[i],SEND_SPARSE + NBX_cnt,ptr[i],sz[i]);}

		for (size_t i = 0 ; i < n_recv ; i++)
		{
			void * ptr_recv = msg_alloc(sz_recv_tmp.get(i),0,0,prc_recv[i],i,0,ptr_arg);

			recv(prc_recv[i],SEND_SPARSE + NBX_cnt,ptr_recv,sz_recv_tmp.get(i));
		}

		execute();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;
	}

	/*! \brief Send and receive multiple messages
	 *
	 * It sends multiple messages to a set of processors and receives
	 * multiple messages from another set of processors; all the processors must call this
	 * function
	 *
	 * Suppose the following situation: the calling processor wants to communicate
	 * * 2 messages of size 100 bytes to processor 1
	 * * 1 message of size 50 bytes to processor 6
	 * * 1 message of size 48 bytes to processor 7
	 * * 1 message of size 70 bytes to processor 8
	 *
	 * \param n_send number of sends for this processor [4]
	 *
	 * \param prc list of processors with which it should communicate
	 *        [1,1,6,7,8]
	 *
	 * \param sz array containing the size of the message for each processor
	 *        (zeros must not be present) [100,100,50,48,70]
	 *
	 * \param ptr array containing the pointers to the messages to send
	 *
	 * \param msg_alloc call-back with the purpose of allocating space
	 *        for the incoming message and giving back a valid pointer. Supposing that this call-back has been triggered by
	 *        the processor of id 5, which wants to communicate a message of size 100 bytes, the call-back will have
	 *        the following 6 parameters,
	 *        in order:
	 *        * message size required to receive the message [100]
	 *        * total message size to receive from all the processors (NBX does not provide this information)
	 *        * the total number of processors that want to communicate with you (NBX does not provide this information)
	 *        * processor id [5]
	 *        * ri request id (it is an id that goes from 0 to total_p, and is incremented
	 *          every time message_alloc is called)
	 *        * void pointer, parameter for additional data to pass to the call-back
	 *
	 * \param ptr_arg data passed to the specified call-back function
	 *
	 * \param opt options, NONE (ignored at the moment)
	 *
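	 * A sketch of such a call-back (an illustrative addition, assuming ptr_arg points to a
	 * std::vector<std::vector<char>> used as receive storage):
	 * \code
	 * void * msg_alloc(size_t msg_sz, size_t total_sz, size_t total_p,
	 *                  size_t proc_id, size_t ri, size_t tag, void * ptr_arg)
	 * {
	 *     std::vector<std::vector<char>> & buf = *static_cast<std::vector<std::vector<char>> *>(ptr_arg);
	 *     if (buf.size() <= ri) {buf.resize(ri+1);}
	 *     buf[ri].resize(msg_sz);   // reserve msg_sz bytes for message number ri
	 *     return buf[ri].data();    // valid pointer where the message will be received
	 * }
	 * \endcode
	 *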
	 */
	void sendrecvMultipleMessagesNBX(size_t n_send , size_t sz[],
			                         size_t prc[] , void * ptr[],
			                         void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,size_t,void *),
			                         void * ptr_arg, long int opt = NONE)
	{
		if (stat.size() != 0 || req.size() != 0)
			std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " this function must be called when no other requests are in progress. Please remember that if you use function like max(),sum(),send(),recv() check that you did not miss to call the function execute() \n";


		stat.clear();
		req.clear();
		// Do MPI_Issend

		for (size_t i = 0 ; i < n_send ; i++)
		{
			if (sz[i] != 0)
			{
				req.add();

#ifdef SE_CLASS2
				check_valid(ptr[i],sz[i]);
#endif

				tot_sent += sz[i];
				MPI_SAFE_CALL(MPI_Issend(ptr[i], sz[i], MPI_BYTE, prc[i], SEND_SPARSE + NBX_cnt*131072 + i, MPI_COMM_WORLD,&req.last()));
				log.logSend(prc[i]);
			}
		}

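		// rid counts the messages received in this NBX round (it is passed to msg_alloc as the
		// request id); reached_bar_req records whether this processor has entered the non-blocking barrier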
		size_t rid = 0;
		int flag = false;

		bool reached_bar_req = false;

		log.start(10);

		// Wait until all the sends have been acknowledged
		do
		{

			// flag that notifies that this processor reached the barrier
			// Barrier request

			MPI_Status stat_t;
			int stat = false;
			MPI_SAFE_CALL(MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG/*SEND_SPARSE + NBX_cnt*/,MPI_COMM_WORLD,&stat,&stat_t));

			// If I have an incoming message and it is related to this NBX communication
			if (stat == true)
			{
				int msize;

				// Get the message tag and size
				MPI_SAFE_CALL(MPI_Get_count(&stat_t,MPI_BYTE,&msize));

				// Ok, we check whether the TAG comes from one of our send TAGs
				if (stat_t.MPI_TAG >= (int)(SEND_SPARSE + NBX_cnt*131072) && stat_t.MPI_TAG < (int)(SEND_SPARSE + (NBX_cnt + 1)*131072))
				{
					// Get the pointer to receive the message
					void * ptr = msg_alloc(msize,0,0,stat_t.MPI_SOURCE,rid,stat_t.MPI_TAG,ptr_arg);

					// Log the receiving request
					log.logRecv(stat_t);

					rid++;

					// Check the pointer
#ifdef SE_CLASS2
					check_valid(ptr,msize);
#endif
					tot_recv += msize;
					MPI_SAFE_CALL(MPI_Recv(ptr,msize,MPI_BYTE,stat_t.MPI_SOURCE,stat_t.MPI_TAG,MPI_COMM_WORLD,&stat_t));

#ifdef SE_CLASS2
					check_valid(ptr,msize);
#endif
				}
			}

			// Check the status of all the MPI_issend and call the barrier if finished

			if (reached_bar_req == false)
			{
				int flag = false;
				if (req.size() != 0)
				{MPI_SAFE_CALL(MPI_Testall(req.size(),&req.get(0),&flag,MPI_STATUSES_IGNORE));}
				else
					flag = true;

				// If all sends have been completed
				if (flag == true)
				{MPI_SAFE_CALL(MPI_Ibarrier(MPI_COMM_WORLD,&bar_req));reached_bar_req = true;}
			}

			// Check if all processors reached the async barrier
			if (reached_bar_req)
			{MPI_SAFE_CALL(MPI_Test(&bar_req,&flag,&bar_stat));}

			// produce a report if the communication gets stuck
			log.NBXreport(NBX_cnt,req,reached_bar_req,bar_stat);

		} while (flag == false);

		// Remove the executed requests

		req.clear();
		stat.clear();
		log.clear();

		// Circular counter
		NBX_cnt = (NBX_cnt + 1) % 1024;
	}

	/*! \brief Send data to a processor
	 *
	 * \warning In order to avoid deadlock every send must be coupled with a recv;
	 *          in case you want to send data without knowledge from the other side
	 *          consider using sendRecvMultipleMessages
	 *
	 * \warning the operation is asynchronous; execute() must be called to ensure it is executed
	 *
	 * \see sendRecvMultipleMessages
	 *
	 * \param proc processor id
	 * \param tag id
	 * \param mem buffer with the data to send
	 * \param sz size
	 *
	 * \return true if it succeeds, false otherwise
	 *
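	 * A sketch of a matched pair (an illustrative addition; v_cl is the VCluster instance,
	 * run with at least two processors):
	 * \code
	 * char buf[8] = {0};
	 * if (v_cl.rank() == 0)      {v_cl.send(1,0,"message",8);}
	 * else if (v_cl.rank() == 1) {v_cl.recv(0,0,buf,8);}
	 * v_cl.execute();
	 * \endcode
	 *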
	 */
	bool send(size_t proc, size_t tag, const void * mem, size_t sz)
	{
		// send over MPI

		// Create one request
		req.add();

		// send
		MPI_IsendWB::send(proc,SEND_RECV_BASE + tag,mem,sz,req.last());

		return true;
	}


	/*! \brief Send data to a processor
	 *
	 * \warning In order to avoid deadlock every send must be coupled with a recv;
	 *          in case you want to send data without knowledge from the other side
	 *          consider using sendRecvMultipleMessages
	 *
	 * \warning the operation is asynchronous; execute() must be called to ensure it is executed
	 *
	 * \see sendRecvMultipleMessages
	 *
	 * \param proc processor id
	 * \param tag id
	 * \param v buffer to send
	 *
	 * \return true if it succeeds, false otherwise
	 *
	 */
	template<typename T, typename Mem, typename gr> bool send(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		// send over MPI

		// Create one request
		req.add();

		// send
		MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last());

		return true;
	}

	/*! \brief Recv data from a processor
	 *
	 * \warning In order to avoid deadlock every recv must be coupled with a send;
	 *          in case you want to send data without knowledge from the other side
	 *          consider using sendrecvMultipleMessagesNBX
	 *
	 * \warning the operation is asynchronous; execute() must be called to ensure it is executed
	 *
	 * \see sendrecvMultipleMessagesNBX
	 *
	 * \param proc processor id
	 * \param tag id
	 * \param v buffer to receive into
	 * \param sz size of the buffer
	 *
	 * \return true if it succeeds, false otherwise
	 *
	 */
	bool recv(size_t proc, size_t tag, void * v, size_t sz)
	{
		// recv over MPI

		// Create one request
		req.add();

		// receive
		MPI_IrecvWB::recv(proc,SEND_RECV_BASE + tag,v,sz,req.last());

		return true;
	}

    /*! \brief Recv data from a processor
     *
     * \warning In order to avoid deadlock every recv must be coupled with a send;
     *          in case you want to send data without knowledge from the other side
     *          consider using sendrecvMultipleMessagesNBX
     *
     * \warning the operation is asynchronous; execute() must be called to ensure it is executed
     *
     * \see sendrecvMultipleMessagesNBX
     *
     * \param proc processor id
     * \param tag id
     * \param v vector to receive into
     *
     * \return true if it succeeds, false otherwise
     *
     */
    template<typename T, typename Mem, typename gr> bool recv(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
    {
#ifdef SE_CLASS1
            checkType<T>();
#endif

            // recv over MPI

            // Create one request
            req.add();

            // receive
            MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last());

            return true;
    }

	/*! \brief Gather the data from all processors
	 *
	 * send a primitive of type T and receive the same primitive T from all the other processors
	 *
	 * \warning the operation is asynchronous; execute() must be called to ensure it is executed
	 *
	 * \param v vector to receive into (automatically resized)
	 * \param send data to send
	 *
	 * \return true if it succeeds, false otherwise
	 *
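	 * A minimal sketch (an illustrative addition; v_cl is the VCluster instance):
	 * \code
	 * long int id = v_cl.rank();
	 * openfpm::vector<long int> v;
	 * v_cl.allGather(id,v);
	 * v_cl.execute();   // now v.get(i) holds i on every processor
	 * \endcode
	 *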
	 */
	template<typename T, typename Mem, typename gr> bool allGather(T & send, openfpm::vector<T,Mem,gr> & v)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		// Create one request
		req.add();

		// Number of processors
		v.resize(getProcessingUnits());

		// gather
		MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last());

		return true;
	}

	/*! \brief Broadcast the data to all processors
	 *
	 * broadcast a vector of primitives.
	 *
	 * \warning the operation is asynchronous; execute() must be called to ensure it is executed
	 *
	 * \warning the non-root processors must resize the vector to the exact receive size. This means
	 *          each processor must know a priori the receiving size
	 *
	 * \param v vector to send in the case of the root processor, and vector where to receive in the case of
	 *          a non-root processor
	 * \param root processor (who broadcasts)
	 *
	 * \return true if it succeeds, false otherwise
	 *
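	 * A minimal sketch (an illustrative addition; v_cl is the VCluster instance):
	 * \code
	 * openfpm::vector<float> v;
	 * v.resize(16);                              // every processor knows the size a priori
	 * if (v_cl.rank() == 0) {v.get(0) = 1.0f;}   // the root fills the data
	 * v_cl.Bcast(v,0);
	 * v_cl.execute();                            // now every processor holds the root's copy
	 * \endcode
	 *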
	 */
	template<typename T, typename Mem, typename lt_type, template<typename> class layout_base >
	bool Bcast(openfpm::vector<T,Mem,lt_type,layout_base> & v, size_t root)
	{
#ifdef SE_CLASS1
		checkType<T>();
#endif

		b_cast_helper<openfpm::vect_isel<T>::value == STD_VECTOR || is_layout_mlin<layout_base<T>>::value >::bcast_(req,v,root);

		return true;
	}