/*
 * VCluster_semantic.ipp
 *
 * Implementation of semantic communications
 *
 *  Created on: Feb 8, 2016
 *      Author: Pietro Incardona
 */

private:


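	//! Unpack the received buffers and merge them into recv, general case (each buffer is deserialized with Unpacker)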
	template<bool result, typename T, typename S>
	struct unpack_selector_with_prp
	{
		template<typename op, int ... prp> static void call_unpack(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, op & op_param)
		{
#ifdef DEBUG
			std::cout << "Sz.size(): " << sz->size() << std::endl;
#endif
			for (size_t i = 0 ; i < recv_buf.size() ; i++)
			{
#ifdef DEBUG
				std::cout << "Recv_buf.get(i).size(): " << recv_buf.get(i).size() << std::endl;
#endif
				T unp;

				ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(recv_buf.get(i).size(),recv_buf.get(i)));
				mem.incRef();

				Unpack_stat ps;

				Unpacker<T,HeapMemory>::template unpack<>(mem, unp, ps);

				size_t recv_size_old = recv.size();
				// Merge the information

				op_param.template execute<true,T,decltype(recv),decltype(unp),prp...>(recv,unp,i);

				size_t recv_size_new = recv.size();

				if (sz != NULL)
					sz->get(i) = recv_size_new - recv_size_old;
			}
		}
	};

	//! Specialization of unpack_selector_with_prp for vectors of elements that need no serialization (raw memory copy)
	template<typename T, typename S>
	struct unpack_selector_with_prp<true,T,S>
	{
		template<typename op, unsigned int ... prp> static void call_unpack(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, op & op_param)
		{
			for (size_t i = 0 ; i < recv_buf.size() ; i++)
			{
				// calculate the number of received elements
				size_t n_ele = recv_buf.get(i).size() / sizeof(typename T::value_type);

				// add the received particles to the vector
				PtrMemory * ptr1 = new PtrMemory(recv_buf.get(i).getPointer(),recv_buf.get(i).size());

				// create vector representation to a piece of memory already allocated
				openfpm::vector<typename T::value_type,PtrMemory,typename memory_traits_lin<typename T::value_type>::type, memory_traits_lin,openfpm::grow_policy_identity> v2;

				v2.setMemory(*ptr1);

				// resize with the number of elements
				v2.resize(n_ele);

				// Merge the information

				size_t recv_size_old = recv.size();

				op_param.template execute<false,T,decltype(recv),decltype(v2),prp...>(recv,v2,i);

				size_t recv_size_new = recv.size();

				if (sz != NULL)
					sz->get(i) = recv_size_new - recv_size_old;
			}
		}
	};
	

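	//! Expand an index_tuple<prp...> into the property list forwarded to Packer/Unpacker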
	template<typename T>
	struct call_serialize_variadic {};
	
	template<int ... prp>
	struct call_serialize_variadic<index_tuple<prp...>>
	{
		template<typename T> inline static void call_pr(T & send, size_t & tot_size)
		{
			Packer<T,HeapMemory>::template packRequest<prp...>(send,tot_size);
		}
		
		template<typename T> inline static void call_pack(ExtPreAlloc<HeapMemory> & mem, T & send, Pack_stat & sts)
		{
			Packer<T,HeapMemory>::template pack<prp...>(mem,send,sts);
		}
		
		template<typename op, typename T, typename S> inline static void call_unpack(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, op & op_param)
		{
			const bool result = has_pack_gen<typename T::value_type>::value == false && is_vector<T>::value == true;
			//const bool result = has_pack<typename T::value_type>::type::value == false && has_pack_agg<typename T::value_type>::result::value == false && is_vector<T>::value == true;
			unpack_selector_with_prp<result, T, S>::template call_unpack<op,prp...>(recv, recv_buf, sz, op_param);
		}
	};
	
	//! Pack/unpack helper, the condition indicates whether T carries max_prop inside (compile-time number of properties)
	template<bool cond, typename op, typename T, typename S, unsigned int ... prp>
	struct pack_unpack_cond_with_prp
	{
		static void packingRequest(T & send, size_t & tot_size, openfpm::vector<size_t> & sz)
		{
			typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
			if (has_pack_gen<typename T::value_type>::value == false && is_vector<T>::value == true)
			//if (has_pack<typename T::value_type>::type::value == false && has_pack_agg<typename T::value_type>::result::value == false && is_vector<T>::value == true)
			{
#ifdef DEBUG
				std::cout << "Inside SGather pack request (has prp) (vector case) " << std::endl;
#endif
				sz.add(send.size()*sizeof(typename T::value_type));
			}
			else
			{
				call_serialize_variadic<ind_prop_to_pack>::call_pr(send,tot_size);
#ifdef DEBUG
				std::cout << "Inside SGather pack request (has prp) (general case) " << std::endl;
#endif
				sz.add(tot_size);
			}
		}

		static void packing(ExtPreAlloc<HeapMemory> & mem, T & send, Pack_stat & sts, openfpm::vector<const void *> & send_buf)
		{
			typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
			if (has_pack_gen<typename T::value_type>::value == false && is_vector<T>::value == true)
			//if (has_pack<typename T::value_type>::type::value == false && has_pack_agg<typename T::value_type>::result::value == false && is_vector<T>::value == true)
			{
#ifdef DEBUG
				std::cout << "Inside SGather pack (has prp) (vector case) " << std::endl;
#endif
				//std::cout << demangle(typeid(T).name()) << std::endl;
				send_buf.add(send.getPointer());
			}
			else
			{
#ifdef DEBUG
				std::cout << "Inside SGather pack (has prp) (general case) " << std::endl;
#endif
				send_buf.add(mem.getPointerEnd());
				call_serialize_variadic<ind_prop_to_pack>::call_pack(mem,send,sts);
			}
		}

		static void unpacking(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, op & op_param)
		{
			typedef index_tuple<prp...> ind_prop_to_pack;
			call_serialize_variadic<ind_prop_to_pack>::template call_unpack<op,T,S>(recv, recv_buf, sz, op_param);
		}
	};

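	//! Helper to expand the generated list of property indexes and process the receive buffer with it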
	template<typename T>
	struct index_gen {};

	template<int ... prp>
	struct index_gen<index_tuple<prp...>>
	{
incardon's avatar
incardon committed
169
		template<typename op, typename T, typename S> inline static void process_recv(Vcluster & vcl, S & recv, openfpm::vector<size_t> * sz_recv, op & op_param)
		{
incardon's avatar
incardon committed
171
			vcl.process_receive_buffer_with_prp<op,T,S,prp...>(recv,sz_recv,op_param);
		}
	};


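/*! \brief Pack the sending buffers, exchange the messages and reorder the receive buffer
 *
 * Each element of send is packed (or passed as a raw pointer when no serialization is
 * needed), the messages are exchanged with sendrecvMultipleMessagesNBX, and the receive
 * buffer is finally reordered by source processor.
 *
 * \tparam op type of the operation used to merge the received data
 * \tparam T type of sending object
 * \tparam S type of receiving object
 *
 * \param send vector of objects to send, one for each destination processor
 * \param recv object that will receive the data
 * \param prc_send list of destination processors
 * \param prc_recv list of processors from which data has been received (filled by the call)
 * \param sz_recv size of the data received from each processor (filled by the call)
 *
 */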
template<typename op, typename T, typename S> void prepare_send_buffer(openfpm::vector<T> & send, S & recv, openfpm::vector<size_t> & prc_send, openfpm::vector<size_t> & prc_recv, openfpm::vector<size_t> & sz_recv)
{
	prc_recv.clear();
	sz_recv.clear();

	// Reset the receive buffer
	reset_recv_buf();

#ifdef SE_CLASS1

	if (send.size() != prc_send.size())
		std::cerr << __FILE__ << ":" << __LINE__ << " Error, the number of processors involved \"prc_send.size()\" must match the number of sending buffers \"send.size()\" " << std::endl;

#endif

	// Prepare the sending buffer
	openfpm::vector<const void *> send_buf;

	openfpm::vector<size_t> sz_byte;

	size_t tot_size = 0;

	for (size_t i = 0; i < send.size() ; i++)
	{
		size_t req = 0;

		//Pack requesting
		pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op, T, S>::packingRequest(send.get(i), req, sz_byte);
		tot_size += req;
	}

	HeapMemory pmem;

	ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(tot_size,pmem));
	mem.incRef();

	for (size_t i = 0; i < send.size() ; i++)
	{
		//Packing

		Pack_stat sts;

		pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value, op, T, S>::packing(mem, send.get(i), sts, send_buf);

	}

	// receive information
	base_info bi(&recv_buf,prc_recv,sz_recv);

	// Send and recv multiple messages
	sendrecvMultipleMessagesNBX(prc_send.size(),(size_t *)sz_byte.getPointer(),(size_t *)prc_send.getPointer(),(void **)send_buf.getPointer(),msg_alloc,(void *)&bi);

	// Reorder the buffer
	reorder_buffer(prc_recv,sz_recv);
}


/*! \brief Reset the receive buffer
 * 
 * 
 */
void reset_recv_buf()
{
	for (size_t i = 0 ; i < recv_buf.size() ; i++)
		recv_buf.get(i).resize(0);

	recv_buf.resize(0);
}

/*! \brief Base info
 *
 * \param recv_buf receive buffers
 * \param prc processors involved
 * \param sz size of the received data
 *
 */
struct base_info
{
	openfpm::vector<BHeapMemory> * recv_buf;
	openfpm::vector<size_t> & prc;
	openfpm::vector<size_t> & sz;

	// constructor
	base_info(openfpm::vector<BHeapMemory> * recv_buf, openfpm::vector<size_t> & prc, openfpm::vector<size_t> & sz)
	:recv_buf(recv_buf),prc(prc),sz(sz)
	{}
};

/*! \brief Call-back to allocate buffer to receive data
 *
 * \param msg_i size required to receive the message from i
 * \param total_msg total size to receive from all the processors
 * \param total_p the total number of processor that want to communicate with you
 * \param i processor id
 * \param ri request id (it is an id that goes from 0 to total_p, and is unique
 *           every time message_alloc is called)
 * \param ptr a pointer to the base_info structure passed to sendrecvMultipleMessagesNBX
 *
 * \return the pointer where to store the message for the processor i
 *
 */
static void * msg_alloc(size_t msg_i ,size_t total_msg, size_t total_p, size_t i, size_t ri, void * ptr)
{
	base_info & rinfo = *(base_info *)ptr;

	if (rinfo.recv_buf == NULL)
	{
		std::cerr << __FILE__ << ":" << __LINE__ << " Internal error, this processor is not supposed to receive\n";
		return NULL;
	}

	rinfo.recv_buf->resize(ri+1);

	rinfo.recv_buf->get(ri).resize(msg_i);

	// Receive info
	rinfo.prc.add(i);
	rinfo.sz.add(msg_i);

	// return the pointer
	return rinfo.recv_buf->last().getPointer();
}

/*! \brief Process the receive buffer
 *
 * \tparam op type of the operation used to merge the data
 * \tparam T type of sending object
 * \tparam S type of receiving object
 * \tparam prp properties to receive
 *
 * \param recv receive object
 * \param sz vector where to store the size of the data received from each processor (can be NULL)
 * \param op_param operation object
 *
 */
template<typename op, typename T, typename S, unsigned int ... prp > void process_receive_buffer_with_prp(S & recv, openfpm::vector<size_t> * sz, op & op_param)
{
	if (sz != NULL)
		sz->resize(recv_buf.size());

	pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op, T, S, prp... >::unpacking(recv, recv_buf, sz, op_param);
}

public:



/*! \brief Semantic Gather, gather the data from all processors into one node
 *
 * Semantic communications differ from the normal ones. In general they
 * follow the model
 *
 * Gather(T,S,root,op=add);
 *
 * "Gather" indicates the communication pattern, or how the information flows.
 * T is the object to send, S is the object that will receive the data.
 * In order to work, S must implement the interface S.add(T).
 *
 * ### Example send a vector of structures, and merge all together in one vector
 * \snippet VCluster_semantic_unit_tests.hpp Gather the data on master
 *
 * ### Example send a vector of complex structures, and merge all together in one vector
 * \snippet VCluster_semantic_unit_tests.hpp Gather the data on master complex
 *
 * \tparam T type of sending object
 * \tparam S type of receiving object
 *
 * \param send object to send
 * \param recv object to receive
 * \param root which node should collect the information
 *
 * \return true if the function completed successfully
 *
 */
template<typename T, typename S> bool SGather(T & send, S & recv,size_t root)
{
	openfpm::vector<size_t> prc;
	openfpm::vector<size_t> sz;

	return SGather(send,recv,prc,sz,root);
}
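
// Usage sketch (illustrative, not part of the original source): gather one value per
// processor on the root. It assumes a Vcluster instance vcl (e.g. obtained from
// create_vcluster()) and relies on openfpm::vector providing add(), as required by the
// S.add(T) interface.
//
//   openfpm::vector<float> v_send;
//   v_send.add(1.0f + vcl.getProcessUnitID());
//
//   openfpm::vector<float> v_recv;
//   vcl.SGather(v_send,v_recv,0);   // on processor 0, v_recv contains one value per processor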

template<size_t index, size_t N> struct MetaFuncOrd {
   enum { value = index };
};

/*! \brief Semantic Gather, gather the data from all processors into one node
 *
 * Semantic communications differ from the normal ones. In general they
 * follow the model
 *
 * Gather(T,S,root,op=add);
 *
 * "Gather" indicates the communication pattern, or how the information flows.
 * T is the object to send, S is the object that will receive the data.
 * In order to work, S must implement the interface S.add(T).
 *
 * ### Example send a vector of structures, and merge all together in one vector
 * \snippet VCluster_semantic_unit_tests.hpp Gather the data on master
 *
 * ### Example send a vector of complex structures, and merge all together in one vector
 * \snippet VCluster_semantic_unit_tests.hpp Gather the data on master complex
 *
 * \tparam T type of sending object
 * \tparam S type of receiving object
 *
 * \param send object to send
 * \param recv object to receive
 * \param root which node should collect the information
 * \param prc processors from which we received the information
 * \param sz size of the information received from each processor
 *
 * \return true if the function completed successfully
 *
 */
template<typename T, typename S> bool SGather(T & send, S & recv, openfpm::vector<size_t> & prc, openfpm::vector<size_t> & sz,size_t root)
{
	// Reset the receive buffer
	reset_recv_buf();
	
	// If we are on master collect the information
	if (getProcessUnitID() == root)
	{
#ifdef DEBUG
		std::cout << "Inside root " << root << std::endl;
#endif
		// the master does not send anything, so the send request list stays empty (size 0)
		openfpm::vector<size_t> send_req;

		// receive information
		base_info bi(&recv_buf,prc,sz);

		// Send and recv multiple messages
		sendrecvMultipleMessagesNBX(send_req.size(),NULL,NULL,NULL,msg_alloc,&bi);

		// we generate the list of the properties to pack
		typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;

		// operation object
		op_ssend_recv_add<void> opa;

		index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S>(*this,recv,&sz,opa);

		recv.add(send);
		prc.add(root);
		sz.add(send.size());
	}
	else
	{
#ifdef DEBUG
		std::cout << "Inside slave " << getProcessUnitID() << std::endl;
#endif
		// the non-root processors pack their data and send it to the root
		openfpm::vector<size_t> send_prc;
		send_prc.add(root);
				
		openfpm::vector<size_t> sz;
				
		openfpm::vector<const void *> send_buf;
			
		//Pack requesting
		
		size_t tot_size = 0;
		
		pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op_ssend_recv_add<void>, T, S>::packingRequest(send, tot_size, sz);
		
		HeapMemory pmem;
		
		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(tot_size,pmem));
		mem.incRef();

		//Packing

		Pack_stat sts;
		
		pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op_ssend_recv_add<void>, T, S>::packing(mem, send, sts, send_buf);

		// receive information
		base_info bi(NULL,prc,sz);

		// Send and recv multiple messages
		sendrecvMultipleMessagesNBX(send_prc.size(),(size_t *)sz.getPointer(),(size_t *)send_prc.getPointer(),(void **)send_buf.getPointer(),msg_alloc,(void *)&bi);
	}
	
	return true;
}

/*! \brief Semantic Scatter, scatter the data from one processor to the other nodes
 *
 * Semantic communications differ from the normal ones. In general they
 * follow the model
 *
 * Scatter(T,S,...,op=add);
 *
 * "Scatter" indicates the communication pattern, or how the information flows.
 * T is the object to send, S is the object that will receive the data.
 * In order to work, S must implement the interface S.add(T).
 *
 * ### Example scatter a vector of structures, to other processors
 * \snippet VCluster_semantic_unit_tests.hpp Scatter the data from master
 *
 * \tparam T type of sending object
 * \tparam S type of receiving object
 *
 * \param send object to send
 * \param recv object to receive
 * \param prc processors involved in the scatter
 * \param sz size (in elements) of the chunk to send to each processor in prc
 * \param root which processor should scatter the information
 *
 * \return true if the function completed successfully
 *
 */
template<typename T, typename S> bool SScatter(T & send, S & recv, openfpm::vector<size_t> & prc, openfpm::vector<size_t> & sz, size_t root)
{
	// Reset the receive buffer
	reset_recv_buf();

	// If we are on master scatter the information
	if (getProcessUnitID() == root)
	{
		// Prepare the sending buffer
		openfpm::vector<const void *> send_buf;


		openfpm::vector<size_t> sz_byte;
		sz_byte.resize(sz.size());

		size_t ptr = 0;

		for (size_t i = 0; i < sz.size() ; i++)
		{
			send_buf.add((char *)send.getPointer() + sizeof(typename T::value_type)*ptr );
			sz_byte.get(i) = sz.get(i) * sizeof(typename T::value_type);
			ptr += sz.get(i);
		}

		// receive information
		base_info bi(&recv_buf,prc,sz);

		// Send and recv multiple messages
		sendrecvMultipleMessagesNBX(prc.size(),(size_t *)sz_byte.getPointer(),(size_t *)prc.getPointer(),(void **)send_buf.getPointer(),msg_alloc,(void *)&bi);

		// we generate the list of the properties to pack
		typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;

		// operation object
		op_ssend_recv_add<void> opa;

		index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S>(*this,recv,NULL,opa);
	}
	else
	{
		// The non-root receive
		openfpm::vector<size_t> send_req;

		// receive information
		base_info bi(&recv_buf,prc,sz);

		// Send and recv multiple messages
		sendrecvMultipleMessagesNBX(send_req.size(),NULL,NULL,NULL,msg_alloc,&bi);

		// we generate the list of the properties to pack
		typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;

		// operation object
		op_ssend_recv_add<void> opa;

		index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S>(*this,recv,NULL,opa);
	}

	return true;
}
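
// Usage sketch (illustrative, not part of the original source): the root scatters 3 elements
// to every other processor. It assumes a Vcluster instance vcl and that getProcessingUnits()
// returns the number of processors.
//
//   openfpm::vector<float> v_send;
//   openfpm::vector<float> v_recv;
//   openfpm::vector<size_t> prc;
//   openfpm::vector<size_t> sz;
//
//   if (vcl.getProcessUnitID() == 0)
//   {
//       for (size_t p = 1 ; p < vcl.getProcessingUnits() ; p++)
//       {
//           prc.add(p);      // destination processor
//           sz.add(3);       // number of elements for processor p
//
//           for (size_t j = 0 ; j < 3 ; j++)
//               v_send.add((float)p);
//       }
//   }
//
//   vcl.SScatter(v_send,v_recv,prc,sz,0);   // each processor listed in prc receives its chunk in v_recv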

/*! \brief Reorder the receive buffer by source processor
 *
 * \param prc list of the receiving processors, reordered by the call
 * \param sz_recv size of the received messages, reordered consistently with prc
 *
 */
void reorder_buffer(openfpm::vector<size_t> & prc, openfpm::vector<size_t> & sz_recv)
{

	struct recv_buff_reorder
	{
		//! processor
		size_t proc;

		//! position in the receive list
		size_t pos;

		//! default constructor
		recv_buff_reorder()	{};

		//! needed to reorder
		bool operator<(const recv_buff_reorder & rd)
		{
			return proc < rd.proc;
		}
	};

	openfpm::vector<recv_buff_reorder> rcv;

	rcv.resize(recv_buf.size());

	for (size_t i = 0 ; i < rcv.size() ; i++)
	{
		rcv.get(i).proc = prc.get(i);
		rcv.get(i).pos = i;
	}

	// we sort based on processor
	rcv.sort();

	openfpm::vector<BHeapMemory> recv_ord;
	recv_ord.resize(rcv.size());

	openfpm::vector<size_t> prc_ord;
	prc_ord.resize(rcv.size());

	openfpm::vector<size_t> sz_recv_ord;
	sz_recv_ord.resize(rcv.size());

	// Now we reorder rcv
	for (size_t i = 0 ; i < rcv.size() ; i++)
	{
		recv_ord.get(i).swap(recv_buf.get(rcv.get(i).pos));
		prc_ord.get(i) = rcv.get(i).proc;
		sz_recv_ord.get(i) = sz_recv.get(rcv.get(i).pos);
	}

	// move rcv into recv
	recv_buf.swap(recv_ord);
	prc.swap(prc_ord);
	sz_recv.swap(sz_recv_ord);

	// reorder prc_recv and recv_sz
}

/*! \brief Semantic Send and receive, send the data to processors and receive from the other processors
 *
 * Semantic communications differ from the normal ones. In general they
 * follow the model
 *
 * SSendRecv(T,S,...,op=add);
 *
 * "SendRecv" indicates the communication pattern, or how the information flows.
 * T is the object to send, S is the object that will receive the data.
 * In order to work, S must implement the interface S.add(T).
 *
 * ### Example scatter a vector of structures, to other processors
 * \snippet VCluster_semantic_unit_tests.hpp Scatter the data from master
 *
 * \tparam T type of sending object
 * \tparam S type of receiving object
 *
 * \param send vector of objects to send, one for each destination processor
 * \param recv object that will receive the data
 * \param prc_send list of destination processors
 * \param prc_recv list of processors from which data has been received (filled by the call)
 * \param sz_recv size of the data received from each processor (filled by the call)
 *
 * \return true if the function completed successfully
 *
 */
template<typename T, typename S> bool SSendRecv(openfpm::vector<T> & send, S & recv, openfpm::vector<size_t> & prc_send, openfpm::vector<size_t> & prc_recv, openfpm::vector<size_t> & sz_recv)
{
	prepare_send_buffer<op_ssend_recv_add<void>,T,S>(send,recv,prc_send,prc_recv,sz_recv);

	// we generate the list of the properties to pack
	typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;

	op_ssend_recv_add<void> opa;

	index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S>(*this,recv,&sz_recv,opa);

	return true;
}
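
// Usage sketch (illustrative, not part of the original source): every processor sends one
// chunk to its right neighbour; prc_recv and sz_recv are filled by the call. It assumes a
// Vcluster instance vcl and more than one processor.
//
//   size_t rank  = vcl.getProcessUnitID();
//   size_t nproc = vcl.getProcessingUnits();
//
//   openfpm::vector<float> chunk;
//   chunk.add((float)rank);
//
//   openfpm::vector<openfpm::vector<float>> v_send;   // one element per destination processor
//   v_send.add(chunk);
//
//   openfpm::vector<size_t> prc_send;
//   prc_send.add((rank + 1) % nproc);
//
//   openfpm::vector<size_t> prc_recv;
//   openfpm::vector<size_t> sz_recv;
//   openfpm::vector<float> v_recv;
//
//   vcl.SSendRecv(v_send,v_recv,prc_send,prc_recv,sz_recv);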


/*! \brief Semantic Send and receive, send the data to processors and receive from the other processors
 *
 * Semantic communications differ from the normal ones. In general they
 * follow the model
 *
 * SSendRecv(T,S,...,op=add);
 *
 * "SendRecv" indicates the communication pattern, or how the information flows.
 * T is the object to send, S is the object that will receive the data.
 * In order to work, S must implement the interface S.add<prp...>(T).
 *
 * ### Example scatter a vector of structures, to other processors
 * \snippet VCluster_semantic_unit_tests.hpp Scatter the data from master
 *
 * \tparam T type of sending object
 * \tparam S type of receiving object
 * \tparam prp properties for merging
 *
 * \param send vector of objects to send, one for each destination processor
 * \param recv object that will receive the data
 * \param prc_send list of destination processors
 * \param prc_recv list of processors from which data has been received (filled by the call)
 * \param sz_recv size of the data received from each processor (filled by the call)
 *
 * \return true if the function completed successfully
 *
 */
template<typename T, typename S, int ... prp> bool SSendRecvP(openfpm::vector<T> & send, S & recv, openfpm::vector<size_t> & prc_send, openfpm::vector<size_t> & prc_recv, openfpm::vector<size_t> & sz_recv)
{
	prepare_send_buffer<op_ssend_recv_add<void>,T,S>(send,recv,prc_send,prc_recv,sz_recv);

	// operation object
	op_ssend_recv_add<void> opa;

	// process the received information
	process_receive_buffer_with_prp<op_ssend_recv_add<void>,T,S,prp...>(recv,&sz_recv,opa);

	return true;
}
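
// Usage sketch (illustrative, not part of the original source): same pattern as SSendRecv
// above, but only the listed properties are merged into the receiving object. Here
// part_vector is a hypothetical openfpm::vector of an aggregate with several properties:
//
//   vcl.SSendRecvP<part_vector,part_vector,0,2>(v_send,v_recv,prc_send,prc_recv,sz_recv);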

/*! \brief Semantic Send and receive, send the data to processors and receive from the other processors
 *
 * Semantic communications differ from the normal ones. In general they
 * follow the model
 *
 * SSendRecv(T,S,...,op=add);
 *
 * "SendRecv" indicates the communication pattern, or how the information flows.
 * T is the object to send, S is the object that will receive the data.
 * In order to work, S must implement the interface S.add<prp...>(T).
 *
 * ### Example scatter a vector of structures, to other processors
 * \snippet VCluster_semantic_unit_tests.hpp Scatter the data from master
 *
 * \tparam op type of operation
 * \tparam T type of sending object
 * \tparam S type of receiving object
 * \tparam prp properties for merging
 *
 * \param send vector of objects to send, one for each destination processor
 * \param recv object that will receive the data
 * \param prc_send list of destination processors
 * \param op_param operation object used to merge the received data
 * \param prc_recv list of processors from which data has been received (filled by the call)
 * \param sz_recv size of the data received from each processor (filled by the call)
 *
 * \return true if the function completed successfully
 *
 */
template<typename op, typename T, typename S, int ... prp> bool SSendRecvP_op(openfpm::vector<T> & send, S & recv, openfpm::vector<size_t> & prc_send,op & op_param, openfpm::vector<size_t> & prc_recv, openfpm::vector<size_t> & sz_recv)
{
	prepare_send_buffer<op,T,S>(send,recv,prc_send,prc_recv,sz_recv);

	// process the received information
	process_receive_buffer_with_prp<op,T,S,prp...>(recv,&sz_recv,op_param);

	return true;
}
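
// Sketch of a custom operation object (hypothetical, not part of the original source).
// Judging from the calls in unpack_selector_with_prp above, the operation type is expected
// to expose a member template roughly of the form
//
//   struct op_my_merge
//   {
//       template<bool sr, typename T, typename D, typename V, int ... prp>
//       void execute(D & recv, V & v2, size_t i)
//       {
//           // merge the i-th received block v2 into recv for the properties prp...
//       }
//   };
//
// which is then passed as op_param:
//
//   op_my_merge om;
//   vcl.SSendRecvP_op<op_my_merge,part_vector,part_vector,0,2>(v_send,v_recv,prc_send,om,prc_recv,sz_recv);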