Commit 9602917f authored by incardon

Fixing conflicts

parents 7c68ec7f aca29896
......@@ -15,6 +15,12 @@ mkdir openfpm_vcluster/src/config
git clone git@git.mpi-cbg.de:/openfpm/openfpm_devices.git openfpm_devices
git clone git@git.mpi-cbg.de:/openfpm/openfpm_data.git openfpm_data
cd openfpm_data
git checkout 6c2a5911ac16f93ab0ae1e7ac14723c952aa5c16
cd ..
cd openfpm_devices
git checkout 46e4994c5dff879a71e6ae090c50b2f23235d435
cd ..
cd "$1/openfpm_vcluster"
......
......@@ -48,8 +48,7 @@ fi
prefix="$prefix/openfpm_vcluster"
echo "Installation dir is: $prefix"
AC_PROG_RANLIB
AM_PROG_AR
LT_INIT
# Checks for programs.
AC_PROG_CXX
......
......@@ -45,9 +45,13 @@ class Vcluster: public Vcluster_base
struct index_gen<index_tuple<prp...>>
{
//! Process the receive buffer
template<typename op, typename T, typename S,template <typename> class layout_base> inline static void process_recv(Vcluster & vcl, S & recv, openfpm::vector<size_t> * sz_recv, openfpm::vector<size_t> * sz_recv_byte, op & op_param)
template<typename op,
typename T,
typename S,
template <typename> class layout_base = memory_traits_lin>
inline static void process_recv(Vcluster & vcl, S & recv, openfpm::vector<size_t> * sz_recv, openfpm::vector<size_t> * sz_recv_byte, op & op_param)
{
vcl.process_receive_buffer_with_prp<op,T,S,layout_base, prp...>(recv,sz_recv,sz_recv_byte,op_param);
vcl.process_receive_buffer_with_prp<op,T,S,layout_base,prp...>(recv,sz_recv,sz_recv_byte,op_param);
}
};
......@@ -138,11 +142,17 @@ class Vcluster: public Vcluster_base
sz_recv_byte.get(i) = sz_recv.get(i) * sizeof(typename T::value_type);
}
else
std::cout << __FILE__ << ":" << __LINE__ << " Error " << demangle(typeid(T).name()) << " the type does not work with the option RECEIVE_KNOWN or NO_CHANGE_ELEMENTS" << std::endl;
}
{std::cout << __FILE__ << ":" << __LINE__ << " Error " << demangle(typeid(T).name()) << " the type does not work with the option RECEIVE_KNOWN or NO_CHANGE_ELEMENTS" << std::endl;}
Vcluster_base::sendrecvMultipleMessagesNBX(prc_send.size(),(size_t *)send_sz_byte.getPointer(),(size_t *)prc_send.getPointer(),(void **)send_buf.getPointer(),
prc_recv.size(),(size_t *)prc_recv.getPointer(),(size_t *)sz_recv_byte.getPointer(),msg_alloc_known,(void *)&bi);
Vcluster_base::sendrecvMultipleMessagesNBX(prc_send.size(),(size_t *)send_sz_byte.getPointer(),(size_t *)prc_send.getPointer(),(void **)send_buf.getPointer(),
prc_recv.size(),(size_t *)prc_recv.getPointer(),(size_t *)sz_recv_byte.getPointer(),msg_alloc_known,(void *)&bi);
}
else
{
Vcluster_base::sendrecvMultipleMessagesNBX(prc_send.size(),(size_t *)send_sz_byte.getPointer(),(size_t *)prc_send.getPointer(),(void **)send_buf.getPointer(),
prc_recv.size(),(size_t *)prc_recv.getPointer(),msg_alloc_known,(void *)&bi);
sz_recv_byte = sz_recv_tmp;
}
}
else
{
......@@ -275,7 +285,7 @@ class Vcluster: public Vcluster_base
* \param op_param operation to perform when merging the received information with recv
*
*/
template<typename op, typename T, typename S, template<typename> class layout_base, unsigned int ... prp >
template<typename op, typename T, typename S, template <typename> class layout_base ,unsigned int ... prp >
void process_receive_buffer_with_prp(S & recv,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
......@@ -369,11 +379,14 @@ class Vcluster: public Vcluster_base
* \return true if the function completed successfully (a usage sketch follows below)
*
*/
template<typename T, typename S, template <typename> class layout_base=memory_traits_lin> bool SGather(T & send,
S & recv,
openfpm::vector<size_t> & prc,
openfpm::vector<size_t> & sz,
size_t root)
template<typename T,
typename S,
template <typename> class layout_base = memory_traits_lin>
bool SGather(T & send,
S & recv,
openfpm::vector<size_t> & prc,
openfpm::vector<size_t> & sz,
size_t root)
{
#ifdef SE_CLASS1
if (&send == (T *)&recv)
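For orientation, a usage sketch of this SGather overload, modeled on the semantic unit tests further down in this commit. It assumes an initialized OpenFPM/Vcluster environment (openfpm_init has been called and the Vcluster headers are included); variable names are illustrative, not part of the library.

// Hedged sketch: gather a per-processor vector on processor 0.
Vcluster & vcl = create_vcluster();

openfpm::vector<size_t> v1;    // local contribution of this processor
openfpm::vector<size_t> v2;    // gathered result (filled on the root)
openfpm::vector<size_t> prc;   // source processor of each gathered block
openfpm::vector<size_t> sz;    // number of elements received from each processor

// each processor contributes getProcessUnitID() elements, all equal to 5
v1.resize(vcl.getProcessUnitID());
for (size_t i = 0 ; i < v1.size() ; i++)
{v1.get(i) = 5;}

// gather everything on processor 0; prc and sz describe how v2 is laid out
vcl.SGather(v1,v2,prc,sz,0);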
......@@ -644,13 +657,15 @@ class Vcluster: public Vcluster_base
* \return true if the function completed successfully (a usage sketch follows this function)
*
*/
template<typename T, typename S, template<typename> class layout_base = memory_traits_lin >
template<typename T,
typename S,
template <typename> class layout_base = memory_traits_lin>
bool SSendRecv(openfpm::vector<T> & send,
S & recv,
openfpm::vector<size_t> & prc_send,
openfpm::vector<size_t> & prc_recv,
openfpm::vector<size_t> & sz_recv,
size_t opt = NONE)
openfpm::vector<size_t> & prc_send,
openfpm::vector<size_t> & prc_recv,
openfpm::vector<size_t> & sz_recv,
size_t opt = NONE)
{
prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,opt);
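A hedged caller-side sketch of SSendRecv, patterned after the semantic tests in this repository. The assumed semantics (element i of send goes to processor prc_send.get(i); everything received is appended to recv while prc_recv and sz_recv are filled with the source processors and element counts) follow the surrounding code rather than an authoritative spec, and the variable names are illustrative. It assumes openfpm_init has been called and the Vcluster headers are included.

// Hedged sketch: every processor sends one small vector to its right neighbour.
Vcluster & vcl = create_vcluster();

openfpm::vector<openfpm::vector<size_t>> send; // one message per destination
openfpm::vector<size_t> recv;                  // received elements are appended here
openfpm::vector<size_t> prc_send;              // destination of send.get(i)
openfpm::vector<size_t> prc_recv;              // filled with the source processors
openfpm::vector<size_t> sz_recv;               // filled with the element count per source

// build one message of 3 elements for the next processor (circular)
send.add();
for (size_t k = 0 ; k < 3 ; k++)
{send.last().add(vcl.getProcessUnitID());}
prc_send.add((vcl.getProcessUnitID() + 1) % vcl.getProcessingUnits());

// semantic send/receive; with the default opt the receiving side is fully unknown
vcl.SSendRecv(send,recv,prc_send,prc_recv,sz_recv);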
......@@ -693,14 +708,15 @@ class Vcluster: public Vcluster_base
* \return true if the function completed successfully
*
*/
template<typename T, typename S, template<typename> class layout_base, int ... prp> bool SSendRecvP(openfpm::vector<T> & send,
template<typename T, typename S, template <typename> class layout_base, int ... prp> bool SSendRecvP(openfpm::vector<T> & send,
S & recv,
openfpm::vector<size_t> & prc_send,
openfpm::vector<size_t> & prc_recv,
openfpm::vector<size_t> & sz_recv,
openfpm::vector<size_t> & sz_recv_byte)
openfpm::vector<size_t> & sz_recv_byte,
size_t opt = NONE)
{
prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,NONE);
prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,opt);
// operation object
op_ssend_recv_add<void> opa;
......@@ -739,13 +755,15 @@ class Vcluster: public Vcluster_base
* \return true if the function completed successfully
*
*/
template<typename T, typename S, template<typename> class layout_base, int ... prp> bool SSendRecvP(openfpm::vector<T> & send,
S & recv,
openfpm::vector<size_t> & prc_send,
openfpm::vector<size_t> & prc_recv,
openfpm::vector<size_t> & sz_recv)
template<typename T, typename S, template <typename> class layout_base, int ... prp>
bool SSendRecvP(openfpm::vector<T> & send,
S & recv,
openfpm::vector<size_t> & prc_send,
openfpm::vector<size_t> & prc_recv,
openfpm::vector<size_t> & sz_recv,
size_t opt = NONE)
{
prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,NONE);
prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,opt);
// operation object
op_ssend_recv_add<void> opa;
......@@ -792,13 +810,18 @@ class Vcluster: public Vcluster_base
* \return true if the function completed successfully
*
*/
template<typename op, typename T, typename S, template<typename>class layout_base , int ... prp > bool SSendRecvP_op(openfpm::vector<T> & send,
S & recv,
openfpm::vector<size_t> & prc_send,
op & op_param,
openfpm::vector<size_t> & prc_recv,
openfpm::vector<size_t> & recv_sz,
size_t opt = NONE)
template<typename op,
typename T,
typename S,
template <typename> class layout_base,
int ... prp>
bool SSendRecvP_op(openfpm::vector<T> & send,
S & recv,
openfpm::vector<size_t> & prc_send,
op & op_param,
openfpm::vector<size_t> & prc_recv,
openfpm::vector<size_t> & recv_sz,
size_t opt = NONE)
{
prepare_send_buffer<op,T,S,layout_base>(send,recv,prc_send,prc_recv,recv_sz,opt);
......
......@@ -505,6 +505,64 @@ public:
NBX_cnt = (NBX_cnt + 1) % 1024;
}
/*! \brief Send and receive multiple messages
*
* It sends multiple messages to a set of processors and receives
* multiple messages from another set of processors; all the processors must call this
* function
*
* Suppose the calling processor wants to communicate
* * 2 vectors of 100 integers to processor 1
* * 1 vector of 50 integers to processor 6
* * 1 vector of 48 integers to processor 7
* * 1 vector of 70 integers to processor 8
*
* \param prc list of processors to communicate with [1,1,6,7,8]
*
* \param data vector containing the data to send [v=vector<vector<int>>, v.size()=4, T=vector<int>]; at the moment T
* is only tested for vectors of 0 or more generic elements (without pointers)
*
* \param msg_alloc call-back whose purpose is to allocate space
* for an incoming message and return a valid pointer to it. Supposing that this call-back has been triggered by
* the processor with id 5 that wants to send me a message of 100 bytes, the call-back receives
* the following 6 parameters,
* in order:
* * message size required to receive the message (100)
* * total message size to receive from all the processors (NBX does not provide this information)
* * the total number of processors that want to communicate with you (NBX does not provide this information)
* * processor id (5)
* * ri request id (an id that goes from 0 to total_p and is incremented
* every time message_alloc is called)
* * void pointer, parameter for additional data to pass to the call-back
* (an illustrative call-back sketch is shown after this function)
*
* \param ptr_arg data passed to the specified call-back function
*
* \param opt options, only NONE supported
*
*/
template<typename T>
void sendrecvMultipleMessagesNBX(openfpm::vector< size_t > & prc,
openfpm::vector< T > & data,
void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,void *),
void * ptr_arg, long int opt=NONE)
{
#ifdef SE_CLASS1
checkType<typename T::value_type>();
#endif
// resize the pointer list
ptr_send.resize(prc.size());
sz_send.resize(prc.size());
for (size_t i = 0 ; i < prc.size() ; i++)
{
ptr_send.get(i) = data.get(i).getPointer();
sz_send.get(i) = data.get(i).size() * sizeof(typename T::value_type);
}
sendrecvMultipleMessagesNBX(prc.size(),(size_t *)sz_send.getPointer(),(size_t *)prc.getPointer(),(void **)ptr_send.getPointer(),msg_alloc,ptr_arg,opt);
}
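The six call-back parameters listed above map onto the msg_alloc pointer type visible in this signature, void *(*)(size_t,size_t,size_t,size_t,size_t,void *). Below is a minimal sketch of such a call-back; the recv_buffers holder and the use of std::vector are illustrative assumptions for the example, not OpenFPM types.

#include <cstddef>
#include <vector>

// Illustrative holder for the incoming messages, passed through ptr_arg.
struct recv_buffers
{
std::vector<std::vector<char>> buf; // one buffer per received message
};

// Sketch of a msg_alloc call-back matching the documented parameters:
//   msg_i   - size in bytes of the message to receive (e.g. 100)
//   total   - total size to receive from all processors (not provided by NBX)
//   tot_p   - total number of processors communicating with us (not provided by NBX)
//   proc_id - id of the sending processor (e.g. 5)
//   ri      - request id, incremented at every call
//   ptr_arg - the pointer given to sendrecvMultipleMessagesNBX as ptr_arg
static void * msg_alloc_sketch(size_t msg_i, size_t total, size_t tot_p,
size_t proc_id, size_t ri, void * ptr_arg)
{
(void)total; (void)tot_p; (void)proc_id; (void)ri;

recv_buffers & rb = *static_cast<recv_buffers *>(ptr_arg);

// allocate room for this message and hand back a pointer the library can write into
rb.buf.emplace_back(msg_i);
return rb.buf.back().data();
}

The returned pointer is where the incoming payload is written; the call-back is invoked once per received message.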
/*! \brief Send and receive multiple messages
*
* It sends multiple messages to a set of processors and receives
......@@ -571,39 +629,48 @@ public:
NBX_cnt = (NBX_cnt + 1) % 1024;
}
openfpm::vector<size_t> sz_recv_tmp;
/*! \brief Send and receive multiple messages
*
* It sends multiple messages to a set of processors and receives
* multiple messages from another set of processors; all the processors must call this
* function
* function. In this particular case the receiver knows from which processors it is going
* to receive, but does not know the sizes.
* (an illustrative sketch of this size-then-payload exchange follows the function below)
*
* Suppose the calling processor wants to communicate
* * 2 vectors of 100 integers to processor 1
* * 1 vector of 50 integers to processor 6
* * 1 vector of 48 integers to processor 7
* * 1 vector of 70 integers to processor 8
* * 2 messages of size 100 bytes to processor 1
* * 1 message of size 50 bytes to processor 6
* * 1 message of size 48 bytes to processor 7
* * 1 message of size 70 bytes to processor 8
*
* \param prc list of processors to communicate with [1,1,6,7,8]
* \param n_send number of sends for this processor [4]
*
* \param data vector containing the data to send [v=vector<vector<int>>, v.size()=4, T=vector<int>]; at the moment T
* is only tested for vectors of 0 or more generic elements (without pointers)
* \param prc list of processors with which to communicate
* [1,1,6,7,8]
*
* \param msg_alloc call-back whose purpose is to allocate space
* for the incoming messages and return a valid pointer; supposing that this call-back has been triggered by
* \param sz array containing the size of the message for each processor
* (zeros must not be present) [100,100,50,48,70]
*
* \param ptr array containing the pointers to the messages to send
*
* \param msg_alloc call-back whose purpose is to allocate space
* for an incoming message and return a valid pointer to it. Supposing that this call-back has been triggered by
* the processor with id 5 that wants to send me a message of 100 bytes, the call-back receives
* the following 6 parameters,
* in the call-back, in order:
* * message size required to receive the message (100)
* in the call-back, in order:
* * message size required to receive the message [100]
* * total message size to receive from all the processors (NBX does not provide this information)
* * the total number of processors that want to communicate with you (NBX does not provide this information)
* * processor id (5)
* * processor id [5]
* * ri request id (an id that goes from 0 to total_p and is incremented
* every time message_alloc is called)
* * void pointer, parameter for additional data to pass to the call-back
*
* \param ptr_arg data passed to the specified call-back function
*
* \param opt options, only NONE supported
* \param opt options, NONE (ignored at the moment)
*
*/
template<typename T>
......@@ -611,23 +678,38 @@ public:
void * (* msg_alloc)(size_t,size_t,size_t,size_t,size_t,size_t,void *),
void * ptr_arg, long int opt=NONE)
{
#ifdef SE_CLASS1
checkType<typename T::value_type>();
#endif
// resize the pointer list
ptr_send.resize(prc.size());
sz_send.resize(prc.size());
sz_recv_tmp.resize(n_recv);
for (size_t i = 0 ; i < prc.size() ; i++)
// First we understand the receive size for each processor
for (size_t i = 0 ; i < n_send ; i++)
{send(prc[i],SEND_SPARSE + NBX_cnt,&sz[i],sizeof(size_t));}
for (size_t i = 0 ; i < n_recv ; i++)
{recv(prc_recv[i],SEND_SPARSE + NBX_cnt,&sz_recv_tmp.get(i),sizeof(size_t));}
execute();
// Circular counter
NBX_cnt = (NBX_cnt + 1) % 1024;
// Allocate the buffers
for (size_t i = 0 ; i < n_send ; i++)
{send(prc[i],SEND_SPARSE + NBX_cnt,ptr[i],sz[i]);}
for (size_t i = 0 ; i < n_recv ; i++)
{
ptr_send.get(i) = data.get(i).getPointer();
sz_send.get(i) = data.get(i).size() * sizeof(typename T::value_type);
}
void * ptr_recv = msg_alloc(sz_recv_tmp.get(i),0,0,prc_recv[i],i,ptr_arg);
sendrecvMultipleMessagesNBX(prc.size(),(size_t *)sz_send.getPointer(),(size_t *)prc.getPointer(),(void **)ptr_send.getPointer(),msg_alloc,ptr_arg,opt);
}
recv(prc_recv[i],SEND_SPARSE + NBX_cnt,ptr_recv,sz_recv_tmp.get(i));
}
execute();
// Circular counter
NBX_cnt = (NBX_cnt + 1) % 1024;
}
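The body above implements a two-phase handshake: the sizes are exchanged first (a send/recv of one size_t per peer, followed by execute()), then the payload buffers are allocated through msg_alloc and exchanged under a fresh tag. Purely as an illustration of that pattern, and not of the library's actual implementation, the same idea expressed directly in MPI might look like the sketch below; the function and variable names are invented for the example.

#include <mpi.h>
#include <cstddef>
#include <vector>

// Illustrative two-phase exchange: receiving ranks are known, message sizes are not.
void two_phase_exchange(const std::vector<int> & dst, const std::vector<std::vector<char>> & out,
const std::vector<int> & src, std::vector<std::vector<char>> & in)
{
const int tag_sz = 100;   // phase 1: sizes
const int tag_data = 101; // phase 2: payloads

std::vector<MPI_Request> req;
req.reserve(dst.size() + src.size());

// Phase 1: every sender ships the size of its message, every receiver
// posts a matching receive for a single size_t.
std::vector<size_t> out_sz(dst.size());
std::vector<size_t> in_sz(src.size());
for (size_t i = 0 ; i < dst.size() ; i++)
{
out_sz[i] = out[i].size();
req.emplace_back();
MPI_Isend(&out_sz[i], sizeof(size_t), MPI_BYTE, dst[i], tag_sz, MPI_COMM_WORLD, &req.back());
}
for (size_t i = 0 ; i < src.size() ; i++)
{
req.emplace_back();
MPI_Irecv(&in_sz[i], sizeof(size_t), MPI_BYTE, src[i], tag_sz, MPI_COMM_WORLD, &req.back());
}
MPI_Waitall((int)req.size(), req.data(), MPI_STATUSES_IGNORE);
req.clear();

// Phase 2: allocate buffers of the sizes just learned and exchange the payloads.
in.resize(src.size());
for (size_t i = 0 ; i < dst.size() ; i++)
{
req.emplace_back();
MPI_Isend(out[i].data(), (int)out[i].size(), MPI_BYTE, dst[i], tag_data, MPI_COMM_WORLD, &req.back());
}
for (size_t i = 0 ; i < src.size() ; i++)
{
in[i].resize(in_sz[i]);
req.emplace_back();
MPI_Irecv(in[i].data(), (int)in[i].size(), MPI_BYTE, src[i], tag_data, MPI_COMM_WORLD, &req.back());
}
MPI_Waitall((int)req.size(), req.data(), MPI_STATUSES_IGNORE);
}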
/*! \brief Send and receive multiple messages
*
......
......@@ -14,7 +14,14 @@
template<bool result, typename T, typename S, template<typename> class layout_base>
struct unpack_selector_with_prp
{
template<typename op, int ... prp> static void call_unpack(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, openfpm::vector<size_t> * sz_byte, op & op_param)
template<typename op,
template <typename> class layout_base,
int ... prp>
static void call_unpack(S & recv,
openfpm::vector<BHeapMemory> & recv_buf,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
{
if (sz_byte != NULL)
sz_byte->resize(recv_buf.size());
......@@ -421,11 +428,15 @@ struct pack_unpack_cond_with_prp_inte_lin<T,true>
for (size_t j = 0 ; j < T::value_type::max_prop ; j++)
{prc_send_.add(prc_send.get(i));}
}
}
};
//! There is max_prop inside
template<bool cond, typename op, typename T, typename S, template <typename> class layout_base, unsigned int ... prp>
template<bool cond,
typename op,
typename T,
typename S,
template <typename> class layout_base,
unsigned int ... prp>
struct pack_unpack_cond_with_prp
{
static void packingRequest(T & send, size_t & tot_size, openfpm::vector<size_t> & sz)
......@@ -459,9 +470,9 @@ struct pack_unpack_cond_with_prp
static void unpacking(S & recv,
openfpm::vector<BHeapMemory> & recv_buf,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
{
typedef index_tuple<prp...> ind_prop_to_pack;
call_serialize_variadic<ind_prop_to_pack>::template call_unpack<op,T,S,layout_base>(recv, recv_buf, sz, sz_byte, op_param);
......@@ -476,14 +487,18 @@ template<bool sr>
struct op_ssend_recv_add_sr
{
//! Add data
template<typename T, typename D, typename S, template<typename> class layout_base, int ... prp>
static void execute(D & recv,S & v2, size_t i)
template<typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp> static void execute(D & recv,S & v2, size_t i)
{
// Merge the information
recv.template add_prp<typename T::value_type,
PtrMemory,
PtrMemory,
openfpm::grow_policy_identity,
openfpm::vect_isel<typename T::value_type>::value,
layout_base,
prp...>(v2);
}
};
......@@ -493,10 +508,20 @@ template<>
struct op_ssend_recv_add_sr<true>
{
//! Add data
template<typename T, typename D, typename S, template<typename> class layout_base , int ... prp> static void execute(D & recv,S & v2, size_t i)
template<typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp>
static void execute(D & recv,S & v2, size_t i)
{
// Merge the information
recv.template add_prp<typename T::value_type,HeapMemory,typename T::grow_policy,openfpm::vect_isel<typename T::value_type>::value, prp...>(v2);
recv.template add_prp<typename T::value_type,
HeapMemory,
openfpm::grow_policy_double,
openfpm::vect_isel<typename T::value_type>::value,
layout_base,
prp...>(v2);
}
};
......@@ -505,7 +530,13 @@ template<typename op>
struct op_ssend_recv_add
{
//! Add data
template<bool sr, typename T, typename D, typename S, template<typename> class layout_base, int ... prp> static void execute(D & recv,S & v2, size_t i)
template<bool sr,
typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp>
static void execute(D & recv,S & v2, size_t i)
{
// Merge the information
op_ssend_recv_add_sr<sr>::template execute<T,D,S,layout_base,prp...>(recv,v2,i);
......@@ -517,10 +548,20 @@ template<bool sr,template<typename,typename> class op>
struct op_ssend_recv_merge_impl
{
//! Merge the data
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
template<typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp>
inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
{
// Merge the information
recv.template merge_prp_v<op,typename T::value_type, PtrMemory, openfpm::grow_policy_identity, prp...>(v2,opart.get(i));
recv.template merge_prp_v<op,
typename T::value_type,
PtrMemory,
openfpm::grow_policy_identity,
layout_base,
prp...>(v2,opart.get(i));
}
};
......@@ -529,10 +570,20 @@ template<template<typename,typename> class op>
struct op_ssend_recv_merge_impl<true,op>
{
//! merge the data
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
template<typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp>
inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
{
// Merge the information
recv.template merge_prp_v<op,typename T::value_type, HeapMemory, openfpm::grow_policy_double, prp...>(v2,opart.get(i));
recv.template merge_prp_v<op,
typename T::value_type,
HeapMemory,
openfpm::grow_policy_double,
layout_base,
prp...>(v2,opart.get(i));
}
};
......@@ -549,9 +600,15 @@ struct op_ssend_recv_merge
{}
//! execute the merge
template<bool sr, typename T, typename D, typename S, template<typename> class layout_base , int ... prp> void execute(D & recv,S & v2,size_t i)
template<bool sr,
typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp>
void execute(D & recv,S & v2,size_t i)
{
op_ssend_recv_merge_impl<sr,op>::template execute<T,D,S,prp...>(recv,v2,i,opart);
op_ssend_recv_merge_impl<sr,op>::template execute<T,D,S,layout_base,prp...>(recv,v2,i,opart);
}
};
......@@ -560,10 +617,20 @@ template<bool sr>
struct op_ssend_gg_recv_merge_impl
{
//! Merge the data
template<typename T, typename D, typename S, template<typename> class layout_base , int ... prp> inline static void execute(D & recv,S & v2,size_t i,size_t & start)
template<typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp>
inline static void execute(D & recv,S & v2,size_t i,size_t & start)
{
// Merge the information
recv.template merge_prp_v<replace_,typename T::value_type, PtrMemory, openfpm::grow_policy_identity, prp...>(v2,start);
recv.template merge_prp_v<replace_,
typename T::value_type,
PtrMemory,
openfpm::grow_policy_identity,
layout_base,
prp...>(v2,start);
start += v2.size();
}
......@@ -574,10 +641,19 @@ template<>
struct op_ssend_gg_recv_merge_impl<true>
{
//! merge the data
template<typename T, typename D, typename S, template<typename> class layout_base , int ... prp> inline static void execute(D & recv,S & v2,size_t i,size_t & start)
template<typename T,
typename D,
typename S,
template <typename> class layout_base,
int ... prp> inline static void execute(D & recv,S & v2,size_t i,size_t & start)
{
// Merge the information
recv.template merge_prp_v<replace_,typename T::value_type, HeapMemory , openfpm::grow_policy_double, prp...>(v2,start);
recv.template merge_prp_v<replace_,
typename T::value_type,
HeapMemory,
openfpm::grow_policy_double,
layout_base,
prp...>(v2,start);
// from
start += v2.size();
......
......@@ -45,7 +45,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather)
v1.resize(vcl.getProcessUnitID());
for(size_t i = 0 ; i < vcl.getProcessUnitID() ; i++)
v1.get(i) = 5;
{v1.get(i) = 5;}
openfpm::vector<size_t> v2;
......@@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_2)
v1.resize(vcl.getProcessUnitID());
for(size_t i = 0 ; i < vcl.getProcessUnitID() ; i++)
v1.get(i) = 5;
{v1.get(i) = 5;}
openfpm::vector<openfpm::vector<size_t>> v2;
......@@ -133,7 +133,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_3)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
openfpm::vector<openfpm::vector<aggregate<float, openfpm::vector<size_t>, Point_test<float>>> > v1;
......@@ -210,7 +210,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_4)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
size_t sz[] = {16,16};
......@@ -273,7 +273,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_5)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
size_t sz[] = {16,16};
grid_cpu<2,Point_test<float>> g1(sz);
......@@ -340,7 +340,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_6)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
openfpm::vector<openfpm::vector<openfpm::vector<size_t>>> v1;
openfpm::vector<openfpm::vector<size_t>> v1_int;
......@@ -389,7 +389,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_7)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
openfpm::vector<Point_test<float>> v1;
......@@ -399,7 +399,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_7)
v1.resize(vcl.getProcessUnitID());
for(size_t i = 0 ; i < vcl.getProcessUnitID() ; i++)
v1.get(i) = p1;
{v1.get(i) = p1;}
openfpm::vector<openfpm::vector<Point_test<float>>> v2;
......@@ -453,7 +453,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_gather_8)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
openfpm::vector<Box<3,size_t>> v1;
......@@ -544,7 +544,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_scatter)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
size_t nc = vcl.getProcessingUnits() / SSCATTER_MAX;
size_t nr = vcl.getProcessingUnits() - nc * SSCATTER_MAX;
......@@ -556,7 +556,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_scatter)
v1.resize(n_elements);
for(size_t i = 0 ; i < n_elements ; i++)
v1.get(i) = 5;
{v1.get(i) = 5;}
//! [Scatter the data from master]
......@@ -594,7 +594,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_struct_scatter)
Vcluster & vcl = create_vcluster();
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
size_t nc = vcl.getProcessingUnits() / SSCATTER_MAX;
size_t nr = vcl.getProcessingUnits() - nc * SSCATTER_MAX;
......@@ -637,7 +637,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_struct_scatter)
BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv)
BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv_all_unknown)
{
openfpm::vector<size_t> prc_recv2;
openfpm::vector<size_t> prc_recv3;
......@@ -654,7 +654,7 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv)
if (vcl.getProcessingUnits() >= 32)
return;
{return;}
prc_recv2.clear();
prc_recv3.clear();
......@@ -738,17 +738,27 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv)
BOOST_REQUIRE_EQUAL(match,true);
}
}
BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv_receive_size_known)
{
openfpm::vector<size_t> prc_recv2;
openfpm::vector<size_t> prc_recv3;
openfpm::vector<size_t> sz_recv2;
openfpm::vector<size_t> sz_recv3;
for (size_t i = 0 ; i < 100 ; i++)
{
Vcluster & vcl = create_vcluster();