Commit 7545a755 authored by incardon's avatar incardon

Changing names of some files

parent 3f37237e
......@@ -2,18 +2,18 @@
LINKLIBS = $(DEFAULT_LIB) $(PTHREAD_LIBS) $(OPT_LIBS) $(HDF5_LDFLAGS) $(HDF5_LIBS) $(BOOST_LDFLAGS)
noinst_PROGRAMS = vcluster
vcluster_SOURCES = main.cpp VCluster.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp
vcluster_SOURCES = main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp
vcluster_CXXFLAGS = $(AM_CXXFLAGS) $(INCLUDES_PATH) $(BOOST_CPPFLAGS)
vcluster_CFLAGS = $(CUDA_CFLAGS)
vcluster_LDADD = $(LINKLIBS)
lib_LIBRARIES = libvcluster.a
libvcluster_a_SOURCES = VCluster.cpp
libvcluster_a_SOURCES = VCluster/VCluster.cpp
libvcluster_a_CXXFLAGS = $(AM_CXXFLAGS) $(INCLUDES_PATH) $(BOOST_CPPFLAGS)
libvcluster_a_CFLAGS =
nobase_include_HEADERS = MPI_wrapper/MPI_IallreduceW.hpp MPI_wrapper/MPI_IrecvW.hpp MPI_wrapper/MPI_IsendW.hpp MPI_wrapper/MPI_util.hpp MPI_wrapper/MPI_IAllGather.hpp \
VCluster_semantic.ipp VCluster.hpp VCluster_object.hpp \
VCluster/VCluster_base.hpp VCluster/VCluster.hpp \
util/Vcluster_log.hpp
.cu.o :
......
This diff is collapsed.
#ifndef VCLUSTER
#define VCLUSTER
#ifndef VCLUSTER_BASE_HPP_
#define VCLUSTER_BASE_HPP_
#include "config.h"
#include <mpi.h>
#include "MPI_wrapper/MPI_util.hpp"
#include "VCluster_object.hpp"
#include "Vector/map_vector.hpp"
#include "MPI_wrapper/MPI_IallreduceW.hpp"
#include "MPI_wrapper/MPI_IrecvW.hpp"
......@@ -54,133 +53,6 @@ template<typename T> void assign(T * ptr1, T * ptr2)
*ptr1 = *ptr2;
};
//! Helper class to add data without serialization
//! (sr == false: the received bytes are used in place through PtrMemory,
//! so the destination grows with an identity policy — no copy of the buffer)
template<bool sr>
struct op_ssend_recv_add_sr
{
//! Append the elements of v2 to recv
//! \param recv destination container
//! \param v2 source container mapped onto the raw receive buffer
//! \param i index of the processed message (unused in this case)
template<typename T, typename D, typename S, int ... prp> static void execute(D & recv,S & v2, size_t i)
{
// Merge the information: add only the selected properties prp...
recv.template add_prp<typename T::value_type,PtrMemory,openfpm::grow_policy_identity,openfpm::vect_isel<typename T::value_type>::value,prp...>(v2);
}
};
//! Helper class to add data with serialization
//! (sr == true: elements were unpacked into heap-backed storage,
//! so HeapMemory with a doubling grow policy is used)
template<>
struct op_ssend_recv_add_sr<true>
{
//! Append the elements of v2 to recv
//! \param recv destination container
//! \param v2 source container holding the deserialized elements
//! \param i index of the processed message (unused in this case)
template<typename T, typename D, typename S, int ... prp> static void execute(D & recv,S & v2, size_t i)
{
// Merge the information: add only the selected properties prp...
recv.template add_prp<typename T::value_type,HeapMemory,openfpm::grow_policy_double,openfpm::vect_isel<typename T::value_type>::value, prp...>(v2);
}
};
//! Helper class to add data
//! Front-end that dispatches at compile time (on the flag sr) to the
//! serialized or non-serialized op_ssend_recv_add_sr implementation.
template<typename op>
struct op_ssend_recv_add
{
//! Add data
//! \param recv destination container
//! \param v2 received data
//! \param i index of the processed message
template<bool sr, typename T, typename D, typename S, int ... prp> static void execute(D & recv,S & v2, size_t i)
{
// Forward to the sr-specific implementation
op_ssend_recv_add_sr<sr>::template execute<T,D,S,prp...>(recv,v2,i);
}
};
//! Helper class to merge data without serialization
//! (sr == false: v2 views the raw receive buffer through PtrMemory)
template<bool sr,template<typename,typename> class op>
struct op_ssend_recv_merge_impl
{
//! Merge the elements of v2 into recv at the positions listed in opart.get(i)
//! \param recv destination container
//! \param v2 received data for message i
//! \param i index of the processed message
//! \param opart per-processor list of target positions for the merge
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
{
// Merge the information applying the operation op property-by-property
recv.template merge_prp_v<op,typename T::value_type, PtrMemory, openfpm::grow_policy_identity, prp...>(v2,opart.get(i));
}
};
//! Helper class to merge data with serialization
//! (sr == true: v2 holds deserialized elements in heap-backed storage)
template<template<typename,typename> class op>
struct op_ssend_recv_merge_impl<true,op>
{
//! Merge the elements of v2 into recv at the positions listed in opart.get(i)
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
{
// Merge the information applying the operation op property-by-property
recv.template merge_prp_v<op,typename T::value_type, HeapMemory, openfpm::grow_policy_double, prp...>(v2,opart.get(i));
}
};
//! Helper class to merge data
//! Stateful front-end: carries the merge-position lists (opart) and
//! dispatches on the compile-time flag sr to op_ssend_recv_merge_impl.
template<template<typename,typename> class op>
struct op_ssend_recv_merge
{
//! For each processor contain the list of the particles with which I must merge the information
openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart;
//! constructor
//! \param opart per-processor merge-position lists (kept by reference; must outlive this object)
op_ssend_recv_merge(openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
:opart(opart)
{}
//! execute the merge of message i into recv
template<bool sr, typename T, typename D, typename S, int ... prp> void execute(D & recv,S & v2,size_t i)
{
op_ssend_recv_merge_impl<sr,op>::template execute<T,D,S,prp...>(recv,v2,i,opart);
}
};
//! Helper class to merge (ghost-get) data without serialization
//! (sr == false: v2 views the raw receive buffer through PtrMemory)
template<bool sr>
struct op_ssend_gg_recv_merge_impl
{
//! Merge the elements of v2 into recv starting at position start (replace semantics)
//! \param recv destination container
//! \param v2 received data for message i
//! \param i index of the processed message (unused here)
//! \param start running offset into recv; advanced past the merged elements
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,size_t & start)
{
// Merge the information, replacing the properties prp... in [start, start+v2.size())
recv.template merge_prp_v<replace_,typename T::value_type, PtrMemory, openfpm::grow_policy_identity, prp...>(v2,start);
start += v2.size();
}
};
//! Helper class to merge (ghost-get) data with serialization
//! (sr == true: v2 holds deserialized elements in heap-backed storage)
template<>
struct op_ssend_gg_recv_merge_impl<true>
{
//! Merge the elements of v2 into recv starting at position start (replace semantics)
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,size_t & start)
{
// Merge the information, replacing the properties prp... in [start, start+v2.size())
recv.template merge_prp_v<replace_,typename T::value_type, HeapMemory, openfpm::grow_policy_double, prp...>(v2,start);
// advance the marker past the elements consumed by this message
start += v2.size();
}
};
//! Helper class to merge (ghost-get) data
//! Stateful front-end: keeps the running insertion offset and dispatches
//! on the compile-time flag sr to op_ssend_gg_recv_merge_impl.
struct op_ssend_gg_recv_merge
{
//! starting marker: offset in the destination where the next message is merged
size_t start;
//! constructor
//! \param start initial insertion offset
op_ssend_gg_recv_merge(size_t start)
:start(start)
{}
//! execute the merge of message i into recv (start is advanced as a side effect)
template<bool sr, typename T, typename D, typename S, int ... prp> void execute(D & recv,S & v2,size_t i)
{
op_ssend_gg_recv_merge_impl<sr>::template execute<T,D,S,prp...>(recv,v2,i,start);
}
};
//////////////////////////////////////////////////
//! temporal buffer for reductions
union red
......@@ -203,7 +75,6 @@ union red
double d;
};
/*! \brief This class virtualize the cluster of PC as a set of processes that communicate
*
* At the moment it is an MPI-like interface, with a more type aware, and simple, interface.
......@@ -228,7 +99,7 @@ union red
*
*/
class Vcluster
class Vcluster_base
{
//! log file
Vcluster_log log;
......@@ -293,9 +164,6 @@ class Vcluster
//! vector of the size of send buffers
openfpm::vector<size_t> sz_send;
//! Receive buffers
openfpm::vector<BHeapMemory> recv_buf;
//! barrier request
MPI_Request bar_req;
......@@ -303,17 +171,22 @@ class Vcluster
MPI_Status bar_stat;
//! disable operator=
Vcluster & operator=(const Vcluster &) {return *this;};
Vcluster_base & operator=(const Vcluster_base &) {return *this;};
//! disable copy constructor
Vcluster(const Vcluster &)
Vcluster_base(const Vcluster_base &)
:NBX_cnt(0)
{};
protected:
//! Receive buffers
openfpm::vector<BHeapMemory> recv_buf;
public:
// Finalize the MPI program
~Vcluster()
~Vcluster_base()
{
#ifdef SE_CLASS2
check_delete(this);
......@@ -342,7 +215,7 @@ public:
* \param argv pointer to arguments vector passed to the program
*
*/
Vcluster(int *argc, char ***argv)
Vcluster_base(int *argc, char ***argv)
:NBX_cnt(0)
{
#ifdef SE_CLASS2
......@@ -1024,101 +897,10 @@ public:
req.clear();
stat.clear();
}
/////////////////////// Semantic communication ///////////////////////
#include "VCluster_semantic.ipp"
};
// Function to initialize the global VCluster //
extern Vcluster * global_v_cluster_private;
/*! \brief Initialize a global instance of Runtime Virtual Cluster Machine
 *
 * Lazily allocates the process-wide Vcluster singleton; if it already
 * exists the call is a no-op.
 *
 * \param argc pointer to the program argument count (forwarded to the Vcluster constructor)
 * \param argv pointer to the program argument vector (forwarded to the Vcluster constructor)
 *
 */
static inline void init_global_v_cluster_private(int *argc, char ***argv)
{
if (global_v_cluster_private == NULL)
global_v_cluster_private = new Vcluster(argc,argv);
}
static inline void delete_global_v_cluster_private()
{
delete global_v_cluster_private;
}
/*! \brief Return a reference to the global Vcluster instance
 *
 * \return the process-wide Vcluster created by openfpm_init()
 *
 * NOTE(review): if openfpm_init() was never called the returned reference
 * dereferences a NULL pointer; with SE_CLASS1 an error message is printed
 * first, but the dereference still happens.
 *
 */
static inline Vcluster & create_vcluster()
{
#ifdef SE_CLASS1
if (global_v_cluster_private == NULL)
std::cerr << __FILE__ << ":" << __LINE__ << " Error you must call openfpm_init before using any distributed data structures";
#endif
return *global_v_cluster_private;
}
/*! \brief Check if the library has been initialized
 *
 * \return true if openfpm_init() has been called (and openfpm_finalize()
 *         has not reset the flag), false otherwise
 *
 */
static inline bool is_openfpm_init()
{
return ofp_initialized;
}
/*! \brief Initialize the library
 *
 * This function MUST be called before any other function
 *
 * Initializes PETSc when compiled with HAVE_PETSC, creates the global
 * Vcluster instance, and marks the library as initialized.
 *
 * \param argc pointer to the program argument count
 * \param argv pointer to the program argument vector
 *
 */
static inline void openfpm_init(int *argc, char ***argv)
{
#ifdef HAVE_PETSC
// PETSc must be initialized before it is used by any distributed structure
PetscInitialize(argc,argv,NULL,NULL);
#endif
init_global_v_cluster_private(argc,argv);
#ifdef SE_CLASS1
std::cout << "OpenFPM is compiled with debug mode LEVEL:1. Remember to remove SE_CLASS1 when you go in production" << std::endl;
#endif
#ifdef SE_CLASS2
std::cout << "OpenFPM is compiled with debug mode LEVEL:2. Remember to remove SE_CLASS2 when you go in production" << std::endl;
#endif
ofp_initialized = true;
}
/*! \brief Finalize the library
 *
 * This function MUST be called at the end of the program
 *
 * Finalizes PETSc when compiled with HAVE_PETSC, destroys the global
 * Vcluster instance, and marks the library as no longer initialized.
 *
 */
static inline void openfpm_finalize()
{
#ifdef HAVE_PETSC
PetscFinalize();
#endif
delete_global_v_cluster_private();
ofp_initialized = false;
}
#endif
/*
* VCluster_meta_function.hpp
*
* Created on: Dec 8, 2016
* Author: i-bird
*/
#ifndef OPENFPM_VCLUSTER_SRC_VCLUSTER_VCLUSTER_META_FUNCTION_HPP_
#define OPENFPM_VCLUSTER_SRC_VCLUSTER_VCLUSTER_META_FUNCTION_HPP_
#include "memory/BHeapMemory.hpp"
#include "Packer_Unpacker/has_max_prop.hpp"
//! Unpack dispatcher: generic case (result == false) — the received data
//! must be deserialized element-wise with Unpacker before being merged.
template<bool result, typename T, typename S>
struct unpack_selector_with_prp
{
//! Unpack every receive buffer and merge it into recv via op_param
//! \param recv destination container
//! \param recv_buf one raw buffer per received message
//! \param sz optional out: number of elements received per message
//! \param sz_byte optional out: number of bytes received per message
//! \param op_param operation object (add/merge) applied per message
template<typename op, int ... prp> static void call_unpack(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, openfpm::vector<size_t> * sz_byte, op & op_param)
{
if (sz_byte != NULL)
sz_byte->resize(recv_buf.size());
for (size_t i = 0 ; i < recv_buf.size() ; i++)
{
T unp;
// ExtPreAlloc is heap-allocated and manually ref-counted
// (incRef/decRef + delete below) around the unpack of this message
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(recv_buf.get(i).size(),recv_buf.get(i)));
mem.incRef();
Unpack_stat ps;
Unpacker<T,HeapMemory>::template unpack<>(mem, unp, ps);
size_t recv_size_old = recv.size();
// Merge the information (sr == true: data went through serialization)
op_param.template execute<true,T,decltype(recv),decltype(unp),prp...>(recv,unp,i);
size_t recv_size_new = recv.size();
if (sz_byte != NULL)
sz_byte->get(i) = recv_buf.get(i).size();
if (sz != NULL)
// element count is derived from how much the destination grew
sz->get(i) = recv_size_new - recv_size_old;
mem.decRef();
delete &mem;
}
}
};
//! Unpack dispatcher: fast path (result == true) — the element type needs
//! no serialization, so each receive buffer is reinterpreted in place as a
//! vector of T::value_type and merged without copying.
template<typename T, typename S>
struct unpack_selector_with_prp<true,T,S>
{
//! Map every receive buffer onto a vector view and merge it into recv via op_param
//! \param recv destination container
//! \param recv_buf one raw buffer per received message
//! \param sz optional out: number of elements received per message
//! \param sz_byte optional out: number of bytes received per message
//! \param op_param operation object (add/merge) applied per message
template<typename op, unsigned int ... prp> static void call_unpack(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, openfpm::vector<size_t> * sz_byte, op & op_param)
{
if (sz_byte != NULL)
sz_byte->resize(recv_buf.size());
for (size_t i = 0 ; i < recv_buf.size() ; i++)
{
// calculate the number of received elements
size_t n_ele = recv_buf.get(i).size() / sizeof(typename T::value_type);
// add the received particles to the vector
// NOTE(review): ptr1 is never explicitly deleted in this scope —
// presumably ownership/ref-counting passes to v2 via setMemory; confirm
PtrMemory * ptr1 = new PtrMemory(recv_buf.get(i).getPointer(),recv_buf.get(i).size());
// create vector representation to a piece of memory already allocated
openfpm::vector<typename T::value_type,PtrMemory,typename memory_traits_lin<typename T::value_type>::type, memory_traits_lin,openfpm::grow_policy_identity> v2;
v2.setMemory(*ptr1);
// resize with the number of elements
v2.resize(n_ele);
// Merge the information (sr == false: no serialization round-trip)
size_t recv_size_old = recv.size();
op_param.template execute<false,T,decltype(recv),decltype(v2),prp...>(recv,v2,i);
size_t recv_size_new = recv.size();
if (sz_byte != NULL)
sz_byte->get(i) = recv_buf.get(i).size();
if (sz != NULL)
// element count is derived from how much the destination grew
sz->get(i) = recv_size_new - recv_size_old;
}
}
};
//! Adapter that expands an index_tuple<prp...> into the variadic property
//! list expected by Packer/Unpacker. Only the index_tuple specialization
//! below is ever instantiated; the primary template is intentionally empty.
template<typename T>
struct call_serialize_variadic {};
template<int ... prp>
struct call_serialize_variadic<index_tuple<prp...>>
{
//! Accumulate into tot_size the bytes needed to pack the properties prp... of send
template<typename T> inline static void call_pr(T & send, size_t & tot_size)
{
Packer<T,HeapMemory>::template packRequest<prp...>(send,tot_size);
}
//! Pack the properties prp... of send into mem
template<typename T> inline static void call_pack(ExtPreAlloc<HeapMemory> & mem, T & send, Pack_stat & sts)
{
Packer<T,HeapMemory>::template pack<prp...>(mem,send,sts);
}
//! Unpack the receive buffers into recv, choosing at compile time between
//! the in-place fast path and the full deserialization path
template<typename op, typename T, typename S> inline static void call_unpack(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, openfpm::vector<size_t> * sz_byte, op & op_param)
{
// fast path is legal only for vectors whose elements need no pack/unpack
const bool result = has_pack_gen<typename T::value_type>::value == false && is_vector<T>::value == true;
unpack_selector_with_prp<result, T, S>::template call_unpack<op,prp...>(recv, recv_buf, sz, sz_byte, op_param);
}
};
//! There is max_prop inside
//! Compile-time front-end for the three phases of a semantic send/receive:
//! size request, packing, and unpacking. For vectors of trivially
//! transferable elements the raw buffer is used directly; otherwise the
//! data goes through Packer/Unpacker.
template<bool cond, typename op, typename T, typename S, unsigned int ... prp>
struct pack_unpack_cond_with_prp
{
//! Compute the size of the send buffer for send
//! \param send data to transmit
//! \param tot_size accumulated packed size (used only on the serialization path)
//! \param sz out: the size in bytes contributed by this send
static void packingRequest(T & send, size_t & tot_size, openfpm::vector<size_t> & sz)
{
typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
if (has_pack_gen<typename T::value_type>::value == false && is_vector<T>::value == true)
//if (has_pack<typename T::value_type>::type::value == false && has_pack_agg<typename T::value_type>::result::value == false && is_vector<T>::value == true)
{
// trivially transferable: the raw element storage is sent as-is
sz.add(send.size()*sizeof(typename T::value_type));
}
else
{
call_serialize_variadic<ind_prop_to_pack>::call_pr(send,tot_size);
sz.add(tot_size);
}
}
//! Fill send_buf with the pointer to the (possibly packed) data of send
//! \param mem pre-allocated packing memory (used only on the serialization path)
//! \param send data to transmit
//! \param sts packing statistics
//! \param send_buf out: list of raw buffers to hand to the communication layer
static void packing(ExtPreAlloc<HeapMemory> & mem, T & send, Pack_stat & sts, openfpm::vector<const void *> & send_buf)
{
typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
if (has_pack_gen<typename T::value_type>::value == false && is_vector<T>::value == true)
//if (has_pack<typename T::value_type>::type::value == false && has_pack_agg<typename T::value_type>::result::value == false && is_vector<T>::value == true)
{
//std::cout << demangle(typeid(T).name()) << std::endl;
// trivially transferable: point directly at the element storage
send_buf.add(send.getPointer());
}
else
{
// packed data is appended at the end of the shared packing memory
send_buf.add(mem.getPointerEnd());
call_serialize_variadic<ind_prop_to_pack>::call_pack(mem,send,sts);
}
}
//! Unpack the receive buffers into recv, applying op_param per message
//! \param recv destination container
//! \param recv_buf one raw buffer per received message
//! \param sz optional out: elements received per message
//! \param sz_byte optional out: bytes received per message
//! \param op_param operation object (add/merge) applied per message
static void unpacking(S & recv, openfpm::vector<BHeapMemory> & recv_buf, openfpm::vector<size_t> * sz, openfpm::vector<size_t> * sz_byte, op & op_param)
{
typedef index_tuple<prp...> ind_prop_to_pack;
call_serialize_variadic<ind_prop_to_pack>::template call_unpack<op,T,S>(recv, recv_buf, sz, sz_byte, op_param);
}
};
/////////////////////////////
//! Helper class to add data without serialization
//! (sr == false: the received bytes are used in place through PtrMemory,
//! so the destination grows with an identity policy — no copy of the buffer)
template<bool sr>
struct op_ssend_recv_add_sr
{
//! Append the elements of v2 to recv
//! \param recv destination container
//! \param v2 source container mapped onto the raw receive buffer
//! \param i index of the processed message (unused in this case)
template<typename T, typename D, typename S, int ... prp> static void execute(D & recv,S & v2, size_t i)
{
// Merge the information: add only the selected properties prp...
recv.template add_prp<typename T::value_type,PtrMemory,openfpm::grow_policy_identity,openfpm::vect_isel<typename T::value_type>::value,prp...>(v2);
}
};
//! Helper class to add data with serialization
//! (sr == true: elements were unpacked into heap-backed storage,
//! so HeapMemory with a doubling grow policy is used)
template<>
struct op_ssend_recv_add_sr<true>
{
//! Append the elements of v2 to recv
//! \param recv destination container
//! \param v2 source container holding the deserialized elements
//! \param i index of the processed message (unused in this case)
template<typename T, typename D, typename S, int ... prp> static void execute(D & recv,S & v2, size_t i)
{
// Merge the information: add only the selected properties prp...
recv.template add_prp<typename T::value_type,HeapMemory,openfpm::grow_policy_double,openfpm::vect_isel<typename T::value_type>::value, prp...>(v2);
}
};
//! Helper class to add data
//! Front-end that dispatches at compile time (on the flag sr) to the
//! serialized or non-serialized op_ssend_recv_add_sr implementation.
template<typename op>
struct op_ssend_recv_add
{
//! Add data
//! \param recv destination container
//! \param v2 received data
//! \param i index of the processed message
template<bool sr, typename T, typename D, typename S, int ... prp> static void execute(D & recv,S & v2, size_t i)
{
// Forward to the sr-specific implementation
op_ssend_recv_add_sr<sr>::template execute<T,D,S,prp...>(recv,v2,i);
}
};
//! Helper class to merge data without serialization
//! (sr == false: v2 views the raw receive buffer through PtrMemory)
template<bool sr,template<typename,typename> class op>
struct op_ssend_recv_merge_impl
{
//! Merge the elements of v2 into recv at the positions listed in opart.get(i)
//! \param recv destination container
//! \param v2 received data for message i
//! \param i index of the processed message
//! \param opart per-processor list of target positions for the merge
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
{
// Merge the information applying the operation op property-by-property
recv.template merge_prp_v<op,typename T::value_type, PtrMemory, openfpm::grow_policy_identity, prp...>(v2,opart.get(i));
}
};
//! Helper class to merge data with serialization
//! (sr == true: v2 holds deserialized elements in heap-backed storage)
template<template<typename,typename> class op>
struct op_ssend_recv_merge_impl<true,op>
{
//! Merge the elements of v2 into recv at the positions listed in opart.get(i)
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
{
// Merge the information applying the operation op property-by-property
recv.template merge_prp_v<op,typename T::value_type, HeapMemory, openfpm::grow_policy_double, prp...>(v2,opart.get(i));
}
};
//! Helper class to merge data
//! Stateful front-end: carries the merge-position lists (opart) and
//! dispatches on the compile-time flag sr to op_ssend_recv_merge_impl.
template<template<typename,typename> class op>
struct op_ssend_recv_merge
{
//! For each processor contain the list of the particles with which I must merge the information
openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart;
//! constructor
//! \param opart per-processor merge-position lists (kept by reference; must outlive this object)
op_ssend_recv_merge(openfpm::vector<openfpm::vector<aggregate<size_t,size_t>>> & opart)
:opart(opart)
{}
//! execute the merge of message i into recv
template<bool sr, typename T, typename D, typename S, int ... prp> void execute(D & recv,S & v2,size_t i)
{
op_ssend_recv_merge_impl<sr,op>::template execute<T,D,S,prp...>(recv,v2,i,opart);
}
};
//! Helper class to merge (ghost-get) data without serialization
//! (sr == false: v2 views the raw receive buffer through PtrMemory)
template<bool sr>
struct op_ssend_gg_recv_merge_impl
{
//! Merge the elements of v2 into recv starting at position start (replace semantics)
//! \param recv destination container
//! \param v2 received data for message i
//! \param i index of the processed message (unused here)
//! \param start running offset into recv; advanced past the merged elements
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,size_t & start)
{
// Merge the information, replacing the properties prp... in [start, start+v2.size())
recv.template merge_prp_v<replace_,typename T::value_type, PtrMemory, openfpm::grow_policy_identity, prp...>(v2,start);
start += v2.size();
}
};
//! Helper class to merge (ghost-get) data with serialization
//! (sr == true: v2 holds deserialized elements in heap-backed storage)
template<>
struct op_ssend_gg_recv_merge_impl<true>
{
//! Merge the elements of v2 into recv starting at position start (replace semantics)
template<typename T, typename D, typename S, int ... prp> inline static void execute(D & recv,S & v2,size_t i,size_t & start)
{
// Merge the information, replacing the properties prp... in [start, start+v2.size())
recv.template merge_prp_v<replace_,typename T::value_type, HeapMemory, openfpm::grow_policy_double, prp...>(v2,start);
// advance the marker past the elements consumed by this message
start += v2.size();
}
};
//! Helper class to merge (ghost-get) data
//! Stateful front-end: keeps the running insertion offset and dispatches
//! on the compile-time flag sr to op_ssend_gg_recv_merge_impl.
struct op_ssend_gg_recv_merge
{
//! starting marker: offset in the destination where the next message is merged
size_t start;
//! constructor
//! \param start initial insertion offset
op_ssend_gg_recv_merge(size_t start)
:start(start)
{}
//! execute the merge of message i into recv (start is advanced as a side effect)
template<bool sr, typename T, typename D, typename S, int ... prp> void execute(D & recv,S & v2,size_t i)
{
op_ssend_gg_recv_merge_impl<sr>::template execute<T,D,S,prp...>(recv,v2,i,start);
}
};
//////////////////////////////////////////////////
#endif /* OPENFPM_VCLUSTER_SRC_VCLUSTER_VCLUSTER_META_FUNCTION_HPP_ */
......@@ -650,29 +650,54 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv)
sz_recv2.clear();
sz_recv3.clear();
//! [dsde with complex objects1]
// A vector of vector we want to send each internal vector to one specified processor
openfpm::vector<openfpm::vector<size_t>> v1;
// We use this empty vector to receive data
openfpm::vector<size_t> v2;
// We use this empty vector to receive data
openfpm::vector<openfpm::vector<size_t>> v3;
// in this case each processor will send a message of different size to all the other processor
// but can also be a subset of processors
v1.resize(vcl.getProcessingUnits());
size_t nc = vcl.getProcessingUnits() / SSCATTER_MAX;
size_t nr = vcl.getProcessingUnits() - nc * SSCATTER_MAX;
nr = ((nr-1) * nr) / 2;
size_t n_ele = nc * SSCATTER_MAX * (SSCATTER_MAX - 1) / 2 + nr;
// We fill the send buffer with some sense-less data
for(size_t i = 0 ; i < v1.size() ; i++)
{
// each vector is filled with a different message size
for (size_t j = 0 ; j < i % SSCATTER_MAX ; j++)
v1.get(i).add(j);
// generate the sending list (in this case the sendinf list is all the other processor)
// but in general can be some of them and totally random
prc_send.add((i + vcl.getProcessUnitID()) % vcl.getProcessingUnits());
}
// Send and receive from the other processor v2 container the received data
// Because in this case v2 is an openfpm::vector<size_t>, all the received
// vector are concatenated one over the other. For example if the processor receive 3 openfpm::vector<size_t>
// each having 3,4,5 elements. v2 will be a vector of 12 elements
vcl.SSendRecv(v1,v2,prc_send,prc_recv2,sz_recv2);
// Send and receive from the other processors v2 contain the received data
// Because in this case v2 is an openfpm::vector<openfpm::vector<size_t>>, all the vector from
// each processor will be collected. For example if the processor receive 3 openfpm::vector<size_t>
// each having 3,4,5 elements. v2 will be a vector of vector of 3 elements (openfpm::vector) and
// each element will be respectivly 3,4,5 elements
vcl.SSendRecv(v1,v3,prc_send,prc_recv3,sz_recv3);
//! [dsde with complex objects1]
size_t nc = vcl.getProcessingUnits() / SSCATTER_MAX;
size_t nr = vcl.getProcessingUnits() - nc * SSCATTER_MAX;
nr = ((nr-1) * nr) / 2;
size_t n_ele = nc * SSCATTER_MAX * (SSCATTER_MAX - 1) / 2 + nr;
BOOST_REQUIRE_EQUAL(v2.size(),n_ele);
size_t nc_check = (vcl.getProcessingUnits()-1) / SSCATTER_MAX;
BOOST_REQUIRE_EQUAL(v3.size(),vcl.getProcessingUnits()-1-nc_check);
......@@ -735,8 +760,8 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv)
prc_send.add((i + vcl.getProcessUnitID()) % vcl.getProcessingUnits());
}
vcl.SSendRecv(v1,v2,prc_send,prc_recv2,sz_recv2,RECEIVE_KNOWN);
vcl.SSendRecv(v1,v3,prc_send,prc_recv3,sz_recv3,RECEIVE_KNOWN);
vcl.SSendRecv(v1,v2,prc_send,prc_recv2,sz_recv2,RECEIVE_KNOWN | KNOWN_ELEMENT_OR_BYTE);
vcl.SSendRecv(v1,v3,prc_send,prc_recv3,sz_recv3);
BOOST_REQUIRE_EQUAL(v2.size(),n_ele);
size_t nc_check = (vcl.getProcessingUnits()-1) / SSCATTER_MAX;
......
......@@ -8,8 +8,8 @@
#ifndef VCLUSTER_UNIT_TEST_UTIL_HPP_
#define VCLUSTER_UNIT_TEST_UTIL_HPP_
#include "VCluster.hpp"
#include "Point_test.hpp"
#include "VCluster_base.hpp"
#include "Vector/vector_test_util.hpp"
#define NBX 1
......@@ -35,21 +35,36 @@ int mod(int x, int m) {
// Alloc the buffer to receive the messages
//! [message alloc]
void * msg_alloc(size_t msg_i ,size_t total_msg, size_t total_p, size_t i,size_t ri, void * ptr)