Commit e858a42b authored by incardon

Fixing Vcluster with the new grids interface

parent 99436bef
@@ -28,10 +28,10 @@ public:
  *
  */
-template<typename T, typename ly, typename Mem, typename gr> class MPI_IsendW
+template<typename T, typename Mem, typename gr> class MPI_IsendW
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<T,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<T,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -41,10 +41,10 @@ public:
 /*! \brief specialization for vector of integer
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<int,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<int,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<int,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<int,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -53,10 +53,10 @@ public:
 /*! \brief specialization for unsigned integer
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<unsigned int,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<unsigned int,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -65,10 +65,10 @@ public:
 /*! \brief specialization for short
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<short,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<short,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<short,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<short,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -77,10 +77,10 @@ public:
 /*! \brief specialization for short
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<unsigned short,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<unsigned short,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -89,10 +89,10 @@ public:
 /*! \brief specialization for char
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<char,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<char,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<char,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<char,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -101,10 +101,10 @@ public:
 /*! \brief specialization for char
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<unsigned char,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<unsigned char,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -113,10 +113,10 @@ public:
 /*! \brief specialization for size_t
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<size_t,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<size_t,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -125,10 +125,10 @@ public:
 /*! \brief specialization for size_t
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<long int,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<long int,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -137,10 +137,10 @@ public:
 /*! \brief specialization for float
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<float,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<float,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<float,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<float,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req);
 	}
@@ -149,10 +149,10 @@ public:
 /*! \brief specialization for double
  *
  */
-template<typename ly, typename Mem, typename gr> class MPI_IsendW<double,ly,Mem,gr>
+template<typename Mem, typename gr> class MPI_IsendW<double,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<double,ly,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<double,Mem,gr> & v, MPI_Request & req)
 	{
 		MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req);
 	}
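Note: the hunks above all make the same mechanical change — the layout template parameter `ly` is gone from `openfpm::vector`, so the `MPI_IsendW` primary template and each per-type specialization drop one argument. The wrapper itself is a compile-time map from element type to MPI datatype, with raw bytes as the fallback. A self-contained sketch of that dispatch pattern (using `std::vector` instead of the OpenFPM types, so it is illustrative rather than the library code):

```cpp
#include <mpi.h>
#include <vector>

// Primary template: unknown element types are shipped as raw bytes.
template<typename T> struct IsendW
{
	static void send(int proc, int tag, std::vector<T> & v, MPI_Request & req)
	{
		MPI_Isend(v.data(), static_cast<int>(v.size() * sizeof(T)), MPI_BYTE,
		          proc, tag, MPI_COMM_WORLD, &req);
	}
};

// Specialization: native MPI datatype, count given in elements, not bytes.
template<> struct IsendW<int>
{
	static void send(int proc, int tag, std::vector<int> & v, MPI_Request & req)
	{
		MPI_Isend(v.data(), static_cast<int>(v.size()), MPI_INT,
		          proc, tag, MPI_COMM_WORLD, &req);
	}
};
```

Mind the count argument: the `MPI_BYTE` fallback multiplies by `sizeof(T)`, while the typed specializations pass the element count directly — mixing the two up is a classic source of truncated messages.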
@@ -223,7 +223,7 @@ public:
 	// Sending property object
 	typedef object<typename object_creator<typename T::value_type::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
+	typedef openfpm::vector<prp_object,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
 	// Create an object over the preallocated memory (No allocation is produced)
 	dtype dest;
@@ -331,7 +331,7 @@ public:
 	// Sending property object and vector
 	typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,ExtPreAlloc<Mem>> dtype;
+	typedef openfpm::vector<prp_object,ExtPreAlloc<Mem>> dtype;
 	// Calculate the required memory for packing
 	size_t alloc_ele = dtype::calculateMem(obj.size(),0);
@@ -368,7 +368,7 @@ public:
 	// Sending property object
 	typedef object<typename object_creator<typename T::value_type::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
+	typedef openfpm::vector<prp_object,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
 	// Create an object over the preallocated memory (No allocation is produced)
 	dtype dest;
@@ -390,7 +390,7 @@ public:
 {
 	// Sending property object
 	typedef object<typename object_creator<typename T::type::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
+	typedef openfpm::vector<prp_object,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
 	// Calculate the required memory for packing
 	size_t alloc_ele = dtype::calculateMem(obj.size(),0);
@@ -411,7 +411,7 @@ public:
 {
 	// Sending property object
 	typedef object<typename object_creator<typename T::value_type::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
+	typedef openfpm::vector<prp_object,ExtPreAlloc<Mem>,openfpm::grow_policy_identity> dtype;
 	// Calculate the required memory for packing
 	size_t alloc_ele = dtype::calculateMem(sub.getVolume(),0);
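Note: the five packing hunks above follow one pattern — each `dtype` typedef loses the `openfpm::device_cpu<prp_object>` layout argument, leaving the element type, the memory backend (`ExtPreAlloc<Mem>`), and the grow policy. `grow_policy_identity` over preallocated memory is what allows `dtype dest` to be constructed without allocating. A minimal self-contained analog of that idea (illustrative only, not the OpenFPM `ExtPreAlloc` implementation):

```cpp
#include <cassert>
#include <cstddef>
#include <new>

// A fixed-capacity vector view over caller-provided memory: the "identity"
// grow policy simply refuses to grow, so no allocation can ever happen here.
template<typename T>
struct prealloc_vector
{
	T * base;     // caller-owned buffer
	size_t cap;   // capacity in elements, fixed for the lifetime of the view
	size_t n = 0;

	prealloc_vector(void * mem, size_t cap_) : base(static_cast<T *>(mem)), cap(cap_) {}

	// Mirrors the calculateMem(n,0) calls above: how many bytes the caller
	// must reserve before packing n elements.
	static size_t calculateMem(size_t n_ele) { return n_ele * sizeof(T); }

	void add(const T & v)
	{
		assert(n < cap && "identity grow policy: capacity is fixed");
		new (&base[n++]) T(v);
	}
};
```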
@@ -89,7 +89,7 @@ BOOST_AUTO_TEST_CASE ( packer_unpacker_test )
 	size_t sz[] = {16,16,16};
 	grid_cpu<3,Point_test<float>> g(sz);
-	g.setMemory<HeapMemory>();
+	g.setMemory();
 	fill_grid<3>(g);
 	grid_key_dx_iterator_sub<3> sub(g.getGrid(),{1,2,3},{5,6,7});
@@ -225,7 +225,7 @@ BOOST_AUTO_TEST_CASE ( packer_unpacker_test )
 	size_t sz2[] = {16,16,16};
 	grid_cpu<3,Point_test<float>> g_test(sz2);
-	g_test.setMemory<HeapMemory>();
+	g_test.setMemory();
 	grid_key_dx_iterator_sub<3> sub2(g_test.getGrid(),{1,2,3},{5,6,7});
 	Unpacker<grid_cpu<3,Point_test<float>>,HeapMemory>::unpack<pt::x,pt::v>(mem,sub2,g_test,ps);
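Note: both test hunks replace `setMemory<HeapMemory>()` with a plain `setMemory()`. The inference — an assumption from this diff, not verified against the new grid headers — is that the grid type now carries its memory backend, so the call site no longer chooses it:

```cpp
// Hypothetical sketch of the design implied by the change: the memory type
// becomes a template parameter of the grid with a default, so setMemory()
// needs no per-call template argument.
struct HeapMemory { /* stub standing in for the OpenFPM heap allocator */ };

template<unsigned int dim, typename T, typename Mem = HeapMemory>
class grid_cpu_sketch
{
	Mem mem;
public:
	void setMemory() { /* allocate the grid storage through Mem */ }
};
```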
@@ -153,7 +153,7 @@ public:
 	// Sending property object
 	typedef object<typename object_creator<typename T::value_type::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,PtrMemory,openfpm::grow_policy_identity> stype;
+	typedef openfpm::vector<prp_object,PtrMemory,openfpm::grow_policy_identity> stype;
 	// Calculate the size to pack the object
 	size_t size = stype::calculateMem(obj.size(),0);
@@ -236,7 +236,7 @@ public:
 {
 	// object that store the information in mem
 	typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,PtrMemory,openfpm::grow_policy_identity> stype;
+	typedef openfpm::vector<prp_object,PtrMemory,openfpm::grow_policy_identity> stype;
 	// Calculate the size to pack the object
 	size_t size = stype::calculateMem(obj.size(),0);
@@ -269,7 +269,7 @@ public:
 {
 	// object that store the information in mem
 	typedef object<typename object_creator<typename T::value_type::type,prp...>::type> prp_object;
-	typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,PtrMemory,openfpm::grow_policy_identity> stype;
+	typedef openfpm::vector<prp_object,PtrMemory,openfpm::grow_policy_identity> stype;
 	size_t size = stype::calculateMem(sub_it.getVolume(),0);
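Note: same one-argument deletion on the unpacking side, but here the memory backend is `PtrMemory`: the receive buffer already holds the packed bytes, and `stype` is built directly over that pointer, so unpacking produces a view rather than a copy. An illustrative analog of that idiom (not the OpenFPM `PtrMemory` class):

```cpp
#include <cstddef>

// Memory adaptor over an existing buffer: getPointer() hands back the
// caller's pointer, and nothing is ever allocated or freed here.
struct ptr_memory_sketch
{
	void * ptr;
	size_t sz;

	ptr_memory_sketch(void * p, size_t s) : ptr(p), sz(s) {}

	void * getPointer() { return ptr; }
	size_t size() const { return sz; }
};
```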
@@ -164,7 +164,7 @@ public:
 	MPI_Comm_size(MPI_COMM_WORLD, &size);
 	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-#ifdef MEMLEAK_CHECK
+#ifdef SE_CLASS2
 	process_v_cl = rank;
 #endif
@@ -764,7 +764,7 @@ public:
  * \return true if succeed false otherwise
  *
  */
-	template<typename T, typename ly, typename Mem, typename gr> bool send(size_t proc, size_t tag, openfpm::vector<T,ly,Mem,gr> & v)
+	template<typename T, typename Mem, typename gr> bool send(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
 	{
 #ifdef DEBUG
 		checkType<T>();
@@ -776,7 +776,7 @@ public:
 		req.add();
 		// send
-		MPI_IsendW<T,ly,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last());
+		MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last());
 		return true;
 	}
@@ -829,7 +829,7 @@ public:
  * \return true if succeed false otherwise
  *
  */
-	template<typename T, typename ly, typename Mem, typename gr> bool recv(size_t proc, size_t tag, openfpm::vector<T,ly,Mem,gr> & v)
+	template<typename T, typename Mem, typename gr> bool recv(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
 	{
 #ifdef DEBUG
 		checkType<T>();
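Note: after this change the caller-side signatures are `send(proc, tag, v)` and `recv(proc, tag, v)`, with everything else carried by the vector type. A hedged usage sketch — the `vcl` instance, its construction, and the final request-completion call are assumptions based on typical Vcluster usage and are not part of this diff:

```cpp
// Fragment, not a complete program: 'vcl' stands for an initialized
// Vcluster instance (construction elided).
void exchange(Vcluster & vcl)
{
	openfpm::vector<float> v_snd;
	openfpm::vector<float> v_rcv;

	// ... fill v_snd ...

	// The layout parameter 'ly' is gone: element, memory and grow-policy
	// types are now deduced from the vector argument itself.
	vcl.send(1 /* dest rank */, 0 /* tag */, v_snd);
	vcl.recv(0 /* src rank */, 0 /* tag */, v_rcv);

	// send/recv post non-blocking MPI operations; the queued requests must
	// complete before v_snd is modified or v_rcv is read.
}
```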