Commit b57540c8 authored by incardon's avatar incardon

Memleak_check move

parent 3e846786
......@@ -10,6 +10,7 @@
#include "MPI_wrapper/MPI_IallreduceW.hpp"
#include "MPI_wrapper/MPI_IrecvW.hpp"
#include "MPI_wrapper/MPI_IsendW.hpp"
#include "MPI_wrapper/MPI_IAllGather.hpp"
#include <exception>
#include "Vector/map_vector.hpp"
#ifdef DEBUG
......@@ -26,6 +27,7 @@
#define SERIVCE_MESSAGE_TAG 16384
#define SEND_RECV_BASE 8192
#define GATHER_BASE 24576
extern size_t n_vcluster;
extern bool global_mpi_init;
......@@ -747,6 +749,7 @@ public:
return true;
}
/*! \brief Send data to a processor
*
* \warning In order to avoid deadlock every send must be coupled with a recv
......@@ -812,36 +815,65 @@ public:
return true;
}
/*! \brief Recv data from a processor
*
* \warning In order to avoid deadlock every recv must be coupled with a send
* in case you want to send data without knowledge from the other side
* consider to use sendRecvMultipleMessages
/*! \brief Receive data from a processor
 *
 * \warning In order to avoid deadlock every recv must be coupled with a send
 *          in case you want to send data without knowledge from the other side
 *          consider to use sendRecvMultipleMessages
 *
 * \warning operation is asynchronous, execute must be called to ensure it completed
 *
 * \see sendRecvMultipleMessages
 *
 * \param proc processor id
 * \param tag id
 * \param v vector that will hold the received data
 *
 * \return true if succeed false otherwise
 *
 */
template<typename T, typename Mem, typename gr> bool recv(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
{
#ifdef DEBUG
	checkType<T>();
#endif
	// Reserve one request object for the non-blocking receive
	req.add();

	// Point-to-point messages live in the SEND_RECV tag space
	size_t mpi_tag = SEND_RECV_BASE + tag;

	// Post the asynchronous MPI receive; completion happens in execute()
	MPI_IrecvW<T>::recv(proc,mpi_tag,v,req.last());

	return true;
}
/*! \brief Gather the data from all processors
*
* \warning operation is asynchronous execute must be called to ensure they are executed
*
* \see sendRecvMultipleMessages
*
* \param proc processor id
* \param tag id
* \param v vector to send
* \param v vector to receive
* \param send data to send
*
* \return true if succeed false otherwise
*
*/
template<typename T, typename Mem, typename gr> bool recv(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v)
template<typename T, typename Mem, typename gr> bool allGather(T & send, openfpm::vector<T,Mem,gr> & v)
{
#ifdef DEBUG
checkType<T>();
#endif
// recv over MPI
// Create one request
req.add();
// Number of processors
v.resize(getProcessingUnits());
// receive
MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last());
MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last());
return true;
}
......
......@@ -679,4 +679,21 @@ template<typename T> void test_send_recv_primitives(size_t n, Vcluster & vcl)
}
}
/*! \brief Verify a single-element allGather for primitive type T
 *
 * Every processor contributes its own rank id; after execute() the
 * gathered vector must contain 0..P-1 in rank order.
 *
 * \param vcl cluster communicator under test
 */
template<typename T> void test_single_all_gather_primitives(Vcluster & vcl)
{
	//! [allGather numbers]

	openfpm::vector<T> clt;
	T data = vcl.getProcessUnitID();

	vcl.allGather(data,clt);
	vcl.execute();

	size_t n_proc = vcl.getProcessingUnits();
	for (size_t rank = 0 ; rank < n_proc ; rank++)
		BOOST_REQUIRE_EQUAL(rank,clt.get(rank));

	//! [allGather numbers]
}
#endif /* VCLUSTER_UNIT_TEST_UTIL_HPP_ */
......@@ -128,6 +128,22 @@ BOOST_AUTO_TEST_CASE(VCluster_send_recv)
test_send_recv_primitives<double>(N_V_ELEMENTS,vcl);
}
// Unit test: allGather must work for every primitive type on the
// globally initialized cluster communicator.
BOOST_AUTO_TEST_CASE(VCluster_allgather)
{
// Global communicator shared by all VCluster unit tests
Vcluster & vcl = *global_v_cluster;
// Exercise the single-element allGather for each primitive type;
// each call posts the collective and checks the gathered ranks
test_single_all_gather_primitives<unsigned char>(vcl);
test_single_all_gather_primitives<char>(vcl);
test_single_all_gather_primitives<short>(vcl);
test_single_all_gather_primitives<unsigned short>(vcl);
test_single_all_gather_primitives<int>(vcl);
test_single_all_gather_primitives<unsigned int>(vcl);
test_single_all_gather_primitives<long int>(vcl);
test_single_all_gather_primitives<unsigned long int>(vcl);
test_single_all_gather_primitives<float>(vcl);
test_single_all_gather_primitives<double>(vcl);
}
BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv)
{
std::cout << "VCluster unit test start" << "\n";
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment