Commit 26e5bb68 authored by incardon

Refactoring

parent c9fae2a0
#ifndef MPI_IALLREDUCEW_HPP
#define MPI_IALLREDUCEW_HPP
#include <mpi.h>
#include <iostream>
#include <typeinfo>
/*! \brief Set of wrapper classes for MPI_Iallreduce
*
* The purpose of these classes is to choose the correct MPI call based on the type we want to reduce
*
*/
/*! \brief General reduction
*
* \tparam T any type
*
*/
template<typename T> class MPI_IallreduceW
{
public:
static inline void reduce(T & buf,MPI_Op op, MPI_Request & req)
{
std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " cannot recognize " << typeid(T).name() << "\n";
}
};
/*! \brief specialization for integer
*
*/
template<> class MPI_IallreduceW<int>
{
public:
static inline void reduce(int & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for unsigned integer
*
*/
template<> class MPI_IallreduceW<unsigned int>
{
public:
static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for short
*
*/
template<> class MPI_IallreduceW<short>
{
public:
static inline void reduce(short & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for unsigned short
*
*/
template<> class MPI_IallreduceW<unsigned short>
{
public:
static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for char
*
*/
template<> class MPI_IallreduceW<char>
{
public:
static inline void reduce(char & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for unsigned char
*
*/
template<> class MPI_IallreduceW<unsigned char>
{
public:
static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for size_t
*
*/
template<> class MPI_IallreduceW<size_t>
{
public:
static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for long int
*
*/
template<> class MPI_IallreduceW<long int>
{
public:
static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for float
*
*/
template<> class MPI_IallreduceW<float>
{
public:
static inline void reduce(float & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for double
*
*/
template<> class MPI_IallreduceW<double>
{
public:
static inline void reduce(double & buf,MPI_Op op, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, MPI_COMM_WORLD,&req));
}
};
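/*! \brief Usage sketch (illustrative, not part of the original header)
*
* A minimal example of how these wrappers are meant to be called, assuming
* MPI has already been initialized; the names local_sum and req are
* hypothetical.
*
* \code
* int local_sum = 42;
* MPI_Request req;
* MPI_IallreduceW<int>::reduce(local_sum,MPI_SUM,req);   // picks MPI_INT
* MPI_Wait(&req,MPI_STATUS_IGNORE);    // local_sum now holds the global sum
* \endcode
*/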
////////////////// Specialization for vectors ///////////////
/*! \brief specialization for vector integer
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<int>>
{
public:
static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector short
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<short>>
{
public:
static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector char
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<char>>
{
public:
static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector size_t
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<size_t>>
{
public:
static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector float
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<float>>
{
public:
static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector double
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<double>>
{
public:
static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, MPI_COMM_WORLD,&req);
}
};*/
#endif
#ifndef MPI_IRECV_HPP
#define MPI_IRECV_HPP
#include <mpi.h>
/*! \brief Set of wrapper classes for MPI_Irecv
*
* The purpose of these classes is to choose the correct MPI call based on the type we want to receive
*
*/
/*! \brief General receive
*
* \tparam T any type
*
*/
template<typename T> class MPI_IrecvW
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for vector of integer
*
*/
template<> class MPI_IrecvW<int>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for unsigned integer
*
*/
template<> class MPI_IrecvW<unsigned int>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for short
*
*/
template<> class MPI_IrecvW<short>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for unsigned short
*
*/
template<> class MPI_IrecvW<unsigned short>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for char
*
*/
template<> class MPI_IrecvW<char>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for unsigned char
*
*/
template<> class MPI_IrecvW<unsigned char>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for size_t
*
*/
template<> class MPI_IrecvW<size_t>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for long int
*
*/
template<> class MPI_IrecvW<long int>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for float
*
*/
template<> class MPI_IrecvW<float>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req));
}
};
/*! \brief specialization for double
*
*/
template<> class MPI_IrecvW<double>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req));
}
};
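/*! \brief Usage sketch (illustrative, not part of the original header)
*
* A minimal example of posting a receive through the wrapper, assuming MPI is
* initialized and the sender uses a matching type and tag; the vector must be
* resized to the expected number of elements before the call.
*
* \code
* openfpm::vector<double> v;
* v.resize(16);                            // expect 16 doubles
* MPI_Request req;
* MPI_IrecvW<double>::recv(0,100,v,req);   // receive from processor 0, tag 100
* MPI_Wait(&req,MPI_STATUS_IGNORE);
* \endcode
*/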
#endif
#ifndef MPI_ISEND_HPP
#define MPI_ISEND_HPP
#include <mpi.h>
/*! \brief Set of wrapper classes for MPI_Isend
*
* The purpose of these classes is to choose the correct MPI call based on the type we want to send
*
*/
/*! \brief General send
*
* \tparam T any type
*
*/
template<typename T> class MPI_IsendW
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for vector of integer
*
*/
template<> class MPI_IsendW<int>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for unsigned integer
*
*/
template<> class MPI_IsendW<unsigned int>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for short
*
*/
template<> class MPI_IsendW<short>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for unsigned short
*
*/
template<> class MPI_IsendW<unsigned short>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for char
*
*/
template<> class MPI_IsendW<char>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for unsigned char
*
*/
template<> class MPI_IsendW<unsigned char>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for size_t
*
*/
template<> class MPI_IsendW<size_t>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for long int
*
*/
template<> class MPI_IsendW<long int>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for float
*
*/
template<> class MPI_IsendW<float>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for double
*
*/
template<> class MPI_IsendW<double>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req)
{
MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req);
}
};
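/*! \brief Usage sketch (illustrative, not part of the original header)
*
* A minimal example of the matching send side, assuming MPI is initialized and
* the destination processor posts an MPI_IrecvW of the same type and tag.
*
* \code
* openfpm::vector<double> v;
* v.add(1.0); v.add(2.0);
* MPI_Request req;
* MPI_IsendW<double>::send(1,100,v,req);   // send to processor 1 with tag 100
* MPI_Wait(&req,MPI_STATUS_IGNORE);
* \endcode
*/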
#endif
/*
* MPI_util.hpp
*
* Created on: Jul 7, 2015
* Author: Pietro Incardona
*/
#ifndef MPI_UTIL_HPP_
#define MPI_UTIL_HPP_
/*! \brief Print a human-readable message from an MPI error code
*
* \param error_code the MPI error code
*
*/
static void error_handler(int error_code)
{
int rank;
char error_string[BUFSIZ];
int length_of_error_string, error_class;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Error_class(error_code, &error_class);
MPI_Error_string(error_class, error_string, &length_of_error_string);
std::cerr << rank << ": " << error_string;
MPI_Error_string(error_code, error_string, &length_of_error_string);
std::cerr << rank << ": " << error_string;
}
#define MPI_SAFE_CALL(call) {\
int err = call;\
if (MPI_SUCCESS != err) {\
std::cerr << "MPI error: "<< __FILE__ << " " << __LINE__ << "\n";\
error_handler(err);\
}\
}
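/*! \brief Usage sketch (illustrative, not part of the original header)
*
* Any MPI call returning an error code can be wrapped: on failure the file and
* line are printed, and error_handler prints the error class and description.
*
* \code
* MPI_SAFE_CALL(MPI_Barrier(MPI_COMM_WORLD));
* \endcode
*/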
#endif /* MPI_UTIL_HPP_ */
#include "ComUnit.hpp"
#define SERVICE_TAG 0xFFFFFFF
/*! \brief Send some data globally to one processor when the receiving side
* does not know about the communication
*
* Send some data globally to one processor when the receiving side
* does not know about the communication
*
* \warning if you already called this function with the same p, the previous request is overwritten
*
* \param p is the processor number
* \param buf is the buffer pointer
* \param sz is the size of the communication
*
*/
bool SentToU(size_t p, void * buf,size_t sz)
{
// Before completing the communication we have to notify the other
// processor that we have some data to send.
if (p >= comReq.size())
{
std::cerr << "Error: file: " << __FILE__ << " line: " << __LINE__ << " processor " << p << " does not exist";
return false;
}
return true;
}
/*! \brief Send some data locally (to its neighborhood) to one processor
*
* Send some data locally to one processor
*
*/
bool SendToNU(void * buf, size_t sz)
{
return true;
}
/*! \brief Send some data globally to one processor when the receiving side
* knows about the communication
*
* Send some data globally to one processor when the receiving side
* knows about the communication
*
* \warning if you already called this function with the same p, the previous request is overwritten
*
* \param p is the processor number
* \param buf is the buffer pointer
* \param sz is the size of the communication
*
*/
bool SendTo(size_t p, void * buf, size_t sz)
{
MPI_ISend(p,buf,sz);
return true;
}
/*! \brief Wait for all communication to complete
*
* Wait for all communication to complete
*
* \return true if no error occur
*
*/
bool wait()
{
// Here we have two types of communication to handle
// Type 1: one side does not know that the other side wants to communicate
// Type 2: the other side knows that we want to communicate
// The reqs structure handles the communication of type 2
// comReq and comSize store the requests of type 1
// For type 1 we have to reduce-scatter comReq: for each processor K this
// returns the number of processors that need to communicate with K
// (see the sketch after this function)
MPI_ireduce_scatter();
// For type 2 we already have each recv coupled to a send, so just wait for completion
//! wait for all requests to complete
MPI_Waitall(reqs.size(),&reqs[0],&status[0]);
//! For the number of incoming requests queue an MPI_Irecv with MPI_ANY_SOURCE
//! It is going to receive the length of the message that each processor needs
//! to communicate
for (int i = 0 ; i < 5; i++)
{
}
//! For the number of outgoing requests queue an MPI_Isend sending the length of
//! the message the processor needs to communicate
for (int i = 0 ; i < 5; i++)
{
}
//! wait for all requests to complete
MPI_Waitall(reqs.size(),&reqs[0],&status[0]);
//! finally send and receive the data
for (int i = 0 ; i < 5; i++)
{
}
for (int i = 0 ; i < 5; i++)
{
}
//! wait for all requests to complete
MPI_Waitall(reqs.size(),&reqs[0],&status[0]);
return true;
}
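/*! \brief Illustrative sketch (an assumption, not the implementation used here)
*
* The reduce-scatter step described in wait(): summing comReq across all
* processors and scattering one element per rank tells processor K how many
* processors intend to communicate with it.
*
* \code
* unsigned int incoming = 0;
* std::vector<int> recvcounts(comReq.size(),1);   // one element per rank
* MPI_SAFE_CALL(MPI_Reduce_scatter(&comReq[0],&incoming,&recvcounts[0],
* MPI_UNSIGNED,MPI_SUM,MPI_COMM_WORLD));
* // incoming now holds the number of MPI_Irecv with MPI_ANY_SOURCE to post
* \endcode
*/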
#ifndef COM_UNIT_HPP
#define COM_UNIT_HPP
#include <mpi.h>
#include <vector>
/*! \brief This is the abstraction of the communication
* unit for the virtual cluster
*
* Abstraction of the communication unit of the virtual cluster
*
* When this unit is returned back, you must ensure that no other thread
* is using MPI calls
*
*/
class ComUnit
{
// if this processor needs to communicate with processor K
// it puts 1 at position K
std::vector<unsigned int> comReq;
// if this processor needs to send processor K
// a message of length m, it puts m at position K
std::vector<size_t> sizeReq;
// List of all the requests
std::vector<MPI_Request> reqs;
// List of the statuses of all the requests
std::vector<MPI_Status> stat;
//! Send data
bool SentTo();
//! Send data to the neighborhood
bool SendToN();
//! wait for all communication to complete