Commit aaf739c3 authored by incardon

Added missing files

parent d32eae4e
#include "ComUnit.hpp"
#define SERVICE_TAG 0xFFFFFFF
/*! \brief Send some data globally to one processor when the receiving side
 * does not know that the communication is coming
 *
 * Send some data globally to one processor when the receiving side
 * does not know that the communication is coming
 *
 * \warning if this function was already called for processor p, the previous request is overwritten
 *
 * \param p is the processor number
 * \param buf is the buffer pointer
 * \param sz is the size of the communication
 *
 */
bool ComUnit::SentToU(size_t p, void * buf, size_t sz)
{
    // Before the communication can complete we have to notify the other
    // processor that we have some data to send, so record the request here
    if (p >= comReq.size())
    {
        std::cerr << "Error: file: " << __FILE__ << " line: " << __LINE__ << " processor " << p << " does not exist" << "\n";
        return false;
    }

    // mark that we want to communicate with processor p and store the message length
    comReq[p] = 1;
    sizeReq[p] = sz;

    // NOTE: the data buffer itself is not stored here; in this sketch the
    // payload exchange is left to wait()

    return true;
}
/*! \brief Send some data locally (to its neighborhood) to one processor
 *
 * Send some data locally to one processor
 *
 * \param buf is the buffer pointer
 * \param sz is the size of the communication
 *
 */
bool ComUnit::SendToNU(void * buf, size_t sz)
{
    // stub: the neighborhood send is not implemented yet
    return true;
}
/*! \brief Send some data globally to one processor when the receiving side
 * already knows that the communication is coming
 *
 * Send some data globally to one processor when the receiving side
 * already knows that the communication is coming
 *
 * \warning if this function was already called for processor p, the previous request is overwritten
 *
 * \param p is the processor number
 * \param buf is the buffer pointer
 * \param sz is the size of the communication
 *
 */
bool ComUnit::SendTo(size_t p, void * buf, size_t sz)
{
    // queue a non-blocking send; the matching receive is expected on processor p (sz is a byte count)
    reqs.push_back(MPI_Request());
    MPI_Isend(buf,(int)sz,MPI_BYTE,(int)p,SERVICE_TAG,MPI_COMM_WORLD,&reqs.back());
    return true;
}
/*! \brief Wait for all communication to complete
 *
 * Wait for all communication to complete
 *
 * \return true if no error occurs
 *
 */
bool ComUnit::wait()
{
// Here we have two types of communication to handle
// Type 1: one side does not know that the other side wants to communicate
// Type 2: the receiving side knows that the sender wants to communicate
// The reqs structure handles the communications of type 2
// comReq and sizeReq store the requests of type 1
// For type 1 we reduce-scatter comReq: this returns, for each processor K,
// the number of processors that need to communicate with K
unsigned int n_incoming = 0;
std::vector<int> rs_counts(comReq.size(),1);
reqs.push_back(MPI_Request());
MPI_Ireduce_scatter(&comReq[0],&n_incoming,&rs_counts[0],MPI_UNSIGNED,MPI_SUM,MPI_COMM_WORLD,&reqs.back());
// For type 2 we already have each recv coupled to a send, so just wait for completion
//! wait for all requests to complete
stat.resize(reqs.size());
MPI_Waitall(reqs.size(),&reqs[0],&stat[0]);
//! For each incoming request queue an MPI_Irecv with MPI_ANY_SOURCE,
//! receiving the length of the message that the sending processor needs
//! to communicate
// TODO: still to be implemented, n_incoming receives are needed
// (see the sketch after this function)

//! For each outgoing request queue an MPI_Isend sending the length of
//! the message this processor needs to communicate
// TODO: still to be implemented (see the sketch after this function)

//! wait for all requests to complete
stat.resize(reqs.size());
MPI_Waitall(reqs.size(),&reqs[0],&stat[0]);

//! finally send and receive the data
// TODO: still to be implemented (see the sketch after this function)

//! wait for all requests to complete
stat.resize(reqs.size());
MPI_Waitall(reqs.size(),&reqs[0],&stat[0]);
return true;
}
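/*! \brief Illustrative sketch of the size handshake described in wait() above
 *
 * Given comReq (1 at position K if this processor wants to send to K) and
 * sizeReq (message length destined to K), each processor learns how many
 * unexpected messages it will receive and their sizes, using only standard
 * MPI calls. The function name and the local bookkeeping are assumptions
 * made for illustration; they are not part of the ComUnit interface.
 */
void size_handshake_sketch(std::vector<unsigned int> & comReq,
                           std::vector<size_t> & sizeReq)
{
    // 1) reduce-scatter comReq: processor K receives the number of
    //    processors that want to communicate with K
    unsigned int n_incoming = 0;
    std::vector<int> rs_counts(comReq.size(), 1);
    MPI_Reduce_scatter(&comReq[0], &n_incoming, &rs_counts[0],
                       MPI_UNSIGNED, MPI_SUM, MPI_COMM_WORLD);

    std::vector<MPI_Request> hs_reqs;
    std::vector<size_t> in_sizes(n_incoming);

    // 2) for each incoming request queue an MPI_Irecv with MPI_ANY_SOURCE,
    //    receiving the length of the message the sender wants to communicate
    for (unsigned int i = 0 ; i < n_incoming ; i++)
    {
        hs_reqs.push_back(MPI_Request());
        MPI_Irecv(&in_sizes[i], sizeof(size_t), MPI_BYTE, MPI_ANY_SOURCE,
                  SERVICE_TAG, MPI_COMM_WORLD, &hs_reqs.back());
    }

    // 3) for each outgoing request queue an MPI_Isend with the message length
    for (size_t p = 0 ; p < comReq.size() ; p++)
    {
        if (comReq[p] == 0)
            continue;

        hs_reqs.push_back(MPI_Request());
        MPI_Isend(&sizeReq[p], sizeof(size_t), MPI_BYTE, (int)p,
                  SERVICE_TAG, MPI_COMM_WORLD, &hs_reqs.back());
    }

    // 4) wait for the handshake to complete
    std::vector<MPI_Status> hs_stat(hs_reqs.size());
    if (hs_reqs.size() != 0)
        MPI_Waitall(hs_reqs.size(), &hs_reqs[0], &hs_stat[0]);

    // at this point in_sizes[i] together with hs_stat[i].MPI_SOURCE
    // (for i < n_incoming) identify the unexpected messages, so the
    // actual data exchange can be posted
}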
#ifndef COM_UNIT_HPP
#define COM_UNIT_HPP
#include <mpi.h>
#include <vector>
/*! \brief This is the abstraction of the communication
 * unit for the virtual cluster
 *
 * This is the abstraction of the communication
 * unit of the virtual cluster
 *
 * When this unit is returned back, you must ensure that no other thread
 * is making MPI calls
 *
 */
class ComUnit
{
    // if this processor needs to communicate with processor K
    // it puts 1 at position K
    std::vector<unsigned int> comReq;
    // if this processor needs to send a message of length m to processor K
    // it puts m at position K
    std::vector<size_t> sizeReq;
    // List of all pending communication requests
    std::vector<MPI_Request> reqs;
    // List of the statuses of all the requests
    std::vector<MPI_Status> stat;

public:

    //! Send data to a processor that does not expect it
    bool SentToU(size_t p, void * buf, size_t sz);
    //! Send data to the neighborhood
    bool SendToNU(void * buf, size_t sz);
    //! Send data to a processor that already expects it
    bool SendTo(size_t p, void * buf, size_t sz);
    //! wait for all communication to complete
    bool wait();
};
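/* Intended usage (illustrative sketch inferred from the documentation above;
 * the variable names are placeholders, not part of the original interface):
 *
 *   ComUnit cu;
 *   cu.SentToU(dst, buf, sz);    // announce a send the receiver does not expect
 *   cu.SendTo(dst2, buf2, sz2);  // queue a send the receiver already expects
 *   cu.wait();                   // complete all pending communications
 */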
#endif
#ifndef MPI_IALLREDUCEW_HPP
#define MPI_IALLREDUCEW_HPP
#include <mpi.h>
#include <iostream>
#include <typeinfo>
/*! \brief Set of wrapper classes for MPI_Iallreduce
 *
 * The purpose of these classes is to choose the correct MPI call based on the type we want to reduce
 *
 */
/*! \brief General reduction
 *
 * \tparam T any type
 *
 */
template<typename T> class MPI_IallreduceW
{
public:
static inline void reduce(T & buf,MPI_Op op, MPI_Request & req)
{
std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " cannot recognize " << typeid(T).name() << "\n";
}
};
/*! \brief specialization for integer
*
*/
template<> class MPI_IallreduceW<int>
{
public:
static inline void reduce(int & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for unsigned integer
*
*/
template<> class MPI_IallreduceW<unsigned int>
{
public:
static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for short
*
*/
template<> class MPI_IallreduceW<short>
{
public:
static inline void reduce(short & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for unsigned short
 *
 */
template<> class MPI_IallreduceW<unsigned short>
{
public:
static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for char
*
*/
template<> class MPI_IallreduceW<char>
{
public:
static inline void reduce(char & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for unsigned char
 *
 */
template<> class MPI_IallreduceW<unsigned char>
{
public:
static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for size_t
*
*/
template<> class MPI_IallreduceW<size_t>
{
public:
static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for long int
 *
 */
template<> class MPI_IallreduceW<long int>
{
public:
static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for float
*
*/
template<> class MPI_IallreduceW<float>
{
public:
static inline void reduce(float & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, MPI_COMM_WORLD,&req);
}
};
/*! \brief specialization for double
*
*/
template<> class MPI_IallreduceW<double>
{
public:
static inline void reduce(double & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, MPI_COMM_WORLD,&req);
}
};
////////////////// Specialization for vectors ///////////////
/*! \brief specialization for vector integer
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<int>>
{
public:
static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector short
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<short>>
{
public:
static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector char
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<char>>
{
public:
static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector size_t
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<size_t>>
{
public:
static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector float
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<float>>
{
public:
static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, MPI_COMM_WORLD,&req);
}
};*/
/*! \brief specialization for vector double
*
*/
/*template<> class MPI_IallreduceW<openfpm::vector<double>>
{
public:
static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, MPI_COMM_WORLD,&req);
}
};*/
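/*! \brief Usage sketch for the wrappers above (illustrative; the function
 *         name is an assumption, not part of the original interface)
 *
 * The template parameter selects the correct MPI datatype at compile time,
 * so the caller never has to name MPI_FLOAT, MPI_INT, ... explicitly.
 */
static inline float example_iallreduce_sum(float local_value)
{
    MPI_Request req;

    // the <float> specialization issues MPI_Iallreduce with MPI_FLOAT
    MPI_IallreduceW<float>::reduce(local_value, MPI_SUM, req);

    // complete the non-blocking reduction
    MPI_Status stat;
    MPI_Wait(&req, &stat);

    // every processor now holds the global sum
    return local_value;
}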
#endif
#include "VCluster.hpp"
Vcluster * global_v_cluster = NULL;
/*! \brief Initialize a global instance of Runtime Virtual Cluster Machine
*
* Initialize a global instance of Runtime Virtual Cluster Machine
*
*/
void init_global_v_cluster(int *argc, char ***argv)
{
if (global_v_cluster == NULL)
global_v_cluster = new Vcluster(argc,argv);
}
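/* Typical usage (illustrative sketch, not part of the original file):
 *
 *   int main(int argc, char* argv[])
 *   {
 *       init_global_v_cluster(&argc, &argv);
 *
 *       // ... communicate through *global_v_cluster ...
 *
 *       return 0;   // v_cls (below) deletes global_v_cluster at program exit
 *   }
 */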
// Deallocator object: it deallocates the global_v_cluster at the end of the program
class init_glob_v_cluster
{
public:
~init_glob_v_cluster()
{
delete global_v_cluster;
};
};
// Deallocate at the end
init_glob_v_cluster v_cls;
bool global_mpi_initialization = false;
/*
* Vcluster_object.hpp
*
* Created on: Feb 4, 2015
* Author: i-bird
*/
#ifndef VCLUSTER_OBJECT_HPP_
#define VCLUSTER_OBJECT_HPP_
/*! \brief Encapsulate any object created by the Virtual cluster machine
 *
 * \tparam T the original object type
 *
 */
template<typename T>
class Vcluster_object : public T
{
};
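/* Illustrative usage (sketch, not part of the original file): the wrapper
 * publicly inherits T, so it behaves exactly like the wrapped type.
 *
 *   Vcluster_object<std::vector<float>> v;
 *   v.push_back(1.0f);
 */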
#endif /* VCLUSTER_OBJECT_HPP_ */
/*
* Vcluster_object_array.hpp
*
* Created on: Feb 4, 2015
* Author: Pietro Incardona
*/
#ifndef VCLUSTER_OBJECT_ARRAY_HPP_
#define VCLUSTER_OBJECT_ARRAY_HPP_
#include <vector>
#include <iostream>
#include "VObject.hpp"
/*! \brief Encapsulate an array of objects created by the Virtual cluster machine
 *
 * \tparam T the original object type
 *
 */
template<typename T>
class Vcluster_object_array : public VObject
{
std::vector<T> objects;
public:
/*! \brief Constructor of object array
*
*/
Vcluster_object_array()
{
}
/*! \brief Return the size of the objects array
*
* \return the size of the array
*
*/
size_t size()
{
return objects.size();
}
/*! \brief Return the element i
 *
 * \param i element index
 *
 * \return a reference to the object i
 *
 */
T & get(unsigned int i)
{
return objects[i];
}
/*! \brief Check if this Object is an array
*
* \return true, it is an array
*
*/
bool isArray()
{
return true;
}
/*! \brief Destroy the object
*
*/
virtual void destroy()
{
// Destroy the objects
objects.clear();
}
/*! \brief Get the size of the memory needed to pack the object
*
* \return the size of the message to pack the object
*
*/
size_t packObjectSize()
{
size_t message = 0;
// accumulate the packed size of each object
for (size_t i = 0 ; i < objects.size() ; i++)
{
message += objects[i].packObjectSize();
}
return message;
}
/*! \brief Pack the object into memory
 *
 * \param mem memory where to write the packed object
 *
 * \return the size of the message produced while packing the object
 *
 */
size_t packObject(void * mem)
{
// Pointer is zero
size_t ptr = 0;
unsigned char * m = (unsigned char *)mem;
// pack each object
for (size_t i = 0 ; i < objects.size() ; i++)
{
ptr += objects[i].packObject(&m[ptr]);
}
#ifdef DEBUG
if (ptr != packObjectSize())
{
std::cerr << "Error " << __FILE__ << " " << __LINE__ << " the pack object size does not match the message" << "\n";
}
#endif
return ptr;
}
/*! \brief Calculate the size needed to pack one object of the array
 *
 * \param i index of the object in the array
 *
 * \return the size of the message to pack the object
 *
 */
size_t packObjectInArraySize(size_t i)
{
return objects[i].packObjectSize();
}
/*! \brief Pack the object i of the array (the message produced can be used
 * to move one object from one processor to another)
 *
 * \param i index of the object to pack
 * \param p Memory of the packed object message
 *
 * \return the size of the message produced
 *
 */
size_t packObjectInArray(size_t i, void * p)
{
return objects[i].packObject(p);
}
/*! \brief Destroy an object from the array
*
* \param i object to destroy
*
*/
void destroy(size_t i)
{
objects.erase(objects.begin() + i);
}
/*! \brief Return the object j in the array
*
* \param j element j
*
*/
T & operator[](size_t j)
{
return objects[j];
}
/*! \brief Resize the array
 *
 * \param n the new number of objects
 *
 */
void resize(size_t n)
{
objects.resize(n);
}
};
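/*! \brief Illustrative packing sketch (the helper name is an assumption,
 *         not part of the original interface)
 *
 * Shows the intended use of packObjectSize()/packObject(): query the total
 * message size, allocate a buffer of that size, and serialize the whole
 * array into it.
 */
template<typename T>
std::vector<unsigned char> pack_object_array(Vcluster_object_array<T> & arr)
{
    // buffer large enough to hold the packed message
    std::vector<unsigned char> msg(arr.packObjectSize());

    // serialize every object of the array into the buffer
    arr.packObject(msg.data());

    return msg;
}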
#endif /* VCLUSTER_OBJECT_ARRAY_HPP_ */
/*
* VObject.hpp
*
* Created on: Feb 5, 2015
* Author: i-bird
*/
#ifndef VOBJECT_HPP_
#define VOBJECT_HPP_
#include <cstddef>
/*! \brief VObject
*
* Any object produced by the Virtual cluster (MUST) inherit this class
*
*/
class VObject
{
public:
// virtual destructor, since derived objects are handled through VObject pointers
virtual ~VObject() {}
// Check if this Object is an array
virtual bool isArray() = 0;
// destroy the object
virtual void destroy() = 0;
// get the size of the memory needed to pack the object
virtual size_t packObjectSize() = 0;
// pack the object
virtual size_t packObject(void *) = 0;
// get the size of the memory needed to pack the object in the array
virtual size_t packObjectInArraySize(size_t i) = 0;
// pack the object i of the array (the message produced can be used to move
// one object from one processor to another)
virtual size_t packObjectInArray(size_t i, void * p) = 0;
// destroy an element from the array
virtual void destroy(size_t n) = 0;
};
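/*! \brief Minimal VObject implementation (illustrative sketch; the class name
 *         and its payload are assumptions, not part of the original interface)
 *
 * A trivial non-array object carrying a single size_t value, showing what a
 * class derived from VObject has to provide.
 */
class VObjectExample : public VObject
{
    size_t value;

public:
    VObjectExample() : value(0) {}

    // this object is not an array
    virtual bool isArray() { return false; }

    // destroy simply resets the payload
    virtual void destroy() { value = 0; }

    // the packed message is just the raw value
    virtual size_t packObjectSize() { return sizeof(size_t); }

    virtual size_t packObject(void * mem)
    {
        *static_cast<size_t *>(mem) = value;
        return sizeof(size_t);
    }

    // the array-related members are not meaningful for a single object
    virtual size_t packObjectInArraySize(size_t i) { return 0; }
    virtual size_t packObjectInArray(size_t i, void * p) { return 0; }
    virtual void destroy(size_t n) {}
};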
#endif /* VOBJECT_HPP_ */