......@@ -6,7 +6,7 @@ if (CUDA_FOUND)
set(CUDA_SOURCES ../../openfpm_devices/src/memory/CudaMemory.cu VCluster/cuda/VCluster_semantic_unit_cuda_tests.cu VCluster/cuda/VCluster_unit_tests.cu )
endif()
add_executable(vcluster_test main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})
add_executable(vcluster_test main.cpp VCluster/VCluster.cpp VCluster/InVis.cpp VCluster/InVisVolume.cpp ../../openfpm_devices/src/memory/ShmAllocator_manager.cpp ../../openfpm_devices/src/memory/SemManager.cpp ../../openfpm_devices/src/memory/ShmAllocator.cpp ../../openfpm_devices/src/memory/ShmBuffer.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})
if ( CMAKE_COMPILER_IS_GNUCC )
target_compile_options(vcluster_test PRIVATE "-Wno-deprecated-declarations")
......@@ -15,7 +15,7 @@ if ( CMAKE_COMPILER_IS_GNUCC )
endif()
endif()
add_library(vcluster STATIC VCluster/VCluster.cpp)
add_library(vcluster STATIC VCluster/VCluster.cpp VCluster/InVis.cpp VCluster/InVisVolume.cpp)
###########################
......@@ -27,6 +27,8 @@ if(CUDA_FOUND)
endif()
endif()
set(JAVA_HOME /usr/lib/jvm/adoptopenjdk-11-hotspot-amd64)
target_include_directories (vcluster_test PUBLIC ${CUDA_INCLUDE_DIRS})
target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../openfpm_devices/src/)
......@@ -35,6 +37,8 @@ target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../
target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/config)
target_include_directories (vcluster_test PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (vcluster_test PUBLIC ${PETSC_INCLUDES})
target_include_directories (vcluster_test PUBLIC ${JAVA_HOME}/include ${JAVA_HOME}/include/linux)
target_include_directories (vcluster PUBLIC ${CUDA_INCLUDE_DIRS})
target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
......@@ -43,7 +47,11 @@ target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/config)
target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../openfpm_data/src/)
target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../openfpm_devices/src/)
target_include_directories (vcluster PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (vcluster PUBLIC ${JAVA_HOME}/include ${JAVA_HOME}/include/linux)
#target_link_libraries(vcluster ${JAVA_HOME}/lib/server/libjvm.so)
target_link_libraries(vcluster_test ${JAVA_HOME}/lib/server/libjvm.so)
target_link_libraries(vcluster_test ${Boost_LIBRARIES})
target_link_libraries(vcluster_test ${PETSC_LIBRARIES})
......@@ -69,6 +77,8 @@ install(FILES MPI_wrapper/MPI_IallreduceW.hpp
install(FILES VCluster/VCluster_base.hpp
VCluster/VCluster.hpp
VCluster/VCluster_meta_function.hpp
VCluster/InVis.hpp
VCluster/InVisVolume.hpp
DESTINATION openfpm_vcluster/include/VCluster )
install (FILES util/Vcluster_log.hpp
......
......@@ -28,9 +28,9 @@
class MPI_IAllGatherWB
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE, rbuf, sz_r, MPI_BYTE, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE, rbuf, sz_r, MPI_BYTE, ext_comm,&req));
}
};
......@@ -44,9 +44,9 @@ template<typename T> class MPI_IAllGatherW
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T) * sz_s,MPI_BYTE, rbuf, sz_r * sizeof(T), MPI_BYTE, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T) * sz_s,MPI_BYTE, rbuf, sz_r * sizeof(T), MPI_BYTE, ext_comm,&req));
}
};
......@@ -57,9 +57,9 @@ public:
template<> class MPI_IAllGatherW<int>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_INT, rbuf, sz_r, MPI_INT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_INT, rbuf, sz_r, MPI_INT, ext_comm,&req));
}
};
......@@ -69,9 +69,9 @@ public:
template<> class MPI_IAllGatherW<unsigned int>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED, rbuf, sz_r, MPI_UNSIGNED, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED, rbuf, sz_r, MPI_UNSIGNED, ext_comm,&req));
}
};
......@@ -81,9 +81,9 @@ public:
template<> class MPI_IAllGatherW<short>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_SHORT, rbuf, sz_r, MPI_SHORT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_SHORT, rbuf, sz_r, MPI_SHORT, ext_comm,&req));
}
};
......@@ -94,9 +94,9 @@ public:
template<> class MPI_IAllGatherW<unsigned short>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_SHORT, rbuf, sz_r, MPI_UNSIGNED_SHORT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_SHORT, rbuf, sz_r, MPI_UNSIGNED_SHORT, ext_comm,&req));
}
};
......@@ -107,9 +107,9 @@ public:
template<> class MPI_IAllGatherW<char>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_CHAR, rbuf, sz_r, MPI_CHAR, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_CHAR, rbuf, sz_r, MPI_CHAR, ext_comm,&req));
}
};
......@@ -120,9 +120,9 @@ public:
template<> class MPI_IAllGatherW<unsigned char>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_CHAR, rbuf, sz_r, MPI_UNSIGNED_CHAR, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_CHAR, rbuf, sz_r, MPI_UNSIGNED_CHAR, ext_comm,&req));
}
};
......@@ -132,9 +132,9 @@ public:
template<> class MPI_IAllGatherW<size_t>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_LONG, rbuf, sz_r, MPI_UNSIGNED_LONG, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_LONG, rbuf, sz_r, MPI_UNSIGNED_LONG, ext_comm,&req));
}
};
......@@ -144,9 +144,9 @@ public:
template<> class MPI_IAllGatherW<long int>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_LONG, rbuf, sz_r, MPI_LONG, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_LONG, rbuf, sz_r, MPI_LONG, ext_comm,&req));
}
};
......@@ -156,9 +156,9 @@ public:
template<> class MPI_IAllGatherW<float>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_FLOAT, rbuf, sz_r, MPI_FLOAT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_FLOAT, rbuf, sz_r, MPI_FLOAT, ext_comm,&req));
}
};
......@@ -168,9 +168,9 @@ public:
template<> class MPI_IAllGatherW<double>
{
public:
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_DOUBLE, rbuf, sz_r, MPI_DOUBLE, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_DOUBLE, rbuf, sz_r, MPI_DOUBLE, ext_comm,&req));
}
};
......
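The hunks above replace the hard-coded MPI_COMM_WORLD in every MPI_IAllGatherW specialization with the new ext_comm argument. A minimal caller-side sketch; the header path and an already initialised MPI environment are assumptions, not part of this diff:

#include <mpi.h>
#include <vector>

#include "MPI_wrapper/MPI_IAllGatherW.hpp"   // assumed header location

// Gather one int per rank of an arbitrary communicator.
void gather_ranks(MPI_Comm comm)
{
    int nproc = 0;
    MPI_Comm_size(comm, &nproc);

    int my_rank = 0;
    MPI_Comm_rank(comm, &my_rank);

    std::vector<int> all(nproc);
    MPI_Request req;

    // For the <int> specialization sz_s / sz_r are element counts.
    MPI_IAllGatherW<int>::gather(&my_rank, 1, all.data(), 1, req, comm);

    MPI_Wait(&req, MPI_STATUS_IGNORE);
    // all[i] now holds i on every rank of 'comm'.
}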
......@@ -33,9 +33,9 @@
class MPI_IBcastWB
{
public:
static inline void bcast(size_t proc ,void * buf, size_t sz, MPI_Request & req)
static inline void bcast(size_t proc ,void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE, proc , ext_comm,&req));
}
};
......@@ -48,9 +48,9 @@ public:
template<typename T> class MPI_IBcastW
{
public:
template<typename Memory> static inline void bcast(size_t proc ,openfpm::vector<T,Memory> & v, MPI_Request & req)
template<typename Memory> static inline void bcast(size_t proc ,openfpm::vector<T,Memory> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc , ext_comm,&req));
}
};
......@@ -61,9 +61,9 @@ public:
template<> class MPI_IBcastW<int>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<int> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_INT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_INT, proc , ext_comm,&req));
}
};
......@@ -73,9 +73,9 @@ public:
template<> class MPI_IBcastW<unsigned int>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<unsigned int> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<unsigned int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED, proc , ext_comm,&req));
}
};
......@@ -85,9 +85,9 @@ public:
template<> class MPI_IBcastW<short>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<short> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_SHORT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_SHORT, proc , ext_comm,&req));
}
};
......@@ -97,9 +97,9 @@ public:
template<> class MPI_IBcastW<unsigned short>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<unsigned short> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<unsigned short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc , ext_comm,&req));
}
};
......@@ -109,9 +109,9 @@ public:
template<> class MPI_IBcastW<char>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<char> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_CHAR, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_CHAR, proc , ext_comm,&req));
}
};
......@@ -121,9 +121,9 @@ public:
template<> class MPI_IBcastW<unsigned char>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<unsigned char> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<unsigned char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc , ext_comm,&req));
}
};
......@@ -133,9 +133,9 @@ public:
template<> class MPI_IBcastW<size_t>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<size_t> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<size_t> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc , ext_comm,&req));
}
};
......@@ -145,9 +145,9 @@ public:
template<> class MPI_IBcastW<long int>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<long int> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<long int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_LONG, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_LONG, proc , ext_comm,&req));
}
};
......@@ -157,9 +157,9 @@ public:
template<> class MPI_IBcastW<float>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<float> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<float> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_FLOAT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_FLOAT, proc , ext_comm,&req));
}
};
......@@ -169,9 +169,9 @@ public:
template<> class MPI_IBcastW<double>
{
public:
static inline void bcast(size_t proc ,openfpm::vector<double> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<double> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_DOUBLE, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_DOUBLE, proc , ext_comm,&req));
}
};
......@@ -195,15 +195,19 @@ struct bcast_inte_impl
//! root processor
size_t root;
//! MPI communicator
MPI_Comm ext_comm;
/*! \brief constructor
*
* \param v set of pointer buffers to set
*
*/
inline bcast_inte_impl(vect & send,
openfpm::vector<MPI_Request> & req,
size_t root)
:send(send),req(req),root(root)
openfpm::vector<MPI_Request> & req,
size_t root,
MPI_Comm ext_comm)
:send(send),req(req),root(root),ext_comm(ext_comm)
{};
//! It calls the copy function for each property
......@@ -216,7 +220,7 @@ struct bcast_inte_impl
req.add();
// gather
MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last());
MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last(),ext_comm);
}
};
......@@ -226,13 +230,14 @@ struct b_cast_helper
template<typename T, typename Mem, typename lt_type, template<typename> class layout_base >
static void bcast_(openfpm::vector<MPI_Request> & req,
openfpm::vector<T,Mem,lt_type,layout_base> & v,
size_t root)
size_t root,
MPI_Comm ext_comm)
{
// Create one request
req.add();
// gather
MPI_IBcastW<T>::bcast(root,v,req.last());
MPI_IBcastW<T>::bcast(root,v,req.last(),ext_comm);
}
};
......@@ -242,9 +247,10 @@ struct b_cast_helper<false>
template<typename T, typename Mem, typename lt_type, template<typename> class layout_base >
static void bcast_(openfpm::vector<MPI_Request> & req,
openfpm::vector<T,Mem,lt_type,layout_base> & v,
size_t root)
size_t root,
MPI_Comm ext_comm)
{
bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root);
bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root,ext_comm);
boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(bc);
}
......
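The broadcast wrappers and b_cast_helper now forward ext_comm in the same way. A minimal sketch of the typed wrapper; both header locations are assumptions:

#include <mpi.h>

#include "MPI_wrapper/MPI_IBcastW.hpp"   // assumed header location
#include "Vector/map_vector.hpp"         // openfpm::vector (assumed location)

// Broadcast a vector of doubles from rank 0 of 'comm'.
// Every rank must have resized 'v' to the same length beforehand,
// exactly as was required when MPI_COMM_WORLD was hard-coded.
void bcast_doubles(openfpm::vector<double> & v, MPI_Comm comm)
{
    MPI_Request req;

    MPI_IBcastW<double>::bcast(0 /*root*/, v, req, comm);

    MPI_Wait(&req, MPI_STATUS_IGNORE);
}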
......@@ -19,7 +19,7 @@
template<typename T> class MPI_IallreduceW
{
public:
static inline void reduce(T & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(T & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " cannot recognize " << typeid(T).name() << "\n";
}
......@@ -32,9 +32,9 @@ public:
template<> class MPI_IallreduceW<int>
{
public:
static inline void reduce(int & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, ext_comm,&req));
}
};
......@@ -44,9 +44,9 @@ public:
template<> class MPI_IallreduceW<unsigned int>
{
public:
static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, ext_comm,&req));
}
};
......@@ -56,9 +56,9 @@ public:
template<> class MPI_IallreduceW<short>
{
public:
static inline void reduce(short & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(short & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, ext_comm,&req));
}
};
......@@ -68,9 +68,9 @@ public:
template<> class MPI_IallreduceW<unsigned short>
{
public:
static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, ext_comm,&req));
}
};
......@@ -80,9 +80,9 @@ public:
template<> class MPI_IallreduceW<char>
{
public:
static inline void reduce(char & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(char & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, ext_comm,&req));
}
};
......@@ -92,9 +92,9 @@ public:
template<> class MPI_IallreduceW<unsigned char>
{
public:
static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, ext_comm,&req));
}
};
......@@ -104,9 +104,9 @@ public:
template<> class MPI_IallreduceW<size_t>
{
public:
static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, ext_comm,&req));
}
};
......@@ -116,9 +116,9 @@ public:
template<> class MPI_IallreduceW<long int>
{
public:
static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, ext_comm,&req));
}
};
......@@ -128,9 +128,9 @@ public:
template<> class MPI_IallreduceW<float>
{
public:
static inline void reduce(float & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(float & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, ext_comm,&req));
}
};
......@@ -140,9 +140,9 @@ public:
template<> class MPI_IallreduceW<double>
{
public:
static inline void reduce(double & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(double & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, ext_comm,&req));
}
};
......@@ -154,9 +154,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<int>>
{
public:
static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, ext_comm,&req);
}
};*/
......@@ -166,9 +166,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<short>>
{
public:
static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, ext_comm,&req);
}
};*/
......@@ -178,9 +178,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<char>>
{
public:
static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, ext_comm,&req);
}
};*/
......@@ -190,9 +190,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<size_t>>
{
public:
static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, ext_comm,&req);
}
};*/
......@@ -202,9 +202,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<float>>
{
public:
static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, ext_comm,&req);
}
};*/
......@@ -215,9 +215,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<double>>
{
public:
static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, ext_comm,&req);
}
};*/
......
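The reduction wrappers follow the same pattern; Vcluster_base::sum()/max()/min() below pass their stored ext_comm here. A minimal sketch, assuming the header location:

#include <mpi.h>

#include "MPI_wrapper/MPI_IallreduceW.hpp"   // assumed header location

// In-place non-blocking sum over 'comm'.
double global_sum(double local, MPI_Comm comm)
{
    MPI_Request req;

    MPI_IallreduceW<double>::reduce(local, MPI_SUM, req, comm);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    return local;   // every rank of 'comm' sees the same total
}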
......@@ -22,9 +22,9 @@ public:
* \param req MPI request
*
*/
static inline void recv(size_t proc , size_t tag ,void * buf, size_t sz, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE, proc, tag , ext_comm,&req));
}
};
......@@ -37,10 +37,15 @@ public:
template<typename T> class MPI_IrecvW
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req));
}
static inline void recv(size_t proc , size_t tag ,openfpm::vector_ofp<T> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req));
}
};
......@@ -50,9 +55,9 @@ public:
template<> class MPI_IrecvW<int>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_INT, proc, tag , ext_comm,&req));
}
};
......@@ -62,9 +67,9 @@ public:
template<> class MPI_IrecvW<unsigned int>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , ext_comm,&req));
}
};
......@@ -74,9 +79,9 @@ public:
template<> class MPI_IrecvW<short>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_SHORT, proc, tag , ext_comm,&req));
}
};
......@@ -86,9 +91,9 @@ public:
template<> class MPI_IrecvW<unsigned short>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , ext_comm,&req));
}
};
......@@ -98,9 +103,9 @@ public:
template<> class MPI_IrecvW<char>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_CHAR, proc, tag , ext_comm,&req));
}
};
......@@ -110,9 +115,9 @@ public:
template<> class MPI_IrecvW<unsigned char>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , ext_comm,&req));
}
};
......@@ -122,9 +127,9 @@ public:
template<> class MPI_IrecvW<size_t>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , ext_comm,&req));
}
};
......@@ -134,9 +139,9 @@ public:
template<> class MPI_IrecvW<long int>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_LONG, proc, tag , ext_comm,&req));
}
};
......@@ -146,9 +151,9 @@ public:
template<> class MPI_IrecvW<float>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , ext_comm,&req));
}
};
......@@ -158,9 +163,9 @@ public:
template<> class MPI_IrecvW<double>
{
public:
static inline void recv(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , ext_comm,&req));
}
};
......
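The receive wrappers gain the same trailing communicator, plus a vector_ofp overload in the generic case. A minimal sketch of posting a receive with the typed wrapper; the header locations are assumptions:

#include <mpi.h>

#include "MPI_wrapper/MPI_IrecvW.hpp"   // assumed header location
#include "Vector/map_vector.hpp"        // openfpm::vector (assumed location)

// Post a non-blocking receive of 'n' doubles from rank 'src' on 'comm'.
// The buffer must already be sized; the wrapper only wires up MPI_Irecv.
void post_recv(openfpm::vector<double> & v, size_t n, size_t src,
               size_t tag, MPI_Comm comm, MPI_Request & req)
{
    v.resize(n);
    MPI_IrecvW<double>::recv(src, tag, v, req, comm);
    // the caller must wait on 'req' (e.g. MPI_Wait) before reading 'v'
}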
......@@ -16,9 +16,9 @@
class MPI_IsendWB
{
public:
static inline void send(size_t proc , size_t tag ,const void * buf, size_t sz, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,const void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(buf, sz,MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(buf, sz,MPI_BYTE, proc, tag , ext_comm,&req);
}
};
......@@ -31,12 +31,27 @@ public:
template<typename T, typename Mem, typename gr> class MPI_IsendW
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<T,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<T,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req);
}
};
/*! \brief General send for a vector of any type
*
* \tparam any type
*
*/
template<typename T>
class MPI_IsendW<T,HeapMemory,int>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector_ofp<T> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req);
}
};
/*! \brief specialization for vector of integer
*
......@@ -44,9 +59,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<int,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<int,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , ext_comm,&req);
}
};
......@@ -56,9 +71,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<unsigned int,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , ext_comm,&req);
}
};
......@@ -68,9 +83,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<short,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<short,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<short,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , ext_comm,&req);
}
};
......@@ -80,9 +95,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<unsigned short,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , ext_comm,&req);
}
};
......@@ -92,9 +107,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<char,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<char,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<char,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , ext_comm,&req);
}
};
......@@ -104,9 +119,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<unsigned char,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , ext_comm,&req);
}
};
......@@ -116,9 +131,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<size_t,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , ext_comm,&req);
}
};
......@@ -128,9 +143,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<long int,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , ext_comm,&req);
}
};
......@@ -140,9 +155,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<float,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<float,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<float,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , ext_comm,&req);
}
};
......@@ -152,9 +167,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<double,Mem,gr>
{
public:
static inline void send(size_t proc , size_t tag ,openfpm::vector<double,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<double,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , ext_comm,&req);
}
};
......
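The send wrappers mirror the receive side, and a new MPI_IsendW<T,HeapMemory,int> specialization covers openfpm::vector_ofp. A matching sketch using the untyped byte wrapper, which needs no template arguments; the header location is an assumption:

#include <mpi.h>

#include "MPI_wrapper/MPI_IsendW.hpp"   // assumed header location

// Post the matching non-blocking raw-byte send on 'comm'; the typed
// MPI_IsendW<T,Mem,gr> variants are called the same way, with the
// communicator as the last argument.
void post_send(const void * buf, size_t bytes, size_t dst,
               size_t tag, MPI_Comm comm, MPI_Request & req)
{
    MPI_IsendWB::send(dst, tag, buf, bytes, req, comm);
    // the caller must wait on 'req' before reusing 'buf'
}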
This source diff could not be displayed because it is too large.
/*
* InVis.hpp
*
* Created on: May 11, 2020
* Author: Aryaman Gupta
*/
#ifndef OPENFPM_PDATA_INVIS_HPP
#define OPENFPM_PDATA_INVIS_HPP
#include <jni.h>
#include <mpi.h>    // needed for the MPI_Comm members below
class InVis
{
int windowSize;
int computePartners;
int imageSize;
JavaVM *jvm;
jclass clazz;
jobject obj;
MPI_Comm visComm;
int commSize;
void updateMemory(jmethodID methodID, int memKey, bool pos);
void getMemoryPos();
void getMemoryProps();
void receiveImages();
void updateCamera();
void doRender();
public:
InVis(int wSize, int cPartners, MPI_Comm vComm, bool isHead);
void manageVisHead();
void manageVisRenderer();
};
#endif //OPENFPM_PDATA_INVIS_HPP
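Based only on the declarations above, a hedged sketch of how the visualisation side might be driven on its own communicator. The window size and compute-partner count are illustrative values (they match the defaults in InVisRenderer.hpp below), not values taken from InVis.cpp:

#include <mpi.h>

#include "VCluster/InVis.hpp"

// Run either the visualisation head or a renderer rank on 'vis_comm'.
void run_vis_side(MPI_Comm vis_comm, bool is_head)
{
    InVis vis(700 /*wSize*/, 2 /*cPartners*/, vis_comm, is_head);

    if (is_head)
        vis.manageVisHead();
    else
        vis.manageVisRenderer();
}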
This source diff could not be displayed because it is too large.
/*
* InVisRenderer.hpp
*
* Created on: May 12, 2020
* Author: Aryaman Gupta
*/
#ifndef OPENFPM_PDATA_INVISRENDERER_HPP
#define OPENFPM_PDATA_INVISRENDERER_HPP
#include <jni.h>
#define windowSize 700
#define computePartners 2
#define imageSize (windowSize*windowSize*7)
class InVisRenderer
{
JavaVM *jvm;
void getMemoryPos(JNIEnv *env, jclass renderClass, jobject renderObject);
void getMemoryProps(JNIEnv *env, jclass renderClass, jobject renderObject);
void sendImage(JNIEnv *e, jobject clazz, jobject image);
void doRender(JNIEnv *env, jclass inVisClass, jobject inVisObject);
public:
void manageVisRenderer();
};
#endif //OPENFPM_PDATA_INVISRENDERER_HPP
This diff is collapsed.
/*
* InVisVolume.hpp
*
* Created on: June 18, 2020
* Author: Aryaman Gupta
*/
#ifndef SRC_INVISVOLUME_HPP
#define SRC_INVISVOLUME_HPP
#include <jni.h>
#include <mpi.h>
#include <vector>   // std::vector used for gridBuffers below
#include <memory/ShmBuffer.hpp>
class InVisVolume
{
int windowSize;
int computePartners;
int imageSize;
JavaVM *jvm;
jclass clazz;
jobject obj;
MPI_Comm visComm;
int commSize;
std::vector<std::vector<ShmBuffer *>> gridBuffers;
void updateMemory(jmethodID methodID, int memKey, bool pos);
void getMemory();
void receiveVDI();
void updateCamera();
void doRender();
public:
InVisVolume(int wSize, int cPartners, MPI_Comm vComm, bool isHead);
void manageVisHead();
void manageVolumeRenderer();
};
#endif //SRC_INVISVOLUME_HPP
......@@ -8,12 +8,14 @@
#include "util/print_stack.hpp"
#include "util/math_util_complex.hpp"
init_options global_option;
Vcluster<> * global_v_cluster_private_heap = NULL;
Vcluster<CudaMemory> * global_v_cluster_private_cuda = NULL;
//
std::vector<int> sieve_spf;
// number of vcluster instances
size_t n_vcluster = 0;
bool ofp_initialized = false;
......
This diff is collapsed.
......@@ -112,6 +112,9 @@ union red
template<typename InternalMemory>
class Vcluster_base
{
//! external communicator
MPI_Comm ext_comm;
//! log file
Vcluster_log log;
......@@ -238,8 +241,8 @@ public:
* \param argv pointer to arguments vector passed to the program
*
*/
Vcluster_base(int *argc, char ***argv)
:NBX_cnt(0)
Vcluster_base(int *argc, char ***argv, MPI_Comm ext_comm)
:ext_comm(ext_comm),NBX_cnt(0)
{
#ifdef SE_CLASS2
check_new(this,8,VCLUSTER_EVENT,PRJ_VCLUSTER);
......@@ -259,7 +262,7 @@ public:
// We try to get the local processors rank
MPI_Comm shmcomm;
MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
MPI_Comm_split_type(ext_comm, MPI_COMM_TYPE_SHARED, 0,
MPI_INFO_NULL, &shmcomm);
MPI_Comm_rank(shmcomm, &shmrank);
......@@ -268,8 +271,8 @@ public:
// Get the total number of process
// and the rank of this process
MPI_Comm_size(MPI_COMM_WORLD, &m_size);
MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
MPI_Comm_size(ext_comm, &m_size);
MPI_Comm_rank(ext_comm, &m_rank);
#ifdef SE_CLASS2
process_v_cl = m_rank;
......@@ -379,7 +382,7 @@ public:
*/
MPI_Comm getMPIComm()
{
return MPI_COMM_WORLD;
return ext_comm;
}
/*! \brief Get the total number of processors
......@@ -430,6 +433,15 @@ public:
return m_rank;
}
/*! \brief Get the rank within the node
*
* \return the rank of the process within the node
*
*/
size_t shmRank()
{
return shmrank;
}
/*! \brief Sum the numbers across all processors and get the result
*
......@@ -449,7 +461,7 @@ public:
req.add();
// reduce
MPI_IallreduceW<T>::reduce(num,MPI_SUM,req.last());
MPI_IallreduceW<T>::reduce(num,MPI_SUM,req.last(),ext_comm);
}
/*! \brief Get the maximum number across all processors (or reduction with infinity norm)
......@@ -468,7 +480,7 @@ public:
req.add();
// reduce
MPI_IallreduceW<T>::reduce(num,MPI_MAX,req.last());
MPI_IallreduceW<T>::reduce(num,MPI_MAX,req.last(),ext_comm);
}
/*! \brief Get the minimum number across all processors (or reduction with infinity norm)
......@@ -488,7 +500,7 @@ public:
req.add();
// reduce
MPI_IallreduceW<T>::reduce(num,MPI_MIN,req.last());
MPI_IallreduceW<T>::reduce(num,MPI_MIN,req.last(),ext_comm);
}
/*! \brief Send and receive multiple messages
......@@ -831,7 +843,7 @@ public:
#endif
tot_sent += sz[i];
MPI_SAFE_CALL(MPI_Issend(ptr[i], sz[i], MPI_BYTE, prc[i], SEND_SPARSE + NBX_cnt*131072 + i, MPI_COMM_WORLD,&req.last()));
MPI_SAFE_CALL(MPI_Issend(ptr[i], sz[i], MPI_BYTE, prc[i], SEND_SPARSE + NBX_cnt*131072 + i, ext_comm,&req.last()));
log.logSend(prc[i]);
}
}
......@@ -852,7 +864,7 @@ public:
MPI_Status stat_t;
int stat = false;
MPI_SAFE_CALL(MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG/*SEND_SPARSE + NBX_cnt*/,MPI_COMM_WORLD,&stat,&stat_t));
MPI_SAFE_CALL(MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG/*SEND_SPARSE + NBX_cnt*/,ext_comm,&stat,&stat_t));
// If I have an incoming message and is related to this NBX communication
if (stat == true)
......@@ -878,7 +890,7 @@ public:
check_valid(ptr,msize);
#endif
tot_recv += msize;
MPI_SAFE_CALL(MPI_Recv(ptr,msize,MPI_BYTE,stat_t.MPI_SOURCE,stat_t.MPI_TAG,MPI_COMM_WORLD,&stat_t));
MPI_SAFE_CALL(MPI_Recv(ptr,msize,MPI_BYTE,stat_t.MPI_SOURCE,stat_t.MPI_TAG,ext_comm,&stat_t));
#ifdef SE_CLASS2
check_valid(ptr,msize);
......@@ -898,7 +910,7 @@ public:
// If all send has been completed
if (flag == true)
{MPI_SAFE_CALL(MPI_Ibarrier(MPI_COMM_WORLD,&bar_req));reached_bar_req = true;}
{MPI_SAFE_CALL(MPI_Ibarrier(ext_comm,&bar_req));reached_bar_req = true;}
}
// Check if all processor reached the async barrier
......@@ -946,7 +958,7 @@ public:
req.add();
// send
MPI_IsendWB::send(proc,SEND_RECV_BASE + tag,mem,sz,req.last());
MPI_IsendWB::send(proc,SEND_RECV_BASE + tag,mem,sz,req.last(),ext_comm);
return true;
}
......@@ -981,11 +993,45 @@ public:
req.add();
// send
MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last());
MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm);
return true;
}
/*! \brief Send data to a processor
*
* \warning In order to avoid deadlock every send must be coupled with a recv
* in case you want to send data without knowledge from the other side
* consider using sendRecvMultipleMessages
*
* \warning the operation is asynchronous; execute() must be called to ensure it is executed
*
* \see sendRecvMultipleMessages
*
* \param proc processor id
* \param tag id
* \param v buffer to send
*
* \return true if succeed false otherwise
*
*/
template<typename T> bool send(size_t proc, size_t tag, openfpm::vector_ofp<T> & v)
{
#ifdef SE_CLASS1
checkType<T>();
#endif
// send over MPI
// Create one request
req.add();
// send
MPI_IsendW<T,HeapMemory,int>::send(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm);
return true;
}
/*! \brief Recv data from a processor
*
* \warning In order to avoid deadlock every recv must be coupled with a send
......@@ -1012,7 +1058,7 @@ public:
req.add();
// receive
MPI_IrecvWB::recv(proc,SEND_RECV_BASE + tag,v,sz,req.last());
MPI_IrecvWB::recv(proc,SEND_RECV_BASE + tag,v,sz,req.last(),ext_comm);
return true;
}
......@@ -1046,11 +1092,45 @@ public:
req.add();
// receive
MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last());
MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm);
return true;
}
/*! \brief Recv data from a processor
*
* \warning In order to avoid deadlock every recv must be coupled with a send
* in case you want to send data without knowledge from the other side
* consider using sendrecvMultipleMessagesNBX
*
* \warning the operation is asynchronous; execute() must be called to ensure it is executed
*
* \see sendrecvMultipleMessagesNBX
*
* \param proc processor id
* \param tag id
* \param v vector to receive into
*
* \return true if succeed false otherwise
*
*/
template<typename T> bool recv(size_t proc, size_t tag, openfpm::vector_ofp<T> & v)
{
#ifdef SE_CLASS1
checkType<T>();
#endif
// recv over MPI
// Create one request
req.add();
// receive
MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm);
return true;
}
/*! \brief Gather the data from all processors
*
* send a primitive data T receive the same primitive T from all the other processors
......@@ -1076,7 +1156,7 @@ public:
v.resize(getProcessingUnits());
// gather
MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last());
MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last(),ext_comm);
return true;
}
......@@ -1104,7 +1184,7 @@ public:
checkType<T>();
#endif
b_cast_helper<openfpm::vect_isel<T>::value == STD_VECTOR || is_layout_mlin<layout_base<T>>::value >::bcast_(req,v,root);
b_cast_helper<openfpm::vect_isel<T>::value == STD_VECTOR || is_layout_mlin<layout_base<T>>::value >::bcast_(req,v,root,ext_comm);
return true;
}
......
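Putting the pieces together: Vcluster_base now stores ext_comm and routes every collective through it, so a Vcluster can live on a sub-communicator. A sketch of the intended use, mirroring the new unit test below; it assumes MPI is already initialised (e.g. by a prior create_vcluster()) and that execute() is the usual Vcluster call that waits on the queued requests:

#include <mpi.h>

#include "VCluster/VCluster.hpp"   // assumed header location

// Build a Vcluster on a sub-communicator and reduce over its ranks only.
// Only members of 'sub' may call this.
double sum_of_sub_ranks(int * argc, char *** argv, MPI_Comm sub)
{
    Vcluster<> v_cl(argc, argv, sub);

    double x = static_cast<double>(v_cl.rank());
    v_cl.sum(x);        // queues an MPI_Iallreduce on 'sub'
    v_cl.execute();     // waits for the queued request

    return x;           // sum of ranks within 'sub'
}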
......@@ -217,10 +217,8 @@ template<unsigned int ip> void test_no_send_some_peer()
}
}
template<unsigned int ip> void test_known()
template<unsigned int ip> void test_known(Vcluster<> & vcl)
{
Vcluster<> & vcl = create_vcluster();
// send/recv messages
global_rank = vcl.getProcessUnitID();
......@@ -325,10 +323,8 @@ template<unsigned int ip> void test_known()
}
}
template<unsigned int ip> void test(unsigned int opt)
template<unsigned int ip> void test(Vcluster<> & vcl, unsigned int opt)
{
Vcluster<> & vcl = create_vcluster();
// send/recv messages
global_rank = vcl.getProcessUnitID();
......
......@@ -191,7 +191,9 @@ BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv)
std::cout << "VCluster unit test start sendrecv" << "\n";
totp_check = false;
test<NBX>(RECEIVE_UNKNOWN);
auto & v_cl = create_vcluster();
test<NBX>(v_cl,RECEIVE_UNKNOWN);
totp_check = false;
test_no_send_some_peer<NBX>();
......@@ -203,8 +205,10 @@ BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv_size_known)
{
std::cout << "VCluster unit test start sendrecv known size" << "\n";
auto & v_cl = create_vcluster();
totp_check = false;
test<NBX>(RECEIVE_SIZE_UNKNOWN);
test<NBX>(v_cl,RECEIVE_SIZE_UNKNOWN);
totp_check = false;
test_no_send_some_peer<NBX>();
......@@ -216,12 +220,32 @@ BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv_known )
{
std::cout << "VCluster unit test start known" << "\n";
test_known<NBX>();
test_known<NBX>(create_vcluster());
std::cout << "VCluster unit test stop known" << "\n";
}
BOOST_AUTO_TEST_CASE( VCluster_communicator_with_external_communicator )
{
std::cout << "VCluster unit test external communicator start" << std::endl;
MPI_Comm com_compute;
int rank = create_vcluster().rank();
if (rank == 0)
{MPI_Comm_split(MPI_COMM_WORLD, MPI_UNDEFINED,rank, &com_compute);}
else
{MPI_Comm_split(MPI_COMM_WORLD,0,rank, &com_compute);}
if (rank != 0 )
{
Vcluster<> v_cl(&boost::unit_test::framework::master_test_suite().argc,&boost::unit_test::framework::master_test_suite().argv,com_compute);
test_known<NBX>(v_cl);
test<NBX>(v_cl,RECEIVE_SIZE_UNKNOWN);
}
std::cout << "VCluster unit test external communicator stop" << std::endl;
}
BOOST_AUTO_TEST_SUITE_END()