@@ -6,7 +6,7 @@ if (CUDA_FOUND)
set(CUDA_SOURCES ../../openfpm_devices/src/memory/CudaMemory.cu VCluster/cuda/VCluster_semantic_unit_cuda_tests.cu VCluster/cuda/VCluster_unit_tests.cu )
endif()
-add_executable(vcluster_test main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})
add_executable(vcluster_test main.cpp VCluster/VCluster.cpp VCluster/InVis.cpp ../../openfpm_devices/src/memory/ShmAllocator_manager.cpp ../../openfpm_devices/src/memory/SemManager.cpp ../../openfpm_devices/src/memory/ShmAllocator.cpp ../../openfpm_devices/src/memory/ShmBuffer.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})
if ( CMAKE_COMPILER_IS_GNUCC )
target_compile_options(vcluster_test PRIVATE "-Wno-deprecated-declarations")
@@ -15,7 +15,7 @@ if ( CMAKE_COMPILER_IS_GNUCC )
endif()
endif()
-add_library(vcluster STATIC VCluster/VCluster.cpp)
add_library(vcluster STATIC VCluster/VCluster.cpp VCluster/InVis.cpp)
###########################
@@ -27,6 +27,8 @@ if(CUDA_FOUND)
endif()
endif()
set(JAVA_HOME /usr/lib/jvm/adoptopenjdk-11-hotspot-amd64)
target_include_directories (vcluster_test PUBLIC ${CUDA_INCLUDE_DIRS})
target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../openfpm_devices/src/)
@@ -35,6 +37,8 @@ target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../
target_include_directories (vcluster_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/config)
target_include_directories (vcluster_test PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (vcluster_test PUBLIC ${PETSC_INCLUDES})
target_include_directories (vcluster_test PUBLIC ${JAVA_HOME}/include ${JAVA_HOME}/include/linux)
target_include_directories (vcluster PUBLIC ${CUDA_INCLUDE_DIRS})
target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
@@ -43,7 +47,11 @@ target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/config)
target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../openfpm_data/src/)
target_include_directories (vcluster PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../openfpm_devices/src/)
target_include_directories (vcluster PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (vcluster PUBLIC ${JAVA_HOME}/include ${JAVA_HOME}/include/linux)
#target_link_libraries(vcluster ${JAVA_HOME}/lib/server/libjvm.so)
target_link_libraries(vcluster_test ${JAVA_HOME}/lib/server/libjvm.so)
target_link_libraries(vcluster_test ${Boost_LIBRARIES})
target_link_libraries(vcluster_test ${PETSC_LIBRARIES})
@@ -69,6 +77,7 @@ install(FILES MPI_wrapper/MPI_IallreduceW.hpp
install(FILES VCluster/VCluster_base.hpp
VCluster/VCluster.hpp
VCluster/VCluster_meta_function.hpp
VCluster/InVis.hpp
DESTINATION openfpm_vcluster/include/VCluster )
install (FILES util/Vcluster_log.hpp
@@ -28,9 +28,9 @@
class MPI_IAllGatherWB
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE, rbuf, sz_r, MPI_BYTE, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE, rbuf, sz_r, MPI_BYTE, ext_comm,&req));
}
};
@@ -44,9 +44,9 @@ template<typename T> class MPI_IAllGatherW
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T) * sz_s,MPI_BYTE, rbuf, sz_r * sizeof(T), MPI_BYTE, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T) * sz_s,MPI_BYTE, rbuf, sz_r * sizeof(T), MPI_BYTE, ext_comm,&req));
}
};
@@ -57,9 +57,9 @@ public:
template<> class MPI_IAllGatherW<int>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_INT, rbuf, sz_r, MPI_INT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_INT, rbuf, sz_r, MPI_INT, ext_comm,&req));
}
};
@@ -69,9 +69,9 @@ public:
template<> class MPI_IAllGatherW<unsigned int>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED, rbuf, sz_r, MPI_UNSIGNED, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED, rbuf, sz_r, MPI_UNSIGNED, ext_comm,&req));
}
};
@@ -81,9 +81,9 @@ public:
template<> class MPI_IAllGatherW<short>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_SHORT, rbuf, sz_r, MPI_SHORT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_SHORT, rbuf, sz_r, MPI_SHORT, ext_comm,&req));
}
};
@@ -94,9 +94,9 @@ public:
template<> class MPI_IAllGatherW<unsigned short>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_SHORT, rbuf, sz_r, MPI_UNSIGNED_SHORT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_SHORT, rbuf, sz_r, MPI_UNSIGNED_SHORT, ext_comm,&req));
}
};
@@ -107,9 +107,9 @@ public:
template<> class MPI_IAllGatherW<char>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_CHAR, rbuf, sz_r, MPI_CHAR, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_CHAR, rbuf, sz_r, MPI_CHAR, ext_comm,&req));
}
};
@@ -120,9 +120,9 @@ public:
template<> class MPI_IAllGatherW<unsigned char>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_CHAR, rbuf, sz_r, MPI_UNSIGNED_CHAR, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_CHAR, rbuf, sz_r, MPI_UNSIGNED_CHAR, ext_comm,&req));
}
};
@@ -132,9 +132,9 @@ public:
template<> class MPI_IAllGatherW<size_t>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_LONG, rbuf, sz_r, MPI_UNSIGNED_LONG, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_LONG, rbuf, sz_r, MPI_UNSIGNED_LONG, ext_comm,&req));
}
};
@@ -144,9 +144,9 @@ public:
template<> class MPI_IAllGatherW<long int>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_LONG, rbuf, sz_r, MPI_LONG, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_LONG, rbuf, sz_r, MPI_LONG, ext_comm,&req));
}
};
@@ -156,9 +156,9 @@ public:
template<> class MPI_IAllGatherW<float>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_FLOAT, rbuf, sz_r, MPI_FLOAT, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_FLOAT, rbuf, sz_r, MPI_FLOAT, ext_comm,&req));
}
};
@@ -168,9 +168,9 @@ public:
template<> class MPI_IAllGatherW<double>
{
public:
-static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_DOUBLE, rbuf, sz_r, MPI_DOUBLE, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_DOUBLE, rbuf, sz_r, MPI_DOUBLE, ext_comm,&req));
}
};
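Every gather call site now has to name the communicator explicitly instead of inheriting MPI_COMM_WORLD. A minimal sketch of what that looks like for the int specialization, assuming an already created sub-communicator comm; the surrounding function is illustrative and not part of the patch.

#include <mpi.h>
#include <vector>

// Gather one int from every rank of `comm` into `all`, then wait on the request.
void gather_ranks(MPI_Comm comm)
{
    int comm_size, my_rank;
    MPI_Comm_size(comm, &comm_size);
    MPI_Comm_rank(comm, &my_rank);

    std::vector<int> all(comm_size);
    MPI_Request req;

    // The communicator is now an explicit argument instead of the hard-coded MPI_COMM_WORLD.
    MPI_IAllGatherW<int>::gather(&my_rank, 1, all.data(), 1, req, comm);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}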
@@ -33,9 +33,9 @@
class MPI_IBcastWB
{
public:
-static inline void bcast(size_t proc ,void * buf, size_t sz, MPI_Request & req)
static inline void bcast(size_t proc ,void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE, proc , ext_comm,&req));
}
};
@@ -48,9 +48,9 @@ public:
template<typename T> class MPI_IBcastW
{
public:
-template<typename Memory> static inline void bcast(size_t proc ,openfpm::vector<T,Memory> & v, MPI_Request & req)
template<typename Memory> static inline void bcast(size_t proc ,openfpm::vector<T,Memory> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc , ext_comm,&req));
}
};
@@ -61,9 +61,9 @@ public:
template<> class MPI_IBcastW<int>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<int> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_INT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_INT, proc , ext_comm,&req));
}
};
@@ -73,9 +73,9 @@ public:
template<> class MPI_IBcastW<unsigned int>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<unsigned int> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<unsigned int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED, proc , ext_comm,&req));
}
};
@@ -85,9 +85,9 @@ public:
template<> class MPI_IBcastW<short>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<short> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_SHORT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_SHORT, proc , ext_comm,&req));
}
};
@@ -97,9 +97,9 @@ public:
template<> class MPI_IBcastW<unsigned short>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<unsigned short> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<unsigned short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc , ext_comm,&req));
}
};
@@ -109,9 +109,9 @@ public:
template<> class MPI_IBcastW<char>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<char> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_CHAR, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_CHAR, proc , ext_comm,&req));
}
};
@@ -121,9 +121,9 @@ public:
template<> class MPI_IBcastW<unsigned char>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<unsigned char> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<unsigned char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc , ext_comm,&req));
}
};
@@ -133,9 +133,9 @@ public:
template<> class MPI_IBcastW<size_t>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<size_t> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<size_t> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc , ext_comm,&req));
}
};
@@ -145,9 +145,9 @@ public:
template<> class MPI_IBcastW<long int>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<long int> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<long int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_LONG, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_LONG, proc , ext_comm,&req));
}
};
@@ -157,9 +157,9 @@ public:
template<> class MPI_IBcastW<float>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<float> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<float> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_FLOAT, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_FLOAT, proc , ext_comm,&req));
}
};
@@ -169,9 +169,9 @@ public:
template<> class MPI_IBcastW<double>
{
public:
-static inline void bcast(size_t proc ,openfpm::vector<double> & v, MPI_Request & req)
static inline void bcast(size_t proc ,openfpm::vector<double> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_DOUBLE, proc , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_DOUBLE, proc , ext_comm,&req));
}
};
@@ -195,15 +195,19 @@ struct bcast_inte_impl
//! root processor
size_t root;
//! MPI communicator
MPI_Comm ext_comm;
/*! \brief constructor
*
* \param v set of pointer buffers to set
*
*/
inline bcast_inte_impl(vect & send,
openfpm::vector<MPI_Request> & req,
-size_t root)
-:send(send),req(req),root(root)
size_t root,
MPI_Comm ext_comm)
:send(send),req(req),root(root),ext_comm(ext_comm)
{};
//! It call the copy function for each property
@@ -216,7 +220,7 @@ struct bcast_inte_impl
req.add();
// gather
-MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last());
MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last(),ext_comm);
}
};
@@ -226,13 +230,14 @@ struct b_cast_helper
template<typename T, typename Mem, typename lt_type, template<typename> class layout_base >
static void bcast_(openfpm::vector<MPI_Request> & req,
openfpm::vector<T,Mem,lt_type,layout_base> & v,
-size_t root)
size_t root,
MPI_Comm ext_comm)
{
// Create one request
req.add();
// gather
-MPI_IBcastW<T>::bcast(root,v,req.last());
MPI_IBcastW<T>::bcast(root,v,req.last(),ext_comm);
}
};
@@ -242,9 +247,10 @@ struct b_cast_helper<false>
template<typename T, typename Mem, typename lt_type, template<typename> class layout_base >
static void bcast_(openfpm::vector<MPI_Request> & req,
openfpm::vector<T,Mem,lt_type,layout_base> & v,
-size_t root)
size_t root,
MPI_Comm ext_comm)
{
-bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root);
bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root,ext_comm);
boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(bc);
}
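The broadcast helpers thread the same communicator down to MPI_Ibcast. A short sketch of broadcasting an openfpm::vector from rank 0 of a given communicator, following the request-handling pattern used by b_cast_helper above; the function itself is illustrative and not from the patch.

// Broadcast `values` from rank 0 of `comm` to all of its members.
void bcast_values(openfpm::vector<double> & values, MPI_Comm comm)
{
    openfpm::vector<MPI_Request> req;
    req.add();                                  // one request, as in b_cast_helper
    MPI_IBcastW<double>::bcast(0, values, req.last(), comm);
    MPI_Wait(&req.last(), MPI_STATUS_IGNORE);   // complete the non-blocking broadcast
}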
@@ -19,7 +19,7 @@
template<typename T> class MPI_IallreduceW
{
public:
-static inline void reduce(T & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(T & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " cannot recognize " << typeid(T).name() << "\n";
}
@@ -32,9 +32,9 @@ public:
template<> class MPI_IallreduceW<int>
{
public:
-static inline void reduce(int & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, ext_comm,&req));
}
};
@@ -44,9 +44,9 @@ public:
template<> class MPI_IallreduceW<unsigned int>
{
public:
-static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, ext_comm,&req));
}
};
@@ -56,9 +56,9 @@ public:
template<> class MPI_IallreduceW<short>
{
public:
-static inline void reduce(short & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(short & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, ext_comm,&req));
}
};
@@ -68,9 +68,9 @@ public:
template<> class MPI_IallreduceW<unsigned short>
{
public:
-static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, ext_comm,&req));
}
};
@@ -80,9 +80,9 @@ public:
template<> class MPI_IallreduceW<char>
{
public:
-static inline void reduce(char & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(char & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, ext_comm,&req));
}
};
@@ -92,9 +92,9 @@ public:
template<> class MPI_IallreduceW<unsigned char>
{
public:
-static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, ext_comm,&req));
}
};
@@ -104,9 +104,9 @@ public:
template<> class MPI_IallreduceW<size_t>
{
public:
-static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, ext_comm,&req));
}
};
@@ -116,9 +116,9 @@ public:
template<> class MPI_IallreduceW<long int>
{
public:
-static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, ext_comm,&req));
}
};
@@ -128,9 +128,9 @@ public:
template<> class MPI_IallreduceW<float>
{
public:
-static inline void reduce(float & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(float & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, ext_comm,&req));
}
};
@@ -140,9 +140,9 @@ public:
template<> class MPI_IallreduceW<double>
{
public:
-static inline void reduce(double & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(double & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, ext_comm,&req));
}
};
@@ -154,9 +154,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<int>>
{
public:
-static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, ext_comm,&req);
}
};*/
@@ -166,9 +166,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<short>>
{
public:
-static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, ext_comm,&req);
}
};*/
@@ -178,9 +178,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<char>>
{
public:
-static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, ext_comm,&req);
}
};*/
@@ -190,9 +190,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<size_t>>
{
public:
-static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, ext_comm,&req);
}
};*/
@@ -202,9 +202,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<float>>
{
public:
-static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, ext_comm,&req);
}
};*/
@@ -215,9 +215,9 @@ public:
/*template<> class MPI_IallreduceW<openfpm::vector<double>>
{
public:
-static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req)
static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, MPI_COMM_WORLD,&req);
MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, ext_comm,&req);
}
};*/
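Reductions follow suit: the operation runs over whichever communicator the caller passes. A small sketch that sums a double across a sub-communicator; the names are illustrative.

// Sum `local` across all ranks of `comm`; every rank receives the total in place.
double sum_over(double local, MPI_Comm comm)
{
    MPI_Request req;
    MPI_IallreduceW<double>::reduce(local, MPI_SUM, req, comm);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    return local;
}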
@@ -22,9 +22,9 @@ public:
* \param req MPI request
*
*/
-static inline void recv(size_t proc , size_t tag ,void * buf, size_t sz, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE, proc, tag , ext_comm,&req));
}
};
@@ -37,9 +37,9 @@ public:
template<typename T> class MPI_IrecvW
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req));
}
};
@@ -50,9 +50,9 @@ public:
template<> class MPI_IrecvW<int>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_INT, proc, tag , ext_comm,&req));
}
};
@@ -62,9 +62,9 @@ public:
template<> class MPI_IrecvW<unsigned int>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , ext_comm,&req));
}
};
@@ -74,9 +74,9 @@ public:
template<> class MPI_IrecvW<short>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_SHORT, proc, tag , ext_comm,&req));
}
};
@@ -86,9 +86,9 @@ public:
template<> class MPI_IrecvW<unsigned short>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , ext_comm,&req));
}
};
@@ -98,9 +98,9 @@ public:
template<> class MPI_IrecvW<char>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_CHAR, proc, tag , ext_comm,&req));
}
};
@@ -110,9 +110,9 @@ public:
template<> class MPI_IrecvW<unsigned char>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , ext_comm,&req));
}
};
@@ -122,9 +122,9 @@ public:
template<> class MPI_IrecvW<size_t>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , ext_comm,&req));
}
};
@@ -134,9 +134,9 @@ public:
template<> class MPI_IrecvW<long int>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_LONG, proc, tag , ext_comm,&req));
}
};
@@ -146,9 +146,9 @@ public:
template<> class MPI_IrecvW<float>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , ext_comm,&req));
}
};
@@ -158,9 +158,9 @@ public:
template<> class MPI_IrecvW<double>
{
public:
-static inline void recv(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req)
static inline void recv(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req));
MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , ext_comm,&req));
}
};
@@ -16,9 +16,9 @@
class MPI_IsendWB
{
public:
-static inline void send(size_t proc , size_t tag ,const void * buf, size_t sz, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,const void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(buf, sz,MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(buf, sz,MPI_BYTE, proc, tag , ext_comm,&req);
}
};
@@ -31,9 +31,9 @@ public:
template<typename T, typename Mem, typename gr> class MPI_IsendW
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<T,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<T,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req);
}
};
@@ -44,9 +44,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<int,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<int,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , ext_comm,&req);
}
};
@@ -56,9 +56,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<unsigned int,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , ext_comm,&req);
}
};
@@ -68,9 +68,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<short,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<short,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<short,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , ext_comm,&req);
}
};
@@ -80,9 +80,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<unsigned short,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , ext_comm,&req);
}
};
@@ -92,9 +92,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<char,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<char,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<char,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , ext_comm,&req);
}
};
@@ -104,9 +104,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<unsigned char,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , ext_comm,&req);
}
};
@@ -116,9 +116,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<size_t,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , ext_comm,&req);
}
};
@@ -128,9 +128,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<long int,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , ext_comm,&req);
}
};
@@ -140,9 +140,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<float,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<float,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<float,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , ext_comm,&req);
}
};
@@ -152,9 +152,9 @@ public:
template<typename Mem, typename gr> class MPI_IsendW<double,Mem,gr>
{
public:
-static inline void send(size_t proc , size_t tag ,openfpm::vector<double,Mem,gr> & v, MPI_Request & req)
static inline void send(size_t proc , size_t tag ,openfpm::vector<double,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
{
-MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req);
MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , ext_comm,&req);
}
};
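The point-to-point wrappers change in the same way, so a send and its matching receive must now agree on the communicator as well as the tag. A sketch pairing the byte-level wrappers, assuming the receive-side class is named MPI_IrecvWB to mirror MPI_IsendWB (that class name sits outside the hunks shown above), with illustrative variable names.

// Rank 0 of `comm` sends `n` bytes to rank 1, which posts the matching receive.
void exchange(const void * sbuf, void * rbuf, size_t n, MPI_Comm comm)
{
    int rank;
    MPI_Comm_rank(comm, &rank);

    MPI_Request req;
    const size_t tag = 100;   // illustrative tag

    if (rank == 0)
        MPI_IsendWB::send(1, tag, sbuf, n, req, comm);   // non-blocking byte send
    else if (rank == 1)
        MPI_IrecvWB::recv(0, tag, rbuf, n, req, comm);   // matching byte receive

    if (rank < 2)
        MPI_Wait(&req, MPI_STATUS_IGNORE);
}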
This source diff could not be displayed because it is too large.
/*
* InVis.hpp
*
* Created on: May 11, 2020
* Author: Aryaman Gupta
*/
#ifndef OPENFPM_PDATA_INVIS_HPP
#define OPENFPM_PDATA_INVIS_HPP
#include <mpi.h>   // for MPI_Comm, used by the visComm member below
#include <jni.h>
class InVis
{
int windowSize;
int computePartners;
int imageSize;
JavaVM *jvm;
jclass clazz;
jobject obj;
MPI_Comm visComm;
int commSize;
void updateMemory(jmethodID methodID, int memKey, bool pos);
void getMemoryPos();
void getMemoryProps();
void receiveImages();
void updateCamera();
void doRender();
public:
InVis(int wSize, int cPartners, MPI_Comm vComm, bool isHead);
void manageVisHead();
void manageVisRenderer();
};
#endif //OPENFPM_PDATA_INVIS_HPP
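The header above only declares the interface. The way it is driven later in this merge request (in the in-situ branch of init_global_v_cluster_private) suggests usage along the following lines; this is a sketch for orientation, not code from the patch, and the helper function name is made up.

// Illustrative only: start the visualization side on the ranks of the vis communicator,
// mirroring how InVis is constructed further down in this merge request.
void run_vis(MPI_Comm comm_vis, int numSimProcesses)
{
    int visRank;
    MPI_Comm_rank(comm_vis, &visRank);

    if (visRank == 0)
    {
        InVis head(700, numSimProcesses, comm_vis, true);       // visualization head
        head.manageVisHead();
    }
    else
    {
        InVis renderer(700, numSimProcesses, comm_vis, false);  // a rendering process
        renderer.manageVisRenderer();
    }
}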
This source diff could not be displayed because it is too large.
/*
* InVisRenderer.hpp
*
* Created on: May 12, 2020
* Author: Aryaman Gupta
*/
#ifndef OPENFPM_PDATA_INVISRENDERER_HPP
#define OPENFPM_PDATA_INVISRENDERER_HPP
#include <jni.h>
#define windowSize 700
#define computePartners 2
#define imageSize windowSize*windowSize*7
class InVisRenderer
{
JavaVM *jvm;
void getMemoryPos(JNIEnv *env, jclass renderClass, jobject renderObject);
void getMemoryProps(JNIEnv *env, jclass renderClass, jobject renderObject);
void sendImage(JNIEnv *e, jobject clazz, jobject image);
void doRender(JNIEnv *env, jclass inVisClass, jobject inVisObject);
public:
void manageVisRenderer();
};
#endif //OPENFPM_PDATA_INVISRENDERER_HPP
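InVisRenderer holds a JavaVM pointer, and the CMake changes above add the JNI include paths and link libjvm.so, so the renderer presumably embeds a JVM through the JNI invocation API. Below is a minimal, self-contained sketch of that API; the classpath option is a placeholder and none of this code is taken from the patch.

#include <jni.h>

// Sketch: create an embedded JVM via the JNI invocation API.
JavaVM * create_jvm(JNIEnv ** env)
{
    JavaVMOption options[1];
    options[0].optionString = const_cast<char *>("-Djava.class.path=/path/to/visualization.jar"); // placeholder path

    JavaVMInitArgs vm_args;
    vm_args.version = JNI_VERSION_1_8;
    vm_args.nOptions = 1;
    vm_args.options = options;
    vm_args.ignoreUnrecognized = JNI_FALSE;

    JavaVM * jvm = nullptr;
    if (JNI_CreateJavaVM(&jvm, reinterpret_cast<void **>(env), &vm_args) != JNI_OK)
    {return nullptr;}

    return jvm;
}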
@@ -8,12 +8,14 @@
#include "util/print_stack.hpp"
#include "util/math_util_complex.hpp"
init_options global_option;
Vcluster<> * global_v_cluster_private_heap = NULL;
Vcluster<CudaMemory> * global_v_cluster_private_cuda = NULL;
//
std::vector<int> sieve_spf;
// number of vcluster instances
size_t n_vcluster = 0;
bool ofp_initialized = false;
@@ -13,6 +13,7 @@
#include "VCluster_base.hpp"
#include "VCluster_meta_function.hpp"
#include "util/math_util_complex.hpp"
#include "InVis.hpp"
#ifdef CUDA_GPU
extern CudaMemory mem_tmp;
@@ -336,8 +337,8 @@ class Vcluster: public Vcluster_base<InternalMemory>
* \param argv main set of arguments
*
*/
-Vcluster(int *argc, char ***argv)
-:Vcluster_base<InternalMemory>(argc,argv)
Vcluster(int *argc, char ***argv,MPI_Comm ext_comm = MPI_COMM_WORLD)
:Vcluster_base<InternalMemory>(argc,argv,ext_comm)
{
}
@@ -878,34 +879,276 @@ class Vcluster: public Vcluster_base<InternalMemory>
};
enum init_options
{
none = 0x0,
in_situ_visualization = 0x1,
};
extern init_options global_option;
// Function to initialize the global VCluster //
extern Vcluster<> * global_v_cluster_private_heap;
extern Vcluster<CudaMemory> * global_v_cluster_private_cuda;
-/*! \brief Initialize a global instance of Runtime Virtual Cluster Machine
- *
- * Initialize a global instance of Runtime Virtual Cluster Machine
- *
- */
-static inline void init_global_v_cluster_private(int *argc, char ***argv)
-{
-if (global_v_cluster_private_heap == NULL)
-{global_v_cluster_private_heap = new Vcluster<>(argc,argv);}
-if (global_v_cluster_private_cuda == NULL)
-{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv);}
-}
static inline void delete_global_v_cluster_private()
{
delete global_v_cluster_private_heap;
delete global_v_cluster_private_cuda;
}
/*! \brief Finalize the library
*
* This function MUST be called at the end of the program
*
*/
static inline void openfpm_finalize()
{
if (global_option == init_options::in_situ_visualization)
{
MPI_Request bar_req;
MPI_Ibarrier(MPI_COMM_WORLD,&bar_req);
}
#ifdef HAVE_PETSC
PetscFinalize();
#endif
delete_global_v_cluster_private();
ofp_initialized = false;
#ifdef CUDA_GPU
// Release memory
mem_tmp.destroy();
mem_tmp.decRef();
#endif
}
static void get_comm_ranks(MPI_Comm comm, openfpm::vector<unsigned int> & world_ranks)
{
MPI_Group grp, world_grp;
MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
MPI_Comm_group(comm, &grp);
int grp_size;
MPI_Group_size(grp, &grp_size);
openfpm::vector<unsigned int> local_ranks;
local_ranks.resize(grp_size);
world_ranks.resize(grp_size);
for (int i = 0; i < grp_size; i++)
{local_ranks.get(i) = i;}
MPI_Group_translate_ranks(grp, grp_size, (int *)local_ranks.getPointer(), world_grp, (int *)world_ranks.getPointer());
MPI_Group_free(&grp);
MPI_Group_free(&world_grp);
}
/*! \brief Initialize a global instance of Runtime Virtual Cluster Machine
*
* Initialize a global instance of Runtime Virtual Cluster Machine
*
*/
static inline void init_global_v_cluster_private(int *argc, char ***argv, init_options option)
{
global_option = option;
if (option == init_options::in_situ_visualization)
{
int flag;
MPI_Initialized(&flag);
if (flag == false)
{
int threadLevel;
MPI_Init_thread(argc, argv, MPI_THREAD_MULTIPLE, &threadLevel);
std::cout << "MPI initialized with thread level " << threadLevel << ". The desired level was " << MPI_THREAD_MULTIPLE << std::endl;
}
MPI_Comm comm_compute;
MPI_Comm comm_steer;
MPI_Comm comm_vis;
int rank;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm nodeComm;
MPI_Comm_split_type( MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank,
MPI_INFO_NULL, &nodeComm );
openfpm::vector<unsigned int> world_ranks;
get_comm_ranks(nodeComm,world_ranks);
int nodeRank;
int len;
MPI_Comm_rank(nodeComm,&nodeRank);
bool is_vis_process = false;
if (rank != 0)
{
if (nodeRank == 0)
{
// The lowest ranked process on a given node (except head node); the rendering process of that node
char name[MPI_MAX_PROCESSOR_NAME];
// Vis process
MPI_Get_processor_name(name, &len);
std::cout << "Node: " << name << " vis process: " << rank << std::endl;
is_vis_process = true;
}
else
{
for (int i = 0 ; i < world_ranks.size() ; i++)
{
if (world_ranks.get(i) == 0 && (nodeRank == 1 || nodeRank == 2))
{
char name[MPI_MAX_PROCESSOR_NAME];
// Vis process
MPI_Get_processor_name(name, &len);
std::cout << "Vis process on node 0 " << name << " " << nodeRank << " " << rank << std::endl;
is_vis_process = true;
}
}
}
}
int colorVis;
int colorSteer;
if (is_vis_process == true)
{
//All visualization processes are part of the vis communicator, but not part of the steering communicator
colorVis = 0;
colorSteer = MPI_UNDEFINED;
}
else
{
//All non-visualization processes are part of the steering communicator, but not part of the vis communicator
colorVis = MPI_UNDEFINED;
colorSteer = 0;
}
MPI_Comm_split(MPI_COMM_WORLD, colorSteer, rank, &comm_steer);
MPI_Comm_split(MPI_COMM_WORLD, colorVis, rank, &comm_vis);
if (rank == 0 || is_vis_process == true)
{MPI_Comm_split(MPI_COMM_WORLD, MPI_UNDEFINED,rank, &comm_compute);}
else
{MPI_Comm_split(MPI_COMM_WORLD,0,rank, &comm_compute);}
if (rank != 0 && is_vis_process == false)
{
if (global_v_cluster_private_heap == NULL)
{global_v_cluster_private_heap = new Vcluster<>(argc,argv,comm_compute);}
if (global_v_cluster_private_cuda == NULL)
{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv,comm_compute);}
}
else if (is_vis_process == true)
{
int flag = false;
MPI_Request bar_req;
MPI_Ibarrier(MPI_COMM_WORLD,&bar_req);
//! barrier status
MPI_Status bar_stat;
//How many simulation processes are running on this node?
int numSimProcesses;
int nodeCommSize;
MPI_Comm_size(nodeComm, &nodeCommSize);
if(nodeRank != 0)
{
//This process' rank on its node is not 0. So it is on the head node
numSimProcesses = nodeCommSize - 3; //OpenFPM Head + Vis Head + Vis Renderer
}
else
{
numSimProcesses = nodeCommSize - 1; //All processes apart from the Vis Renderer are simulation processes
}
char name[MPI_MAX_PROCESSOR_NAME];
MPI_Get_processor_name(name, &len);
std::cout<<"Node size is " << nodeCommSize << " and no. of sim processes on node " << name << " are: " << numSimProcesses << std::endl;
while(flag == false)
{
int visRank;
MPI_Comm_rank(comm_vis, &visRank);
sleep(1);
if(visRank == 0)
{
// The head process of the visualization system
// sleep(10);
InVis *visSystem = new InVis(700, numSimProcesses, comm_vis, true);
visSystem->manageVisHead();
}
else
{
// A rendering process of the visualization system
// sleep(10);
InVis *visSystem = new InVis(700, numSimProcesses, comm_vis, false);
visSystem->manageVisRenderer();
}
MPI_SAFE_CALL(MPI_Test(&bar_req,&flag,&bar_stat));
}
openfpm_finalize();
exit(0);
}
else
{
int flag = false;
MPI_Request bar_req;
MPI_Ibarrier(MPI_COMM_WORLD,&bar_req);
//! barrier status
MPI_Status bar_stat;
while(flag == false)
{
std::cout << "I am node " << rank << std::endl;
sleep(1);
MPI_SAFE_CALL(MPI_Test(&bar_req,&flag,&bar_stat));
}
openfpm_finalize();
exit(0);
}
}
else
{
if (global_v_cluster_private_heap == NULL)
{global_v_cluster_private_heap = new Vcluster<>(argc,argv);}
if (global_v_cluster_private_cuda == NULL)
{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv);}
}
}
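Putting the pieces together, an application selects the new behaviour through the init_options argument. A sketch of a compute-side main() under the in-situ path, assuming the usual create_vcluster() accessor for the global instance (that accessor is not part of the excerpt above); this is illustrative, not part of the patch.

int main(int argc, char* argv[])
{
    // Vis ranks are split off inside this call; they run InVis and exit in there.
    // Pure compute ranks come back with the global Vcluster built on comm_compute.
    init_global_v_cluster_private(&argc, &argv, init_options::in_situ_visualization);

    Vcluster<> & v_cl = create_vcluster();   // assumed accessor for the global instance

    // ... simulation work using v_cl ...

    openfpm_finalize();                      // posts the MPI_Ibarrier the vis ranks wait on
    return 0;
}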
template<typename Memory