@@ -6,7 +6,7 @@ if (CUDA_FOUND)
 	set(CUDA_SOURCES ../../openfpm_devices/src/memory/CudaMemory.cu VCluster/cuda/VCluster_semantic_unit_cuda_tests.cu VCluster/cuda/VCluster_unit_tests.cu )
 endif()
 
-add_executable(vcluster_test main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})
+add_executable(vcluster_test main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/ShmAllocator_manager.cpp ../../openfpm_devices/src/memory/SemManager.cpp ../../openfpm_devices/src/memory/ShmAllocator.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})
 
 if ( CMAKE_COMPILER_IS_GNUCC )
 	target_compile_options(vcluster_test PRIVATE "-Wno-deprecated-declarations")
...
@@ -28,9 +28,9 @@
 class MPI_IAllGatherWB
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE, rbuf, sz_r, MPI_BYTE, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE, rbuf, sz_r, MPI_BYTE, ext_comm,&req));
 	}
 };
@@ -44,9 +44,9 @@ template<typename T> class MPI_IAllGatherW
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T) * sz_s,MPI_BYTE, rbuf, sz_r * sizeof(T), MPI_BYTE, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T) * sz_s,MPI_BYTE, rbuf, sz_r * sizeof(T), MPI_BYTE, ext_comm,&req));
 	}
 };
@@ -57,9 +57,9 @@ public:
 template<> class MPI_IAllGatherW<int>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_INT, rbuf, sz_r, MPI_INT, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_INT, rbuf, sz_r, MPI_INT, ext_comm,&req));
 	}
 };
@@ -69,9 +69,9 @@ public:
 template<> class MPI_IAllGatherW<unsigned int>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED, rbuf, sz_r, MPI_UNSIGNED, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED, rbuf, sz_r, MPI_UNSIGNED, ext_comm,&req));
 	}
 };
@@ -81,9 +81,9 @@ public:
 template<> class MPI_IAllGatherW<short>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_SHORT, rbuf, sz_r, MPI_SHORT, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_SHORT, rbuf, sz_r, MPI_SHORT, ext_comm,&req));
 	}
 };
@@ -94,9 +94,9 @@ public:
 template<> class MPI_IAllGatherW<unsigned short>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_SHORT, rbuf, sz_r, MPI_UNSIGNED_SHORT, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_SHORT, rbuf, sz_r, MPI_UNSIGNED_SHORT, ext_comm,&req));
 	}
 };
@@ -107,9 +107,9 @@ public:
 template<> class MPI_IAllGatherW<char>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_CHAR, rbuf, sz_r, MPI_CHAR, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_CHAR, rbuf, sz_r, MPI_CHAR, ext_comm,&req));
 	}
 };
@@ -120,9 +120,9 @@ public:
 template<> class MPI_IAllGatherW<unsigned char>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_CHAR, rbuf, sz_r, MPI_UNSIGNED_CHAR, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_CHAR, rbuf, sz_r, MPI_UNSIGNED_CHAR, ext_comm,&req));
 	}
 };
@@ -132,9 +132,9 @@ public:
 template<> class MPI_IAllGatherW<size_t>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_LONG, rbuf, sz_r, MPI_UNSIGNED_LONG, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_UNSIGNED_LONG, rbuf, sz_r, MPI_UNSIGNED_LONG, ext_comm,&req));
 	}
 };
@@ -144,9 +144,9 @@ public:
 template<> class MPI_IAllGatherW<long int>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_LONG, rbuf, sz_r, MPI_LONG, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_LONG, rbuf, sz_r, MPI_LONG, ext_comm,&req));
 	}
 };
@@ -156,9 +156,9 @@ public:
 template<> class MPI_IAllGatherW<float>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_FLOAT, rbuf, sz_r, MPI_FLOAT, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_FLOAT, rbuf, sz_r, MPI_FLOAT, ext_comm,&req));
 	}
 };
@@ -168,9 +168,9 @@ public:
 template<> class MPI_IAllGatherW<double>
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s ,void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_DOUBLE, rbuf, sz_r, MPI_DOUBLE, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_DOUBLE, rbuf, sz_r, MPI_DOUBLE, ext_comm,&req));
 	}
 };
...
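Reviewer note on the file above: every gather wrapper now receives the communicator as a parameter instead of hard-coding `MPI_COMM_WORLD`, which is the change that later lets a `Vcluster` be built on a subcommunicator. A minimal plain-MPI sketch (not OpenFPM code) of the call the wrappers now issue, run on a split communicator:

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	// Any subcommunicator works now; here every rank joins color 0.
	MPI_Comm ext_comm;
	MPI_Comm_split(MPI_COMM_WORLD, 0, rank, &ext_comm);

	int size;
	MPI_Comm_size(ext_comm, &size);

	int send = rank;
	std::vector<int> recv(size);

	// Equivalent of MPI_IAllGatherW<int>::gather(&send,1,recv.data(),1,req,ext_comm):
	// the former hard-coded MPI_COMM_WORLD is simply replaced by ext_comm.
	MPI_Request req;
	MPI_Iallgather(&send, 1, MPI_INT, recv.data(), 1, MPI_INT, ext_comm, &req);
	MPI_Wait(&req, MPI_STATUS_IGNORE);

	MPI_Comm_free(&ext_comm);
	MPI_Finalize();
	return 0;
}
```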
@@ -33,9 +33,9 @@
 class MPI_IBcastWB
 {
 public:
-	static inline void bcast(size_t proc ,void * buf, size_t sz, MPI_Request & req)
+	static inline void bcast(size_t proc ,void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE, proc , ext_comm,&req));
 	}
 };
@@ -48,9 +48,9 @@ public:
 template<typename T> class MPI_IBcastW
 {
 public:
-	template<typename Memory> static inline void bcast(size_t proc ,openfpm::vector<T,Memory> & v, MPI_Request & req)
+	template<typename Memory> static inline void bcast(size_t proc ,openfpm::vector<T,Memory> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc , ext_comm,&req));
 	}
 };
@@ -61,9 +61,9 @@ public:
 template<> class MPI_IBcastW<int>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<int> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<int> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_INT, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_INT, proc , ext_comm,&req));
 	}
 };
@@ -73,9 +73,9 @@ public:
 template<> class MPI_IBcastW<unsigned int>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<unsigned int> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<unsigned int> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED, proc , ext_comm,&req));
 	}
 };
@@ -85,9 +85,9 @@ public:
 template<> class MPI_IBcastW<short>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<short> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<short> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_SHORT, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_SHORT, proc , ext_comm,&req));
 	}
 };
@@ -97,9 +97,9 @@ public:
 template<> class MPI_IBcastW<unsigned short>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<unsigned short> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<unsigned short> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc , ext_comm,&req));
 	}
 };
@@ -109,9 +109,9 @@ public:
 template<> class MPI_IBcastW<char>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<char> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<char> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_CHAR, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_CHAR, proc , ext_comm,&req));
 	}
 };
@@ -121,9 +121,9 @@ public:
 template<> class MPI_IBcastW<unsigned char>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<unsigned char> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<unsigned char> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc , ext_comm,&req));
 	}
 };
@@ -133,9 +133,9 @@ public:
 template<> class MPI_IBcastW<size_t>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<size_t> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<size_t> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc , ext_comm,&req));
 	}
 };
@@ -145,9 +145,9 @@ public:
 template<> class MPI_IBcastW<long int>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<long int> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<long int> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_LONG, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_LONG, proc , ext_comm,&req));
 	}
 };
@@ -157,9 +157,9 @@ public:
 template<> class MPI_IBcastW<float>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<float> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<float> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_FLOAT, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_FLOAT, proc , ext_comm,&req));
 	}
 };
@@ -169,9 +169,9 @@ public:
 template<> class MPI_IBcastW<double>
 {
 public:
-	static inline void bcast(size_t proc ,openfpm::vector<double> & v, MPI_Request & req)
+	static inline void bcast(size_t proc ,openfpm::vector<double> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_DOUBLE, proc , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_DOUBLE, proc , ext_comm,&req));
 	}
 };
@@ -195,15 +195,19 @@ struct bcast_inte_impl
 	//! root processor
 	size_t root;
 
+	//! MPI communicator
+	MPI_Comm ext_comm;
+
 	/*! \brief constructor
	 *
	 * \param v set of pointer buffers to set
	 *
	 */
 	inline bcast_inte_impl(vect & send,
 	                       openfpm::vector<MPI_Request> & req,
-	                       size_t root)
-	:send(send),req(req),root(root)
+	                       size_t root,
+	                       MPI_Comm ext_comm)
+	:send(send),req(req),root(root),ext_comm(ext_comm)
 	{};
 
 	//! It call the copy function for each property
@@ -216,7 +220,7 @@ struct bcast_inte_impl
 		req.add();
 
 		// gather
-		MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last());
+		MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last(),ext_comm);
 	}
 };
@@ -226,13 +230,14 @@ struct b_cast_helper
 	template<typename T, typename Mem, typename lt_type, template<typename> class layout_base >
 	static void bcast_(openfpm::vector<MPI_Request> & req,
 	                   openfpm::vector<T,Mem,lt_type,layout_base> & v,
-	                   size_t root)
+	                   size_t root,
+	                   MPI_Comm ext_comm)
 	{
 		// Create one request
 		req.add();
 
 		// gather
-		MPI_IBcastW<T>::bcast(root,v,req.last());
+		MPI_IBcastW<T>::bcast(root,v,req.last(),ext_comm);
 	}
 };
@@ -242,9 +247,10 @@ struct b_cast_helper<false>
 	template<typename T, typename Mem, typename lt_type, template<typename> class layout_base >
 	static void bcast_(openfpm::vector<MPI_Request> & req,
 	                   openfpm::vector<T,Mem,lt_type,layout_base> & v,
-	                   size_t root)
+	                   size_t root,
+	                   MPI_Comm ext_comm)
 	{
-		bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root);
+		bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root,ext_comm);
 
 		boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(bc);
 	}
...
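The last three hunks are the interesting part of this file: for vectors whose layout is not memory-linear, `b_cast_helper<false>` broadcasts each property separately, invoking `bcast_inte_impl` once per property index through `boost::mpl::for_each_ref`, so `ext_comm` has to be stored in the functor as well. A compile-time-loop sketch of that pattern, using plain `boost::mpl::for_each` (`for_each_ref` is OpenFPM's reference-passing variant; the names here are illustrative):

```cpp
#include <boost/mpl/for_each.hpp>
#include <boost/mpl/range_c.hpp>
#include <iostream>

struct per_property
{
	// Receives boost::mpl::integral_c<int,N> for N = 0 .. max_prop-1.
	template<typename T> void operator()(T) const
	{
		// In bcast_inte_impl this step is: req.add(); followed by
		// MPI_IBcastWB::bcast(root, &send.template get<T::value>(0), ..., req.last(), ext_comm);
		std::cout << "broadcast property " << T::value << "\n";
	}
};

int main()
{
	constexpr int max_prop = 3; // stands in for T::max_prop
	boost::mpl::for_each<boost::mpl::range_c<int, 0, max_prop>>(per_property());
	return 0;
}
```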
@@ -19,7 +19,7 @@
 template<typename T> class MPI_IallreduceW
 {
 public:
-	static inline void reduce(T & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(T & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
 		std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " cannot recognize " << typeid(T).name() << "\n";
 	}
@@ -32,9 +32,9 @@ public:
 template<> class MPI_IallreduceW<int>
 {
 public:
-	static inline void reduce(int & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_INT, op, ext_comm,&req));
 	}
 };
@@ -44,9 +44,9 @@ public:
 template<> class MPI_IallreduceW<unsigned int>
 {
 public:
-	static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(unsigned int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED, op, ext_comm,&req));
 	}
 };
@@ -56,9 +56,9 @@ public:
 template<> class MPI_IallreduceW<short>
 {
 public:
-	static inline void reduce(short & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(short & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_SHORT, op, ext_comm,&req));
 	}
 };
@@ -68,9 +68,9 @@ public:
 template<> class MPI_IallreduceW<unsigned short>
 {
 public:
-	static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(unsigned short & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_SHORT, op, ext_comm,&req));
 	}
 };
@@ -80,9 +80,9 @@ public:
 template<> class MPI_IallreduceW<char>
 {
 public:
-	static inline void reduce(char & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(char & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_CHAR, op, ext_comm,&req));
 	}
 };
@@ -92,9 +92,9 @@ public:
 template<> class MPI_IallreduceW<unsigned char>
 {
 public:
-	static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(unsigned char & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_CHAR, op, ext_comm,&req));
 	}
 };
@@ -104,9 +104,9 @@ public:
 template<> class MPI_IallreduceW<size_t>
 {
 public:
-	static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(size_t & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_UNSIGNED_LONG, op, ext_comm,&req));
 	}
 };
@@ -116,9 +116,9 @@ public:
 template<> class MPI_IallreduceW<long int>
 {
 public:
-	static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(long int & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_LONG, op, ext_comm,&req));
 	}
 };
@@ -128,9 +128,9 @@ public:
 template<> class MPI_IallreduceW<float>
 {
 public:
-	static inline void reduce(float & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(float & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_FLOAT, op, ext_comm,&req));
 	}
 };
@@ -140,9 +140,9 @@ public:
 template<> class MPI_IallreduceW<double>
 {
 public:
-	static inline void reduce(double & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(double & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1,MPI_DOUBLE, op, ext_comm,&req));
 	}
 };
@@ -154,9 +154,9 @@ public:
 /*template<> class MPI_IallreduceW<openfpm::vector<int>>
 {
 public:
-	static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(openfpm::vector<int> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, MPI_COMM_WORLD,&req);
+		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_INT, op, ext_comm,&req);
 	}
 };*/
@@ -166,9 +166,9 @@ public:
 /*template<> class MPI_IallreduceW<openfpm::vector<short>>
 {
 public:
-	static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(openfpm::vector<short> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, MPI_COMM_WORLD,&req);
+		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_SHORT, op, ext_comm,&req);
 	}
 };*/
@@ -178,9 +178,9 @@ public:
 /*template<> class MPI_IallreduceW<openfpm::vector<char>>
 {
 public:
-	static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(openfpm::vector<char> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, MPI_COMM_WORLD,&req);
+		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_CHAR, op, ext_comm,&req);
 	}
 };*/
@@ -190,9 +190,9 @@ public:
 /*template<> class MPI_IallreduceW<openfpm::vector<size_t>>
 {
 public:
-	static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(openfpm::vector<size_t> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD,&req);
+		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_UNSIGNED_LONG, op, ext_comm,&req);
 	}
 };*/
@@ -202,9 +202,9 @@ public:
 /*template<> class MPI_IallreduceW<openfpm::vector<float>>
 {
 public:
-	static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(openfpm::vector<float> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, MPI_COMM_WORLD,&req);
+		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_FLOAT, op, ext_comm,&req);
 	}
 };*/
@@ -215,9 +215,9 @@ public:
 /*template<> class MPI_IallreduceW<openfpm::vector<double>>
 {
 public:
-	static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req)
+	static inline void reduce(openfpm::vector<double> & buf,MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, MPI_COMM_WORLD,&req);
+		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(),MPI_DOUBLE, op, ext_comm,&req);
	}
 };*/
...
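One detail the reduce wrappers share: they reduce in place (`MPI_IN_PLACE`), so the scalar acts as both send and receive buffer, and the only thing this diff changes is the communicator. A standalone plain-MPI sketch of that call shape (not OpenFPM code):

```cpp
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	// Per-rank value, overwritten in place with the global sum,
	// as MPI_IallreduceW<size_t>::reduce(num, MPI_SUM, req, comm) does.
	size_t num = (size_t)rank;
	MPI_Request req;
	MPI_Iallreduce(MPI_IN_PLACE, &num, 1, MPI_UNSIGNED_LONG, MPI_SUM,
	               MPI_COMM_WORLD, &req);
	MPI_Wait(&req, MPI_STATUS_IGNORE);

	if (rank == 0) std::printf("sum of ranks: %zu\n", num);
	MPI_Finalize();
	return 0;
}
```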
@@ -22,9 +22,9 @@ public:
	 * \param req MPI request
	 *
	 */
-	static inline void recv(size_t proc , size_t tag ,void * buf, size_t sz, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE, proc, tag , ext_comm,&req));
 	}
 };
@@ -37,9 +37,9 @@ public:
 template<typename T> class MPI_IrecvW
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<T> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req));
 	}
 };
@@ -50,9 +50,9 @@ public:
 template<> class MPI_IrecvW<int>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<int> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_INT, proc, tag , ext_comm,&req));
 	}
 };
@@ -62,9 +62,9 @@ public:
 template<> class MPI_IrecvW<unsigned int>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned int> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , ext_comm,&req));
 	}
 };
@@ -74,9 +74,9 @@ public:
 template<> class MPI_IrecvW<short>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<short> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_SHORT, proc, tag , ext_comm,&req));
 	}
 };
@@ -86,9 +86,9 @@ public:
 template<> class MPI_IrecvW<unsigned short>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned short> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , ext_comm,&req));
 	}
 };
@@ -98,9 +98,9 @@ public:
 template<> class MPI_IrecvW<char>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<char> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_CHAR, proc, tag , ext_comm,&req));
 	}
 };
@@ -110,9 +110,9 @@ public:
 template<> class MPI_IrecvW<unsigned char>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<unsigned char> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , ext_comm,&req));
 	}
 };
@@ -122,9 +122,9 @@ public:
 template<> class MPI_IrecvW<size_t>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<size_t> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , ext_comm,&req));
 	}
 };
@@ -134,9 +134,9 @@ public:
 template<> class MPI_IrecvW<long int>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<long int> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_LONG, proc, tag , ext_comm,&req));
 	}
 };
@@ -146,9 +146,9 @@ public:
 template<> class MPI_IrecvW<float>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<float> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , ext_comm,&req));
 	}
 };
@@ -158,9 +158,9 @@ public:
 template<> class MPI_IrecvW<double>
 {
 public:
-	static inline void recv(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req)
+	static inline void recv(size_t proc , size_t tag ,openfpm::vector<double> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , ext_comm,&req));
 	}
 };
...
@@ -16,9 +16,9 @@
 class MPI_IsendWB
 {
 public:
-	static inline void send(size_t proc , size_t tag ,const void * buf, size_t sz, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,const void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(buf, sz,MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(buf, sz,MPI_BYTE, proc, tag , ext_comm,&req);
 	}
 };
@@ -31,9 +31,9 @@ public:
 template<typename T, typename Mem, typename gr> class MPI_IsendW
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<T,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<T,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE, proc, tag , ext_comm,&req);
 	}
 };
@@ -44,9 +44,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<int,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<int,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_INT, proc, tag , ext_comm,&req);
 	}
 };
@@ -56,9 +56,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<unsigned int,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED, proc, tag , ext_comm,&req);
 	}
 };
@@ -68,9 +68,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<short,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<short,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<short,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_SHORT, proc, tag , ext_comm,&req);
 	}
 };
@@ -80,9 +80,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<unsigned short,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned short,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT, proc, tag , ext_comm,&req);
 	}
 };
@@ -92,9 +92,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<char,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<char,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<char,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_CHAR, proc, tag , ext_comm,&req);
 	}
 };
@@ -104,9 +104,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<unsigned char,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<unsigned char,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR, proc, tag , ext_comm,&req);
 	}
 };
@@ -116,9 +116,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<size_t,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<size_t,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_UNSIGNED_LONG, proc, tag , ext_comm,&req);
 	}
 };
@@ -128,9 +128,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<long int,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<long int,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_LONG, proc, tag , ext_comm,&req);
 	}
 };
@@ -140,9 +140,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<float,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<float,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<float,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_FLOAT, proc, tag , ext_comm,&req);
 	}
 };
@@ -152,9 +152,9 @@ public:
 template<typename Mem, typename gr> class MPI_IsendW<double,Mem,gr>
 {
 public:
-	static inline void send(size_t proc , size_t tag ,openfpm::vector<double,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc , size_t tag ,openfpm::vector<double,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size(),MPI_DOUBLE, proc, tag , ext_comm,&req);
 	}
 };
...
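The send wrappers mirror the recv ones above; note that, unlike the recv side, the `MPI_Isend` calls are not wrapped in `MPI_SAFE_CALL` (unchanged by this diff). A minimal sketch of how a send/recv pair from these two files matches up on the same communicator (plain MPI, illustrative tag, to be run on at least two ranks):

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	const int tag = 42;                 // Vcluster actually uses SEND_RECV_BASE + tag
	std::vector<double> buf(16, rank);  // receiver must be pre-sized, as in MPI_IrecvW
	MPI_Request req;

	if (rank == 0)
		MPI_Isend(buf.data(), (int)buf.size(), MPI_DOUBLE, 1, tag,
		          MPI_COMM_WORLD, &req);
	else if (rank == 1)
		MPI_Irecv(buf.data(), (int)buf.size(), MPI_DOUBLE, 0, tag,
		          MPI_COMM_WORLD, &req);

	if (rank < 2) MPI_Wait(&req, MPI_STATUS_IGNORE);
	MPI_Finalize();
	return 0;
}
```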
@@ -8,12 +8,14 @@
 #include "util/print_stack.hpp"
 #include "util/math_util_complex.hpp"
 
+init_options global_option;
+
 Vcluster<> * global_v_cluster_private_heap = NULL;
 Vcluster<CudaMemory> * global_v_cluster_private_cuda = NULL;
 
 //
 std::vector<int> sieve_spf;
 
 // number of vcluster instances
 size_t n_vcluster = 0;
 bool ofp_initialized = false;
...
@@ -336,8 +336,8 @@ class Vcluster: public Vcluster_base<InternalMemory>
	 * \param argv main set of arguments
	 *
	 */
-	Vcluster(int *argc, char ***argv)
-	:Vcluster_base<InternalMemory>(argc,argv)
+	Vcluster(int *argc, char ***argv,MPI_Comm ext_comm = MPI_COMM_WORLD)
+	:Vcluster_base<InternalMemory>(argc,argv,ext_comm)
 	{
 	}
@@ -878,34 +878,123 @@ class Vcluster: public Vcluster_base<InternalMemory>
 };
 
+enum init_options
+{
+	none = 0x0,
+	in_situ_visualization = 0x1,
+};
+
+extern init_options global_option;
+
 // Function to initialize the global VCluster //
 extern Vcluster<> * global_v_cluster_private_heap;
 extern Vcluster<CudaMemory> * global_v_cluster_private_cuda;
 
+static inline void delete_global_v_cluster_private()
+{
+	delete global_v_cluster_private_heap;
+	delete global_v_cluster_private_cuda;
+}
+
+/*! \brief Finalize the library
+ *
+ * This function MUST be called at the end of the program
+ *
+ */
+static inline void openfpm_finalize()
+{
+	if (global_option == init_options::in_situ_visualization)
+	{
+		MPI_Request bar_req;
+		MPI_Ibarrier(MPI_COMM_WORLD,&bar_req);
+	}
+
+#ifdef HAVE_PETSC
+	PetscFinalize();
+#endif
+
+	delete_global_v_cluster_private();
+	ofp_initialized = false;
+
+#ifdef CUDA_GPU
+	// Release memory
+	mem_tmp.destroy();
+	mem_tmp.decRef();
+#endif
+}
+
 /*! \brief Initialize a global instance of Runtime Virtual Cluster Machine
  *
  * Initialize a global instance of Runtime Virtual Cluster Machine
  *
  */
-static inline void init_global_v_cluster_private(int *argc, char ***argv)
+static inline void init_global_v_cluster_private(int *argc, char ***argv, init_options option)
 {
-	if (global_v_cluster_private_heap == NULL)
-	{global_v_cluster_private_heap = new Vcluster<>(argc,argv);}
-
-	if (global_v_cluster_private_cuda == NULL)
-	{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv);}
-}
-
-static inline void delete_global_v_cluster_private()
-{
-	delete global_v_cluster_private_heap;
-	delete global_v_cluster_private_cuda;
+	global_option = option;
+
+	if (option == init_options::in_situ_visualization)
+	{
+		int flag;
+		MPI_Initialized(&flag);
+
+		if (flag == false)
+		{MPI_Init(argc,argv);}
+
+		MPI_Comm com_compute;
+
+		int rank;
+		MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+		if (rank == 0)
+		{MPI_Comm_split(MPI_COMM_WORLD, MPI_UNDEFINED,rank, &com_compute);}
+		else
+		{MPI_Comm_split(MPI_COMM_WORLD,0,rank, &com_compute);}
+
+		if (rank != 0 )
+		{
+			if (global_v_cluster_private_heap == NULL)
+			{global_v_cluster_private_heap = new Vcluster<>(argc,argv,com_compute);}
+
+			if (global_v_cluster_private_cuda == NULL)
+			{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv,com_compute);}
+		}
+		else
+		{
+			int flag = false;
+			MPI_Request bar_req;
+
+			MPI_Ibarrier(MPI_COMM_WORLD,&bar_req);
+
+			//! barrier status
+			MPI_Status bar_stat;
+
+			while(flag == false)
+			{
+				std::cout << "I am node " << rank << std::endl;
+				sleep(1);
+				MPI_SAFE_CALL(MPI_Test(&bar_req,&flag,&bar_stat));
+			}
+
+			openfpm_finalize();
+			exit(0);
+		}
+	}
+	else
+	{
+		if (global_v_cluster_private_heap == NULL)
+		{global_v_cluster_private_heap = new Vcluster<>(argc,argv);}
+
+		if (global_v_cluster_private_cuda == NULL)
+		{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv);}
+	}
 }
 
 template<typename Memory>
 struct get_vcl
 {
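What the new branch above does, in short: when `in_situ_visualization` is requested, rank 0 is carved out of the compute communicator (its `MPI_Comm_split` color is `MPI_UNDEFINED`, so it receives `MPI_COMM_NULL`), the remaining ranks build `com_compute` and construct the global `Vcluster` on it, and rank 0 parks in a polling loop on an `MPI_Ibarrier` over `MPI_COMM_WORLD` that the compute ranks only post from `openfpm_finalize()`. A standalone plain-MPI sketch of that handshake (illustrative names; unlike the patch, the sketch also waits on the barrier before finalizing):

```cpp
#include <mpi.h>
#include <unistd.h>

int main(int argc, char** argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	// Rank 0 leaves the compute communicator; everyone else joins color 0.
	MPI_Comm com_compute;
	int color = (rank == 0) ? MPI_UNDEFINED : 0;
	MPI_Comm_split(MPI_COMM_WORLD, color, rank, &com_compute);

	if (rank == 0)
	{
		// Visualization node: post the barrier early and poll it, servicing
		// in-situ work between tests, until all compute ranks reach shutdown.
		MPI_Request bar_req;
		MPI_Ibarrier(MPI_COMM_WORLD, &bar_req);

		int flag = 0;
		while (!flag)
		{
			sleep(1); // stand-in for visualization work
			MPI_Test(&bar_req, &flag, MPI_STATUS_IGNORE);
		}
	}
	else
	{
		// Compute ranks: do work on com_compute, then complete the barrier,
		// which is what openfpm_finalize() now posts on MPI_COMM_WORLD.
		MPI_Request bar_req;
		MPI_Ibarrier(MPI_COMM_WORLD, &bar_req);
		MPI_Wait(&bar_req, MPI_STATUS_IGNORE);
		MPI_Comm_free(&com_compute);
	}

	MPI_Finalize();
	return 0;
}
```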
@@ -951,7 +1040,7 @@ static inline bool is_openfpm_init()
 * This function MUST be called before any other function
 *
 */
-static inline void openfpm_init(int *argc, char ***argv)
+static inline void openfpm_init(int *argc, char ***argv, init_options option = init_options::none )
 {
 #ifdef HAVE_PETSC
@@ -959,7 +1048,7 @@ static inline void openfpm_init(int *argc, char ***argv)
 #endif
 
-	init_global_v_cluster_private(argc,argv);
+	init_global_v_cluster_private(argc,argv,option);
 
 #ifdef SE_CLASS1
 	std::cout << "OpenFPM is compiled with debug mode LEVEL:1. Remember to remove SE_CLASS1 when you go in production" << std::endl;
@@ -1000,30 +1089,6 @@ static inline void openfpm_init(int *argc, char ***argv)
 }
 
-/*! \brief Finalize the library
- *
- * This function MUST be called at the end of the program
- *
- */
-static inline void openfpm_finalize()
-{
-#ifdef HAVE_PETSC
-
-	PetscFinalize();
-
-#endif
-
-	delete_global_v_cluster_private();
-	ofp_initialized = false;
-
-#ifdef CUDA_GPU
-
-	// Release memory
-	mem_tmp.destroy();
-	mem_tmp.decRef();
-
-#endif
-}
-
 #endif
...
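Caller-side, the new option threads through `openfpm_init`, and the `init_options::none` default keeps existing programs source-compatible. A hypothetical usage sketch (include path assumed):

```cpp
#include "VCluster/VCluster.hpp" // assumed include path

int main(int argc, char* argv[])
{
	// With this option, rank 0 becomes the in-situ visualization node and
	// never returns from openfpm_init (it finalizes and exits there); all
	// other ranks continue on the compute communicator.
	openfpm_init(&argc, &argv, init_options::in_situ_visualization);

	Vcluster<> & v_cl = create_vcluster();
	// ... simulation on the compute ranks ...

	// Signals the visualization node via the MPI_Ibarrier on MPI_COMM_WORLD.
	openfpm_finalize();
	return 0;
}
```

Note the ordering constraint this creates: `openfpm_finalize()` had to move above `init_global_v_cluster_private()` in the header, since the visualization branch calls it directly.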
@@ -112,6 +112,9 @@ union red
 template<typename InternalMemory>
 class Vcluster_base
 {
+	//! external communicator
+	MPI_Comm ext_comm;
+
 	//! log file
 	Vcluster_log log;
@@ -238,8 +241,8 @@ public:
	 * \param argv pointer to arguments vector passed to the program
	 *
	 */
-	Vcluster_base(int *argc, char ***argv)
-	:NBX_cnt(0)
+	Vcluster_base(int *argc, char ***argv, MPI_Comm ext_comm)
+	:ext_comm(ext_comm),NBX_cnt(0)
 	{
 #ifdef SE_CLASS2
 		check_new(this,8,VCLUSTER_EVENT,PRJ_VCLUSTER);
@@ -259,7 +262,7 @@ public:
 		// We try to get the local processors rank
 		MPI_Comm shmcomm;
-		MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
+		MPI_Comm_split_type(ext_comm, MPI_COMM_TYPE_SHARED, 0,
 		                    MPI_INFO_NULL, &shmcomm);
 
 		MPI_Comm_rank(shmcomm, &shmrank);
@@ -268,8 +271,8 @@ public:
 		// Get the total number of process
 		// and the rank of this process
-		MPI_Comm_size(MPI_COMM_WORLD, &m_size);
-		MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
+		MPI_Comm_size(ext_comm, &m_size);
+		MPI_Comm_rank(ext_comm, &m_rank);
 
 #ifdef SE_CLASS2
 		process_v_cl = m_rank;
@@ -379,7 +382,7 @@ public:
	 */
 	MPI_Comm getMPIComm()
 	{
-		return MPI_COMM_WORLD;
+		return ext_comm;
 	}
 
	/*! \brief Get the total number of processors
@@ -449,7 +452,7 @@ public:
 		req.add();
 
 		// reduce
-		MPI_IallreduceW<T>::reduce(num,MPI_SUM,req.last());
+		MPI_IallreduceW<T>::reduce(num,MPI_SUM,req.last(),ext_comm);
 	}
 
	/*! \brief Get the maximum number across all processors (or reduction with infinity norm)
@@ -468,7 +471,7 @@ public:
 		req.add();
 
 		// reduce
-		MPI_IallreduceW<T>::reduce(num,MPI_MAX,req.last());
+		MPI_IallreduceW<T>::reduce(num,MPI_MAX,req.last(),ext_comm);
 	}
 
	/*! \brief Get the minimum number across all processors (or reduction with insinity norm)
@@ -488,7 +491,7 @@ public:
 		req.add();
 
 		// reduce
-		MPI_IallreduceW<T>::reduce(num,MPI_MIN,req.last());
+		MPI_IallreduceW<T>::reduce(num,MPI_MIN,req.last(),ext_comm);
 	}
 
	/*! \brief Send and receive multiple messages
@@ -831,7 +834,7 @@ public:
 #endif
 				tot_sent += sz[i];
-				MPI_SAFE_CALL(MPI_Issend(ptr[i], sz[i], MPI_BYTE, prc[i], SEND_SPARSE + NBX_cnt*131072 + i, MPI_COMM_WORLD,&req.last()));
+				MPI_SAFE_CALL(MPI_Issend(ptr[i], sz[i], MPI_BYTE, prc[i], SEND_SPARSE + NBX_cnt*131072 + i, ext_comm,&req.last()));
 				log.logSend(prc[i]);
 			}
 		}
@@ -852,7 +855,7 @@ public:
 			MPI_Status stat_t;
 			int stat = false;
-			MPI_SAFE_CALL(MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG/*SEND_SPARSE + NBX_cnt*/,MPI_COMM_WORLD,&stat,&stat_t));
+			MPI_SAFE_CALL(MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG/*SEND_SPARSE + NBX_cnt*/,ext_comm,&stat,&stat_t));
 
 			// If I have an incoming message and is related to this NBX communication
 			if (stat == true)
@@ -878,7 +881,7 @@ public:
 				check_valid(ptr,msize);
 #endif
 				tot_recv += msize;
-				MPI_SAFE_CALL(MPI_Recv(ptr,msize,MPI_BYTE,stat_t.MPI_SOURCE,stat_t.MPI_TAG,MPI_COMM_WORLD,&stat_t));
+				MPI_SAFE_CALL(MPI_Recv(ptr,msize,MPI_BYTE,stat_t.MPI_SOURCE,stat_t.MPI_TAG,ext_comm,&stat_t));
 
 #ifdef SE_CLASS2
 				check_valid(ptr,msize);
@@ -898,7 +901,7 @@ public:
 			// If all send has been completed
 			if (flag == true)
-			{MPI_SAFE_CALL(MPI_Ibarrier(MPI_COMM_WORLD,&bar_req));reached_bar_req = true;}
+			{MPI_SAFE_CALL(MPI_Ibarrier(ext_comm,&bar_req));reached_bar_req = true;}
 		}
 
 		// Check if all processor reached the async barrier
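Context for the four hunks above: they sit inside the NBX-style dynamic sparse data exchange (synchronous sends, probe-driven receives of unknown size, then a non-blocking barrier once all local sends complete), which now runs entirely on `ext_comm`. A compressed standalone sketch of the protocol (plain MPI, illustrative names, not the Vcluster code itself):

```cpp
#include <mpi.h>
#include <vector>

void nbx_exchange(MPI_Comm ext_comm,
                  const std::vector<int>& dests,
                  const std::vector<std::vector<char>>& msgs)
{
	const int tag = 7; // stands in for SEND_SPARSE + NBX_cnt*131072 + i
	std::vector<MPI_Request> reqs(dests.size());
	for (size_t i = 0; i < dests.size(); i++)
		MPI_Issend(msgs[i].data(), (int)msgs[i].size(), MPI_BYTE,
		           dests[i], tag, ext_comm, &reqs[i]);

	MPI_Request bar_req;
	int bar_posted = 0, done = 0;
	while (!done)
	{
		// Drain any incoming message of a-priori unknown size.
		int pending; MPI_Status st;
		MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, ext_comm, &pending, &st);
		if (pending)
		{
			int sz; MPI_Get_count(&st, MPI_BYTE, &sz);
			std::vector<char> buf(sz);
			MPI_Recv(buf.data(), sz, MPI_BYTE, st.MPI_SOURCE, st.MPI_TAG,
			         ext_comm, &st);
		}

		if (!bar_posted)
		{
			// Once every local synchronous send completed, enter the barrier.
			int sent = 0;
			MPI_Testall((int)reqs.size(), reqs.data(), &sent, MPI_STATUSES_IGNORE);
			if (sent) { MPI_Ibarrier(ext_comm, &bar_req); bar_posted = 1; }
		}
		else
		{
			// The barrier completes only when everyone has entered it,
			// i.e. no message can still be in flight.
			MPI_Test(&bar_req, &done, MPI_STATUS_IGNORE);
		}
	}
}
```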
@@ -946,7 +949,7 @@ public:
 		req.add();
 
 		// send
-		MPI_IsendWB::send(proc,SEND_RECV_BASE + tag,mem,sz,req.last());
+		MPI_IsendWB::send(proc,SEND_RECV_BASE + tag,mem,sz,req.last(),ext_comm);
 
 		return true;
 	}
@@ -981,7 +984,7 @@ public:
 		req.add();
 
 		// send
-		MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last());
+		MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm);
 
 		return true;
 	}
@@ -1012,7 +1015,7 @@ public:
 		req.add();
 
 		// receive
-		MPI_IrecvWB::recv(proc,SEND_RECV_BASE + tag,v,sz,req.last());
+		MPI_IrecvWB::recv(proc,SEND_RECV_BASE + tag,v,sz,req.last(),ext_comm);
 
 		return true;
 	}
@@ -1046,7 +1049,7 @@ public:
 		req.add();
 
 		// receive
-		MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last());
+		MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm);
 
 		return true;
 	}
@@ -1076,7 +1079,7 @@ public:
 		v.resize(getProcessingUnits());
 
 		// gather
-		MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last());
+		MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last(),ext_comm);
 
 		return true;
 	}
@@ -1104,7 +1107,7 @@ public:
 		checkType<T>();
 #endif
 
-		b_cast_helper<openfpm::vect_isel<T>::value == STD_VECTOR || is_layout_mlin<layout_base<T>>::value >::bcast_(req,v,root);
+		b_cast_helper<openfpm::vect_isel<T>::value == STD_VECTOR || is_layout_mlin<layout_base<T>>::value >::bcast_(req,v,root,ext_comm);
 
 		return true;
 	}
...
@@ -217,10 +217,8 @@ template<unsigned int ip> void test_no_send_some_peer()
 	}
 }
 
-template<unsigned int ip> void test_known()
+template<unsigned int ip> void test_known(Vcluster<> & vcl)
 {
-	Vcluster<> & vcl = create_vcluster();
-
 	// send/recv messages
 	global_rank = vcl.getProcessUnitID();
@@ -325,10 +323,8 @@ template<unsigned int ip> void test_known()
 	}
 }
 
-template<unsigned int ip> void test(unsigned int opt)
+template<unsigned int ip> void test(Vcluster<> & vcl, unsigned int opt)
 {
-	Vcluster<> & vcl = create_vcluster();
-
 	// send/recv messages
 	global_rank = vcl.getProcessUnitID();
...
@@ -191,7 +191,9 @@ BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv)
 	std::cout << "VCluster unit test start sendrecv" << "\n"