Commit b07008bc authored by incardon

Fixing installation of VCluster

parent 3235f6c6
@@ -12,7 +12,7 @@ libvcluster_a_SOURCES = VCluster.cpp
 libvcluster_a_CXXFLAGS = $(INCLUDES_PATH) $(BOOST_CPPFLAGS)
 libvcluster_a_CFLAGS =
-nobase_include_HEADERS = MPI_wrapper/MPI_IallreduceW.hpp MPI_wrapper/MPI_IrecvW.hpp MPI_wrapper/MPI_IsendW.hpp MPI_wrapper/MPI_util.hpp \
+nobase_include_HEADERS = MPI_wrapper/MPI_IallreduceW.hpp MPI_wrapper/MPI_IrecvW.hpp MPI_wrapper/MPI_IsendW.hpp MPI_wrapper/MPI_util.hpp MPI_wrapper/MPI_IAllGather.hpp \
 Pack_selector.hpp Pack_stat.hpp Packer.hpp Unpacker.hpp VCluster.hpp VCluster_object.hpp VCluster_object_array.hpp VObject.hpp \
 util/Vcluster_log.hpp
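The installation fix is the single automake change above: MPI_wrapper/MPI_IAllGather.hpp is added to nobase_include_HEADERS, so it is installed under the include directory with its MPI_wrapper/ prefix like the other wrapper headers. As a rough illustration (not part of the commit, and assuming the installed VCluster.hpp reaches MPI_IAllGather.hpp through its include chain), a downstream build against the installed headers would otherwise stop at preprocessing time:

// Hypothetical downstream translation unit, compiled against the installed
// headers (e.g. -I<prefix>/include). If MPI_wrapper/MPI_IAllGather.hpp is not
// installed, the include below fails with a missing-header error before any
// code is compiled.
#include "VCluster.hpp"

int main()
{
	// No runtime code is needed: the breakage shows up at compile time.
	return 0;
}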
@@ -317,8 +317,13 @@ public:
 	// sending map
 	openfpm::vector<size_t> map;
 
+	// barrier request
+	MPI_Request bar_req;
+	// barrier status
+	MPI_Status bar_stat;
+
 	// Distributed processor graph
-	MPI_Comm proc_comm_graph;
+	// MPI_Comm proc_comm_graph;
 
 	/*! \brief
 	 *
@@ -328,7 +333,7 @@ public:
 	openfpm::vector<size_t> NN_proc;
 
-	void setLocality(openfpm::vector<size_t> NN_proc)
+	/* void setLocality(openfpm::vector<size_t> NN_proc)
 	{
 		// Number of sources in the graph, and sources processors
 		size_t sources = NN_proc.size();
@@ -348,7 +353,7 @@ public:
 		}
 
 		MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,sources,&src_proc.get(0),(const int *)MPI_UNWEIGHTED,dest,&dest_proc.get(0),(const int *)MPI_UNWEIGHTED,MPI_INFO_NULL,true,&proc_comm_graph);
-	}
+	}*/
 
 	/*! \brief Send and receive multiple messages within local processors
 	 *
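The two hunks above disable setLocality() and the proc_comm_graph member it filled by wrapping them in comments. For reference, the call being switched off builds an MPI neighbourhood (distributed-graph) communicator. The sketch below is a standalone illustration of MPI_Dist_graph_create_adjacent, not the library's code; the symmetric neighbour list (same ranks as sources and destinations) is an assumption made for brevity, whereas the original passed separate src_proc and dest_proc vectors.

// Minimal sketch: create a distributed-graph communicator from a neighbour
// list with no edge weights (MPI_UNWEIGHTED), as the disabled setLocality()
// did. Symmetric sources/destinations are an assumption for brevity.
#include <mpi.h>
#include <vector>

MPI_Comm make_neighbour_comm(const std::vector<int> & neighbours)
{
	MPI_Comm graph_comm;

	MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,
	                               (int)neighbours.size(), neighbours.data(), MPI_UNWEIGHTED,
	                               (int)neighbours.size(), neighbours.data(), MPI_UNWEIGHTED,
	                               MPI_INFO_NULL,
	                               1,              // allow MPI to reorder ranks
	                               &graph_comm);

	return graph_comm;
}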
@@ -455,11 +460,6 @@ public:
 		sendrecvMultipleMessagesPCX(prc.size(),(size_t *)map.getPointer(),(size_t *)sz_send.getPointer(),(size_t *)prc.getPointer(),(void **)ptr_send.getPointer(),msg_alloc,ptr_arg,opt);
 	}
 
-	// barrier request
-	MPI_Request bar_req;
-	// barrier status
-	MPI_Status bar_stat;
-
 	/*! \brief Send and receive multiple messages local
 	 *
 	 * It send multiple messages to the near processor the and receive
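The last hunk, together with the first one, moves the bar_req / bar_stat members up next to the rest of the communication state instead of leaving them between the send/receive methods. As a hedged illustration of what such a stored request/status pair is normally for (an assumption about this class, not code from the commit): in sparse message exchanges, a non-blocking barrier is posted once a rank has matched all of its sends, and the rank keeps servicing incoming messages until MPI_Test reports that every rank has reached the barrier.

// Sketch of the usual role of a stored MPI_Request/MPI_Status pair with a
// non-blocking barrier (requires MPI-3 for MPI_Ibarrier). This is an
// assumption about how bar_req/bar_stat are used, not code from the commit.
#include <mpi.h>

void wait_barrier_while_servicing(MPI_Request & bar_req, MPI_Status & bar_stat)
{
	// Post the non-blocking barrier once this rank has matched all the
	// messages it needs to send.
	MPI_Ibarrier(MPI_COMM_WORLD, &bar_req);

	int done = 0;
	while (!done)
	{
		// ... probe for and receive any still-incoming messages here ...

		// The barrier completes only after every rank has entered it,
		// so no further messages can be on the way.
		MPI_Test(&bar_req, &done, &bar_stat);
	}
}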