Commit 11cfe46d authored by incardon's avatar incardon

Fixing with new VCluster

parent 88394970
......@@ -3,7 +3,14 @@
#include "Decomposition/CartDecomposition.hpp"
#include "VCluster.hpp"
/*
/*! \page VCluster VCluster
*
* \subpage VCluster_0_simple
* \subpage VCluster_1_semantic
*
*/
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
......@@ -12,13 +19,13 @@
*
* ## Simple example
*
* This example show several basic functionalities of VCluster
* This example show several basic functionalities of Vcluster
*
*
*/
int main(int argc, char* argv[])
{
/*
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
......@@ -27,7 +34,7 @@ int main(int argc, char* argv[])
*
* Before using any functionality the library must be initialized
*
* \snippet Vcluster/0_simple/main.cpp initialization
* \snippet VCluster/0_simple/main.cpp initialization
*
*/
......@@ -37,39 +44,45 @@ int main(int argc, char* argv[])
//! \cond [initialization] \endcond
/*
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
* ### Initialization of Vcluster
*
* Because in general our program is parallel we have more than one processors. With
* the function getProcessingUnits we can querry how many processors are involved in
* out computation
* the function getProcessingUnits() we can get how many processors are involved in
* our computation
*
* \snippet Vcluster/0_simple/main.cpp initialization
* \snippet VCluster/0_simple/main.cpp create
*
*/
//! \cond [create] \endcond
Vcluster & v_cl = create_vcluster();
long int N_prc = v_cl.getProcessingUnits();
/*
//! \cond [create] \endcond
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
*
* ### min max, sum
* ### Min, max, sum
*
* with the function getProcessUnitID we can the the id of the processor executing
* With the function getProcessUnitID() we can get the id of the processor executing
* the function. This function is equivalent to the MPI rank function.
* Vcluster provide several high and low level functionalities. One is max that
* Vcluster provides several high and low level functionalities. One is max that
* return the maximum value across processors. There is also the function min
* and sum that return respectively the sum and the minimum across processors.
* All these operations are asynchronous, in order to get the result the function
* execute must be used. In our example the processor 0 print we also print the value
 * execute must be used. In our example only processor 0 prints the result,
 * but it can easily be verified that the other processors have the same value.
*
* \snippet Vcluster/0_simple/main.cpp max calc
* \snippet VCluster/0_simple/main.cpp max calc
*
*
*/
......@@ -84,15 +97,17 @@ int main(int argc, char* argv[])
//! \cond [max calc] \endcond
/*
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
* We sum all the processor ranks the result should be that should
* be \$\frac{(n-1)n}{2}\$, only processor 0 print on terminal
*
*
* \snippet Vcluster/0_simple/main.cpp sum calc
* We sum all the processor ranks the result should be \f$\frac{(n-1)n}{2}\f$, only processor 0
* print on terminal
*
*
* \snippet VCluster/0_simple/main.cpp sum calc
*
*/
......@@ -107,18 +122,20 @@ int main(int argc, char* argv[])
//! \cond [sum calc] \endcond
/*
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
* We sum all the processor ranks the result should be that should
* be \$\frac{(n-1)n}{2}\$, only processor 0 print on terminal
 * Then each processor sends its own rank. The vector of all ranks is collected on all
 * processors.
*
*
* \snippet Vcluster/0_simple/main.cpp max calc
* \snippet VCluster/0_simple/main.cpp gather
*
*/
//! \cond [gather] \endcond
long int id3 = v_cl.getProcessUnitID();
openfpm::vector<long int> v;
......@@ -134,15 +151,26 @@ int main(int argc, char* argv[])
std::cout << "\n";
}
//
// ### WIKI 5 ###
//
// we can also send messages to specific processors, with the condition that the receiving
// processors know we want to communicate with them, if you are searching for a more
// free way to communicate where the receiving processors does not know which one processor
// want to communicate with us, see the example 1_dsde
//
//! \cond [gather] \endcond
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
* ### Send and recv
*
 * We can also send messages to specific processors, with the condition that the receiving
 * processor is aware of such communication (send and recv must be coupled).
 * If you are searching for a freer way to communicate, where the receiving processor
 * does not know which processor wants to communicate with it, see the example 1_dsde
*
* \snippet VCluster/0_simple/main.cpp recvsend
*
*/
//! \cond [recvsend] \endcond
// Create 2 messages with a hello message inside
std::stringstream ss_message_1;
std::stringstream ss_message_2;
ss_message_1 << "Hello from " << std::setw(8) << v_cl.getProcessUnitID() << "\n";
......@@ -153,9 +181,11 @@ int main(int argc, char* argv[])
// Processor 0 send to processors 1,2 , 1 to 2,1, 2 to 0,1
// send the message
v_cl.send(((id3+1)%N_prc + N_prc)%N_prc,0,message_1.c_str(),msg_size);
v_cl.send(((id3+2)%N_prc + N_prc)%N_prc,0,message_2.c_str(),msg_size);
// create the receiving buffer
openfpm::vector<char> v_one;
v_one.resize(msg_size);
openfpm::vector<char> v_two(msg_size);
......@@ -167,6 +197,7 @@ int main(int argc, char* argv[])
v_cl.recv(((id3-2)%N_prc + N_prc)%N_prc,0,(void *)v_two.getPointer(),msg_size);
v_cl.execute();
// Processor 0 print the received message
if (v_cl.getProcessUnitID() == 0)
{
for (size_t i = 0 ; i < msg_size ; i++)
......@@ -176,19 +207,30 @@ int main(int argc, char* argv[])
std::cout << v_two.get(i);
}
//
// ### WIKI 5 ###
//
// we can also do what we did before in one shot
//
//! \cond [recvsend] \endcond
id = v_cl.getProcessUnitID();
/*!
*
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
* ### All in one
*
* Because all previous functions are asynchronous
* we can also do what we did before in one shot
*
* \snippet VCluster/0_simple/main.cpp allinonestep
*
*/
//! \cond [allinonestep] \endcond
// Get the rank of the processor and put this rank in one variable
id = v_cl.getProcessUnitID();
id2 = v_cl.getProcessUnitID();
id3 = v_cl.getProcessUnitID();
v.clear();
// convert the string into a vector
openfpm::vector<char> message_1_v(msg_size);
openfpm::vector<char> message_2_v(msg_size);
......@@ -198,8 +240,11 @@ int main(int argc, char* argv[])
for (size_t i = 0 ; i < msg_size ; i++)
message_2_v.get(i) = message_2[i];
// Calculate the maximum across all the ranks
v_cl.max(id);
// Calculate the sum across all the ranks
v_cl.sum(id2);
// all processor send one number, all processor receive all numbers
v_cl.allGather(id3,v);
// in the case of vectors we have special functions that avoid having to specify the size
......@@ -209,7 +254,8 @@ int main(int argc, char* argv[])
v_cl.recv(((id-2)%N_prc + N_prc)%N_prc,0,v_two);
v_cl.execute();
if (v_cl.getProcessUnitID() == 0)
// Only processor 1 prints the received data
if (v_cl.getProcessUnitID() == 1)
{
std::cout << "Maximum processor rank: " << id << "\n";
std::cout << "Sum of all processors rank: " << id << "\n";
......@@ -227,5 +273,31 @@ int main(int argc, char* argv[])
std::cout << v_two.get(i);
}
//! \cond [allinonestep] \endcond
/*!
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
* ## Finalize ##
*
* At the very end of the program we have always to de-initialize the library
*
* \snippet VCluster/0_simple/main.cpp finalize
*
*/
//! \cond [finalize] \endcond
openfpm_finalize();
//! \cond [finalize] \endcond
/*!
* \page VCluster_0_simple Using Vcluster to communicate across processors
*
* # Full code # {#code}
*
* \include VCluster/0_simple/main.cpp
*
*/
}
include ../../example.mk
CC=mpic++
LDIR =
OBJ = main.o
%.o: %.cpp
$(CC) -O3 -g -c --std=c++11 -o $@ $< $(INCLUDE_PATH)
vcluster: $(OBJ)
$(CC) -o $@ $^ $(CFLAGS) $(LIBS_PATH) $(LIBS)
all: vcluster
run: all
mpirun -np 3 ./vcluster
.PHONY: clean all run
clean:
rm -f *.o *~ core vcluster
[pack]
files = main.cpp Makefile
#include "Grid/grid_dist_id.hpp"
#include "data_type/aggregate.hpp"
#include "Decomposition/CartDecomposition.hpp"
#include "VCluster.hpp"
/*!
*
* \page VCluster_1_semantic Using Vcluster for Dynamic Sparse Data Exchange
*
* # Dynamic Sparse Data Exchange
*
 * Dynamic Sparse Data Exchange, or DSDE, is a typical point-to-point communication in which
 * senders know to which processors they send, but receivers have no knowledge about from
 * where they are receiving. OpenFPM uses the NBX method, or Non-blocking consensus exchange.
 * (Said without bombastic words: each processor waits for incoming messages. A pretty basic achievement
 * and technique in standard server programming, pictured as a bombastic and incredible discovery in MPI)
*
*/
#define N_NEXT 3
#define N_NUMBERS 5
int main(int argc, char* argv[])
{
/*!
*
 * \page VCluster_1_semantic Using Vcluster for Dynamic Sparse Data Exchange
*
*
* ## Initialization
*
* Before using any functionality the library must be initialized. After initialization we can create
* the Vcluster object
*
 * \snippet VCluster/1_semantic/main.cpp initialization
*
*/
//! \cond [initialization] \endcond
openfpm_init(&argc,&argv);
Vcluster & v_cl = create_vcluster();
//! \cond [initialization] \endcond
/*!
*
* \page VCluster_1_semantic Using Vcluster for Dynamic Sparse Data Exchange
*
* ## Dynamic Sparse Data Exchange
*
 * To do dynamic sparse data exchange, each processor fills a send processor list
 * and creates a message for each processor. In this case the message will be a complex
 * object. OpenFPM uses the capability to serialize a complex object into a sequence of bytes
 * to send over the network and to de-serialize or re-assemble the object on the other
 * processors. In this case the complex object is a list of double numbers. At the end
 * of the example each processor prints what it received
*
* \snippet VCluster/1_semantic/main.cpp ssendrecv
*
*/
//! \cond [ssendrecv] \endcond
// id of the processor calling this function
long int proc_id = v_cl.getProcessUnitID();
// number of processors executing this program
long int n_proc = v_cl.getProcessingUnits();
// List of processors we communicate with
openfpm::vector<size_t> prc_send;
// For each processor we want to send a vector of doubles
// in this case each processor send N_NEXT vectors.
// In general we can think to openfpm::vector<T> as a set of objects T.
// where we want to send one object T to each processor in our sending list.
// In this case T is a list of double or (openfpm::vector<double>)
openfpm::vector<openfpm::vector<double>> messages_send(N_NEXT);
// Here we prepare the sending buffer
for (size_t i = 0, m = 0 ; i < N_NEXT ; i++, m++)
{
// create the sending processor list
prc_send.add(openfpm::math::positive_modulo(proc_id + i + 1,n_proc));
// Fill the vectors with some data
for (size_t j = 0 ; j < N_NUMBERS ; j++)
messages_send.get(m).add(j+N_NUMBERS*proc_id);
}
// Buffer that receive messages
openfpm::vector<double> messages_recv2;
// List of processors from which we receive
openfpm::vector<size_t> prc_recv2;
// number of elements we receive from each processors
openfpm::vector<size_t> sz_recv2;
v_cl.SSendRecv(messages_send,messages_recv2,prc_send,prc_recv2,sz_recv2);
// here each processor print the received message
std::cout << "Processor " << proc_id << " received ";
for (size_t i = 0 ; i < messages_recv2.size() ; i++)
std::cout << messages_recv2.get(i) << " ";
std::cout << std::endl;
//! \cond [ssendrecv] \endcond
/*!
 * \page VCluster_1_semantic Using Vcluster for Dynamic Sparse Data Exchange
*
* ## Finalize ##
*
* At the very end of the program we have always to de-initialize the library
*
* \snippet VCluster/1_semantic/main.cpp finalize
*
*/
//! \cond [finalize] \endcond
openfpm_finalize();
//! \cond [finalize] \endcond
}
LINKLIBS = $(OPENMP_LDFLAGS) $(LIBHILBERT_LIB) $(PETSC_LIB) $(METIS_LIB) $(PARMETIS_LIB) $(PTHREAD_LIBS) $(OPT_LIBS) $(BOOST_LDFLAGS) $(BOOST_IOSTREAMS_LIB) $(CUDA_LIBS)
noinst_PROGRAMS = cart_dec metis_dec dom_box vector_dist
cart_dec_SOURCES = CartDecomposition_gen_vtk.cpp ../src/lib/pdata.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
cart_dec_SOURCES = CartDecomposition_gen_vtk.cpp ../src/lib/pdata.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
cart_dec_CXXFLAGS = $(OPENMP_CFLAGS) $(AM_CXXFLAGS) $(PETSC_INCLUDE) $(METIS_INCLUDE) $(PARMETIS_INCLUDE) $(CUDA_CFLAGS) $(INCLUDES_PATH) $(BOOST_CPPFLAGS) -I../src -Wno-unused-function -Wno-unused-local-typedefs
cart_dec_CFLAGS = $(OPENMP_CFLAGS) $(CUDA_CFLAGS)
cart_dec_LDADD = $(LINKLIBS) -lparmetis -lmetis
metis_dec_SOURCES = Metis_gen_vtk.cpp ../src/lib/pdata.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
metis_dec_SOURCES = Metis_gen_vtk.cpp ../src/lib/pdata.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
metis_dec_CXXFLAGS = $(OPENMP_CFLAGS) $(AM_CXXFLAGS) $(PETSC_INCLUDE) $(METIS_INCLUDE) $(CUDA_CFLAGS) $(INCLUDES_PATH) $(BOOST_CPPFLAGS) -I../src -Wno-unused-function -Wno-unused-local-typedefs
metis_dec_CFLAGS = $(OPENMP_CFLAGS) $(CUDA_CFLAGS)
metis_dec_LDADD = $(LINKLIBS) -lmetis
dom_box_SOURCES = domain_gen_vtk.cpp ../src/lib/pdata.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
dom_box_SOURCES = domain_gen_vtk.cpp ../src/lib/pdata.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
dom_box_CXXFLAGS = $(OPENMP_CFLAGS) $(AM_CXXFLAGS) $(PETSC_INCLUDE) $(METIS_INCLUDE) $(CUDA_CFLAGS) $(INCLUDES_PATH) $(BOOST_CPPFLAGS) -I../src -Wno-unused-function -Wno-unused-local-typedefs
dom_box_CFLAGS = $(OPENMP_CFLAGS) $(CUDA_CFLAGS)
dom_box_LDADD = $(LINKLIBS)
vector_dist_SOURCES = vector.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_vcluster/src/VCluster.cpp ../openfpm_devices/src/memory/PtrMemory.cpp
vector_dist_SOURCES = vector.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_vcluster/src/VCluster/VCluster.cpp ../openfpm_devices/src/memory/PtrMemory.cpp
vector_dist_CXXFLAGS = $(OPENMP_CFLAGS) $(AM_CXXFLAGS) $(LIBHILBERT_INCLUDE) $(PETSC_INCLUDE) $(PARMETIS_INCLUDE) $(METIS_INCLUDE) $(CUDA_CFLAGS) $(INCLUDES_PATH) $(HDF5_CPPFLAGS) $(BOOST_CPPFLAGS) -I../src -Wno-unused-function -Wno-unused-local-typedefs
vector_dist_CFLAGS = $(OPENMP_CFLAGS) $(CUDA_CFLAGS)
vector_dist_LDADD = $(LINKLIBS) -lparmetis -lmetis
......
......@@ -132,6 +132,10 @@ fi
echo -e "\033[1;34;5mDEPENCENCIES INSTALLATION DIR \033[0m"
echo -e "Every required dependencies if needed will be installed into: \033[1;34;5m$i_dir\033[0m"
echo -e "if you want to install somewhere else do ./install -i /somewhere/else"
if [ x"$configure_options" == x"" ]; then
echo -e "OpenFPM will be installed into: \033[1;34;5m/usr/local\033[0m"
echo "if you want to install in a different place use ./install -c \"--prefix=/where/you/want/to/install\""
fi
if [ $sq -eq 0 ]; then
unset commands
commands[0]="Continue"
......
openfpm_data @ 36731ae1
Subproject commit a358ca063d5aaca60357390bda29c8aed189302e
Subproject commit 36731ae1e108c64e6210aa63daedd7d665faf52f
openfpm_vcluster @ 7545a755
Subproject commit 3f37237e5b7f263e4096b9e3c4b54b6008843a50
Subproject commit 7545a7557a9ac599528db741ddf0d41ae6078bd2
......@@ -10,7 +10,7 @@
#include "config.h"
#include <cmath>
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
#include "Graph/CartesianGraphFactory.hpp"
#include "Decomposition.hpp"
#include "Vector/map_vector.hpp"
......
......@@ -298,22 +298,20 @@ BOOST_AUTO_TEST_CASE( DistParmetis_distribution_test)
//! [Initialize a ParMetis Cartesian graph and decompose]
if (v_cl.getProcessUnitID() == 0)
{
// write the first decomposition
pmet_dist.write("vtk_dist_parmetis_distribution_0");
// write the first decomposition
pmet_dist.write("vtk_dist_parmetis_distribution_0");
#ifdef HAVE_OSX
bool test = compare("vtk_dist_parmetis_distribution_0.vtk","src/Decomposition/Distribution/test_data/vtk_dist_parmetis_distribution_0_osx_test.vtk");
BOOST_REQUIRE_EQUAL(true,test);
bool test = compare("vtk_dist_parmetis_distribution_0.vtk","src/Decomposition/Distribution/test_data/vtk_dist_parmetis_distribution_0_osx_test.vtk");
BOOST_REQUIRE_EQUAL(true,test);
#else
bool test = compare("vtk_dist_parmetis_distribution_0.vtk","src/Decomposition/Distribution/test_data/vtk_dist_parmetis_distribution_0_test.vtk");
BOOST_REQUIRE_EQUAL(true,test);
bool test = compare("vtk_dist_parmetis_distribution_0.vtk","src/Decomposition/Distribution/test_data/vtk_dist_parmetis_distribution_0_test.vtk");
BOOST_REQUIRE_EQUAL(true,test);
#endif
}
//! [refine with dist_parmetis the decomposition]
......
......@@ -11,7 +11,7 @@
#include <iostream>
#include "parmetis.h"
#include "VTKWriter/VTKWriter.hpp"
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
/*! \brief Metis graph structure
*
......
......@@ -11,7 +11,7 @@
#include <iostream>
#include "parmetis.h"
#include "VTKWriter/VTKWriter.hpp"
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
#include "Graph/ids.hpp"
/*! \brief Metis graph structure
......
......@@ -8,7 +8,7 @@
#ifndef SRC_DECOMPOSITION_NN_PROCESSOR_UNIT_TEST_HPP_
#define SRC_DECOMPOSITION_NN_PROCESSOR_UNIT_TEST_HPP_
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
void create_decomposition2x2(openfpm::vector<openfpm::vector<long unsigned int>> & box_nn_processor, openfpm::vector<SpaceBox<2,float>> & sub_domains)
{
......
......@@ -8,7 +8,7 @@
#ifndef DISTGRAPHFACTORYOLD_HPP_
#define DISTGRAPHFACTORYOLD_HPP_
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
#include "Vector/map_vector.hpp"
#include "Graph/map_graph.hpp"
#include "Grid/grid_sm.hpp"
......
......@@ -64,7 +64,7 @@
#include <unordered_map>
#include "Packer_Unpacker/Packer.hpp"
#include "Packer_Unpacker/Unpacker.hpp"
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
#define NO_EDGE -1
#define DIST_GRAPH_ERROR 7001
......@@ -1033,8 +1033,8 @@ public:
* Constructor
*
*/
DistGraph_CSR(DistGraph_CSR && dg) :
vcl(create_vcluster())
DistGraph_CSR(DistGraph_CSR && dg)
:vcl(create_vcluster())
{
this->operator=(dg);
}
......
......@@ -4,7 +4,7 @@
#include <vector>
#include <unordered_map>
#include "Grid/map_grid.hpp"
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
#include "Space/SpaceBox.hpp"
#include "util/mathutil.hpp"
#include "grid_dist_id_iterator_dec.hpp"
......
......@@ -49,7 +49,7 @@ struct GBoxes
#define FIXED 2
#include "grid_dist_key.hpp"
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
......
LINKLIBS = $(OPENMP_LDFLAGS) $(LIBHILBERT_LIB) $(METIS_LIB) $(PTHREAD_LIBS) $(OPT_LIBS) $(BOOST_LDFLAGS) $(BOOST_IOSTREAMS_LIB) $(CUDA_LIBS) $(PETSC_LIB) $(HDF5_LDFLAGS) $(HDF5_LIBS) $(PARMETIS_LIB) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(BOOST_CHRONO_LIB) $(BOOST_TIMER_LIB) $(BOOST_SYSTEM_LIB) $(LIBIFCORE)
noinst_PROGRAMS = pdata
pdata_SOURCES = main.cpp Grid/grid_dist_id_unit_test.cpp lib/pdata.cpp test_multiple_o.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
pdata_SOURCES = main.cpp Grid/grid_dist_id_unit_test.cpp lib/pdata.cpp test_multiple_o.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
pdata_CXXFLAGS = $(OPENMP_CFLAGS) $(AM_CXXFLAGS) $(LIBHILBERT_INCLUDE) $(PETSC_INCLUDE) $(HDF5_CPPFLAGS) $(CUDA_CFLAGS) $(INCLUDES_PATH) $(PARMETIS_INCLUDE) $(METIS_INCLUDE) $(BOOST_CPPFLAGS) $(H5PART_INCLUDE) -DPARALLEL_IO -Wno-unused-local-typedefs
pdata_CFLAGS = $(CUDA_CFLAGS)
pdata_LDADD = $(LINKLIBS) -lparmetis -lmetis
......
......@@ -9,7 +9,7 @@
#define VECTOR_HPP_
#include "HDF5_XdmfWriter/HDF5_XdmfWriter.hpp"
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
#include "Space/Shape/Point.hpp"
#include "Vector/vector_dist_iterator.hpp"
#include "Space/Shape/Box.hpp"
......
......@@ -9,7 +9,7 @@
#define VECTOR_DIST_ITERATOR_HPP_
#include "vector_dist_key.hpp"
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
class vector_dist_iterator
{
......
......@@ -8,7 +8,7 @@
#ifndef UNIT_TEST_INIT_CLEANUP_HPP_
#define UNIT_TEST_INIT_CLEANUP_HPP_
#include "VCluster.hpp"
#include "VCluster/VCluster.hpp"
struct ut_start {
ut_start() { BOOST_TEST_MESSAGE("Initialize global VCluster"); openfpm_init(&boost::unit_test::framework::master_test_suite().argc,&boost::unit_test::framework::master_test_suite().argv); }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment