Commit 35593873 authored by incardon

Creating and uploading examples

parent f8c2ec50
include ../../example.mk
CC=mpic++
LDIR =
OBJ = main.o
%.o: %.cpp
$(CC) -O3 -c --std=c++11 -o $@ $< $(INCLUDE_PATH)
grid: $(OBJ)
$(CC) -o $@ $^ $(CFLAGS) $(LIBS_PATH) $(LIBS)
all: grid
.PHONY: clean all
clean:
rm -f *.o *~ core grid
[pack]
files = main.cpp Makefile
#include "Grid/grid_dist_id.hpp"
#include "data_type/scalar.hpp"
#include "Decomposition/CartDecomposition.hpp"
/*
* ### WIKI 1 ###
*
* ## Simple example
*
* This example shows several basic functionalities of the distributed grid
*
* ### WIKI END ###
*
*/
int main(int argc, char* argv[])
{
//
// ### WIKI 2 ###
//
// Initialize the library and several objects
//
init_global_v_cluster(&argc,&argv);
//
// ### WIKI 3 ###
//
// Create several objects needed later, in particular
// * a 3D box that defines the domain
// * an array of 3 unsigned integers that defines the size of the grid in each dimension
// * a Ghost object that defines the extension of the ghost part for each sub-domain in physical units
Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
size_t sz[3];
sz[0] = 100;
sz[1] = 100;
sz[2] = 100;
// Ghost
Ghost<3,float> g(0.01);
//
// ### WIKI 4 ###
//
// Create a distributed grid in 3D (first template parameter), defined in R^3 with float precision (second template parameter),
// storing a scalar<float[3]> on each grid point (third template parameter) and decomposed with a CartDecomposition strategy
// (fourth template parameter). The first and second template parameters of CartDecomposition must match the first and second of grid_dist_id.
//
// Constructor parameters:
//
// * sz: size of the grid on each dimension
// * domain: where the grid is defined
// * g: ghost extension
//
grid_dist_id<3, float, scalar<float[3]>, CartDecomposition<3,float>> g_dist(sz,domain,g);
// ### WIKI 5 ###
//
// Get an iterator that goes through the points of the domain (no ghost)
//
auto dom = g_dist.getDomainIterator();
// ### WIKI END ###
size_t count = 0;
// Iterate over all the points
while (dom.isNext())
{
//
// ### WIKI 6 ###
//
// Get the local grid key. The local grid key stores internally the sub-domain id (each sub-domain contains a grid)
// and the local grid point id, identified by 2 integers in 2D, 3 integers in 3D and so on. These two distinct elements
// are available with key.getSub() and key.getKey()
//
auto key = dom.get();
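//
// (Illustrative sketch, not part of the original example: the two pieces of
//  information described above can be read back separately with the accessors
//  named in the comment, e.g.
//
//    auto sub_id  = key.getSub();   // id of the sub-domain (local grid) owning the point
//    auto local_k = key.getKey();   // local grid indices inside that sub-domain
//  )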
//
// ### WIKI 7 ###
//
// Here we convert the local grid position into a global position; key_g contains 3 integers that identify the position
// of the grid point in global coordinates
//
//
auto key_g = g_dist.getGKey(key);
//
// ### WIKI 8 ###
//
// we write on the grid point at position (i,j,k) the value i*i + j*j + k*k into component [0] of the vector
g_dist.template get<0>(key)[0] = key_g.get(0)*key_g.get(0) + key_g.get(1)*key_g.get(1) + key_g.get(2)*key_g.get(2);
// ### WIKI END ###
// Count the points
count++;
//
// ### WIKI 9 ###
//
// next point
++dom;
// ### WIKI END ###
}
//
// ### WIKI 10 ###
//
// Each sub-domain has an extended (ghost) part that is physically owned by another processor and, in general, is not synchronized.
// ghost_get<0> synchronizes property 0 (the vector) in the ghost part
//
//
g_dist.template ghost_get<0>();
//
// ### WIKI 11 ###
//
// count contains the number of points owned by the local processor. If we are interested in the total number across all
// processors, we can sum across processors. First we get an instance of Vcluster, queue a sum operation on the
// variable count and finally call execute. All the operations are asynchronous; execute works like a barrier and ensures
// that all the queued operations are executed
//
Vcluster & vcl = g_dist.getVC();
vcl.sum(count);
vcl.execute();
// only master output
if (vcl.getProcessUnitID() == 0)
std::cout << "Number of points: " << count << "\n";
//
// ### WIKI 12 ###
//
// Finally we want a nice output to visualize the information stored by the distributed grid
//
g_dist.write("output");
//
// ### WIKI 13 ###
//
// For debugging purposes and demonstration we output the decomposition
//
g_dist.getDecomposition().write("dec/out");
//
// ### WIKI 14 ###
//
// Deinitialize the library
//
delete(global_v_cluster);
}
include ../../example.mk
CC=mpic++
LDIR =
OBJ = main.o
%.o: %.cpp
$(CC) -O3 -c --std=c++11 -o $@ $< $(INCLUDE_PATH)
stencil: $(OBJ)
$(CC) -o $@ $^ $(CFLAGS) $(LIBS_PATH) $(LIBS)
all: stencil
.PHONY: clean all
clean:
rm -f *.o *~ core stencil
[pack]
files = main.cpp Makefile
#include "Grid/grid_dist_id.hpp"
#include "data_type/scalar.hpp"
#include "Decomposition/CartDecomposition.hpp"
/*
* ### WIKI 1 ###
*
* ## Simple example
*
* This example shows how to move a grid_key in order to build a Laplacian stencil.
* Be careful: the move functions are convenient, but we suggest not using them in
* speed-critical parts of the code
*
* ### WIKI END ###
*
*/
/*
*
* ### WIKI 2 ###
*
* Define some convenient constants
*
*/
constexpr size_t x = 0;
constexpr size_t y = 1;
constexpr size_t z = 2;
int main(int argc, char* argv[])
{
//
// ### WIKI 3 ###
//
// Initialize the library and several objects
//
init_global_v_cluster(&argc,&argv);
//
// ### WIKI 4 ###
//
// Create several objects needed later, in particular
// * a 3D box that defines the domain
// * an array of 3 unsigned integers that defines the size of the grid in each dimension
// * a Ghost object that defines the extension of the ghost part for each sub-domain in physical units
Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
size_t sz[3];
sz[0] = 100;
sz[1] = 100;
sz[2] = 100;
// Ghost
Ghost<3,float> g(0.03);
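// (Note, not in the original comments: with 100 grid points on a unit domain the grid
//  spacing is 1/100 = 0.01, so a ghost of 0.03 in physical units comfortably covers the
//  one-neighbour accesses of the Laplace stencil used below.)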
//
// ### WIKI 5 ###
//
// Create a distributed grid in 3D (first template parameter), defined in R^3 with float precision (second template parameter),
// storing a scalar<float[3]> on each grid point (third template parameter) and decomposed with a CartDecomposition strategy
// (fourth template parameter). The first and second template parameters of CartDecomposition must match the first and second of grid_dist_id.
//
// Constructor parameters:
//
// * sz: size of the grid on each dimension
// * domain: where the grid is defined
// * g: ghost extension
//
grid_dist_id<3, float, scalar<float[3]>, CartDecomposition<3,float>> g_dist(sz,domain,g);
// ### WIKI 6 ###
//
// Get an iterator that goes through the points of the domain (no ghost)
//
auto dom = g_dist.getDomainIterator();
// ### WIKI END ###
while (dom.isNext())
{
//
// ### WIKI 7 ###
//
// Get the local grid key. The local grid key stores internally the sub-domain id (each sub-domain contains a grid)
// and the local grid point id, identified by 2 integers in 2D, 3 integers in 3D and so on. These two distinct elements
// are available with key.getSub() and key.getKey()
//
auto key = dom.get();
//
// ### WIKI 8 ###
//
// Here we convert the local grid position into a global position; key_g contains 3 integers that identify the position
// of the grid point in global coordinates
//
//
auto key_g = g_dist.getGKey(key);
//
// ### WIKI 9 ###
//
// we write on the grid point at position (i,j,k) the value i*i + j*j + k*k into component [0] of the vector
g_dist.template get<0>(key)[0] = key_g.get(0)*key_g.get(0) + key_g.get(1)*key_g.get(1) + key_g.get(2)*key_g.get(2);
//
// ### WIKI 10 ###
//
// next point
++dom;
// ### WIKI END ###
}
//
// ### WIKI 11 ###
//
// Each sub-domain has an extended (ghost) part that is physically owned by another processor and, in general, is not synchronized.
// ghost_get<0> synchronizes property 0 (the vector) in the ghost part
//
//
g_dist.template ghost_get<0>();
//
// ### WIKI 12 ###
//
// Get another iterator and iterate across all the domain points, computing a Laplace stencil
//
//
dom = g_dist.getDomainIterator();
while (dom.isNext())
{
auto key = dom.get();
// Laplace stencil
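// (Descriptive note, not in the original comments: the statement below applies the
//  Laplace stencil to component [0], i.e. the sum of the six axis neighbours reached
//  with key.move() minus six times the centre value (without the 1/h^2 factor), and
//  stores the result in component [1]. Neighbours that fall in the ghost layer were
//  filled by the ghost_get<0>() call above.)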
g_dist.template get<0>(key)[1] = g_dist.template get<0>(key.move(x,1))[0] + g_dist.template get<0>(key.move(x,-1))[0] +
g_dist.template get<0>(key.move(y,1))[0] + g_dist.template get<0>(key.move(y,-1))[0] +
g_dist.template get<0>(key.move(z,1))[0] + g_dist.template get<0>(key.move(z,-1))[0] -
6*g_dist.template get<0>(key)[0];
++dom;
}
//
// ### WIKI 13 ###
//
// Finally we want a nice output to visualize the information stored by the distributed grid
//
g_dist.write("output");
//
// ### WIKI 14 ###
//
// Deinitialize the library
//
delete(global_v_cluster);
}
SUBDIRS := $(wildcard */.)
all clean:
for dir in $(SUBDIRS); do \
$(MAKE) -C $$dir $@; \
done
clean: $(SUBDIRS)
.PHONY: all clean $(SUBDIRS)
This package contains the examples. In order to compile the examples, go to the root folder where you installed
OpenFPM (for example /usr/local/OpenFPM_install) and copy the file example.mk from there.
Compile the examples with
make
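For example, a minimal sketch assuming OpenFPM was installed under /usr/local/OpenFPM_install and that this examples
folder is the current directory (each example Makefile includes example.mk via ../../example.mk, i.e. from two
directory levels above the example directory):
cp /usr/local/OpenFPM_install/example.mk .
make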
include ../../example.mk
CC=mpic++
LDIR =
OBJ = main.o
%.o: %.cpp
$(CC) -O3 -c --std=c++11 -o $@ $< $(INCLUDE_PATH)
vector: $(OBJ)
$(CC) -o $@ $^ $(CFLAGS) $(LIBS_PATH) $(LIBS)
all: vector
.PHONY: clean all
clean:
rm -f *.o *~ core vector
[pack]
files = main.cpp Makefile
#include "Vector/vector_dist.hpp"
#include "Decomposition/CartDecomposition.hpp"
#include "Point_test.hpp"
/*
* ### WIKI 1 ###
*
* ## Simple example
*
* This example shows several basic functionalities of the distributed vector
*
* ### WIKI END ###
*
*/
int main(int argc, char* argv[])
{
//
// ### WIKI 2 ###
//
// Here we initialize the library, then we create a uniform random generator between 0 and 1 to generate particles
// randomly in the domain, and we create a Box that defines our domain
//
init_global_v_cluster(&argc,&argv);
Vcluster & v_cl = *global_v_cluster;
typedef Point<2,float> s;
// create the random generator engine, seeded with the processor id so that
// each processor generates different particle positions
std::default_random_engine eg(v_cl.getProcessUnitID());
std::uniform_real_distribution<float> ud(0.0f, 1.0f);
Box<2,float> box({0.0,0.0},{1.0,1.0});
//
// ### WIKI 3 ###
//
// Here we are creating a distributed vector defined by the following parameters
//
// * dimensionality of the space where the objects live, 2D (first template parameter)
// * type of the space, float (second template parameter)
// * information stored by each object (third template parameter), in this case Point_test stores 4 scalars,
//   1 vector and an asymmetric tensor of rank 2
// * strategy used to decompose the space (fourth template parameter)
//
// The constructor instead requires:
//
// * the number of particles, 4096 in this case
// * the domain where this structure is defined
//
// The following constructs a vector where each processor has 4096 / N_proc (N_proc = number of processors)
// objects with an undefined position in space. This non-spatial decomposition is also called data-driven
// decomposition
//
vector_dist<2,float, Point_test<float>, CartDecomposition<2,float> > vd(4096,box);
//
// ### WIKI 5 ###
//
// Get an iterator that goes through the objects, which are in an undefined position state, and define their positions
//
auto it = vd.getIterator();
while (it.isNext())
{
auto key = it.get();
vd.template getPos<s::x>(key)[0] = ud(eg);
vd.template getPos<s::x>(key)[1] = ud(eg);
++it;
}
//
// ### WIKI 6 ###
//
// Once we have defined the positions, we distribute the objects according to the default decomposition.
// The default decomposition is created even before assigning positions to the objects
// (this will probably change in the future)
//
vd.map();
//
// ### WIKI 7 ###
//
// We get the object that stores the decomposition, then we iterate again across all the objects, count them
// and confirm that all the particles are local
//
size_t cnt = 0;
auto & ct = vd.getDecomposition();
it = vd.getIterator();
while (it.isNext())
{
auto key = it.get();
if (ct.isLocal(vd.template getPos<s::x>(key)) == false)
std::cerr << "Error particle is not local" << "\n";
cnt++;
++it;
}
//
// ### WIKI 8 ###
//
// cnt contains the number of objects owned by the local processor. If we are interested in the total number across all
// processors, we can sum across processors. First we get an instance of Vcluster, queue a sum operation on the
// variable cnt and finally call execute. All the operations are asynchronous; execute works like a barrier and ensures
// that all the queued operations are executed
//
v_cl.sum(cnt);
v_cl.execute();
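//
// (Not part of the original example: mirroring the grid example above, only the
//  master process could report the reduced total, e.g.
//
//    if (v_cl.getProcessUnitID() == 0)
//        std::cout << "Total number of particles: " << cnt << "\n";
//  )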
//
// ### WIKI 14 ###
//
// Deinitialize the library
//
delete(global_v_cluster);
}
@@ -88,7 +88,7 @@ class grid_dist_iterator<dim,device_grid,FREE>
  Vcluster_object_array<device_grid> & gList;
  //! Extension of each grid: domain and ghost + domain
- const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext;
+ openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext;
  //! Actual iterator
  grid_key_dx_iterator_sub<dim> a_it;
@@ -113,12 +113,28 @@ class grid_dist_iterator<dim,device_grid,FREE>
  public:
- /*! \brief Constructor of the distributed grid
+ /*! \brief Copy operator=
+  *
+  * \param tmp iterator to copy
+  *
+  */
+ grid_dist_iterator<dim,device_grid,FREE> & operator=(const grid_dist_iterator<dim,device_grid,FREE> & tmp)
+ {
+   g_c = tmp.g_c;
+   gList = tmp.gList;
+   gdb_ext = tmp.gdb_ext;
+   a_it.reinitialize(tmp.a_it);
+   m = tmp.m;
+   return *this;
+ }
+ /*! \brief Constructor of the distributed grid iterator
   *
   * \param gk std::vector of the local grid
   *
   */
- grid_dist_iterator(Vcluster_object_array<device_grid> & gk, const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext)
+ grid_dist_iterator(Vcluster_object_array<device_grid> & gk, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext)
  :g_c(0),gList(gk),gdb_ext(gdb_ext),m(0)
  {
    // Initialize the current iterator
@@ -224,7 +240,22 @@ class grid_dist_iterator<dim,device_grid,FIXED>
  public:
- /*! \brief Constructor of the distributed grid
+ /*! \brief Copy operator=
+  *
+  * \param tmp iterator to copy
+  *
+  */
+ grid_dist_iterator<dim,device_grid,FIXED> & operator=(const grid_dist_iterator<dim,device_grid,FIXED> & tmp)
+ {
+   g_c = tmp.g_c;
+   gList = tmp.gList;
+   gdb_ext = tmp.gdb_ext;
+   a_it.reinitialize(tmp.a_it);
+   return *this;
+ }
+ /*! \brief Constructor of the distributed grid iterator
   *
   * \param gk std::vector of the local grid
   *
......
@@ -271,7 +271,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_iterator_test_use_3d )
  long int big_step = k / 30;
  big_step = (big_step == 0)?1:big_step;
  long int small_step = 1;
-
  print_test_v( "Testing 3D vector k<=",k);
......