Commit 265d602f authored by incardon's avatar incardon

Starting to integrate grid with sparse grid

parent 97c6281c
#!groovy
timeout(20)
timeout(180)
{
parallel (
......@@ -126,3 +127,4 @@ parallel (
)
}
openfpm_data @ d125ec9a
Subproject commit 26d8dfe6f8f52cff802c9ab4858e8cf7c9fbcdb1
Subproject commit d125ec9a7a9de358e2fe74e0df55495fd476646e
......@@ -22,6 +22,7 @@
#include "hdf5.h"
#include "grid_dist_id_comm.hpp"
#include "HDF5_wr/HDF5_wr.hpp"
#include "SparseGrid/SparseGrid.hpp"
//! Internal ghost box sent to construct external ghost box into the other processors
template<unsigned int dim>
......@@ -1578,6 +1579,30 @@ public:
return it;
}
/*! \brief Get a grid Iterator
 *
 * For a dense grid getGridIterator is equivalent to getDomainIterator.
 * For a sparse distributed grid getDomainIterator goes only across the
 * inserted points, while the grid iterator runs across every grid point
 * independently of whether the point has been inserted or not.
 *
 * \return a Grid iterator
 *
 */
inline grid_dist_id_iterator_dec<Decomposition> getGridIterator()
{
	grid_key_dx<dim> kbegin;
	grid_key_dx<dim> kend;

	// Span the whole grid: [0, g_sz[d]-1] along every dimension
	for (size_t d = 0 ; d < dim ; d++)
	{
		kbegin.set_d(d,0);
		kend.set_d(d,g_sz[d] - 1);
	}

	grid_dist_id_iterator_dec<Decomposition> git(getDecomposition(),g_sz,kbegin,kend);

	return git;
}
/*! \brief It return an iterator that span the full grid domain (each processor span its local domain)
*
* \return the iterator
......@@ -1712,6 +1737,30 @@ public:
return false;
}
/*! \brief Insert an element in the grid
 *
 * On a dense grid this function is equivalent to get; on a sparse grid it
 * inserts the grid point. When the point already exists it returns a
 * reference to the already existing point.
 *
 * \tparam p property to get (is an integer)
 *
 * \param v1 grid_key that identify the element in the grid
 *
 * \return a reference to the inserted element
 *
 */
template <unsigned int p>inline auto insert(const grid_dist_key_dx<dim> & v1)
-> typename std::add_lvalue_reference
<
decltype(loc_grid.get(v1.getSub()).template insert<p>(v1.getKey()))
>::type
{
#ifdef SE_CLASS2
	check_valid(this,8);
#endif
	// resolve the local sub-grid first, then insert the key into it
	auto & lgrid = loc_grid.get(v1.getSub());
	return lgrid.template insert<p>(v1.getKey());
}
/*! \brief Get the reference of the selected element
*
* \tparam p property to get (is an integer)
......@@ -2133,5 +2182,6 @@ public:
};
//! Convenience alias: a distributed sparse grid, i.e. a grid_dist_id whose
//! local grids are sgrid_cpu (sparse) on a CartDecomposition with HeapMemory
template<unsigned int dim, typename St, typename T> using sgrid_dist_id = grid_dist_id<dim,St,T,CartDecomposition<dim,St>,HeapMemory,sgrid_cpu<dim,T,St>>;
#endif
......@@ -731,14 +731,6 @@ public:
for ( size_t i = 0 ; i < eg_box.size() ; i++ )
{
prp_recv.push_back(eg_box.get(i).recv_pnt * sizeof(prp_object) + sizeof(size_t)*eg_box.get(i).n_r_box);
// for each external ghost box
/* for (size_t j = 0 ; j < eg_box.get(i).bid.size() ; j++)
{
// External ghost box
Box<dim,size_t> g_eg_box = eg_box.get(i).bid.get(j).g_e_box;
prp_recv[prp_recv.size()-1] += g_eg_box.getVolumeKey() * sizeof(prp_object) + sizeof(size_t);
}*/
}
size_t tot_recv = ExtPreAlloc<Memory>::calculateMem(prp_recv);
......@@ -825,8 +817,10 @@ public:
Box<dim,size_t> box = eg_box.get(i).bid.get(nle_id).l_e_box;
Box<dim,size_t> rbox = eg_box.get(i).bid.get(nle_id).lr_e_box;
loc_grid.get(n_sub_id).copy_to(loc_grid.get(sub_id),rbox,box);
// sub-grid where to unpack
grid_key_dx_iterator_sub<dim> src(loc_grid.get(sub_id).getGrid(),rbox.getKP1(),rbox.getKP2());
/* grid_key_dx_iterator_sub<dim> src(loc_grid.get(sub_id).getGrid(),rbox.getKP1(),rbox.getKP2());
grid_key_dx_iterator_sub<dim> dst(loc_grid.get(n_sub_id).getGrid(),box.getKP1(),box.getKP2());
while (src.isNext())
......@@ -838,7 +832,7 @@ public:
++src;
++dst;
}
}*/
}
}
......
LINKLIBS = $(HDF5_LDFLAGS) $(HDF5_LIBS) $(OPENMP_LDFLAGS) $(LIBHILBERT_LIB) $(METIS_LIB) $(PTHREAD_LIBS) $(OPT_LIBS) $(BOOST_LDFLAGS) $(BOOST_IOSTREAMS_LIB) $(CUDA_LIBS) $(PETSC_LIB) $(PARMETIS_LIB) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(BOOST_CHRONO_LIB) $(BOOST_TIMER_LIB) $(BOOST_SYSTEM_LIB) $(LIBIFCORE)
noinst_PROGRAMS = pdata
pdata_SOURCES = main.cpp pdata_performance.cpp Grid/tests/grid_dist_id_unit_test.cpp Amr/grid_dist_amr_unit_tests.cpp lib/pdata.cpp Amr/tests/amr_base_unit_tests.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
pdata_SOURCES = main.cpp pdata_performance.cpp Grid/tests/grid_dist_id_unit_test.cpp Grid/tests/sgrid_dist_id_unit_tests.cpp Amr/grid_dist_amr_unit_tests.cpp lib/pdata.cpp Amr/tests/amr_base_unit_tests.cpp ../openfpm_devices/src/memory/HeapMemory.cpp ../openfpm_devices/src/memory/PtrMemory.cpp ../openfpm_vcluster/src/VCluster/VCluster.cpp ../openfpm_devices/src/Memleak_check.cpp
pdata_CXXFLAGS = $(HDF5_CPPFLAGS) $(OPENMP_CFLAGS) $(AM_CXXFLAGS) $(LIBHILBERT_INCLUDE) $(PETSC_INCLUDE) $(CUDA_CFLAGS) $(INCLUDES_PATH) $(PARMETIS_INCLUDE) $(METIS_INCLUDE) $(BOOST_CPPFLAGS) $(H5PART_INCLUDE) -DPARALLEL_IO -Wno-unused-local-typedefs
pdata_CFLAGS = $(CUDA_CFLAGS)
pdata_LDADD = $(LINKLIBS) -lparmetis -lmetis
......
......@@ -524,8 +524,8 @@ BOOST_AUTO_TEST_CASE( vector_dist_symmetric_cell_list )
ret &= vd.getPropRead<1>(p) == vd.getPropRead<0>(p);
vd.getPropRead<3>(p).sort();
vd.getPropRead<4>(p).sort();
vd.getPropWrite<3>(p).sort();
vd.getPropWrite<4>(p).sort();
ret &= vd.getPropRead<3>(p).size() == vd.getPropRead<4>(p).size();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment