Commit 1c09878b authored by incardon's avatar incardon

All test working

parent e310f6cc
openfpm_data @ 6c8ba9db
Subproject commit e9c615ab034051ea1bb666930189c9b0ee745fc4
Subproject commit 6c8ba9db530c53c09c9212dff5098a5b4890fb23
......@@ -1044,19 +1044,18 @@ public:
v_cl.execute();
size_t size_r;
size_t size = gdb_ext_global.size();
if (v_cl.getProcessUnitID() == 0)
{
size_t size = gdb_ext_global.size();
for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
{
for (size_t i = 1; i < v_cl.getProcessingUnits(); i++)
v_cl.send(i,0,&size,sizeof(size_t));
}
size_r = size;
}
else
{
v_cl.recv(0,0,&size_r,sizeof(size_t));
}
v_cl.execute();
gdb_ext_global.resize(size_r);
......@@ -1077,52 +1076,6 @@ public:
v_cl.execute();
}
/*! \brief It gathers the local grids for all of the processors
 *
 * Processor 0 collects every processor's local grids via SGather, then
 * redistributes the gathered set so that every processor ends up with a
 * copy of all local grids.
 *
 * \param loc_grid_global output: on every processor, the vector of all
 *        processors' local-grid vectors
 *
 */
void getGlobalGrids(openfpm::vector<openfpm::vector<device_grid>> & loc_grid_global) const
{
#ifdef SE_CLASS2
	check_valid(this,8);
#endif
	v_cl.SGather(loc_grid,loc_grid_global,0);
	v_cl.execute();

	size_t size_r;

	if (v_cl.getProcessUnitID() == 0)
	{
		size_t size = loc_grid_global.size();

		// Broadcast the gathered size to the other processors.
		// Start from i = 1: rank 0 must not send to itself (no matching
		// recv is posted), and it already knows the size locally.
		for (size_t i = 1; i < v_cl.getProcessingUnits(); i++)
		{
			v_cl.send(i,0,&size,sizeof(size_t));
		}

		// Fix: size_r was previously left uninitialized on rank 0,
		// making the resize below read an indeterminate value.
		size_r = size;
	}
	else
	{
		v_cl.recv(0,0,&size_r,sizeof(size_t));
	}
	v_cl.execute();

	loc_grid_global.resize(size_r);

	if (v_cl.getProcessUnitID() == 0)
	{
		// Rank 0 already holds the gathered grids; send only to the others
		// (again skipping the erroneous self-send at i = 0).
		for (size_t i = 1; i < v_cl.getProcessingUnits(); i++)
		{
			v_cl.send(i,0,loc_grid_global);
		}
	}
	else
	{
		v_cl.recv(0,0,loc_grid_global);
	}
	v_cl.execute();
}
/*! \brief It return an iterator that span the full grid domain (each processor span its local domain)
*
......@@ -1898,7 +1851,12 @@ public:
H5Fclose(file);
}
void load_block(long int bid, hssize_t mpi_size_old, int * metadata_out, openfpm::vector<size_t> metadata_accum, hid_t plist_id, hid_t dataset_2)
void load_block(long int bid,
hssize_t mpi_size_old,
int * metadata_out,
openfpm::vector<size_t> & metadata_accum,
hid_t plist_id,
hid_t dataset_2)
{
/* if (mpi_size >= mpi_size_old)
{
......@@ -2037,12 +1995,12 @@ public:
printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
}
*/
size_t sum = 0;
/* size_t sum = 0;
for (int i = 0; i < mpi_size_old; i++)
{
sum += metadata_out[i];
}
}*/
// std::cout << "LOAD: sum: " << sum << std::endl;
......
......@@ -29,9 +29,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
if (v_cl.getProcessingUnits() >= 32)
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Saving Distributed 2D Grid..." << std::endl;
// grid size
size_t sz[2];
sz[0] = k;
......@@ -67,8 +64,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
count++;
}
std::cout << "Count: " << count << std::endl;
openfpm::vector<size_t> count_total;
v_cl.allGather(count,count_total);
v_cl.execute();
......@@ -78,15 +73,11 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
for (size_t i = 0; i < count_total.size(); i++)
sum += count_total.get(i);
std::cout << "Sum: " << sum << std::endl;
timer t;
t.start();
// Save the grid
g_dist.save("grid_dist_id.h5");
t.stop();
std::cout << "Saving time: " << t.getwct() << std::endl;
}
BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
......@@ -106,9 +97,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
if (v_cl.getProcessingUnits() >= 32)
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Loading Distributed 2D Grid..." << std::endl;
// grid size
size_t sz[2];
sz[0] = k;
......@@ -132,8 +120,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
g_dist.write("Loaded_grid");
g_dist.getDecomposition().write("Loaded_grid_decomposition");
std::cout << "Loading time: " << t.getwct() << std::endl;
auto it = g_dist.getDomainIterator();
size_t count = 0;
......@@ -154,8 +140,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
count++;
}
std::cout << "COOOOOOUNT: " << count << std::endl;
openfpm::vector<size_t> count_total;
v_cl.allGather(count,count_total);
v_cl.execute();
......
This diff is collapsed.
......@@ -19,21 +19,11 @@
BOOST_AUTO_TEST_SUITE( vd_hdf5_chckpnt_rstrt_test )
// Input data
// Number of particles
size_t k = 1000000;
// Dimensionality
const size_t dim = 3;
BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
{
Vcluster & v_cl = create_vcluster();
if (v_cl.getProcessUnitID() == 0)
std::cout << "Saving distributed vector" << std::endl;
Box<dim,float> box;
for (size_t i = 0; i < dim; i++)
......@@ -45,7 +35,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
// Boundary conditions
size_t bc[dim];
const size_t Ng = cbrt(k);
const size_t Ng = 32;
// we create a Grid iterator
size_t sz[dim] = {Ng,Ng,Ng};
......@@ -75,10 +65,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
++it;
}
//BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1));
//BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1));
//BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1));
vd.map();
// Put forces
......@@ -96,13 +82,37 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
++it2;
}
timer t;
t.start();
// Save the vector
vd.save("vector_dist.h5");
t.stop();
std::cout << "Saving time: " << t.getwct() << std::endl;
vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd2(0,box,bc,ghost);
vd2.load("vector_dist.h5");
// Check that vd and vd2 match
auto it3 = vd.getDomainIterator();
BOOST_REQUIRE_EQUAL(vd.size_local(),vd2.size_local());
bool check = true;
while (it3.isNext())
{
auto p = it3.get();
Point<3,float> p1 = vd.getPos(p);
Point<3,float> p2 = vd2.getPos(p);
check &= (p1 == p2);
check &= (vd.template getProp<0>(p)[0] == vd2.template getProp<0>(p)[0]);
check &= (vd.template getProp<0>(p)[1] == vd2.template getProp<0>(p)[1]);
check &= (vd.template getProp<0>(p)[2] == vd2.template getProp<0>(p)[2]);
++it3;
}
BOOST_REQUIRE_EQUAL(check,true);
}
......@@ -111,9 +121,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
{
Vcluster & v_cl = create_vcluster();
if (v_cl.getProcessUnitID() == 0)
std::cout << "Loading distributed vector" << std::endl;
Box<dim,float> box;
for (size_t i = 0; i < dim; i++)
......@@ -129,58 +136,24 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
bc[i] = NON_PERIODIC;
const size_t Ng = cbrt(k);
// we create a Grid iterator
size_t sz[3] = {Ng,Ng,Ng};
const size_t Ng = 32;
// ghost
Ghost<dim,float> ghost(1.0/(Ng-2));
vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,ghost);
vd.load("vector_dist.h5");
timer t;
t.start();
// Save the vector
vd.load("vector_dist.h5");
t.stop();
std::cout << "Loading time: " << t.getwct() << std::endl;
// Load the vector
vd.load("test_data/vector_dist_24.h5");
/////////////////// Checking data ///////////////////////
// Check total number of particles
size_t n_part = vd.size_local();
openfpm::vector<size_t> tot_n_part;
v_cl.allGather(n_part,tot_n_part);
v_cl.sum(n_part);
v_cl.execute();
size_t sum = 0;
for (size_t i = 0; i < tot_n_part.size(); i++)
sum += tot_n_part.get(i);
BOOST_REQUIRE_EQUAL(sum,k);
//std::cout << "Sum: " << sum << std::endl;
// Check spacing (positions)
auto it = vd.getGridIterator(sz);
while (it.isNext())
{
//auto key = it.get();
++it;
}
BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1));
BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1));
BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1));
BOOST_REQUIRE_EQUAL(n_part,Ng*Ng*Ng);
// Check properties
......@@ -190,9 +163,9 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
{
auto key = it2.get();
//Put the forces
// Check the properties
for (size_t i = 0; i < dim; i++)
BOOST_CHECK_CLOSE(vd.template getProp<0>(key)[i],0.51234 + vd.getPos(key)[0] + vd.getPos(key)[1]+ vd.getPos(key)[2],0.0001);
BOOST_REQUIRE_EQUAL(vd.template getProp<0>(key)[i],(float)(0.51234 + vd.getPos(key)[0] + vd.getPos(key)[1]+ vd.getPos(key)[2]));
++it2;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment