From f107e601d3488e8aba9b77eaf3f61dd2a3d9dae5 Mon Sep 17 00:00:00 2001 From: Yaroslav <beorn.90@gmail.com> Date: Thu, 1 Dec 2016 17:40:19 +0100 Subject: [PATCH] Improved vector and grid HDF5 save/load tests. Added getOldDomainIterator() member for grid_dist --- openfpm_data | 2 +- src/Grid/grid_dist_id.hpp | 59 +++-- ...grid_dist_id_HDF5_chckpnt_restart_test.hpp | 18 +- src/Vector/vector_dist.hpp | 37 +-- .../vector_dist_HDF5_chckpnt_restart_test.hpp | 232 ++++-------------- 5 files changed, 123 insertions(+), 225 deletions(-) diff --git a/openfpm_data b/openfpm_data index 68d63a0a5..e62e281df 160000 --- a/openfpm_data +++ b/openfpm_data @@ -1 +1 @@ -Subproject commit 68d63a0a530ecda967f56c2f761ead6dcab64de8 +Subproject commit e62e281df21687cfa081d4ac9460a8abf4636ec0 diff --git a/src/Grid/grid_dist_id.hpp b/src/Grid/grid_dist_id.hpp index c2d571740..88796bf20 100644 --- a/src/Grid/grid_dist_id.hpp +++ b/src/Grid/grid_dist_id.hpp @@ -1117,6 +1117,27 @@ public: v_cl.execute(); } + /*! \brief It return an iterator that span the full grid domain (each processor span its local domain) + * + * \return the iterator + * + */ + grid_dist_iterator<dim,device_grid,FREE> getOldDomainIterator() const + { +#ifdef SE_CLASS2 + check_valid(this,8); +#endif + + grid_key_dx<dim> stop(ginfo_v.getSize()); + grid_key_dx<dim> one; + one.one(); + stop = stop - one; + + grid_dist_iterator<dim,device_grid,FREE> it(loc_grid_old,gdb_ext_old,stop); + + return it; + } + /*! \brief It return an iterator that span the full grid domain (each processor span its local domain) * * \return the iterator @@ -1683,8 +1704,8 @@ public: inline void save(const std::string & filename) const { - //std::cout << "Loc_grid.size() before save: " << loc_grid.size() << std::endl; - //std::cout << "Gdb_ext.size() before save: " << gdb_ext.size() << std::endl; + std::cout << "Loc_grid.size() before save: " << loc_grid.size() << std::endl; + std::cout << "Gdb_ext.size() before save: " << gdb_ext.size() << std::endl; //Pack_request vector size_t req = 0; @@ -1693,7 +1714,7 @@ public: Packer<decltype(loc_grid),HeapMemory>::packRequest(loc_grid,req); Packer<decltype(gdb_ext),HeapMemory>::packRequest(gdb_ext,req); - //std::cout << "Req: " << req << std::endl; + std::cout << "Req: " << req << std::endl; // allocate the memory HeapMemory pmem; @@ -1730,7 +1751,7 @@ public: H5Pclose(plist_id); size_t sz = pmem.size(); - //std::cout << "Pmem.size: " << pmem.size() << std::endl; + std::cout << "Pmem.size: " << pmem.size() << std::endl; openfpm::vector<size_t> sz_others; v_cl.allGather(sz,sz_others); v_cl.execute(); @@ -1787,7 +1808,7 @@ public: offset[0] += sz_others.get(i); } - //std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl; + std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl; int metadata[mpi_size]; @@ -1867,7 +1888,7 @@ public: //Create data space in memory hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL); -/* + if (mpi_rank == 0) { hssize_t size; @@ -1877,10 +1898,10 @@ public: size = H5Sget_select_npoints (file_dataspace_id); printf ("dataspace_id size: %llu\n", size); } -*/ + // Read the dataset. 
H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out); -/* + if (mpi_rank == 0) { std::cout << "Metadata_out[]: "; @@ -1890,7 +1911,7 @@ public: } std::cout << " " << std::endl; } -*/ + //Open dataset hid_t dataset_2 = H5Dopen (file, "grid_dist", H5P_DEFAULT); @@ -1956,7 +1977,7 @@ public: //hsize_t stride[1] = {1}; hsize_t count[1] = {1}; - //std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl; + std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl; //Select file dataspace @@ -1975,7 +1996,7 @@ public: //Create data space in memory hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL); hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL); -/* + if (mpi_rank == 0) { hssize_t size2; @@ -1995,7 +2016,7 @@ public: size2 = H5Sget_select_npoints (file_dataspace_id_3); printf ("LOAD: dataspace_id_3 size: %llu\n", size2); } -*/ + size_t sum = 0; for (int i = 0; i < mpi_size_old; i++) @@ -2004,7 +2025,7 @@ public: } - //std::cout << "LOAD: sum: " << sum << std::endl; + std::cout << "LOAD: sum: " << sum << std::endl; // allocate the memory HeapMemory pmem; @@ -2023,7 +2044,7 @@ public: mem.allocate(pmem.size()); mem2.allocate(pmem2.size()); - //std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl; + std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl; Unpack_stat ps; @@ -2038,23 +2059,23 @@ public: Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem2,loc_grid_old_unp,ps2); Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem2,gdb_ext_old_unp,ps2); -/* + std::cout << "Loc_grid_old.size() before merge: " << loc_grid_old.size() << std::endl; std::cout << "Gdb_ext_old.size() before merge: " << gdb_ext_old.size() << std::endl; std::cout << "Loc_grid_old_unp.size() before merge: " << loc_grid_old_unp.size() << std::endl; std::cout << "Gdb_ext_old_unp.size() before merge: " << gdb_ext_old_unp.size() << std::endl; -*/ + for (size_t i = 0; i < loc_grid_old_unp.size(); i++) loc_grid_old.add(loc_grid_old_unp.get(i)); for (size_t i = 0; i < gdb_ext_old_unp.size(); i++) gdb_ext_old.add(gdb_ext_old_unp.get(i)); -/* + std::cout << "Loc_grid_old.size() after merge: " << loc_grid_old.size() << std::endl; std::cout << "Gdb_ext_old.size() after merge: " << gdb_ext_old.size() << std::endl; std::cout << "*********************************" << std::endl; -*/ + // Close the dataset. H5Dclose(dataset); H5Dclose(dataset_2); @@ -2066,7 +2087,7 @@ public: delete &mem; // Map the distributed grid - map(); + //map(); } //! 
Define friend classes diff --git a/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp index 9fb14ce28..7868d9f36 100644 --- a/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp +++ b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp @@ -16,7 +16,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test ) { // Input data - size_t k = 1000; + size_t k = 100; size_t ghost_part = 0.02; @@ -63,7 +63,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test ) { // Input data - size_t k = 1000; + size_t k = 100; size_t ghost_part = 0.02; @@ -98,7 +98,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test ) std::cout << "Loading time: " << t.getwct() << std::endl; - auto it = g_dist.getDomainIterator(); + auto it = g_dist.getOldDomainIterator(); size_t count = 0; @@ -112,7 +112,17 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test ) ++it; count++; } - BOOST_REQUIRE_EQUAL(count, (size_t)1000*1000); + + openfpm::vector<size_t> count_total; + v_cl.allGather(count,count_total); + v_cl.execute(); + + size_t sum = 0; + + for (size_t i = 0; i < count_total.size(); i++) + sum += count_total.get(i); + + BOOST_REQUIRE_EQUAL(sum, (size_t)k*k); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/Vector/vector_dist.hpp b/src/Vector/vector_dist.hpp index c087d315b..2d94c32ff 100644 --- a/src/Vector/vector_dist.hpp +++ b/src/Vector/vector_dist.hpp @@ -983,7 +983,7 @@ public: Packer<decltype(v_pos),HeapMemory>::packRequest(v_pos,req); Packer<decltype(v_prp),HeapMemory>::packRequest(v_prp,req); - std::cout << "Req: " << req << std::endl; + //std::cout << "Req: " << req << std::endl; // allocate the memory HeapMemory pmem; @@ -1081,7 +1081,7 @@ public: offset[0] += sz_others.get(i); } - std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl; + //std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl; int metadata[mpi_size]; @@ -1143,10 +1143,10 @@ public: hid_t file_dataspace_id = H5Dget_space(dataset); hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id); - +/* if (mpi_rank == 0) printf ("\nOld MPI size: %llu\n", mpi_size_old); - +*/ //Where to read metadata int metadata_out[mpi_size_old]; @@ -1160,8 +1160,7 @@ public: //Create data space in memory hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL); - - +/* if (mpi_rank == 0) { hssize_t size; @@ -1171,10 +1170,10 @@ public: size = H5Sget_select_npoints (file_dataspace_id); printf ("LOAD: dataspace_id size: %llu\n", size); } - +*/ // Read the dataset. 
H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out); - +/* if (mpi_rank == 0) { std::cout << "Metadata_out[]: "; @@ -1184,7 +1183,7 @@ public: } std::cout << " " << std::endl; } - +*/ //Open dataset hid_t dataset_2 = H5Dopen (file, "vector_dist", H5P_DEFAULT); @@ -1268,7 +1267,7 @@ public: //hsize_t stride[1] = {1}; hsize_t count[1] = {1}; - std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl; + //std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl; /* std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << std::endl; for (size_t i = 0; i < offset0.get(mpi_rank).size(); i++) @@ -1305,7 +1304,7 @@ public: //Create data space in memory hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL); hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL); - +/* if (mpi_rank == 0) { hssize_t size2; @@ -1325,7 +1324,7 @@ public: size2 = H5Sget_select_npoints (file_dataspace_id_3); printf ("LOAD: dataspace_id_3 size: %llu\n", size2); } - +*/ size_t sum = 0; for (int i = 0; i < mpi_size_old; i++) @@ -1334,7 +1333,7 @@ public: } - std::cout << "LOAD: sum: " << sum << std::endl; + //std::cout << "LOAD: sum: " << sum << std::endl; // allocate the memory HeapMemory pmem; @@ -1353,7 +1352,7 @@ public: mem.allocate(pmem.size()); mem2.allocate(pmem2.size()); - std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl; + //std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl; Unpack_stat ps; @@ -1376,8 +1375,8 @@ public: H5Fclose(file); H5Pclose(plist_id); - std::cout << "V_pos.size(): " << v_pos.size() << std::endl; - std::cout << "V_pos_unp.size(): " << v_pos_unp.size() << std::endl; + //std::cout << "V_pos.size(): " << v_pos.size() << std::endl; + //std::cout << "V_pos_unp.size(): " << v_pos_unp.size() << std::endl; mem.decRef(); delete &mem; @@ -1387,10 +1386,12 @@ public: g_m = v_pos.size(); - std::cout << "V_pos.size() after merge: " << v_pos.size() << std::endl; + //std::cout << "V_pos.size() after merge: " << v_pos.size() << std::endl; + + // Map particles map(); - std::cout << "V_pos.size() after merge and map: " << v_pos.size() << std::endl; + //std::cout << "V_pos.size() after merge and map: " << v_pos.size() << std::endl; } /*! \brief Output particle position and properties diff --git a/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp index 4764374e1..c30686fbf 100644 --- a/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp +++ b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp @@ -20,19 +20,19 @@ BOOST_AUTO_TEST_SUITE( vd_hdf5_chckpnt_rstrt_test ) // Input data -//Number of particles + +// Number of particles size_t k = 1000; + // Dimensionality const size_t dim = 3; BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test ) { - ///////////////// - Vcluster & v_cl = create_vcluster(); if (v_cl.getProcessUnitID() == 0) - std::cout << "Saving Distributed 3D Vector..." 
<< std::endl; + std::cout << "Saving distributed vector" << std::endl; Box<dim,float> box; @@ -45,55 +45,59 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test ) // Boundary conditions size_t bc[dim]; + const size_t Ng = cbrt(k); + + // we create a Grid iterator + size_t sz[3] = {Ng,Ng,Ng}; + for (size_t i = 0; i < dim; i++) bc[i] = NON_PERIODIC; // ghost - Ghost<dim,float> ghost(0.1); - - vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(k,box,bc,ghost); + Ghost<dim,float> ghost(1.0/(Ng-2)); - // Initialize a dist vector - //vd_initialize<dim>(vd, v_cl, k); + vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,ghost); - auto it = vd.getDomainIterator(); + // Put particles - std::default_random_engine eg(v_cl.getProcessUnitID()*4313); - std::uniform_real_distribution<float> ud(0.0f, 1.0f); + auto it = vd.getGridIterator(sz); while (it.isNext()) { + vd.add(); + auto key = it.get(); - for (size_t i = 0; i < dim; i++) - { - vd.getPos(key)[i] = ud(eg); - //std::cout << "Value: " << vd.getPos(key)[i] << std::endl; - } + vd.getLastPos()[0] = key.get(0) * it.getSpacing(0); + vd.getLastPos()[1] = key.get(1) * it.getSpacing(1); + vd.getLastPos()[2] = key.get(2) * it.getSpacing(2); ++it; } - std::cout << "Size_local: " << vd.size_local_with_ghost() << std::endl; + BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1)); + BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1)); + BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1)); + + //std::cout << "Size_local: " << vd.size_local_with_ghost() << std::endl; vd.map(); - std::cout << "Size_local after map: " << vd.size_local_with_ghost() << std::endl; + //std::cout << "Size_local after map: " << vd.size_local_with_ghost() << std::endl; - //vd.template ghost_get<0>(); + // Put forces - //std::cout << "Size_local after ghost get: " << vd.size_local_with_ghost() << std::endl; + auto it2 = vd.getDomainIterator(); - auto it_2 = vd.getDomainIterator(); - - while (it_2.isNext()) + while (it2.isNext()) { - auto key = it_2.get(); + auto key = it2.get(); //Put the forces for (size_t i = 0; i < dim; i++) vd.template getProp<0>(key)[i] = 0.51234; - ++it_2; + + ++it2; } timer t; @@ -110,9 +114,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test ) Vcluster & v_cl = create_vcluster(); if (v_cl.getProcessUnitID() == 0) - std::cout << "Loading Distributed 3D Vector..." 
<< std::endl; - - const size_t dim = 3; + std::cout << "Loading distributed vector" << std::endl; Box<dim,float> box; @@ -128,8 +130,14 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test ) for (size_t i = 0; i < dim; i++) bc[i] = NON_PERIODIC; + + const size_t Ng = cbrt(k); + + // we create a Grid iterator + size_t sz[3] = {Ng,Ng,Ng}; + // ghost - Ghost<dim,float> ghost(0.1); + Ghost<dim,float> ghost(1.0/(Ng-2)); vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,ghost); @@ -141,7 +149,9 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test ) std::cout << "Loading time: " << t.getwct() << std::endl; + /////////////////// Checking data /////////////////////// + // Check total number of particles size_t n_part = vd.size_local(); openfpm::vector<size_t> tot_n_part; v_cl.allGather(n_part,tot_n_part); @@ -152,99 +162,16 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test ) for (size_t i = 0; i < tot_n_part.size(); i++) sum += tot_n_part.get(i); - // Check total number of real particles BOOST_REQUIRE_EQUAL(sum,k); -/* - auto it = vd.getDomainIterator(); - - while (it.isNext()) - { - auto key = it.get(); - - for (size_t i = 0; i < dim; i++) - { - std::cout << "Pos: " << vd.getPos(key)[i] << std::endl; - } - - ++it; - } -*/ - - //vd.template ghost_get<0>(); - - - auto it_2 = vd.getDomainIterator(); - - while (it_2.isNext()) - { - auto key = it_2.get(); - - //Put the forces - for (size_t i = 0; i < dim; i++) - //BOOST_CHECK_CLOSE(vd.template getProp<0>(key)[i],0.51234,0.0001); - std::cout << "Prop: " << vd.template getProp<0>(key)[i] << std::endl; - ++it_2; - } - -} - -BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test_2 ) -{ - // Input data - // Number of particles - size_t k = 100; - - //Dimensinality of the space - const size_t dim = 3; - - ///////////////// - - Vcluster & v_cl = create_vcluster(); - - if (v_cl.getProcessUnitID() == 0) - std::cout << "Saving distributed vector" << std::endl; - - - Box<dim,float> box; - - for (size_t i = 0; i < dim; i++) - { - box.setLow(i,0.0); - box.setHigh(i,1.0); - } - - // Boundary conditions - size_t bc[dim]; - - const size_t Ng = 128; - - // we create a 128x128x128 Grid iterator - size_t sz[3] = {Ng,Ng,Ng}; - - for (size_t i = 0; i < dim; i++) - bc[i] = NON_PERIODIC; - - // ghost - Ghost<dim,float> ghost(1.0/(Ng-2)); - - vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(k,box,bc,ghost); - - // Initialize a dist vector - //vd_initialize<dim>(vd, v_cl, k); + // Check spacing (positions) auto it = vd.getGridIterator(sz); while (it.isNext()) { - vd.add(); - auto key = it.get(); - vd.getLastPos()[0] = key.get(0) * it.getSpacing(0); - vd.getLastPos()[1] = key.get(1) * it.getSpacing(1); - vd.getLastPos()[2] = key.get(2) * it.getSpacing(2); - ++it; } @@ -252,83 +179,22 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test_2 ) BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1)); BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1)); - vd.map(); - - vd.template ghost_get<0>(); - - // The random generator engine - std::default_random_engine eg(v_cl.getProcessUnitID()*4313); - std::uniform_real_distribution<float> ud(0.0f, 1.0f); - - // Create a vector of random elements on each processor +/* + // Check properties - auto it_2 = vd.getIterator(); + auto it2 = vd.getDomainIterator(); - while (it.isNext()) + while (it2.isNext()) { - auto key = it_2.get(); + auto key = it2.get(); //Put the forces for (size_t i = 0; i < dim; i++) - vd.template getProp<0>(key)[i] = ud(eg); - //vd.getPos(key)[i] - ++it_2; - } - - // 
Save the vector - vd.save("vector_dist_2.h5"); -} - -BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test_2 ) -{ - Vcluster & v_cl = create_vcluster(); - - if (v_cl.getProcessUnitID() == 0) - std::cout << "Loading distributed vector" << std::endl; - - const size_t dim = 3; - - Box<dim,float> box; - - for (size_t i = 0; i < dim; i++) - { - box.setLow(i,0.0); - box.setHigh(i,1.0); - } - - // Boundary conditions - size_t bc[dim]; - - for (size_t i = 0; i < dim; i++) - bc[i] = NON_PERIODIC; - - - const size_t Ng = 128; - - // we create a 128x128x128 Grid iterator - size_t sz[3] = {Ng,Ng,Ng}; - - // ghost - Ghost<dim,float> ghost(1.0/(Ng-2)); - - vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,ghost); - - vd.load("vector_dist_2.h5"); - - auto NN = vd.getCellList(0.5); - - auto it = vd.getGridIterator(sz); - - while (it.isNext()) - { - auto key = it.get(); + BOOST_CHECK_CLOSE(vd.template getProp<0>(key)[i],0.51234,0.0001); - ++it; + ++it2; } - - BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1)); - BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1)); - BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1)); +*/ } BOOST_AUTO_TEST_SUITE_END() -- GitLab
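
Usage sketch for the grid restart path exercised by grid_dist_id_hdf5_load_test: after grid_dist_id::load() the unpacked data sits in the "old" local grids (loc_grid_old / gdb_ext_old, and load() no longer calls map()), so it is visited through the new getOldDomainIterator() and the per-processor counts are reduced with Vcluster::allGather(). Only the calls visible in this patch are taken from it; the include set, the openfpm_init/openfpm_finalize entry points, the grid_dist_id constructor form, the ghost width and the file name "grid_restart.h5" are illustrative assumptions, not part of the patch.

    #include <iostream>
    #include "Grid/grid_dist_id.hpp"

    int main(int argc, char* argv[])
    {
        openfpm_init(&argc,&argv);          // assumed OpenFPM entry point

        Vcluster & v_cl = create_vcluster();

        // same sizes as the test: a k x k grid
        size_t k = 100;
        size_t sz[2] = {k,k};

        Box<2,float> domain;
        for (size_t i = 0; i < 2; i++)
        {
            domain.setLow(i,0.0);
            domain.setHigh(i,1.0);
        }

        // ghost width and constructor form are assumptions (not shown in the patch hunks)
        Ghost<2,float> g(0.02);
        grid_dist_id<2,float,aggregate<float>,CartDecomposition<2,float>> g_dist(sz,domain,g);

        // illustrative file name: a checkpoint previously written with g_dist.save(...)
        g_dist.load("grid_restart.h5");

        // load() leaves the data in the old local grids, so iterate them
        // with getOldDomainIterator() rather than getDomainIterator()
        auto it = g_dist.getOldDomainIterator();

        size_t count = 0;
        while (it.isNext())
        {
            ++it;
            count++;
        }

        // each processor only counted its local points: gather and sum globally
        openfpm::vector<size_t> count_total;
        v_cl.allGather(count,count_total);
        v_cl.execute();

        size_t sum = 0;
        for (size_t i = 0; i < count_total.size(); i++)
            sum += count_total.get(i);

        // the checkpoint was written from a k x k grid
        if (sum != k*k)
            std::cerr << "restart check failed: " << sum << " != " << k*k << std::endl;

        openfpm_finalize();
    }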
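
The vector_dist side follows the same pattern, reworked in this patch to place exactly one particle per node of an Ng x Ng x Ng grid iterator so that the expected global count after load() is known. A minimal sketch under the same assumptions (entry points, includes and the file name "vector_restart.h5" are illustrative); the particle placement, map()/save()/load() and the allGather() check mirror vector_dist_hdf5_save_test and vector_dist_hdf5_load_test:

    #include <cmath>
    #include <iostream>
    #include "Vector/vector_dist.hpp"

    int main(int argc, char* argv[])
    {
        openfpm_init(&argc,&argv);          // assumed OpenFPM entry point

        Vcluster & v_cl = create_vcluster();

        const size_t dim = 3;
        size_t k = 1000;                    // total number of particles
        const size_t Ng = cbrt(k);          // Ng^3 == k, as in the test
        size_t sz[3] = {Ng,Ng,Ng};

        Box<dim,float> box;
        for (size_t i = 0; i < dim; i++)
        {
            box.setLow(i,0.0);
            box.setHigh(i,1.0);
        }

        size_t bc[dim];
        for (size_t i = 0; i < dim; i++)
            bc[i] = NON_PERIODIC;

        Ghost<dim,float> ghost(1.0/(Ng-2));

        // start empty, add one particle per grid node, then checkpoint
        vector_dist<dim,float,aggregate<float[dim]>,CartDecomposition<dim,float>> vd(0,box,bc,ghost);

        auto it = vd.getGridIterator(sz);
        while (it.isNext())
        {
            vd.add();

            auto key = it.get();
            vd.getLastPos()[0] = key.get(0) * it.getSpacing(0);
            vd.getLastPos()[1] = key.get(1) * it.getSpacing(1);
            vd.getLastPos()[2] = key.get(2) * it.getSpacing(2);

            ++it;
        }

        vd.map();
        vd.save("vector_restart.h5");       // illustrative file name

        // restart into a fresh, empty vector
        vector_dist<dim,float,aggregate<float[dim]>,CartDecomposition<dim,float>> vd2(0,box,bc,ghost);
        vd2.load("vector_restart.h5");

        // load() already calls map() internally, so summing size_local()
        // over all processors must give back the original k particles
        size_t n_part = vd2.size_local();
        openfpm::vector<size_t> tot_n_part;
        v_cl.allGather(n_part,tot_n_part);
        v_cl.execute();

        size_t sum = 0;
        for (size_t i = 0; i < tot_n_part.size(); i++)
            sum += tot_n_part.get(i);

        if (sum != k)
            std::cerr << "restart check failed: " << sum << " != " << k << std::endl;

        openfpm_finalize();
    }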