/*
 * gargabe.hpp
 *
 *  Created on: Jan 13, 2015
 *      Author: i-bird
 */

#ifndef GARGABE_HPP_
#define GARGABE_HPP_



	template <unsigned int j, unsigned int i, typename Graph> void optimize(size_t start_p, Graph & graph)
	{
		// We assume that Graph is the representation of a Cartesian graph,
		// meaning that the direction d is at the child d

		// Create a Hyper-cube

		HyperCube<dim> hyp;

		// Get the number of wavefronts

		size_t n_wf = hyp.getNumberOfElements_R(0);

		// Get the number of intersecting wavefronts



		// Get the number of sub-dimensional common wavefronts,
		// basically a list of all the sub-domains common to two or more wavefronts

		// Create n_wf wavefront queue

		openfpm::vector<wavefront> v_w;
		v_w.reserve(n_wf);

		// direction of expansion

		size_t domain_id = 0;
		int exp_dir = 0;
		bool can_expand = true;

		// while it is possible to expand

		while (can_expand)
		{
			// for each direction of expansion expand the wavefront

			for (size_t d = 0 ; d < n_wf ; d++)
			{
				// get the wavefront at direction d

				openfpm::vector<size_t> & wf_d = v_w.get<wavefront::domains>(d);

				// flag to indicate if the wavefront can expand

				bool w_can_expand = true;

				// for each subdomain

				for (size_t sub = 0 ; sub < wf_d.size() ; sub++)
				{
					// check if the adjacent domain in direction d exists
					// and has the same id

					// get the starting subdomain
					size_t sub_w = wf_d.get<0>(sub);

					// we get the processor id of the neighborhood sub-domain on direction d
					size_t exp_p = graph.getChild(sub_w,d).template get<j>();

					// we check if it is the same processor id
					if (exp_p != domain_id)
					{
						w_can_expand = false;
					}
				}

				// if we can expand the wavefront expand it
				if (w_can_expand == true)
				{
					// for each subdomain
					for (size_t sub = 0 ; sub < wf_d.size() ; sub++)
					{
						// update the position of the wavefront
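						// (gh is not defined in this file; it is assumed to be the
						// grid-info object whose stride(d) gives the linearised
						// offset of one step along direction d)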
						wf_d.get<0>(sub) = wf_d.get<0>(sub) + gh.stride(d);
					}

					// here we add sub-domains to all the other queues
					// get the face of the hyper-cube

					SubHyperCube<dim,dim-1> sub_hyp = hyp.getSubHyperCube(d);

					std::vector<comb<dim>> q_comb = sub_hyp.getCombinations_R(dim-2);
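
					// Completion sketch (an assumption, not the author's code):
					// for each combination adjacent to the face d, locate the
					// corresponding wavefront queue and add the expanded
					// sub-domains to it; the LinId() mapping from a combination
					// to a wavefront index is hypothetical.
					//
					// for (size_t k = 0 ; k < q_comb.size() ; k++)
					// {
					//	size_t wf_id = hyp.LinId(q_comb[k]);
					//	for (size_t sub = 0 ; sub < wf_d.size() ; sub++)
					//		v_w.get<wavefront::domains>(wf_id).add(wf_d.get<0>(sub));
					// }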
				}
			}
		}

		// For each point in the Hyper-cube check if we can move the wave front


	}

#ifndef PARALLEL_DECOMPOSITION
//		CreateSubspaces();
#endif

#ifndef USE_METIS_GP

		// Here we do not use METIS
		// Distribute the divided domains

		// Get the number of processing units
		size_t Np = v_cl.getProcessingUnits();

		// Get the ID of this processing unit
		// and push back the subspaces this
		// processing unit is taking

		for (size_t p_id = v_cl.getProcessUnitID(); p_id < Np ; p_id += Np)
			id_sub.push_back(p_id);
#else
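
		// Sketch of the METIS branch (an assumption, not the author's code):
		// build a CSR adjacency of the sub-domain graph and let
		// METIS_PartGraphKway assign every sub-domain to a processor;
		// nvtxs, xadj, adjncy and nparts are hypothetical names for data
		// this file does not construct.
		//
		// idx_t nvtxs = ...; idx_t ncon = 1; idx_t nparts = Np; idx_t objval;
		// std::vector<idx_t> part(nvtxs);
		// METIS_PartGraphKway(&nvtxs, &ncon, xadj.data(), adjncy.data(),
		//                     NULL, NULL, NULL, &nparts, NULL, NULL, NULL,
		//                     &objval, part.data());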


#endif

		/////////////// DEBUG /////////////////////

		// get the decomposition
		auto & dec = g_dist.getDecomposition();

		Vcluster & v_cl = *global_v_cluster;

		// check the consistency of the decomposition
		bool val = dec.check_consistency();
		BOOST_REQUIRE_EQUAL(val,true);

		// for each local volume
		// Get the number of local grid needed
		size_t n_grid = dec.getNLocalHyperCube();

		size_t vol = 0;

		openfpm::vector<Box<2,size_t>> v_b;

		// Allocate the grids
		for (size_t i = 0 ; i < n_grid ; i++)
		{
			// Get the local hyper-cube
			SpaceBox<2,float> sub = dec.getLocalHyperCube(i);

			Box<2,size_t> g_box = g_dist.getCellDecomposer().convertDomainSpaceIntoGridUnits(sub);
			v_b.add(g_box);

			vol += g_box.getVolumeKey();
		}
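
		// Sum the local grid volumes over all the processors; the total
		// must match the k*k cells of the global grid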

		v_cl.reduce(vol);
		v_cl.execute();

		BOOST_REQUIRE_EQUAL(vol,k*k);

		/////////////////////////////////////


		// 3D test

	//	g_dist.write("");

	/*	auto g_it = g_dist.getIteratorBulk();

		auto g_it_halo = g_dist.getHalo();

		// Let's try to solve the Poisson equation d2(u) = f with f = 1 and
		// computation/communication overlap (100 Jacobi iterations)

		for (int i = 0 ; i < 100 ; i++)
		{
			g_dist.ghost_get();

			// Compute the bulk

			jacobi_iteration(g_it);

			g_dist.ghost_sync();

			// Compute the halo

			jacobi_iteration(g_it_halo);
		}*/


		BOOST_AUTO_TEST_CASE( grid_dist_id_poisson_test_use)
		{
			// grid size
		/*	size_t sz[2] = {1024,1024};

			// Distributed grid with id decomposition

			grid_dist_id<2, scalar<float>, CartDecomposition<2,size_t>> g_dist(sz);

			// Create the grid on memory

			g_dist.Create();*/

		/*	auto g_it = g_dist.getIteratorBulk();

			auto g_it_halo = g_dist.getHalo();

			// Let's try to solve the Poisson equation d2(u) = f with f = 1 and
			// computation/communication overlap (100 Jacobi iterations)

			for (int i = 0 ; i < 100 ; i++)
			{
				g_dist.ghost_get();

				// Compute the bulk

				jacobi_iteration(g_it);

				g_dist.ghost_sync();

				// Compute the halo

				jacobi_iteration(g_it_halo);
			}*/
		}

		template<typename iterator> void jacobi_iteration(iterator g_it, grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> & g_dist)
		{
			// scalar
			typedef scalar<float> S;

			// iterator

			while(g_it.isNext())
			{
				// Jacobi update

				auto pos = g_it.get();

				g_dist.template get<S::ele>(pos) = (g_dist.template get<S::ele>(pos.move(0,1)) +
			                             g_dist.template get<S::ele>(pos.move(0,-1)) +
			                             g_dist.template get<S::ele>(pos.move(1,1)) +
			                             g_dist.template get<S::ele>(pos.move(1,-1))) / 4.0;

				++g_it;
			}
		}
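
		// Note: the update above averages the four neighbours, i.e. a Jacobi
		// sweep for the Laplace equation. For the Poisson problem d2(u) = f
		// mentioned above, the sweep would also subtract the source term
		// (sketch; h and f are assumptions not defined in this file):
		//
		// u_new = (u(x+1,y) + u(x-1,y) + u(x,y+1) + u(x,y-1) - h*h*f(x,y)) / 4.0;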


		/*
		 * CartDecomposition.cpp
		 *
		 *  Created on: Aug 15, 2014
		 *      Author: Pietro Incardona
		 */

		#include "CartDecomposition.hpp"



		/*! \brief The bulk part of the data set, or the data that does not depend
		 *  on the ghost layers
		 *
		 * The bulk part of the data set, or the data that does not depend on the
		 * ghost layers
		 *
		 */

		/*template<typename T> T CartDecomposition<T>::getBulk(T data)
		{
			// for each element in data

			for (size_t i = 0; i < data.size() ; i++)
			{
				if (localSpace.isInside())
			}

		}

		template<typename T> T CartDecomposition<T>::getInternal()
		{

		}*/

		/*! \brief Check if a point is border or bulk
		 *
		 * \param nb defines the neighborhood of all the points
		 * \return true if border, false if bulk
		 *
		 */

		bool borderOrBulk(neighborhood & nb)
		{
			device::grid<1,size_t> nbr = nb.next();

			// check the neighborhood

			// get neighborhood iterator

			grid_key_dx_iterator<dim> iterator_nbr = nbr.getIterator();

			while (iterator_nbr.hasNext())
			{
				grid_key_dx<dim> key_nbr = iterator_nbr.next();

				// check if the neighborhood point is internal

				if(subspace.isBound(data.template get<Point::x>(key_nbr)) == false)
				{
					// the point is on the border

					return true;
				}
			}

			return false;
		}

		/*! \brief This function divide the data set into bulk, border, external and internal part
		 *
		 * \tparam dim dimensionality of the structure storing your data
		 *         (example if they are in 3D grid, has to be 3)
		 * \tparam T type of object we are dividing
		 * \tparam device type of layout selected
		 * \param data 1-dimensional grid of points
		 * \param nb defines the neighborhood of all the points
		 * \return a structure with the set of objects divided
		 *
		 */

		template<unsigned int dim, typename T, template<typename> class layout, typename Memory, template<unsigned int, typename> class Domain, template<typename, typename, typename> class data_s>
		dataDiv<T> CartDecomposition<dim,T,layout>::divide(device::grid<1,Point<dim,T>> & data, neighborhood & nb)
		{
			//! allocate the 3 subsets

			dataDiv<T> ret;

			ret.bord = new boost::shared_ptr<T>(new T());
			ret.inte = new boost::shared_ptr<T>(new T());
			ret.ext = new boost::shared_ptr<T>(new T());

			//! get grid iterator

			grid_key_dx_iterator<dim> iterator = data.getIterator();

			//! we iterate through the whole set of objects

			while (iterator.hasNext())
			{
				grid_key_dx<dim> key = iterator.next();

				//! Check if the object is inside the subspace

				if (subspace.isBound(data.template get<Point<dim,T>::x>(key)))
				{
					//! Check if the neighborhood is inside the subspace

					if (borderOrBulk(nb) == true)
					{
						// It is border

						ret.bord.push_back(key);
					}
					else
					{
						// It is bulk

						ret.bulk.push_back(key);
					}
				}
				else
				{
					//! it is external

					ret.ext.push_back(key);
				}
			}

			return ret;
		}


#endif /* GARGABE_HPP_ */