ESyS-Particle  4.0.1
pp_array.hpp
/////////////////////////////////////////////////////////////////////////
//                                                                     //
// Copyright (c) 2003-2011 by The University of Queensland             //
// Earth Systems Science Computational Centre (ESSCC)                  //
// http://www.uq.edu.au/esscc                                          //
//                                                                     //
// Primary Business: Brisbane, Queensland, Australia                   //
// Licensed under the Open Software License version 3.0                //
// http://www.opensource.org/licenses/osl-3.0.php                      //
//                                                                     //
/////////////////////////////////////////////////////////////////////////

#include "Foundation/console.h"
#include <iostream>
#include <sstream>
#include <stdexcept>

#ifdef __INTEL_COMPILER
#include <mathimf.h>
#else
#include <math.h>
#endif
using std::cout;
using std::cerr;
using std::endl;
using std::flush;

//--- TML includes ---
#include "ntable/src/nt_slab.h"

//--- STL includes ---
#include <utility>

using std::pair;
using std::make_pair;

template<typename T>
ParallelParticleArray<T>::ParallelParticleArray(TML_Comm *comm, const std::vector<unsigned int> &dims, const Vec3 &min, const Vec3 &max, double range, double alpha)
  : AParallelParticleArray(comm, dims), m_nt(NULL)
{
  //- get own process coords
  vector<int> pcoords=m_comm.get_coords();
  //- initialize ntable
  // get global number of cells
  int nx_global,ny_global,nz_global;
  nx_global=lrint((max[0]-min[0])/range);
  ny_global=lrint((max[1]-min[1])/range);
  nz_global=lrint((max[2]-min[2])/range);
  // the domain size must be an integer multiple of the interaction range
  if((fabs(double(nx_global)-(max[0]-min[0])/range) > 1e-6) ||
     (fabs(double(ny_global)-(max[1]-min[1])/range) > 1e-6) ||
     (fabs(double(nz_global)-(max[2]-min[2])/range) > 1e-6)){
    m_nt=NULL;
    std::stringstream msg;
    msg << "size doesn't fit range" << endl;
    msg << "diff x : " << double(nx_global)-(max[0]-min[0])/range << endl;
    msg << "diff y : " << double(ny_global)-(max[1]-min[1])/range << endl;
    msg << "diff z : " << double(nz_global)-(max[2]-min[2])/range << endl;
    console.Error() << msg.str() << "\n";
    throw std::runtime_error(msg.str());
  } else {
    // calc local min. cell, considering overlap
    int nx_min=((nx_global*pcoords[0])/m_comm.get_dim(0))-1;
    int ny_min=((ny_global*pcoords[1])/m_comm.get_dim(1))-1;
    int nz_min=((nz_global*pcoords[2])/m_comm.get_dim(2))-1;
    // calc local number of cells, considering overlap
    int nx=(((nx_global*(pcoords[0]+1))/m_comm.get_dim(0))-nx_min)+1;
    int ny=(((ny_global*(pcoords[1]+1))/m_comm.get_dim(1))-ny_min)+1;
    int nz=(((nz_global*(pcoords[2]+1))/m_comm.get_dim(2))-nz_min)+1;
    // init local neighbor table
    m_nt=new NeighborTable<T>(nx,ny,nz,range,alpha,min,nx_min,ny_min,nz_min);
  }
  // init time stamp
  m_timestamp=0;
}
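
// A minimal usage sketch, not part of the library: construct a
// ParallelParticleArray over a 10x10x10 domain on a 2x1x1 process grid.
// The extents and range are chosen so that (max-min)/range is integral in
// each dimension; otherwise the constructor above throws. The particle
// type ExampleParticle is hypothetical.
template<typename ExampleParticle>
ParallelParticleArray<ExampleParticle>* examplePPASetup(TML_Comm *comm)
{
  std::vector<unsigned int> dims(3,1);
  dims[0]=2;                        // split the x-direction over 2 processes
  Vec3 pmin(0.0,0.0,0.0);
  Vec3 pmax(10.0,10.0,10.0);
  double range=1.0;                 // -> 10 cells per dimension globally
  double alpha=0.1;                 // padding parameter forwarded to NeighborTable
  return new ParallelParticleArray<ExampleParticle>(comm,dims,pmin,pmax,range,alpha);
}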

template<typename T>
ParallelParticleArray<T>::ParallelParticleArray(TML_Comm *comm, const vector<unsigned int> &dims, const vector<bool> &circ, const Vec3 &min, const Vec3 &max, double range, double alpha)
  : AParallelParticleArray(comm, dims, circ), m_nt(NULL)
{
  //- get own process coords
  vector<int> pcoords=m_comm.get_coords();
  //- initialize ntable
  // get global number of cells
  int nx_global,ny_global,nz_global;
  nx_global=lrint((max[0]-min[0])/range);
  ny_global=lrint((max[1]-min[1])/range);
  nz_global=lrint((max[2]-min[2])/range);
  // the domain size must be an integer multiple of the interaction range
  if((fabs(double(nx_global)-(max[0]-min[0])/range) > 1e-6) ||
     (fabs(double(ny_global)-(max[1]-min[1])/range) > 1e-6) ||
     (fabs(double(nz_global)-(max[2]-min[2])/range) > 1e-6)){
    m_nt=NULL;
    std::stringstream msg;
    msg << "size doesn't fit range" << endl;
    msg << "diff x : " << double(nx_global)-(max[0]-min[0])/range << endl;
    msg << "diff y : " << double(ny_global)-(max[1]-min[1])/range << endl;
    msg << "diff z : " << double(nz_global)-(max[2]-min[2])/range << endl;
    throw std::runtime_error(msg.str());
  } else {
    // calc local min. cell, considering overlap
    int nx_min=((nx_global*pcoords[0])/m_comm.get_dim(0))-1;
    int ny_min=((ny_global*pcoords[1])/m_comm.get_dim(1))-1;
    int nz_min=((nz_global*pcoords[2])/m_comm.get_dim(2))-1;
    // calc local number of cells, considering overlap
    int nx=(((nx_global*(pcoords[0]+1))/m_comm.get_dim(0))-nx_min)+1;
    int ny=(((ny_global*(pcoords[1]+1))/m_comm.get_dim(1))-ny_min)+1;
    int nz=(((nz_global*(pcoords[2]+1))/m_comm.get_dim(2))-nz_min)+1;
    // init local neighbortable
    m_nt=new NeighborTable<T>(nx,ny,nz,range,alpha,min,nx_min,ny_min,nz_min);
    // setup circular shift values
    m_xshift=(max-min).X();
    m_yshift=(max-min).Y();
    m_zshift=(max-min).Z();
    // setup circular edges: this process owns a wrap-around boundary if the
    // x-direction is circular and it sits at the lower/upper end of the grid
    m_circ_edge_x_down=circ[0] && (pcoords[0]==0);
    m_circ_edge_x_up=circ[0] && (pcoords[0]==m_comm.get_dim(0)-1);
  }
  // init time stamp
  m_timestamp=0;
}
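
// A minimal sketch of the periodic variant, not part of the library: a
// domain that wraps around in x only. Particles crossing the x-boundary
// are translated by +/- (max-min).X() during rebuild(). The particle type
// is again hypothetical.
template<typename ExampleParticle>
ParallelParticleArray<ExampleParticle>* examplePeriodicPPASetup(TML_Comm *comm)
{
  vector<unsigned int> dims(3,1);
  dims[0]=4;                        // 4x1x1 process grid
  vector<bool> circ(3,false);
  circ[0]=true;                     // periodic in x; y/z wraps are not implemented here
  return new ParallelParticleArray<ExampleParticle>(comm,dims,circ,
                                                    Vec3(0.0,0.0,0.0),
                                                    Vec3(20.0,5.0,5.0),
                                                    1.0,0.1);
}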

template<typename T>
ParallelParticleArray<T>::~ParallelParticleArray()
{
  if(m_nt!=NULL) delete m_nt;
}

template<typename T>
void ParallelParticleArray<T>::insert(const T& p)
{
  m_nt->insert(p);
}

template<typename T>
void ParallelParticleArray<T>::insert(const vector<T>& vp)
{
  for(typename vector<T>::const_iterator iter=vp.begin();
      iter!=vp.end();
      iter++){
    m_nt->insert(*iter);
  }
}

template<typename T>
bool ParallelParticleArray<T>::isInInner(const Vec3& pos)
{
  return m_nt->isInInner(pos);
}

template<typename T>
T* ParallelParticleArray<T>::getParticlePtrByIndex(int id)
{
  return m_nt->ptr_by_id(id);
}

template<typename T>
T* ParallelParticleArray<T>::getParticlePtrByPosition(const Vec3& pos)
{
  return m_nt->getNearestPtr(pos);
}

template<typename T>
void ParallelParticleArray<T>::rebuild()
{
  // cout << "PPA at node " << m_comm.rank() << " rebuilding" << endl << flush;
  //- get own process coords (for debug output)
  vector<int> pcoords=m_comm.get_coords();

  // rebuild locally
  // cout << "node " << m_comm.rank() << " pre-build " << *m_nt << endl;
  m_nt->build();
  // cout << "node " << m_comm.rank() << " pre-exchange " << *m_nt << endl;
  //- exchange boundary particles
  NTSlab<T> up_slab;
  NTSlab<T> down_slab;
  bool circ_edge=false;

  // x-dir (there is at least one dimension)
  if(m_comm.get_dim(0)>1){
    // clean out boundary slabs
    NTSlab<T> up_boundary_slab=m_nt->yz_slab(m_nt->xsize()-1);
    up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
    NTSlab<T> down_boundary_slab=m_nt->yz_slab(0);
    down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());

    // synchronize
    m_comm.barrier();

    // define transfer slabs
    up_slab=m_nt->yz_slab(m_nt->xsize()-2);
    down_slab=m_nt->yz_slab(1);

    // upstream
    if(m_circ_edge_x_up){ // circ. bdry
      // cout << "circular shift, node " << m_comm.rank() << " x-dim, up slab size " << up_slab.size() << endl;
      // copy particles from transfer slab into buffer
      vector<T> buffer;
      for(typename NTSlab<T>::iterator iter=up_slab.begin();
          iter!=up_slab.end();
          iter++){
        buffer.push_back(*iter);
      }
      // shift particles in buffer by circular shift value
      for(typename vector<T>::iterator iter=buffer.begin();
          iter!=buffer.end();
          iter++){
        iter->setCircular(Vec3(-1.0*m_xshift,0.0,0.0));
      }
      m_comm.shift_cont_packed(buffer,*m_nt,0,1,m_exchg_tag);
    } else {
      m_comm.shift_cont_packed(up_slab,*m_nt,0,1,m_exchg_tag);
    }
    // downstream
    if(m_circ_edge_x_down){ // circ. bdry
      // cout << "circular shift, node " << m_comm.rank() << " x-dim, down slab size " << down_slab.size() << endl;
      // copy particles from transfer slab into buffer
      vector<T> buffer;
      for(typename NTSlab<T>::iterator iter=down_slab.begin();
          iter!=down_slab.end();
          iter++){
        buffer.push_back(*iter);
      }
      // shift particles in buffer by circular shift value
      for(typename vector<T>::iterator iter=buffer.begin();
          iter!=buffer.end();
          iter++){
        iter->setCircular(Vec3(m_xshift,0.0,0.0));
      }
      m_comm.shift_cont_packed(buffer,*m_nt,0,-1,m_exchg_tag);
    } else {
      m_comm.shift_cont_packed(down_slab,*m_nt,0,-1,m_exchg_tag);
    }
  }
  // y-dir
  if(m_comm.get_dim(1)>1){
    // clean out boundary slabs
    NTSlab<T> up_boundary_slab=m_nt->xz_slab(m_nt->ysize()-1);
    up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
    NTSlab<T> down_boundary_slab=m_nt->xz_slab(0);
    down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());

    // synchronize
    m_comm.barrier();

    // define transfer slabs
    up_slab=m_nt->xz_slab(m_nt->ysize()-2);
    down_slab=m_nt->xz_slab(1);
    if(!circ_edge){ // normal boundary
      // upstream
      m_comm.shift_cont_packed(up_slab,*m_nt,1,1,m_exchg_tag);
      // downstream
      m_comm.shift_cont_packed(down_slab,*m_nt,1,-1,m_exchg_tag);
    } else { // circular edge -> buffer & shift received particles
      cout << "circular y shift not implemented" << endl;
    }
  }
  // z-dir
  if(m_comm.get_dim(2)>1){
    // cout << "zdim= " << m_comm.get_dim(2) << " shifting" << endl;
    // clean out boundary slabs
    NTSlab<T> up_boundary_slab=m_nt->xy_slab(m_nt->zsize()-1);
    up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
    NTSlab<T> down_boundary_slab=m_nt->xy_slab(0);
    down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());

    // define transfer slabs
    up_slab=m_nt->xy_slab(m_nt->zsize()-2);
    down_slab=m_nt->xy_slab(1);
    if(!circ_edge){ // normal boundary
      // upstream
      m_comm.shift_cont_packed(up_slab,*m_nt,2,1,m_exchg_tag);
      // downstream
      m_comm.shift_cont_packed(down_slab,*m_nt,2,-1,m_exchg_tag);
    } else { // circular edge -> buffer & shift received particles
      cout << "circular z shift not implemented" << endl;
    }
  }
  // update timestamp
  m_timestamp++;
  // debug
  // cout << "node " << m_comm.rank() << " post-exchange " << *m_nt << flush;
}
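
// A minimal sketch, not part of the library: the typical per-timestep
// cycle is to insert new particles (if any), then call rebuild() so the
// neighbor table is re-sorted and the boundary slabs are exchanged with
// the neighboring processes.
template<typename T>
void exampleInsertAndRebuild(ParallelParticleArray<T> &ppa,const vector<T> &newParticles)
{
  ppa.insert(newParticles);   // bulk insert into the local neighbor table
  ppa.rebuild();              // re-sort cells and refresh shared boundary slabs
}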

template<typename T>
template<typename P>
void ParallelParticleArray<T>::exchange(P (T::*rdf)(),void (T::*wrtf)(const P&))
{
  // x-dir (there is at least one dimension)
  if(m_comm.get_dim(0)>1){
    // cout << "xdim= " << m_comm.get_dim(0) << " exchanging" << endl;
    // upstream
    exchange_single(rdf,wrtf,m_nt->yz_slab(m_nt->xsize()-2),m_nt->yz_slab(0),0,1);
    // downstream
    exchange_single(rdf,wrtf,m_nt->yz_slab(1),m_nt->yz_slab(m_nt->xsize()-1),0,-1);
  }
  // y-dir
  if(m_comm.get_dim(1)>1){
    // cout << "ydim= " << m_comm.get_dim(1) << " exchanging" << endl;
    // upstream
    exchange_single(rdf,wrtf,m_nt->xz_slab(m_nt->ysize()-2),m_nt->xz_slab(0),1,1);
    // downstream
    exchange_single(rdf,wrtf,m_nt->xz_slab(1),m_nt->xz_slab(m_nt->ysize()-1),1,-1);
  }
  // z-dir
  if(m_comm.get_dim(2)>1){
    // cout << "zdim= " << m_comm.get_dim(2) << " exchanging" << endl;
    // upstream
    exchange_single(rdf,wrtf,m_nt->xy_slab(m_nt->zsize()-2),m_nt->xy_slab(0),2,1);
    // downstream
    exchange_single(rdf,wrtf,m_nt->xy_slab(1),m_nt->xy_slab(m_nt->zsize()-1),2,-1);
  }
}
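
// A minimal sketch, not part of the library: exchange() propagates a
// per-particle field across process boundaries through a reader/writer
// pair of member functions. The accessors getForce()/setForce() are
// hypothetical; substitute those of the actual particle type.
template<typename T>
void exampleExchangeForces(ParallelParticleArray<T> &ppa)
{
  // assumes T provides: Vec3 getForce();  void setForce(const Vec3&);
  ppa.exchange(&T::getForce,&T::setForce);
}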

template<typename T>
template<typename P>
void ParallelParticleArray<T>::exchange_single(P (T::*rdf)(),void (T::*wrtf)(const P&),
                                               NTSlab<T> send_slab,NTSlab<T> recv_slab,
                                               int dir,int dist)
{
  vector<P> send_data;
  vector<P> recv_data;

  // get data
  for(typename NTSlab<T>::iterator iter=send_slab.begin();
      iter!=send_slab.end();
      iter++){
    send_data.push_back(((*iter).*rdf)());
    // cout << "put exchange values from particle " << iter->getID() << " into buffer" << endl;
  }
  // exchange
  m_comm.shift_cont_packed(send_data,recv_data,dir,dist,m_exchg_tag);

  // apply received data
  // check if sizes are correct
  if(recv_data.size()==recv_slab.size()){
    int count=0;
    for(typename NTSlab<T>::iterator iter=recv_slab.begin();
        iter!=recv_slab.end();
        iter++){
      ((*iter).*wrtf)(recv_data[count]);
      // cout << "wrote exchange value to particle " << iter->getID() << endl;
      count++;
    }
  } else {
    cerr << "rank = " << m_comm.rank()
         << ": ParallelParticleArray<T>::exchange_single ERROR: size mismatch in received data, recv_data.size()!=recv_slab.size() - ("
         << recv_data.size() << "!=" << recv_slab.size() << "). dir = " << dir << ", dist = " << dist << endl;
  }
}

template<typename T>
void ParallelParticleArray<T>::forParticle(int id,void (T::*mf)())
{
  T* pp=m_nt->ptr_by_id(id);
  if(pp!=NULL){
    (pp->*mf)();
  }
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forParticle(int id,void (T::*mf)(P),const P& arg)
{
  T* pp=m_nt->ptr_by_id(id);
  if(pp!=NULL){
    (pp->*mf)(arg);
  }
}

template<typename T>
void ParallelParticleArray<T>::forParticleTag(int tag,void (T::*mf)())
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if(iter->getTag()==tag){
      ((*iter).*mf)();
    }
  }
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forParticleTag(int tag,void (T::*mf)(P),const P& arg)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if(iter->getTag()==tag){
      ((*iter).*mf)(arg);
    }
  }
}

template<typename T>
void ParallelParticleArray<T>::forParticleTagMask(int tag,int mask,void (T::*mf)())
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if((iter->getTag() & mask)==(tag & mask)){
      ((*iter).*mf)();
    }
  }
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forParticleTagMask(int tag,int mask,void (T::*mf)(P),const P& arg)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if((iter->getTag() & mask)==(tag & mask)){
      ((*iter).*mf)(arg);
    }
  }
}
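
// A minimal sketch, not part of the library: the tag/mask variants compare
// only the bits selected by the mask, i.e. a particle matches iff
// (particleTag & mask) == (tag & mask). With tag=1 and mask=3, any particle
// whose two lowest tag bits are 01 is selected, whatever its other bits.
// The member function applyVelocity() is hypothetical.
template<typename T>
void exampleKickTaggedParticles(ParallelParticleArray<T> &ppa)
{
  // assumes T provides: void applyVelocity(Vec3 v);
  ppa.forParticleTagMask(1,3,&T::applyVelocity,Vec3(0.0,0.0,1.0));
}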

template<typename T>
void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)())
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    ((*iter).*fnc)();
  }
}

template<typename T>
void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)()const)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    ((*iter).*fnc)();
  }
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)(P),const P& arg)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    ((*iter).*fnc)(arg);
  }
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllInnerParticles(void (T::*fnc)(P&),P& arg)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    ((*iter).*fnc)(arg);
  }
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllParticlesGet(P& cont,typename P::value_type (T::*rdf)()const)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    cont.push_back(((*iter).*rdf)());
  }
}
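
// A minimal sketch, not part of the library: gather one field from every
// local particle into a container whose value_type matches the reader's
// return type. This assumes T provides Vec3 getPos() const, as used by
// loadCheckPointData() below.
template<typename T>
vector<Vec3> exampleCollectPositions(ParallelParticleArray<T> &ppa)
{
  vector<Vec3> positions;
  ppa.forAllParticlesGet(positions,&T::getPos);
  return positions;
}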

template<typename T>
ParallelParticleArray<T>::ParticleIterator::ParticleIterator(
  const NtBlock &ntBlock
)
  : m_ntBlock(ntBlock),
    m_it(m_ntBlock.begin())
{
  m_numRemaining = m_ntBlock.size();
}

template<typename T>
bool ParallelParticleArray<T>::ParticleIterator::hasNext()
{
  return (m_numRemaining > 0);
}

template<typename T>
typename ParallelParticleArray<T>::ParticleIterator::Particle &ParallelParticleArray<T>::ParticleIterator::next()
{
  Particle &p = (*m_it);
  m_it++;
  m_numRemaining--;
  return p;
}

template<typename T>
int ParallelParticleArray<T>::ParticleIterator::getNumRemaining()
{
  return m_numRemaining;
}

template<typename T>
typename ParallelParticleArray<T>::ParticleIterator ParallelParticleArray<T>::getInnerParticleIterator()
{
  return ParticleIterator(m_nt->inner());
}
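
// A minimal sketch, not part of the library: iterate over the inner
// (non-overlap) particles with the hasNext()/next() interface above.
template<typename T>
int exampleCountInnerParticles(ParallelParticleArray<T> &ppa)
{
  typename ParallelParticleArray<T>::ParticleIterator it =
    ppa.getInnerParticleIterator();
  int n=0;
  while(it.hasNext()){
    it.next();   // returns a Particle& that could be inspected here
    n++;
  }
  return n;
}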

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllInnerParticlesGet(P& cont,typename P::value_type (T::*rdf)()const)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    cont.push_back(((*iter).*rdf)());
  }
}

template<typename T>
template<typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllParticlesGetIndexed(P (T::*rdf)() const)
{
  vector<pair<int,P> > res;

  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    res.push_back(make_pair(iter->getID(),((*iter).*rdf)()));
  }

  return res;
}

template<typename T>
template<typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllInnerParticlesGetIndexed(P (T::*rdf)() const)
{
  vector<pair<int,P> > res;

  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    res.push_back(make_pair(iter->getID(),((*iter).*rdf)()));
  }

  return res;
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllTaggedParticlesGet(P& cont,typename P::value_type (T::*rdf)()const,int tag,int mask)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    // compare only the tag bits selected by the mask
    if((iter->getTag() & mask)==(tag & mask)){
      cont.push_back(((*iter).*rdf)());
    }
  }
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllTaggedInnerParticlesGet(P& cont,typename P::value_type (T::*rdf)()const,int tag,int mask)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    int ptag=iter->getTag();
    if((ptag & mask)==(tag & mask)){
      cont.push_back(((*iter).*rdf)());
    }
  }
}

template<typename T>
template<typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllTaggedParticlesGetIndexed(P (T::*rdf)() const,int tag,int mask)
{
  vector<pair<int,P> > res;

  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    int id=iter->getID();
    int ptag=iter->getTag();
    if((ptag & mask)==(tag & mask)){
      res.push_back(make_pair(id,((*iter).*rdf)()));
    }
  }

  return res;
}

template<typename T>
template<typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllInnerTaggedParticlesGetIndexed(P (T::*rdf)() const,int tag,int mask)
{
  vector<pair<int,P> > res;

  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    int id=iter->getID();
    int ptag=iter->getTag();
    if((ptag & mask)==(tag & mask)){
      res.push_back(make_pair(id,((*iter).*rdf)()));
    }
  }

  return res;
}

template<typename T>
template<typename P>
void ParallelParticleArray<T>::forPointsGetNearest(P& cont,typename P::value_type (T::*rdf)() const,const Vec3& orig,
                                                   double dx,double dy,double dz,int nx,int ny,int nz)
{
  console.Debug() << "forPointsGetNearest" << "\n";

  Vec3 u_x=Vec3(1.0,0.0,0.0);
  Vec3 u_y=Vec3(0.0,1.0,0.0);
  Vec3 u_z=Vec3(0.0,0.0,1.0);
  for(int ix=0;ix<nx;ix++){
    for(int iy=0;iy<ny;iy++){
      for(int iz=0;iz<nz;iz++){
        Vec3 p=orig+double(ix)*dx*u_x+double(iy)*dy*u_y+double(iz)*dz*u_z;
        cont.push_back(((*(m_nt->getNearestPtr(p))).*rdf)());
      }
    }
  }

  console.Debug() << "end forPointsGetNearest" << "\n";
}
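
// A minimal sketch, not part of the library: sample a per-particle field
// at the nodes of a regular 8x8x8 grid with spacing 0.5, taking the value
// of the nearest particle at each node. The reader getRadius() is
// hypothetical.
template<typename T>
void exampleSampleGrid(ParallelParticleArray<T> &ppa,vector<double> &samples)
{
  // assumes T provides: double getRadius() const;
  ppa.forPointsGetNearest(samples,&T::getRadius,Vec3(0.0,0.0,0.0),
                          0.5,0.5,0.5,8,8,8);
}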

template<typename T>
set<int> ParallelParticleArray<T>::getBoundarySlabIds(int dir,int up) const
{
  set<int> res;
  NTSlab<T> slab,slab2;

  switch(dir){
  case 0: // x-dir
    if(up==1){
      slab=m_nt->yz_slab(m_nt->xsize()-1);
      slab2=m_nt->yz_slab(m_nt->xsize()-2);
    } else {
      slab=m_nt->yz_slab(0);
      slab2=m_nt->yz_slab(1);
    }
    break;
  case 1: // y-dir
    if(up==1){
      slab=m_nt->xz_slab(m_nt->ysize()-1);
      slab2=m_nt->xz_slab(m_nt->ysize()-2);
    } else {
      slab=m_nt->xz_slab(0);
      slab2=m_nt->xz_slab(1);
    }
    break;
  case 2: // z-dir
    if(up==1){
      slab=m_nt->xy_slab(m_nt->zsize()-1);
      slab2=m_nt->xy_slab(m_nt->zsize()-2);
    } else {
      slab=m_nt->xy_slab(0);
      slab2=m_nt->xy_slab(1);
    }
    break;
  default:
    cerr << "Error: wrong direction " << dir << " in getBoundarySlabIds" << endl;
  }

  for(typename NTSlab<T>::iterator iter=slab.begin();
      iter!=slab.end();
      iter++){
    res.insert(iter->getID());
  }
  for(typename NTSlab<T>::iterator iter=slab2.begin();
      iter!=slab2.end();
      iter++){
    res.insert(iter->getID());
  }

  return res;
}

template<typename T>
set<int> ParallelParticleArray<T>::get2ndSlabIds(int dir,int up) const
{
  set<int> res;
  NTSlab<T> slab;

  switch(dir){
  case 0: // x-dir
    if(up==1){
      slab=m_nt->yz_slab(m_nt->xsize()-2);
    } else {
      slab=m_nt->yz_slab(1);
    }
    break;
  case 1: // y-dir
    if(up==1){
      slab=m_nt->xz_slab(m_nt->ysize()-2);
    } else {
      slab=m_nt->xz_slab(1);
    }
    break;
  case 2: // z-dir
    if(up==1){
      slab=m_nt->xy_slab(m_nt->zsize()-2);
    } else {
      slab=m_nt->xy_slab(1);
    }
    break;
  default:
    cerr << "Error: wrong direction " << dir << " in get2ndSlabIds" << endl;
  }

  for(typename NTSlab<T>::iterator iter=slab.begin();
      iter!=slab.end();
      iter++){
    res.insert(iter->getID());
  }

  return res;
}

template<typename T>
void ParallelParticleArray<T>::getAllInnerParticles(vector<T>& pv)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    pv.push_back(*iter);
  }
}

template<typename T>
void ParallelParticleArray<T>::saveCheckPointData(std::ostream& ost)
{
  console.Debug() << "ParallelParticleArray<T>::saveCheckPointData\n";

  // write dimensions
  ost << m_nt->base_point() << "\n";
  ost << m_nt->base_idx_x() << " " << m_nt->base_idx_y() << " " << m_nt->base_idx_z() << "\n";

  // write nr. of particles in inner block
  ost << getInnerSize() << "\n";

  // save particles to stream
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    iter->saveCheckPointData(ost);
    ost << "\n";
  }
}

template<typename T>
void ParallelParticleArray<T>::loadCheckPointData(std::istream& ist)
{
  console.Debug() << "ParallelParticleArray<T>::loadCheckPointData\n";
  int nparts;

  // get dimensions
  Vec3 bp;
  int bix,biy,biz;
  ist >> bp;
  ist >> bix;
  ist >> biy;
  ist >> biz;
  // complain if different from current values
  if((bp!=m_nt->base_point()) ||
     (bix!=m_nt->base_idx_x()) ||
     (biy!=m_nt->base_idx_y()) ||
     (biz!=m_nt->base_idx_z())){
    std::cerr << "local area data inconsistent: bp " << bp << " / " << m_nt->base_point()
              << " bix: " << bix << " / " << m_nt->base_idx_x()
              << " biy: " << biy << " / " << m_nt->base_idx_y()
              << " biz: " << biz << " / " << m_nt->base_idx_z() << std::endl;
  }

  // get nr. of particles
  ist >> nparts;

  // load particles from stream and try to insert them
  for(int i=0;i!=nparts;i++){
    T p;
    p.loadCheckPointData(ist);
    if(!isInInner(p.getPos())) std::cerr << "not in inner: " << p.getPos() << std::endl;
    m_nt->insert(p);
  }
}
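
// A minimal sketch, not part of the library: per-process checkpoint
// round-trip. Each process writes its inner particles to its own file and
// reads them back into a ParallelParticleArray set up with identical
// dimensions (loadCheckPointData() warns if the local area data differ).
// Requires <fstream>; the file name is arbitrary.
template<typename T>
void exampleCheckpointRoundTrip(ParallelParticleArray<T> &ppa,const std::string &filename)
{
  std::ofstream ofs(filename.c_str());
  ppa.saveCheckPointData(ofs);
  ofs.close();

  std::ifstream ifs(filename.c_str());
  ppa.loadCheckPointData(ifs);
}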

template<typename T>
ostream& operator<<(ostream& ost, const ParallelParticleArray<T> &ppa)
{
  ost << "--ParallelParticleArray--" << endl;
  ost << *(ppa.m_nt) << endl;
  return ost;
}