Peano
main.cpp
#include "MyObserver.h"

#include <cstdlib>
#include <cmath>

#include "tarch/logging/Log.h"
#include "tarch/logging/CommandLineLogger.h"
#include "tarch/logging/ChromeTraceFileLogger.h"
#include "tarch/logging/LogFilter.h"
#include "tarch/multicore/Core.h"
#include "tarch/mpi/Rank.h"

#include "peano4/peano4.h"
#include "peano4/grid/Spacetree.h"
#include "peano4/parallel/SpacetreeSet.h"
#include "peano4/parallel/Node.h"


tarch::logging::Log _log("examples::regulargridupscaling");


#include "peano4/UnitTests.h"
#include "tarch/UnitTests.h"

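// Run the unit tests that ship with the tarch and peano4 layers; abort the
// whole application if any of them fails.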
void runTests() {
  int unitTestsErrors = 0;
  tarch::tests::TestCase* tests = nullptr;

  tests = tarch::getUnitTests();
  tests->run();
  unitTestsErrors += tests->getNumberOfErrors();
  delete tests;

  tests = peano4::getUnitTests();
  tests->run();
  unitTestsErrors += tests->getNumberOfErrors();
  delete tests;

  if (unitTestsErrors != 0) {
    logError("runTests()", "unit tests failed. Quit.");
    exit(-2);
  }
}


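// Construct a regular grid of mesh width h, distribute it among all MPI ranks
// and threads, and run a fixed number of parallel traversals over it.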
void runParallel(double h, int flopsPerCell) {
  logTraceInWith2Arguments( "runParallel", h, flopsPerCell );
  peano4::parallel::SpacetreeSet::getInstance().init(
    #if DIMENSIONS==2
    {0.0, 0.0},
    {1.0, 1.0},
    #else
    {0.0, 0.0, 0.0},
    {1.0, 1.0, 1.0},
    #endif
    0
  );

  // Observer handed to every traversal below; the type comes from MyObserver.h.
  // Its constructor is not part of this listing, so a default-constructed
  // instance is assumed here.
  examples::regulargridupscaling::MyObserver emptyObserver;

  int numberOfThreads = tarch::multicore::Core::getInstance().getNumberOfThreads();

  if (tarch::mpi::Rank::getInstance().isGlobalMaster() ) {
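    // Step #1: refine the initial grid until there are at least 3^d unrefined
    // cells per rank (THREE_POWER_D), but give up after 20 sweeps.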
    logInfo( "runParallel(...)", "create initial grid (step #1)" );
    int numberOfGridConstructionSteps = 0;
    while (
      peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() <
      tarch::mpi::Rank::getInstance().getNumberOfRanks() * THREE_POWER_D
      and
      numberOfGridConstructionSteps<20
    ) {
      peano4::parallel::Node::getInstance().setNextProgramStep(1);
      peano4::parallel::SpacetreeSet::getInstance().traverse( emptyObserver );
      numberOfGridConstructionSteps++;
    }
    logInfo( "runParallel(...)", "grid statistics = " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().toString() );

    if (
      peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() <
      tarch::mpi::Rank::getInstance().getNumberOfRanks()
    ) {
      logError( "runParallel(...)", "not enough cells to keep " << tarch::mpi::Rank::getInstance().getNumberOfRanks() << " busy" );
      exit(-1);
    }

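    // split(tree, cells, targetRank) asks spacetree 0 to deploy the given
    // number of cells to targetRank; it returns false if the split cannot be
    // triggered.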
    int numberOfCellsPerRank = peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() / tarch::mpi::Rank::getInstance().getNumberOfRanks();
    logInfo( "runParallel(...)", "trigger split of initial grid among ranks with " << numberOfCellsPerRank << " cells per rank" );
    for (int rank=1; rank<tarch::mpi::Rank::getInstance().getNumberOfRanks(); rank++) {
      if ( not peano4::parallel::SpacetreeSet::getInstance().split(0,numberOfCellsPerRank,rank)) {
        logWarning( "runParallel(...)", "failed to assign rank " << rank << " " << numberOfCellsPerRank << " cell(s)" );
      }
    }

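    // Each refinement sweep reduces the mesh width by a factor of three, so
    // roughly log_3(1/h) sweeps are needed before mesh width h is reached.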
    const int MaxNumberOfConstructionSteps = static_cast<int>(std::round( std::log(1.0 / h)/std::log(3.0)+1 )) - 1;
    assertion1(MaxNumberOfConstructionSteps>=0, MaxNumberOfConstructionSteps);
    logInfo( "runParallel(...)", "commit split and give ranks " << MaxNumberOfConstructionSteps << " iterations to 'recover' (step #2)" );
    for (int i=0; i<MaxNumberOfConstructionSteps; i++) {
      peano4::parallel::Node::getInstance().setNextProgramStep(2);
      peano4::parallel::SpacetreeSet::getInstance().traverse( emptyObserver );
    }

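    // Split the master's tree once per additional thread. The new trees stay
    // on rank 0 (last argument), so they are processed by local threads.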
    int numberOfCellsPerThread = peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() / numberOfThreads;
    logInfo( "runParallel(...)", "trigger split of master rank into threads with " << numberOfCellsPerThread << " cells per thread (total: " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() << ")");
    for (int thread=1; thread<numberOfThreads; thread++) {
      if ( not peano4::parallel::SpacetreeSet::getInstance().split(0,numberOfCellsPerThread,0)) {
        logWarning( "runParallel(...)", "failed to assign thread " << thread << " " << numberOfCellsPerThread << " cell(s)" );
      }
    }

    logInfo( "runParallel(...)", "run one step committing split and telling other ranks to split as well (step #3)" );
    peano4::parallel::Node::getInstance().setNextProgramStep(3);
    peano4::parallel::SpacetreeSet::getInstance().traverse( emptyObserver );

    logInfo( "runParallel(...)", "commit splits into threads and give ranks time to 'recover' (step #4)" );
    for (int i=0; i<3; i++) {
      peano4::parallel::Node::getInstance().setNextProgramStep(4);
      peano4::parallel::SpacetreeSet::getInstance().traverse( emptyObserver );
    }

    logInfo( "runParallel(...)", "start parallel traversals (step #5)" );
    logInfo( "runParallel(...)", "grid statistics = " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().toString() );

    #if PEANO_DEBUG>=2
    const int Iterations=2;
    #else
    const int Iterations=20;
    #endif

    for (int i=0; i<Iterations; i++) {
      peano4::parallel::Node::getInstance().setNextProgramStep(5);
      peano4::parallel::SpacetreeSet::getInstance().traverse( emptyObserver );
    }
    logInfo( "runParallel(...)", "terminated successfully" );
  }
  else { // not the global master
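    // Workers spin in continueToRun() and dispatch on the program step that
    // the global master has set before each sweep.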
    while (peano4::parallel::Node::getInstance().continueToRun()) {
      logDebug( "runParallel(...)", "trigger a new sweep with step " << peano4::parallel::Node::getInstance().getCurrentProgramStep() );
      if (
        peano4::parallel::Node::getInstance().getCurrentProgramStep()==2
        or
        peano4::parallel::Node::getInstance().getCurrentProgramStep()==4
        or
        peano4::parallel::Node::getInstance().getCurrentProgramStep()==5
      ) {
        peano4::parallel::SpacetreeSet::getInstance().traverse(emptyObserver);
      }
      else if (peano4::parallel::Node::getInstance().getCurrentProgramStep()==3) {
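        // Step #3 on a worker: mirror the master and split the single local
        // tree into one additional tree per thread, all kept on this rank.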
        assertionEquals( peano4::parallel::SpacetreeSet::getInstance().getLocalSpacetrees().size(), 1);
        const int localTree = *(peano4::parallel::SpacetreeSet::getInstance().getLocalSpacetrees().begin());
        int numberOfCellsPerThread = peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() / tarch::multicore::Core::getInstance().getNumberOfThreads();
        logInfo( "runParallel(...)", "local unrefined cells = " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells());
        logInfo( "runParallel(...)", "trigger split of master rank into threads with " << numberOfCellsPerThread << " cells per thread");
        for (int thread=1; thread<numberOfThreads; thread++) {
          if ( not peano4::parallel::SpacetreeSet::getInstance().split(localTree,numberOfCellsPerThread,tarch::mpi::Rank::getInstance().getRank())) {
            logWarning( "runParallel(...)", "failed to assign thread " << thread << " " << numberOfCellsPerThread << " cell(s)" );
          }
        }
        peano4::parallel::SpacetreeSet::getInstance().traverse(emptyObserver);
      }
    }
  }
  logTraceOut( "runParallel" );
}


int main(int argc, char** argv) {
  const int ExitCodeSuccess = 0;
  const int ExitCodeUnitTestsFailed = 1;

  peano4::initParallelEnvironment(&argc,&argv);
  peano4::fillLookupTables();

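  // Configure which log messages reach the terminal. The trailing boolean of
  // each FilterListEntry marks a blacklist entry: true suppresses matching
  // messages, false lets them pass. Everything from peano4 plus tarch info
  // output is shown; tarch debug and trace output is filtered out.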
  tarch::logging::LogFilter::getInstance().addFilterListEntry( tarch::logging::LogFilter::FilterListEntry(
    tarch::logging::LogFilter::FilterListEntry::TargetDebug, tarch::logging::LogFilter::FilterListEntry::AnyRank, "peano4", false
  ));
  tarch::logging::LogFilter::getInstance().addFilterListEntry( tarch::logging::LogFilter::FilterListEntry(
    tarch::logging::LogFilter::FilterListEntry::TargetInfo, tarch::logging::LogFilter::FilterListEntry::AnyRank, "peano4", false
  ));
  tarch::logging::LogFilter::getInstance().addFilterListEntry( tarch::logging::LogFilter::FilterListEntry(
    tarch::logging::LogFilter::FilterListEntry::TargetTrace, tarch::logging::LogFilter::FilterListEntry::AnyRank, "peano4", false
  ));
  tarch::logging::LogFilter::getInstance().addFilterListEntry( tarch::logging::LogFilter::FilterListEntry(
    tarch::logging::LogFilter::FilterListEntry::TargetDebug, tarch::logging::LogFilter::FilterListEntry::AnyRank, "tarch", true
  ));
  tarch::logging::LogFilter::getInstance().addFilterListEntry( tarch::logging::LogFilter::FilterListEntry(
    tarch::logging::LogFilter::FilterListEntry::TargetInfo, tarch::logging::LogFilter::FilterListEntry::AnyRank, "tarch", false
  ));
  tarch::logging::LogFilter::getInstance().addFilterListEntry( tarch::logging::LogFilter::FilterListEntry(
    tarch::logging::LogFilter::FilterListEntry::TargetTrace, tarch::logging::LogFilter::FilterListEntry::AnyRank, "tarch", true
  ));

  //tarch::logging::CommandLineLogger::getInstance().setOutputFile( "trace.txt" );
  tarch::logging::ChromeTraceFileLogger::getInstance().setOutputFile( "p4.tracing" );

  runTests();

  if (argc<3 or argc>6) {
    logError( "main(...)", "Usage: ./executable mesh-width flops-per-cell [core-count] [spawn-frequency] [integration-points]");
    return 1;
  }

  double meshWidth = std::atof( argv[1] );
  int flopsPerCell = std::atoi( argv[2] );
  if (meshWidth<=0) {
    logError( "main(...)", "Usage: ./executable mesh-width flops-per-cell [core-count] [spawn-frequency] [integration-points]");
    logError( "main(...)", " mesh-width has to be a positive value");
    return 2;
  }
  if (meshWidth>=1.0) {
    logError( "main(...)", "Usage: ./executable mesh-width flops-per-cell [core-count] [spawn-frequency] [integration-points]");
    logError( "main(...)", " mesh-width has to be smaller than one");
    return 2;
  }

  if (argc>=4) {
    int cores = std::atoi( argv[3] );
    tarch::multicore::Core::getInstance().configure(cores);
  }

  if (argc>=5) {
    // optional spawn-frequency argument (argv[4]); its parsing is elided in
    // this listing
  }
  if (argc>=6) {
    // optional integration-points argument (argv[5]); its parsing is elided
    // in this listing
  }

  const int numberOfRanks = tarch::mpi::Rank::getInstance().getNumberOfRanks();
  const int numberOfCores = tarch::multicore::Core::getInstance().getNumberOfThreads();
  logInfo( "main(...)", "run on " << numberOfRanks << " ranks with " << numberOfCores << " thread(s) each" );

  runParallel(meshWidth,flopsPerCell);

  peano4::shutdownParallelEnvironment();

  return ExitCodeSuccess;
}