Scippy / UG — Ubiquity Generator framework
bbParaSolverStateMpi.cpp — source listing (go to the documentation of this file).
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and software framework */
4 /* UG --- Ubiquity Generator Framework */
5 /* */
6 /* Copyright Written by Yuji Shinano <shinano@zib.de>, */
7 /* Copyright (C) 2021 by Zuse Institute Berlin, */
8 /* licensed under LGPL version 3 or later. */
9 /* Commercial licenses are available through <licenses@zib.de> */
10 /* */
11 /* This code is free software; you can redistribute it and/or */
12 /* modify it under the terms of the GNU Lesser General Public License */
13 /* as published by the Free Software Foundation; either version 3 */
14 /* of the License, or (at your option) any later version. */
15 /* */
16 /* This program is distributed in the hope that it will be useful, */
17 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
18 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
19 /* GNU Lesser General Public License for more details. */
20 /* */
21 /* You should have received a copy of the GNU Lesser General Public License */
22 /* along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 /* */
24 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
25 
26 /**@file bbParaSolverStateMpi.cpp
27  * @brief BbParaSolverState extension for MPI communication.
28  * @author Yuji Shinano
29  *
30  *
31  *
32  */
33 
34 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
35 
36 
37 #include "bbParaSolverStateMpi.h"
38 
39 using namespace UG;
40 
41 MPI_Datatype
43  )
44 {
45 
46  const int nBlocks = 10;
47 
48  MPI_Datatype datatype;
49 
50  MPI_Aint startAddress = 0;
51  MPI_Aint address = 0;
52 
53  int blockLengths[nBlocks];
54  MPI_Aint displacements[nBlocks];
55  MPI_Datatype types[nBlocks];
56 
57  for( int i = 0; i < nBlocks; i++ )
58  {
59  blockLengths[i] = 1;
60  types[i] = MPI_INT;
61  }
62 
63  MPI_CALL(
64  MPI_Get_address( &racingStage, &startAddress )
65  );
66  displacements[0] = 0;
67 
68  MPI_CALL(
69  MPI_Get_address( &notificationId, &address )
70  );
71  displacements[1] = address - startAddress;
72  types[1] = MPI_UNSIGNED;
73 
74  MPI_CALL(
75  MPI_Get_address( &lcId, &address )
76  );
77  displacements[2] = address - startAddress;
78 
79  MPI_CALL(
80  MPI_Get_address( &globalSubtreeIdInLc, &address )
81  );
82  displacements[3] = address - startAddress;
83 
84  MPI_CALL(
85  MPI_Get_address( &nNodesSolved, &address )
86  );
87  displacements[4] = address - startAddress;
88 #ifdef _ALIBABA
89  types[4] = MPI_LONG;
90 #else
91  types[4] = MPI_LONG_LONG;
92 #endif
93 
94  MPI_CALL(
95  MPI_Get_address( &nNodesLeft, &address )
96  );
97  displacements[5] = address - startAddress;
98 
99  MPI_CALL(
100  MPI_Get_address( &bestDualBoundValue, &address )
101  );
102  displacements[6] = address - startAddress;
103  types[6] = MPI_DOUBLE;
104 
105  MPI_CALL(
106  MPI_Get_address( &globalBestPrimalBoundValue, &address )
107  );
108  displacements[7] = address - startAddress;
109  types[7] = MPI_DOUBLE;
110 
111  MPI_CALL(
112  MPI_Get_address( &detTime, &address )
113  );
114  displacements[8] = address - startAddress;
115  types[8] = MPI_DOUBLE;
116 
117  MPI_CALL(
118  MPI_Get_address( &averageDualBoundGain, &address )
119  );
120  displacements[9] = address - startAddress;
121  types[9] = MPI_DOUBLE;
122 
123  MPI_CALL(
124  MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &datatype)
125  );
126 
127  return datatype;
128 }
129 
130 void
132  ParaComm *comm,
133  int destination,
134  int tag
135  )
136 {
137  assert(nNodesLeft >= 0);
138  assert(bestDualBoundValue >= -1e+10);
139  DEF_PARA_COMM( commMpi, comm);
140 
141  MPI_Datatype datatype;
142  datatype = createDatatype();
143  MPI_CALL(
144  MPI_Type_commit( &datatype )
145  );
147  commMpi->usend(&racingStage, 1, datatype, destination, tag)
148  );
149  MPI_CALL(
150  MPI_Type_free( &datatype )
151  );
152 }
153 
154 void
156  ParaComm *comm,
157  int source,
158  int tag
159  )
160 {
161  DEF_PARA_COMM( commMpi, comm);
162 
163  MPI_Datatype datatype;
164  datatype = createDatatype();
165  MPI_CALL(
166  MPI_Type_commit( &datatype )
167  );
169  commMpi->ureceive(&racingStage, 1, datatype, source, tag)
170  );
171  MPI_CALL(
172  MPI_Type_free( &datatype )
173  );
174 }
void receive(ParaComm *comm, int source, int tag)
receive this object
void send(ParaComm *comm, int destination, int tag)
send this object
static ScipParaCommTh * comm
Definition: fscip.cpp:73
int globalSubtreeIdInLc
global subtree id of current ParaTask
#define PARA_COMM_CALL(paracommcall)
Definition: paraComm.h:47
#define DEF_PARA_COMM(para_comm, comm)
double detTime
deterministic time, -1: should be non-deterministic
double averageDualBoundGain
average dual bound gain received
int nNodesLeft
number of remaining nodes
MPI_Datatype createDatatype()
create BbParaSolverStateMpi datatype
int lcId
lc id of current ParaTask
double globalBestPrimalBoundValue
global best primal bound value
long long nNodesSolved
number of nodes solved
int racingStage
if this value is 1, solver is in racing stage
#define MPI_CALL(mpicall)
Definition: paraCommMpi.h:68
Base class of communicator object.
Definition: paraComm.h:101
unsigned int notificationId
id for this notification
double bestDualBoundValue
best dual bound value in that of remaining nodes