UG

Ubiquity Generator framework

bbParaSolverTerminationStateMpi.cpp
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* */
/* This file is part of the program and software framework */
/* UG --- Ubiquity Generator Framework */
/* */
/* Copyright Written by Yuji Shinano <shinano@zib.de>, */
/* Copyright (C) 2021-2024 by Zuse Institute Berlin, */
/* licensed under LGPL version 3 or later. */
/* Commercial licenses are available through <licenses@zib.de> */
/* */
/* This code is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Lesser General Public License */
/* as published by the Free Software Foundation; either version 3 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Lesser General Public License for more details. */
/* */
/* You should have received a copy of the GNU Lesser General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/**@file    bbParaSolverTerminationStateMpi.cpp
 * @brief   BbParaSolverTerminationState extension for MPI communication.
 * @author  Yuji Shinano
 *
 *
 *
 */

/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/


#include "ug/paraComm.h"
#include "bbParaSolverTerminationStateMpi.h"

using namespace UG;

MPI_Datatype
BbParaSolverTerminationStateMpi::createDatatype(){

   const int nBlocks = 34;

   MPI_Datatype datatype;

   MPI_Aint startAddress = 0;
   MPI_Aint address = 0;

   int blockLengths[nBlocks];
   MPI_Aint displacements[nBlocks];
   MPI_Datatype types[nBlocks];

   /* every block holds a single element; all blocks default to MPI_INT and
    * the trailing timing members are switched to MPI_DOUBLE below */
   for( int i = 0; i < nBlocks; i++ ){
      blockLengths[i] = 1;
      types[i] = MPI_INT;
   }

   /* displacements are measured relative to the first member, interrupted */
   MPI_CALL(
      MPI_Get_address( &interrupted, &startAddress )
   );
   displacements[0] = 0;

   MPI_CALL(
      MPI_Get_address( &rank, &address )
   );
   displacements[1] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &totalNSolved, &address )
   );
   displacements[2] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &minNSolved, &address )
   );
   displacements[3] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &maxNSolved, &address )
   );
   displacements[4] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &totalNSent, &address )
   );
   displacements[5] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &totalNImprovedIncumbent, &address )
   );
   displacements[6] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nParaTasksReceived, &address )
   );
   displacements[7] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nParaTasksSolved, &address )
   );
   displacements[8] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nParaNodesSolvedAtRoot, &address )
   );
   displacements[9] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nParaNodesSolvedAtPreCheck, &address )
   );
   displacements[10] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nTransferredLocalCutsFromSolver, &address )
   );
   displacements[11] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &minTransferredLocalCutsFromSolver, &address )
   );
   displacements[12] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &maxTransferredLocalCutsFromSolver, &address )
   );
   displacements[13] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nTransferredBendersCutsFromSolver, &address )
   );
   displacements[14] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &minTransferredBendersCutsFromSolver, &address )
   );
   displacements[15] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &maxTransferredBendersCutsFromSolver, &address )
   );
   displacements[16] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nTotalRestarts, &address )
   );
   displacements[17] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &minRestarts, &address )
   );
   displacements[18] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &maxRestarts, &address )
   );
   displacements[19] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &nTightened, &address )
   );
   displacements[20] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &nTightenedInt, &address )
   );
   displacements[21] = address - startAddress;
   MPI_CALL(
      MPI_Get_address( &calcTerminationState, &address )
   );
   displacements[22] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &runningTime, &address )
   );
   displacements[23] = address - startAddress;
   types[23] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &idleTimeToFirstParaTask, &address )
   );
   displacements[24] = address - startAddress;
   types[24] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &idleTimeBetweenParaTasks, &address )
   );
   displacements[25] = address - startAddress;
   types[25] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &idleTimeAfterLastParaTask, &address )
   );
   displacements[26] = address - startAddress;
   types[26] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &idleTimeToWaitNotificationId, &address )
   );
   displacements[27] = address - startAddress;
   types[27] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &idleTimeToWaitAckCompletion, &address )
   );
   displacements[28] = address - startAddress;
   types[28] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &idleTimeToWaitToken, &address )
   );
   displacements[29] = address - startAddress;
   types[29] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &totalRootNodeTime, &address )
   );
   displacements[30] = address - startAddress;
   types[30] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &minRootNodeTime, &address )
   );
   displacements[31] = address - startAddress;
   types[31] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &maxRootNodeTime, &address )
   );
   displacements[32] = address - startAddress;
   types[32] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &detTime, &address )
   );
   displacements[33] = address - startAddress;
   types[33] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &datatype)
   );

   return datatype;

}

void
BbParaSolverTerminationStateMpi::send(
      ParaComm *comm,
      int destination,
      int tag
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI_Datatype datatype;
   datatype = createDatatype();
   MPI_CALL(
      MPI_Type_commit( &datatype )
   );
   /* the whole object is transferred starting at the address of its first member */
   PARA_COMM_CALL(
      commMpi->usend(&interrupted, 1, datatype, destination, tag)
   );
   MPI_CALL(
      MPI_Type_free( &datatype )
   );
}

void
BbParaSolverTerminationStateMpi::receive(
      ParaComm *comm,
      int source,
      int tag
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI_Datatype datatype;
   datatype = createDatatype();
   MPI_CALL(
      MPI_Type_commit( &datatype )
   );
   PARA_COMM_CALL(
      commMpi->ureceive(&interrupted, 1, datatype, source, tag)
   );
   MPI_CALL(
      MPI_Type_free( &datatype )
   );
}
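
The pattern used above is standard MPI: compute member displacements with MPI_Get_address, build a derived datatype with MPI_Type_create_struct, commit it, transfer the object starting at the address of its first member, and free the type afterwards. The following stand-alone sketch illustrates that pattern outside of UG; the Stats struct, its fields, and the rank/tag values are made up for this example and are not part of the framework.

#include <mpi.h>
#include <cstdio>

/* hypothetical struct used only to demonstrate the derived-datatype pattern */
struct Stats
{
   int    nSolved;     /* an integer counter */
   double runningTime; /* a timing value */
};

int main(int argc, char **argv)
{
   MPI_Init(&argc, &argv);

   Stats s = { 0, 0.0 };

   /* displacements relative to the first member, as in createDatatype() */
   MPI_Aint base, addr;
   MPI_Get_address(&s.nSolved, &base);
   MPI_Get_address(&s.runningTime, &addr);

   int          blockLengths[2]  = { 1, 1 };
   MPI_Aint     displacements[2] = { 0, addr - base };
   MPI_Datatype types[2]         = { MPI_INT, MPI_DOUBLE };

   MPI_Datatype statsType;
   MPI_Type_create_struct(2, blockLengths, displacements, types, &statsType);
   MPI_Type_commit(&statsType);

   int rank;
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   if( rank == 1 )
   {
      s.nSolved = 42;
      s.runningTime = 1.5;
      /* the buffer address is that of the first member, mirroring
       * usend(&interrupted, 1, datatype, ...) above */
      MPI_Send(&s.nSolved, 1, statsType, 0, 0, MPI_COMM_WORLD);
   }
   else if( rank == 0 )
   {
      MPI_Recv(&s.nSolved, 1, statsType, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      std::printf("received nSolved=%d runningTime=%g\n", s.nSolved, s.runningTime);
   }

   MPI_Type_free(&statsType);
   MPI_Finalize();
   return 0;
}

Run the sketch with at least two ranks (for example, mpirun -np 2) so that rank 1 can send to rank 0.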
Referenced members and symbols

Methods of BbParaSolverTerminationStateMpi
- MPI_Datatype createDatatype(): create BbParaSolverTerminationStateMpi datatype
- void send(ParaComm *comm, int destination, int tag): send this object
- void receive(ParaComm *comm, int source, int tag): receive this object

Integer members packed into the datatype
- int interrupted: indicates whether this solver is interrupted or not. 0: not interrupted, 1: interrupted, ...
- int totalNSolved: accumulated number of nodes solved in this ParaSolver
- int minNSolved: minimum number of subtree nodes rooted from a ParaNode
- int maxNSolved: maximum number of subtree nodes rooted from a ParaNode
- int totalNSent: accumulated number of nodes sent from this ParaSolver
- int totalNImprovedIncumbent: accumulated number of improvements of the incumbent value in this ParaSolver
- int nParaTasksReceived: number of ParaTasks received in this ParaSolver
- int nParaNodesSolvedAtRoot: number of ParaNodes solved at the root node before sending
- int nParaNodesSolvedAtPreCheck: number of ParaNodes solved at pre-checking of root node solvability
- int nTransferredLocalCutsFromSolver: number of local cuts transferred from this Solver
- int minTransferredLocalCutsFromSolver: minimum number of local cuts transferred from this Solver
- int maxTransferredLocalCutsFromSolver: maximum number of local cuts transferred from this Solver
- int nTransferredBendersCutsFromSolver: number of Benders cuts transferred from this Solver
- int minTransferredBendersCutsFromSolver: minimum number of Benders cuts transferred from this Solver
- int maxTransferredBendersCutsFromSolver: maximum number of Benders cuts transferred from this Solver
- int nTotalRestarts: number of total restarts
- int minRestarts: minimum number of restarts
- int maxRestarts: maximum number of restarts
- int nTightened: number of tightened variable bounds during the racing stage
- int nTightenedInt: number of tightened integer variable bounds during the racing stage

Floating-point members packed into the datatype
- double runningTime: running time of this solver
- double idleTimeToFirstParaTask: idle time until solving of the first ParaTask starts
- double idleTimeBetweenParaTasks: idle time between ParaTask processing
- double idleTimeAfterLastParaTask: idle time after the last ParaTask was solved
- double idleTimeToWaitNotificationId: idle time waiting for notification-Id messages
- double idleTimeToWaitAckCompletion: idle time waiting for the ack-completion message
- double totalRootNodeTime: total time consumed by root node processes
- double minRootNodeTime: minimum time consumed by root node processes
- double maxRootNodeTime: maximum time consumed by root node processes
- double detTime: deterministic time, -1 if non-deterministic

Other referenced symbols
- ParaComm: base class of the communicator object (paraComm.h)
- DEF_PARA_COMM(para_comm, comm): helper macro (paraCommMpi.h)
- MPI_CALL(mpicall): helper macro (paraCommMpi.h)
- PARA_COMM_CALL(paracommcall): helper macro (paraComm.h)
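
For orientation only, the sketch below lists the members in the order in which createDatatype() computes their displacements: 23 MPI_INT blocks followed by 11 MPI_DOUBLE blocks, 34 in total. It mirrors the transfer layout built above, not necessarily the actual declaration order in the UG headers.

/* illustrative layout only; the real members are declared in
 * BbParaSolverTerminationState and its MPI extension in the UG headers */
struct TerminationStateLayout
{
   /* blocks 0-22: transferred as MPI_INT */
   int    interrupted;
   int    rank;
   int    totalNSolved;
   int    minNSolved;
   int    maxNSolved;
   int    totalNSent;
   int    totalNImprovedIncumbent;
   int    nParaTasksReceived;
   int    nParaTasksSolved;
   int    nParaNodesSolvedAtRoot;
   int    nParaNodesSolvedAtPreCheck;
   int    nTransferredLocalCutsFromSolver;
   int    minTransferredLocalCutsFromSolver;
   int    maxTransferredLocalCutsFromSolver;
   int    nTransferredBendersCutsFromSolver;
   int    minTransferredBendersCutsFromSolver;
   int    maxTransferredBendersCutsFromSolver;
   int    nTotalRestarts;
   int    minRestarts;
   int    maxRestarts;
   int    nTightened;
   int    nTightenedInt;
   int    calcTerminationState;
   /* blocks 23-33: transferred as MPI_DOUBLE */
   double runningTime;
   double idleTimeToFirstParaTask;
   double idleTimeBetweenParaTasks;
   double idleTimeAfterLastParaTask;
   double idleTimeToWaitNotificationId;
   double idleTimeToWaitAckCompletion;
   double idleTimeToWaitToken;
   double totalRootNodeTime;
   double minRootNodeTime;
   double maxRootNodeTime;
   double detTime;
};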