bbParaCalculationStateMpi.cpp
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*                                                                           */
/*             This file is part of the program and software framework       */
/*                  UG --- Ubiquity Generator Framework                      */
/*                                                                           */
/*  Copyright Written by Yuji Shinano <shinano@zib.de>,                      */
/*            Copyright (C) 2021-2024 by Zuse Institute Berlin,              */
/*            licensed under LGPL version 3 or later.                        */
/*            Commercial licenses are available through <licenses@zib.de>    */
/*                                                                           */
/* This code is free software; you can redistribute it and/or                */
/* modify it under the terms of the GNU Lesser General Public License        */
/* as published by the Free Software Foundation; either version 3            */
/* of the License, or (at your option) any later version.                    */
/*                                                                           */
/* This program is distributed in the hope that it will be useful,           */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of            */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the             */
/* GNU Lesser General Public License for more details.                       */
/*                                                                           */
/* You should have received a copy of the GNU Lesser General Public License  */
/* along with this program.  If not, see <http://www.gnu.org/licenses/>.     */
/*                                                                           */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/**@file    bbParaCalculationStateMpi.cpp
 * @brief   CalculationState object extension for MPI communication
 * @author  Yuji Shinano
 *
 *
 *
 */

/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/


#include "bbParaCalculationStateMpi.h"

using namespace UG;

///
/// create MPI datatype of this object
/// @return created MPI datatype
///
MPI_Datatype
BbParaCalculationStateMpi::createDatatype(
      )
{

   const int nBlocks = 22;

   MPI_Datatype datatype;

   MPI_Aint startAddress = 0;
   MPI_Aint address = 0;

   int blockLengths[nBlocks];
   MPI_Aint displacements[nBlocks];
   MPI_Datatype types[nBlocks];

   for( int i = 0; i < nBlocks; i++ ){
      blockLengths[i] = 1;
      types[i] = MPI_INT;
   }

   MPI_CALL(
      MPI_Get_address( &compTime, &startAddress )
   );
   displacements[0] = 0;
   types[0] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &rootTime, &address )
   );
   displacements[1] = address - startAddress;
   types[1] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &nSolved, &address )
   );
   displacements[2] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nSent, &address )
   );
   displacements[3] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nImprovedIncumbent, &address )
   );
   displacements[4] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &terminationState, &address )
   );
   displacements[5] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nSolvedWithNoPreprocesses, &address )
   );
   displacements[6] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nSimplexIterRoot, &address )
   );
   displacements[7] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &averageSimplexIter, &address )
   );
   displacements[8] = address - startAddress;
   types[8] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &nTransferredLocalCuts, &address )
   );
   displacements[9] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &minTransferredLocalCuts, &address )
   );
   displacements[10] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &maxTransferredLocalCuts, &address )
   );
   displacements[11] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nTransferredBendersCuts, &address )
   );
   displacements[12] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &minTransferredBendersCuts, &address )
   );
   displacements[13] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &maxTransferredBendersCuts, &address )
   );
   displacements[14] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nRestarts, &address )
   );
   displacements[15] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &minIisum, &address )
   );
   displacements[16] = address - startAddress;
   types[16] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &maxIisum, &address )
   );
   displacements[17] = address - startAddress;
   types[17] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &minNii, &address )
   );
   displacements[18] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &maxNii, &address )
   );
   displacements[19] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &dualBound, &address )
   );
   displacements[20] = address - startAddress;
   types[20] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &nSelfSplitNodesLeft, &address )
   );
   displacements[21] = address - startAddress;

   MPI_CALL(
      MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &datatype)
   );

   return datatype;

}
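
//
// A minimal sketch (not part of the UG sources) of the MPI derived-datatype
// pattern used above: per-block lengths, byte displacements relative to the
// first member, and per-block types are combined with MPI_Type_create_struct.
// The struct "Example" and all variable names below are hypothetical and only
// illustrate the technique.
//
// @code
//    struct Example { double compTime; int nSolved; };
//    Example ex;
//    MPI_Aint base, addr;
//    int blockLengths[2] = { 1, 1 };
//    MPI_Aint displacements[2];
//    MPI_Datatype types[2] = { MPI_DOUBLE, MPI_INT };
//    MPI_CALL( MPI_Get_address( &ex.compTime, &base ) );
//    displacements[0] = 0;                       // first member is the base
//    MPI_CALL( MPI_Get_address( &ex.nSolved, &addr ) );
//    displacements[1] = addr - base;             // offset of second member
//    MPI_Datatype exampleType;
//    MPI_CALL( MPI_Type_create_struct( 2, blockLengths, displacements, types, &exampleType ) );
//    MPI_CALL( MPI_Type_commit( &exampleType ) );
//    // one instance is then transferred starting at the first member, e.g.
//    // MPI_Send( &ex.compTime, 1, exampleType, dest, tag, MPI_COMM_WORLD );
//    MPI_CALL( MPI_Type_free( &exampleType ) );
// @endcode
//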

///
/// send this object to destination
///
void
BbParaCalculationStateMpi::send(
      ParaComm *comm,       /**< communicator used to send this object */
      int destination,      /**< destination rank to send */
      int tag               /**< tag to show this object */
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI_Datatype datatype;
   datatype = createDatatype();
   MPI_CALL(
      MPI_Type_commit( &datatype )
   );
   PARA_COMM_CALL(
      commMpi->usend(&compTime, 1, datatype, destination, tag)
   );
   MPI_CALL(
      MPI_Type_free( &datatype )
   );
}

///
/// receive this object from source
///
void
BbParaCalculationStateMpi::receive(
      ParaComm *comm,       /**< communicator used to receive this object */
      int source,           /**< source rank to receive this object */
      int tag               /**< tag to show this object */
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI_Datatype datatype;
   datatype = createDatatype();
   MPI_CALL(
      MPI_Type_commit( &datatype )
   );
   PARA_COMM_CALL(
      commMpi->ureceive(&compTime, 1, datatype, source, tag)
   );
   MPI_CALL(
      MPI_Type_free( &datatype )
   );
}
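
//
// Usage sketch (not part of the UG sources): a Solver-side object is sent to
// another rank and rebuilt there with the matching receive. The rank numbers,
// the tag name "tagCalcState", and the surrounding variables are hypothetical;
// in UG the actual tag constant and ranks are supplied by the framework.
//
// @code
//    // sender side (e.g., a Solver process)
//    BbParaCalculationStateMpi calcState;                    // filled by the solving routine
//    calcState.send( paraComm, 0, tagCalcState );            // send to rank 0
//
//    // receiver side (e.g., the LoadCoordinator)
//    BbParaCalculationStateMpi receivedState;
//    receivedState.receive( paraComm, solverRank, tagCalcState );
// @endcode
//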