scipParaInitialStatMpi.cpp
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*                                                                           */
/*        This file is part of the program and software framework           */
/*                  UG --- Ubiquity Generator Framework                     */
/*                                                                           */
/*        Copyright Written by Yuji Shinano <shinano@zib.de>,               */
/*        Copyright (C) 2021-2024 by Zuse Institute Berlin,                  */
/*        licensed under LGPL version 3 or later.                           */
/*        Commercial licenses are available through <licenses@zib.de>       */
/*                                                                           */
/* This code is free software; you can redistribute it and/or               */
/* modify it under the terms of the GNU Lesser General Public License       */
/* as published by the Free Software Foundation; either version 3           */
/* of the License, or (at your option) any later version.                   */
/*                                                                           */
/* This program is distributed in the hope that it will be useful,          */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of           */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the            */
/* GNU Lesser General Public License for more details.                      */
/*                                                                           */
/* You should have received a copy of the GNU Lesser General Public License */
/* along with this program.  If not, see <http://www.gnu.org/licenses/>.    */
/*                                                                           */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/**@file    scipParaInitialStatMpi.cpp
 * @brief   ScipParaInitialStat extension for MPI communication.
 * @author  Yuji Shinano
 */

/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/

#include "scipParaInitialStatMpi.h"

using namespace UG;
using namespace ParaSCIP;

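Every raw MPI call in this file goes through the MPI_CALL macro from paraCommMpi.h, and every call into the ParaComm layer through PARA_COMM_CALL from paraComm.h; their exact definitions live in those headers. As a rough illustration of the idea only, an error-checking wrapper of this kind could look like the sketch below (hypothetical name and error policy, not the UG macro):

#include <mpi.h>
#include <cstdio>

// Hypothetical wrapper, deliberately named differently from the UG macro: it runs
// the MPI call and aborts with a diagnostic if the call does not return MPI_SUCCESS.
#define MY_MPI_CALL( mpicall )                                        \
   do                                                                 \
   {                                                                  \
      int _err = ( mpicall );                                         \
      if( _err != MPI_SUCCESS )                                       \
      {                                                               \
         std::fprintf(stderr, "MPI error %d at %s:%d\n",              \
                      _err, __FILE__, __LINE__);                      \
         MPI_Abort(MPI_COMM_WORLD, _err);                             \
      }                                                               \
   } while( 0 )
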
/** create the datatype for the fixed-size part of ScipParaInitialStat (the four counters) */
MPI_Datatype
ScipParaInitialStatMpi::createDatatype1(
      )
{
   const int nBlocks = 4;

   MPI_Datatype datatype;

   MPI_Aint startAddress = 0;
   MPI_Aint address = 0;

   int blockLengths[nBlocks];
   MPI_Aint displacements[nBlocks];
   MPI_Datatype types[nBlocks];

   MPI_CALL(
      MPI_Get_address( &maxDepth, &startAddress )
   );
   blockLengths[0] = 1;
   displacements[0] = 0;
   types[0] = MPI_INT;

   MPI_CALL(
      MPI_Get_address( &maxTotalDepth, &address )
   );
   blockLengths[1] = 1;
   displacements[1] = address - startAddress;
   types[1] = MPI_INT;

   MPI_CALL(
      MPI_Get_address( &nVarBranchStatsDown, &address )
   );
   blockLengths[2] = 1;
   displacements[2] = address - startAddress;
   types[2] = MPI_INT;

   MPI_CALL(
      MPI_Get_address( &nVarBranchStatsUp, &address )
   );
   blockLengths[3] = 1;
   displacements[3] = address - startAddress;
   types[3] = MPI_INT;

   MPI_CALL(
      MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &datatype)
   );

   return datatype;
}
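createDatatype1() follows the usual pattern for describing a non-contiguous object to MPI: the address of the first member serves as the reference point, every further block is registered by its offset from that address, and MPI_Type_create_struct() turns the block lengths, displacements and element types into one derived datatype. A minimal standalone sketch of the same technique, using a hypothetical Header struct rather than the ScipParaInitialStat members:

#include <mpi.h>

struct Header
{
   int maxDepth;
   int maxTotalDepth;
};

// Build a derived datatype for Header from member offsets, mirroring createDatatype1().
MPI_Datatype buildHeaderType(Header &h)
{
   int          blockLengths[2] = { 1, 1 };
   MPI_Datatype types[2]        = { MPI_INT, MPI_INT };
   MPI_Aint     displacements[2];
   MPI_Aint     startAddress;
   MPI_Aint     address;

   MPI_Get_address(&h.maxDepth, &startAddress);   // reference point = first block
   displacements[0] = 0;
   MPI_Get_address(&h.maxTotalDepth, &address);
   displacements[1] = address - startAddress;     // offset of the second block

   MPI_Datatype datatype;
   MPI_Type_create_struct(2, blockLengths, displacements, types, &datatype);
   return datatype;   // caller commits it, communicates with &h.maxDepth as buffer, then frees it
}

Because the displacements are measured from the first member rather than from MPI_BOTTOM, the committed type is used together with the address of that first member (here &maxDepth) as the send or receive buffer, which is exactly what send() and receive() below do.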

/** create the datatype for the variable-size branching statistics of ScipParaInitialStat */
MPI_Datatype
ScipParaInitialStatMpi::createDatatype2(
      bool memAllocNecessary
      )
{
   assert( nVarBranchStatsDown != 0 || nVarBranchStatsUp != 0 );

   int nBlocks = 0;

   MPI_Datatype datatype;

   MPI_Aint startAddress = 0;
   MPI_Aint address = 0;

   int blockLengths[17];          // reserve maximum number of elements
   MPI_Aint displacements[17];    // reserve maximum number of elements
   MPI_Datatype types[17];        // reserve maximum number of elements

   if( nVarBranchStatsDown )
   {
      if( memAllocNecessary )
      {
         idxLBranchStatsVarsDown = new int[nVarBranchStatsDown];
         nVarBranchingDown = new int[nVarBranchStatsDown];
         downpscost = new SCIP_Real[nVarBranchStatsDown];
         downvsids = new SCIP_Real[nVarBranchStatsDown];
         downconflen = new SCIP_Real[nVarBranchStatsDown];
         downinfer = new SCIP_Real[nVarBranchStatsDown];
         downcutoff = new SCIP_Real[nVarBranchStatsDown];
      }

      MPI_CALL(
         MPI_Get_address( idxLBranchStatsVarsDown, &startAddress )
      );
      displacements[nBlocks] = 0;
      blockLengths[nBlocks] = nVarBranchStatsDown;
      types[nBlocks] = MPI_INT;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( nVarBranchingDown, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsDown;
      types[nBlocks] = MPI_INT;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( downpscost, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsDown;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( downvsids, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsDown;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( downconflen, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsDown;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( downinfer, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsDown;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( downcutoff, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsDown;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;
   }

   if( nVarBranchStatsUp )
   {
      if( memAllocNecessary )
      {
         idxLBranchStatsVarsUp = new int[nVarBranchStatsUp];
         nVarBranchingUp = new int[nVarBranchStatsUp];
         uppscost = new SCIP_Real[nVarBranchStatsUp];
         upvsids = new SCIP_Real[nVarBranchStatsUp];
         upconflen = new SCIP_Real[nVarBranchStatsUp];
         upinfer = new SCIP_Real[nVarBranchStatsUp];
         upcutoff = new SCIP_Real[nVarBranchStatsUp];
      }

      if( nBlocks == 0 )
      {
         MPI_CALL(
            MPI_Get_address( idxLBranchStatsVarsUp, &startAddress )
         );
         displacements[nBlocks] = 0;
      }
      else
      {
         MPI_CALL(
            MPI_Get_address( idxLBranchStatsVarsUp, &address )
         );
         displacements[nBlocks] = address - startAddress;
      }
      blockLengths[nBlocks] = nVarBranchStatsUp;
      types[nBlocks] = MPI_INT;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( nVarBranchingUp, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsUp;
      types[nBlocks] = MPI_INT;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( uppscost, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsUp;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( upvsids, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsUp;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( upconflen, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsUp;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( upinfer, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsUp;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;

      MPI_CALL(
         MPI_Get_address( upcutoff, &address )
      );
      displacements[nBlocks] = address - startAddress;
      blockLengths[nBlocks] = nVarBranchStatsUp;
      types[nBlocks] = MPI_DOUBLE;
      nBlocks++;
   }

   MPI_CALL(
      MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &datatype)
   );

   return datatype;
}

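The memAllocNecessary flag distinguishes the two sides of the transfer: the receiver calls createDatatype2(true) so that the index and statistics arrays are allocated first and the displacements computed from their addresses describe exactly the buffers the incoming message is unpacked into, while the sender calls createDatatype2(false) because its arrays already hold the data. Since the displacements are taken from the addresses of this particular object's heap arrays, the resulting datatype is only valid for this object, which is why it is committed, used once, and freed again in send() and receive() below.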
/** send initial statistics data to the destination rank */
void
ScipParaInitialStatMpi::send(ParaComm *comm, int destination)
{
   DEF_PARA_COMM( commMpi, comm);
   MPI_Datatype datatype1;
   datatype1 = createDatatype1();
   MPI_CALL(
      MPI_Type_commit( &datatype1 )
   );
   PARA_COMM_CALL(
      commMpi->usend(&maxDepth, 1, datatype1, destination, TagInitialStat)
   );
   MPI_CALL(
      MPI_Type_free( &datatype1 )
   );

   if( nVarBranchStatsDown != 0 || nVarBranchStatsUp != 0 )
   {
      MPI_Datatype datatype2;
      datatype2 = createDatatype2(false);
      MPI_CALL(
         MPI_Type_commit( &datatype2 )
      );
      if( nVarBranchStatsDown )
      {
         PARA_COMM_CALL(
            commMpi->usend(idxLBranchStatsVarsDown, 1, datatype2, destination, TagInitialStat)
         );
      }
      else
      {
         if( nVarBranchStatsUp )
         {
            PARA_COMM_CALL(
               commMpi->usend(idxLBranchStatsVarsUp, 1, datatype2, destination, TagInitialStat)
            );
         }
      }
      MPI_CALL(
         MPI_Type_free( &datatype2 )
      );
   }
}

/** receive initial statistics data from the source rank */
void
ScipParaInitialStatMpi::receive(ParaComm *comm, int source)
{
   DEF_PARA_COMM( commMpi, comm);
   MPI_Datatype datatype1;
   datatype1 = createDatatype1();
   MPI_CALL(
      MPI_Type_commit( &datatype1 )
   );
   PARA_COMM_CALL(
      commMpi->ureceive(&maxDepth, 1, datatype1, source, TagInitialStat)
   );
   MPI_CALL(
      MPI_Type_free( &datatype1 )
   );

   if( nVarBranchStatsDown != 0 || nVarBranchStatsUp != 0 )
   {
      MPI_Datatype datatype2;
      datatype2 = createDatatype2(true);
      MPI_CALL(
         MPI_Type_commit( &datatype2 )
      );
      if( nVarBranchStatsDown )
      {
         PARA_COMM_CALL(
            commMpi->ureceive(idxLBranchStatsVarsDown, 1, datatype2, source, TagInitialStat)
         );
      }
      else
      {
         if( nVarBranchStatsUp )
         {
            PARA_COMM_CALL(
               commMpi->ureceive(idxLBranchStatsVarsUp, 1, datatype2, source, TagInitialStat)
            );
         }
      }
      MPI_CALL(
         MPI_Type_free( &datatype2 )
      );
   }
}
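send() and receive() implement a two-phase protocol: the first message, built from datatype1, always carries the four fixed-size counters, and only when one of the counters reports branching statistics is a second message with datatype2 exchanged, so the receiver learns from the first message whether buffers must be allocated and how large they are. A standalone sketch of this count-then-payload pattern in plain MPI (hypothetical example with raw MPI_Send/MPI_Recv, not part of the UG sources; run with two ranks):

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
   MPI_Init(&argc, &argv);
   int rank;
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);

   if( rank == 0 )
   {
      int n = 3;                                    // phase 1: send the count
      MPI_Send(&n, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
      if( n > 0 )                                   // phase 2: send the payload only if it exists
      {
         std::vector<double> stats = { 0.5, 1.5, 2.5 };
         MPI_Send(stats.data(), n, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD);
      }
   }
   else if( rank == 1 )
   {
      int n = 0;                                    // phase 1: receive the count
      MPI_Recv(&n, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      if( n > 0 )                                   // phase 2: allocate, then receive the payload
      {
         std::vector<double> stats(n);
         MPI_Recv(stats.data(), n, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
         std::printf("received %d statistics values\n", n);
      }
   }

   MPI_Finalize();
   return 0;
}

In the file above the same idea is realized with the usend()/ureceive() wrappers of the ParaComm layer and the TagInitialStat message tag instead of raw MPI_Send/MPI_Recv.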