Scippy

UG

Ubiquity Generator framework

bbParaNodeMpi.cpp
Go to the documentation of this file.
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and software framework */
 4 /*                  UG --- Ubiquity Generator Framework                      */
5 /* */
 6 /*  Written by Yuji Shinano <shinano@zib.de>,                                */
7 /* Copyright (C) 2021 by Zuse Institute Berlin, */
8 /* licensed under LGPL version 3 or later. */
9 /* Commercial licenses are available through <licenses@zib.de> */
10 /* */
11 /* This code is free software; you can redistribute it and/or */
12 /* modify it under the terms of the GNU Lesser General Public License */
13 /* as published by the Free Software Foundation; either version 3 */
14 /* of the License, or (at your option) any later version. */
15 /* */
16 /* This program is distributed in the hope that it will be useful, */
17 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
18 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
19 /* GNU Lesser General Public License for more details. */
20 /* */
21 /* You should have received a copy of the GNU Lesser General Public License */
22 /* along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 /* */
24 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
25 
 26 /**@file bbParaNodeMpi.cpp
 27  * @brief BbParaNode extension for MPI communication.
28  * @author Yuji Shinano
29  *
30  *
31  *
32  */
33 
34 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
35 
36 
37 #include <mpi.h>
38 #include "bbParaNodeMpi.h"
39 
40 using namespace UG;
41 
42 MPI_Datatype
44  )
45 {
46  const int nBlocks = 15;
47 
48  MPI_Datatype datatype;
49 
50  MPI_Aint startAddress = 0;
51  MPI_Aint address = 0;
52 
53  int blockLengths[nBlocks];
54  MPI_Aint displacements[nBlocks];
55  MPI_Datatype types[nBlocks];
56 
57  for( int i = 0; i < nBlocks; i++ ){
58  blockLengths[i] = 1;
59  types[i] = MPI_INT;
60  }
61 
62  MPI_CALL(
63  MPI_Get_address( &taskId.subtaskId.lcId, &startAddress )
64  );
65  displacements[0] = 0;
66 
67  MPI_CALL(
68  MPI_Get_address( &taskId.subtaskId.globalSubtaskIdInLc, &address )
69  );
70  displacements[1] = address - startAddress;
71 
72  MPI_CALL(
73  MPI_Get_address( &taskId.subtaskId.solverId, &address )
74  );
75  displacements[2] = address - startAddress;
76 
77  MPI_CALL(
78  MPI_Get_address( &taskId.seqNum, &address )
79  );
80  displacements[3] = address - startAddress;
81 #ifdef _ALIBABA
82  types[3] = MPI_LONG;
83 #else
84  types[3] = MPI_LONG_LONG;
85 #endif
86 
87  MPI_CALL(
88  MPI_Get_address( &generatorTaskId.subtaskId.lcId, &address )
89  );
90  displacements[4] = address - startAddress;
91 
92  MPI_CALL(
93  MPI_Get_address( &generatorTaskId.subtaskId.globalSubtaskIdInLc, &address )
94  );
95  displacements[5] = address - startAddress;
96 
97  MPI_CALL(
98  MPI_Get_address( &generatorTaskId.subtaskId.solverId, &address )
99  );
100  displacements[6] = address - startAddress;
101 
102  MPI_CALL(
103  MPI_Get_address( &generatorTaskId.seqNum, &address )
104  );
105  displacements[7] = address - startAddress;
106 #ifdef _ALIBABA
107  types[7] = MPI_LONG;
108 #else
109  types[7] = MPI_LONG_LONG;
110 #endif
111 
112  MPI_CALL(
113  MPI_Get_address( &depth, &address )
114  );
115  displacements[8] = address - startAddress;
116 
117  MPI_CALL(
118  MPI_Get_address( &dualBoundValue, &address )
119  );
120  displacements[9] = address - startAddress;
121  types[9] = MPI_DOUBLE;
122 
123  MPI_CALL(
124  MPI_Get_address( &initialDualBoundValue, &address )
125  );
126  displacements[10] = address - startAddress;
127  types[10] = MPI_DOUBLE;
128 
129  MPI_CALL(
130  MPI_Get_address( &estimatedValue, &address )
131  );
132  displacements[11] = address - startAddress;
133  types[11] = MPI_DOUBLE;
134 
135  MPI_CALL(
136  MPI_Get_address( &diffSubproblemInfo, &address )
137  );
138  displacements[12] = address - startAddress;
139 
140  MPI_CALL(
141  MPI_Get_address( &basisInfo, &address )
142  );
143  displacements[13] = address - startAddress;
144 
145  MPI_CALL(
146  MPI_Get_address( &mergingStatus, &address )
147  );
148  displacements[14] = address - startAddress;
149 
150  MPI_CALL(
151  MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &datatype)
152  );
153 
154  return datatype;
155 }
156 
157 MPI_Datatype
159  )
160 {
161  const int nBlocks = 8;
162 
163  MPI_Datatype datatype;
164 
165  MPI_Aint startAddress = 0;
166  MPI_Aint address = 0;
167 
168  int blockLengths[nBlocks];
169  MPI_Aint displacements[nBlocks];
170  MPI_Datatype types[nBlocks];
171 
172  for( int i = 0; i < nBlocks; i++ ){
173  blockLengths[i] = 1;
174  types[i] = MPI_INT;
175  }
176 
177  MPI_CALL(
178  MPI_Get_address( &taskId.subtaskId.lcId, &startAddress )
179  );
180  displacements[0] = 0;
181 
182  MPI_CALL(
183  MPI_Get_address( &taskId.subtaskId.globalSubtaskIdInLc, &address )
184  );
185  displacements[1] = address - startAddress;
186 
187  MPI_CALL(
188  MPI_Get_address( &taskId.subtaskId.solverId, &address )
189  );
190  displacements[2] = address - startAddress;
191 
192  MPI_CALL(
193  MPI_Get_address( &taskId.seqNum, &address )
194  );
195  displacements[3] = address - startAddress;
196 #ifdef _ALIBABA
197  types[3] = MPI_LONG;
198 #else
199  types[3] = MPI_LONG_LONG;
200 #endif
201 
202  MPI_CALL(
203  MPI_Get_address( &generatorTaskId.subtaskId.lcId, &address )
204  );
205  displacements[4] = address - startAddress;
206 
207  MPI_CALL(
208  MPI_Get_address( &generatorTaskId.subtaskId.globalSubtaskIdInLc, &address )
209  );
210  displacements[5] = address - startAddress;
211 
212  MPI_CALL(
213  MPI_Get_address( &generatorTaskId.subtaskId.solverId, &address )
214  );
215  displacements[6] = address - startAddress;
216 
217  MPI_CALL(
218  MPI_Get_address( &generatorTaskId.seqNum, &address )
219  );
220  displacements[7] = address - startAddress;
221 #ifdef _ALIBABA
222  types[7] = MPI_LONG;
223 #else
224  types[7] = MPI_LONG_LONG;
225 #endif
226 
227  MPI_CALL(
228  MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &datatype)
229  );
230 
231  return datatype;
232 }
233 
234 
235 int
237  ParaComm *comm,
238  int root
239  )
240 {
241  DEF_PARA_COMM( commMpi, comm);
242 
243  MPI_Datatype datatype;
244  datatype = createDatatype();
245  MPI_CALL(
246  MPI_Type_commit( &datatype )
247  );
249  commMpi->ubcast(&taskId.subtaskId.lcId, 1, datatype, root)
250  );
251  MPI_CALL(
252  MPI_Type_free( &datatype )
253  );
254 
255  // root node does not have diffSubproblem
256  if( diffSubproblemInfo )
257  {
258  if( commMpi->getRank() != root )
259  {
260  diffSubproblem = commMpi->createParaDiffSubproblem();
261  }
262  diffSubproblem->bcast(commMpi, root);
263  }
264  return 0;
265 }
266 
267 int
269  ParaComm *comm,
270  int destination
271  )
272 {
273  DEF_PARA_COMM( commMpi, comm);
274 
275  MPI_Datatype datatype;
276  datatype = createDatatype();
277  MPI_CALL(
278  MPI_Type_commit( &datatype )
279  );
281  commMpi->usend(&taskId.subtaskId.lcId, 1, datatype, destination, TagTask)
282  );
283  MPI_CALL(
284  MPI_Type_free( &datatype )
285  );
286  // root node does not have diffSubproblem
287  if( diffSubproblemInfo ) diffSubproblem->send(commMpi, destination);
288  return 0;
289 }
290 
291 int
293  ParaComm *comm,
294  int destination
295  )
296 {
297  DEF_PARA_COMM( commMpi, comm);
298 
299  MPI_Datatype datatype;
300  datatype = createDatatype();
301  MPI_CALL(
302  MPI_Type_commit( &datatype )
303  );
305  commMpi->usend(&taskId.subtaskId.lcId, 1, datatype, destination, TagNewSubtreeRootNode)
306  );
307  MPI_CALL(
308  MPI_Type_free( &datatype )
309  );
310  // root node does not have diffSubproblem
311  if( diffSubproblemInfo ) diffSubproblem->send(commMpi, destination);
312  return 0;
313 }
314 
315 int
317  ParaComm *comm,
318  int destination, ///< destination rank
319  int tag
320  )
321 {
322  DEF_PARA_COMM( commMpi, comm);
323 
324  MPI_Datatype datatype;
325  datatype = createDatatypeForNodeId();
326  MPI_CALL(
327  MPI_Type_commit( &datatype )
328  );
330  commMpi->usend(&taskId.subtaskId.lcId, 1, datatype, destination, tag)
331  );
332  MPI_CALL(
333  MPI_Type_free( &datatype )
334  );
335  return 0;
336 }
337 
338 //int
339 //BbParaNodeMpi::sendReassignSelfSplitSubtreeRoot(
340 // ParaComm *comm,
341 // int destination ///< destination rank
342 // )
343 //{
344 // DEF_PARA_COMM( commMpi, comm);
345 //
346 // MPI_Datatype datatype;
347 // datatype = createDatatypeForNodeId();
348 // MPI_CALL(
349 // MPI_Type_commit( &datatype )
350 // );
351 // PARA_COMM_CALL(
352 // commMpi->usend(&taskId.subtaskId.lcId, 1, datatype, destination, TagReassignSelfSplitSubtreeRootNode)
353 // );
354 // MPI_CALL(
355 // MPI_Type_free( &datatype )
356 // );
357 // return 0;
358 //}
359 
360 int
362  DEF_PARA_COMM( commMpi, comm);
363 
364  MPI_Datatype datatype;
365  datatype = createDatatype();
366  MPI_CALL(
367  MPI_Type_commit( &datatype )
368  );
370  commMpi->ureceive(&taskId.subtaskId.lcId, 1, datatype, source, TagTask)
371  );
372  MPI_CALL(
373  MPI_Type_free( &datatype )
374  );
375 
376  if( diffSubproblemInfo )
377  {
378  diffSubproblem = commMpi->createParaDiffSubproblem();
379  diffSubproblem->receive(commMpi, source);
380  }
381 
382  return 0;
383 }
384 
385 int
387  ParaComm *comm,
388  int source
389  )
390 {
391  DEF_PARA_COMM( commMpi, comm);
392 
393  MPI_Datatype datatype;
394  datatype = createDatatype();
395  MPI_CALL(
396  MPI_Type_commit( &datatype )
397  );
399  commMpi->ureceive(&taskId.subtaskId.lcId, 1, datatype, source, TagNewSubtreeRootNode)
400  );
401  MPI_CALL(
402  MPI_Type_free( &datatype )
403  );
404 
405  if( diffSubproblemInfo )
406  {
407  diffSubproblem = commMpi->createParaDiffSubproblem();
408  diffSubproblem->receive(commMpi, source);
409  }
410 
411  return 0;
412 }
413 
414 int
416  ParaComm *comm,
417  int source,
418  int tag
419  )
420 {
421  DEF_PARA_COMM( commMpi, comm);
422 
423  MPI_Datatype datatype;
424  datatype = createDatatypeForNodeId();
425  MPI_CALL(
426  MPI_Type_commit( &datatype )
427  );
429  commMpi->ureceive(&taskId.subtaskId.lcId, 1, datatype, source, tag);
430  );
431  MPI_CALL(
432  MPI_Type_free( &datatype )
433  );
434 
435  return 0;
436 }
437 
438 
439 //int
440 //BbParaNodeMpi::receiveReassignSelfSplitSubtreeRoot(
441 // ParaComm *comm,
442 // int source
443 // )
444 //{
445 // DEF_PARA_COMM( commMpi, comm);
446 //
447 // MPI_Datatype datatype;
448 // datatype = createDatatypeForNodeId();
449 // MPI_CALL(
450 // MPI_Type_commit( &datatype )
451 // );
452 // PARA_COMM_CALL(
453 // commMpi->ureceive(&taskId.subtaskId.lcId, 1, datatype, source, TagReassignSelfSplitSubtreeRootNode);
454 // );
455 // MPI_CALL(
456 // MPI_Type_free( &datatype )
457 // );
458 //
459 // return 0;
460 //}
double estimatedValue
estimate value
Definition: paraTask.h:556
double dualBoundValue
dual bound value
Definition: bbParaNode.h:67
int basisInfo
indicate if basis information is including or not
Definition: bbParaNode.h:70
static const int TagNewSubtreeRootNode
Definition: bbParaTagDef.h:72
int bcast(ParaComm *comm, int root)
broadcast this object
static ScipParaCommTh * comm
Definition: fscip.cpp:73
int send(ParaComm *comm, int destination)
send this object
SubtaskId subtaskId
subtree id
Definition: paraTask.h:227
int solverId
Solver ID.
Definition: paraTask.h:65
int lcId
LoadCoordinator ID.
Definition: paraTask.h:63
int receiveSubtreeRootNodeId(ParaComm *comm, int source, int tag)
receive this object
int diffSubproblemInfo
1: with diffSubproblem, 0: no diffSubproblem
Definition: paraTask.h:557
double initialDualBoundValue
dual bound value when this node is created This value is updated to precise one when there is guarant...
Definition: bbParaNode.h:68
int sendNewSubtreeRoot(ParaComm *comm, int destination)
send new subtree root node
#define PARA_COMM_CALL(paracommcall)
Definition: paraComm.h:47
TaskId generatorTaskId
subtree root task id of generator
Definition: paraTask.h:550
int globalSubtaskIdInLc
Global Subtask ID in Solvers managed by LoadCoordinator.
Definition: paraTask.h:64
ParaDiffSubproblem * diffSubproblem
difference between solving instance data and subproblem data
Definition: paraTask.h:558
int mergingStatus
merging status: -1 - no merging node, 0 - checking, 1 - merged (representative) 2 - merged to the oth...
Definition: bbParaNode.h:71
#define DEF_PARA_COMM(para_comm, comm)
int receiveNewSubtreeRoot(ParaComm *comm, int source)
receive this object
MPI_Datatype createDatatypeForNodeId()
create BbParaNode datatype
virtual int send(ParaComm *comm, int dest)=0
send function for ParaDiffSubproblem object
int sendSubtreeRootNodeId(ParaComm *comm, int destination, int tag)
send subtree root to be removed
MPI_Datatype createDatatype()
create BbParaNode datatype
virtual int receive(ParaComm *comm, int source)=0
receive function for ParaDiffSubproblem object
long long seqNum
sequential number in the subtree
Definition: paraTask.h:228
static const int TagTask
Definition: paraTagDef.h:47
int receive(ParaComm *comm, int source)
receive this object
int depth
depth from the root node of original tree
Definition: bbParaNode.h:66
virtual int bcast(ParaComm *comm, int root)=0
broadcast function for ParaDiffSubproblem object
TaskId taskId
solving task information
Definition: paraTask.h:549
#define MPI_CALL(mpicall)
Definition: paraCommMpi.h:68
Base class of communicator object.
Definition: paraComm.h:101