scipParaInstanceMpi.cpp
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*                                                                           */
/*          This file is part of the program and software framework         */
/*                    UG --- Ubiquity Generator Framework                   */
/*                                                                           */
/*  Written by Yuji Shinano <shinano@zib.de>,                                */
/*  Copyright (C) 2021-2024 by Zuse Institute Berlin,                        */
/*  licensed under LGPL version 3 or later.                                  */
/*  Commercial licenses are available through <licenses@zib.de>              */
/*                                                                           */
/*  This code is free software; you can redistribute it and/or               */
/*  modify it under the terms of the GNU Lesser General Public License       */
/*  as published by the Free Software Foundation; either version 3           */
/*  of the License, or (at your option) any later version.                   */
/*                                                                           */
/*  This program is distributed in the hope that it will be useful,          */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of           */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the            */
/*  GNU Lesser General Public License for more details.                      */
/*                                                                           */
/*  You should have received a copy of the GNU Lesser General Public License */
/*  along with this program.  If not, see <http://www.gnu.org/licenses/>.    */
/*                                                                           */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/**@file    scipParaInstanceMpi.cpp
 * @brief   ScipParaInstance extension for MPI communication.
 * @author  Yuji Shinano
 */

/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/

#include <cassert>
#include <cstring>
#include "scipParaCommMpi.h"
#include "scipParaInstanceMpi.h"

using namespace UG;
using namespace ParaSCIP;

/** create ScipInstancePrePreDatatype */
MPI_Datatype
ScipParaInstanceMpi::createDatatype1(
   )
{
   const int nBlocks = 21;
   MPI_Datatype prePreDatatype;

   MPI_Aint startAddress = 0;
   MPI_Aint address = 0;

   int blockLengths[nBlocks];
   MPI_Aint displacements[nBlocks];
   MPI_Datatype types[nBlocks];

   for( int i = 0; i < nBlocks; i++ )
   {
      blockLengths[i] = 1;
      types[i] = MPI_INT;
   }

   MPI_CALL(
      MPI_Get_address( &lProbName, &startAddress )
   );
   displacements[0] = 0;

   MPI_CALL(
      MPI_Get_address( &nCopies, &address )
   );
   displacements[1] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &origObjSense, &address )
   );
   displacements[2] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &objScale, &address )
   );
   displacements[3] = address - startAddress;
   types[3] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &objOffset, &address )
   );
   displacements[4] = address - startAddress;
   types[4] = MPI_DOUBLE;

   MPI_CALL(
      MPI_Get_address( &nVars, &address )
   );
   displacements[5] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &varIndexRange, &address )
   );
   displacements[6] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &lVarNames, &address )
   );
   displacements[7] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nConss, &address )
   );
   displacements[8] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &lConsNames, &address )
   );
   displacements[9] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nLinearConss, &address )
   );
   displacements[10] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nSetppcConss, &address )
   );
   displacements[11] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nLogicorConss, &address )
   );
   displacements[12] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nKnapsackConss, &address )
   );
   displacements[13] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nVarboundConss, &address )
   );
   displacements[14] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nVarBoundDisjunctionConss, &address )
   );
   displacements[15] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nSos1Conss, &address )
   );
   displacements[16] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nSos2Conss, &address )
   );
   displacements[17] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &nAggregatedConss, &address )
   );
   displacements[18] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &lAggregatedVarNames, &address )
   );
   displacements[19] = address - startAddress;

   MPI_CALL(
      MPI_Get_address( &lAggregatedConsNames, &address )
   );
   displacements[20] = address - startAddress;

   MPI_CALL(
      MPI_Type_create_struct(nBlocks, blockLengths, displacements, types, &prePreDatatype)
   );

   return prePreDatatype;
}
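
/* createDatatype1 above is the standard MPI derived-datatype recipe: take the
 * address of one member as the origin, record every further member as a
 * (displacement, blocklength, type) triple, and build the type with
 * MPI_Type_create_struct.  A minimal sketch of the same idiom follows;
 * "createHeaderType" and its parameters are illustrative only, not part of
 * UG. */
static MPI_Datatype
createHeaderType(
   int &nItems,     /* plays the role of lProbName above */
   double &scale    /* plays the role of objScale above  */
   )
{
   int          blockLengths[2]  = { 1, 1 };
   MPI_Aint     displacements[2];
   MPI_Datatype types[2]         = { MPI_INT, MPI_DOUBLE };

   MPI_Aint startAddress;
   MPI_Aint address;
   MPI_Get_address( &nItems, &startAddress );
   displacements[0] = 0;                        /* origin member */
   MPI_Get_address( &scale, &address );
   displacements[1] = address - startAddress;   /* offset from the origin */

   MPI_Datatype headerType;
   MPI_Type_create_struct( 2, blockLengths, displacements, types, &headerType );
   return headerType;   /* the caller commits it, communicates with &nItems as
                         * the buffer, and frees it, as in bcast() below */
}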

void
ScipParaInstanceMpi::allocateMemoryForDatatype2(
   )
{
   if( !lProbName ) THROW_LOGICAL_ERROR1("No problem name");
   probName = new char[lProbName+1];
   if( nVars )
   {
      varLbs = new SCIP_Real[nVars];
      varUbs = new SCIP_Real[nVars];
      objCoefs = new SCIP_Real[nVars];
      varTypes = new int[nVars];
      if( lVarNames ) varNames = new char[lVarNames];
      posVarNames = new int[nVars];
   }
   if( nConss )
   {
      if( lConsNames ) consNames = new char[lConsNames];
      posConsNames = new int[nConss];
   }
   if( nLinearConss )
   {
      idxLinearConsNames = new int[nLinearConss];
      linearLhss = new SCIP_Real[nLinearConss];
      linearRhss = new SCIP_Real[nLinearConss];
      nLinearCoefs = new int[nLinearConss];
   }
   if( nSetppcConss )
   {
      idxSetppcConsNames = new int[nSetppcConss];
      nIdxSetppcVars = new int[nSetppcConss];
      setppcTypes = new int[nSetppcConss];
   }
   if( nLogicorConss )
   {
      idxLogicorConsNames = new int[nLogicorConss];
      nIdxLogicorVars = new int[nLogicorConss];
   }
   if( nKnapsackConss )
   {
      idxKnapsackConsNames = new int[nKnapsackConss];
      capacities = new SCIP_Longint[nKnapsackConss];
      nLKnapsackCoefs = new int[nKnapsackConss];
   }
   if( nVarboundConss )
   {
      idxVarboundConsNames = new int[nVarboundConss];
      varboundLhss = new SCIP_Real[nVarboundConss];
      varboundRhss = new SCIP_Real[nVarboundConss];
      idxVarboundCoefVar1s = new int[nVarboundConss];
      varboundCoef2s = new SCIP_Real[nVarboundConss];
      idxVarboundCoefVar2s = new int[nVarboundConss];
   }
   if( nVarBoundDisjunctionConss )
   {
      idxBoundDisjunctionConsNames = new int[nVarBoundDisjunctionConss];
      nVarsBoundDisjunction = new int[nVarBoundDisjunctionConss];
   }
   if( nSos1Conss )
   {
      idxSos1ConsNames = new int[nSos1Conss];
      nSos1Coefs = new int[nSos1Conss];
   }
   if( nSos2Conss )
   {
      idxSos2ConsNames = new int[nSos2Conss];
      nSos2Coefs = new int[nSos2Conss];
   }
   if( nAggregatedConss )
   {
      if( lAggregatedVarNames ) aggregatedVarNames = new char[lAggregatedVarNames];
      posAggregatedVarNames = new int[nAggregatedConss];
      if( lAggregatedConsNames ) aggregatedConsNames = new char[lAggregatedConsNames];
      posAggregatedConsNames = new int[nAggregatedConss];
      aggregatedLhsAndLhss = new SCIP_Real[nAggregatedConss];
      nAggregatedCoefs = new int[nAggregatedConss];
   }
}

/** create ScipInstancePreDatatype */
MPI_Datatype
ScipParaInstanceMpi::createDatatype2(
   bool memAllocNecessary
   )
{
   const int nBlocks = 37;
   MPI_Datatype datatype;

   MPI_Aint startAddress = 0;
   MPI_Aint address = 0;

   if( memAllocNecessary )
   {
      allocateMemoryForDatatype2();
   }

   int blockLengths[nBlocks];
   MPI_Aint displacements[nBlocks];
   MPI_Datatype types[nBlocks];

   int n = 0;

   MPI_CALL(
      MPI_Get_address( probName, &startAddress )
   );
   displacements[n] = 0;
   blockLengths[n] = lProbName + 1;
   types[n++] = MPI_CHAR;

   if( nVars )
   {
      MPI_CALL(
         MPI_Get_address( varLbs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVars;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( varUbs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVars;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( objCoefs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVars;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( varTypes, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVars;
      types[n++] = MPI_INT;

      if( lVarNames )
      {
         MPI_CALL(
            MPI_Get_address( varNames, &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = lVarNames;
         types[n++] = MPI_CHAR;
      }

      MPI_CALL(
         MPI_Get_address( posVarNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVars;
      types[n++] = MPI_INT;
   }

   if( nConss )
   {
      if( lConsNames )
      {
         MPI_CALL(
            MPI_Get_address( consNames, &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = lConsNames;
         types[n++] = MPI_CHAR;
      }
      MPI_CALL(
         MPI_Get_address( posConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nConss;
      types[n++] = MPI_INT;
   }

   if( nLinearConss )
   {
      MPI_CALL(
         MPI_Get_address( idxLinearConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nLinearConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( linearLhss, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nLinearConss;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( linearRhss, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nLinearConss;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( nLinearCoefs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nLinearConss;
      types[n++] = MPI_INT;
   }

   if( nSetppcConss )
   {
      MPI_CALL(
         MPI_Get_address( idxSetppcConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nSetppcConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( nIdxSetppcVars, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nSetppcConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( setppcTypes, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nSetppcConss;
      types[n++] = MPI_INT;
   }

   if( nLogicorConss )
   {
      MPI_CALL(
         MPI_Get_address( idxLogicorConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nLogicorConss;
      types[n++] = MPI_INT;
      MPI_CALL(
         MPI_Get_address( nIdxLogicorVars, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nLogicorConss;
      types[n++] = MPI_INT;
   }

   if( nKnapsackConss )
   {
      MPI_CALL(
         MPI_Get_address( idxKnapsackConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nKnapsackConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( capacities, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nKnapsackConss;
#ifdef _ALIBABA
      types[n++] = MPI_LONG;
#else
      types[n++] = MPI_LONG_LONG;
#endif

      MPI_CALL(
         MPI_Get_address( nLKnapsackCoefs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nKnapsackConss;
      types[n++] = MPI_INT;
   }

   if( nVarboundConss )
   {
      MPI_CALL(
         MPI_Get_address( idxVarboundConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarboundConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( varboundLhss, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarboundConss;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( varboundRhss, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarboundConss;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( idxVarboundCoefVar1s, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarboundConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( varboundCoef2s, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarboundConss;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( idxVarboundCoefVar2s, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarboundConss;
      types[n++] = MPI_INT;
   }

   if( nVarBoundDisjunctionConss )
   {
      MPI_CALL(
         MPI_Get_address( idxBoundDisjunctionConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarBoundDisjunctionConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( nVarsBoundDisjunction, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nVarBoundDisjunctionConss;
      types[n++] = MPI_INT;
   }

   if( nSos1Conss )
   {
      MPI_CALL(
         MPI_Get_address( idxSos1ConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nSos1Conss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( nSos1Coefs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nSos1Conss;
      types[n++] = MPI_INT;
   }

   if( nSos2Conss )
   {
      MPI_CALL(
         MPI_Get_address( idxSos2ConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nSos2Conss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( nSos2Coefs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nSos2Conss;
      types[n++] = MPI_INT;
   }

   if( nAggregatedConss )
   {
      if( lAggregatedVarNames )
      {
         MPI_CALL(
            MPI_Get_address( aggregatedVarNames, &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = lAggregatedVarNames;
         types[n++] = MPI_CHAR;
      }

      MPI_CALL(
         MPI_Get_address( posAggregatedVarNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nAggregatedConss;
      types[n++] = MPI_INT;

      if( lAggregatedConsNames )
      {
         MPI_CALL(
            MPI_Get_address( aggregatedConsNames, &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = lAggregatedConsNames;
         types[n++] = MPI_CHAR;
      }

      MPI_CALL(
         MPI_Get_address( posAggregatedConsNames, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nAggregatedConss;
      types[n++] = MPI_INT;

      MPI_CALL(
         MPI_Get_address( aggregatedLhsAndLhss, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nAggregatedConss;
      types[n++] = MPI_DOUBLE;

      MPI_CALL(
         MPI_Get_address( nAggregatedCoefs, &address )
      );
      displacements[n] = address - startAddress;
      blockLengths[n] = nAggregatedConss;
      types[n++] = MPI_INT;
   }

   MPI_CALL(
      MPI_Type_create_struct(n, blockLengths, displacements, types, &datatype)
   );

   return datatype;
}
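
/* createDatatype2 measures every displacement relative to probName, so the
 * committed datatype is only meaningful when probName is also passed as the
 * buffer argument, as bcast() below does.  The root rank builds the type over
 * its existing instance data (memAllocNecessary == false); receiving ranks
 * pass true so that allocateMemoryForDatatype2() sizes all buffers before
 * their addresses are taken. */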

void
ScipParaInstanceMpi::allocateMemoryForDatatype3(
   )
{
   if( nLinearConss )
   {
      linearCoefs = new SCIP_Real*[nLinearConss];
      idxLinearCoefsVars = new int*[nLinearConss];
      for( int i = 0; i < nLinearConss; i++ )
      {
         linearCoefs[i] = new SCIP_Real[nLinearCoefs[i]];
         idxLinearCoefsVars[i] = new int[nLinearCoefs[i]];
      }
   }
   if( nSetppcConss )
   {
      idxSetppcVars = new int*[nSetppcConss];
      for( int i = 0; i < nSetppcConss; i++ )
      {
         idxSetppcVars[i] = new int[nIdxSetppcVars[i]];
      }
   }
   if( nLogicorConss )
   {
      idxLogicorVars = new int*[nLogicorConss];
      for( int i = 0; i < nLogicorConss; i++ )
      {
         idxLogicorVars[i] = new int[nIdxLogicorVars[i]];
      }
   }
   if( nKnapsackConss )
   {
      knapsackCoefs = new SCIP_Longint*[nKnapsackConss];
      idxKnapsackCoefsVars = new int*[nKnapsackConss];
      for( int i = 0; i < nKnapsackConss; i++ )
      {
         knapsackCoefs[i] = new SCIP_Longint[nLKnapsackCoefs[i]];
         idxKnapsackCoefsVars[i] = new int[nLKnapsackCoefs[i]];
      }
   }
   if( nVarBoundDisjunctionConss )
   {
      idxVarBoundDisjunction = new int*[nVarBoundDisjunctionConss];
      boundTypesBoundDisjunction = new SCIP_BOUNDTYPE*[nVarBoundDisjunctionConss];
      boundsBoundDisjunction = new SCIP_Real*[nVarBoundDisjunctionConss];
      for( int i = 0; i < nVarBoundDisjunctionConss; i++ )
      {
         idxVarBoundDisjunction[i] = new int[nVarsBoundDisjunction[i]];
         boundTypesBoundDisjunction[i] = new SCIP_BOUNDTYPE[nVarsBoundDisjunction[i]];
         boundsBoundDisjunction[i] = new SCIP_Real[nVarsBoundDisjunction[i]];
      }
   }
   if( nSos1Conss )
   {
      sos1Coefs = new SCIP_Real*[nSos1Conss];
      idxSos1CoefsVars = new int*[nSos1Conss];
      for( int i = 0; i < nSos1Conss; i++ )
      {
         sos1Coefs[i] = new SCIP_Real[nSos1Coefs[i]];
         idxSos1CoefsVars[i] = new int[nSos1Coefs[i]];
      }
   }
   if( nSos2Conss )
   {
      sos2Coefs = new SCIP_Real*[nSos2Conss];
      idxSos2CoefsVars = new int*[nSos2Conss];
      for( int i = 0; i < nSos2Conss; i++ )   /* iterate over SOS2, not SOS1, constraints */
      {
         sos2Coefs[i] = new SCIP_Real[nSos2Coefs[i]];
         idxSos2CoefsVars[i] = new int[nSos2Coefs[i]];
      }
   }
   if( nAggregatedConss )
   {
      aggregatedCoefs = new SCIP_Real*[nAggregatedConss];
      idxAggregatedCoefsVars = new int*[nAggregatedConss];
      for( int i = 0; i < nAggregatedConss; i++ )
      {
         aggregatedCoefs[i] = new SCIP_Real[nAggregatedCoefs[i]];
         idxAggregatedCoefsVars[i] = new int[nAggregatedCoefs[i]];
      }
   }
}

/** create ScipInstanceDatatype */
MPI_Datatype
ScipParaInstanceMpi::createDatatype3(
   bool memAllocNecessary
   )
{
   MPI_Datatype datatype;

   MPI_Aint startAddress = 0;
   MPI_Aint address = 0;

   if( memAllocNecessary )
   {
      allocateMemoryForDatatype3();
   }

   /* one block per coefficient array registered below, plus the anchor block */
   int nArrays = 1 + nLinearConss*2 + nSetppcConss + nLogicorConss + nKnapsackConss*2
         + nVarBoundDisjunctionConss*3 + nSos1Conss*2 + nSos2Conss*2 + nAggregatedConss*2;
   int *blockLengths = new int[nArrays];
   MPI_Aint *displacements = new MPI_Aint[nArrays];
   MPI_Datatype *types = new MPI_Datatype[nArrays];

   int n = 0;

   MPI_CALL(
      MPI_Get_address( &dummyToKeepStartPos, &startAddress )
   );
   displacements[n] = 0;
   blockLengths[n] = 1;
   types[n++] = MPI_INT;

   if( nLinearConss )
   {
      for( int i = 0; i < nLinearConss; i++ )
      {
         MPI_CALL(
            MPI_Get_address( linearCoefs[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nLinearCoefs[i];
         types[n++] = MPI_DOUBLE;
         MPI_CALL(
            MPI_Get_address( idxLinearCoefsVars[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nLinearCoefs[i];
         types[n++] = MPI_INT;
      }
   }

   if( nSetppcConss )
   {
      for( int i = 0; i < nSetppcConss; i++ )
      {
         MPI_CALL(
            MPI_Get_address( idxSetppcVars[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nIdxSetppcVars[i];
         types[n++] = MPI_INT;
      }
   }

   if( nLogicorConss )
   {
      for( int i = 0; i < nLogicorConss; i++ )
      {
         MPI_CALL(
            MPI_Get_address( idxLogicorVars[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nIdxLogicorVars[i];
         types[n++] = MPI_INT;
      }
   }
   if( nKnapsackConss )
   {
      for( int i = 0; i < nKnapsackConss; i++ )
      {
         MPI_CALL(
            MPI_Get_address( knapsackCoefs[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nLKnapsackCoefs[i];
#ifdef _ALIBABA
         types[n++] = MPI_LONG;
#else
         types[n++] = MPI_LONG_LONG;
#endif
         MPI_CALL(
            MPI_Get_address( idxKnapsackCoefsVars[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nLKnapsackCoefs[i];
         types[n++] = MPI_INT;
      }
   }
   if( nVarBoundDisjunctionConss )
   {
      for( int i = 0; i < nVarBoundDisjunctionConss; i++ )
      {
         MPI_CALL(
            MPI_Get_address( idxVarBoundDisjunction[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nVarsBoundDisjunction[i];
         types[n++] = MPI_INT;
         MPI_CALL(
            MPI_Get_address( boundTypesBoundDisjunction[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nVarsBoundDisjunction[i];
         types[n++] = MPI_INT;
         MPI_CALL(
            MPI_Get_address( boundsBoundDisjunction[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nVarsBoundDisjunction[i];
         types[n++] = MPI_DOUBLE;
      }
   }
   if( nSos1Conss )
   {
      for( int i = 0; i < nSos1Conss; i++ )
      {
         MPI_CALL(
            MPI_Get_address( sos1Coefs[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nSos1Coefs[i];
         types[n++] = MPI_DOUBLE;
         MPI_CALL(
            MPI_Get_address( idxSos1CoefsVars[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nSos1Coefs[i];
         types[n++] = MPI_INT;
      }
   }
   if( nSos2Conss )
   {
      for( int i = 0; i < nSos2Conss; i++ )   /* iterate over SOS2, not SOS1, constraints */
      {
         MPI_CALL(
            MPI_Get_address( sos2Coefs[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nSos2Coefs[i];
         types[n++] = MPI_DOUBLE;
         MPI_CALL(
            MPI_Get_address( idxSos2CoefsVars[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nSos2Coefs[i];
         types[n++] = MPI_INT;
      }
   }
   if( nAggregatedConss )
   {
      for( int i = 0; i < nAggregatedConss; i++ )
      {
         MPI_CALL(
            MPI_Get_address( aggregatedCoefs[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nAggregatedCoefs[i];
         types[n++] = MPI_DOUBLE;
         MPI_CALL(
            MPI_Get_address( idxAggregatedCoefsVars[i], &address )
         );
         displacements[n] = address - startAddress;
         blockLengths[n] = nAggregatedCoefs[i];
         types[n++] = MPI_INT;
      }
   }

   assert(n == nArrays);

   MPI_CALL(
      MPI_Type_create_struct(n, blockLengths, displacements, types, &datatype)
   );

   delete [] blockLengths;
   delete [] displacements;
   delete [] types;

   return datatype;
}

int
ScipParaInstanceMpi::bcast(
   ParaComm *comm,
   int root,
   int method
   )
{
   DEF_PARA_COMM( commMpi, comm );

   switch ( method )
   {
   case 0 :
   {
      MPI_Datatype datatype = createDatatype1();
      MPI_CALL(
         MPI_Type_commit( &datatype )
      );
      PARA_COMM_CALL(
         commMpi->ubcast(&lProbName, 1, datatype, root)
      );
      MPI_CALL(
         MPI_Type_free( &datatype )
      );

      if( commMpi->getRank() == root )
      {
         datatype = createDatatype2(false);
      }
      else
      {
         datatype = createDatatype2(true);
      }
      MPI_CALL(
         MPI_Type_commit( &datatype )
      );

      PARA_COMM_CALL(
         commMpi->ubcast(probName, 1, datatype, root)
      );
      MPI_CALL(
         MPI_Type_free( &datatype )
      );

      if( commMpi->getRank() == root )
      {
         datatype = createDatatype3(false);
      }
      else
      {
         datatype = createDatatype3(true);
      }
      MPI_CALL(
         MPI_Type_commit( &datatype )
      );
      PARA_COMM_CALL(
         commMpi->ubcast(&dummyToKeepStartPos, 1, datatype, root)
      );
      MPI_CALL(
         MPI_Type_free( &datatype )
      );
      break;
   }
   case 1:
   case 2:
   {
      MPI_Datatype datatype = createDatatype1();
      MPI_CALL(
         MPI_Type_commit( &datatype )
      );
      PARA_COMM_CALL(
         commMpi->ubcast(&lProbName, 1, datatype, root)
      );
      MPI_CALL(
         MPI_Type_free( &datatype )
      );
      if( fileName )
      {
         char *probNameFromFileName;
         char *temp = new char[strlen(fileName)+1];
         (void) strcpy(temp, fileName);
         SCIPsplitFilename(temp, NULL, &probNameFromFileName, NULL, NULL);
         probName = new char[strlen(probNameFromFileName)+1];
         (void) strcpy(probName, probNameFromFileName);
         delete [] temp;
         assert(static_cast<unsigned int>(lProbName) == strlen(probName));
      }
      break;
   }
   default:
      THROW_LOGICAL_ERROR1("Undefined instance transfer method");
   }

   return 0;
}
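
/* A minimal usage sketch, assuming a root rank that has read the instance and
 * worker ranks that have not; "paraInstance", "paraComm", and "root" are
 * illustrative names, not identifiers from this file:
 *
 *    paraInstance->bcast(paraComm, root, 0);   // called on every rank
 *
 * With method 0 the transfer runs in three stages: createDatatype1 ships the
 * scalar sizes, createDatatype2 the fixed-length arrays (allocating them on
 * non-root ranks first), and createDatatype3 the per-constraint coefficient
 * arrays.  Methods 1 and 2 broadcast only the sizes and reconstruct the
 * problem name from fileName on each rank. */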