DFT-FE 1.1.0-pre
Density Functional Theory With Finite-Elements
MPIPatternP2P.t.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (c) 2017-2025 The Regents of the University of Michigan and DFT-FE
4// authors.
5//
6// This file is part of the DFT-FE code.
7//
8// The DFT-FE code is free software; you can use it, redistribute
9// it, and/or modify it under the terms of the GNU Lesser General
10// Public License as published by the Free Software Foundation; either
11// version 2.1 of the License, or (at your option) any later version.
12// The full text of the license can be found in the file LICENSE at
13// the top level of the DFT-FE distribution.
14//
15// ---------------------------------------------------------------------
16//
17
18/*
19 * @author Bikash Kanungo
20 */
21
22#include <Exceptions.h>
23#include <MPITags.h>
24#include <MPIRequestersBase.h>
25#include <MPIRequestersNBX.h>
26#include <string>
27#include <map>
28#include <set>
29#include <iostream>
30#include <memory>
31#include <numeric>
32
33namespace dftfe
34{
35 namespace utils
36 {
37 namespace mpi
38 {
39 namespace
40 {
41 void
42 getAllOwnedRanges(const global_size_type ownedRangeStart,
43 const global_size_type ownedRangeEnd,
44 std::vector<global_size_type> &allOwnedRanges,
45 const MPI_Comm & mpiComm)
46 {
47 int nprocs = 1;
48 int err = MPI_Comm_size(mpiComm, &nprocs);
49 std::string errMsg = "Error occurred while using MPI_Comm_size. "
50 "Error code: " +
51 std::to_string(err);
52 throwException(err == MPI_SUCCESS, errMsg);
53 std::vector<int> recvCounts(nprocs, 2);
54 std::vector<int> displs(nprocs, 0);
55 allOwnedRanges.resize(2 * nprocs);
56 for (unsigned int i = 0; i < nprocs; ++i)
57 displs[i] = 2 * i;
58
59 std::vector<global_size_type> ownedRanges = {ownedRangeStart,
60 ownedRangeEnd};
61 MPI_Allgatherv(&ownedRanges[0],
62 2,
63 MPI_UNSIGNED_LONG,
64 &allOwnedRanges[0],
65 &recvCounts[0],
66 &displs[0],
67 MPI_UNSIGNED_LONG,
68 mpiComm);
69 }
70
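// Helper: for each processor that owns at least one of this processor's
// ghost indices, collect the positions (into the ghostIndices vector) of
// the ghost indices owned by that processor.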
71 void
72 getGhostProcIdToLocalGhostIndicesMap(
73 const std::vector<global_size_type> &ghostIndices,
74 const global_size_type nGlobalIndices,
75 const std::vector<global_size_type> &allOwnedRanges,
76 std::map<size_type, std::vector<size_type>>
77 & ghostProcIdToLocalGhostIndices,
78 const MPI_Comm &mpiComm)
79 {
80 int nprocs = 1;
81 int err = MPI_Comm_size(mpiComm, &nprocs);
82 std::string errMsg = "Error occurred while using MPI_Comm_size. "
83 "Error code: " +
84 std::to_string(err);
85 throwException(err == MPI_SUCCESS, errMsg);
86
87 //
88 // NOTE: The locally owned ranges need not be ordered as per the
89 // processor ranks. That is ranges for processor 0, 1, ...., P-1 given
90 // by [N_0,N_1), [N_1, N_2), [N_2, N_3), ..., [N_{P-1},N_P) need not
91 // honor the fact that N_0, N_1, ..., N_P are increasing. However, it
92 // is more efficient to perform search operations in a sorted vector.
93 // Thus, we perform a sort on the end of each locally owned range and
94 // also keep track of the indices during the sort
95 //
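// Illustrative example (values are hypothetical): with P = 3 and owned
// ranges [10,20), [0,10), [20,25) for processors 0, 1, 2, the range ends
// are {20, 10, 25}; sorting gives {10, 20, 25} with the processor ids
// reordered to {1, 0, 2}. A ghost index 12 is then located via
// std::upper_bound at the first end greater than 12 (namely 20), which
// maps back to processor 0, the owner of [10,20).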
96
97 // vector to store the end of each locally owned ranges
98 std::vector<global_size_type> locallyOwnedRangesEnd(nprocs);
99
100 //
101 // Vector to keep track of the indices of locallyOwnedRangesEnd
102 // during sort
103 std::vector<size_type> locallyOwnedRangesEndProcIds(nprocs);
104 for (unsigned int i = 0; i < nprocs; ++i)
105 {
106 locallyOwnedRangesEnd[i] = allOwnedRanges[2 * i + 1];
107
108 // this is required for handling trivial ranges [Nt, Nt)
109 // where Nt can be arbitrary integer >=0
110 if (allOwnedRanges[2 * i + 1] == allOwnedRanges[2 * i])
111 locallyOwnedRangesEnd[i] = (nGlobalIndices + 1 + i);
112 locallyOwnedRangesEndProcIds[i] = i;
113 }
114
115 std::sort(locallyOwnedRangesEndProcIds.begin(),
116 locallyOwnedRangesEndProcIds.end(),
117 [&locallyOwnedRangesEnd](size_type x, size_type y) {
118 return locallyOwnedRangesEnd[x] <
119 locallyOwnedRangesEnd[y];
120 });
121
122 std::sort(locallyOwnedRangesEnd.begin(), locallyOwnedRangesEnd.end());
123
124 const size_type numGhosts = ghostIndices.size();
125 for (unsigned int iGhost = 0; iGhost < numGhosts; ++iGhost)
126 {
127 global_size_type ghostIndex = ghostIndices[iGhost];
128 auto up = std::upper_bound(locallyOwnedRangesEnd.begin(),
129 locallyOwnedRangesEnd.end(),
130 ghostIndex);
131 std::string msg = "Ghost index " + std::to_string(ghostIndex) +
132 " not found in any of the processors";
133 throwException(up != locallyOwnedRangesEnd.end(), msg);
134 size_type upPos =
135 std::distance(locallyOwnedRangesEnd.begin(), up);
136 size_type procId = locallyOwnedRangesEndProcIds[upPos];
137 ghostProcIdToLocalGhostIndices[procId].push_back(iGhost);
138 }
139 }
140
141 bool
142 checkContiguity(const std::vector<size_type> &v)
143 {
144 const size_type N = v.size();
145 bool returnValue = true;
146 for (unsigned int i = 1; i < N; ++i)
147 {
148 if ((v[i] - 1) != v[i - 1])
149 {
150 returnValue = false;
151 break;
152 }
153 }
154 return returnValue;
155 }
156
157 struct RangeMetaData
158 {
159 global_size_type Id;
160 size_type rangeId;
161 bool isRangeStart;
162 };
163
164 bool
165 compareRangeMetaData(const RangeMetaData &x, const RangeMetaData &y)
166 {
167 if (x.Id == y.Id)
168 return (!x.isRangeStart);
169 else
170 return x.Id < y.Id;
171 }
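// Helper: detect overlaps among the flattened owned ranges via a sweep
// over the sorted range end-points; any range that starts while another
// range is still open is reported in the returned list of range ids.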
172 std::vector<size_type>
173 getOverlappingRangeIds(const std::vector<global_size_type> &ranges)
174 {
175 size_type numRanges = ranges.size() / 2;
176 std::vector<RangeMetaData> rangeMetaDataVec(0);
177 for (unsigned int i = 0; i < numRanges; ++i)
178 {
179 RangeMetaData left;
180 left.Id = ranges[2 * i];
181 left.rangeId = i;
182 left.isRangeStart = true;
183
184 RangeMetaData right;
185 right.Id = ranges[2 * i + 1];
186 right.rangeId = i;
187 right.isRangeStart = false;
188
189 // This check is required to ignore ranges with 0 elements
190 if (left.Id != right.Id)
191 {
192 rangeMetaDataVec.push_back(left);
193 rangeMetaDataVec.push_back(right);
194 }
195 }
196 std::sort(rangeMetaDataVec.begin(),
197 rangeMetaDataVec.end(),
198 compareRangeMetaData);
199 int currentOpen = -1;
200 bool added = false;
201 std::vector<size_type> returnValue(0);
202 for (unsigned int i = 0; i < rangeMetaDataVec.size(); ++i)
203 {
204 size_type rangeId = rangeMetaDataVec[i].rangeId;
205 if (rangeMetaDataVec[i].isRangeStart)
206 {
207 if (currentOpen == -1)
208 {
209 currentOpen = rangeId;
210 added = false;
211 }
212 else
213 {
214 if (!added)
215 {
216 returnValue.push_back(currentOpen);
217 added = true;
218 }
219 returnValue.push_back(rangeId);
220 if (ranges[2 * rangeId + 1] > ranges[2 * currentOpen + 1])
221 {
222 currentOpen = rangeId;
223 added = true;
224 }
225 }
226 }
227 else
228 {
229 if (rangeId == currentOpen)
230 {
231 currentOpen = -1;
232 added = false;
233 }
234 }
235 }
236 return returnValue;
237 }
238
239 } // namespace
240
241 ///
242 /// Constructor with MPI
243 ///
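///
/// The construction proceeds in two phases: a "ghost data" phase that
/// determines which processor owns each ghost index and the local positions
/// of the ghosts owned by each of them, followed by a "target data" phase
/// that determines which processors need data owned here and which locally
/// owned indices they need.
///
/// Minimal usage sketch (the index values, the HOST memory space, and the
/// communicator name mpiComm below are illustrative assumptions, not taken
/// from this file):
///
///   std::pair<global_size_type, global_size_type>  owned(100, 200);
///   std::vector<dftfe::global_size_type>           ghosts = {5, 6, 250};
///   MPIPatternP2P<dftfe::utils::MemorySpace::HOST> pattern(owned,
///                                                          ghosts,
///                                                          mpiComm);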
244 template <dftfe::utils::MemorySpace memorySpace>
245 MPIPatternP2P<memorySpace>::MPIPatternP2P(
246 const std::pair<global_size_type, global_size_type> &locallyOwnedRange,
247 const std::vector<dftfe::global_size_type> & ghostIndices,
248 const MPI_Comm & mpiComm)
249 : d_locallyOwnedRange(locallyOwnedRange)
250 , d_mpiComm(mpiComm)
252 , d_ghostIndices(0)
255 , d_numGhostProcs(0)
256 , d_ghostProcIds(0)
261 , d_targetProcIds(0)
265 {
266 d_myRank = 0;
267 d_nprocs = 1;
268 int err = MPI_Comm_size(d_mpiComm, &d_nprocs);
269 std::string errMsg = "Error occurred while using MPI_Comm_size. "
270 "Error code: " +
271 std::to_string(err);
272 throwException(err == MPI_SUCCESS, errMsg);
273
274 err = MPI_Comm_rank(d_mpiComm, &d_myRank);
275 errMsg = "Error occurred while using MPI_Comm_rank. "
276 "Error code: " +
277 std::to_string(err);
278 throwException(err == MPI_SUCCESS, errMsg);
279
282 "In processor " + std::to_string(d_myRank) +
283 ", invalid locally owned range found "
284 "(i.e., the second value in the range is less than the first value).");
287 ///////////////////////////////////////////////////
288 //////////// Ghost Data Evaluation Begin //////////
289 ///////////////////////////////////////////////////
290
291 //
292 // check whether ghostIndices is strictly increasing
293 //
294 bool isStrictlyIncreasing = std::is_sorted(ghostIndices.begin(),
295 ghostIndices.end(),
296 std::less_equal<>());
298 isStrictlyIncreasing,
299 "In processor " + std::to_string(d_myRank) +
300 ", the ghost indices passed to MPIPatternP2P do not form a strictly increasing set.");
301
302 d_ghostIndices = ghostIndices;
303
304 // copy the ghostIndices to d_ghostIndicesSetSTL
305 d_ghostIndicesSetSTL.clear();
306 std::copy(ghostIndices.begin(),
307 ghostIndices.end(),
308 std::inserter(d_ghostIndicesSetSTL,
309 d_ghostIndicesSetSTL.end()));
312
313 d_numGhostIndices = ghostIndices.size();
314
316
317 d_allOwnedRanges.clear();
318 getAllOwnedRanges(d_locallyOwnedRange.first,
319 d_locallyOwnedRange.second,
320 d_allOwnedRanges,
321 d_mpiComm);
322
323 std::vector<size_type> overlappingRangeIds =
324 getOverlappingRangeIds(d_allOwnedRanges);
326 overlappingRangeIds.size() == 0,
327 "Detected overlapping ranges among the locallyOwnedRanges passed "
328 "to MPIPatternP2P");
329
331 for (unsigned int i = 0; i < d_nprocs; ++i)
332 {
333 d_nGlobalIndices +=
334 d_allOwnedRanges[2 * i + 1] - d_allOwnedRanges[2 * i];
335 }
336
337 if (ghostIndices.size() > 0)
338 {
340 ghostIndices.back() < d_nGlobalIndices,
341 "Detected global ghost index to be larger than (nGlobalIndices-1)");
342 }
343
344 std::map<size_type, std::vector<size_type>>
345 ghostProcIdToLocalGhostIndices;
346 getGhostProcIdToLocalGhostIndicesMap(ghostIndices,
347 d_nGlobalIndices,
348 d_allOwnedRanges,
349 ghostProcIdToLocalGhostIndices,
350 d_mpiComm);
351
352 d_numGhostProcs = ghostProcIdToLocalGhostIndices.size();
356
357 std::vector<size_type> flattenedLocalGhostIndicesTmp(0);
358 auto it = ghostProcIdToLocalGhostIndices.begin();
359 unsigned int iGhostProc = 0;
360 for (; it != ghostProcIdToLocalGhostIndices.end(); ++it)
361 {
362 d_ghostProcIds[iGhostProc] = it->first;
363 const std::vector<size_type> localGhostIndicesInGhostProc =
364 it->second;
365 bool isContiguous = checkContiguity(localGhostIndicesInGhostProc);
366 std::string msg = "In rank " + std::to_string(d_myRank) +
367 ", the local ghost indices that are owned"
368 " by rank " +
369 std::to_string(d_ghostProcIds[iGhostProc]) +
370 " do not form a contiguous set.";
371 throwException<LogicError>(isContiguous, msg);
372 d_numGhostIndicesInGhostProcs[iGhostProc] =
373 localGhostIndicesInGhostProc.size();
374
375 d_localGhostIndicesRanges[2 * iGhostProc] =
376 *(localGhostIndicesInGhostProc.begin());
377 d_localGhostIndicesRanges[2 * iGhostProc + 1] =
378 *(localGhostIndicesInGhostProc.end() - 1) + 1;
379 //
380 // Append localGhostIndicesInGhostProc to
381 // flattenedLocalGhostIndicesTmp
382 //
383 std::copy(localGhostIndicesInGhostProc.begin(),
384 localGhostIndicesInGhostProc.end(),
385 back_inserter(flattenedLocalGhostIndicesTmp));
386 ++iGhostProc;
387 }
388
389 std::string msg = "In rank " + std::to_string(d_myRank) +
390 " mismatch of"
391 " the sizes of ghost indices. Expected size: " +
392 std::to_string(d_numGhostIndices) +
393 " Received size: " +
394 std::to_string(flattenedLocalGhostIndicesTmp.size());
395 throwException<DomainError>(flattenedLocalGhostIndicesTmp.size() ==
396 d_numGhostIndices,
397 msg);
398
399
401 if (d_numGhostIndices > 0)
402 memoryTransfer.copy(d_numGhostIndices,
404 &flattenedLocalGhostIndicesTmp[0]);
405 ///////////////////////////////////////////////////
406 //////////// Ghost Data Evaluation End / //////////
407 ///////////////////////////////////////////////////
408
409
410 ///////////////////////////////////////////////////
411 //////////// Target Data Evaluation Begin ////////
412 ///////////////////////////////////////////////////
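// Determine the "target" processors, i.e., the processors that require
// data owned by this processor. MPIRequestersNBX implements the
// non-blocking consensus (NBX) algorithm, which discovers the requesting
// ranks given only the ranks this processor receives ghost data from.
// The subsequent point-to-point exchange then communicates, for each
// ghost/target pair, (i) how many indices are involved and (ii) the
// positions of those indices within the owner's locally owned range.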
413 MPIRequestersNBX mpirequesters(d_ghostProcIds, d_mpiComm);
414 d_targetProcIds = mpirequesters.getRequestingRankIds();
415 d_numTargetProcs = d_targetProcIds.size();
416 d_numOwnedIndicesForTargetProcs.resize(d_numTargetProcs, 0);
417
418 std::vector<MPI_Request> sendRequests(d_numGhostProcs);
419 std::vector<MPI_Status> sendStatuses(d_numGhostProcs);
420 std::vector<MPI_Request> recvRequests(d_numTargetProcs);
421 std::vector<MPI_Status> recvStatuses(d_numTargetProcs);
422 const int tag = static_cast<int>(MPITags::MPI_P2P_PATTERN_TAG);
423 for (unsigned int iGhost = 0; iGhost < d_numGhostProcs; ++iGhost)
424 {
425 const int ghostProcId = d_ghostProcIds[iGhost];
426 err = MPI_Isend(&d_numGhostIndicesInGhostProcs[iGhost],
427 1,
428 MPI_UNSIGNED,
429 ghostProcId,
430 tag,
431 d_mpiComm,
432 &sendRequests[iGhost]);
433 std::string errMsg = "Error occurred while using MPI_Isend. "
434 "Error code: " +
435 std::to_string(err);
436 throwException(err == MPI_SUCCESS, errMsg);
437 }
438
439 for (unsigned int iTarget = 0; iTarget < d_numTargetProcs; ++iTarget)
440 {
441 const int targetProcId = d_targetProcIds[iTarget];
442 err = MPI_Irecv(&d_numOwnedIndicesForTargetProcs[iTarget],
443 1,
444 MPI_UNSIGNED,
445 targetProcId,
446 tag,
447 d_mpiComm,
448 &recvRequests[iTarget]);
449 std::string errMsg = "Error occurred while using MPI_Irecv. "
450 "Error code: " +
451 std::to_string(err);
452 throwException(err == MPI_SUCCESS, errMsg);
453 }
454
455 if (sendRequests.size() > 0)
456 {
457 err = MPI_Waitall(d_numGhostProcs,
458 sendRequests.data(),
459 sendStatuses.data());
460 errMsg = "Error occurred while using MPI_Waitall. "
461 "Error code: " +
462 std::to_string(err);
463 throwException(err == MPI_SUCCESS, errMsg);
464 }
465
466 if (recvRequests.size() > 0)
467 {
468 err = MPI_Waitall(d_numTargetProcs,
469 recvRequests.data(),
470 recvStatuses.data());
471 errMsg = "Error occurred while using MPI_Waitall. "
472 "Error code: " +
473 std::to_string(err);
474 throwException(err == MPI_SUCCESS, errMsg);
475 }
476
477
478 size_type totalOwnedIndicesForTargetProcs =
479 std::accumulate(d_numOwnedIndicesForTargetProcs.begin(),
480 d_numOwnedIndicesForTargetProcs.end(),
481 0);
482
483
484 std::vector<size_type> flattenedLocalTargetIndicesTmp(
485 totalOwnedIndicesForTargetProcs, 0);
486
487 std::vector<size_type> localIndicesForGhostProc(d_numGhostIndices, 0);
488
489 size_type startIndex = 0;
490 for (unsigned int iGhost = 0; iGhost < d_numGhostProcs; ++iGhost)
491 {
492 const int numGhostIndicesInProc =
493 d_numGhostIndicesInGhostProcs[iGhost];
494 const int ghostProcId = d_ghostProcIds[iGhost];
495
496 // We need to send what is the local index in the ghost processor
497 // (i.e., the processor that owns the current processor's ghost
498 // index)
499 for (unsigned int iIndex = 0; iIndex < numGhostIndicesInProc;
500 ++iIndex)
501 {
502 const size_type ghostLocalIndex =
503 flattenedLocalGhostIndicesTmp[startIndex + iIndex];
504
505 throwException<LogicError>(ghostLocalIndex <
506 ghostIndices.size(),
507 "BUG1");
508
509 const global_size_type ghostGlobalIndex =
510 ghostIndices[ghostLocalIndex];
511 const global_size_type ghostProcOwnedIndicesStart =
512 d_allOwnedRanges[2 * ghostProcId];
513 localIndicesForGhostProc[startIndex + iIndex] =
514 (size_type)(ghostGlobalIndex - ghostProcOwnedIndicesStart);
515
516 throwException<LogicError>(
517 localIndicesForGhostProc[startIndex + iIndex] <
518 (d_allOwnedRanges[2 * ghostProcId + 1] -
519 d_allOwnedRanges[2 * ghostProcId]),
520 "BUG2");
521 }
522
523 err = MPI_Isend(&localIndicesForGhostProc[startIndex],
524 numGhostIndicesInProc,
525 MPI_UNSIGNED,
526 ghostProcId,
527 tag,
528 d_mpiComm,
529 &sendRequests[iGhost]);
530 std::string errMsg = "Error occurred while using MPI_Isend. "
531 "Error code: " +
532 std::to_string(err);
533 throwException(err == MPI_SUCCESS, errMsg);
534 startIndex += numGhostIndicesInProc;
535 }
536
537 startIndex = 0;
538 for (unsigned int iTarget = 0; iTarget < d_numTargetProcs; ++iTarget)
539 {
540 const int targetProcId = d_targetProcIds[iTarget];
541 const int numOwnedIndicesForTarget =
542 d_numOwnedIndicesForTargetProcs[iTarget];
543 err = MPI_Irecv(&flattenedLocalTargetIndicesTmp[startIndex],
544 numOwnedIndicesForTarget,
545 MPI_UNSIGNED,
546 targetProcId,
547 tag,
548 d_mpiComm,
549 &recvRequests[iTarget]);
550 std::string errMsg = "Error occurred while using MPI_Irecv. "
551 "Error code: " +
552 std::to_string(err);
553 throwException(err == MPI_SUCCESS, errMsg);
554 startIndex += numOwnedIndicesForTarget;
555 }
556
557 if (sendRequests.size() > 0)
558 {
559 err = MPI_Waitall(d_numGhostProcs,
560 sendRequests.data(),
561 sendStatuses.data());
562 errMsg = "Error occurred while using MPI_Waitall. "
563 "Error code: " +
564 std::to_string(err);
565 throwException(err == MPI_SUCCESS, errMsg);
566 }
567
568 if (recvRequests.size() > 0)
569 {
570 err = MPI_Waitall(d_numTargetProcs,
571 recvRequests.data(),
572 recvStatuses.data());
573 errMsg = "Error occurred while using MPI_Waitall. "
574 "Error code: " +
575 std::to_string(err);
576 throwException(err == MPI_SUCCESS, errMsg);
577 }
578
579 for (size_type i = 0; i < totalOwnedIndicesForTargetProcs; ++i)
580 {
582 flattenedLocalTargetIndicesTmp[i] < d_numLocallyOwnedIndices,
583 "Detected local owned target index to be larger than (nLocallyOwnedIndices-1)");
584 }
585
586 d_flattenedLocalTargetIndices.resize(totalOwnedIndicesForTargetProcs);
587 if (totalOwnedIndicesForTargetProcs > 0)
588 memoryTransfer.copy(totalOwnedIndicesForTargetProcs,
590 &flattenedLocalTargetIndicesTmp[0]);
591
592 ///////////////////////////////////////////////////
593 //////////// Target Data Evaluation End ////////
594 ///////////////////////////////////////////////////
595 }
596
597
598 ///
599 /// Constructor for a serial case
600 ///
601 template <dftfe::utils::MemorySpace memorySpace>
602 MPIPatternP2P<memorySpace>::MPIPatternP2P(const size_type &size)
603 : d_locallyOwnedRange(std::make_pair(0, (global_size_type)size))
604 , d_mpiComm(MPI_COMM_SELF)
608 , d_ghostIndices(0)
609 , d_numGhostProcs(0)
610 , d_ghostProcIds(0)
615 , d_targetProcIds(0)
619 {
620 d_myRank = 0;
621 d_nprocs = 1;
624 "In processor " + std::to_string(d_myRank) +
625 ", invalid locally owned range found "
626 "(i.e., the second value in the range is less than the first value).");
629 std::vector<global_size_type> d_allOwnedRanges = {
631 for (unsigned int i = 0; i < d_nprocs; ++i)
632 d_nGlobalIndices +=
633 d_allOwnedRanges[2 * i + 1] - d_allOwnedRanges[2 * i];
634
635 // set the d_ghostIndicesSetSTL to be of size zero
636 d_ghostIndicesSetSTL.clear();
639 }
640
641 template <dftfe::utils::MemorySpace memorySpace>
642 std::pair<global_size_type, global_size_type>
643 MPIPatternP2P<memorySpace>::getLocallyOwnedRange() const
644 {
645 return d_locallyOwnedRange;
646 }
647
648 template <dftfe::utils::MemorySpace memorySpace>
649 const std::vector<global_size_type> &
650 MPIPatternP2P<memorySpace>::getGhostIndices() const
651 {
652 return d_ghostIndices;
653 }
654
655 template <dftfe::utils::MemorySpace memorySpace>
656 const std::vector<size_type> &
657 MPIPatternP2P<memorySpace>::getGhostProcIds() const
658 {
659 return d_ghostProcIds;
660 }
661
662 template <dftfe::utils::MemorySpace memorySpace>
663 const std::vector<size_type> &
664 MPIPatternP2P<memorySpace>::getNumGhostIndicesInProcs() const
665 {
666 return d_numGhostIndicesInGhostProcs;
667 }
668
669
670 template <dftfe::utils::MemorySpace memorySpace>
671 const std::vector<size_type> &
672 MPIPatternP2P<memorySpace>::getGhostLocalIndicesRanges() const
673 {
674 return d_localGhostIndicesRanges;
675 }
676
677 template <dftfe::utils::MemorySpace memorySpace>
678 size_type
679 MPIPatternP2P<memorySpace>::getNumGhostIndicesInProc(
680 const size_type procId) const
681 {
682 auto itProcIds = d_ghostProcIds.begin();
683 auto itNumGhostIndices = d_numGhostIndicesInGhostProcs.begin();
684 size_type numGhostIndicesInProc = 0;
685 for (; itProcIds != d_ghostProcIds.end(); ++itProcIds)
686 {
687 numGhostIndicesInProc = *itNumGhostIndices;
688 if (procId == *itProcIds)
689 break;
690
691 ++itNumGhostIndices;
692 }
693
694 std::string msg =
695 "The processor Id " + std::to_string(procId) +
696 " does not contain any ghost indices for the current processor"
697 " (i.e., processor Id " +
698 std::to_string(d_myRank) + ")";
699 throwException<InvalidArgument>(itProcIds != d_ghostProcIds.end(), msg);
700
701 return numGhostIndicesInProc;
702 }
703
704 template <dftfe::utils::MemorySpace memorySpace>
705 typename MPIPatternP2P<memorySpace>::SizeTypeVector
706 MPIPatternP2P<memorySpace>::getGhostLocalIndices(
707 const size_type procId) const
708 {
709 size_type cumulativeIndices = 0;
710 size_type numGhostIndicesInProc = 0;
711 auto itProcIds = d_ghostProcIds.begin();
712 auto itNumGhostIndices = d_numGhostIndicesInGhostProcs.begin();
713 for (; itProcIds != d_ghostProcIds.end(); ++itProcIds)
714 {
715 numGhostIndicesInProc = *itNumGhostIndices;
716 if (procId == *itProcIds)
717 break;
718
719 cumulativeIndices += numGhostIndicesInProc;
720 ++itNumGhostIndices;
721 }
722
723 std::string msg =
724 "The processor Id " + std::to_string(procId) +
725 " does not contain any ghost indices for the current processor"
726 " (i.e., processor Id " +
727 std::to_string(d_myRank) + ")";
728 throwException<InvalidArgument>(itProcIds != d_ghostProcIds.end(), msg);
729
730 SizeTypeVector returnValue(numGhostIndicesInProc);
732 numGhostIndicesInProc,
733 returnValue.begin(),
734 d_flattenedLocalGhostIndices.begin() + cumulativeIndices);
735
736 return returnValue;
737 }
738
739 template <dftfe::utils::MemorySpace memorySpace>
740 const std::vector<size_type> &
741 MPIPatternP2P<memorySpace>::getTargetProcIds() const
742 {
743 return d_targetProcIds;
744 }
745
746 template <dftfe::utils::MemorySpace memorySpace>
747 const std::vector<size_type> &
748 MPIPatternP2P<memorySpace>::getNumOwnedIndicesForTargetProcs() const
749 {
750 return d_numOwnedIndicesForTargetProcs;
751 }
752
753 template <dftfe::utils::MemorySpace memorySpace>
754 const typename MPIPatternP2P<memorySpace>::SizeTypeVector &
755 MPIPatternP2P<memorySpace>::getOwnedLocalIndicesForTargetProcs() const
756 {
757 return d_flattenedLocalTargetIndices;
758 }
759
760 template <dftfe::utils::MemorySpace memorySpace>
761 size_type
762 MPIPatternP2P<memorySpace>::getNumOwnedIndicesForTargetProc(
763 const size_type procId) const
764 {
765 auto itProcIds = d_targetProcIds.begin();
766 auto itNumOwnedIndices = d_numOwnedIndicesForTargetProcs.begin();
767 size_type numOwnedIndicesForProc = 0;
768 for (; itProcIds != d_targetProcIds.end(); ++itProcIds)
769 {
770 numOwnedIndicesForProc = *itNumOwnedIndices;
771 if (procId == *itProcIds)
772 break;
773
774 ++itNumOwnedIndices;
775 }
776
777 std::string msg = "There are no owned indices for "
778 " target processor Id " +
779 std::to_string(procId) +
780 " in the current processor"
781 " (i.e., processor Id " +
782 std::to_string(d_myRank) + ")";
783 throwException<InvalidArgument>(itProcIds != d_targetProcIds.end(),
784 msg);
785 return numOwnedIndicesForProc;
786 }
787
788 template <dftfe::utils::MemorySpace memorySpace>
789 typename MPIPatternP2P<memorySpace>::SizeTypeVector
790 MPIPatternP2P<memorySpace>::getOwnedLocalIndices(
791 const size_type procId) const
792 {
793 size_type cumulativeIndices = 0;
794 size_type numOwnedIndicesForProc = 0;
795 auto itProcIds = d_targetProcIds.begin();
796 auto itNumOwnedIndices = d_numOwnedIndicesForTargetProcs.begin();
797 for (; itProcIds != d_targetProcIds.end(); ++itProcIds)
798 {
799 numOwnedIndicesForProc = *itNumOwnedIndices;
800 if (procId == *itProcIds)
801 break;
802
803 cumulativeIndices += numOwnedIndicesForProc;
804 ++itNumOwnedIndices;
805 }
806
807 std::string msg = "There are no owned indices for "
808 " target processor Id " +
809 std::to_string(procId) +
810 " in the current processor"
811 " (i.e., processor Id " +
812 std::to_string(d_myRank) + ")";
813 throwException<InvalidArgument>(itProcIds != d_targetProcIds.end(),
814 msg);
815
816 SizeTypeVector returnValue(numOwnedIndicesForProc);
818 numOwnedIndicesForProc,
819 returnValue.begin(),
820 d_flattenedLocalTargetIndices.begin() + cumulativeIndices);
821
822 return returnValue;
823 }
824
825
826
827 template <dftfe::utils::MemorySpace memorySpace>
828 const MPI_Comm &
829 MPIPatternP2P<memorySpace>::mpiCommunicator() const
830 {
831 return d_mpiComm;
832 }
833
834 template <dftfe::utils::MemorySpace memorySpace>
835 size_type
836 MPIPatternP2P<memorySpace>::nmpiProcesses() const
837 {
838 return d_nprocs;
839 }
840
841 template <dftfe::utils::MemorySpace memorySpace>
842 size_type
843 MPIPatternP2P<memorySpace>::thisProcessId() const
844 {
845 return d_myRank;
846 }
847
848 template <dftfe::utils::MemorySpace memorySpace>
849 global_size_type
850 MPIPatternP2P<memorySpace>::nGlobalIndices() const
851 {
852 return d_nGlobalIndices;
853 }
854
855 template <dftfe::utils::MemorySpace memorySpace>
856 size_type
857 MPIPatternP2P<memorySpace>::localOwnedSize() const
858 {
859 return d_numLocallyOwnedIndices;
860 }
861
862 template <dftfe::utils::MemorySpace memorySpace>
863 size_type
864 MPIPatternP2P<memorySpace>::localGhostSize() const
865 {
866 return d_numGhostIndices;
867 }
868
869
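// Local numbering convention used by localToGlobal()/globalToLocal():
// local ids [0, nLocallyOwned) map to the contiguous locally owned range,
// and local ids [nLocallyOwned, nLocallyOwned + nGhost) map to the ghost
// indices in increasing order of their global ids.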
870 template <dftfe::utils::MemorySpace memorySpace>
871 global_size_type
872 MPIPatternP2P<memorySpace>::localToGlobal(const size_type localId) const
873 {
874 global_size_type returnValue = 0;
875 if (localId < d_numLocallyOwnedIndices)
876 {
877 returnValue = d_locallyOwnedRange.first + localId;
878 }
879 else if (localId < (d_numLocallyOwnedIndices + d_numGhostIndices))
880 {
881 auto it =
882 d_ghostIndices.begin() + (localId - d_numLocallyOwnedIndices);
883 returnValue = *it;
884 }
885 else
886 {
887 std::string msg =
888 "In processor " + std::to_string(d_myRank) +
889 ", the local index " + std::to_string(localId) +
890 " passed to localToGlobal() in MPIPatternP2P is"
891 " larger than number of locally owned plus ghost indices.";
893 }
894 return returnValue;
895 }
896
897 template <dftfe::utils::MemorySpace memorySpace>
898 size_type
899 MPIPatternP2P<memorySpace>::globalToLocal(
900 const global_size_type globalId) const
901 {
902 size_type returnValue = 0;
903 if (globalId >= d_locallyOwnedRange.first &&
904 globalId < d_locallyOwnedRange.second)
905 {
906 returnValue = globalId - d_locallyOwnedRange.first;
907 }
908 else
909 {
910 bool found = false;
911 d_ghostIndicesOptimizedIndexSet.getPosition(globalId,
912 returnValue,
913 found);
914 std::string msg =
915 "In processor " + std::to_string(d_myRank) +
916 ", the global index " + std::to_string(globalId) +
917 " passed to globalToLocal() in MPIPatternP2P is"
918 " neither present in its locally owned range nor in its "
919 " ghost indices.";
921 returnValue += d_numLocallyOwnedIndices;
922 }
923
924 return returnValue;
925 }
926
927 template <dftfe::utils::MemorySpace memorySpace>
928 bool
929 MPIPatternP2P<memorySpace>::inLocallyOwnedRange(
930 const global_size_type globalId) const
931 {
932 return (globalId >= d_locallyOwnedRange.first &&
933 globalId < d_locallyOwnedRange.second);
934 }
935
936 template <dftfe::utils::MemorySpace memorySpace>
937 bool
938 MPIPatternP2P<memorySpace>::isGhostEntry(
939 const global_size_type globalId) const
940 {
941 bool found = false;
942 size_type localId;
943 d_ghostIndicesOptimizedIndexSet.getPosition(globalId, localId, found);
944 return found;
945 }
946
947 template <dftfe::utils::MemorySpace memorySpace>
948 bool
949 MPIPatternP2P<memorySpace>::isCompatible(
950 const MPIPatternP2P<memorySpace> &rhs) const
951 {
952 if (d_nprocs != rhs.d_nprocs)
953 return false;
954
955 else if (d_nGlobalIndices != rhs.d_nGlobalIndices)
956 return false;
957
959 return false;
960
961 else if (d_numGhostIndices != rhs.d_numGhostIndices)
962 return false;
963
964 else
967 }
968 } // end of namespace mpi
969 } // end of namespace utils
970} // end of namespace dftfe