partition.h
// Copyright (C) 2020 Garth N. Wells
//
// This file is part of DOLFINX (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/common/MPI.h>
#include <dolfinx/common/Timer.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <Eigen/Dense>
#include <functional>
#include <mpi.h>
#include <numeric>
#include <tuple>
#include <utility>
#include <vector>

namespace dolfinx::graph
{

/// Signature of functions for computing the parallel partitioning of a
/// distributed graph
using partition_fn = std::function<graph::AdjacencyList<std::int32_t>(
    MPI_Comm comm, int nparts, const AdjacencyList<std::int64_t>& local_graph,
    std::int32_t num_ghost_nodes, bool ghosting)>;
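
// Editor's sketch (not part of the original header): a callable matching
// the partition_fn signature. It sends every node to rank 0; real
// partitioners (e.g. SCOTCH- or ParMETIS-backed ones) spread nodes over
// nparts parts. An AdjacencyList constructor taking a std::vector of
// destination ranks (one link per node) is assumed here for illustration.
//
//   partition_fn trivial_partitioner
//       = [](MPI_Comm, int, const AdjacencyList<std::int64_t>& local_graph,
//            std::int32_t, bool) {
//           std::vector<std::int32_t> dest(local_graph.num_nodes(), 0);
//           return AdjacencyList<std::int32_t>(dest);
//         };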

/// Partition graph across processes using the default graph partitioner
AdjacencyList<std::int32_t>
partition_graph(const MPI_Comm comm, int nparts,
                const AdjacencyList<std::int64_t>& local_graph,
                std::int32_t num_ghost_nodes, bool ghosting);
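
// A possible call (editor's sketch): one partition per MPI rank, with
// ghosting enabled and no ghost nodes in the input graph.
//
//   const int nparts = dolfinx::MPI::size(comm);
//   const AdjacencyList<std::int32_t> dest
//       = partition_graph(comm, nparts, local_graph, 0, true);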

namespace build
{
/// Distribute adjacency list nodes to destination ranks. The global
/// index of each node is assumed to be the local index plus the offset
/// for this rank.
std::tuple<graph::AdjacencyList<std::int64_t>, std::vector<int>,
           std::vector<std::int64_t>, std::vector<int>>
distribute(MPI_Comm comm, const graph::AdjacencyList<std::int64_t>& list,
           const graph::AdjacencyList<std::int32_t>& destinations);
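
// A possible call (editor's sketch), unpacking the four returned values
// with structured bindings; the names used here are illustrative only.
//
//   auto [dist_list, src_ranks, original_index, ghost_owners]
//       = build::distribute(comm, list, destinations);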

/// Compute ghost indices in a global IndexMap space, from a list of
/// arbitrary global indices, where the ghosts are at the end of the list
/// and their owning processes are known
std::vector<std::int64_t>
compute_ghost_indices(MPI_Comm comm,
                      const std::vector<std::int64_t>& global_indices,
                      const std::vector<int>& ghost_owners);
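
// A possible call (editor's sketch): global_indices holds this rank's
// indices with the ghost entries at the end, and ghost_owners gives the
// owning rank of each ghost entry.
//
//   const std::vector<std::int64_t> ghost_global
//       = build::compute_ghost_indices(comm, global_indices, ghost_owners);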

/// Distribute data to process ranks where it is required
template <typename T>
Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
distribute_data(MPI_Comm comm, const std::vector<std::int64_t>& indices,
                const Eigen::Ref<const Eigen::Array<
                    T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>& x);
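
// A possible call (editor's sketch): fetch, by global row index, rows of a
// row-major array that is distributed across ranks. indices lists the
// global rows this rank needs; x holds this rank's locally owned rows.
//
//   Eigen::Array<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
//       x_needed = distribute_data<double>(comm, indices, x);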

/// Given an adjacency list with global, possibly non-contiguous, link
/// indices and a local adjacency list with contiguous link indices
/// starting from zero, compute a local-to-global map for the links
std::vector<std::int64_t>
compute_local_to_global_links(const graph::AdjacencyList<std::int64_t>& global,
                              const graph::AdjacencyList<std::int32_t>& local);

/// Compute a local0-to-local1 map from two local-to-global maps with
/// common global indices
std::vector<std::int32_t>
compute_local_to_local(const std::vector<std::int64_t>& local0_to_global,
                       const std::vector<std::int64_t>& local1_to_global);
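
// A small worked example (editor's addition): with
//   local0_to_global = {10, 11, 12} and local1_to_global = {12, 10, 11},
// the call below returns {1, 2, 0}: local0 index 0 (global index 10)
// appears at local1 index 1, and so on.
//
//   std::vector<std::int32_t> map0_to_1
//       = build::compute_local_to_local(local0_to_global, local1_to_global);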
} // namespace build

//---------------------------------------------------------------------------
// Implementation
//---------------------------------------------------------------------------
template <typename T>
Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
distribute_data(
    MPI_Comm comm, const std::vector<std::int64_t>& indices,
    const Eigen::Ref<const Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic,
                                        Eigen::RowMajor>>& x)
{
  common::Timer timer("Fetch float data from remote processes");

  const std::int64_t num_points_local = x.rows();
  const int size = dolfinx::MPI::size(comm);
  const int rank = dolfinx::MPI::rank(comm);
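
  // Gather the number of rows owned by each rank, and compute by prefix
  // sum the first global row index held on each rank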
  std::vector<std::int64_t> global_sizes(size);
  MPI_Allgather(&num_points_local, 1, MPI_INT64_T, global_sizes.data(), 1,
                MPI_INT64_T, comm);
  std::vector<std::int64_t> global_offsets(size + 1, 0);
  std::partial_sum(global_sizes.begin(), global_sizes.end(),
                   global_offsets.begin() + 1);

  // Build index data requests
  std::vector<int> number_index_send(size, 0);
  std::vector<int> index_owner(indices.size());
  std::vector<int> index_order(indices.size());
  std::iota(index_order.begin(), index_order.end(), 0);
  std::sort(index_order.begin(), index_order.end(),
            [&indices](int a, int b) { return indices[a] < indices[b]; });

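  // Visit indices in ascending global order so the owning rank p can be
  // advanced monotonically through global_offsets rather than searched
  // for every index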
  int p = 0;
  for (std::size_t i = 0; i < index_order.size(); ++i)
  {
    const int j = index_order[i];
    while (indices[j] >= global_offsets[p + 1])
      ++p;
    index_owner[j] = p;
    number_index_send[p]++;
  }

  // Compute send displacements
  std::vector<int> disp_index_send(size + 1, 0);
  std::partial_sum(number_index_send.begin(), number_index_send.end(),
                   disp_index_send.begin() + 1);

  // Pack global index send data
  std::vector<std::int64_t> indices_send(disp_index_send.back());
  std::vector<int> disp_tmp = disp_index_send;
  for (std::size_t i = 0; i < indices.size(); ++i)
  {
    const int owner = index_owner[i];
    indices_send[disp_tmp[owner]++] = indices[i];
  }

  // Send/receive number of indices to communicate to each process
  std::vector<int> number_index_recv(size);
  MPI_Alltoall(number_index_send.data(), 1, MPI_INT, number_index_recv.data(),
               1, MPI_INT, comm);

  // Compute receive displacements
  std::vector<int> disp_index_recv(size + 1, 0);
  std::partial_sum(number_index_recv.begin(), number_index_recv.end(),
                   disp_index_recv.begin() + 1);

  // Send/receive global indices
  std::vector<std::int64_t> indices_recv(disp_index_recv.back());
  MPI_Alltoallv(indices_send.data(), number_index_send.data(),
                disp_index_send.data(), MPI_INT64_T, indices_recv.data(),
                number_index_recv.data(), disp_index_recv.data(), MPI_INT64_T,
                comm);
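
  // indices_recv now holds, grouped by requesting rank, the global indices
  // of the locally owned rows that must be sent back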

  const int item_size = x.cols();
  assert(item_size != 0);

  // Pack the requested rows to send back, ordered by requesting rank
  Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> x_return(
      indices_recv.size(), item_size);
  for (int proc = 0; proc < size; ++proc)
  {
    for (int i = disp_index_recv[proc]; i < disp_index_recv[proc + 1]; ++i)
    {
      const std::int32_t index_local = indices_recv[i] - global_offsets[rank];
      assert(index_local >= 0);
      x_return.row(i) = x.row(index_local);
    }
  }

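  // Build an MPI datatype describing one row (item_size contiguous values
  // of type T) so that whole rows can be exchanged in a single call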
  MPI_Datatype compound_type;
  MPI_Type_contiguous(item_size, dolfinx::MPI::mpi_type<T>(), &compound_type);
  MPI_Type_commit(&compound_type);

  // Send back point data
  Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> my_x(
      disp_index_send.back(), item_size);
  MPI_Alltoallv(x_return.data(), number_index_recv.data(),
                disp_index_recv.data(), compound_type, my_x.data(),
                number_index_send.data(), disp_index_send.data(),
                compound_type, comm);
  MPI_Type_free(&compound_type);

  return my_x;
}

} // namespace dolfinx::graph
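
A minimal usage sketch for distribute_data (editor's addition, not part of
the header): each rank owns two rows of a 2x3 array, and fetches the first
row owned by the next rank. All names and values are illustrative.

#include <Eigen/Dense>
#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/partition.h>
#include <mpi.h>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  MPI_Comm comm = MPI_COMM_WORLD;
  const int rank = dolfinx::MPI::rank(comm);
  const int size = dolfinx::MPI::size(comm);

  // Two locally owned rows per rank: global row index = 2 * rank + local row
  Eigen::Array<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> x(2, 3);
  x << 2.0 * rank, 0.0, 0.0,
       2.0 * rank + 1.0, 0.0, 0.0;

  // Request the first row owned by the next rank (cyclic)
  const std::vector<std::int64_t> indices = {2 * ((rank + 1) % size)};

  // fetched is a 1 x 3 array holding the requested global row
  auto fetched
      = dolfinx::graph::build::distribute_data<double>(comm, indices, x);

  MPI_Finalize();
  return 0;
}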