// Class FEMVector
// This class represents a one-dimensional vector which can be used in the
// context of FEM to represent a field defined on the DOFs of a mesh.


#ifndef IPPL_FEMVECTOR_H
#define IPPL_FEMVECTOR_H

#include <memory>
#include <vector>

#include "Types/ViewTypes.h"
#include "Field/HaloCells.h"

namespace ippl {

    /**
     * @brief 1D vector used in the context of FEM.
     *
     * This class represents a 1D vector which stores elements of type \p T and
     * provides functionality to handle halo cells and their exchanges.
     * It can conceptually be thought of as a mathematical vector, and to this
     * extent it is used during FEM to represent the vectors \f$x\f$ and
     * \f$b\f$ when solving the linear system \f$Ax = b\f$.
     *
     * We use this instead of an \c ippl::Field, because for basis
     * functions which have DoFs at non-vertex positions a representation as a
     * field is not easily possible.
     *
     * @tparam T The datatype which the vector is storing.
     */
    template <typename T>
    class FEMVector : public detail::Expression<
                          FEMVector<T>,
                          sizeof(typename detail::ViewType<T, 1>::view_type)> {
    public:
        /**
         * @brief Dummy parameter needed for the operators defined on
         * \p detail::Expression to work.
         *
         * The file IpplOperations.h defines a number of operations which we
         * want to be able to use with a \c FEMVector. They require the class
         * to have a \p dim parameter, therefore we define one here.
         */
        static constexpr unsigned dim = 1;

        /**
         * @brief Dummy type definition needed for the operators defined on
         * \p detail::Expression to work.
         *
         * The file IpplOperations.h defines a number of operations which we
         * want to be able to use with a \c FEMVector. They require the class
         * to define a \p value_type type, therefore we define it here.
         */
        using value_type = T;

        /**
         * @brief Constructor taking size, neighbors, and halo exchange indices.
         *
         * Constructor of a FEMVector taking in the size and information about
         * the neighboring MPI ranks.
         *
         * @param n The size of the vector.
         * @param neighbors The ranks of the neighboring MPI tasks.
         * @param sendIdxs The indices for which the data should be sent to the
         * MPI neighbors.
         * @param recvIdxs The halo cell indices.
         */
        FEMVector(size_t n, std::vector<size_t> neighbors,
                  std::vector< Kokkos::View<size_t*> > sendIdxs,
                  std::vector< Kokkos::View<size_t*> > recvIdxs);


        /**
         * @brief Copy constructor (shallow).
         *
         * Creates a shallow copy of the other vector by copying the underlying
         * \c Kokkos::View and boundary information.
         *
         * @param other The other vector we are copying from.
         */
        FEMVector(const FEMVector<T>& other);


        /**
         * @brief Constructor only taking the size; does not create any
         * MPI/boundary information.
         *
         * This constructor only takes the size of the vector and allocates the
         * appropriate number of elements; it does not store any MPI
         * communication or boundary information. This constructor is useful
         * if only a simple vector for arithmetic is needed, without any need
         * for communication.
         *
         * @param n The size of the vector.
         */
        FEMVector(size_t n);
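
        // Usage sketch (illustrative only; `nDofs` is a hypothetical DoF
        // count): the size-only constructor yields a plain algebraic vector
        // without any MPI or boundary information.
        //
        //     ippl::FEMVector<double> x(nDofs);
        //     x = 1.0;   // sets every entry, including the halo cells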


        /**
         * @brief Fill the halo cells with the values from the owning ranks.
         *
         * This function takes the local values which are part of other ranks'
         * halos and copies them to the corresponding halo cells of those
         * neighbors.
         */
        void fillHalo();

        /**
         * @brief Accumulate halo values onto the owning ranks.
         *
         * This function takes the local halo values (which are owned by
         * neighboring ranks) and adds them to the corresponding values on
         * those neighbor ranks.
         */
        void accumulateHalo();

        /**
         * @brief Set the halo cells to \p setValue.
         *
         * @param setValue The value to which the halo cells should be set.
         */
        void setHalo(T setValue);
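
        // Typical halo workflow during an FEM assembly/solve (a sketch, not
        // the only valid order; `b` is a hypothetical right-hand-side vector):
        //
        //     b.accumulateHalo();   // add locally assembled halo contributions
        //                           // onto the owning neighbor ranks
        //     b.fillHalo();         // refresh halo cells with the owners' values
        //     b.setHalo(0);         // zero halos, e.g. before a global reduction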


        /**
         * @brief Set all the values of the vector to \p value.
         *
         * Sets all the values in the vector to \p value; this also includes
         * the halo cells.
         *
         * @param value The value to which the entries should be set.
         */
        FEMVector<T>& operator= (T value);


        /**
         * @brief Set all the values of this vector to the values of the
         * expression.
         *
         * Sets the values of this vector to the values of \p expr.
         *
         * @param expr The expression from which to copy the values.
         *
         * @note Here we have to check how efficient this is, because in theory
         * we are copying a FEMVector onto the device when we are calling the
         * operator[] function inside of the Kokkos::parallel_for.
         */
        template <typename E, size_t N>
        FEMVector<T>& operator= (const detail::Expression<E, N>& expr);


        /**
         * @brief Copy the values from another \c FEMVector to this one.
         *
         * Sets the element values of this vector to the ones of \p v; only the
         * values are copied, everything else (MPI config, boundaries) is
         * ignored.
         *
         * @param v The other vector to copy values from.
         */
        FEMVector<T>& operator= (const FEMVector<T>& v);
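
        // Sketch of how the expression-template assignment is meant to be used
        // (names are illustrative): operators from IpplOperations.h build an
        // expression object, and this operator= evaluates it element-wise.
        //
        //     ippl::FEMVector<double> x(n), y(n), z(n);
        //     // ... fill y and z ...
        //     x = y + z;   // evaluated in a single Kokkos::parallel_for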


        /**
         * @brief Subscript operator to get the value at position \p i.
         *
         * This function returns the value of the vector at index \p i; it is
         * equivalent to \p FEMVector::operator().
         *
         * @param i The index of the value to retrieve.
         */
        KOKKOS_INLINE_FUNCTION T operator[] (size_t i) const;


        /**
         * @brief Call operator to get the value at position \p i.
         *
         * This function returns the value of the vector at index \p i; it is
         * equivalent to \p FEMVector::operator[].
         *
         * @param i The index of the value to retrieve.
         */
        KOKKOS_INLINE_FUNCTION T operator() (size_t i) const;


        /**
         * @brief Get the underlying data view.
         *
         * Returns a constant reference to the underlying data the FEMVector is
         * storing; this corresponds to a \c Kokkos::View living on the default
         * device.
         */
        const Kokkos::View<T*>& getView() const;


        /**
         * @brief Get the size (number of elements) of the vector.
         */
        size_t size() const;


        /**
         * @brief Create a deep copy, where all the information of this vector
         * is copied to a new one.
         *
         * Returns a \c FEMVector which is an exact copy of this one. All the
         * information (elements, MPI information, etc.) is explicitly copied
         * (i.e. a deep copy).
         *
         * @returns An exact copy of this vector.
         */
        FEMVector<T> deepCopy() const;


        /**
         * @brief Create a new \c FEMVector with a different data type, but the
         * same size and boundary information.
         *
         * This function is used to create a new \c FEMVector with the same size
         * and boundary information, but of a different data type. The boundary
         * information is copied over in a deep-copy fashion.
         *
         * @tparam K The data type of the new vector.
         *
         * @returns A vector of the same structure but new data type.
         */
        template <typename K>
        FEMVector<K> skeletonCopy() const;
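
        // Sketch of the difference between deepCopy() and skeletonCopy()
        // (illustrative only):
        //
        //     ippl::FEMVector<double> y = x.deepCopy();        // values + boundary info copied
        //     ippl::FEMVector<int> m = x.skeletonCopy<int>();  // same size/boundary info,
        //                                                      // fresh storage of the new type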


        /**
         * @brief Pack data into \p BoundaryInfo::commBuffer_m for
         * MPI communication.
         *
         * This function takes data from the vector according to \p idxStore
         * and stores it inside of \p BoundaryInfo::commBuffer_m.
         *
         * @param idxStore A 1D Kokkos view which stores the indices of
         * \p FEMVector::data_m which we want to send.
         */
        void pack(const Kokkos::View<size_t*>& idxStore);


        /**
         * @brief Unpack data from \p BoundaryInfo::commBuffer_m into
         * \p FEMVector::data_m after communication.
         *
         * This function takes data from \p BoundaryInfo::commBuffer_m and
         * stores it according to \p idxStore in \p FEMVector::data_m.
         *
         * @param idxStore A 1D Kokkos view which stores the indices of
         * \p FEMVector::data_m to which we want to store.
         *
         * @tparam Op The operator to use in order to update the values in
         * \p FEMVector::data_m.
         */
        template <typename Op>
        void unpack(const Kokkos::View<size_t*>& idxStore);


        /**
         * @brief Struct for the assignment operator to be used with
         * \p FEMVector::unpack().
         */
        struct Assign {
            KOKKOS_INLINE_FUNCTION void operator()(T& lhs, const T& rhs) const {
                lhs = rhs;
            }
        };


        /**
         * @brief Struct for the addition-assignment operator to be used with
         * \p FEMVector::unpack().
         */
        struct AssignAdd {
            KOKKOS_INLINE_FUNCTION void operator()(T& lhs, const T& rhs) const {
                lhs += rhs;
            }
        };
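
        // Sketch of how pack()/unpack() combine with the functors above during
        // a halo exchange (simplified; the MPI send/receive of commBuffer_m in
        // between is omitted, and the index views are illustrative):
        //
        //     pack(sendIdxs);              // gather the values to be sent
        //     // ... exchange commBuffer_m with the neighbor rank ...
        //     unpack<Assign>(recvIdxs);    // overwrite halo cells (fillHalo-style)
        //     unpack<AssignAdd>(sendIdxs); // or accumulate received halo
        //                                  // contributions (accumulateHalo-style)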


    private:
        /**
         * @brief Structure holding MPI neighbor and boundary information.
         *
         * This struct holds all the information regarding MPI (neighbor list,
         * indices which need to be exchanged, ...) and additionally boundary
         * information, like what type of boundary we have. The \c FEMVector
         * class then holds a pointer to an object of this type; the reason
         * behind this is that it allows us to copy a \c FEMVector onto the
         * device cheaply, without having to worry about copying much
         * additional information.
         */
        struct BoundaryInfo {

            /**
             * @brief Constructor for a \c BoundaryInfo object.
             *
             * Constructor to be used to create an object of type
             * \c BoundaryInfo.
             *
             * @param neighbors The ranks of the neighboring MPI tasks.
             * @param sendIdxs The indices for which the data should be sent to
             * the MPI neighbors.
             * @param recvIdxs The halo cell indices.
             */
            BoundaryInfo(std::vector<size_t> neighbors,
                         std::vector< Kokkos::View<size_t*> > sendIdxs,
                         std::vector< Kokkos::View<size_t*> > recvIdxs);


            /**
             * @brief Stores the ranks of the neighboring MPI tasks.
             *
             * A vector storing the ranks of the neighboring MPI tasks. This is
             * used during halo operations in combination with
             * \p BoundaryInfo::sendIdxs_m and
             * \p BoundaryInfo::recvIdxs_m in order to send the
             * correct data to the correct ranks.
             */
            std::vector<size_t> neighbors_m;

            /**
             * @brief Stores indices of \p FEMVector::data_m which need to be
             * sent to the MPI neighbors.
             *
             * This is a 2D list which stores the indices of the
             * \p FEMVector::data_m variable which need to be sent to the MPI
             * neighbors. The first dimension goes over all the neighbors and
             * should be used in combination with \p BoundaryInfo::neighbors_m,
             * while the second dimension goes over the actual indices.
             *
             * This corresponds to the indices which belong to this rank but
             * are shared by the halos of the other ranks.
             */
            std::vector< Kokkos::View<size_t*> > sendIdxs_m;

            /**
             * @brief Stores indices of \p FEMVector::data_m which are part of
             * the halo.
             *
             * This is a 2D list which stores the indices of the
             * \p FEMVector::data_m variable which are part of the halo. The
             * first dimension goes over all the neighbors and should be used
             * in combination with \p BoundaryInfo::neighbors_m, while the
             * second dimension goes over the actual indices.
             *
             * It is called recvIdxs to be in line with
             * \c ippl::detail::HaloCells and because it stores the indices for
             * which we receive data from the neighbors.
             */
            std::vector< Kokkos::View<size_t*> > recvIdxs_m;


            /**
             * @brief Buffer for MPI communication.
             *
             * This buffer is used during MPI communication in order to store
             * the values which should be sent to the neighbors. We are using a
             * \c ippl::detail::FieldBufferData even though we do not have an
             * \c ippl::Field, but this buffer struct is general enough to
             * allow for such things.
             */
            detail::FieldBufferData<T> commBuffer_m;
        };

        /**
         * @brief Data this object is storing.
         *
         * The data which the \c FEMVector is storing; it is represented by a
         * one-dimensional \c Kokkos::View and lives on the default device.
         */
        Kokkos::View<T*> data_m;


        /**
         * @brief Struct holding all the MPI and boundary information.
         *
         * Pointer to a struct holding all the information required for MPI
         * communication and general boundary information. The reason for it
         * being a pointer is that when this \c FEMVector object is copied to
         * the device, only the pointer and not all the data needs to be
         * copied.
         */
        std::shared_ptr<BoundaryInfo> boundaryInfo_m;

    };


    /**
     * @brief Calculate the inner product between two \c ippl::FEMVector(s).
     *
     * Calculates the inner product \f$ a^T b \f$ between the
     * \c ippl::FEMVector(s) \p a and \p b. Note that the halo cells are
     * included in the inner product computation; if this should not be the
     * case, the halo cells should be set to 0 using the
     * \p ippl::FEMVector::setHalo() function.
     *
     * @param a First vector.
     * @param b Second vector.
     *
     * @return The value \f$a^Tb\f$.
     */
    template <typename T>
    T innerProduct(const FEMVector<T>& a, const FEMVector<T>& b) {
        T localSum = 0;
        auto aView = a.getView();
        auto bView = b.getView();

        size_t n = aView.extent(0);
        Kokkos::parallel_reduce("FEMVector innerProduct", n,
            KOKKOS_LAMBDA(const size_t i, T& val) {
                val += aView(i) * bView(i);
            },
            Kokkos::Sum<T>(localSum)
        );

        T globalSum = 0;
        ippl::Comm->allreduce(localSum, globalSum, 1, std::plus<T>());
        return globalSum;
    }
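
    // Usage sketch (illustrative; `x` and `b` are hypothetical vectors): to
    // exclude the halo cells from the reduction, zero them first, as noted in
    // the documentation above.
    //
    //     x.setHalo(0);
    //     b.setHalo(0);
    //     double alpha = ippl::innerProduct(x, b);   // reduced over all ranks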
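
    /**
     * @brief Calculate the p-norm of an \c ippl::FEMVector.
     *
     * Computes \f$ \bigl(\sum_i |v_i|^p\bigr)^{1/p} \f$ over all ranks; for
     * \p p = 0 the maximum absolute value \f$ \max_i |v_i| \f$ is returned
     * instead. As with \p ippl::innerProduct(), the halo cells are included
     * in the reduction.
     *
     * @param v The vector whose norm should be computed.
     * @param p The order of the norm (defaults to 2).
     */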
    template <typename T>
    T norm(const FEMVector<T>& v, int p = 2) {

        T local = 0;
        auto view = v.getView();
        size_t n = view.extent(0);
        switch (p) {
            case 0: {
                Kokkos::parallel_reduce("FEMVector l0 norm", n,
                    KOKKOS_LAMBDA(const size_t i, T& val) {
                        val = Kokkos::max(val, Kokkos::abs(view(i)));
                    },
                    Kokkos::Max<T>(local)
                );
                T globalMax = 0;
                ippl::Comm->allreduce(local, globalMax, 1, std::greater<T>());
                return globalMax;
            }
            default: {
                Kokkos::parallel_reduce("FEMVector lp norm", n,
                    KOKKOS_LAMBDA(const size_t i, T& val) {
                        val += std::pow(Kokkos::abs(view(i)), p);
                    },
                    Kokkos::Sum<T>(local)
                );
                T globalSum = 0;
                ippl::Comm->allreduce(local, globalSum, 1, std::plus<T>());
                return std::pow(globalSum, 1.0 / p);
            }
        }
    }
}  // namespace ippl


#include "FEMVector.hpp"

#endif  // IPPL_FEMVECTOR_H