# GitHub Repository: trixi-framework/Trixi.jl
# Path: blob/main/src/solvers/dgsem_p4est/containers_parallel.jl

# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
@muladd begin
#! format: noindent
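
# As a brief illustration of what the macro does (generic example, not tied to any particular
# expression in this file): inside the `@muladd begin ... end` block, MuladdMacro rewrites
#     a * b + c
# into
#     muladd(a, b, c)
# which allows LLVM to emit fused multiply-add instructions on hardware that supports them.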

mutable struct P4estMPIInterfaceContainer{NDIMS, uEltype <: Real, NDIMSP2,
                                          uArray <: DenseArray{uEltype, NDIMSP2},
                                          VecInt <: DenseVector{Int},
                                          IndicesVector <: DenseVector{NTuple{NDIMS, Symbol}},
                                          uVector <: DenseVector{uEltype}} <:
               AbstractMPIInterfaceContainer
    u::uArray                   # [primary/secondary, variable, i, j, interface]
    local_neighbor_ids::VecInt  # [interface]
    node_indices::IndicesVector # [interface]
    local_sides::VecInt         # [interface]
    # internal `resize!`able storage
    _u::uVector
end
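
# Illustrative indexing sketch (names made up, not part of the upstream code): in 2D the
# wrapped array `u` has the layout `u[primary/secondary, variable, node, interface]`, so
#     u[1, v, i, interface]  # value of variable `v` at node `i` on the primary side
#     u[2, v, i, interface]  # value at the same location on the secondary side
# Which of the two sides is owned by the current MPI rank is recorded in
# `local_sides[interface]`.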

@inline function nmpiinterfaces(interfaces::P4estMPIInterfaceContainer)
    return length(interfaces.local_sides)
end
@inline Base.ndims(::P4estMPIInterfaceContainer{NDIMS}) where {NDIMS} = NDIMS

function Base.resize!(mpi_interfaces::P4estMPIInterfaceContainer, capacity)
    @unpack _u, local_neighbor_ids, node_indices, local_sides = mpi_interfaces

    n_dims = ndims(mpi_interfaces)
    n_nodes = size(mpi_interfaces.u, 3)
    n_variables = size(mpi_interfaces.u, 2)
    ArrayType = storage_type(mpi_interfaces)

    resize!(_u, 2 * n_variables * n_nodes^(n_dims - 1) * capacity)
    mpi_interfaces.u = unsafe_wrap(ArrayType, pointer(_u),
                                   (2, n_variables, ntuple(_ -> n_nodes, n_dims - 1)...,
                                    capacity))

    resize!(local_neighbor_ids, capacity)

    resize!(node_indices, capacity)

    resize!(local_sides, capacity)

    return nothing
end
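
# The pattern above (resize the flat `_u`, then rebuild the multi-dimensional view with
# `unsafe_wrap`) is why the container stores both `_u` and `u`. A minimal standalone sketch
# of the same idiom with made-up sizes, purely for illustration:
#     _v = Vector{Float64}(undef, 2 * 4 * 3 * 10)          # flat storage
#     v  = unsafe_wrap(Array, pointer(_v), (2, 4, 3, 10))   # multi-dimensional view
#     resize!(_v, 2 * 4 * 3 * 20)                           # may reallocate, so the pointer can change
#     v  = unsafe_wrap(Array, pointer(_v), (2, 4, 3, 20))   # hence the view must be rebuilt
# Keeping a reference to the flat vector (here: the `_u` field) also prevents the wrapped
# memory from being garbage collected while the view is still in use.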

# Create MPI interface container and initialize interface data
function init_mpi_interfaces(mesh::Union{P4estMeshParallel, T8codeMeshParallel},
                             equations, basis, elements)
    NDIMS = ndims(elements)
    uEltype = eltype(elements)

    # Initialize container
    n_mpi_interfaces = count_required_surfaces(mesh).mpi_interfaces

    _u = Vector{uEltype}(undef,
                         2 * nvariables(equations) * nnodes(basis)^(NDIMS - 1) *
                         n_mpi_interfaces)
    u = unsafe_wrap(Array, pointer(_u),
                    (2, nvariables(equations), ntuple(_ -> nnodes(basis), NDIMS - 1)...,
                     n_mpi_interfaces))

    local_neighbor_ids = Vector{Int}(undef, n_mpi_interfaces)

    node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, n_mpi_interfaces)

    local_sides = Vector{Int}(undef, n_mpi_interfaces)

    mpi_interfaces = P4estMPIInterfaceContainer{NDIMS, uEltype, NDIMS + 2,
                                                typeof(u), typeof(local_neighbor_ids),
                                                typeof(node_indices), typeof(_u)}(u,
                                                                                  local_neighbor_ids,
                                                                                  node_indices,
                                                                                  local_sides,
                                                                                  _u)

    init_mpi_interfaces!(mpi_interfaces, mesh)

    return mpi_interfaces
end

function init_mpi_interfaces!(mpi_interfaces, mesh::P4estMeshParallel)
    init_surfaces!(nothing, nothing, nothing, mpi_interfaces, nothing, mesh)

    return mpi_interfaces
end

function Adapt.parent_type(::Type{<:Trixi.P4estMPIInterfaceContainer{<:Any, <:Any,
                                                                     <:Any, A}}) where {A}
    return A
end

# Manual adapt_structure since we have aliasing memory
function Adapt.adapt_structure(to, mpi_interfaces::P4estMPIInterfaceContainer)
    # Adapt Vectors and underlying storage
    _u = adapt(to, mpi_interfaces._u)
    local_neighbor_ids = adapt(to, mpi_interfaces.local_neighbor_ids)
    node_indices = adapt(to, mpi_interfaces.node_indices)
    local_sides = adapt(to, mpi_interfaces.local_sides)

    # Wrap array again
    u = unsafe_wrap_or_alloc(to, _u, size(mpi_interfaces.u))

    NDIMS = ndims(mpi_interfaces)
    return P4estMPIInterfaceContainer{NDIMS, eltype(u), NDIMS + 2,
                                      typeof(u), typeof(local_neighbor_ids),
                                      typeof(node_indices), typeof(_u)}(u,
                                                                        local_neighbor_ids,
                                                                        node_indices,
                                                                        local_sides, _u)
end
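
# Illustrative usage sketch (assumes a GPU backend such as CUDA.jl is available; not part of
# the upstream code): moving the container to the device re-adapts the flat storage and
# rebuilds the wrapped view via `unsafe_wrap_or_alloc`, roughly
#     using Adapt, CUDA
#     mpi_interfaces_gpu = Adapt.adapt(CuArray, mpi_interfaces)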

# Container data structure (structure-of-arrays style) for DG L2 mortars
#
# Similar to `P4estMortarContainer`. The field `neighbor_ids` has been split up into
# `local_neighbor_ids` and `local_neighbor_positions` to describe the ids and positions of the
# locally available elements belonging to a particular MPI mortar. Furthermore,
# `normal_directions` holds the normal vectors on the surface of the small elements for each
# mortar.
mutable struct P4estMPIMortarContainer{NDIMS, uEltype <: Real, RealT <: Real, NDIMSP1,
                                       NDIMSP2, NDIMSP3,
                                       uArray <: DenseArray{uEltype, NDIMSP3},
                                       uVector <: DenseVector{uEltype}} <:
               AbstractMPIMortarContainer
    u::uArray                                     # [small/large side, variable, position, i, j, mortar]
    local_neighbor_ids::Vector{Vector{Int}}       # [mortar][ids]
    local_neighbor_positions::Vector{Vector{Int}} # [mortar][positions]
    node_indices::Matrix{NTuple{NDIMS, Symbol}}   # [small/large, mortar]
    normal_directions::Array{RealT, NDIMSP2}      # [dimension, i, j, position, mortar]
    # internal `resize!`able storage
    _u::uVector
    _node_indices::Vector{NTuple{NDIMS, Symbol}}
    _normal_directions::Vector{RealT}
end
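
# Illustrative size sketch (made-up numbers, not part of the upstream code): for a 2D run with
# 4 variables, 3 nodes per direction, and 10 MPI mortars, the wrapped arrays have the sizes
#     size(u)                 == (2, 4, 2, 3, 10)  # small/large, variable, position, node, mortar
#     size(node_indices)      == (2, 10)
#     size(normal_directions) == (2, 3, 2, 10)     # dimension, node, position, mortar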

@inline function nmpimortars(mpi_mortars::P4estMPIMortarContainer)
    return length(mpi_mortars.local_neighbor_ids)
end
@inline Base.ndims(::P4estMPIMortarContainer{NDIMS}) where {NDIMS} = NDIMS

function Base.resize!(mpi_mortars::P4estMPIMortarContainer, capacity)
    @unpack _u, _node_indices, _normal_directions = mpi_mortars

    n_dims = ndims(mpi_mortars)
    n_nodes = size(mpi_mortars.u, 4)
    n_variables = size(mpi_mortars.u, 2)

    resize!(_u, 2 * n_variables * 2^(n_dims - 1) * n_nodes^(n_dims - 1) * capacity)
    mpi_mortars.u = unsafe_wrap(Array, pointer(_u),
                                (2, n_variables, 2^(n_dims - 1),
                                 ntuple(_ -> n_nodes, n_dims - 1)..., capacity))

    resize!(mpi_mortars.local_neighbor_ids, capacity)
    resize!(mpi_mortars.local_neighbor_positions, capacity)

    resize!(_node_indices, 2 * capacity)
    mpi_mortars.node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, capacity))

    resize!(_normal_directions,
            n_dims * n_nodes^(n_dims - 1) * 2^(n_dims - 1) * capacity)
    mpi_mortars.normal_directions = unsafe_wrap(Array, pointer(_normal_directions),
                                                (n_dims,
                                                 ntuple(_ -> n_nodes, n_dims - 1)...,
                                                 2^(n_dims - 1), capacity))

    return nothing
end

# Create MPI mortar container and initialize MPI mortar data
function init_mpi_mortars(mesh::Union{P4estMeshParallel, T8codeMeshParallel}, equations,
                          basis, elements)
    NDIMS = ndims(mesh)
    RealT = real(mesh)
    uEltype = eltype(elements)

    # Initialize container
    n_mpi_mortars = count_required_surfaces(mesh).mpi_mortars

    _u = Vector{uEltype}(undef,
                         2 * nvariables(equations) * 2^(NDIMS - 1) *
                         nnodes(basis)^(NDIMS - 1) * n_mpi_mortars)
    u = unsafe_wrap(Array, pointer(_u),
                    (2, nvariables(equations), 2^(NDIMS - 1),
                     ntuple(_ -> nnodes(basis), NDIMS - 1)..., n_mpi_mortars))

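    # Note: `fill(Vector{Int}(), n_mpi_mortars)` stores `n_mpi_mortars` references to the
    # *same* empty vector. This is safe here because every entry is replaced by a freshly
    # allocated vector in `init_mpi_mortars_iter_face_inner` below.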
    local_neighbor_ids = fill(Vector{Int}(), n_mpi_mortars)
    local_neighbor_positions = fill(Vector{Int}(), n_mpi_mortars)

    _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_mpi_mortars)
    node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_mpi_mortars))

    _normal_directions = Vector{RealT}(undef,
                                       NDIMS * nnodes(basis)^(NDIMS - 1) *
                                       2^(NDIMS - 1) * n_mpi_mortars)
    normal_directions = unsafe_wrap(Array, pointer(_normal_directions),
                                    (NDIMS, ntuple(_ -> nnodes(basis), NDIMS - 1)...,
                                     2^(NDIMS - 1), n_mpi_mortars))

    mpi_mortars = P4estMPIMortarContainer{NDIMS, uEltype, RealT, NDIMS + 1, NDIMS + 2,
                                          NDIMS + 3, typeof(u),
                                          typeof(_u)}(u, local_neighbor_ids,
                                                      local_neighbor_positions,
                                                      node_indices, normal_directions,
                                                      _u, _node_indices,
                                                      _normal_directions)

    if n_mpi_mortars > 0
        init_mpi_mortars!(mpi_mortars, mesh, basis, elements)
    end

    return mpi_mortars
end

function init_mpi_mortars!(mpi_mortars, mesh::P4estMeshParallel, basis, elements)
    init_surfaces!(nothing, nothing, nothing, nothing, mpi_mortars, mesh)
    init_normal_directions!(mpi_mortars, basis, elements)

    return mpi_mortars
end

function Adapt.adapt_structure(to, mpi_mortars::P4estMPIMortarContainer)
    # TODO: GPU
    # Only parts of this container are adapted, since we currently don't use
    # `local_neighbor_ids`, `local_neighbor_positions`, `normal_directions` on the GPU.
    # If we do need them, we need to redesign this to use the VecOfArrays approach.

    _u = adapt(to, mpi_mortars._u)
    _node_indices = mpi_mortars._node_indices
    _normal_directions = mpi_mortars._normal_directions

    u = unsafe_wrap_or_alloc(to, _u, size(mpi_mortars.u))
    local_neighbor_ids = mpi_mortars.local_neighbor_ids
    local_neighbor_positions = mpi_mortars.local_neighbor_positions
    node_indices = mpi_mortars.node_indices
    normal_directions = mpi_mortars.normal_directions

    NDIMS = ndims(mpi_mortars)
    return P4estMPIMortarContainer{NDIMS, eltype(_u),
                                   eltype(_normal_directions),
                                   NDIMS + 1, NDIMS + 2, NDIMS + 3,
                                   typeof(u), typeof(_u)}(u, local_neighbor_ids,
                                                          local_neighbor_positions,
                                                          node_indices,
                                                          normal_directions, _u,
                                                          _node_indices,
                                                          _normal_directions)
end

# Overload init! function for regular interfaces, regular mortars and boundaries since they
# must call the appropriate init_surfaces! function for parallel p4est meshes
function init_interfaces!(interfaces, elements,
                          mesh::P4estMeshParallel, basis)
    init_surfaces!(interfaces, nothing, nothing, nothing, nothing, mesh)
    init_normal_directions!(interfaces, basis, elements)

    return interfaces
end

function init_mortars!(mortars, mesh::P4estMeshParallel)
    init_surfaces!(nothing, mortars, nothing, nothing, nothing, mesh)

    return mortars
end

function init_boundaries!(boundaries, mesh::P4estMeshParallel)
    init_surfaces!(nothing, nothing, boundaries, nothing, nothing, mesh)

    return boundaries
end

function reinitialize_containers!(mesh::P4estMeshParallel, equations, dg::DGSEM, cache)
    # Make sure to re-create the ghost layer before reinitializing MPI-related containers
    update_ghost_layer!(mesh)

    n_cells = ncells(mesh)

    # Re-initialize elements container
    @unpack elements = cache
    resize!(elements, n_cells)
    init_elements!(elements, mesh, dg.basis)

    # Resize volume integral and related data structures
    @unpack volume_integral = dg
    resize_volume_integral_cache!(cache, mesh, volume_integral, n_cells)
    reinit_volume_integral_cache!(cache, mesh, dg, volume_integral, n_cells)

    required = count_required_surfaces(mesh)

    # resize interfaces container
    @unpack interfaces = cache
    resize!(interfaces, required.interfaces)

    # resize boundaries container
    @unpack boundaries = cache
    resize!(boundaries, required.boundaries)

    # resize mortars container
    @unpack mortars = cache
    resize!(mortars, required.mortars)

    # resize mpi_interfaces container
    @unpack mpi_interfaces = cache
    resize!(mpi_interfaces, required.mpi_interfaces)

    # resize mpi_mortars container
    @unpack mpi_mortars = cache
    resize!(mpi_mortars, required.mpi_mortars)

    # re-initialize containers together to reduce
    # the number of iterations over the mesh in p4est
    init_surfaces!(interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars, mesh)

    # init_normal_directions! requires that `node_indices` have been initialized
    init_normal_directions!(interfaces, dg.basis, elements)

    # re-initialize MPI cache
    @unpack mpi_cache = cache
    init_mpi_cache!(mpi_cache, mesh, mpi_interfaces, mpi_mortars,
                    nvariables(equations), nnodes(dg), eltype(elements))

    # re-initialize and distribute normal directions of MPI mortars; this requires MPI
    # communication, so the MPI cache must be re-initialized beforehand
    init_normal_directions!(mpi_mortars, dg.basis, elements)
    exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg))

    return nothing
end

# A helper struct used in initialization methods below
mutable struct ParallelInitSurfacesIterFaceUserData{Interfaces, Mortars, Boundaries,
                                                    MPIInterfaces, MPIMortars, Mesh}
    interfaces::Interfaces
    interface_id::Int
    mortars::Mortars
    mortar_id::Int
    boundaries::Boundaries
    boundary_id::Int
    mpi_interfaces::MPIInterfaces
    mpi_interface_id::Int
    mpi_mortars::MPIMortars
    mpi_mortar_id::Int
    mesh::Mesh
end

function ParallelInitSurfacesIterFaceUserData(interfaces, mortars, boundaries,
                                              mpi_interfaces, mpi_mortars, mesh)
    return ParallelInitSurfacesIterFaceUserData{typeof(interfaces), typeof(mortars),
                                                typeof(boundaries),
                                                typeof(mpi_interfaces),
                                                typeof(mpi_mortars), typeof(mesh)}(interfaces, 1,
                                                                                   mortars, 1,
                                                                                   boundaries, 1,
                                                                                   mpi_interfaces, 1,
                                                                                   mpi_mortars, 1,
                                                                                   mesh)
end

function init_surfaces_iter_face_parallel(info, user_data)
    # Unpack user_data
    data = unsafe_pointer_to_objref(Ptr{ParallelInitSurfacesIterFaceUserData}(user_data))

    # Function barrier because the unpacked user_data above is type-unstable
    return init_surfaces_iter_face_inner(info, data)
end

# 2D
function cfunction(::typeof(init_surfaces_iter_face_parallel), ::Val{2})
    @cfunction(init_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid}))
end
# 3D
function cfunction(::typeof(init_surfaces_iter_face_parallel), ::Val{3})
    @cfunction(init_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p8est_iter_face_info_t}, Ptr{Cvoid}))
end

# Function barrier for type stability, overload for parallel P4estMesh
function init_surfaces_iter_face_inner(info,
                                       user_data::ParallelInitSurfacesIterFaceUserData)
    @unpack interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars = user_data
    # This function is called during `init_surfaces!`; more precisely, it is called for each
    # face while p4est iterates over the forest. Since `init_surfaces!` can be used to
    # initialize all surfaces at once or any subset of them, some of the unpacked values above
    # may be `nothing` if they're not supposed to be initialized during this call. That is why
    # we need additional `!== nothing` checks below before initializing individual faces.
    info_pw = PointerWrapper(info)
    if info_pw.sides.elem_count[] == 2
        # Two neighboring elements => Interface or mortar

        # Extract surface data
        sides_pw = (load_pointerwrapper_side(info_pw, 1),
                    load_pointerwrapper_side(info_pw, 2))

        if sides_pw[1].is_hanging[] == false && sides_pw[2].is_hanging[] == false
            # No hanging nodes => normal interface or MPI interface
            if sides_pw[1].is.full.is_ghost[] == true ||
               sides_pw[2].is.full.is_ghost[] == true # remote side => MPI interface
                if mpi_interfaces !== nothing
                    init_mpi_interfaces_iter_face_inner(info_pw, sides_pw, user_data)
                end
            else
                if interfaces !== nothing
                    init_interfaces_iter_face_inner(info_pw, sides_pw, user_data)
                end
            end
        else
            # Hanging nodes => mortar or MPI mortar
            # First, we check which side is hanging, i.e., on which side we have the refined
            # cells. Then we check if any of the refined cells or the coarse cell are "ghost"
            # cells, i.e., they belong to another rank. That way we can determine whether this
            # is a regular mortar or an MPI mortar.
            if sides_pw[1].is_hanging[] == true
                @assert sides_pw[2].is_hanging[] == false
                if any(sides_pw[1].is.hanging.is_ghost[] .== true) ||
                   sides_pw[2].is.full.is_ghost[] == true
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            else # sides_pw[2].is_hanging[] == true
                @assert sides_pw[1].is_hanging[] == false
                if sides_pw[1].is.full.is_ghost[] == true ||
                   any(sides_pw[2].is.hanging.is_ghost[] .== true)
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            end
            # Initialize mortar or MPI mortar
            if face_has_ghost_side && mpi_mortars !== nothing
                init_mpi_mortars_iter_face_inner(info_pw, sides_pw, user_data)
            elseif !face_has_ghost_side && mortars !== nothing
                init_mortars_iter_face_inner(info_pw, sides_pw, user_data)
            end
        end
    elseif info_pw.sides.elem_count[] == 1
        # One neighboring element => boundary
        if boundaries !== nothing
            init_boundaries_iter_face_inner(info_pw, user_data)
        end
    end

    return nothing
end

function init_surfaces!(interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars,
                        mesh::P4estMeshParallel)
    # Let p4est iterate over all faces and call init_surfaces_iter_face_parallel
    iter_face_c = cfunction(init_surfaces_iter_face_parallel, Val(ndims(mesh)))
    user_data = ParallelInitSurfacesIterFaceUserData(interfaces, mortars, boundaries,
                                                     mpi_interfaces, mpi_mortars, mesh)

    iterate_p4est(mesh.p4est, user_data; ghost_layer = mesh.ghost,
                  iter_face_c = iter_face_c)

    return nothing
end

# Initialization of MPI interfaces after the function barrier
function init_mpi_interfaces_iter_face_inner(info_pw, sides_pw, user_data)
    @unpack mpi_interfaces, mpi_interface_id, mesh = user_data
    user_data.mpi_interface_id += 1

    if sides_pw[1].is.full.is_ghost[] == true
        local_side = 2
    elseif sides_pw[2].is.full.is_ghost[] == true
        local_side = 1
    else
        error("should not happen")
    end

    # Get local tree, one-based indexing
    tree_pw = load_pointerwrapper_tree(mesh.p4est, sides_pw[local_side].treeid[] + 1)
    # Quadrant numbering offset of the local quadrant at this interface
    offset = tree_pw.quadrants_offset[]
    tree_quad_id = sides_pw[local_side].is.full.quadid[] # quadid in the local tree
    # ID of the local neighboring quad, cumulative over local trees
    local_quad_id = offset + tree_quad_id

    # p4est uses zero-based indexing, convert to one-based indexing
    mpi_interfaces.local_neighbor_ids[mpi_interface_id] = local_quad_id + 1
    mpi_interfaces.local_sides[mpi_interface_id] = local_side

    # Face at which the interface lies
    faces = (sides_pw[1].face[], sides_pw[2].face[])

    # Set mpi_interfaces.node_indices; this is dimension-specific and implemented in
    # containers_[23]d_parallel.jl
    init_mpi_interface_node_indices!(mpi_interfaces, faces, local_side,
                                     info_pw.orientation[],
                                     mpi_interface_id)

    return nothing
end

# Initialization of MPI mortars after the function barrier
function init_mpi_mortars_iter_face_inner(info_pw, sides_pw, user_data)
    @unpack mpi_mortars, mpi_mortar_id, mesh = user_data
    user_data.mpi_mortar_id += 1

    # Get Tuple of adjacent trees, one-based indexing
    trees_pw = (load_pointerwrapper_tree(mesh.p4est, sides_pw[1].treeid[] + 1),
                load_pointerwrapper_tree(mesh.p4est, sides_pw[2].treeid[] + 1))
    # Quadrant numbering offsets of the quadrants at this mortar
    offsets = SVector(trees_pw[1].quadrants_offset[],
                      trees_pw[2].quadrants_offset[])

    if sides_pw[1].is_hanging[] == true
        hanging_side = 1
        full_side = 2
    else # sides_pw[2].is_hanging[] == true
        hanging_side = 2
        full_side = 1
    end
    # Just to be sure before accessing is.full or is.hanging later
    @assert sides_pw[full_side].is_hanging[] == false
    @assert sides_pw[hanging_side].is_hanging[] == true

    # Find small quads that are locally available
    local_small_quad_positions = findall(sides_pw[hanging_side].is.hanging.is_ghost[] .==
                                         false)

    # Get id of local small quadrants within their tree
    # Indexing CBinding.Caccessor via a Vector does not work here -> use map instead
    tree_small_quad_ids = map(p -> sides_pw[hanging_side].is.hanging.quadid[][p],
                              local_small_quad_positions)
    local_small_quad_ids = offsets[hanging_side] .+ tree_small_quad_ids # ids cumulative over local trees

    # Determine whether the large quadrant is locally available and, if so, determine its id
    if sides_pw[full_side].is.full.is_ghost[] == false
        local_large_quad_id = offsets[full_side] + sides_pw[full_side].is.full.quadid[]
    else
        local_large_quad_id = -1 # large quad is ghost
    end

    # Write data to mortar container, convert to 1-based indexing
    # Start with small elements
    local_neighbor_ids = local_small_quad_ids .+ 1
    local_neighbor_positions = local_small_quad_positions
    # Add large element information if it is locally available
    if local_large_quad_id > -1
        push!(local_neighbor_ids, local_large_quad_id + 1) # convert to 1-based index
        push!(local_neighbor_positions, 2^(ndims(mesh) - 1) + 1)
    end

    mpi_mortars.local_neighbor_ids[mpi_mortar_id] = local_neighbor_ids
    mpi_mortars.local_neighbor_positions[mpi_mortar_id] = local_neighbor_positions

    # init_mortar_node_indices! expects side 1 to contain the small elements
    faces = (sides_pw[hanging_side].face[], sides_pw[full_side].face[])
    init_mortar_node_indices!(mpi_mortars, faces, info_pw.orientation[], mpi_mortar_id)

    return nothing
end

# Iterate over all interfaces and count
# - (inner) interfaces
# - mortars
# - boundaries
# - (MPI) interfaces at subdomain boundaries
# - (MPI) mortars at subdomain boundaries
# and collect the numbers in `user_data` in this order.
function count_surfaces_iter_face_parallel(info, user_data)
    info_pw = PointerWrapper(info)
    if info_pw.sides.elem_count[] == 2
        # Two neighboring elements => Interface or mortar

        # Extract surface data
        sides_pw = (load_pointerwrapper_side(info_pw, 1),
                    load_pointerwrapper_side(info_pw, 2))

        if sides_pw[1].is_hanging[] == false && sides_pw[2].is_hanging[] == false
            # No hanging nodes => normal interface or MPI interface
            if sides_pw[1].is.full.is_ghost[] == true ||
               sides_pw[2].is.full.is_ghost[] == true # remote side => MPI interface
                # Unpack user_data = [mpi_interface_count] and increment mpi_interface_count
                pw = PointerWrapper(Int, user_data)
                id = pw[4]
                pw[4] = id + 1
            else
                # Unpack user_data = [interface_count] and increment interface_count
                pw = PointerWrapper(Int, user_data)
                id = pw[1]
                pw[1] = id + 1
            end
        else
            # Hanging nodes => mortar or MPI mortar
            # First, we check which side is hanging, i.e., on which side we have the refined
            # cells. Then we check if any of the refined cells or the coarse cell are "ghost"
            # cells, i.e., they belong to another rank. That way we can determine whether this
            # is a regular mortar or an MPI mortar.
            if sides_pw[1].is_hanging[] == true
                @assert sides_pw[2].is_hanging[] == false
                if any(sides_pw[1].is.hanging.is_ghost[] .== true) ||
                   sides_pw[2].is.full.is_ghost[] == true
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            else # sides_pw[2].is_hanging[] == true
                @assert sides_pw[1].is_hanging[] == false
                if sides_pw[1].is.full.is_ghost[] == true ||
                   any(sides_pw[2].is.hanging.is_ghost[] .== true)
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            end
            if face_has_ghost_side
                # Unpack user_data = [mpi_mortar_count] and increment mpi_mortar_count
                pw = PointerWrapper(Int, user_data)
                id = pw[5]
                pw[5] = id + 1
            else
                # Unpack user_data = [mortar_count] and increment mortar_count
                pw = PointerWrapper(Int, user_data)
                id = pw[2]
                pw[2] = id + 1
            end
        end
    elseif info_pw.sides.elem_count[] == 1
        # One neighboring element => boundary

        # Unpack user_data = [boundary_count] and increment boundary_count
        pw = PointerWrapper(Int, user_data)
        id = pw[3]
        pw[3] = id + 1
    end

    return nothing
end

# 2D
function cfunction(::typeof(count_surfaces_iter_face_parallel), ::Val{2})
    @cfunction(count_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid}))
end
# 3D
function cfunction(::typeof(count_surfaces_iter_face_parallel), ::Val{3})
    @cfunction(count_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p8est_iter_face_info_t}, Ptr{Cvoid}))
end

function count_required_surfaces(mesh::P4estMeshParallel)
    # Let p4est iterate over all faces and call count_surfaces_iter_face_parallel
    iter_face_c = cfunction(count_surfaces_iter_face_parallel, Val(ndims(mesh)))

    # interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars
    user_data = [0, 0, 0, 0, 0]

    iterate_p4est(mesh.p4est, user_data; ghost_layer = mesh.ghost,
                  iter_face_c = iter_face_c)

    # Return counters
    return (interfaces = user_data[1],
            mortars = user_data[2],
            boundaries = user_data[3],
            mpi_interfaces = user_data[4],
            mpi_mortars = user_data[5])
end
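
# For illustration (made-up numbers): on a given MPI rank the NamedTuple returned above might
# read
#     (interfaces = 120, mortars = 8, boundaries = 16, mpi_interfaces = 24, mpi_mortars = 4)
# with the field order matching the layout of `user_data` used during counting.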
end # @muladd