Path: blob/main/src/solvers/dgsem_p4est/containers_parallel.jl

# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
@muladd begin
#! format: noindent

mutable struct P4estMPIInterfaceContainer{NDIMS, uEltype <: Real, NDIMSP2,
                                          uArray <: DenseArray{uEltype, NDIMSP2},
                                          VecInt <: DenseVector{Int},
                                          IndicesVector <:
                                          DenseVector{NTuple{NDIMS, Symbol}},
                                          uVector <: DenseVector{uEltype}} <:
               AbstractMPIInterfaceContainer
    u::uArray # [primary/secondary, variable, i, j, interface]
    local_neighbor_ids::VecInt # [interface]
    node_indices::IndicesVector # [interface]
    local_sides::VecInt # [interface]
    # internal `resize!`able storage
    _u::uVector
end

@inline function nmpiinterfaces(interfaces::P4estMPIInterfaceContainer)
    return length(interfaces.local_sides)
end

@inline Base.ndims(::P4estMPIInterfaceContainer{NDIMS}) where {NDIMS} = NDIMS

function Base.resize!(mpi_interfaces::P4estMPIInterfaceContainer, capacity)
    @unpack _u, local_neighbor_ids, node_indices, local_sides = mpi_interfaces

    n_dims = ndims(mpi_interfaces)
    n_nodes = size(mpi_interfaces.u, 3)
    n_variables = size(mpi_interfaces.u, 2)
    ArrayType = storage_type(mpi_interfaces)

    resize!(_u, 2 * n_variables * n_nodes^(n_dims - 1) * capacity)
    mpi_interfaces.u = unsafe_wrap(ArrayType, pointer(_u),
                                   (2, n_variables,
                                    ntuple(_ -> n_nodes, n_dims - 1)...,
                                    capacity))

    resize!(local_neighbor_ids, capacity)

    resize!(node_indices, capacity)

    resize!(local_sides, capacity)

    return nothing
end
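
# The `resize!` above relies on a pattern used throughout these containers: the flat
# vector `_u` owns the memory, and the multi-dimensional `u` is only an `unsafe_wrap`ped
# view of it that must be re-created after every resize, since `resize!` may reallocate
# and move the underlying buffer. A minimal sketch of the same idea with plain `Base`
# types (names here are illustrative only):
#
#     _data = Vector{Float64}(undef, 2 * 3 * 4)            # flat, resizable storage
#     data = unsafe_wrap(Array, pointer(_data), (2, 3, 4)) # shaped view, aliases _data
#     resize!(_data, 2 * 3 * 5)                            # may move the buffer ...
#     data = unsafe_wrap(Array, pointer(_data), (2, 3, 5)) # ... so the view is rebuilt
#
# A wrapped array must never be used between the `resize!` and the re-wrap.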

# Create MPI interface container and initialize interface data
function init_mpi_interfaces(mesh::Union{P4estMeshParallel, T8codeMeshParallel},
                             equations, basis, elements)
    NDIMS = ndims(elements)
    uEltype = eltype(elements)

    # Initialize container
    n_mpi_interfaces = count_required_surfaces(mesh).mpi_interfaces

    _u = Vector{uEltype}(undef,
                         2 * nvariables(equations) * nnodes(basis)^(NDIMS - 1) *
                         n_mpi_interfaces)
    u = unsafe_wrap(Array, pointer(_u),
                    (2, nvariables(equations),
                     ntuple(_ -> nnodes(basis), NDIMS - 1)...,
                     n_mpi_interfaces))

    local_neighbor_ids = Vector{Int}(undef, n_mpi_interfaces)

    node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, n_mpi_interfaces)

    local_sides = Vector{Int}(undef, n_mpi_interfaces)

    mpi_interfaces = P4estMPIInterfaceContainer{NDIMS, uEltype, NDIMS + 2,
                                                typeof(u),
                                                typeof(local_neighbor_ids),
                                                typeof(node_indices),
                                                typeof(_u)}(u, local_neighbor_ids,
                                                            node_indices, local_sides,
                                                            _u)

    init_mpi_interfaces!(mpi_interfaces, mesh)

    return mpi_interfaces
end

function init_mpi_interfaces!(mpi_interfaces, mesh::P4estMeshParallel)
    init_surfaces!(nothing, nothing, nothing, mpi_interfaces, nothing, mesh)

    return mpi_interfaces
end

function Adapt.parent_type(::Type{<:Trixi.P4estMPIInterfaceContainer{<:Any, <:Any,
                                                                     <:Any, A}}) where {A}
    return A
end

# Manual adapt_structure since we have aliasing memory
function Adapt.adapt_structure(to, mpi_interfaces::P4estMPIInterfaceContainer)
    # Adapt Vectors and underlying storage
    _u = adapt(to, mpi_interfaces._u)
    local_neighbor_ids = adapt(to, mpi_interfaces.local_neighbor_ids)
    node_indices = adapt(to, mpi_interfaces.node_indices)
    local_sides = adapt(to, mpi_interfaces.local_sides)

    # Wrap array again
    u = unsafe_wrap_or_alloc(to, _u, size(mpi_interfaces.u))

    NDIMS = ndims(mpi_interfaces)
    return P4estMPIInterfaceContainer{NDIMS, eltype(u), NDIMS + 2,
                                      typeof(u), typeof(local_neighbor_ids),
                                      typeof(node_indices), typeof(_u)}(u,
                                                                        local_neighbor_ids,
                                                                        node_indices,
                                                                        local_sides, _u)
end
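
# With `parent_type` and `adapt_structure` defined, the whole container can be moved to
# a different array backend via Adapt.jl. A hypothetical sketch (assuming CUDA.jl is
# loaded; nothing in this file requires `CuArray`):
#
#     using Adapt, CUDA
#     gpu_interfaces = adapt(CuArray, mpi_interfaces)
#
# Afterwards, `gpu_interfaces.u` is wrapped from (or, depending on the backend,
# allocated alongside) the adapted flat storage `_u`, mirroring the CPU-side aliasing
# between `u` and `_u`.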

# Container data structure (structure-of-arrays style) for DG L2 mortars
#
# Similar to `P4estMortarContainer`. The field `neighbor_ids` has been split up into
# `local_neighbor_ids` and `local_neighbor_positions` to describe the ids and positions
# of the locally available elements belonging to a particular MPI mortar. Furthermore,
# `normal_directions` holds the normal vectors on the surface of the small elements for
# each mortar.
mutable struct P4estMPIMortarContainer{NDIMS, uEltype <: Real, RealT <: Real, NDIMSP1,
                                       NDIMSP2, NDIMSP3,
                                       uArray <: DenseArray{uEltype, NDIMSP3},
                                       uVector <: DenseVector{uEltype}} <:
               AbstractMPIMortarContainer
    u::uArray # [small/large side, variable, position, i, j, mortar]
    local_neighbor_ids::Vector{Vector{Int}} # [mortar][ids]
    local_neighbor_positions::Vector{Vector{Int}} # [mortar][positions]
    node_indices::Matrix{NTuple{NDIMS, Symbol}} # [small/large, mortar]
    normal_directions::Array{RealT, NDIMSP2} # [dimension, i, j, position, mortar]
    # internal `resize!`able storage
    _u::uVector
    _node_indices::Vector{NTuple{NDIMS, Symbol}}
    _normal_directions::Vector{RealT}
end

@inline function nmpimortars(mpi_mortars::P4estMPIMortarContainer)
    return length(mpi_mortars.local_neighbor_ids)
end

@inline Base.ndims(::P4estMPIMortarContainer{NDIMS}) where {NDIMS} = NDIMS

function Base.resize!(mpi_mortars::P4estMPIMortarContainer, capacity)
    @unpack _u, _node_indices, _normal_directions = mpi_mortars

    n_dims = ndims(mpi_mortars)
    n_nodes = size(mpi_mortars.u, 4)
    n_variables = size(mpi_mortars.u, 2)

    resize!(_u, 2 * n_variables * 2^(n_dims - 1) * n_nodes^(n_dims - 1) * capacity)
    mpi_mortars.u = unsafe_wrap(Array, pointer(_u),
                                (2, n_variables, 2^(n_dims - 1),
                                 ntuple(_ -> n_nodes, n_dims - 1)..., capacity))

    resize!(mpi_mortars.local_neighbor_ids, capacity)
    resize!(mpi_mortars.local_neighbor_positions, capacity)

    resize!(_node_indices, 2 * capacity)
    mpi_mortars.node_indices = unsafe_wrap(Array, pointer(_node_indices),
                                           (2, capacity))

    resize!(_normal_directions,
            n_dims * n_nodes^(n_dims - 1) * 2^(n_dims - 1) * capacity)
    mpi_mortars.normal_directions = unsafe_wrap(Array, pointer(_normal_directions),
                                                (n_dims,
                                                 ntuple(_ -> n_nodes, n_dims - 1)...,
                                                 2^(n_dims - 1), capacity))

    return nothing
end
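
# Worked example for the storage layout above: in 3D (`n_dims == 3`) there are
# 2^(n_dims - 1) = 4 small elements per mortar, so with `n_variables` variables and
# `n_nodes` nodes per direction, `u` has size
# (2, n_variables, 4, n_nodes, n_nodes, capacity) and the flat `_u` holds
# 2 * n_variables * 4 * n_nodes^2 * capacity entries, matching the `resize!` call.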

# Create MPI mortar container and initialize MPI mortar data
function init_mpi_mortars(mesh::Union{P4estMeshParallel, T8codeMeshParallel}, equations,
                          basis, elements)
    NDIMS = ndims(mesh)
    RealT = real(mesh)
    uEltype = eltype(elements)

    # Initialize container
    n_mpi_mortars = count_required_surfaces(mesh).mpi_mortars

    _u = Vector{uEltype}(undef,
                         2 * nvariables(equations) * 2^(NDIMS - 1) *
                         nnodes(basis)^(NDIMS - 1) * n_mpi_mortars)
    u = unsafe_wrap(Array, pointer(_u),
                    (2, nvariables(equations), 2^(NDIMS - 1),
                     ntuple(_ -> nnodes(basis), NDIMS - 1)..., n_mpi_mortars))

    local_neighbor_ids = fill(Vector{Int}(), n_mpi_mortars)
    local_neighbor_positions = fill(Vector{Int}(), n_mpi_mortars)

    _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_mpi_mortars)
    node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_mpi_mortars))

    _normal_directions = Vector{RealT}(undef,
                                       NDIMS * nnodes(basis)^(NDIMS - 1) *
                                       2^(NDIMS - 1) * n_mpi_mortars)
    normal_directions = unsafe_wrap(Array, pointer(_normal_directions),
                                    (NDIMS,
                                     ntuple(_ -> nnodes(basis), NDIMS - 1)...,
                                     2^(NDIMS - 1), n_mpi_mortars))

    mpi_mortars = P4estMPIMortarContainer{NDIMS, uEltype, RealT, NDIMS + 1, NDIMS + 2,
                                          NDIMS + 3, typeof(u),
                                          typeof(_u)}(u, local_neighbor_ids,
                                                      local_neighbor_positions,
                                                      node_indices, normal_directions,
                                                      _u, _node_indices,
                                                      _normal_directions)

    if n_mpi_mortars > 0
        init_mpi_mortars!(mpi_mortars, mesh, basis, elements)
    end

    return mpi_mortars
end

function init_mpi_mortars!(mpi_mortars, mesh::P4estMeshParallel, basis, elements)
    init_surfaces!(nothing, nothing, nothing, nothing, mpi_mortars, mesh)
    init_normal_directions!(mpi_mortars, basis, elements)

    return mpi_mortars
end

function Adapt.adapt_structure(to, mpi_mortars::P4estMPIMortarContainer)
    # TODO: GPU
    # Only parts of this container are adapted, since we currently don't use
    # `local_neighbor_ids`, `local_neighbor_positions`, and `normal_directions`
    # on the GPU. If we do need them, this needs to be redesigned to use the
    # VecOfArrays approach.

    _u = adapt(to, mpi_mortars._u)
    _node_indices = mpi_mortars._node_indices
    _normal_directions = mpi_mortars._normal_directions

    u = unsafe_wrap_or_alloc(to, _u, size(mpi_mortars.u))
    local_neighbor_ids = mpi_mortars.local_neighbor_ids
    local_neighbor_positions = mpi_mortars.local_neighbor_positions
    node_indices = mpi_mortars.node_indices
    normal_directions = mpi_mortars.normal_directions

    NDIMS = ndims(mpi_mortars)
    return P4estMPIMortarContainer{NDIMS, eltype(_u),
                                   eltype(_normal_directions),
                                   NDIMS + 1, NDIMS + 2, NDIMS + 3,
                                   typeof(u), typeof(_u)}(u, local_neighbor_ids,
                                                          local_neighbor_positions,
                                                          node_indices,
                                                          normal_directions, _u,
                                                          _node_indices,
                                                          _normal_directions)
end

# Overload the `init!` functions for regular interfaces, regular mortars, and
# boundaries since they must call the appropriate `init_surfaces!` function for
# parallel p4est meshes
function init_interfaces!(interfaces, elements,
                          mesh::P4estMeshParallel, basis)
    init_surfaces!(interfaces, nothing, nothing, nothing, nothing, mesh)
    init_normal_directions!(interfaces, basis, elements)

    return interfaces
end

function init_mortars!(mortars, mesh::P4estMeshParallel)
    init_surfaces!(nothing, mortars, nothing, nothing, nothing, mesh)

    return mortars
end

function init_boundaries!(boundaries, mesh::P4estMeshParallel)
    init_surfaces!(nothing, nothing, boundaries, nothing, nothing, mesh)

    return boundaries
end
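
# All `init_*!` functions above funnel into `init_surfaces!`, which takes one
# positional slot per surface type and skips every slot that is `nothing`. For
# example, initializing only the MPI interfaces of a mesh looks like
#
#     init_surfaces!(nothing, nothing, nothing, mpi_interfaces, nothing, mesh)
#
# while `reinitialize_containers!` below passes all five containers at once so that
# p4est only needs to iterate over the forest a single time.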

function reinitialize_containers!(mesh::P4estMeshParallel, equations, dg::DGSEM, cache)
    # Make sure to re-create the ghost layer before reinitializing MPI-related containers
    update_ghost_layer!(mesh)

    n_cells = ncells(mesh)

    # Re-initialize elements container
    @unpack elements = cache
    resize!(elements, n_cells)
    init_elements!(elements, mesh, dg.basis)

    # Resize volume integral and related data structures
    @unpack volume_integral = dg
    resize_volume_integral_cache!(cache, mesh, volume_integral, n_cells)
    reinit_volume_integral_cache!(cache, mesh, dg, volume_integral, n_cells)

    required = count_required_surfaces(mesh)

    # Resize interfaces container
    @unpack interfaces = cache
    resize!(interfaces, required.interfaces)

    # Resize boundaries container
    @unpack boundaries = cache
    resize!(boundaries, required.boundaries)

    # Resize mortars container
    @unpack mortars = cache
    resize!(mortars, required.mortars)

    # Resize mpi_interfaces container
    @unpack mpi_interfaces = cache
    resize!(mpi_interfaces, required.mpi_interfaces)

    # Resize mpi_mortars container
    @unpack mpi_mortars = cache
    resize!(mpi_mortars, required.mpi_mortars)

    # Re-initialize the containers together to reduce the number of iterations
    # over the mesh in p4est
    init_surfaces!(interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars, mesh)

    # init_normal_directions! requires that `node_indices` have been initialized
    init_normal_directions!(interfaces, dg.basis, elements)

    # Re-initialize MPI cache
    @unpack mpi_cache = cache
    init_mpi_cache!(mpi_cache, mesh, mpi_interfaces, mpi_mortars,
                    nvariables(equations), nnodes(dg), eltype(elements))

    # Re-initialize and distribute normal directions of MPI mortars; this requires
    # MPI communication, so the MPI cache must be re-initialized beforehand
    init_normal_directions!(mpi_mortars, dg.basis, elements)
    exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg))

    return nothing
end

# A helper struct used in initialization methods below
mutable struct ParallelInitSurfacesIterFaceUserData{Interfaces, Mortars, Boundaries,
                                                    MPIInterfaces, MPIMortars, Mesh}
    interfaces::Interfaces
    interface_id::Int
    mortars::Mortars
    mortar_id::Int
    boundaries::Boundaries
    boundary_id::Int
    mpi_interfaces::MPIInterfaces
    mpi_interface_id::Int
    mpi_mortars::MPIMortars
    mpi_mortar_id::Int
    mesh::Mesh
end

function ParallelInitSurfacesIterFaceUserData(interfaces, mortars, boundaries,
                                              mpi_interfaces, mpi_mortars, mesh)
    return ParallelInitSurfacesIterFaceUserData{typeof(interfaces), typeof(mortars),
                                                typeof(boundaries),
                                                typeof(mpi_interfaces),
                                                typeof(mpi_mortars),
                                                typeof(mesh)}(interfaces, 1,
                                                              mortars, 1,
                                                              boundaries, 1,
                                                              mpi_interfaces, 1,
                                                              mpi_mortars, 1,
                                                              mesh)
end

function init_surfaces_iter_face_parallel(info, user_data)
    # Unpack user_data
    data = unsafe_pointer_to_objref(Ptr{ParallelInitSurfacesIterFaceUserData}(user_data))

    # Function barrier because the unpacked user_data above is type-unstable
    return init_surfaces_iter_face_inner(info, data)
end

# 2D
function cfunction(::typeof(init_surfaces_iter_face_parallel), ::Val{2})
    @cfunction(init_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid}))
end
# 3D
function cfunction(::typeof(init_surfaces_iter_face_parallel), ::Val{3})
    @cfunction(init_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p8est_iter_face_info_t}, Ptr{Cvoid}))
end
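
# The `@cfunction` wrappers above follow the usual pattern for passing Julia state
# through a C callback: the user data object travels as a raw pointer, is recovered
# with `unsafe_pointer_to_objref` (which is type-unstable), and a function barrier
# restores type stability. A minimal self-contained sketch with illustrative names:
#
#     mutable struct CounterData
#         count::Int
#     end
#     function callback(user_data::Ptr{Cvoid})
#         data = unsafe_pointer_to_objref(Ptr{CounterData}(user_data))
#         return callback_inner(data) # function barrier
#     end
#     callback_inner(data::CounterData) = (data.count += 1; nothing)
#     callback_c = @cfunction(callback, Cvoid, (Ptr{Cvoid},))
#     data = CounterData(0)
#     GC.@preserve data ccall(callback_c, Cvoid, (Ptr{Cvoid},),
#                             pointer_from_objref(data))
#
# The `GC.@preserve` is essential: the C side only holds a raw pointer, which does
# not keep the Julia object alive.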

# Function barrier for type stability, overload for parallel P4estMesh
function init_surfaces_iter_face_inner(info,
                                       user_data::ParallelInitSurfacesIterFaceUserData)
    @unpack interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars = user_data
    # This function is called during `init_surfaces!`; more precisely, it is called
    # for each face while p4est iterates over the forest. Since `init_surfaces!` can
    # be used to initialize all surfaces at once or any subset of them, some of the
    # unpacked values above may be `nothing` if they are not supposed to be
    # initialized during this call. That is why we need the additional `!== nothing`
    # checks below before initializing individual faces.
    info_pw = PointerWrapper(info)
    if info_pw.sides.elem_count[] == 2
        # Two neighboring elements => interface or mortar

        # Extract surface data
        sides_pw = (load_pointerwrapper_side(info_pw, 1),
                    load_pointerwrapper_side(info_pw, 2))

        if sides_pw[1].is_hanging[] == false && sides_pw[2].is_hanging[] == false
            # No hanging nodes => normal interface or MPI interface
            if sides_pw[1].is.full.is_ghost[] == true ||
               sides_pw[2].is.full.is_ghost[] == true # remote side => MPI interface
                if mpi_interfaces !== nothing
                    init_mpi_interfaces_iter_face_inner(info_pw, sides_pw, user_data)
                end
            else
                if interfaces !== nothing
                    init_interfaces_iter_face_inner(info_pw, sides_pw, user_data)
                end
            end
        else
            # Hanging nodes => mortar or MPI mortar
            # First, we check which side is hanging, i.e., on which side we have the
            # refined cells. Then we check whether any of the refined cells or the
            # coarse cell are "ghost" cells, i.e., belong to another rank. That way we
            # can determine whether this is a regular mortar or an MPI mortar.
            if sides_pw[1].is_hanging[] == true
                @assert sides_pw[2].is_hanging[] == false
                if any(sides_pw[1].is.hanging.is_ghost[] .== true) ||
                   sides_pw[2].is.full.is_ghost[] == true
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            else # sides_pw[2].is_hanging[] == true
                @assert sides_pw[1].is_hanging[] == false
                if sides_pw[1].is.full.is_ghost[] == true ||
                   any(sides_pw[2].is.hanging.is_ghost[] .== true)
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            end
            # Initialize mortar or MPI mortar
            if face_has_ghost_side && mpi_mortars !== nothing
                init_mpi_mortars_iter_face_inner(info_pw, sides_pw, user_data)
            elseif !face_has_ghost_side && mortars !== nothing
                init_mortars_iter_face_inner(info_pw, sides_pw, user_data)
            end
        end
    elseif info_pw.sides.elem_count[] == 1
        # One neighboring element => boundary
        if boundaries !== nothing
            init_boundaries_iter_face_inner(info_pw, user_data)
        end
    end

    return nothing
end
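
# Summary of the face classification implemented above:
#
#   sides | hanging nodes | ghost element involved | resulting surface type
#   ------+---------------+------------------------+-----------------------
#   2     | no            | no                     | interface
#   2     | no            | yes                    | MPI interface
#   2     | yes           | no                     | mortar
#   2     | yes           | yes                    | MPI mortar
#   1     | --            | --                     | boundary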

function init_surfaces!(interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars,
                        mesh::P4estMeshParallel)
    # Let p4est iterate over all interfaces and call init_surfaces_iter_face_parallel
    iter_face_c = cfunction(init_surfaces_iter_face_parallel, Val(ndims(mesh)))
    user_data = ParallelInitSurfacesIterFaceUserData(interfaces, mortars, boundaries,
                                                     mpi_interfaces, mpi_mortars, mesh)

    iterate_p4est(mesh.p4est, user_data; ghost_layer = mesh.ghost,
                  iter_face_c = iter_face_c)

    return nothing
end

# Initialization of MPI interfaces after the function barrier
function init_mpi_interfaces_iter_face_inner(info_pw, sides_pw, user_data)
    @unpack mpi_interfaces, mpi_interface_id, mesh = user_data
    user_data.mpi_interface_id += 1

    if sides_pw[1].is.full.is_ghost[] == true
        local_side = 2
    elseif sides_pw[2].is.full.is_ghost[] == true
        local_side = 1
    else
        error("should not happen")
    end

    # Get local tree, one-based indexing
    tree_pw = load_pointerwrapper_tree(mesh.p4est, sides_pw[local_side].treeid[] + 1)
    # Quadrant numbering offset of the local quadrant at this interface
    offset = tree_pw.quadrants_offset[]
    tree_quad_id = sides_pw[local_side].is.full.quadid[] # quadid in the local tree
    # ID of the local neighboring quad, cumulative over local trees
    local_quad_id = offset + tree_quad_id

    # p4est uses zero-based indexing; convert to one-based indexing
    mpi_interfaces.local_neighbor_ids[mpi_interface_id] = local_quad_id + 1
    mpi_interfaces.local_sides[mpi_interface_id] = local_side

    # Face at which the interface lies
    faces = (sides_pw[1].face[], sides_pw[2].face[])

    # Save `mpi_interfaces.node_indices`; this is dimension-specific and implemented
    # in containers_[23]d_parallel.jl
    init_mpi_interface_node_indices!(mpi_interfaces, faces, local_side,
                                     info_pw.orientation[],
                                     mpi_interface_id)

    return nothing
end
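
# Worked example for the index arithmetic above: suppose this rank owns two trees
# with 4 and 6 quadrants, respectively, and the local side of an MPI interface is the
# third quadrant of the second tree. Then `tree_pw.quadrants_offset[] == 4`, the
# zero-based `quadid` is 2, so `local_quad_id == 6`, and the one-based Julia element
# index stored in `local_neighbor_ids` is 7.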

# Initialization of MPI mortars after the function barrier
function init_mpi_mortars_iter_face_inner(info_pw, sides_pw, user_data)
    @unpack mpi_mortars, mpi_mortar_id, mesh = user_data
    user_data.mpi_mortar_id += 1

    # Get tuple of adjacent trees, one-based indexing
    trees_pw = (load_pointerwrapper_tree(mesh.p4est, sides_pw[1].treeid[] + 1),
                load_pointerwrapper_tree(mesh.p4est, sides_pw[2].treeid[] + 1))
    # Quadrant numbering offsets of the quadrants at this mortar
    offsets = SVector(trees_pw[1].quadrants_offset[],
                      trees_pw[2].quadrants_offset[])

    if sides_pw[1].is_hanging[] == true
        hanging_side = 1
        full_side = 2
    else # sides_pw[2].is_hanging[] == true
        hanging_side = 2
        full_side = 1
    end
    # Just to be sure before accessing is.full or is.hanging later
    @assert sides_pw[full_side].is_hanging[] == false
    @assert sides_pw[hanging_side].is_hanging[] == true

    # Find small quads that are locally available
    local_small_quad_positions = findall(sides_pw[hanging_side].is.hanging.is_ghost[] .==
                                         false)

    # Get ids of local small quadrants within their tree
    # Indexing CBinding.Caccessor via a Vector does not work here -> use map instead
    tree_small_quad_ids = map(p -> sides_pw[hanging_side].is.hanging.quadid[][p],
                              local_small_quad_positions)
    local_small_quad_ids = offsets[hanging_side] .+ tree_small_quad_ids # ids cumulative over local trees

    # Determine whether the large quadrant is locally available and, if so, its id
    if sides_pw[full_side].is.full.is_ghost[] == false
        local_large_quad_id = offsets[full_side] + sides_pw[full_side].is.full.quadid[]
    else
        local_large_quad_id = -1 # large quad is ghost
    end

    # Write data to mortar container, convert to 1-based indexing
    # Start with the small elements
    local_neighbor_ids = local_small_quad_ids .+ 1
    local_neighbor_positions = local_small_quad_positions
    # Add large element information if it is locally available
    if local_large_quad_id > -1
        push!(local_neighbor_ids, local_large_quad_id + 1) # convert to 1-based index
        push!(local_neighbor_positions, 2^(ndims(mesh) - 1) + 1)
    end

    mpi_mortars.local_neighbor_ids[mpi_mortar_id] = local_neighbor_ids
    mpi_mortars.local_neighbor_positions[mpi_mortar_id] = local_neighbor_positions

    # init_mortar_node_indices! expects side 1 to contain the small elements
    faces = (sides_pw[hanging_side].face[], sides_pw[full_side].face[])
    init_mortar_node_indices!(mpi_mortars, faces, info_pw.orientation[], mpi_mortar_id)

    return nothing
end
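
# Position numbering used above: the small elements of a mortar occupy positions 1 to
# 2^(NDIMS - 1) (2 in 2D, 4 in 3D), and the large element, if locally available, is
# appended at position 2^(NDIMS - 1) + 1 (3 in 2D, 5 in 3D). Ghost elements are
# simply omitted, so `local_neighbor_ids[mortar]` may contain anywhere from one up to
# 2^(NDIMS - 1) entries, since at least one element of an MPI mortar belongs to
# another rank.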

# Iterate over all interfaces and count
# - (inner) interfaces
# - mortars
# - boundaries
# - (MPI) interfaces at subdomain boundaries
# - (MPI) mortars at subdomain boundaries
# and collect the numbers in `user_data` in this order.
function count_surfaces_iter_face_parallel(info, user_data)
    info_pw = PointerWrapper(info)
    if info_pw.sides.elem_count[] == 2
        # Two neighboring elements => interface or mortar

        # Extract surface data
        sides_pw = (load_pointerwrapper_side(info_pw, 1),
                    load_pointerwrapper_side(info_pw, 2))

        if sides_pw[1].is_hanging[] == false && sides_pw[2].is_hanging[] == false
            # No hanging nodes => normal interface or MPI interface
            if sides_pw[1].is.full.is_ghost[] == true ||
               sides_pw[2].is.full.is_ghost[] == true # remote side => MPI interface
                # Unpack user_data = [mpi_interface_count] and increment mpi_interface_count
                pw = PointerWrapper(Int, user_data)
                id = pw[4]
                pw[4] = id + 1
            else
                # Unpack user_data = [interface_count] and increment interface_count
                pw = PointerWrapper(Int, user_data)
                id = pw[1]
                pw[1] = id + 1
            end
        else
            # Hanging nodes => mortar or MPI mortar
            # First, we check which side is hanging, i.e., on which side we have the
            # refined cells. Then we check whether any of the refined cells or the
            # coarse cell are "ghost" cells, i.e., belong to another rank. That way we
            # can determine whether this is a regular mortar or an MPI mortar.
            if sides_pw[1].is_hanging[] == true
                @assert sides_pw[2].is_hanging[] == false
                if any(sides_pw[1].is.hanging.is_ghost[] .== true) ||
                   sides_pw[2].is.full.is_ghost[] == true
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            else # sides_pw[2].is_hanging[] == true
                @assert sides_pw[1].is_hanging[] == false
                if sides_pw[1].is.full.is_ghost[] == true ||
                   any(sides_pw[2].is.hanging.is_ghost[] .== true)
                    face_has_ghost_side = true
                else
                    face_has_ghost_side = false
                end
            end
            if face_has_ghost_side
                # Unpack user_data = [mpi_mortar_count] and increment mpi_mortar_count
                pw = PointerWrapper(Int, user_data)
                id = pw[5]
                pw[5] = id + 1
            else
                # Unpack user_data = [mortar_count] and increment mortar_count
                pw = PointerWrapper(Int, user_data)
                id = pw[2]
                pw[2] = id + 1
            end
        end
    elseif info_pw.sides.elem_count[] == 1
        # One neighboring element => boundary

        # Unpack user_data = [boundary_count] and increment boundary_count
        pw = PointerWrapper(Int, user_data)
        id = pw[3]
        pw[3] = id + 1
    end

    return nothing
end

# 2D
function cfunction(::typeof(count_surfaces_iter_face_parallel), ::Val{2})
    @cfunction(count_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid}))
end
# 3D
function cfunction(::typeof(count_surfaces_iter_face_parallel), ::Val{3})
    @cfunction(count_surfaces_iter_face_parallel, Cvoid,
               (Ptr{p8est_iter_face_info_t}, Ptr{Cvoid}))
end

function count_required_surfaces(mesh::P4estMeshParallel)
    # Let p4est iterate over all interfaces and call count_surfaces_iter_face_parallel
    iter_face_c = cfunction(count_surfaces_iter_face_parallel, Val(ndims(mesh)))

    # interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars
    user_data = [0, 0, 0, 0, 0]

    iterate_p4est(mesh.p4est, user_data; ghost_layer = mesh.ghost,
                  iter_face_c = iter_face_c)

    # Return counters
    return (interfaces = user_data[1],
            mortars = user_data[2],
            boundaries = user_data[3],
            mpi_interfaces = user_data[4],
            mpi_mortars = user_data[5])
end
end # @muladd