GitHub Repository: trixi-framework/Trixi.jl
Path: blob/main/src/solvers/fdsbp_tree/fdsbp_2d.jl

# !!! warning "Experimental implementation (upwind SBP)"
# This is an experimental feature and may change in future releases.

# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
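
# As a minimal sketch of the effect (illustrative only, not code from this
# file): inside the `@muladd begin ... end` block, an expression such as
#
#   du = a * b + c
#
# is rewritten to `du = muladd(a, b, c)`, which LLVM may lower to a single
# fused multiply-add instruction.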
@muladd begin
#! format: noindent

# 2D caches
function create_cache(mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, equations,
                      volume_integral::VolumeIntegralStrongForm,
                      dg, cache_containers, uEltype)
    prototype = Array{SVector{nvariables(equations), uEltype}, ndims(mesh)}(undef,
                                                                            ntuple(_ -> nnodes(dg),
                                                                                   ndims(mesh))...)
    f_threaded = [similar(prototype) for _ in 1:Threads.maxthreadid()]

    return (; f_threaded)
end
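
# Note: `f_threaded` provides one scratch flux array per thread. The volume
# integral below grabs its buffer via `f_threaded[Threads.threadid()]`, so
# threads inside `@threaded` loops never share (or race on) temporary storage.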

function create_cache(mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, equations,
                      volume_integral::VolumeIntegralUpwind,
                      dg, cache_containers, uEltype)
    u_node = SVector{nvariables(equations), uEltype}(ntuple(_ -> zero(uEltype),
                                                            Val{nvariables(equations)}()))
    f = StructArray([(u_node, u_node)])
    f_minus_plus_threaded = [similar(f, ntuple(_ -> nnodes(dg), ndims(mesh))...)
                             for _ in 1:Threads.maxthreadid()]

    f_minus, f_plus = StructArrays.components(f_minus_plus_threaded[1])
    f_minus_threaded = [f_minus]
    f_plus_threaded = [f_plus]
    for i in 2:Threads.maxthreadid()
        f_minus, f_plus = StructArrays.components(f_minus_plus_threaded[i])
        push!(f_minus_threaded, f_minus)
        push!(f_plus_threaded, f_plus)
    end

    return (; f_minus_plus_threaded, f_minus_threaded, f_plus_threaded)
end
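
# Note: each entry of `f_minus_plus_threaded` is a `StructArray` whose two
# components alias the corresponding entries of `f_minus_threaded` and
# `f_plus_threaded`. A single broadcast `@. f_minus_plus_element = splitting(...)`
# below therefore fills `f_minus_element` and `f_plus_element` in one pass.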

# 2D volume integral contributions for `VolumeIntegralStrongForm`
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::TreeMesh{2},
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralStrongForm,
                               dg::FDSBP, cache)
    D = dg.basis # SBP derivative operator
    @unpack f_threaded = cache

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end
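
    # A hedged sketch of this "array of structures" view (hypothetical sizes,
    # not taken from this file): for `nvariables(equations) == 4` and
    # `nnodes(dg) == 16`, `u` has size `(4, 16, 16, nelements)` and
    #
    #   u_vectors = reinterpret(reshape, SVector{4, eltype(u)}, u)
    #
    # is a `(16, 16, nelements)` array whose entries are `SVector`s holding all
    # four variables of one node, without copying any data.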

    # Use the tensor product structure to compute the discrete derivatives of
    # the fluxes line-by-line and add them to `du` for each element.
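
    # Accumulation detail: the five-argument `mul!(C, A, B, α, β)` from
    # LinearAlgebra computes `C .= α * (A * B) + β * C` in-place. With
    # `α = β = 1` as below, each call adds one line's derivative approximation
    # `D * f` to the current content of `du` without allocating.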
    @threaded for element in eachelement(dg, cache)
        f_element = f_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, :, element)

        # x direction
        @. f_element = flux(u_element, 1, equations)
        for j in eachnode(dg)
            mul!(view(du_vectors, :, j, element), D, view(f_element, :, j),
                 one(eltype(du)), one(eltype(du)))
        end

        # y direction
        @. f_element = flux(u_element, 2, equations)
        for i in eachnode(dg)
            mul!(view(du_vectors, i, :, element), D, view(f_element, i, :),
                 one(eltype(du)), one(eltype(du)))
        end
    end

    return nothing
end

# 2D volume integral contributions for `VolumeIntegralUpwind`.
# Note that the plus / minus notation of the operators does not refer to the
# upwind / downwind directions of the fluxes.
# Instead, the plus / minus refers to the direction of the biasing within
# the finite difference stencils. Thus, the D^- operator acts on the positive
# part of the flux splitting f^+ and the D^+ operator acts on the negative part
# of the flux splitting f^-.
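
# As a minimal 1D sketch (notation only, not code from this file): with a flux
# splitting f(u) = f^+(u) + f^-(u), the volume terms below approximate
#
#   ∂f/∂x ≈ D^- f^+(u) + D^+ f^-(u),
#
# i.e., the backward-biased operator D^- differentiates the right-traveling
# part f^+ and the forward-biased operator D^+ the left-traveling part f^-.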
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::TreeMesh{2},
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralUpwind,
                               dg::FDSBP, cache)
    # Assume that
    # dg.basis isa SummationByPartsOperators.UpwindOperators
    D_minus = dg.basis.minus # Upwind SBP D^- derivative operator
    D_plus = dg.basis.plus # Upwind SBP D^+ derivative operator
    @unpack f_minus_plus_threaded, f_minus_threaded, f_plus_threaded = cache
    @unpack splitting = volume_integral

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end

    # Use the tensor product structure to compute the discrete derivatives of
    # the fluxes line-by-line and add them to `du` for each element.
    @threaded for element in eachelement(dg, cache)
        # f_minus_plus_element wraps the storage provided by f_minus_element and
        # f_plus_element such that we can use a single plain broadcasting below.
        # f_minus_element and f_plus_element are updated in broadcasting calls
        # of the form `@. f_minus_plus_element = ...`.
        f_minus_plus_element = f_minus_plus_threaded[Threads.threadid()]
        f_minus_element = f_minus_threaded[Threads.threadid()]
        f_plus_element = f_plus_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, :, element)

        # x direction
        @. f_minus_plus_element = splitting(u_element, 1, equations)
        for j in eachnode(dg)
            mul!(view(du_vectors, :, j, element), D_minus, view(f_plus_element, :, j),
                 one(eltype(du)), one(eltype(du)))
            mul!(view(du_vectors, :, j, element), D_plus, view(f_minus_element, :, j),
                 one(eltype(du)), one(eltype(du)))
        end

        # y direction
        @. f_minus_plus_element = splitting(u_element, 2, equations)
        for i in eachnode(dg)
            mul!(view(du_vectors, i, :, element), D_minus, view(f_plus_element, i, :),
                 one(eltype(du)), one(eltype(du)))
            mul!(view(du_vectors, i, :, element), D_plus, view(f_minus_element, i, :),
                 one(eltype(du)), one(eltype(du)))
        end
    end

    return nothing
end
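
# 2D surface integral contributions for `SurfaceIntegralStrongForm`: at each
# boundary node, the difference between the numerical flux `f_num` and the
# interior flux `f_node` is penalized, scaled by the inverse SBP boundary
# weights (a simultaneous approximation term, SAT, of strong-form penalty type).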
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh{2},
                                equations, surface_integral::SurfaceIntegralStrongForm,
                                dg::DG, cache)
    inv_weight_left = inv(left_boundary_weight(dg.basis))
    inv_weight_right = inv(right_boundary_weight(dg.basis))
    @unpack surface_flux_values = cache.elements

    @threaded for element in eachelement(dg, cache)
        for l in eachnode(dg)
            # surface at -x
            u_node = get_node_vars(u, equations, dg, 1, l, element)
            f_node = flux(u_node, 1, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 1, element)
            multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                       equations, dg, 1, l, element)

            # surface at +x
            u_node = get_node_vars(u, equations, dg, nnodes(dg), l, element)
            f_node = flux(u_node, 1, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 2, element)
            multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                       equations, dg, nnodes(dg), l, element)

            # surface at -y
            u_node = get_node_vars(u, equations, dg, l, 1, element)
            f_node = flux(u_node, 2, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 3, element)
            multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                       equations, dg, l, 1, element)

            # surface at +y
            u_node = get_node_vars(u, equations, dg, l, nnodes(dg), element)
            f_node = flux(u_node, 2, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 4, element)
            multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                       equations, dg, l, nnodes(dg), element)
        end
    end

    return nothing
end

# Periodic FDSBP operators need to use a single element without boundaries
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh2D,
                                equations, surface_integral::SurfaceIntegralStrongForm,
                                dg::PeriodicFDSBP, cache)
    @assert nelements(dg, cache) == 1
    return nothing
end

# Specialized interface flux computation because the upwind solver does
# not require a standard numerical flux (Riemann solver). The flux splitting
# already separates the solution information into right-traveling and
# left-traveling information. So we only need to compute the appropriate
# flux information at each side of an interface.
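
# For example (notation only): with orientation 1, the left element receives
# f^-(u_rr) on its +x side and the right element receives f^+(u_ll) on its -x
# side, so each element sees exactly the information traveling toward it.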
function calc_interface_flux!(backend::Nothing, surface_flux_values,
                              mesh::TreeMesh{2},
                              have_nonconservative_terms::False, equations,
                              surface_integral::SurfaceIntegralUpwind,
                              dg::FDSBP, cache)
    @unpack splitting = surface_integral
    @unpack u, neighbor_ids, orientations = cache.interfaces

    @threaded for interface in eachinterface(dg, cache)
        # Get neighboring elements
        left_id = neighbor_ids[1, interface]
        right_id = neighbor_ids[2, interface]

        # Determine interface direction with respect to elements:
        # orientation = 1: left -> 2, right -> 1
        # orientation = 2: left -> 4, right -> 3
        left_direction = 2 * orientations[interface]
        right_direction = 2 * orientations[interface] - 1

        for i in eachnode(dg)
            # Pull the left and right solution data
            u_ll, u_rr = get_surface_node_vars(u, equations, dg, i, interface)

            # Compute the upwind coupling terms where right-traveling
            # information comes from the left and left-traveling information
            # comes from the right
            flux_minus_rr = splitting(u_rr, Val{:minus}(), orientations[interface],
                                      equations)
            flux_plus_ll = splitting(u_ll, Val{:plus}(), orientations[interface],
                                     equations)

            # Save the upwind coupling into the appropriate side of the elements
            for v in eachvariable(equations)
                surface_flux_values[v, i, left_direction, left_id] = flux_minus_rr[v]
                surface_flux_values[v, i, right_direction, right_id] = flux_plus_ll[v]
            end
        end
    end

    return nothing
end

# Implementation of fully upwind SATs. The surface flux values are pre-computed
# in the specialized `calc_interface_flux` routine. These SATs are still of
# a strong-form penalty type, except that the interior flux at a particular
# side of the element is computed in the upwind direction.
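
# For example (notation only): at the +x surface of an element, the penalty
# below is proportional to f^-(u_rr) - f^-(u_node), i.e., the pre-computed
# coupling value from the right neighbor minus the interior minus-split flux.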
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh{2},
                                equations, surface_integral::SurfaceIntegralUpwind,
                                dg::FDSBP, cache)
    inv_weight_left = inv(left_boundary_weight(dg.basis))
    inv_weight_right = inv(right_boundary_weight(dg.basis))
    @unpack surface_flux_values = cache.elements
    @unpack splitting = surface_integral

    @threaded for element in eachelement(dg, cache)
        for l in eachnode(dg)
            # surface at -x
            u_node = get_node_vars(u, equations, dg, 1, l, element)
            f_node = splitting(u_node, Val{:plus}(), 1, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 1, element)
            multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                       equations, dg, 1, l, element)

            # surface at +x
            u_node = get_node_vars(u, equations, dg, nnodes(dg), l, element)
            f_node = splitting(u_node, Val{:minus}(), 1, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 2, element)
            multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                       equations, dg, nnodes(dg), l, element)

            # surface at -y
            u_node = get_node_vars(u, equations, dg, l, 1, element)
            f_node = splitting(u_node, Val{:plus}(), 2, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 3, element)
            multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                       equations, dg, l, 1, element)

            # surface at +y
            u_node = get_node_vars(u, equations, dg, l, nnodes(dg), element)
            f_node = splitting(u_node, Val{:minus}(), 2, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 4, element)
            multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                       equations, dg, l, nnodes(dg), element)
        end
    end

    return nothing
end

# Periodic FDSBP operators need to use a single element without boundaries
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh2D,
                                equations, surface_integral::SurfaceIntegralUpwind,
                                dg::PeriodicFDSBP, cache)
    @assert nelements(dg, cache) == 1
    return nothing
end

# AnalysisCallback
function integrate_via_indices(func::Func, u,
                               mesh::TreeMesh{2}, equations,
                               dg::FDSBP, cache, args...; normalize = true) where {Func}
    # TODO: FD. This is rather inefficient right now and allocates...
    M = SummationByPartsOperators.mass_matrix(dg.basis)
    if M isa UniformScaling
        M = M(nnodes(dg))
    end
    weights = diag(M)
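
    # For a diagonal-norm SBP operator, `weights` holds the quadrature weights
    # w_i of the mass matrix M = diag(w), so the loop below approximates the
    # element integral as J * sum_{i,j} w_i * w_j * func(u, i, j, ...), with J
    # the (constant) volume Jacobian of the element.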

    # Initialize integral with zeros of the right shape
    integral = zero(func(u, 1, 1, 1, equations, dg, args...))

    # Use quadrature to numerically integrate over entire domain
    @batch reduction=(+, integral) for element in eachelement(dg, cache)
        volume_jacobian_ = volume_jacobian(element, mesh, cache)
        for j in eachnode(dg), i in eachnode(dg)
            integral += volume_jacobian_ * weights[i] * weights[j] *
                        func(u, i, j, element, equations, dg, args...)
        end
    end

    # Normalize with total volume
    if normalize
        integral = integral / total_volume(mesh)
    end

    return integral
end

function calc_error_norms(func, u, t, analyzer,
                          mesh::TreeMesh{2}, equations, initial_condition,
                          dg::FDSBP, cache, cache_analysis)
    # TODO: FD. This is rather inefficient right now and allocates...
    M = SummationByPartsOperators.mass_matrix(dg.basis)
    if M isa UniformScaling
        M = M(nnodes(dg))
    end
    weights = diag(M)
    @unpack node_coordinates = cache.elements
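
    # The discrete norms accumulated below are (with J the element Jacobian,
    # V the total volume, and e_ij the pointwise error)
    #
    #   ||e||_L2   ≈ sqrt((1/V) * sum over elements of J * sum_{i,j} w_i * w_j * e_ij^2)
    #   ||e||_Linf ≈ max_{i,j} |e_ij|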
    # Set up data structures
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1), equations))
    linf_error = copy(l2_error)

    # Iterate over all elements for error calculations
    for element in eachelement(dg, cache)
        # Calculate errors at each node
        volume_jacobian_ = volume_jacobian(element, mesh, cache)

        for j in eachnode(analyzer), i in eachnode(analyzer)
            u_exact = initial_condition(get_node_coords(node_coordinates, equations, dg,
                                                        i, j, element), t, equations)
            diff = func(u_exact, equations) -
                   func(get_node_vars(u, equations, dg, i, j, element), equations)
            l2_error += diff .^ 2 * (weights[i] * weights[j] * volume_jacobian_)
            linf_error = @. max(linf_error, abs(diff))
        end
    end

    # For L2 error, divide by total volume
    total_volume_ = total_volume(mesh)
    l2_error = @. sqrt(l2_error / total_volume_)

    return l2_error, linf_error
end
end # @muladd