GitHub Repository: trixi-framework/Trixi.jl
Path: blob/main/src/solvers/fdsbp_tree/fdsbp_1d.jl
# !!! warning "Experimental implementation (upwind SBP)"
#     This is an experimental feature and may change in future releases.

# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
@muladd begin
#! format: noindent
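
# As a small example of what `@muladd` does: inside this block, an expression
# like
#     du[i] = a * u[i] + b
# is rewritten to
#     du[i] = muladd(a, u[i], b)
# which allows LLVM to emit fused multiply-add instructions.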

# 1D caches
function create_cache(mesh::TreeMesh{1}, equations,
                      volume_integral::VolumeIntegralStrongForm,
                      dg, cache_containers, uEltype)
    prototype = Array{SVector{nvariables(equations), uEltype}, ndims(mesh)}(undef,
                                                                            ntuple(_ -> nnodes(dg),
                                                                                   ndims(mesh))...)
    f_threaded = [similar(prototype) for _ in 1:Threads.maxthreadid()]

    return (; f_threaded)
end
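
# For example, for the 1D compressible Euler equations (3 conserved variables)
# and 16 nodes per element, each entry of `f_threaded` is a length-16 vector of
# `SVector{3}` flux values; one scratch buffer per thread avoids race conditions
# when elements are processed concurrently.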

function create_cache(mesh::TreeMesh{1}, equations,
                      volume_integral::VolumeIntegralUpwind,
                      dg, cache_containers, uEltype)
    u_node = SVector{nvariables(equations), uEltype}(ntuple(_ -> zero(uEltype),
                                                            Val{nvariables(equations)}()))
    f = StructArray([(u_node, u_node)])
    f_minus_plus_threaded = [similar(f, ntuple(_ -> nnodes(dg), ndims(mesh))...)
                             for _ in 1:Threads.maxthreadid()]

    f_minus, f_plus = StructArrays.components(f_minus_plus_threaded[1])
    f_minus_threaded = [f_minus]
    f_plus_threaded = [f_plus]
    for i in 2:Threads.maxthreadid()
        f_minus, f_plus = StructArrays.components(f_minus_plus_threaded[i])
        push!(f_minus_threaded, f_minus)
        push!(f_plus_threaded, f_plus)
    end

    return (; f_minus_plus_threaded, f_minus_threaded, f_plus_threaded)
end
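
# A minimal sketch of the aliasing set up above (standalone, hypothetical names):
#
#     fm_fp = StructArray([(SVector(0.0), SVector(0.0)) for _ in 1:4])
#     fm, fp = StructArrays.components(fm_fp)
#     fm_fp[1] = (SVector(1.0), SVector(2.0))
#     fm[1] == SVector(1.0) && fp[1] == SVector(2.0)  # true: shared storage
#
# Hence, broadcasting into `f_minus_plus_element` below also fills the
# `f_minus_element` / `f_plus_element` arrays returned here.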

# 1D volume integral contributions for `VolumeIntegralStrongForm`
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::TreeMesh{1},
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralStrongForm,
                               dg::FDSBP, cache)
    D = dg.basis # SBP derivative operator
    @unpack f_threaded = cache

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end

    # Use the tensor product structure to compute the discrete derivatives of
    # the fluxes line-by-line and add them to `du` for each element.
    @threaded for element in eachelement(dg, cache)
        f_element = f_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, element)

        # x direction
        @. f_element = flux(u_element, 1, equations)
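        # The five-argument `mul!(y, D, x, α, β)` computes `y = α * (D * x) + β * y`;
        # with α = β = 1 this adds the SBP derivative of the flux to `du`.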
        mul!(view(du_vectors, :, element), D, view(f_element, :),
             one(eltype(du)), one(eltype(du)))
    end

    return nothing
end

# 1D volume integral contributions for `VolumeIntegralUpwind`.
# Note that the plus / minus notation of the operators does not refer to the
# upwind / downwind directions of the fluxes.
# Instead, the plus / minus refers to the direction of the biasing within
# the finite difference stencils. Thus, the D^- operator acts on the positive
# part of the flux splitting f^+ and the D^+ operator acts on the negative part
# of the flux splitting f^-.
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::TreeMesh{1},
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralUpwind,
                               dg::FDSBP, cache)
    # Assume that
    # dg.basis isa SummationByPartsOperators.UpwindOperators
    D_minus = dg.basis.minus # Upwind SBP D^- derivative operator
    D_plus = dg.basis.plus   # Upwind SBP D^+ derivative operator
    @unpack f_minus_plus_threaded, f_minus_threaded, f_plus_threaded = cache
    @unpack splitting = volume_integral

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end

    # Use the tensor product structure to compute the discrete derivatives of
    # the fluxes line-by-line and add them to `du` for each element.
    @threaded for element in eachelement(dg, cache)
        # f_minus_plus_element wraps the storage provided by f_minus_element and
        # f_plus_element such that we can use a single plain broadcasting below.
        # f_minus_element and f_plus_element are updated in broadcasting calls
        # of the form `@. f_minus_plus_element = ...`.
        f_minus_plus_element = f_minus_plus_threaded[Threads.threadid()]
        f_minus_element = f_minus_threaded[Threads.threadid()]
        f_plus_element = f_plus_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, element)

        # x direction
        @. f_minus_plus_element = splitting(u_element, 1, equations)
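        # The splitting returns both components at once; the broadcast above stores
        # them in the `f_minus_element` / `f_plus_element` arrays backing the
        # StructArray. The biased operators are then applied crosswise (cf. the
        # comment above this function): D^+ acts on f^-, D^- acts on f^+.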
        mul!(view(du_vectors, :, element), D_plus, view(f_minus_element, :),
             one(eltype(du)), one(eltype(du)))
        mul!(view(du_vectors, :, element), D_minus, view(f_plus_element, :),
             one(eltype(du)), one(eltype(du)))
    end

    return nothing
end

function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh{1},
                                equations, surface_integral::SurfaceIntegralStrongForm,
                                dg::DG, cache)
    inv_weight_left = inv(left_boundary_weight(dg.basis))
    inv_weight_right = inv(right_boundary_weight(dg.basis))
    @unpack surface_flux_values = cache.elements

    @threaded for element in eachelement(dg, cache)
        # surface at -x
        u_node = get_node_vars(u, equations, dg, 1, element)
        f_node = flux(u_node, 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 1, element)
        multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                   equations, dg, 1, element)

        # surface at +x
        u_node = get_node_vars(u, equations, dg, nnodes(dg), element)
        f_node = flux(u_node, 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 2, element)
        multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                   equations, dg, nnodes(dg), element)
    end

    return nothing
end
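
# In formulas: with the numerical surface fluxes f*_L, f*_R stored in
# `surface_flux_values` and the boundary quadrature weights w_L, w_R of the
# SBP operator, the strong-form SATs above read
#     du_1 -= (f*_L - f(u_1)) / w_L
#     du_N += (f*_R - f(u_N)) / w_R,   N = nnodes(dg).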

# Periodic FDSBP operators need to use a single element without boundaries
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh1D,
                                equations, surface_integral::SurfaceIntegralStrongForm,
                                dg::PeriodicFDSBP, cache)
    @assert nelements(dg, cache) == 1
    return nothing
end

# Specialized interface flux computation because the upwind solver does
# not require a standard numerical flux (Riemann solver). The flux splitting
# already separates the solution information into right-traveling and
# left-traveling information. So we only need to compute the appropriate
# flux information at each side of an interface.
function calc_interface_flux!(surface_flux_values,
                              mesh::TreeMesh{1},
                              have_nonconservative_terms::False, equations,
                              surface_integral::SurfaceIntegralUpwind,
                              dg::FDSBP, cache)
    @unpack splitting = surface_integral
    @unpack u, neighbor_ids, orientations = cache.interfaces

    @threaded for interface in eachinterface(dg, cache)
        # Get neighboring elements
        left_id = neighbor_ids[1, interface]
        right_id = neighbor_ids[2, interface]

        # Determine interface direction with respect to elements:
        # orientation = 1: left -> 2, right -> 1
        left_direction = 2 * orientations[interface]
        right_direction = 2 * orientations[interface] - 1

        # Pull the left and right solution data
        u_ll, u_rr = get_surface_node_vars(u, equations, dg, interface)

        # Compute the upwind coupling terms where right-traveling
        # information comes from the left and left-traveling information
        # comes from the right
        flux_minus_rr = splitting(u_rr, Val{:minus}(), orientations[interface],
                                  equations)
        flux_plus_ll = splitting(u_ll, Val{:plus}(), orientations[interface], equations)

        # Save the upwind coupling into the appropriate side of the elements
        for v in eachvariable(equations)
            surface_flux_values[v, left_direction, left_id] = flux_minus_rr[v]
            surface_flux_values[v, right_direction, right_id] = flux_plus_ll[v]
        end
    end

    return nothing
end
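
# Note: combined with the `SurfaceIntegralUpwind` surface integral below, this
# coupling is equivalent to a strong-form SAT that uses the classical
# flux-vector-splitting numerical flux
#     f*(u_L, u_R) = f^+(u_L) + f^-(u_R),
# since then f* - f(u_R) = f^+(u_L) - f^+(u_R) at the right element's left face
# and f* - f(u_L) = f^-(u_R) - f^-(u_L) at the left element's right face.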

# Implementation of fully upwind SATs. The surface flux values are pre-computed
# in the specialized `calc_interface_flux` routine. These SATs are still of
# a strong form penalty type, except that the interior flux at a particular
# side of the element is computed in the upwind direction.
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh{1},
                                equations, surface_integral::SurfaceIntegralUpwind,
                                dg::FDSBP, cache)
    inv_weight_left = inv(left_boundary_weight(dg.basis))
    inv_weight_right = inv(right_boundary_weight(dg.basis))
    @unpack surface_flux_values = cache.elements
    @unpack splitting = surface_integral

    @threaded for element in eachelement(dg, cache)
        # surface at -x
        u_node = get_node_vars(u, equations, dg, 1, element)
        f_node = splitting(u_node, Val{:plus}(), 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 1, element)
        multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                   equations, dg, 1, element)

        # surface at +x
        u_node = get_node_vars(u, equations, dg, nnodes(dg), element)
        f_node = splitting(u_node, Val{:minus}(), 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 2, element)
        multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                   equations, dg, nnodes(dg), element)
    end

    return nothing
end
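
# In formulas, the upwind SATs above read
#     du_1 -= (f^+(u_nb,L) - f^+(u_1)) / w_L
#     du_N += (f^-(u_nb,R) - f^-(u_N)) / w_R,   N = nnodes(dg),
# where f^±(u_nb,·) are the pre-computed splitting components of the neighboring
# boundary values stored in `surface_flux_values` and w_L, w_R are the boundary
# quadrature weights of the SBP operator.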

# Periodic FDSBP operators need to use a single element without boundaries
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh1D,
                                equations, surface_integral::SurfaceIntegralUpwind,
                                dg::PeriodicFDSBP, cache)
    @assert nelements(dg, cache) == 1
    return nothing
end

# AnalysisCallback

function integrate_via_indices(func::Func, u,
                               mesh::TreeMesh{1}, equations,
                               dg::FDSBP, cache, args...; normalize = true) where {Func}
    # TODO: FD. This is rather inefficient right now and allocates...
    M = SummationByPartsOperators.mass_matrix(dg.basis)
    if M isa UniformScaling
        M = M(nnodes(dg))
    end
    weights = diag(M)

    # Initialize integral with zeros of the right shape
    integral = zero(func(u, 1, 1, equations, dg, args...))

    # Use quadrature to numerically integrate over entire domain
    @batch reduction=(+, integral) for element in eachelement(dg, cache)
        volume_jacobian_ = volume_jacobian(element, mesh, cache)
        for i in eachnode(dg)
            integral += volume_jacobian_ * weights[i] *
                        func(u, i, element, equations, dg, args...)
        end
    end

    # Normalize with total volume
    if normalize
        integral = integral / total_volume(mesh)
    end

    return integral
end
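
# In formulas: with w = diag(M) the SBP quadrature weights and J_e the volume
# Jacobian of element e, this computes
#     ∫_Ω func ≈ Σ_e Σ_i J_e * w_i * func(u_{i,e})
# and, if `normalize` is set, divides the result by the total volume |Ω|.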

function calc_error_norms(func, u, t, analyzer,
                          mesh::TreeMesh{1}, equations, initial_condition,
                          dg::FDSBP, cache, cache_analysis)
    # TODO: FD. This is rather inefficient right now and allocates...
    M = SummationByPartsOperators.mass_matrix(dg.basis)
    if M isa UniformScaling
        M = M(nnodes(dg))
    end
    weights = diag(M)
    @unpack node_coordinates = cache.elements

    # Set up data structures
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1), equations))
    linf_error = copy(l2_error)

    # Iterate over all elements for error calculations
    for element in eachelement(dg, cache)
        # Calculate errors at each node
        volume_jacobian_ = volume_jacobian(element, mesh, cache)

        for i in eachnode(analyzer)
            u_exact = initial_condition(get_node_coords(node_coordinates, equations, dg,
                                                        i, element), t, equations)
            diff = func(u_exact, equations) -
                   func(get_node_vars(u, equations, dg, i, element), equations)
            l2_error += diff .^ 2 * (weights[i] * volume_jacobian_)
            linf_error = @. max(linf_error, abs(diff))
        end
    end

    # For L2 error, divide by total volume
    total_volume_ = total_volume(mesh)
    l2_error = @. sqrt(l2_error / total_volume_)

    return l2_error, linf_error
end
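
# In formulas, the discrete error norms computed above are
#     ||e||_L2  = sqrt( Σ_e Σ_i J_e * w_i * e_{i,e}^2 / |Ω| )
#     ||e||_inf = max_{i,e} |e_{i,e}|
# with e_{i,e} = func(u_exact(x_{i,e}), equations) - func(u_{i,e}, equations).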
end # @muladd