Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
trixi-framework
GitHub Repository: trixi-framework/Trixi.jl
Path: blob/main/src/callbacks_step/analysis_dg3d.jl
5586 views
1
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
2
# Since these FMAs can increase the performance of many numerical algorithms,
3
# we need to opt-in explicitly.
4
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
5
@muladd begin
6
#! format: noindent
7
8
function create_cache_analysis(analyzer, mesh::TreeMesh{3},
                               equations, dg::DG, cache,
                               RealT, uEltype)
    # Pre-allocate buffers. We use `StrideArray`s here since these buffers are
    # used in performance-critical places and the additional static size
    # information passed to the compiler makes them faster than native `Array`s.
    n_vars = StaticInt(nvariables(equations))
    n_dims = StaticInt(ndims(equations))
    n_ana = StaticInt(nnodes(analyzer))
    n_dg = StaticInt(nnodes(dg))

    # Buffers for the solution interpolated to the analysis nodes
    u_local = StrideArray(undef, uEltype, n_vars, n_ana, n_ana, n_ana)
    u_tmp1 = StrideArray(undef, uEltype, n_vars, n_ana, n_dg, n_dg)
    u_tmp2 = StrideArray(undef, uEltype, n_vars, n_ana, n_ana, n_dg)
    # Buffers for the node coordinates interpolated to the analysis nodes
    x_local = StrideArray(undef, RealT, n_dims, n_ana, n_ana, n_ana)
    x_tmp1 = StrideArray(undef, RealT, n_dims, n_ana, n_dg, n_dg)
    x_tmp2 = StrideArray(undef, RealT, n_dims, n_ana, n_ana, n_dg)

    return (; u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2)
end
37
38
# Specialized cache for P4estMesh to allow for different ambient dimension from mesh dimension
function create_cache_analysis(analyzer,
                               mesh::P4estMesh{3, NDIMS_AMBIENT},
                               equations, dg::DG, cache,
                               RealT, uEltype) where {NDIMS_AMBIENT}
    # Pre-allocate buffers. We use `StrideArray`s here since these buffers are
    # used in performance-critical places and the additional static size
    # information passed to the compiler makes them faster than native `Array`s.
    n_vars = StaticInt(nvariables(equations))
    # Coordinates live in the ambient space, which may differ from the mesh dimension
    n_dims = StaticInt(NDIMS_AMBIENT)
    n_ana = StaticInt(nnodes(analyzer))
    n_dg = StaticInt(nnodes(dg))

    # Buffers for the solution interpolated to the analysis nodes
    u_local = StrideArray(undef, uEltype, n_vars, n_ana, n_ana, n_ana)
    u_tmp1 = StrideArray(undef, uEltype, n_vars, n_ana, n_dg, n_dg)
    u_tmp2 = StrideArray(undef, uEltype, n_vars, n_ana, n_ana, n_dg)
    # Buffers for the node coordinates interpolated to the analysis nodes
    x_local = StrideArray(undef, RealT, n_dims, n_ana, n_ana, n_ana)
    x_tmp1 = StrideArray(undef, RealT, n_dims, n_ana, n_dg, n_dg)
    x_tmp2 = StrideArray(undef, RealT, n_dims, n_ana, n_ana, n_dg)
    # Buffers for the (scalar) Jacobian interpolated to the analysis nodes
    jacobian_local = StrideArray(undef, RealT, n_ana, n_ana, n_ana)
    jacobian_tmp1 = StrideArray(undef, RealT, n_ana, n_dg, n_dg)
    jacobian_tmp2 = StrideArray(undef, RealT, n_ana, n_ana, n_dg)

    return (; u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2, jacobian_local,
            jacobian_tmp1, jacobian_tmp2)
end
80
81
function create_cache_analysis(analyzer,
                               mesh::Union{StructuredMesh{3}, T8codeMesh{3}},
                               equations, dg::DG, cache,
                               RealT, uEltype)
    # Pre-allocate buffers. We use `StrideArray`s here since these buffers are
    # used in performance-critical places and the additional static size
    # information passed to the compiler makes them faster than native `Array`s.
    n_vars = StaticInt(nvariables(equations))
    n_dims = StaticInt(ndims(equations))
    n_ana = StaticInt(nnodes(analyzer))
    n_dg = StaticInt(nnodes(dg))

    # Buffers for the solution interpolated to the analysis nodes
    u_local = StrideArray(undef, uEltype, n_vars, n_ana, n_ana, n_ana)
    u_tmp1 = StrideArray(undef, uEltype, n_vars, n_ana, n_dg, n_dg)
    u_tmp2 = StrideArray(undef, uEltype, n_vars, n_ana, n_ana, n_dg)
    # Buffers for the node coordinates interpolated to the analysis nodes
    x_local = StrideArray(undef, RealT, n_dims, n_ana, n_ana, n_ana)
    x_tmp1 = StrideArray(undef, RealT, n_dims, n_ana, n_dg, n_dg)
    x_tmp2 = StrideArray(undef, RealT, n_dims, n_ana, n_ana, n_dg)
    # Buffers for the (scalar) Jacobian interpolated to the analysis nodes
    jacobian_local = StrideArray(undef, RealT, n_ana, n_ana, n_ana)
    jacobian_tmp1 = StrideArray(undef, RealT, n_ana, n_dg, n_dg)
    jacobian_tmp2 = StrideArray(undef, RealT, n_ana, n_ana, n_dg)

    return (; u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2, jacobian_local,
            jacobian_tmp1, jacobian_tmp2)
end
122
123
# Compute discrete L2 and Linf error norms of `func` applied to the solution,
# comparing against `initial_condition` evaluated at time `t` on the analysis nodes.
function calc_error_norms(func, u, t, analyzer,
                          mesh::TreeMesh{3}, equations, initial_condition,
                          dg::DGSEM, cache, cache_analysis)
    @unpack vandermonde, weights = analyzer
    @unpack node_coordinates = cache.elements
    @unpack u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2 = cache_analysis

    # Accumulators of the right (possibly vector-valued) shape
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1, 1), equations))
    linf_error = copy(l2_error)

    # Accumulate error contributions element by element
    for element in eachelement(dg, cache)
        # Interpolate solution and node locations to analysis nodes
        multiply_dimensionwise!(u_local, vandermonde, view(u, :, :, :, :, element),
                                u_tmp1, u_tmp2)
        multiply_dimensionwise!(x_local, vandermonde,
                                view(node_coordinates, :, :, :, :, element), x_tmp1,
                                x_tmp2)

        # TreeMesh elements are Cartesian, so one Jacobian per element suffices
        element_jacobian = volume_jacobian(element, mesh, cache)

        # Evaluate pointwise errors at each analysis node
        for k in eachnode(analyzer), j in eachnode(analyzer), i in eachnode(analyzer)
            x_node = get_node_coords(x_local, equations, dg, i, j, k)
            u_exact = initial_condition(x_node, t, equations)
            u_numerical = get_node_vars(u_local, equations, dg, i, j, k)
            diff = func(u_exact, equations) - func(u_numerical, equations)

            l2_error += diff .^ 2 *
                        (weights[i] * weights[j] * weights[k] * element_jacobian)
            linf_error = @. max(linf_error, abs(diff))
        end
    end

    # For the L2 error, divide by the total volume
    total_volume_ = total_volume(mesh)
    l2_error = @. sqrt(l2_error / total_volume_)

    return l2_error, linf_error
end
163
164
# Compute discrete L2 and Linf error norms on curvilinear meshes. The volume is
# integrated numerically alongside the errors since the Jacobian varies per node.
function calc_error_norms(func, u, t, analyzer,
                          mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}},
                          equations, initial_condition,
                          dg::DGSEM, cache, cache_analysis)
    @unpack vandermonde, weights = analyzer
    @unpack u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2, jacobian_local, jacobian_tmp1, jacobian_tmp2 = cache_analysis
    @unpack node_coordinates, inverse_jacobian = cache.elements

    # Calculate error norms on the CPU, to ensure the order of summation is the same.
    if trixi_backend(u) !== nothing
        node_coordinates = Array(node_coordinates)
        inverse_jacobian = Array(inverse_jacobian)
        u = Array(u)
    end

    # Accumulators of the right (possibly vector-valued) shape
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1, 1), equations))
    linf_error = copy(l2_error)
    total_volume = zero(real(mesh))

    # Accumulate error contributions element by element
    for element in eachelement(dg, cache)
        # Interpolate solution, node locations, and Jacobian to analysis nodes
        multiply_dimensionwise!(u_local, vandermonde, view(u, :, :, :, :, element),
                                u_tmp1, u_tmp2)
        multiply_dimensionwise!(x_local, vandermonde,
                                view(node_coordinates, :, :, :, :, element), x_tmp1,
                                x_tmp2)
        multiply_scalar_dimensionwise!(jacobian_local, vandermonde,
                                       inv.(view(inverse_jacobian, :, :, :, element)),
                                       jacobian_tmp1, jacobian_tmp2)

        # Evaluate pointwise errors at each analysis node
        for k in eachnode(analyzer), j in eachnode(analyzer), i in eachnode(analyzer)
            x_node = get_node_coords(x_local, equations, dg, i, j, k)
            u_exact = initial_condition(x_node, t, equations)
            u_numerical = get_node_vars(u_local, equations, dg, i, j, k)
            diff = func(u_exact, equations) - func(u_numerical, equations)

            # We take the absolute value as we need the Jacobian here for the volume calculation
            abs_jacobian_node = abs(jacobian_local[i, j, k])

            l2_error += diff .^ 2 *
                        (weights[i] * weights[j] * weights[k] * abs_jacobian_node)
            linf_error = @. max(linf_error, abs(diff))
            total_volume += (weights[i] * weights[j] * weights[k] *
                             abs_jacobian_node)
        end
    end

    # For the L2 error, divide by the numerically integrated total volume
    l2_error = @. sqrt(l2_error / total_volume)

    return l2_error, linf_error
end
218
219
# Use quadrature to numerically integrate a single element.
# We do not multiply by the Jacobian to stay in reference space.
# This avoids the need to divide the RHS of the DG scheme by the Jacobian when computing
# the time derivative of entropy, see `entropy_change_reference_element`.
function integrate_reference_element(func::Func, u, element,
                                     ::Type{<:AbstractMesh{3}}, equations, dg::DGSEM,
                                     cache,
                                     args...; normalize = true) where {Func}
    @unpack weights = dg.basis

    # Zero of the same (possibly vector-valued) shape as one integrand evaluation.
    # NOTE: `normalize` is accepted for interface compatibility with other
    # integration routines but no normalization is performed here.
    result = zero(func(u, 1, 1, 1, element, equations, dg, args...))

    for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
        w_ijk = weights[i] * weights[j] * weights[k]
        result += w_ijk * func(u, i, j, k, element, equations, dg, args...)
    end

    return result
end
239
240
# Calculate ∫_e (∂S/∂u ⋅ ∂u/∂t) dΩ_e where the result on element 'e' is kept in reference space
# Note that ∂S/∂u = w(u) with entropy variables w
function entropy_change_reference_element(du, u, element,
                                          MeshT::Type{<:AbstractMesh{3}},
                                          equations, dg::DGSEM, cache, args...)
    return integrate_reference_element(u, element, MeshT, equations, dg, cache,
                                       du) do u, i, j, k, element, equations, dg, du
        # Contract the entropy variables w = ∂S/∂u with the time derivative
        w_node = cons2entropy(get_node_vars(u, equations, dg, i, j, k, element),
                              equations)
        du_node = get_node_vars(du, equations, dg, i, j, k, element)
        return dot(w_node, du_node)
    end
end
253
254
# Calculate surface integral of func(u, orientation, equations) * normal on the
# reference element for Cartesian `TreeMesh` elements. For each pair of opposite
# faces, the outward normals point in opposite directions, hence the minus signs.
function surface_integral_reference_element(func::Func, u, element,
                                            ::Type{<:TreeMesh{3}}, equations, dg::DGSEM,
                                            cache, args...) where {Func}
    @unpack weights = dg.basis

    # Zero of the right shape, evaluated at an arbitrary node and orientation
    u_tmp = get_node_vars(u, equations, dg, 1, 1, 1, element)
    surface_integral = zero(func(u_tmp, 1, equations))
    for j in eachnode(dg), i in eachnode(dg)
        # Quadrature weight shared by all three face pairs at this (i, j)
        # (previously, `dg.basis.weights` was accessed directly although
        # `weights` had already been unpacked above)
        w_ij = weights[i] * weights[j]

        # integrate along x-y plane, normal in z (3) direction
        u_back = get_node_vars(u, equations, dg, i, j, 1, element)
        u_front = get_node_vars(u, equations, dg, i, j, nnodes(dg), element)

        surface_integral += w_ij *
                            (func(u_front, 3, equations) - func(u_back, 3, equations))

        # integrate along x-z plane, normal in y (2) direction
        u_bottom = get_node_vars(u, equations, dg, i, 1, j, element)
        u_top = get_node_vars(u, equations, dg, i, nnodes(dg), j, element)

        surface_integral += w_ij *
                            (func(u_top, 2, equations) - func(u_bottom, 2, equations))

        # integrate along y-z plane, normal in x (1) direction
        u_left = get_node_vars(u, equations, dg, 1, i, j, element)
        u_right = get_node_vars(u, equations, dg, nnodes(dg), i, j, element)

        surface_integral += w_ij *
                            (func(u_right, 1, equations) - func(u_left, 1, equations))
    end

    return surface_integral
end
287
288
# calculate surface integral of func(u, normal_direction, equations) * normal on the reference element.
# Note: `get_normal_direction` already returns an outward-pointing normal for all directions,
# thus no +- flips are needed here.
function surface_integral_reference_element(func::Func, u, element,
                                            ::Type{<:Union{StructuredMesh{3},
                                                           P4estMesh{3},
                                                           T8codeMesh{3}}},
                                            equations, dg::DGSEM, cache,
                                            args...) where {Func}
    @unpack contravariant_vectors = cache.elements
    @unpack weights = dg.basis

    n = nnodes(dg)

    # Construct zero of the right shape:
    # Evaluate `func` at an actual quadrature node and normal direction
    u_tmp = get_node_vars(u, equations, dg, 1, 1, 1, element)
    normal_direction = get_normal_direction(1, contravariant_vectors,
                                            1, 1, 1, element)
    surface_integral = zero(func(u_tmp, normal_direction, equations))

    # Each face is given by its direction index, the coordinate held fixed
    # (1 = i, 2 = j, 3 = k), and the node index along that coordinate.
    # Directions 1/2: x_min/x_max, 3/4: y_min/y_max, 5/6: z_min/z_max.
    for (direction, fixed_coord, fixed_node) in ((1, 1, 1), (2, 1, n),
                                                 (3, 2, 1), (4, 2, n),
                                                 (5, 3, 1), (6, 3, n))
        # `a` and `b` run over the two free coordinates of the face
        for b in eachnode(dg), a in eachnode(dg)
            if fixed_coord == 1
                i, j, k = fixed_node, a, b
            elseif fixed_coord == 2
                i, j, k = a, fixed_node, b
            else
                i, j, k = a, b, fixed_node
            end

            u_node = get_node_vars(u, equations, dg, i, j, k, element)
            normal_direction = get_normal_direction(direction, contravariant_vectors,
                                                    i, j, k, element)
            surface_integral += weights[a] * weights[b] *
                                func(u_node, normal_direction, equations)
        end
    end

    return surface_integral
end
363
364
# Integrate `func` over the whole domain on a Cartesian `TreeMesh` using
# Gauss quadrature; optionally normalize by the total volume.
function integrate_via_indices(func::Func, u,
                               mesh::TreeMesh{3}, equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    @unpack weights = dg.basis

    # Zero of the same shape as one integrand evaluation
    integral = zero(func(u, 1, 1, 1, 1, equations, dg, args...))

    # Quadrature over the entire domain, parallelized over elements
    @batch reduction=(+, integral) for element in eachelement(dg, cache)
        # One Jacobian per element suffices on the Cartesian mesh
        jacobian = volume_jacobian(element, mesh, cache)
        for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
            integral += jacobian * weights[i] * weights[j] * weights[k] *
                        func(u, i, j, k, element, equations, dg, args...)
        end
    end

    # Normalize with total volume
    if normalize
        integral = integral / total_volume(mesh)
    end

    return integral
end
388
389
# Entry point for curvilinear meshes: dispatch on the array backend of `u`
# (`nothing` for CPU arrays, a KernelAbstractions backend otherwise).
function integrate_via_indices(func::Func, u,
                               mesh::Union{StructuredMesh{3}, P4estMesh{3},
                                           T8codeMesh{3}},
                               equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    backend = trixi_backend(u)
    return integrate_via_indices(func, backend, u, mesh, equations, dg, cache,
                                 args...; normalize = normalize)
end
397
398
# CPU implementation for curvilinear meshes: the Jacobian varies per node, so
# the total volume is integrated numerically alongside the integral itself.
function integrate_via_indices(func::Func, ::Nothing, u,
                               mesh::Union{StructuredMesh{3}, P4estMesh{3},
                                           T8codeMesh{3}},
                               equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    @unpack weights = dg.basis
    @unpack inverse_jacobian = cache.elements

    # Zero of the same shape as one integrand evaluation
    integral = zero(func(u, 1, 1, 1, 1, equations, dg, args...))
    total_volume = zero(real(mesh))

    # Quadrature over the entire domain, parallelized over elements
    @batch reduction=((+, integral), (+, total_volume)) for element in eachelement(dg,
                                                                                  cache)
        for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
            # Absolute value: only the volume scaling matters, not the orientation
            jacobian = abs(inv(inverse_jacobian[i, j, k, element]))
            integral += jacobian * weights[i] * weights[j] * weights[k] *
                        func(u, i, j, k, element, equations, dg, args...)
            total_volume += jacobian * weights[i] * weights[j] * weights[k]
        end
    end

    # Normalize with the numerically integrated total volume
    if normalize
        integral = integral / total_volume
    end

    return integral
end
428
429
# GPU implementation for curvilinear meshes: map over elements on the device and
# reduce (integral, volume) pairs with a componentwise sum.
function integrate_via_indices(func::Func, backend::Backend, u,
                               mesh::Union{StructuredMesh{3}, P4estMesh{3},
                                           T8codeMesh{3}},
                               equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    @unpack weights = dg.basis
    @unpack inverse_jacobian = cache.elements

    # Componentwise addition of (integral, volume) tuples for the reduction
    function tuple_plus((int_a, vol_a), (int_b, vol_b))
        return (int_a + int_b, vol_a + vol_b)
    end

    # `func(u, 1,1,1,1, equations, dg, args...)` might access GPU memory
    # so we have to rely on the compiler to correctly infer the type of the integral here.
    # TODO: Technically we need device_promote_op here that "infers" the function within the context of the GPU.
    integral_zero = zero(Base.promote_op(func, typeof(u), Int, Int, Int, Int,
                                         typeof(equations), typeof(dg),
                                         map(typeof, args)...))
    init = neutral = (integral_zero, zero(real(mesh)))

    # Use quadrature to numerically integrate over entire domain
    num_elements = nelements(dg, cache)
    integral, total_volume = AcceleratedKernels.mapreduce(tuple_plus, 1:num_elements,
                                                          backend; init,
                                                          neutral) do element
        # Per-element accumulators of the right shape
        local_integral, local_total_volume = neutral

        for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
            # Absolute value: only the volume scaling matters, not the orientation
            jacobian = abs(inv(inverse_jacobian[i, j, k, element]))
            local_integral += jacobian * weights[i] * weights[j] * weights[k] *
                              func(u, i, j, k, element, equations, dg, args...)
            local_total_volume += jacobian * weights[i] * weights[j] * weights[k]
        end
        return local_integral, local_total_volume
    end

    # Normalize with the numerically integrated total volume
    if normalize
        integral = integral / total_volume
    end

    return integral
end
473
474
# Integrate `func(u_node, equations)` over the whole domain by evaluating the
# solution pointwise and delegating to `integrate_via_indices`.
function integrate(func::Func, u,
                   mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3},
                               T8codeMesh{3}},
                   equations, dg::Union{DGSEM, FDSBP}, cache;
                   normalize = true) where {Func}
    integrate_via_indices(u, mesh, equations, dg, cache;
                          normalize = normalize) do u, i, j, k, element, equations, dg
        return func(get_node_vars(u, equations, dg, i, j, k, element), equations)
    end
end
485
486
# Parabolic variant: additionally pass the solution gradients in the three
# coordinate directions to `func`.
function integrate(func::Func, u,
                   mesh::Union{TreeMesh{3}, P4estMesh{3}},
                   equations, equations_parabolic, dg::DGSEM,
                   cache, cache_parabolic; normalize = true) where {Func}
    gradients_x, gradients_y, gradients_z = cache_parabolic.parabolic_container.gradients
    integrate_via_indices(u, mesh, equations, dg, cache;
                          normalize = normalize) do u, i, j, k, element, equations, dg
        u_local = get_node_vars(u, equations, dg, i, j, k, element)
        # Gather the solution gradient at this node in all three directions
        gradients_local = (get_node_vars(gradients_x, equations_parabolic, dg,
                                         i, j, k, element),
                           get_node_vars(gradients_y, equations_parabolic, dg,
                                         i, j, k, element),
                           get_node_vars(gradients_z, equations_parabolic, dg,
                                         i, j, k, element))
        return func(u_local, gradients_local, equations_parabolic)
    end
end
504
505
function analyze(::typeof(entropy_timederivative), du, u, t,
                 mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3},
                             T8codeMesh{3}},
                 equations, dg::Union{DGSEM, FDSBP}, cache)
    # Calculate ∫(∂S/∂u ⋅ ∂u/∂t)dΩ with entropy variables w = ∂S/∂u
    integrate_via_indices(u, mesh, equations, dg, cache,
                          du) do u, i, j, k, element, equations, dg, du
        w_node = cons2entropy(get_node_vars(u, equations, dg, i, j, k, element),
                              equations)
        du_node = get_node_vars(du, equations, dg, i, j, k, element)
        return dot(w_node, du_node)
    end
end
517
518
# L2 norm of the discrete divergence of the magnetic field on a Cartesian mesh.
function analyze(::Val{:l2_divb}, du, u, t,
                 mesh::TreeMesh{3}, equations,
                 dg::DGSEM, cache)
    integrate_via_indices(u, mesh, equations, dg, cache, cache,
                          dg.basis.derivative_matrix) do u, i, j, k, element, equations,
                                                         dg, cache, derivative_matrix
        divb = zero(eltype(u))
        # Apply the collocation derivative in each coordinate direction
        for l in eachnode(dg)
            B_ljk = magnetic_field(get_node_vars(u, equations, dg, l, j, k, element),
                                   equations)
            B_ilk = magnetic_field(get_node_vars(u, equations, dg, i, l, k, element),
                                   equations)
            B_ijl = magnetic_field(get_node_vars(u, equations, dg, i, j, l, element),
                                   equations)

            divb += (derivative_matrix[i, l] * B_ljk[1] +
                     derivative_matrix[j, l] * B_ilk[2] +
                     derivative_matrix[k, l] * B_ijl[3])
        end
        # Scale reference-space derivatives to physical space
        divb *= cache.elements.inverse_jacobian[element]
        return divb^2
    end |> sqrt
end
542
543
# L2 norm of the discrete divergence of the magnetic field on curvilinear meshes,
# using the contravariant vectors to transform the reference-space derivatives.
function analyze(::Val{:l2_divb}, du, u, t,
                 mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}},
                 equations,
                 dg::DGSEM, cache)
    @unpack contravariant_vectors, inverse_jacobian = cache.elements
    integrate_via_indices(u, mesh, equations, dg, cache, cache,
                          dg.basis.derivative_matrix) do u, i, j, k, element, equations,
                                                         dg, cache, derivative_matrix
        divb = zero(eltype(u))
        # Get the contravariant vectors Ja^1, Ja^2, and Ja^3
        Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors,
                                                    i, j, k, element)
        Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors,
                                                    i, j, k, element)
        Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors,
                                                    i, j, k, element)
        # Compute the transformed divergence via the collocation derivative
        for l in eachnode(dg)
            B_ljk = magnetic_field(get_node_vars(u, equations, dg, l, j, k, element),
                                   equations)
            B_ilk = magnetic_field(get_node_vars(u, equations, dg, i, l, k, element),
                                   equations)
            B_ijl = magnetic_field(get_node_vars(u, equations, dg, i, j, l, element),
                                   equations)

            divb += (derivative_matrix[i, l] *
                     (Ja11 * B_ljk[1] + Ja12 * B_ljk[2] + Ja13 * B_ljk[3]) +
                     derivative_matrix[j, l] *
                     (Ja21 * B_ilk[1] + Ja22 * B_ilk[2] + Ja23 * B_ilk[3]) +
                     derivative_matrix[k, l] *
                     (Ja31 * B_ijl[1] + Ja32 * B_ijl[2] + Ja33 * B_ijl[3]))
        end
        # Scale reference-space derivatives to physical space
        divb *= inverse_jacobian[i, j, k, element]
        return divb^2
    end |> sqrt
end
580
581
# Linf norm (maximum over all nodes) of the discrete divergence of the magnetic
# field on a Cartesian mesh. Reduced across MPI ranks when running in parallel.
function analyze(::Val{:linf_divb}, du, u, t,
                 mesh::TreeMesh{3}, equations,
                 dg::DGSEM, cache)
    # Only the derivative matrix is needed here (`weights` was unpacked
    # previously but never used)
    @unpack derivative_matrix = dg.basis

    # integrate over all elements to get the divergence-free condition errors
    linf_divb = zero(eltype(u))
    @batch reduction=(max, linf_divb) for element in eachelement(dg, cache)
        for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
            divb = zero(eltype(u))
            # Apply the collocation derivative in each coordinate direction
            for l in eachnode(dg)
                u_ljk = get_node_vars(u, equations, dg, l, j, k, element)
                u_ilk = get_node_vars(u, equations, dg, i, l, k, element)
                u_ijl = get_node_vars(u, equations, dg, i, j, l, element)

                B_ljk = magnetic_field(u_ljk, equations)
                B_ilk = magnetic_field(u_ilk, equations)
                B_ijl = magnetic_field(u_ijl, equations)

                divb += (derivative_matrix[i, l] * B_ljk[1] +
                         derivative_matrix[j, l] * B_ilk[2] +
                         derivative_matrix[k, l] * B_ijl[3])
            end
            # Scale reference-space derivatives to physical space
            divb *= cache.elements.inverse_jacobian[element]
            linf_divb = max(linf_divb, abs(divb))
        end
    end

    if mpi_isparallel()
        # Base.max instead of max needed, see comment in src/auxiliary/math.jl
        linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[]
    end

    return linf_divb
end
616
617
# Linf norm (maximum over all nodes) of the discrete divergence of the magnetic
# field on curvilinear meshes. Reduced across MPI ranks when running in parallel.
function analyze(::Val{:linf_divb}, du, u, t,
                 mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}},
                 equations,
                 dg::DGSEM, cache)
    # Only the derivative matrix is needed here (`weights` was unpacked
    # previously but never used)
    @unpack derivative_matrix = dg.basis
    @unpack contravariant_vectors, inverse_jacobian = cache.elements

    # integrate over all elements to get the divergence-free condition errors
    linf_divb = zero(eltype(u))
    @batch reduction=(max, linf_divb) for element in eachelement(dg, cache)
        for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
            divb = zero(eltype(u))
            # Get the contravariant vectors Ja^1, Ja^2, and Ja^3
            Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors,
                                                        i, j, k, element)
            Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors,
                                                        i, j, k, element)
            Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors,
                                                        i, j, k, element)
            # Compute the transformed divergence
            for l in eachnode(dg)
                u_ljk = get_node_vars(u, equations, dg, l, j, k, element)
                u_ilk = get_node_vars(u, equations, dg, i, l, k, element)
                u_ijl = get_node_vars(u, equations, dg, i, j, l, element)

                B_ljk = magnetic_field(u_ljk, equations)
                B_ilk = magnetic_field(u_ilk, equations)
                B_ijl = magnetic_field(u_ijl, equations)

                divb += (derivative_matrix[i, l] * (Ja11 * B_ljk[1] +
                          Ja12 * B_ljk[2] + Ja13 * B_ljk[3]) +
                         derivative_matrix[j, l] * (Ja21 * B_ilk[1] +
                          Ja22 * B_ilk[2] + Ja23 * B_ilk[3]) +
                         derivative_matrix[k, l] * (Ja31 * B_ijl[1] +
                          Ja32 * B_ijl[2] + Ja33 * B_ijl[3]))
            end
            # Scale reference-space derivatives to physical space
            divb *= inverse_jacobian[i, j, k, element]
            linf_divb = max(linf_divb, abs(divb))
        end
    end

    if mpi_isparallel()
        # Base.max instead of max needed, see comment in src/auxiliary/math.jl
        linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[]
    end

    return linf_divb
end
665
end # @muladd
666
667