Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
trixi-framework
GitHub Repository: trixi-framework/Trixi.jl
Path: blob/main/src/callbacks_step/analysis_dg2d.jl
5586 views
1
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
2
# Since these FMAs can increase the performance of many numerical algorithms,
3
# we need to opt-in explicitly.
4
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
5
@muladd begin
6
#! format: noindent
7
8
function create_cache_analysis(analyzer, mesh::TreeMesh{2},
                               equations, dg::DG, cache,
                               RealT, uEltype)
    # Compile-time sizes: number of variables, analysis nodes, and solver nodes.
    n_vars = StaticInt(nvariables(equations))
    n_ana = StaticInt(nnodes(analyzer))
    n_dg = StaticInt(nnodes(dg))
    n_dims = StaticInt(ndims(equations))

    # Pre-allocate buffers used in performance-critical places. `StrideArray`s
    # encode their (static) sizes in the type, which gives the compiler more
    # information and makes them faster than native `Array`s here.
    u_local = StrideArray(undef, uEltype, n_vars, n_ana, n_ana)
    u_tmp1 = StrideArray(undef, uEltype, n_vars, n_ana, n_dg)
    x_local = StrideArray(undef, RealT, n_dims, n_ana, n_ana)
    x_tmp1 = StrideArray(undef, RealT, n_dims, n_ana, n_dg)

    return (; u_local, u_tmp1, x_local, x_tmp1)
end
31
32
# Specialized cache for P4estMesh to allow for different ambient dimension from mesh dimension
function create_cache_analysis(analyzer,
                               mesh::Union{P4estMesh{2, NDIMS_AMBIENT},
                                           P4estMeshView{2, NDIMS_AMBIENT}},
                               equations, dg::DG, cache,
                               RealT, uEltype) where {NDIMS_AMBIENT}
    # Compile-time sizes used for all buffers below.
    n_vars = StaticInt(nvariables(equations))
    n_ana = StaticInt(nnodes(analyzer))
    n_dg = StaticInt(nnodes(dg))
    # Coordinate buffers use the ambient dimension, which may differ from the
    # mesh dimension (e.g. a 2D surface mesh embedded in 3D space).
    n_dims = StaticInt(NDIMS_AMBIENT)

    # Pre-allocate buffers used in performance-critical places. `StrideArray`s
    # carry their static sizes in the type, making them faster than native
    # `Array`s for these routines.
    u_local = StrideArray(undef, uEltype, n_vars, n_ana, n_ana)
    u_tmp1 = StrideArray(undef, uEltype, n_vars, n_ana, n_dg)
    x_local = StrideArray(undef, RealT, n_dims, n_ana, n_ana)
    x_tmp1 = StrideArray(undef, RealT, n_dims, n_ana, n_dg)
    jacobian_local = StrideArray(undef, RealT, n_ana, n_ana)
    jacobian_tmp1 = StrideArray(undef, RealT, n_ana, n_dg)

    return (; u_local, u_tmp1, x_local, x_tmp1, jacobian_local, jacobian_tmp1)
end
63
64
function create_cache_analysis(analyzer,
                               mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
                                           UnstructuredMesh2D, T8codeMesh{2}},
                               equations, dg::DG, cache,
                               RealT, uEltype)
    # Compile-time sizes used for all buffers below.
    n_vars = StaticInt(nvariables(equations))
    n_ana = StaticInt(nnodes(analyzer))
    n_dg = StaticInt(nnodes(dg))
    n_dims = StaticInt(ndims(equations))

    # Pre-allocate buffers used in performance-critical places. `StrideArray`s
    # carry their static sizes in the type, making them faster than native
    # `Array`s for these routines.
    u_local = StrideArray(undef, uEltype, n_vars, n_ana, n_ana)
    u_tmp1 = StrideArray(undef, uEltype, n_vars, n_ana, n_dg)
    x_local = StrideArray(undef, RealT, n_dims, n_ana, n_ana)
    x_tmp1 = StrideArray(undef, RealT, n_dims, n_ana, n_dg)
    jacobian_local = StrideArray(undef, RealT, n_ana, n_ana)
    jacobian_tmp1 = StrideArray(undef, RealT, n_ana, n_dg)

    return (; u_local, u_tmp1, x_local, x_tmp1, jacobian_local, jacobian_tmp1)
end
94
95
# Compute discrete L2 and Linf norms of the error of `func` applied to the
# numerical solution `u` versus the exact solution `initial_condition(x, t)`.
# The solution and node coordinates are first interpolated to the (typically
# finer) analysis nodes of `analyzer` before evaluating the errors.
function calc_error_norms(func, u, t, analyzer,
                          mesh::TreeMesh{2}, equations, initial_condition,
                          dg::DGSEM, cache, cache_analysis)
    @unpack vandermonde, weights = analyzer
    @unpack node_coordinates = cache.elements
    @unpack u_local, u_tmp1, x_local, x_tmp1 = cache_analysis

    # Set up data structures (zeros of the right shape via a sample evaluation)
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1), equations))
    linf_error = copy(l2_error)

    # Iterate over all elements for error calculations
    # Accumulate L2 error on the element first so that the order of summation is the
    # same as in the parallel case to ensure exact equality. This facilitates easier parallel
    # development and debugging (see
    # https://github.com/trixi-framework/Trixi.jl/pull/850#pullrequestreview-757463943 for details).
    for element in eachelement(dg, cache)
        # Set up data structures for local element L2 error
        l2_error_local = zero(l2_error)

        # Interpolate solution and node locations to analysis nodes
        multiply_dimensionwise!(u_local, vandermonde, view(u, :, :, :, element), u_tmp1)
        multiply_dimensionwise!(x_local, vandermonde,
                                view(node_coordinates, :, :, :, element), x_tmp1)

        # Calculate errors at each analysis node
        # On a TreeMesh, the Jacobian is constant per element
        volume_jacobian_ = volume_jacobian(element, mesh, cache)

        for j in eachnode(analyzer), i in eachnode(analyzer)
            u_exact = initial_condition(get_node_coords(x_local, equations, dg, i, j),
                                        t, equations)
            diff = func(u_exact, equations) -
                   func(get_node_vars(u_local, equations, dg, i, j), equations)
            # Quadrature contribution: squared error times weights and Jacobian
            l2_error_local += diff .^ 2 * (weights[i] * weights[j] * volume_jacobian_)
            linf_error = @. max(linf_error, abs(diff))
        end
        l2_error += l2_error_local
    end

    # For L2 error, divide by total volume
    total_volume_ = total_volume(mesh)
    l2_error = @. sqrt(l2_error / total_volume_)

    return l2_error, linf_error
end
140
141
# Error norms on curved/unstructured meshes: the Jacobian varies per node, so
# it is interpolated to the analysis nodes as well and the total volume is
# accumulated alongside the errors.
function calc_error_norms(func, u, t, analyzer,
                          mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
                                      UnstructuredMesh2D,
                                      P4estMesh{2}, P4estMeshView{2},
                                      T8codeMesh{2}},
                          equations,
                          initial_condition, dg::DGSEM, cache, cache_analysis)
    @unpack vandermonde, weights = analyzer
    @unpack u_local, u_tmp1, x_local, x_tmp1, jacobian_local, jacobian_tmp1 = cache_analysis
    @unpack node_coordinates, inverse_jacobian = cache.elements

    # Calculate error norms on the CPU, to ensure the order of summation is the same.
    # (For GPU backends, copy the data to host arrays first.)
    if trixi_backend(u) !== nothing
        node_coordinates = Array(node_coordinates)
        inverse_jacobian = Array(inverse_jacobian)
        u = Array(u)
    end

    # Set up data structures (zeros of the right shape via a sample evaluation)
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1), equations))
    linf_error = copy(l2_error)
    total_volume = zero(real(mesh))

    # Iterate over all elements for error calculations
    for element in eachelement(dg, cache)
        # Interpolate solution and node locations to analysis nodes
        multiply_dimensionwise!(u_local, vandermonde, view(u, :, :, :, element), u_tmp1)
        multiply_dimensionwise!(x_local, vandermonde,
                                view(node_coordinates, :, :, :, element), x_tmp1)
        # Interpolate the (scalar) Jacobian determinant, stored as its inverse
        multiply_scalar_dimensionwise!(jacobian_local, vandermonde,
                                       inv.(view(inverse_jacobian, :, :, element)),
                                       jacobian_tmp1)

        # Calculate errors at each analysis node
        for j in eachnode(analyzer), i in eachnode(analyzer)
            u_exact = initial_condition(get_node_coords(x_local, equations, dg, i, j),
                                        t, equations)
            diff = func(u_exact, equations) -
                   func(get_node_vars(u_local, equations, dg, i, j), equations)
            # We take absolute value as we need the Jacobian here for the volume calculation
            abs_jacobian_local_ij = abs(jacobian_local[i, j])

            l2_error += diff .^ 2 * (weights[i] * weights[j] * abs_jacobian_local_ij)
            linf_error = @. max(linf_error, abs(diff))
            total_volume += weights[i] * weights[j] * abs_jacobian_local_ij
        end
    end

    # For L2 error, divide by total volume
    l2_error = @. sqrt(l2_error / total_volume)

    return l2_error, linf_error
end
194
195
# Use quadrature to numerically integrate a single element.
# We do not multiply by the Jacobian to stay in reference space.
# This avoids the need to divide the RHS of the DG scheme by the Jacobian when computing
# the time derivative of entropy, see `entropy_change_reference_element`.
function integrate_reference_element(func::Func, u, element,
                                     ::Type{<:AbstractMesh{2}}, equations, dg::DGSEM,
                                     cache,
                                     args...) where {Func}
    @unpack weights = dg.basis

    # Seed the accumulator with a zero of the correct shape, obtained by
    # evaluating `func` at the first node of this element.
    result = zero(func(u, 1, 1, element, equations, dg, args...))

    # Tensor-product quadrature over the reference element
    for j in eachnode(dg), i in eachnode(dg)
        result += weights[i] * weights[j] *
                  func(u, i, j, element, equations, dg, args...)
    end

    return result
end
215
216
# Calculate ∫_e (∂S/∂u ⋅ ∂u/∂t) dΩ_e where the result on element 'e' is kept in reference space
# Note that ∂S/∂u = w(u) with entropy variables w
function entropy_change_reference_element(du, u, element,
                                          MeshT::Type{<:AbstractMesh{2}},
                                          equations, dg::DGSEM, cache, args...)
    # Integrand at each node: w(u) ⋅ u_t with w = cons2entropy(u)
    integrand = function (u, i, j, element, equations, dg, du)
        w_node = cons2entropy(get_node_vars(u, equations, dg, i, j, element),
                              equations)
        du_node = get_node_vars(du, equations, dg, i, j, element)
        return dot(w_node, du_node)
    end
    return integrate_reference_element(integrand, u, element, MeshT, equations,
                                       dg, cache, du)
end
229
230
# calculate surface integral of func(u, equations) * normal on the reference element.
# On a TreeMesh the element faces are axis-aligned, so the outward normal is
# ± the unit vector of the respective coordinate direction; the sign flip is
# realized by the differences `func(top) - func(bottom)` etc. below.
function surface_integral_reference_element(func::Func, u, element,
                                            ::Type{<:TreeMesh{2}}, equations, dg::DGSEM,
                                            cache, args...) where {Func}
    @unpack weights = dg.basis

    # Zero of the correct shape, obtained by evaluating `func` at a sample node
    u_tmp = get_node_vars(u, equations, dg, 1, 1, element)
    surface_integral = zero(func(u_tmp, 1, equations))
    for i in eachnode(dg)
        # integrate along x direction, normal in y (2) direction
        u_bottom = get_node_vars(u, equations, dg, i, 1, element)
        u_top = get_node_vars(u, equations, dg, i, nnodes(dg), element)

        surface_integral += weights[i] *
                            (func(u_top, 2, equations) - func(u_bottom, 2, equations))

        # integrate along y direction, normal in x (1) direction
        u_left = get_node_vars(u, equations, dg, 1, i, element)
        u_right = get_node_vars(u, equations, dg, nnodes(dg), i, element)

        surface_integral += weights[i] *
                            (func(u_right, 1, equations) - func(u_left, 1, equations))
    end

    return surface_integral
end
256
257
# calculate surface integral of func(u, normal_direction, equations) * normal on the reference element.
# Note: `get_normal_direction` already returns an outward-pointing normal for all directions,
# thus no +- flips are needed here.
function surface_integral_reference_element(func::Func, u, element,
                                            ::Type{<:Union{StructuredMesh{2},
                                                           StructuredMeshView{2},
                                                           UnstructuredMesh2D,
                                                           P4estMesh{2},
                                                           T8codeMesh{2}}},
                                            equations, dg::DGSEM,
                                            cache, args...) where {Func}
    @unpack contravariant_vectors = cache.elements
    @unpack weights = dg.basis

    # Construct zero of right shape:
    # Evaluate `func` at actual quadrature node and normal direction
    u_tmp = get_node_vars(u, equations, dg, 1, 1, element)
    normal_direction = get_normal_direction(1, contravariant_vectors,
                                            1, 1, element)
    surface_integral = zero(func(u_tmp, normal_direction, equations))

    # NOTE(review): the normals from `get_normal_direction` appear to carry the
    # face-Jacobian scaling (contravariant vectors), so only the 1D quadrature
    # weight is applied per face node below — confirm against the mesh docs.

    # Direction 1: face at i = 1 (x_min)
    for j in eachnode(dg)
        u_node = get_node_vars(u, equations, dg, 1, j, element)
        normal_direction = get_normal_direction(1, contravariant_vectors,
                                                1, j, element)
        surface_integral += weights[j] * func(u_node, normal_direction, equations)
    end

    # Direction 2: face at i = nnodes(dg) (x_max)
    for j in eachnode(dg)
        u_node = get_node_vars(u, equations, dg, nnodes(dg), j, element)
        normal_direction = get_normal_direction(2, contravariant_vectors,
                                                nnodes(dg), j, element)
        surface_integral += weights[j] * func(u_node, normal_direction, equations)
    end

    # Direction 3: face at j = 1 (y_min)
    for i in eachnode(dg)
        u_node = get_node_vars(u, equations, dg, i, 1, element)
        normal_direction = get_normal_direction(3, contravariant_vectors,
                                                i, 1, element)
        surface_integral += weights[i] * func(u_node, normal_direction, equations)
    end

    # Direction 4: face at j = nnodes(dg) (y_max)
    for i in eachnode(dg)
        u_node = get_node_vars(u, equations, dg, i, nnodes(dg), element)
        normal_direction = get_normal_direction(4, contravariant_vectors,
                                                i, nnodes(dg), element)
        surface_integral += weights[i] * func(u_node, normal_direction, equations)
    end

    return surface_integral
end
312
313
# Integrate `func(u, i, j, element, equations, dg, args...)` over the whole
# domain via quadrature; optionally normalize by the total domain volume.
function integrate_via_indices(func::Func, u,
                               mesh::TreeMesh{2}, equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    @unpack weights = dg.basis

    # Initialize integral with zeros of the right shape
    integral = zero(func(u, 1, 1, 1, equations, dg, args...))

    # Use quadrature to numerically integrate over entire domain;
    # `@batch` threads the element loop with a `+` reduction on `integral`
    @batch reduction=(+, integral) for element in eachelement(dg, cache)
        # On a TreeMesh, the Jacobian is constant per element
        volume_jacobian_ = volume_jacobian(element, mesh, cache)
        for j in eachnode(dg), i in eachnode(dg)
            integral += volume_jacobian_ * weights[i] * weights[j] *
                        func(u, i, j, element, equations, dg, args...)
        end
    end

    # Normalize with total volume
    if normalize
        integral = integral / total_volume(mesh)
    end

    return integral
end
337
338
function integrate_via_indices(func::Func, u,
                               mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
                                           UnstructuredMesh2D,
                                           P4estMesh{2}, P4estMeshView{2},
                                           T8codeMesh{2}},
                               equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    # Forward to the backend-specific method: `trixi_backend(u)` selects the
    # CPU path (`nothing`) or the accelerator path (a `Backend`).
    backend = trixi_backend(u)
    return integrate_via_indices(func, backend, u, mesh, equations, dg, cache,
                                 args...; normalize = normalize)
end
348
349
# CPU path (backend `nothing`): threaded quadrature with a per-node Jacobian.
function integrate_via_indices(func::Func, ::Nothing, u,
                               mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
                                           UnstructuredMesh2D,
                                           P4estMesh{2}, P4estMeshView{2},
                                           T8codeMesh{2}},
                               equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    @unpack weights = dg.basis
    @unpack inverse_jacobian = cache.elements

    # Initialize integral with zeros of the right shape
    integral = zero(func(u, 1, 1, 1, equations, dg, args...))
    # NOTE: this local deliberately shadows the `total_volume(mesh)` function
    # used by the TreeMesh method; here the volume is accumulated node by node.
    total_volume = zero(real(mesh))

    # Use quadrature to numerically integrate over entire domain;
    # `@batch` threads the element loop with `+` reductions on both accumulators
    @batch reduction=((+, integral), (+, total_volume)) for element in eachelement(dg,
                                                                                   cache)
        for j in eachnode(dg), i in eachnode(dg)
            # Jacobian determinant at this node (stored as its inverse)
            volume_jacobian = abs(inv(inverse_jacobian[i, j, element]))
            integral += volume_jacobian * weights[i] * weights[j] *
                        func(u, i, j, element, equations, dg, args...)
            total_volume += volume_jacobian * weights[i] * weights[j]
        end
    end

    # Normalize with total volume
    if normalize
        integral = integral / total_volume
    end

    return integral
end
381
382
# Accelerator path: quadrature via a parallel `mapreduce` over elements, with a
# tuple accumulator carrying (integral, total volume).
function integrate_via_indices(func::Func, backend::Backend, u,
                               mesh::Union{StructuredMesh{2}, StructuredMeshView{2},
                                           UnstructuredMesh2D,
                                           P4estMesh{2}, P4estMeshView{2},
                                           T8codeMesh{2}},
                               equations, dg::DGSEM, cache,
                               args...; normalize = true) where {Func}
    @unpack weights = dg.basis
    @unpack inverse_jacobian = cache.elements

    # Reduction operator combining (integral, volume) tuples from two elements
    function local_plus((integral, total_volume), (integral_element, volume_element))
        return (integral + integral_element, total_volume + volume_element)
    end

    # `func(u, 1,1,1, equations, dg, args...)` might access GPU memory
    # so we have to rely on the compiler to correctly infer the type of the integral here.
    # TODO: Technically we need device_promote_op here that "infers" the function within the context of the GPU.
    integral0 = zero(Base.promote_op(func, typeof(u), Int, Int, Int, typeof(equations),
                                     typeof(dg), map(typeof, args)...))
    init = neutral = (integral0, zero(real(mesh)))

    # Use quadrature to numerically integrate over entire domain
    num_elements = nelements(dg, cache)
    integral, total_volume = AcceleratedKernels.mapreduce(local_plus, 1:num_elements,
                                                          backend; init,
                                                          neutral) do element
        # Initialize integral with zeros of the right shape
        local_integral, local_total_volume = neutral

        for j in eachnode(dg), i in eachnode(dg)
            # Jacobian determinant at this node (stored as its inverse)
            volume_jacobian = abs(inv(inverse_jacobian[i, j, element]))
            local_integral += volume_jacobian * weights[i] * weights[j] *
                              func(u, i, j, element, equations, dg, args...)
            local_total_volume += volume_jacobian * weights[i] * weights[j]
        end

        return (local_integral, local_total_volume)
    end

    # Normalize with total volume
    if normalize
        integral = integral / total_volume
    end

    return integral
end
428
429
function integrate(func::Func, u,
                   mesh::Union{TreeMesh{2}, StructuredMesh{2}, StructuredMeshView{2},
                               UnstructuredMesh2D, P4estMesh{2}, P4estMeshView{2},
                               T8codeMesh{2}},
                   equations, dg::Union{DGSEM, FDSBP}, cache;
                   normalize = true) where {Func}
    # Evaluate `func` on the nodal solution values and integrate via quadrature
    integrand = function (u, i, j, element, equations, dg)
        u_node = get_node_vars(u, equations, dg, i, j, element)
        return func(u_node, equations)
    end
    integrate_via_indices(integrand, u, mesh, equations, dg, cache;
                          normalize = normalize)
end
441
442
function integrate(func::Func, u,
                   mesh::Union{TreeMesh{2}, P4estMesh{2}},
                   equations, equations_parabolic, dg::DGSEM,
                   cache, cache_parabolic; normalize = true) where {Func}
    # Parabolic variant: `func` also receives the solution gradients in x and y
    gradients_x, gradients_y = cache_parabolic.parabolic_container.gradients
    integrand = function (u, i, j, element, equations, dg)
        u_node = get_node_vars(u, equations, dg, i, j, element)
        grad_x_node = get_node_vars(gradients_x, equations_parabolic, dg, i, j,
                                    element)
        grad_y_node = get_node_vars(gradients_y, equations_parabolic, dg, i, j,
                                    element)
        return func(u_node, (grad_x_node, grad_y_node), equations_parabolic)
    end
    integrate_via_indices(integrand, u, mesh, equations, dg, cache;
                          normalize = normalize)
end
458
459
# Calculate ∫(∂S/∂u ⋅ ∂u/∂t) dΩ, the time derivative of the total entropy.
# Note that ∂S/∂u = w(u), the entropy variables.
function analyze(::typeof(entropy_timederivative), du, u, t,
                 mesh::Union{TreeMesh{2}, StructuredMesh{2}, StructuredMeshView{2},
                             UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}},
                 equations, dg::Union{DGSEM, FDSBP}, cache)
    # Integrand at each node: w(u) ⋅ u_t
    integrand = function (u, i, j, element, equations, dg, du)
        w_node = cons2entropy(get_node_vars(u, equations, dg, i, j, element),
                              equations)
        du_node = get_node_vars(du, equations, dg, i, j, element)
        return dot(w_node, du_node)
    end
    integrate_via_indices(integrand, u, mesh, equations, dg, cache, du)
end
472
473
# Discrete L2 norm of div(B), the divergence error of the magnetic field, on a
# Cartesian TreeMesh.
function analyze(::Val{:l2_divb}, du, u, t,
                 mesh::TreeMesh{2},
                 equations, dg::DGSEM, cache)
    integrate_via_indices(u, mesh, equations, dg, cache, cache,
                          dg.basis.derivative_matrix) do u, i, j, element, equations,
                                                         dg, cache, derivative_matrix
        divb = zero(eltype(u))
        # Collocation derivative: ∂B1/∂x + ∂B2/∂y via the differentiation matrix
        for k in eachnode(dg)
            u_kj = get_node_vars(u, equations, dg, k, j, element)
            u_ik = get_node_vars(u, equations, dg, i, k, element)

            B1_kj, _, _ = magnetic_field(u_kj, equations)
            _, B2_ik, _ = magnetic_field(u_ik, equations)

            divb += (derivative_matrix[i, k] * B1_kj +
                     derivative_matrix[j, k] * B2_ik)
        end
        # Transform reference-space derivative to physical space
        # (Jacobian is constant per element on a TreeMesh)
        divb *= cache.elements.inverse_jacobian[element]
        return divb^2
    end |> sqrt
end
494
495
# Discrete L2 norm of div(B) on curved meshes, using the contravariant vectors
# to transform reference-space derivatives to physical space.
function analyze(::Val{:l2_divb}, du, u, t,
                 mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
                             T8codeMesh{2}},
                 equations, dg::DGSEM, cache)
    @unpack contravariant_vectors, inverse_jacobian = cache.elements
    integrate_via_indices(u, mesh, equations, dg, cache, cache,
                          dg.basis.derivative_matrix) do u, i, j, element, equations,
                                                         dg, cache, derivative_matrix
        divb = zero(eltype(u))
        # Get the contravariant vectors Ja^1 and Ja^2
        Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, element)
        Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element)
        # Compute the transformed divergence
        for k in eachnode(dg)
            u_kj = get_node_vars(u, equations, dg, k, j, element)
            u_ik = get_node_vars(u, equations, dg, i, k, element)

            B1_kj, B2_kj, _ = magnetic_field(u_kj, equations)
            B1_ik, B2_ik, _ = magnetic_field(u_ik, equations)

            divb += (derivative_matrix[i, k] *
                     (Ja11 * B1_kj + Ja12 * B2_kj) +
                     derivative_matrix[j, k] *
                     (Ja21 * B1_ik + Ja22 * B2_ik))
        end
        # Scale by the inverse Jacobian at this node
        divb *= inverse_jacobian[i, j, element]
        return divb^2
    end |> sqrt
end
524
525
# Maximum-norm (Linf) error of div(B) on a Cartesian TreeMesh.
function analyze(::Val{:linf_divb}, du, u, t,
                 mesh::TreeMesh{2},
                 equations, dg::DGSEM, cache)
    @unpack derivative_matrix, weights = dg.basis

    # integrate over all elements to get the divergence-free condition errors;
    # `@batch` threads the element loop with a `max` reduction on `linf_divb`
    linf_divb = zero(eltype(u))
    @batch reduction=(max, linf_divb) for element in eachelement(dg, cache)
        for j in eachnode(dg), i in eachnode(dg)
            divb = zero(eltype(u))
            # Collocation derivative: ∂B1/∂x + ∂B2/∂y via the differentiation matrix
            for k in eachnode(dg)
                u_kj = get_node_vars(u, equations, dg, k, j, element)
                u_ik = get_node_vars(u, equations, dg, i, k, element)

                B1_kj, _, _ = magnetic_field(u_kj, equations)
                _, B2_ik, _ = magnetic_field(u_ik, equations)

                divb += (derivative_matrix[i, k] * B1_kj +
                         derivative_matrix[j, k] * B2_ik)
            end
            # Jacobian is constant per element on a TreeMesh
            divb *= cache.elements.inverse_jacobian[element]
            linf_divb = max(linf_divb, abs(divb))
        end
    end

    return linf_divb
end
552
553
# Maximum-norm (Linf) error of div(B) on curved meshes; the per-rank maximum is
# reduced across MPI ranks when running in parallel.
function analyze(::Val{:linf_divb}, du, u, t,
                 mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2},
                             T8codeMesh{2}},
                 equations, dg::DGSEM, cache)
    @unpack derivative_matrix, weights = dg.basis
    @unpack contravariant_vectors, inverse_jacobian = cache.elements

    # integrate over all elements to get the divergence-free condition errors;
    # `@batch` threads the element loop with a `max` reduction on `linf_divb`
    linf_divb = zero(eltype(u))
    @batch reduction=(max, linf_divb) for element in eachelement(dg, cache)
        for j in eachnode(dg), i in eachnode(dg)
            divb = zero(eltype(u))
            # Get the contravariant vectors Ja^1 and Ja^2
            Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors,
                                                  i, j, element)
            Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors,
                                                  i, j, element)
            # Compute the transformed divergence
            for k in eachnode(dg)
                u_kj = get_node_vars(u, equations, dg, k, j, element)
                u_ik = get_node_vars(u, equations, dg, i, k, element)

                B1_kj, B2_kj, _ = magnetic_field(u_kj, equations)
                B1_ik, B2_ik, _ = magnetic_field(u_ik, equations)

                divb += (derivative_matrix[i, k] *
                         (Ja11 * B1_kj + Ja12 * B2_kj) +
                         derivative_matrix[j, k] *
                         (Ja21 * B1_ik + Ja22 * B2_ik))
            end
            # Scale by the inverse Jacobian at this node
            divb *= inverse_jacobian[i, j, element]
            linf_divb = max(linf_divb, abs(divb))
        end
    end
    if mpi_isparallel()
        # Base.max instead of max needed, see comment in src/auxiliary/math.jl
        linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[]
    end

    return linf_divb
end
594
end # @muladd
595
596