Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
trixi-framework
GitHub Repository: trixi-framework/Trixi.jl
Path: blob/main/src/solvers/dgsem_p4est/dg_2d_parabolic_parallel.jl
5616 views
1
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
2
# Since these FMAs can increase the performance of many numerical algorithms,
3
# we need to opt-in explicitly.
4
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
5
@muladd begin
6
#! format: noindent
7
function rhs_parabolic!(du, u, t,
8
mesh::P4estMeshParallel{2},
9
equations_parabolic::AbstractEquationsParabolic,
10
boundary_conditions_parabolic, source_terms_parabolic,
11
dg::DG, parabolic_scheme, cache, cache_parabolic)
12
@unpack parabolic_container = cache_parabolic
13
@unpack u_transformed, gradients, flux_parabolic = parabolic_container
14
15
@trixi_timeit timer() "transform variables" begin
16
transform_variables!(u_transformed, u, mesh, equations_parabolic,
17
dg, cache)
18
end
19
20
### Gradient computation ###
21
22
# Start gradient MPI receive
23
@trixi_timeit timer() "start MPI receive gradient" begin
24
start_mpi_receive!(cache.mpi_cache)
25
end
26
27
# Prolong transformed variables to MPI mortars
28
@trixi_timeit timer() "prolong2mpimortars gradient" begin
29
prolong2mpimortars!(cache, u_transformed, mesh, equations_parabolic,
30
dg.mortar, dg)
31
end
32
33
# Prolong transformed variables to MPI interfaces
34
@trixi_timeit timer() "prolong2mpiinterfaces gradient" begin
35
prolong2mpiinterfaces!(cache, u_transformed, mesh, equations_parabolic,
36
dg.surface_integral, dg)
37
end
38
39
# Start gradient MPI send
40
@trixi_timeit timer() "start MPI send gradient" begin
41
start_mpi_send!(cache.mpi_cache, mesh, equations_parabolic, dg, cache)
42
end
43
44
# Local gradient computation
45
@trixi_timeit timer() "calculate gradient local" begin
46
calc_gradient_local!(gradients, u_transformed, t, mesh,
47
equations_parabolic, boundary_conditions_parabolic,
48
dg, parabolic_scheme, cache)
49
end
50
51
# Finish gradient MPI receive
52
@trixi_timeit timer() "finish MPI receive gradient" begin
53
finish_mpi_receive!(cache.mpi_cache, mesh, equations_parabolic, dg, cache)
54
end
55
56
# MPI interface fluxes for gradients
57
@trixi_timeit timer() "MPI interface flux gradient" begin
58
calc_mpi_interface_flux_gradient!(cache.elements.surface_flux_values,
59
mesh, equations_parabolic,
60
dg, parabolic_scheme, cache)
61
end
62
63
# MPI mortar fluxes for gradients
64
@trixi_timeit timer() "MPI mortar flux gradient" begin
65
calc_mpi_mortar_flux_gradient!(cache.elements.surface_flux_values,
66
mesh, equations_parabolic, dg.mortar,
67
dg, parabolic_scheme, cache)
68
end
69
70
# Calculate surface integrals
71
@trixi_timeit timer() "surface integral" begin
72
calc_surface_integral_gradient!(gradients, mesh, equations_parabolic,
73
dg, cache)
74
end
75
76
# Apply Jacobian from mapping to reference element
77
@trixi_timeit timer() "Jacobian" begin
78
apply_jacobian_parabolic!(gradients, mesh, equations_parabolic, dg,
79
cache)
80
end
81
82
# Finish gradient MPI send
83
@trixi_timeit timer() "finish MPI send gradient" begin
84
finish_mpi_send!(cache.mpi_cache)
85
end
86
87
# Local parabolic flux construction
88
@trixi_timeit timer() "calculate parabolic fluxes" begin
89
calc_parabolic_fluxes!(flux_parabolic, gradients, u_transformed, mesh,
90
equations_parabolic, dg, cache)
91
end
92
93
### Divergence of parabolic/gradient fluxes ###
94
95
# Start divergence MPI receive
96
@trixi_timeit timer() "start MPI receive divergence" begin
97
start_mpi_receive!(cache.mpi_cache)
98
end
99
100
# Reset du
101
@trixi_timeit timer() "reset ∂u/∂t" begin
102
set_zero!(du, dg, cache)
103
end
104
105
# Local volume integral
106
@trixi_timeit timer() "volume integral" begin
107
calc_volume_integral!(du, flux_parabolic, mesh, equations_parabolic, dg, cache)
108
end
109
110
# Prolong parabolic fluxes to MPI mortars
111
@trixi_timeit timer() "prolong2mpimortars divergence" begin
112
prolong2mpimortars_divergence!(cache, flux_parabolic, mesh, equations_parabolic,
113
dg.mortar, dg)
114
end
115
116
# Prolong parabolic fluxes to MPI interfaces
117
@trixi_timeit timer() "prolong2mpiinterfaces divergence" begin
118
prolong2mpiinterfaces!(cache, flux_parabolic, mesh, equations_parabolic, dg)
119
end
120
121
# Start divergence MPI send
122
@trixi_timeit timer() "start MPI send divergence" begin
123
start_mpi_send!(cache.mpi_cache, mesh, equations_parabolic, dg, cache)
124
end
125
126
# Local interface fluxes
127
@trixi_timeit timer() "prolong2interfaces" begin
128
prolong2interfaces!(cache, flux_parabolic, mesh, equations_parabolic, dg)
129
end
130
131
@trixi_timeit timer() "interface flux" begin
132
calc_interface_flux!(cache.elements.surface_flux_values, mesh,
133
equations_parabolic, dg, parabolic_scheme, cache)
134
end
135
136
# Local boundary fluxes
137
@trixi_timeit timer() "prolong2boundaries" begin
138
prolong2boundaries!(cache, flux_parabolic, mesh, equations_parabolic, dg)
139
end
140
141
@trixi_timeit timer() "boundary flux" begin
142
calc_boundary_flux_divergence!(cache, t,
143
boundary_conditions_parabolic, mesh,
144
equations_parabolic,
145
dg.surface_integral, dg)
146
end
147
148
# Local mortar fluxes
149
@trixi_timeit timer() "prolong2mortars" begin
150
prolong2mortars_divergence!(cache, flux_parabolic, mesh, equations_parabolic,
151
dg.mortar, dg)
152
end
153
154
@trixi_timeit timer() "mortar flux" begin
155
calc_mortar_flux_divergence!(cache.elements.surface_flux_values,
156
mesh, equations_parabolic, dg.mortar,
157
dg, parabolic_scheme, cache)
158
end
159
160
# Finish divergence MPI receive
161
@trixi_timeit timer() "finish MPI receive divergence" begin
162
finish_mpi_receive!(cache.mpi_cache, mesh, equations_parabolic, dg, cache)
163
end
164
165
# MPI interface fluxes for divergence
166
@trixi_timeit timer() "MPI interface flux divergence" begin
167
calc_mpi_interface_flux_divergence!(cache.elements.surface_flux_values,
168
mesh, equations_parabolic,
169
dg, parabolic_scheme, cache)
170
end
171
172
# MPI mortar fluxes for divergence
173
@trixi_timeit timer() "MPI mortar flux divergence" begin
174
calc_mpi_mortar_flux_divergence!(cache.elements.surface_flux_values,
175
mesh, equations_parabolic, dg.mortar,
176
dg, parabolic_scheme, cache)
177
end
178
179
# Finish divergence MPI send
180
@trixi_timeit timer() "finish MPI send divergence" begin
181
finish_mpi_send!(cache.mpi_cache)
182
end
183
184
@trixi_timeit timer() "surface integral" begin
185
calc_surface_integral!(nothing, du, u, mesh, equations_parabolic,
186
dg.surface_integral, dg, cache)
187
end
188
189
@trixi_timeit timer() "Jacobian" begin
190
apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache)
191
end
192
193
@trixi_timeit timer() "source terms parabolic" begin
194
calc_sources_parabolic!(du, u, gradients, t, source_terms_parabolic,
195
equations_parabolic, dg, cache)
196
end
197
198
return nothing
199
end
200
201
function calc_gradient_local!(gradients, u_transformed, t,
202
mesh::P4estMeshParallel{2},
203
equations_parabolic, boundary_conditions_parabolic,
204
dg::DG, parabolic_scheme, cache)
205
# Reset gradients
206
@trixi_timeit timer() "reset gradients" begin
207
reset_gradients!(gradients, dg, cache)
208
end
209
210
# Calculate volume integral
211
@trixi_timeit timer() "volume integral" begin
212
calc_volume_integral_gradient!(gradients, u_transformed,
213
mesh, equations_parabolic, dg, cache)
214
end
215
216
# Prolong solution to interfaces.
217
# This reuses `prolong2interfaces` for the purely hyperbolic case.
218
@trixi_timeit timer() "prolong2interfaces" begin
219
prolong2interfaces!(nothing, cache, u_transformed, mesh,
220
equations_parabolic, dg)
221
end
222
223
# Calculate interface fluxes for the gradients
224
@trixi_timeit timer() "interface flux" begin
225
@unpack surface_flux_values = cache.elements
226
calc_interface_flux_gradient!(surface_flux_values, mesh, equations_parabolic,
227
dg, parabolic_scheme, cache)
228
end
229
230
# Prolong solution to boundaries.
231
# This reuses `prolong2boundaries` for the purely hyperbolic case.
232
@trixi_timeit timer() "prolong2boundaries" begin
233
prolong2boundaries!(cache, u_transformed, mesh,
234
equations_parabolic, dg)
235
end
236
237
# Calculate boundary fluxes
238
@trixi_timeit timer() "boundary flux" begin
239
calc_boundary_flux_gradient!(cache, t, boundary_conditions_parabolic,
240
mesh, equations_parabolic, dg.surface_integral,
241
dg)
242
end
243
244
# Prolong solution to mortars.
245
# This reuses `prolong2mortars` for the purely hyperbolic case.
246
@trixi_timeit timer() "prolong2mortars" begin
247
prolong2mortars!(cache, u_transformed, mesh, equations_parabolic,
248
dg.mortar, dg)
249
end
250
251
# Calculate mortar fluxes
252
@trixi_timeit timer() "mortar flux" begin
253
calc_mortar_flux_gradient!(cache.elements.surface_flux_values,
254
mesh, equations_parabolic, dg.mortar,
255
dg, parabolic_scheme, cache)
256
end
257
258
return nothing
259
end
260
261
function prolong2mpiinterfaces!(cache, flux_parabolic::Tuple,
262
mesh::P4estMeshParallel{2},
263
equations_parabolic, dg::DG)
264
@unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces
265
@unpack contravariant_vectors = cache.elements
266
index_range = eachnode(dg)
267
268
flux_parabolic_x, flux_parabolic_y = flux_parabolic
269
270
@threaded for interface in eachmpiinterface(dg, cache)
271
# Copy solution data from the local element using "delayed indexing" with
272
# a start value and a step size to get the correct face and orientation.
273
# Note that in the current implementation, the interface will be
274
# "aligned at the primary element", i.e., the index of the primary side
275
# will always run forwards.
276
local_element = local_neighbor_ids[interface]
277
local_indices = node_indices[interface]
278
local_direction = indices2direction(local_indices)
279
local_side = local_sides[interface]
280
# store the normal flux with respect to the primary normal direction,
281
# which is the negative of the secondary normal direction
282
orientation_factor = local_side == 1 ? 1 : -1
283
284
i_start, i_step = index_to_start_step_2d(local_indices[1], index_range)
285
j_start, j_step = index_to_start_step_2d(local_indices[2], index_range)
286
287
i_elem = i_start
288
j_elem = j_start
289
290
for i in eachnode(dg)
291
normal_direction = get_normal_direction(local_direction,
292
contravariant_vectors,
293
i_elem, j_elem,
294
local_element)
295
296
for v in eachvariable(equations_parabolic)
297
flux_visc = SVector(flux_parabolic_x[v, i_elem, j_elem, local_element],
298
flux_parabolic_y[v, i_elem, j_elem, local_element])
299
# Side 1 and 2 must be consistent, i.e., with their outward-pointing normals.
300
# Thus, the `orientation_factor` changes the logic such that the
301
# flux which enters side 1 leaves side 2.
302
cache.mpi_interfaces.u[local_side, v, i, interface] = orientation_factor *
303
dot(flux_visc,
304
normal_direction)
305
end
306
307
i_elem += i_step
308
j_elem += j_step
309
end
310
end
311
312
return nothing
313
end
314
315
function calc_mpi_interface_flux_gradient!(surface_flux_values,
316
mesh::P4estMeshParallel{2},
317
equations_parabolic,
318
dg::DG, parabolic_scheme, cache)
319
@unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces
320
@unpack contravariant_vectors = cache.elements
321
@unpack u = cache.mpi_interfaces
322
index_range = eachnode(dg)
323
index_end = last(index_range)
324
325
@threaded for interface in eachmpiinterface(dg, cache)
326
local_element = local_neighbor_ids[interface]
327
local_indices = node_indices[interface]
328
local_direction = indices2direction(local_indices)
329
local_side = local_sides[interface]
330
331
# Create the local i,j indexing on the local element used to pull normal direction information
332
i_element_start, i_element_step = index_to_start_step_2d(local_indices[1],
333
index_range)
334
j_element_start, j_element_step = index_to_start_step_2d(local_indices[2],
335
index_range)
336
337
i_element = i_element_start
338
j_element = j_element_start
339
340
# Initiate the node index to be used in the surface for loop,
341
# the surface flux storage must be indexed in alignment with the local element indexing
342
if :i_backward in local_indices
343
surface_node = index_end
344
surface_node_step = -1
345
else
346
surface_node = 1
347
surface_node_step = 1
348
end
349
350
for i in eachnode(dg)
351
normal_direction = get_normal_direction(local_direction,
352
contravariant_vectors,
353
i_element, j_element,
354
local_element)
355
356
u_ll, u_rr = get_surface_node_vars(u, equations_parabolic, dg,
357
i, interface)
358
359
flux_ = flux_parabolic(u_ll, u_rr, normal_direction, Gradient(),
360
equations_parabolic, parabolic_scheme)
361
362
for v in eachvariable(equations_parabolic)
363
surface_flux_values[v, surface_node,
364
local_direction, local_element] = flux_[v]
365
end
366
367
# Increment local element indices to pull the normal direction
368
# from the element data
369
i_element += i_element_step
370
j_element += j_element_step
371
372
# Increment the surface node index along the local element
373
surface_node += surface_node_step
374
end
375
end
376
377
return nothing
378
end
379
380
function calc_mpi_interface_flux_divergence!(surface_flux_values,
381
mesh::P4estMeshParallel{2},
382
equations_parabolic,
383
dg::DG, parabolic_scheme, cache)
384
@unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces
385
@unpack contravariant_vectors = cache.elements
386
@unpack u = cache.mpi_interfaces
387
index_range = eachnode(dg)
388
index_end = last(index_range)
389
390
@threaded for interface in eachmpiinterface(dg, cache)
391
local_element = local_neighbor_ids[interface]
392
local_indices = node_indices[interface]
393
local_direction = indices2direction(local_indices)
394
local_side = local_sides[interface]
395
396
i_element_start, i_element_step = index_to_start_step_2d(local_indices[1],
397
index_range)
398
j_element_start, j_element_step = index_to_start_step_2d(local_indices[2],
399
index_range)
400
401
i_element = i_element_start
402
j_element = j_element_start
403
404
# Initiate the node index to be used in the surface for loop,
405
# the surface flux storage must be indexed in alignment with the local element indexing
406
if :i_backward in local_indices
407
surface_node = index_end
408
surface_node_step = -1
409
else
410
surface_node = 1
411
surface_node_step = 1
412
end
413
414
for i in eachnode(dg)
415
normal_direction = get_normal_direction(local_direction,
416
contravariant_vectors,
417
i_element, j_element,
418
local_element)
419
420
parabolic_flux_normal_ll, parabolic_flux_normal_rr = get_surface_node_vars(u,
421
equations_parabolic,
422
dg,
423
i,
424
interface)
425
426
# Sign flip for `local_side = 2` required for divergence calculation since
427
# the divergence interface flux involves the normal direction.
428
# `local_side=2` is thus flipped (opposite of primary side)
429
orientation_factor = local_side == 1 ? 1 : -1
430
flux_ = flux_parabolic(parabolic_flux_normal_ll, parabolic_flux_normal_rr,
431
orientation_factor * normal_direction, Divergence(),
432
equations_parabolic, parabolic_scheme)
433
434
for v in eachvariable(equations_parabolic)
435
surface_flux_values[v, surface_node,
436
local_direction, local_element] = orientation_factor * flux_[v]
437
end
438
439
i_element += i_element_step
440
j_element += j_element_step
441
442
surface_node += surface_node_step
443
end
444
end
445
446
return nothing
447
end
448
449
function calc_mpi_mortar_flux_gradient!(surface_flux_values,
450
mesh::Union{P4estMeshParallel{2},
451
T8codeMeshParallel{2}},
452
equations_parabolic,
453
mortar_l2::LobattoLegendreMortarL2,
454
dg::DG, parabolic_scheme, cache)
455
@unpack (fstar_primary_upper_threaded, fstar_primary_lower_threaded,
456
fstar_secondary_upper_threaded, fstar_secondary_lower_threaded) = cache
457
@unpack u = cache.mpi_mortars
458
@threaded for mortar in eachmpimortar(dg, cache)
459
fstar_primary = (fstar_primary_lower_threaded[Threads.threadid()],
460
fstar_primary_upper_threaded[Threads.threadid()])
461
462
fstar_secondary = (fstar_secondary_lower_threaded[Threads.threadid()],
463
fstar_secondary_upper_threaded[Threads.threadid()])
464
465
for position in 1:2
466
for i in eachnode(dg)
467
normal_direction = get_normal_direction(cache.mpi_mortars, i,
468
position, mortar)
469
470
u_ll, u_rr = get_surface_node_vars(u, equations_parabolic, dg,
471
position, i, mortar)
472
473
flux = flux_parabolic(u_ll, u_rr, normal_direction, Gradient(),
474
equations_parabolic, parabolic_scheme)
475
476
set_node_vars!(fstar_primary[position], flux, equations_parabolic, dg,
477
i)
478
set_node_vars!(fstar_secondary[position], flux, equations_parabolic, dg,
479
i)
480
end
481
end
482
483
u_buffer = cache.u_threaded[Threads.threadid()]
484
mpi_mortar_fluxes_to_elements_gradient!(surface_flux_values,
485
mesh, equations_parabolic, mortar_l2,
486
dg, cache,
487
mortar, fstar_primary, fstar_secondary,
488
u_buffer)
489
end
490
491
return nothing
492
end
493
494
@inline function mpi_mortar_fluxes_to_elements_gradient!(surface_flux_values,
495
mesh::Union{P4estMeshParallel{2},
496
T8codeMeshParallel{2}},
497
equations_parabolic,
498
mortar_l2::LobattoLegendreMortarL2,
499
dg::DGSEM, cache, mortar,
500
fstar_primary, fstar_secondary,
501
u_buffer)
502
@unpack local_neighbor_ids, local_neighbor_positions, node_indices = cache.mpi_mortars
503
index_range = eachnode(dg)
504
index_end = last(index_range)
505
506
small_indices = node_indices[1, mortar]
507
small_direction = indices2direction(small_indices)
508
509
large_indices = node_indices[2, mortar]
510
large_direction = indices2direction(large_indices)
511
512
for (element, position) in zip(local_neighbor_ids[mortar],
513
local_neighbor_positions[mortar])
514
515
# In 2D the large side is the third mortar neighbor slot.
516
if position == 3
517
# Project the two small-side traces to the large side.
518
multiply_dimensionwise!(u_buffer,
519
mortar_l2.reverse_upper,
520
fstar_secondary[2],
521
mortar_l2.reverse_lower,
522
fstar_secondary[1])
523
524
# Gradient stage: no extra sign flip / scale factor
525
# (same as local 2D parabolic mortar_fluxes_to_elements!)
526
if :i_backward in large_indices
527
for i in eachnode(dg)
528
for v in eachvariable(equations_parabolic)
529
surface_flux_values[v, index_end + 1 - i,
530
large_direction, element] = u_buffer[v, i]
531
end
532
end
533
else
534
for i in eachnode(dg)
535
for v in eachvariable(equations_parabolic)
536
surface_flux_values[v, i,
537
large_direction, element] = u_buffer[v, i]
538
end
539
end
540
end
541
else
542
# Small sides copy directly
543
for i in eachnode(dg)
544
for v in eachvariable(equations_parabolic)
545
surface_flux_values[v, i,
546
small_direction, element] = fstar_primary[position][v, i]
547
end
548
end
549
end
550
end
551
552
return nothing
553
end
554
555
function prolong2mpimortars_divergence!(cache, flux_parabolic,
556
mesh::Union{P4estMeshParallel{2},
557
T8codeMeshParallel{2}},
558
equations_parabolic,
559
mortar_l2::LobattoLegendreMortarL2,
560
dg::DGSEM)
561
@unpack node_indices = cache.mpi_mortars
562
@unpack contravariant_vectors = cache.elements
563
index_range = eachnode(dg)
564
565
flux_parabolic_x, flux_parabolic_y = flux_parabolic
566
567
@threaded for mortar in eachmpimortar(dg, cache)
568
local_neighbor_ids = cache.mpi_mortars.local_neighbor_ids[mortar]
569
local_neighbor_positions = cache.mpi_mortars.local_neighbor_positions[mortar]
570
571
# Small side indexing
572
small_indices = node_indices[1, mortar]
573
small_direction = indices2direction(small_indices)
574
575
i_small_start, i_small_step = index_to_start_step_2d(small_indices[1],
576
index_range)
577
j_small_start, j_small_step = index_to_start_step_2d(small_indices[2],
578
index_range)
579
580
# Large side indexing
581
large_indices = node_indices[2, mortar]
582
large_direction = indices2direction(large_indices)
583
584
i_large_start, i_large_step = index_to_start_step_2d(large_indices[1],
585
index_range)
586
j_large_start, j_large_step = index_to_start_step_2d(large_indices[2],
587
index_range)
588
589
for (element, position) in zip(local_neighbor_ids, local_neighbor_positions)
590
if position == 3
591
# =========================
592
# LARGE ELEMENT
593
# =========================
594
u_buffer = cache.u_threaded[Threads.threadid()]
595
596
i_large = i_large_start
597
j_large = j_large_start
598
599
for i in eachnode(dg)
600
normal_direction = get_normal_direction(large_direction,
601
contravariant_vectors,
602
i_large, j_large,
603
element)
604
605
for v in eachvariable(equations_parabolic)
606
flux_node = SVector(flux_parabolic_x[v, i_large, j_large,
607
element],
608
flux_parabolic_y[v, i_large, j_large,
609
element])
610
611
# Same convention as local 2D code:
612
# prolong flux dotted with outward normal on the small element.
613
# The large-element normal is -2x the small-element normal,
614
# hence the factor -1/2 here.
615
u_buffer[v, i] = -0.5f0 * dot(flux_node, normal_direction)
616
end
617
618
i_large += i_large_step
619
j_large += j_large_step
620
end
621
622
multiply_dimensionwise!(view(cache.mpi_mortars.u, 2, :, 1, :, mortar),
623
mortar_l2.forward_lower, u_buffer)
624
multiply_dimensionwise!(view(cache.mpi_mortars.u, 2, :, 2, :, mortar),
625
mortar_l2.forward_upper, u_buffer)
626
627
else
628
# =========================
629
# SMALL ELEMENT (1–2)
630
# =========================
631
i_small = i_small_start
632
j_small = j_small_start
633
634
for i in eachnode(dg)
635
normal_direction = get_normal_direction(small_direction,
636
contravariant_vectors,
637
i_small, j_small,
638
element)
639
640
for v in eachvariable(equations_parabolic)
641
flux_node = SVector(flux_parabolic_x[v, i_small, j_small,
642
element],
643
flux_parabolic_y[v, i_small, j_small,
644
element])
645
646
cache.mpi_mortars.u[1, v, position, i, mortar] = dot(flux_node,
647
normal_direction)
648
end
649
650
i_small += i_small_step
651
j_small += j_small_step
652
end
653
end
654
end
655
end
656
657
return nothing
658
end
659
660
function calc_mpi_mortar_flux_divergence!(surface_flux_values,
661
mesh::Union{P4estMeshParallel{2},
662
T8codeMeshParallel{2}},
663
equations_parabolic,
664
mortar_l2::LobattoLegendreMortarL2,
665
dg::DG, parabolic_scheme, cache)
666
@unpack fstar_primary_upper_threaded, fstar_primary_lower_threaded = cache
667
@unpack u = cache.mpi_mortars
668
@threaded for mortar in eachmpimortar(dg, cache)
669
# Match local 2D structure as one tuple
670
fstar = (fstar_primary_lower_threaded[Threads.threadid()],
671
fstar_primary_upper_threaded[Threads.threadid()])
672
673
for position in 1:2
674
for i in eachnode(dg)
675
normal_direction = get_normal_direction(cache.mpi_mortars, i,
676
position, mortar)
677
678
for v in eachvariable(equations_parabolic)
679
viscous_flux_normal_ll = u[1, v, position, i, mortar]
680
viscous_flux_normal_rr = u[2, v, position, i, mortar]
681
682
flux = flux_parabolic(viscous_flux_normal_ll,
683
viscous_flux_normal_rr,
684
normal_direction, Divergence(),
685
equations_parabolic, parabolic_scheme)
686
687
# Same convention as local 2D: sign flip / scaling already handled in prolongation
688
fstar[position][v, i] = flux
689
end
690
end
691
end
692
693
u_buffer = cache.u_threaded[Threads.threadid()]
694
695
# Reuse hyperbolic MPI mortar-to-element transfer, same as local 2D
696
mpi_mortar_fluxes_to_elements!(surface_flux_values, mesh,
697
equations_parabolic, mortar_l2, dg, cache,
698
mortar, fstar, fstar, u_buffer)
699
end
700
701
return nothing
702
end
703
end #muladd
704
705