Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
trixi-framework
GitHub Repository: trixi-framework/Trixi.jl
Path: blob/main/src/semidiscretization/semidiscretization.jl
5586 views
1
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
2
# Since these FMAs can increase the performance of many numerical algorithms,
3
# we need to opt-in explicitly.
4
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
5
@muladd begin
6
#! format: noindent
7
8
"""
    ndofs(semi::AbstractSemidiscretization)

Return the number of degrees of freedom associated with each scalar variable.
"""
@inline function ndofs(semi::AbstractSemidiscretization)
    components = mesh_equations_solver_cache(semi)
    # Forward to the mesh/solver/cache implementation (equations are not needed)
    return ndofs(components[1], components[3], components[4])
end
17
18
"""
    ndofsglobal(semi::AbstractSemidiscretization)

Return the global number of degrees of freedom associated with each scalar variable across all MPI ranks.
This is the same as [`ndofs`](@ref) for simulations running in serial or
parallelized via threads. It will in general be different for simulations
running in parallel with MPI.
"""
@inline function ndofsglobal(semi::AbstractSemidiscretization)
    components = mesh_equations_solver_cache(semi)
    # Forward to the mesh/solver/cache implementation (equations are not needed)
    return ndofsglobal(components[1], components[3], components[4])
end
30
31
"""
    integrate_via_indices(func, u_ode, semi::AbstractSemidiscretization, args...; normalize=true)

Call `func(u, i..., element, equations, solver, args...)` for all nodal indices `i..., element`
and integrate the result using a quadrature associated with the semidiscretization `semi`.

If `normalize` is true, the result is divided by the total volume of the computational domain.
"""
function integrate_via_indices(func::Func, u_ode, semi::AbstractSemidiscretization,
                               args...; normalize = true) where {Func}
    mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
    # Wrap the 1D coefficient vector as the solver's multidimensional array type
    u_wrapped = wrap_array(u_ode, mesh, equations, solver, cache)
    return integrate_via_indices(func, u_wrapped, mesh, equations, solver, cache,
                                 args...; normalize = normalize)
end
47
48
"""
    integrate([func=(u_node,equations)->u_node,] u_ode, semi::AbstractSemidiscretization; normalize=true)

Call `func(u_node, equations)` for each vector of nodal variables `u_node` in `u_ode`
and integrate the result using a quadrature associated with the semidiscretization `semi`.

If `normalize` is true, the result is divided by the total volume of the computational domain.
"""
function integrate(func::Func, u_ode, semi::AbstractSemidiscretization;
                   normalize = true) where {Func}
    mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
    # Wrap the 1D coefficient vector as the solver's multidimensional array type
    u_wrapped = wrap_array(u_ode, mesh, equations, solver, cache)
    return integrate(func, u_wrapped, mesh, equations, solver, cache;
                     normalize = normalize)
end
63
64
function integrate(u_ode, semi::AbstractSemidiscretization; normalize = true)
    # Default to the identity map on the conservative variables
    return integrate(cons2cons, u_ode, semi; normalize = normalize)
end
67
68
# Select the right-hand side function corresponding to the semidiscretization `semi`.
@inline function default_rhs(::AbstractSemidiscretization)
    return rhs!
end
70
71
"""
    calc_error_norms([func=(u_node,equations)->u_node,] u_ode, t, analyzer, semi::AbstractSemidiscretization, cache_analysis)

Calculate discrete L2 and L∞ error norms of `func` applied to each nodal variable `u_node` in `u_ode`.
If no exact solution is available, "errors" are calculated using some reference state and can be useful
for regression tests.
"""
function calc_error_norms(u_ode, t, analyzer, semi::AbstractSemidiscretization,
                          cache_analysis)
    # Without an explicit `func`, measure errors in the conservative variables
    return calc_error_norms(cons2cons, u_ode, t, analyzer, semi, cache_analysis)
end
82
83
"""
    semidiscretize(semi::AbstractSemidiscretization, tspan;
                   jac_prototype::Union{AbstractMatrix, Nothing} = nothing,
                   colorvec::Union{AbstractVector, Nothing} = nothing,
                   reset_threads = true,
                   storage_type = nothing,
                   real_type = nothing)

Wrap the semidiscretization `semi` as an ODE problem in the time interval `tspan`
that can be passed to `solve` from the [SciML ecosystem](https://diffeq.sciml.ai/latest/).

Optional keyword arguments:
- `jac_prototype`: Expected to come from [SparseConnectivityTracer.jl](https://github.com/adrhill/SparseConnectivityTracer.jl).
  Specifies the sparsity structure of the Jacobian to enable e.g. efficient implicit time stepping.
- `colorvec`: Expected to come from [SparseMatrixColorings.jl](https://github.com/gdalle/SparseMatrixColorings.jl).
  Allows for even faster Jacobian computation if a sparse `jac_prototype` is given (optional).
- `reset_threads`: If `true` (default), reset the Polyester.jl threads before setting up
  the ODE problem, see the notes in the implementation below.
- `storage_type` and `real_type`: Configure the underlying computational datastructures.
  `storage_type` changes the fundamental array type being used, allowing the experimental use of `CuArray`
  or other GPU array types. `real_type` changes the computational data type being used.
"""
function semidiscretize(semi::AbstractSemidiscretization, tspan;
                        jac_prototype::Union{AbstractMatrix, Nothing} = nothing,
                        colorvec::Union{AbstractVector, Nothing} = nothing,
                        reset_threads = true,
                        storage_type = nothing,
                        real_type = nothing)
    # Optionally reset Polyester.jl threads. See
    # https://github.com/trixi-framework/Trixi.jl/issues/1583
    # https://github.com/JuliaSIMD/Polyester.jl/issues/30
    if reset_threads
        Polyester.reset_threads!()
    end

    # Only adapt the semidiscretization if the user requested a non-default
    # storage type and/or real type
    if !(storage_type === nothing && real_type === nothing)
        if storage_type === nothing
            storage_type = Array
        end
        if real_type === nothing
            real_type = real(semi)
        end
        semi = trixi_adapt(storage_type, real_type, semi)
        # Keep the time span consistent with the (possibly changed) real type
        if eltype(tspan) !== real_type
            tspan = convert.(real_type, tspan)
        end
    end

    u0_ode = compute_coefficients(first(tspan), semi) # Invoke initial condition
    rhs_semi! = default_rhs(semi)

    # TODO: MPI, do we want to synchronize loading and print debug statements, e.g. using
    #   mpi_isparallel() && MPI.Barrier(mpi_comm())
    # See https://github.com/trixi-framework/Trixi.jl/issues/328
    iip = true # is-inplace, i.e., we modify a vector when calling `rhs_semi!`
    specialize = SciMLBase.FullSpecialize # specialize on `rhs_semi!` and parameters (semi)

    # Check if Jacobian prototype is provided for sparse Jacobian
    if jac_prototype !== nothing
        # Convert `jac_prototype` to real type, as seen here:
        # https://docs.sciml.ai/DiffEqDocs/stable/tutorials/advanced_ode_example/#Declaring-a-Sparse-Jacobian-with-Automatic-Sparsity-Detection
        ode = SciMLBase.ODEFunction(rhs_semi!,
                                    jac_prototype = convert.(eltype(u0_ode),
                                                             jac_prototype),
                                    colorvec = colorvec) # coloring vector is optional

        return ODEProblem{iip, specialize}(ode, u0_ode, tspan, semi)
    else
        # We could also construct an `ODEFunction` explicitly without the Jacobian here,
        # but we stick to the lean direct in-place function `rhs_semi!` and
        # let OrdinaryDiffEq.jl handle the rest
        return ODEProblem{iip, specialize}(rhs_semi!, u0_ode, tspan, semi)
    end
end
154
155
"""
    semidiscretize(semi::AbstractSemidiscretization, tspan,
                   restart_file::AbstractString;
                   jac_prototype::Union{AbstractMatrix, Nothing} = nothing,
                   colorvec::Union{AbstractVector, Nothing} = nothing)

Wrap the semidiscretization `semi` as an ODE problem in the time interval `tspan`
that can be passed to `solve` from the [SciML ecosystem](https://diffeq.sciml.ai/latest/).
The initial condition etc. is taken from the `restart_file`.

Optional keyword arguments:
- `jac_prototype`: Expected to come from [SparseConnectivityTracer.jl](https://github.com/adrhill/SparseConnectivityTracer.jl).
  Specifies the sparsity structure of the Jacobian to enable e.g. efficient implicit time stepping.
- `colorvec`: Expected to come from [SparseMatrixColorings.jl](https://github.com/gdalle/SparseMatrixColorings.jl).
  Allows for even faster Jacobian computation. Not necessarily required when `jac_prototype` is given.
- `reset_threads`: If `true` (default), reset the Polyester.jl threads before setting up
  the ODE problem, see the notes in the implementation below.
"""
function semidiscretize(semi::AbstractSemidiscretization, tspan,
                        restart_file::AbstractString;
                        jac_prototype::Union{AbstractMatrix, Nothing} = nothing,
                        colorvec::Union{AbstractVector, Nothing} = nothing,
                        reset_threads = true)
    # Optionally reset Polyester.jl threads. See
    # https://github.com/trixi-framework/Trixi.jl/issues/1583
    # https://github.com/JuliaSIMD/Polyester.jl/issues/30
    if reset_threads
        Polyester.reset_threads!()
    end

    u0_ode = load_restart_file(semi, restart_file) # Load initial condition from restart file
    rhs_semi! = default_rhs(semi)

    # TODO: MPI, do we want to synchronize loading and print debug statements, e.g. using
    #   mpi_isparallel() && MPI.Barrier(mpi_comm())
    # See https://github.com/trixi-framework/Trixi.jl/issues/328
    iip = true # is-inplace, i.e., we modify a vector when calling `rhs_semi!`
    specialize = SciMLBase.FullSpecialize # specialize on `rhs_semi!` and parameters (semi)

    # Check if Jacobian prototype is provided for sparse Jacobian
    if jac_prototype !== nothing
        # Convert `jac_prototype` to real type, as seen here:
        # https://docs.sciml.ai/DiffEqDocs/stable/tutorials/advanced_ode_example/#Declaring-a-Sparse-Jacobian-with-Automatic-Sparsity-Detection
        ode = SciMLBase.ODEFunction(rhs_semi!,
                                    jac_prototype = convert.(eltype(u0_ode),
                                                             jac_prototype),
                                    colorvec = colorvec) # coloring vector is optional

        return ODEProblem{iip, specialize}(ode, u0_ode, tspan, semi)
    else
        # We could also construct an `ODEFunction` explicitly without the Jacobian here,
        # but we stick to the lean direct in-place function `rhs_semi!` and
        # let OrdinaryDiffEq.jl handle the rest
        return ODEProblem{iip, specialize}(rhs_semi!, u0_ode, tspan, semi)
    end
end
209
210
"""
    compute_coefficients(func, t, semi::AbstractSemidiscretization)

Compute the discrete coefficients of the continuous function `func` at time `t`
associated with the semidiscretization `semi`.
For example, the discrete coefficients of `func` for a discontinuous Galerkin
spectral element method ([`DGSEM`](@ref)) are the values of `func` at the
Lobatto-Legendre nodes. Similarly, a classical finite difference method will use
the values of `func` at the nodes of the grid associated with the semidiscretization
`semi`.

For semidiscretizations `semi` associated with an initial condition, `func` can be omitted
to use the given initial condition at time `t`.
"""
function compute_coefficients(func, t, semi::AbstractSemidiscretization)
    coefficients = allocate_coefficients(mesh_equations_solver_cache(semi)...)
    # Delegate the actual work to the in-place variant defined below
    compute_coefficients!(coefficients, func, t, semi)
    return coefficients
end
230
231
"""
    compute_coefficients!(u_ode, func, t, semi::AbstractSemidiscretization)

Same as [`compute_coefficients`](@ref) but stores the result in `u_ode`.
"""
function compute_coefficients!(u_ode, func, t, semi::AbstractSemidiscretization)
    # Backend obtained from the storage of `u_ode` via `trixi_backend`
    exec_backend = trixi_backend(u_ode)
    u_wrapped = wrap_array(u_ode, semi)
    mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
    # Dispatch to the solver-specific implementation
    return compute_coefficients!(exec_backend, u_wrapped, func, t,
                                 mesh, equations, solver, cache)
end
243
244
# Helper function to compute linear structure for a given semidiscretization
# `semi` and applied RHS `apply_rhs!`
function _linear_structure_from_rhs(semi::AbstractSemidiscretization, apply_rhs!)
    # Allocate working storage
    zero_state = allocate_coefficients(mesh_equations_solver_cache(semi)...)
    rhs_at_zero = similar(zero_state)

    # Evaluating the RHS at the zero state isolates the affine part, i.e., the
    # contribution of the boundary conditions and optional source terms
    fill!(zero_state, zero(eltype(zero_state)))
    apply_rhs!(rhs_at_zero, zero_state)
    b = -rhs_at_zero

    # Create a copy of `b` used internally to extract the linear part of `semi`.
    # This is necessary to get everything correct when the user updates the
    # returned vector `b`.
    b_internal = copy(b)

    # Matrix-free linear operator: dest = rhs(src) + b
    A = LinearMap(length(zero_state), ismutating = true) do dest, src
        apply_rhs!(dest, src)
        @. dest += b_internal
        return dest
    end

    return A, b
end
270
271
"""
    linear_structure(semi::AbstractSemidiscretization;
                     t0 = zero(real(semi)))

Wraps the right-hand side operator of the semidiscretization `semi`
at time `t0` as an affine-linear operator given by a linear operator `A`
and a vector `b`:
```math
\\partial_t u(t) = A u(t) - b.
```
Works only for linear equations, i.e.,
equations which `have_constant_speed(equations) == True()`.

This has the benefit of greatly reduced memory consumption compared to constructing
the full system matrix explicitly, as done for instance in
[`jacobian_fd`](@ref) and [`jacobian_ad_forward`](@ref).

The returned linear operator `A` is a matrix-free operator which can be
supplied to iterative solvers from, e.g., [Krylov.jl](https://github.com/JuliaSmoothOptimizers/Krylov.jl).

It is also possible to use this to construct a sparse matrix without the detour of constructing
first the full Jacobian by calling
```julia
using SparseArrays
A_map, b = linear_structure(semi, t0 = t0)
A_sparse = sparse(A_map)
```
"""
function linear_structure(semi::AbstractSemidiscretization;
                          t0 = zero(real(semi)))
    # Only linear equations admit an affine-linear representation of the RHS
    if have_constant_speed(semi.equations) == False()
        throw(ArgumentError("`linear_structure` expects linear equations."))
    end

    # Freeze the time `t0` in the RHS evaluation
    apply_rhs! = (dest, src) -> rhs!(dest, src, semi, t0)

    return _linear_structure_from_rhs(semi, apply_rhs!)
end
311
312
"""
    jacobian_fd(semi::AbstractSemidiscretization;
                t0 = zero(real(semi)),
                u0_ode = compute_coefficients(t0, semi))

Uses the right-hand side operator of the semidiscretization `semi`
and simple second order finite difference to compute the Jacobian `J`
of the semidiscretization `semi` at time `t0` and state `u0_ode`.
"""
function jacobian_fd(semi::AbstractSemidiscretization;
                     t0 = zero(real(semi)),
                     u0_ode = compute_coefficients(t0, semi))
    # Work on a copy since the state is perturbed entry by entry below
    u_ode = copy(u0_ode)
    du_base_ode = similar(u_ode)
    du_plus_ode = similar(u_ode)
    du_minus_ode = similar(u_ode)
    rhs_semi! = default_rhs(semi)

    # Compute residual of linearization state
    rhs_semi!(du_base_ode, u_ode, semi, t0)

    # Initialize the (dense) Jacobian matrix
    n = length(u_ode)
    J = zeros(eltype(u_ode), n, n)

    # Estimate each column of the Jacobian via central finite differences
    for idx in eachindex(u0_ode)
        # Determine size of fluctuation.
        # This is the approach used by FiniteDiff.jl to compute the
        # step size, which assures that the finite difference is accurate
        # for very small and very large absolute values `u0_ode[idx]`.
        # See https://github.com/trixi-framework/Trixi.jl/pull/2514#issuecomment-3190534904.
        absstep = sqrt(eps(typeof(u0_ode[idx])))
        epsilon = max(absstep * abs(u0_ode[idx]), absstep)

        # Plus fluctuation
        u_ode[idx] = u0_ode[idx] + epsilon
        rhs_semi!(du_plus_ode, u_ode, semi, t0)

        # Minus fluctuation
        u_ode[idx] = u0_ode[idx] - epsilon
        rhs_semi!(du_minus_ode, u_ode, semi, t0)

        # Restore linearization state
        u_ode[idx] = u0_ode[idx]

        # Central second order finite difference
        J[:, idx] .= (du_plus_ode .- du_minus_ode) ./ (2 * epsilon)
    end

    return J
end
365
366
"""
    jacobian_ad_forward(semi::AbstractSemidiscretization;
                        t0 = zero(real(semi)),
                        u0_ode = compute_coefficients(t0, semi))

Uses the right-hand side operator of the semidiscretization `semi`
and forward mode automatic differentiation to compute the Jacobian `J`
of the semidiscretization `semi` at time `t0` and state `u0_ode`.
"""
function jacobian_ad_forward(semi::AbstractSemidiscretization;
                             t0 = zero(real(semi)),
                             u0_ode = compute_coefficients(t0, semi))
    # Forward the keyword arguments as positional arguments so that the
    # array-type-specific methods below can dispatch on `u0_ode`
    return jacobian_ad_forward(semi, t0, u0_ode)
end
380
381
# The following version is for plain arrays
function jacobian_ad_forward(semi::AbstractSemidiscretization, t0, u0_ode)
    du_buffer = similar(u0_ode)
    jac_config = ForwardDiff.JacobianConfig(nothing, du_buffer, u0_ode)

    # Use a function barrier since the generation of the `jac_config` above
    # is not type-stable
    return _jacobian_ad_forward(semi, t0, u0_ode, du_buffer, jac_config)
end
390
391
function _jacobian_ad_forward(semi, t0, u0_ode, du_ode, config)
    # Rebuild the semidiscretization with the dual number eltype of `config`
    dual_semi = remake(semi, uEltype = eltype(config))
    dual_rhs! = default_rhs(dual_semi)
    # The `do` block provides the in-place function `f!` expected by
    # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray,
    #                       cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())`
    return ForwardDiff.jacobian(du_ode, u0_ode, config) do du, u
        return dual_rhs!(du, u, dual_semi, t0)
    end
end
403
404
# unpack u if it is wrapped in VectorOfArray (mainly for DGMulti solvers)
function jacobian_ad_forward(semi::AbstractSemidiscretization, t0,
                             u0_ode::VectorOfArray)
    return jacobian_ad_forward(semi, t0, parent(u0_ode))
end
408
409
# This version is specialized to `StructArray`s used by some `DGMulti` solvers.
# We need to convert the numerical solution vectors since ForwardDiff cannot
# handle arrays of `SVector`s.
function jacobian_ad_forward(semi::AbstractSemidiscretization, t0, _u0_ode::StructArray)
    # Flatten the `StructArray` into a plain array with one trailing
    # dimension per variable
    u0_ode_plain = similar(_u0_ode, eltype(eltype(_u0_ode)),
                           (size(_u0_ode)..., nvariables(semi)))
    for (v, u_v) in enumerate(StructArrays.components(_u0_ode))
        u0_ode_plain[.., v] = u_v
    end
    du_ode_plain = similar(u0_ode_plain)
    jac_config = ForwardDiff.JacobianConfig(nothing, du_ode_plain, u0_ode_plain)

    # Use a function barrier since the generation of the `jac_config` above
    # is not type-stable
    return _jacobian_ad_forward_structarrays(semi, t0, u0_ode_plain, du_ode_plain,
                                             jac_config)
end
426
427
# Kernel behind the `StructArray` version of `jacobian_ad_forward`:
# operates on the flattened plain arrays and re-wraps them as `StructArray`s
# of `SVector`s for each RHS evaluation.
function _jacobian_ad_forward_structarrays(semi, t0, u0_ode_plain, du_ode_plain, config)
    # Rebuild the semidiscretization with the dual number eltype of `config`
    new_semi = remake(semi, uEltype = eltype(config))
    rhs_semi! = default_rhs(new_semi)
    # Create anonymous function passed as first argument to `ForwardDiff.jacobian` to match
    # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray,
    #                       cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())`
    J = ForwardDiff.jacobian(du_ode_plain, u0_ode_plain,
                             config) do du_ode_plain, u_ode_plain
        # Wrap each trailing-dimension slice of the plain (dual) arrays as one
        # `StructArray` component via non-allocating views
        u_ode = StructArray{SVector{nvariables(semi), eltype(config)}}(ntuple(v -> view(u_ode_plain,
                                                                                        :,
                                                                                        :,
                                                                                        v),
                                                                              nvariables(semi)))
        du_ode = StructArray{SVector{nvariables(semi), eltype(config)}}(ntuple(v -> view(du_ode_plain,
                                                                                         :,
                                                                                         :,
                                                                                         v),
                                                                               nvariables(semi)))
        return rhs_semi!(du_ode, u_ode, new_semi, t0)
    end

    return J
end
450
451
# This version is specialized to arrays of `StaticArray`s used by some `DGMulti` solvers.
# We need to convert the numerical solution vectors since ForwardDiff cannot
# handle arrays of `SVector`s.
function jacobian_ad_forward(semi::AbstractSemidiscretization, t0,
                             _u0_ode::AbstractArray{<:SVector})
    # Reinterpret as an array of the underlying scalar type (no copy)
    u0_plain = reinterpret(eltype(eltype(_u0_ode)), _u0_ode)
    du_plain = similar(u0_plain)
    jac_config = ForwardDiff.JacobianConfig(nothing, du_plain, u0_plain)

    # Use a function barrier since the generation of the `jac_config` above
    # is not type-stable
    return _jacobian_ad_forward_staticarrays(semi, t0, u0_plain, du_plain,
                                             jac_config)
end
465
466
# Kernel behind the `AbstractArray{<:SVector}` version of `jacobian_ad_forward`:
# operates on the reinterpreted plain arrays and reinterprets them back as
# arrays of `SVector`s for each RHS evaluation.
function _jacobian_ad_forward_staticarrays(semi, t0, u0_ode_plain, du_ode_plain, config)
    # Rebuild the semidiscretization with the dual number eltype of `config`
    dual_semi = remake(semi, uEltype = eltype(config))
    dual_rhs! = default_rhs(dual_semi)
    J = ForwardDiff.jacobian(du_ode_plain, u0_ode_plain,
                             config) do du_plain, u_plain
        u_ode = reinterpret(SVector{nvariables(semi), eltype(config)}, u_plain)
        du_ode = reinterpret(SVector{nvariables(semi), eltype(config)}, du_plain)
        return dual_rhs!(du_ode, u_ode, dual_semi, t0)
    end

    return J
end
478
479
# Sometimes, it can be useful to save some (scalar) variables associated with each element,
# e.g. AMR indicators or shock indicators. Since these usually have to be re-computed
# directly before IO and do not necessarily need to be stored in memory before,
#   get_element_variables!(element_variables, ..)
# is used to retrieve such up to date element variables, modifying
# `element_variables::Dict{Symbol,Any}` in place.
function get_element_variables!(element_variables, u_ode,
                                semi::AbstractSemidiscretization)
    u_wrapped = wrap_array(u_ode, semi)
    mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
    return get_element_variables!(element_variables, u_wrapped,
                                  mesh, equations, solver, cache)
end
491
492
# Required for storing `extra_node_variables` in the `SaveSolutionCallback`.
# Not to be confused with `get_node_vars` which returns the variables of the simulated equation.
function get_node_variables!(node_variables, u_ode, semi::AbstractSemidiscretization)
    mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
    return get_node_variables!(node_variables, u_ode,
                               mesh, equations, solver, cache)
end
498
499
# To implement AMR and use OrdinaryDiffEq.jl etc., we have to be a bit creative.
500
# Since the caches of the SciML ecosystem are immutable structs, we cannot simply
501
# change the underlying arrays therein. Hence, to support changing the number of
502
# DOFs, we need to use `resize!`. In some sense, this will force us to write more
503
# efficient code, since `resize!` will make use of previously allocated memory
504
# instead of allocating memory from scratch every time.
505
#
506
# However, multidimensional `Array`s don't support `resize!`. One option might be
507
# to use ElasticArrays.jl. But I don't really like that approach. Needing to use
508
# ElasticArray doesn't feel completely good to me, since we also want to experiment
509
# with other array types such as PaddedMatrices.jl, see trixi-framework/Trixi.jl#166.
510
# Then, we would need to wrap an Array inside something from PaddedMatrices.jl inside
511
# something from ElasticArrays.jl - or the other way round? Is that possible at all?
512
# If we go further, this looks like it could easily explode.
513
#
514
# Currently, the best option seems to be to let OrdinaryDiffEq.jl use `Vector`s,
515
# which can be `resize!`ed for AMR. Then, we have to wrap these `Vector`s inside
516
# Trixi.jl as our favorite multidimensional array type. We need to do this wrapping
517
# in every method exposed to OrdinaryDiffEq, i.e. in the first levels of things like
518
# rhs!, AMRCallback, StepsizeCallback, AnalysisCallback, SaveSolutionCallback
519
#
520
# This wrapping will also allow us to experiment more easily with additional
521
# kinds of wrapping, e.g. HybridArrays.jl or PaddedMatrices.jl to inform the
522
# compiler about the sizes of the first few dimensions in DG methods, i.e.
523
# nvariables(equations) and nnodes(dg).
524
#
525
# In some sense, having plain multidimensional `Array`s not support `resize!`
526
# isn't necessarily a bug (although it would be nice to add this possibility to
527
# base Julia) but can turn out to be a feature for us, because it will allow us
528
# more specializations.
529
# Since we can use multiple dispatch, these kinds of specializations can be
530
# tailored specifically to each combinations of mesh/solver etc.
531
#
532
# Under the hood, `wrap_array(u_ode, mesh, equations, solver, cache)` might
533
# (and probably will) use `unsafe_wrap`. Hence, you have to remember to
534
# `GC.@preserve` temporaries that are only used indirectly via `wrap_array`
535
# to avoid stochastic memory errors.
536
#
537
# Xref https://github.com/SciML/OrdinaryDiffEq.jl/pull/1275
538
function wrap_array(u_ode, semi::AbstractSemidiscretization)
    mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
    return wrap_array(u_ode, mesh, equations, solver, cache)
end
541
542
# Like `wrap_array`, but guarantees to return a plain `Array`, which can be better
# for writing solution files etc.
function wrap_array_native(u_ode, semi::AbstractSemidiscretization)
    mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
    return wrap_array_native(u_ode, mesh, equations, solver, cache)
end
547
548
# TODO: Taal, document interface?
549
# New mesh/solver combinations have to implement
550
# - ndofs(mesh, solver, cache)
551
# - ndofsglobal(mesh, solver, cache)
552
# - ndims(mesh)
553
# - nnodes(solver)
554
# - real(solver)
555
# - create_cache(mesh, equations, solver, RealT)
556
# - wrap_array(u_ode, mesh, equations, solver, cache)
557
# - integrate(func, u, mesh, equations, solver, cache; normalize=true)
558
# - integrate_via_indices(func, u, mesh, equations, solver, cache, args...; normalize=true)
559
# - calc_error_norms(func, u, t, analyzer, mesh, equations, initial_condition, solver, cache, cache_analysis)
560
# - allocate_coefficients(mesh, equations, solver, cache)
561
# - compute_coefficients!(u, func, mesh, equations, solver, cache)
562
# - rhs!(du, u, t, mesh, equations, boundary_conditions, source_terms, solver, cache)
563
#
564
565
end # @muladd
566
567