Path: blob/main/src/solvers/fdsbp_tree/fdsbp_1d.jl
5591 views
# !!! warning "Experimental implementation (upwind SBP)"
#     This is an experimental feature and may change in future releases.

# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
@muladd begin
#! format: noindent

# 1D caches
#
# Allocate one flux-storage buffer per thread so the threaded volume integral
# below can write fluxes without synchronization. Each buffer holds one
# `SVector` of all conserved variables per node.
function create_cache(mesh::TreeMesh{1}, equations,
                      volume_integral::VolumeIntegralStrongForm,
                      dg, cache_containers, uEltype)
    prototype = Array{SVector{nvariables(equations), uEltype}, ndims(mesh)}(undef,
                                                                            ntuple(_ -> nnodes(dg),
                                                                                   ndims(mesh))...)
    f_threaded = [similar(prototype) for _ in 1:Threads.maxthreadid()]

    return (; f_threaded)
end

# Cache for the upwind volume integral: per-thread storage for the two-way
# flux splitting (f^-, f^+). The `StructArray` wraps the minus/plus components
# so a single broadcast can fill both at once, while the unwrapped component
# arrays are also stored for the subsequent `mul!` calls.
function create_cache(mesh::TreeMesh{1}, equations,
                      volume_integral::VolumeIntegralUpwind,
                      dg, cache_containers, uEltype)
    u_node = SVector{nvariables(equations), uEltype}(ntuple(_ -> zero(uEltype),
                                                            Val{nvariables(equations)}()))
    f = StructArray([(u_node, u_node)])
    f_minus_plus_threaded = [similar(f, ntuple(_ -> nnodes(dg), ndims(mesh))...)
                             for _ in 1:Threads.maxthreadid()]

    # Unpack the minus/plus components of each per-thread buffer. They alias
    # the same memory as `f_minus_plus_threaded`, so broadcasting into the
    # wrapper updates these views as well.
    f_minus, f_plus = StructArrays.components(f_minus_plus_threaded[1])
    f_minus_threaded = [f_minus]
    f_plus_threaded = [f_plus]
    for i in 2:Threads.maxthreadid()
        f_minus, f_plus = StructArrays.components(f_minus_plus_threaded[i])
        push!(f_minus_threaded, f_minus)
        push!(f_plus_threaded, f_plus)
    end

    return (; f_minus_plus_threaded, f_minus_threaded, f_plus_threaded)
end

# 1D volume integral contributions for `VolumeIntegralStrongForm`
# (the original comment said "2D", which was incorrect for this 1D method).
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::TreeMesh{1},
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralStrongForm,
                               dg::FDSBP, cache)
    D = dg.basis # SBP derivative operator
    @unpack f_threaded = cache

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end

    # Use the tensor product structure to compute the discrete derivatives of
    # the fluxes line-by-line and add them to `du` for each element.
    @threaded for element in eachelement(dg, cache)
        f_element = f_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, element)

        # x direction
        @. f_element = flux(u_element, 1, equations)
        # Accumulate D * f into du (the trailing 1, 1 arguments make `mul!`
        # perform du .+= D * f instead of overwriting).
        mul!(view(du_vectors, :, element), D, view(f_element, :),
             one(eltype(du)), one(eltype(du)))
    end

    return nothing
end
# 1D volume integral contributions for `VolumeIntegralUpwind`.
# Note that the plus / minus notation of the operators does not refer to the
# upwind / downwind directions of the fluxes.
# Instead, the plus / minus refers to the direction of the biasing within
# the finite difference stencils. Thus, the D^- operator acts on the positive
# part of the flux splitting f^+ and the D^+ operator acts on the negative part
# of the flux splitting f^-.
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::TreeMesh{1},
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralUpwind,
                               dg::FDSBP, cache)
    # Assume that
    # dg.basis isa SummationByPartsOperators.UpwindOperators
    D_minus = dg.basis.minus # Upwind SBP D^- derivative operator
    D_plus = dg.basis.plus # Upwind SBP D^+ derivative operator
    @unpack f_minus_plus_threaded, f_minus_threaded, f_plus_threaded = cache
    @unpack splitting = volume_integral

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end

    # Use the tensor product structure to compute the discrete derivatives of
    # the fluxes line-by-line and add them to `du` for each element.
    @threaded for element in eachelement(dg, cache)
        # f_minus_plus_element wraps the storage provided by f_minus_element and
        # f_plus_element such that we can use a single plain broadcasting below.
        # f_minus_element and f_plus_element are updated in broadcasting calls
        # of the form `@. f_minus_plus_element = ...`.
        f_minus_plus_element = f_minus_plus_threaded[Threads.threadid()]
        f_minus_element = f_minus_threaded[Threads.threadid()]
        f_plus_element = f_plus_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, element)

        # x direction: one splitting call fills both f^- and f^+ via the
        # StructArray wrapper; then apply the oppositely-biased operators.
        @. f_minus_plus_element = splitting(u_element, 1, equations)
        mul!(view(du_vectors, :, element), D_plus, view(f_minus_element, :),
             one(eltype(du)), one(eltype(du)))
        mul!(view(du_vectors, :, element), D_minus, view(f_plus_element, :),
             one(eltype(du)), one(eltype(du)))
    end

    return nothing
end

# Strong-form surface integral (simultaneous approximation terms, SATs):
# penalize the difference between the numerical surface flux and the interior
# flux at both element boundaries, scaled by the inverse boundary quadrature
# weights of the SBP operator.
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh{1},
                                equations, surface_integral::SurfaceIntegralStrongForm,
                                dg::DG, cache)
    inv_weight_left = inv(left_boundary_weight(dg.basis))
    inv_weight_right = inv(right_boundary_weight(dg.basis))
    @unpack surface_flux_values = cache.elements

    @threaded for element in eachelement(dg, cache)
        # surface at -x
        u_node = get_node_vars(u, equations, dg, 1, element)
        f_node = flux(u_node, 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 1, element)
        multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                   equations, dg, 1, element)

        # surface at +x
        u_node = get_node_vars(u, equations, dg, nnodes(dg), element)
        f_node = flux(u_node, 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 2, element)
        multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                   equations, dg, nnodes(dg), element)
    end

    return nothing
end

# Periodic FDSBP operators need to use a single element without boundaries,
# so the surface integral is a no-op in that case.
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh1D,
                                equations, surface_integral::SurfaceIntegralStrongForm,
                                dg::PeriodicFDSBP, cache)
    @assert nelements(dg, cache) == 1
    return nothing
end
# Specialized interface flux computation because the upwind solver does
# not require a standard numerical flux (Riemann solver). The flux splitting
# already separates the solution information into right-traveling and
# left-traveling information. So we only need to compute the appropriate
# flux information at each side of an interface.
function calc_interface_flux!(surface_flux_values,
                              mesh::TreeMesh{1},
                              have_nonconservative_terms::False, equations,
                              surface_integral::SurfaceIntegralUpwind,
                              dg::FDSBP, cache)
    @unpack splitting = surface_integral
    @unpack u, neighbor_ids, orientations = cache.interfaces

    @threaded for interface in eachinterface(dg, cache)
        # Get neighboring elements
        left_id = neighbor_ids[1, interface]
        right_id = neighbor_ids[2, interface]

        # Determine interface direction with respect to elements:
        # orientation = 1: left -> 2, right -> 1
        left_direction = 2 * orientations[interface]
        right_direction = 2 * orientations[interface] - 1

        # Pull the left and right solution data
        u_ll, u_rr = get_surface_node_vars(u, equations, dg, interface)

        # Compute the upwind coupling terms where right-traveling
        # information comes from the left and left-traveling information
        # comes from the right
        flux_minus_rr = splitting(u_rr, Val{:minus}(), orientations[interface],
                                  equations)
        flux_plus_ll = splitting(u_ll, Val{:plus}(), orientations[interface], equations)

        # Save the upwind coupling into the appropriate side of the elements
        for v in eachvariable(equations)
            surface_flux_values[v, left_direction, left_id] = flux_minus_rr[v]
            surface_flux_values[v, right_direction, right_id] = flux_plus_ll[v]
        end
    end

    return nothing
end

# Implementation of fully upwind SATs. The surface flux values are pre-computed
# in the specialized `calc_interface_flux` routine. These SATs are still of
# a strong form penalty type, except that the interior flux at a particular
# side of the element are computed in the upwind direction.
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh{1},
                                equations, surface_integral::SurfaceIntegralUpwind,
                                dg::FDSBP, cache)
    inv_weight_left = inv(left_boundary_weight(dg.basis))
    inv_weight_right = inv(right_boundary_weight(dg.basis))
    @unpack surface_flux_values = cache.elements
    @unpack splitting = surface_integral

    @threaded for element in eachelement(dg, cache)
        # surface at -x: the interior contribution uses the f^+ part,
        # matching the right-traveling information entering from the left.
        u_node = get_node_vars(u, equations, dg, 1, element)
        f_node = splitting(u_node, Val{:plus}(), 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 1, element)
        multiply_add_to_node_vars!(du, inv_weight_left, -(f_num - f_node),
                                   equations, dg, 1, element)

        # surface at +x: the interior contribution uses the f^- part,
        # matching the left-traveling information entering from the right.
        u_node = get_node_vars(u, equations, dg, nnodes(dg), element)
        f_node = splitting(u_node, Val{:minus}(), 1, equations)
        f_num = get_node_vars(surface_flux_values, equations, dg, 2, element)
        multiply_add_to_node_vars!(du, inv_weight_right, +(f_num - f_node),
                                   equations, dg, nnodes(dg), element)
    end

    return nothing
end

# Periodic FDSBP operators need to use a single element without boundaries,
# so the surface integral is a no-op in that case.
function calc_surface_integral!(backend::Nothing, du, u, mesh::TreeMesh1D,
                                equations, surface_integral::SurfaceIntegralUpwind,
                                dg::PeriodicFDSBP, cache)
    @assert nelements(dg, cache) == 1
    return nothing
end

# AnalysisCallback

# Quadrature of `func` over the whole domain using the diagonal of the SBP
# mass matrix as quadrature weights; optionally normalized by the total volume.
function integrate_via_indices(func::Func, u,
                               mesh::TreeMesh{1}, equations,
                               dg::FDSBP, cache, args...; normalize = true) where {Func}
    # TODO: FD. This is rather inefficient right now and allocates...
    M = SummationByPartsOperators.mass_matrix(dg.basis)
    # Some operators return `I` (a `UniformScaling`); materialize it so the
    # diagonal can be extracted uniformly.
    if M isa UniformScaling
        M = M(nnodes(dg))
    end
    weights = diag(M)

    # Initialize integral with zeros of the right shape
    integral = zero(func(u, 1, 1, equations, dg, args...))

    # Use quadrature to numerically integrate over entire domain
    @batch reduction=(+, integral) for element in eachelement(dg, cache)
        volume_jacobian_ = volume_jacobian(element, mesh, cache)
        for i in eachnode(dg)
            integral += volume_jacobian_ * weights[i] *
                        func(u, i, element, equations, dg, args...)
        end
    end

    # Normalize with total volume
    if normalize
        integral = integral / total_volume(mesh)
    end

    return integral
end

# Discrete L2 and L∞ errors of `func` against the exact solution given by
# `initial_condition` evaluated at time `t`, using the SBP mass matrix diagonal
# as quadrature weights for the L2 norm.
function calc_error_norms(func, u, t, analyzer,
                          mesh::TreeMesh{1}, equations, initial_condition,
                          dg::FDSBP, cache, cache_analysis)
    # TODO: FD. This is rather inefficient right now and allocates...
    M = SummationByPartsOperators.mass_matrix(dg.basis)
    if M isa UniformScaling
        M = M(nnodes(dg))
    end
    weights = diag(M)
    @unpack node_coordinates = cache.elements

    # Set up data structures
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1), equations))
    linf_error = copy(l2_error)

    # Iterate over all elements for error calculations
    for element in eachelement(dg, cache)
        # Calculate errors at each node
        volume_jacobian_ = volume_jacobian(element, mesh, cache)

        for i in eachnode(analyzer)
            u_exact = initial_condition(get_node_coords(node_coordinates, equations, dg,
                                                        i, element), t, equations)
            diff = func(u_exact, equations) -
                   func(get_node_vars(u, equations, dg, i, element), equations)
            l2_error += diff .^ 2 * (weights[i] * volume_jacobian_)
            linf_error = @. max(linf_error, abs(diff))
        end
    end

    # For L2 error, divide by total volume
    total_volume_ = total_volume(mesh)
    l2_error = @. sqrt(l2_error / total_volume_)

    return l2_error, linf_error
end
end # @muladd