# src/solvers/fdsbp_unstructured/fdsbp_2d.jl
# !!! warning "Experimental implementation (curvilinear FDSBP)"
#     This is an experimental feature and may change in future releases.

# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
@muladd begin
#! format: noindent

# 2D unstructured cache
# Assembles the element, interface, and boundary containers for an
# `UnstructuredMesh2D` FDSBP discretization and appends the cache needed by
# the chosen volume integral.
function create_cache(mesh::UnstructuredMesh2D, equations, dg::FDSBP, RealT, uEltype)
    elements = init_elements(mesh, equations, dg.basis, RealT, uEltype)

    interfaces = init_interfaces(mesh, elements)

    boundaries = init_boundaries(mesh, elements)

    # Container cache
    cache = (; elements, interfaces, boundaries)

    # Add Volume-Integral cache
    cache = (; cache...,
             create_cache(mesh, equations, dg.volume_integral, dg, cache, uEltype)...)

    return cache
end

# 2D volume integral contributions for `VolumeIntegralStrongForm`
# OBS! This is the standard (not de-aliased) form of the volume integral.
# So it is not provably stable for variable coefficients due to the metric terms.
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::UnstructuredMesh2D,
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralStrongForm,
                               dg::FDSBP, cache)
    D = dg.basis # SBP derivative operator
    @unpack f_threaded = cache
    @unpack contravariant_vectors = cache.elements

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end

    # Use the tensor product structure to compute the discrete derivatives of
    # the contravariant fluxes line-by-line and add them to `du` for each element.
    @threaded for element in eachelement(dg, cache)
        # Thread-local scratch storage for the nodal fluxes of one element
        f_element = f_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, :, element)

        # x direction
        for j in eachnode(dg)
            for i in eachnode(dg)
                Ja1 = get_contravariant_vector(1, contravariant_vectors, i, j, element)
                f_element[i, j] = flux(u_element[i, j], Ja1, equations)
            end
            # In-place matvec: du[:, j] += D * f[:, j] (5-arg `mul!` accumulates)
            mul!(view(du_vectors, :, j, element), D, view(f_element, :, j),
                 one(eltype(du)), one(eltype(du)))
        end

        # y direction
        for i in eachnode(dg)
            for j in eachnode(dg)
                Ja2 = get_contravariant_vector(2, contravariant_vectors, i, j, element)
                f_element[i, j] = flux(u_element[i, j], Ja2, equations)
            end
            mul!(view(du_vectors, i, :, element), D, view(f_element, i, :),
                 one(eltype(du)), one(eltype(du)))
        end
    end

    return nothing
end

# 2D volume integral contributions for `VolumeIntegralUpwind`.
# Note that the plus / minus notation of the operators does not refer to the
# upwind / downwind directions of the fluxes.
# Instead, the plus / minus refers to the direction of the biasing within
# the finite difference stencils.
# Thus, the D^- operator acts on the positive
# part of the flux splitting f^+ and the D^+ operator acts on the negative part
# of the flux splitting f^-.
function calc_volume_integral!(backend::Nothing, du, u,
                               mesh::UnstructuredMesh2D,
                               have_nonconservative_terms::False, equations,
                               volume_integral::VolumeIntegralUpwind,
                               dg::FDSBP, cache)
    # Assume that
    # dg.basis isa SummationByPartsOperators.UpwindOperators
    D_minus = dg.basis.minus # Upwind SBP D^- derivative operator
    D_plus = dg.basis.plus # Upwind SBP D^+ derivative operator
    @unpack f_minus_plus_threaded, f_minus_threaded, f_plus_threaded = cache
    @unpack splitting = volume_integral
    @unpack contravariant_vectors = cache.elements

    # SBP operators from SummationByPartsOperators.jl implement the basic interface
    # of matrix-vector multiplication. Thus, we pass an "array of structures",
    # packing all variables per node in an `SVector`.
    if nvariables(equations) == 1
        # `reinterpret(reshape, ...)` removes the leading dimension only if more
        # than one variable is used.
        u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u),
                            nnodes(dg), nnodes(dg), nelements(dg, cache))
        du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)},
                                         du),
                             nnodes(dg), nnodes(dg), nelements(dg, cache))
    else
        u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u)
        du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)},
                                 du)
    end

    # Use the tensor product structure to compute the discrete derivatives of
    # the fluxes line-by-line and add them to `du` for each element.
    @threaded for element in eachelement(dg, cache)
        # f_minus_plus_element wraps the storage provided by f_minus_element and
        # f_plus_element such that we can use a single assignment below.
        # f_minus_element and f_plus_element are updated whenever we update
        # `f_minus_plus_element[i, j] = ...` below.
        # NOTE: because of this aliasing, the splitting assignment MUST happen
        # before the `mul!` calls that read f_minus_element / f_plus_element.
        f_minus_plus_element = f_minus_plus_threaded[Threads.threadid()]
        f_minus_element = f_minus_threaded[Threads.threadid()]
        f_plus_element = f_plus_threaded[Threads.threadid()]
        u_element = view(u_vectors, :, :, element)

        # x direction
        # We use flux vector splittings in the directions of the contravariant
        # basis vectors. Thus, we do not use a broadcasting operation like
        # @. f_minus_plus_element = splitting(u_element, 1, equations)
        # in the Cartesian case but loop over all nodes.
        for j in eachnode(dg), i in eachnode(dg)
            # contravariant vectors computed with central D matrix
            Ja1 = get_contravariant_vector(1, contravariant_vectors, i, j, element)
            f_minus_plus_element[i, j] = splitting(u_element[i, j], Ja1, equations)
        end

        for j in eachnode(dg)
            # du[:, j] += D^- * f^+[:, j] + D^+ * f^-[:, j] (accumulating `mul!`)
            mul!(view(du_vectors, :, j, element), D_minus, view(f_plus_element, :, j),
                 one(eltype(du)), one(eltype(du)))
            mul!(view(du_vectors, :, j, element), D_plus, view(f_minus_element, :, j),
                 one(eltype(du)), one(eltype(du)))
        end

        # y direction
        for j in eachnode(dg), i in eachnode(dg)
            # contravariant vectors computed with central D matrix
            Ja2 = get_contravariant_vector(2, contravariant_vectors, i, j, element)
            f_minus_plus_element[i, j] = splitting(u_element[i, j], Ja2, equations)
        end

        for i in eachnode(dg)
            mul!(view(du_vectors, i, :, element), D_minus, view(f_plus_element, i, :),
                 one(eltype(du)), one(eltype(du)))
            mul!(view(du_vectors, i, :, element), D_plus, view(f_minus_element, i, :),
                 one(eltype(du)), one(eltype(du)))
        end
    end

    return nothing
end
# Note! The local side numbering for the unstructured quadrilateral element implementation differs
# from the structured TreeMesh or StructuredMesh local side numbering:
#
#      TreeMesh/StructuredMesh sides   versus   UnstructuredMesh sides
#                  4                                      3
#          -----------------                      -----------------
#          |               |                      |               |
#          | ^ eta         |                      | ^ eta         |
#        1 | |             | 2                  4 | |             | 2
#          | |             |                      | |             |
#          | ---> xi       |                      | ---> xi       |
#          -----------------                      -----------------
#                  3                                      1
# Therefore, we require a different surface integral routine here despite their similar structure.
# Also, the normal directions are already outward pointing for `UnstructuredMesh2D` so all the
# surface contributions are added.
function calc_surface_integral!(backend::Nothing, du, u, mesh::UnstructuredMesh2D,
                                equations, surface_integral::SurfaceIntegralStrongForm,
                                dg::DG, cache)
    # Inverse boundary weights of the SBP norm matrix scale the SAT-type
    # surface penalty (f_num - f_node) at the left/right boundary nodes.
    inv_weight_left = inv(left_boundary_weight(dg.basis))
    inv_weight_right = inv(right_boundary_weight(dg.basis))
    @unpack normal_directions, surface_flux_values = cache.elements

    @threaded for element in eachelement(dg, cache)
        for l in eachnode(dg)
            # surface at -x
            u_node = get_node_vars(u, equations, dg, 1, l, element)
            # compute internal flux in normal direction on side 4
            outward_direction = get_surface_normal(normal_directions, l, 4, element)
            f_node = flux(u_node, outward_direction, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 4, element)
            multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node),
                                       equations, dg, 1, l, element)

            # surface at +x
            u_node = get_node_vars(u, equations, dg, nnodes(dg), l, element)
            # compute internal flux in normal direction on side 2
            outward_direction = get_surface_normal(normal_directions, l, 2, element)
            f_node = flux(u_node, outward_direction, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 2, element)
            multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node),
                                       equations, dg, nnodes(dg), l, element)

            # surface at -y
            u_node = get_node_vars(u, equations, dg, l, 1, element)
            # compute internal flux in normal direction on side 1
            outward_direction = get_surface_normal(normal_directions, l, 1, element)
            f_node = flux(u_node, outward_direction, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 1, element)
            multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node),
                                       equations, dg, l, 1, element)

            # surface at +y
            u_node = get_node_vars(u, equations, dg, l, nnodes(dg), element)
            # compute internal flux in normal direction on side 3
            outward_direction = get_surface_normal(normal_directions, l, 3, element)
            f_node = flux(u_node, outward_direction, equations)
            f_num = get_node_vars(surface_flux_values, equations, dg, l, 3, element)
            multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node),
                                       equations, dg, l, nnodes(dg), element)
        end
    end

    return nothing
end

# AnalysisCallback
function integrate_via_indices(func::Func, u,
                               mesh::UnstructuredMesh2D, equations,
                               dg::FDSBP, cache, args...; normalize = true) where {Func}
    # TODO: FD.
This is rather inefficient right now and allocates...243weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))244245# Initialize integral with zeros of the right shape246integral = zero(func(u, 1, 1, 1, equations, dg, args...))247total_volume = zero(real(mesh))248249# Use quadrature to numerically integrate over entire domain250@batch reduction=((+, integral), (+, total_volume)) for element in eachelement(dg,251cache)252for j in eachnode(dg), i in eachnode(dg)253volume_jacobian = abs(inv(cache.elements.inverse_jacobian[i, j, element]))254integral += volume_jacobian * weights[i] * weights[j] *255func(u, i, j, element, equations, dg, args...)256total_volume += volume_jacobian * weights[i] * weights[j]257end258end259260# Normalize with total volume261if normalize262integral = integral / total_volume263end264265return integral266end267268function calc_error_norms(func, u, t, analyzer,269mesh::UnstructuredMesh2D, equations, initial_condition,270dg::FDSBP, cache, cache_analysis)271# TODO: FD. This is rather inefficient right now and allocates...272weights = diag(SummationByPartsOperators.mass_matrix(dg.basis))273@unpack node_coordinates, inverse_jacobian = cache.elements274275# Set up data structures276l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1), equations))277linf_error = copy(l2_error)278total_volume = zero(real(mesh))279280# Iterate over all elements for error calculations281for element in eachelement(dg, cache)282for j in eachnode(analyzer), i in eachnode(analyzer)283volume_jacobian = abs(inv(cache.elements.inverse_jacobian[i, j, element]))284u_exact = initial_condition(get_node_coords(node_coordinates, equations, dg,285i, j, element), t, equations)286diff = func(u_exact, equations) -287func(get_node_vars(u, equations, dg, i, j, element), equations)288l2_error += diff .^ 2 * (weights[i] * weights[j] * volume_jacobian)289linf_error = @. 
max(linf_error, abs(diff))290total_volume += weights[i] * weights[j] * volume_jacobian291end292end293294# For L2 error, divide by total volume295l2_error = @. sqrt(l2_error / total_volume)296297return l2_error, linf_error298end299end # @muladd300301302