Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Ok-landscape
GitHub Repository: Ok-landscape/computational-pipeline
Path: blob/main/latex-templates/templates/numerical-methods/integration.tex
51 views
unlisted
1
\documentclass[a4paper, 11pt]{report}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{amsmath, amssymb}
\usepackage{graphicx}
\usepackage{siunitx}
\usepackage{booktabs}
\usepackage{xcolor}
\usepackage[makestderr]{pythontex}

\definecolor{trap}{RGB}{231, 76, 60}
\definecolor{simp}{RGB}{46, 204, 113}
\definecolor{gauss}{RGB}{52, 152, 219}
\definecolor{romberg}{RGB}{155, 89, 182}

\title{Numerical Integration Methods:\\
Quadrature Algorithms and Error Analysis}
\author{Department of Computational Mathematics\\Technical Report NM-2024-001}
\date{\today}
21
\begin{document}
22
\maketitle
23
24
\begin{abstract}
This report presents a comprehensive analysis of numerical integration (quadrature) methods. We implement and compare the trapezoidal rule, Simpson's rule, Romberg integration, and Gaussian quadrature. Error analysis demonstrates convergence rates, and we verify results against analytically known integrals. All computations use PythonTeX for reproducibility.
\end{abstract}
27
28
\tableofcontents
29
30
\chapter{Introduction}
31
32
Numerical integration approximates definite integrals when analytical solutions are unavailable or impractical. We seek to compute:
33
\begin{equation}
I = \int_a^b f(x) \, dx
\end{equation}
36
37
\section{Newton-Cotes Formulas}
38
These methods interpolate $f(x)$ with polynomials at equally spaced nodes:
39
\begin{itemize}
40
\item Trapezoidal rule: Linear interpolation
41
\item Simpson's rule: Quadratic interpolation
42
\item Higher-order rules: Cubic, quartic, etc.
43
\end{itemize}
44
45
\chapter{Trapezoidal Rule}
46
47
\section{Formula}
48
The composite trapezoidal rule with $n$ subintervals:
49
\begin{equation}
T_n = h\left[\frac{f(a) + f(b)}{2} + \sum_{i=1}^{n-1} f(a + ih)\right], \quad h = \frac{b-a}{n}
\end{equation}

Error: $E_T = -\frac{(b-a)h^2}{12}f''(\xi) = O(h^2)$
54
55
\begin{pycode}
56
import numpy as np
57
import matplotlib.pyplot as plt
58
from scipy import integrate
59
from scipy.special import roots_legendre
60
plt.rc('text', usetex=True)
61
plt.rc('font', family='serif')
62
63
np.random.seed(42)
64
65
def trapezoidal(f, a, b, n):
    """Composite trapezoidal rule for f on [a, b] with n subintervals.

    Endpoints receive half weight; interior nodes full weight.
    """
    step = (b - a) / n
    grid = np.linspace(a, b, n + 1)
    vals = f(grid)
    interior = np.sum(vals[1:-1])
    return step * (0.5 * (vals[0] + vals[-1]) + interior)
70
71
def simpson(f, a, b, n):
    """Composite Simpson's 1/3 rule on [a, b].

    Simpson's rule needs an even subinterval count; an odd n is bumped
    up to the next even value before evaluating.
    """
    if n % 2:
        n = n + 1
    width = (b - a) / n
    xs = np.linspace(a, b, n + 1)
    ys = f(xs)
    odd_nodes = np.sum(ys[1:-1:2])    # weight 4
    even_nodes = np.sum(ys[2:-1:2])   # weight 2
    return (width / 3) * (ys[0] + 4 * odd_nodes + 2 * even_nodes + ys[-1])
78
79
def romberg(f, a, b, max_iter=10, tol=1e-10):
    """Romberg integration of f over [a, b].

    Builds a lower-triangular tableau R where R[i, 0] is the composite
    trapezoidal estimate with 2**i subintervals and R[i, j] applies j
    levels of Richardson extrapolation.

    Returns (estimate, iterations, tableau); on convergence the tableau
    is truncated to the rows actually computed.
    """
    R = np.zeros((max_iter, max_iter))
    h = b - a
    R[0, 0] = 0.5 * h * (f(a) + f(b))

    for i in range(1, max_iter):
        h = h / 2
        # Only the newly introduced midpoints need fresh evaluations.
        sum_new = sum(f(a + (2*k - 1)*h) for k in range(1, 2**(i-1) + 1))
        R[i, 0] = 0.5 * R[i-1, 0] + h * sum_new

        for j in range(1, i + 1):
            R[i, j] = R[i, j-1] + (R[i, j-1] - R[i-1, j-1]) / (4**j - 1)

        # Loop starts at i = 1, so the former `i > 0` guard was redundant.
        if abs(R[i, i] - R[i-1, i-1]) < tol:
            return R[i, i], i, R[:i+1, :i+1]

    return R[max_iter-1, max_iter-1], max_iter, R
96
97
def gauss_legendre(f, a, b, n):
    """n-point Gauss-Legendre quadrature of f on [a, b]."""
    nodes, weights = roots_legendre(n)
    # Affine map of the reference interval [-1, 1] onto [a, b].
    half_width = 0.5 * (b - a)
    midpoint = 0.5 * (a + b)
    mapped = half_width * nodes + midpoint
    return half_width * np.dot(weights, f(mapped))
102
\end{pycode}
103
104
\section{Visualization}
105
106
\begin{pycode}
107
# Test integrand and reference value
def f_test(x):
    """Gaussian bell curve; has no elementary antiderivative."""
    return np.exp(-x**2)

a, b = 0, 2
# Reference value from scipy's adaptive quadrature.  (A second, never-used
# reconstruction of this value via the complementary error integral was
# dead code and has been removed.)
exact_value = integrate.quad(f_test, a, b)[0]

fig, axes = plt.subplots(2, 2, figsize=(12, 10))

# (a) Trapezoidal rule: shade one linear panel per subinterval.
ax = axes[0, 0]
x_plot = np.linspace(a, b, 200)
ax.plot(x_plot, f_test(x_plot), 'b-', linewidth=2, label='$f(x) = e^{-x^2}$')
n_trap = 6
x_trap = np.linspace(a, b, n_trap + 1)
y_trap = f_test(x_trap)
for i in range(n_trap):
    ax.fill([x_trap[i], x_trap[i], x_trap[i+1], x_trap[i+1]],
            [0, y_trap[i], y_trap[i+1], 0], alpha=0.3, color='red')
ax.scatter(x_trap, y_trap, color='red', s=50, zorder=5)
ax.set_xlabel('$x$')
ax.set_ylabel('$f(x)$')
ax.set_title(f'Trapezoidal Rule ($n = {n_trap}$)')
ax.legend()
ax.grid(True, alpha=0.3)

# (b) Simpson's rule: shade the fitted parabola over each node triple.
ax = axes[0, 1]
ax.plot(x_plot, f_test(x_plot), 'b-', linewidth=2, label='$f(x) = e^{-x^2}$')
n_simp = 4
h_simp = (b - a) / n_simp
for i in range(0, n_simp, 2):
    x0, x1, x2 = a + i*h_simp, a + (i+1)*h_simp, a + (i+2)*h_simp
    y0, y1, y2 = f_test(x0), f_test(x1), f_test(x2)
    # Quadratic through the three Simpson nodes of this panel.
    coeffs = np.polyfit([x0, x1, x2], [y0, y1, y2], 2)
    x_para = np.linspace(x0, x2, 50)
    y_para = np.polyval(coeffs, x_para)
    ax.fill_between(x_para, 0, y_para, alpha=0.3, color='green')
x_simp = np.linspace(a, b, n_simp + 1)
ax.scatter(x_simp, f_test(x_simp), color='green', s=50, zorder=5)
ax.set_xlabel('$x$')
ax.set_ylabel('$f(x)$')
ax.set_title(f"Simpson's Rule ($n = {n_simp}$)")
ax.legend()
ax.grid(True, alpha=0.3)

# (c) Absolute error vs. number of subintervals, log-log.
ax = axes[1, 0]
n_values = np.array([4, 8, 16, 32, 64, 128, 256])
trap_errors = []
simp_errors = []
gauss_errors = []

for n in n_values:
    trap_errors.append(abs(trapezoidal(f_test, a, b, n) - exact_value))
    simp_errors.append(abs(simpson(f_test, a, b, n) - exact_value))
    if n <= 20:
        # High-order Gauss rules reach machine precision quickly; larger n
        # would only plot rounding noise.
        gauss_errors.append(abs(gauss_legendre(f_test, a, b, n) - exact_value))

ax.loglog(n_values, trap_errors, 'ro-', label='Trapezoidal $O(h^2)$')
ax.loglog(n_values, simp_errors, 'gs-', label="Simpson's $O(h^4)$")
ax.loglog(n_values[:len(gauss_errors)], gauss_errors, 'b^-', label='Gauss-Legendre')
ax.set_xlabel('Number of subintervals $n$')
ax.set_ylabel('Absolute Error')
ax.set_title('Convergence Comparison')
ax.legend()
ax.grid(True, alpha=0.3)

# (d) Romberg tableau error as a heatmap.
ax = axes[1, 1]
result, iters, R = romberg(f_test, a, b)
R_display = R.copy()
R_display[R_display == 0] = np.nan   # blank the unused upper triangle
im = ax.imshow(np.abs(R_display - exact_value), cmap='viridis_r', aspect='auto')
ax.set_xlabel('Richardson Extrapolation Level $j$')
ax.set_ylabel('Refinement Level $i$')
ax.set_title('Romberg Tableau (Error)')
plt.colorbar(im, ax=ax, label='$|R_{i,j} - I|$')

plt.tight_layout()
plt.savefig('integration_methods.pdf', dpi=150, bbox_inches='tight')
plt.close()
192
\end{pycode}
193
194
\begin{figure}[htbp]
195
\centering
196
\includegraphics[width=0.95\textwidth]{integration_methods.pdf}
197
\caption{Numerical integration methods: (a) trapezoidal approximation, (b) Simpson's parabolic approximation, (c) error convergence, (d) Romberg tableau.}
198
\end{figure}
199
200
\chapter{Simpson's Rule}
201
202
\section{Formula}
203
Composite Simpson's 1/3 rule (requires even $n$):
204
\begin{equation}
S_n = \frac{h}{3}\left[f(a) + 4\sum_{i=1,3,5,\dots}^{n-1} f(a+ih) + 2\sum_{i=2,4,6,\dots}^{n-2} f(a+ih) + f(b)\right]
\end{equation}

Error: $E_S = -\frac{(b-a)h^4}{180}f^{(4)}(\xi) = O(h^4)$
209
210
\chapter{Romberg Integration}
211
212
\section{Richardson Extrapolation}
213
Romberg integration uses the trapezoidal rule with Richardson extrapolation:
214
\begin{equation}
R_{i,j} = R_{i,j-1} + \frac{R_{i,j-1} - R_{i-1,j-1}}{4^j - 1}
\end{equation}
217
218
\begin{pycode}
219
# Benchmark several integrals against scipy's adaptive reference.
test_functions = [
    (lambda x: np.exp(-x**2), 0, 2, "e^{-x^2}"),
    (lambda x: np.sin(x)/x if x != 0 else 1.0, 0.001, np.pi, "\\sin(x)/x"),
    (lambda x: 1/(1 + x**2), 0, 1, "1/(1+x^2)"),
]

results_table = []
# BUG FIX: this loop previously unpacked into `a` and `b`, silently
# clobbering the module-level integration limits (a, b = 0, 2) that later
# pycode blocks still read (PythonTeX runs all blocks in one session).
for func, lo, hi, name in test_functions:
    f_vec = np.vectorize(func)   # lets scalar-only lambdas accept arrays
    ref = integrate.quad(f_vec, lo, hi)[0]

    trap_32 = trapezoidal(f_vec, lo, hi, 32)
    simp_32 = simpson(f_vec, lo, hi, 32)
    gauss_8 = gauss_legendre(f_vec, lo, hi, 8)
    romb_val, _, _ = romberg(f_vec, lo, hi)

    results_table.append({
        'name': name,
        'exact': ref,
        'trap_err': abs(trap_32 - ref),
        'simp_err': abs(simp_32 - ref),
        'gauss_err': abs(gauss_8 - ref),
        'romb_err': abs(romb_val - ref)
    })
244
\end{pycode}
245
246
\begin{table}[htbp]
247
\centering
248
\caption{Error comparison for different integrals}
249
\begin{tabular}{@{}lcccc@{}}
250
\toprule
251
Function & Trapezoidal & Simpson & Gauss-8 & Romberg \\
252
\midrule
253
$\py{results_table[0]['name']}$ & \py{f"{results_table[0]['trap_err']:.2e}"} & \py{f"{results_table[0]['simp_err']:.2e}"} & \py{f"{results_table[0]['gauss_err']:.2e}"} & \py{f"{results_table[0]['romb_err']:.2e}"} \\
254
$\py{results_table[1]['name']}$ & \py{f"{results_table[1]['trap_err']:.2e}"} & \py{f"{results_table[1]['simp_err']:.2e}"} & \py{f"{results_table[1]['gauss_err']:.2e}"} & \py{f"{results_table[1]['romb_err']:.2e}"} \\
255
$\py{results_table[2]['name']}$ & \py{f"{results_table[2]['trap_err']:.2e}"} & \py{f"{results_table[2]['simp_err']:.2e}"} & \py{f"{results_table[2]['gauss_err']:.2e}"} & \py{f"{results_table[2]['romb_err']:.2e}"} \\
256
\bottomrule
257
\end{tabular}
258
\end{table}
259
260
\chapter{Gaussian Quadrature}
261
262
\section{Theory}
263
Gaussian quadrature chooses nodes and weights optimally:
264
\begin{equation}
\int_{-1}^{1} f(x) \, dx \approx \sum_{i=1}^{n} w_i f(x_i)
\end{equation}

Nodes $x_i$ are the roots of Legendre polynomials. An $n$-point formula is exact for polynomials of degree $\leq 2n-1$.
269
270
\begin{pycode}
271
# Gaussian quadrature analysis
fig, axes = plt.subplots(1, 3, figsize=(14, 4))

# (left) Node positions; marker area is proportional to the weight.
ax = axes[0]
for n in [2, 3, 4, 5]:
    nodes, weights = roots_legendre(n)
    ax.scatter(nodes, [n]*len(nodes), s=weights*500, alpha=0.7, label=f'$n = {n}$')
ax.set_xlabel('Node position $x_i$')
ax.set_ylabel('Number of points $n$')
ax.set_title('Gauss-Legendre Nodes')
ax.set_xlim(-1.1, 1.1)
ax.legend()
ax.grid(True, alpha=0.3)

# (center) Degree of polynomial exactness: 2n - 1 for an n-point rule.
ax = axes[1]
n_points = range(2, 11)
degrees_exact = []
for n in n_points:
    # Numerically verify that the n-point rule integrates x**(2n-1)
    # exactly on [-1, 1]; every entry of degrees_exact should be True.
    deg = 2*n - 1
    poly = lambda x, d=deg: x**d
    exact_poly = 2 / (deg + 1) if deg % 2 == 0 else 0
    gauss_result = gauss_legendre(poly, -1, 1, n)
    error = abs(gauss_result - exact_poly)
    degrees_exact.append(error < 1e-10)
ax.bar(n_points, [2*n-1 for n in n_points], color='blue', alpha=0.7)
ax.set_xlabel('Number of points $n$')
ax.set_ylabel('Exact polynomial degree')
ax.set_title('Polynomial Exactness: $2n-1$')
ax.grid(True, alpha=0.3)

# (right) Convergence on the reference problem e^{-x^2} over [0, 2].
ax = axes[2]
n_gauss = range(2, 16)
# BUG FIX: this previously used the globals `a, b`, which the benchmark
# loop in an earlier pycode block had overwritten with the last test
# function's limits (0, 1), while exact_value refers to [0, 2].  Use the
# explicit limits so the plotted errors are meaningful.
gauss_errors_exp = [abs(gauss_legendre(f_test, 0, 2, n) - exact_value) for n in n_gauss]
ax.semilogy(n_gauss, gauss_errors_exp, 'b^-')
ax.set_xlabel('Number of Gauss points $n$')
ax.set_ylabel('Absolute Error')
ax.set_title('Gauss-Legendre Convergence')
ax.grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('gaussian_quadrature.pdf', dpi=150, bbox_inches='tight')
plt.close()
317
\end{pycode}
318
319
\begin{figure}[htbp]
320
\centering
321
\includegraphics[width=0.95\textwidth]{gaussian_quadrature.pdf}
322
\caption{Gaussian quadrature: node positions (left), polynomial exactness (center), convergence for $e^{-x^2}$ (right).}
323
\end{figure}
324
325
\chapter{Adaptive Quadrature}
326
327
\begin{pycode}
328
# Adaptive Simpson's rule
def adaptive_simpson(f, a, b, tol=1e-8, max_depth=50):
    """Adaptive Simpson quadrature with Richardson error correction.

    Each interval is split in two; the pair of half-interval Simpson
    estimates is compared against the whole-interval estimate, and
    recursion stops when the discrepancy meets the (halved) tolerance
    or max_depth is reached.
    """
    def refine(lo, hi, f_lo, f_mid, f_hi, whole, eps, depth):
        mid = (lo + hi) / 2
        left_mid = (lo + mid) / 2
        right_mid = (mid + hi) / 2
        f_lm = f(left_mid)
        f_rm = f(right_mid)
        width = hi - lo

        left = width/12 * (f_lo + 4*f_lm + f_mid)
        right = width/12 * (f_mid + 4*f_rm + f_hi)
        refined = left + right

        if depth >= max_depth or abs(refined - whole) < 15*eps:
            # Richardson extrapolation: one extra order of accuracy.
            return refined + (refined - whole)/15

        return (refine(lo, mid, f_lo, f_lm, f_mid, left, eps/2, depth + 1) +
                refine(mid, hi, f_mid, f_rm, f_hi, right, eps/2, depth + 1))

    mid = (a + b) / 2
    f_a, f_m, f_b = f(a), f(mid), f(b)
    whole = (b - a)/6 * (f_a + 4*f_m + f_b)
    return refine(a, b, f_a, f_m, f_b, whole, tol, 0)
352
353
# Test on oscillatory function
def f_osc(x):
    # Damped oscillation; rapid sign changes make fixed-step rules wasteful.
    return np.sin(10*x) * np.exp(-x)

# Reference value and the two competing estimates reported in the text below.
exact_osc = integrate.quad(f_osc, 0, 2*np.pi)[0]
adaptive_result = adaptive_simpson(f_osc, 0, 2*np.pi)
fixed_result = simpson(f_osc, 0, 2*np.pi, 100)

# Plot the integrand with its signed area shaded.
fig, ax = plt.subplots(figsize=(10, 4))
x = np.linspace(0, 2*np.pi, 500)
ax.plot(x, f_osc(x), 'b-', linewidth=1)
ax.fill_between(x, 0, f_osc(x), alpha=0.3)
ax.set_xlabel('$x$')
ax.set_ylabel('$f(x)$')
ax.set_title('$f(x) = \\sin(10x) e^{-x}$')
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig('adaptive_function.pdf', dpi=150, bbox_inches='tight')
plt.close()
372
\end{pycode}
373
374
\begin{figure}[htbp]
375
\centering
376
\includegraphics[width=0.8\textwidth]{adaptive_function.pdf}
377
\caption{Oscillatory test function for adaptive quadrature.}
378
\end{figure}
379
380
Adaptive Simpson error: $\py{f'{abs(adaptive_result - exact_osc):.2e}'}$ \\
381
Fixed Simpson (n=100) error: $\py{f'{abs(fixed_result - exact_osc):.2e}'}$
382
383
\chapter{Summary}
384
385
\begin{pycode}
386
summary_data = [
387
('Trapezoidal', '$O(h^2)$', 'Simple, low accuracy'),
388
("Simpson's", '$O(h^4)$', 'Good accuracy, even $n$'),
389
('Romberg', '$O(h^{2k})$', 'High accuracy, extrapolation'),
390
('Gauss-Legendre', 'Exponential', 'Optimal for smooth functions'),
391
]
392
\end{pycode}
393
394
\begin{table}[htbp]
395
\centering
396
\caption{Summary of quadrature methods}
397
\begin{tabular}{@{}lll@{}}
398
\toprule
399
Method & Convergence & Notes \\
400
\midrule
401
\py{summary_data[0][0]} & \py{summary_data[0][1]} & \py{summary_data[0][2]} \\
402
\py{summary_data[1][0]} & \py{summary_data[1][1]} & \py{summary_data[1][2]} \\
403
\py{summary_data[2][0]} & \py{summary_data[2][1]} & \py{summary_data[2][2]} \\
404
\py{summary_data[3][0]} & \py{summary_data[3][1]} & \py{summary_data[3][2]} \\
405
\bottomrule
406
\end{tabular}
407
\end{table}
408
409
\chapter{Conclusions}
410
411
\begin{enumerate}
\item Trapezoidal rule: $O(h^2)$, simple but low accuracy
\item Simpson's rule: $O(h^4)$, good balance of simplicity and accuracy
\item Romberg: achieves high accuracy through Richardson extrapolation
\item Gaussian quadrature: optimal for smooth functions, exponential convergence
\item Adaptive methods: efficient for oscillatory or peaked integrands
\end{enumerate}
418
419
\end{document}
420
421