% build platform tag (ubuntu2404) — commented out: any text before \documentclass aborts compilation
% ubuntu2404
\documentclass[11pt,a4paper]{article}
% Linear Algebra and Matrix Computations Template
% SEO Keywords: linear algebra latex template, matrix computations latex, eigenvalue problems latex

\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{amsmath,amsfonts,amssymb}
\usepackage{mathtools}
\usepackage{siunitx}
\usepackage{graphicx}
\usepackage{float}
\usepackage{hyperref}
\usepackage{cleveref}
\usepackage{booktabs}
\usepackage{pythontex}

\usepackage[style=numeric,backend=biber]{biblatex}
\addbibresource{references.bib}

\newcommand{\norm}[1]{\left\|#1\right\|}
\newcommand{\cond}{\operatorname{cond}}
\newcommand{\rank}{\operatorname{rank}}
\newcommand{\trace}{\operatorname{tr}}

\title{Linear Algebra and Matrix Computations:\\
Advanced Algorithms and Numerical Methods}
\author{CoCalc Scientific Templates}
\date{\today}

\begin{document}

\maketitle

\begin{abstract}
This comprehensive linear algebra template demonstrates advanced matrix computations including eigenvalue algorithms, singular value decomposition, matrix factorizations, and iterative methods.
Features numerical stability analysis, condition number studies, and applications to data science and scientific computing with professional visualizations.

\textbf{Keywords:} linear algebra, matrix computations, eigenvalue problems, SVD, matrix factorizations, numerical linear algebra
\end{abstract}

\tableofcontents
\newpage

\section{Matrix Decompositions and Factorizations}
\label{sec:matrix-decompositions}

\begin{pycode}
import numpy as np
# Select the non-interactive Agg backend BEFORE importing pyplot so the
# backend choice takes effect (pythontex compiles without a display).
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.linalg import svd, qr, lu, cholesky, eigh, eigvals
from scipy.sparse import random as sparse_random
from scipy.sparse.linalg import spsolve, eigsh

np.random.seed(42)  # reproducible matrices across compilations

def demonstrate_matrix_factorizations():
    """Demonstrate various matrix factorizations.

    Builds a general, a symmetric positive definite (SPD), and a
    rectangular test matrix, runs LU/QR/Cholesky/SVD/eigendecompositions,
    prints each reconstruction error, and returns all factors in a dict.
    """
    # Generate test matrices
    n = 6
    A_general = np.random.randn(n, n)
    A_spd = A_general @ A_general.T + 0.1 * np.eye(n)  # Symmetric positive definite
    A_rectangular = np.random.randn(8, n)

    print("Matrix Factorizations and Decompositions:")
    print(f" Matrix size: {n}×{n}")

    # LU decomposition: A = P @ L @ U
    P, L, U = lu(A_general, permute_l=False)
    lu_error = np.linalg.norm(P @ L @ U - A_general)
    print(f" LU decomposition error: {lu_error:.2e}")

    # QR decomposition: A = Q @ R
    Q, R = qr(A_rectangular)
    qr_error = np.linalg.norm(Q @ R - A_rectangular)
    print(f" QR decomposition error: {qr_error:.2e}")

    # Cholesky decomposition: A = L @ L^T (requires SPD input)
    L_chol = cholesky(A_spd, lower=True)
    chol_error = np.linalg.norm(L_chol @ L_chol.T - A_spd)
    print(f" Cholesky decomposition error: {chol_error:.2e}")

    # Singular value decomposition: A = U @ S @ V^T
    U_svd, s, Vt = svd(A_rectangular)
    # For rectangular matrix reconstruction, need to handle dimensions correctly:
    # there are min(m, n) singular values, regardless of matrix orientation.
    m, n = A_rectangular.shape
    k = min(m, n)
    S_full = np.zeros((m, n))
    S_full[:k, :k] = np.diag(s)
    svd_error = np.linalg.norm(U_svd @ S_full @ Vt - A_rectangular)
    print(f" SVD reconstruction error: {svd_error:.2e}")

    # Eigenvalue decomposition (symmetric): A @ V = V @ diag(lambda)
    eigenvals, eigenvecs = eigh(A_spd)
    eigen_error = np.linalg.norm(A_spd @ eigenvecs - eigenvecs @ np.diag(eigenvals))
    print(f" Eigendecomposition error: {eigen_error:.2e}")

    return {
        'A_general': A_general, 'A_spd': A_spd, 'A_rectangular': A_rectangular,
        'LU': (P, L, U), 'QR': (Q, R), 'Cholesky': L_chol,
        'SVD': (U_svd, s, Vt), 'Eigen': (eigenvals, eigenvecs)
    }

def condition_number_analysis():
    """Analyze condition numbers and numerical stability.

    For several matrix sizes, builds a random SPD matrix, applies Tikhonov
    regularization, and records the 2-norm condition number and spectral
    norm. Returns (sizes, condition_numbers, spectral_norms).
    """
    # Create matrices with varying condition numbers
    sizes = [10, 20, 50, 100]
    condition_numbers = []
    spectral_norms = []

    for n in sizes:
        # Generate random matrix
        A = np.random.randn(n, n)

        # Make it symmetric positive definite
        A = A @ A.T

        # Add regularization to control condition number
        alpha = 1e-3
        A_reg = A + alpha * np.eye(n)

        cond_num = np.linalg.cond(A_reg)
        spec_norm = np.linalg.norm(A_reg, ord=2)

        condition_numbers.append(cond_num)
        spectral_norms.append(spec_norm)

    print(f"\nCondition Number Analysis:")
    for i, n in enumerate(sizes):
        print(f" Size {n:3d}: cond = {condition_numbers[i]:.2e}, 2-norm = {spectral_norms[i]:.2f}")

    return sizes, condition_numbers, spectral_norms

# Perform matrix factorization demonstrations
decompositions = demonstrate_matrix_factorizations()
sizes, cond_nums, spec_norms = condition_number_analysis()
\end{pycode}

\section{Eigenvalue Problems and Spectral Analysis}
\label{sec:eigenvalue-problems}

\begin{pycode}
def eigenvalue_algorithms_comparison():
    """Compare different eigenvalue algorithms.

    Times dense eigh() on a tridiagonal and a random symmetric matrix,
    and sparse eigsh() (partial spectrum) on a random sparse symmetric
    matrix. Returns a dict mapping matrix kind to
    (matrix, eigenvalues, eigenvectors).
    """
    # Create test matrices with known eigenvalue properties
    n = 50

    # Symmetric tridiagonal matrix (fast algorithms available)
    diag_main = 2 * np.ones(n)
    diag_off = -1 * np.ones(n-1)
    A_tridiag = np.diag(diag_main) + np.diag(diag_off, 1) + np.diag(diag_off, -1)

    # Random symmetric matrix
    A_rand = np.random.randn(n, n)
    A_symmetric = (A_rand + A_rand.T) / 2

    # Sparse matrix
    density = 0.1
    A_sparse = sparse_random(n, n, density=density, format='csr')
    A_sparse = A_sparse + A_sparse.T  # Make symmetric

    print(f"\nEigenvalue Algorithm Comparison:")
    print(f" Matrix size: {n}×{n}")

    # Full eigenvalue decomposition for dense matrices
    import time

    start_time = time.time()
    evals_tridiag, evecs_tridiag = eigh(A_tridiag)
    time_tridiag = time.time() - start_time

    start_time = time.time()
    evals_symmetric, evecs_symmetric = eigh(A_symmetric)
    time_symmetric = time.time() - start_time

    # Partial eigenvalue decomposition for sparse matrix
    k = 10  # Number of eigenvalues to compute
    start_time = time.time()
    evals_sparse, evecs_sparse = eigsh(A_sparse, k=k, which='LM')
    time_sparse = time.time() - start_time

    print(f" Tridiagonal: {time_tridiag:.4f} s ({n} eigenvalues)")
    print(f" Dense symmetric: {time_symmetric:.4f} s ({n} eigenvalues)")
    print(f" Sparse: {time_sparse:.4f} s ({k} eigenvalues)")

    # Analyze eigenvalue distributions
    print(f"\nEigenvalue Statistics:")
    print(f" Tridiagonal: lambda in [{evals_tridiag[0]:.3f}, {evals_tridiag[-1]:.3f}]")
    print(f" Symmetric: lambda in [{evals_symmetric[0]:.3f}, {evals_symmetric[-1]:.3f}]")
    print(f" Sparse: lambda in [{evals_sparse[0]:.3f}, {evals_sparse[-1]:.3f}]")

    return {
        'tridiagonal': (A_tridiag, evals_tridiag, evecs_tridiag),
        'symmetric': (A_symmetric, evals_symmetric, evecs_symmetric),
        'sparse': (A_sparse.toarray(), evals_sparse, evecs_sparse)
    }

def matrix_powers_and_functions():
    """Demonstrate matrix functions and powers.

    Computes exp(A) and a symmetric square root via eigendecomposition,
    then verifies sqrt(A_pd) @ sqrt(A_pd) == A_pd on a positive definite
    matrix. Returns (A, A_exp, A_sqrt, eigenvals, eigenvecs).
    """
    n = 8
    A = np.random.randn(n, n)
    A = (A + A.T) / 2  # Make symmetric

    # Eigendecomposition
    eigenvals, eigenvecs = eigh(A)

    # Matrix exponential using eigendecomposition
    exp_eigenvals = np.exp(eigenvals)
    A_exp = eigenvecs @ np.diag(exp_eigenvals) @ eigenvecs.T

    # Matrix square root
    sqrt_eigenvals = np.sqrt(np.abs(eigenvals))  # Handle potential negative eigenvalues
    A_sqrt = eigenvecs @ np.diag(sqrt_eigenvals) @ eigenvecs.T

    # Verify: (A^(1/2))^2 ≈ A for positive definite case
    A_pd = A @ A.T + np.eye(n)  # Ensure positive definite
    eigenvals_pd, eigenvecs_pd = eigh(A_pd)
    sqrt_eigenvals_pd = np.sqrt(eigenvals_pd)
    A_sqrt_pd = eigenvecs_pd @ np.diag(sqrt_eigenvals_pd) @ eigenvecs_pd.T

    sqrt_error = np.linalg.norm(A_sqrt_pd @ A_sqrt_pd - A_pd)

    print(f"\nMatrix Functions:")
    print(f" Matrix exponential computed via eigendecomposition")
    print(f" Matrix square root verification error: {sqrt_error:.2e}")

    return A, A_exp, A_sqrt, eigenvals, eigenvecs

# Perform eigenvalue analysis
eigen_results = eigenvalue_algorithms_comparison()
A_test, A_exp, A_sqrt, evals_test, evecs_test = matrix_powers_and_functions()
\end{pycode}

\section{Iterative Methods for Large Systems}
\label{sec:iterative-methods}

\begin{pycode}
def iterative_solver_comparison():
    """Compare iterative methods for large linear systems.

    Builds a 1D Laplacian, solves A x = b with CG, GMRES, and BiCGSTAB,
    and reports each solver's convergence flag and relative error against
    the sparse direct solution. Returns
    (A_laplace, b, x_true, x_cg, x_gmres, x_bicgstab).
    """
    from scipy.sparse.linalg import cg, gmres, bicgstab
    from scipy.sparse import diags

    # Create large sparse system
    n = 1000

    # 1D Laplacian (tridiagonal)
    diagonals = [-1, 2, -1]
    offsets = [-1, 0, 1]
    A_laplace = diags(diagonals, offsets, shape=(n, n), format='csr')

    # Right-hand side
    np.random.seed(42)
    b = np.random.randn(n)

    # True solution (for comparison)
    x_true = spsolve(A_laplace.tocsc(), b)

    print(f"\nIterative Solver Comparison:")
    print(f" System size: {n}×{n}")
    print(f" Matrix: 1D Laplacian (sparse)")

    # Conjugate Gradient
    x_cg, info_cg = cg(A_laplace, b, rtol=1e-8, maxiter=n)
    error_cg = np.linalg.norm(x_cg - x_true) / np.linalg.norm(x_true)

    # GMRES
    x_gmres, info_gmres = gmres(A_laplace, b, rtol=1e-8, maxiter=n, restart=50)
    error_gmres = np.linalg.norm(x_gmres - x_true) / np.linalg.norm(x_true)

    # BiCGSTAB
    x_bicgstab, info_bicgstab = bicgstab(A_laplace, b, rtol=1e-8, maxiter=n)
    error_bicgstab = np.linalg.norm(x_bicgstab - x_true) / np.linalg.norm(x_true)

    print(f" CG: convergence = {info_cg}, error = {error_cg:.2e}")
    print(f" GMRES: convergence = {info_gmres}, error = {error_gmres:.2e}")
    print(f" BiCGSTAB: convergence = {info_bicgstab}, error = {error_bicgstab:.2e}")

    return A_laplace, b, x_true, x_cg, x_gmres, x_bicgstab

# Test iterative solvers
A_sparse, b_vec, x_true, x_cg, x_gmres, x_bicgstab = iterative_solver_comparison()
\end{pycode}

\begin{pycode}
# Create comprehensive linear algebra visualization (3x3 panel figure
# assembled from the results computed in the previous pycode blocks).
import os
os.makedirs('assets', exist_ok=True)

fig, axes = plt.subplots(3, 3, figsize=(18, 15))
fig.suptitle('Linear Algebra and Matrix Computations', fontsize=16, fontweight='bold')

# Matrix structure visualization
ax1 = axes[0, 0]
A_viz = decompositions['A_spd']
im1 = ax1.imshow(A_viz, cmap='RdBu_r', interpolation='nearest')
ax1.set_title('Symmetric Positive Definite Matrix')
plt.colorbar(im1, ax=ax1, shrink=0.8)

# Singular values
ax2 = axes[0, 1]
_, s_svd, _ = decompositions['SVD']
ax2.semilogy(range(1, len(s_svd)+1), s_svd, 'bo-', linewidth=2, markersize=6)
ax2.set_xlabel('Index')
ax2.set_ylabel('Singular Value')
ax2.set_title('Singular Value Spectrum')
ax2.grid(True, alpha=0.3)

# Eigenvalues
ax3 = axes[0, 2]
evals, _ = decompositions['Eigen']
ax3.plot(range(1, len(evals)+1), evals, 'ro-', linewidth=2, markersize=6)
ax3.set_xlabel('Index')
ax3.set_ylabel('Eigenvalue')
ax3.set_title('Eigenvalue Spectrum')
ax3.grid(True, alpha=0.3)

# Condition numbers vs matrix size
ax4 = axes[1, 0]
ax4.semilogy(sizes, cond_nums, 'bs-', linewidth=2, markersize=8)
ax4.set_xlabel('Matrix Size')
ax4.set_ylabel('Condition Number')
ax4.set_title('Condition Number Growth')
ax4.grid(True, alpha=0.3)

# Eigenvalue distribution comparison
ax5 = axes[1, 1]
colors = ['blue', 'red', 'green']
labels = ['Tridiagonal', 'Random Symmetric', 'Sparse']
for i, (name, (_, evals, _)) in enumerate(eigen_results.items()):
    if i < len(colors):
        ax5.hist(evals, bins=20, alpha=0.6, label=labels[i],
                 color=colors[i], density=True)

ax5.set_xlabel('Eigenvalue')
ax5.set_ylabel('Density')
ax5.set_title('Eigenvalue Distributions')
ax5.legend()
ax5.grid(True, alpha=0.3)

# Matrix exponential visualization
ax6 = axes[1, 2]
im6 = ax6.imshow(A_exp, cmap='viridis', interpolation='nearest')
ax6.set_title('Matrix Exponential exp(A)')
plt.colorbar(im6, ax=ax6, shrink=0.8)

# Sparse matrix pattern (A_sparse here is the Laplacian returned by
# iterative_solver_comparison, which rebound the module-level name)
ax7 = axes[2, 0]
A_sparse_viz = A_sparse.toarray()[:50, :50]  # Show part of the matrix
ax7.spy(A_sparse_viz, markersize=1)
ax7.set_title('Sparse Matrix Pattern')

# Iterative solver convergence
ax8 = axes[2, 1]
# Create convergence history (simplified illustrative curves, not
# residuals recorded from the actual solver runs)
iterations = np.arange(1, 21)
cg_residuals = np.exp(-0.5 * iterations)
gmres_residuals = np.exp(-0.3 * iterations)

ax8.semilogy(iterations, cg_residuals, 'b-', linewidth=2, label='CG')
ax8.semilogy(iterations, gmres_residuals, 'r--', linewidth=2, label='GMRES')
ax8.set_xlabel('Iteration')
ax8.set_ylabel('Residual Norm')
ax8.set_title('Iterative Solver Convergence')
ax8.legend()
ax8.grid(True, alpha=0.3)

# QR decomposition visualization
ax9 = axes[2, 2]
Q, R = decompositions['QR']
# Show R matrix structure
im9 = ax9.imshow(R, cmap='RdBu_r', interpolation='nearest')
ax9.set_title('R Matrix from QR Decomposition')
plt.colorbar(im9, ax=ax9, shrink=0.8)

plt.tight_layout()
plt.savefig('assets/linear-algebra-analysis.pdf', dpi=300, bbox_inches='tight')
plt.close()

print("Linear algebra analysis saved to assets/linear-algebra-analysis.pdf")
\end{pycode}

\begin{figure}[H]
\centering
\includegraphics[width=0.95\textwidth]{assets/linear-algebra-analysis.pdf}
\caption{Comprehensive linear algebra analysis including matrix structures, eigenvalue and singular value spectra, condition number growth, eigenvalue distributions,
matrix functions, sparse matrix patterns, iterative solver convergence, and QR decomposition visualization.}
\label{fig:linear-algebra-analysis}
\end{figure}

\section{Conclusion}
\label{sec:conclusion}

This linear algebra template demonstrates advanced matrix computational methods including:

\begin{itemize}
\item Matrix factorizations (LU, QR, Cholesky, SVD)
\item Eigenvalue algorithms and spectral analysis
\item Condition number analysis and numerical stability
\item Iterative methods for large sparse systems
\item Matrix functions and computational applications
\end{itemize}

These methods form the foundation of modern scientific computing and data analysis applications.

\printbibliography

\end{document}