=>PYTHONTEX#py#default#default#0#code#####136#
import time

import numpy as np
import matplotlib.pyplot as plt

# Set random seed for reproducibility
np.random.seed(42)


def generate_test_matrix(n, condition_number):
    """Generate a symmetric positive definite test matrix with a given condition number."""
    lambda_min = 1.0
    lambda_max = condition_number * lambda_min
    eigenvals = np.logspace(np.log10(lambda_min), np.log10(lambda_max), n)
    Q, _ = np.linalg.qr(np.random.randn(n, n))
    A = Q @ np.diag(eigenvals) @ Q.T
    return A, eigenvals


def conjugate_gradient(A, b, x0, tol=1e-8, max_iter=None):
    """Classical conjugate gradient method."""
    n = len(b)
    if max_iter is None:
        max_iter = n
    x = x0.copy()
    r = b - A @ x
    p = r.copy()
    residuals = [np.linalg.norm(r)]
    for k in range(max_iter):
        Ap = A @ p
        alpha = (r.T @ r) / (p.T @ Ap)
        x = x + alpha * p
        r_new = r - alpha * Ap
        residual_norm = np.linalg.norm(r_new)
        residuals.append(residual_norm)
        if residual_norm < tol:
            break
        beta = (r_new.T @ r_new) / (r.T @ r)
        p = r_new + beta * p
        r = r_new
    return x, residuals, k + 1


def accelerated_cg(A, b, x0, tol=1e-8, max_iter=None):
    """Accelerated CG with a simple adaptive momentum term."""
    n = len(b)
    if max_iter is None:
        max_iter = n
    x = x0.copy()
    r = b - A @ x
    p = r.copy()
    r_old = r.copy()
    residuals = [np.linalg.norm(r)]
    for k in range(max_iter):
        Ap = A @ p
        alpha = (r.T @ r) / (p.T @ Ap)
        x = x + alpha * p
        r_new = r - alpha * Ap
        residual_norm = np.linalg.norm(r_new)
        residuals.append(residual_norm)
        if residual_norm < tol:
            break
        beta_cg = (r_new.T @ r_new) / (r.T @ r)
        if k > 0:
            accel_term = 0.1 * (r_new.T @ r_old) / (np.linalg.norm(r_old)**2 + 1e-12)
            beta_acc = max(0.0, beta_cg + accel_term)
        else:
            beta_acc = beta_cg
        p = r_new + beta_acc * p
        r_old = r.copy()
        r = r_new
    return x, residuals, k + 1


# Problem set
condition_numbers = [10, 100, 1000]
matrix_size = 50
problems = []
for kappa in condition_numbers:
    A, eigenvals = generate_test_matrix(matrix_size, kappa)
    x_true = np.random.randn(matrix_size)
    b = A @ x_true
    x0 = np.zeros(matrix_size)
    problems.append({
        'A': A, 'b': b, 'x0': x0, 'x_true': x_true,
        'kappa': kappa, 'eigenvals': eigenvals
    })

# Run solvers and collect results; wall-clock timings and per-problem
# iteration reduction are recorded here because the overhead and
# improvement analyses in later chunks consume them.
results = []
for prob in problems:
    A = prob['A']; b = prob['b']; x0 = prob['x0']; x_true = prob['x_true']
    t0 = time.perf_counter()
    x_cg, res_cg, it_cg = conjugate_gradient(A, b, x0, tol=1e-8, max_iter=matrix_size)
    time_cg = time.perf_counter() - t0
    t0 = time.perf_counter()
    x_acc, res_acc, it_acc = accelerated_cg(A, b, x0, tol=1e-8, max_iter=matrix_size)
    time_acc = time.perf_counter() - t0
    results.append({
        'kappa': prob['kappa'],
        'residuals_cg': res_cg,
        'residuals_acc': res_acc,
        'iters_cg': it_cg,
        'iters_acc': it_acc,
        'time_cg': time_cg,
        'time_acc': time_acc,
        'improvement': (it_cg - it_acc) / it_cg * 100,  # iteration reduction in %
        'err_cg': np.linalg.norm(x_cg - x_true),
        'err_acc': np.linalg.norm(x_acc - x_true),
        'eigenvals': prob['eigenvals']
    })

# Average iteration reduction, referenced by inline summaries below
avg_improvement = np.mean([r['improvement'] for r in results])

print(f"Generated {len(problems)} problems and built {len(results)} result entries.")
=>PYTHONTEX#py#default#default#1#i#####243#
len(problems)
=>PYTHONTEX#py#default#default#2#i#####243#
matrix_size
=>PYTHONTEX#py#default#default#3#i#####243#
matrix_size
=>PYTHONTEX#py#default#default#4#i#####243#
min(condition_numbers)
=>PYTHONTEX#py#default#default#5#i#####243#
max(condition_numbers)
=>PYTHONTEX#py#default#default#6#code#####247#
import numpy as np
import matplotlib.pyplot as plt

# Build a 2x2 summary figure and save it
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
colors = list(plt.cm.tab10.colors)

# (0,0) Convergence curves
ax = axes[0, 0]
for i, result in enumerate(results):
    kappa = result['kappa']
    ax.semilogy(result['residuals_cg'], label=f'CG κ={kappa}',
                color=colors[i % len(colors)], linestyle='-')
    ax.semilogy(result['residuals_acc'], label=f'AACG κ={kappa}',
                color=colors[i % len(colors)], linestyle='--')
ax.set_xlabel('Iteration')
ax.set_ylabel('Residual norm')
ax.set_title('Convergence')
ax.grid(True, which='both', ls=':')
ax.legend()

# (0,1) Iteration counts
ax = axes[0, 1]
xpos = np.arange(len(results))
width = 0.35
iters_cg = [r['iters_cg'] for r in results]
iters_acc = [r['iters_acc'] for r in results]
labels = [f'κ={r["kappa"]}' for r in results]
ax.bar(xpos - width/2, iters_cg, width, label='CG')
ax.bar(xpos + width/2, iters_acc, width, label='AACG')
ax.set_xticks(xpos)
ax.set_xticklabels(labels)
ax.set_ylabel('Iterations')
ax.set_title('Iteration counts to tolerance')
ax.grid(axis='y', ls=':', alpha=0.5)
ax.legend()

# (1,0) Final solution errors
ax = axes[1, 0]
err_cg = [r['err_cg'] for r in results]
err_acc = [r['err_acc'] for r in results]
ax.bar(xpos - width/2, err_cg, width, label='CG')
ax.bar(xpos + width/2, err_acc, width, label='AACG')
ax.set_xticks(xpos)
ax.set_xticklabels(labels)
ax.set_ylabel('||x - x*||_2')
ax.set_yscale('log')
ax.set_title('Final solution error')
ax.grid(axis='y', which='both', ls=':', alpha=0.5)
ax.legend()

# (1,1) Example eigenvalue spectra
ax = axes[1, 1]
for i, r in enumerate(results):
    ev = np.sort(r['eigenvals'])
    ax.semilogy(ev, marker='.', linestyle='none',
                label=f'κ={r["kappa"]}', color=colors[i % len(colors)])
ax.set_xlabel('Index')
ax.set_ylabel('Eigenvalue')
ax.set_title('Eigenvalue spectra')
ax.grid(True, which='both', ls=':')
ax.legend()

fig.tight_layout()
fig.savefig('conv_summary.pdf', bbox_inches='tight')
plt.close(fig)
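# --- Added sanity check (a minimal sketch, not part of the original
# experiment): the test matrices are small and dense, so np.linalg.solve
# provides a cheap reference solution against which the final-error panel
# (1,0) above can be validated.
for prob, result in zip(problems, results):
    x_direct = np.linalg.solve(prob['A'], prob['b'])
    err_direct = np.linalg.norm(x_direct - prob['x_true'])
    print(f"kappa={prob['kappa']}: direct-solve error {err_direct:.2e}, "
          f"CG error {result['err_cg']:.2e}, "
          f"ACC error {result['err_acc']:.2e}")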
=>PYTHONTEX#py#default#default#7#i#####320#
f"{avg_improvement:.1f}"
=>PYTHONTEX#py#default#default#8#code#####324#
# Create comprehensive convergence plots
fig, axes = plt.subplots(2, 2, figsize=(12, 10))

# Plot 1: Convergence curves for different condition numbers
ax = axes[0, 0]
colors = ['blue', 'red', 'green']
for i, result in enumerate(results):
    kappa = result['kappa']
    iterations_cg = range(len(result['residuals_cg']))
    iterations_acc = range(len(result['residuals_acc']))
    ax.semilogy(iterations_cg, result['residuals_cg'], color=colors[i],
                linestyle='-', alpha=0.7, label=f'CG κ={kappa}')
    ax.semilogy(iterations_acc, result['residuals_acc'], color=colors[i],
                linestyle='--', linewidth=2, label=f'ACC κ={kappa}')
ax.set_xlabel('Iteration')
ax.set_ylabel('Residual Norm')
ax.set_title('Convergence Comparison')
ax.legend(fontsize=8)
ax.grid(True, alpha=0.3)

# Plot 2: Iteration count vs condition number
ax = axes[0, 1]
kappas = [r['kappa'] for r in results]
iters_cg = [r['iters_cg'] for r in results]
iters_acc = [r['iters_acc'] for r in results]
ax.semilogx(kappas, iters_cg, 'bo-', label='Classical CG', linewidth=2)
ax.semilogx(kappas, iters_acc, 'ro-', label='Accelerated CG', linewidth=2)
ax.set_xlabel('Condition Number')
ax.set_ylabel('Iterations to Convergence')
ax.set_title('Iteration Count vs Condition Number')
ax.legend()
ax.grid(True, alpha=0.3)

# Plot 3: Performance improvement
ax = axes[1, 0]
improvements = [r['improvement'] for r in results]
ax.bar(range(len(kappas)), improvements, color='green', alpha=0.7)
ax.set_xlabel('Problem Index')
ax.set_ylabel('Improvement (%)')
ax.set_title('Iteration Reduction by Problem')
ax.set_xticks(range(len(kappas)))
ax.set_xticklabels([f'κ={k}' for k in kappas])
ax.grid(True, alpha=0.3)

# Plot 4: Theoretical vs observed convergence
ax = axes[1, 1]
for i, result in enumerate(results):
    kappa = result['kappa']
    # Theoretical CG rate: (sqrt(kappa) - 1) / (sqrt(kappa) + 1)
    theoretical_rate = (np.sqrt(kappa) - 1) / (np.sqrt(kappa) + 1)
    # Empirical convergence rate: mean ratio of successive residual norms,
    # skipping the initial residual
    residuals = np.array(result['residuals_cg'][1:])
    if len(residuals) > 1:
        empirical_rate = np.mean(residuals[1:] / residuals[:-1])
    else:
        empirical_rate = 0
    ax.scatter(theoretical_rate, empirical_rate, s=100,
               label=f'κ={kappa}', color=colors[i])
# Add diagonal line
rate_range = np.linspace(0, 1, 100)
ax.plot(rate_range, rate_range, 'k--', alpha=0.5, label='Theory = Practice')
ax.set_xlabel('Theoretical Convergence Rate')
ax.set_ylabel('Empirical Convergence Rate')
ax.set_title('Theory vs Practice')
ax.legend()
ax.grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('convergence_analysis.pdf', dpi=300, bbox_inches='tight')
plt.savefig('convergence_analysis.png', dpi=200, bbox_inches='tight')
plt.close(fig)
print("Generated comprehensive convergence analysis plots")
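# --- Added check (a minimal sketch based on standard CG theory): the
# classical bound ||e_k||_A <= 2 ((sqrt(kappa)-1)/(sqrt(kappa)+1))^k ||e_0||_A
# governs the A-norm of the error, not the residual norm, so comparing the
# envelope against relative residuals below is only indicative.
for result in results:
    kappa = result['kappa']
    rate = (np.sqrt(kappa) - 1.0) / (np.sqrt(kappa) + 1.0)
    rel_res = np.array(result['residuals_cg']) / result['residuals_cg'][0]
    bound = 2.0 * rate ** np.arange(len(rel_res))
    n_within = int(np.sum(rel_res <= bound))
    print(f"kappa={kappa}: {n_within}/{len(rel_res)} relative residuals "
          f"fall under the theoretical envelope")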
=>PYTHONTEX#py#default#default#9#code#####423#
# Analyze spectral properties of test matrices
print("Spectral Analysis of Test Matrices:")
print("=" * 40)

for i, prob in enumerate(problems):
    kappa = prob['kappa']
    eigenvals = prob['eigenvals']
    # Compute spectral statistics
    lambda_min = np.min(eigenvals)
    lambda_max = np.max(eigenvals)
    spectral_gap = np.min(np.diff(np.sort(eigenvals)))
    print(f"\nProblem {i+1} (kappa = {kappa}):")
    print(f"  lambda min = {lambda_min:.2e}")
    print(f"  lambda max = {lambda_max:.2e}")
    print(f"  Smallest spectral gap = {spectral_gap:.2e}")
    print(f"  CG iterations: {results[i]['iters_cg']}")
    print(f"  ACC iterations: {results[i]['iters_acc']}")
    print(f"  Improvement: {results[i]['improvement']:.1f}%")

# Create eigenvalue distribution plot
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
for i, prob in enumerate(problems):
    ax = axes[i]
    eigenvals = prob['eigenvals']
    kappa = prob['kappa']
    ax.hist(eigenvals, bins=20, alpha=0.7, color=['blue', 'red', 'green'][i])
    ax.set_xlabel('Eigenvalue')
    ax.set_ylabel('Frequency')
    ax.set_title(f'Eigenvalue Distribution (kappa = {kappa})')
    ax.grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('eigenvalue_distributions.pdf', dpi=300, bbox_inches='tight')
plt.savefig('eigenvalue_distributions.png', dpi=200, bbox_inches='tight')
plt.close()
print("\nGenerated eigenvalue distribution analysis")
=>PYTHONTEX#py#default#default#10#i#####469#
f"{results[-1]['improvement']:.1f}"
=>PYTHONTEX#py#default#default#11#code#####500#
# Analyze computational overhead of acceleration
print("Computational Overhead Analysis:")
print("=" * 40)

total_time_cg = sum(r['time_cg'] for r in results)
total_time_acc = sum(r['time_acc'] for r in results)
time_overhead = (total_time_acc - total_time_cg) / total_time_cg * 100
avg_time_per_iter_cg = total_time_cg / sum(r['iters_cg'] for r in results)
avg_time_per_iter_acc = total_time_acc / sum(r['iters_acc'] for r in results)

print(f"Total CG time: {total_time_cg:.4f}s")
print(f"Total ACC time: {total_time_acc:.4f}s")
print(f"Time overhead: {time_overhead:.1f}%")
print(f"Avg time per CG iteration: {avg_time_per_iter_cg*1000:.2f}ms")
print(f"Avg time per ACC iteration: {avg_time_per_iter_acc*1000:.2f}ms")

# Overall efficiency metric
iteration_reduction = avg_improvement
time_increase = time_overhead
net_efficiency = iteration_reduction - time_increase
print(f"\nNet efficiency gain: {net_efficiency:.1f}%")
print(f"(Iteration reduction: {iteration_reduction:.1f}%, "
      f"Time overhead: {time_overhead:.1f}%)")
=>PYTHONTEX#py#default#default#12#i#####532#
f"{time_overhead:.1f}"
=>PYTHONTEX#py#default#default#13#i#####532#
f"{net_efficiency:.1f}"
=>PYTHONTEX#py#default#default#14#code#####562#
# Analyze convergence detection strategies
print("Convergence Detection Analysis:")
print("=" * 35)

for result in results:
    kappa = result['kappa']
    residuals = np.array(result['residuals_cg'])
    # Different stopping criteria
    rel_residual = residuals / residuals[0]
    abs_residual = residuals
    # Find when different criteria would stop
    tol_abs = 1e-10
    tol_rel = 1e-8
    stop_abs = np.where(abs_residual < tol_abs)[0]
    stop_rel = np.where(rel_residual < tol_rel)[0]
    iter_abs = stop_abs[0] if len(stop_abs) > 0 else len(residuals)
    iter_rel = stop_rel[0] if len(stop_rel) > 0 else len(residuals)
    print(f"kappa = {kappa}:")
    print(f"  Absolute tol ({tol_abs}): {iter_abs} iterations")
    print(f"  Relative tol ({tol_rel}): {iter_rel} iterations")
    print(f"  Actual convergence: {result['iters_cg']} iterations")
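# --- Added extension (a hedged sketch, not part of the original study):
# a Jacobi-preconditioned CG with M = diag(A), to gauge whether a cheap
# diagonal preconditioner shifts the iteration counts reported above.
# The function name jacobi_pcg is illustrative, not from the source.
def jacobi_pcg(A, b, x0, tol=1e-8, max_iter=None):
    """CG preconditioned with the diagonal of A (Jacobi)."""
    if max_iter is None:
        max_iter = len(b)
    d_inv = 1.0 / np.diag(A)  # diag(A) > 0 for SPD A, so this is safe
    x = x0.copy()
    r = b - A @ x
    z = d_inv * r             # preconditioned residual z = M^{-1} r
    p = z.copy()
    rz = r @ z
    for k in range(max_iter):
        Ap = A @ p
        alpha = rz / (p @ Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        if np.linalg.norm(r) < tol:
            break
        z = d_inv * r
        rz_new = r @ z
        p = z + (rz_new / rz) * p
        rz = rz_new
    return x, k + 1

for prob in problems:
    _, it_pcg = jacobi_pcg(prob['A'], prob['b'], prob['x0'])
    print(f"kappa={prob['kappa']}: Jacobi-PCG stopped after {it_pcg} iterations")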
=>PYTHONTEX#py#default#default#15#i#####596#
f"{avg_improvement:.1f}"
=>PYTHONTEX#py#default#default#16#i#####597#
f"{time_overhead:.1f}"
=>PYTHONTEX#py#default#default#17#i#####597#
f"{net_efficiency:.1f}"
=>PYTHONTEX:SETTINGS#
version=0.18
outputdir=pythontex-files-main
workingdir=.
workingdirset=false
gobble=none
rerun=default
hashdependencies=default
makestderr=false
stderrfilename=full
keeptemps=none
pyfuture=default
pyconfuture=none
pygments=true
pygglobal=:GLOBAL||
fvextfile=-1
pyconbanner=none
pyconfilename=stdin
depythontex=false
pygfamily=py|python3|
pygfamily=pycon|pycon|
pygfamily=sympy|python3|
pygfamily=sympycon|pycon|
pygfamily=pylab|python3|
pygfamily=pylabcon|pycon|