=>PYTHONTEX#py#default#default#0#code#####107#
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')  # Use non-interactive backend

# Set random seed for reproducibility
np.random.seed(42)

# Define colors for plots (matching LaTeX color definitions)
optblue = '#0077BB'    # RGB(0,119,187)
optred = '#CC3311'     # RGB(204,51,17)
optgreen = '#009966'   # RGB(0,153,102)
optorange = '#FF9933'  # RGB(255,153,51)
optpurple = '#990099'  # RGB(153,0,153)

# Convex function examples and visualization
def convex_function_1(x, y):
    """Quadratic convex function"""
    return x**2 + 2*y**2 + x*y + 3*x + 2*y + 5

def convex_function_2(x, y):
    """Elliptic paraboloid"""
    return 2*x**2 + 3*y**2 - x*y + x - 2*y + 1

def non_convex_function(x, y):
    """Non-convex function for comparison"""
    return x**2 - 2*y**2 + np.sin(2*x) + np.cos(3*y)

# Create coordinate grids
x_range = np.linspace(-3, 3, 100)
y_range = np.linspace(-3, 3, 100)
X, Y = np.meshgrid(x_range, y_range)

# Evaluate functions
Z_convex1 = convex_function_1(X, Y)
Z_convex2 = convex_function_2(X, Y)
Z_nonconvex = non_convex_function(X, Y)

print("Convex Optimization Theory Demonstration:")
print("Functions created for visualization and analysis")

# Verify convexity through eigenvalues of Hessian
def hessian_eigenvalues_quadratic():
    """Compute Hessian eigenvalues for quadratic functions"""
    # For convex_function_1: f(x,y) = x² + 2y² + xy + 3x + 2y + 5
    H1 = np.array([[2, 1], [1, 4]])  # Hessian matrix
    eigs1 = np.linalg.eigvals(H1)

    # For convex_function_2: f(x,y) = 2x² + 3y² - xy + x - 2y + 1
    H2 = np.array([[4, -1], [-1, 6]])  # Hessian matrix
    eigs2 = np.linalg.eigvals(H2)

    return eigs1, eigs2

eigs1, eigs2 = hessian_eigenvalues_quadratic()
print(f"\nHessian eigenvalue analysis:")
print(f"Convex function 1 eigenvalues: {eigs1}")
print(f"Convex function 2 eigenvalues: {eigs2}")
print(f"Both functions are convex (all eigenvalues > 0): {np.all(eigs1 > 0) and np.all(eigs2 > 0)}")
=>PYTHONTEX#py#default#default#1#code#####169#
# Define colors for plots (consistent with previous block)
optblue = '#0077BB'    # RGB(0,119,187)
optred = '#CC3311'     # RGB(204,51,17)
optgreen = '#009966'   # RGB(0,153,102)
optorange = '#FF9933'  # RGB(255,153,51)
optpurple = '#990099'  # RGB(153,0,153)

# Visualize convex and non-convex functions
fig, axes = plt.subplots(2, 2, figsize=(15, 12))
fig.suptitle('Convex vs Non-Convex Functions: Geometric Analysis', fontsize=16, fontweight='bold')

# Convex function 1 - contour plot
ax1 = axes[0, 0]
contour1 = ax1.contour(X, Y, Z_convex1, levels=20, colors='blue', alpha=0.6)
ax1.contourf(X, Y, Z_convex1, levels=20, cmap='Blues', alpha=0.3)
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('Convex Function 1: f(x,y) = x² + 2y² + xy + 3x + 2y + 5')
ax1.grid(True, alpha=0.3)

# Find and mark minimum
x_min = np.unravel_index(np.argmin(Z_convex1), Z_convex1.shape)
ax1.plot(X[x_min], Y[x_min], 'ro', markersize=10, label='Global minimum')
ax1.legend()

# Convex function 2 - contour plot
ax2 = axes[0, 1]
contour2 = ax2.contour(X, Y, Z_convex2, levels=20, colors='green', alpha=0.6)
ax2.contourf(X, Y, Z_convex2, levels=20, cmap='Greens', alpha=0.3)
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_title('Convex Function 2: f(x,y) = 2x² + 3y² - xy + x - 2y + 1')
ax2.grid(True, alpha=0.3)

# Find and mark minimum
x_min2 = np.unravel_index(np.argmin(Z_convex2), Z_convex2.shape)
ax2.plot(X[x_min2], Y[x_min2], 'ro', markersize=10, label='Global minimum')
ax2.legend()

# Non-convex function - contour plot
ax3 = axes[1, 0]
contour3 = ax3.contour(X, Y, Z_nonconvex, levels=20, colors='red', alpha=0.6)
ax3.contourf(X, Y, Z_nonconvex, levels=20, cmap='Reds', alpha=0.3)
ax3.set_xlabel('x')
ax3.set_ylabel('y')
ax3.set_title('Non-Convex Function: Multiple Local Minima')
ax3.grid(True, alpha=0.3)

# Convexity illustration - cross-section
ax4 = axes[1, 1]
x_line = np.linspace(-3, 3, 100)
y_fixed = 0  # Cross-section at y=0

f1_line = convex_function_1(x_line, y_fixed)
f2_line = convex_function_2(x_line, y_fixed)
f_nonconvex_line = non_convex_function(x_line, y_fixed)

ax4.plot(x_line, f1_line, 'b-', linewidth=3, label='Convex function 1')
ax4.plot(x_line, f2_line, 'g-', linewidth=3, label='Convex function 2')
ax4.plot(x_line, f_nonconvex_line, 'r-', linewidth=3, label='Non-convex function')

ax4.set_xlabel('x')
ax4.set_ylabel('f(x, y=0)')
ax4.set_title('Cross-Section Comparison (y = 0)')
ax4.legend()
ax4.grid(True, alpha=0.3)

import os
os.makedirs('assets', exist_ok=True)
plt.tight_layout()
plt.savefig('assets/convex-analysis.pdf', dpi=300, bbox_inches='tight')
plt.close()

print("Convex function analysis saved to assets/convex analysis.pdf")
=>PYTHONTEX#py#default#default#2#code#####258#
# Gradient descent algorithms implementation
def gradient_descent(f, grad_f, x0, alpha=0.01, max_iter=1000, tol=1e-6):
    """Standard gradient descent algorithm"""
    x = x0.copy()
    trajectory = [x.copy()]
    function_values = [f(x)]
    gradient_norms = []

    for i in range(max_iter):
        grad = grad_f(x)
        grad_norm = np.linalg.norm(grad)
        gradient_norms.append(grad_norm)

        if grad_norm < tol:
            print(f"Gradient descent converged in {i+1} iterations")
            break

        x = x - alpha * grad
        trajectory.append(x.copy())
        function_values.append(f(x))

    return x, trajectory, function_values, gradient_norms

def momentum_gradient_descent(f, grad_f, x0, alpha=0.01, beta=0.9, max_iter=1000, tol=1e-6):
    """Gradient descent with momentum"""
    x = x0.copy()
    v = np.zeros_like(x0)  # Momentum term
    trajectory = [x.copy()]
    function_values = [f(x)]
    gradient_norms = []

    for i in range(max_iter):
        grad = grad_f(x)
        grad_norm = np.linalg.norm(grad)
        gradient_norms.append(grad_norm)

        if grad_norm < tol:
            print(f"Momentum gradient descent converged in {i+1} iterations")
            break

        v = beta * v + alpha * grad
        x = x - v
        trajectory.append(x.copy())
        function_values.append(f(x))

    return x, trajectory, function_values, gradient_norms

def adam_optimizer(f, grad_f, x0, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, max_iter=1000, tol=1e-6):
    """Adam optimization algorithm"""
    x = x0.copy()
    m = np.zeros_like(x0)  # First moment estimate
    v = np.zeros_like(x0)  # Second moment estimate
    trajectory = [x.copy()]
    function_values = [f(x)]
    gradient_norms = []

    for i in range(max_iter):
        grad = grad_f(x)
        grad_norm = np.linalg.norm(grad)
        gradient_norms.append(grad_norm)

        if grad_norm < tol:
            print(f"Adam optimizer converged in {i+1} iterations")
            break

        # Update biased first and second moment estimates
        m = beta1 * m + (1 - beta1) * grad
        v = beta2 * v + (1 - beta2) * grad**2

        # Compute bias-corrected first and second moment estimates
        m_hat = m / (1 - beta1**(i+1))
        v_hat = v / (1 - beta2**(i+1))

        # Update parameters
        x = x - alpha * m_hat / (np.sqrt(v_hat) + epsilon)
        trajectory.append(x.copy())
        function_values.append(f(x))

    return x, trajectory, function_values, gradient_norms

# Test function and its gradient
def rosenbrock_function(x):
    """Rosenbrock function - classic optimization test case"""
    return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2

def rosenbrock_gradient(x):
    """Gradient of Rosenbrock function"""
    grad = np.zeros(2)
    grad[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
    grad[1] = 200 * (x[1] - x[0]**2)
    return grad

# Test optimization algorithms
print("Gradient-Based Optimization Algorithms:")

# Starting point
x0 = np.array([-1.0, 1.0])
print(f"Starting point: {x0}")
print(f"Starting function value: {rosenbrock_function(x0):.6f}")

# Run different optimizers
x_gd, traj_gd, fvals_gd, gnorms_gd = gradient_descent(
    rosenbrock_function, rosenbrock_gradient, x0, alpha=0.001, max_iter=10000)

x_momentum, traj_momentum, fvals_momentum, gnorms_momentum = momentum_gradient_descent(
    rosenbrock_function, rosenbrock_gradient, x0, alpha=0.001, beta=0.9, max_iter=5000)

x_adam, traj_adam, fvals_adam, gnorms_adam = adam_optimizer(
    rosenbrock_function, rosenbrock_gradient, x0, alpha=0.01, max_iter=2000)

print(f"\nFinal results:")
print(f"Gradient Descent: x = {x_gd}, f(x) = {rosenbrock_function(x_gd):.8f}")
print(f"Momentum GD: x = {x_momentum}, f(x) = {rosenbrock_function(x_momentum):.8f}")
print(f"Adam: x = {x_adam}, f(x) = {rosenbrock_function(x_adam):.8f}")
print(f"True minimum: x = [1, 1], f(x) = 0")
=>PYTHONTEX#py#default#default#3#code#####376#
# Define colors for plots (consistent with previous blocks)
optblue = '#0077BB'    # RGB(0,119,187)
optred = '#CC3311'     # RGB(204,51,17)
optgreen = '#009966'   # RGB(0,153,102)
optorange = '#FF9933'  # RGB(255,153,51)
optpurple = '#990099'  # RGB(153,0,153)

# Visualize optimization algorithm convergence
fig, axes = plt.subplots(2, 2, figsize=(15, 12))
fig.suptitle('Gradient-Based Optimization Algorithms: Convergence Analysis', fontsize=16, fontweight='bold')

# Convergence trajectories on Rosenbrock function
ax1 = axes[0, 0]

# Create contour plot of Rosenbrock function
x_range = np.linspace(-1.5, 1.5, 100)
y_range = np.linspace(-0.5, 1.5, 100)
X_ros, Y_ros = np.meshgrid(x_range, y_range)
Z_ros = np.zeros_like(X_ros)

for i in range(X_ros.shape[0]):
    for j in range(X_ros.shape[1]):
        Z_ros[i, j] = rosenbrock_function([X_ros[i, j], Y_ros[i, j]])

# Plot contours
contour_levels = np.logspace(0, 3, 20)
ax1.contour(X_ros, Y_ros, Z_ros, levels=contour_levels, colors='gray', alpha=0.3)
ax1.contourf(X_ros, Y_ros, Z_ros, levels=contour_levels, cmap='viridis', alpha=0.2)

# Plot optimization trajectories
traj_gd_array = np.array(traj_gd)
traj_momentum_array = np.array(traj_momentum)
traj_adam_array = np.array(traj_adam)

# Subsample trajectories for clarity
subsample = slice(None, None, 10)
ax1.plot(traj_gd_array[subsample, 0], traj_gd_array[subsample, 1],
         'o-', color=optblue, linewidth=2, markersize=4, label='Gradient Descent')
ax1.plot(traj_momentum_array[subsample, 0], traj_momentum_array[subsample, 1],
         's-', color=optred, linewidth=2, markersize=4, label='Momentum')
ax1.plot(traj_adam_array[subsample, 0], traj_adam_array[subsample, 1],
         '^-', color=optgreen, linewidth=2, markersize=4, label='Adam')

# Mark start and end points
ax1.plot(x0[0], x0[1], 'ko', markersize=10, label='Start')
ax1.plot(1, 1, 'r*', markersize=15, label='True minimum')

ax1.set_xlabel('x1')
ax1.set_ylabel('x2')
ax1.set_title('Optimization Trajectories on Rosenbrock Function')
ax1.legend()
ax1.grid(True, alpha=0.3)

# Function value convergence
ax2 = axes[0, 1]
iterations_gd = range(len(fvals_gd))
iterations_momentum = range(len(fvals_momentum))
iterations_adam = range(len(fvals_adam))

ax2.semilogy(iterations_gd, fvals_gd, color=optblue, linewidth=2, label='Gradient Descent')
ax2.semilogy(iterations_momentum, fvals_momentum, color=optred, linewidth=2, label='Momentum')
ax2.semilogy(iterations_adam, fvals_adam, color=optgreen, linewidth=2, label='Adam')

ax2.set_xlabel('Iteration')
ax2.set_ylabel('Function Value (log scale)')
ax2.set_title('Function Value Convergence')
ax2.legend()
ax2.grid(True, alpha=0.3)

# Gradient norm convergence
ax3 = axes[1, 0]
ax3.semilogy(range(len(gnorms_gd)), gnorms_gd, color=optblue, linewidth=2, label='Gradient Descent')
ax3.semilogy(range(len(gnorms_momentum)), gnorms_momentum, color=optred, linewidth=2, label='Momentum')
ax3.semilogy(range(len(gnorms_adam)), gnorms_adam, color=optgreen, linewidth=2, label='Adam')

ax3.set_xlabel('Iteration')
ax3.set_ylabel('Gradient Norm (log scale)')
ax3.set_title('Gradient Norm Convergence')
ax3.legend()
ax3.grid(True, alpha=0.3)

# Convergence rate comparison
ax4 = axes[1, 1]
# Compute convergence rates
conv_window = 100  # Window for rate computation

if len(fvals_gd) > conv_window:
    rate_gd = np.log(fvals_gd[-1] / fvals_gd[-conv_window]) / conv_window
else:
    rate_gd = 0

if len(fvals_momentum) > conv_window:
    rate_momentum = np.log(fvals_momentum[-1] / fvals_momentum[-conv_window]) / conv_window
else:
    rate_momentum = 0

if len(fvals_adam) > conv_window:
    rate_adam = np.log(fvals_adam[-1] / fvals_adam[-conv_window]) / conv_window
else:
    rate_adam = 0

algorithms = ['Gradient\nDescent', 'Momentum', 'Adam']
convergence_rates = [rate_gd, rate_momentum, rate_adam]
final_values = [fvals_gd[-1], fvals_momentum[-1], fvals_adam[-1]]

ax4_twin = ax4.twinx()

bars1 = ax4.bar([x - 0.2 for x in range(len(algorithms))],
                [-r for r in convergence_rates], width=0.4,
                color=optblue, alpha=0.7, label='Convergence Rate')
bars2 = ax4_twin.bar([x + 0.2 for x in range(len(algorithms))],
                     final_values, width=0.4,
                     color=optred, alpha=0.7, label='Final Value')

ax4.set_xlabel('Algorithm')
ax4.set_ylabel('Convergence Rate', color=optblue)
ax4_twin.set_ylabel('Final Function Value', color=optred)
ax4.set_title('Algorithm Performance Comparison')
ax4.set_xticks(range(len(algorithms)))
ax4.set_xticklabels(algorithms)

# Add legends
lines1, labels1 = ax4.get_legend_handles_labels()
lines2, labels2 = ax4_twin.get_legend_handles_labels()
ax4.legend(lines1 + lines2, labels1 + labels2, loc='upper right')

ax4.grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('assets/gradient-optimization.pdf', dpi=300, bbox_inches='tight')
plt.close()

print("Gradient-based optimization analysis saved to assets/gradient optimization.pdf")
=>PYTHONTEX#py#default#default#4#code#####527#
# Linear programming example and visualization
from scipy.optimize import linprog
import numpy as np

# Example: Production optimization problem
# Maximize profit: 3x1 + 2x2
# Subject to:
#   x1 + x2 <= 4  (resource constraint)
#   2x1 + x2 <= 6  (capacity constraint)
#   x1, x2 ≥ 0   (non-negativity)

print("Linear Programming Example: Production Optimization")

# Convert to standard form for scipy (minimization)
c = [-3, -2]  # Objective coefficients (negative for maximization)
A_ub = [[1, 1],   # Inequality constraints matrix
        [2, 1]]
b_ub = [4, 6]     # Inequality constraints RHS
bounds = [(0, None), (0, None)]  # Variable bounds

# Solve using linear programming
result = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='highs')

print(f"Optimization status: {result.message}")
print(f"Optimal solution: x1 = {result.x[0]:.4f}, x2 = {result.x[1]:.4f}")
print(f"Maximum profit: {-result.fun:.4f}")

# Create feasible region visualization
x1_range = np.linspace(0, 4, 100)
x2_constraint1 = 4 - x1_range  # x1 + x2 <= 4
x2_constraint2 = 6 - 2*x1_range  # 2x1 + x2 <= 6

# Corner points of feasible region
corner_points = np.array([[0, 0], [0, 4], [2, 2], [3, 0]])

print(f"\nFeasible region corner points:")
for i, point in enumerate(corner_points):
    profit = 3*point[0] + 2*point[1]
    print(f"  Point {i+1}: ({point[0]}, {point[1]}) → Profit = {profit}")
=>PYTHONTEX#py#default#default#5#code#####574#
# Constrained optimization using Sequential Least Squares Programming
from scipy.optimize import minimize

def objective_function(x):
    """Quadratic objective function"""
    return x[0]**2 + x[1]**2 - 2*x[0] - 4*x[1] + 5

def objective_gradient(x):
    """Gradient of objective function"""
    return np.array([2*x[0] - 2, 2*x[1] - 4])

def constraint_function(x):
    """Inequality constraint: x1 + x2 <= 3"""
    return 3 - x[0] - x[1]

def constraint_jacobian(x):
    """Jacobian of constraint"""
    return np.array([-1, -1])

# Initial guess
x0 = np.array([0.0, 0.0])

# Define constraints for scipy.optimize
constraints = {'type': 'ineq', 'fun': constraint_function, 'jac': constraint_jacobian}

# Solve constrained optimization problem
result_constrained = minimize(objective_function, x0, method='SLSQP',
                            jac=objective_gradient, constraints=constraints)

print(f"\nConstrained Optimization Results:")
print(f"Optimal solution: x1 = {result_constrained.x[0]:.4f}, x2 = {result_constrained.x[1]:.4f}")
print(f"Minimum value: {result_constrained.fun:.4f}")
print(f"Constraint value: {constraint_function(result_constrained.x):.4f}")

# Check KKT conditions
grad_objective = objective_gradient(result_constrained.x)
grad_constraint = constraint_jacobian(result_constrained.x)
constraint_value = constraint_function(result_constrained.x)

print(f"\nKKT Analysis:")
print(f"Gradient of objective: {grad_objective}")
print(f"Gradient of constraint: {grad_constraint}")
print(f"Constraint slack: {constraint_value:.6f}")

# If the constraint is active (slack ≈ 0), estimate the Lagrange multiplier
if abs(constraint_value) < 1e-6:
    # With the constraint coded as c(x) = 3 - x1 - x2 >= 0 (scipy's 'ineq' convention),
    # KKT stationarity reads ∇f - λ∇c = 0 with λ >= 0, so λ = ∇f·∇c / ||∇c||²
    lambda_multiplier = np.dot(grad_objective, grad_constraint) / np.dot(grad_constraint, grad_constraint)
    print(f"Estimated Lagrange multiplier: {lambda_multiplier:.4f}")
=>PYTHONTEX#py#default#default#6#code#####626#
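
# Optional sketch (an addition): check KKT stationarity numerically at the reported
# solution, ∇f(x*) - λ∇c(x*) ≈ 0. Here λ should come out ≈ 0 because the
# unconstrained minimum (1, 2) happens to lie exactly on the constraint boundary.
lam = lambda_multiplier if abs(constraint_value) < 1e-6 else 0.0
stationarity_residual = grad_objective - lam * grad_constraint
print(f"KKT stationarity residual: {stationarity_residual}")
print(f"Complementary slackness λ·c(x*): {lam * constraint_value:.2e}")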
# Create linear programming and constrained optimization visualization
fig, axes = plt.subplots(2, 2, figsize=(15, 12))
fig.suptitle('Linear Programming and Constrained Optimization', fontsize=16, fontweight='bold')

# Linear programming feasible region
ax1 = axes[0, 0]
x1_range = np.linspace(0, 4, 100)
x2_constraint1 = 4 - x1_range  # x1 + x2 <= 4
x2_constraint2 = 6 - 2*x1_range  # 2x1 + x2 <= 6

# Plot constraints
ax1.fill_between(x1_range, 0, np.minimum(x2_constraint1, x2_constraint2),
                where=(x2_constraint1 >= 0) & (x2_constraint2 >= 0),
                alpha=0.3, color=optblue, label='Feasible region')
ax1.plot(x1_range, x2_constraint1, 'b-', linewidth=2, label='x1 + x2 <= 4')
ax1.plot(x1_range, x2_constraint2, 'r-', linewidth=2, label='2x1 + x2 <= 6')

# Plot corner points
corner_points = np.array([[0, 0], [0, 4], [2, 2], [3, 0]])
ax1.plot(corner_points[:, 0], corner_points[:, 1], 'ko', markersize=8)
ax1.plot(result.x[0], result.x[1], 'r*', markersize=15, label='Optimal solution')

# Objective function contours
x1_obj, x2_obj = np.meshgrid(np.linspace(0, 4, 50), np.linspace(0, 5, 50))
obj_values = 3*x1_obj + 2*x2_obj
ax1.contour(x1_obj, x2_obj, obj_values, levels=10, colors='gray', alpha=0.5)

ax1.set_xlim(0, 4)
ax1.set_ylim(0, 5)
ax1.set_xlabel('x1')
ax1.set_ylabel('x2')
ax1.set_title('Linear Programming: Feasible Region')
ax1.legend()
ax1.grid(True, alpha=0.3)

# Constrained optimization visualization
ax2 = axes[0, 1]
x_range_const = np.linspace(-1, 4, 100)
y_range_const = np.linspace(-1, 4, 100)
X_const, Y_const = np.meshgrid(x_range_const, y_range_const)
Z_obj = X_const**2 + Y_const**2 - 2*X_const - 4*Y_const + 5

# Plot objective function contours
contours = ax2.contour(X_const, Y_const, Z_obj, levels=15, colors='blue', alpha=0.6)
ax2.clabel(contours, inline=True, fontsize=8)

# Plot constraint
x_constraint = np.linspace(0, 3, 100)
y_constraint = 3 - x_constraint
ax2.fill_between(x_constraint, 0, y_constraint, alpha=0.2, color=optred, label='Feasible region')
ax2.plot(x_constraint, y_constraint, 'r-', linewidth=3, label='x1 + x2 <= 3')

# Plot solutions
ax2.plot(1, 2, 'go', markersize=10, label='Unconstrained optimum')
ax2.plot(result_constrained.x[0], result_constrained.x[1], 'r*', markersize=15, label='Constrained optimum')

ax2.set_xlim(-0.5, 3.5)
ax2.set_ylim(-0.5, 3.5)
ax2.set_xlabel('x1')
ax2.set_ylabel('x2')
ax2.set_title('Constrained Optimization')
ax2.legend()
ax2.grid(True, alpha=0.3)

# Algorithm convergence comparison (illustrative, typical figures; not computed in this document)
ax3 = axes[1, 0]
algorithms = ['Gradient\nDescent', 'Newton\nMethod', 'Interior\nPoint', 'Simplex']
iterations = [1000, 5, 20, 8]          # representative iteration counts
accuracy = [1e-6, 1e-12, 1e-8, 1e-15]  # representative final accuracies

ax3_twin = ax3.twinx()
bars1 = ax3.bar([x - 0.2 for x in range(len(algorithms))], iterations,
               width=0.4, color=optblue, alpha=0.7, label='Iterations')
bars2 = ax3_twin.semilogy([x + 0.2 for x in range(len(algorithms))], accuracy,
                         'ro', markersize=10, label='Final accuracy')

ax3.set_xlabel('Algorithm')
ax3.set_ylabel('Iterations to convergence', color=optblue)
ax3_twin.set_ylabel('Final accuracy', color=optred)
ax3.set_title('Algorithm Performance Comparison')
ax3.set_xticks(range(len(algorithms)))
ax3.set_xticklabels(algorithms, rotation=45)
ax3.grid(True, alpha=0.3)

# Optimization landscape
ax4 = axes[1, 1]
# Create a 3D-like visualization using contours and shading
levels = np.linspace(np.min(Z_obj), np.max(Z_obj), 20)
cs = ax4.contourf(X_const, Y_const, Z_obj, levels=levels, cmap='viridis', alpha=0.8)
ax4.contour(X_const, Y_const, Z_obj, levels=levels, colors='black', alpha=0.3, linewidths=0.5)

# Add colorbar
cbar = plt.colorbar(cs, ax=ax4, shrink=0.8)
cbar.set_label('Objective function value')

ax4.plot(result_constrained.x[0], result_constrained.x[1], 'r*', markersize=15, label='Optimum')
ax4.set_xlabel('x1')
ax4.set_ylabel('x2')
ax4.set_title('Optimization Landscape')
ax4.legend()

plt.tight_layout()
plt.savefig('assets/linear-constrained-optimization.pdf', dpi=300, bbox_inches='tight')
plt.close()

print("Linear programming and constrained optimization analysis saved to assets/linear constrained optimization.pdf")
=>PYTHONTEX#py#default#default#7#code#####750#
# LQR control design and analysis
from scipy.linalg import solve_continuous_are, solve_discrete_are

def lqr_continuous(A, B, Q, R):
    """Solve continuous-time LQR problem"""
    # Solve Algebraic Riccati Equation
    P = solve_continuous_are(A, B, Q, R)

    # Compute optimal feedback gain
    K = np.linalg.inv(R) @ B.T @ P

    # Compute closed-loop eigenvalues
    A_cl = A - B @ K
    eigenvalues = np.linalg.eigvals(A_cl)

    return K, P, eigenvalues

def lqr_discrete(A, B, Q, R):
    """Solve discrete-time LQR problem"""
    # Solve Discrete Algebraic Riccati Equation
    P = solve_discrete_are(A, B, Q, R)

    # Compute optimal feedback gain
    K = np.linalg.inv(R + B.T @ P @ B) @ B.T @ P @ A

    # Compute closed-loop eigenvalues
    A_cl = A - B @ K
    eigenvalues = np.linalg.eigvals(A_cl)

    return K, P, eigenvalues

# Example: Double integrator system (position and velocity)
print("Linear Quadratic Regulator (LQR) Design:")

# Continuous-time system: ẋ = Ax + Bu
# State x = [position, velocity]; dynamics: position' = velocity, velocity' = u
A_cont = np.array([[0, 1],
                   [0, 0]])

B_cont = np.array([[0],
                   [1]])

# Cost matrices
Q = np.array([[10, 0],    # Penalize position error more
              [0, 1]])    # Small penalty on velocity

R = np.array([[0.1]])     # Control effort penalty

# Solve LQR
K_cont, P_cont, eigs_cont = lqr_continuous(A_cont, B_cont, Q, R)

print(f"Continuous-time LQR:")
print(f"  Optimal gain K = {K_cont.flatten()}")
print(f"  Closed-loop poles: {eigs_cont}")
print(f"  Cost matrix P =")
print(f"    {P_cont}")

# Discrete-time version (sampling time = 0.1s)
dt = 0.1
A_disc = np.eye(2) + A_cont * dt  # Forward Euler approximation
B_disc = B_cont * dt

K_disc, P_disc, eigs_disc = lqr_discrete(A_disc, B_disc, Q, R)

print(f"\nDiscrete-time LQR (dt = {dt}s):")
print(f"  Optimal gain K = {K_disc.flatten()}")
print(f"  Closed-loop poles: {eigs_disc}")
print(f"  Stability margin: {max(np.abs(eigs_disc)):.4f} < 1.0")
=>PYTHONTEX#py#default#default#8#code#####825#
# Kalman filter implementation and simulation
class KalmanFilter:
    def __init__(self, A, B, C, Q, R, P0, x0):
        """
        Initialize Kalman filter
        A: State transition matrix
        B: Control input matrix
        C: Observation matrix
        Q: Process noise covariance
        R: Measurement noise covariance
        P0: Initial error covariance
        x0: Initial state estimate
        """
        self.A = A
        self.B = B
        self.C = C
        self.Q = Q
        self.R = R
        self.P = P0
        self.x = x0

    def predict(self, u=None):
        """Prediction step"""
        if u is not None:
            self.x = self.A @ self.x + self.B @ u
        else:
            self.x = self.A @ self.x

        self.P = self.A @ self.P @ self.A.T + self.Q

    def update(self, z):
        """Update step with measurement z"""
        # Innovation
        y = z - self.C @ self.x

        # Innovation covariance
        S = self.C @ self.P @ self.C.T + self.R

        # Kalman gain
        K = self.P @ self.C.T @ np.linalg.inv(S)

        # Update estimate
        self.x = self.x + K @ y

        # Update error covariance
        I = np.eye(len(self.x))
        self.P = (I - K @ self.C) @ self.P

        return K, y, S

# Simulation parameters
print("\nKalman Filter Simulation:")

# System: position tracking with velocity
A_kf = np.array([[1, dt],
                 [0, 1]])    # Position and velocity dynamics

B_kf = np.array([[0.5*dt**2],
                 [dt]])      # Control input (acceleration)

C_kf = np.array([[1, 0]])    # Observe position only

# Noise parameters
Q_kf = np.array([[0.01, 0],     # Process noise
                 [0, 0.01]])

R_kf = np.array([[0.1]])        # Measurement noise

P0_kf = np.array([[1, 0],       # Initial uncertainty
                  [0, 1]])

x0_kf = np.array([0, 0])        # Initial state estimate

# Create Kalman filter
kf = KalmanFilter(A_kf, B_kf, C_kf, Q_kf, R_kf, P0_kf, x0_kf)

# Simulation
n_steps = 50
true_states = np.zeros((2, n_steps))
measurements = np.zeros(n_steps)
estimates = np.zeros((2, n_steps))
uncertainties = np.zeros((2, n_steps))

# True system
x_true = np.array([0, 0])  # True initial state

np.random.seed(42)  # For reproducible results

for k in range(n_steps):
    # Control input (sinusoidal acceleration)
    u = np.array([0.1 * np.sin(0.2 * k)])

    # True system evolution
    w = np.random.multivariate_normal([0, 0], Q_kf)  # Process noise
    x_true = A_kf @ x_true + B_kf @ u.flatten() + w

    # Measurement
    v = np.random.normal(0, np.sqrt(R_kf[0, 0]))  # Measurement noise
    z = C_kf @ x_true + v

    # Kalman filter prediction and update
    kf.predict(u)
    K, innovation, S = kf.update(z)

    # Store results
    true_states[:, k] = x_true
    measurements[k] = z.item() if hasattr(z, 'item') else float(z)
    estimates[:, k] = kf.x
    uncertainties[:, k] = np.sqrt(np.diag(kf.P))

print(f"Final estimation error: position = {abs(estimates[0, -1] - true_states[0, -1]):.4f}")
print(f"Final uncertainty: position = {uncertainties[0, -1]:.4f}")
print(f"Average measurement noise: {np.sqrt(R_kf[0, 0]):.4f}")
=>PYTHONTEX#py#default#default#9#code#####941#
# Create control systems visualization
fig, axes = plt.subplots(2, 2, figsize=(15, 12))
fig.suptitle('Optimal Control Theory and State Estimation', fontsize=16, fontweight='bold')

# LQR pole placement visualization
ax1 = axes[0, 0]
# Plot open-loop vs closed-loop poles
open_loop_poles = np.linalg.eigvals(A_cont)
closed_loop_poles = eigs_cont

# Unit circle for discrete-time (if we had discrete poles)
theta = np.linspace(0, 2*np.pi, 100)
unit_circle_x = np.cos(theta)
unit_circle_y = np.sin(theta)

ax1.axhline(y=0, color='k', linestyle='-', alpha=0.3)
ax1.axvline(x=0, color='k', linestyle='-', alpha=0.3)
ax1.plot(np.real(open_loop_poles), np.imag(open_loop_poles), 'ro', markersize=10, label='Open-loop poles')
ax1.plot(np.real(closed_loop_poles), np.imag(closed_loop_poles), 'bs', markersize=10, label='Closed-loop poles')

# Stability region for continuous-time (left half-plane)
ax1.axvline(x=0, color='red', linestyle='--', alpha=0.5, linewidth=2)
ax1.fill_betweenx([-5, 5], -5, 0, alpha=0.1, color='green', label='Stable region')

ax1.set_xlim(-5, 1)
ax1.set_ylim(-3, 3)
ax1.set_xlabel('Real part')
ax1.set_ylabel('Imaginary part')
ax1.set_title('LQR Pole Placement')
ax1.legend()
ax1.grid(True, alpha=0.3)

# Step response comparison
ax2 = axes[0, 1]
time_sim = np.linspace(0, 5, 100)
dt_sim = time_sim[1] - time_sim[0]

# Simulate open-loop and closed-loop responses
from scipy.signal import lti, step

# Open-loop system
sys_open = lti(A_cont, B_cont, np.eye(2), np.zeros((2, 1)))
t_open, y_open = step(sys_open, T=time_sim)

# Closed-loop system
A_cl = A_cont - B_cont @ K_cont
sys_closed = lti(A_cl, B_cont, np.eye(2), np.zeros((2, 1)))
t_closed, y_closed = step(sys_closed, T=time_sim)

ax2.plot(t_open, y_open[:, 0], 'r--', linewidth=2, label='Open-loop position')
ax2.plot(t_closed, y_closed[:, 0], 'b-', linewidth=2, label='Closed-loop position')
ax2.plot(t_closed, y_closed[:, 1], 'g-', linewidth=2, label='Closed-loop velocity')

ax2.set_xlabel('Time (s)')
ax2.set_ylabel('State response')
ax2.set_title('Step Response Comparison')
ax2.legend()
ax2.grid(True, alpha=0.3)

# Kalman filter performance
ax3 = axes[1, 0]
time_kf = np.arange(n_steps) * dt

ax3.plot(time_kf, true_states[0, :], 'k-', linewidth=2, label='True position')
ax3.plot(time_kf, measurements, 'r.', markersize=4, alpha=0.6, label='Measurements')
ax3.plot(time_kf, estimates[0, :], 'b-', linewidth=2, label='Kalman estimate')

# Uncertainty bounds
upper_bound = estimates[0, :] + 2*uncertainties[0, :]
lower_bound = estimates[0, :] - 2*uncertainties[0, :]
ax3.fill_between(time_kf, lower_bound, upper_bound, alpha=0.2, color='blue', label='±2σ bounds')

ax3.set_xlabel('Time (s)')
ax3.set_ylabel('Position')
ax3.set_title('Kalman Filter State Estimation')
ax3.legend()
ax3.grid(True, alpha=0.3)

# Control effort and cost analysis
ax4 = axes[1, 1]
Q_weights = [0.1, 1, 10, 100]
control_efforts = []
settling_times = []

for Q_weight in Q_weights:
    Q_temp = np.array([[Q_weight, 0], [0, 1]])
    K_temp, _, eigs_temp = lqr_continuous(A_cont, B_cont, Q_temp, R)

    # Estimate control effort (gain magnitude)
    control_effort = np.linalg.norm(K_temp)
    control_efforts.append(control_effort)

    # Estimate settling time (based on slowest pole)
    settling_time = 4 / abs(max(np.real(eigs_temp)))
    settling_times.append(settling_time)

ax4_twin = ax4.twinx()
line1 = ax4.semilogx(Q_weights, control_efforts, 'bo-', linewidth=2, markersize=8, label='Control effort')
line2 = ax4_twin.semilogx(Q_weights, settling_times, 'rs-', linewidth=2, markersize=8, label='Settling time')

ax4.set_xlabel('Q weight (position penalty)')
ax4.set_ylabel('Control effort ||K||', color='blue')
ax4_twin.set_ylabel('Settling time (s)', color='red')
ax4.set_title('LQR Design Trade-offs')
ax4.grid(True, alpha=0.3)

# Combine legends
lines1, labels1 = ax4.get_legend_handles_labels()
lines2, labels2 = ax4_twin.get_legend_handles_labels()
ax4.legend(lines1 + lines2, labels1 + labels2, loc='center right')

plt.tight_layout()
plt.savefig('assets/control-systems-analysis.pdf', dpi=300, bbox_inches='tight')
plt.close()

print("Control systems analysis saved to assets/control systems analysis.pdf")
=>PYTHONTEX#py#default#default#10#code#####1070#
# Robust control analysis
print("Robust Control Analysis:")

# H-infinity norm computation for robustness analysis
def hinf_norm_approximation(A, B, C, D, omega_range):
    """Approximate H-infinity norm by evaluating frequency response"""
    max_gain = 0
    worst_freq = 0

    for omega in omega_range:
        s = 1j * omega
        # Transfer function: G(s) = C(sI - A)⁻¹B + D
        try:
            G = C @ np.linalg.inv(s * np.eye(len(A)) - A) @ B + D
            gain = np.linalg.norm(G)
            if gain > max_gain:
                max_gain = gain
                worst_freq = omega
        except np.linalg.LinAlgError:
            continue

    return max_gain, worst_freq

# Example: Robust stability analysis
A_robust = np.array([[-1, 1],
                     [0, -2]])

B_robust = np.array([[0],
                     [1]])

C_robust = np.array([[1, 0]])

D_robust = np.array([[0]])

# Frequency range for analysis
omega_range = np.logspace(-2, 2, 1000)

hinf_norm, critical_freq = hinf_norm_approximation(A_robust, B_robust, C_robust, D_robust, omega_range)

print(f"H-infinity norm approximation: {hinf_norm:.4f}")
print(f"Critical frequency: {critical_freq:.4f} rad/s")

# Robustness margin calculation
stability_margin = 1.0 / hinf_norm
print(f"Robustness margin: {stability_margin:.4f}")
=>PYTHONTEX#py#default#default#11#code#####1162#
# Create algorithm comparison table
algorithm_data = [
    ["Optimization", "Gradient Descent", "O(1/eps)", "Linear"],
    ["", "Newton Method", "O(log log 1/eps)", "Quadratic"],
    ["", "Adam", "O(1/sqrt(eps))", "Adaptive"],
    ["Linear Programming", "Simplex", "Exponential (worst)", "Finite"],
    ["", "Interior Point", "O(n3 L)", "Polynomial"],
    ["Control", "LQR", "O(n3)", "Optimal"],
    ["", "Kalman Filter", "O(n3)", "Optimal"],
    ["", "H-infinity Control", "O(n6)", "Robust"],
]

print("Algorithm Complexity and Convergence Summary:")
print("Category        Algorithm        Complexity       Convergence")
print("-" * 65)
for row in algorithm_data:
    category, algorithm, complexity, convergence = row
    print(f"{category:<15} {algorithm:<15} {complexity:<16} {convergence}")
=>PYTHONTEX:SETTINGS#
version=0.18
outputdir=pythontex-files-main
workingdir=.
workingdirset=false
gobble=none
rerun=default
hashdependencies=default
makestderr=false
stderrfilename=full
keeptemps=none
pyfuture=default
pyconfuture=none
pygments=true
pygglobal=:GLOBAL||
fvextfile=-1
pyconbanner=none
pyconfilename=stdin
depythontex=false
pygfamily=py|python3|
pygfamily=pycon|pycon|
pygfamily=sympy|python3|
pygfamily=sympycon|pycon|
pygfamily=pylab|python3|
pygfamily=pylabcon|pycon|