#!/usr/bin/env python3
"""
Generate missing figures for optimization-control-systems template
"""

import numpy as np
import matplotlib
matplotlib.use('Agg')  # Use non-interactive backend
import matplotlib.pyplot as plt
from scipy.optimize import minimize, linprog
from scipy.linalg import solve_continuous_are
import os

# Set random seed for reproducibility
np.random.seed(42)

# Define colors for plots (matching LaTeX color definitions)
optblue = '#0077BB'    # RGB(0,119,187)
optred = '#CC3311'     # RGB(204,51,17)
optgreen = '#009966'   # RGB(0,153,102)
optorange = '#FF9933'  # RGB(255,153,51)
optpurple = '#990099'  # RGB(153,0,153)

# Ensure assets directory exists
os.makedirs('assets', exist_ok=True)

def generate_linear_constrained_optimization():
    """Generate linear programming and constrained optimization figure"""

    # Linear programming setup
    c = np.array([-3, -2])             # Maximize 3*x1 + 2*x2 (negate for minimization)
    A_ub = np.array([[1, 1], [2, 1]])  # Inequality constraint matrix
    b_ub = np.array([4, 6])            # Inequality constraint bounds
    bounds = [(0, None), (0, None)]    # x1, x2 >= 0

    # Solve the linear program
    result = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='highs')
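
    # Sanity check: the LP optimum sits at a vertex of the feasible region; evaluating
    # 3*x1 + 2*x2 at the vertices (0,0), (0,4), (2,2), (3,0) gives 0, 8, 10, 9,
    # so the expected solution is (2, 2) with objective value 10.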

    # Constrained optimization setup: convex quadratic objective
    def objective_function(x):
        return x[0]**2 + x[1]**2 - 2*x[0] - 4*x[1] + 5

    def constraint_function(x):
        return x[0] + x[1] - 3

    # Solve the constrained problem; SciPy treats 'ineq' constraints as g(x) >= 0,
    # so x1 + x2 <= 3 is passed as -(x1 + x2 - 3) >= 0
    constraints = {'type': 'ineq', 'fun': lambda x: -constraint_function(x)}
    result_constrained = minimize(objective_function, [0.5, 0.5], constraints=constraints)
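
    # Note: the unconstrained minimum of the quadratic is at (1, 2), which satisfies
    # x1 + x2 <= 3 with equality, so the constrained and unconstrained optima coincide here.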

    # Create visualization
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))
    fig.suptitle('Linear Programming and Constrained Optimization', fontsize=16, fontweight='bold')

    # Linear programming feasible region
    ax1 = axes[0, 0]
    x1_range = np.linspace(0, 4, 100)
    x2_constraint1 = 4 - x1_range    # x1 + x2 <= 4
    x2_constraint2 = 6 - 2*x1_range  # 2x1 + x2 <= 6

    # Plot constraints
    ax1.fill_between(x1_range, 0, np.minimum(x2_constraint1, x2_constraint2),
                     where=(x2_constraint1 >= 0) & (x2_constraint2 >= 0),
                     alpha=0.3, color=optblue, label='Feasible region')
    ax1.plot(x1_range, x2_constraint1, 'b-', linewidth=2, label='x1 + x2 <= 4')
    ax1.plot(x1_range, x2_constraint2, 'r-', linewidth=2, label='2x1 + x2 <= 6')

    # Plot corner points
    corner_points = np.array([[0, 0], [0, 4], [2, 2], [3, 0]])
    ax1.plot(corner_points[:, 0], corner_points[:, 1], 'ko', markersize=8)
    ax1.plot(result.x[0], result.x[1], 'r*', markersize=15, label='Optimal solution')

    # Objective function contours
    x1_obj, x2_obj = np.meshgrid(np.linspace(0, 4, 50), np.linspace(0, 5, 50))
    obj_values = 3*x1_obj + 2*x2_obj
    ax1.contour(x1_obj, x2_obj, obj_values, levels=10, colors='gray', alpha=0.5)

    ax1.set_xlim(0, 4)
    ax1.set_ylim(0, 5)
    ax1.set_xlabel('x1')
    ax1.set_ylabel('x2')
    ax1.set_title('Linear Programming: Feasible Region')
    ax1.legend()
    ax1.grid(True, alpha=0.3)

    # Constrained optimization visualization
    ax2 = axes[0, 1]
    x_range_const = np.linspace(-1, 4, 100)
    y_range_const = np.linspace(-1, 4, 100)
    X_const, Y_const = np.meshgrid(x_range_const, y_range_const)
    Z_obj = X_const**2 + Y_const**2 - 2*X_const - 4*Y_const + 5

    # Plot objective function contours
    contours = ax2.contour(X_const, Y_const, Z_obj, levels=15, colors='blue', alpha=0.6)
    ax2.clabel(contours, inline=True, fontsize=8)

    # Plot constraint
    x_constraint = np.linspace(0, 3, 100)
    y_constraint = 3 - x_constraint
    ax2.fill_between(x_constraint, 0, y_constraint, alpha=0.2, color=optred, label='Feasible region')
    ax2.plot(x_constraint, y_constraint, 'r-', linewidth=3, label='x1 + x2 <= 3')

    # Plot solutions
    ax2.plot(1, 2, 'go', markersize=10, label='Unconstrained optimum')
    ax2.plot(result_constrained.x[0], result_constrained.x[1], 'r*', markersize=15, label='Constrained optimum')

    ax2.set_xlim(-0.5, 3.5)
    ax2.set_ylim(-0.5, 3.5)
    ax2.set_xlabel('x1')
    ax2.set_ylabel('x2')
    ax2.set_title('Constrained Optimization')
    ax2.legend()
    ax2.grid(True, alpha=0.3)

    # Algorithm convergence comparison (illustrative values, not measured)
    ax3 = axes[1, 0]
    algorithms = ['Gradient\nDescent', 'Newton\nMethod', 'Interior\nPoint', 'Simplex']
    iterations = [1000, 5, 20, 8]
    accuracy = [1e-6, 1e-12, 1e-8, 1e-15]

    ax3_twin = ax3.twinx()
    ax3.bar([x - 0.2 for x in range(len(algorithms))], iterations,
            width=0.4, color=optblue, alpha=0.7, label='Iterations')
    ax3_twin.semilogy([x + 0.2 for x in range(len(algorithms))], accuracy,
                      'ro', markersize=10, label='Final accuracy')

    ax3.set_xlabel('Algorithm')
    ax3.set_ylabel('Iterations to convergence', color=optblue)
    ax3_twin.set_ylabel('Final accuracy', color=optred)
    ax3.set_title('Algorithm Performance Comparison')
    ax3.set_xticks(range(len(algorithms)))
    ax3.set_xticklabels(algorithms, rotation=45)
    ax3.grid(True, alpha=0.3)

    # Optimization landscape
    ax4 = axes[1, 1]
    # Create a 3D-like visualization using contours and shading
    levels = np.linspace(np.min(Z_obj), np.max(Z_obj), 20)
    cs = ax4.contourf(X_const, Y_const, Z_obj, levels=levels, cmap='viridis', alpha=0.8)
    ax4.contour(X_const, Y_const, Z_obj, levels=levels, colors='black', alpha=0.3, linewidths=0.5)

    # Add colorbar
    cbar = plt.colorbar(cs, ax=ax4, shrink=0.8)
    cbar.set_label('Objective function value')

    ax4.plot(result_constrained.x[0], result_constrained.x[1], 'r*', markersize=15, label='Optimum')
    ax4.set_xlabel('x1')
    ax4.set_ylabel('x2')
    ax4.set_title('Optimization Landscape')
    ax4.legend()

    plt.tight_layout()
    plt.savefig('assets/linear_constrained_optimization.pdf', dpi=300, bbox_inches='tight')
    plt.close()

    print("Generated linear_constrained_optimization.pdf")


def generate_control_systems_analysis():
    """Generate control systems analysis figure"""

    # Helper: continuous-time LQR design
    def lqr_continuous(A, B, Q, R):
        """Solve the continuous-time LQR problem"""
        P = solve_continuous_are(A, B, Q, R)
        K = np.linalg.inv(R) @ B.T @ P
        return K, P
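
    # LQR background: with cost J = integral(x^T Q x + u^T R u) dt, the optimal control is
    # u = -K x with K = R^{-1} B^T P, where P solves the continuous algebraic Riccati equation
    #   A^T P + P A - P B R^{-1} B^T P + Q = 0
    # (this is exactly what solve_continuous_are and the gain formula above compute).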

    # System matrices for a double integrator (position/velocity states)
    A = np.array([[0, 1], [0, 0]])
    B = np.array([[0], [1]])
    C = np.array([[1, 0]])

    # LQR design parameters
    Q = np.array([[1, 0], [0, 1]])
    R = np.array([[1]])

    # Design LQR controller and form the closed-loop system matrix
    K_lqr, P_lqr = lqr_continuous(A, B, Q, R)
    A_cl = A - B @ K_lqr
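
    # Sanity check: for this double integrator with Q = I and R = 1 the Riccati solution
    # works out to K = [1, sqrt(3)], so the closed-loop poles are the roots of
    # s^2 + sqrt(3)*s + 1 (about -0.87 +/- 0.5j), i.e. the LQR loop is stable.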

    # Simulate initial-condition responses with forward-Euler integration
    t = np.linspace(0, 10, 1000)
    dt = t[1] - t[0]

    # Initial condition: unit position offset, zero velocity
    x0 = np.array([[1], [0]])
    x_open = np.zeros((2, len(t)))
    x_closed = np.zeros((2, len(t)))
    x_open[:, [0]] = x0
    x_closed[:, [0]] = x0

    for i in range(1, len(t)):
        # Open-loop double integrator (poles at the origin; with zero initial velocity
        # the position simply stays at its initial value)
        x_open[:, [i]] = x_open[:, [i-1]] + dt * A @ x_open[:, [i-1]]

        # Closed-loop (LQR-stabilized) system
        x_closed[:, [i]] = x_closed[:, [i-1]] + dt * A_cl @ x_closed[:, [i-1]]
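
    # Both simulations use forward Euler, x[k+1] = (I + dt*A) x[k], as an approximation of
    # dx/dt = A x; with dt ~ 0.01 s and slow closed-loop poles this is adequate for plotting.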

    # Generate Kalman filter data
    # Process and measurement noise covariances
    Q_kf = 0.01 * np.eye(2)
    R_kf = 0.1

    # Generate a noisy "true" trajectory and noisy position measurements
    x_true = np.zeros((2, len(t)))
    y_meas = np.zeros(len(t))
    x_true[:, [0]] = x0

    for i in range(1, len(t)):
        process_noise = np.random.multivariate_normal([0, 0], Q_kf).reshape(2, 1)
        x_true[:, [i]] = x_true[:, [i-1]] + dt * A_cl @ x_true[:, [i-1]] + process_noise
        y_meas[i] = (C @ x_true[:, [i]])[0, 0] + np.random.normal(0, np.sqrt(R_kf))

    # Simplified Kalman filter estimate
    x_est = np.zeros((2, len(t)))
    x_est[:, [0]] = x0
    P_est = np.eye(2)

    for i in range(1, len(t)):
        # Prediction step
        x_pred = x_est[:, [i-1]] + dt * A_cl @ x_est[:, [i-1]]
        P_pred = P_est + dt * Q_kf

        # Measurement update step
        K_kf = P_pred @ C.T / (C @ P_pred @ C.T + R_kf)
        x_est[:, [i]] = x_pred + K_kf * (y_meas[i] - (C @ x_pred)[0, 0])
        P_est = (np.eye(2) - K_kf @ C) @ P_pred
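
    # Simplification notes: with a scalar measurement the Kalman gain reduces to
    # K = P_pred C^T / (C P_pred C^T + R), which is what the division above computes.
    # The covariance prediction P_pred = P + dt*Q_kf only adds process noise and drops the
    # A_cl P + P A_cl^T terms of the full covariance propagation, so it is a rough approximation.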

    # Create visualization
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))
    fig.suptitle('Optimal Control Theory and State Estimation', fontsize=16, fontweight='bold')

    # LQR pole placement
    ax1 = axes[0, 0]

    # Open-loop poles
    open_poles = np.linalg.eigvals(A)
    closed_poles = np.linalg.eigvals(A_cl)

    ax1.plot(np.real(open_poles), np.imag(open_poles), 'ro', markersize=10, label='Open-loop poles')
    ax1.plot(np.real(closed_poles), np.imag(closed_poles), 'bo', markersize=10, label='Closed-loop poles')
    ax1.axvline(0, color='black', linestyle='--', alpha=0.5)
    ax1.fill_between([-2, 0], [-3, -3], [3, 3], alpha=0.2, color='green', label='Stable region')
    ax1.set_xlabel('Real Part')
    ax1.set_ylabel('Imaginary Part')
    ax1.set_title('LQR Pole Placement')
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    ax1.set_xlim(-2, 1)
    ax1.set_ylim(-2, 2)

    # Initial-condition response comparison (no external input is applied)
    ax2 = axes[0, 1]
    ax2.plot(t, x_open[0, :], 'r-', linewidth=2, label='Open-loop')
    ax2.plot(t, x_closed[0, :], 'b-', linewidth=2, label='Closed-loop (LQR)')
    ax2.set_xlabel('Time (s)')
    ax2.set_ylabel('Position')
    ax2.set_title('Initial-Condition Response Comparison')
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    ax2.set_xlim(0, 10)

    # Kalman filter performance
    ax3 = axes[1, 0]
    ax3.plot(t, x_true[0, :], 'g-', linewidth=2, label='True state')
    ax3.plot(t, y_meas, 'r.', markersize=2, alpha=0.5, label='Measurements')
    ax3.plot(t, x_est[0, :], 'b-', linewidth=2, label='Kalman estimate')

    # Add uncertainty bounds (fixed width, for illustration only)
    uncertainty = 0.1 * np.ones_like(t)
    ax3.fill_between(t, x_est[0, :] - uncertainty, x_est[0, :] + uncertainty,
                     alpha=0.3, color='blue', label='Uncertainty bounds')

    ax3.set_xlabel('Time (s)')
    ax3.set_ylabel('Position')
    ax3.set_title('Kalman Filter State Estimation')
    ax3.legend()
    ax3.grid(True, alpha=0.3)
    ax3.set_xlim(0, 10)

    # LQR design trade-offs: sweep the state weight and record speed vs. control effort
    ax4 = axes[1, 1]
    Q_weights = [0.1, 1, 10, 100]
    settling_times = []
    control_efforts = []

    for q_weight in Q_weights:
        Q_test = q_weight * np.eye(2)
        K_test, _ = lqr_continuous(A, B, Q_test, R)
        A_cl_test = A - B @ K_test

        # 2% settling-time rule of thumb: t_s ~= 4 / |Re(dominant closed-loop pole)|
        settling_times.append(4 / abs(np.real(np.linalg.eigvals(A_cl_test)).max()))
        control_efforts.append(np.linalg.norm(K_test))  # gain norm as a proxy for control effort

    ax4_twin = ax4.twinx()

    ax4.plot(Q_weights, settling_times, 'bo-', linewidth=2, markersize=8, label='Settling time')
    ax4_twin.plot(Q_weights, control_efforts, 'ro-', linewidth=2, markersize=8, label='Control effort')

    ax4.set_xlabel('Q weight')
    ax4.set_ylabel('Settling time (s)', color='blue')
    ax4_twin.set_ylabel('Control effort', color='red')
    ax4.set_title('LQR Design Trade-offs')
    ax4.set_xscale('log')
    ax4.grid(True, alpha=0.3)

    # Combine legends from both y-axes
    lines1, labels1 = ax4.get_legend_handles_labels()
    lines2, labels2 = ax4_twin.get_legend_handles_labels()
    ax4.legend(lines1 + lines2, labels1 + labels2, loc='center right')

    plt.tight_layout()
    plt.savefig('assets/control_systems_analysis.pdf', dpi=300, bbox_inches='tight')
    plt.close()

    print("Generated control_systems_analysis.pdf")


if __name__ == "__main__":
    print("Generating missing optimization and control systems figures...")

    try:
        generate_linear_constrained_optimization()
        generate_control_systems_analysis()
        print("All missing figures generated successfully!")
    except Exception as e:
        print(f"Error generating figures: {e}")
        import traceback
        traceback.print_exc()