Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Ok-landscape
GitHub Repository: Ok-landscape/computational-pipeline
Path: blob/main/latex-templates/templates/control-theory/adaptive_control.tex
75 views
unlisted
1
% Adaptive Control Systems Template
2
% Topics: MRAC, self-tuning regulators, persistent excitation, robust adaptive control
3
% Style: Engineering report with simulation analysis
4
5
\documentclass[a4paper, 11pt]{article}
6
\usepackage[utf8]{inputenc}
7
\usepackage[T1]{fontenc}
8
\usepackage{amsmath, amssymb}
9
\usepackage{graphicx}
10
\usepackage{siunitx}
11
\usepackage{booktabs}
12
\usepackage{subcaption}
13
\usepackage[makestderr]{pythontex}
14
15
% Theorem environments
16
\newtheorem{definition}{Definition}[section]
17
\newtheorem{theorem}{Theorem}[section]
18
\newtheorem{example}{Example}[section]
19
\newtheorem{remark}{Remark}[section]
20
21
\title{Adaptive Control Systems: Model Reference and Self-Tuning Approaches}
22
\author{Control Systems Laboratory}
23
\date{\today}
24
25
\begin{document}
26
\maketitle
27
28
\begin{abstract}
29
This report presents a comprehensive analysis of adaptive control systems with emphasis on
30
Model Reference Adaptive Control (MRAC) and Self-Tuning Regulators (STR). We examine the
31
stability guarantees provided by Lyapunov-based adaptation laws, analyze the role of
32
persistent excitation in parameter convergence, and compare direct versus indirect adaptive
33
approaches. Computational simulations demonstrate parameter adaptation dynamics, tracking
34
performance, and robustness modifications including $\sigma$-modification and projection
35
methods for handling unmodeled dynamics and bounded disturbances.
36
\end{abstract}
37
38
\section{Introduction}
39
40
Adaptive control addresses the challenge of controlling systems with unknown or time-varying
41
parameters. Unlike robust control which handles uncertainty through conservative design,
42
adaptive control actively estimates and compensates for parameter variations in real-time.
43
44
\begin{definition}[Adaptive Control Problem]
45
Given a plant with unknown parameters $\theta^*$ and a desired performance specification
46
(typically represented by a reference model), design a controller with adjustable parameters
47
$\hat{\theta}(t)$ and an adaptation mechanism such that:
48
\begin{equation}
49
\lim_{t \to \infty} [y(t) - y_m(t)] = 0
50
\end{equation}
51
where $y(t)$ is the plant output and $y_m(t)$ is the reference model output.
52
\end{definition}
53
54
\section{Theoretical Framework}
55
56
\subsection{Model Reference Adaptive Control (MRAC)}
57
58
Consider a first-order plant:
59
\begin{equation}
60
\dot{x} = ax + bu, \quad y = x
61
\end{equation}
62
where $a$ and $b$ are unknown parameters. The reference model is:
63
\begin{equation}
64
\dot{x}_m = a_m x_m + b_m r, \quad y_m = x_m
65
\end{equation}
66
67
\begin{definition}[MRAC Control Law]
68
The control law has the form:
69
\begin{equation}
70
u = \theta^T \omega = \hat{k}_x x + \hat{k}_r r
\end{equation}
where $\omega = [x,\; r]^T$ is the regressor and $\hat{k}_x$, $\hat{k}_r$ are the adaptive gains.
73
\end{definition}
74
75
\begin{theorem}[MIT Rule]
76
The gradient-based adaptation law:
77
\begin{equation}
78
\dot{\hat{\theta}} = -\gamma e \frac{\partial e}{\partial \hat{\theta}}
79
\end{equation}
80
where $e = y - y_m$ and $\gamma > 0$ is the adaptation gain, adjusts parameters in the
81
direction that reduces the instantaneous error.
82
\end{theorem}
83
84
\begin{theorem}[Lyapunov-Based Adaptation]
85
For the parameter error $\tilde{\theta} = \hat{\theta} - \theta^*$, consider the Lyapunov
86
function:
87
\begin{equation}
88
V = \frac{1}{2}e^2 + \frac{1}{2\gamma}\tilde{\theta}^T\tilde{\theta}
89
\end{equation}
90
The adaptation law:
91
\begin{equation}
92
\dot{\hat{\theta}} = -\gamma e \omega
93
\end{equation}
94
guarantees $\dot{V} \leq 0$, ensuring bounded tracking error and parameter estimates.
95
\end{theorem}
96
97
\subsection{Self-Tuning Regulators}
98
99
\begin{definition}[Certainty Equivalence Principle]
100
Self-tuning regulators separate the control problem into two stages:
101
\begin{enumerate}
102
\item \textbf{Parameter Estimation}: Estimate plant parameters $\hat{\theta}(t)$ using recursive identification
103
\item \textbf{Control Design}: Design controller using $\hat{\theta}(t)$ as if they were true values
104
\end{enumerate}
105
\end{definition}
106
107
\begin{theorem}[Recursive Least Squares (RLS)]
108
For the regression model $y(t) = \phi^T(t)\theta + \epsilon(t)$, the RLS algorithm:
109
\begin{align}
110
\hat{\theta}(t) &= \hat{\theta}(t-1) + K(t)[y(t) - \phi^T(t)\hat{\theta}(t-1)] \\
111
K(t) &= \frac{P(t-1)\phi(t)}{1 + \phi^T(t)P(t-1)\phi(t)} \\
112
P(t) &= P(t-1) - K(t)\phi^T(t)P(t-1)
113
\end{align}
114
minimizes the weighted sum of squared prediction errors.
115
\end{theorem}
116
117
\subsection{Persistent Excitation}
118
119
\begin{definition}[Persistent Excitation]
120
A signal $\phi(t)$ is persistently exciting of order $n$ if there exist $\alpha, T_0 > 0$
121
such that for all $t \geq 0$:
122
\begin{equation}
123
\int_t^{t+T_0} \phi(\tau)\phi^T(\tau) d\tau \geq \alpha I_n
124
\end{equation}
125
\end{definition}
126
127
\begin{theorem}[Parameter Convergence]
128
Under persistent excitation, the RLS algorithm guarantees exponential convergence:
129
\begin{equation}
130
\|\tilde{\theta}(t)\| \leq c e^{-\lambda t} \|\tilde{\theta}(0)\|
131
\end{equation}
132
for some constants $c, \lambda > 0$.
133
\end{theorem}
134
135
\subsection{Robust Modifications}
136
137
\begin{definition}[Sigma Modification]
138
To handle unmodeled dynamics and bounded disturbances, the $\sigma$-modification adds a
139
damping term:
140
\begin{equation}
141
\dot{\hat{\theta}} = -\gamma e \omega - \sigma \gamma \hat{\theta}
142
\end{equation}
143
where $\sigma > 0$ is a small constant that prevents parameter drift.
144
\end{definition}
145
146
\begin{definition}[Projection Modification]
147
Constrain parameter estimates to a known convex set $\mathcal{S}$:
148
\begin{equation}
149
\dot{\hat{\theta}} = \text{Proj}(-\gamma e \omega, \hat{\theta})
150
\end{equation}
151
where the projection ensures $\hat{\theta}(t) \in \mathcal{S}$ for all $t$.
152
\end{definition}
153
154
\section{Computational Analysis}
155
156
\begin{pycode}
157
import numpy as np
158
import matplotlib.pyplot as plt
159
from scipy.integrate import odeint
160
from scipy.linalg import lstsq
161
162
np.random.seed(42)
163
164
# Plant parameters (unknown to controller)
165
a_true = -2.0
166
b_true = 3.0
167
168
# Reference model parameters (desired closed-loop behavior)
169
a_m = -5.0
170
b_m = 5.0
171
172
# Control design: for perfect matching, we need
173
# a + b*k_x = a_m => k_x = (a_m - a) / b
174
# b*k_r = b_m => k_r = b_m / b
175
k_x_star = (a_m - a_true) / b_true
176
k_r_star = b_m / b_true
177
178
# Store values for later use (print statements cause LaTeX issues)
179
# print(f"Ideal controller parameters: k_x* = {k_x_star:.3f}, k_r* = {k_r_star:.3f}")
180
181
# MRAC simulation with the MIT (gradient) rule
def mrac_mit_system(state, t, r_func, gamma_mit):
    """Right-hand side of the MRAC ODE under the MIT rule, for odeint.

    state = [x, x_m, k_x, k_r]: plant state, reference-model state and the
    two adaptive gains.  Reads the plant (a_true, b_true) and reference
    model (a_m, b_m) parameters from module scope.
    """
    x, x_m, k_x, k_r = state
    r = r_func(t)

    u = k_x * x + k_r * r            # certainty-equivalence control law
    x_dot = a_true * x + b_true * u  # (unknown) plant dynamics
    x_m_dot = a_m * x_m + b_m * r    # reference model
    e = x - x_m                      # tracking error

    # MIT rule: gradient descent on e^2/2, with the error sensitivities
    # approximated by the regressor components x and r.
    k_x_dot = -gamma_mit * e * x
    k_r_dot = -gamma_mit * e * r

    return [x_dot, x_m_dot, k_x_dot, k_r_dot]
205
206
# MRAC simulation with Lyapunov-based adaptation
def mrac_lyapunov_system(state, t, r_func, gamma_lyap):
    """Right-hand side of the MRAC ODE under the Lyapunov adaptation law.

    For this first-order plant the law theta_dot = -gamma * e * omega with
    regressor omega = [x, r] coincides in form with the MIT rule, but it is
    the one backed by the Lyapunov argument in the text.  Reads a_true,
    b_true, a_m, b_m from module scope.
    """
    x, x_m, k_x, k_r = state
    r = r_func(t)

    control = k_x * x + k_r * r
    plant_rate = a_true * x + b_true * control
    model_rate = a_m * x_m + b_m * r
    error = x - x_m

    # Adaptation: theta_dot = -gamma * e * omega, omega = (x, r)
    gain_rates = [-gamma_lyap * error * omega_i for omega_i in (x, r)]

    return [plant_rate, model_rate, gain_rates[0], gain_rates[1]]
230
231
# MRAC with sigma-modification (leaky adaptation)
def mrac_sigma_system(state, t, r_func, gamma_sig, sigma):
    """Right-hand side of the MRAC ODE with sigma-modification.

    Identical to the Lyapunov law plus a leakage term -sigma*gamma*theta
    that bounds parameter drift under disturbances/unmodeled dynamics
    (at the cost of a small steady-state parameter bias).
    """
    x, x_m, k_x, k_r = state
    r = r_func(t)

    u = k_x * x + k_r * r
    plant_dot = a_true * x + b_true * u
    model_dot = a_m * x_m + b_m * r
    e = x - x_m

    # Leaky adaptation: each gain is also pulled toward zero at rate sigma*gamma.
    k_x_dot = -gamma_sig * e * x - sigma * gamma_sig * k_x
    k_r_dot = -gamma_sig * e * r - sigma * gamma_sig * k_r

    return [plant_dot, model_dot, k_x_dot, k_r_dot]
247
248
# Reference input: a step plus two incommensurate sinusoids -- rich enough
# to persistently excite the two-parameter estimators.
def reference_signal(t):
    """Reference command r(t) = 1 + 0.5 sin(2t) + 0.3 sin(5t)."""
    step = 1.0
    tone_a = 0.5 * np.sin(2.0 * t)
    tone_b = 0.3 * np.sin(5.0 * t)
    return step + tone_a + tone_b
251
252
# Time vector
253
t_sim = np.linspace(0, 20, 2000)
254
255
# Initial conditions: [x, x_m, k_x, k_r]
256
initial_state = [0.0, 0.0, 0.0, 0.0]
257
258
# Adaptation gains
259
gamma_mit = 2.0
260
gamma_lyap = 5.0
261
gamma_sig = 5.0
262
sigma_param = 0.1
263
264
# Simulate MIT rule
265
sol_mit = odeint(mrac_mit_system, initial_state, t_sim, args=(reference_signal, gamma_mit))
266
x_mit = sol_mit[:, 0]
267
x_m_mit = sol_mit[:, 1]
268
k_x_mit = sol_mit[:, 2]
269
k_r_mit = sol_mit[:, 3]
270
e_mit = x_mit - x_m_mit
271
272
# Simulate Lyapunov adaptation
273
sol_lyap = odeint(mrac_lyapunov_system, initial_state, t_sim, args=(reference_signal, gamma_lyap))
274
x_lyap = sol_lyap[:, 0]
275
x_m_lyap = sol_lyap[:, 1]
276
k_x_lyap = sol_lyap[:, 2]
277
k_r_lyap = sol_lyap[:, 3]
278
e_lyap = x_lyap - x_m_lyap
279
280
# Simulate sigma-modification
281
sol_sigma = odeint(mrac_sigma_system, initial_state, t_sim, args=(reference_signal, gamma_sig, sigma_param))
282
x_sigma = sol_sigma[:, 0]
283
x_m_sigma = sol_sigma[:, 1]
284
k_x_sigma = sol_sigma[:, 2]
285
k_r_sigma = sol_sigma[:, 3]
286
e_sigma = x_sigma - x_m_sigma
287
288
# Recursive Least Squares estimation
class RLSEstimator:
    """Recursive least-squares estimator with exponential forgetting.

    Standard RLS recursion for the model y(t) = phi(t)^T theta + eps(t):
        K(t)     = P phi / (lambda + phi^T P phi)
        theta(t) = theta(t-1) + K(t) (y - phi^T theta(t-1))
        P(t)     = (P - K phi^T P) / lambda
    """

    def __init__(self, n_params, forgetting_factor=1.0):
        self.theta_hat = np.zeros(n_params)  # current parameter estimate
        self.P = 100.0 * np.eye(n_params)    # large P(0): weak prior confidence
        self.lambda_ff = forgetting_factor   # lambda in (0, 1]; 1 = no forgetting

    def update(self, phi, y):
        """Perform one RLS step; returns (theta_hat, prediction_error)."""
        gain = self.P @ phi / (self.lambda_ff + phi.T @ self.P @ phi)
        innovation = y - phi.T @ self.theta_hat
        self.theta_hat = self.theta_hat + gain * innovation
        self.P = (self.P - np.outer(gain, phi.T @ self.P)) / self.lambda_ff
        return self.theta_hat, innovation
302
303
# Forward-Euler simulation of the true plant, with small process noise,
# used to generate identification data for RLS.
def plant_with_noise(x, u, dt, noise_level=0.01):
    """Advance the plant state one Euler step of size dt under input u."""
    x_dot = a_true * x + b_true * u + noise_level * np.random.randn()
    return x + x_dot * dt
308
309
# RLS identification experiment
dt_rls = 0.01
t_rls = np.arange(0, 20, dt_rls)
n_samples = len(t_rls)

x_rls = np.zeros(n_samples)
u_rls = np.zeros(n_samples)
theta_rls = np.zeros((n_samples, 2))
prediction_errors = np.zeros(n_samples)

rls = RLSEstimator(n_params=2, forgetting_factor=0.995)
x_rls[0] = 0.1

for i in range(1, n_samples):
    # Excitation input: sign of a two-tone signal (PRBS-like square wave)
    u_rls[i] = 2.0 * np.sign(np.sin(3.0 * t_rls[i]) + 0.5 * np.sin(7.0 * t_rls[i]))

    # Plant response (noisy forward-Euler step)
    x_rls[i] = plant_with_noise(x_rls[i-1], u_rls[i-1], dt_rls)

    # Continuous-time regression: dx/dt ~= a*x + b*u, so we use the finite
    # difference (x(t) - x(t-1))/dt as the target, making theta = [a, b].
    # (Regressing x(t) directly on [x(t-1), u(t-1)] would instead identify
    # the discrete-time parameters [1 + a*dt, b*dt], which is what the
    # previous version did -- its estimates could never match a_true, b_true.)
    phi = np.array([x_rls[i-1], u_rls[i-1]])
    y_deriv = (x_rls[i] - x_rls[i-1]) / dt_rls

    theta_hat, pred_err = rls.update(phi, y_deriv)
    theta_rls[i] = theta_hat
    prediction_errors[i] = pred_err

# Estimated continuous-time plant parameters, comparable to a_true / b_true
a_hat_rls = theta_rls[:, 0]
b_hat_rls = theta_rls[:, 1]
339
340
# Persistent excitation analysis
def check_persistent_excitation(phi_matrix, window_size):
    """Sliding-window persistent-excitation indicator.

    For each window of `window_size` consecutive regressor samples, computes
    the minimum eigenvalue of the window Gram matrix sum phi phi^T; values
    bounded away from zero indicate a persistently exciting regressor.
    Uses dt_rls from module scope to convert sample indices to times.

    Returns (times, min_eigenvalues) as numpy arrays.
    """
    times = []
    min_eigenvalues = []
    for end in range(window_size, len(phi_matrix)):
        window = phi_matrix[end - window_size:end]
        gram = window.T @ window
        min_eigenvalues.append(np.min(np.linalg.eigvalsh(gram)))
        times.append(end * dt_rls)
    return np.array(times), np.array(min_eigenvalues)
354
355
# Build regressor matrix for PE analysis
356
phi_matrix_rls = np.column_stack([x_rls[:-1], u_rls[:-1]])
357
pe_times, pe_eigenvalues = check_persistent_excitation(phi_matrix_rls, window_size=100)
358
359
# Calculate performance metrics
360
tracking_error_mit_rms = np.sqrt(np.mean(e_mit**2))
361
tracking_error_lyap_rms = np.sqrt(np.mean(e_lyap**2))
362
tracking_error_sigma_rms = np.sqrt(np.mean(e_sigma**2))
363
364
param_error_k_x_mit = np.abs(k_x_mit[-1] - k_x_star)
365
param_error_k_r_mit = np.abs(k_r_mit[-1] - k_r_star)
366
param_error_k_x_lyap = np.abs(k_x_lyap[-1] - k_x_star)
367
param_error_k_r_lyap = np.abs(k_r_lyap[-1] - k_r_star)
368
369
param_error_a_rls = np.abs(a_hat_rls[-1] - a_true)
370
param_error_b_rls = np.abs(b_hat_rls[-1] - b_true)
371
372
# Nine-panel summary figure; saved to PDF and pulled in by \includegraphics.
# TeX fragments in labels use raw (r'' / rf'') strings so sequences like
# \gamma and \hat are not interpreted as (invalid) Python escapes.
fig = plt.figure(figsize=(16, 14))

# Plot 1: MRAC tracking performance (Lyapunov adaptation)
ax1 = fig.add_subplot(3, 3, 1)
r_values = [reference_signal(t) for t in t_sim]
ax1.plot(t_sim, r_values, 'k--', linewidth=2, label='Reference $r(t)$', alpha=0.7)
ax1.plot(t_sim, x_m_lyap, 'b-', linewidth=2, label='Model $y_m(t)$')
ax1.plot(t_sim, x_lyap, 'r-', linewidth=1.5, label='Plant $y(t)$', alpha=0.8)
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Output')
ax1.set_title('MRAC Tracking Performance (Lyapunov)')
ax1.legend(fontsize=9)
ax1.grid(True, alpha=0.3)
ax1.set_xlim(0, 20)

# Plot 2: tracking-error comparison across the three adaptation laws
ax2 = fig.add_subplot(3, 3, 2)
ax2.plot(t_sim, e_mit, linewidth=1.5, label=rf'MIT ($\gamma$={gamma_mit})', alpha=0.7)
ax2.plot(t_sim, e_lyap, linewidth=1.5, label=rf'Lyapunov ($\gamma$={gamma_lyap})', alpha=0.7)
ax2.plot(t_sim, e_sigma, linewidth=1.5, label=rf'$\sigma$-mod ($\sigma$={sigma_param})', alpha=0.7)
ax2.axhline(y=0, color='k', linestyle=':', linewidth=1)
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Tracking Error $e(t)$')
ax2.set_title('Adaptation Law Comparison')
ax2.legend(fontsize=8)
ax2.grid(True, alpha=0.3)
ax2.set_xlim(0, 20)

# Plot 3: adaptation of the state-feedback gain k_x toward k_x*
ax3 = fig.add_subplot(3, 3, 3)
ax3.plot(t_sim, k_x_mit, linewidth=1.5, label='MIT rule')
ax3.plot(t_sim, k_x_lyap, linewidth=1.5, label='Lyapunov')
ax3.plot(t_sim, k_x_sigma, linewidth=1.5, label=r'$\sigma$-modification')
ax3.axhline(y=k_x_star, color='k', linestyle='--', linewidth=2, label='$k_x^*$')
ax3.set_xlabel('Time (s)')
ax3.set_ylabel(r'$\hat{k}_x(t)$')
ax3.set_title('State Feedback Gain Adaptation')
ax3.legend(fontsize=8)
ax3.grid(True, alpha=0.3)
ax3.set_xlim(0, 20)

# Plot 4: adaptation of the feedforward gain k_r toward k_r*
ax4 = fig.add_subplot(3, 3, 4)
ax4.plot(t_sim, k_r_mit, linewidth=1.5, label='MIT rule')
ax4.plot(t_sim, k_r_lyap, linewidth=1.5, label='Lyapunov')
ax4.plot(t_sim, k_r_sigma, linewidth=1.5, label=r'$\sigma$-modification')
ax4.axhline(y=k_r_star, color='k', linestyle='--', linewidth=2, label='$k_r^*$')
ax4.set_xlabel('Time (s)')
ax4.set_ylabel(r'$\hat{k}_r(t)$')
ax4.set_title('Feedforward Gain Adaptation')
ax4.legend(fontsize=8)
ax4.grid(True, alpha=0.3)
ax4.set_xlim(0, 20)
426
427
# Plot 5: RLS parameter estimates vs. true plant parameters
# (raw strings for TeX labels -- \hat etc. are not valid Python escapes)
ax5 = fig.add_subplot(3, 3, 5)
ax5.plot(t_rls, a_hat_rls, 'b-', linewidth=1.5, label=r'$\hat{a}(t)$')
ax5.plot(t_rls, b_hat_rls, 'r-', linewidth=1.5, label=r'$\hat{b}(t)$')
ax5.axhline(y=a_true, color='b', linestyle='--', linewidth=2, alpha=0.5, label='$a^*$')
ax5.axhline(y=b_true, color='r', linestyle='--', linewidth=2, alpha=0.5, label='$b^*$')
ax5.set_xlabel('Time (s)')
ax5.set_ylabel('Parameter Estimate')
ax5.set_title('RLS Parameter Convergence')
ax5.legend(fontsize=8, loc='right')
ax5.grid(True, alpha=0.3)
ax5.set_xlim(0, 20)
ax5.set_ylim(-4, 4)

# Plot 6: RLS prediction (innovation) error
ax6 = fig.add_subplot(3, 3, 6)
ax6.plot(t_rls, prediction_errors, 'g-', linewidth=0.5, alpha=0.7)
ax6.set_xlabel('Time (s)')
ax6.set_ylabel('Prediction Error')
ax6.set_title('RLS Prediction Error')
ax6.grid(True, alpha=0.3)
ax6.set_xlim(0, 20)

# Plot 7: sliding-window persistent-excitation indicator
ax7 = fig.add_subplot(3, 3, 7)
ax7.plot(pe_times, pe_eigenvalues, 'purple', linewidth=1.5)
ax7.set_xlabel('Time (s)')
ax7.set_ylabel(r'Min. Eigenvalue of $\Phi^T\Phi$')
ax7.set_title('Persistent Excitation Indicator')
ax7.grid(True, alpha=0.3)
ax7.set_xlim(0, 20)

# Plot 8: phase portrait (x vs. dx/dt), derivatives approximated numerically
ax8 = fig.add_subplot(3, 3, 8)
dx_lyap = np.gradient(x_lyap, t_sim)
dx_m_lyap = np.gradient(x_m_lyap, t_sim)
ax8.plot(x_lyap, dx_lyap, 'r-', linewidth=1, alpha=0.7, label='Plant')
ax8.plot(x_m_lyap, dx_m_lyap, 'b-', linewidth=1, alpha=0.7, label='Model')
ax8.set_xlabel('$x$')
ax8.set_ylabel(r'$\dot{x}$')
ax8.set_title('Phase Portrait')
ax8.legend(fontsize=9)
ax8.grid(True, alpha=0.3)

# Plot 9: sensitivity of final error / convergence speed to the adaptation gain
ax9 = fig.add_subplot(3, 3, 9)
gamma_values = [1.0, 2.0, 5.0, 10.0]
final_errors = []
convergence_times = []

for gamma_test in gamma_values:
    sol_test = odeint(mrac_lyapunov_system, initial_state, t_sim, args=(reference_signal, gamma_test))
    e_test = sol_test[:, 0] - sol_test[:, 1]
    final_errors.append(np.sqrt(np.mean(e_test[-200:]**2)))

    # Convergence time: first instant with |e| < 0.1, capped at the horizon
    conv_idx = np.where(np.abs(e_test) < 0.1)[0]
    if len(conv_idx) > 0:
        convergence_times.append(t_sim[conv_idx[0]])
    else:
        convergence_times.append(20.0)

ax9_twin = ax9.twinx()
x_pos = np.arange(len(gamma_values))
width = 0.35
bars1 = ax9.bar(x_pos - width/2, final_errors, width, label='Final RMS Error',
                color='steelblue', edgecolor='black')
bars2 = ax9_twin.bar(x_pos + width/2, convergence_times, width, label='Conv. Time',
                     color='coral', edgecolor='black')
ax9.set_xlabel(r'Adaptation Gain $\gamma$')
ax9.set_ylabel('RMS Error', color='steelblue')
ax9_twin.set_ylabel('Convergence Time (s)', color='coral')
ax9.set_xticks(x_pos)
ax9.set_xticklabels([str(g) for g in gamma_values])
ax9.set_title('Adaptation Gain Sensitivity')
ax9.tick_params(axis='y', labelcolor='steelblue')
ax9_twin.tick_params(axis='y', labelcolor='coral')

plt.tight_layout()
plt.savefig('adaptive_control_analysis.pdf', dpi=150, bbox_inches='tight')
plt.close()
509
510
# Collect the per-method metrics consumed by the LaTeX tables below
comparison_data = {
    'MIT': {'RMS_error': tracking_error_mit_rms, 'k_x_error': param_error_k_x_mit, 'k_r_error': param_error_k_r_mit},
    'Lyapunov': {'RMS_error': tracking_error_lyap_rms, 'k_x_error': param_error_k_x_lyap, 'k_r_error': param_error_k_r_lyap},
    'Sigma-mod': {'RMS_error': tracking_error_sigma_rms, 'k_x_error': np.abs(k_x_sigma[-1] - k_x_star),
                  'k_r_error': np.abs(k_r_sigma[-1] - k_r_star)}
}

# These values are referenced later via \py{} and the table pycode blocks;
# bare print() here would leak into the compiled document, hence commented out.
# print(f"\nPerformance Summary:")
# print(f"RLS final estimates: a_hat = {a_hat_rls[-1]:.3f} (true: {a_true}), b_hat = {b_hat_rls[-1]:.3f} (true: {b_true})")
# print(f"RLS parameter errors: |a_hat - a*| = {param_error_a_rls:.4f}, |b_hat - b*| = {param_error_b_rls:.4f}")
522
523
\end{pycode}
524
525
\begin{figure}[htbp]
526
\centering
527
\includegraphics[width=\textwidth]{adaptive_control_analysis.pdf}
528
\caption{Comprehensive adaptive control analysis: (a) MRAC tracking performance showing
529
plant output following reference model; (b) Comparison of tracking errors for MIT rule,
530
Lyapunov adaptation, and $\sigma$-modification; (c-d) Parameter convergence for state
531
feedback gain $k_x$ and feedforward gain $k_r$; (e) RLS parameter estimation showing
532
convergence to true plant parameters $a$ and $b$ under persistent excitation; (f) RLS
533
prediction error decay; (g) Persistent excitation measure quantified by minimum eigenvalue
534
of regressor product matrix; (h) Phase portrait comparison between plant and reference model
535
trajectories; (i) Adaptation gain sensitivity analysis showing trade-off between convergence
536
speed and steady-state accuracy.}
537
\label{fig:adaptive_control}
538
\end{figure}
539
540
\section{Results}
541
542
\subsection{MRAC Performance Comparison}
543
544
\begin{pycode}
545
# Emit the MRAC comparison table as LaTeX; pythontex splices the printed
# lines directly into the document at this point.
table_lines = [
    r"\begin{table}[htbp]",
    r"\centering",
    r"\caption{MRAC Adaptation Law Performance Comparison}",
    r"\begin{tabular}{lccc}",
    r"\toprule",
    r"Method & RMS Error & $|\hat{k}_x - k_x^*|$ & $|\hat{k}_r - k_r^*|$ \\",
    r"\midrule",
]

# One row per adaptation law, using the metrics gathered in comparison_data.
for method, data in comparison_data.items():
    table_lines.append(
        f"{method} & {data['RMS_error']:.4f} & {data['k_x_error']:.4f} & {data['k_r_error']:.4f} \\\\"
    )

table_lines += [
    r"\midrule",
    f"True values & --- & $k_x^* = {k_x_star:.3f}$ & $k_r^* = {k_r_star:.3f}$ \\\\",
    r"\bottomrule",
    r"\end{tabular}",
    r"\label{tab:mrac_comparison}",
    r"\end{table}",
]

print("\n".join(table_lines))
562
\end{pycode}
563
564
\subsection{RLS Parameter Identification}
565
566
\begin{pycode}
567
# Emit the RLS identification results table.  NOTE: the estimates start at
# zero, so the third column reports the early estimate after 1 s of data
# (index 100 at dt = 0.01 s) -- the previous header "Initial Estimate" was
# misleading, since theta(0) = 0 by construction.
print(r"\begin{table}[htbp]")
print(r"\centering")
print(r"\caption{Recursive Least Squares Parameter Estimation Results}")
print(r"\begin{tabular}{lcccc}")
print(r"\toprule")
print(r"Parameter & True Value & Estimate at $t=1$\,s & Final Estimate & Absolute Error \\")
print(r"\midrule")

print(f"$a$ & {a_true:.3f} & {a_hat_rls[100]:.3f} & {a_hat_rls[-1]:.3f} & {param_error_a_rls:.4f} \\\\")
print(f"$b$ & {b_true:.3f} & {b_hat_rls[100]:.3f} & {b_hat_rls[-1]:.3f} & {param_error_b_rls:.4f} \\\\")

print(r"\bottomrule")
print(r"\end{tabular}")
print(r"\label{tab:rls_results}")
print(r"\end{table}")
582
\end{pycode}
583
584
\section{Discussion}
585
586
\begin{example}[Direct vs Indirect Adaptive Control]
587
This analysis demonstrates both approaches:
588
\begin{itemize}
589
\item \textbf{MRAC (Direct)}: Adjusts controller parameters $\hat{k}_x, \hat{k}_r$ directly
590
to minimize tracking error without explicitly identifying plant parameters $a, b$
591
\item \textbf{STR (Indirect)}: Uses RLS to estimate plant parameters $\hat{a}, \hat{b}$,
592
then computes controller gains via certainty equivalence: $k_x = (a_m - \hat{a})/\hat{b}$
593
\end{itemize}
594
\end{example}
595
596
\begin{remark}[Lyapunov Stability Guarantee]
597
The Lyapunov-based adaptation law guarantees:
598
\begin{enumerate}
599
\item Boundedness of all signals (tracking error $e(t)$ and parameter estimates $\hat{\theta}(t)$)
600
\item Convergence: $\lim_{t \to \infty} e(t) = 0$
601
\item Parameter convergence requires persistent excitation; otherwise only $e(t) \to 0$ is guaranteed
602
\end{enumerate}
603
\end{remark}
604
605
\begin{remark}[Sigma Modification Trade-off]
606
The $\sigma$-modification introduces a bias in parameter estimates (preventing exact convergence
607
to $\theta^*$ even under persistent excitation) but provides robustness to bounded disturbances
608
and unmodeled dynamics. The final parameter error is $O(\sigma)$.
609
\end{remark}
610
611
\subsection{Persistent Excitation Requirements}
612
613
\begin{pycode}
614
# Summary statistics of the PE indicator; the printed sentence is inserted
# verbatim into the document by pythontex (double backslashes escape TeX
# macros inside the f-string).
min_pe_eigenvalue = np.min(pe_eigenvalues)
mean_pe_eigenvalue = np.mean(pe_eigenvalues)
print(f"The persistent excitation analysis shows minimum eigenvalue $\\lambda_{{\\min}} = {min_pe_eigenvalue:.2f}$ " +
f"and mean $\\bar{{\\lambda}} = {mean_pe_eigenvalue:.2f}$, confirming adequate richness for parameter convergence.")
618
\end{pycode}
619
620
\section{Conclusions}
621
622
This analysis demonstrates fundamental adaptive control methodologies:
623
624
\begin{enumerate}
625
\item Lyapunov-based MRAC achieves RMS tracking error of \py{f"{tracking_error_lyap_rms:.4f}"}
626
with adaptation gain $\gamma = \py{gamma_lyap}$
627
628
\item Parameter convergence: $\hat{k}_x$ converges to within \py{f"{param_error_k_x_lyap:.4f}"}
629
of ideal value $k_x^* = \py{f"{k_x_star:.3f}"}$
630
631
\item RLS identification under persistent excitation yields parameter errors
632
$|\hat{a} - a^*| = \py{f"{param_error_a_rls:.4f}"}$ and
633
$|\hat{b} - b^*| = \py{f"{param_error_b_rls:.4f}"}$
634
635
\item The $\sigma$-modification trades parameter accuracy for robustness, with parameter
636
drift bounded by $\sigma = \py{sigma_param}$
637
638
\item Adaptation gain selection involves trade-off: higher $\gamma$ accelerates convergence
639
but increases sensitivity to noise and unmodeled dynamics
640
\end{enumerate}
641
642
\section{Engineering Implications}
643
644
\begin{remark}[Practical Considerations]
645
When implementing adaptive control in practice:
646
\begin{itemize}
647
\item Use $\sigma$-modification or projection for robustness to modeling errors
648
\item Ensure persistent excitation through reference signal design (multi-frequency content)
649
\item Monitor parameter drift as indication of plant changes or disturbances
650
\item Combine with robust control for guaranteed performance during adaptation transient
651
\end{itemize}
652
\end{remark}
653
654
\section*{Further Reading}
655
656
\begin{thebibliography}{99}
657
658
\bibitem{astrom1995}
659
{\AA}str{\"o}m, K. J., \& Wittenmark, B. (1995). \textit{Adaptive Control}, 2nd ed. Addison-Wesley.
660
661
\bibitem{ioannou2006}
662
Ioannou, P. A., \& Sun, J. (2006). \textit{Robust Adaptive Control}. Dover Publications.
663
664
\bibitem{narendra1989}
665
Narendra, K. S., \& Annaswamy, A. M. (1989). \textit{Stable Adaptive Systems}. Prentice Hall.
666
667
\bibitem{sastry1989}
668
Sastry, S., \& Bodson, M. (1989). \textit{Adaptive Control: Stability, Convergence, and Robustness}. Prentice Hall.
669
670
\bibitem{slotine1991}
671
Slotine, J.-J. E., \& Li, W. (1991). \textit{Applied Nonlinear Control}. Prentice Hall.
672
673
\bibitem{lavretsky2013}
674
Lavretsky, E., \& Wise, K. A. (2013). \textit{Robust and Adaptive Control with Aerospace Applications}. Springer.
675
676
\bibitem{goodwin1984}
677
Goodwin, G. C., \& Sin, K. S. (1984). \textit{Adaptive Filtering Prediction and Control}. Prentice Hall.
678
679
\bibitem{landau2011}
680
Landau, I. D., Lozano, R., M'Saad, M., \& Karimi, A. (2011). \textit{Adaptive Control: Algorithms, Analysis and Applications}, 2nd ed. Springer.
681
682
\bibitem{tao2003}
683
Tao, G. (2003). \textit{Adaptive Control Design and Analysis}. Wiley-IEEE Press.
684
685
\bibitem{krstic1995}
686
Krsti{\'c}, M., Kanellakopoulos, I., \& Kokotovi{\'c}, P. V. (1995). \textit{Nonlinear and Adaptive Control Design}. Wiley.
687
688
\bibitem{morgan1984}
689
Morgan, A. P., \& Narendra, K. S. (1977). On the stability of nonautonomous differential equations $\dot{x} = [A + B(t)]x$, with skew-symmetric matrix $B(t)$. \textit{SIAM J. Control Optim.}, 15(1), 163--176.
690
691
\bibitem{boyd1986}
692
Boyd, S., \& Sastry, S. S. (1986). Necessary and sufficient conditions for parameter convergence in adaptive control. \textit{Automatica}, 22(6), 629--639.
693
694
\bibitem{egardt1979}
695
Egardt, B. (1979). \textit{Stability of Adaptive Controllers}. Springer-Verlag.
696
697
\bibitem{rohrs1985}
698
Rohrs, C. E., Valavani, L., Athans, M., \& Stein, G. (1985). Robustness of continuous-time adaptive control algorithms in the presence of unmodeled dynamics. \textit{IEEE Trans. Autom. Control}, 30(9), 881--889.
699
700
\bibitem{ioannou1984}
701
Ioannou, P. A., \& Kokotovi{\'c}, P. V. (1984). Robust redesign of adaptive control. \textit{IEEE Trans. Autom. Control}, 29(3), 202--211.
702
703
\bibitem{narendra1987}
704
Narendra, K. S., \& Annaswamy, A. M. (1987). Persistent excitation in adaptive systems. \textit{Int. J. Control}, 45(1), 127--160.
705
706
\bibitem{anderson1977}
707
Anderson, B. D. O., \& Johnson, C. R. (1982). Exponential convergence of adaptive identification and control algorithms. \textit{Automatica}, 18(1), 1--13.
708
709
\bibitem{kreisselmeier1986}
710
Kreisselmeier, G., \& Anderson, B. D. O. (1986). Robust model reference adaptive control. \textit{IEEE Trans. Autom. Control}, 31(2), 127--133.
711
712
\bibitem{peterson1987}
713
Peterson, B. B., \& Narendra, K. S. (1982). Bounded error adaptive control. \textit{IEEE Trans. Autom. Control}, 27(6), 1161--1168.
714
715
\bibitem{kosut1987}
716
Kosut, R. L., Anderson, B. D. O., \& Mareels, I. M. Y. (1987). Stability theory for adaptive systems: Methods of averaging and persistency of excitation. \textit{IEEE Trans. Autom. Control}, 32(1), 26--34.
717
718
\end{thebibliography}
719
720
\end{document}
721
722