% Path: blob/main/latex-templates/templates/control-theory/adaptive_control.tex
% (viewer metadata: 75 views, unlisted)
% Adaptive Control Systems Template
% Topics: MRAC, self-tuning regulators, persistent excitation, robust adaptive control
% Style: Engineering report with simulation analysis

\documentclass[a4paper, 11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{amsmath, amssymb}
\usepackage{graphicx}
\usepackage{siunitx}
\usepackage{booktabs}
\usepackage{subcaption}
\usepackage[makestderr]{pythontex}

% Theorem environments
\newtheorem{definition}{Definition}[section]
\newtheorem{theorem}{Theorem}[section]
\newtheorem{example}{Example}[section]
\newtheorem{remark}{Remark}[section]

\title{Adaptive Control Systems: Model Reference and Self-Tuning Approaches}
\author{Control Systems Laboratory}
\date{\today}

\begin{document}
\maketitle

\begin{abstract}
This report presents a comprehensive analysis of adaptive control systems with emphasis on
Model Reference Adaptive Control (MRAC) and Self-Tuning Regulators (STR). We examine the
stability guarantees provided by Lyapunov-based adaptation laws, analyze the role of
persistent excitation in parameter convergence, and compare direct versus indirect adaptive
approaches. Computational simulations demonstrate parameter adaptation dynamics, tracking
performance, and robustness modifications including $\sigma$-modification and projection
methods for handling unmodeled dynamics and bounded disturbances.
\end{abstract}

\section{Introduction}

Adaptive control addresses the challenge of controlling systems with unknown or time-varying
parameters.
Unlike robust control which handles uncertainty through conservative design,
adaptive control actively estimates and compensates for parameter variations in real-time.

\begin{definition}[Adaptive Control Problem]
Given a plant with unknown parameters $\theta^*$ and a desired performance specification
(typically represented by a reference model), design a controller with adjustable parameters
$\hat{\theta}(t)$ and an adaptation mechanism such that:
\begin{equation}
\lim_{t \to \infty} [y(t) - y_m(t)] = 0
\end{equation}
where $y(t)$ is the plant output and $y_m(t)$ is the reference model output.
\end{definition}

\section{Theoretical Framework}

\subsection{Model Reference Adaptive Control (MRAC)}

Consider a first-order plant:
\begin{equation}
\dot{x} = ax + bu, \quad y = x
\end{equation}
where $a$ and $b$ are unknown parameters. The reference model is:
\begin{equation}
\dot{x}_m = a_m x_m + b_m r, \quad y_m = x_m
\end{equation}

\begin{definition}[MRAC Control Law]
The control law has the form:
\begin{equation}
u = \theta_1^T \omega_1 + \theta_2^T \omega_2 = \hat{k}_x x + \hat{k}_r r
\end{equation}
where $\hat{k}_x$ and $\hat{k}_r$ are adaptive gains.
\end{definition}

\begin{theorem}[MIT Rule]
The gradient-based adaptation law:
\begin{equation}
\dot{\hat{\theta}} = -\gamma e \frac{\partial e}{\partial \hat{\theta}}
\end{equation}
where $e = y - y_m$ and $\gamma > 0$ is the adaptation gain, adjusts parameters in the
direction that reduces the instantaneous error.
\end{theorem}

\begin{theorem}[Lyapunov-Based Adaptation]
For the parameter error $\tilde{\theta} = \hat{\theta} - \theta^*$, consider the Lyapunov
function:
\begin{equation}
V = \frac{1}{2}e^2 + \frac{1}{2\gamma}\tilde{\theta}^T\tilde{\theta}
\end{equation}
The adaptation law:
\begin{equation}
\dot{\hat{\theta}} = -\gamma e \omega
\end{equation}
guarantees $\dot{V} \leq 0$, ensuring bounded tracking error and parameter
estimates.94\end{theorem}9596\subsection{Self-Tuning Regulators}9798\begin{definition}[Certainty Equivalence Principle]99Self-tuning regulators separate the control problem into two stages:100\begin{enumerate}101\item \textbf{Parameter Estimation}: Estimate plant parameters $\hat{\theta}(t)$ using recursive identification102\item \textbf{Control Design}: Design controller using $\hat{\theta}(t)$ as if they were true values103\end{enumerate}104\end{definition}105106\begin{theorem}[Recursive Least Squares (RLS)]107For the regression model $y(t) = \phi^T(t)\theta + \epsilon(t)$, the RLS algorithm:108\begin{align}109\hat{\theta}(t) &= \hat{\theta}(t-1) + K(t)[y(t) - \phi^T(t)\hat{\theta}(t-1)] \\110K(t) &= \frac{P(t-1)\phi(t)}{1 + \phi^T(t)P(t-1)\phi(t)} \\111P(t) &= P(t-1) - K(t)\phi^T(t)P(t-1)112\end{align}113minimizes the weighted sum of squared prediction errors.114\end{theorem}115116\subsection{Persistent Excitation}117118\begin{definition}[Persistent Excitation]119A signal $\phi(t)$ is persistently exciting of order $n$ if there exist $\alpha, T_0 > 0$120such that for all $t \geq 0$:121\begin{equation}122\int_t^{t+T_0} \phi(\tau)\phi^T(\tau) d\tau \geq \alpha I_n123\end{equation}124\end{definition}125126\begin{theorem}[Parameter Convergence]127Under persistent excitation, the RLS algorithm guarantees exponential convergence:128\begin{equation}129\|\tilde{\theta}(t)\| \leq c e^{-\lambda t} \|\tilde{\theta}(0)\|130\end{equation}131for some constants $c, \lambda > 0$.132\end{theorem}133134\subsection{Robust Modifications}135136\begin{definition}[Sigma Modification]137To handle unmodeled dynamics and bounded disturbances, the $\sigma$-modification adds a138damping term:139\begin{equation}140\dot{\hat{\theta}} = -\gamma e \omega - \sigma \gamma \hat{\theta}141\end{equation}142where $\sigma > 0$ is a small constant that prevents parameter drift.143\end{definition}144145\begin{definition}[Projection Modification]146Constrain parameter estimates to a known convex set 
$\mathcal{S}$:
\begin{equation}
\dot{\hat{\theta}} = \text{Proj}(-\gamma e \omega, \hat{\theta})
\end{equation}
where the projection ensures $\hat{\theta}(t) \in \mathcal{S}$ for all $t$.
\end{definition}

\section{Computational Analysis}

\begin{pycode}
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.linalg import lstsq

np.random.seed(42)

# Plant parameters (unknown to the controller)
a_true = -2.0
b_true = 3.0

# Reference model parameters (desired closed-loop behavior)
a_m = -5.0
b_m = 5.0

# Ideal (model-matching) controller gains:
#   a + b*k_x = a_m  =>  k_x* = (a_m - a) / b
#   b*k_r   = b_m    =>  k_r* = b_m / b
k_x_star = (a_m - a_true) / b_true
k_r_star = b_m / b_true

# Store values for later use (print statements cause LaTeX issues)
# print(f"Ideal controller parameters: k_x* = {k_x_star:.3f}, k_r* = {k_r_star:.3f}")

# MRAC with the MIT rule (gradient adaptation).
# State vector: [x, x_m, k_x, k_r]; r_func maps t -> reference input.
def mrac_mit_system(state, t, r_func, gamma_mit):
    x, x_m, k_x, k_r = state

    r = r_func(t)

    # Control law
    u = k_x * x + k_r * r

    # Plant dynamics
    dx = a_true * x + b_true * u

    # Reference model
    dx_m = a_m * x_m + b_m * r

    # Tracking error
    e = x - x_m

    # MIT rule (gradient descent); sensitivities de/dk_x ~ x, de/dk_r ~ r
    dk_x = -gamma_mit * e * x
    dk_r = -gamma_mit * e * r

    return [dx, dx_m, dk_x, dk_r]

# MRAC with Lyapunov-based adaptation (regressor omega = [x, r] here).
def mrac_lyapunov_system(state, t, r_func, gamma_lyap):
    x, x_m, k_x, k_r = state

    r = r_func(t)

    # Control law
    u = k_x * x + k_r * r

    # Plant dynamics
    dx = a_true * x + b_true * u

    # Reference model
    dx_m = a_m * x_m + b_m * r

    # Tracking error
    e = x - x_m

    # Lyapunov-based adaptation law
    dk_x = -gamma_lyap * e * x
    dk_r = -gamma_lyap * e * r

    return [dx, dx_m, dk_x, dk_r]

# MRAC with sigma-modification (leakage term prevents parameter drift).
def mrac_sigma_system(state, t, r_func, gamma_sig, sigma):
    x, x_m, k_x, k_r = state

    r = r_func(t)

    u = k_x * x + k_r * r
    dx = a_true * x + b_true * u
    dx_m = a_m * x_m + b_m * r
    e = x - x_m

    # Sigma-modification: gradient step plus leakage toward zero
    dk_x = -gamma_sig * e * x - sigma * gamma_sig * k_x
    dk_r = -gamma_sig * e * r - sigma * gamma_sig * k_r

    return [dx, dx_m, dk_x, dk_r]

# Reference input: step + sinusoids (persistently exciting)
def reference_signal(t):
    return 1.0 + 0.5 * np.sin(2.0 * t) + 0.3 * np.sin(5.0 * t)

# Time vector
t_sim = np.linspace(0, 20, 2000)

# Initial conditions: [x, x_m, k_x, k_r]
initial_state = [0.0, 0.0, 0.0, 0.0]

# Adaptation gains
gamma_mit = 2.0
gamma_lyap = 5.0
gamma_sig = 5.0
sigma_param = 0.1

# Simulate MIT rule
sol_mit = odeint(mrac_mit_system, initial_state, t_sim, args=(reference_signal, gamma_mit))
x_mit = sol_mit[:, 0]
x_m_mit = sol_mit[:, 1]
k_x_mit = sol_mit[:, 2]
k_r_mit = sol_mit[:, 3]
e_mit = x_mit - x_m_mit

# Simulate Lyapunov adaptation
sol_lyap = odeint(mrac_lyapunov_system, initial_state, t_sim, args=(reference_signal, gamma_lyap))
x_lyap = sol_lyap[:, 0]
x_m_lyap = sol_lyap[:, 1]
k_x_lyap = sol_lyap[:, 2]
k_r_lyap = sol_lyap[:, 3]
e_lyap = x_lyap - x_m_lyap

# Simulate sigma-modification
sol_sigma = odeint(mrac_sigma_system, initial_state, t_sim, args=(reference_signal, gamma_sig, sigma_param))
x_sigma = sol_sigma[:, 0]
x_m_sigma = sol_sigma[:, 1]
k_x_sigma = sol_sigma[:, 2]
k_r_sigma = sol_sigma[:, 3]
e_sigma = x_sigma - x_m_sigma

# Recursive Least Squares estimator with exponential forgetting.
class RLSEstimator:
    def __init__(self, n_params, forgetting_factor=1.0):
        self.theta_hat = np.zeros(n_params)   # parameter estimate
        self.P = 100.0 * np.eye(n_params)     # large P0 = weak prior confidence
        self.lambda_ff = forgetting_factor

    def update(self, phi, y):
        # Standard RLS update with forgetting factor lambda_ff
        K = self.P @ phi / (self.lambda_ff + phi.T @ self.P @ phi)
        prediction_error = y - phi.T @ self.theta_hat
        self.theta_hat = self.theta_hat + K * prediction_error
        self.P = (self.P - np.outer(K, phi.T @ self.P)) / self.lambda_ff
        return self.theta_hat, prediction_error

# Forward-Euler plant step with additive process noise on the derivative
def plant_with_noise(x, u, dt, noise_level=0.01):
    dx = a_true * x + b_true * u + noise_level * np.random.randn()
    x_next = x + dx * dt
    return x_next

# RLS simulation
dt_rls = 0.01
t_rls = np.arange(0, 20, dt_rls)
n_samples = len(t_rls)

x_rls = np.zeros(n_samples)
u_rls = np.zeros(n_samples)
theta_rls = np.zeros((n_samples, 2))
prediction_errors = np.zeros(n_samples)

rls = RLSEstimator(n_params=2, forgetting_factor=0.995)
x_rls[0] = 0.1

for i in range(1, n_samples):
    # Excitation signal (pseudo-random binary sequence approximation)
    u_rls[i] = 2.0 * np.sign(np.sin(3.0 * t_rls[i]) + 0.5 * np.sin(7.0 * t_rls[i]))

    # Plant response
    x_rls[i] = plant_with_noise(x_rls[i-1], u_rls[i-1], dt_rls)

    # Regress the derivative estimate on [x, u]:
    #   (x(t) - x(t-1))/dt ~= a*x(t-1) + b*u(t-1)
    # so theta converges to the continuous-time parameters [a, b].
    # (Regressing x(t) directly would identify the discrete-time
    # parameters [1 + a*dt, b*dt] instead, which is not what the
    # plots and tables compare against.)
    phi = np.array([x_rls[i-1], u_rls[i-1]])
    xdot_est = (x_rls[i] - x_rls[i-1]) / dt_rls

    # RLS update
    theta_hat, pred_err = rls.update(phi, xdot_est)
    theta_rls[i] = theta_hat
    prediction_errors[i] = pred_err

a_hat_rls = theta_rls[:, 0]
b_hat_rls = theta_rls[:, 1]

# Persistent excitation analysis: minimum eigenvalue of the windowed
# information matrix Phi^T Phi (bounded away from zero indicates PE).
def check_persistent_excitation(phi_matrix, window_size):
    n_samples = len(phi_matrix)
    min_eigenvalues = []
    times = []

    for i in range(window_size, n_samples):
        window = phi_matrix[i-window_size:i]
        Phi = window.T @ window
        eigvals = np.linalg.eigvalsh(Phi)
        min_eigenvalues.append(np.min(eigvals))
        times.append(i * dt_rls)

    return np.array(times), np.array(min_eigenvalues)

# Build regressor matrix for PE analysis
phi_matrix_rls = np.column_stack([x_rls[:-1], u_rls[:-1]])
pe_times, pe_eigenvalues = check_persistent_excitation(phi_matrix_rls, window_size=100)

# Calculate performance metrics
tracking_error_mit_rms = np.sqrt(np.mean(e_mit**2))
tracking_error_lyap_rms = np.sqrt(np.mean(e_lyap**2))
tracking_error_sigma_rms = np.sqrt(np.mean(e_sigma**2))

param_error_k_x_mit = np.abs(k_x_mit[-1] - k_x_star)
param_error_k_r_mit = np.abs(k_r_mit[-1] - k_r_star)
param_error_k_x_lyap = np.abs(k_x_lyap[-1] - k_x_star)
param_error_k_r_lyap = np.abs(k_r_lyap[-1] - k_r_star)

param_error_a_rls = np.abs(a_hat_rls[-1] - a_true)
param_error_b_rls = np.abs(b_hat_rls[-1] - b_true)

# Comprehensive figure (raw strings avoid invalid \escapes in mathtext labels)
fig = plt.figure(figsize=(16, 14))

# Plot 1: MRAC tracking performance (Lyapunov)
ax1 = fig.add_subplot(3, 3, 1)
r_values = [reference_signal(t) for t in t_sim]
ax1.plot(t_sim, r_values, 'k--', linewidth=2, label=r'Reference $r(t)$', alpha=0.7)
ax1.plot(t_sim, x_m_lyap, 'b-', linewidth=2, label=r'Model $y_m(t)$')
ax1.plot(t_sim, x_lyap, 'r-', linewidth=1.5, label=r'Plant $y(t)$', alpha=0.8)
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Output')
ax1.set_title('MRAC Tracking Performance (Lyapunov)')
ax1.legend(fontsize=9)
ax1.grid(True, alpha=0.3)
ax1.set_xlim(0, 20)

# Plot 2: Tracking error comparison
ax2 = fig.add_subplot(3, 3, 2)
ax2.plot(t_sim, e_mit, linewidth=1.5, label=rf'MIT ($\gamma$={gamma_mit})', alpha=0.7)
ax2.plot(t_sim, e_lyap, linewidth=1.5, label=rf'Lyapunov ($\gamma$={gamma_lyap})', alpha=0.7)
ax2.plot(t_sim, e_sigma, linewidth=1.5, label=rf'$\sigma$-mod ($\sigma$={sigma_param})', alpha=0.7)
ax2.axhline(y=0, color='k', linestyle=':', linewidth=1)
ax2.set_xlabel('Time (s)')
ax2.set_ylabel(r'Tracking Error $e(t)$')
ax2.set_title('Adaptation Law Comparison')
ax2.legend(fontsize=8)
ax2.grid(True, alpha=0.3)
ax2.set_xlim(0, 20)

# Plot 3: Parameter adaptation k_x
ax3 = fig.add_subplot(3, 3, 3)
ax3.plot(t_sim, k_x_mit, linewidth=1.5, label='MIT rule')
ax3.plot(t_sim, k_x_lyap, linewidth=1.5, label='Lyapunov')
ax3.plot(t_sim, k_x_sigma, linewidth=1.5, label=r'$\sigma$-modification')
ax3.axhline(y=k_x_star, color='k', linestyle='--', linewidth=2, label=r'$k_x^*$')
ax3.set_xlabel('Time (s)')
ax3.set_ylabel(r'$\hat{k}_x(t)$')
ax3.set_title('State Feedback Gain Adaptation')
ax3.legend(fontsize=8)
ax3.grid(True, alpha=0.3)
ax3.set_xlim(0, 20)

# Plot 4: Parameter adaptation k_r
ax4 = fig.add_subplot(3, 3, 4)
ax4.plot(t_sim, k_r_mit, linewidth=1.5, label='MIT rule')
ax4.plot(t_sim, k_r_lyap, linewidth=1.5, label='Lyapunov')
ax4.plot(t_sim, k_r_sigma, linewidth=1.5, label=r'$\sigma$-modification')
ax4.axhline(y=k_r_star, color='k', linestyle='--', linewidth=2, label=r'$k_r^*$')
ax4.set_xlabel('Time (s)')
ax4.set_ylabel(r'$\hat{k}_r(t)$')
ax4.set_title('Feedforward Gain Adaptation')
ax4.legend(fontsize=8)
ax4.grid(True, alpha=0.3)
ax4.set_xlim(0, 20)

# Plot 5: RLS parameter estimation
ax5 = fig.add_subplot(3, 3, 5)
ax5.plot(t_rls, a_hat_rls, 'b-', linewidth=1.5, label=r'$\hat{a}(t)$')
ax5.plot(t_rls, b_hat_rls, 'r-', linewidth=1.5, label=r'$\hat{b}(t)$')
ax5.axhline(y=a_true, color='b', linestyle='--', linewidth=2, alpha=0.5, label=r'$a^*$')
ax5.axhline(y=b_true, color='r', linestyle='--', linewidth=2, alpha=0.5, label=r'$b^*$')
ax5.set_xlabel('Time (s)')
ax5.set_ylabel('Parameter Estimate')
ax5.set_title('RLS Parameter Convergence')
ax5.legend(fontsize=8, loc='right')
ax5.grid(True, alpha=0.3)
ax5.set_xlim(0, 20)
ax5.set_ylim(-4, 4)

# Plot 6: RLS prediction error
ax6 = fig.add_subplot(3, 3, 6)
ax6.plot(t_rls, prediction_errors, 'g-', linewidth=0.5, alpha=0.7)
ax6.set_xlabel('Time (s)')
ax6.set_ylabel('Prediction Error')
ax6.set_title('RLS Prediction Error')
ax6.grid(True, alpha=0.3)
ax6.set_xlim(0, 20)

# Plot 7: Persistent excitation measure
ax7 = fig.add_subplot(3, 3, 7)
ax7.plot(pe_times, pe_eigenvalues, 'purple', linewidth=1.5)
ax7.set_xlabel('Time (s)')
ax7.set_ylabel(r'Min. Eigenvalue of $\Phi^T\Phi$')
ax7.set_title('Persistent Excitation Indicator')
ax7.grid(True, alpha=0.3)
ax7.set_xlim(0, 20)

# Plot 8: Phase portrait (x vs dx/dt, derivative approximated numerically)
ax8 = fig.add_subplot(3, 3, 8)
dx_lyap = np.gradient(x_lyap, t_sim)
dx_m_lyap = np.gradient(x_m_lyap, t_sim)
ax8.plot(x_lyap, dx_lyap, 'r-', linewidth=1, alpha=0.7, label='Plant')
ax8.plot(x_m_lyap, dx_m_lyap, 'b-', linewidth=1, alpha=0.7, label='Model')
ax8.set_xlabel(r'$x$')
ax8.set_ylabel(r'$\dot{x}$')
ax8.set_title('Phase Portrait')
ax8.legend(fontsize=9)
ax8.grid(True, alpha=0.3)

# Plot 9: Adaptation gain sensitivity (accuracy vs speed trade-off)
ax9 = fig.add_subplot(3, 3, 9)
gamma_values = [1.0, 2.0, 5.0, 10.0]
final_errors = []
convergence_times = []

for gamma_test in gamma_values:
    sol_test = odeint(mrac_lyapunov_system, initial_state, t_sim, args=(reference_signal, gamma_test))
    e_test = sol_test[:, 0] - sol_test[:, 1]
    final_errors.append(np.sqrt(np.mean(e_test[-200:]**2)))

    # Convergence time: first instant with |e| < 0.1 (20 s if never reached)
    conv_idx = np.where(np.abs(e_test) < 0.1)[0]
    if len(conv_idx) > 0:
        convergence_times.append(t_sim[conv_idx[0]])
    else:
        convergence_times.append(20.0)

ax9_twin = ax9.twinx()
x_pos = np.arange(len(gamma_values))
width = 0.35
bars1 = ax9.bar(x_pos - width/2, final_errors, width, label='Final RMS Error',
                color='steelblue', edgecolor='black')
bars2 = ax9_twin.bar(x_pos + width/2, convergence_times, width, label='Conv. Time',
                     color='coral', edgecolor='black')
ax9.set_xlabel(r'Adaptation Gain $\gamma$')
ax9.set_ylabel('RMS Error', color='steelblue')
ax9_twin.set_ylabel('Convergence Time (s)', color='coral')
ax9.set_xticks(x_pos)
ax9.set_xticklabels([str(g) for g in gamma_values])
ax9.set_title('Adaptation Gain Sensitivity')
ax9.tick_params(axis='y', labelcolor='steelblue')
ax9_twin.tick_params(axis='y', labelcolor='coral')

plt.tight_layout()
plt.savefig('adaptive_control_analysis.pdf', dpi=150, bbox_inches='tight')
plt.close()

# Comparison table data, consumed by the Results tables below
comparison_data = {
    'MIT': {'RMS_error': tracking_error_mit_rms,
            'k_x_error': param_error_k_x_mit,
            'k_r_error': param_error_k_r_mit},
    'Lyapunov': {'RMS_error': tracking_error_lyap_rms,
                 'k_x_error': param_error_k_x_lyap,
                 'k_r_error': param_error_k_r_lyap},
    'Sigma-mod': {'RMS_error': tracking_error_sigma_rms,
                  'k_x_error': np.abs(k_x_sigma[-1] - k_x_star),
                  'k_r_error': np.abs(k_r_sigma[-1] - k_r_star)}
}

# Store performance metrics for use in tables
# print(f"\nPerformance Summary:")
# print(f"RLS final estimates: a_hat = {a_hat_rls[-1]:.3f} (true: {a_true}), b_hat = {b_hat_rls[-1]:.3f} (true: {b_true})")
# print(f"RLS parameter errors: |a_hat - a*| = {param_error_a_rls:.4f}, |b_hat - b*| = {param_error_b_rls:.4f}")
\end{pycode}

\begin{figure}[htbp]
\centering
\includegraphics[width=\textwidth]{adaptive_control_analysis.pdf}
\caption{Comprehensive adaptive control analysis: (a) MRAC tracking performance showing
plant output following reference model; (b) Comparison of tracking errors for MIT rule,
Lyapunov adaptation, and $\sigma$-modification; (c-d) Parameter convergence for state
feedback gain $k_x$ and feedforward gain $k_r$; (e) RLS parameter estimation showing
convergence to true plant parameters $a$ and $b$ under persistent excitation; (f) RLS
prediction error decay; (g) Persistent excitation measure quantified by minimum eigenvalue
of
regressor product matrix; (h) Phase portrait comparison between plant and reference model
trajectories; (i) Adaptation gain sensitivity analysis showing trade-off between convergence
speed and steady-state accuracy.}
\label{fig:adaptive_control}
\end{figure}

\section{Results}

\subsection{MRAC Performance Comparison}

\begin{pycode}
# Emit the MRAC comparison table as LaTeX; pythontex inserts printed text here.
print(r"\begin{table}[htbp]")
print(r"\centering")
print(r"\caption{MRAC Adaptation Law Performance Comparison}")
print(r"\begin{tabular}{lccc}")
print(r"\toprule")
print(r"Method & RMS Error & $|\hat{k}_x - k_x^*|$ & $|\hat{k}_r - k_r^*|$ \\")
print(r"\midrule")

# One row per adaptation law; \\\\ in the f-string emits the LaTeX row terminator \\
for method, data in comparison_data.items():
    print(f"{method} & {data['RMS_error']:.4f} & {data['k_x_error']:.4f} & {data['k_r_error']:.4f} \\\\")

print(r"\midrule")
print(f"True values & --- & $k_x^* = {k_x_star:.3f}$ & $k_r^* = {k_r_star:.3f}$ \\\\")
print(r"\bottomrule")
print(r"\end{tabular}")
print(r"\label{tab:mrac_comparison}")
print(r"\end{table}")
\end{pycode}

\subsection{RLS Parameter Identification}

\begin{pycode}
# RLS results table; the "initial" column samples the estimate at index 100 (t = 1 s)
print(r"\begin{table}[htbp]")
print(r"\centering")
print(r"\caption{Recursive Least Squares Parameter Estimation Results}")
print(r"\begin{tabular}{lcccc}")
print(r"\toprule")
print(r"Parameter & True Value & Initial Estimate & Final Estimate & Absolute Error \\")
print(r"\midrule")

print(f"$a$ & {a_true:.3f} & {a_hat_rls[100]:.3f} & {a_hat_rls[-1]:.3f} & {param_error_a_rls:.4f} \\\\")
print(f"$b$ & {b_true:.3f} & {b_hat_rls[100]:.3f} & {b_hat_rls[-1]:.3f} & {param_error_b_rls:.4f} \\\\")

print(r"\bottomrule")
print(r"\end{tabular}")
print(r"\label{tab:rls_results}")
print(r"\end{table}")
\end{pycode}

\section{Discussion}

\begin{example}[Direct vs Indirect Adaptive Control]
This analysis demonstrates both approaches:
\begin{itemize}
\item \textbf{MRAC (Direct)}: Adjusts controller parameters $\hat{k}_x, \hat{k}_r$ directly
to minimize
tracking error without explicitly identifying plant parameters $a, b$
\item \textbf{STR (Indirect)}: Uses RLS to estimate plant parameters $\hat{a}, \hat{b}$,
then computes controller gains via certainty equivalence: $k_x = (a_m - \hat{a})/\hat{b}$
\end{itemize}
\end{example}

\begin{remark}[Lyapunov Stability Guarantee]
The Lyapunov-based adaptation law guarantees:
\begin{enumerate}
\item Boundedness of all signals (tracking error $e(t)$ and parameter estimates $\hat{\theta}(t)$)
\item Convergence: $\lim_{t \to \infty} e(t) = 0$
\item Parameter convergence requires persistent excitation; otherwise only $e(t) \to 0$ is guaranteed
\end{enumerate}
\end{remark}

\begin{remark}[Sigma Modification Trade-off]
The $\sigma$-modification introduces a bias in parameter estimates (preventing exact convergence
to $\theta^*$ even under persistent excitation) but provides robustness to bounded disturbances
and unmodeled dynamics. The final parameter error is $O(\sigma)$.
\end{remark}

\subsection{Persistent Excitation Requirements}

\begin{pycode}
# Summarize the PE indicator; doubled backslashes survive the f-string and
# reach LaTeX as \lambda, \min, \bar.
min_pe_eigenvalue = np.min(pe_eigenvalues)
mean_pe_eigenvalue = np.mean(pe_eigenvalues)
print(f"The persistent excitation analysis shows minimum eigenvalue $\\lambda_{{\\min}} = {min_pe_eigenvalue:.2f}$ "
      f"and mean $\\bar{{\\lambda}} = {mean_pe_eigenvalue:.2f}$, confirming adequate richness for parameter convergence.")
\end{pycode}

\section{Conclusions}

This analysis demonstrates fundamental adaptive control methodologies:

\begin{enumerate}
\item Lyapunov-based MRAC achieves RMS tracking error of \py{f"{tracking_error_lyap_rms:.4f}"}
with adaptation gain $\gamma = \py{gamma_lyap}$

\item Parameter convergence: $\hat{k}_x$ converges to within \py{f"{param_error_k_x_lyap:.4f}"}
of ideal value $k_x^* = \py{f"{k_x_star:.3f}"}$

\item RLS identification under persistent excitation yields parameter errors
$|\hat{a} - a^*| =
\py{f"{param_error_a_rls:.4f}"}$ and
$|\hat{b} - b^*| = \py{f"{param_error_b_rls:.4f}"}$

\item The $\sigma$-modification trades parameter accuracy for robustness, with parameter
drift bounded by $\sigma = \py{sigma_param}$

\item Adaptation gain selection involves trade-off: higher $\gamma$ accelerates convergence
but increases sensitivity to noise and unmodeled dynamics
\end{enumerate}

\section{Engineering Implications}

\begin{remark}[Practical Considerations]
When implementing adaptive control in practice:
\begin{itemize}
\item Use $\sigma$-modification or projection for robustness to modeling errors
\item Ensure persistent excitation through reference signal design (multi-frequency content)
\item Monitor parameter drift as an indication of plant changes or disturbances
\item Combine with robust control for guaranteed performance during the adaptation transient
\end{itemize}
\end{remark}

\section*{Further Reading}

\begin{thebibliography}{99}

\bibitem{astrom1995}
{\AA}str{\"o}m, K. J., \& Wittenmark, B. (1995). \textit{Adaptive Control}, 2nd ed. Addison-Wesley.

\bibitem{ioannou2006}
Ioannou, P. A., \& Sun, J. (2006). \textit{Robust Adaptive Control}. Dover Publications.

\bibitem{narendra1989}
Narendra, K. S., \& Annaswamy, A. M. (1989). \textit{Stable Adaptive Systems}. Prentice Hall.

\bibitem{sastry1989}
Sastry, S., \& Bodson, M. (1989). \textit{Adaptive Control: Stability, Convergence, and Robustness}. Prentice Hall.

\bibitem{slotine1991}
Slotine, J.-J. E., \& Li, W. (1991). \textit{Applied Nonlinear Control}. Prentice Hall.

\bibitem{lavretsky2013}
Lavretsky, E., \& Wise, K. A. (2013). \textit{Robust and Adaptive Control with Aerospace Applications}. Springer.

\bibitem{goodwin1984}
Goodwin, G. C., \& Sin, K. S. (1984). \textit{Adaptive Filtering Prediction and Control}. Prentice Hall.

\bibitem{landau2011}
Landau, I. D., Lozano, R., M'Saad, M., \& Karimi, A.
(2011). \textit{Adaptive Control: Algorithms, Analysis and Applications}, 2nd ed. Springer.

\bibitem{tao2003}
Tao, G. (2003). \textit{Adaptive Control Design and Analysis}. Wiley-IEEE Press.

\bibitem{krstic1995}
Krsti{\'c}, M., Kanellakopoulos, I., \& Kokotovi{\'c}, P. V. (1995). \textit{Nonlinear and Adaptive Control Design}. Wiley.

\bibitem{morgan1984}
Morgan, A. P., \& Narendra, K. S. (1977). On the stability of nonautonomous differential equations $\dot{x} = [A + B(t)]x$, with skew-symmetric matrix $B(t)$. \textit{SIAM J. Control Optim.}, 15(1), 163--176.

\bibitem{boyd1986}
Boyd, S., \& Sastry, S. S. (1986). Necessary and sufficient conditions for parameter convergence in adaptive control. \textit{Automatica}, 22(6), 629--639.

\bibitem{egardt1979}
Egardt, B. (1979). \textit{Stability of Adaptive Controllers}. Springer-Verlag.

\bibitem{rohrs1985}
Rohrs, C. E., Valavani, L., Athans, M., \& Stein, G. (1985). Robustness of continuous-time adaptive control algorithms in the presence of unmodeled dynamics. \textit{IEEE Trans. Autom. Control}, 30(9), 881--889.

\bibitem{ioannou1984}
Ioannou, P. A., \& Kokotovi{\'c}, P. V. (1984). Robust redesign of adaptive control. \textit{IEEE Trans. Autom. Control}, 29(3), 202--211.

\bibitem{narendra1987}
Narendra, K. S., \& Annaswamy, A. M. (1987). Persistent excitation in adaptive systems. \textit{Int. J. Control}, 45(1), 127--160.

\bibitem{anderson1977}
Anderson, B. D. O., \& Johnson, C. R. (1982). Exponential convergence of adaptive identification and control algorithms. \textit{Automatica}, 18(1), 1--13.

\bibitem{kreisselmeier1986}
Kreisselmeier, G., \& Anderson, B. D. O. (1986). Robust model reference adaptive control. \textit{IEEE Trans. Autom. Control}, 31(2), 127--133.

\bibitem{peterson1987}
Peterson, B. B., \& Narendra, K. S. (1982). Bounded error adaptive control. \textit{IEEE Trans. Autom.
Control}, 27(6), 1161--1168.

\bibitem{kosut1987}
Kosut, R. L., Anderson, B. D. O., \& Mareels, I. M. Y. (1987). Stability theory for adaptive systems: Methods of averaging and persistency of excitation. \textit{IEEE Trans. Autom. Control}, 32(1), 26--34.

\end{thebibliography}

\end{document}