GitHub Repository: hackassin/Coursera-Machine-Learning
Path: blob/master/Week 7/Programming Assignment - 6/ex6/svmTrain.m

function [model] = svmTrain(X, Y, C, kernelFunction, ...
                            tol, max_passes)
%SVMTRAIN Trains an SVM classifier using a simplified version of the SMO
%algorithm.
%   [model] = SVMTRAIN(X, Y, C, kernelFunction, tol, max_passes) trains an
%   SVM classifier and returns the trained model. X is the matrix of
%   training examples: each row is a training example, and the jth column
%   holds the jth feature. Y is a column vector containing 1 for positive
%   examples and 0 for negative examples. C is the standard SVM
%   regularization parameter. tol is a tolerance value used for determining
%   equality of floating-point numbers. max_passes controls the number of
%   iterations over the dataset (without changes to the alphas) before the
%   algorithm quits.
%
%   Note: This is a simplified version of the SMO algorithm for training
%   SVMs. In practice, if you want to train an SVM classifier, we
%   recommend using an optimized package such as:
%
%       LIBSVM   (http://www.csie.ntu.edu.tw/~cjlin/libsvm/)
%       SVMLight (http://svmlight.joachims.org/)
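%
%   Example (a sketch; assumes the assignment's gaussianKernel.m is on the
%   path, as in ex6.m, and that X and Y hold the training data):
%
%       C = 1; sigma = 0.1;
%       model = svmTrain(X, Y, C, @(x1, x2) gaussianKernel(x1, x2, sigma));
%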

if ~exist('tol', 'var') || isempty(tol)
    tol = 1e-3;
end

if ~exist('max_passes', 'var') || isempty(max_passes)
    max_passes = 5;
end

% Data parameters
m = size(X, 1);
n = size(X, 2);

% Map 0 to -1
Y(Y==0) = -1;

% Variables
alphas = zeros(m, 1);
b = 0;
E = zeros(m, 1);
passes = 0;
eta = 0;
L = 0;
H = 0;

% Pre-compute the Kernel Matrix since our dataset is small
% (in practice, optimized SVM packages that handle large datasets
% gracefully will _not_ do this)
%
% We have implemented an optimized, vectorized version of the kernels here
% so that the SVM training runs faster.
if strcmp(func2str(kernelFunction), 'linearKernel')
    % Vectorized computation for the Linear Kernel
    % This is equivalent to computing the kernel on every pair of examples
    K = X * X';
elseif strfind(func2str(kernelFunction), 'gaussianKernel')
    % Vectorized RBF Kernel
    % This is equivalent to computing the kernel on every pair of examples
    X2 = sum(X.^2, 2);
    K = bsxfun(@plus, X2, bsxfun(@plus, X2', -2 * (X * X')));
    K = kernelFunction(1, 0) .^ K;
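    % The two lines above first build the matrix of squared distances via
    % the identity ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2*x_i'*x_j,
    % then raise kernelFunction(1, 0) = exp(-1/(2*sigma^2)) elementwise to
    % those distances, yielding exp(-||x_i - x_j||^2 / (2*sigma^2))
    % (this assumes kernelFunction is the assignment's gaussianKernel).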
else
    % Pre-compute the Kernel Matrix
    % The following can be slow due to the lack of vectorization
    K = zeros(m);
    for i = 1:m
        for j = i:m
            K(i,j) = kernelFunction(X(i,:)', X(j,:)');
            K(j,i) = K(i,j); % the matrix is symmetric
        end
    end
end

% Train
fprintf('\nTraining ...');
dots = 12;
while passes < max_passes

    num_changed_alphas = 0;
    for i = 1:m

        % Calculate Ei = f(x(i)) - y(i) using (2).
        % E(i) = b + sum (X(i, :) * (repmat(alphas.*Y,1,n).*X)') - Y(i);
        E(i) = b + sum(alphas .* Y .* K(:,i)) - Y(i);
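        % Here f(x) = sum_j alphas(j)*Y(j)*K(x(j), x) + b is the SVM
        % decision function of (2), evaluated via the precomputed column
        % K(:,i).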

        if (Y(i)*E(i) < -tol && alphas(i) < C) || (Y(i)*E(i) > tol && alphas(i) > 0)

            % In practice, there are many heuristics one can use to select
            % the i and j. In this simplified code, we select them randomly.
            j = ceil(m * rand());
            while j == i  % Make sure i \neq j
                j = ceil(m * rand());
            end

            % Calculate Ej = f(x(j)) - y(j) using (2).
            E(j) = b + sum(alphas .* Y .* K(:,j)) - Y(j);

            % Save old alphas
            alpha_i_old = alphas(i);
            alpha_j_old = alphas(j);

            % Compute L and H by (10) or (11).
            if Y(i) == Y(j)
                L = max(0, alphas(j) + alphas(i) - C);
                H = min(C, alphas(j) + alphas(i));
            else
                L = max(0, alphas(j) - alphas(i));
                H = min(C, C + alphas(j) - alphas(i));
            end
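            % L and H bound the new alphas(j) so that both multipliers stay
            % in [0, C] while keeping alphas(i)*Y(i) + alphas(j)*Y(j)
            % unchanged.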

            if L == H
                % continue to next i.
                continue;
            end

            % Compute eta by (14).
            eta = 2 * K(i,j) - K(i,i) - K(j,j);
            if eta >= 0
                % continue to next i.
                continue;
            end
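            % eta is the second derivative of the dual objective along the
            % constraint line; the closed-form update below is only a
            % maximizer when eta is strictly negative.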

            % Compute and clip new value for alpha j using (12) and (15).
            alphas(j) = alphas(j) - (Y(j) * (E(i) - E(j))) / eta;

            % Clip
            alphas(j) = min(H, alphas(j));
            alphas(j) = max(L, alphas(j));

            % Check if the change in alpha is significant
            if abs(alphas(j) - alpha_j_old) < tol
                % Change too small: restore the old value and continue to
                % next i.
                alphas(j) = alpha_j_old;
                continue;
            end

            % Determine value for alpha i using (16).
            alphas(i) = alphas(i) + Y(i)*Y(j)*(alpha_j_old - alphas(j));

            % Compute b1 and b2 using (17) and (18) respectively.
            b1 = b - E(i) ...
                 - Y(i) * (alphas(i) - alpha_i_old) * K(i,i) ...
                 - Y(j) * (alphas(j) - alpha_j_old) * K(i,j);
            b2 = b - E(j) ...
                 - Y(i) * (alphas(i) - alpha_i_old) * K(i,j) ...
                 - Y(j) * (alphas(j) - alpha_j_old) * K(j,j);

            % Compute b by (19).
            if 0 < alphas(i) && alphas(i) < C
                b = b1;
            elseif 0 < alphas(j) && alphas(j) < C
                b = b2;
            else
                b = (b1 + b2) / 2;
            end
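            % b1 (resp. b2) is the threshold that makes E(i) (resp. E(j))
            % zero; a multiplier strictly inside (0, C) sits on the margin,
            % so its KKT condition determines b exactly; otherwise any
            % value between b1 and b2 is consistent, so their average is
            % used.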

            num_changed_alphas = num_changed_alphas + 1;

        end

    end

    if num_changed_alphas == 0
        passes = passes + 1;
    else
        passes = 0;
    end

    fprintf('.');
    dots = dots + 1;
    if dots > 78
        dots = 0;
        fprintf('\n');
    end
    if exist('OCTAVE_VERSION')
        fflush(stdout);
    end
end
fprintf(' Done! \n\n');

% Save the model
idx = alphas > 0;
model.X = X(idx,:);
model.y = Y(idx);
model.kernelFunction = kernelFunction;
model.b = b;
model.alphas = alphas(idx);
model.w = ((alphas .* Y)' * X)';

end
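
% At prediction time (see the assignment's svmPredict.m) the decision value
% for an input x is f(x) = sum(model.alphas .* model.y .* k(x)) + model.b,
% where k(x) is the kernel evaluated between x and each row of model.X; for
% the linear kernel this reduces to X * model.w + model.b. Note that
% model.w is only meaningful for the linear kernel.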