# coding: utf-8


import sys
from python_environment_check import check_packages
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import matplotlib
from distutils.version import LooseVersion
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier

# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples

# ## Package version checks

# Add folder to path in order to load from the check_packages.py script:

sys.path.insert(0, '..')


# Check recommended package versions:

d = {
    'numpy': '1.21.2',
    'matplotlib': '3.4.3',
    'sklearn': '1.0',
    'pandas': '1.3.2'
}
check_packages(d)


# # Chapter 3 - A Tour of Machine Learning Classifiers Using Scikit-Learn

# ### Overview

# - [Choosing a classification algorithm](#Choosing-a-classification-algorithm)
# - [First steps with scikit-learn](#First-steps-with-scikit-learn)
# - [Training a perceptron via scikit-learn](#Training-a-perceptron-via-scikit-learn)
# - [Modeling class probabilities via logistic regression](#Modeling-class-probabilities-via-logistic-regression)
# - [Logistic regression intuition and conditional probabilities](#Logistic-regression-intuition-and-conditional-probabilities)
# - [Learning the weights of the logistic loss function](#Learning-the-weights-of-the-logistic-loss-function)
# - [Training a logistic regression model with scikit-learn](#Training-a-logistic-regression-model-with-scikit-learn)
# - [Tackling overfitting via regularization](#Tackling-overfitting-via-regularization)
# - [Maximum margin classification with support vector machines](#Maximum-margin-classification-with-support-vector-machines)
# - [Maximum margin intuition](#Maximum-margin-intuition)
# - [Dealing with the nonlinearly separable case using slack variables](#Dealing-with-the-nonlinearly-separable-case-using-slack-variables)
# - [Alternative implementations in scikit-learn](#Alternative-implementations-in-scikit-learn)
# - [Solving nonlinear problems using a kernel SVM](#Solving-nonlinear-problems-using-a-kernel-SVM)
# - [Using the kernel trick to find separating hyperplanes in higher dimensional space](#Using-the-kernel-trick-to-find-separating-hyperplanes-in-higher-dimensional-space)
# - [Decision tree learning](#Decision-tree-learning)
# - [Maximizing information gain – getting the most bang for the buck](#Maximizing-information-gain-–-getting-the-most-bang-for-the-buck)
# - [Building a decision tree](#Building-a-decision-tree)
# - [Combining weak to strong learners via random forests](#Combining-weak-to-strong-learners-via-random-forests)
# - [K-nearest neighbors – a lazy learning algorithm](#K-nearest-neighbors-–-a-lazy-learning-algorithm)
# - [Summary](#Summary)


# # Choosing a classification algorithm

# ...

# # First steps with scikit-learn

# Loading the Iris dataset from scikit-learn. Here, the third column represents the petal length, and the fourth column the petal width of the flower examples. The classes are already converted to integer labels where 0=Iris-Setosa, 1=Iris-Versicolor, 2=Iris-Virginica.


iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target

print('Class labels:', np.unique(y))


# Splitting data into 70% training and 30% test data:

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1, stratify=y)


print('Labels counts in y:', np.bincount(y))
print('Labels counts in y_train:', np.bincount(y_train))
print('Labels counts in y_test:', np.bincount(y_test))


# Standardizing the features:

sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
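
# A minimal check (not part of the original chapter code): the fitted
# StandardScaler exposes the per-feature estimates it standardizes with,
# so we can inspect the training means and standard deviations directly.
print('Feature means used by the scaler:', sc.mean_)
print('Feature scales (std. devs.) used by the scaler:', sc.scale_)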


# ## Training a perceptron via scikit-learn


ppn = Perceptron(eta0=0.1, random_state=1)
ppn.fit(X_train_std, y_train)


y_pred = ppn.predict(X_test_std)
print('Misclassified examples: %d' % (y_test != y_pred).sum())


print('Accuracy: %.3f' % accuracy_score(y_test, y_pred))


print('Accuracy: %.3f' % ppn.score(X_test_std, y_test))


# Convenience function for visualizing decision regions
# (matplotlib and LooseVersion are imported at the top of this script to
# check recent matplotlib compatibility):


def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):

    # setup marker generator and color map
    markers = ('o', 's', '^', 'v', '<')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    lab = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    lab = lab.reshape(xx1.shape)
    plt.contourf(xx1, xx2, lab, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot class examples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.8,
                    c=colors[idx],
                    marker=markers[idx],
                    label=f'Class {cl}',
                    edgecolor='black')

    # highlight test examples
    if test_idx:
        # circle the test examples
        X_test, y_test = X[test_idx, :], y[test_idx]

        plt.scatter(X_test[:, 0],
                    X_test[:, 1],
                    c='none',
                    edgecolor='black',
                    alpha=1.0,
                    linewidth=1,
                    marker='o',
                    s=100,
                    label='Test set')


# Training a perceptron model using the standardized training data:

X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))

plot_decision_regions(X=X_combined_std, y=y_combined,
                      classifier=ppn, test_idx=range(105, 150))
plt.xlabel('Petal length [standardized]')
plt.ylabel('Petal width [standardized]')
plt.legend(loc='upper left')

plt.tight_layout()
#plt.savefig('figures/03_01.png', dpi=300)
plt.show()


# # Modeling class probabilities via logistic regression

# ...

# ### Logistic regression intuition and conditional probabilities


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


z = np.arange(-7, 7, 0.1)
sigma_z = sigmoid(z)

plt.plot(z, sigma_z)
plt.axvline(0.0, color='k')
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel(r'$\sigma (z)$')

# y axis ticks and gridline
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)

plt.tight_layout()
#plt.savefig('figures/03_02.png', dpi=300)
plt.show()
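
# A quick numeric check (not in the original chapter code): the logistic
# sigmoid maps z=0 to exactly 0.5 and saturates towards 0 and 1 for large
# negative and positive net inputs, which is what lets us read sigma(z)
# as a class-membership probability.
print('sigmoid(0)   =', sigmoid(0.0))
print('sigmoid(-10) =', sigmoid(-10.0))
print('sigmoid(10)  =', sigmoid(10.0))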


# ### Learning the weights of the logistic loss function


def loss_1(z):
    return - np.log(sigmoid(z))


def loss_0(z):
    return - np.log(1 - sigmoid(z))


z = np.arange(-10, 10, 0.1)
sigma_z = sigmoid(z)

c1 = [loss_1(x) for x in z]
plt.plot(sigma_z, c1, label='L(w, b) if y=1')

c0 = [loss_0(x) for x in z]
plt.plot(sigma_z, c0, linestyle='--', label='L(w, b) if y=0')

plt.ylim(0.0, 5.1)
plt.xlim([0, 1])
plt.xlabel(r'$\sigma(z)$')
plt.ylabel('L(w, b)')
plt.legend(loc='best')
plt.tight_layout()
#plt.savefig('figures/03_04.png', dpi=300)
plt.show()


class LogisticRegressionGD:
    """Gradient descent-based logistic regression classifier.

    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.
    random_state : int
        Random number generator seed for random weight
        initialization.


    Attributes
    -----------
    w_ : 1d-array
        Weights after training.
    b_ : Scalar
        Bias unit after fitting.
    losses_ : list
        Log loss function values in each epoch.

    """
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        """ Fit training data.

        Parameters
        ----------
        X : {array-like}, shape = [n_examples, n_features]
            Training vectors, where n_examples is the number of examples and
            n_features is the number of features.
        y : array-like, shape = [n_examples]
            Target values.

        Returns
        -------
        self : Instance of LogisticRegressionGD

        """
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
        self.b_ = np.float_(0.)
        self.losses_ = []

        for i in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            self.w_ += self.eta * X.T.dot(errors) / X.shape[0]
            self.b_ += self.eta * errors.mean()
            # mean log loss over all training examples
            loss = (-y.dot(np.log(output))
                    - (1 - y).dot(np.log(1 - output))) / X.shape[0]
            self.losses_.append(loss)
        return self

    def net_input(self, X):
        """Calculate net input"""
        return np.dot(X, self.w_) + self.b_

    def activation(self, z):
        """Compute logistic sigmoid activation"""
        return 1. / (1. + np.exp(-np.clip(z, -250, 250)))

    def predict(self, X):
        """Return class label after unit step"""
        return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)


X_train_01_subset = X_train_std[(y_train == 0) | (y_train == 1)]
y_train_01_subset = y_train[(y_train == 0) | (y_train == 1)]

lrgd = LogisticRegressionGD(eta=0.3, n_iter=1000, random_state=1)
lrgd.fit(X_train_01_subset,
         y_train_01_subset)

plot_decision_regions(X=X_train_01_subset,
                      y=y_train_01_subset,
                      classifier=lrgd)

plt.xlabel('Petal length [standardized]')
plt.ylabel('Petal width [standardized]')
plt.legend(loc='upper left')

plt.tight_layout()
#plt.savefig('figures/03_05.png', dpi=300)
plt.show()
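
# A minimal convergence check (not part of the original chapter code):
# the custom classifier records the loss per epoch in `losses_`, so we
# can plot it to verify that full-batch gradient descent is converging.
plt.plot(range(1, len(lrgd.losses_) + 1), lrgd.losses_)
plt.xlabel('Epoch')
plt.ylabel('Mean log loss')
plt.tight_layout()
plt.show()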


# ### Training a logistic regression model with scikit-learn


lr = LogisticRegression(C=100.0, solver='lbfgs', multi_class='ovr')
lr.fit(X_train_std, y_train)

plot_decision_regions(X_combined_std, y_combined,
                      classifier=lr, test_idx=range(105, 150))
plt.xlabel('Petal length [standardized]')
plt.ylabel('Petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_06.png', dpi=300)
plt.show()


# Predicted class-membership probabilities of the first three test examples
# (wrapped in print so the output is visible when run as a script):
print(lr.predict_proba(X_test_std[:3, :]))

# Each row of probabilities sums to 1:
print(lr.predict_proba(X_test_std[:3, :]).sum(axis=1))

# The column with the highest probability corresponds to the predicted class label:
print(lr.predict_proba(X_test_std[:3, :]).argmax(axis=1))

# Equivalent to calling predict directly:
print(lr.predict(X_test_std[:3, :]))

# Predicting the class label of a single example (note the reshape into a 2D array):
print(lr.predict(X_test_std[0, :].reshape(1, -1)))


# ### Tackling overfitting via regularization


weights, params = [], []
for c in np.arange(-5, 5):
    lr = LogisticRegression(C=10.**c,
                            multi_class='ovr')
    lr.fit(X_train_std, y_train)
    weights.append(lr.coef_[1])
    params.append(10.**c)

weights = np.array(weights)
plt.plot(params, weights[:, 0],
         label='Petal length')
plt.plot(params, weights[:, 1], linestyle='--',
         label='Petal width')
plt.ylabel('Weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
#plt.savefig('figures/03_08.png', dpi=300)
plt.show()


# # Maximum margin classification with support vector machines

# ## Maximum margin intuition

# ...

# ## Dealing with the nonlinearly separable case using slack variables


svm = SVC(kernel='linear', C=1.0, random_state=1)
svm.fit(X_train_std, y_train)

plot_decision_regions(X_combined_std,
                      y_combined,
                      classifier=svm,
                      test_idx=range(105, 150))
plt.xlabel('Petal length [standardized]')
plt.ylabel('Petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_11.png', dpi=300)
plt.show()


# ## Alternative implementations in scikit-learn


ppn = SGDClassifier(loss='perceptron')
lr = SGDClassifier(loss='log')  # in scikit-learn >= 1.1 this loss is named 'log_loss'
svm = SGDClassifier(loss='hinge')
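
# A minimal out-of-core sketch (not part of the original chapter code):
# the main appeal of SGDClassifier is incremental learning via partial_fit,
# so a model can be updated on mini-batches without holding the full
# dataset in memory. Here we simply reuse the standardized training data
# in two halves to illustrate the call.
sgd_lr = SGDClassifier(loss='log', random_state=1)
sgd_lr.partial_fit(X_train_std[:50], y_train[:50], classes=np.unique(y))
sgd_lr.partial_fit(X_train_std[50:], y_train[50:])
print('SGD logistic regression test accuracy: %.3f'
      % sgd_lr.score(X_test_std, y_test))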


# # Solving non-linear problems using a kernel SVM


np.random.seed(1)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0,
                       X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, 0)

plt.scatter(X_xor[y_xor == 1, 0],
            X_xor[y_xor == 1, 1],
            c='royalblue',
            marker='s',
            label='Class 1')
plt.scatter(X_xor[y_xor == 0, 0],
            X_xor[y_xor == 0, 1],
            c='tomato',
            marker='o',
            label='Class 0')

plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')

plt.legend(loc='best')
plt.tight_layout()
#plt.savefig('figures/03_12.png', dpi=300)
plt.show()


# ## Using the kernel trick to find separating hyperplanes in higher dimensional space


svm = SVC(kernel='rbf', random_state=1, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor,
                      classifier=svm)

plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_14.png', dpi=300)
plt.show()


svm = SVC(kernel='rbf', random_state=1, gamma=0.2, C=1.0)
svm.fit(X_train_std, y_train)

plot_decision_regions(X_combined_std, y_combined,
                      classifier=svm, test_idx=range(105, 150))
plt.xlabel('Petal length [standardized]')
plt.ylabel('Petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_15.png', dpi=300)
plt.show()


svm = SVC(kernel='rbf', random_state=1, gamma=100.0, C=1.0)
svm.fit(X_train_std, y_train)

plot_decision_regions(X_combined_std, y_combined,
                      classifier=svm, test_idx=range(105, 150))
plt.xlabel('Petal length [standardized]')
plt.ylabel('Petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_16.png', dpi=300)
plt.show()
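
# A minimal sketch (not in the original chapter code) to quantify how a
# large gamma overfits: compare training vs. test accuracy for a modest
# and a very large gamma value. The exact numbers depend on the split,
# so they are printed rather than asserted.
for gamma in (0.2, 100.0):
    svm_check = SVC(kernel='rbf', random_state=1, gamma=gamma, C=1.0)
    svm_check.fit(X_train_std, y_train)
    print(f'gamma={gamma}: '
          f'train accuracy={svm_check.score(X_train_std, y_train):.3f}, '
          f'test accuracy={svm_check.score(X_test_std, y_test):.3f}')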


# # Decision tree learning


def entropy(p):
    return - p * np.log2(p) - (1 - p) * np.log2((1 - p))


x = np.arange(0.0, 1.0, 0.01)
ent = [entropy(p) if p != 0 else None
       for p in x]

plt.ylabel('Entropy')
plt.xlabel('Class-membership probability p(i=1)')
plt.plot(x, ent)
#plt.savefig('figures/03_26.png', dpi=300)
plt.show()
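
# A quick numeric check (not in the original chapter code): for a binary
# split, the entropy is maximal (1 bit) when the classes are perfectly
# mixed at p=0.5 and drops towards 0 as one class dominates.
print('entropy(0.5)  =', entropy(0.5))
print('entropy(0.99) =', entropy(0.99))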


# ## Maximizing information gain - getting the most bang for the buck


def gini(p):
    return p * (1 - p) + (1 - p) * (1 - (1 - p))


def entropy(p):
    return - p * np.log2(p) - (1 - p) * np.log2((1 - p))


def error(p):
    return 1 - np.max([p, 1 - p])


x = np.arange(0.0, 1.0, 0.01)

ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]

fig = plt.figure()
ax = plt.subplot(111)
for i, lab, ls, c in zip([ent, sc_ent, gini(x), err],
                         ['Entropy', 'Entropy (scaled)',
                          'Gini impurity', 'Misclassification error'],
                         ['-', '-', '--', '-.'],
                         ['black', 'lightgray', 'red', 'green', 'cyan']):
    line = ax.plot(x, i, label=lab, linestyle=ls, lw=2, color=c)

ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),
          ncol=5, fancybox=True, shadow=False)

ax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
plt.ylim([0, 1.1])
plt.xlabel('p(i=1)')
plt.ylabel('Impurity index')
#plt.savefig('figures/03_19.png', dpi=300, bbox_inches='tight')
plt.show()


# ## Building a decision tree


tree_model = DecisionTreeClassifier(criterion='gini',
                                    max_depth=4,
                                    random_state=1)
tree_model.fit(X_train, y_train)

X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined,
                      classifier=tree_model,
                      test_idx=range(105, 150))

plt.xlabel('Petal length [cm]')
plt.ylabel('Petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_20.png', dpi=300)
plt.show()


# The tree was trained on the two petal features only, so we pass just
# those two names to plot_tree:
feature_names = ['Petal length', 'Petal width']
tree.plot_tree(tree_model,
               feature_names=feature_names,
               filled=True)

#plt.savefig('figures/03_21_1.pdf')
plt.show()
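
# A minimal text view of the same tree (not part of the original chapter
# code), using scikit-learn's export_text helper so the split rules can be
# read without a figure:
print(tree.export_text(tree_model, feature_names=feature_names))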


# ## Combining weak to strong learners via random forests


forest = RandomForestClassifier(n_estimators=25,
                                random_state=1,
                                n_jobs=2)
forest.fit(X_train, y_train)

plot_decision_regions(X_combined, y_combined,
                      classifier=forest, test_idx=range(105, 150))

plt.xlabel('Petal length [cm]')
plt.ylabel('Petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_2.png', dpi=300)
plt.show()
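
# A minimal sketch (not part of the original chapter code): the fitted
# forest exposes impurity-based feature importances, which here cover the
# two petal features the model was trained on.
for name, importance in zip(['Petal length', 'Petal width'],
                            forest.feature_importances_):
    print(f'{name}: {importance:.3f}')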


# # K-nearest neighbors - a lazy learning algorithm


knn = KNeighborsClassifier(n_neighbors=5,
                           p=2,
                           metric='minkowski')
knn.fit(X_train_std, y_train)

plot_decision_regions(X_combined_std, y_combined,
                      classifier=knn, test_idx=range(105, 150))

plt.xlabel('Petal length [standardized]')
plt.ylabel('Petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/03_24_figures.png', dpi=300)
plt.show()
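
# A minimal sketch (not part of the original chapter code): because KNN
# simply memorizes the training set, comparing train and test accuracy for
# a few values of n_neighbors shows the trade-off between over- and
# underfitting.
for k in (1, 5, 15):
    knn_check = KNeighborsClassifier(n_neighbors=k, p=2, metric='minkowski')
    knn_check.fit(X_train_std, y_train)
    print(f'k={k}: train accuracy={knn_check.score(X_train_std, y_train):.3f}, '
          f'test accuracy={knn_check.score(X_test_std, y_test):.3f}')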


# # Summary

# ...

# ---
#
# Readers may ignore the next cell.