Kernel: Python 2

Clustering with Scikit-Learn

In this notebook we'll practice clustering algorithms with Scikit-Learn.

Data sets

We'll use the following datasets:

  • Synthetic Gaussian blobs and concentric rings, generated below

  • Old Faithful eruption times and wait times

  • The classic Iris flower measurements

There are many other clustering datasets you can use for practice!

%matplotlib inline
# print_function makes print() behave the same on Python 2 and 3
from __future__ import print_function
from collections import Counter
import random

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.cluster import KMeans, DBSCAN
## Create some synthetic data
from scipy.stats import multivariate_normal

data = []

# Covariance matrices must be symmetric positive semi-definite,
# so the off-diagonal entries have to match
dist = multivariate_normal(mean=[0, 0], cov=[[1, 0], [0, 1]])
for i in range(150):
    data.append(dist.rvs())

dist = multivariate_normal(mean=[5, 5], cov=[[1, 0.2], [0.2, 1]])
for i in range(150):
    data.append(dist.rvs())

dist = multivariate_normal(mean=[9, 9], cov=[[1, 0.2], [0.2, 1]])
for i in range(150):
    data.append(dist.rvs())

dist = multivariate_normal(mean=[-10, 5], cov=[[3, 0.2], [0.2, 2]])
for i in range(150):
    data.append(dist.rvs())

df = pd.DataFrame(data, columns=["x", "y"])
df.head()

plt.scatter(df['x'], df['y'])
plt.show()
[Figure: scatter plot of the four synthetic Gaussian clusters]
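Note that none of the draws here are seeded, so rerunning the notebook produces slightly different points (and slightly different counts in the outputs below). If you want reproducible runs, a minimal sketch is to seed both random sources this notebook uses before sampling:

# Seed NumPy's global RNG (used by scipy's rvs) and Python's random
# module (used by the annulus helper below) for reproducible draws
np.random.seed(42)
random.seed(42)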
def annulus(inner_radius, outer_radius, n=30, color='b'):
    """Generate n points with class `color` between the inner radius and the outer radius."""
    data = []
    diff = outer_radius - inner_radius
    for _ in range(n):
        # Pick an angle and radius
        angle = 2 * np.pi * random.random()
        r = inner_radius + diff * random.random()
        x = r * np.cos(angle)
        y = r * np.sin(angle)
        data.append((x, y))
    # Return a data frame for convenience
    xs, ys = zip(*data)
    df = pd.DataFrame()
    df["x"] = xs
    df["y"] = ys
    df["color"] = color
    return df

df1 = annulus(2, 6, 200, color='r')
df2 = annulus(8, 10, 300, color='b')
df_circ = pd.concat([df1, df2])
plt.scatter(df_circ['x'], df_circ['y'], c=df_circ['color'])
plt.show()
[Figure: scatter plot of the two concentric rings, red inner and blue outer]

K-Means with Scikit-Learn

# Fit a k-means estimator
estimator = KMeans(n_clusters=2)
X = df[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(labels)
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
# Plot the data, mapping each integer cluster label to a color.
# DBSCAN's noise label -1 indexes the last color in the string ('m').
def set_colors(labels, colors='rgbykcm'):
    colored_labels = []
    for label in labels:
        colored_labels.append(colors[label])
    return colored_labels

colors = set_colors(labels)
plt.scatter(df['x'], df['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
[Figure: k-means (k=2) cluster assignments on the four synthetic blobs]
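Besides labels_, a fitted KMeans estimator exposes the learned centroids in its cluster_centers_ attribute; overlaying them on the scatter is a quick sanity check. A minimal sketch:

# cluster_centers_ is an (n_clusters, n_features) array of centroids
centers = estimator.cluster_centers_
plt.scatter(df['x'], df['y'], c=colors)
plt.scatter(centers[:, 0], centers[:, 1], c='k', marker='x', s=100)
plt.show()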

Let's try it with k=4 this time.

estimator = KMeans(n_clusters=4)
X = df[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df['x'], df['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({2: 152, 1: 150, 3: 150, 0: 148})
[Figure: k-means (k=4) cluster assignments on the four synthetic blobs]
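Here we knew to ask for four clusters because we generated the data. When the right k is unknown, one common heuristic is the elbow method: fit a range of k values and look for the bend in the within-cluster sum of squares, which KMeans stores in its inertia_ attribute. A minimal sketch:

# Plot inertia (within-cluster sum of squared distances) against k
# and look for the "elbow" where improvements level off
inertias = []
ks = range(1, 10)
for k in ks:
    inertias.append(KMeans(n_clusters=k).fit(X).inertia_)
plt.plot(list(ks), inertias, marker='o')
plt.xlabel("k")
plt.ylabel("inertia")
plt.show()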

Let's try the circular data.

estimator = KMeans(n_clusters=2)
X = df_circ[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df_circ['x'], df_circ['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({0: 250, 1: 250})
[Figure: k-means (k=2) assignments on the ring data]

Ouch! Not so great on this dataset: k-means looks for compact, roughly spherical clusters around centroids, so it slices the rings in half rather than separating them. Now let's try some real data.

of_df = pd.read_csv("../assets/datasets/old-faithful.csv")
of_df.head()
of_df.plot.scatter(x="eruption_time", y="wait_time")
plt.show()
[Figure: scatter plot of eruption_time vs. wait_time for Old Faithful]
# Fit a k-means estimator
estimator = KMeans(n_clusters=2)
X = of_df[["eruption_time", "wait_time"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))
Counter({0: 172, 1: 100})
# Plot the data
colors = set_colors(labels)
plt.scatter(of_df["eruption_time"], of_df["wait_time"], c=colors)
plt.xlabel("eruption_time")
plt.ylabel("wait_time")
plt.show()
[Figure: k-means (k=2) assignments on the Old Faithful data]
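A fitted model can also assign new observations to the nearest centroid with predict. For example (the numbers below are hypothetical, just for illustration):

# Which cluster would a new eruption belong to?
new_point = [[3.5, 80]]  # hypothetical: 3.5-minute eruption, 80-minute wait
print(estimator.predict(new_point))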

Exercise: k-means

For the Iris dataset, fit and plot k-means models to:

  • sepal_length and petal_length, for k=2 and k=3

  • sepal_width and petal_width, for k=2 and k=3

Bonus: Compare your classifications to the known species. How well do the labels match up?

After: Check out the 3D example here.

iris = pd.read_csv("../assets/datasets/iris.data")
sns.pairplot(iris, hue="species")
plt.show()
[Figure: seaborn pairplot of the Iris features, colored by species]
## Exercise Answers here
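If you get stuck, here is one possible starting sketch for the first pairing (it assumes the column names sepal_length, petal_length, and species shown in the pairplot above); pd.crosstab is a convenient way to line cluster labels up against the known species for the bonus:

# One possible approach: k=3 on sepal_length and petal_length
X_iris = iris[["sepal_length", "petal_length"]]
km = KMeans(n_clusters=3).fit(X_iris)

plt.scatter(X_iris["sepal_length"], X_iris["petal_length"], c=set_colors(km.labels_))
plt.xlabel("sepal_length")
plt.ylabel("petal_length")
plt.show()

# Bonus: cross-tabulate cluster labels against the known species
print(pd.crosstab(km.labels_, iris["species"]))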

DBSCAN

# Fit a DBSCAN estimator
estimator = DBSCAN(eps=1, min_samples=10)
X = df[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df['x'], df['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({1: 299, 0: 147, 2: 133, -1: 21})
[Figure: DBSCAN (eps=1) assignments on the four synthetic blobs]
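Notice the -1 entries in the Counter: DBSCAN doesn't force every point into a cluster. Points that don't fall in any sufficiently dense region (at least min_samples neighbors within eps) are labeled -1, meaning noise. You can pull them out with a boolean mask:

# labels_ is a NumPy array, so a boolean mask picks out the noise points
noise_mask = (labels == -1)
print("noise points:", noise_mask.sum())

Also note that with eps=1 only three clusters were found: two of the four blobs merged into one cluster of roughly 300 points. Shrinking eps to 0.8 below splits them apart again.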
# Fit a DBSCAN estimator
estimator = DBSCAN(eps=0.8, min_samples=10)
X = df[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df['x'], df['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({2: 145, 1: 143, 0: 142, 3: 109, -1: 61})
[Figure: DBSCAN (eps=0.8) assignments on the four synthetic blobs]
# Fit a DBSCAN estimator
estimator = DBSCAN(eps=2, min_samples=10)
X = df_circ[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df_circ['x'], df_circ['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({1: 300, 0: 200})
[Figure: DBSCAN (eps=2) assignments on the ring data; each ring is recovered as its own cluster]

Much better than k-means on this dataset! DBSCAN only cares about local density, not distance to a centroid, so the rings come out cleanly. Now let's cook up something DBSCAN doesn't handle as well: two clusters with very different densities.

## Create some synthetic data: one sparse cluster and one dense cluster
data = []

# Covariance matrices must be symmetric positive semi-definite
dist = multivariate_normal(mean=[0, 0], cov=[[6, 1], [1, 6]])
for i in range(50):
    data.append(dist.rvs())

dist = multivariate_normal(mean=[10, 10], cov=[[1, 0.2], [0.2, 0.6]])
for i in range(400):
    data.append(dist.rvs())

df2 = pd.DataFrame(data, columns=["x", "y"])
df2.head()

plt.scatter(df2['x'], df2['y'])
plt.show()
[Figure: scatter plot of the sparse and dense synthetic clusters]
# Fit a DBSCAN estimator
estimator = DBSCAN(eps=0.5, min_samples=10)
X = df2[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df2['x'], df2['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({0: 392, -1: 58})
[Figure: DBSCAN (eps=0.5) assignments on the mixed-density data; most of the sparse cluster is labeled noise]
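As the Counter shows, DBSCAN recovers the dense cluster but writes off most of the sparse one as noise: a single eps tuned to the dense region is too small for the sparse region. One way to see the tradeoff is to sweep eps and watch how the cluster and noise counts change (a rough sketch):

# Sweep the neighborhood radius and compare the resulting label counts
for eps in [0.5, 1.0, 1.5, 2.0, 3.0]:
    sweep_labels = DBSCAN(eps=eps, min_samples=10).fit(X).labels_
    print(eps, Counter(sweep_labels))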

Exercise: DBSCAN

For the Iris dataset, fit and plot DBSCAN models to:

  • sepal_length and petal_length

  • sepal_width and petal_width

Bonus: Compare your classifications to the known species. How well do the labels match up?

Hierarchical Clustering

# Hierarchical: Agglomerative Clustering
from sklearn.cluster import AgglomerativeClustering

# Fit an estimator
estimator = AgglomerativeClustering(n_clusters=4)
X = df[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df['x'], df['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({1: 153, 0: 150, 2: 150, 3: 147})
[Figure: agglomerative clustering (4 clusters) on the four synthetic blobs]
# Hierarchical: Agglomerative Clustering
# Fit an estimator
estimator = AgglomerativeClustering(n_clusters=2)
X = df_circ[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))

colors = set_colors(labels)
plt.scatter(df_circ['x'], df_circ['y'], c=colors)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
Counter({0: 297, 1: 203})
[Figure: agglomerative clustering (2 clusters) on the ring data]
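AgglomerativeClustering only returns flat labels for a chosen number of clusters, but the merge tree behind hierarchical clustering can be built and drawn with scipy. A sketch using Ward linkage (also sklearn's default) on the blob data:

from scipy.cluster.hierarchy import linkage, dendrogram

# Build the full merge tree with Ward linkage and draw a truncated
# dendrogram; long vertical gaps suggest natural places to cut
Z = linkage(df[["x", "y"]].values, method="ward")
dendrogram(Z, truncate_mode="lastp", p=12)
plt.xlabel("merged cluster (size)")
plt.ylabel("distance")
plt.show()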
## Silhouette Coefficient
from sklearn import metrics

estimator = KMeans(n_clusters=4)
X = df[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))
print(metrics.silhouette_score(X, labels, metric='euclidean'))
Counter({3: 152, 0: 150, 2: 150, 1: 148})
0.707565789149
estimator = DBSCAN(eps=0.8, min_samples=10)
X = df[["x", "y"]]
estimator.fit(X)

# Clusters are given in the labels_ attribute
labels = estimator.labels_
print(Counter(labels))
print(metrics.silhouette_score(X, labels, metric='euclidean'))
Counter({2: 145, 1: 143, 0: 142, 3: 109, -1: 61})
0.577093605577

Higher is better, so k-means gave the better clustering on this dataset (about 0.71 versus 0.58). Keep in mind the comparison is a little unfair to DBSCAN: its noise points (label -1) are scored here as if they were a cluster of their own.
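The silhouette coefficient is also a handy way to choose k for k-means: compute it for a range of k and prefer the peak. A minimal sketch on the blob data:

# Silhouette needs at least two clusters, so start the sweep at k=2
for k in range(2, 8):
    k_labels = KMeans(n_clusters=k).fit(X).labels_
    print(k, metrics.silhouette_score(X, k_labels, metric='euclidean'))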