Path: blob/master/Part 4 - Clustering/Hierarchical Clustering/hc.py
# Hierarchical Clustering

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values  # Annual Income (k$) and Spending Score (1-100)
# y = dataset.iloc[:, 3].values

# Splitting the dataset into the Training set and Test set
# (not needed for unsupervised clustering; kept as a commented-out template)
"""from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""

# Feature Scaling (likewise unused here; kept as a commented-out template)
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train.reshape(-1, 1))"""

# Using the dendrogram to find the optimal number of clusters:
# look for the largest vertical gap that no horizontal merge line crosses,
# then count the branches at that level (five for this dataset)
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distances')
plt.show()

# Fitting Hierarchical Clustering to the dataset.
# Ward linkage minimises within-cluster variance and requires Euclidean
# distances, which scikit-learn uses by default, so no metric argument is
# passed (the old 'affinity' parameter was removed in recent versions)
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 5, linkage = 'ward')
y_hc = hc.fit_predict(X)

# Visualising the clusters
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
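
# Optional cross-check (a minimal sketch, not part of the original script):
# the same five flat clusters can be cut directly from the SciPy linkage
# matrix that produced the dendrogram, using fcluster with
# criterion = 'maxclust'. Cluster numbering (1-5 here vs 0-4 in y_hc) may
# differ, but the grouping of points should be identical, since both paths
# use Ward linkage on the same X.
from scipy.cluster.hierarchy import fcluster
y_hc_scipy = fcluster(sch.linkage(X, method = 'ward'), t = 5, criterion = 'maxclust')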
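
# Another optional sketch: the silhouette score gives a quantitative
# complement to reading the dendrogram by eye when choosing n_clusters
# (values range from -1 to 1; higher means better-separated clusters)
from sklearn.metrics import silhouette_score
print('Silhouette score (5 clusters):', silhouette_score(X, y_hc))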