"Guiding Future STEM Leaders through Innovative Research Training" ~ thinkingbeyond.education

Views: 1075
Image: ubuntu2204
Kernel: Python 3
""" First Code - No C Parameter, Only Gamma Parameter - Not Used for Poster Using Digits Dataset of Scikit, Not MNIST Dataset """
import matplotlib.pyplot as plt
from sklearn import datasets, metrics, svm
from sklearn.model_selection import train_test_split

digits = datasets.load_digits()

"""
_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))
for ax, image, label in zip(axes, digits.images, digits.target):
    ax.set_axis_off()
    ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest")
    ax.set_title("Training: %i" % label)
"""

# Flatten each 8x8 image into a 64-dimensional feature vector
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

clf = svm.SVC(gamma=0.001)
X_train, X_test, y_train, y_test = train_test_split(
    data, digits.target, test_size=0.7, shuffle=False
)
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)

"""
_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))
for ax, image, prediction in zip(axes, X_test, predicted):
    ax.set_axis_off()
    image = image.reshape(8, 8)
    ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest")
    ax.set_title(f"Prediction: {prediction}")
"""

print(
    f"Classification Report for Classifier {clf}:\n"
    f"{metrics.classification_report(y_test, predicted)}\n"
)
Classification Report for Classifier SVC(gamma=0.001):
              precision    recall  f1-score   support

           0       0.99      0.99      0.99       123
           1       0.95      0.97      0.96       127
           2       0.98      0.97      0.98       122
           3       0.98      0.91      0.94       128
           4       0.98      0.96      0.97       128
           5       0.94      0.96      0.95       128
           6       0.99      0.98      0.98       128
           7       0.96      1.00      0.98       126
           8       0.93      0.93      0.93       121
           9       0.90      0.94      0.92       127

    accuracy                           0.96      1258
   macro avg       0.96      0.96      0.96      1258
weighted avg       0.96      0.96      0.96      1258
""" Code using MNIST Dataset - 10,000 Images Used Fixed Hyperparameters: Gamma = 0.05 C = 1 Generated Classification Report and Confusion Matrix """
import datetime as dt

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn import svm, metrics

# Load MNIST and draw a random subset of 10,000 images
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X = mnist.data
y = mnist.target.astype(int)

random_indices = np.random.choice(len(X), 10000, replace=False)
X_subset = X[random_indices]
y_subset = y[random_indices]
X_subset = X_subset / 255.0  # scale pixel values to [0, 1]

X_train, X_test, y_train, y_test = train_test_split(
    X_subset, y_subset, test_size=0.2, random_state=42
)

param_C = 1.0
param_gamma = 0.05
classifier = svm.SVC(C=param_C, gamma=param_gamma)

start_time = dt.datetime.now()
print(f"Start learning at {start_time}")
classifier.fit(X_train, y_train)
end_time = dt.datetime.now()
print(f"Stop learning at {end_time}")
elapsed_time = end_time - start_time
print(f"Elapsed learning time: {elapsed_time}")

predicted = classifier.predict(X_test)
print("Classification Report:\n", metrics.classification_report(y_test, predicted))
print("Accuracy:", metrics.accuracy_score(y_test, predicted))

cm = metrics.confusion_matrix(y_test, predicted)

# Piecewise-linear normalization that stretches contrast around a midpoint
class HighContrastNormalize(Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        super().__init__(vmin, vmax, clip)
        self.midpoint = midpoint

    def __call__(self, value, clip=None):
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))

plt.figure(figsize=(10, 8))
plt.imshow(
    cm,
    interpolation='nearest',
    cmap=plt.cm.coolwarm,
    norm=HighContrastNormalize(vmin=0, vmax=cm.max(), midpoint=cm.max() / 2)
)
plt.title("High-Contrast Confusion Matrix", fontsize=16)
plt.colorbar()
classes = np.arange(10)
plt.xticks(classes, classes, fontsize=12)
plt.yticks(classes, classes, fontsize=12)
plt.ylabel("True Label", fontsize=14)
plt.xlabel("Predicted Label", fontsize=14)

# Annotate each cell, switching text color for readability on dark cells
thresh = cm.max() / 2
for i in range(cm.shape[0]):
    for j in range(cm.shape[1]):
        plt.text(
            j, i, f"{cm[i, j]}",
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black"
        )
plt.tight_layout()
plt.show()

# Show the first ten test images with their true and predicted labels
_, axes = plt.subplots(nrows=2, ncols=5, figsize=(10, 5))
for ax, image, true_label, prediction in zip(
    axes.flatten(), X_test[:10], y_test[:10], predicted[:10]
):
    image = image.reshape(28, 28)
    ax.set_axis_off()
    ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest")
    ax.set_title(f"True: {true_label}\nPred: {prediction}")
plt.show()
Start learning at 2024-12-03 12:03:13.908212
Stop learning at 2024-12-03 12:03:42.315214
Elapsed learning time: 0:00:28.407002
Classification Report:
               precision    recall  f1-score   support

           0       0.98      1.00      0.99       213
           1       1.00      0.99      0.99       235
           2       0.95      0.97      0.96       225
           3       0.97      0.95      0.96       187
           4       0.97      0.97      0.97       175
           5       0.98      0.97      0.98       198
           6       0.99      0.96      0.97       181
           7       0.96      0.99      0.98       192
           8       0.96      0.97      0.96       193
           9       0.99      0.97      0.98       201

    accuracy                           0.98      2000
   macro avg       0.98      0.97      0.98      2000
weighted avg       0.98      0.98      0.98      2000

Accuracy: 0.9755
[Figure: "High-Contrast Confusion Matrix" heatmap with per-cell counts]
[Figure: grid of ten test digits with true and predicted labels]
""" Looped the Gamma Hyperparameter with C = 1 """
import datetime as dt

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn import svm, metrics

mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X = mnist.data
y = mnist.target.astype(int)

random_indices = np.random.choice(len(X), 10000, replace=False)
X_subset = X[random_indices]
y_subset = y[random_indices]
X_subset = X_subset / 255.0

X_train, X_test, y_train, y_test = train_test_split(
    X_subset, y_subset, test_size=0.2, random_state=42
)

param_C = 1.0
gamma_values = np.arange(0.001, 1, 0.005)
accuracies = []
learning_times = []

# Train one SVM per gamma value and record test accuracy and fit time
for gamma in gamma_values:
    classifier = svm.SVC(C=param_C, gamma=gamma)
    start_time = dt.datetime.now()
    classifier.fit(X_train, y_train)
    end_time = dt.datetime.now()
    elapsed_time = (end_time - start_time).total_seconds()
    learning_times.append(elapsed_time)
    predicted = classifier.predict(X_test)
    accuracy = metrics.accuracy_score(y_test, predicted)
    accuracies.append(accuracy)
    print(f"Gamma: {gamma:.5f}, Accuracy: {accuracy:.4f}, Learning Time: {elapsed_time:.2f}s")
""" Values were taken out and a graph was made using matplotlib. """
""" Looping the C Hyperparameter with Gamma = 0.026, the value of Gamma where accuracy peaked. """
import datetime as dt

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn import svm, metrics

mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X = mnist.data
y = mnist.target.astype(int)

random_indices = np.random.choice(len(X), 10000, replace=False)
X_subset = X[random_indices]
y_subset = y[random_indices]
X_subset = X_subset / 255.0

X_train, X_test, y_train, y_test = train_test_split(
    X_subset, y_subset, test_size=0.2, random_state=42
)

param_C_values = np.arange(0.01, 2, 0.01)
gamma = 0.026
accuracies = []
learning_times = []

# Train one SVM per C value at the fixed gamma and record test accuracy and fit time
for param_C in param_C_values:
    classifier = svm.SVC(C=param_C, gamma=gamma)
    start_time = dt.datetime.now()
    classifier.fit(X_train, y_train)
    end_time = dt.datetime.now()
    elapsed_time = (end_time - start_time).total_seconds()
    learning_times.append(elapsed_time)
    predicted = classifier.predict(X_test)
    accuracy = metrics.accuracy_score(y_test, predicted)
    accuracies.append(accuracy)
    print(f"C Parameter: {param_C:.5f}, Accuracy: {accuracy:.4f}, Learning Time: {elapsed_time:.2f}s")
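The loop only prints per-value results; a small follow-up sketch, assuming the param_C_values and accuracies lists from the loop above, that picks out the best-performing C:

import numpy as np

best_idx = int(np.argmax(accuracies))  # index of the highest test accuracy
print(f"Best C: {param_C_values[best_idx]:.2f} "
      f"with accuracy {accuracies[best_idx]:.4f}")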
# Moving on to an MLP with a Quantum Approach
# Importing necessary libraries
!pip install pennylane

import pennylane as qml
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
Requirement already satisfied: pennylane in /usr/local/lib/python3.10/dist-packages (0.39.0)
# Loading the MNIST dataset
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X, y = mnist.data / 255.0, mnist.target.astype(int)

# 10,000-image subset, mirroring the SVM cells above; the 8000/2000 split
# reported in the next output implies the full 70,000-image set was not used here
random_indices = np.random.choice(len(X), 10000, replace=False)
X, y = X[random_indices], y[random_indices]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Feature scaling and data preprocessing
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

print(f"Training data dimension:{X_train.shape},Test data dimension:{X_test.shape}")
print(f"Training labels dimension:{y_train.shape},Test labels dimension:{y_test.shape}")
Training data dimension:(8000, 784),Test data dimension:(2000, 784)
Training labels dimension:(8000,),Test labels dimension:(2000,)
# Quantum device setup
n_qubits = 3
dev = qml.device('default.qubit', wires=n_qubits)

# Defining the quantum circuit: one RX rotation per qubit, measuring <Z> on each wire
@qml.qnode(dev)
def Quantum_Circ(p):
    for i in range(n_qubits):
        qml.RX(p[i], wires=i)
    return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]

# Visualizing and testing the quantum circuit
test_params = np.random.uniform(-np.pi, np.pi, n_qubits)
print('Quantum Circuit:')
print(qml.draw(Quantum_Circ)(test_params))
print(f"Testing quantum circuit with params:{test_params}")
qvalues = Quantum_Circ(test_params)
print(f"Quantum circuit output:{qvalues}")
Quantum Circuit:
0: ──RX(3.00)──┤  <Z>
1: ──RX(-1.10)─┤  <Z>
2: ──RX(-0.18)─┤  <Z>
Testing quantum circuit with params:[ 2.99742779 -1.09737333 -0.17626929]
Quantum circuit output:[-0.9896262315339532, 0.45593546069885144, 0.9845047525219398]
# Translating quantum output (expectation values in [-1, 1]) to MLP hyperparameters
def translate_quantum_to_classic(qvalues):
    # learning_rate: maps qvalues[0] from [-1, 1] to the range [0.0001, 0.1]
    learning_rate = 0.0001 + (0.1 - 0.0001) * ((qvalues[0] + 1) / 2)
    # num_neurons: maps qvalues[1] from [-1, 1] to the range [16, 256]
    num_neurons = int(16 + (256 - 16) * ((qvalues[1] + 1) / 2))
    # alpha: maps qvalues[2] from [-1, 1] to the range [0.0001, 0.1]
    alpha = 0.0001 + (0.1 - 0.0001) * ((qvalues[2] + 1) / 2)
    return learning_rate, num_neurons, alpha

print(f"Translated hyperparameter:{translate_quantum_to_classic(qvalues)}")
Translated hyperparameter:(0.0006181697348790358, 245, 0.14840202477694178)
# Fitness function for MLP evaluation: train a small MLP with the translated
# hyperparameters and score it by 3-fold cross-validated accuracy
def fitness_function(qvalues):
    try:
        learning_rate, num_neurons, alpha = translate_quantum_to_classic(qvalues)
        model = MLPClassifier(
            hidden_layer_sizes=(num_neurons,),
            learning_rate_init=learning_rate,
            alpha=alpha,
            max_iter=20
        )
        cv_scores = cross_val_score(model, X_train, y_train, cv=3, scoring='accuracy')
        print(f"cross-validation scores:{cv_scores}")
        return np.mean(cv_scores)
    except Exception as e:
        print(f"Error during fitness evaluation:{e}")
        return -np.pi  # sentinel value well below any valid accuracy

# Running the fitness function
test_qvalues = Quantum_Circ(test_params)
print(f"Fitness for test quantum values{test_qvalues}:{fitness_function(test_qvalues)}")
/usr/local/lib/python3.10/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:690: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (20) reached and the optimization hasn't converged yet.
  warnings.warn(
/usr/local/lib/python3.10/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:690: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (20) reached and the optimization hasn't converged yet.
  warnings.warn(
cross-validation scores:[0.94375703 0.9343832  0.94073518]
Fitness for test quantum values[-0.9896262315339532, 0.45593546069885144, 0.9845047525219398]:0.9396251387556301
/usr/local/lib/python3.10/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:690: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (20) reached and the optimization hasn't converged yet.
  warnings.warn(

Evaluation Results:

Cross-validation scores: Fold 1: 0.94375703, Fold 2: 0.9343832, Fold 3: 0.94073518

Average accuracy (the fitness value): 0.9396251387556301

Computed for test quantum values: [-0.9896262315339532, 0.45593546069885144, 0.9845047525219398]

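The notebook defines the quantum circuit and the fitness function but stops short of the search loop that would tie them together. A minimal, hypothetical sketch of one such loop, assuming simple random search over the RX rotation angles (the actual optimization strategy is not shown in the notebook), reusing Quantum_Circ, fitness_function, and n_qubits from the cells above:

# Hypothetical random search over circuit parameters, maximizing the fitness
# function defined above (not part of the original notebook)
n_trials = 10
best_fitness, best_params = -np.pi, None  # -np.pi matches the error sentinel

for _ in range(n_trials):
    params = np.random.uniform(-np.pi, np.pi, n_qubits)  # random RX angles
    fitness = fitness_function(Quantum_Circ(params))
    if fitness > best_fitness:
        best_fitness, best_params = fitness, params

print(f"Best fitness: {best_fitness:.4f} with params: {best_params}")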
# Visualizing cross-validation fold scores and the fitness score with a bar graph
import matplotlib.pyplot as plt
import numpy as np

cv_scores = [0.94375703, 0.9343832, 0.94073518]
fitness_score = 0.9396251387556301
labels = ['Fold 1', 'Fold 2', 'Fold 3']

plt.figure(figsize=(8, 6))
bars = plt.bar(labels, cv_scores, color=['#4CAF50', '#2196F3', '#FFC107'], alpha=0.8)

# Dashed reference line at the fitness (mean accuracy) score
plt.axhline(y=fitness_score, color='red', linestyle='--', linewidth=1.5,
            label=f'Fitness score:{fitness_score:.4f}')

# Annotate each bar with its exact score
for bar, score in zip(bars, cv_scores):
    plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.003,
             f'{score:.4f}', ha='center', fontsize=10)

plt.title('Cross-Validation scores and Fitness score', fontsize=14)
plt.ylabel('Accuracy', fontsize=12)
plt.xlabel('Cross-Validation Folds', fontsize=12)
plt.legend()
plt.tight_layout()
plt.show()
[Figure: bar chart of the three cross-validation fold scores with the fitness score shown as a dashed reference line]