Path: blob/master/deprecated/scripts/activation_fun_plot.py
# Plots various neural net activation functions.

import superimport

import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from scipy.special import erfc


def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def relu(z):
    return np.maximum(0, z)

def heaviside(z):
    # Step function; returns booleans.
    return (z > 0)

def softplus(z):
    return np.log(1 + np.exp(z))

def lrelu(z, lam=0.1):
    # Leaky ReLU: slope lam for negative inputs.
    return np.maximum(lam * z, z)

def elu(z, alpha=1):
    return np.where(z < 0, alpha * (np.exp(z) - 1), z)

def elu2(z, lam=0.5):
    # Equivalent formulation of ELU, with lam playing the role of alpha.
    return np.maximum(0, z) + np.minimum(0, lam * (np.exp(z) - 1))

def swish(z):
    return z * sigmoid(z)

# alpha and scale to self-normalize with mean 0 and standard deviation 1
# (see equation 14 in the SELU paper):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1 / np.sqrt(2)) * np.exp(1 / 2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (
    2 * erfc(np.sqrt(2)) * np.e**2 + np.pi * erfc(1 / np.sqrt(2))**2 * np.e
    - 2 * (2 + np.pi) * erfc(1 / np.sqrt(2)) * np.sqrt(np.e) + np.pi + 2)**(-1 / 2)

def selu(z, scale=scale_0_1, alpha=alpha_0_1):
    return scale * elu(z, alpha)


z = np.linspace(-5, 5, 200)

# Sigmoid, tanh and ReLU.
plt.figure()
plt.plot(z, sigmoid(z), "b-", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "g--", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="lower right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
pml.savefig('activationFuns.pdf')
plt.show()

# ReLU and its leaky/smooth variants.
plt.figure()
plt.plot(z, relu(z), "r-", linewidth=2, label="ReLU")
plt.plot(z, lrelu(z), "g--", linewidth=2, label="LReLU")
plt.plot(z, elu(z), "b-", linewidth=2, label="ELU")
plt.plot(z, selu(z), "k:", linewidth=2, label="SELU")
plt.plot(z, swish(z), "m-.", linewidth=2, label="swish")
plt.grid(True)
plt.legend(loc="upper left", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-2, 2, -1.2, 2])
pml.savefig('activationFuns2.pdf')
plt.show()

# Saturation of the sigmoid, with its tangent line at z = 0.
# From https://github.com/ageron/handson-ml2/blob/master/11_training_deep_neural_networks.ipynb
plt.figure()
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [1, 1], 'k--')
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')
plt.plot(z, sigmoid(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
pml.savefig("sigmoid_saturation_plot.pdf")
plt.show()
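
# Optional sanity check (not part of the original script): a minimal sketch verifying
# numerically the self-normalizing property stated in the comment above, i.e. that
# selu() with alpha_0_1 and scale_0_1 maps standard-normal inputs to outputs with
# mean ~0 and standard deviation ~1. The sample size and seed are arbitrary choices
# made for this illustration only.
rng = np.random.default_rng(0)
x = rng.standard_normal(10**6)
y = selu(x)
print(f"SELU output: mean={y.mean():.4f}, std={y.std():.4f}")  # expect approx 0 and 1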