GitHub Repository: probml/pyprobml
Path: blob/master/deprecated/scripts/activation_fun_plot.py
# Plots various neural net activation functions.

import superimport

import numpy as np
import matplotlib.pyplot as plt
import os
import pyprobml_utils as pml
import sys

def sigmoid(z):
    # Logistic sigmoid: squashes inputs into (0, 1).
    return 1 / (1 + np.exp(-z))

def relu(z):
    # Rectified linear unit: max(0, z).
    return np.maximum(0, z)

def heaviside(z):
    # Step function; cast to float so it plots as 0/1 rather than booleans.
    return (z > 0).astype(float)


def softplus(z):
    # Smooth approximation to ReLU; np.logaddexp(0, z) computes
    # log(1 + exp(z)) without overflowing for large z.
    return np.logaddexp(0, z)

def lrelu(z, lam=0.1):
    # Leaky ReLU with slope lam on the negative side.
    return np.maximum(lam*z, z)

def elu(z, alpha=1):
    # Exponential linear unit.
    return np.where(z < 0, alpha * (np.exp(z) - 1), z)

def elu2(z, lam=0.5):
    # Alternative ELU formulation (not used in the plots below).
    return np.maximum(0, z) + np.minimum(0, lam*(np.exp(z) - 1))

def swish(z):
    # Swish (also known as SiLU): z * sigmoid(z).
    return z * sigmoid(z)


from scipy.special import erfc

# alpha and scale chosen so that SELU self-normalizes activations to
# mean 0 and standard deviation 1 (see Equation 14 of the SELU paper,
# Klambauer et al. 2017):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)
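
# Sanity check: these closed-form expressions should evaluate to roughly the
# standard SELU constants reported by Klambauer et al. 2017,
# alpha ~= 1.6733 and scale ~= 1.0507.
assert np.isclose(alpha_0_1, 1.6733, atol=1e-3)
assert np.isclose(scale_0_1, 1.0507, atol=1e-3)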

def selu(z, scale=scale_0_1, alpha=alpha_0_1):
    # Scaled ELU (SELU).
    return scale * elu(z, alpha)

z = np.linspace(-5, 5, 200)
print(z)
# dummy test
#sys.exit()

#plt.figure(figsize=(11,4))
plt.figure()
plt.plot(z, sigmoid(z), "b-", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "g--", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="lower right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
pml.savefig('activationFuns.pdf')
plt.show()

#plt.figure(figsize=(11,4))
plt.figure()
plt.plot(z, relu(z), "r-", linewidth=2, label="ReLU")
plt.plot(z, lrelu(z), "g--", linewidth=2, label="LReLU")
plt.plot(z, elu(z), "b-", linewidth=2, label="ELU")
plt.plot(z, selu(z), "k:", linewidth=2, label="SELU")
plt.plot(z, swish(z), "m-.", linewidth=2, label="Swish")
plt.grid(True)
plt.legend(loc="upper left", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-2, 2, -1.2, 2])
pml.savefig('activationFuns2.pdf')
plt.show()

# From https://github.com/ageron/handson-ml2/blob/master/11_training_deep_neural_networks.ipynb
plt.figure()
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')        # horizontal axis y = 0
plt.plot([-5, 5], [1, 1], 'k--')       # upper asymptote y = 1
plt.plot([0, 0], [-0.2, 1.2], 'k-')    # vertical axis x = 0
plt.plot([-5, 5], [-3/4, 7/4], 'g--')  # tangent line at z = 0 (slope 1/4)
plt.plot(z, sigmoid(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
pml.savefig("sigmoid_saturation_plot.pdf")
plt.show()