GitHub Repository: probml/pyprobml
Path: blob/master/notebooks/book1/14/layer_norm_torch.ipynb
Kernel: Python 3


import numpy as np
import matplotlib.pyplot as plt

np.random.seed(seed=1)
import math
import collections

try:
    import torch
except ModuleNotFoundError:
    %pip install -qq torch
    import torch
from torch import nn
from torch.nn import functional as F
# Batch size N = 2, feature size D = 3.
X = np.array([[1, 2, 3], [4, 5, 6]])
# X = np.array([[1, 2], [2, 3]], dtype=np.float32)

print("batch norm")
# Batch norm: statistics are computed over the batch axis (one mean/std per feature).
mu_batch = np.mean(X, axis=0)
sigma_batch = np.std(X, axis=0)
XBN = (X - mu_batch) / sigma_batch
print(XBN)

print("layer norm")
# Layer norm: statistics are computed over the feature axis (one mean/std per example).
mu_layer = np.expand_dims(np.mean(X, axis=1), axis=1)
sigma_layer = np.expand_dims(np.std(X, axis=1), axis=1)
XLN = (X - mu_layer) / sigma_layer
print(XLN)
batch norm
[[-1. -1. -1.]
 [ 1.  1.  1.]]
layer norm
[[-1.22474487  0.          1.22474487]
 [-1.22474487  0.          1.22474487]]
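In symbols, the cell above computes the following (a restatement of the code, with N = 2 examples and D = 3 features). Batch norm standardizes each feature (column) across the batch, while layer norm standardizes each example (row) across its features:

$$
\text{batch norm:}\quad \hat{x}_{nd} = \frac{x_{nd} - \mu_d}{\sigma_d}, \qquad
\mu_d = \frac{1}{N}\sum_{n=1}^{N} x_{nd}, \qquad
\sigma_d^2 = \frac{1}{N}\sum_{n=1}^{N} (x_{nd} - \mu_d)^2
$$

$$
\text{layer norm:}\quad \hat{x}_{nd} = \frac{x_{nd} - \mu_n}{\sigma_n}, \qquad
\mu_n = \frac{1}{D}\sum_{d=1}^{D} x_{nd}, \qquad
\sigma_n^2 = \frac{1}{D}\sum_{d=1}^{D} (x_{nd} - \mu_n)^2
$$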
X = torch.tensor(X, dtype=torch.float32)
N, D = X.shape

ln = nn.LayerNorm(D)    # normalizes over the last (feature) dimension
bn = nn.BatchNorm1d(D)  # normalizes over the batch dimension (training mode uses batch stats)

print("batch norm")
XBN_t = bn(X)
print(XBN_t)
assert np.allclose(XBN_t.detach().numpy(), XBN, atol=1e-3)

print("layer norm")
XLN_t = ln(X)
print(XLN_t)
assert np.allclose(XLN_t.detach().numpy(), XLN, atol=1e-3)
batch norm
tensor([[-1.0000, -1.0000, -1.0000],
        [ 1.0000,  1.0000,  1.0000]], grad_fn=<NativeBatchNormBackward>)
layer norm
tensor([[-1.2247e+00,  0.0000e+00,  1.2247e+00],
        [-1.2247e+00,  1.1921e-07,  1.2247e+00]], grad_fn=<NativeLayerNormBackward>)
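The PyTorch modules agree with the manual NumPy results because a freshly constructed nn.LayerNorm or nn.BatchNorm1d initializes its affine parameters to scale 1 and shift 0, so it reduces to plain standardization (up to the small eps in the denominator). nn.LayerNorm also applies unchanged to sequence inputs, which is how it is used in transformers. A minimal sketch, reusing the imports from the first cell; the (batch, time, features) shape and random input are illustrative, not from the notebook:

# Layer norm on a (batch, time, features) tensor: each (n, t) position is
# standardized over its D features, independently of the batch and time axes.
# The shape (2, 4, 3) and the random data are hypothetical.
seq = torch.randn(2, 4, 3)
ln_seq = nn.LayerNorm(3)
out = ln_seq(seq)
print(out.mean(dim=-1))                  # ~0 at every (batch, time) position
print(out.std(dim=-1, unbiased=False))   # ~1 (layer norm uses the biased std)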
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
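This warning shows up when the cell is re-run: on the second pass X is already a torch tensor, and torch.tensor(X) warns about copy-constructing from a tensor. A small robustness tweak, not in the original notebook, is torch.as_tensor, which accepts NumPy arrays and tensors alike:

# torch.as_tensor converts a NumPy array, but passes an already-matching
# tensor through unchanged, so re-running the cell produces no warning.
X = torch.as_tensor(X, dtype=torch.float32)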