Book a Demo!
CoCalc Logo Icon
Store · Features · Docs · Share · Support · News · About · Policies · Sign Up · Sign In
POSTECH-CVLab
GitHub Repository: POSTECH-CVLab/PyTorch-StudioGAN
Path: blob/master/src/sync_batchnorm/batchnorm_reimpl.py
809 views
1
"""
2
-*- coding: utf-8 -*-
3
File : batchnorm_reimpl.py
4
Author : Jiayuan Mao
5
Email : [email protected]
6
Date : 27/01/2018
7
8
This file is part of Synchronized-BatchNorm-PyTorch.
9
https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
10
Distributed under MIT License.
11
12
MIT License
13
14
Copyright (c) 2018 Jiayuan MAO
15
16
Permission is hereby granted, free of charge, to any person obtaining a copy
17
of this software and associated documentation files (the "Software"), to deal
18
in the Software without restriction, including without limitation the rights
19
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
20
copies of the Software, and to permit persons to whom the Software is
21
furnished to do so, subject to the following conditions:
22
23
The above copyright notice and this permission notice shall be included in all
24
copies or substantial portions of the Software.
25
26
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
31
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32
SOFTWARE.
33
"""
34
35
import torch
36
import torch.nn as nn
37
import torch.nn.init as init
38
39
__all__ = ['BatchNorm2dReimpl']
40
41
42
class BatchNorm2dReimpl(nn.Module):
    """Plain-PyTorch re-implementation of 2D batch normalization.

    Used as a numerical reference when testing synchronized BatchNorm:
    it always normalizes with the statistics of the current mini-batch
    (i.e. it behaves like ``BatchNorm2d`` in training mode) and updates
    ``running_mean`` / ``running_var`` with an exponential moving average.

    Author: acgtyrant
    See also:
        https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # Learnable affine parameters (gamma / beta), one per channel.
        self.weight = nn.Parameter(torch.empty(num_features))
        self.bias = nn.Parameter(torch.empty(num_features))
        # Running statistics live in buffers so they move with the module
        # (.to / .cuda) and are saved in state_dict, but are not optimized.
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_running_stats(self):
        """Reset the running statistics to mean 0 and variance 1."""
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        """Reset running stats, draw gamma uniformly, and zero beta."""
        self.reset_running_stats()
        init.uniform_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input_):
        """Normalize a (N, C, H, W) batch with its own per-channel stats.

        Returns a tensor of the same shape; as a side effect, updates the
        ``running_mean`` and ``running_var`` buffers via an EMA with
        ``self.momentum``.
        """
        batch, channels, height, width = input_.size()
        count = batch * height * width

        # Fold everything except the channel axis into one dimension so the
        # per-channel statistics become simple row reductions.
        flat = input_.permute(1, 0, 2, 3).contiguous().view(channels, count)

        channel_sum = flat.sum(1)
        channel_sqsum = flat.pow(2).sum(1)
        mean = channel_sum / count
        # Sum of squared deviations, via E[x^2] - E[x]^2 (times count).
        var_sum = channel_sqsum - channel_sum * mean

        # EMA updates use detached stats so no gradient flows into buffers.
        momentum = self.momentum
        self.running_mean = (
            (1 - momentum) * self.running_mean + momentum * mean.detach())
        # Running variance uses the unbiased estimator (divide by count - 1),
        # matching torch.nn.BatchNorm2d.
        unbiased_var = var_sum / (count - 1)
        self.running_var = (
            (1 - momentum) * self.running_var + momentum * unbiased_var.detach())

        # Normalization itself uses the biased variance (divide by count).
        biased_var = var_sum / count
        inv_std = 1 / (biased_var + self.eps).pow(0.5)
        normalized = (flat - mean.unsqueeze(1)) * inv_std.unsqueeze(1)
        out = normalized * self.weight.unsqueeze(1) + self.bias.unsqueeze(1)

        # Undo the channel-first flattening to restore (N, C, H, W).
        return (out.view(channels, batch, height, width)
                   .permute(1, 0, 2, 3)
                   .contiguous())
91
92