GitHub Repository: POSTECH-CVLab/PyTorch-StudioGAN
Path: blob/master/src/utils/style_ops/bias_act.cu
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#include <c10/util/Half.h>
#include "bias_act.h"

//------------------------------------------------------------------------
// Helpers.

template <class T> struct InternalType;
template <> struct InternalType<double>    { typedef double scalar_t; };
template <> struct InternalType<float>     { typedef float  scalar_t; };
template <> struct InternalType<c10::Half> { typedef float  scalar_t; };
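
// Note on the c10::Half specialization above: half tensors are loaded and stored
// as half, but all arithmetic in the kernel below runs in float (scalar_t),
// presumably to avoid fp16 precision and overflow issues in the activations.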

//------------------------------------------------------------------------
// CUDA kernel.

template <class T, int A>
__global__ void bias_act_kernel(bias_act_kernel_params p)
{
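    // The template parameter A selects the activation; p.grad (G below) selects
    // what is evaluated for it. Judging from the per-activation branches in this
    // kernel: G == 0 computes the forward activation, G == 1 its first derivative,
    // and G == 2 its second derivative, each scaled by gain and by the incoming
    // gradient dy near the end of the loop. xref/yref carry the forward-pass
    // input/output that the derivative branches need.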
    typedef typename InternalType<T>::scalar_t scalar_t;
    int G = p.grad;
    scalar_t alpha = (scalar_t)p.alpha;
    scalar_t gain = (scalar_t)p.gain;
    scalar_t clamp = (scalar_t)p.clamp;
    scalar_t one = (scalar_t)1;
    scalar_t two = (scalar_t)2;
    scalar_t expRange = (scalar_t)80;
    scalar_t halfExpRange = (scalar_t)40;
    scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946;
    scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717;
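    // expRange/halfExpRange act as saturation thresholds: once the relevant input
    // is beyond them, the branches below return the asymptotic value of the
    // activation instead of relying on exp(), which would overflow or lose
    // precision in that regime.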

    // Loop over elements.
    int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
    for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
    {
        // Load.
        scalar_t x = (scalar_t)((const T*)p.x)[xi];
        scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0;
        scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0;
        scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0;
        scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one;
        scalar_t yy = (gain != 0) ? yref / gain : 0;
        scalar_t y = 0;

        // Apply bias.
        ((G == 0) ? x : xref) += b;
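        // The ternary above is an lvalue select: in the forward pass (G == 0) the
        // bias is added to the input x itself; in the gradient passes it is added
        // to the reference input xref so that xref matches what the forward pass
        // actually fed into the activation.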

        // linear
        if (A == 1)
        {
            if (G == 0) y = x;
            if (G == 1) y = x;
        }

        // relu
        if (A == 2)
        {
            if (G == 0) y = (x > 0) ? x : 0;
            if (G == 1) y = (yy > 0) ? x : 0;
        }

        // lrelu
        if (A == 3)
        {
            if (G == 0) y = (x > 0) ? x : x * alpha;
            if (G == 1) y = (yy > 0) ? x : x * alpha;
        }

        // tanh
        if (A == 4)
        {
            if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); }
            if (G == 1) y = x * (one - yy * yy);
            if (G == 2) y = x * (one - yy * yy) * (-two * yy);
        }

        // sigmoid
        if (A == 5)
        {
            if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one);
            if (G == 1) y = x * yy * (one - yy);
            if (G == 2) y = x * yy * (one - yy) * (one - two * yy);
        }

        // elu
        if (A == 6)
        {
            if (G == 0) y = (x >= 0) ? x : exp(x) - one;
            if (G == 1) y = (yy >= 0) ? x : x * (yy + one);
            if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one);
        }

        // selu
        if (A == 7)
        {
            if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one);
            if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha);
            if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha);
        }

        // softplus
        if (A == 8)
        {
            if (G == 0) y = (x > expRange) ? x : log(exp(x) + one);
            if (G == 1) y = x * (one - exp(-yy));
            if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); }
        }

        // swish
        if (A == 9)
        {
            if (G == 0)
                y = (x < -expRange) ? 0 : x / (exp(-x) + one);
            else
            {
                scalar_t c = exp(xref);
                scalar_t d = c + one;
                if (G == 1)
                    y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d);
                else
                    y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d);
                yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain;
            }
        }
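        // For swish, the gradient branches are written in terms of xref directly;
        // yref is then recomputed from xref (times gain) so that the clamp test
        // below compares against the same forward output the other activations use.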

        // Apply gain.
        y *= gain * dy;

        // Clamp.
        if (clamp >= 0)
        {
            if (G == 0)
                y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp;
            else
                y = (yref > -clamp & yref < clamp) ? y : 0;
        }
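        // The relational comparisons bind tighter than `&`, so the conditions above
        // are bitwise ANDs of two bools rather than a typo. In the forward pass the
        // output is clamped to [-clamp, clamp]; in the gradient passes the gradient
        // is zeroed wherever the forward output was clamped.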

        // Store.
        ((T*)p.y)[xi] = (T)y;
    }
}

//------------------------------------------------------------------------
// CUDA kernel selection.

template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p)
{
    if (p.act == 1) return (void*)bias_act_kernel<T, 1>;
    if (p.act == 2) return (void*)bias_act_kernel<T, 2>;
    if (p.act == 3) return (void*)bias_act_kernel<T, 3>;
    if (p.act == 4) return (void*)bias_act_kernel<T, 4>;
    if (p.act == 5) return (void*)bias_act_kernel<T, 5>;
    if (p.act == 6) return (void*)bias_act_kernel<T, 6>;
    if (p.act == 7) return (void*)bias_act_kernel<T, 7>;
    if (p.act == 8) return (void*)bias_act_kernel<T, 8>;
    if (p.act == 9) return (void*)bias_act_kernel<T, 9>;
    return NULL;
}
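
// The integer act codes map to the template parameter A of bias_act_kernel:
// 1 linear, 2 relu, 3 lrelu, 4 tanh, 5 sigmoid, 6 elu, 7 selu, 8 softplus, 9 swish.
// A minimal host-side launch sketch, assuming a wrapper fills bias_act_kernel_params
// as declared in bias_act.h; the block/loop sizing here is an illustrative assumption,
// not taken from this file:
//
//     bias_act_kernel_params p = {};                    // filled in by the host wrapper
//     p.loopX = 4;                                      // elements handled per thread per launch
//     void* kernel = choose_bias_act_kernel<float>(p);
//     void* args[] = { &p };
//     int blockSize = 128;
//     int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
//     cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, /*stream=*/0);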

//------------------------------------------------------------------------
// Template specializations.

template void* choose_bias_act_kernel<double>    (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<float>     (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p);

//------------------------------------------------------------------------