GitHub Repository: jantic/deoldify
Path: blob/master/fastai/text/models/forget_mult_cuda_kernel.cu
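This file implements the CUDA side of fastai's ForgetMult operation (used by its QRNN modules): given an input sequence x and forget gates f, the forward kernel computes the recurrence h_t = f_t * x_t + (1 - f_t) * h_{t-1} for every batch element and hidden unit, and the backward kernel produces the matching gradients with respect to x, f, and the initial hidden state.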
#include <ATen/ATen.h>
#include <THC/THC.h>

#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>

template <typename scalar_t>
__global__ void forget_mult_cuda_forward_kernel(const scalar_t* __restrict__ x,
                  const scalar_t* __restrict__ f, scalar_t* __restrict__ output,
                  size_t batch_size, size_t seq_length, size_t n_hidden, bool batch_first) {
  /*
  Note: output is assumed to be one timestep longer than f or x, where output[0] = h_{-1}.
  This means the output array has a size of seq_length+1 on the word (sequence) dimension.
  */
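  // Each thread owns one (batch, hidden-unit) pair and walks the sequence in order,
  // computing output[t] = f[t-1] * x[t-1] + (1 - f[t-1]) * output[t-1], i.e. the
  // ForgetMult recurrence h_t = f_t * x_t + (1 - f_t) * h_{t-1}.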
  const int hid = blockIdx.x * blockDim.x + threadIdx.x;
  const int bid = blockIdx.y * blockDim.y + threadIdx.y;
  if (hid < n_hidden && bid < batch_size){
    for (int ts = 1; ts < seq_length + 1; ts++) {
      int i = 0;
      int dst_i = 0;
      int dst_iminus1 = 0;
      if (batch_first){
        i = bid * n_hidden * seq_length + (ts-1) * n_hidden + hid;
        dst_i = bid * n_hidden * (seq_length+1) + (ts-0) * n_hidden + hid;
        dst_iminus1 = bid * n_hidden * (seq_length+1) + (ts-1) * n_hidden + hid;
      }
      else {
        i = (ts-1) * n_hidden * batch_size + bid * n_hidden + hid;
        dst_i = (ts-0) * n_hidden * batch_size + bid * n_hidden + hid;
        dst_iminus1 = (ts-1) * n_hidden * batch_size + bid * n_hidden + hid;
      }
      output[dst_i] = f[i] * x[i];
      output[dst_i] += (1 - f[i]) * output[dst_iminus1];
    }
  }
}

template <typename scalar_t>
__global__ void forget_mult_cuda_backward_kernel(const scalar_t* __restrict__ x,
                  const scalar_t* __restrict__ f, const scalar_t* __restrict__ output,
                  const scalar_t* __restrict__ grad_output, scalar_t* __restrict__ grad_x,
                  scalar_t* __restrict__ grad_f, scalar_t* __restrict__ grad_h,
                  size_t batch_size, size_t seq_length, size_t n_hidden, bool batch_first) {
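  // running_f accumulates the gradient flowing into h_t as the sequence is walked backwards:
  // dh_t/dx_t = f_t, dh_t/df_t = x_t - h_{t-1}, and dh_t/dh_{t-1} = 1 - f_t, so after the
  // loop running_f holds the gradient with respect to the initial hidden state h_{-1}.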
  const int hid = blockIdx.x * blockDim.x + threadIdx.x;
  const int bid = blockIdx.y * blockDim.y + threadIdx.y;
  double running_f = 0;
  if (hid < n_hidden && bid < batch_size){
    for (int ts = seq_length; ts >= 1; ts--) {
      int i = 0;
      int dst_i = 0;
      int dst_iminus1 = 0;
      if (batch_first){
        i = bid * n_hidden * seq_length + (ts-1) * n_hidden + hid;
        dst_i = bid * n_hidden * (seq_length+1) + (ts-0) * n_hidden + hid;
        dst_iminus1 = bid * n_hidden * (seq_length+1) + (ts-1) * n_hidden + hid;
      }
      else {
        i = (ts-1) * n_hidden * batch_size + bid * n_hidden + hid;
        dst_i = (ts-0) * n_hidden * batch_size + bid * n_hidden + hid;
        dst_iminus1 = (ts-1) * n_hidden * batch_size + bid * n_hidden + hid;
      }
      running_f += grad_output[i];
      grad_x[i] = f[i] * running_f;
      grad_f[i] = (x[i] - output[dst_iminus1]) * running_f;
      // The line below is likely more numerically stable than (1 - f[i]) * running_f;
      running_f = running_f - f[i] * running_f;
    }
    grad_h[bid * n_hidden + hid] = running_f;
  }
}

at::Tensor forget_mult_cuda_forward(at::Tensor x, at::Tensor f, at::Tensor output, bool batch_first) {
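  // Note: `output` must be pre-allocated with seq_length+1 timesteps, with the initial hidden
  // state h_{-1} already written into timestep 0 (see the note in the forward kernel above).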
  const auto batch_size = (batch_first) ? x.size(0) : x.size(1);
  const auto seq_length = (batch_first) ? x.size(1) : x.size(0);
  const auto n_hidden = x.size(2);

  const int threads = 1024;
  const dim3 blocks((n_hidden + threads - 1) / threads, batch_size);
  AT_DISPATCH_FLOATING_TYPES(x.type(), "forget_mult_cuda_forward", ([&] {
    forget_mult_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
        x.data<scalar_t>(), f.data<scalar_t>(), output.data<scalar_t>(), batch_size,
        seq_length, n_hidden, batch_first);
  }));

  THCudaCheck(cudaGetLastError());
  return output;
}

std::vector<at::Tensor> forget_mult_cuda_backward(at::Tensor x, at::Tensor f,
    at::Tensor output, at::Tensor grad_output, bool batch_first) {
  const auto batch_size = (batch_first) ? x.size(0) : x.size(1);
  const auto seq_length = (batch_first) ? x.size(1) : x.size(0);
  const auto n_hidden = x.size(2);

  auto grad_x = at::zeros_like(x);
  auto grad_f = at::zeros_like(x);
  auto grad_h = at::zeros({batch_size, n_hidden}, x.options());

  const int threads = 1024;
  const dim3 blocks((n_hidden + threads - 1) / threads, batch_size);
  AT_DISPATCH_FLOATING_TYPES(x.type(), "forget_mult_cuda_backward", ([&] {
    forget_mult_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
        x.data<scalar_t>(), f.data<scalar_t>(), output.data<scalar_t>(), grad_output.data<scalar_t>(),
        grad_x.data<scalar_t>(), grad_f.data<scalar_t>(), grad_h.data<scalar_t>(), batch_size,
        seq_length, n_hidden, batch_first);
  }));

  THCudaCheck(cudaGetLastError());
  return {grad_x, grad_f, grad_h};
}
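For context, a kernel file like this is normally compiled together with a small C++ binding that declares the two entry points above and exposes them to Python via PyTorch's extension machinery. The sketch below is a hypothetical minimal version of such a binding (the repository's actual companion file may differ); the exported names forget_mult_forward and forget_mult_backward are illustrative only.

#include <torch/extension.h>
#include <vector>

// Entry points implemented in forget_mult_cuda_kernel.cu
at::Tensor forget_mult_cuda_forward(at::Tensor x, at::Tensor f, at::Tensor output, bool batch_first);
std::vector<at::Tensor> forget_mult_cuda_backward(at::Tensor x, at::Tensor f,
    at::Tensor output, at::Tensor grad_output, bool batch_first);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forget_mult_forward", &forget_mult_cuda_forward, "ForgetMult forward (CUDA)");
  m.def("forget_mult_backward", &forget_mult_cuda_backward, "ForgetMult backward (CUDA)");
}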