Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
deeplearningzerotoall
GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/RNN/season1_refactored/RNN_intro_1.py
631 views
1
import torch
import torch.nn as nn

# Random seed to make results deterministic.
torch.manual_seed(0)

# One-hot encoding for each char in 'hello' (vocabulary of 4 symbols).
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]

# One-cell RNN: input_dim (4) -> hidden/output dim (2), batch-first layout.
cell = nn.RNN(input_size=4, hidden_size=2, batch_first=True)

# Initial hidden state: (num_layers * num_directions, batch, hidden_size)
# whether batch_first=True or False.
# num_directions is 2 when the RNN is bidirectional, otherwise it is 1.
hidden = torch.randn(1, 1, 2)

# Propagate the input through the RNN one character at a time.
# Input: (batch, seq_len, input_size) when batch_first=True.
# NOTE: torch.tensor(..., dtype=torch.float32) is preferred over the legacy
# torch.Tensor(...) constructor; the one-hot rows must be float for nn.RNN.
inputs = torch.tensor([h, e, l, l, o], dtype=torch.float32)
for one in inputs:
    # Reshape a single character to (batch=1, seq_len=1, input_size=4).
    one = one.view(1, 1, -1)
    # Input: (batch, seq_len, input_size) when batch_first=True
    out, hidden = cell(one, hidden)
    print("one input size", one.size(), "out size", out.size())

# We can do the whole sequence at once.
# Propagate input through RNN
# Input: (batch, seq_len, input_size) when batch_first=True
inputs = inputs.view(1, 5, -1)  # (batch=1, seq_len=5, input_size=4)
out, hidden = cell(inputs, hidden)
print("sequence input size", inputs.size(), "out size", out.size())


# Fresh hidden state for a batch of 3 sequences:
# hidden : (num_layers * num_directions, batch, hidden_size) whether batch_first=True or False
hidden = torch.randn(1, 3, 2)

# One cell RNN input_dim (4) -> output_dim (2). sequence: 5, batch 3
# 3 batches: 'hello', 'eolll', 'lleel'
# rank = (3, 5, 4)
inputs = torch.tensor([[h, e, l, l, o],
                       [e, o, l, l, l],
                       [l, l, e, e, l]], dtype=torch.float32)

# Propagate input through RNN
# Input: (batch, seq_len, input_size) when batch_first=True
# B x S x I
out, hidden = cell(inputs, hidden)
print("batch input size", inputs.size(), "out size", out.size())


# Same one-cell RNN input_dim (4) -> output_dim (2), but with the
# default batch_first=False layout.
cell = nn.RNN(input_size=4, hidden_size=2)

# The given dimensions dim0 and dim1 are swapped:
# (batch, seq, input) -> (seq, batch, input).
inputs = inputs.transpose(dim0=0, dim1=1)
# Propagate input through RNN
# Input: (seq_len, batch_size, input_size) when batch_first=False (default)
# S x B x I
out, hidden = cell(inputs, hidden)
print("batch input size", inputs.size(), "out size", out.size())
64