Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
deeplearningzerotoall
GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/RNN/5-timeseries.py
618 views
1
import torch
2
import torch.optim as optim
3
import numpy as np
4
import matplotlib.pyplot as plt
5
# Pin PyTorch's RNG so weight initialization and results are reproducible run-to-run.
torch.manual_seed(0)
def minmax_scaler(data):
    """Scale `data` column-wise into [0, 1] via min-max normalization.

    Args:
        data: 2-D array-like of shape (time, features).

    Returns:
        np.ndarray of the same shape with each column rescaled to [0, 1].

    Fix: the original computed `np.min(data, 0)` twice; the column
    statistics are now computed once each.
    """
    col_min = np.min(data, 0)
    col_max = np.max(data, 0)
    # 1e-7 guards against division by zero when a column is constant.
    return (data - col_min) / (col_max - col_min + 1e-7)
def build_dataset(time_series, seq_length):
    """Build sliding-window (input, target) samples from a time series.

    Each sample pairs `seq_length` consecutive rows with the last column
    of the row immediately after the window (the next close price).

    Args:
        time_series: 2-D np.ndarray of shape (time, features); the last
            column is assumed to be the target (close price).
        seq_length: number of time steps per input window.

    Returns:
        (dataX, dataY): dataX of shape (N, seq_length, features) and
        dataY of shape (N, 1), where N = len(time_series) - seq_length.

    Fix: removed the leftover debug `print(_x, "->", _y)` that dumped
    every sample to stdout.
    """
    data_x = []
    data_y = []
    for i in range(len(time_series) - seq_length):
        data_x.append(time_series[i:i + seq_length, :])
        # Target is the last column of the step right after the window.
        data_y.append(time_series[i + seq_length, [-1]])
    return np.array(data_x), np.array(data_y)
# ---- Hyperparameters ----
seq_length = 7        # input window length in time steps (days)
data_dim = 5          # features per step — presumably open/high/low/volume/close; confirm against the CSV
hidden_dim = 10       # LSTM hidden state size
output_dim = 1        # single regression target (next close price)
learning_rate = 0.01
iterations = 500

# ---- Load and split the stock data ----
xy = np.loadtxt("data-02-stock_daily.csv", delimiter=",")
xy = xy[::-1]  # file is newest-first; reverse into chronological order

train_size = int(len(xy) * 0.7)
train_set = xy[:train_size]
# Overlap the split by seq_length rows so the first test window is complete.
test_set = xy[train_size - seq_length:]

# NOTE(review): each split is min-max scaled with its OWN statistics; scaling
# the test set with the train-set min/max would avoid look-ahead bias.
train_set = minmax_scaler(train_set)
test_set = minmax_scaler(test_set)

trainX, trainY = build_dataset(train_set, seq_length)
testX, testY = build_dataset(test_set, seq_length)

trainX_tensor = torch.FloatTensor(trainX)
trainY_tensor = torch.FloatTensor(trainY)

testX_tensor = torch.FloatTensor(testX)
testY_tensor = torch.FloatTensor(testY)
class Net(torch.nn.Module):
    """LSTM regressor: consumes a (batch, seq, features) window and predicts
    a single value from the hidden state at the final time step."""

    def __init__(self, input_dim, hidden_dim, output_dim, layers):
        super().__init__()
        self.rnn = torch.nn.LSTM(
            input_dim, hidden_dim, num_layers=layers, batch_first=True
        )
        self.fc = torch.nn.Linear(hidden_dim, output_dim, bias=True)

    def forward(self, x):
        # LSTM returns (per-step outputs, (h_n, c_n)); the state tuple is unused.
        outputs, _ = self.rnn(x)
        # Regress from the last time step's output only.
        return self.fc(outputs[:, -1])
# Single-layer LSTM regressor over the 5 daily features.
net = Net(data_dim, hidden_dim, output_dim, 1)

# Loss & optimizer: plain MSE regression trained with Adam.
criterion = torch.nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=learning_rate)

# Full-batch training: one optimizer step per iteration over the whole train set.
for i in range(iterations):
    optimizer.zero_grad()
    outputs = net(trainX_tensor)
    loss = criterion(outputs, trainY_tensor)
    loss.backward()
    optimizer.step()
    print(i, loss.item())

# Fix: evaluate under torch.no_grad() instead of the deprecated `.data.numpy()`
# — no autograd graph is built and the tensor is safely converted to NumPy.
with torch.no_grad():
    predictions = net(testX_tensor).numpy()

plt.plot(testY)
plt.plot(predictions)
plt.legend(['original', 'prediction'])
plt.show()