"""
---
title: Train a Graph Attention Network v2 (GATv2) on the Cora dataset
summary: >
  This trains a Graph Attention Network v2 (GATv2) on the Cora dataset.
---

# Train a Graph Attention Network v2 (GATv2) on the Cora dataset
"""

import torch
from torch import nn

from labml import experiment
from labml.configs import option
from labml_nn.graphs.gat.experiment import Configs as GATConfigs
from labml_nn.graphs.gatv2 import GraphAttentionV2Layer


class GATv2(nn.Module):
    """
    ## Graph Attention Network v2 (GATv2)

    This graph attention network has two [graph attention layers](index.html).
    """

    def __init__(self, in_features: int, n_hidden: int, n_classes: int, n_heads: int, dropout: float,
                 share_weights: bool = True):
        """
        * `in_features` is the number of features per node
        * `n_hidden` is the number of features in the first graph attention layer
        * `n_classes` is the number of classes
        * `n_heads` is the number of heads in the graph attention layers
        * `dropout` is the dropout probability
        * `share_weights` if set to `True`, the same matrix will be applied to the source and the target node of every edge
        """
        super().__init__()

        # First graph attention layer where we concatenate the heads
        self.layer1 = GraphAttentionV2Layer(in_features, n_hidden, n_heads,
                                            is_concat=True, dropout=dropout, share_weights=share_weights)
        # Activation function after the first graph attention layer
        self.activation = nn.ELU()
        # Final graph attention layer where we average the heads
        self.output = GraphAttentionV2Layer(n_hidden, n_classes, 1,
                                            is_concat=False, dropout=dropout, share_weights=share_weights)
        # Dropout
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, adj_mat: torch.Tensor):
        """
        * `x` is the feature vectors of shape `[n_nodes, in_features]`
        * `adj_mat` is the adjacency matrix of the form
          `[n_nodes, n_nodes, n_heads]` or `[n_nodes, n_nodes, 1]`
        """
        # Apply dropout to the input
        x = self.dropout(x)
        # First graph attention layer
        x = self.layer1(x, adj_mat)
        # Activation function
        x = self.activation(x)
        # Dropout
        x = self.dropout(x)
        # Output layer (without activation) for logits
        return self.output(x, adj_mat)
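

# A minimal usage sketch of the model above (not part of the original experiment):
# the node count, feature sizes and the self-loop-only adjacency matrix below are
# illustrative values, and this helper is never called by the training code.
def _example_gatv2_forward():
    # Toy model: 8 input features, 16 hidden features across 2 heads, 3 classes
    model = GATv2(in_features=8, n_hidden=16, n_classes=3, n_heads=2, dropout=0.5)
    # Random features for 4 nodes, shape `[n_nodes, in_features]`
    x = torch.rand(4, 8)
    # Boolean adjacency matrix of shape `[n_nodes, n_nodes, 1]` with self-loops only
    adj_mat = torch.eye(4, dtype=torch.bool).unsqueeze(-1)
    # Returns logits of shape `[n_nodes, n_classes]`
    return model(x, adj_mat)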


class Configs(GATConfigs):
    """
    ## Configurations

    Since the experiment is the same as the [GAT experiment](../gat/experiment.html), but with the
    [GATv2 model](index.html), we extend the same configs and change the model.
    """

    # Whether to share weights for source and target nodes of edges
    share_weights: bool = False
    # Set the model
    model: GATv2 = 'gat_v2_model'


@option(Configs.model)
def gat_v2_model(c: Configs):
    """
    Create GATv2 model
    """
    return GATv2(c.in_features, c.n_hidden, c.n_classes, c.n_heads, c.dropout, c.share_weights).to(c.device)
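

# A minimal sketch of overriding a config value so that the `gat_v2_model` provider
# above builds the model with shared weights. The experiment name and this helper are
# illustrative assumptions; the original experiment does not override `share_weights`,
# and this function is never called by the training code.
def _example_shared_weight_configs():
    conf = Configs()
    # Hypothetical experiment name, following the same pattern as `main` below
    experiment.create(name='gatv2_share_weights')
    # Override `share_weights`; the provider passes it through to `GATv2`
    experiment.configs(conf, {'share_weights': True})
    return conf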


def main():
    # Create configurations
    conf = Configs()
    # Create an experiment
    experiment.create(name='gatv2')
    # Calculate configurations.
    experiment.configs(conf, {
        # Adam optimizer
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 5e-3,
        'optimizer.weight_decay': 5e-4,

        'dropout': 0.7,
    })

    # Start and watch the experiment
    with experiment.start():
        # Run the training
        conf.run()


#
if __name__ == '__main__':
    main()