forked from compbiolabucf/omicsGAT
cluster_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from layers import GraphAttentionLayer


class omicsGAT(nn.Module):
    def __init__(self, nfeat, nhid, nheads, dropout=0, alpha=0.2):
        super(omicsGAT, self).__init__()
        self.dropout = dropout

        ## creating one attention layer per head
        self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True)
                           for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)  ## registering each head as a submodule

    def forward(self, x, adj):
        x = F.dropout(x, self.dropout, training=self.training)
        ## concatenating all attention heads: each head maps (#input x nfeat) -> (#input x nhid),
        ## so the concatenated output has shape (#input x nhid*nheads)
        x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
        return x
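
## Example usage (a sketch, not part of the original file; it assumes GraphAttentionLayer
## from layers.py takes node features x and a dense (N x N) adjacency and returns an
## (N x nhid) tensor per head, as in standard GAT implementations):
##   gat = omicsGAT(nfeat=64, nhid=8, nheads=4)
##   z = gat(x, adj)   # x: (N, 64), adj: (N, N)  ->  z: (N, 8*4)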

class autoencoder(nn.Module):
    def __init__(self, in_features, nhid, nheads, alpha=0.2):
        super(autoencoder, self).__init__()
        ## GAT-based encoder producing an embedding of size nhid*nheads
        self.encoder = omicsGAT(in_features, nhid, nheads, alpha=alpha)
        embedding = nhid * nheads

        ## fully connected decoder reconstructing the input features from the embedding
        self.decoder = nn.Sequential(
            nn.Linear(embedding, int(embedding / 2)),
            nn.ReLU(),
            nn.Linear(int(embedding / 2), int(in_features / 4)),
            nn.ReLU(),
            nn.Linear(int(in_features / 4), int(in_features / 2)),
            nn.ReLU(),
            nn.Linear(int(in_features / 2), in_features))

    def forward(self, x, adj, train=True):
        out = self.encoder(x, adj)
        if train:   ## during training, return the reconstruction; otherwise return the embedding
            out = self.decoder(out)
        return out
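
## Minimal smoke test (a sketch under assumptions, not part of the original file).
## It assumes GraphAttentionLayer accepts a dense (N x N) adjacency tensor; the toy
## fully connected graph and random features below are purely illustrative.
if __name__ == '__main__':
    n_samples, n_features = 16, 64
    x = torch.rand(n_samples, n_features)       ## toy feature matrix (N x F)
    adj = torch.ones(n_samples, n_samples)      ## toy fully connected adjacency (N x N)

    model = autoencoder(in_features=n_features, nhid=8, nheads=4)
    recon = model(x, adj, train=True)           ## reconstruction, shape (16, 64)
    emb = model(x, adj, train=False)            ## latent embedding for clustering, shape (16, 32)
    loss = F.mse_loss(recon, x)                 ## a typical reconstruction objective
    print(recon.shape, emb.shape, loss.item())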