-
Notifications
You must be signed in to change notification settings - Fork 150
/
attentivefp_predictor.py
93 lines (83 loc) · 3.5 KB
/
attentivefp_predictor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# AttentiveFP
# pylint: disable= no-member, arguments-differ, invalid-name
import torch.nn as nn
from ..gnn import AttentiveFPGNN
from ..readout import AttentiveFPReadout
__all__ = ['AttentiveFPPredictor']
# pylint: disable=W0221
class AttentiveFPPredictor(nn.Module):
    """AttentiveFP model for graph-level regression and soft classification.

    Implements the architecture from `Pushing the Boundaries of Molecular
    Representation for Drug Discovery with the Graph Attention Mechanism
    <https://www.ncbi.nlm.nih.gov/pubmed/31408336>`__: an attentive message
    passing stage over atoms/bonds, a GRU-based attentive readout, and a
    linear prediction head with dropout.

    Parameters
    ----------
    node_feat_size : int
        Size for the input node features.
    edge_feat_size : int
        Size for the input edge features.
    num_layers : int
        Number of GNN layers. Default to 2.
    num_timesteps : int
        Times of updating the graph representations with GRU. Default to 2.
    graph_feat_size : int
        Size for the learned graph representations. Default to 200.
    n_tasks : int
        Number of tasks, which is also the output size. Default to 1.
    dropout : float
        Probability for performing the dropout. Default to 0.
    """
    def __init__(self,
                 node_feat_size,
                 edge_feat_size,
                 num_layers=2,
                 num_timesteps=2,
                 graph_feat_size=200,
                 n_tasks=1,
                 dropout=0.):
        super(AttentiveFPPredictor, self).__init__()

        # Stage 1: attentive message passing over node/edge features.
        self.gnn = AttentiveFPGNN(node_feat_size=node_feat_size,
                                  edge_feat_size=edge_feat_size,
                                  num_layers=num_layers,
                                  graph_feat_size=graph_feat_size,
                                  dropout=dropout)
        # Stage 2: GRU-driven attentive pooling into one vector per graph.
        self.readout = AttentiveFPReadout(feat_size=graph_feat_size,
                                          num_timesteps=num_timesteps,
                                          dropout=dropout)
        # Stage 3: dropout-regularized linear head producing n_tasks outputs.
        self.predict = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(graph_feat_size, n_tasks)
        )

    def forward(self, g, node_feats, edge_feats, get_node_weight=False):
        """Graph-level regression/soft classification.

        Parameters
        ----------
        g : DGLGraph
            DGLGraph for a batch of graphs.
        node_feats : float32 tensor of shape (V, node_feat_size)
            Input node features. V for the number of nodes.
        edge_feats : float32 tensor of shape (E, edge_feat_size)
            Input edge features. E for the number of edges.
        get_node_weight : bool
            Whether to get the weights of atoms during readout. Default to False.

        Returns
        -------
        float32 tensor of shape (G, n_tasks)
            Prediction for the graphs in the batch. G for the number of graphs.
        node_weights : list of float32 tensor of shape (V, 1), optional
            This is returned when ``get_node_weight`` is ``True``.
            The list has a length ``num_timesteps`` and ``node_weights[i]``
            gives the node weights in the i-th update.
        """
        updated_feats = self.gnn(g, node_feats, edge_feats)

        # Fast path: no attention weights requested, return predictions only.
        if not get_node_weight:
            graph_feats = self.readout(g, updated_feats, get_node_weight)
            return self.predict(graph_feats)

        # The readout yields per-timestep atom weights alongside graph features.
        graph_feats, atom_weights = self.readout(g, updated_feats, get_node_weight)
        return self.predict(graph_feats), atom_weights