GitHub Repository: jantic/deoldify
Path: blob/master/fastai/text/interpret.py
from ..torch_core import *
from ..basic_data import *
from ..basic_train import *
from ..train import ClassificationInterpretation
import matplotlib.cm as cm

__all__ = ['TextClassificationInterpretation']

def value2rgba(x:float, cmap:Callable=cm.RdYlGn, alpha_mult:float=1.0)->Tuple:
    "Convert a value `x` in [0, 1] to an RGBA tuple according to `cmap`, with the alpha channel scaled by `alpha_mult`."
    c = cmap(x)
    rgb = (np.array(c[:-1]) * 255).astype(int)
    a = c[-1] * alpha_mult
    return tuple(rgb.tolist() + [a])
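
# Usage sketch (illustrative, not part of the original module): with the default
# RdYlGn colormap, low values map to the red end and high values to the green end,
# which is what the HTML renderer below relies on.
#
#   value2rgba(0.0)                  # red end of RdYlGn, full opacity
#   value2rgba(1.0)                  # green end of RdYlGn
#   value2rgba(0.5, alpha_mult=0.5)  # mid value at half opacity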

def piece_attn_html(pieces:List[str], attns:List[float], sep:str=' ', **kwargs)->str:
    "Wrap each piece in a `<span>` whose background color and tooltip encode its attention value."
    html_code,spans = ['<span style="font-family: monospace;">'], []
    for p, a in zip(pieces, attns):
        p = html.escape(p)
        c = str(value2rgba(a, alpha_mult=0.5, **kwargs))
        spans.append(f'<span title="{a:.3f}" style="background-color: rgba{c};">{p}</span>')
    html_code.append(sep.join(spans))
    html_code.append('</span>')
    return ''.join(html_code)

def show_piece_attn(*args, **kwargs):
    "Display the HTML produced by `piece_attn_html` inline in a notebook."
    from IPython.display import display, HTML
    display(HTML(piece_attn_html(*args, **kwargs)))
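
# Usage sketch (hand-picked attention values, purely illustrative): in a notebook
# cell this renders three tokens on a red-to-green background scale.
#
#   show_piece_attn(['the', 'movie', 'rocked'], [0.1, 0.3, 1.0])
#
# Call `piece_attn_html` directly when you want the raw HTML string instead.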

def _eval_dropouts(mod):
    "Recursively put `Dropout` and `BatchNorm` layers in eval mode while leaving the rest of the model in train mode."
    module_name = mod.__class__.__name__
    if 'Dropout' in module_name or 'BatchNorm' in module_name: mod.training = False
    for module in mod.children(): _eval_dropouts(module)
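
# A minimal sanity check of the pattern (illustrative; assumes `torch.nn` is in
# scope as `nn` via the star imports above):
#
#   m = nn.Sequential(nn.Linear(4, 4), nn.Dropout(0.5))
#   m.train(); _eval_dropouts(m)
#   assert m.training and not m[1].training  # dropout frozen, rest still training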

class TextClassificationInterpretation(ClassificationInterpretation):
    """Provides an interpretation of classification based on input sensitivity.
    For now this is designed for AWD-LSTM only, since the Transformer already has its own attention model.
    """

    def __init__(self, learn: Learner, preds: Tensor, y_true: Tensor, losses: Tensor, ds_type: DatasetType = DatasetType.Valid):
        super(TextClassificationInterpretation, self).__init__(learn, preds, y_true, losses, ds_type)
        self.model = learn.model

    @classmethod
    def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid, activ:nn.Module=None):
        "Get `preds`, `y_true` and `losses` from `learn` to construct the base class."
        preds_res = learn.get_preds(ds_type=ds_type, activ=activ, with_loss=True, ordered=True)
        return cls(learn, *preds_res)
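
    # Construction sketch (illustrative; assumes an already trained fastai v1 text
    # classifier `learn`, e.g. from `text_classifier_learner`):
    #
    #   interp = TextClassificationInterpretation.from_learner(learn)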

    def intrinsic_attention(self, text:str, class_id:int=None):
        """Calculate the intrinsic attention of the input w.r.t. an output `class_id`, or the class predicted by the model if `None`.
        For reference, see the Sequential Jacobian section of https://www.cs.toronto.edu/~graves/preprint.pdf
        """
        self.model.train()
        _eval_dropouts(self.model)  # gradients still flow, but without dropout/batch-norm noise
        self.model.zero_grad()
        self.model.reset()
        ids = self.data.one_item(text)[0]
        # Embed the tokens and track gradients w.r.t. the embeddings themselves.
        emb = self.model[0].module.encoder(ids).detach().requires_grad_(True)
        lstm_output = self.model[0].module(emb, from_embeddings=True)
        self.model.eval()
        # Run the classifier head and turn logits into probabilities.
        cl = self.model[1](lstm_output + (torch.zeros_like(ids).byte(),))[0].softmax(dim=-1)
        if class_id is None: class_id = cl.argmax()
        cl[0][class_id].backward()  # backpropagate the chosen class probability to the embeddings
        attn = emb.grad.squeeze().abs().sum(dim=-1)  # per-token sensitivity
        attn /= attn.max()  # normalise to [0, 1]
        tokens = self.data.single_ds.reconstruct(ids[0])
        return tokens, attn
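
    # Usage sketch (illustrative sentence, reusing `interp` from the construction
    # sketch above): returns the reconstructed input (a fastai `Text` item) and a
    # tensor of normalised per-token sensitivities.
    #
    #   tokens, attn = interp.intrinsic_attention("I really loved that movie!")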

    def html_intrinsic_attention(self, text:str, class_id:int=None, **kwargs)->str:
        "Return the intrinsic attention of `text` rendered as an HTML string."
        text, attn = self.intrinsic_attention(text, class_id)
        return piece_attn_html(text.text.split(), to_np(attn), **kwargs)

    def show_intrinsic_attention(self, text:str, class_id:int=None, **kwargs)->None:
        "Display the intrinsic attention of `text` inline in a notebook."
        text, attn = self.intrinsic_attention(text, class_id)
        show_piece_attn(text.text.split(), to_np(attn), **kwargs)
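
    # Usage sketch (illustrative sentence): highlights each token on a red-to-green
    # scale according to how strongly it drives the predicted (or given) class.
    #
    #   interp.show_intrinsic_attention("I really loved that movie!")
    #   interp.html_intrinsic_attention("I really loved that movie!", class_id=0)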

    def show_top_losses(self, k:int, max_len:int=70)->None:
        """
        Create a table showing the first `k` texts in `top_losses` along with their prediction, actual class,
        loss, and the probability assigned to the actual class. `max_len` is the maximum number of tokens displayed.
        """
        from IPython.display import display, HTML
        items = []
        tl_val,tl_idx = self.top_losses()
        for idx in tl_idx:
            if k <= 0: break
            k -= 1
            tx,cl = self.data.dl(self.ds_type).dataset[idx]
            cl = cl.data
            classes = self.data.classes
            txt = ' '.join(tx.text.split(' ')[:max_len]) if max_len is not None else tx.text
            tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}', f'{self.losses[idx]:.2f}',
                   f'{self.preds[idx][cl]:.2f}']
            items.append(tmp)
        items = np.array(items)
        names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability']
        df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
        with pd.option_context('display.max_colwidth', -1):
            display(HTML(df.to_html(index=False)))
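
# Usage sketch (illustrative, reusing `interp` from the construction sketch above):
# show the five validation texts the model got most wrong, truncated to 50 tokens.
#
#   interp.show_top_losses(5, max_len=50)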