GitHub Repository: POSTECH-CVLab/PyTorch-StudioGAN
Path: blob/master/src/metrics/ins.py
# PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN
# The MIT License (MIT)
# See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details

# src/metrics/ins.py

import math

from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from sklearn.metrics import top_k_accuracy_score
from tqdm import tqdm
import torch
import numpy as np

import utils.sample as sample
import utils.misc as misc
import utils.losses as losses
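
# This module computes the Inception Score (IS) of a set of images and,
# optionally (is_acc=True), the top-1/top-5 accuracy of the evaluation
# classifier against ImageNet-style labels. eval_features scores
# pre-computed softmax probabilities, while eval_dataset runs the
# evaluation backbone over a full data loader.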


def inception_softmax(eval_model, images, quantize):
    with torch.no_grad():
        embeddings, logits = eval_model.get_outputs(images, quantize=quantize)
        ps = torch.nn.functional.softmax(logits, dim=1)
    return ps
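

# Inception Score, computed split-wise:
#     IS = exp( E_x [ KL( p(y|x) || p(y) ) ] )
# where p(y|x) are the classifier's softmax probabilities for one image and
# p(y) is their marginal over the split. The mean and standard deviation over
# `splits` equally-sized chunks of `ps` are returned.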
def calculate_kl_div(ps, splits):
    scores = []
    num_samples = ps.shape[0]
    with torch.no_grad():
        for j in range(splits):
            part = ps[(j * num_samples // splits):((j + 1) * num_samples // splits), :]
            kl = part * (torch.log(part) - torch.log(torch.unsqueeze(torch.mean(part, 0), 0)))
            kl = torch.mean(torch.sum(kl, 1))
            kl = torch.exp(kl)
            scores.append(kl.unsqueeze(0))

        scores = torch.cat(scores, 0)
        m_scores = torch.mean(scores).detach().cpu().numpy()
        m_std = torch.std(scores).detach().cpu().numpy()
    return m_scores, m_std
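

# Example (sketch): calculate_kl_div expects an (N, num_classes) tensor of
# softmax probabilities, e.g.
#     probs = torch.nn.functional.softmax(torch.randn(5000, 1000), dim=1)
#     mean_is, std_is = calculate_kl_div(probs, splits=10)
# Near-uniform predictions push the score toward 1; confident and diverse
# predictions push it toward the number of classes.


# eval_features computes IS (and, if is_acc=True, top-1/top-5 accuracy) from
# probabilities and labels that were collected elsewhere. For the non-torch
# backbone, ImageNet classes appear to occupy columns 1-1000 of the output,
# hence the +1 label shift and the column slices below.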
def eval_features(probs, labels, data_loader, num_features, split, is_acc, is_torch_backbone=False):
    if is_acc:
        ImageNet_folder_label_dict = misc.load_ImageNet_label_dict(data_name=data_loader.dataset.data_name,
                                                                   is_torch_backbone=is_torch_backbone)
        loader_label_folder_dict = {v: k for k, v in data_loader.dataset.data.class_to_idx.items()}
        loader_label_holder = labels
    else:
        top1, top5 = "N/A", "N/A"

    probs, labels = probs[:num_features], labels[:num_features]
    m_scores, m_std = calculate_kl_div(probs, splits=split)

    if is_acc and is_torch_backbone:
        if data_loader.dataset.data_name in ["Baby_ImageNet", "Papa_ImageNet", "Grandpa_ImageNet"]:
            converted_labels = []
            for loader_label in labels:
                converted_labels.append(ImageNet_folder_label_dict[loader_label_folder_dict[loader_label]])
            top1 = top_k_accuracy_score(converted_labels, probs.detach().cpu().numpy(), k=1, labels=range(1000))
            top5 = top_k_accuracy_score(converted_labels, probs.detach().cpu().numpy(), k=5, labels=range(1000))
        else:
            top1 = top_k_accuracy_score(labels, probs.detach().cpu().numpy(), k=1)
            top5 = top_k_accuracy_score(labels, probs.detach().cpu().numpy(), k=5)
    elif is_acc and not is_torch_backbone:
        converted_labels = []
        for loader_label in labels:
            converted_labels.append(ImageNet_folder_label_dict[loader_label_folder_dict[loader_label]])
        if data_loader.dataset.data_name in ["Baby_ImageNet", "Papa_ImageNet", "Grandpa_ImageNet"]:
            top1 = top_k_accuracy_score([i + 1 for i in converted_labels], probs[:, 0:1001].detach().cpu().numpy(), k=1, labels=range(1001))
            top5 = top_k_accuracy_score([i + 1 for i in converted_labels], probs[:, 0:1001].detach().cpu().numpy(), k=5, labels=range(1001))
        else:
            top1 = top_k_accuracy_score([i + 1 for i in converted_labels], probs[:, 1:1001].detach().cpu().numpy(), k=1)
            top5 = top_k_accuracy_score([i + 1 for i in converted_labels], probs[:, 1:1001].detach().cpu().numpy(), k=5)
    else:
        pass
    return m_scores, m_std, top1, top5
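

# eval_dataset runs the evaluation classifier over every batch in data_loader,
# collects the softmax probabilities (and labels), and reports the Inception
# Score of the dataset plus, optionally, top-1/top-5 accuracy.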
def eval_dataset(data_loader, eval_model, quantize, splits, batch_size, world_size, DDP,
                 is_acc, is_torch_backbone=False, disable_tqdm=False):
    eval_model.eval()
    num_samples = len(data_loader.dataset)
    num_batches = int(math.ceil(float(num_samples) / float(batch_size)))
    if DDP: num_batches = int(math.ceil(float(num_samples) / float(batch_size*world_size)))
    dataset_iter = iter(data_loader)

    if is_acc:
        ImageNet_folder_label_dict = misc.load_ImageNet_label_dict(data_name=data_loader.dataset.data_name,
                                                                   is_torch_backbone=is_torch_backbone)
        loader_label_folder_dict = {v: k for k, v in data_loader.dataset.data.class_to_idx.items()}
    else:
        top1, top5 = "N/A", "N/A"

    ps_holder = []
    labels_holder = []
    for i in tqdm(range(num_batches), disable=disable_tqdm):
        try:
            real_images, real_labels = next(dataset_iter)
        except StopIteration:
            break

        real_images, real_labels = real_images.to("cuda"), real_labels.to("cuda")

        with torch.no_grad():
            ps = inception_softmax(eval_model, real_images, quantize)
            ps_holder.append(ps)
            labels_holder.append(real_labels)

    ps_holder = torch.cat(ps_holder, 0)
    labels_holder = torch.cat(labels_holder, 0)
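    # When evaluation is distributed, gather probabilities and labels from
    # every DDP process so the score and accuracy cover the full dataset.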
    if DDP:
        ps_holder = torch.cat(losses.GatherLayer.apply(ps_holder), dim=0)
        labels_holder = torch.cat(losses.GatherLayer.apply(labels_holder), dim=0)
    labels_holder = list(labels_holder.detach().cpu().numpy())

    m_scores, m_std = calculate_kl_div(ps_holder[:len(data_loader.dataset)], splits=splits)

    if is_acc and is_torch_backbone:
        if data_loader.dataset.data_name in ["Baby_ImageNet", "Papa_ImageNet", "Grandpa_ImageNet"]:
            converted_labels = []
            for loader_label in labels_holder:
                converted_labels.append(ImageNet_folder_label_dict[loader_label_folder_dict[loader_label]])
            top1 = top_k_accuracy_score(converted_labels, ps_holder.detach().cpu().numpy(), k=1, labels=range(1000))
            top5 = top_k_accuracy_score(converted_labels, ps_holder.detach().cpu().numpy(), k=5, labels=range(1000))
        else:
            top1 = top_k_accuracy_score(labels_holder, ps_holder.detach().cpu().numpy(), k=1)
            top5 = top_k_accuracy_score(labels_holder, ps_holder.detach().cpu().numpy(), k=5)
    elif is_acc and not is_torch_backbone:
        converted_labels = []
        for loader_label in labels_holder:
            converted_labels.append(ImageNet_folder_label_dict[loader_label_folder_dict[loader_label]])
        if data_loader.dataset.data_name in ["Baby_ImageNet", "Papa_ImageNet", "Grandpa_ImageNet"]:
            top1 = top_k_accuracy_score([i + 1 for i in converted_labels], ps_holder[:, 0:1001].detach().cpu().numpy(), k=1, labels=range(1001))
            top5 = top_k_accuracy_score([i + 1 for i in converted_labels], ps_holder[:, 0:1001].detach().cpu().numpy(), k=5, labels=range(1001))
        else:
            top1 = top_k_accuracy_score([i + 1 for i in converted_labels], ps_holder[:, 1:1001].detach().cpu().numpy(), k=1)
            top5 = top_k_accuracy_score([i + 1 for i in converted_labels], ps_holder[:, 1:1001].detach().cpu().numpy(), k=5)
    else:
        pass
    return m_scores, m_std, top1, top5
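

# Usage sketch (hypothetical objects and values; in StudioGAN the evaluation
# backbone and data loader are built by the surrounding training/evaluation
# pipeline):
#     mean_is, std_is, top1, top5 = eval_dataset(data_loader=eval_dataloader,
#                                                eval_model=eval_model,
#                                                quantize=True,
#                                                splits=10,
#                                                batch_size=64,
#                                                world_size=1,
#                                                DDP=False,
#                                                is_acc=False)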