dynamic_partial.py
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F


class DynamicPartial(nn.Module):
    """Tracks a per-sample EMA of predicted class distributions and samples
    candidate labels from its temperature-sharpened form."""

    def __init__(self, num_samples, beta=0.9, num_classes=10, T=0.5):
        super(DynamicPartial, self).__init__()
        # Start every sample at a uniform distribution over classes.
        self.latent = (torch.ones(num_samples, num_classes) / num_classes).cuda()
        self.beta = beta  # EMA momentum
        self.T = T  # sharpening temperature

    def update_hist(self, probs, index):
        # Clamp and renormalize the predictions, then fold them into the EMA.
        probs = torch.clamp(probs, 1e-4, 1.0 - 1e-4).detach()
        probs /= probs.sum(1, keepdim=True)
        self.latent[index] = self.beta * self.latent[index] + (1 - self.beta) * probs

    def sample_latent(self, index=None):
        # Sharpen with temperature T when indexing a batch, renormalize, and
        # wrap in a Categorical so callers can draw label samples.
        latent_distribution = (
            self.latent[index] ** (1 / self.T) if index is not None else self.latent
        )
        norm_ld = latent_distribution / latent_distribution.sum(1, keepdim=True)
        return dist.Categorical(norm_ld)
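

# --- Illustrative usage sketch; not part of the original file. It exercises
# the update/sample cycle with random "predictions"; the (probs, index) batch
# layout is an assumption for illustration. Requires a CUDA device, since the
# module hard-codes .cuda().
def _demo_dynamic_partial():
    num_samples, num_classes, batch = 100, 10, 16
    dp = DynamicPartial(num_samples, num_classes=num_classes)
    index = torch.randperm(num_samples)[:batch]
    probs = torch.rand(batch, num_classes).softmax(1).cuda()
    dp.update_hist(probs, index)  # fold this batch's predictions into the EMA
    tilde_y = dp.sample_latent(index).sample()  # one candidate label per sample
    assert tilde_y.shape == (batch,)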


class DynamicPartial_Mix(nn.Module):
    """Mixup variant of DynamicPartial; it tracks the same per-sample EMA of
    predicted class distributions."""

    def __init__(self, num_samples, beta=0.9, num_classes=10, T=0.5) -> None:
        super(DynamicPartial_Mix, self).__init__()
        self.latent = (torch.ones(num_samples, num_classes) / num_classes).cuda()
        self.beta = beta
        self.T = T

    def update_hist(self, probs, index):
        probs = torch.clamp(probs, 1e-4, 1.0 - 1e-4).detach()
        probs /= probs.sum(1, keepdim=True)
        self.latent[index] = self.beta * self.latent[index] + (1 - self.beta) * probs
        # Mixed-history variant, kept for reference:
        # self.q = (
        #     mixup_l * self.latent[index] + (1 - mixup_l) * self.latent[index][mix_index]
        # )

    def sample_latent(self, index, mix_index, mixup_l):
        # mix_index and mixup_l belong to the commented-out mixup path below and
        # are currently unused; sampling matches DynamicPartial.sample_latent.
        # latent_distribution = self.q ** (1 / self.T)
        # q = mixup_l * self.latent[index] + (1 - mixup_l) * self.latent[index][mix_index]
        latent_distribution = (
            self.latent[index] ** (1 / self.T) if index is not None else self.latent
        )
        norm_ld = latent_distribution / latent_distribution.sum(1, keepdim=True)
        return dist.Categorical(norm_ld)


def sample_neg(prior_cov, num_classes, num=None):
    """Draws a multi-hot mask of negative labels per sample, choosing classes
    with probability inversely proportional to the given prior."""
    probs = prior_cov.detach().cpu().numpy().astype("float64")
    # Invert the prior: unlikely positives become likely negatives.
    probs = (1 - probs) / (1 - probs).sum(1, keepdims=True)
    neg = torch.vstack(
        [
            F.one_hot(
                torch.tensor(
                    np.random.choice(
                        num_classes,
                        # Earlier count variant, kept for reference:
                        # int(
                        #     torch.round(num[i] * num_classes - (probs[i] == 0).sum())
                        #     .clamp(min=0.0, max=num_classes)
                        #     .item()
                        # )
                        # Number of negatives: a num[i] fraction of the classes
                        # with nonzero probability (minus one, so replace=False
                        # always has room to sample).
                        int(
                            torch.round(num[i] * ((probs[i] > 0).sum() - 1))
                            .clamp(min=0.0, max=num_classes)
                            .item()
                        )
                        if num is not None
                        else np.random.randint(0, num_classes - 2, dtype=np.uint8),
                        replace=False,
                        p=probs[i],
                    )
                ),
                num_classes,
            ).sum(0)
            for i in range(probs.shape[0])
        ]
    ).cuda()
    neg[neg > 1] = 1  # defensive clamp to a binary mask
    return neg
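

# --- Illustrative usage sketch; not part of the original file. `prior_cov`
# rows are candidate-label distributions and `num` scales how many negatives
# to draw per sample; both shapes are assumptions for illustration. Requires
# CUDA, matching the module's hard-coded .cuda().
def _demo_sample_neg():
    prior_cov = torch.rand(4, 10).softmax(1)
    num = torch.full((4,), 0.5)  # draw about half of the available negatives
    neg = sample_neg(prior_cov, num_classes=10, num=num)
    assert neg.shape == (4, 10) and neg.max().item() <= 1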


#! Two approaches for Eq. 12 (mathematically identical):
#! Option 1: log_outputs.log_softmax(0)
#! Option 2: log_outputs - torch.logsumexp(log_outputs, dim=0, keepdim=True),
#!           i.e. the normalization over the batch dimension computed in log space
def prior_loss(log_outputs, log_prior):
    # KL between the predictions and the prior-reweighted, batch-normalized
    # predictions (Option 1 above); both arguments are in log space.
    return F.kl_div(
        log_outputs,
        (log_prior + log_outputs.log_softmax(0)).log_softmax(1),
        # Option 2, kept for reference:
        # (log_prior + (log_outputs - torch.logsumexp(log_outputs, dim=0, keepdim=True))).log_softmax(1),
        reduction="batchmean",
        log_target=True,
    )
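

# --- Illustrative check; not part of the original file. log_softmax over
# dim 0 is exactly log_outputs - logsumexp(log_outputs, dim=0, keepdim=True),
# so the two options above build the same target distribution.
def _demo_eq12_options():
    log_outputs = torch.randn(8, 10).log_softmax(1)
    opt1 = log_outputs.log_softmax(0)
    opt2 = log_outputs - torch.logsumexp(log_outputs, dim=0, keepdim=True)
    assert torch.allclose(opt1, opt2, atol=1e-6)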


def pxy_kl(log_outputs, tildey, log_prior):
    # p(x|y) term: KL with the prior-adjusted sampled labels as input and the
    # detached model predictions as target (both in log space).
    return F.kl_div(
        (tildey.log_softmax(1) + log_prior).log_softmax(1),
        log_outputs.detach(),
        reduction="batchmean",
        log_target=True,
    )


def pyx_kl(log_outputs, tildey, log_prior):
    # p(y|x) term: the sampled labels are shifted by the per-sample log-marginal
    # logsumexp(log_softmax(log_outputs, 0) + log_prior) before the KL against
    # the detached predictions.
    return F.kl_div(
        (
            tildey.log_softmax(1)
            + torch.logsumexp(
                log_outputs.log_softmax(0) + log_prior, dim=1, keepdim=True
            )
            # Option 2, kept for reference:
            # + torch.logsumexp(
            #     (log_outputs - torch.logsumexp(log_outputs, dim=0, keepdim=True)) + log_prior,
            #     dim=1,
            #     keepdim=True,
            # )
        ).log_softmax(1),
        log_outputs.detach(),
        reduction="batchmean",
        log_target=True,
    )
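

# --- Illustrative sketch; not part of the original file. It wires the three
# losses together for one batch; the uniform log-prior, the one-hot tildey,
# and the plain sum of the terms are assumptions for illustration.
def _demo_losses():
    batch, num_classes = 8, 10
    logits = torch.randn(batch, num_classes, requires_grad=True)
    log_outputs = logits.log_softmax(1)
    log_prior = torch.full((batch, num_classes), -float(np.log(num_classes)))
    tildey = F.one_hot(torch.randint(0, num_classes, (batch,)), num_classes).float()
    loss = (
        prior_loss(log_outputs, log_prior)
        + pxy_kl(log_outputs, tildey, log_prior)
        + pyx_kl(log_outputs, tildey, log_prior)
    )
    loss.backward()  # combined objective for one batch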