Commit

Merge pull request #42 from Tensor46/develop
Develop
Tensor46 committed Jan 8, 2019
2 parents 138f2c7 + 622545c commit efeedba
Showing 2 changed files with 12 additions and 9 deletions.
core/NeuralEssentials/cudamodel.py: 3 additions & 3 deletions
@@ -34,10 +34,10 @@ def check_precision_device(self, inputs):
                 break
         self.precision = p.dtype if "p" in locals() else torch.float32
         if type(inputs) in [list, tuple]:
-            inputs = [x if x.dtype == torch.long else x.type(self.precision)
-                      for x in inputs]
-            if self.is_cuda:
-                inputs = [x.cuda() for x in inputs]
+            inputs = [(x.type(self.precision).cuda() if self.is_cuda else
+                       x.type(self.precision)) if x.dtype != torch.long
+                      else x for x in inputs]
             return inputs
         else:
             if not (inputs.dtype == torch.long):
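The net effect in cudamodel.py: dtype casting and device placement now happen in a single pass over the inputs, and torch.long tensors (labels and indices) are never cast to the float precision. A minimal standalone sketch of the merged comprehension's behavior; the normalize_inputs helper is hypothetical, not part of the repository:

    import torch

    def normalize_inputs(inputs, precision=torch.float32, is_cuda=False):
        # cast non-long tensors to `precision`, optionally move them to GPU,
        # and pass long tensors (labels/indices) through untouched
        if isinstance(inputs, (list, tuple)):
            return [(x.type(precision).cuda() if is_cuda else x.type(precision))
                    if x.dtype != torch.long else x for x in inputs]
        return inputs if inputs.dtype == torch.long else inputs.type(precision)

    # usage: a float image batch plus a long label tensor
    batch = [torch.rand(4, 3, 32, 32), torch.randint(0, 10, (4,))]
    batch = normalize_inputs(batch, torch.float32, torch.cuda.is_available())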
core/NeuralLayers/dropblock.py: 9 additions & 6 deletions
@@ -20,7 +20,8 @@ class DropBlock(nn.Module):
             tensor channels. default = False - recommended in paper
         iterative_p: when True, iteratively increases probability from 0 to p
             till n_iterations = steps_to_max, and maintains p there after.
-        steps_to_max: steps to reach p, default = 500000
+            default = True
+        steps_to_max: iterations to reach p, default = 50000
     Return:
         torch.Tensor of shape BCHW
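The updated docstring pins down the iterative_p schedule: the effective drop probability climbs from 0 to p while n_iterations grows to steps_to_max, then holds at p. Assuming the ramp is linear (the docstring says "iteratively increases" without naming the curve), the schedule works out to:

    # p_t = p * min(1.0, t / steps_to_max)
    def current_p(p: float, t: int, steps_to_max: int) -> float:
        return p * min(1.0, t / steps_to_max)

    assert current_p(0.1, 0, 50000) == 0.0
    assert current_p(0.1, 25000, 50000) == 0.05
    assert current_p(0.1, 50000, 50000) == 0.1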
@@ -31,8 +32,8 @@ def __init__(self,
                  p: float = 0.1,
                  block_size: int = 5,
                  shared: bool = False,
-                 iterative_p: bool = False,
-                 steps_to_max: int = 500000):
+                 iterative_p: bool = True,
+                 steps_to_max: int = 50000):
 
         super(DropBlock, self).__init__()
         # checks
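With these defaults flipped, DropBlock() now ramps its drop probability out of the box; code that relied on a fixed p from step 0 must pass iterative_p=False explicitly. A hedged usage sketch (the import path is inferred from the file location in this diff):

    import torch
    from core.NeuralLayers.dropblock import DropBlock

    drop = DropBlock(p=0.1, block_size=5, shared=False,
                     iterative_p=True, steps_to_max=50000)
    x = torch.rand(8, 64, 32, 32)  # BCHW
    y = drop(x)                    # same shape; blocks are zeroed while training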
@@ -70,7 +71,7 @@ def __init__(self,
"{}".format(type(iterative_p).__name__))
if iterative_p:
# steps_to_max = steps to reach p
self.steps_to_max = 500000
self.steps_to_max = steps_to_max
self.register_buffer("n_iterations", torch.Tensor([0]).sum())

def forward(self, tensor):
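Besides honoring the constructor argument (the old code hard-coded 500000 and ignored steps_to_max), note that register_buffer keeps the counter out of the optimizer's parameters while still saving it in the state_dict, so a resumed run continues the ramp where it left off. A minimal illustration of that pattern (the Counter class is only a stand-in):

    import torch
    import torch.nn as nn

    class Counter(nn.Module):
        def __init__(self):
            super().__init__()
            # same idiom as the diff: a 0-dim float tensor initialized to 0
            self.register_buffer("n_iterations", torch.Tensor([0]).sum())

    c = Counter()
    print(c.state_dict())  # OrderedDict([('n_iterations', tensor(0.))])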
@@ -89,12 +90,14 @@ def forward(self, tensor):
             pad = self.w//2
             if self.shared:
                 c = 1
+
             mask = torch.ones(n, c, h-2*pad, w-2*pad).to(tensor.device)
             mask = torch.bernoulli(mask * gamma)
             mask = F.pad(mask, (pad, pad, pad, pad))
-            block_mask = F.conv2d(mask, torch.ones(c, 1, self.w, self.w),
-                                  padding=pad, groups=c)
+            kernel = torch.ones(c, 1, self.w, self.w).to(tensor.device)
+            block_mask = F.conv2d(mask, kernel, padding=pad, groups=c)
             block_mask = (block_mask == 0).float().detach()
+
             # norm = count(M)/count_ones(M)
             norm = block_mask.sum(2, True).sum(3, True) / h / w
             return tensor * block_mask * norm  # A × count(M)/count_ones(M)
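The conv2d change matters on GPU: F.conv2d requires its weight and input to live on the same device, so building the all-ones kernel without .to(tensor.device) would raise a device mismatch whenever the module ran on CUDA. A self-contained sketch of the block-mask expansion these lines perform; gamma is computed from p earlier in forward (not shown in this hunk), so the value here is only a stand-in:

    import torch
    import torch.nn.functional as F

    n, c, h, w = 2, 3, 16, 16
    block = 5
    pad = block // 2
    gamma = 0.02  # stand-in; the real value is derived from p

    # Bernoulli seeds, kept away from the border so full blocks fit
    mask = torch.ones(n, c, h - 2 * pad, w - 2 * pad)
    mask = torch.bernoulli(mask * gamma)
    mask = F.pad(mask, (pad, pad, pad, pad))

    # a depthwise conv with an all-ones kernel grows every seed into a
    # block x block region; zeros in the result are the surviving positions
    kernel = torch.ones(c, 1, block, block)
    block_mask = (F.conv2d(mask, kernel, padding=pad, groups=c) == 0).float()
    print(block_mask.shape)  # torch.Size([2, 3, 16, 16])

The final norm line then rescales the surviving activations so their overall magnitude stays comparable to the undropped tensor.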
