-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlayers.py
More file actions
70 lines (68 loc) · 2.5 KB
/
layers.py
File metadata and controls
70 lines (68 loc) · 2.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import torch
import torch.nn as nn
import torch.nn.functional as F
class LinearWithConstraint(nn.Linear):
    """Linear layer whose weight rows are constrained to a maximum L2 norm.

    On every forward pass the weight matrix is renormalised in place so that
    no row (dim-0 slice) has an L2 norm above ``max_norm`` — the max-norm
    regularisation used in EEGNet-style models.

    Args:
        *config: positional arguments forwarded to ``nn.Linear``.
        max_norm: maximum allowed L2 norm per weight row (default 1).
        **kwconfig: keyword arguments forwarded to ``nn.Linear``.
    """

    def __init__(self, *config, max_norm=1, **kwconfig):
        # Set the constraint before the parent constructor runs so the
        # attribute exists as soon as the module is fully initialised.
        self.max_norm = max_norm
        super().__init__(*config, **kwconfig)

    def forward(self, x):
        # Clip oversized rows down to exactly max_norm; rows already within
        # the bound are left untouched by torch.renorm.
        clipped = torch.renorm(self.weight.data, p=2, dim=0, maxnorm=self.max_norm)
        self.weight.data = clipped
        return super().forward(x)
class Conv2dWithConstraint(nn.Conv2d):
    """2-D convolution whose per-filter weights are capped at a maximum L2 norm.

    Before each forward pass the weight tensor is renormalised in place so
    that no output-channel slice (dim 0) exceeds ``max_norm`` in L2 norm.

    Args:
        *config: positional arguments forwarded to ``nn.Conv2d``.
        max_norm: maximum allowed L2 norm per filter (default 1).
        **kwconfig: keyword arguments forwarded to ``nn.Conv2d``.
    """

    def __init__(self, *config, max_norm=1, **kwconfig):
        # Record the bound before delegating construction to nn.Conv2d.
        self.max_norm = max_norm
        super().__init__(*config, **kwconfig)

    def forward(self, x):
        # Rescale any filter whose L2 norm exceeds max_norm; compliant
        # filters pass through torch.renorm unmodified.
        clipped = torch.renorm(self.weight.data, p=2, dim=0, maxnorm=self.max_norm)
        self.weight.data = clipped
        return super().forward(x)
class ConvSamePad2d(nn.Module):
    """Extend ``nn.Conv2d`` with TensorFlow-style 'SAME' padding.

    The output spatial size is ``ceil(input / stride)`` in each dimension.
    When the total padding needed is odd, the extra pixel goes on the
    bottom/right side, matching TensorFlow's SAME convention.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: int or (kh, kw) tuple.
        stride: int or (sh, sw) tuple (default 1).
        groups: number of blocked connections (default 1).
        bias: whether the underlying conv has a bias term (default True).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, groups=1, bias=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.groups = groups
        # Normalise once here instead of on every forward pass; use
        # isinstance rather than type() comparison.
        self._kernel_hw = (
            (kernel_size, kernel_size) if isinstance(kernel_size, int) else tuple(kernel_size)
        )
        self._stride_hw = (stride, stride) if isinstance(stride, int) else tuple(stride)
        self.conv = nn.Conv2d(
            in_channels=self.in_channels,
            out_channels=self.out_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            groups=self.groups,
            bias=bias)

    @staticmethod
    def _same_padding(size, kernel, stride):
        """Return (before, after) padding for one spatial dimension."""
        out_dim = (size + stride - 1) // stride  # ceil(size / stride)
        total = max(0, (out_dim - 1) * stride + kernel - size)
        before = total // 2
        # Odd totals put the extra pixel after (bottom/right), as TF does.
        return before, total - before

    def forward(self, x):
        """Pad ``x`` (N, C, H, W) to SAME size, then apply the convolution."""
        kh, kw = self._kernel_hw
        sh, sw = self._stride_hw
        _, _, h, w = x.size()
        w_pad = self._same_padding(w, kw, sw)
        h_pad = self._same_padding(h, kh, sh)
        # F.pad pads the last dimension first: (left, right, top, bottom).
        x_pad = F.pad(x, w_pad + h_pad, "constant", 0)
        return self.conv(x_pad)