GitHub Repository: ai-forever/sber-swap
Path: blob/main/models/networks/discriminator.py

"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""

import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer
import utils.inference.util as util

# NOTE: `upfirdn2d` (used by Blur) and `FusedLeakyReLU` (used by ConvLayer) are
# referenced below but not imported in this listing; they are expected to come
# from the repo's StyleGAN2 fused-ops module (exact import path not shown here).

class MultiscaleDiscriminator(BaseNetwork):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--netD_subarch', type=str, default='n_layer',
                            help='architecture of each discriminator')
        parser.add_argument('--num_D', type=int, default=2,
                            help='number of discriminators to be used in multiscale')
        opt, _ = parser.parse_known_args()

        # define properties of each discriminator of the multiscale discriminator
        subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator',
                                            'models.networks.discriminator')
        subnetD.modify_commandline_options(parser, is_train)

        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt

        for i in range(opt.num_D):
            subnetD = self.create_single_discriminator(opt)
            self.add_module('discriminator_%d' % i, subnetD)

    def create_single_discriminator(self, opt):
        subarch = opt.netD_subarch
        if subarch == 'n_layer':
            netD = NLayerDiscriminator(opt)
        else:
            raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)
        return netD

    def downsample(self, input):
        return F.avg_pool2d(input, kernel_size=3,
                            stride=2, padding=[1, 1],
                            count_include_pad=False)

    # Returns list of lists of discriminator outputs.
    # The final result is of size opt.num_D x opt.n_layers_D
    def forward(self, input):
        result = []
        get_intermediate_features = not self.opt.no_ganFeat_loss
        for name, D in self.named_children():
            out = D(input)
            if not get_intermediate_features:
                out = [out]
            result.append(out)
            input = self.downsample(input)

        return result

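
# Usage sketch (illustrative note, not part of the original file). With the
# defaults above (num_D=2) and below (n_layers_D=4), and with the GAN
# feature-matching loss enabled (no_ganFeat_loss=False), a call such as
#     outs = MultiscaleDiscriminator(opt)(torch.cat([condition, image], dim=1))
# returns a list of num_D lists, one per image scale; each inner list holds the
# activations of the five blocks 'model0'..'model4' of one NLayerDiscriminator,
# and the input is halved by downsample() between scales.
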

# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(BaseNetwork):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--n_layers_D', type=int, default=4,
                            help='# layers in each discriminator')
        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt

        kw = 4
        padw = int(np.ceil((kw - 1.0) / 2))
        nf = opt.ndf
        input_nc = self.compute_D_input_nc(opt)

        norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
        sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),
                     nn.LeakyReLU(0.2, False)]]

        for n in range(1, opt.n_layers_D):
            nf_prev = nf
            nf = min(nf * 2, 512)
            stride = 1 if n == opt.n_layers_D - 1 else 2
            sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,
                                               stride=stride, padding=padw)),
                          nn.LeakyReLU(0.2, False)
                          ]]

        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        # We divide the layers into groups to extract intermediate layer outputs
        for n in range(len(sequence)):
            self.add_module('model' + str(n), nn.Sequential(*sequence[n]))

    def compute_D_input_nc(self, opt):
        input_nc = opt.label_nc + opt.output_nc
        if opt.contain_dontcare_label:
            input_nc += 1
        if not opt.no_instance:
            input_nc += 1
        return input_nc

    def forward(self, input):
        results = [input]
        for submodel in self.children():
            intermediate_output = submodel(results[-1])
            results.append(intermediate_output)

        get_intermediate_features = not self.opt.no_ganFeat_loss
        if get_intermediate_features:
            return results[1:]
        else:
            return results[-1]

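
# Block layout sketch (illustrative, assuming ndf=64 and n_layers_D=4):
#     model0: Conv(input_nc ->  64, 4x4, stride 2) + LeakyReLU
#     model1: Conv(      64 -> 128, 4x4, stride 2) + norm + LeakyReLU
#     model2: Conv(     128 -> 256, 4x4, stride 2) + norm + LeakyReLU
#     model3: Conv(     256 -> 512, 4x4, stride 1) + norm + LeakyReLU
#     model4: Conv(     512 ->   1, 4x4, stride 1)  -> one-channel patch scores
# forward() returns all five activations when feature matching is on, otherwise
# only the final patch-score map.
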

class ScaledLeakyReLU(nn.Module):
    def __init__(self, negative_slope=0.2):
        super().__init__()

        self.negative_slope = negative_slope

    def forward(self, input):
        out = F.leaky_relu(input, negative_slope=self.negative_slope)

        return out * math.sqrt(2)

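
# Note (not in the original file): the sqrt(2) gain compensates for the signal
# magnitude lost in the LeakyReLU, mirroring the scaling applied by StyleGAN2's
# FusedLeakyReLU, so this class can serve as a drop-in fallback when the fused
# op is unavailable.
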

def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)

    if k.ndim == 1:
        k = k[None, :] * k[:, None]

    k /= k.sum()

    return k

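
# Example (illustrative): make_kernel([1, 3, 3, 1]) takes the outer product of
# the 1-D taps with themselves and normalizes the result, producing the 4x4
# separable binomial filter
#     [[1, 3, 3, 1],
#      [3, 9, 9, 3],
#      [3, 9, 9, 3],
#      [1, 3, 3, 1]] / 64
# that Blur below applies for anti-aliased filtering.
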

class Blur(nn.Module):
    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()

        kernel = make_kernel(kernel)

        if upsample_factor > 1:
            kernel = kernel * (upsample_factor ** 2)

        self.register_buffer('kernel', kernel)

        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)

        return out

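
# Note (not in the original file): `upfirdn2d` pads the input and convolves it
# with the registered blur kernel (the up/down resampling factors default to 1
# here); it comes from StyleGAN2's fused ops and, as noted with the imports, is
# assumed to be provided elsewhere in the repo.
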

class EqualConv2d(nn.Module):
    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()

        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)

        self.stride = stride
        self.padding = padding

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))
        else:
            self.bias = None

    def forward(self, input):
        out = F.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

        return out

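
# Note (not in the original file): this is the "equalized learning rate" trick
# from StyleGAN/StyleGAN2: weights are stored at unit variance and rescaled by
# 1 / sqrt(fan_in) = 1 / sqrt(in_channel * kernel_size**2) on every forward
# pass, so every layer receives gradients of comparable magnitude.
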

class ConvLayer(nn.Sequential):
    def __init__(self, in_channel, out_channel, kernel_size,
                 downsample=False, blur_kernel=[1, 3, 3, 1],
                 bias=True, activate=True):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2

        layers.append(
            EqualConv2d(in_channel, out_channel, kernel_size,
                        padding=self.padding, stride=stride, bias=bias and not activate)
        )

        if activate:
            if bias:
                layers.append(FusedLeakyReLU(out_channel))
            else:
                layers.append(ScaledLeakyReLU(0.2))

        super().__init__(*layers)
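

if __name__ == '__main__':
    # Minimal smoke test (illustrative, not part of the original file). The option
    # names and values below are assumptions based on the SPADE-style fields this
    # module reads; adjust them to the repository's real option parser.
    from argparse import Namespace

    opt = Namespace(num_D=2, netD_subarch='n_layer', n_layers_D=4, ndf=64,
                    norm_D='spectralinstance', label_nc=3, output_nc=3,
                    contain_dontcare_label=False, no_instance=True,
                    no_ganFeat_loss=False)

    netD = MultiscaleDiscriminator(opt)
    x = torch.randn(1, opt.label_nc + opt.output_nc, 256, 256)  # condition + image channels
    outs = netD(x)
    for scale, feats in enumerate(outs):
        # One list of intermediate feature maps per scale.
        print('scale', scale, [tuple(f.shape) for f in feats])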