discriminator

Discriminator

Discriminator(
    resolution: Resolution,
    channels: Dict[Resolution, int] = default_channels,
    blur_kernel: List[int] = [1, 3, 3, 1],
)

Bases: nn.Module

Discriminator module

Source code in stylegan2_torch/discriminator/__init__.py
def __init__(
    self,
    resolution: Resolution,
    channels: Dict[Resolution, int] = default_channels,
    blur_kernel: List[int] = [1, 3, 3, 1],
):
    super().__init__()

    # FromRGB followed by ResBlock
    self.n_layers = int(math.log(resolution, 2))

    self.blocks = nn.Sequential(
        ConvBlock(1, channels[resolution], 1),
        *[
            ResBlock(channels[2**i], channels[2 ** (i - 1)], blur_kernel)
            for i in range(self.n_layers, 2, -1)
        ],
    )

    # Minibatch std settings
    self.stddev_group = 4
    self.stddev_feat = 1

    # Final layers
    self.final_conv = ConvBlock(channels[4] + 1, channels[4], 3)
    self.final_relu = EqualLeakyReLU(channels[4] * 4 * 4, channels[4])
    self.final_linear = EqualLinear(channels[4], 1)
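
A minimal usage sketch, assuming the class is importable from stylegan2_torch.discriminator and that default_channels covers the chosen resolution; note that the ConvBlock(1, ...) stem above expects single-channel images:

import torch
from stylegan2_torch.discriminator import Discriminator

# Hypothetical setup: 256x256 single-channel images, default channel map
disc = Discriminator(resolution=256)

imgs = torch.randn(8, 1, 256, 256)                     # batch of real or generated images
scores = disc(imgs)                                    # (8, 1) raw realism scores
scores, features = disc(imgs, return_features=True)   # also returns the (8, channels[4]) features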

__call__ class-attribute

__call__ = proxy(forward)

blocks instance-attribute

blocks = nn.Sequential(
    ConvBlock(1, channels[resolution], 1),
    *[
        ResBlock(
            channels[2**i],
            channels[2 ** (i - 1)],
            blur_kernel,
        )
        for i in range(self.n_layers, 2, -1)
    ],
)
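
For concreteness, with a hypothetical resolution of 256 (so n_layers = 8), range(self.n_layers, 2, -1) unrolls to one ResBlock per halving of the resolution:

# Hypothetical unrolling for resolution = 256 (n_layers = log2(256) = 8):
# i = 8: ResBlock(channels[256], channels[128], blur_kernel)
# i = 7: ResBlock(channels[128], channels[64],  blur_kernel)
# i = 6: ResBlock(channels[64],  channels[32],  blur_kernel)
# i = 5: ResBlock(channels[32],  channels[16],  blur_kernel)
# i = 4: ResBlock(channels[16],  channels[8],   blur_kernel)
# i = 3: ResBlock(channels[8],   channels[4],   blur_kernel)
# Each ResBlock halves the spatial size, so a 256x256 input reaches 4x4
# before the final channels[4] * 4 * 4 flatten.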

final_conv instance-attribute

final_conv = ConvBlock(channels[4] + 1, channels[4], 3)

final_linear instance-attribute

final_linear = EqualLinear(channels[4], 1)

final_relu instance-attribute

final_relu = EqualLeakyReLU(
    channels[4] * 4 * 4, channels[4]
)

n_layers instance-attribute

n_layers = int(math.log(resolution, 2))

stddev_feat instance-attribute

stddev_feat = 1

stddev_group instance-attribute

stddev_group = 4

forward

forward(input: Tensor, *, return_features: bool = False)
Source code in stylegan2_torch/discriminator/__init__.py
def forward(self, input: Tensor, *, return_features: bool = False):
    # Downsampling blocks
    out: Tensor = self.blocks(input)

    # Minibatch stddev layer in Progressive GAN https://www.youtube.com/watch?v=V1qQXb9KcDY
    # Purpose is to provide variational information to the discriminator to prevent mode collapse
    # Other layers do not cross sample boundaries
    batch, channel, height, width = out.shape
    n_groups = min(batch, self.stddev_group)
    stddev = out.view(
        n_groups, -1, self.stddev_feat, channel // self.stddev_feat, height, width
    )
    stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
    stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
    stddev = stddev.repeat(n_groups, 1, height, width)
    out = torch.cat([out, stddev], 1)

    # Final layers
    out = self.final_conv(out)
    features = self.final_relu(out.view(batch, -1))
    out = self.final_linear(features)

    if return_features:
        return out, features
    else:
        return out
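
The minibatch stddev computation above can be hard to follow from the reshapes alone; this standalone sketch reproduces it with hypothetical shapes (batch 8, 512 channels on a 4x4 map, the default stddev_group=4 and stddev_feat=1) so the intermediate shapes are visible:

import torch

out = torch.randn(8, 512, 4, 4)                               # features after the downsampling blocks
batch, channel, height, width = out.shape
n_groups = min(batch, 4)                                      # stddev_group: samples per group

stddev = out.view(n_groups, -1, 1, channel, height, width)    # (4, 2, 1, 512, 4, 4)
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)     # stddev within each group of 4 samples: (2, 1, 512, 4, 4)
stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)      # one scalar per group: (2, 1, 1, 1)
stddev = stddev.repeat(n_groups, 1, height, width)            # broadcast back to (8, 1, 4, 4)
out = torch.cat([out, stddev], 1)                             # (8, 513, 4, 4), fed to final_conv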

blocks

ConvBlock

ConvBlock(
    in_channel: int, out_channel: int, kernel_size: int
)

Bases: nn.Sequential

Convolution in feature space

EqualConv2d: 2D convolution with equalized learning rate
FusedLeakyReLU: LeakyReLU with a bias added before activation

Source code in stylegan2_torch/discriminator/blocks.py
def __init__(self, in_channel: int, out_channel: int, kernel_size: int):
    super().__init__(
        EqualConv2d(
            in_channel,
            out_channel,
            kernel_size,
            padding=kernel_size // 2,
            stride=1,
            bias=False,
        ),
        FusedLeakyReLU(out_channel, bias=True),
    )

__call__

__call__(input: Tensor) -> Tensor
Source code in stylegan2_torch/discriminator/blocks.py
def __call__(self, input: Tensor) -> Tensor:
    return super().__call__(input)
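
EqualConv2d refers to the "equalized learning rate" trick from Progressive GAN / StyleGAN: weights are stored with unit-variance initialization and rescaled by 1/sqrt(fan_in) on every forward pass, so every layer sees a comparable effective gradient scale. A minimal sketch of the idea (not the library's actual EqualConv2d):

import math
import torch
from torch import nn
from torch.nn import functional as F

class EqualConv2dSketch(nn.Module):
    def __init__(self, in_channel: int, out_channel: int, kernel_size: int, stride: int = 1, padding: int = 0):
        super().__init__()
        # Weights kept at N(0, 1); the 1/sqrt(fan_in) scale is applied at runtime instead of at init
        self.weight = nn.Parameter(torch.randn(out_channel, in_channel, kernel_size, kernel_size))
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
        self.stride = stride
        self.padding = padding

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.conv2d(input, self.weight * self.scale, stride=self.stride, padding=self.padding)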

DownConvBlock

DownConvBlock(
    in_channel: int,
    out_channel: int,
    kernel_size: int,
    down: int,
    blur_kernel: List[int],
)

Bases: nn.Sequential

Downsampling convolution in feature space

Blur: Gaussian filter as a low-pass filter for anti-aliasing; pads the input so the downsampled tensor shape is preserved
EqualConv2d: 2D (downsampling) convolution with equalized learning rate
FusedLeakyReLU: LeakyReLU with a bias added before activation

Source code in stylegan2_torch/discriminator/blocks.py
def __init__(
    self,
    in_channel: int,
    out_channel: int,
    kernel_size: int,
    down: int,
    blur_kernel: List[int],
):
    super().__init__(
        Blur(blur_kernel, -down, kernel_size),
        EqualConv2d(
            in_channel, out_channel, kernel_size, padding=0, stride=down, bias=False
        ),
        FusedLeakyReLU(out_channel, bias=True),
    )

__call__

__call__(input: Tensor) -> Tensor
Source code in stylegan2_torch/discriminator/blocks.py
def __call__(self, input: Tensor) -> Tensor:
    return super().__call__(input)
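
The Blur stage implements the anti-aliasing idea: low-pass filter the feature map before the stride-2 convolution so that downsampling does not alias high frequencies. A rough sketch of what a 1-D blur_kernel such as [1, 3, 3, 1] amounts to (the library's Blur also computes the exact padding needed by the following strided conv):

import torch
from torch.nn import functional as F

k = torch.tensor([1.0, 3.0, 3.0, 1.0])
kernel_2d = torch.outer(k, k)
kernel_2d = kernel_2d / kernel_2d.sum()              # normalized separable low-pass filter

x = torch.randn(1, 512, 16, 16)                      # hypothetical feature map
x = F.pad(x, (1, 2, 1, 2))                           # asymmetric pad so the 4-tap kernel preserves size
weight = kernel_2d.repeat(512, 1, 1, 1)              # one copy of the filter per channel
blurred = F.conv2d(x, weight, groups=512)            # depthwise low-pass filtering, still 16x16
# A stride-2 EqualConv2d applied to `blurred` then downsamples with reduced aliasing.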

RGBDown

RGBDown(
    in_channel: int,
    out_channel: int,
    kernel_size: int,
    down: int,
    blur_kernel: List[int],
)

Bases: nn.Sequential

Downsampling convolution in RGB space, hence no nonlinearity is needed

Blur: Gaussian filter as a low-pass filter for anti-aliasing; pads the input so the downsampled tensor shape is preserved
EqualConv2d: 2D (downsampling) convolution with equalized learning rate

Source code in stylegan2_torch/discriminator/blocks.py
def __init__(
    self,
    in_channel: int,
    out_channel: int,
    kernel_size: int,
    down: int,
    blur_kernel: List[int],
):

    super().__init__(
        Blur(blur_kernel, -down, kernel_size),
        EqualConv2d(
            in_channel, out_channel, kernel_size, padding=0, stride=down, bias=False
        ),
    )

__call__

__call__(input: Tensor) -> Tensor
Source code in stylegan2_torch/discriminator/blocks.py
def __call__(self, input: Tensor) -> Tensor:
    return super().__call__(input)

ResBlock

ResBlock(
    in_channel: int,
    out_channel: int,
    blur_kernel: List[int],
)

Bases: nn.Module

Residual block

ConvBlock + DownConvBlock: Convolution + downsampling
RGBDown: Skip connection from the higher (double) resolution RGB image

Source code in stylegan2_torch/discriminator/blocks.py
def __init__(self, in_channel: int, out_channel: int, blur_kernel: List[int]):
    super().__init__()

    self.conv = ConvBlock(in_channel, in_channel, 3)
    self.down_conv = DownConvBlock(
        in_channel, out_channel, 3, down=2, blur_kernel=blur_kernel
    )
    self.skip = RGBDown(in_channel, out_channel, 1, down=2, blur_kernel=blur_kernel)

__call__ class-attribute

__call__ = proxy(forward)

conv instance-attribute

conv = ConvBlock(in_channel, in_channel, 3)

down_conv instance-attribute

down_conv = DownConvBlock(
    in_channel,
    out_channel,
    3,
    down=2,
    blur_kernel=blur_kernel,
)

skip instance-attribute

skip = RGBDown(
    in_channel,
    out_channel,
    1,
    down=2,
    blur_kernel=blur_kernel,
)

forward

forward(input: Tensor) -> Tensor
Source code in stylegan2_torch/discriminator/blocks.py
def forward(self, input: Tensor) -> Tensor:
    out = self.conv(input)
    out = self.down_conv(out)
    skip = self.skip(input)
    # sqrt 2 to adhere to equalized learning rate philosophy
    # (i.e. preserve variance in forward pass not initialization)
    return (out + skip) / math.sqrt(2)
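
A quick numerical check of the comment above: if the two branches produce roughly independent, unit-variance activations, their sum has variance about 2, so dividing by sqrt(2) restores unit variance.

import math
import torch

a = torch.randn(1_000_000)
b = torch.randn(1_000_000)
print((a + b).var())                    # ~2.0
print(((a + b) / math.sqrt(2)).var())   # ~1.0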