## 全卷积网络FCN

FCN（Fully Convolutional Network，全卷积网络）是深度学习用于图像语义分割的开山之作，后续的很多分割网络结构（如 U-Net、SegNet、DeepLab 等）都是在此基础上演进而来的。

## 代码解析

```class FCNs(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu    = nn.ReLU(inplace=True)
self.bn1     = nn.BatchNorm2d(512)
self.bn2     = nn.BatchNorm2d(256)
self.bn3     = nn.BatchNorm2d(128)
self.bn4     = nn.BatchNorm2d(64)
self.bn5     = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3']  # size=(N, 256, x.H/8,  x.W/8)
x2 = output['x2']  # size=(N, 128, x.H/4,  x.W/4)
x1 = output['x1']  # size=(N, 64, x.H/2,  x.W/2)
score = self.bn1(self.relu(self.deconv1(x5)))     # size=(N, 512, x.H/16, x.W/16)
score = score + x4                                # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
score = score + x3                                # element-wise add, size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
score = score + x2                                # element-wise add, size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
score = score + x1                                # element-wise add, size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
score = self.classifier(score)                    # size=(N, n_class, x.H/1, x.W/1)
return score  # size=(N, n_class, x.H/1, x.W/1)```

在 train.py 中按如下方式构建模型：

```python
vgg_model = VGGNet(requires_grad=True, remove_fc=True)
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class)
```

```python
def forward(self, x):
    output = self.pretrained_net(x)
    x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
    x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
    x3 = output['x3']  # size=(N, 256, x.H/8,  x.W/8)
    x2 = output['x2']  # size=(N, 128, x.H/4,  x.W/4)
    x1 = output['x1']  # size=(N, 64, x.H/2,  x.W/2)
    score = self.bn1(self.relu(self.deconv1(x5)))     # size=(N, 512, x.H/16, x.W/16)
    score = score + x4                                # element-wise add, size=(N, 512, x.H/16, x.W/16)
    score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
    score = score + x3                                # element-wise add, size=(N, 256, x.H/8, x.W/8)
    score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
    score = score + x2                                # element-wise add, size=(N, 128, x.H/4, x.W/4)
    score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
    score = score + x1                                # element-wise add, size=(N, 64, x.H/2, x.W/2)
    score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
    score = self.classifier(score)                    # size=(N, n_class, x.H/1, x.W/1)
    return score  # size=(N, n_class, x.H/1, x.W/1)
```

```python
x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3']  # size=(N, 256, x.H/8,  x.W/8)
x2 = output['x2']  # size=(N, 128, x.H/4,  x.W/4)
x1 = output['x1']  # size=(N, 64, x.H/2,  x.W/2)
```

### 损失函数

`criterion = nn.BCEWithLogitsLoss()`

- `BCELoss()`：二元交叉熵损失，要求输入已经经过 sigmoid，取值在 (0, 1) 区间。
- `BCEWithLogitsLoss()`：在内部先对输入做 sigmoid 再计算二元交叉熵，输入为未经激活的 logits，数值上更稳定，因此通常优先使用它。