layers_new.py

import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)
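

# Usage sketch (not part of the original file): with the defaults ksize=3,
# pad=1, stride=1, Conv2DBNActiv preserves spatial size and only changes the
# channel count. The input shape is an assumption chosen for illustration.
# These __main__ blocks only run when the module is executed directly in its
# package context (python -m ...), since the relative spec_utils import above
# needs the package.
if __name__ == "__main__":
    _x = torch.rand(1, 4, 64, 128)
    _block = Conv2DBNActiv(4, 16)
    assert _block(_x).shape == (1, 16, 64, 128)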


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)

    def __call__(self, x):
        h = self.conv1(x)
        h = self.conv2(h)

        return h


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)

        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)

        h = self.conv1(x)
        # h = self.conv2(h)

        if self.dropout is not None:
            h = self.dropout(h)

        return h
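

# Usage sketch (not part of the original file): Decoder upsamples its input 2x
# with bilinear interpolation and, when a skip feature is given, center-crops
# it along the time axis (spec_utils.crop_center) and concatenates it on the
# channel dim, so `nin` must count the concatenated channels. The shapes below
# are assumptions for illustration.
if __name__ == "__main__":
    _x = torch.rand(1, 16, 32, 64)      # low-resolution feature map
    _skip = torch.rand(1, 8, 64, 128)   # encoder feature at the target resolution
    _dec = Decoder(16 + 8, 8)
    assert _dec(_x, _skip).shape == (1, 8, 64, 128)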


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ)
        self.conv3 = Conv2DBNActiv(
            nin, nout, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = Conv2DBNActiv(
            nin, nout, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = Conv2DBNActiv(
            nin, nout, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
        out = self.bottleneck(out)

        if self.dropout is not None:
            out = self.dropout(out)

        return out
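

# Usage sketch (not part of the original file): ASPPModule runs five parallel
# branches at the same resolution (a frequency-axis global-pooling branch, a
# 1x1 conv, and three dilated 3x3 convs) and fuses them with a 1x1 bottleneck,
# so the output keeps the input's height and width with nout channels. The
# shapes below are assumptions for illustration.
if __name__ == "__main__":
    _x = torch.rand(1, 32, 64, 128)
    _aspp = ASPPModule(32, 16)
    assert _aspp(_x).shape == (1, 16, 64, 128)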


class LSTMModule(nn.Module):
    def __init__(self, nin_conv, nin_lstm, nout_lstm):
        super(LSTMModule, self).__init__()
        self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0)
        self.lstm = nn.LSTM(
            input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True
        )
        self.dense = nn.Sequential(
            nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()
        )

    def forward(self, x):
        N, _, nbins, nframes = x.size()
        h = self.conv(x)[:, 0]  # N, nbins, nframes
        h = h.permute(2, 0, 1)  # nframes, N, nbins
        h, _ = self.lstm(h)
        h = self.dense(h.reshape(-1, h.size()[-1]))  # nframes * N, nbins
        h = h.reshape(nframes, N, 1, nbins)
        h = h.permute(1, 2, 3, 0)

        return h
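

# Usage sketch (not part of the original file): LSTMModule squeezes the feature
# map to a single channel, runs a bidirectional LSTM over the time frames (one
# frequency-bin vector per step), and projects each step back to nbins,
# returning an (N, 1, nbins, nframes) map. nin_lstm must match the input's
# nbins; the shapes below are assumptions for illustration.
if __name__ == "__main__":
    _x = torch.rand(2, 16, 128, 64)  # (N, nin_conv, nbins, nframes)
    _lstm = LSTMModule(nin_conv=16, nin_lstm=128, nout_lstm=128)
    assert _lstm(_x).shape == (2, 1, 128, 64)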