# modules.py
import math

import numpy as np
import torch
import torch.distributions as D
from torch import nn
from torch.nn import Conv1d
from torch.nn import functional as F
from torch.nn.utils import weight_norm, remove_weight_norm

from module import commons
from module.commons import init_weights, get_padding
from module.transforms import piecewise_rational_quadratic_transform

LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)
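

# Illustrative sketch (editor's addition; this _demo_* helper is hypothetical,
# not part of the original module): LayerNorm above expects channel-first
# [batch, channels, time] input and normalizes over channels by transposing
# around F.layer_norm.
def _demo_layer_norm():
    ln = LayerNorm(channels=4)
    x = torch.randn(2, 4, 10)  # [batch, channels, time]
    y = ln(x)
    assert y.shape == x.shape  # normalization preserves the input shape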


class ConvReluNorm(nn.Module):
    def __init__(
        self,
        in_channels,
        hidden_channels,
        out_channels,
        kernel_size,
        n_layers,
        p_dropout,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."
        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(
            nn.Conv1d(
                in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
            )
        )
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(
                nn.Conv1d(
                    hidden_channels,
                    hidden_channels,
                    kernel_size,
                    padding=kernel_size // 2,
                )
            )
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask
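

# Illustrative sketch (editor's addition; hypothetical helper): because proj is
# zero-initialized, ConvReluNorm starts out as an identity mapping under the
# mask, and same-padding convolutions keep the time length.
def _demo_conv_relu_norm():
    net = ConvReluNorm(in_channels=8, hidden_channels=16, out_channels=8,
                       kernel_size=5, n_layers=3, p_dropout=0.1)
    x = torch.randn(2, 8, 25)
    x_mask = torch.ones(2, 1, 25)
    y = net(x, x_mask)
    assert torch.allclose(y, x)  # proj outputs zeros at init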


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """

    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size**i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    groups=channels,
                    dilation=dilation,
                    padding=padding,
                )
            )
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask
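

# Illustrative sketch (editor's addition; hypothetical helper): DDSConv stacks
# depthwise (grouped) dilated convolutions with 1x1 pointwise convolutions;
# dilation grows as kernel_size**i, so the receptive field grows exponentially
# with depth while the time length is preserved.
def _demo_dds_conv():
    conv = DDSConv(channels=8, kernel_size=3, n_layers=3)
    x = torch.randn(2, 8, 50)
    x_mask = torch.ones(2, 1, 50)  # all frames valid
    y = conv(x, x_mask)
    assert y.shape == x.shape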


class WN(torch.nn.Module):
    def __init__(
        self,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
        p_dropout=0,
    ):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout
        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)
        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(
                gin_channels, 2 * hidden_channels * n_layers, 1
            )
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
        for i in range(n_layers):
            dilation = dilation_rate**i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(
                hidden_channels,
                2 * hidden_channels,
                kernel_size,
                dilation=dilation,
                padding=padding,
            )
            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
            self.in_layers.append(in_layer)
            # the last layer keeps only the skip path, so no residual half is needed
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels
            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])
        if g is not None:
            g = self.cond_layer(g)
        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)
            acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
            acts = self.drop(acts)
            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, : self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels :, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)
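

# Illustrative sketch (editor's addition; hypothetical helper, and it assumes
# module.commons provides fused_add_tanh_sigmoid_multiply as imported above):
# WN is the WaveNet-style gated residual stack; with gin_channels set, the
# conditioning tensor g is projected once and sliced per layer.
def _demo_wn():
    wn = WN(hidden_channels=16, kernel_size=5, dilation_rate=2,
            n_layers=4, gin_channels=32)
    x = torch.randn(2, 16, 40)
    x_mask = torch.ones(2, 1, 40)
    g = torch.randn(2, 32, 40)  # [batch, gin_channels, time]
    y = wn(x, x_mask, g=g)
    assert y.shape == x.shape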


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[2],
                        padding=get_padding(kernel_size, dilation[2]),
                    )
                ),
            ]
        )
        self.convs1.apply(init_weights)
        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
            ]
        )
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)
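

# Illustrative sketch (editor's addition; hypothetical helper): ResBlock1 is
# the HiFi-GAN-style residual block; get_padding keeps every convolution
# length-preserving, so the output shape matches the input shape.
def _demo_resblock1():
    block = ResBlock1(channels=8, kernel_size=3, dilation=(1, 3, 5))
    x = torch.randn(2, 8, 32)
    y = block(x)
    assert y.shape == x.shape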


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
            ]
        )
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x
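

# Illustrative round-trip sketch (editor's addition; hypothetical helper): the
# flow modules above (Log, Flip, ElementwiseAffine) all return (y, logdet) in
# the forward direction and the inverted tensor alone when reverse=True.
def _demo_elementwise_affine_roundtrip():
    flow = ElementwiseAffine(channels=4)
    x = torch.randn(2, 4, 10)
    x_mask = torch.ones(2, 1, 10)
    y, logdet = flow(x, x_mask)            # y = m + exp(logs) * x
    x_rec = flow(y, x_mask, reverse=True)  # x = (y - m) * exp(-logs)
    assert torch.allclose(x_rec, x, atol=1e-6)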


class ResidualCouplingLayer(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        p_dropout=0,
        gin_channels=0,
        mean_only=False,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only
        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            p_dropout=p_dropout,
            gin_channels=gin_channels,
        )
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)
        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x
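

# Illustrative round-trip sketch (editor's addition; hypothetical helper): the
# coupling layer leaves x0 untouched and transforms x1 with stats predicted
# from x0, so reverse=True exactly inverts the forward pass.
def _demo_residual_coupling_roundtrip():
    layer = ResidualCouplingLayer(channels=8, hidden_channels=16, kernel_size=5,
                                  dilation_rate=1, n_layers=2, mean_only=True)
    x = torch.randn(2, 8, 20)
    x_mask = torch.ones(2, 1, 20)
    with torch.no_grad():
        y, _ = layer(x, x_mask)
        x_rec = layer(y, x_mask, reverse=True)
    assert torch.allclose(x_rec, x, atol=1e-5)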


class ConvFlow(nn.Module):
    def __init__(
        self,
        in_channels,
        filter_channels,
        kernel_size,
        n_layers,
        num_bins=10,
        tail_bound=5.0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2
        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
        self.proj = nn.Conv1d(
            filter_channels, self.half_channels * (num_bins * 3 - 1), 1
        )
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask
        b, c, t = x0.shape
        # [b, c * n_params, t] -> [b, c, t, n_params]
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)
        unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
            self.filter_channels
        )
        unnormalized_derivatives = h[..., 2 * self.num_bins :]
        x1, logabsdet = piecewise_rational_quadratic_transform(
            x1,
            unnormalized_widths,
            unnormalized_heights,
            unnormalized_derivatives,
            inverse=reverse,
            tails="linear",
            tail_bound=self.tail_bound,
        )
        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
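

# Illustrative round-trip sketch (editor's addition; hypothetical helper, and
# it assumes module.transforms provides piecewise_rational_quadratic_transform
# as imported above): the rational-quadratic spline is analytically
# invertible, so reverse=True undoes the forward pass.
def _demo_conv_flow_roundtrip():
    flow = ConvFlow(in_channels=4, filter_channels=16, kernel_size=3, n_layers=2)
    x = torch.randn(2, 4, 10)
    x_mask = torch.ones(2, 1, 10)
    with torch.no_grad():
        y, _ = flow(x, x_mask)
        x_rec = flow(y, x_mask, reverse=True)
    assert torch.allclose(x_rec, x, atol=1e-4)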


class LinearNorm(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        bias=True,
        spectral_norm=False,
    ):
        super(LinearNorm, self).__init__()
        self.fc = nn.Linear(in_channels, out_channels, bias)
        if spectral_norm:
            self.fc = nn.utils.spectral_norm(self.fc)

    def forward(self, input):
        out = self.fc(input)
        return out


class Mish(nn.Module):
    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        return x * torch.tanh(F.softplus(x))


class Conv1dGLU(nn.Module):
    """
    Conv1d + GLU (Gated Linear Unit) with residual connection.
    For GLU, see https://arxiv.org/abs/1612.08083.
    """

    def __init__(self, in_channels, out_channels, kernel_size, dropout):
        super(Conv1dGLU, self).__init__()
        self.out_channels = out_channels
        self.conv1 = ConvNorm(in_channels, 2 * out_channels, kernel_size=kernel_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x
        x = self.conv1(x)
        x1, x2 = torch.split(x, split_size_or_sections=self.out_channels, dim=1)
        x = x1 * torch.sigmoid(x2)
        x = residual + self.dropout(x)
        return x
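

# Illustrative sketch (editor's addition; hypothetical helper): Conv1dGLU
# doubles the channel count, splits the result in half, and gates one half
# with the sigmoid of the other before adding the residual, so it requires
# in_channels == out_channels.
def _demo_conv1d_glu():
    glu = Conv1dGLU(in_channels=8, out_channels=8, kernel_size=5, dropout=0.1)
    x = torch.randn(2, 8, 30)
    y = glu(x)
    assert y.shape == x.shape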


class ConvNorm(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=1,
        stride=1,
        padding=None,
        dilation=1,
        bias=True,
        spectral_norm=False,
    ):
        super(ConvNorm, self).__init__()
        if padding is None:
            assert kernel_size % 2 == 1
            padding = int(dilation * (kernel_size - 1) / 2)
        self.conv = torch.nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        if spectral_norm:
            self.conv = nn.utils.spectral_norm(self.conv)

    def forward(self, input):
        out = self.conv(input)
        return out


class MultiHeadAttention(nn.Module):
    """Multi-Head Attention module"""

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.0, spectral_norm=False):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.w_qs = nn.Linear(d_model, n_head * d_k)
        self.w_ks = nn.Linear(d_model, n_head * d_k)
        self.w_vs = nn.Linear(d_model, n_head * d_v)
        self.attention = ScaledDotProductAttention(
            temperature=np.power(d_model, 0.5), dropout=dropout
        )
        self.fc = nn.Linear(n_head * d_v, d_model)
        self.dropout = nn.Dropout(dropout)
        if spectral_norm:
            self.w_qs = nn.utils.spectral_norm(self.w_qs)
            self.w_ks = nn.utils.spectral_norm(self.w_ks)
            self.w_vs = nn.utils.spectral_norm(self.w_vs)
            self.fc = nn.utils.spectral_norm(self.fc)

    def forward(self, x, mask=None):
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_x, _ = x.size()
        residual = x
        q = self.w_qs(x).view(sz_b, len_x, n_head, d_k)
        k = self.w_ks(x).view(sz_b, len_x, n_head, d_k)
        v = self.w_vs(x).view(sz_b, len_x, n_head, d_v)
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_k)  # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_k)  # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_v)  # (n*b) x lv x dv
        if mask is not None:
            slf_mask = mask.repeat(n_head, 1, 1)  # (n*b) x .. x ..
        else:
            slf_mask = None
        output, attn = self.attention(q, k, v, mask=slf_mask)
        output = output.view(n_head, sz_b, len_x, d_v)
        output = (
            output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_x, -1)
        )  # b x lq x (n*dv)
        output = self.fc(output)
        output = self.dropout(output) + residual
        return output, attn


class ScaledDotProductAttention(nn.Module):
    """Scaled Dot-Product Attention"""

    def __init__(self, temperature, dropout):
        super().__init__()
        self.temperature = temperature
        self.softmax = nn.Softmax(dim=2)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        attn = torch.bmm(q, k.transpose(1, 2))
        attn = attn / self.temperature
        if mask is not None:
            attn = attn.masked_fill(mask, -np.inf)
        attn = self.softmax(attn)
        p_attn = self.dropout(attn)
        output = torch.bmm(p_attn, v)
        return output, attn
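

# Illustrative sketch (editor's addition; hypothetical helper): this
# MultiHeadAttention is self-attention over [batch, time, d_model] input; the
# returned attention weights are stacked per head along the batch dim.
def _demo_self_attention():
    attn = MultiHeadAttention(n_head=2, d_model=16, d_k=8, d_v=8)
    x = torch.randn(2, 12, 16)
    out, weights = attn(x)
    assert out.shape == x.shape
    assert weights.shape == (2 * 2, 12, 12)  # (n_head * batch, time, time)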


class MelStyleEncoder(nn.Module):
    """MelStyleEncoder"""

    def __init__(
        self,
        n_mel_channels=80,
        style_hidden=128,
        style_vector_dim=256,
        style_kernel_size=5,
        style_head=2,
        dropout=0.1,
    ):
        super(MelStyleEncoder, self).__init__()
        self.in_dim = n_mel_channels
        self.hidden_dim = style_hidden
        self.out_dim = style_vector_dim
        self.kernel_size = style_kernel_size
        self.n_head = style_head
        self.dropout = dropout
        self.spectral = nn.Sequential(
            LinearNorm(self.in_dim, self.hidden_dim),
            Mish(),
            nn.Dropout(self.dropout),
            LinearNorm(self.hidden_dim, self.hidden_dim),
            Mish(),
            nn.Dropout(self.dropout),
        )
        self.temporal = nn.Sequential(
            Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
            Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
        )
        self.slf_attn = MultiHeadAttention(
            self.n_head,
            self.hidden_dim,
            self.hidden_dim // self.n_head,
            self.hidden_dim // self.n_head,
            self.dropout,
        )
        self.fc = LinearNorm(self.hidden_dim, self.out_dim)

    def temporal_avg_pool(self, x, mask=None):
        if mask is None:
            out = torch.mean(x, dim=1)
        else:
            len_ = (~mask).sum(dim=1).unsqueeze(1)
            x = x.masked_fill(mask.unsqueeze(-1), 0)
            x = x.sum(dim=1)
            out = torch.div(x, len_)
        return out

    def forward(self, x, mask=None):
        x = x.transpose(1, 2)
        if mask is not None:
            mask = (mask.int() == 0).squeeze(1)
        max_len = x.shape[1]
        slf_attn_mask = (
            mask.unsqueeze(1).expand(-1, max_len, -1) if mask is not None else None
        )
        # spectral
        x = self.spectral(x)
        # temporal
        x = x.transpose(1, 2)
        x = self.temporal(x)
        x = x.transpose(1, 2)
        # self-attention
        if mask is not None:
            x = x.masked_fill(mask.unsqueeze(-1), 0)
        x, _ = self.slf_attn(x, mask=slf_attn_mask)
        # fc
        x = self.fc(x)
        # temporal average pooling
        w = self.temporal_avg_pool(x, mask=mask)
        return w.unsqueeze(-1)
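

# Illustrative sketch (editor's addition; hypothetical helper): MelStyleEncoder
# maps a [batch, n_mel_channels, time] spectrogram to a fixed-size style
# vector of shape [batch, style_vector_dim, 1] via masked temporal pooling.
def _demo_mel_style_encoder():
    enc = MelStyleEncoder(n_mel_channels=80, style_vector_dim=256)
    enc.eval()
    mel = torch.randn(2, 80, 120)
    with torch.no_grad():
        style = enc(mel)
    assert style.shape == (2, 256, 1)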


class MelStyleEncoderVAE(nn.Module):
    def __init__(self, spec_channels, z_latent_dim, emb_dim):
        super().__init__()
        self.ref_encoder = MelStyleEncoder(spec_channels, style_vector_dim=emb_dim)
        self.fc1 = nn.Linear(emb_dim, z_latent_dim)
        self.fc2 = nn.Linear(emb_dim, z_latent_dim)
        self.fc3 = nn.Linear(z_latent_dim, emb_dim)
        self.z_latent_dim = z_latent_dim

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def forward(self, inputs, mask=None):
        enc_out = self.ref_encoder(inputs.squeeze(-1), mask).squeeze(-1)
        mu = self.fc1(enc_out)
        logvar = self.fc2(enc_out)
        posterior = D.Normal(mu, torch.exp(logvar))
        kl_divergence = D.kl_divergence(
            posterior, D.Normal(torch.zeros_like(mu), torch.ones_like(logvar))
        )
        loss_kl = kl_divergence.mean()
        z = posterior.rsample()
        style_embed = self.fc3(z)
        return style_embed.unsqueeze(-1), loss_kl

    def infer(self, inputs=None, random_sample=False, manual_latent=None):
        if manual_latent is None:
            if random_sample:
                dev = next(self.parameters()).device
                posterior = D.Normal(
                    torch.zeros(1, self.z_latent_dim, device=dev),
                    torch.ones(1, self.z_latent_dim, device=dev),
                )
                z = posterior.rsample()
            else:
                enc_out = self.ref_encoder(inputs.transpose(1, 2))
                mu = self.fc1(enc_out)
                z = mu
        else:
            z = manual_latent
        style_embed = self.fc3(z)
        return style_embed.unsqueeze(-1), z
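

# Illustrative sketch (editor's addition; hypothetical helper): at inference
# time MelStyleEncoderVAE can sample a style embedding from the standard-normal
# prior instead of encoding a reference mel.
def _demo_style_vae_sample():
    vae = MelStyleEncoderVAE(spec_channels=80, z_latent_dim=16, emb_dim=256)
    style, z = vae.infer(random_sample=True)
    assert style.shape == (1, 256, 1)
    assert z.shape == (1, 16)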


class ActNorm(nn.Module):
    def __init__(self, channels, ddi=False, **kwargs):
        super().__init__()
        self.channels = channels
        self.initialized = not ddi
        self.logs = nn.Parameter(torch.zeros(1, channels, 1))
        self.bias = nn.Parameter(torch.zeros(1, channels, 1))

    def forward(self, x, x_mask=None, g=None, reverse=False, **kwargs):
        if x_mask is None:
            x_mask = torch.ones(x.size(0), 1, x.size(2)).to(
                device=x.device, dtype=x.dtype
            )
        x_len = torch.sum(x_mask, [1, 2])
        if not self.initialized:
            self.initialize(x, x_mask)
            self.initialized = True
        if reverse:
            z = (x - self.bias) * torch.exp(-self.logs) * x_mask
            logdet = None
            return z
        else:
            z = (self.bias + torch.exp(self.logs) * x) * x_mask
            logdet = torch.sum(self.logs) * x_len  # [b]
            return z, logdet

    def store_inverse(self):
        pass

    def set_ddi(self, ddi):
        self.initialized = not ddi

    def initialize(self, x, x_mask):
        with torch.no_grad():
            denom = torch.sum(x_mask, [0, 2])
            m = torch.sum(x * x_mask, [0, 2]) / denom
            m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom
            v = m_sq - (m**2)
            logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6))
            bias_init = (
                (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype)
            )
            logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype)
            self.bias.data.copy_(bias_init)
            self.logs.data.copy_(logs_init)
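

# Illustrative sketch (editor's addition; hypothetical helper): with ddi=True,
# ActNorm runs data-dependent initialization on its first forward pass, picking
# bias and logs so the masked output has zero mean and unit variance per
# channel.
def _demo_actnorm_ddi():
    norm = ActNorm(channels=4, ddi=True)
    x = torch.randn(8, 4, 20) * 3.0 + 1.0
    z, _ = norm(x)
    assert torch.allclose(z.mean(dim=(0, 2)), torch.zeros(4), atol=1e-4)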


class InvConvNear(nn.Module):
    def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs):
        super().__init__()
        assert n_split % 2 == 0
        self.channels = channels
        self.n_split = n_split
        self.no_jacobian = no_jacobian
        w_init = torch.linalg.qr(
            torch.FloatTensor(self.n_split, self.n_split).normal_()
        )[0]
        if torch.det(w_init) < 0:
            w_init[:, 0] = -1 * w_init[:, 0]
        self.weight = nn.Parameter(w_init)

    def forward(self, x, x_mask=None, g=None, reverse=False, **kwargs):
        b, c, t = x.size()
        assert c % self.n_split == 0
        if x_mask is None:
            x_mask = 1
            x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
        else:
            x_len = torch.sum(x_mask, [1, 2])
        x = x.view(b, 2, c // self.n_split, self.n_split // 2, t)
        x = (
            x.permute(0, 1, 3, 2, 4)
            .contiguous()
            .view(b, self.n_split, c // self.n_split, t)
        )
        if reverse:
            if hasattr(self, "weight_inv"):
                weight = self.weight_inv
            else:
                weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
            logdet = None
        else:
            weight = self.weight
            if self.no_jacobian:
                logdet = 0
            else:
                logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len  # [b]
        weight = weight.view(self.n_split, self.n_split, 1, 1)
        z = F.conv2d(x, weight)
        z = z.view(b, 2, self.n_split // 2, c // self.n_split, t)
        z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
        if reverse:
            return z
        else:
            return z, logdet

    def store_inverse(self):
        self.weight_inv = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
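

# Illustrative round-trip sketch (editor's addition; hypothetical helper):
# InvConvNear mixes groups of n_split channels with an invertible 1x1
# convolution; store_inverse caches the inverse weight for the reverse pass.
def _demo_invconv_roundtrip():
    conv = InvConvNear(channels=8, n_split=4)
    x = torch.randn(2, 8, 16)
    with torch.no_grad():
        z, _ = conv(x)
        conv.store_inverse()
        x_rec = conv(z, reverse=True)
    assert torch.allclose(x_rec, x, atol=1e-4)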