Fix a couple minor warnings (#210)
* use the non-deprecated import path for weight_norm
* don't pass dropout to a single-layer LSTM: per the docs, dropout is not applied after the last LSTM layer, so passing it has no effect when `num_layers=1` (and recent PyTorch warns about it)
@@ -1,6 +1,6 @@
 # ADAPTED from https://github.com/yl4579/StyleTTS2/blob/main/Modules/istftnet.py
 from kokoro.custom_stft import CustomSTFT
-from torch.nn.utils import weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 import math
 import torch
 import torch.nn as nn
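For context on the first fix: `torch.nn.utils.weight_norm` is deprecated in recent PyTorch releases in favor of the parametrization-based API, which behaves the same at call time. A minimal sketch of the swap (the module shapes are arbitrary, chosen only for illustration):

import torch
import torch.nn as nn

# Deprecated path, emits a deprecation warning on recent PyTorch:
#   from torch.nn.utils import weight_norm
# Replacement used by this commit:
from torch.nn.utils.parametrizations import weight_norm

# weight_norm registers a parametrization: conv.weight is recomputed from a
# magnitude/direction decomposition every time it is accessed.
conv = weight_norm(nn.Conv1d(16, 32, kernel_size=3))
out = conv(torch.randn(1, 16, 100))  # forward pass works exactly as before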
@@ -1,6 +1,6 @@
 # https://github.com/yl4579/StyleTTS2/blob/main/models.py
 from .istftnet import AdainResBlk1d
-from torch.nn.utils import weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 from transformers import AlbertModel
 import numpy as np
 import torch
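The same one-line swap, applied in a second file. One general PyTorch caveat worth knowing when making this migration (background, not something this diff itself shows): the two APIs store the decomposed weight under different state-dict keys, which can matter when loading checkpoints saved with the old API. A sketch:

import torch.nn as nn
from torch.nn.utils import weight_norm as weight_norm_old            # deprecated
from torch.nn.utils.parametrizations import weight_norm as weight_norm_new

# Old API: the decomposition lives under weight_g / weight_v.
old = weight_norm_old(nn.Conv1d(4, 4, 3))
print(sorted(old.state_dict()))
# ['bias', 'weight_g', 'weight_v']

# New API: same decomposition, stored as parametrization originals.
new = weight_norm_new(nn.Conv1d(4, 4, 3))
print(sorted(new.state_dict()))
# ['bias', 'parametrizations.weight.original0', 'parametrizations.weight.original1']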
@@ -139,7 +139,7 @@ class DurationEncoder(nn.Module):
         super().__init__()
         self.lstms = nn.ModuleList()
         for _ in range(nlayers):
-            self.lstms.append(nn.LSTM(d_model + sty_dim, d_model // 2, num_layers=1, batch_first=True, bidirectional=True, dropout=dropout))
+            self.lstms.append(nn.LSTM(d_model + sty_dim, d_model // 2, num_layers=1, batch_first=True, bidirectional=True))
             self.lstms.append(AdaLayerNorm(sty_dim, d_model))
         self.dropout = dropout
         self.d_model = d_model
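For the second fix: `nn.LSTM`'s `dropout` argument only inserts dropout between stacked recurrent layers, never after the last one, so with `num_layers=1` it is a no-op and PyTorch warns about it at construction time. Note `self.dropout` is still stored, presumably so dropout can be applied manually elsewhere (e.g. in `forward`). A minimal sketch of the warning and a hand-rolled alternative (dimensions are arbitrary):

import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F

# With num_layers=1 the dropout argument has nothing to act on; PyTorch
# emits a UserWarning along the lines of "dropout option adds dropout after
# all but last recurrent layer ... expects num_layers greater than 1".
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    nn.LSTM(10, 20, num_layers=1, batch_first=True, dropout=0.1)
print([str(w.message) for w in caught])

# If single-layer output dropout is actually wanted, apply it explicitly:
lstm = nn.LSTM(10, 20, num_layers=1, batch_first=True)
x = torch.randn(4, 50, 10)
out, _ = lstm(x)
out = F.dropout(out, p=0.1, training=True)  # manual equivalent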