Fix a couple minor warnings (#210)

* use non-deprecated import path of weight_norm
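
A minimal sketch of the change, assuming PyTorch >= 2.1 (where the old path
still works but emits a deprecation warning when called):

    # deprecated path, warns at call time:
    # from torch.nn.utils import weight_norm
    from torch.nn.utils.parametrizations import weight_norm
    import torch.nn as nn

    conv = weight_norm(nn.Conv1d(16, 16, kernel_size=3))
    # the new API registers a parametrization, so the decomposed weight
    # lives under conv.parametrizations.weight rather than weight_g/weight_v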

* don't pass dropout to single-layer LSTM

Per the docs, dropout is applied to the outputs of every LSTM layer except
the last, so passing it has no effect when `num_layers=1`.
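
A minimal sketch of why the argument is dropped, assuming a recent PyTorch
(which warns at construction time and applies no dropout in this case):

    import warnings
    import torch.nn as nn

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        nn.LSTM(8, 4, num_layers=1, batch_first=True,
                bidirectional=True, dropout=0.5)
    # PyTorch warns that non-zero dropout expects num_layers > 1
    print(caught[0].message)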

Author: Eric Izoita
Date: 2025-06-30 13:50:50 -07:00
Committed by: GitHub
Parent: 2668b2e279
Commit: 4b647d371e
2 changed files with 3 additions and 3 deletions


@@ -1,6 +1,6 @@
 # ADAPTED from https://github.com/yl4579/StyleTTS2/blob/main/Modules/istftnet.py
 from kokoro.custom_stft import CustomSTFT
-from torch.nn.utils import weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 import math
 import torch
 import torch.nn as nn


@@ -1,6 +1,6 @@
 # https://github.com/yl4579/StyleTTS2/blob/main/models.py
 from .istftnet import AdainResBlk1d
-from torch.nn.utils import weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 from transformers import AlbertModel
 import numpy as np
 import torch
@@ -139,7 +139,7 @@ class DurationEncoder(nn.Module):
         super().__init__()
         self.lstms = nn.ModuleList()
         for _ in range(nlayers):
-            self.lstms.append(nn.LSTM(d_model + sty_dim, d_model // 2, num_layers=1, batch_first=True, bidirectional=True, dropout=dropout))
+            self.lstms.append(nn.LSTM(d_model + sty_dim, d_model // 2, num_layers=1, batch_first=True, bidirectional=True))
             self.lstms.append(AdaLayerNorm(sty_dim, d_model))
         self.dropout = dropout
         self.d_model = d_model