From 4b647d371e837e1f6e356bbbabc99b0c436f27bd Mon Sep 17 00:00:00 2001
From: Eric Izoita
Date: Mon, 30 Jun 2025 13:50:50 -0700
Subject: [PATCH] Fix a couple minor warnings (#210)

* use non-deprecated import path of weight_norm

* don't pass dropout to single layer LSTM

per the docs, dropout is not applied to the last LSTM layer, so there
is no effect to passing it when `num_layers=1`.
---
 kokoro/istftnet.py | 2 +-
 kokoro/modules.py  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/kokoro/istftnet.py b/kokoro/istftnet.py
index cb22279..f1c536e 100644
--- a/kokoro/istftnet.py
+++ b/kokoro/istftnet.py
@@ -1,6 +1,6 @@
 # ADAPTED from https://github.com/yl4579/StyleTTS2/blob/main/Modules/istftnet.py
 from kokoro.custom_stft import CustomSTFT
-from torch.nn.utils import weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 import math
 import torch
 import torch.nn as nn
diff --git a/kokoro/modules.py b/kokoro/modules.py
index 05d1575..f183bd3 100644
--- a/kokoro/modules.py
+++ b/kokoro/modules.py
@@ -1,6 +1,6 @@
 # https://github.com/yl4579/StyleTTS2/blob/main/models.py
 from .istftnet import AdainResBlk1d
-from torch.nn.utils import weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 from transformers import AlbertModel
 import numpy as np
 import torch
@@ -139,7 +139,7 @@ class DurationEncoder(nn.Module):
         super().__init__()
         self.lstms = nn.ModuleList()
         for _ in range(nlayers):
-            self.lstms.append(nn.LSTM(d_model + sty_dim, d_model // 2, num_layers=1, batch_first=True, bidirectional=True, dropout=dropout))
+            self.lstms.append(nn.LSTM(d_model + sty_dim, d_model // 2, num_layers=1, batch_first=True, bidirectional=True))
         self.lstms.append(AdaLayerNorm(sty_dim, d_model))
         self.dropout = dropout
         self.d_model = d_model