Fix warmup text length and ve attribute for torch.compile
All checks were successful
Build ROCm Image / build (push) Successful in 3m35s

- Warmup now uses a ~170-char representative sentence so torch.compile
  JIT-compiles for typical token sequence lengths. Previously "Warmup."
  compiled for very short shapes, causing a full re-compile (17s) on the
  first real HA request and pushing total synthesis past 30s.
- Compile model.ve (voice encoder) in addition to s3gen — both are
  convolutional and hit the MIOpen workspace=0 bug.
- Fix _patch_timing: attribute is model.ve not model.voice_encoder,
  so the timing wrap was silently skipping the speaker embedding.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-05 14:51:08 -04:00
parent 5766870304
commit 169e003a34
2 changed files with 19 additions and 10 deletions

View File

@@ -49,15 +49,18 @@ def load_model() -> bool:
if torch.cuda.is_available():
    # torch.compile replaces MIOpen's convolution path with Triton-generated
    # kernels, bypassing the workspace=0 fallback entirely. We compile s3gen
    # (HiFiGAN vocoder + flow matching) and the voice encoder (ve) since both
    # are convolutional and hit the workspace=0 bug.
    # suppress_errors=True falls back to eager for any op compile can't handle.
    torch._dynamo.config.suppress_errors = True
    for attr, label in [("s3gen", "s3gen"), ("ve", "ve")]:
        try:
            obj = getattr(chatterbox_model, attr)
            setattr(chatterbox_model, attr, torch.compile(obj, dynamic=True))
            logger.info(f"{label} compiled with torch.compile")
        except Exception:
            logger.warning(f"torch.compile unavailable for {label}, running in eager mode", exc_info=True)
_patch_timing(chatterbox_model)
logger.info("Model loaded successfully")
@@ -87,7 +90,7 @@ def _patch_timing(model) -> None:
    pass
try:
    # Speaker/voice encoder — xvector embedding from reference audio
    _wrap(model.ve, "forward", "ve (speaker embedding)")
except AttributeError:
    pass
try: try:

View File

@@ -23,7 +23,13 @@ def _warmup(voices: dict) -> None:
audio_prompt = resolve_voice(None, voices) if voices else None
logger.info("Running warmup synthesis to populate MIOpen kernel cache...")
try:
    engine.synthesize(
        text=(
            "This is a warmup synthesis request used to pre-compile neural network kernels "
            "for typical text lengths, so that the first real request runs at full speed."
        ),
        audio_prompt_path=audio_prompt,
    )
    logger.info("Warmup complete — MIOpen cache populated")
except Exception:
    logger.warning("Warmup synthesis failed (non-fatal)", exc_info=True)