From 59731084cd567c98b7913cb28c97552354beb730 Mon Sep 17 00:00:00 2001
From: scott
Date: Sun, 5 Apr 2026 15:04:46 -0400
Subject: [PATCH] Multi-pass warmup and smaller chunk_size to fix HA timeout

torch.compile with dynamic=True still specializes per shape family on
first call. The warmup was running one text length, leaving real
requests to JIT-compile their own shapes (15-22s for first chunk). HA
freezes because it gets no AudioChunk for 22 seconds.

Fix:
- Run 3 warmup passes (short/medium/long text) so torch.compile builds
  a dynamic shape graph covering the range HA actually sends. Real
  requests then hit a cached compilation and synthesize in 3-8s.
- Reduce default chunk_size from 300 to 120 chars so the first text
  chunk is shorter, producing faster synthesis and earlier first audio.

Co-Authored-By: Claude Sonnet 4.6
---
 config.py   |  2 +-
 config.yaml |  2 +-
 main.py     | 36 ++++++++++++++++++++++++------------
 3 files changed, 26 insertions(+), 14 deletions(-)

diff --git a/config.py b/config.py
index 3980334..bf66d8d 100644
--- a/config.py
+++ b/config.py
@@ -29,7 +29,7 @@ DEFAULTS = {
     "wyoming": {
         "host": "0.0.0.0",
         "port": 10200,
-        "chunk_size": 300,
+        "chunk_size": 120,
     },
     "paths": {
         "model_cache": "/app/hf_cache",
diff --git a/config.yaml b/config.yaml
index 3b8c4cf..ff9812e 100644
--- a/config.yaml
+++ b/config.yaml
@@ -23,7 +23,7 @@ wyoming:
   host: "0.0.0.0"
   port: 10200
   # Max characters per synthesis chunk (split at sentence boundaries)
-  chunk_size: 300
+  chunk_size: 120
 paths:
   model_cache: /app/hf_cache
diff --git a/main.py b/main.py
index b3b3e68..9268ea3 100644
--- a/main.py
+++ b/main.py
@@ -18,21 +18,33 @@ logging.basicConfig(
 logger = logging.getLogger(__name__)
 
+_WARMUP_TEXTS = [
+    # Short: covers brief HA notifications (lights on/off, etc.)
+    "Okay.",
+    # Medium: covers typical HA announcements
+    "The front door is open. Please close it.",
+    # Long: covers longer TTS requests and pre-compiles dynamic shape graph
+    (
+        "This is a warmup synthesis to pre-compile neural network kernels "
+        "for longer text lengths used in Home Assistant announcements and notifications."
+    ),
+]
+
+
 def _warmup(voices: dict) -> None:
     from wyoming_voices import resolve_voice
 
     audio_prompt = resolve_voice(None, voices) if voices else None
-    logger.info("Running warmup synthesis to populate MIOpen kernel cache...")
-    try:
-        engine.synthesize(
-            text=(
-                "This is a warmup synthesis request used to pre-compile neural network kernels "
-                "for typical text lengths, so that the first real request runs at full speed."
-            ),
-            audio_prompt_path=audio_prompt,
-        )
-        logger.info("Warmup complete — MIOpen cache populated")
-    except Exception:
-        logger.warning("Warmup synthesis failed (non-fatal)", exc_info=True)
+    logger.info(
+        f"Running {len(_WARMUP_TEXTS)}-pass warmup to pre-compile torch kernels "
+        "for short, medium, and long text lengths..."
+    )
+    for i, text in enumerate(_WARMUP_TEXTS, 1):
+        try:
+            engine.synthesize(text=text, audio_prompt_path=audio_prompt)
+            logger.info(f"Warmup pass {i}/{len(_WARMUP_TEXTS)} complete")
+        except Exception:
+            logger.warning(f"Warmup pass {i} failed (non-fatal)", exc_info=True)
+    logger.info("Warmup complete")
 
 
 async def main() -> None: