Multi-pass warmup and smaller chunk_size to fix HA timeout
All checks were successful
Build ROCm Image / build (push) Successful in 2m49s

torch.compile with dynamic=True still specializes per shape family on
first call. The warmup was running one text length, leaving real requests
to JIT-compile their own shapes (15–22 s for the first chunk). Home Assistant (HA)
freezes because it receives no AudioChunk for up to 22 seconds.

Fix:
- Run 3 warmup passes (short/medium/long text) so torch.compile builds
  a dynamic shape graph covering the range HA actually sends. Real
  requests then hit a cached compilation and synthesize in 3-8s.
- Reduce default chunk_size from 300 to 120 chars so the first text
  chunk is shorter, producing faster synthesis and earlier first audio.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-05 15:04:46 -04:00
parent 169e003a34
commit 59731084cd
3 changed files with 26 additions and 14 deletions

View File

@@ -29,7 +29,7 @@ DEFAULTS = {
"wyoming": { "wyoming": {
"host": "0.0.0.0", "host": "0.0.0.0",
"port": 10200, "port": 10200,
"chunk_size": 300, "chunk_size": 120,
}, },
"paths": { "paths": {
"model_cache": "/app/hf_cache", "model_cache": "/app/hf_cache",

View File

@@ -23,7 +23,7 @@ wyoming:
host: "0.0.0.0" host: "0.0.0.0"
port: 10200 port: 10200
# Max characters per synthesis chunk (split at sentence boundaries) # Max characters per synthesis chunk (split at sentence boundaries)
chunk_size: 300 chunk_size: 120
paths: paths:
model_cache: /app/hf_cache model_cache: /app/hf_cache

36
main.py
View File

@@ -18,21 +18,33 @@ logging.basicConfig(
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Warmup corpus spanning the text-length range Home Assistant typically sends.
# Synthesizing all three lets torch.compile build a dynamic-shape graph that
# covers short, medium, and long requests before the first real one arrives.
_WARMUP_TEXTS = [
    # Short: covers brief HA notifications (lights on/off, etc.)
    "Okay.",
    # Medium: covers typical HA announcements
    "The front door is open. Please close it.",
    # Long: covers longer TTS requests and pre-compiles dynamic shape graph
    "This is a warmup synthesis to pre-compile neural network kernels "
    "for longer text lengths used in Home Assistant announcements and notifications.",
]
def _warmup(voices: dict) -> None:
    """Run multi-pass warmup synthesis so torch.compile pre-builds kernels.

    Synthesizes each text in ``_WARMUP_TEXTS`` (short, medium, long) so the
    first real request hits a cached compilation instead of JIT-compiling its
    own shape family. Failures are logged and swallowed deliberately: warmup
    is a best-effort optimization and must never block server startup.

    Args:
        voices: Mapping of configured voices. When non-empty, a default voice
            is resolved and used as the audio prompt for every warmup pass;
            otherwise warmup runs with no audio prompt.
    """
    # NOTE(review): import is function-scoped in the original — presumably to
    # avoid an import cycle or defer a heavy import; confirm before moving it.
    from wyoming_voices import resolve_voice

    audio_prompt = resolve_voice(None, voices) if voices else None
    total = len(_WARMUP_TEXTS)
    # Lazy %-style args (not f-strings) so formatting is skipped when the
    # logger level is disabled — stdlib logging best practice.
    logger.info(
        "Running %d-pass warmup to pre-compile torch kernels "
        "for short, medium, and long text lengths...",
        total,
    )
    for i, text in enumerate(_WARMUP_TEXTS, 1):
        try:
            engine.synthesize(text=text, audio_prompt_path=audio_prompt)
            logger.info("Warmup pass %d/%d complete", i, total)
        except Exception:
            # Non-fatal: a failed pass only means the first real request of
            # that shape family pays the compile cost itself.
            logger.warning("Warmup pass %d failed (non-fatal)", i, exc_info=True)
    logger.info("Warmup complete")
async def main() -> None: async def main() -> None: