From 29b66e24bbabb057ccfee1c27646f587af95a93b Mon Sep 17 00:00:00 2001
From: scott
Date: Sun, 5 Apr 2026 20:22:13 -0400
Subject: [PATCH 1/3] Cache voice conditionals and add FP16 autocast
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Voice conditionals (s3tokenizer + voice encoder + mel embeddings) are
expensive to compute but depend only on the reference audio, not the
text. Previously they ran on every synthesis chunk — 3x wasted work for
a 3-chunk request. Now computed once at startup and reused.

Also wrap generate() in torch.amp.autocast(float16) for ~2x speedup on
all model computation (T3 LLM, S3Gen CFM, HiFiGAN vocoder).

Co-Authored-By: Claude Sonnet 4.6
---
 engine.py | 39 +++++++++++++++++++++++++++++++++++++--
 main.py   | 12 +++++++++++-
 2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/engine.py b/engine.py
index 00d4a05..55b457b 100644
--- a/engine.py
+++ b/engine.py
@@ -8,6 +8,12 @@ chatterbox_model = None
 _sample_rate = 24000
 _is_turbo = False
 
+# Cache: voice file path → prepared conditionals object.
+# prepare_conditionals loads audio, runs s3tokenizer + voice encoder, and
+# builds mel embeddings — expensive work that only depends on the reference
+# audio, not the text. Cache it so multi-chunk requests pay the cost once.
+_cond_cache: dict = {}
+
 
 def _test_cuda() -> bool:
     try:
@@ -51,6 +57,26 @@ def load_model() -> bool:
         return False
 
 
+def prepare_voice(audio_prompt_path: str) -> None:
+    """
+    Pre-compute and cache the voice conditionals for a reference audio file.
+    Calling this once avoids repeating the s3tokenizer + voice encoder work
+    on every synthesis chunk that uses the same voice.
+    """
+    if chatterbox_model is None:
+        return
+    if audio_prompt_path in _cond_cache:
+        return
+    if not _is_turbo:
+        return  # only turbo exposes prepare_conditionals
+
+    logger.info(f"Preparing voice conditionals for '{audio_prompt_path}'")
+    with torch.inference_mode():
+        chatterbox_model.prepare_conditionals(audio_prompt_path)
+    _cond_cache[audio_prompt_path] = chatterbox_model.conds
+    logger.info("Voice conditionals cached")
+
+
 def get_sample_rate() -> int:
     return _sample_rate
 
@@ -71,8 +97,16 @@ def synthesize(
     if torch.cuda.is_available():
         torch.cuda.manual_seed_all(seed)
 
+    # Restore cached conditionals so generate() skips prepare_conditionals.
+    if audio_prompt_path and _is_turbo:
+        if audio_prompt_path not in _cond_cache:
+            prepare_voice(audio_prompt_path)
+        chatterbox_model.conds = _cond_cache[audio_prompt_path]
+
     kwargs: dict = {}
-    if audio_prompt_path:
+    # Don't pass audio_prompt_path — conds are already set above.
+    # For non-turbo models there's no cache; pass the path as normal.
+    if audio_prompt_path and not _is_turbo:
         kwargs["audio_prompt_path"] = audio_prompt_path
 
     if _is_turbo:
@@ -82,7 +116,8 @@
         kwargs["cfg_weight"] = cfg_weight
 
     with torch.inference_mode():
-        wav = chatterbox_model.generate(text=text, **kwargs)
+        with torch.amp.autocast(device_type="cuda", dtype=torch.float16):
+            wav = chatterbox_model.generate(text=text, **kwargs)
 
     if torch.cuda.is_available():
         torch.cuda.synchronize()
diff --git a/main.py b/main.py
index dc5f66b..7ec7f84 100644
--- a/main.py
+++ b/main.py
@@ -19,8 +19,18 @@ logger = logging.getLogger(__name__)
 
 
 def _warmup(voices: dict) -> None:
-    """Run one synthesis to populate MIOpen's in-memory kernel cache."""
+    """Pre-compute voice conditionals and populate MIOpen's kernel cache."""
     from wyoming_voices import resolve_voice
+
+    # Pre-compute conditionals for all discovered voices so the first real
+    # request doesn't pay the s3tokenizer + voice encoder cost.
+    for name, path in voices.items():
+        try:
+            engine.prepare_voice(path)
+        except Exception:
+            logger.warning(f"Failed to prepare voice '{name}' (non-fatal)", exc_info=True)
+
+    # Synthesis warmup to populate MIOpen's in-memory kernel cache.
     audio_prompt = resolve_voice(None, voices) if voices else None
     logger.info("Running warmup synthesis...")
    try:

From 967ed41239253e95dd5128291d2931c38808fb48 Mon Sep 17 00:00:00 2001
From: scott
Date: Sun, 5 Apr 2026 20:30:49 -0400
Subject: [PATCH 2/3] Revert FP16 autocast — increases TTFA on first request
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

autocast triggers fp16 kernel selection at first call for each tensor
shape. Since the warmup uses short text, real requests re-trigger kernel
selection and end up slower overall. Keeping FP32 + the conditionals
cache.

Co-Authored-By: Claude Sonnet 4.6
---
 engine.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/engine.py b/engine.py
index 55b457b..766ed87 100644
--- a/engine.py
+++ b/engine.py
@@ -116,8 +116,7 @@
     kwargs["cfg_weight"] = cfg_weight
 
     with torch.inference_mode():
-        with torch.amp.autocast(device_type="cuda", dtype=torch.float16):
-            wav = chatterbox_model.generate(text=text, **kwargs)
+        wav = chatterbox_model.generate(text=text, **kwargs)
 
     if torch.cuda.is_available():
         torch.cuda.synchronize()

From f292ace76cf0addfae89fcdbf3061da4f9382cbe Mon Sep 17 00:00:00 2001
From: scott
Date: Mon, 6 Apr 2026 17:32:56 -0400
Subject: [PATCH 3/3] Trigger rebuild to restore latest tag

Co-Authored-By: Claude Sonnet 4.6
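Note (not part of the series): a minimal standalone sketch of the
conditionals-cache pattern that patch 1 adds to engine.py. It assumes the
chatterbox-tts API the diffs call into (ChatterboxTTS.from_pretrained,
prepare_conditionals, the model's conds attribute, generate), and the
reference file "ref.wav" and two-chunk request are hypothetical; treat it
as an illustration of the caching idea, not a drop-in test.

    import torch
    from chatterbox.tts import ChatterboxTTS  # assumed import path

    _cond_cache: dict = {}

    def prepare_voice(model: ChatterboxTTS, audio_prompt_path: str) -> None:
        # Pay the s3tokenizer + voice-encoder cost once per reference file
        # and keep the resulting conditionals object for later chunks.
        if audio_prompt_path not in _cond_cache:
            with torch.inference_mode():
                model.prepare_conditionals(audio_prompt_path)
            _cond_cache[audio_prompt_path] = model.conds

    def synthesize_chunks(model: ChatterboxTTS, chunks, audio_prompt_path: str):
        prepare_voice(model, audio_prompt_path)
        for text in chunks:
            # Restore the cached conditionals instead of passing
            # audio_prompt_path, so generate() skips re-preparation.
            model.conds = _cond_cache[audio_prompt_path]
            with torch.inference_mode():
                yield model.generate(text=text)

    model = ChatterboxTTS.from_pretrained(device="cuda")
    chunks = ["First chunk of a reply.", "Second chunk of the same reply."]
    for wav in synthesize_chunks(model, chunks, "ref.wav"):
        print(wav.shape)  # conditionals computed once, reused for both chunks

On the patch 2 revert: per-shape fp16 kernel selection is hard to warm up
fully for a TTS workload, since autoregressive decoding produces many
distinct tensor shapes; warming a few representative text lengths would
only cover some of them, so keeping FP32 is the conservative choice here.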