Compare commits

...

5 Commits

Author SHA1 Message Date
d517f730c2 Merge main into dev and suppress MIOpen workspace warnings
All checks were successful
Build ROCm Image / build (push) Successful in 3m27s
- Merge: voice conditionals cache and warmup pre-computation from main
- Add MIOPEN_LOG_LEVEL=2 to suppress GemmFwdRest workspace=0 warnings

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-06 17:41:54 -04:00
69f5489532 Merge branch 'main' into dev 2026-04-06 17:41:40 -04:00
f292ace76c Trigger rebuild to restore latest tag
All checks were successful
Build ROCm Image / build (push) Successful in 14m47s
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-06 17:33:16 -04:00
967ed41239 Revert FP16 autocast — increases TTFA on first request
All checks were successful
Build ROCm Image / build (push) Successful in 3m21s
Autocast triggers fp16 kernel selection at the first call for each tensor
shape. Since the warmup uses short text, real requests re-trigger
selection and are net slower. Keeping FP32 + conditionals cache.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-05 20:30:49 -04:00
29b66e24bb Cache voice conditionals and add FP16 autocast
All checks were successful
Build ROCm Image / build (push) Successful in 3m17s
Voice conditionals (s3tokenizer + voice encoder + mel embeddings) are
expensive to compute but depend only on the reference audio, not the
text. Previously they ran on every synthesis chunk — 3x wasted work for
a 3-chunk request. Now computed once at startup and reused.

Also wrap generate() in torch.amp.autocast(float16) for ~2x speedup on
all model computation (T3 LLM, S3Gen CFM, HiFiGAN vocoder).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-05 20:22:13 -04:00
3 changed files with 48 additions and 2 deletions

View File

@@ -32,6 +32,8 @@ services:
- HSA_OVERRIDE_GFX_VERSION=10.3.0
# Disable MIOpen's SQLite cache — avoids crashes writing benchmark results.
- MIOPEN_DISABLE_CACHE=1
# Suppress MIOpen workspace=0 solver warnings (expected with ROCm/PyTorch, not actionable).
- MIOPEN_LOG_LEVEL=2
volumes:
hf_cache:

View File

@@ -8,6 +8,12 @@ chatterbox_model = None
_sample_rate = 24000
_is_turbo = False
# Cache: voice file path → prepared conditionals object.
# prepare_conditionals loads audio, runs s3tokenizer + voice encoder, and
# builds mel embeddings — expensive work that only depends on the reference
# audio, not the text. Cache it so multi-chunk requests pay the cost once.
_cond_cache: dict = {}
def _test_cuda() -> bool:
try:
@@ -51,6 +57,26 @@ def load_model() -> bool:
return False
def prepare_voice(audio_prompt_path: str) -> None:
"""
Pre-compute and cache the voice conditionals for a reference audio file.
Calling this once avoids repeating the s3tokenizer + voice encoder work
on every synthesis chunk that uses the same voice.
"""
if chatterbox_model is None:
return
if audio_prompt_path in _cond_cache:
return
if not _is_turbo:
return # only turbo exposes prepare_conditionals
logger.info(f"Preparing voice conditionals for '{audio_prompt_path}'")
with torch.inference_mode():
chatterbox_model.prepare_conditionals(audio_prompt_path)
_cond_cache[audio_prompt_path] = chatterbox_model.conds
logger.info("Voice conditionals cached")
def get_sample_rate() -> int:
return _sample_rate
@@ -71,8 +97,16 @@ def synthesize(
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
# Restore cached conditionals so generate() skips prepare_conditionals.
if audio_prompt_path and _is_turbo:
if audio_prompt_path not in _cond_cache:
prepare_voice(audio_prompt_path)
chatterbox_model.conds = _cond_cache[audio_prompt_path]
kwargs: dict = {}
if audio_prompt_path:
# Don't pass audio_prompt_path — conds are already set above.
# For non-turbo models there's no cache, pass path as normal.
if audio_prompt_path and not _is_turbo:
kwargs["audio_prompt_path"] = audio_prompt_path
if _is_turbo:

12
main.py
View File

@@ -19,8 +19,18 @@ logger = logging.getLogger(__name__)
def _warmup(voices: dict) -> None:
"""Run one synthesis to populate MIOpen's in-memory kernel cache."""
"""Pre-compute voice conditionals and populate MIOpen's kernel cache."""
from wyoming_voices import resolve_voice
# Pre-compute conditionals for all discovered voices so the first real
# request doesn't pay the s3tokenizer + voice encoder cost.
for name, path in voices.items():
try:
engine.prepare_voice(path)
except Exception:
logger.warning(f"Failed to prepare voice '{name}' (non-fatal)", exc_info=True)
# Synthesis warmup to populate MIOpen's in-memory kernel cache.
audio_prompt = resolve_voice(None, voices) if voices else None
logger.info("Running warmup synthesis...")
try: