Compare commits

..

3 Commits

Author SHA1 Message Date
766ca9d278 Fix image tagging: dev branch tags as dev, not latest
All checks were successful
Build ROCm Image / build (push) Successful in 25s
main branch → :latest + :sha
other branches → :<branch-name> + :sha

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-06 17:29:59 -04:00
9a017df4ca Trigger CI builds on dev branch
All checks were successful
Build ROCm Image / build (push) Successful in 17m21s
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-06 17:10:56 -04:00
fe3c77ff4f Upgrade to ROCm 7.2, Python 3.11, PyTorch 2.11.0
- Base image: rocm/dev-ubuntu-22.04:6.1 → 7.2
- Python 3.10 → 3.11 via deadsnakes PPA
- torch/torchaudio: 2.5.1 → 2.11.0
- torchvision: 0.20.1 → 0.26.0
- pytorch_triton_rocm: 3.1.0 → 3.3.0
- transformers: 4.46.3 → >=4.50.0
- diffusers: 0.29.0 → >=0.32.0
- safetensors: >=0.4.1 → >=0.4.5
- config: temperature 0.8→0.9, seed 0→1960

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-06 17:09:56 -04:00
7 changed files with 43 additions and 67 deletions

View File

@@ -1,7 +1,10 @@
 name: Build ROCm Image
 on:
-  workflow_dispatch:
+  push:
+    branches:
+      - main
+      - dev
 jobs:
   build:
@@ -20,14 +23,21 @@ jobs:
           username: ${{ secrets.REGISTRY_USERNAME }}
           password: ${{ secrets.REGISTRY_TOKEN }}
+      - name: Set tags
+        id: tags
+        run: |
+          if [ "${{ gitea.ref_name }}" = "main" ]; then
+            echo "tags=git.sdgarren.com/scott/rocm-chatterbox-whisper:latest,git.sdgarren.com/scott/rocm-chatterbox-whisper:${{ gitea.sha }}" >> $GITHUB_OUTPUT
+          else
+            echo "tags=git.sdgarren.com/scott/rocm-chatterbox-whisper:${{ gitea.ref_name }},git.sdgarren.com/scott/rocm-chatterbox-whisper:${{ gitea.sha }}" >> $GITHUB_OUTPUT
+          fi
       - name: Build and Push
         uses: docker/build-push-action@v6
         with:
           context: .
           file: Dockerfile.rocm
           push: true
-          tags: |
-            git.sdgarren.com/scott/rocm-chatterbox-whisper:latest
-            git.sdgarren.com/scott/rocm-chatterbox-whisper:${{ gitea.sha }}
+          tags: ${{ steps.tags.outputs.tags }}
          cache-from: type=registry,ref=git.sdgarren.com/scott/rocm-chatterbox-whisper:latest
          cache-to: type=inline

View File

@@ -1,4 +1,4 @@
-FROM rocm/dev-ubuntu-22.04:6.1
+FROM rocm/dev-ubuntu-22.04:7.2
 ENV DEBIAN_FRONTEND=noninteractive \
     PYTHONDONTWRITEBYTECODE=1 \
@@ -7,27 +7,37 @@ ENV DEBIAN_FRONTEND=noninteractive \
     PIP_NO_CACHE_DIR=1
 RUN apt-get update && apt-get install -y --no-install-recommends \
-    python3 \
-    python3-pip \
-    python3-dev \
+    software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update && apt-get install -y --no-install-recommends \
+    python3.11 \
+    python3.11-dev \
+    python3.11-distutils \
     git \
     ffmpeg \
     libsndfile1 \
     && rm -rf /var/lib/apt/lists/*
+# Install pip for Python 3.11
+RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11
+# Make python3.11 the default python3
+RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 \
+    && update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1
 WORKDIR /app
 # Step 1: Install ROCm-compatible PyTorch stack first.
 # This must happen before anything else to prevent pip from pulling CPU wheels.
 COPY requirements-rocm-init.txt .
-RUN pip3 install -r requirements-rocm-init.txt
+RUN python3 -m pip install -r requirements-rocm-init.txt
 # Step 2: Install remaining dependencies (pinned to avoid overwriting torch).
 COPY requirements-rocm.txt .
-RUN pip3 install -r requirements-rocm.txt
+RUN python3 -m pip install -r requirements-rocm.txt
 # Step 3: Install chatterbox with --no-deps so pip cannot replace ROCm torch.
-RUN pip3 install --no-deps chatterbox-tts
+RUN python3 -m pip install --no-deps chatterbox-tts
 # Application source
 COPY engine.py config.py wyoming_handler.py wyoming_voices.py main.py ./

View File

@@ -13,11 +13,11 @@ tts_engine:
   generation_defaults:
     # Turbo model: uses temperature only (exaggeration/cfg_weight ignored)
     # Standard model: uses exaggeration and cfg_weight (temperature ignored)
-    temperature: 0.8
+    temperature: 0.9
     exaggeration: 0.5
     cfg_weight: 0.5
     # seed: 0 = random each call, >0 = reproducible output
-    seed: 0
+    seed: 1960
 wyoming:
   host: "0.0.0.0"

View File

@@ -8,12 +8,6 @@ chatterbox_model = None
 _sample_rate = 24000
 _is_turbo = False
-# Cache: voice file path → prepared conditionals object.
-# prepare_conditionals loads audio, runs s3tokenizer + voice encoder, and
-# builds mel embeddings — expensive work that only depends on the reference
-# audio, not the text. Cache it so multi-chunk requests pay the cost once.
-_cond_cache: dict = {}
 def _test_cuda() -> bool:
     try:
@@ -57,26 +51,6 @@ def load_model() -> bool:
     return False
-def prepare_voice(audio_prompt_path: str) -> None:
-    """
-    Pre-compute and cache the voice conditionals for a reference audio file.
-    Calling this once avoids repeating the s3tokenizer + voice encoder work
-    on every synthesis chunk that uses the same voice.
-    """
-    if chatterbox_model is None:
-        return
-    if audio_prompt_path in _cond_cache:
-        return
-    if not _is_turbo:
-        return  # only turbo exposes prepare_conditionals
-    logger.info(f"Preparing voice conditionals for '{audio_prompt_path}'")
-    with torch.inference_mode():
-        chatterbox_model.prepare_conditionals(audio_prompt_path)
-    _cond_cache[audio_prompt_path] = chatterbox_model.conds
-    logger.info("Voice conditionals cached")
 def get_sample_rate() -> int:
     return _sample_rate
@@ -97,16 +71,8 @@ def synthesize(
     if torch.cuda.is_available():
         torch.cuda.manual_seed_all(seed)
-    # Restore cached conditionals so generate() skips prepare_conditionals.
-    if audio_prompt_path and _is_turbo:
-        if audio_prompt_path not in _cond_cache:
-            prepare_voice(audio_prompt_path)
-        chatterbox_model.conds = _cond_cache[audio_prompt_path]
     kwargs: dict = {}
-    # Don't pass audio_prompt_path — conds are already set above.
-    # For non-turbo models there's no cache, pass path as normal.
-    if audio_prompt_path and not _is_turbo:
+    if audio_prompt_path:
         kwargs["audio_prompt_path"] = audio_prompt_path
     if _is_turbo:

12
main.py
View File

@@ -19,18 +19,8 @@ logger = logging.getLogger(__name__)
 def _warmup(voices: dict) -> None:
-    """Pre-compute voice conditionals and populate MIOpen's kernel cache."""
+    """Run one synthesis to populate MIOpen's in-memory kernel cache."""
     from wyoming_voices import resolve_voice
-    # Pre-compute conditionals for all discovered voices so the first real
-    # request doesn't pay the s3tokenizer + voice encoder cost.
-    for name, path in voices.items():
-        try:
-            engine.prepare_voice(path)
-        except Exception:
-            logger.warning(f"Failed to prepare voice '{name}' (non-fatal)", exc_info=True)
-    # Synthesis warmup to populate MIOpen's in-memory kernel cache.
     audio_prompt = resolve_voice(None, voices) if voices else None
     logger.info("Running warmup synthesis...")
     try:

View File

@@ -1,5 +1,5 @@
---index-url https://download.pytorch.org/whl/rocm6.1
+--index-url https://download.pytorch.org/whl/rocm7.2
-torch==2.5.1
+torch==2.11.0
-torchaudio==2.5.1
+torchaudio==2.11.0
-torchvision==0.20.1
+torchvision==0.26.0
-pytorch_triton_rocm==3.1.0
+pytorch_triton_rocm==3.3.0

View File

@@ -5,9 +5,9 @@ librosa==0.11.0
 pyloudnorm
 # ML dependencies (pinned to match chatterbox without overwriting ROCm torch)
-transformers==4.46.3
+transformers>=4.50.0
-diffusers==0.29.0
+diffusers>=0.32.0
-safetensors>=0.4.1
+safetensors>=0.4.5
 huggingface-hub
 omegaconf