Switch to ROCm 6.1 + torch 2.5.1 to fix MIOpen workspace=0 slowness
Some checks failed
Build ROCm Image / build (push) Failing after 11s
Some checks failed
Build ROCm Image / build (push) Failing after 11s
ROCm 7.2 + PyTorch 2.11.0 has a bug where PyTorch passes workspace=0 to MIOpen convolutions, forcing fallback to the slow GemmFwdRest solver. This caused s3gen.inference to take 15-22s instead of <5s, making synthesis 3-4x slower than real-time audio playback. ROCm 6.1 allocates workspace correctly, so MIOpen picks fast GEMM solvers without needing torch.compile workarounds.

Changes:
- Base image: rocm/dev-ubuntu-22.04:7.2 → 6.1
- torch 2.11.0 → 2.5.1 (rocm6.1 wheel index)
- Add pytorch_triton_rocm==3.1.0
- transformers 5.2.0 → 4.46.3, safetensors 0.5.3 → 0.4.0
- s3tokenizer unpinned → 0.3.0
- resemble-perth==1.0.1 directly (v1.0.1 is pip-installable; drop stub)
- Drop Dockerfile perth_stub steps
- Drop torch.compile and timing patches from engine.py (not needed)
- Drop multi-pass warmup from main.py (torch JIT warmup not needed)
- Drop ROCm 7.2-specific env vars from docker-compose.yml

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
49
engine.py
49
engine.py
@@ -1,7 +1,5 @@
|
||||
import logging
|
||||
import time
|
||||
import torch
|
||||
import torch._dynamo
|
||||
from typing import Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -46,23 +44,6 @@ def load_model() -> bool:
|
||||
_is_turbo = False
|
||||
|
||||
_sample_rate = 24000
|
||||
|
||||
if torch.cuda.is_available():
|
||||
# torch.compile replaces MIOpen's convolution path with Triton-generated
|
||||
# kernels, bypassing the workspace=0 fallback entirely. We compile s3gen
|
||||
# (HiFiGAN vocoder + flow matching) and the voice encoder (ve) since both
|
||||
# are convolutional and hit the workspace=0 bug.
|
||||
# suppress_errors=True falls back to eager for any op compile can't handle.
|
||||
torch._dynamo.config.suppress_errors = True
|
||||
for attr, label in [("s3gen", "s3gen"), ("ve", "ve")]:
|
||||
try:
|
||||
obj = getattr(chatterbox_model, attr)
|
||||
setattr(chatterbox_model, attr, torch.compile(obj, dynamic=True))
|
||||
logger.info(f"{label} compiled with torch.compile")
|
||||
except Exception:
|
||||
logger.warning(f"torch.compile unavailable for {label}, running in eager mode", exc_info=True)
|
||||
|
||||
_patch_timing(chatterbox_model)
|
||||
logger.info("Model loaded successfully")
|
||||
return True
|
||||
except Exception:
|
||||
@@ -70,36 +51,6 @@ def load_model() -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def _patch_timing(model) -> None:
    """Wrap key sub-model forward() calls with timing logs.

    Each listed sub-model method is replaced in place by a wrapper that
    measures wall-clock time (synchronizing CUDA first so GPU work is
    included in the measurement) and logs it at INFO level. Sub-models
    missing from *model* are skipped silently.
    """

    def _instrument(target, method_name, label):
        # Capture the original bound method, then shadow it on the instance.
        original = getattr(target, method_name)

        def timed(*args, **kwargs):
            t0 = time.monotonic()
            result = original(*args, **kwargs)
            if torch.cuda.is_available():
                # Kernels launch asynchronously; synchronize so the elapsed
                # time reflects actual GPU execution, not just dispatch.
                torch.cuda.synchronize()
            logger.info(f"[timing] {label}: {time.monotonic() - t0:.3f}s")
            return result

        setattr(target, method_name, timed)

    # (attribute, method, log label) for each sub-model worth timing:
    #  - s3tokenizer: processes reference audio through a conformer
    #  - ve: speaker/voice encoder, xvector embedding from reference audio
    #  - s3gen: flow matching (token -> mel) + HiFiGAN (mel -> wav)
    instrumentation = (
        ("s3tokenizer", "forward", "s3tokenizer (ref audio encoding)"),
        ("ve", "forward", "ve (speaker embedding)"),
        ("s3gen", "inference", "s3gen.inference (flow+vocoder)"),
    )
    for attr, method_name, label in instrumentation:
        try:
            _instrument(getattr(model, attr), method_name, label)
        except AttributeError:
            # Sub-model (or its method) not present on this model; skip.
            pass
|
||||
|
||||
|
||||
def get_sample_rate() -> int:
    """Return the module-level output sample rate in Hz."""
    return _sample_rate
|
||||
|
||||
|
||||
Reference in New Issue
Block a user