* Set up JS project * Finalise JS library * Update README * Fix package.json repository url * Rename package -> `kokoro-js` * Fix samples in README * Cleanup README * Bump `phonemizer` version * Create web demo * Run prettier * Link to model used in demo * Enable multithreading in HF space demo (~40% faster) * Add link to demo in README * Bump to v1.0.1 * Update voices * Update versions * Update phonemize JSDoc * Use updated voice pack * Update versions * Update demo (v1.0 & WebGPU support) * Update README * Enforce maximum number of tokens * Update README * [version] Update to 1.1.1
30 lines
925 B
JavaScript
30 lines
925 B
JavaScript
import { KokoroTTS } from "kokoro-js";
|
|
import { detectWebGPU } from "./utils.js";
|
|
|
|
// Device detection
// Prefer WebGPU when the browser exposes it; otherwise fall back to WASM.
const device = (await detectWebGPU()) ? "webgpu" : "wasm";
self.postMessage({ status: "device", device });

// Load the model
// q8 quantization keeps the WASM path fast/small; WebGPU can afford fp32.
const model_id = "onnx-community/Kokoro-82M-v1.0-ONNX";
let tts;
try {
  tts = await KokoroTTS.from_pretrained(model_id, {
    dtype: device === "wasm" ? "q8" : "fp32",
    device,
  });
} catch (e) {
  // Surface the load failure to the main thread, then let the worker die.
  self.postMessage({ status: "error", error: e.message });
  throw e;
}

// Announce readiness along with the available voices and chosen backend.
self.postMessage({ status: "ready", voices: tts.voices, device });
|
// Listen for messages from the main thread.
// Expected payload: { text: string, voice: string } — TODO confirm against caller.
self.addEventListener("message", async (e) => {
  const { text, voice } = e.data;

  try {
    // Generate speech
    const audio = await tts.generate(text, { voice });

    // Send the audio file back to the main thread as an object URL.
    const blob = audio.toBlob();
    self.postMessage({ status: "complete", audio: URL.createObjectURL(blob), text });
  } catch (err) {
    // Without this catch, a failed generation is an unhandled rejection and
    // the main thread never hears back for this request. Reuse the "error"
    // status shape already emitted on model-load failure above.
    self.postMessage({ status: "error", error: err.message });
  }
});
|