diff --git a/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-ja.yaml b/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-ja.yaml
index b893d88aa..460397b66 100644
--- a/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-ja.yaml
+++ b/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-ja.yaml
@@ -51,9 +51,9 @@ jobs:
 tar xvf sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01.tar.bz2
 rm sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01.tar.bz2
- mv -v sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01/encoder-epoch-99-avg-1.int8.onnx ./zipformer-encoder.onnx
- mv -v sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01/decoder-epoch-99-avg-1.onnx ./zipformer-decoder.onnx
- mv -v sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01/joiner-epoch-99-avg-1.int8.onnx ./zipformer-joiner.onnx
+ mv -v sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01/encoder-epoch-99-avg-1.int8.onnx ./transducer-encoder.onnx
+ mv -v sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01/decoder-epoch-99-avg-1.onnx ./transducer-decoder.onnx
+ mv -v sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01/joiner-epoch-99-avg-1.int8.onnx ./transducer-joiner.onnx
 mv -v sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01/tokens.txt ./
 rm -rf sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01
diff --git a/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-thai.yaml b/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-thai.yaml
index 7f28be39e..2b03b3845 100644
--- a/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-thai.yaml
+++ b/.github/workflows/wasm-simd-hf-space-vad-asr-zipformer-thai.yaml
@@ -50,9 +50,9 @@ jobs:
 curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-thai-2024-06-20.tar.bz2
 tar xvf sherpa-onnx-zipformer-thai-2024-06-20.tar.bz2
 rm sherpa-onnx-zipformer-thai-2024-06-20.tar.bz2
- mv -v sherpa-onnx-zipformer-thai-2024-06-20/encoder-epoch-12-avg-5.int8.onnx ./zipformer-encoder.onnx
- mv -v sherpa-onnx-zipformer-thai-2024-06-20/decoder-epoch-12-avg-5.onnx ./zipformer-decoder.onnx
- mv -v sherpa-onnx-zipformer-thai-2024-06-20/joiner-epoch-12-avg-5.int8.onnx ./zipformer-joiner.onnx
+ mv -v sherpa-onnx-zipformer-thai-2024-06-20/encoder-epoch-12-avg-5.int8.onnx ./transducer-encoder.onnx
+ mv -v sherpa-onnx-zipformer-thai-2024-06-20/decoder-epoch-12-avg-5.onnx ./transducer-decoder.onnx
+ mv -v sherpa-onnx-zipformer-thai-2024-06-20/joiner-epoch-12-avg-5.int8.onnx ./transducer-joiner.onnx
 mv -v sherpa-onnx-zipformer-thai-2024-06-20/tokens.txt ./tokens.txt
 rm -rf sherpa-onnx-zipformer-thai-2024-06-20
diff --git a/README.md b/README.md
index 3684caf2d..b7f3cebfe 100644
--- a/README.md
+++ b/README.md
@@ -100,6 +100,8 @@ We also have spaces built using WebAssembly. They are listed below:
 |Real-time speech recognition (Chinese + English + Cantonese) with [Paraformer](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)|[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-asr-sherpa-onnx-zh-cantonese-en-paraformer)| [地址](https://modelscope.cn/studios/k2-fsa/web-assembly-asr-sherpa-onnx-zh-cantonese-en-paraformer)|
 |VAD + speech recognition (Chinese + English + Korean + Japanese + Cantonese) with [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)|[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-vad-asr-sherpa-onnx-zh-en-ja-ko-cantonese-sense-voice)| [地址](https://www.modelscope.cn/studios/csukuangfj/web-assembly-vad-asr-sherpa-onnx-zh-en-jp-ko-cantonese-sense-voice)|
 |VAD + speech recognition (English) with [Whisper](https://github.com/openai/whisper) tiny.en|[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-vad-asr-sherpa-onnx-en-whisper-tiny)| [地址](https://www.modelscope.cn/studios/csukuangfj/web-assembly-vad-asr-sherpa-onnx-en-whisper-tiny)|
+|VAD + speech recognition (Japanese) with Zipformer trained with [ReazonSpeech](https://research.reazon.jp/_static/reazonspeech_nlp2023.pdf)|[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-vad-asr-sherpa-onnx-ja-zipformer)| [地址](https://www.modelscope.cn/studios/csukuangfj/web-assembly-vad-asr-sherpa-onnx-ja-zipformer)|
+|VAD + speech recognition (Thai) with Zipformer trained with [GigaSpeech2](https://github.com/SpeechColab/GigaSpeech2)|[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-vad-asr-sherpa-onnx-th-zipformer)| [地址](https://www.modelscope.cn/studios/csukuangfj/web-assembly-vad-asr-sherpa-onnx-th-zipformer)|
 |Real-time speech recognition (English) |[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-asr-sherpa-onnx-en)|[地址](https://modelscope.cn/studios/k2-fsa/web-assembly-asr-sherpa-onnx-en)|
 |Speech synthesis (English) |[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-tts-sherpa-onnx-en)| [地址](https://modelscope.cn/studios/k2-fsa/web-assembly-tts-sherpa-onnx-en)|
 |Speech synthesis (German)|[Click me](https://huggingface.co/spaces/k2-fsa/web-assembly-tts-sherpa-onnx-de)| [地址](https://modelscope.cn/studios/k2-fsa/web-assembly-tts-sherpa-onnx-de)|
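
For reference, the workflows above rename the model files to the generic names transducer-encoder.onnx, transducer-decoder.onnx, transducer-joiner.onnx, and tokens.txt, which is the usual encoder/decoder/joiner layout for a Zipformer transducer in sherpa-onnx. The sketch below is not part of this PR: it loads files with those names through the sherpa-onnx Python API (the spaces themselves use the WASM build), and the file paths plus test.wav are placeholders.

```python
# Minimal sketch, not part of this PR: decode one file with a Zipformer
# transducer using the sherpa-onnx Python API. The ONNX file names mirror
# the generic names used in the workflows; test.wav is a placeholder.
import sherpa_onnx
import soundfile as sf

recognizer = sherpa_onnx.OfflineRecognizer.from_transducer(
    encoder="transducer-encoder.onnx",  # int8-quantized encoder
    decoder="transducer-decoder.onnx",
    joiner="transducer-joiner.onnx",    # int8-quantized joiner
    tokens="tokens.txt",
    num_threads=2,
    decoding_method="greedy_search",
)

# Read a mono 16 kHz wave file as float32 samples.
samples, sample_rate = sf.read("test.wav", dtype="float32")

stream = recognizer.create_stream()
stream.accept_waveform(sample_rate, samples)
recognizer.decode_stream(stream)
print(stream.result.text)
```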