# text-generation-webui/extensions/whisper_stt/script.py
# NOTE(review): the original paste carried GitHub blame/metadata lines
# ("Raw Normal View History", commit timestamps); they were page chrome,
# not code, and are preserved here only as this comment header.
import gradio as gr
import speech_recognition as sr
# Shared state read by the main app's input pipeline: when 'state' is True,
# the two strings in 'value' replace the user's typed input on the next generate.
input_hijack = dict(
    state=False,
    value=["", ""],
)
def do_stt(audio, text_state=""):
    """Transcribe a recorded clip with Whisper and append it to the running text.

    Parameters:
        audio: the (sample_rate, frame_data) pair produced by the gradio
            microphone widget.
        text_state: the transcription accumulated so far (gr.State value).

    Returns:
        A (text, state) pair — both the same accumulated string, so the
        textbox preview and the hidden state stay in sync.
    """
    recognizer = sr.Recognizer()
    transcription = ""

    # Wrap the raw gradio pair in an AudioData object for speech_recognition.
    # NOTE(review): sample_width=4 assumes 32-bit samples — confirm against
    # what the gradio Audio component actually delivers.
    audio_data = sr.AudioData(sample_rate=audio[0], frame_data=audio[1], sample_width=4)

    try:
        transcription = recognizer.recognize_whisper(audio_data, language="english", model="base.en")
    except sr.UnknownValueError:
        print("Whisper could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Whisper", e)

    # Arm the hijack so the main app sends the transcription as the next input
    # (best-effort: on recognition failure this is an empty string).
    input_hijack["state"] = True
    input_hijack["value"] = [transcription, transcription]

    text_state += transcription + " "
    return text_state, text_state
def update_hijack(val):
    """Mirror a manual edit of the preview textbox into the input hijack.

    Keeps the hijacked input in sync when the user corrects the
    transcription by hand; returns the value unchanged for the gr.State.
    """
    input_hijack["state"] = True
    input_hijack["value"] = [val, val]
    return val
def ui():
    """Build the gradio widgets for the whisper speech-to-text extension.

    Wires three pieces together: a microphone recorder, a "Transcribe"
    button that runs do_stt, and an editable preview textbox whose manual
    edits are fed back through update_hijack.
    """
    tr_state = gr.State(value="")
    output_transcription = gr.Textbox(
        label="STT-Input",
        placeholder="Speech Preview. Click \"Generate\" to send",
        interactive=True,
    )
    # Manual edits to the preview re-arm the hijack with the edited text.
    output_transcription.change(
        fn=update_hijack,
        inputs=[output_transcription],
        outputs=[tr_state],
    )
    with gr.Row():
        audio = gr.Audio(source="microphone")
        transcribe_button = gr.Button(value="Transcribe")
    transcribe_button.click(
        do_stt,
        inputs=[audio, tr_state],
        outputs=[output_transcription, tr_state],
    )