first implementation

EliasVincent 2023-03-09 12:46:50 +01:00
parent c09f416adb
commit 4c72e43bcf
2 changed files with 45 additions and 0 deletions

View File

@ -0,0 +1,5 @@
git+https://github.com/Uberi/speech_recognition.git@010382b
PyAudio
openai-whisper
soundfile
ffmpeg
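
Not part of the commit, but a quick sanity check along the lines of the sketch below (assuming the requirements above have been installed into the active environment) can confirm that speech_recognition, openai-whisper and PyAudio are importable and that a microphone is visible before the extension is enabled.

# Hypothetical sanity check, not shipped with the extension: only verifies
# the dependency stack from requirements.txt before wiring up the STT button.
import speech_recognition as sr
import whisper  # installed by the openai-whisper package

print("speech_recognition version:", sr.__version__)
print("whisper models:", whisper.available_models()[:5])
print("microphones:", sr.Microphone.list_microphone_names())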

View File

@ -0,0 +1,40 @@
import gradio as gr
import speech_recognition as sr
import modules.shared as shared
input_hijack = {
    'state': False,
    'value': ["", ""]
}


def input_modifier(string):
    # Leave the typed input untouched; the transcription is delivered
    # through the input_hijack dict instead.
    return string


def do_stt():
    transcription = ""
    r = sr.Recognizer()

    # Record a single phrase from the default microphone.
    with sr.Microphone() as source:
        print("Say something!")
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)

    # Recognize speech using Whisper (runs locally via openai-whisper).
    try:
        transcription = r.recognize_whisper(audio, language="english", model="tiny.en")
        print("Whisper thinks you said " + transcription)
    except sr.UnknownValueError:
        print("Whisper could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Whisper; {0}".format(e))

    # input_modifier(transcription)
    # Hand the transcription to the host UI via the hijack dict.
    input_hijack.update({"state": True, "value": [transcription, transcription]})
    return transcription


def ui():
    speech_button = gr.Button(value="STT")
    output_transcription = gr.Textbox(label="Speech Preview")
    speech_button.click(do_stt, outputs=[output_transcription])
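
The extension leaves launching to the host application, which is expected to call ui() inside its own Gradio context and to read input_hijack. For trying the layout in isolation, a minimal standalone harness along these lines (an assumption for local testing, not something this commit provides; fake_stt is a hypothetical stand-in for do_stt) would work:

# Hypothetical standalone harness: reproduces the same Button -> Textbox
# wiring in a bare Gradio Blocks app, with the microphone path stubbed out.
import gradio as gr

def fake_stt():
    # Stand-in for do_stt(); returns a canned transcription instead of recording.
    return "this is a test transcription"

with gr.Blocks() as demo:
    speech_button = gr.Button(value="STT")
    output_transcription = gr.Textbox(label="Speech Preview")
    speech_button.click(fake_stt, outputs=[output_transcription])

demo.launch()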