2024-09-26 13:08:29 +02:00
|
|
|
import io
|
2024-09-27 13:59:27 +02:00
|
|
|
from faster_whisper import WhisperModel
|
2024-09-26 13:08:29 +02:00
|
|
|
from pydub import AudioSegment
|
2024-09-24 09:55:23 +02:00
|
|
|
|
|
|
|
|
|
|
|
class VoiceRecognition:
    """Transcribe uploaded audio clips to text using faster-whisper.

    The audio is decoded with pydub, re-encoded to WAV entirely in memory,
    and fed to a (cached) faster-whisper model running on CPU.
    """

    # Cache of loaded WhisperModel instances keyed by model size.
    # Loading a Whisper model is expensive; caching means it happens once
    # per process instead of once per transcription request.
    _models = {}

    @classmethod
    def _get_model(cls, model_size):
        """Return a cached WhisperModel for *model_size*, loading it on first use."""
        if model_size not in cls._models:
            cls._models[model_size] = WhisperModel(
                model_size, device="cpu", compute_type="int8"
            )
        return cls._models[model_size]

    @staticmethod
    def recognition(audio, audio_format="ogg", model_size="base"):
        """Transcribe an audio file-like object and return the recognized text.

        Parameters
        ----------
        audio : file-like
            Object exposing ``read()`` that yields the raw audio bytes
            (e.g. an uploaded file).
        audio_format : str, optional
            Container format of the incoming audio. Defaults to ``"ogg"``,
            matching the previously hard-coded behavior.
        model_size : str, optional
            faster-whisper model size to use. Defaults to ``"base"``.

        Returns
        -------
        str
            The transcription, with leading/trailing whitespace stripped.
        """
        # Read the audio file into an in-memory buffer.
        audio_buffer = io.BytesIO(audio.read())

        # Decode with pydub and re-export as WAV, a format the
        # transcription model accepts, without touching the filesystem.
        audio_segment = AudioSegment.from_file(audio_buffer, format=audio_format)
        wav_io = io.BytesIO()
        audio_segment.export(wav_io, format="wav")
        wav_io.seek(0)  # rewind so the model reads from the start

        # Transcribe; the model is loaded lazily and reused across calls.
        model = VoiceRecognition._get_model(model_size)
        segments, _ = model.transcribe(wav_io)

        # Join segment texts with single spaces (O(n) join instead of
        # quadratic string concatenation) and strip edge whitespace.
        return " ".join(segment.text for segment in segments).strip()