Voice recognition trial 1

This commit is contained in:
YasinOnm08 2024-09-26 08:57:28 +02:00
parent edc7260966
commit 0d84454a17
5 changed files with 74 additions and 88 deletions

View file

@ -3,8 +3,8 @@ import React, { useEffect, useRef, useState } from "react";
import ConversationFrontend from "../components/ConversationFrontend";
import InputFrontend from "../components/InputFrontend";
import VoiceSend from "./voice_backend"
import { AudioRecorder } from "./AudioRecorder";
import axios from "axios";
import { skip } from "node:test";
const InputOutputBackend: React.FC = () => {
@ -20,10 +20,10 @@ const InputOutputBackend: React.FC = () => {
const [liveMessage, setLiveMessage] = useState("")
const [inputMessage, setInputMessage] = useState<string>("")
const [inputDisabled, setInputDisabled] = useState(false)
const [lastMessage, setLastMessage] = useState<Message>({ role: "user", content: "Not supposed to happen." })
const [isRecording, setIsRecording] = useState(false);
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const audioChunksRef = useRef<Blob[]>([]);
const [isRecording, setIsRecording] = useState(false)
const [audioURL, setAudioURL] = useState<string | null>(null)
const mediaRecorderRef = useRef<MediaRecorder | null>(null)
const audioChunks = useRef<Blob[]>([])
console.log(messages);
@ -129,7 +129,6 @@ const InputOutputBackend: React.FC = () => {
}
const handleSendClick = (inputValue: string, override: boolean) => {
if (inputValue != "") {
console.log(inputDisabled)
if (!inputDisabled || override) {
setInputDisabled(true)
if (postWorkerRef.current) {
@ -143,37 +142,33 @@ const InputOutputBackend: React.FC = () => {
}
const startRecording = async () => {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
mediaRecorderRef.current = new MediaRecorder(stream);
mediaRecorderRef.current.ondataavailable = (event) => {
audioChunksRef.current.push(event.data);
};
mediaRecorderRef.current.onstop = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/wav' });
audioChunksRef.current = []; // Clear the chunks for the next recording
// Call your existing function to send the audioBlob
// Example: sendAudioToApi(audioBlob);
};
mediaRecorderRef.current.start();
setIsRecording(true);
// Automatically stop recording after 10 seconds
setTimeout(() => {
stopRecording();
}, 10000);
};
const stopRecording = () => {
if (mediaRecorderRef.current) {
mediaRecorderRef.current.stop();
setIsRecording(false);
var remote = new VoiceSend()
remote.sendToVoiceRecognition(new Blob(audioChunksRef.current, { type: 'audio/wav' }), remote.voiceDataTemplate);
const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
const mediaRecorder = new MediaRecorder(stream)
mediaRecorderRef.current = mediaRecorder
mediaRecorder.ondataavailable = (event) => {
audioChunks.current.push(event.data)
}
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
const url = URL.createObjectURL(audioBlob)
console.log(url);
setAudioURL(url)
audioChunks.current = []
const remote = new VoiceSend()
remote.sendToVoiceRecognition(audioBlob,)
}
mediaRecorder.start()
setIsRecording(true)
}
const stopRecording = () => {
mediaRecorderRef.current?.stop()
setIsRecording(false)
}
};
const handleMicClick = () => {
@ -224,6 +219,7 @@ const InputOutputBackend: React.FC = () => {
onSendClick={handleSendClick}
onMicClick={handleMicClick}
inputDisabled={inputDisabled}
isRecording={isRecording}
/>
</div>
)