forked from React-Group/interstellar_ai
Merge pull request 'Merge pull request 'main' (#21) from React-Group/interstellar_ai:main into main' (#51) from YasinOnm08/interstellar_ai:main into main
Reviewed-on: https://interstellardevelopment.org/code/code/React-Group/interstellar_ai/pulls/51
Commit 492443e139
8 changed files with 112 additions and 94 deletions
@@ -1,39 +0,0 @@
- // import React, { useState, useRef } from 'react'
-
- // const AudioRecorder: React.FC = () => {
- //     const [isRecording, setIsRecording] = useState(false)
- //     const [audioURL, setAudioURL] = useState<string | null>(null)
- //     const medaRecorderRef = useRef<MediaRecorder | null>(null)
- //     const audioChunks = useRef<Blob[]>([])
-
- //     const startRecording = async () => {
- //         const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
- //         const mediaRecorder = new MediaRecorder(stream)
- //         medaRecorderRef.current = mediaRecorder
-
- //         mediaRecorder.ondataavailable = (event) => {
- //             audioChunks.current.push(event.data)
- //         }
-
- //         mediaRecorder.onstop = () => {
- //             const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
- //             const url = URL.createObjectURL(audioBlob)
- //             setAudioURL(url)
- //             audioChunks.current = []
- //         }
-
- //         mediaRecorder.start()
- //         setIsRecording(true)
-
- //     const stopRecording = () => {
- //         medaRecorderRef.current?.stop()
- //         setIsRecording(false)
- //     }
-
- //     return (
- //         <div></div>
- //     )
- // }
- // }
-
- // export default AudioRecorder
app/backend/AudioRecorder.ts (new file, 34 lines)
@@ -0,0 +1,34 @@
+import React, { useState, useRef } from 'react'
+
+export const AudioRecorder = () => {
+    const [isRecording, setIsRecording] = useState(false)
+    const [audioURL, setAudioURL] = useState<string | null>(null)
+    const mediaRecorderRef = useRef<MediaRecorder | null>(null)
+    const audioChunks = useRef<Blob[]>([])
+
+    const startRecording = async () => {
+        const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+        const mediaRecorder = new MediaRecorder(stream)
+        mediaRecorderRef.current = mediaRecorder
+
+        mediaRecorder.ondataavailable = (event) => {
+            audioChunks.current.push(event.data)
+        }
+
+        mediaRecorder.onstop = () => {
+            const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
+            const url = URL.createObjectURL(audioBlob)
+            setAudioURL(url)
+            audioChunks.current = []
+        }
+
+        mediaRecorder.start()
+        setIsRecording(true)
+
+    }
+
+    const stopRecording = () => {
+        mediaRecorderRef.current?.stop()
+        setIsRecording(false)
+    }
+}
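Note: as committed, AudioRecorder declares startRecording and stopRecording but returns nothing, so importers cannot reach the recorder's state or controls. If the function is meant as a custom hook, a minimal sketch of the missing return (hypothetical, not part of this commit) would be:

    return { isRecording, audioURL, startRecording, stopRecording }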
@@ -3,8 +3,8 @@ import React, { useEffect, useRef, useState } from "react";
 import ConversationFrontend from "../components/ConversationFrontend";
 import InputFrontend from "../components/InputFrontend";
 import VoiceSend from "./voice_backend"
+import { AudioRecorder } from "./AudioRecorder";
 import axios from "axios";
 import { skip } from "node:test";
-

 const InputOutputBackend: React.FC = () => {
@@ -20,10 +20,10 @@ const InputOutputBackend: React.FC = () => {
     const [liveMessage, setLiveMessage] = useState("")
     const [inputMessage, setInputMessage] = useState<string>("")
     const [inputDisabled, setInputDisabled] = useState(false)
     const [lastMessage, setLastMessage] = useState<Message>({ role: "user", content: "Not supposed to happen." })
-    const [isRecording, setIsRecording] = useState(false);
-    const mediaRecorderRef = useRef<MediaRecorder | null>(null);
-    const audioChunksRef = useRef<Blob[]>([]);
+    const [isRecording, setIsRecording] = useState(false)
+    const [audioURL, setAudioURL] = useState<string | null>(null)
+    const mediaRecorderRef = useRef<MediaRecorder | null>(null)
+    const audioChunks = useRef<Blob[]>([])

     console.log(messages);
@@ -129,7 +129,6 @@ const InputOutputBackend: React.FC = () => {
     }
     const handleSendClick = (inputValue: string, override: boolean) => {
         if (inputValue != "") {
-            console.log(inputDisabled)
             if (!inputDisabled || override) {
                 setInputDisabled(true)
                 if (postWorkerRef.current) {
@@ -143,37 +142,33 @@ const InputOutputBackend: React.FC = () => {
     }

     const startRecording = async () => {
-        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-        mediaRecorderRef.current = new MediaRecorder(stream);
+        const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+        const mediaRecorder = new MediaRecorder(stream)
+        mediaRecorderRef.current = mediaRecorder

-        mediaRecorderRef.current.ondataavailable = (event) => {
-            audioChunksRef.current.push(event.data);
-        };
-
-        mediaRecorderRef.current.onstop = () => {
-            const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/wav' });
-            audioChunksRef.current = []; // Clear the chunks for the next recording
-            // Call your existing function to send the audioBlob
-            // Example: sendAudioToApi(audioBlob);
-        };
-
-        mediaRecorderRef.current.start();
-        setIsRecording(true);
-
-        // Automatically stop recording after 10 seconds
-        setTimeout(() => {
-            stopRecording();
-        }, 10000);
-    };
-
-    const stopRecording = () => {
-        if (mediaRecorderRef.current) {
-            mediaRecorderRef.current.stop();
-            setIsRecording(false);
-            var remote = new VoiceSend()
-            remote.sendToVoiceRecognition(new Blob(audioChunksRef.current, { type: 'audio/wav' }), remote.voiceDataTemplate);
-        }
-    };
+        mediaRecorder.ondataavailable = (event) => {
+            audioChunks.current.push(event.data)
+        }
+
+        mediaRecorder.onstop = () => {
+            const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
+            const url = URL.createObjectURL(audioBlob)
+            console.log(url);
+            setAudioURL(url)
+            audioChunks.current = []
+            const remote = new VoiceSend()
+            remote.sendToVoiceRecognition(audioBlob,)
+        }
+
+        mediaRecorder.start()
+        setIsRecording(true)
+
+    }
+
+    const stopRecording = () => {
+        mediaRecorderRef.current?.stop()
+        setIsRecording(false)
+    }


     const handleMicClick = () => {
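The body of handleMicClick sits outside this hunk. A plausible toggle over the isRecording flag wired up above (a hypothetical sketch, not the commit's code) could look like:

    const handleMicClick = () => {
        // Start on the first click, stop on the second; stopping fires
        // mediaRecorder.onstop, which hands the recorded blob to VoiceSend.
        if (!isRecording) {
            startRecording()
        } else {
            stopRecording()
        }
    }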
@@ -224,6 +219,7 @@ const InputOutputBackend: React.FC = () => {
             onSendClick={handleSendClick}
             onMicClick={handleMicClick}
             inputDisabled={inputDisabled}
+            isRecording={isRecording}
         />
     </div>
 )
@@ -1,20 +1,22 @@
 import axios from "axios";


 class VoiceSend {
-    voiceDataTemplate = {
-        type: "basic",
-        audio_data: null,
-        option: "offline"
-    }
-
-    sendToVoiceRecognition(audio_data: Blob, data: any) {
-        var dataSend = data
-        dataSend['audio_data'] = audio_data
-        axios.post("http://localhost:5000/interstellar_ai/api/voice_recognition", dataSend)
-            .then((response: any) => {
-                console.log(response['response'])
-                return response['response']
+    sendToVoiceRecognition(audio_data: Blob) {
+        console.log("sending recording...");
+        console.log(typeof (audio_data));
+        console.log(audio_data instanceof Blob);
+
+        const formdata = new FormData()
+        formdata.append("audio", audio_data)
+        formdata.append("option", "offline")
+        formdata.append("type", "basic")
+
+        const dataSend = { option: "offline", type: "basic", audio: audio_data }
+        axios.post("http://localhost:5000/interstellar_ai/api/voice_recognition", formdata)
+            .then((response) => {
+                console.log(response.data)
+                return response.data.response
             })
             .catch(error => {
                 console.log("Error calling API:", error)
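As patched, sendToVoiceRecognition returns the text only inside the .then callback, so the result never reaches the caller. A sketch of an async variant that propagates it, assuming the same endpoint and payload (hypothetical, not part of this commit):

    // Builds the same multipart payload as the commit, but returns the promise
    // so a caller can do: const text = await remote.sendToVoiceRecognition(blob)
    async sendToVoiceRecognition(audio_data: Blob): Promise<string> {
        const formdata = new FormData()
        formdata.append("audio", audio_data)
        formdata.append("option", "offline")
        formdata.append("type", "basic")
        const response = await axios.post("http://localhost:5000/interstellar_ai/api/voice_recognition", formdata)
        return response.data.response
    }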
@@ -1,14 +1,16 @@
 import React, { useState, ForwardedRef, useEffect } from 'react';
 import "../styles/variables.css"

 interface InputProps {
     message: string;
     onSendClick: (message: string, override: boolean) => void;
     onMicClick: () => void;
-    inputDisabled: boolean
+    inputDisabled: boolean;
+    isRecording: boolean
 }

 const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
-    ({ message, onSendClick, onMicClick, inputDisabled }, ref: ForwardedRef<HTMLDivElement>) => {
+    ({ message, onSendClick, onMicClick, inputDisabled, isRecording }, ref: ForwardedRef<HTMLDivElement>) => {
         const [inputValue, setInputValue] = useState('');

         useEffect(() => {
@@ -29,6 +31,10 @@ const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
             }
         };

+        const styles = {
+
+        }
+
         return (
             <div className="input" id="inputForm" ref={ref}>
                 <input
@@ -42,7 +48,7 @@ const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
                 <button type="button" onClick={() => onSendClick(inputValue, false)} disabled={inputDisabled ? true : false}>
                     <img src="/img/send.svg" alt="send" />
                 </button>
-                <button type="button" onClick={onMicClick}>
+                <button className={`microphone-button ${isRecording ? "red" : "green"}`} type="button" onClick={onMicClick}>
                     <img src="/img/microphone.svg" alt="microphone" />
                 </button>
             </div>
@@ -59,3 +59,19 @@
     background-color: var(--input-button-hover-color);
     box-shadow: 0 6px 15px rgba(0, 0, 0, 0.2);
 }
+
+.microphone-button.red {
+    background-color: var(--close-button-color);
+}
+
+.microphone-button.green {
+    background-color: var(--button-background-color);
+}
+
+.microphone-button.red:hover {
+    background-color: var(--close-button-hover-color);
+}
+
+.microphone-button.green:hover {
+    background-color: var(--input-button-hover-color);
+}
@@ -19,6 +19,7 @@
     --conversation-background-color: #79832e; /* Background color for conversation container */
     --doc-background-color: #ffffff; /* Background color for documents */
     --close-button-color: red;
+    --close-button-hover-color: #9e0101; /*NEW*/
     --burger-menu-background-color: #79832e; /*NEW*/
     --overlay-text-color: white; /*NEW*/
py/api.py (10 changed lines)
@@ -99,10 +99,12 @@ class API:

         @self.app.route('/interstellar_ai/api/voice_recognition', methods=['POST'])
         def voice_recognition():
-            type = request.args.get('type')
-            audio = request.args.get('audio')
-            option = request.args.get('option')
-            if type == "basic":
+            print(request.args)
+            recog_type = request.form.get('type')
+            print(recog_type)
+            audio = request.files.get('audio')
+            option = request.form.get('option')
+            if recog_type == "basic":
                 text = self.voice.basic_recognition(audio, option)
                 return jsonify({'status': 200, 'response': text})
             else:
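With this change the endpoint reads multipart form data instead of query parameters. A client call matching the handler's contract, with field names taken from the code above (a hedged sketch; audioBlob stands in for a Blob produced by MediaRecorder):

    const form = new FormData()
    form.append("type", "basic")
    form.append("option", "offline")
    form.append("audio", audioBlob)

    const res = await fetch("http://localhost:5000/interstellar_ai/api/voice_recognition", {
        method: "POST",
        body: form,
    })
    const { response } = await res.json() // matches jsonify({'status': 200, 'response': text})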