forked from React-Group/interstellar_ai

Compare commits

7 commits: 95eadb5ee2 ... ea3fd09ea2
| Author | SHA1 | Date |
|---|---|---|
|  | ea3fd09ea2 |  |
|  | 0c02d76d6f |  |
|  | 2517307ffc |  |
|  | 077b748849 |  |
|  | e23da7a579 |  |
|  | eded5f81d9 |  |
|  | 0d84454a17 |  |

8 changed files with 112 additions and 94 deletions
Deleted file (-39 lines, all previously commented out):

@@ -1,39 +0,0 @@
-// import React, { useState, useRef } from 'react'
-
-// const AudioRecorder: React.FC = () => {
-//     const [isRecording, setIsRecording] = useState(false)
-//     const [audioURL, setAudioURL] = useState<string | null>(null)
-//     const medaRecorderRef = useRef<MediaRecorder | null>(null)
-//     const audioChunks = useRef<Blob[]>([])
-
-//     const startRecording = async () => {
-//         const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
-//         const mediaRecorder = new MediaRecorder(stream)
-//         medaRecorderRef.current = mediaRecorder
-
-//         mediaRecorder.ondataavailable = (event) => {
-//             audioChunks.current.push(event.data)
-//         }
-
-//         mediaRecorder.onstop = () => {
-//             const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
-//             const url = URL.createObjectURL(audioBlob)
-//             setAudioURL(url)
-//             audioChunks.current = []
-//         }
-
-//         mediaRecorder.start()
-//         setIsRecording(true)
-
-//         const stopRecording = () => {
-//             medaRecorderRef.current?.stop()
-//             setIsRecording(false)
-//         }
-
-//         return (
-//             <div></div>
-//         )
-//     }
-// }
-
-// export default AudioRecorder

app/backend/AudioRecorder.ts (new file, +34):
@@ -0,0 +1,34 @@
+import React, { useState, useRef } from 'react'
+
+    export const AudioRecorder = () => {
+        const [isRecording, setIsRecording] = useState(false)
+        const [audioURL, setAudioURL] = useState<string | null>(null)
+        const mediaRecorderRef = useRef<MediaRecorder | null>(null)
+        const audioChunks = useRef<Blob[]>([])
+
+        const startRecording = async () => {
+            const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+            const mediaRecorder = new MediaRecorder(stream)
+            mediaRecorderRef.current = mediaRecorder
+
+            mediaRecorder.ondataavailable = (event) => {
+                audioChunks.current.push(event.data)
+            }
+
+            mediaRecorder.onstop = () => {
+                const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
+                const url = URL.createObjectURL(audioBlob)
+                setAudioURL(url)
+                audioChunks.current = []
+            }
+
+            mediaRecorder.start()
+            setIsRecording(true)
+
+        }
+
+        const stopRecording = () => {
+            mediaRecorderRef.current?.stop()
+            setIsRecording(false)
+        }
+    }
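As committed, `AudioRecorder` sets up recording state and handlers but returns nothing, so `stopRecording` and `audioURL` are unreachable from outside and the export cannot be rendered or driven by a caller. A minimal sketch of the same logic reshaped as a hook — the name `useAudioRecorder` and the `onStop` callback are illustrative assumptions, not part of this commit:

```ts
import { useRef, useState } from "react";

// Hypothetical hook shape (useAudioRecorder and onStop are assumptions,
// not part of the committed code).
export const useAudioRecorder = (onStop?: (audio: Blob) => void) => {
    const [isRecording, setIsRecording] = useState(false);
    const [audioURL, setAudioURL] = useState<string | null>(null);
    const mediaRecorderRef = useRef<MediaRecorder | null>(null);
    const audioChunks = useRef<Blob[]>([]);

    const startRecording = async () => {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        const mediaRecorder = new MediaRecorder(stream);
        mediaRecorderRef.current = mediaRecorder;

        mediaRecorder.ondataavailable = (event) => {
            audioChunks.current.push(event.data);
        };

        mediaRecorder.onstop = () => {
            // NB: MediaRecorder typically emits webm/ogg; the "audio/wav"
            // label is carried over from the diff, not a real conversion.
            const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" });
            setAudioURL(URL.createObjectURL(audioBlob));
            audioChunks.current = [];
            onStop?.(audioBlob); // hand the finished recording to the caller
        };

        mediaRecorder.start();
        setIsRecording(true);
    };

    const stopRecording = () => {
        mediaRecorderRef.current?.stop();
        setIsRecording(false);
    };

    // Returning the state and handlers is the piece the committed file omits.
    return { isRecording, audioURL, startRecording, stopRecording };
};
```

A caller could then write `const { isRecording, startRecording, stopRecording } = useAudioRecorder(blob => new VoiceSend().sendToVoiceRecognition(blob))` and wire the two handlers to the microphone button.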
@@ -3,8 +3,8 @@ import React, { useEffect, useRef, useState } from "react";
 import ConversationFrontend from "../components/ConversationFrontend";
 import InputFrontend from "../components/InputFrontend";
 import VoiceSend from "./voice_backend"
+import { AudioRecorder } from "./AudioRecorder";
 import axios from "axios";
-import { skip } from "node:test";
 
 
 const InputOutputBackend: React.FC = () => {
@@ -20,10 +20,10 @@ const InputOutputBackend: React.FC = () => {
   const [liveMessage, setLiveMessage] = useState("")
   const [inputMessage, setInputMessage] = useState<string>("")
   const [inputDisabled, setInputDisabled] = useState(false)
-  const [lastMessage, setLastMessage] = useState<Message>({ role: "user", content: "Not supposed to happen." })
-  const [isRecording, setIsRecording] = useState(false);
-  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
-  const audioChunksRef = useRef<Blob[]>([]);
+  const [isRecording, setIsRecording] = useState(false)
+  const [audioURL, setAudioURL] = useState<string | null>(null)
+  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
+  const audioChunks = useRef<Blob[]>([])
 
 
   console.log(messages);
@@ -129,7 +129,6 @@ const InputOutputBackend: React.FC = () => {
   }
   const handleSendClick = (inputValue: string, override: boolean) => {
     if (inputValue != "") {
-      console.log(inputDisabled)
       if (!inputDisabled || override) {
         setInputDisabled(true)
         if (postWorkerRef.current) {
@@ -143,37 +142,33 @@ const InputOutputBackend: React.FC = () => {
   }
 
   const startRecording = async () => {
-    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-    mediaRecorderRef.current = new MediaRecorder(stream);
+    const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+    const mediaRecorder = new MediaRecorder(stream)
+    mediaRecorderRef.current = mediaRecorder
 
-    mediaRecorderRef.current.ondataavailable = (event) => {
-      audioChunksRef.current.push(event.data);
-    };
+    mediaRecorder.ondataavailable = (event) => {
+      audioChunks.current.push(event.data)
+    }
 
-    mediaRecorderRef.current.onstop = () => {
-      const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/wav' });
-      audioChunksRef.current = []; // Clear the chunks for the next recording
-      // Call your existing function to send the audioBlob
-      // Example: sendAudioToApi(audioBlob);
-    };
+    mediaRecorder.onstop = () => {
+      const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
+      const url = URL.createObjectURL(audioBlob)
+      console.log(url);
+      setAudioURL(url)
+      audioChunks.current = []
+      const remote = new VoiceSend()
+      remote.sendToVoiceRecognition(audioBlob)
+    }
 
-    mediaRecorderRef.current.start();
-    setIsRecording(true);
+    mediaRecorder.start()
+    setIsRecording(true)
 
-    // Automatically stop recording after 10 seconds
-    setTimeout(() => {
-      stopRecording();
-    }, 10000);
-  };
+    }
 
     const stopRecording = () => {
-    if (mediaRecorderRef.current) {
-      mediaRecorderRef.current.stop();
-      setIsRecording(false);
-      var remote = new VoiceSend()
-      remote.sendToVoiceRecognition(new Blob(audioChunksRef.current, { type: 'audio/wav' }), remote.voiceDataTemplate);
-    }
+      mediaRecorderRef.current?.stop()
+      setIsRecording(false)
     }
-  };
 
 
   const handleMicClick = () => {
@@ -224,6 +219,7 @@ const InputOutputBackend: React.FC = () => {
         onSendClick={handleSendClick}
         onMicClick={handleMicClick}
         inputDisabled={inputDisabled}
+        isRecording={isRecording}
       />
     </div>
   )
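With the 10-second auto-stop removed, recording now starts and stops from the microphone button alone. `handleMicClick` itself sits outside the changed hunks; a plausible toggle, assuming it takes roughly this form:

```ts
// Assumed wiring (handleMicClick's actual body is not shown in these hunks):
const handleMicClick = () => {
    if (isRecording) {
        stopRecording();   // triggers mediaRecorder.onstop, which posts the blob
    } else {
        startRecording();
    }
};
```

One caveat in the committed `onstop`: each recording calls `URL.createObjectURL` without revoking the previous URL, so long sessions leak blobs; calling `URL.revokeObjectURL(audioURL)` before `setAudioURL(url)` would avoid that.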
@@ -1,20 +1,22 @@
 import axios from "axios";
 
+
 class VoiceSend {
+    sendToVoiceRecognition(audio_data: Blob) {
+        console.log("sending recording...");
+        console.log(typeof (audio_data));
+        console.log(audio_data instanceof Blob);
 
-    voiceDataTemplate = {
-        type: "basic",
-        audio_data: null,
-        option: "offline"
-    }
+        const formdata = new FormData()
+        formdata.append("audio", audio_data)
+        formdata.append("option", "offline")
+        formdata.append("type", "basic")
 
-    sendToVoiceRecognition(audio_data: Blob, data: any) {
-        var dataSend = data
-        dataSend['audio_data'] = audio_data
-        axios.post("http://localhost:5000/interstellar_ai/api/voice_recognition", dataSend)
-            .then((response: any) => {
-                console.log(response['response'])
-                return response['response']
+        const dataSend = { option: "offline", type: "basic", audio: audio_data }
+        axios.post("http://localhost:5000/interstellar_ai/api/voice_recognition", formdata)
+            .then((response) => {
+                console.log(response.data)
+                return response.data.response
             })
             .catch(error => {
                 console.log("Error calling API:", error)
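The rewritten `sendToVoiceRecognition` posts `FormData` instead of the old JSON-ish template, which is what lets Flask see the fields at all. Two loose ends remain: the `dataSend` object is now dead code, and the `return` inside `.then()` resolves an inner promise that no caller ever receives. A sketch of an `async` variant that actually hands the transcription back (same endpoint and field names as the diff):

```ts
import axios from "axios";

// Async sketch: awaiting the POST lets the caller receive the recognized text.
export async function sendToVoiceRecognition(audio_data: Blob): Promise<string | null> {
    const formdata = new FormData();
    formdata.append("audio", audio_data);
    formdata.append("option", "offline");
    formdata.append("type", "basic");

    try {
        const response = await axios.post(
            "http://localhost:5000/interstellar_ai/api/voice_recognition",
            formdata
        );
        // Matches the backend's jsonify({'status': 200, 'response': text})
        return response.data.response;
    } catch (error) {
        console.log("Error calling API:", error);
        return null;
    }
}
```

A caller can then write `const text = await sendToVoiceRecognition(audioBlob)` and feed the result into the message input.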
@@ -1,14 +1,16 @@
 import React, { useState, ForwardedRef, useEffect } from 'react';
+import "../styles/variables.css"
 
 interface InputProps {
   message: string;
   onSendClick: (message: string, override: boolean) => void;
   onMicClick: () => void;
-  inputDisabled: boolean
+  inputDisabled: boolean;
+  isRecording: boolean
 }
 
 const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
-  ({ message, onSendClick, onMicClick, inputDisabled }, ref: ForwardedRef<HTMLDivElement>) => {
+  ({ message, onSendClick, onMicClick, inputDisabled, isRecording }, ref: ForwardedRef<HTMLDivElement>) => {
     const [inputValue, setInputValue] = useState('');
 
     useEffect(() => {
@@ -29,6 +31,10 @@ const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
       }
     };
 
+    const styles = {
+
+    }
+
     return (
       <div className="input" id="inputForm" ref={ref}>
         <input
@@ -42,7 +48,7 @@ const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
         <button type="button" onClick={() => onSendClick(inputValue, false)} disabled={inputDisabled ? true : false}>
           <img src="/img/send.svg" alt="send" />
         </button>
-        <button type="button" onClick={onMicClick}>
+        <button className={`microphone-button ${isRecording ? "red" : "green"}`} type="button" onClick={onMicClick}>
          <img src="/img/microphone.svg" alt="microphone" />
         </button>
       </div>
@@ -59,3 +59,19 @@
     background-color: var(--input-button-hover-color);
     box-shadow: 0 6px 15px rgba(0, 0, 0, 0.2);
 }
+
+.microphone-button.red {
+    background-color: var(--close-button-color);
+}
+
+.microphone-button.green {
+    background-color: var(--button-background-color);
+}
+
+.microphone-button.red:hover {
+    background-color: var(--close-button-hover-color);
+}
+
+.microphone-button.green:hover {
+    background-color: var(--input-button-hover-color);
+}
@@ -19,6 +19,7 @@
     --conversation-background-color: #79832e; /* Background color for conversation container */
     --doc-background-color: #ffffff; /* Background color for documents */
     --close-button-color: red;
+    --close-button-hover-color: #9e0101; /*NEW*/
     --burger-menu-background-color: #79832e;  /*NEW*/
     --overlay-text-color: white; /*NEW*/
 

py/api.py (+6 -4):
@@ -99,10 +99,12 @@ class API:
 
         @self.app.route('/interstellar_ai/api/voice_recognition', methods=['POST'])
         def voice_recognition():
-            type = request.args.get('type')
-            audio = request.args.get('audio')
-            option = request.args.get('option')
-            if type == "basic":
+            print(request.args)
+            recog_type = request.form.get('type')
+            print(recog_type)
+            audio = request.files.get('audio')
+            option = request.form.get('option')
+            if recog_type == "basic":
                 text = self.voice.basic_recognition(audio, option)
                 return jsonify({'status': 200, 'response': text})
             else:
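This is the change that makes the endpoint work with the new frontend: `request.args` only ever contains URL query parameters, so the old handler saw `None` for everything sent in a POST body. Multipart fields built with `FormData` land in `request.form` (the strings) and `request.files` (the blob). For reference, a minimal client call that satisfies the updated handler — a fetch-based sketch with an assumed filename, where the project itself uses axios:

```ts
// Minimal client for the updated endpoint: string fields arrive in
// request.form, the blob arrives in request.files["audio"].
async function recognize(audio: Blob): Promise<string> {
    const form = new FormData();
    form.append("audio", audio, "recording.wav"); // filename is illustrative
    form.append("type", "basic");
    form.append("option", "offline");

    const res = await fetch("http://localhost:5000/interstellar_ai/api/voice_recognition", {
        method: "POST",
        body: form, // the browser sets the multipart boundary header itself
    });
    const json = await res.json();
    return json.response;
}
```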