diff --git a/app/backend/AudioRecorder(not yet).tsx b/app/backend/AudioRecorder(not yet).tsx
deleted file mode 100644
index af6a941..0000000
--- a/app/backend/AudioRecorder(not yet).tsx	
+++ /dev/null
@@ -1,39 +0,0 @@
-// import React, { useState, useRef } from 'react'
-
-// const AudioRecorder: React.FC = () => {
-//     const [isRecording, setIsRecording] = useState(false)
-//     const [audioURL, setAudioURL] = useState<string | null>(null)
-//     const medaRecorderRef = useRef<MediaRecorder | null>(null)
-//     const audioChunks = useRef<Blob[]>([])
-
-//     const startRecording = async () => {
-//         const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
-//         const mediaRecorder = new MediaRecorder(stream)
-//         medaRecorderRef.current = mediaRecorder
-
-//         mediaRecorder.ondataavailable = (event) => {
-//             audioChunks.current.push(event.data)
-//         }
-
-//         mediaRecorder.onstop = () => {
-//             const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
-//             const url = URL.createObjectURL(audioBlob)
-//             setAudioURL(url)
-//             audioChunks.current = []
-//         }
-
-//         mediaRecorder.start()
-//         setIsRecording(true)
-
-//         const stopRecording = () => {
-//             medaRecorderRef.current?.stop()
-//             setIsRecording(false)
-//         }
-
-//         return (
-//             <div></div>
-//         )
-//     }
-// }
-
-// export default AudioRecorder
\ No newline at end of file
diff --git a/app/backend/AudioRecorder.ts b/app/backend/AudioRecorder.ts
new file mode 100644
index 0000000..459674e
--- /dev/null
+++ b/app/backend/AudioRecorder.ts
@@ -0,0 +1,41 @@
+import { useState, useRef } from 'react'
+
+// Custom hook that owns the MediaRecorder lifecycle and exposes
+// start/stop controls plus an object URL for the finished recording.
+export const AudioRecorder = () => {
+    const [isRecording, setIsRecording] = useState(false)
+    const [audioURL, setAudioURL] = useState<string | null>(null)
+    const mediaRecorderRef = useRef<MediaRecorder | null>(null)
+    const audioChunks = useRef<Blob[]>([])
+
+    const startRecording = async () => {
+        const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+        const mediaRecorder = new MediaRecorder(stream)
+        mediaRecorderRef.current = mediaRecorder
+
+        mediaRecorder.ondataavailable = (event) => {
+            audioChunks.current.push(event.data)
+        }
+
+        mediaRecorder.onstop = () => {
+            const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
+            const url = URL.createObjectURL(audioBlob)
+            setAudioURL(url)
+            audioChunks.current = []
+        }
+
+        mediaRecorder.start()
+        setIsRecording(true)
+    }
+
+    const stopRecording = () => {
+        mediaRecorderRef.current?.stop()
+        setIsRecording(false)
+    }
+
+    return { isRecording, audioURL, startRecording, stopRecording }
+}
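+
+// Example (sketch) of how a component might consume this hook:
+//   const { isRecording, audioURL, startRecording, stopRecording } = AudioRecorder()
+//   <button onClick={isRecording ? stopRecording : startRecording} />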
diff --git a/app/backend/InputOutputHandler.tsx b/app/backend/InputOutputHandler.tsx
index 3b00707..36b3fac 100644
--- a/app/backend/InputOutputHandler.tsx
+++ b/app/backend/InputOutputHandler.tsx
@@ -3,8 +3,8 @@ import React, { useEffect, useRef, useState } from "react";
 import ConversationFrontend from "../components/ConversationFrontend";
 import InputFrontend from "../components/InputFrontend";
 import VoiceSend from "./voice_backend"
+import { AudioRecorder } from "./AudioRecorder";
 import axios from "axios";
-import { skip } from "node:test";
 
 
 const InputOutputBackend: React.FC = () => {
@@ -20,10 +20,12 @@ const InputOutputBackend: React.FC = () => {
   const [liveMessage, setLiveMessage] = useState("")
   const [inputMessage, setInputMessage] = useState<string>("")
   const [inputDisabled, setInputDisabled] = useState(false)
-  const [lastMessage, setLastMessage] = useState<Message>({ role: "user", content: "Not supposed to happen." })
-  const [isRecording, setIsRecording] = useState(false);
-  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
-  const audioChunksRef = useRef<Blob[]>([]);
+  const [isRecording, setIsRecording] = useState(false)
+  const [audioURL, setAudioURL] = useState<string | null>(null)
+  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
+  const audioChunks = useRef<Blob[]>([])
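+  // The recorder and chunk buffer live in refs so that writes during
+  // recording do not trigger re-renders.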
 
 
   console.log(messages);
@@ -129,7 +131,6 @@ const InputOutputBackend: React.FC = () => {
   }
   const handleSendClick = (inputValue: string, override: boolean) => {
     if (inputValue != "") {
-      console.log(inputDisabled)
       if (!inputDisabled || override) {
         setInputDisabled(true)
         if (postWorkerRef.current) {
@@ -143,37 +144,35 @@ const InputOutputBackend: React.FC = () => {
   }
 
   const startRecording = async () => {
-    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-    mediaRecorderRef.current = new MediaRecorder(stream);
-
-    mediaRecorderRef.current.ondataavailable = (event) => {
-      audioChunksRef.current.push(event.data);
-    };
-
-    mediaRecorderRef.current.onstop = () => {
-      const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/wav' });
-      audioChunksRef.current = []; // Clear the chunks for the next recording
-      // Call your existing function to send the audioBlob
-      // Example: sendAudioToApi(audioBlob);
-    };
-
-    mediaRecorderRef.current.start();
-    setIsRecording(true);
-
-    // Automatically stop recording after 10 seconds
-    setTimeout(() => {
-      stopRecording();
-    }, 10000);
-  };
-
-  const stopRecording = () => {
-    if (mediaRecorderRef.current) {
-      mediaRecorderRef.current.stop();
-      setIsRecording(false);
-      var remote = new VoiceSend()
-      remote.sendToVoiceRecognition(new Blob(audioChunksRef.current, { type: 'audio/wav' }), remote.voiceDataTemplate);
-    }
-  };
+    const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+    const mediaRecorder = new MediaRecorder(stream)
+    mediaRecorderRef.current = mediaRecorder
+
+    mediaRecorder.ondataavailable = (event) => {
+      audioChunks.current.push(event.data)
+    }
+
+    mediaRecorder.onstop = () => {
+      const audioBlob = new Blob(audioChunks.current, { type: "audio/wav" })
+      const url = URL.createObjectURL(audioBlob)
+      setAudioURL(url)
+      audioChunks.current = []
+      const remote = new VoiceSend()
+      remote.sendToVoiceRecognition(audioBlob)
+    }
+
+    mediaRecorder.start()
+    setIsRecording(true)
+  }
+
+  const stopRecording = () => {
+    mediaRecorderRef.current?.stop()
+    setIsRecording(false)
+  }
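+
+  // Note: MediaRecorder.stop() is asynchronous; the Blob is only assembled
+  // in the onstop handler above, after the final dataavailable event has
+  // delivered its chunk.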
 
 
   const handleMicClick = () => {
@@ -224,6 +223,7 @@ const InputOutputBackend: React.FC = () => {
         onSendClick={handleSendClick}
         onMicClick={handleMicClick}
         inputDisabled={inputDisabled}
+        isRecording={isRecording}
       />
     </div>
   )
diff --git a/app/backend/voice_backend.ts b/app/backend/voice_backend.ts
index 650afeb..a93fd89 100644
--- a/app/backend/voice_backend.ts
+++ b/app/backend/voice_backend.ts
@@ -1,20 +1,22 @@
 import axios from "axios";
 
+
 class VoiceSend {
+    sendToVoiceRecognition(audio_data: Blob) {
+        console.log("sending recording...");
 
-    voiceDataTemplate = {
-        type: "basic",
-        audio_data: null,
-        option: "offline"
-    }
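+        // The Flask endpoint reads these as multipart form fields
+        // (request.form / request.files), so the payload must be FormData
+        // rather than a JSON body.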
+        const formdata = new FormData()
+        formdata.append("audio", audio_data)
+        formdata.append("option", "offline")
+        formdata.append("type", "basic")
 
-    sendToVoiceRecognition(audio_data: Blob, data: any) {
-        var dataSend = data
-        dataSend['audio_data'] = audio_data
-        axios.post("http://localhost:5000/interstellar_ai/api/voice_recognition", dataSend)
-            .then((response: any) => {
-                console.log(response['response'])
-                return response['response']
+        return axios.post("http://localhost:5000/interstellar_ai/api/voice_recognition", formdata)
+            .then((response) => {
+                console.log(response.data)
+                return response.data.response
             })
             .catch(error => {
                 console.log("Error calling API:", error)
diff --git a/app/components/InputFrontend.tsx b/app/components/InputFrontend.tsx
index e50e916..c84124b 100644
--- a/app/components/InputFrontend.tsx
+++ b/app/components/InputFrontend.tsx
@@ -1,14 +1,16 @@
 import React, { useState, ForwardedRef, useEffect } from 'react';
+import "../styles/variables.css"
 
 interface InputProps {
   message: string;
   onSendClick: (message: string, override: boolean) => void;
   onMicClick: () => void;
-  inputDisabled: boolean
+  inputDisabled: boolean;
+  isRecording: boolean;
 }
 
 const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
-  ({ message, onSendClick, onMicClick, inputDisabled }, ref: ForwardedRef<HTMLDivElement>) => {
+  ({ message, onSendClick, onMicClick, inputDisabled, isRecording }, ref: ForwardedRef<HTMLDivElement>) => {
     const [inputValue, setInputValue] = useState('');
 
     useEffect(() => {
@@ -42,7 +44,7 @@ const InputFrontend = React.forwardRef<HTMLDivElement, InputProps>(
         <button type="button" onClick={() => onSendClick(inputValue, false)} disabled={inputDisabled ? true : false}>
           <img src="/img/send.svg" alt="send" />
         </button>
-        <button type="button" onClick={onMicClick}>
+        <button className={`microphone-button ${isRecording ? "red" : "green"}`} type="button" onClick={onMicClick}>
           <img src="/img/microphone.svg" alt="microphone" />
         </button>
       </div>
diff --git a/app/styles/input.css b/app/styles/input.css
index ceeb0b3..ffbffeb 100644
--- a/app/styles/input.css
+++ b/app/styles/input.css
@@ -59,3 +59,19 @@
     background-color: var(--input-button-hover-color);
     box-shadow: 0 6px 15px rgba(0, 0, 0, 0.2);
 }
+
+.microphone-button.red {
+    background-color: var(--close-button-color);
+}
+
+.microphone-button.green {
+    background-color: var(--button-background-color);
+}
+
+.microphone-button.red:hover {
+    background-color: var(--close-button-hover-color);
+}
+
+.microphone-button.green:hover {
+    background-color: var(--input-button-hover-color);
+}
diff --git a/app/styles/variables.css b/app/styles/variables.css
index 72c464a..795b4fc 100644
--- a/app/styles/variables.css
+++ b/app/styles/variables.css
@@ -19,6 +19,7 @@
     --conversation-background-color: #79832e; /* Background color for conversation container */
     --doc-background-color: #ffffff; /* Background color for documents */
     --close-button-color: red;
+    --close-button-hover-color: #9e0101; /*NEW*/
     --burger-menu-background-color: #79832e;  /*NEW*/
     --overlay-text-color:white; /*NEW*/
     
diff --git a/py/api.py b/py/api.py
index e152cdc..0951717 100644
--- a/py/api.py
+++ b/py/api.py
@@ -99,10 +99,12 @@ class API:
 
         @self.app.route('/interstellar_ai/api/voice_recognition', methods=['POST'])
         def voice_recognition():
-            type = request.args.get('type')
-            audio = request.args.get('audio')
-            option = request.args.get('option')
-            if type == "basic":
+            recog_type = request.form.get('type')
+            audio = request.files.get('audio')
+            option = request.form.get('option')
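+            # 'audio' arrives as a werkzeug FileStorage, not raw bytes;
+            # basic_recognition is assumed to accept that type.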
+            if recog_type == "basic":
                 text = self.voice.basic_recognition(audio, option)
                 return jsonify({'status': 200, 'response': text})
             else: