"use client" import React, { use, useEffect, useRef, useState } from "react"; import ConversationFrontend from "../components/ConversationFrontend"; import InputFrontend from "../components/InputFrontend"; import { sendToVoiceRecognition } from "./voice_backend" import { AudioRecorder } from "./AudioRecorder"; import axios from "axios"; import { resolve } from "path"; import { FFmpeg } from "@ffmpeg/ffmpeg"; import { fetchFile, toBlobURL } from "@ffmpeg/util" const InputOutputBackend: React.FC = () => { // # variables type Message = { role: string content: string } /* Variables for System-prompt */ const [preferredCurrency, setPreferredCurrency] = useState("") const [preferredLanguage, setPreferredLanguage] = useState("") const [timeFormat, setTimeFormat] = useState("") const [preferredMeasurement, setPreferredMeasurement] = useState("") const [timeZone, setTimeZone] = useState("") const [dateFormat, setDateFormat] = useState("") useEffect(() => { setPreferredCurrency(localStorage.getItem("preferredCurrency")) setPreferredLanguage(localStorage.getItem("preferredLanguage")) setTimeFormat(localStorage.getItem("timeFormat")) setPreferredMeasurement(localStorage.getItem("preferredMeasurement")) setTimeZone(localStorage.getItem("timeZone")) setDateFormat(localStorage.getItem("dateFormat")) }, [preferredCurrency, preferredLanguage, timeFormat, preferredMeasurement, timeZone, dateFormat]) const [copyClicked, setCopyClicked] = useState(false) const [accessToken, setAccessToken] = useState("") const postWorkerRef = useRef(null) const getWorkerRef = useRef(null) const [messages, setMessages] = useState([{ role: "system", content: `You are in the timezone: ${timeZone}. You use the time format ${timeFormat}. You use the date format ${dateFormat} for all references of dates. You use the ${preferredMeasurement} system. You use the currency ${preferredCurrency}. You will only answer in the language (you will receive the country code) ${preferredLanguage}. But in the case the user specifically states to answer in an other language do that speaking in a nother language is not stating you should answer in that language. Additionally do not translate your answer into multiple languages` }, { role: "assistant", content: "Hello! How can I help you?" }]) const [liveMessage, setLiveMessage] = useState("") const [inputMessage, setInputMessage] = useState("") const [inputDisabled, setInputDisabled] = useState(false) const [isRecording, setIsRecording] = useState(false) const mediaRecorderRef = useRef(null) const audioChunks = useRef([]) console.log(messages); useEffect(() => { getNewToken() postWorkerRef.current = new Worker(new URL("./threads/PostWorker.js", import.meta.url)) postWorkerRef.current.onmessage = (event) => { const status = event.data.status if (status == 200) { setInputDisabled(false) endGetWorker() } else if (status == 500) { setInputDisabled(false) if (getWorkerRef.current) { addMessage("assistant", "There was an Error with the AI response") getWorkerRef.current.postMessage("terminate") getWorkerRef.current.terminate() } } } return () => { if (postWorkerRef.current) { postWorkerRef.current.terminate() } if (getWorkerRef.current) { getWorkerRef.current.postMessage("terminate") getWorkerRef.current.terminate() } } }, []) const getNewToken = () => { console.log("getting access"); axios.get("http://localhost:5000/interstellar_ai/api/ai_create") .then(response => { setAccessToken(response.data.access_token) console.log(response.data.access_token); }) .catch(error => { console.log("error:", error.message); }) } const startGetWorker = () => { if (!getWorkerRef.current) { getWorkerRef.current = new Worker(new URL("./threads/GetWorker.js", import.meta.url)) getWorkerRef.current.postMessage({ action: "start", access_token: accessToken }) addMessage("assistant", "") getWorkerRef.current.onmessage = (event) => { const data = event.data if (event.data == "error") { setLiveMessage("error getting AI response: " + data.error) } else { console.log("Received data:", data); editLastMessage(data.response) } } getWorkerRef.current.onerror = (error) => { console.error("Worker error:", error) } } } const endGetWorker = () => { if (getWorkerRef.current) { getWorkerRef.current.postMessage({ action: "terminate" }) getWorkerRef.current.terminate() getWorkerRef.current = null console.log(messages); } } const editLastMessage = (newContent: string) => { if (newContent == "") { newContent = "Generating answer..." } setMessages((prevMessages) => { const updatedMessages = prevMessages.slice(); // Create a shallow copy of the current messages if (updatedMessages.length > 0) { const lastMessage = updatedMessages[updatedMessages.length - 1]; updatedMessages[updatedMessages.length - 1] = { ...lastMessage, // Keep the existing role and other properties content: newContent, // Update only the content }; } return updatedMessages; // Return the updated array }); }; const addMessage = (role: string, content: string) => { setMessages(previous => [...previous, { role, content }]) } const handleSendClick = (inputValue: string, override: boolean) => { if (inputValue != "") { if (!inputDisabled || override) { setInputDisabled(true) if (postWorkerRef.current) { addMessage("user", inputValue) console.log("input:", inputValue); postWorkerRef.current.postMessage({ messages: [...messages, { role: "user", content: inputValue }], ai_model: "llama3.2", access_token: accessToken }) startGetWorker() } } } } const startRecording = async (): Promise => { const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); const mediaRecorder = new MediaRecorder(stream); mediaRecorderRef.current = mediaRecorder; audioChunks.current = []; // Initialize audio chunks // Create a promise that resolves when the onstop event is done const stopRecordingPromise = new Promise((resolve) => { mediaRecorder.ondataavailable = (event) => { audioChunks.current.push(event.data); }; mediaRecorder.onstop = async () => { const audioBlob = new Blob(audioChunks.current, { type: "audio/ogg" }); audioChunks.current = []; const text_voice = await sendToVoiceRecognition(audioBlob); console.log(text_voice); resolve(text_voice); // Resolve the promise with the recognized text }; }); mediaRecorder.start(); setIsRecording(true); // Wait for the recording to stop and get the recognized text return stopRecordingPromise; }; const stopRecording = () => { mediaRecorderRef.current?.stop(); setIsRecording(false); }; const handleMicClick = async () => { if (!isRecording) { const recognizedText = await startRecording(); setInputMessage(recognizedText); // Set the recognized text after recording console.log("Set!") } else { stopRecording(); } }; const handleResendClick = () => { var temporary_message = messages[messages.length - 2]['content'] const updatedMessages = messages.slice(0, -2) setMessages(updatedMessages) endGetWorker() getNewToken() setInputDisabled(false) handleSendClick(temporary_message, true) } const handleEditClick = () => { setInputMessage(messages[messages.length - 2]['content']) const updatedMessages = messages.slice(0, -2) setMessages(updatedMessages) endGetWorker() getNewToken() setInputDisabled(false) } const handleCopyClick = async () => { setCopyClicked(false) try { await navigator.clipboard.writeText(messages[messages.length - 1]['content']); fadeCopyText() } catch (err) { console.error('Failed to copy: ', err); } } const wait = (time: number) => { return new Promise(resolve => setTimeout(resolve, time)); } const fadeCopyText = async () => { setCopyClicked(true) await wait(1000) setCopyClicked(false) } return ( <> ) } export default InputOutputBackend