@@ -2,7 +2,7 @@
 import React, { use, useEffect, useRef, useState } from "react";
 import ConversationFrontend from "../components/ConversationFrontend";
 import InputFrontend from "../components/InputFrontend";
-import VoiceSend from "./voice_backend"
+import { sendToVoiceRecognition } from "./voice_backend"
 import { AudioRecorder } from "./AudioRecorder";
 import axios from "axios";
 import { resolve } from "path";
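
The only functional change in this hunk is the import: the default-exported `VoiceSend` class is replaced by the named `sendToVoiceRecognition` helper. The `./voice_backend` module itself is not part of this diff, so the following is only a hedged sketch of what a helper with that shape could look like; the endpoint path, form field name, and response shape are all assumptions, not the project's actual code.

// voice_backend.ts -- illustrative sketch only, not the code from this commit.
// Assumptions: the backend exposes POST /api/voice_recognition, accepts a
// multipart form field named "audio", and answers with { response: string }.
import axios from "axios";

export async function sendToVoiceRecognition(audioBlob: Blob): Promise<string> {
    const formData = new FormData();
    formData.append("audio", audioBlob, "recording.ogg"); // matches the audio/ogg blob built in startRecording

    try {
        const result = await axios.post<{ response: string }>("/api/voice_recognition", formData, {
            headers: { "Content-Type": "multipart/form-data" },
        });
        return result.data.response;
    } catch (error) {
        console.error("Voice recognition request failed:", error);
        return "";
    }
}
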
@@ -17,36 +17,48 @@ const InputOutputBackend: React.FC = () => {
         content: string
     }

     /* Variables for System-prompt */
-    const [preferredCurrency, setPreferredCurrency] = useState<string | null>("")
-    const [preferredLanguage, setPreferredLanguage] = useState<string | null>("")
-    const [timeFormat, setTimeFormat] = useState<string | null>("")
-    const [preferredMeasurement, setPreferredMeasurement] = useState<string | null>("")
-    const [timeZone, setTimeZone] = useState<string | null>("")
-    const [dateFormat, setDateFormat] = useState<string | null>("")
+    const [preferredCurrency, setPreferredCurrency] = useState<string | null>(null);
+    const [preferredLanguage, setPreferredLanguage] = useState<string | null>(null);
+    const [timeFormat, setTimeFormat] = useState<string | null>(null);
+    const [preferredMeasurement, setPreferredMeasurement] = useState<string | null>(null);
+    const [timeZone, setTimeZone] = useState<string | null>(null);
+    const [dateFormat, setDateFormat] = useState<string | null>(null);
+    const [messages, setMessages] = useState<Message[]>([]);

     useEffect(() => {
-        setPreferredCurrency(localStorage.getItem("preferredCurrency"))
-        setPreferredLanguage(localStorage.getItem("preferredLanguage"))
-        setTimeFormat(localStorage.getItem("timeFormat"))
-        setPreferredMeasurement(localStorage.getItem("preferredMeasurement"))
-        setTimeZone(localStorage.getItem("timeZone"))
-        setDateFormat(localStorage.getItem("dateFormat"))
-    }, [preferredCurrency, preferredLanguage, timeFormat, preferredMeasurement, timeZone, dateFormat])
+        setPreferredCurrency(localStorage.getItem("preferredCurrency"));
+        setPreferredLanguage(localStorage.getItem("preferredLanguage"));
+        setTimeFormat(localStorage.getItem("timeFormat"));
+        setPreferredMeasurement(localStorage.getItem("preferredMeasurement"));
+        setTimeZone(localStorage.getItem("timeZone"));
+        setDateFormat(localStorage.getItem("dateFormat"));
+    }, []);
+
+    useEffect(() => {
+        if (preferredCurrency && preferredLanguage && timeFormat && dateFormat && preferredMeasurement && timeZone) {
+            setMessages([
+                {
+                    role: "system",
+                    content: `You are in the timezone: ${timeZone}.
+                    You use the time format ${timeFormat}.
+                    You use the date format ${dateFormat} for all references of dates.
+                    You use the ${preferredMeasurement} system.
+                    You use the currency ${preferredCurrency}.
+                    You will only answer in the language (you will receive the country code) ${preferredLanguage}.
+                    But in the case the user specifically states to answer in another language, do that. Speaking in
+                    another language is not stating you should answer in that language.
+                    Additionally, under no circumstances translate your answer into multiple languages.`,
+                },
+                { role: "assistant", content: "Hello! How can I help you?" },
+            ]);
+        }
+    }, [preferredCurrency, preferredLanguage, timeFormat, dateFormat, preferredMeasurement, timeZone]);

     const [copyClicked, setCopyClicked] = useState(false)
     const [accessToken, setAccessToken] = useState("")
     const postWorkerRef = useRef<Worker | null>(null)
     const getWorkerRef = useRef<Worker | null>(null)
-    const [messages, setMessages] = useState<Message[]>([{ role: "system",
-        content: `You are in the timezone: ${timeZone}.
-        You use the time format ${timeFormat}.
-        You use the date format ${dateFormat} for all references of dates.
-        You use the ${preferredMeasurement} system. You use the currency ${preferredCurrency}.
-        You will only answer in the language (you will receive the country code) ${preferredLanguage}.
-        But in the case the user specifically states to answer in an other language do that speaking in a
-        nother language is not stating you should answer in that language. Additionally do not translate your answer into multiple languages`
-    },{ role: "assistant", content: "Hello! How can I help you?" }])
     const [liveMessage, setLiveMessage] = useState("")
     const [inputMessage, setInputMessage] = useState<string>("")
     const [inputDisabled, setInputDisabled] = useState(false)
@@ -169,40 +181,46 @@ const InputOutputBackend: React.FC = () => {
         }
     }

-    const startRecording = async () => {
-        const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
-        const mediaRecorder = new MediaRecorder(stream)
-        mediaRecorderRef.current = mediaRecorder
+    const startRecording = async (): Promise<string> => {
+        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+        const mediaRecorder = new MediaRecorder(stream);
+        mediaRecorderRef.current = mediaRecorder;

+        audioChunks.current = []; // Initialize audio chunks
+
+        // Create a promise that resolves when the onstop event is done
+        const stopRecordingPromise = new Promise<string>((resolve) => {
         mediaRecorder.ondataavailable = (event) => {
-            audioChunks.current.push(event.data)
-        }
+            audioChunks.current.push(event.data);
+        };

         mediaRecorder.onstop = async () => {
-            const audioBlob = new Blob(audioChunks.current, { type: "audio/ogg" })
-            audioChunks.current = []
-            // console.log(audioBlob);
-            // const url = URL.createObjectURL(audioBlob)
-            // const audio = new Audio(url);
-            // audio.play().catch(error => console.error("Error playing audio:", error));
+            const audioBlob = new Blob(audioChunks.current, { type: "audio/ogg" });
+            audioChunks.current = [];

-            const remote = new VoiceSend()
-            remote.sendToVoiceRecognition(audioBlob)
-        }
+            const text_voice = await sendToVoiceRecognition(audioBlob);
+            console.log(text_voice);
+            resolve(text_voice); // Resolve the promise with the recognized text
+        };
+        });

-        mediaRecorder.start()
-        setIsRecording(true)
-    }
+        mediaRecorder.start();
+        setIsRecording(true);
+
+        // Wait for the recording to stop and get the recognized text
+        return stopRecordingPromise;
+    };

     const stopRecording = () => {
-        mediaRecorderRef.current?.stop()
-        setIsRecording(false)
-    }
+        mediaRecorderRef.current?.stop();
+        setIsRecording(false);
+    };

-    const handleMicClick = () => {
+    const handleMicClick = async () => {
         if (!isRecording) {
-            startRecording();
+            const recognizedText = await startRecording();
+            setInputMessage(recognizedText); // Set the recognized text after recording
+            console.log("Set!")
         } else {
             stopRecording();
         }
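
For readers skimming the new startRecording: the promise it returns settles only when stopRecording() makes the recorder fire onstop, which is why handleMicClick can await the transcript across two clicks of the mic button. Below is a condensed, self-contained sketch of that promise-wrapping pattern, with a hypothetical transcribe() standing in for sendToVoiceRecognition; it illustrates the idea and is not code from this commit.

// Sketch of the pattern used above; transcribe() is a placeholder, not the real backend call.
async function transcribe(blob: Blob): Promise<string> {
    return `received ${blob.size} bytes`; // the real helper would POST the blob and return the transcript
}

function createRecorder(stream: MediaStream) {
    const recorder = new MediaRecorder(stream);
    const chunks: Blob[] = [];

    // start() resolves only after stop() triggers the recorder's onstop handler.
    const start = (): Promise<string> =>
        new Promise<string>((resolve) => {
            recorder.ondataavailable = (event) => chunks.push(event.data);
            recorder.onstop = async () => {
                const blob = new Blob(chunks, { type: "audio/ogg" });
                chunks.length = 0; // reset for the next recording
                resolve(await transcribe(blob));
            };
            recorder.start();
        });

    const stop = () => recorder.stop();
    return { start, stop };
}

// Usage, mirroring handleMicClick: the first click starts recording and awaits the text;
// a later call to stop() (the second click) is what lets that await complete.
async function onMicClick(recorder: ReturnType<typeof createRecorder>, isRecording: boolean) {
    if (!isRecording) {
        const text = await recorder.start(); // resolves once stop() has been called
        console.log(text);
    } else {
        recorder.stop();
    }
}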