🫨 #50

Merged
sageTheDm merged 1 commit from React-Group/interstellar_ai:main into main 2024-10-10 09:59:16 +02:00
2 changed files with 21 additions and 22 deletions

View file

@@ -12,9 +12,11 @@ import PrivacySettings from './PrivacySettings';
import FontSizeSetting from './FontSize';
import OpenSourceModeToggle from './OpenSourceToggle';
import {
changeHistory,
changeSettings,
createAccount,
deleteAccount,
getHistory,
} from '../../backend/database';
import ThemeDropdown from './DropDownTheme';
@@ -372,7 +374,11 @@ const Settings: React.FC<{ closeSettings: () => void; accountName: string }> = (
localStorage.setItem("currentEmail", useEmail)
alert('Account successfully changed!')
window.location.reload()
} else {
alert("failed to send settings")
}
} else {
alert("failed to create account")
}
}
};
@@ -624,6 +630,7 @@ const Settings: React.FC<{ closeSettings: () => void; accountName: string }> = (
onClick={handleLogout} // Function to call on click
className="update-credentials-button" // Custom styling class
/>
<p>WARNING: Will delete your chat history.</p>
<ButtonSetting
label="Update Credentials" // Button label
onClick={handleUpdateCredentials} // Function to call on click

View file

@@ -20,34 +20,29 @@ class AI:
for chunk in stream:
with return_class.ai_response_lock:
return_class.ai_response[access_token] += chunk['message']['content']
return_class.ai_response[access_token] += chunk["message"]["content"]
@staticmethod
def process_mistralai(model, messages, return_class, access_token, api_key):
client = Mistral(api_key=api_key)
stream_response = client.chat.stream(
model=model,
messages=messages
)
stream_response = client.chat.stream(model=model, messages=messages)
with return_class.ai_response_lock:
return_class.ai_response[access_token] = ""
for chunk in stream_response:
with return_class.ai_response_lock:
return_class.ai_response[access_token] += chunk.data.choices[0].delta.content
return_class.ai_response[access_token] += chunk.data.choices[
0
].delta.content
@staticmethod
def process_openai(model, messages, return_class, access_token, api_key):
client = OpenAI(api_key=api_key)
stream_response = client.chat.completions.create(
model=model,
messages=messages,
stream=True
model=model, messages=messages, stream=True
)
with return_class.ai_response_lock:
@@ -59,7 +54,6 @@ class AI:
@staticmethod
def process_anthropic(model, messages, return_class, access_token, api_key):
client = anthropic.Anthropic(api_key=api_key)
with return_class.ai_response_lock:
@@ -76,16 +70,15 @@ class AI:
@staticmethod
def process_google(model, messages, return_class, access_token, api_key):
message = messages[-1]['content']
message = messages[-1]["content"]
messages.pop()
for msg in messages:
msg['parts'] = msg.pop('content')
msg["parts"] = msg.pop()["content"]
for msg in messages:
if msg['role'] == 'assistant':
msg['role'] = 'model'
if msg["role"] == "assistant":
msg["role"] = "model"
genai.configure(api_key=api_key)
@@ -93,7 +86,6 @@ class AI:
chat = model.start_chat(
history=messages,
)
response = chat.send_message(message, stream=True)