from mistralai import Mistral
from openai import OpenAI
import google.generativeai as genai
import anthropic
import ollama
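
# All helpers below stream into a shared per-token buffer guarded by a
# lock. The concrete return_class is defined elsewhere in this project;
# a minimal sketch of the interface these methods rely on (the class
# name here is hypothetical, only the two attributes come from the code
# below):
#
#     import threading
#
#     class ResponseStore:
#         def __init__(self):
#             self.ai_response = {}  # access_token -> partial reply text
#             self.ai_response_lock = threading.Lock()
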
class AI:
    """Streaming helpers for several chat backends.

    Every method consumes an OpenAI-style message list
    ([{"role": ..., "content": ...}, ...]) and appends the streamed reply,
    chunk by chunk, to return_class.ai_response[access_token], taking
    return_class.ai_response_lock around each read-modify-write.
    """

    @staticmethod
    def process_local(model, messages, return_class, access_token):
        # Stream the reply from a locally running Ollama model.
        stream = ollama.chat(
            model=model,
            messages=messages,
            stream=True,
            options={"temperature": 0.5},
        )

        # Reset the buffer for this token before streaming begins.
        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        for chunk in stream:
            with return_class.ai_response_lock:
                return_class.ai_response[access_token] += chunk["message"]["content"]
    @staticmethod
    def process_mistralai(model, messages, return_class, access_token, api_key):
        client = Mistral(api_key=api_key)

        stream_response = client.chat.stream(model=model, messages=messages)

        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        for chunk in stream_response:
            # Guard against events that carry no text so that appending
            # None cannot raise.
            content = chunk.data.choices[0].delta.content or ""
            with return_class.ai_response_lock:
                return_class.ai_response[access_token] += content
    @staticmethod
    def process_openai(model, messages, return_class, access_token, api_key):
        client = OpenAI(api_key=api_key)

        stream_response = client.chat.completions.create(
            model=model, messages=messages, stream=True
        )

        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        for chunk in stream_response:
            # The final chunk has delta.content set to None; fall back to
            # an empty string so the append cannot raise a TypeError.
            content = chunk.choices[0].delta.content or ""
            with return_class.ai_response_lock:
                return_class.ai_response[access_token] += content
    @staticmethod
    def process_anthropic(model, messages, return_class, access_token, api_key):
        client = anthropic.Anthropic(api_key=api_key)

        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        # Anthropic's SDK exposes streaming as a context manager whose
        # text_stream yields plain text deltas.
        with client.messages.stream(
            max_tokens=1024,
            model=model,
            messages=messages,
        ) as stream:
            for text in stream.text_stream:
                with return_class.ai_response_lock:
                    return_class.ai_response[access_token] += text
    @staticmethod
    def process_google(model, messages, return_class, access_token, api_key):
        # Gemini takes the newest user message separately from the history.
        message = messages.pop()["content"]

        # Convert the OpenAI-style history to Gemini's format: rename
        # "content" to "parts" and the "assistant" role to "model".
        for msg in messages:
            msg["parts"] = msg.pop("content")
            if msg["role"] == "assistant":
                msg["role"] = "model"

        genai.configure(api_key=api_key)

        gen_model = genai.GenerativeModel(model)

        chat = gen_model.start_chat(
            history=messages,
        )

        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        response = chat.send_message(message, stream=True)
        for chunk in response:
            with return_class.ai_response_lock:
                return_class.ai_response[access_token] += chunk.text
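

if __name__ == "__main__":
    # Minimal smoke test: a sketch, not part of the server. It assumes a
    # locally running Ollama daemon with the "llama3" model pulled; swap in
    # any model you have. It mimics the real caller: the stream runs in a
    # worker thread while this thread polls the shared buffer.
    import threading
    import time

    class _Store:  # hypothetical stand-in for the project's return_class
        def __init__(self):
            self.ai_response = {}
            self.ai_response_lock = threading.Lock()

    store = _Store()
    token = "demo-token"
    worker = threading.Thread(
        target=AI.process_local,
        args=("llama3", [{"role": "user", "content": "Say hi."}], store, token),
    )
    worker.start()

    while worker.is_alive():
        time.sleep(0.5)
        with store.ai_response_lock:
            print(store.ai_response.get(token, ""))
    worker.join()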