# interstellar_ai/py/ai.py

from mistralai import Mistral
from openai import OpenAI
import google.generativeai as genai
import anthropic
import ollama


class AI:
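    """Static helpers that stream chat completions from several providers.

    Each method writes incrementally into ``return_class.ai_response[access_token]``
    while holding ``return_class.ai_response_lock``, so another thread can poll
    the partial response as it arrives.
    """
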
    @staticmethod
    def process_local(model, messages, return_class, access_token):
        """Process chat messages using the Ollama model locally."""
        # Stream the chat response from the Ollama model
        stream = ollama.chat(
            model=model,
            messages=messages,
            stream=True,
            options={"temperature": 0.5},
        )

        # Initialize the AI response for the given access token
        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        # Collect the response chunks and append to the response for the given access token
        for chunk in stream:
            with return_class.ai_response_lock:
                return_class.ai_response[access_token] += chunk["message"]["content"]

    @staticmethod
    def process_mistralai(model, messages, return_class, access_token, api_key):
        """Process chat messages using the Mistral AI model."""
        client = Mistral(api_key=api_key)

        # Stream the chat response from the Mistral model
        stream_response = client.chat.stream(model=model, messages=messages)

        # Initialize the AI response for the given access token
        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        # Collect the response chunks and append to the response for the given access token
        for chunk in stream_response:
            with return_class.ai_response_lock:
                return_class.ai_response[access_token] += chunk.data.choices[0].delta.content

    @staticmethod
    def process_openai(model, messages, return_class, access_token, api_key):
        """Process chat messages using the OpenAI model."""
        client = OpenAI(api_key=api_key)

        # Stream the chat response from the OpenAI model
        stream_response = client.chat.completions.create(
            model=model, messages=messages, stream=True
        )

        # Initialize the AI response for the given access token
        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        # Collect the response chunks and append to the response for the given access token
        for chunk in stream_response:
            with return_class.ai_response_lock:
                # The final streamed chunk reports delta.content as None, so fall back to ""
                return_class.ai_response[access_token] += chunk.choices[0].delta.content or ""

    @staticmethod
    def process_anthropic(model, messages, return_class, access_token, api_key):
        """Process chat messages using the Anthropic model."""
        client = anthropic.Anthropic(api_key=api_key)

        # Initialize the AI response for the given access token
        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        # Stream the chat response from the Anthropic model
        with client.messages.stream(
            max_tokens=1024,
            model=model,
            messages=messages,
        ) as stream:
            for text in stream.text_stream:
                with return_class.ai_response_lock:
                    return_class.ai_response[access_token] += text

    @staticmethod
    def process_google(model, messages, return_class, access_token, api_key):
        """Process chat messages using the Google Generative AI model."""
        message = messages[-1]["content"]  # Get the latest message content
        messages.pop()  # Remove the latest message from the list

        # Prepare messages for the Google Generative AI format:
        # each message carries its text under 'parts' instead of 'content'
        for msg in messages:
            msg["parts"] = msg.pop("content")

        # Change 'assistant' role to 'model' for compatibility
        for msg in messages:
            if msg["role"] == "assistant":
                msg["role"] = "model"

        # Configure the Google Generative AI client
        genai.configure(api_key=api_key)

        # Start a chat session with the specified model and message history
        model = genai.GenerativeModel(model)
        chat = model.start_chat(history=messages)

        # Initialize the AI response for the given access token
        with return_class.ai_response_lock:
            return_class.ai_response[access_token] = ""

        # Send the message and stream the response
        response = chat.send_message(message, stream=True)
        for chunk in response:
            with return_class.ai_response_lock:
                return_class.ai_response[access_token] += chunk.text
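
# A minimal usage sketch: the methods above only assume that `return_class`
# exposes an `ai_response` dict and an `ai_response_lock`. `SharedResponse`
# below is a hypothetical stand-in for whatever object the calling server
# actually passes in.
if __name__ == "__main__":
    import threading
    import time

    class SharedResponse:
        def __init__(self):
            self.ai_response = {}
            self.ai_response_lock = threading.Lock()

    shared = SharedResponse()
    worker = threading.Thread(
        target=AI.process_local,
        # "llama3.2" is only an example tag; use any model pulled into Ollama
        args=("llama3.2", [{"role": "user", "content": "Hello!"}], shared, "token-1"),
    )
    worker.start()

    # While the worker streams, the partial response can be read under the lock
    while worker.is_alive():
        with shared.ai_response_lock:
            partial = shared.ai_response.get("token-1", "")
        print(f"\rreceived {len(partial)} chars", end="")
        time.sleep(0.2)

    worker.join()
    print("\n" + shared.ai_response["token-1"])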