forked from React-Group/interstellar_ai

ai.py comments

This commit is contained in:
parent 8a20c3f22f
commit 4debcc6fad

1 changed file with 25 additions and 10 deletions

py/ai.py (35 changed lines: +25 −10)
@@ -4,10 +4,11 @@ import google.generativeai as genai
 import anthropic
 import ollama


 class AI:
     @staticmethod
     def process_local(model, messages, return_class, access_token):
+        """Process chat messages using the Ollama model locally."""
+        # Stream the chat response from the Ollama model
         stream = ollama.chat(
             model=model,
             messages=messages,
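For reference, the streaming call this hunk documents can be exercised on its own. A minimal sketch, assuming a local Ollama daemon with an already-pulled model; the model name "llama3" is illustrative:

import ollama

# Stream a chat response chunk by chunk; each chunk is a dict whose
# ["message"]["content"] field carries the next piece of generated text.
stream = ollama.chat(
    model="llama3",  # assumption: any model already pulled locally
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=True,
    options={"temperature": 0.5},
)

text = ""
for chunk in stream:
    text += chunk["message"]["content"]
print(text)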
@@ -15,50 +16,61 @@ class AI:
             options={"temperature": 0.5},
         )

+        # Initialize the AI response for the given access token
         with return_class.ai_response_lock:
             return_class.ai_response[access_token] = ""

+        # Collect the response chunks and append to the response for the given access token
         for chunk in stream:
             with return_class.ai_response_lock:
                 return_class.ai_response[access_token] += chunk["message"]["content"]

     @staticmethod
     def process_mistralai(model, messages, return_class, access_token, api_key):
+        """Process chat messages using the Mistral AI model."""
         client = Mistral(api_key=api_key)

+        # Stream the chat response from the Mistral model
         stream_response = client.chat.stream(model=model, messages=messages)

+        # Initialize the AI response for the given access token
         with return_class.ai_response_lock:
             return_class.ai_response[access_token] = ""

+        # Collect the response chunks and append to the response for the given access token
         for chunk in stream_response:
             with return_class.ai_response_lock:
-                return_class.ai_response[access_token] += chunk.data.choices[
-                    0
-                ].delta.content
+                return_class.ai_response[access_token] += chunk.data.choices[0].delta.content

     @staticmethod
     def process_openai(model, messages, return_class, access_token, api_key):
+        """Process chat messages using the OpenAI model."""
         client = OpenAI(api_key=api_key)

+        # Stream the chat response from the OpenAI model
         stream_response = client.chat.completions.create(
             model=model, messages=messages, stream=True
         )

+        # Initialize the AI response for the given access token
         with return_class.ai_response_lock:
             return_class.ai_response[access_token] = ""

+        # Collect the response chunks and append to the response for the given access token
         for chunk in stream_response:
             with return_class.ai_response_lock:
                 return_class.ai_response[access_token] += chunk.choices[0].delta.content

     @staticmethod
     def process_anthropic(model, messages, return_class, access_token, api_key):
+        """Process chat messages using the Anthropic model."""
         client = anthropic.Anthropic(api_key=api_key)

+        # Initialize the AI response for the given access token
         with return_class.ai_response_lock:
             return_class.ai_response[access_token] = ""

+        # Stream the chat response from the Anthropic model
         with client.messages.stream(
             max_tokens=1024,
             model=model,
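None of these process_* methods return anything; they all append into return_class.ai_response under return_class.ai_response_lock, which suggests they are meant to run on worker threads while a caller polls the shared dict. A minimal sketch of that contract; the ResponseStore class, the token value, and the import path are hypothetical, and only the two attribute names come from the diff:

import threading

from ai import AI  # assumption: run from the py/ directory alongside ai.py


class ResponseStore:
    """Hypothetical stand-in for return_class."""

    def __init__(self):
        self.ai_response = {}                     # access_token -> accumulated text
        self.ai_response_lock = threading.Lock()  # guards concurrent access


store = ResponseStore()
token = "demo-token"  # illustrative access token

# Run the Ollama worker on a background thread; a caller could poll
# store.ai_response[token] while chunks are still arriving.
worker = threading.Thread(
    target=AI.process_local,
    args=(
        "llama3",                              # illustrative model name
        [{"role": "user", "content": "Hi!"}],  # chat history
        store,
        token,
    ),
)
worker.start()
worker.join()

with store.ai_response_lock:
    print(store.ai_response[token])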
@@ -70,24 +82,27 @@

     @staticmethod
     def process_google(model, messages, return_class, access_token, api_key):
-        message = messages[-1]["content"]
-        messages.pop()
+        """Process chat messages using the Google Generative AI model."""
+        message = messages[-1]["content"]  # Get the latest message content
+        messages.pop()  # Remove the latest message from the list

+        # Prepare messages for the Google Generative AI format
         for msg in messages:
             msg["parts"] = msg.pop("content")

+        # Change 'assistant' role to 'model' for compatibility
         for msg in messages:
             if msg["role"] == "assistant":
                 msg["role"] = "model"

+        # Configure the Google Generative AI client
         genai.configure(api_key=api_key)

+        # Start a chat session with the specified model and message history
         model = genai.GenerativeModel(model)
-        chat = model.start_chat(
-            history=messages,
-        )
+        chat = model.start_chat(history=messages)

+        # Send the message and stream the response
         response = chat.send_message(message, stream=True)
         for chunk in response:
             return_class.ai_response[access_token] += chunk.text
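The two loops at the top of process_google rewrite OpenAI-style messages ({"role", "content"}) into the {"role", "parts"} shape that google.generativeai expects, renaming the assistant role to model along the way. A standalone sketch of just that transform, with made-up history:

messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4."},
]

# Move each "content" value under the "parts" key Gemini chat history uses...
for msg in messages:
    msg["parts"] = msg.pop("content")

# ...and rename "assistant" to "model", the role name Gemini expects
# for model turns.
for msg in messages:
    if msg["role"] == "assistant":
        msg["role"] = "model"

print(messages)
# [{'role': 'user', 'parts': 'What is 2 + 2?'},
#  {'role': 'model', 'parts': '4.'}]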