FIXED THE MERGE CONFLICT (THANKS)
parent e1df8869fb
commit af1353a41a

5 changed files with 91 additions and 27 deletions
py/api.py (54 changed lines)
@@ -1,19 +1,44 @@
import requests
import json
from transformers import AutoTokenizer, LlamaForCausalLM

from gradio_client import Client
import os
from mistralai import Mistral

class API:
    # This method processes a message via transformers. (NOT FINISHED!)
    @staticmethod
    def process_text_transformers(prompt, model):
        model = LlamaForCausalLM.from_pretrained(model)
        tokenizer = AutoTokenizer.from_pretrained(model)
    def process_text_mistralai(prompt, model, system):
        with open("token.txt", "r") as f:
            token = f.readlines()[0].strip()

        inputs = tokenizer(prompt, return_tensors="pt")
        api_key = token

        generate_ids = model.generate(inputs.input_ids, max_length=30)
        return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        client = Mistral(api_key=api_key)

        chat_response = client.chat.complete(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }, {
                    "role": "system",
                    "content": system,
                },
            ]
        )
        return chat_response.choices[0].message.content
    @staticmethod
    def process_text_gradio(prompt, model, system):
        client = Client(model)
        result = client.predict(
            message=prompt,
            system_message=system,
            max_tokens=512,
            temperature=0.7,
            top_p=0.95,
            api_name="/chat"
        )
        return result;

    # This method processes a message via ollama
    @staticmethod
@@ -38,18 +63,19 @@ class API:

    # This method sends a message to a certain AI.

    def send_message(self, message, model, system):
        if model == 1:
            answer = self.process_text_local(message, "phi3.5", system)
        elif model == 2:
            answer = self.process_text_local(message, "gemma2:2b", system)
            answer = self.process_text_local(message, "gemma2:9b", system)
        elif model == 3:
            answer = self.process_text_local(message, "qwen2:0.5b", system)
        elif model == 4:
            answer = self.process_text_local(message, "codegemma:2b", system)
        elif model == 4:
            answer = self.process_text_gradio(message, "PatrickPluto/InterstellarAIChatbot", system)
        elif model == 5:
            answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
            answer = self.process_text_mistralai(message, "mistral-large-latest", system)
        elif model == 6:
            answer = self.process_text_mistralai(message, "codestral-latest", system)
        else:
            return "Invalid choice"
        return answer
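The change set above swaps the unfinished transformers helper for a Mistral-backed process_text_mistralai and points the later send_message options at it. As a quick sanity check outside the app, the same call chain can be replayed directly; this is a minimal sketch, assuming a valid API key on the first line of token.txt (as process_text_mistralai expects) and the "mistral-large-latest" model that the dispatcher selects for option 5. The prompt and system strings are placeholder examples only:

from mistralai import Mistral

# Read the API key the same way process_text_mistralai does: first line of token.txt.
with open("token.txt", "r") as f:
    api_key = f.readlines()[0].strip()

client = Mistral(api_key=api_key)

# One user message plus a system message, mirroring the messages list in the diff.
chat_response = client.chat.complete(
    model="mistral-large-latest",
    messages=[
        {"role": "user", "content": "Hello, which model am I talking to?"},
        {"role": "system", "content": "You are a helpful assistant."},
    ],
)
print(chat_response.choices[0].message.content)

Through the class itself the equivalent call would be roughly API().send_message("Hello, which model am I talking to?", 5, "You are a helpful assistant."), assuming API is constructed with no arguments as its definition suggests.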