Added online model.
This commit is contained in:
parent c4655fb49e
commit b3ae2625ac
5 changed files with 26 additions and 8 deletions
py/api.py (24 changed lines)
@@ -1,10 +1,20 @@
 import requests
 import json
 
+from transformers import AutoTokenizer, LlamaForCausalLM
+
 class API:
     @staticmethod
-    def process_text(prompt, model):
+    def process_text_transformers(prompt, model):
+        # Load the tokenizer from the checkpoint name first, before `model`
+        # is rebound to the loaded weights (from_pretrained expects a string).
+        tokenizer = AutoTokenizer.from_pretrained(model)
+        model = LlamaForCausalLM.from_pretrained(model)
+
+        inputs = tokenizer(prompt, return_tensors="pt")
+
+        generate_ids = model.generate(inputs.input_ids, max_length=30)
+        return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+    @staticmethod
+    def process_text_local(prompt, model):
         ollama_url = "http://localhost:11434"
 
         response = requests.post(
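The hunk above cuts off at the requests.post( call. As a point of reference, here is a minimal sketch of how process_text_local presumably completes, assuming it targets Ollama's documented /api/generate endpoint with a non-streaming request; the actual request body and response handling are not visible in this diff:

def process_text_local(prompt, model):
    ollama_url = "http://localhost:11434"

    # Ollama's generate endpoint; stream=False returns a single JSON object
    response = requests.post(
        f"{ollama_url}/api/generate",
        json={"model": model, "prompt": prompt, "stream": False},
    )
    response.raise_for_status()
    return response.json()["response"]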
@@ -26,13 +36,15 @@ class API:
 
     def send_message(self, message, model):
         if model == 1:
-            answer = self.process_text(message, "phi3.5")
+            answer = self.process_text_local(message, "phi3.5")
         elif model == 2:
-            answer = self.process_text(message, "gemma2:2b")
+            answer = self.process_text_local(message, "gemma2:2b")
         elif model == 3:
-            answer = self.process_text(message, "qwen2:0.5b")
+            answer = self.process_text_local(message, "qwen2:0.5b")
         elif model == 4:
-            answer = self.process_text(message, "codegemma:2b")
+            answer = self.process_text_local(message, "codegemma:2b")
+        elif model == 5:
+            answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
         else:
             return "Invalid choice"
         return answer
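After this change, choices 1 through 4 route to local Ollama models while choice 5 loads Meta-Llama-3.1-8B through transformers. A usage sketch, assuming API() takes no constructor arguments (its __init__ is not part of this diff):

api = API()

# 1-4 go through the local Ollama server; 5 loads the Hugging Face checkpoint
print(api.send_message("Explain recursion in one sentence.", 1))  # phi3.5 via Ollama
print(api.send_message("Explain recursion in one sentence.", 5))  # Llama 3.1 via transformers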