import json

import requests
from transformers import AutoTokenizer, LlamaForCausalLM


class API:
    # This method processes a message via transformers.
    @staticmethod
    def process_text_transformers(prompt, model_name):
        # Load the model and tokenizer from the same checkpoint. Keeping the
        # checkpoint name in its own variable avoids shadowing it with the
        # loaded model object before the tokenizer is created.
        model = LlamaForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        inputs = tokenizer(prompt, return_tensors="pt")
        generate_ids = model.generate(inputs.input_ids, max_length=30)
        return tokenizer.batch_decode(
            generate_ids,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False,
        )[0]

    # This method processes a message via Ollama.
    @staticmethod
    def process_text_local(prompt, model, system):
        ollama_url = "http://localhost:11434"
        response = requests.post(
            f"{ollama_url}/api/generate",
            json={"model": model, "prompt": prompt, "system": system},
        )
        if response.status_code == 200:
            # Ollama streams its answer as one JSON object per line; collect
            # the "response" fragment from each line and join them into the
            # full reply.
            response_data = []
            for line in response.iter_lines():
                line_raw = json.loads(line.decode("utf-8"))
                response_data.append(line_raw["response"])
            return "".join(response_data)
        return "Error: " + response.text

    # This method sends a message to the model selected by its numeric id.
    def send_message(self, message, model, system):
        if model == 1:
            answer = self.process_text_local(message, "phi3.5", system)
        elif model == 2:
            answer = self.process_text_local(message, "gemma2:2b", system)
        elif model == 3:
            answer = self.process_text_local(message, "qwen2:0.5b", system)
        elif model == 4:
            answer = self.process_text_local(message, "codegemma:2b", system)
        elif model == 5:
            answer = self.process_text_transformers(
                message, "meta-llama/Meta-Llama-3.1-8B"
            )
        else:
            return "Invalid choice"
        return answer
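
# Usage sketch (an illustrative addition, not part of the original module):
# exercises send_message() against a locally running Ollama instance. This
# assumes Ollama is listening on localhost:11434 and the "phi3.5" model has
# already been pulled; the prompt and system text below are placeholders.
if __name__ == "__main__":
    api = API()
    reply = api.send_message(
        "Explain what a context manager is in one sentence.",
        model=1,  # 1 -> "phi3.5" via Ollama (see send_message)
        system="You are a concise Python tutor.",
    )
    print(reply)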