From 7cdcc35a2a5390d70d8c017a4aacb1a29c046912 Mon Sep 17 00:00:00 2001
From: Patrick_Pluto
Date: Mon, 16 Sep 2024 10:29:26 +0200
Subject: [PATCH 1/3] First AI API.

---
 py/api.py | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/py/api.py b/py/api.py
index e5ffba9..09373b5 100644
--- a/py/api.py
+++ b/py/api.py
@@ -1,7 +1,29 @@
 import requests
+import json
+
 
 class api:
+    def initialize_ollama(self, prompt):
+        ollama_url = "http://localhost:11434"
+        model = "phi3.5"
 
-    def send_message(message):
-        answer = "Test"
-        return answer
\ No newline at end of file
+        response = requests.post(
+            f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt}
+        )
+
+        if response.status_code == 200:
+            response_data = []
+            for line in response.iter_lines():
+                line_decoded = line.decode("utf-8")
+                line_raw = json.loads(line_decoded)
+                response_data.append(line_raw["response"])
+
+            final_response = "".join(response_data)
+            generated_text = final_response.splitlines()[-1]
+            return generated_text
+        else:
+            return "Error: " + response.text
+
+    def send_message(self, message):
+        answer = self.initialize_ollama(message)
+        return answer
-- 
2.39.5

From c4655fb49e85bfc4a0a91019b81e5f3e745e4557 Mon Sep 17 00:00:00 2001
From: Patrick_Pluto
Date: Mon, 16 Sep 2024 11:27:00 +0200
Subject: [PATCH 2/3] Expanded AI API.

---
 py/.idea/.gitignore                          |  3 +++
 .../inspectionProfiles/profiles_settings.xml |  6 ++++++
 py/.idea/misc.xml                            |  7 +++++++
 py/.idea/modules.xml                         |  8 ++++++++
 py/.idea/py.iml                              |  8 ++++++++
 py/.idea/vcs.xml                             |  6 ++++++
 py/api.py                                    | 19 ++++++++++++++-----
 7 files changed, 52 insertions(+), 5 deletions(-)
 create mode 100644 py/.idea/.gitignore
 create mode 100644 py/.idea/inspectionProfiles/profiles_settings.xml
 create mode 100644 py/.idea/misc.xml
 create mode 100644 py/.idea/modules.xml
 create mode 100644 py/.idea/py.iml
 create mode 100644 py/.idea/vcs.xml

diff --git a/py/.idea/.gitignore b/py/.idea/.gitignore
new file mode 100644
index 0000000..26d3352
--- /dev/null
+++ b/py/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/py/.idea/inspectionProfiles/profiles_settings.xml b/py/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/py/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/.idea/misc.xml b/py/.idea/misc.xml
new file mode 100644
index 0000000..db8786c
--- /dev/null
+++ b/py/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/.idea/modules.xml b/py/.idea/modules.xml
new file mode 100644
index 0000000..3a65488
--- /dev/null
+++ b/py/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/.idea/py.iml b/py/.idea/py.iml
new file mode 100644
index 0000000..d0876a7
--- /dev/null
+++ b/py/.idea/py.iml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/.idea/vcs.xml b/py/.idea/vcs.xml
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ b/py/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/api.py b/py/api.py
index 09373b5..f1df05c 100644
--- a/py/api.py
+++ b/py/api.py
@@ -2,10 +2,10 @@ import requests
 import json
 
 
-class api:
-    def initialize_ollama(self, prompt):
+class API:
+    @staticmethod
+    def process_text(prompt, model):
         ollama_url = "http://localhost:11434"
-        model = "phi3.5"
 
         response = requests.post(
             f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt}
@@ -24,6 +24,15 @@ class api:
         else:
             return "Error: " + response.text
 
-    def send_message(self, message):
-        answer = self.initialize_ollama(message)
+    def send_message(self, message, model):
+        if model == 1:
+            answer = self.process_text(message, "phi3.5")
+        elif model == 2:
+            answer = self.process_text(message, "gemma2:2b")
+        elif model == 3:
+            answer = self.process_text(message, "qwen2:0.5b")
+        elif model == 4:
+            answer = self.process_text(message, "codegemma:2b")
+        else:
+            return "Invalid choice"
         return answer
-- 
2.39.5

From b3ae2625ac25b32f6315b71efb643b8024ffef8a Mon Sep 17 00:00:00 2001
From: Patrick_Pluto
Date: Mon, 16 Sep 2024 11:44:35 +0200
Subject: [PATCH 3/3] Added online model.

---
 .gitignore        |  1 +
 py/.idea/misc.xml |  2 +-
 py/.idea/py.iml   |  2 +-
 py/api.py         | 24 ++++++++++++++++++------
 py/venv.sh        |  5 +++++
 5 files changed, 26 insertions(+), 8 deletions(-)
 create mode 100644 .gitignore
 create mode 100644 py/venv.sh

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f7275bb
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+venv/
diff --git a/py/.idea/misc.xml b/py/.idea/misc.xml
index db8786c..f5d7485 100644
--- a/py/.idea/misc.xml
+++ b/py/.idea/misc.xml
@@ -3,5 +3,5 @@
 
 
 
-
+
 
\ No newline at end of file
diff --git a/py/.idea/py.iml b/py/.idea/py.iml
index d0876a7..451946f 100644
--- a/py/.idea/py.iml
+++ b/py/.idea/py.iml
@@ -2,7 +2,7 @@
 
 
 
-
+
 
 
 
\ No newline at end of file
diff --git a/py/api.py b/py/api.py
index f1df05c..9e6b40d 100644
--- a/py/api.py
+++ b/py/api.py
@@ -1,10 +1,20 @@
 import requests
 import json
-
+from transformers import AutoTokenizer, LlamaForCausalLM
 
 class API:
     @staticmethod
-    def process_text(prompt, model):
+    def process_text_transformers(prompt, model):
+        model = LlamaForCausalLM.from_pretrained(model)
+        tokenizer = AutoTokenizer.from_pretrained(model)
+
+        inputs = tokenizer(prompt, return_tensors="pt")
+
+        generate_ids = model.generate(inputs.input_ids, max_length=30)
+        return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+    @staticmethod
+    def process_text_local(prompt, model):
         ollama_url = "http://localhost:11434"
 
         response = requests.post(
@@ -26,13 +36,15 @@ class API:
 
     def send_message(self, message, model):
         if model == 1:
-            answer = self.process_text(message, "phi3.5")
+            answer = self.process_text_local(message, "phi3.5")
         elif model == 2:
-            answer = self.process_text(message, "gemma2:2b")
+            answer = self.process_text_local(message, "gemma2:2b")
         elif model == 3:
-            answer = self.process_text(message, "qwen2:0.5b")
+            answer = self.process_text_local(message, "qwen2:0.5b")
         elif model == 4:
-            answer = self.process_text(message, "codegemma:2b")
+            answer = self.process_text_local(message, "codegemma:2b")
+        elif model == 5:
+            answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
         else:
             return "Invalid choice"
         return answer
diff --git a/py/venv.sh b/py/venv.sh
new file mode 100644
index 0000000..4a3be2f
--- /dev/null
+++ b/py/venv.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+virtualenv venv
+source venv/bin/activate
+pip install transformers
-- 
2.39.5