diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f7275bb
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+venv/
diff --git a/py/.idea/.gitignore b/py/.idea/.gitignore
new file mode 100644
index 0000000..26d3352
--- /dev/null
+++ b/py/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/py/.idea/inspectionProfiles/profiles_settings.xml b/py/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/py/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/py/.idea/misc.xml b/py/.idea/misc.xml
new file mode 100644
index 0000000..f5d7485
--- /dev/null
+++ b/py/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/.idea/modules.xml b/py/.idea/modules.xml
new file mode 100644
index 0000000..3a65488
--- /dev/null
+++ b/py/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/py.iml" filepath="$PROJECT_DIR$/.idea/py.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/py/.idea/py.iml b/py/.idea/py.iml
new file mode 100644
index 0000000..451946f
--- /dev/null
+++ b/py/.idea/py.iml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/py/.idea/vcs.xml b/py/.idea/vcs.xml
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ b/py/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/py/api.py b/py/api.py
index e5ffba9..9e6b40d 100644
--- a/py/api.py
+++ b/py/api.py
@@ -1,7 +1,50 @@
import requests
+import json
+from transformers import AutoTokenizer, LlamaForCausalLM
-class api:
+class API:
+ @staticmethod
+ def process_text_transformers(prompt, model):
+        # Load the tokenizer while `model` still holds the checkpoint name,
+        # then replace it with the loaded model object.
+        tokenizer = AutoTokenizer.from_pretrained(model)
+        model = LlamaForCausalLM.from_pretrained(model)
- def send_message(message):
- answer = "Test"
- return answer
\ No newline at end of file
+ inputs = tokenizer(prompt, return_tensors="pt")
+
+        # max_length counts prompt tokens plus generated tokens, so 30 keeps the reply short.
+        generate_ids = model.generate(inputs.input_ids, max_length=30)
+ return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+ @staticmethod
+ def process_text_local(prompt, model):
+        # Address of a locally running Ollama server (default port 11434).
+        ollama_url = "http://localhost:11434"
+
+ response = requests.post(
+ f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt}
+ )
+
+        if response.status_code == 200:
+            # Ollama streams the generation as newline-delimited JSON; collect
+            # the "response" fragment from each chunk and join them into the
+            # full completion.
+            response_data = []
+            for line in response.iter_lines():
+                if not line:
+                    continue
+                line_raw = json.loads(line.decode("utf-8"))
+                response_data.append(line_raw.get("response", ""))
+
+            final_response = "".join(response_data)
+            return final_response
+        else:
+            return "Error: " + response.text
+
+    # Maps a numeric menu choice onto a concrete model: options 1-4 run
+    # locally through Ollama, option 5 loads Llama 3.1 via transformers.
+    def send_message(self, message, model):
+ if model == 1:
+ answer = self.process_text_local(message, "phi3.5")
+ elif model == 2:
+ answer = self.process_text_local(message, "gemma2:2b")
+ elif model == 3:
+ answer = self.process_text_local(message, "qwen2:0.5b")
+ elif model == 4:
+ answer = self.process_text_local(message, "codegemma:2b")
+ elif model == 5:
+ answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
+ else:
+ return "Invalid choice"
+ return answer
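+
+
+# Minimal usage sketch (illustrative only, not called elsewhere in this
+# module); it assumes an Ollama server is running on http://localhost:11434
+# and that the chosen model has already been pulled.
+if __name__ == "__main__":
+    api = API()
+    # Choice 2 maps to "gemma2:2b" in send_message above.
+    print(api.send_message("Say hello in one short sentence.", 2))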
diff --git a/py/venv.sh b/py/venv.sh
new file mode 100644
index 0000000..4a3be2f
--- /dev/null
+++ b/py/venv.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+virtualenv venv
+source venv/bin/activate
+pip install requests transformers torch
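+
+# The local models used by py/api.py are served by Ollama (https://ollama.com);
+# if an Ollama server is installed, the referenced models can be pulled first,
+# for example:
+# ollama pull phi3.5
+# ollama pull gemma2:2b
+# ollama pull qwen2:0.5b
+# ollama pull codegemma:2b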