diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f7275bb
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+venv/
diff --git a/py/.idea/.gitignore b/py/.idea/.gitignore
new file mode 100644
index 0000000..26d3352
--- /dev/null
+++ b/py/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/py/.idea/inspectionProfiles/profiles_settings.xml b/py/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/py/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/py/.idea/misc.xml b/py/.idea/misc.xml
new file mode 100644
index 0000000..f5d7485
--- /dev/null
+++ b/py/.idea/misc.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Black">
+    <option name="sdkName" value="Python 3.12" />
+  </component>
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (py)" project-jdk-type="Python SDK" />
+</project>
\ No newline at end of file
diff --git a/py/.idea/modules.xml b/py/.idea/modules.xml
new file mode 100644
index 0000000..3a65488
--- /dev/null
+++ b/py/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/py.iml" filepath="$PROJECT_DIR$/.idea/py.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/py/.idea/py.iml b/py/.idea/py.iml
new file mode 100644
index 0000000..451946f
--- /dev/null
+++ b/py/.idea/py.iml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="Python 3.12 (py)" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/py/.idea/vcs.xml b/py/.idea/vcs.xml
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ b/py/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/py/api.py b/py/api.py
index 09373b5..9e6b40d 100644
--- a/py/api.py
+++ b/py/api.py
@@ -1,11 +1,21 @@
 import requests
 import json
+from transformers import AutoTokenizer, LlamaForCausalLM
 
+class API:
+    @staticmethod
+    def process_text_transformers(prompt, model):
+        tokenizer = AutoTokenizer.from_pretrained(model)
+        model = LlamaForCausalLM.from_pretrained(model)
 
-class api:
-    def initialize_ollama(self, prompt):
+        inputs = tokenizer(prompt, return_tensors="pt")
+
+        generate_ids = model.generate(inputs.input_ids, max_length=30)
+        return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+    @staticmethod
+    def process_text_local(prompt, model):
         ollama_url = "http://localhost:11434"
-        model = "phi3.5"
 
         response = requests.post(
             f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt}
@@ -24,6 +34,17 @@ class api:
         else:
             return "Error: " + response.text
 
-    def send_message(self, message):
-        answer = self.initialize_ollama(message)
+    def send_message(self, message, model):
+        if model == 1:
+            answer = self.process_text_local(message, "phi3.5")
+        elif model == 2:
+            answer = self.process_text_local(message, "gemma2:2b")
+        elif model == 3:
+            answer = self.process_text_local(message, "qwen2:0.5b")
+        elif model == 4:
+            answer = self.process_text_local(message, "codegemma:2b")
+        elif model == 5:
+            answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
+        else:
+            return "Invalid choice"
         return answer
diff --git a/py/venv.sh b/py/venv.sh
new file mode 100644
index 0000000..4a3be2f
--- /dev/null
+++ b/py/venv.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+python3 -m venv venv
+source venv/bin/activate
+pip install transformers torch requests