diff --git a/py/.idea/.gitignore b/py/.idea/.gitignore
new file mode 100644
index 0000000..26d3352
--- /dev/null
+++ b/py/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/py/.idea/inspectionProfiles/profiles_settings.xml b/py/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/py/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/py/.idea/misc.xml b/py/.idea/misc.xml
new file mode 100644
index 0000000..db8786c
--- /dev/null
+++ b/py/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/.idea/modules.xml b/py/.idea/modules.xml
new file mode 100644
index 0000000..3a65488
--- /dev/null
+++ b/py/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/py.iml" filepath="$PROJECT_DIR$/.idea/py.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/py/.idea/py.iml b/py/.idea/py.iml
new file mode 100644
index 0000000..d0876a7
--- /dev/null
+++ b/py/.idea/py.iml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/py/.idea/vcs.xml b/py/.idea/vcs.xml
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ b/py/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/py/api.py b/py/api.py
index 09373b5..f1df05c 100644
--- a/py/api.py
+++ b/py/api.py
@@ -2,10 +2,10 @@ import requests
 import json
 
 
-class api:
-    def initialize_ollama(self, prompt):
+class API:
+    @staticmethod
+    def process_text(prompt, model):
         ollama_url = "http://localhost:11434"
-        model = "phi3.5"
         response = requests.post(
             f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt}
         )
@@ -24,6 +24,15 @@ class api:
         else:
             return "Error: " + response.text
 
-    def send_message(self, message):
-        answer = self.initialize_ollama(message)
+    def send_message(self, message, model):
+        if model == 1:
+            answer = self.process_text(message, "phi3.5")
+        elif model == 2:
+            answer = self.process_text(message, "gemma2:2b")
+        elif model == 3:
+            answer = self.process_text(message, "qwen2:0.5b")
+        elif model == 4:
+            answer = self.process_text(message, "codegemma:2b")
+        else:
+            return "Invalid choice"
         return answer
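
Note on the api.py change: send_message now takes a numeric model selector (1-4) and forwards it to the static process_text, which posts the prompt to a local Ollama server's /api/generate endpoint. A minimal usage sketch, assuming api.py is importable as the api module, an Ollama server is listening on http://localhost:11434, and the listed models have been pulled:

    # Usage sketch only; the numeric codes map to phi3.5, gemma2:2b, qwen2:0.5b, codegemma:2b.
    from api import API

    client = API()
    print(client.send_message("Summarize PEP 8 in one sentence.", 1))  # 1 -> "phi3.5"

    # process_text is a @staticmethod, so it can also be called directly with an explicit model tag.
    print(API.process_text("Hello there!", "gemma2:2b"))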