diff --git a/.gitignore b/.gitignore
index 15fae49..8a16aa7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
venv/
-__pycache__
+__pycache__/
.idea/
.vscode/
+token.txt
diff --git a/docs/class.drawio b/docs/class.drawio
new file mode 100644
index 0000000..bb4fd55
--- /dev/null
+++ b/docs/class.drawio
@@ -0,0 +1,34 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/py/api.py b/py/api.py
index d3db341..7582c39 100644
--- a/py/api.py
+++ b/py/api.py
@@ -1,27 +1,52 @@
import requests
import json
-from transformers import AutoTokenizer, LlamaForCausalLM
-
+from gradio_client import Client
+import os
+from mistralai import Mistral
class API:
- # This method processes a message via transformers. (NOT FINISHED!)
@staticmethod
- def process_text_transformers(prompt, model):
- model = LlamaForCausalLM.from_pretrained(model)
- tokenizer = AutoTokenizer.from_pretrained(model)
+ def process_text_mistralai(prompt, model, system):
+ with open("token.txt", "r") as f:
+ token = f.readline().strip()
- inputs = tokenizer(prompt, return_tensors="pt")
+ api_key = token
- generate_ids = model.generate(inputs.input_ids, max_length=30)
- return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ client = Mistral(api_key=api_key)
+
+ chat_response = client.chat.complete(
+ model=model,
+ messages=[
+ {
+ "role": "system",
+ "content": system,
+ }, {
+ "role": "user",
+ "content": prompt,
+ },
+ ]
+ )
+ return chat_response.choices[0].message.content
+ @staticmethod
+ def process_text_gradio(prompt, model, system):
+ client = Client(model)
+ result = client.predict(
+ message=prompt,
+ system_message=system,
+ max_tokens=512,
+ temperature=0.7,
+ top_p=0.95,
+ api_name="/chat"
+ )
+ return result
# This method processes a message via ollama
@staticmethod
- def process_text_local(prompt, model):
+ def process_text_local(prompt, model, system):
ollama_url = "http://localhost:11434"
response = requests.post(
- f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt}
+ f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt, "system": system}
)
if response.status_code == 200:
@@ -37,17 +62,20 @@ class API:
return "Error: " + response.text
# This method sends a message to a certain AI.
- def send_message(self, message, model):
+
+ def send_message(self, message, model, system):
if model == 1:
- answer = self.process_text_local(message, "phi3.5")
+ answer = self.process_text_local(message, "phi3.5", system)
elif model == 2:
- answer = self.process_text_local(message, "gemma2:2b")
+ answer = self.process_text_local(message, "gemma2:9b", system)
elif model == 3:
- answer = self.process_text_local(message, "qwen2:0.5b")
+ answer = self.process_text_local(message, "codegemma:2b", system)
elif model == 4:
- answer = self.process_text_local(message, "codegemma:2b")
+ answer = self.process_text_gradio(message, "PatrickPluto/InterstellarAIChatbot", system)
elif model == 5:
- answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
+ answer = self.process_text_mistralai(message, "mistral-large-latest", system)
+ elif model == 6:
+ answer = self.process_text_mistralai(message, "codestral-latest", system)
else:
return "Invalid choice"
return answer
diff --git a/py/install.sh b/py/install.sh
new file mode 100755
index 0000000..fbbea2b
--- /dev/null
+++ b/py/install.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+python3 -m venv venv
+source venv/bin/activate
+pip install flask
+pip install SpeechRecognition
+pip install pyaudio
+pip install pocketsphinx
+pip install sentencepiece
+pip install pyqt5
+pip install pyqtwebengine
+pip install gradio_client
+pip install mistralai
+
+ollama pull phi3.5
+ollama pull codegemma:2b
+ollama pull gemma2:9b
diff --git a/py/simple_chat.py b/py/simple_chat.py
deleted file mode 100644
index 879e1f1..0000000
--- a/py/simple_chat.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from api import API
-
-class CLIChat:
- @staticmethod
- def chat():
- chat1 = API()
-
- while True:
- print("")
- print("Which AI Model do you want to use? Write as a Number (1-5).")
- model_input = input()
- model = int(model_input)
- if model <= 0 or model > 5:
- print("Invalid model.")
- continue
- while True:
- print("")
- print("Ask a question")
- input_text = input()
- if input_text == "change":
- break
- output_text = chat1.send_message(input_text, model)
- print(output_text)
diff --git a/py/static/img/code.jpg b/py/static/img/code.jpg
new file mode 100644
index 0000000..416fca4
Binary files /dev/null and b/py/static/img/code.jpg differ
diff --git a/py/static/img/copy.svg b/py/static/img/copy.svg
new file mode 100644
index 0000000..c8bdf17
--- /dev/null
+++ b/py/static/img/copy.svg
@@ -0,0 +1,54 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/py/static/img/default.jpg b/py/static/img/default.jpg
new file mode 100644
index 0000000..266213d
Binary files /dev/null and b/py/static/img/default.jpg differ
diff --git a/py/static/img/edit.svg b/py/static/img/edit.svg
new file mode 100644
index 0000000..ed1f7fe
--- /dev/null
+++ b/py/static/img/edit.svg
@@ -0,0 +1,74 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/py/static/img/language.jpg b/py/static/img/language.jpg
new file mode 100644
index 0000000..5ab31b9
Binary files /dev/null and b/py/static/img/language.jpg differ
diff --git a/py/static/img/logo.png b/py/static/img/logo.png
new file mode 100644
index 0000000..00332a4
Binary files /dev/null and b/py/static/img/logo.png differ
diff --git a/py/static/img/math.jpg b/py/static/img/math.jpg
new file mode 100644
index 0000000..a417968
Binary files /dev/null and b/py/static/img/math.jpg differ
diff --git a/py/static/img/resend.svg b/py/static/img/resend.svg
new file mode 100644
index 0000000..469d02b
--- /dev/null
+++ b/py/static/img/resend.svg
@@ -0,0 +1,65 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/py/static/img/send.svg b/py/static/img/send.svg
new file mode 100644
index 0000000..edcd870
--- /dev/null
+++ b/py/static/img/send.svg
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/py/static/styles.css b/py/static/styles.css
index 399202a..950c0d8 100644
--- a/py/static/styles.css
+++ b/py/static/styles.css
@@ -1,6 +1,5 @@
-/* Color Variables */
:root {
- --background-color: black;
+ --background-color: white;
--text-color: white;
--font-family: Arial, sans-serif;
--history-background-color: rgb(0, 0, 48);
@@ -10,33 +9,73 @@
--language-model-color: blue;
--default-model-color: yellow;
--custom-model-color: purple;
- --output-background-color: rgb(0, 0, 48);
- --user-message-color: rgb(0, 128, 255);
- --ai-message-color: rgb(100, 100, 255);
+ --output-background-color: black; /* Set the conversation background to black */
+ --user-message-color: rgb(0, 128, 255); /* Blueish bubble for user */
+ --ai-message-color: rgb(100, 100, 255); /* Lighter blue for AI */
--input-background-color: rgb(0, 0, 48);
--input-button-color: rgb(0, 128, 255);
--input-button-hover-color: rgb(0, 100, 200);
}
+
/* Global Reset */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
-/* Disable Scrolling */
+
html, body {
height: 100vh;
overflow: hidden; /* Prevent scrolling */
}
+
/* Body Styling */
body {
+ margin-top: 2em;
display: flex;
justify-content: center;
align-items: center;
background-color: var(--background-color);
color: var(--text-color);
font-family: var(--font-family);
+ margin-bottom: 0.5em;
}
+
+/* Header Styling */
+header {
+ background-color: var(--background-color);
+ color: black;
+ width: 100%;
+ text-decoration: none;
+ position: fixed;
+ top: 0;
+ left: 0;
+ padding: 10px 20px;
+ box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
+ z-index: 1000;
+ font-family: var(--font-family);
+}
+
+header li {
+ display: inline-block;
+ margin: 0 15px;
+}
+
+header img {
+ height: 2em;
+ vertical-align: middle;
+}
+
+header a {
+ color: black;
+ text-decoration: none;
+ transition: color 0.3s;
+}
+
+header a:hover {
+ color: var(--input-button-color);
+}
+
/* Container Grid Layout */
.container {
display: grid;
@@ -46,6 +85,7 @@ body {
width: 90vw;
height: 90vh;
}
+
/* History Section */
.history {
grid-column: 1;
@@ -54,128 +94,204 @@ body {
background-color: var(--history-background-color);
padding: 1em;
overflow-y: auto;
- height: 50vh; /* Adjusted to occupy 60% of the viewport height */
+ height: 50vh;
}
+
.history ul {
list-style: none;
}
+
.history ul li {
padding: 10px 0;
border-bottom: 1px solid var(--text-color);
width: 100%;
}
-.history ul li a{
+.history ul li a {
display: block;
text-decoration: none;
color: white;
width: 100%;
- padding: 5px;
+ padding: 5px;
}
-.history ul li a:hover{
+.history ul li a:hover {
background-color: var(--input-button-hover-color);
}
/* Models Section */
.models {
+ overflow-y: auto;
background-color: var(--models-background-color);
border-radius: 2em;
padding: 1em;
- height: 40vh; /* Adjusted height to occupy 40% of the viewport height */
+ height: 40vh;
+ box-sizing: border-box;
}
-.models h3 {
- padding: 2px;
- margin: 5px;
-}
-
-.models .title h3 {
- padding: 2px;
- margin: 5px;
+.models .titel {
padding-bottom: 1em;
-}
-
-.grid{
- grid-column: 1;
- grid-row: 2;
display: flex;
- justify-content: space-between;
+ justify-content: center;
align-items: center;
- padding-top: 1em;
+
}
-.ai-class {
- text-align: center;
+.grid {
+ display: grid;
+ grid-template-columns: repeat(2, 1fr);
+ gap: 1.5vh;
+ height: calc(100% - 2em);
+}
+
+.model-box {
display: flex;
- flex-direction: column;
align-items: center;
justify-content: center;
+ color: #fff;
+ border-radius: 5%;
+ overflow: hidden;
+ position: relative;
+ height: 20vh;
}
-.circle {
- width: 50px;
- height: 50px;
- border-radius: 50%;
- cursor: pointer;
- transition: transform 0.2s;
+
+.overlay{
+ z-index: 900;
+ position: absolute;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(0, 0, 0, 0.7);
+ /* Dark overlay */
+ color: white;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+
+ transition: opacity 0.5s ease;
+ pointer-events: none;
+ opacity: 0;
+ font-size:xx-large;
}
-.circle:hover {
- transform: scale(1.1);
+
+.model-box:hover .overlay{
+ opacity: 1;
}
-/* Model Colors */
+
.code-model {
- background-color: var(--code-model-color);
+ background-image: url(/static/img/code.jpg);
+ background-repeat: no-repeat;
+ background-size: cover;
}
+
.math-model {
- background-color: var(--math-model-color);
+ background-image: url(/static/img/math.jpg);
+ background-color: white;
+ background-position: center;
+ background-repeat: no-repeat;
+ background-size: contain;
}
+
.language-model {
- background-color: var(--language-model-color);
+ background-image: url(/static/img/language.jpg);
+ background-color: #72cce4;
+ background-repeat: no-repeat;
+ background-size: contain;
+ background-position: center;
}
+
.default-model {
- background-color: var(--default-model-color);
-}
-.custom-model {
- background-color: var(--custom-model-color);
+ background-image: url(/static/img/default.jpg);
+ background-repeat: no-repeat;
+ background-size: cover;
+ background-position: center;
}
+
/* Output Section */
.output {
grid-column: 2;
grid-row: 1 / span 2;
border-radius: 2em;
background-color: var(--output-background-color);
- padding: 1em;
+ padding: 1.5em;
display: flex;
flex-direction: column;
justify-content: flex-start;
font-size: 1.2em;
- overflow-y: scroll;
- height: 80vh;
+ overflow-y: auto;
+ max-height: 75vh;
margin-bottom: 0;
+ width: 100%;
}
+
+
/* Conversation */
-.conversation {
- width: 100%;
+#conversation {
+ display: flex;
+ flex-direction: column;
+ padding: 10px;
+ overflow-y: auto;
+ max-height: 80vh;
+ background-color: var(--output-background-color); /* Black background */
+ border-radius: 10px;
+ scroll-behavior: smooth; /* Optional: Smooth scrolling */
}
-.md {
- display: block;
- width: 100%;
+
+.user-message,
+.ai-message {
+ margin: 10px 0;
+ padding: 10px 15px;
+ border-radius: 15px;
+ max-width: 60%;
+ width: fit-content; /* Adjusts width to fit the content */
+ word-wrap: break-word;
+ display: block; /* Changed from inline-block to block */
+ box-shadow: 0px 1px 3px rgba(0, 0, 0, 0.1);
}
+
+/* Align user message to the right */
.user-message {
background-color: var(--user-message-color);
- padding: 10px;
- border-radius: 10px;
- margin-bottom: 10px;
- align-self: flex-end;
- text-align: right;
+ color: var(--text-color);
+ border-bottom-right-radius: 0;
+ margin-left: auto;
+ text-align: right; /* Align text to the right */
}
+
+/* Align AI message to the left */
.ai-message {
background-color: var(--ai-message-color);
- padding: 10px;
- border-radius: 10px;
- margin-bottom: 10px;
- align-self: flex-start;
+ color: var(--text-color);
+ border-bottom-left-radius: 0;
+ margin-right: auto;
+ text-align: left; /* Align text to the left */
}
+
+
+/* Output Form Buttons */
+.output form {
+ display: flex;
+ justify-content: flex-start;
+ gap: 10px;
+}
+
+.output form button {
+ background-color: transparent;
+ color: white;
+ border: none;
+ padding: 0;
+ cursor: pointer;
+ transition: transform 0.2s ease-in-out;
+}
+
+.output form button:hover {
+ transform: scale(1.2);
+}
+
+.output form button img {
+ height: 1.8em;
+}
+
/* Input Section */
.input {
grid-column: 2;
@@ -186,31 +302,133 @@ body {
display: flex;
justify-content: space-between;
align-items: center;
- height: 10vh;
- margin-top: -7em;
+
+ margin-top: -9em;
+ gap: 10px;
+ height: 7em;
}
+
.input input {
flex-grow: 1;
- padding: 10px;
+ padding: 12px;
font-size: 1.2em;
- border-radius: 5px;
- border: none;
+ border-radius: 8px;
+ border: 2px solid var(--input-button-color);
+ outline: none;
margin-right: 10px;
+ background-color: rgba(255, 255, 255, 0.9);
+ color: #333;
+ transition: border-color 0.3s ease-in-out;
}
+
+.input input:focus {
+ border-color: var(--input-button-hover-color);
+}
+
.input button {
- padding: 10px 20px;
+ padding: 12px 20px;
background-color: var(--input-button-color);
color: white;
border: none;
- border-radius: 5px;
- font-size: 1em;
+ border-radius: 50%;
+ font-size: 1.5em;
cursor: pointer;
- height: 3em;
- margin: 5px;
+ height: 50px;
+ width: 50px;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ transition: background-color 0.3s ease;
+ position: relative;
+ box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
}
+
.input button img {
- height: 100%;
+ height: 1em;
}
+
.input button:hover {
background-color: var(--input-button-hover-color);
-}
\ No newline at end of file
+ box-shadow: 0 6px 15px rgba(0, 0, 0, 0.2);
+}
+
+/* FAQ Section */
+#faq {
+ max-width: 800px;
+ width: 90%;
+ margin-top: 50px;
+ padding: 20px;
+ background-color: #222;
+ border-radius: 10px;
+ box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
+}
+
+#faq h2 {
+ text-align: center;
+ color: #00ccff;
+ font-size: 2em;
+ margin-bottom: 20px;
+}
+
+.faq-item {
+ margin-bottom: 20px;
+ padding: 10px;
+ border-radius: 5px;
+ background-color: #333;
+}
+
+.faq-item h3 {
+ color: #00ccff;
+ margin-bottom: 10px;
+ font-size: 1.5em;
+}
+
+.faq-item p {
+ color: #ddd;
+ font-size: 1.1em;
+ line-height: 1.5;
+}
+
+.faq-item:hover {
+ background-color: #444;
+ transition: background-color 0.3s;
+}
+
+@media (max-width: 1400px) {
+ .grid{
+ grid-template-columns: 1fr;
+ }
+}
+
+/* Responsive Adjustments */
+@media (max-width: 768px) {
+ .container {
+ grid-template-columns: 1fr;
+ grid-template-rows: auto;
+ width: 95vw;
+ }
+
+ .history, .models {
+ display: none; /* Hide history and models */
+ }
+
+ .output {
+ grid-column: 1;
+ grid-row: 1 / span 2;
+ }
+
+ .input {
+ grid-column: 1;
+ grid-row: 3;
+ margin-top: -4em;
+ }
+
+ .input button {
+ height: 40px;
+ width: 40px;
+ }
+
+ .output form button img {
+ height: 1.5em;
+ }
+}
diff --git a/py/templates/documentation.html b/py/templates/documentation.html
new file mode 100644
index 0000000..88591d8
--- /dev/null
+++ b/py/templates/documentation.html
@@ -0,0 +1,44 @@
+
+
+
+
+
+
+
+ AI Assistant
+
+
+
+
+
+
+
diff --git a/py/templates/faq.html b/py/templates/faq.html
new file mode 100644
index 0000000..a890258
--- /dev/null
+++ b/py/templates/faq.html
@@ -0,0 +1,58 @@
+
+
+
+
+
+
+
+ AI Assistant - FAQ
+
+
+
+
+
+
+ Frequently Asked Questions
+
+
+
What is this AI assistant for?
+
This AI assistant helps you with various tasks such as answering questions, generating text, and even helping with code or writing tasks.
+
+
+
+
How does the AI assistant work?
+
The assistant uses machine learning algorithms to understand your input and provide contextually relevant answers or generate content based on the task you've described.
+
+
+
+
Can I trust the answers given by the AI assistant?
+
While the AI strives to give accurate and helpful answers, it is important to verify critical information, especially for complex or sensitive queries.
+
+
+
+
What kind of questions can I ask?
+
You can ask a wide range of questions from simple factual queries to more complex requests like generating creative writing or code snippets.
+
+
+
+
Is my data secure when using the AI assistant?
+
We take privacy seriously. Your data is handled according to our privacy policy, ensuring that any personal information shared is securely processed and not misused.
+
+
+
+
+
diff --git a/py/templates/index.html b/py/templates/index.html
index 3425d0a..0ddb62e 100644
--- a/py/templates/index.html
+++ b/py/templates/index.html
@@ -1,20 +1,39 @@
-
AI Assistant
-
+
+
-
-
-
+
+
-
-
+
+
Different AI models
-
-
-
Code
-
+
-
-
Math
-
+
-
-
Language
-
+
-
-
+
-
+
{% for message in messages %}
{% if message.startswith('User:') %}
{{ message }}
@@ -78,16 +111,62 @@
{{ message }}
{% endif %}
{% endfor %}
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
- Send
+
+
+
+
+
+
+
+
+
+
-
+
+