From af1353a41a1e7dce118b3d613a30ec1a9338170f Mon Sep 17 00:00:00 2001
From: Patrick_Pluto
Date: Tue, 17 Sep 2024 12:36:32 +0200
Subject: [PATCH] FIXED THE MERGE CONFLICT (THANKS)

---
 .gitignore      |  3 ++-
 py/api.py       | 54 ++++++++++++++++++++++++++++++++++++-------------
 py/install.sh   | 17 ++++++++++++++++
 py/venv.sh      | 10 ---------
 py/web_flask.py | 34 +++++++++++++++++++++++++++++--
 5 files changed, 91 insertions(+), 27 deletions(-)
 create mode 100755 py/install.sh
 delete mode 100755 py/venv.sh
 mode change 100644 => 100755 py/web_flask.py

diff --git a/.gitignore b/.gitignore
index 15fae49..8a16aa7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 venv/
-__pycache__
+__pycache__/
 .idea/
 .vscode/
+token.txt
diff --git a/py/api.py b/py/api.py
index 4290692..7582c39 100644
--- a/py/api.py
+++ b/py/api.py
@@ -1,19 +1,44 @@
 import requests
 import json
-from transformers import AutoTokenizer, LlamaForCausalLM
-
+from gradio_client import Client
+import os
+from mistralai import Mistral
 
 class API:
-    # This method processes a message via transformers. (NOT FINISHED!)
     @staticmethod
-    def process_text_transformers(prompt, model):
-        model = LlamaForCausalLM.from_pretrained(model)
-        tokenizer = AutoTokenizer.from_pretrained(model)
+    def process_text_mistralai(prompt, model, system):
+        with open("token.txt", "r") as f:
+            token = f.readline().strip()
 
-        inputs = tokenizer(prompt, return_tensors="pt")
+        api_key = token
 
-        generate_ids = model.generate(inputs.input_ids, max_length=30)
-        return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+        client = Mistral(api_key=api_key)
+
+        chat_response = client.chat.complete(
+            model=model,
+            messages=[
+                {
+                    "role": "system",
+                    "content": system,
+                }, {
+                    "role": "user",
+                    "content": prompt,
+                },
+            ]
+        )
+        return chat_response.choices[0].message.content
 
+    @staticmethod
+    def process_text_gradio(prompt, model, system):
+        client = Client(model)
+        result = client.predict(
+            message=prompt,
+            system_message=system,
+            max_tokens=512,
+            temperature=0.7,
+            top_p=0.95,
+            api_name="/chat"
+        )
+        return result
 
     # This method processes a message via ollama
     @staticmethod
@@ -38,18 +63,19 @@ class API:
 
     # This method sends a message to a certain AI.
-
     def send_message(self, message, model, system):
         if model == 1:
             answer = self.process_text_local(message, "phi3.5", system)
         elif model == 2:
-            answer = self.process_text_local(message, "gemma2:2b", system)
+            answer = self.process_text_local(message, "gemma2:9b", system)
         elif model == 3:
-            answer = self.process_text_local(message, "qwen2:0.5b", system)
-        elif model == 4:
             answer = self.process_text_local(message, "codegemma:2b", system)
+        elif model == 4:
+            answer = self.process_text_gradio(message, "PatrickPluto/InterstellarAIChatbot", system)
         elif model == 5:
-            answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
+            answer = self.process_text_mistralai(message, "mistral-large-latest", system)
+        elif model == 6:
+            answer = self.process_text_mistralai(message, "codestral-latest", system)
         else:
             return "Invalid choice"
         return answer
 
diff --git a/py/install.sh b/py/install.sh
new file mode 100755
index 0000000..fbbea2b
--- /dev/null
+++ b/py/install.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+python3 -m venv venv
+source venv/bin/activate
+pip install flask
+pip install SpeechRecognition
+pip install pyaudio
+pip install pocketsphinx
+pip install sentencepiece
+pip install pyqt5
+pip install pyqtwebengine
+pip install gradio_client
+pip install mistralai
+
+ollama pull phi3.5
+ollama pull codegemma:2b
+ollama pull gemma2:9b
diff --git a/py/venv.sh b/py/venv.sh
deleted file mode 100755
index 3d70f23..0000000
--- a/py/venv.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-virtualenv venv
-source venv/bin/activate
-pip install transformers
-pip install torch
-pip install flask
-pip install SpeechRecognition
-pip install pyaudio
-pip install pocketsphinx
diff --git a/py/web_flask.py b/py/web_flask.py
old mode 100644
new mode 100755
index a3a5eea..17a9400
--- a/py/web_flask.py
+++ b/py/web_flask.py
@@ -1,6 +1,14 @@
+#!venv/bin/python
+
 from flask import Flask, request, render_template
 from api import API
 from voice_recognition import Voice
+import sys
+import threading
+from PyQt5.QtCore import *
+from PyQt5.QtWebEngineWidgets import *
+from PyQt5.QtWidgets import *
+
 
 APP = Flask(__name__)
 api = API()
@@ -49,6 +57,28 @@ def contact():
 
 
 if __name__ == '__main__':
-    APP.run(debug=True)
+    qapp = QApplication(sys.argv)
 
-# This is a comment --> test if this creates a merge conflict
\ No newline at end of file
+    view = QWebEngineView()
+
+    view.setGeometry(100, 100, 1280, 720)
+    view.setWindowTitle("InterstellarAI")
+
+    view.setUrl(QUrl("http://localhost:5000"))
+
+    view.show()
+
+    def run_flask():
+        APP.run()
+
+    def stop_flask():
+        thread.join()
+        qapp.quit()
+
+    thread = threading.Thread(target=run_flask)
+    thread.daemon = True
+    thread.start()
+
+    qapp.aboutToQuit.connect(stop_flask)
+
+    sys.exit(qapp.exec_())
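
For reference, a minimal usage sketch of the patched API class (not part of
the commit itself; it assumes a running ollama daemon for model codes 1-3 and
a Mistral API key in token.txt for codes 5 and 6):

    from api import API

    api = API()

    # Model codes as wired up in send_message:
    #   1 = phi3.5, 2 = gemma2:9b, 3 = codegemma:2b    (local, via ollama)
    #   4 = PatrickPluto/InterstellarAIChatbot         (Gradio Space)
    #   5 = mistral-large-latest, 6 = codestral-latest (Mistral API)
    answer = api.send_message("Hello there!", 5, "You are a helpful assistant.")
    print(answer)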