main #10

Merged
YasinOnm08 merged 5 commits from React-Group/ai-virtual-assistant:main into main 2024-09-16 15:17:01 +02:00
4 changed files with 49 additions and 19 deletions

View file

@@ -4,6 +4,7 @@ from transformers import AutoTokenizer, LlamaForCausalLM
class API:
# This method processes a message via transformers. (NOT FINISHED!)
@staticmethod
def process_text_transformers(prompt, model):
model = LlamaForCausalLM.from_pretrained(model)
@@ -14,6 +15,7 @@ class API:
generate_ids = model.generate(inputs.input_ids, max_length=30)
return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
# This method processes a message via ollama
@staticmethod
def process_text_local(prompt, model):
ollama_url = "http://localhost:11434"
@@ -34,6 +36,7 @@ class API:
else:
return "Error: " + response.text
# This method sends a message to a certain AI.
def send_message(self, message, model):
if model == 1:
answer = self.process_text_local(message, "phi3.5")

View file

@@ -71,19 +71,22 @@
<!-- Output section: Simulating a conversation with AI -->
<div class="output">
<div class="conversation">
<div class="user-message">User: What is the weather today?</div>
<div class="ai-message">AI: It's sunny with a slight breeze.</div>
<div class="user-message">User: Great! Thank you!</div>
<div class="ai-message">AI: You're welcome!</div>
{% for message in messages %}
{% if message.startswith('User:') %}
<div class="user-message">{{ message }}</div>
{% else %}
<div class="ai-message">{{ message }}</div>
{% endif %}
{% endfor %}
</div>
</div>
<!-- Input section: Where user input is provided -->
<div class="input">
<input type="text" placeholder="Type your message here..." />
<button><img src="/static/img/microphone.svg" alt="microphone"></button>
<button>Send</button>
</div>
<form class="input" method="POST" action="">
<input type="text" name="user_message" placeholder="Type your message here..." />
<button type="submit" name="option" value="voice"><img src="/static/img/microphone.svg" alt="microphone"></button>
<button type="submit" name="option" value="chat">Send</button>
</form>
</div>
</body>

View file

@@ -5,3 +5,6 @@ source venv/bin/activate
pip install transformers
pip install torch
pip install flask
pip install SpeechRecognition
pip install pyaudio
pip install pocketsphinx

View file

@@ -1,14 +1,35 @@
from flask import Flask, send_from_directory
from flask import Flask, request, render_template
from api import API
from voice_recognition import Voice
APP = Flask(__name__)
api = API()
voice = Voice()
messages = []
class WebHost:
@staticmethod
def main_page():
app = Flask(__name__)
# The following method shows the user the GUI and does the backend connections to the API.
@APP.route('/', methods=['GET', 'POST'])
def index():
global messages
@app.route('/')
def index():
return app.send_static_file('index.html')
if request.method == 'POST':
option = request.form['option']
if __name__ == '__main__':
app.run(debug=True)
user_message = request.form['user_message']
if option == "voice":
user_message = voice.listen()
messages.append(f"User: {user_message}")
elif option == "chat":
messages.append(f"User: {user_message}")
ai_response = "AI: " + api.send_message(user_message, 1)
messages.append(ai_response)
return render_template('index.html', messages=messages)
if __name__ == '__main__':
APP.run(debug=True)