Added a header to the project #28

3 changed files with 11 additions and 0 deletions
@@ -4,6 +4,7 @@ from transformers import AutoTokenizer, LlamaForCausalLM
 
 
 class API:
+    # This method processes a message via transformers. (NOT FINISHED!)
     @staticmethod
     def process_text_transformers(prompt, model):
         model = LlamaForCausalLM.from_pretrained(model)
@@ -14,6 +15,7 @@ class API:
         generate_ids = model.generate(inputs.input_ids, max_length=30)
         return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 
+    # This method processes a message via ollama
     @staticmethod
     def process_text_local(prompt, model):
         ollama_url = "http://localhost:11434"
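Note: the transformers path above is flagged "NOT FINISHED!", and the diff hides the lines that load the tokenizer and build the inputs. A minimal sketch of how the completed method could look, written here as a standalone function; it assumes the tokenizer is loaded from the same checkpoint as the model, and the parameter is renamed to model_name only to avoid shadowing it with the loaded model object:

from transformers import AutoTokenizer, LlamaForCausalLM

def process_text_transformers(prompt, model_name):
    # Load model and tokenizer from the same checkpoint (assumption).
    model = LlamaForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Tokenize the prompt and generate a short completion, mirroring the
    # max_length=30 visible in the diff.
    inputs = tokenizer(prompt, return_tensors="pt")
    generate_ids = model.generate(inputs.input_ids, max_length=30)
    return tokenizer.batch_decode(
        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]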
@@ -34,6 +36,7 @@ class API:
         else:
             return "Error: " + response.text
 
+    # This method sends a message to a certain AI.
     def send_message(self, message, model):
         if model == 1:
             answer = self.process_text_local(message, "phi3.5")
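The body of process_text_local between the two hunks above is not shown (old lines 20-33 are elided). For context, a hedged sketch of a typical non-streaming request to ollama's /api/generate endpoint, consistent with the ollama_url and error branch visible in the diff; the payload details are assumptions, not the author's exact code:

import requests

def process_text_local(prompt, model):
    ollama_url = "http://localhost:11434"
    # Non-streaming generation: stream=False returns a single JSON object.
    payload = {"model": model, "prompt": prompt, "stream": False}
    response = requests.post(f"{ollama_url}/api/generate", json=payload)
    if response.status_code == 200:
        # The full completion is carried in the "response" field.
        return response.json()["response"]
    else:
        return "Error: " + response.text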
@@ -5,3 +5,6 @@ source venv/bin/activate
 pip install transformers
 pip install torch
 pip install flask
+pip install SpeechRecognition
+pip install pyaudio
+pip install pocketsphinx
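The three new packages form one stack: SpeechRecognition provides the Recognizer/Microphone API (imported as speech_recognition), pyaudio backs microphone capture, and pocketsphinx enables offline recognition. A quick smoke test, assuming a working default microphone:

import speech_recognition as sr

recognizer = sr.Recognizer()
with sr.Microphone() as source:  # microphone access requires pyaudio
    print("Say something...")
    audio = recognizer.listen(source)
# recognize_sphinx() runs fully offline via pocketsphinx.
print(recognizer.recognize_sphinx(audio))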
@@ -1,11 +1,15 @@
 from flask import Flask, request, render_template
 from api import API
+from voice_recognition import Voice
 
 APP = Flask(__name__)
 api = API()
+voice = Voice()
+
 messages = []
 
 
+# The following method shows the user the GUI and does the backend connections to the API.
 @APP.route('/', methods=['GET', 'POST'])
 def index():
     global messages
@@ -16,6 +20,7 @@ def index():
         user_message = request.form['user_message']
 
         if option == "voice":
+            user_message = voice.listen()
             messages.append(f"User: {user_message}")
         elif option == "chat":
             messages.append(f"User: {user_message}")
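The voice_recognition module that provides Voice is not part of this diff. Given the dependencies added in the setup script, a hypothetical sketch of what a Voice class with the listen() method used in the route might look like; everything except the Voice and listen() names is an assumption:

import speech_recognition as sr

class Voice:
    def __init__(self):
        self.recognizer = sr.Recognizer()

    def listen(self):
        # Capture one utterance from the default microphone and transcribe
        # it offline with pocketsphinx.
        with sr.Microphone() as source:
            self.recognizer.adjust_for_ambient_noise(source)
            audio = self.recognizer.listen(source)
        try:
            return self.recognizer.recognize_sphinx(audio)
        except sr.UnknownValueError:
            return "Sorry, I could not understand that."

With this shape, voice.listen() returns plain text, so it slots directly into the messages list the route already maintains.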