From 2c7aa6bc3ca0d5f761bbf996391f035c6c8bed25 Mon Sep 17 00:00:00 2001
From: YasinOnm08
Date: Mon, 16 Sep 2024 11:21:09 +0200
Subject: [PATCH 1/4] input/output

---
 py/simple_chat.py | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 py/simple_chat.py

diff --git a/py/simple_chat.py b/py/simple_chat.py
new file mode 100644
index 0000000..53203d1
--- /dev/null
+++ b/py/simple_chat.py
@@ -0,0 +1,10 @@
+from api import api
+
+chat1 = api()
+
+while True:
+    print("Gib deine Frage ein")
+    input_text = input()
+
+    output_text = api.send_message(chat1, input_text)
+    print(output_text)
\ No newline at end of file
-- 
2.39.5


From 480660b5efdb92a4f6ff52ed1b735305a9f8a311 Mon Sep 17 00:00:00 2001
From: YasinOnm08
Date: Mon, 16 Sep 2024 11:59:15 +0200
Subject: [PATCH 2/4] model change possible

---
 py/simple_chat.py | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/py/simple_chat.py b/py/simple_chat.py
index 53203d1..3bb0c16 100644
--- a/py/simple_chat.py
+++ b/py/simple_chat.py
@@ -1,10 +1,20 @@
-from api import api
+from api import API
 
-chat1 = api()
+chat1 = API()
 
 while True:
-    print("Gib deine Frage ein")
-    input_text = input()
-
-    output_text = api.send_message(chat1, input_text)
-    print(output_text)
\ No newline at end of file
+    print("")
+    print("Which AI Model do you want to use? Write as a Number (1-5)")
+    model_input = input()
+    model = int(model_input)
+    if model <=0 or model > 5:
+        print("ungültiges Modell")
+        continue
+    while True:
+        print("")
+        print("Ask a question")
+        inputText = input ()
+        if inputText == "change":
+            break
+        outputText = chat1.send_message(inputText, model)
+        print(outputText)
\ No newline at end of file
-- 
2.39.5


From d7544c082f1738925c9ba367f05e98c9e4e486ce Mon Sep 17 00:00:00 2001
From: Patrick_Pluto
Date: Mon, 16 Sep 2024 12:07:15 +0200
Subject: [PATCH 3/4] Last Changes before break.

---
 py/venv.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/py/venv.sh b/py/venv.sh
index 4a3be2f..ed72fee 100644
--- a/py/venv.sh
+++ b/py/venv.sh
@@ -1,5 +1,7 @@
 #!/bin/bash
 
+rm -rf venv/
 virtualenv venv
 source venv/bin/activate
 pip install transformers
+pip install torch
-- 
2.39.5


From 1e639383c2a5bbad15c0eeb1fe36886d88737c1e Mon Sep 17 00:00:00 2001
From: Patrick_Pluto
Date: Mon, 16 Sep 2024 12:17:06 +0200
Subject: [PATCH 4/4] Fixed multiline responses.

---
 .gitignore | 1 +
 py/api.py  | 5 ++---
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index f7275bb..c65a729 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 venv/
+__pycache__
diff --git a/py/api.py b/py/api.py
index 9e6b40d..8ed3afb 100644
--- a/py/api.py
+++ b/py/api.py
@@ -29,8 +29,7 @@ class API:
                 response_data.append(line_raw["response"])
 
             final_response = "".join(response_data)
-            generated_text = final_response.splitlines()[-1]
-            return generated_text
+            return final_response
         else:
             return "Error: " + response.text
 
@@ -47,4 +46,4 @@ class API:
             answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
         else:
             return "Invalid choice"
-        return answer
+        return answer
\ No newline at end of file
-- 
2.39.5