main #6

Merged
Patrick_Pluto merged 9 commits from React-Group/ai-virtual-assistant:main into main 2024-09-16 12:19:35 +02:00
9 changed files with 91 additions and 4 deletions
Showing only changes of commit 564c50184a

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
venv/

3
py/.idea/.gitignore vendored Normal file

@@ -0,0 +1,3 @@
# Default ignored files
/shelf/
/workspace.xml

6
py/.idea/inspectionProfiles/profiles_settings.xml Normal file

@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

7
py/.idea/misc.xml Normal file

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Black">
    <option name="sdkName" value="Python 3.12" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (py)" project-jdk-type="Python SDK" />
</project>

8
py/.idea/modules.xml Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/py.iml" filepath="$PROJECT_DIR$/.idea/py.iml" />
    </modules>
  </component>
</project>

8
py/.idea/py.iml Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="Python 3.12 (py)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>

6
py/.idea/vcs.xml Normal file

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
  </component>
</project>

@@ -1,7 +1,50 @@
 import requests
+import json
+from transformers import AutoTokenizer, LlamaForCausalLM
 
-class api:
+class API:
+    @staticmethod
+    def process_text_transformers(prompt, model):
+        tokenizer = AutoTokenizer.from_pretrained(model)
+        model = LlamaForCausalLM.from_pretrained(model)
+
+        inputs = tokenizer(prompt, return_tensors="pt")
+
+        generate_ids = model.generate(inputs.input_ids, max_length=30)
+        return tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 
-    def send_message(message):
-        answer = "Test"
-        return answer
+    @staticmethod
+    def process_text_local(prompt, model):
+        ollama_url = "http://localhost:11434"
+
+        response = requests.post(
+            f"{ollama_url}/api/generate", json={"model": model, "prompt": prompt}
+        )
+
+        if response.status_code == 200:
+            response_data = []
+            for line in response.iter_lines():
+                line_decoded = line.decode("utf-8")
+                line_raw = json.loads(line_decoded)
+                response_data.append(line_raw["response"])
+
+            final_response = "".join(response_data)
+            generated_text = final_response.splitlines()[-1]
+            return generated_text
+        else:
+            return "Error: " + response.text
+
+    def send_message(self, message, model):
+        if model == 1:
+            answer = self.process_text_local(message, "phi3.5")
+        elif model == 2:
+            answer = self.process_text_local(message, "gemma2:2b")
+        elif model == 3:
+            answer = self.process_text_local(message, "qwen2:0.5b")
+        elif model == 4:
+            answer = self.process_text_local(message, "codegemma:2b")
+        elif model == 5:
+            answer = self.process_text_transformers(message, "meta-llama/Meta-Llama-3.1-8B")
+        else:
+            return "Invalid choice"
+        return answer
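
For reference, a minimal sketch of how the merged class might be exercised. The module name api and this wiring are assumptions, not part of the diff. The numeric model argument selects a backend: 1 through 4 route to process_text_local, which posts to a local Ollama server and reassembles its newline-delimited JSON stream of "response" fragments; 5 routes to process_text_transformers, which loads the gated meta-llama/Meta-Llama-3.1-8B checkpoint via transformers.

# Hypothetical usage sketch; only the API class comes from this diff.
# Assumes the file above is saved as api.py, and that an Ollama server
# is listening on localhost:11434 with "phi3.5" already pulled.
from api import API

assistant = API()

# 1 selects the local Ollama model "phi3.5".
print(assistant.send_message("Explain virtual assistants in one line.", 1))

# Any value outside 1-5 falls through to the error string.
print(assistant.send_message("hello", 9))  # -> "Invalid choice"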

5
py/venv.sh Normal file

@@ -0,0 +1,5 @@
#!/bin/bash
virtualenv venv
source venv/bin/activate
pip install transformers
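
Note that venv.sh only provisions the Python side: it creates a virtualenv and installs transformers. The local code paths in send_message additionally assume that the requests package is available in the environment and that an Ollama server is running on localhost:11434 with the chosen models (phi3.5, gemma2:2b, qwen2:0.5b, codegemma:2b) pulled beforehand, e.g. with ollama pull phi3.5.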