# Forked from React-Group/interstellar_ai
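"""Minimal Flask API that proxies chat requests to a locally running Ollama model.

Endpoints (summarized from the routes defined below):
    GET  /interstellar/api/ai_create  allocate a response buffer; returns its access token
    POST /interstellar/api/ai_send    run a prompt against a local model (blocks until done)
    GET  /interstellar/api/ai_get     poll the accumulated response for an access token
"""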
from flask import Flask, request, jsonify
import ollama


class AI:
    @staticmethod
    def process_local(model, message, system, return_class, access_token):
        # Request a streamed chat completion from the local Ollama server.
        stream = ollama.chat(
            model=model,
            messages=[
                # The system prompt comes first so it frames the user message.
                {'role': 'system', 'content': system},
                {'role': 'user', 'content': message},
            ],
            stream=True,
        )

        # Append each streamed chunk to the buffer for this access token,
        # so /ai_get can serve partial output while generation is running.
        for chunk in stream:
            print(chunk['message']['content'])
            return_class.ai_response[access_token] += chunk['message']['content']


class API:
    def __init__(self):
        self.app = Flask(__name__)
        self.ai_response = []  # one growing response buffer per issued access token
        self.ai = AI()

    def run(self):
        @self.app.route('/interstellar/api/ai_create', methods=['GET'])
        def create_ai():
            # Allocate a fresh response buffer; its list index doubles as
            # the access token handed back to the client.
            self.ai_response.append("")
            return jsonify({'status': 200, 'access_token': len(self.ai_response) - 1})

        @self.app.route('/interstellar/api/ai_send', methods=['POST'])
        def send_ai():
            data = request.get_json()
            message = data.get('message')
            ai_model = data.get('ai_model')
            system_prompt = data.get('system_prompt')
            access_token = data.get('access_token')
            # Blocks until the model finishes streaming; the response text
            # accumulates in ai_response as it arrives.
            self.ai.process_local(ai_model, message, system_prompt, self, access_token)
            return jsonify({'status': 200})

        @self.app.route('/interstellar/api/ai_get', methods=['GET'])
        def get_ai():
            # Return whatever has accumulated so far for this access token.
            access_token = request.args.get('access_token')
            return jsonify({'status': 200, 'response': self.ai_response[int(access_token)]})

        self.app.run(debug=True)


if __name__ == '__main__':
    api = API()
    api.run()
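
# --- Example client sketch (illustration only, not part of the original file) ---
# Assumes the server above is running on Flask's default http://127.0.0.1:5000,
# that the third-party `requests` package is installed, and that 'llama3' is a
# stand-in for whatever model has already been pulled into the local Ollama
# instance.
#
#   import requests
#
#   base = 'http://127.0.0.1:5000/interstellar/api'
#
#   # Obtain an access token (an index into the server's response buffers).
#   token = requests.get(f'{base}/ai_create').json()['access_token']
#
#   # Send a prompt; this call returns once generation has finished.
#   requests.post(f'{base}/ai_send', json={
#       'message': 'Why is the sky blue?',
#       'ai_model': 'llama3',
#       'system_prompt': 'Answer in one sentence.',
#       'access_token': token,
#   })
#
#   # Read back the accumulated response for that token.
#   print(requests.get(f'{base}/ai_get', params={'access_token': token}).json()['response'])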