import json
import tempfile

import requests
from flask import request, Flask

from jarvis.skills import intent_manager

app = Flask(__name__)


# audio file (e.g. .wav) request
@app.route("/process_voice", methods=['POST'])
def process_audio_request_android():
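    """Receive raw audio bytes from a client, transcribe them and return the text."""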
|
|
|
|
print("[" + request.remote_addr + "] - New STT request")

    # buffer the received audio in a temporary file so the ASR backend can read it from disk
    audio_temp_file = tempfile.NamedTemporaryFile(prefix='jarvis-audio_', suffix='_client')
    audio_temp_file.write(request.data)
    audio_temp_file.flush()  # make sure every byte is on disk before transcribing
    print(audio_temp_file.name)

    text = text_recognition_whisperasr(audio_temp_file.name)

    # TODO: send to each skill to answer the questions

    return {"transcription": text, "status": 200}


@app.route("/process_text", methods=['POST'])
def process_text():
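    """Run a text query through the intent manager and return its answer."""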
    print("[" + request.remote_addr + "] - New TXT request")

    text = request.values['text']

    answer = intent_manager.recognise(text, request.headers.get('Client-Ip'), request.headers.get('Client-Port'))

    return {"transcription": text, "answer": answer}


# send a transcription request to the whisper-asr server (docker)
def text_recognition_whisperasr(audio_file):
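    """POST the given audio file to the whisper-asr webservice and return the transcription."""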
    headers = {
        'accept': 'application/json',
        # 'Content-Type': 'multipart/form-data',
    }

    params = {
        'task': 'transcribe',
        # TODO: add to config
        'language': 'fr',
        'output': 'json',
    }

    # open the audio file with a context manager so the handle is closed once the request is done
    with open(audio_file, 'rb') as f:
        files = {
            'audio_file': f,
        }

        # TODO: add to config
        response = requests.post('https://whisper.broillet.ch/asr', params=params, headers=headers, files=files)

    return response.json()['text']


# NOT IMPLEMENTED RIGHT NOW / to be used with local whisper.cpp (cpu)
"""
def local_recognition(audio_file, time_of_request):
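    # note: this commented-out sketch would also need the os and flask.jsonify imports plus the get_path_file helper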
    path = os.path.dirname(get_path_file.__file__)

    print("Loading model and recognition")
    model = path + "/whisper/models/" + "ggml-small.bin"
    os.system(path + "/whisper/main -l fr -t 8 -m " + model + " -f " + audio_file + " -otxt")  # + "> /dev/null 2>&1")

    output = open(audio_file + ".txt").read()

    # time_of_resolution = time.perf_counter()
    # print(output + f" - {time_of_resolution - time_of_request:0.4f} seconds")

    return jsonify(transcription=output, time=5, answer="WIP...")
"""


def start_server():
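    """Configure the Flask app and start listening for client requests."""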
    app.config['JSON_AS_ASCII'] = False  # keep accented characters unescaped in JSON responses

    # TODO: add to config
    app.run(port=5000, debug=False, host='0.0.0.0', threaded=True)