diff --git a/run.py b/run.py
index 66aa70c..e08983d 100644
--- a/run.py
+++ b/run.py
@@ -1,12 +1,11 @@
 import logging
-import pathlib
-
-# import lingua_franca
 
 from src import api
 from src.database import db_utils
 from src.utils import faster_whisper_utils
 
+# import lingua_franca
+
 
 if __name__ == '__main__':
     logging.getLogger().setLevel(logging.INFO)
@@ -28,4 +27,4 @@ if __name__ == '__main__':
     faster_whisper_utils.load_model()
 
     # Start the api endpoint
-    api.start_api()
+    api.start_api(6000)
diff --git a/src/api.py b/src/api.py
index 119a68a..32277c2 100644
--- a/src/api.py
+++ b/src/api.py
@@ -4,10 +4,10 @@
 import logging
 import openai
 import sys
 import tempfile
+from threading import Lock
 from flask import Flask, request
 from flask_socketio import SocketIO, emit, join_room, leave_room, \
     rooms
-from threading import Lock
 
 from src.utils import chat_utils, chatgpt_utils, faster_whisper_utils
@@ -116,6 +116,6 @@ def process_text():
     return {"transcription": text, "answer": answer}"""
 
 
-def start_api():
-    logging.info("Starting Jarvis Server API...")
-    socketio.run(app, host='0.0.0.0', port=6000)
+def start_api(port=6000):
+    logging.info("Starting Jarvis Server API on port " + str(port) + "...")
+    socketio.run(app, host='0.0.0.0', port=port)
diff --git a/src/resources/gpt_prompt_v4.txt b/src/resources/gpt_prompt_v4.txt
new file mode 100644
index 0000000..ae50ab5
--- /dev/null
+++ b/src/resources/gpt_prompt_v4.txt
@@ -0,0 +1,36 @@
+# You are a sarcastic, introverted smart (home assistant) bot, dry humor, it shall not interfere with the user question.
+# Simplify complex sentences, add short responses, return only JSON in one of the following forms:
+
+# Any kind of action (turn on/off, set temperature, etc.)
+# Specify the generic homeassistant service to call homeassistant.toggle or homeassistant.turn_on or homeassistant.turn_off
+{
+  "type": "action",
+  "simplified": "",
+  "response": "",
+  "entity": "",
+  "room": "",
+  "homeassistant_service": ""
+}
+
+# General questions, not smart home related
+{
+  "type": "answer_question",
+  "simplified": "",
+  "response": ""
+}
+
+# Smart home questions
+# For questions related to the smart home and that need further information (e.g., the temperature in the living room), you shall provide a replaceable variable in the response (e.g., the temperature in the living room is degrees) and for the entities to query, you shall provide a list of entities to query (e.g., ["temperature living room"]).
+{
+  "type": "smart_home_question",
+  "simplified": "",
+  "response": "",
+  "entites_to_query": [],
+  "room": ""
+}
+
+# Other
+{
+  "type": "other",
+  "response": ""
+}
diff --git a/src/utils/chatgpt_utils.py b/src/utils/chatgpt_utils.py
index 90b30c3..b836974 100644
--- a/src/utils/chatgpt_utils.py
+++ b/src/utils/chatgpt_utils.py
@@ -78,7 +78,7 @@ def chatgpt_recognise(text, uuid):
 
     # If the response is not a JSON, it's probably a plaintext response
     logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(response.choices))
-    return {"simplified_sentence": "Error", "response": "An error has occured or I don't understand."}
+    return {"simplified_sentence": "Error", "response": "I am having trouble understanding you. Please try again."}
 
 
 def parse_gpt_json(input_string):
diff --git a/src/utils/faster_whisper_utils.py b/src/utils/faster_whisper_utils.py
index 0f67ac0..9312742 100644
--- a/src/utils/faster_whisper_utils.py
+++ b/src/utils/faster_whisper_utils.py
@@ -3,10 +3,11 @@
 import logging
 
 from faster_whisper import WhisperModel
 
 
-def load_model():
+def load_model(model_size='small', device="cpu", cpu_threads=8, compute_type="int8"):
     log_level = logging.getLogger().level
     global model
-    model = WhisperModel('small', device="cpu", cpu_threads=8, compute_type="int8")
+    model = WhisperModel(model_size_or_path=model_size, device=device, cpu_threads=cpu_threads,
+                         compute_type=compute_type)
     logging.getLogger().setLevel(log_level)