From 6fca1ec80e03342bde064791d3e2562f27c40c73 Mon Sep 17 00:00:00 2001 From: Mathieu B Date: Sun, 26 Mar 2023 13:26:43 +0200 Subject: [PATCH] add history fix and calculate --- Dockerfile | 2 +- jarvis/api.py | 6 +-- jarvis/utils/chatgpt_prompt_2_smaller.txt | 17 ++++--- jarvis/utils/chatgpt_utils.py | 61 +++++++++++++++++++---- jarvis/start.py => start.py | 4 +- 5 files changed, 67 insertions(+), 23 deletions(-) rename jarvis/start.py => start.py (92%) diff --git a/Dockerfile b/Dockerfile index e426197..f2e3a67 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,4 +18,4 @@ RUN python3 -m pip install --no-cache-dir -r requirements.txt EXPOSE 5000 -ENTRYPOINT [ "python3", "/jarvis/start.py"] \ No newline at end of file +ENTRYPOINT [ "python3", "/start.py"] \ No newline at end of file diff --git a/jarvis/api.py b/jarvis/api.py index a0b92d6..227a83c 100644 --- a/jarvis/api.py +++ b/jarvis/api.py @@ -44,9 +44,9 @@ def process_message(message): # intent_manager.recognise(message['data'], message['uuid']) if message['data'] != "": - # response = chatgpt_recognise(message['data'], message['uuid']) - # text_response = chatgpt_utils.get_answer_from_response(response) - text_response = "Tokens are expensive ya know?" + response = chatgpt_utils.chatgpt_recognise(message['data'], message['uuid']) + text_response = chatgpt_utils.get_answer_from_response(response) + # response = "Tokens are expensive ya know?" chat_utils.send_jarvis_message_to_room(text_response, message['uuid']) diff --git a/jarvis/utils/chatgpt_prompt_2_smaller.txt b/jarvis/utils/chatgpt_prompt_2_smaller.txt index da81241..c689aae 100644 --- a/jarvis/utils/chatgpt_prompt_2_smaller.txt +++ b/jarvis/utils/chatgpt_prompt_2_smaller.txt @@ -2,16 +2,17 @@ Respond to smart home requests in JSON format with HomeAssistant API terminology Requests groups: - command: change the state of an accessory (properties : location, device_class, device_description, value, comment, scheduleTimeStamp) - - query: only for retrieving a smart device state (properties : location, device_class, device_description, property) - - answer: for any questions (properties : answer) - - clarify: when you don't understand, ask for details (properties : question) + - query: only for retrieving a smart device state never for general questions (properties : location, device_class, device_description, property) + - answer: for questions (properties : answer) + - clarify: when you really don't understand, ask for details but try to guess what the user means (properties : question) + - calculate: for math questions, never give the answer (properties : calculation) NEVER add other properties -Response: +Response parameters: action: groups just mentioned location: room name, unknown if not obvious value: wanted state, song name, artist, temperature, etc. -device_class: homeassistant device class +device_class: switch, light, button, service device_description: information to identify the device later, include room and others identifiers For queries property "property" should be "state" @@ -20,5 +21,7 @@ For commands property "scheduleTimeStamp" is for scheduling a command in the fut The house located at {{location}} and current time is {{timestamp}}. -If questions about you, you are funny smart home AI like Jarvis from Iron Man, be nice and helpful with all topics. -Very important for you to only respond with a single valid JSON response and encapsulate every JSON property with double quotes "". Don't add anything and never excuse yourself. Respond to only one request at a time. 
\ No newline at end of file
+{{history}}
+
+You reluctantly answer questions with sarcastic responses.
+Always respond with a single valid JSON response, never excuse yourself.
\ No newline at end of file
diff --git a/jarvis/utils/chatgpt_utils.py b/jarvis/utils/chatgpt_utils.py
index a8e48b1..eac1ada 100644
--- a/jarvis/utils/chatgpt_utils.py
+++ b/jarvis/utils/chatgpt_utils.py
@@ -8,34 +8,45 @@ chat_messages = {}
 
 
 def chatgpt_recognise(text, uuid):
-    if len(chat_messages) == 0:
-        chatgpt_init(uuid)
+    # If the chat history is empty, create it
+    if uuid not in chat_messages:
+        chat_messages[uuid] = []
 
+    # Add the user message to the chat history
     chat_messages[uuid].append({"role": "user", "content": text})
 
     # Call ChatGPT API
     start_time = time.time()
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
-        messages=chat_messages[uuid],
+        messages=get_conversation_as_one_message(uuid),
     )
     end_time = time.time()
-    logging.info("GPT-3 response in " + str(end_time - start_time) + " seconds")
+    logging.info("GPT-3 response in " + str(round(end_time - start_time, ndigits=2)) + " seconds")
 
-    # Check if the response is a "valid" JSON
     try:
+        # Parse the response
         response = json.loads(str(response.choices[0].message.content))
+
+        # Check if the response looks like a "valid" JSON
         if 'action' in response:
+            # Add the response to the chat history
             chat_messages[uuid].append({"role": "assistant", "content": get_answer_from_response(response)})
+
+            # Return the response
             return response
     except Exception as e:
+        # If the response is not valid JSON, it's probably a plaintext response
         logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(response.choices))
         logging.error(str(e))
+
+        # Try to treat the response as a plaintext answer; if unsuccessful, return an "I don't understand" response
         return {"action": "answer", "answer": get_answer_from_response(response)}
 
 
 def clear_chat(uuid):
+    """Clear the chat history for a given uuid"""
     logging.info("Cleared chat for uuid " + uuid)
     chat_messages[uuid] = []
 
@@ -54,15 +65,45 @@ def get_answer_from_response(response):
             return response['device_description']
         elif response['action'] == 'answer':
             return response['answer']
+        elif response['action'] == 'calculate':
+            return response['calculation']
         else:
             return "I don't know how to respond to that..."
 
 
-def chatgpt_init(uuid):
+def get_conversation_as_one_message(uuid):
+    """
+    Prepare the messages to send to the OpenAI API.
+    We don't use OpenAI's usual array-of-messages format because it doesn't work well with the prompt and ChatGPT
+    breaks out of character.
+    So instead, we add the history to the system prompt and create a new conversation each time.
+    It should not cost more tokens, but I am not 100% sure.
+ """ + + # Load the prompt prompt = open("utils/chatgpt_prompt_2_smaller.txt", "r").read() - prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) - prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland") + # Replace the variables in the prompt + prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland") - chat_messages[uuid] = [] - chat_messages[uuid].append({"role": "system", "content": prompt}) + # Check if the user has already asked a question (to keep context) + if len(chat_messages[uuid]) > 1: + history = "" + + # Add the last 4 messages from the user to the prompt + # we don't give the whole history because it adds a lot of tokens. + history += "\nFor context, last discussion you had with user:\n" + for message in chat_messages[uuid][-4:]: + if message['role'] == "user": + history += "U: " + message['content'] + "\n" + elif message['role'] == "assistant": + history += "Y: " + message['content'] + "\n" + + # Replace the {{history}} variable in the prompt with the history + prompt = prompt.replace("{{history}}", history) + else: + # If the user hasn't asked a question yet, remove the history part of the prompt + prompt = prompt.replace("{{history}}", "") + + return [{"role": "system", "content": prompt}] diff --git a/jarvis/start.py b/start.py similarity index 92% rename from jarvis/start.py rename to start.py index 98f73c0..9ced225 100644 --- a/jarvis/start.py +++ b/start.py @@ -2,7 +2,7 @@ import logging import lingua_franca -import api +import jarvis.api from jarvis.skills.cocktails import CocktailSkill from jarvis.skills.intent_services import intent_manager from jarvis.utils import whisper_utils @@ -23,4 +23,4 @@ if __name__ == '__main__': whisper_utils.load_model() # Start the api endpoint - api.start_api() + jarvis.api.start_api()
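
The get_conversation_as_one_message() docstring is unsure whether folding the history into the system prompt costs more tokens than OpenAI's native message array. A rough, self-contained way to compare the two approaches is sketched below; it assumes the tiktoken package (not currently a project dependency), and the abridged system prompt and sample conversation are made up for illustration.

# Minimal sketch, not part of the patch: compare token counts of the two
# prompt strategies. Assumes `tiktoken` is installed; the sample conversation
# and the abridged system prompt are invented.
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def content_tokens(messages):
    # Counts only message contents; the chat format also adds a few tokens of
    # overhead per message, which slightly penalises the native array.
    return sum(len(enc.encode(m["content"])) for m in messages)

system_prompt = "Respond to smart home requests in JSON format..."  # abridged
history = [
    {"role": "user", "content": "Turn on the kitchen light"},
    {"role": "assistant", "content": "kitchen light"},
    {"role": "user", "content": "And what is 2 + 2?"},
]

# Strategy used by the patch: history folded into a single system message.
folded_history = "\nFor context, last discussion you had with user:\n" + "".join(
    ("U: " if m["role"] == "user" else "Y: ") + m["content"] + "\n" for m in history
)
folded = [{"role": "system", "content": system_prompt + folded_history}]

# OpenAI's native strategy: system prompt plus the raw message array.
native = [{"role": "system", "content": system_prompt}] + history

print("folded:", content_tokens(folded), "native:", content_tokens(native))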
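The prompt tells the model to return only the expression for the new calculate action ("never give the answer"), yet get_answer_from_response() sends response['calculation'] back to the user verbatim. If the intent is for Jarvis to do the arithmetic itself, one possible follow-up (not part of this patch) is a small evaluator that accepts only basic operators instead of calling eval(); safe_calculate is a hypothetical helper name.

# Hypothetical helper: evaluate the "calculation" expression locally, allowing
# only basic arithmetic nodes so arbitrary model output can't execute code.
import ast
import operator

_OPS = {
    ast.Add: operator.add, ast.Sub: operator.sub,
    ast.Mult: operator.mul, ast.Div: operator.truediv,
    ast.Pow: operator.pow, ast.USub: operator.neg,
}

def safe_calculate(expression):
    def _eval(node):
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError("Unsupported expression: " + ast.dump(node))
    return _eval(ast.parse(expression, mode="eval").body)

# get_answer_from_response() could then do:
#   elif response['action'] == 'calculate':
#       return str(safe_calculate(response['calculation']))
print(safe_calculate("2 + 2 * 3"))  # 8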
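In chatgpt_recognise(), when json.loads() raises, the variable response still holds the raw ChatCompletion object, so the except branch hands an unparsed object to get_answer_from_response(). A more defensive option, sketched here as a suggestion rather than the patch's behaviour, is to parse the reply in a small helper and fall back to the plain text; parse_chatgpt_reply is a hypothetical name.

# Hypothetical helper: turn the raw ChatGPT reply into a response dict,
# falling back to a plain "answer" action when the reply is not the JSON
# the prompt asks for.
import json
import logging

def parse_chatgpt_reply(raw_content):
    try:
        parsed = json.loads(raw_content)
        if isinstance(parsed, dict) and 'action' in parsed:
            return parsed
    except json.JSONDecodeError as e:
        logging.error("ChatGPT reply is not valid JSON: " + str(e))
    # Either not JSON at all, or JSON without an "action" field
    return {"action": "answer", "answer": raw_content}

# chatgpt_recognise() could then call:
#   response = parse_chatgpt_reply(str(response.choices[0].message.content))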