Use the new GPT prompt (v3): seed per-user chat history with the system prompt instead of rebuilding one combined message per request

This commit is contained in:
Mathieu Broillet 2023-05-31 18:53:37 +02:00
parent 22bb680dca
commit 173f79e684

View File

@ -1,16 +1,22 @@
# Standard library
import json
import logging
import time

# Third-party
import openai

# Local
from jarvis.db import db_utils

# Per-user conversation state: uuid -> list of {"role", "content"} messages
# (seeded with the system prompt by setup_messages()).
chat_messages = {}
def setup_messages(uuid):
    """Initialise the chat history for *uuid* with the v3 system prompt.

    Reads jarvis/utils/gpt_prompt_v3.txt and stores it as the first
    (system-role) message of the conversation.
    """
    # `with` ensures the prompt file handle is closed (the original
    # open(...).read() leaked it).
    with open("jarvis/utils/gpt_prompt_v3.txt", "r") as prompt_file:
        prompt = prompt_file.read()
    chat_messages[uuid] = [{"role": "system", "content": prompt}]
def chatgpt_recognise(text, uuid):
    """Send *text* to ChatGPT within *uuid*'s conversation and parse the reply.

    Returns the parsed JSON dict (expected to contain at least
    'simplified_sentence' and 'response' keys) on success, or None when the
    model's reply is not the expected JSON.
    """
    # If the chat history is empty, create it (seeds the system prompt)
    if uuid not in chat_messages:
        setup_messages(uuid)

    # Add the user message to the chat history
    chat_messages[uuid].append({"role": "user", "content": text})

    # Ask ChatGPT, timing the round-trip for the log
    start_time = time.time()
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=chat_messages.get(uuid),
    )
    end_time = time.time()
    logging.info("GPT-3 response in " + str(round(end_time - start_time, ndigits=2)) + " seconds")

    try:
        # Parse the response
        query_json = json.loads(str(response.choices[0].message.content))

        # Check if the response looks like a "valid" JSON
        if 'simplified_sentence' in query_json and 'response' in query_json:
            # Use a distinct local name: rebinding `response` here would make
            # the except-handler's `response.choices` fail on a plain string.
            answer = query_json['response']
            simplified_sentence = query_json['simplified_sentence']

            # Add the response to the chat history
            chat_messages[uuid].append({"role": "assistant", "content": answer})

            # Add to local database
            db_utils.add_query(text, simplified_sentence, answer)

            # Return the parsed response
            return query_json
    except Exception as e:
        # If the response is not a JSON, it's probably a plaintext response
        logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(response.choices))
        logging.error(str(e))
        # TODO: repeat query if it's not a JSON response
        return None
def clear_chat(uuid):
    """Forget the stored conversation for *uuid*.

    The key is removed entirely rather than set to []: leaving an empty list
    would make chatgpt_recognise()'s `uuid not in chat_messages` check skip
    setup_messages(), so the system prompt would never be re-seeded.
    """
    logging.info("Cleared chat for uuid " + uuid)
    chat_messages.pop(uuid, None)
def get_answer_from_response(response):
    """Extract the human-readable answer text from a parsed ChatGPT response."""
    if 'action' not in response:
        # Follow-up replies sometimes arrive as an OpenAIObject (plaintext),
        # not as the expected JSON dict — pull the raw message text out.
        return response.choices[0].message.content

    # Which field carries the answer depends on the declared action.
    answer_field_by_action = {
        'clarify': 'question',
        'command': 'comment',
        'query': 'device_description',
        'answer': 'answer',
        'calculate': 'calculation',
    }
    field = answer_field_by_action.get(response['action'])
    if field is None:
        return "I don't know how to respond to that..."
    return response[field]
# def get_answer_from_response(response):
# if 'action' not in response:
# # Fix for when it responds in plaintext to follow-up questions
# # In that case the response is an OpenAIObject not a JSON
# return response.choices[0].message.content
# else:
# if response['action'] == 'clarify':
# return response['question']
# elif response['action'] == 'command':
# return response['comment']
# elif response['action'] == 'query':
# return response['device_description']
# elif response['action'] == 'answer':
# return response['answer']
# elif response['action'] == 'calculate':
# return response['calculation']
# else:
# return "I don't know how to respond to that..."
def get_conversation_as_one_message(uuid):
    """
    Prepare the messages to send to OpenAI API.

    We don't use OpenAI's way of using an array of messages because it doesn't
    work well with the prompt and ChatGPT breaks out of character. So instead,
    we add the history to the system prompt and create a new conversation each
    time. It should not cost more tokens, but I am not sure at 100%.

    Returns a single-element list: [{"role": "system", "content": prompt}].
    """
    # Load the prompt (`with` closes the file handle; the original leaked it)
    with open("utils/chatgpt_prompt_2_smaller.txt", "r") as prompt_file:
        prompt = prompt_file.read()

    # Replace the variables in the prompt
    prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")

    # Check if the user has already asked a question (to keep context)
    if len(chat_messages[uuid]) > 1:
        # Only the last 4 messages are included: the whole history would add
        # a lot of tokens.
        parts = ["\nFor context, last discussion you had with user:\n"]
        for message in chat_messages[uuid][-4:]:
            if message['role'] == "user":
                parts.append("U: " + message['content'] + "\n")
            elif message['role'] == "assistant":
                parts.append("Y: " + message['content'] + "\n")
        prompt = prompt.replace("{{history}}", "".join(parts))
    else:
        # No prior question yet: drop the history placeholder entirely
        prompt = prompt.replace("{{history}}", "")

    return [{"role": "system", "content": prompt}]
# def get_conversation_as_one_message(uuid):
# """
# Prepare the messages to send to OpenAI API
# We don't use OpenAI way of using an array of messages because it doesn't work well with the prompt and chatgpt
# breaks out of character.
# So instead, we add the history to the system prompt and create a new conversation each time.
# It should not cost more tokens, but I am not sure at 100%.
# """
#
# # Load the prompt
# prompt = open("/home/mathieu/PycharmProjects/jarvis-server-v2/jarvis/utils/chatgpt_prompt_2_smaller.txt", "r").read()
#
# # Replace the variables in the prompt
# prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
#
# # Check if the user has already asked a question (to keep context)
# if len(chat_messages[uuid]) > 1:
# history = ""
#
# # Add the last 4 messages from the user to the prompt
# # we don't give the whole history because it adds a lot of tokens.
# history += "\nFor context, last discussion you had with user:\n"
# for message in chat_messages[uuid][-4:]:
# if message['role'] == "user":
# history += "U: " + message['content'] + "\n"
# elif message['role'] == "assistant":
# history += "Y: " + message['content'] + "\n"
#
# # Replace the {{history}} variable in the prompt with the history
# prompt = prompt.replace("{{history}}", history)
# else:
# # If the user hasn't asked a question yet, remove the history part of the prompt
# prompt = prompt.replace("{{history}}", "")
#
# return [{"role": "system", "content": prompt}]