new gpt prompt usage
commit 173f79e684
parent 22bb680dca
@@ -1,16 +1,22 @@
 import json
 import logging
+import openai
 import time
 
-import openai
+from jarvis.db import db_utils
 
 chat_messages = {}
 
 
+def setup_messages(uuid):
+    prompt = open("jarvis/utils/gpt_prompt_v3.txt", "r").read()
+    chat_messages[uuid] = [{"role": "system", "content": prompt}]
+
+
 def chatgpt_recognise(text, uuid):
     # If the chat history is empty, create it
     if uuid not in chat_messages:
-        chat_messages[uuid] = []
+        setup_messages(uuid)
 
     # Add the user message to the chat history
     chat_messages[uuid].append({"role": "user", "content": text})
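Note: after this hunk, every conversation keyed by uuid is seeded by setup_messages() with the system prompt from jarvis/utils/gpt_prompt_v3.txt and then grows turn by turn. A minimal sketch of the resulting structure (the uuid and message texts below are illustrative, not taken from the prompt file):

    # Illustrative shape of chat_messages[uuid] after setup_messages() and one exchange
    chat_messages["example-uuid"] = [
        {"role": "system", "content": "<contents of jarvis/utils/gpt_prompt_v3.txt>"},
        {"role": "user", "content": "Turn on the kitchen light"},
        {"role": "assistant", "content": "Turning on the kitchen light."},
    ]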
@@ -19,30 +25,36 @@ def chatgpt_recognise(text, uuid):
     start_time = time.time()
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
-        messages=get_conversation_as_one_message(uuid),
+        messages=chat_messages.get(uuid),
     )
     end_time = time.time()
     logging.info("GPT-3 response in " + str(round(end_time - start_time, ndigits=2)) + " seconds")
 
     try:
         # Parse the response
-        response = json.loads(str(response.choices[0].message.content))
+        query_json = json.loads(str(response.choices[0].message.content))
 
         # Check if the response looks like a "valid" JSON
-        if 'action' in response:
+        if 'simplified_sentence' in query_json and 'response' in query_json:
+            response = query_json['response']
+            simplified_sentence = query_json['simplified_sentence']
+
             # Add the response to the chat history
-            chat_messages[uuid].append({"role": "assistant", "content": get_answer_from_response(response)})
+            chat_messages[uuid].append({"role": "assistant", "content": response})
 
+            # Add to local database
+            db_utils.add_query(text, simplified_sentence, response)
+
             # Return the response
-            return response
+            return query_json
 
     except Exception as e:
         # If the response is not a JSON, it's probably a plaintext response
         logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(response.choices))
         logging.error(str(e))
 
-        # Try to parse the response as a plaintext response, if unsuccessful, return a "I don't understand" response
-        return {"action": "answer", "answer": get_answer_from_response(response)}
+        # TODO: repeat query if it's not a JSON response
+        return None
 
 
 def clear_chat(uuid):
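Note: the new prompt is expected to make the model reply with a JSON object containing at least the keys 'simplified_sentence' and 'response'; on a parse failure chatgpt_recognise() now returns None instead of a fallback answer. A hedged sketch of how a caller might consume the return value (the sample text and fallback message are assumptions, not part of this commit):

    result = chatgpt_recognise("turn off the lights", "example-uuid")
    if result is None:
        # JSON parsing failed; the TODO above suggests re-asking the model
        answer = "Sorry, I did not understand that."
    else:
        answer = result["response"]                 # reply shown to the user
        simplified = result["simplified_sentence"]  # normalised form stored via db_utils.add_query()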
@@ -50,60 +62,59 @@ def clear_chat(uuid):
     logging.info("Cleared chat for uuid " + uuid)
     chat_messages[uuid] = []
 
-def get_answer_from_response(response):
-    if 'action' not in response:
-        # Fix for when it responds in plaintext to follow-up questions
-        # In that case the response is an OpenAIObject not a JSON
-        return response.choices[0].message.content
-    else:
-        if response['action'] == 'clarify':
-            return response['question']
-        elif response['action'] == 'command':
-            return response['comment']
-        elif response['action'] == 'query':
-            return response['device_description']
-        elif response['action'] == 'answer':
-            return response['answer']
-        elif response['action'] == 'calculate':
-            return response['calculation']
-        else:
-            return "I don't know how to respond to that..."
+# def get_answer_from_response(response):
+#     if 'action' not in response:
+#         # Fix for when it responds in plaintext to follow-up questions
+#         # In that case the response is an OpenAIObject not a JSON
+#         return response.choices[0].message.content
+#     else:
+#         if response['action'] == 'clarify':
+#             return response['question']
+#         elif response['action'] == 'command':
+#             return response['comment']
+#         elif response['action'] == 'query':
+#             return response['device_description']
+#         elif response['action'] == 'answer':
+#             return response['answer']
+#         elif response['action'] == 'calculate':
+#             return response['calculation']
+#         else:
+#             return "I don't know how to respond to that..."
 
 
-def get_conversation_as_one_message(uuid):
-    """
-    Prepare the messages to send to OpenAI API
-    We don't use OpenAI way of using an array of messages because it doesn't work well with the prompt and chatgpt
-    breaks out of character.
-    So instead, we add the history to the system prompt and create a new conversation each time.
-    It should not cost more tokens, but I am not sure at 100%.
-    """
-
-    # Load the prompt
-    prompt = open("utils/chatgpt_prompt_2_smaller.txt", "r").read()
-
-    # Replace the variables in the prompt
-    prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
-    prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
-
-    # Check if the user has already asked a question (to keep context)
-    if len(chat_messages[uuid]) > 1:
-        history = ""
-
-        # Add the last 4 messages from the user to the prompt
-        # we don't give the whole history because it adds a lot of tokens.
-        history += "\nFor context, last discussion you had with user:\n"
-        for message in chat_messages[uuid][-4:]:
-            if message['role'] == "user":
-                history += "U: " + message['content'] + "\n"
-            elif message['role'] == "assistant":
-                history += "Y: " + message['content'] + "\n"
-
-        # Replace the {{history}} variable in the prompt with the history
-        prompt = prompt.replace("{{history}}", history)
-    else:
-        # If the user hasn't asked a question yet, remove the history part of the prompt
-        prompt = prompt.replace("{{history}}", "")
-
-    return [{"role": "system", "content": prompt}]
+# def get_conversation_as_one_message(uuid):
+#     """
+#     Prepare the messages to send to OpenAI API
+#     We don't use OpenAI way of using an array of messages because it doesn't work well with the prompt and chatgpt
+#     breaks out of character.
+#     So instead, we add the history to the system prompt and create a new conversation each time.
+#     It should not cost more tokens, but I am not sure at 100%.
+#     """
+#
+#     # Load the prompt
+#     prompt = open("/home/mathieu/PycharmProjects/jarvis-server-v2/jarvis/utils/chatgpt_prompt_2_smaller.txt", "r").read()
+#
+#     # Replace the variables in the prompt
+#     prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
+#     prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
+#
+#     # Check if the user has already asked a question (to keep context)
+#     if len(chat_messages[uuid]) > 1:
+#         history = ""
+#
+#         # Add the last 4 messages from the user to the prompt
+#         # we don't give the whole history because it adds a lot of tokens.
+#         history += "\nFor context, last discussion you had with user:\n"
+#         for message in chat_messages[uuid][-4:]:
+#             if message['role'] == "user":
+#                 history += "U: " + message['content'] + "\n"
+#             elif message['role'] == "assistant":
+#                 history += "Y: " + message['content'] + "\n"
+#
+#         # Replace the {{history}} variable in the prompt with the history
+#         prompt = prompt.replace("{{history}}", history)
+#     else:
+#         # If the user hasn't asked a question yet, remove the history part of the prompt
+#         prompt = prompt.replace("{{history}}", "")
+#
+#     return [{"role": "system", "content": prompt}]
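Note: with get_conversation_as_one_message() commented out, the request now uses the standard chat format: the per-uuid history is passed directly as messages instead of being flattened into a single system prompt through the '{{history}}' placeholder. Roughly, the API call now receives something like this (values illustrative):

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "<gpt_prompt_v3.txt>"},  # added once by setup_messages()
            {"role": "user", "content": "Is the heating on?"},     # appended on each call
            # earlier assistant turns stay in the list, so follow-up questions keep their context
        ],
    )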