import json
import logging
import time

import openai

# In-memory chat history, keyed by conversation uuid
chat_messages = {}


def chatgpt_recognise(text, uuid):
    """Send the user's message to ChatGPT and return the parsed JSON response."""
    # If the chat history is empty, create it
    if uuid not in chat_messages:
        chat_messages[uuid] = []

    # Add the user message to the chat history
    chat_messages[uuid].append({"role": "user", "content": text})

    # Call the ChatGPT API and time the request
    start_time = time.time()
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=get_conversation_as_one_message(uuid),
    )
    end_time = time.time()
    logging.info("ChatGPT response in " + str(round(end_time - start_time, ndigits=2)) + " seconds")

    try:
        # Parse the response as JSON (without rebinding `response`, so the
        # except branch can still access the raw API object)
        parsed = json.loads(str(response.choices[0].message.content))

        # Check if the response looks like a "valid" action JSON
        if 'action' in parsed:
            # Add the response to the chat history
            chat_messages[uuid].append({"role": "assistant", "content": get_answer_from_response(parsed)})

            # Return the parsed response
            return parsed

        # Valid JSON but without an "action" key: fall back to a plain answer
        return {"action": "answer", "answer": str(response.choices[0].message.content)}
    except Exception as e:
        # If the response is not JSON, it's probably a plaintext response
        logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(response.choices))
        logging.error(str(e))

        # Treat the raw response as a plaintext answer
        return {"action": "answer", "answer": get_answer_from_response(response)}


def clear_chat(uuid):
    """Clear the chat history for a given uuid"""
    logging.info("Cleared chat for uuid " + uuid)
    chat_messages[uuid] = []


def get_answer_from_response(response):
    """Extract the human-readable answer from a parsed response (or a raw API object)."""
    if 'action' not in response:
        # Fix for when it responds in plaintext to follow-up questions
        # In that case the response is an OpenAIObject, not parsed JSON
        return response.choices[0].message.content

    if response['action'] == 'clarify':
        return response['question']
    elif response['action'] == 'command':
        return response['comment']
    elif response['action'] == 'query':
        return response['device_description']
    elif response['action'] == 'answer':
        return response['answer']
    elif response['action'] == 'calculate':
        return response['calculation']
    else:
        return "I don't know how to respond to that..."
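
# Shape of the JSON responses the prompt asks ChatGPT to produce, reconstructed
# from the branches above as illustrative examples (the exact schema is defined
# in the prompt file, so the field sets may differ):
#   {"action": "clarify", "question": "Which room do you mean?"}
#   {"action": "command", "comment": "Turning on the lights."}
#   {"action": "query", "device_description": "living room temperature sensor"}
#   {"action": "answer", "answer": "Lausanne is in the canton of Vaud."}
#   {"action": "calculate", "calculation": "12 * 8 = 96"}
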
def get_conversation_as_one_message(uuid):
    """
    Prepare the messages to send to the OpenAI API.

    We don't use OpenAI's usual array of chat messages because it doesn't work
    well with the prompt and ChatGPT breaks out of character. Instead, we add
    the history to the system prompt and create a new conversation each time.
    It should not cost more tokens, but I am not 100% sure.
    """
    # Load the prompt
    with open("utils/chatgpt_prompt_2_smaller.txt", "r") as f:
        prompt = f.read()

    # Replace the variables in the prompt
    prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")

    # Check if the user has already asked a question (to keep context)
    if len(chat_messages[uuid]) > 1:
        # Add the last 4 messages of the conversation to the prompt;
        # we don't give the whole history because it adds a lot of tokens.
        history = "\nFor context, last discussion you had with user:\n"
        for message in chat_messages[uuid][-4:]:
            if message['role'] == "user":
                history += "U: " + message['content'] + "\n"
            elif message['role'] == "assistant":
                history += "Y: " + message['content'] + "\n"

        # Replace the {{history}} variable in the prompt with the history
        prompt = prompt.replace("{{history}}", history)
    else:
        # If the user hasn't asked a question yet, remove the history part of the prompt
        prompt = prompt.replace("{{history}}", "")

    return [{"role": "system", "content": prompt}]
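
# Minimal usage sketch (hypothetical: the uuid value and the logging setup are
# illustrative, and openai.api_key must be configured elsewhere, e.g. via the
# OPENAI_API_KEY environment variable).
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    result = chatgpt_recognise("Turn on the living room lights", "demo-session")
    print(get_answer_from_response(result))
    clear_chat("demo-session")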