# jarvis-server-v2/jarvis/utils/chatgpt_utils.py

import json
import logging
import openai
import time
from jarvis.db import db_utils
# In-memory per-conversation history: maps a conversation uuid to the list of
# OpenAI chat messages ({"role": ..., "content": ...}) sent so far.
# Populated lazily by setup_messages(); cleared by clear_chat().
chat_messages = {}
def setup_messages(uuid):
    """Initialize the chat history for *uuid* with the system prompt.

    Reads the system prompt from disk and seeds chat_messages[uuid] with a
    single "system" message. Any existing history for *uuid* is replaced.
    """
    # Context manager closes the file deterministically (the original relied
    # on GC to close the leaked handle); explicit encoding avoids
    # platform-dependent decoding of the prompt file.
    with open("jarvis/utils/gpt_prompt_v3.txt", "r", encoding="utf-8") as f:
        prompt = f.read()
    chat_messages[uuid] = [{"role": "system", "content": prompt}]
def chatgpt_recognise(text, uuid):
    """Send *text* to ChatGPT within the conversation *uuid* and parse the reply.

    Appends the user message to the per-uuid history, calls the ChatCompletion
    API, and expects the model to answer with a JSON object containing at least
    the keys 'simplified_sentence' and 'response'. On success the assistant
    reply is appended to the history, logged to the local database, and the
    parsed JSON dict is returned. Returns None when the reply is not JSON or
    lacks the expected keys.
    """
    # If the chat history is empty, create it with the system prompt.
    if uuid not in chat_messages:
        setup_messages(uuid)

    # Add the user message to the chat history.
    chat_messages[uuid].append({"role": "user", "content": text})

    # Call the ChatGPT API, timing the round trip.
    start_time = time.time()
    api_response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=chat_messages.get(uuid),
    )
    elapsed = round(time.time() - start_time, ndigits=2)
    logging.info("GPT-3 response in " + str(elapsed) + " seconds")

    raw_content = str(api_response.choices[0].message.content)
    try:
        # Keep the try narrow: only the JSON parse can legitimately fail here.
        # (The original reassigned `response` inside a broad try, so a late
        # exception made the except handler call .choices on a plain string.)
        query_json = json.loads(raw_content)
    except ValueError as e:  # json.JSONDecodeError is a ValueError subclass
        # If the response is not JSON, it's probably a plaintext response.
        logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(api_response.choices))
        logging.error(str(e))
        # TODO: repeat query if it's not a JSON response
        return None

    # Check if the response looks like a "valid" JSON.
    if 'simplified_sentence' in query_json and 'response' in query_json:
        answer = query_json['response']
        simplified_sentence = query_json['simplified_sentence']
        # Add the assistant reply to the chat history.
        chat_messages[uuid].append({"role": "assistant", "content": answer})
        # Add to local database.
        db_utils.add_query(text, simplified_sentence, answer)
        return query_json

    # Valid JSON but missing the expected schema — previously this fell
    # through silently; log it so the failure is diagnosable.
    logging.error("ChatGPT JSON response missing expected keys: " + str(query_json))
    return None
def clear_chat(uuid):
    """Forget the chat history for a given uuid.

    Removes the uuid's entry entirely so that the next chatgpt_recognise()
    call re-runs setup_messages() and restores the system prompt. The
    original assigned an empty list, which left the uuid present in
    chat_messages and skipped the system-prompt setup, breaking every
    follow-up conversation after a clear.
    """
    logging.info("Cleared chat for uuid " + uuid)
    # pop with a default is a no-op when the uuid was never seen.
    chat_messages.pop(uuid, None)
# def get_answer_from_response(response):
# if 'action' not in response:
# # Fix for when it responds in plaintext to follow-up questions
# # In that case the response is an OpenAIObject not a JSON
# return response.choices[0].message.content
# else:
# if response['action'] == 'clarify':
# return response['question']
# elif response['action'] == 'command':
# return response['comment']
# elif response['action'] == 'query':
# return response['device_description']
# elif response['action'] == 'answer':
# return response['answer']
# elif response['action'] == 'calculate':
# return response['calculation']
# else:
# return "I don't know how to respond to that..."
# def get_conversation_as_one_message(uuid):
# """
# Prepare the messages to send to OpenAI API
# We don't use OpenAI way of using an array of messages because it doesn't work well with the prompt and chatgpt
# breaks out of character.
# So instead, we add the history to the system prompt and create a new conversation each time.
# It should not cost more tokens, but I am not sure at 100%.
# """
#
# # Load the prompt
# prompt = open("/home/mathieu/PycharmProjects/jarvis-server-v2/jarvis/utils/chatgpt_prompt_2_smaller.txt", "r").read()
#
# # Replace the variables in the prompt
# prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
#
# # Check if the user has already asked a question (to keep context)
# if len(chat_messages[uuid]) > 1:
# history = ""
#
# # Add the last 4 messages from the user to the prompt
# # we don't give the whole history because it adds a lot of tokens.
# history += "\nFor context, last discussion you had with user:\n"
# for message in chat_messages[uuid][-4:]:
# if message['role'] == "user":
# history += "U: " + message['content'] + "\n"
# elif message['role'] == "assistant":
# history += "Y: " + message['content'] + "\n"
#
# # Replace the {{history}} variable in the prompt with the history
# prompt = prompt.replace("{{history}}", history)
# else:
# # If the user hasn't asked a question yet, remove the history part of the prompt
# prompt = prompt.replace("{{history}}", "")
#
# return [{"role": "system", "content": prompt}]