add history fix and calculate

Mathieu B 2023-03-26 13:26:43 +02:00
parent 9e64983d28
commit 6fca1ec80e
5 changed files with 67 additions and 23 deletions

View File

@@ -18,4 +18,4 @@ RUN python3 -m pip install --no-cache-dir -r requirements.txt
EXPOSE 5000
ENTRYPOINT [ "python3", "/jarvis/start.py"]
ENTRYPOINT [ "python3", "/start.py"]

View File

@@ -44,9 +44,9 @@ def process_message(message):
# intent_manager.recognise(message['data'], message['uuid'])
if message['data'] != "":
# response = chatgpt_recognise(message['data'], message['uuid'])
# text_response = chatgpt_utils.get_answer_from_response(response)
text_response = "Tokens are expensive ya know?"
response = chatgpt_utils.chatgpt_recognise(message['data'], message['uuid'])
text_response = chatgpt_utils.get_answer_from_response(response)
# response = "Tokens are expensive ya know?"
chat_utils.send_jarvis_message_to_room(text_response, message['uuid'])

View File

@@ -2,16 +2,17 @@ Respond to smart home requests in JSON format with HomeAssistant API terminology
Requests groups:
- command: change the state of an accessory (properties : location, device_class, device_description, value, comment, scheduleTimeStamp)
- query: only for retrieving a smart device state (properties : location, device_class, device_description, property)
- answer: for any questions (properties : answer)
- clarify: when you don't understand, ask for details (properties : question)
- query: only for retrieving a smart device state never for general questions (properties : location, device_class, device_description, property)
- answer: for questions (properties : answer)
- clarify: when you really don't understand, ask for details but try to guess what the user means (properties : question)
- calculate: for math questions, never give the answer (properties : calculation)
NEVER add other properties
Response:
Response parameters:
action: groups just mentioned
location: room name, unknown if not obvious
value: wanted state, song name, artist, temperature, etc.
device_class: homeassistant device class
device_class: switch, light, button, service
device_description: information to identify the device later, include room and other identifiers
For queries, property "property" should be "state"
@@ -20,5 +21,7 @@ For commands property "scheduleTimeStamp" is for scheduling a command in the future
The house is located at {{location}} and the current time is {{timestamp}}.
If asked questions about you, you are a funny smart home AI like Jarvis from Iron Man; be nice and helpful with all topics.
Very important for you to only respond with a single valid JSON response and encapsulate every JSON property with double quotes "". Don't add anything and never excuse yourself. Respond to only one request at a time.
{{history}}
You reluctantly answer questions with sarcastic responses.
Always respond with a single valid JSON response, never excuse yourself.
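To make the expected model output concrete, here is a minimal sketch (not part of the committed prompt file) of responses that follow the format described above, one for a light command and one for a calculation request; the field values are illustrative assumptions only:

import json

# Hypothetical assistant replies matching the prompt's schema (illustrative values only)
example_command = json.loads(
    '{"action": "command", "location": "living room", "device_class": "light", '
    '"device_description": "living room ceiling light", "value": "on", "comment": ""}'
)
example_calculate = json.loads('{"action": "calculate", "calculation": "12 * 8"}')
print(example_command["action"], example_calculate["calculation"])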

View File

@@ -8,34 +8,45 @@ chat_messages = {}
def chatgpt_recognise(text, uuid):
if len(chat_messages) == 0:
chatgpt_init(uuid)
# If there is no chat history for this uuid yet, create it
if uuid not in chat_messages:
chat_messages[uuid] = []
# Add the user message to the chat history
chat_messages[uuid].append({"role": "user", "content": text})
# Call ChatGPT API
start_time = time.time()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chat_messages[uuid],
messages=get_conversation_as_one_message(uuid),
)
end_time = time.time()
logging.info("GPT-3 response in " + str(end_time - start_time) + " seconds")
logging.info("GPT-3 response in " + str(round(end_time - start_time, ndigits=2)) + " seconds")
# Check if the response is a "valid" JSON
try:
# Parse the response
response = json.loads(str(response.choices[0].message.content))
# Check if the response looks like a "valid" JSON
if 'action' in response:
# Add the response to the chat history
chat_messages[uuid].append({"role": "assistant", "content": get_answer_from_response(response)})
# Return the response
return response
except Exception as e:
# If the response is not a JSON, it's probably a plaintext response
logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(response.choices))
logging.error(str(e))
# Try to parse the response as a plaintext response; if unsuccessful, return an "I don't understand" response
return {"action": "answer", "answer": get_answer_from_response(response)}
def clear_chat(uuid):
"""Clear the chat history for a given uuid"""
logging.info("Cleared chat for uuid " + uuid)
chat_messages[uuid] = []
@@ -54,15 +65,45 @@ def get_answer_from_response(response):
return response['device_description']
elif response['action'] == 'answer':
return response['answer']
elif response['action'] == 'calculate':
return response['calculation']
else:
return "I don't know how to respond to that..."
def chatgpt_init(uuid):
def get_conversation_as_one_message(uuid):
"""
Prepare the messages to send to the OpenAI API.
We don't use OpenAI's usual array-of-messages format because it doesn't work well with the prompt and ChatGPT
breaks out of character.
Instead, we add the history to the system prompt and create a new conversation each time.
It should not cost more tokens, but I am not 100% sure.
"""
# Load the prompt
prompt = open("utils/chatgpt_prompt_2_smaller.txt", "r").read()
prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
# Replace the variables in the prompt
prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
chat_messages[uuid] = []
chat_messages[uuid].append({"role": "system", "content": prompt})
# Check if the user has already asked a question (to keep context)
if len(chat_messages[uuid]) > 1:
history = ""
# Add the last 4 messages of the conversation to the prompt
# (we don't give the whole history because it adds a lot of tokens)
history += "\nFor context, last discussion you had with user:\n"
for message in chat_messages[uuid][-4:]:
if message['role'] == "user":
history += "U: " + message['content'] + "\n"
elif message['role'] == "assistant":
history += "Y: " + message['content'] + "\n"
# Replace the {{history}} variable in the prompt with the history
prompt = prompt.replace("{{history}}", history)
else:
# If the user hasn't asked a question yet, remove the history part of the prompt
prompt = prompt.replace("{{history}}", "")
return [{"role": "system", "content": prompt}]

View File

@@ -2,7 +2,7 @@ import logging
import lingua_franca
import api
import jarvis.api
from jarvis.skills.cocktails import CocktailSkill
from jarvis.skills.intent_services import intent_manager
from jarvis.utils import whisper_utils
@@ -23,4 +23,4 @@ if __name__ == '__main__':
whisper_utils.load_model()
# Start the api endpoint
api.start_api()
jarvis.api.start_api()