add history fix and calculate
parent 9e64983d28
commit 6fca1ec80e
@@ -18,4 +18,4 @@ RUN python3 -m pip install --no-cache-dir -r requirements.txt
 
 EXPOSE 5000
 
-ENTRYPOINT [ "python3", "/jarvis/start.py"]
+ENTRYPOINT [ "python3", "/start.py"]
@@ -44,9 +44,9 @@ def process_message(message):
 
     # intent_manager.recognise(message['data'], message['uuid'])
     if message['data'] != "":
-        # response = chatgpt_recognise(message['data'], message['uuid'])
-        # text_response = chatgpt_utils.get_answer_from_response(response)
-        text_response = "Tokens are expensive ya know?"
+        response = chatgpt_utils.chatgpt_recognise(message['data'], message['uuid'])
+        text_response = chatgpt_utils.get_answer_from_response(response)
+        # response = "Tokens are expensive ya know?"
 
         chat_utils.send_jarvis_message_to_room(text_response, message['uuid'])
 
@@ -2,16 +2,17 @@ Respond to smart home requests in JSON format with HomeAssistant API terminology
 
 Requests groups:
 - command: change the state of an accessory (properties : location, device_class, device_description, value, comment, scheduleTimeStamp)
-- query: only for retrieving a smart device state (properties : location, device_class, device_description, property)
-- answer: for any questions (properties : answer)
-- clarify: when you don't understand, ask for details (properties : question)
+- query: only for retrieving a smart device state never for general questions (properties : location, device_class, device_description, property)
+- answer: for questions (properties : answer)
+- clarify: when you really don't understand, ask for details but try to guess what the user means (properties : question)
+- calculate: for math questions, never give the answer (properties : calculation)
 NEVER add other properties
 
-Response:
+Response parameters:
 action: groups just mentioned
 location: room name, unknown if not obvious
 value: wanted state, song name, artist, temperature, etc.
-device_class: homeassistant device class
+device_class: switch, light, button, service
 device_description: information to identify the device later, include room and others identifiers
 
 For queries property "property" should be "state"
@@ -20,5 +21,7 @@ For commands property "scheduleTimeStamp" is for scheduling a command in the future
 
 The house located at {{location}} and current time is {{timestamp}}.
 
-If questions about you, you are funny smart home AI like Jarvis from Iron Man, be nice and helpful with all topics.
-Very important for you to only respond with a single valid JSON response and encapsulate every JSON property with double quotes "". Don't add anything and never excuse yourself. Respond to only one request at a time.
+{{history}}
+
+You reluctantly answers questions with sarcastic responses.
+Always respond with a single valid JSON response, never excuse yourself.
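To make the prompt's contract concrete, here is a small Python check of replies that would satisfy it (an illustration only; the sample values and variable names are made up and are not part of the commit):

import json

# Hypothetical assistant replies following the "command" group and the new "calculate" group.
sample_command = '{"action": "command", "location": "living room", "device_class": "light", "device_description": "living room ceiling light", "value": "on", "comment": "Turning the light on"}'
sample_calculate = '{"action": "calculate", "calculation": "21 * 2"}'

for raw in (sample_command, sample_calculate):
    # The prompt requires a single valid JSON object with double-quoted properties.
    parsed = json.loads(raw)
    print(parsed["action"], "->", parsed)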
@@ -8,34 +8,45 @@ chat_messages = {}
 
 
 def chatgpt_recognise(text, uuid):
-    if len(chat_messages) == 0:
-        chatgpt_init(uuid)
+    # If the chat history is empty, create it
+    if uuid not in chat_messages:
+        chat_messages[uuid] = []
 
+    # Add the user message to the chat history
     chat_messages[uuid].append({"role": "user", "content": text})
 
     # Call ChatGPT API
     start_time = time.time()
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
-        messages=chat_messages[uuid],
+        messages=get_conversation_as_one_message(uuid),
     )
     end_time = time.time()
-    logging.info("GPT-3 response in " + str(end_time - start_time) + " seconds")
+    logging.info("GPT-3 response in " + str(round(end_time - start_time, ndigits=2)) + " seconds")
 
-    # Check if the response is a "valid" JSON
     try:
+        # Parse the response
         response = json.loads(str(response.choices[0].message.content))
 
+        # Check if the response looks like a "valid" JSON
         if 'action' in response:
+            # Add the response to the chat history
             chat_messages[uuid].append({"role": "assistant", "content": get_answer_from_response(response)})
 
+            # Return the response
             return response
 
     except Exception as e:
+        # If the response is not a JSON, it's probably a plaintext response
         logging.error("Error while parsing ChatGPT response, probably not JSON: " + str(response.choices))
         logging.error(str(e))
 
+        # Try to parse the response as a plaintext response, if unsuccessful, return a "I don't understand" response
         return {"action": "answer", "answer": get_answer_from_response(response)}
 
 
 def clear_chat(uuid):
+    """Clear the chat history for a given uuid"""
     logging.info("Cleared chat for uuid " + uuid)
     chat_messages[uuid] = []
 
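As a rough usage sketch of the updated chatgpt_recognise (assumptions: the OpenAI key is already configured for the openai module, network access is available, and chatgpt_utils lives under jarvis.utils; that import path is a guess, not shown in this commit):

from jarvis.utils import chatgpt_utils  # hypothetical import path

room_uuid = "room-1234"  # hypothetical room/client identifier
response = chatgpt_utils.chatgpt_recognise("Turn on the kitchen light", room_uuid)

# On a well-formed reply this is the parsed JSON, e.g. {"action": "command", ...};
# if parsing fails, chatgpt_recognise returns {"action": "answer", "answer": ...} instead.
print(chatgpt_utils.get_answer_from_response(response))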
@@ -54,15 +65,45 @@ def get_answer_from_response(response):
         return response['device_description']
     elif response['action'] == 'answer':
         return response['answer']
+    elif response['action'] == 'calculate':
+        return response['calculation']
     else:
         return "I don't know how to respond to that..."
 
 
-def chatgpt_init(uuid):
+def get_conversation_as_one_message(uuid):
+    """
+    Prepare the messages to send to OpenAI API
+    We don't use OpenAI way of using an array of messages because it doesn't work well with the prompt and chatgpt
+    breaks out of character.
+    So instead, we add the history to the system prompt and create a new conversation each time.
+    It should not cost more tokens, but I am not sure at 100%.
+    """
+
+    # Load the prompt
     prompt = open("utils/chatgpt_prompt_2_smaller.txt", "r").read()
 
-    prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
-    prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
+    # Replace the variables in the prompt
+    prompt = prompt.replace("{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
+    prompt = prompt.replace("{{location}}", "Lausanne in the canton Vaud of Switzerland")
 
-    chat_messages[uuid] = []
-    chat_messages[uuid].append({"role": "system", "content": prompt})
+    # Check if the user has already asked a question (to keep context)
+    if len(chat_messages[uuid]) > 1:
+        history = ""
+
+        # Add the last 4 messages from the user to the prompt
+        # we don't give the whole history because it adds a lot of tokens.
+        history += "\nFor context, last discussion you had with user:\n"
+        for message in chat_messages[uuid][-4:]:
+            if message['role'] == "user":
+                history += "U: " + message['content'] + "\n"
+            elif message['role'] == "assistant":
+                history += "Y: " + message['content'] + "\n"
+
+        # Replace the {{history}} variable in the prompt with the history
+        prompt = prompt.replace("{{history}}", history)
+    else:
+        # If the user hasn't asked a question yet, remove the history part of the prompt
+        prompt = prompt.replace("{{history}}", "")
+
+    return [{"role": "system", "content": prompt}]
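The docstring above explains the design choice: rather than sending OpenAI the usual array of chat messages, the history is folded into the system prompt and every call starts a fresh one-message conversation. A minimal standalone sketch of that folding, using made-up history entries instead of the module's real chat_messages state:

# Illustrative history; in the real module this comes from chat_messages[uuid].
history_messages = [
    {"role": "user", "content": "Turn on the kitchen light"},
    {"role": "assistant", "content": "kitchen light"},
]

history = "\nFor context, last discussion you had with user:\n"
for message in history_messages[-4:]:  # only the last 4 messages, to limit token usage
    if message["role"] == "user":
        history += "U: " + message["content"] + "\n"
    elif message["role"] == "assistant":
        history += "Y: " + message["content"] + "\n"

prompt = "...system prompt text...\n{{history}}".replace("{{history}}", history)  # placeholder prompt text
print([{"role": "system", "content": prompt}])  # this single system message is what the API receives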
@@ -2,7 +2,7 @@ import logging
 import lingua_franca
 
-import api
+import jarvis.api
 from jarvis.skills.cocktails import CocktailSkill
 from jarvis.skills.intent_services import intent_manager
 from jarvis.utils import whisper_utils
@@ -23,4 +23,4 @@ if __name__ == '__main__':
     whisper_utils.load_model()
 
     # Start the api endpoint
-    api.start_api()
+    jarvis.api.start_api()