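"""Train the intent-classification model for Jarvis.

Builds a bag-of-words dataset from every registered intent pattern, trains a
NeuralNet classifier with PyTorch, and saves the resulting weights together
with the vocabulary and tag list to ``trained_model.pth``.
"""
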
import os

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from jarvis import get_path_file
from jarvis.ia.model import NeuralNet
from jarvis.ia.nltk_utils import bag_of_words, tokenize, stem
from jarvis.utils import intents_utils

path = os.path.dirname(get_path_file.__file__)


def train():
    intents_utils.register_all_intents()  # important
    all_intents_patterns = intents_utils.get_all_patterns()

    all_words = []
    tags = []
    xy = []
    # loop through each sentence in our skills patterns
    for intent in all_intents_patterns:
        tag = intent
        # add to tag list
        tags.append(tag)

        for pattern in all_intents_patterns[intent]:
            # tokenize each word in the sentence
            w = tokenize(pattern)
            # add to our words list
            all_words.extend(w)
            # add to xy pair
            xy.append((w, tag))

    # stem and lower each word
    ignore_words = ['?', '.', '!']
    all_words = [stem(w) for w in all_words if w not in ignore_words]
    # remove duplicates and sort
    all_words = sorted(set(all_words))
    tags = sorted(set(tags))

    print(len(xy), "patterns")
    print(len(tags), "tags:", tags)
    print(len(all_words), "unique stemmed words:", all_words)

    # create training data
    X_train = []
    y_train = []
    for (pattern_sentence, tag) in xy:
        # X: bag of words for each pattern_sentence
        bag = bag_of_words(pattern_sentence, all_words)
        X_train.append(bag)
        # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
        label = tags.index(tag)
        y_train.append(label)

    X_train = np.array(X_train)
    y_train = np.array(y_train)

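    # At this point X_train is a (num_patterns, len(all_words)) array of
    # bag-of-words vectors and y_train holds one tag index per pattern
    # (assuming bag_of_words returns one fixed-length vector over all_words).
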
    # Hyper-parameters
    num_epochs = 1000
    batch_size = 8
    learning_rate = 0.001
    input_size = len(X_train[0])
    hidden_size = 8
    output_size = len(tags)
    print(input_size, output_size)

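    # NeuralNet (from jarvis.ia.model) is assumed to map a bag-of-words vector
    # of length input_size to output_size raw tag scores, with hidden_size
    # controlling the width of its hidden layer(s).
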
    class ChatDataset(Dataset):

        def __init__(self):
            self.n_samples = len(X_train)
            self.x_data = X_train
            self.y_data = y_train

        # support indexing such that dataset[i] can be used to get i-th sample
        def __getitem__(self, index):
            return self.x_data[index], self.y_data[index]

        # we can call len(dataset) to return the size
        def __len__(self):
            return self.n_samples

    dataset = ChatDataset()
    train_loader = DataLoader(dataset=dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = NeuralNet(input_size, hidden_size, output_size).to(device)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Train the model
    for epoch in range(num_epochs):
        for (words, labels) in train_loader:
            words = words.to(device)
            labels = labels.to(dtype=torch.long).to(device)

            # Forward pass
            outputs = model(words)
            # if y were one-hot encoded, we would first need:
            # labels = torch.max(labels, 1)[1]
            loss = criterion(outputs, labels)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if (epoch + 1) % 100 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

    print(f'Final loss: {loss.item():.4f}')

    data = {
        "model_state": model.state_dict(),
        "input_size": input_size,
        "hidden_size": hidden_size,
        "output_size": output_size,
        "all_words": all_words,
        "tags": tags
    }

    file = path + "/ia/trained_model.pth"
    torch.save(data, file)

    print(f'Training complete. File saved to {file}')


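# A checkpoint saved by train() can later be restored for inference, roughly
# like this (illustrative sketch only, not part of this module):
#
#     data = torch.load(path + "/ia/trained_model.pth")
#     model = NeuralNet(data["input_size"], data["hidden_size"], data["output_size"])
#     model.load_state_dict(data["model_state"])
#     model.eval()
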
if __name__ == '__main__':
    train()