"ValueError: Shapes (5, 6) and (5, 35) are incompatible" when trying to train my model

I wrote this code by following this tutorial: https://www.geeksforgeeks.org/deploy-a-chatbot-using-tensorflow-in-python/

#import
import random
import json
import pickle
import numpy as np
import nltk

from keras.models import Sequential
from nltk.stem import WordNetLemmatizer
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD

lemmatizer=WordNetLemmatizer()

# Reading the JSON file
intents=json.loads(open("intents2.json").read())

#Creating empty lists to store data
words=[]
classes=[]
documents=[]
ignoreLetters=["?", "!", ".", ","]

for intent in intents['intents']:
    for pattern in intent['patterns']:
        #Separate words from patterns
        wordList=nltk.word_tokenize(pattern)
        words.extend(wordList)

        #Associate patterns with respective tags
        documents.append(((wordList), intent['tag']))

        #append tags to class list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

#Storing the root words or lemma
words=[lemmatizer.lemmatize(word)
       for word in words if word not in ignoreLetters]
words=sorted(set(words))

#Save words and class list to binary files
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(classes,open('classes.pkl','wb'))

# Training
training=[]
outputEmpty=[0]*len(classes)
for document in documents:
    bag=[]
    wordPatterns=document[0]
    wordPatterns=[lemmatizer.lemmatize(
        word.lower())for word in wordPatterns]
    for word in words:
        bag.append(1) if word in wordPatterns else bag.append(0)

    #Make a copy of outputEmpty
    outputRow=list(outputEmpty)
    outputRow[classes.index(document[1])]=1
    training.append([bag,outputRow])

random.shuffle(training)
training=np.array(training)

#Split data
trainX=list(training[:,0])
trainY=list(training[:,1])
        
#Create sequential machine learning model
model=Sequential()
model.add(Dense(128,input_shape=(len(trainX[0]), ),
                activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(trainX[0]), 
                activation='softmax'))

#Compile model
sgd=SGD(learning_rate=0.01, weight_decay=1e-6,momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,metrics=['accuracy'])
hist=model.fit(np.array(trainX),np.array(trainY),
               epochs=200, batch_size=5, verbose=True)  #Offending line

#Save Model
model.save("chatbotmodel.h5", hist)

print("Whooohooo! Chatbot model training is success!")

But when I try to run training.py, I get this error:

Traceback (most recent call last):
  File "C:\Users\user\Documents\G4G Bot\training.py", line 83, in <module>
    hist=model.fit(np.array(trainX),np.array(trainY),
  File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\utils\traceback_utils.py", line 70, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "C:\Users\user\AppData\Local\Temp\__autograph_generated_filegfvyeton.py", line 15, in tf__train_function
    retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
ValueError: in user code:

    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\engine\training.py", line 1401, in train_function  *
        return step_function(self, iterator)
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\engine\training.py", line 1384, in step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\engine\training.py", line 1373, in run_step  **
        outputs = model.train_step(data)
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\engine\training.py", line 1151, in train_step
        loss = self.compute_loss(x, y, y_pred, sample_weight)
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\engine\training.py", line 1209, in compute_loss
        return self.compiled_loss(
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\engine\compile_utils.py", line 277, in __call__
        loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\losses.py", line 143, in __call__
        losses = call_fn(y_true, y_pred)
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\losses.py", line 270, in call  **
        return ag_fn(y_true, y_pred, **self._fn_kwargs)
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\losses.py", line 2221, in categorical_crossentropy
        return backend.categorical_crossentropy(
    File "C:\Users\user\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\src\backend.py", line 5573, in categorical_crossentropy
        target.shape.assert_is_compatible_with(output.shape)

    ValueError: Shapes (5, 6) and (5, 35) are incompatible

Is this another case of an old, deprecated package? Is there a fix?

Yes, the code contains an error. The root cause is your output layer: Dense(len(trainX[0]), activation='softmax') creates one output unit per vocabulary word (35 in your run), but each row of trainY is a one-hot vector with one entry per tag (6). When categorical_crossentropy compares the labels against the predictions, it sees the incompatible shapes (5, 6) and (5, 35), where 5 is your batch size. The output layer needs len(classes) units instead. Here is my fix (I made some other improvements as well, to make the code a little more Pythonic).

It consists of two Python files: the training file and the chatbot file.
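
A quick way to see the mismatch in your own script before calling fit (just a sketch; 35 and 6 are the sizes implied by your traceback):

print(len(words))          # 35 -> length of each bag-of-words vector (trainX columns)
print(len(classes))        # 6  -> width of each one-hot row in trainY
print(model.output_shape)  # (None, 35) with Dense(len(trainX[0])); it must be (None, 6)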

Training File

# import
import json
import pickle
import random

import nltk
import numpy as np
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import SGD
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')

# Reading the JSON file. Make sure to use your path ...
intents = json.loads(open("tensorflow_models/intents.json").read())

# Creating empty lists to store data
simple_words = []
classes = []
documents = []

for intent in intents['intents']:
    for pattern in intent['patterns']:
        # tokenizing the pattern ...
        word_list = nltk.word_tokenize(pattern)
        simple_words.extend(word_list)

        # Associate the tokenized pattern list with its tag
        documents.append((word_list, intent['tag']))

        # append tags to class list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

# Storing the root words (lemmas)
all_stopwords = set(stopwords.words('english'))

# reduce each word to its base form (lemma) and lowercase it, so the
# vocabulary matches the lowercased patterns used below and in the chatbot
lemmatizer = WordNetLemmatizer()
lemmatize_words = [lemmatizer.lemmatize(simple_word.lower())
                   for simple_word in simple_words
                   if simple_word.lower() not in all_stopwords]
lemmatize_words = sorted(set(lemmatize_words))

# Training
training = []
output_empty = [0]*len(classes)
for document in documents:
    bag = []
    word_patterns = document[0]
    lemmatize_word_patterns = [lemmatizer.lemmatize(
        word.lower()) for word in word_patterns]

    for lemmatize_word in lemmatize_words:
        bag.append(1 if lemmatize_word in lemmatize_word_patterns else 0)

    # Make a copy of output_empty and mark this pattern's tag with a 1
    output_row = list(output_empty)
    output_row[classes.index(document[1])] = 1

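    # Concatenate the bag-of-words vector and its one-hot label into a
    # single row, so shuffling below keeps each pattern aligned with its tag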
    bag.extend(output_row)
    training.append(bag)

random.shuffle(training)
training = np.array(training)

# Split the combined rows back into features (bag columns) and one-hot labels
trainX = training[:, :len(lemmatize_words)]
trainY = training[:, len(lemmatize_words):]

# Create sequential machine learning model
model = Sequential()

# adding deep learning layers....
model.add(Dense(128, input_shape=(len(trainX[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
# Corrected output layer: one unit per tag, matching the one-hot labels
model.add(Dense(len(classes), activation='softmax'))

# Compile model
sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd, metrics=['accuracy'])

# start the training ...
hist = model.fit(trainX, trainY, epochs=200, batch_size=5, verbose=True)

# Save words and class list to binary files, to be loaded later
#  when creating conversation ....
pickle.dump(lemmatize_words, open('tensorflow_models/words.pkl', 'wb'))
pickle.dump(classes, open('tensorflow_models/classes.pkl', 'wb'))

# Save Model, to be used later for conversation ...
model.save("tensorflow_models/chatbotmodel.h5", hist)

print("Training is finished!")

The chatbot

import json
import pickle
import random

import nltk
import numpy as np
from keras.models import load_model
from nltk.stem import WordNetLemmatizer

# Load trained model and associated files
# Make sure to use your paths...
model = load_model("tensorflow_models/chatbotmodel.h5")
intents = json.loads(open("tensorflow_models/intents.json").read())
words = pickle.load(open('tensorflow_models/words.pkl', 'rb'))
classes = pickle.load(open('tensorflow_models/classes.pkl', 'rb'))

lemmatizer = WordNetLemmatizer()


def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(
        word.lower()) for word in sentence_words]
    return sentence_words


def bow(sentence, words, show_details=True):
    sentence_words = clean_up_sentence(sentence)
    bag = [0]*len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)


def predict_class(sentence):
    p = bow(sentence, words, show_details=False)
    res = model.predict(np.array([p]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
    return return_list


def get_response(ints, intents_json):
    # Guard against an empty prediction list (no intent above the
    # threshold), which would otherwise raise an IndexError on ints[0]
    if not ints:
        return "Sorry, I don't understand."
    tag = ints[0]['intent']
    for i in intents_json['intents']:
        if i['tag'] == tag:
            return random.choice(i['responses'])
    return "Sorry, I don't understand."


# Main conversation loop
print("Welcome! How can I assist you today?")
while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        break

    # Predict the intent of the user input
    ints = predict_class(user_input)

    # Get a response based on the predicted intent
    response = get_response(ints, intents)
    print("Bot:", response)

Let me know if you run into trouble.

Good luck

