Add Chatbot AI project

This commit is contained in:
Don Heshanthaka 2022-10-01 16:03:05 +05:30
parent 7a91d8527e
commit 358c4e5add
8 changed files with 206 additions and 0 deletions


@@ -0,0 +1,40 @@
# Chatbot AI 🤖
This project contains an AI-powered chatbot that you can interact with, both for fun and to learn how these technologies work.
## Tech Stack
* Python 3.9
* TensorFlow
* Keras
* NLTK
* NumPy
## How to set up the project
* Clone this repo
```bash
git clone https://github.com/metafy-social/daily-python-scripts.git
```
* Move to the project folder
```bash
cd '.\scripts\Chatbot AI\'
```
* Install dependencies
```bash
pip install -r requirements.txt
```
* Run the Python script `chatbot.py` (assuming `python` on your `PATH` is Python 3):
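```bash
python chatbot.py
```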
## Screenshots of the project
![screenshot 1](https://user-images.githubusercontent.com/61963664/193405239-5403c601-2c5b-4443-b463-1452e72cbb20.PNG)
![screenshot 2](https://user-images.githubusercontent.com/61963664/193405262-5b05c68b-994c-4b04-ba77-df878d3f42c8.PNG)


@@ -0,0 +1,61 @@
import random
import json
import pickle
import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import load_model

# NOTE: nltk.word_tokenize and WordNetLemmatizer expect the 'punkt' and
# 'wordnet' corpora to be present; the training script downloads them.
lemmatizer = WordNetLemmatizer()
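
# Load the intents definition plus the vocabulary, class labels, and model produced by the training script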
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
model = load_model('chatbot_model.h5')
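
# Tokenize a sentence and reduce each token to its lemma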
def clean_up_sentence(sentence):
sentence_words = nltk.word_tokenize(sentence)
sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words]
return sentence_words
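
# Encode a sentence as a binary bag-of-words vector over the saved vocabulary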
def bag_of_words(sentence):
sentence_words = clean_up_sentence(sentence)
bag = [0] * len(words)
for w in sentence_words:
for i, word in enumerate(words):
if word == w:
bag[i] = 1
return np.array(bag)
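
# Return the intents whose predicted probability clears the threshold, sorted from most to least likely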
def predict_class(sentence):
bow = bag_of_words(sentence)
res = model.predict(np.array([bow]), verbose=0)[0]
ERROR_THRESHOLD = 0.25
results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({'intent': classes[r[0]], 'probability': str(r[1])})
return return_list

# Pick a random response for the most likely intent; fall back politely
# when no intent cleared the confidence threshold
def get_response(intents_list, intents_json):
    if not intents_list:
        return "Sorry, I don't understand that."
    tag = intents_list[0]['intent']
    list_of_intents = intents_json['intents']
    result = "Sorry, I don't understand that."
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result
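
# Simple chat loop: classify each message and print a matching response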
print("Welcome to Metafy-Social Chatbot, let's start chatting!")
while True:
message = input("\n>")
ints = predict_class(message)
res = get_response(ints, intents)
print(f'Chatbot: {res}')

Binary file not shown.

Binary file not shown.


@@ -0,0 +1,25 @@
{"intents": [
{"tag": "greetings",
"patterns": ["hello", "hey", "hi", "good day", "greetings", "what's up?", "how is it going?"],
"responses": ["Hello!", "Hey!", "What can i do for you?"]
},
{"tag": "goodbye",
"patterns": ["cya", "See you later", "Goodbye", "I am leaving", "Have a Good day", "bye", "cao", "see ya"],
"responses": ["Sad to see you go :(", "Talk to you later", "Goodbye!"]
},
{"tag": "age",
"patterns": ["how old", "how old are you?", "what is your age?", "age"],
"responses": ["I am immortal", "I do not age", "you guess", "I'm too shy >.<"]
},
{"tag": "name",
"patterns": ["What is your name?", "What should i call you?", "what's your name?", "who are you?", "Can you tell me your name ?"],
"responses": ["I am metafy-social AI", "I am metafy-social Chatbot"]
},
{"tag": "hours",
"patterns": ["when are you guys open?", "hours open?", "Are you open today?", "Are you open"],
"responses": ["24/7", "We are open 24/7", "We are always open"]
},
{"tag": "learn",
"patterns": ["What are good resources to learn python?", "Where can i learn python programming", "Where to learn python?", "How to learn python?", "Where can i study python?", "Study?"],
"responses": ["You can learn python at our repository 'https://github.com/metafy-social/daily-python-scripts'"]}
]}


@@ -0,0 +1,4 @@
keras==2.9.0
nltk==3.7
numpy==1.20.3
tensorflow==2.9.1


@@ -0,0 +1,76 @@
import random
import json
import pickle
import numpy as np
import nltk
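# Fetch the tokenizer and lemmatizer corpora NLTK needs (no-op if already downloaded)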
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('intents.json').read())
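
# Vocabulary, intent tags, and (tokenized pattern, tag) pairs gathered from intents.json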
words = []
classes = []
documents = []
ignore_letters = ['?', '!', '.', ',']
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documents.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
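
# Build one training example per pattern: a bag-of-words input vector and a one-hot intent vector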
training = []
output_empty = [0] * len(classes)
for document in documents:
bag = []
word_patterns = document[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in words:
        bag.append(1 if word in word_patterns else 0)
output_row = list(output_empty)
output_row[classes.index(document[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
# dtype=object because each row pairs two lists of different lengths (bag vs. one-hot output)
training = np.array(training, dtype=object)
train_x = list(training[:, 0])
train_y = list(training[:, 1])
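
# Feed-forward classifier: two ReLU hidden layers with dropout, softmax over intent tags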
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5')  # save the trained model for chatbot.py to load
print('Done')

Binary file not shown.