shorten the individual lines

Shashank 2024-10-15 10:29:20 +05:30
parent 0fbb04b070
commit 4c2ec80aec


@@ -10,38 +10,43 @@ Github: LEVII007
link : https://www.kaggle.com/code/navjindervirdee/lstm-neural-network-from-scratch
"""

##### Explanation #####
# This script implements a Long Short-Term Memory (LSTM) network to learn
# and predict sequences of characters.
# It uses numpy for numerical operations and tqdm for progress visualization.
# The data is a paragraph about LSTM, converted to lowercase and split into
# characters. Each character is one-hot encoded for training.
# The LSTM class initializes weights and biases for the forget, input, candidate,
# and output gates. It also initializes weights and biases for the final output layer.
# The forward method performs forward propagation through the LSTM network,
# computing hidden and cell states. It uses sigmoid and tanh activation
# functions for the gates and cell states.
# The backward method performs backpropagation through time, computing gradients
# for the weights and biases. It updates the weights and biases using
# the computed gradients and the learning rate.
# The train method trains the LSTM network on the input data for a specified
# number of epochs. It uses one-hot encoded inputs and computes errors
# using the softmax function.
# The test method evaluates the trained LSTM network on the input data,
# computing accuracy based on predictions.
# The script initializes the LSTM network with specified hyperparameters
# and trains it on the input data. Finally, it tests the trained network
# and prints the accuracy of the predictions.
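As a minimal standalone sketch of the one-hot encoding step described above (the names text, chars, and one_hot_encode here are illustrative stand-ins, not the attributes the class builds from data):

import numpy as np

text = "hello lstm"
chars = sorted(set(text))                          # unique characters in the data
char_to_idx = {c: i for i, c in enumerate(chars)}

def one_hot_encode(char: str) -> np.ndarray:
    # Column vector of shape (vocab_size, 1) with a single 1 at the char's index
    vec = np.zeros((len(chars), 1))
    vec[char_to_idx[char]] = 1.0
    return vec

print(one_hot_encode("l").ravel())                 # [0. 0. 0. 1. 0. 0. 0. 0.]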
##### Imports #####
from tqdm import tqdm
import numpy as np


class LSTM:
    def __init__(self, data: str, hidden_dim: int = 25,
                 epochs: int = 1000, lr: float = 0.05) -> None:
        """
        Initialize the LSTM network with the given data and hyperparameters.
@@ -83,16 +88,20 @@ class LSTM:
""" """
Initialize the weights and biases for the LSTM network. Initialize the weights and biases for the LSTM network.
""" """
self.wf = self.init_weights(self.char_size + self.hidden_dim, self.hidden_dim) self.wf = self.init_weights(self.char_size + self.hidden_dim,
self.hidden_dim)
self.bf = np.zeros((self.hidden_dim, 1)) self.bf = np.zeros((self.hidden_dim, 1))
self.wi = self.init_weights(self.char_size + self.hidden_dim, self.hidden_dim) self.wi = self.init_weights(self.char_size + self.hidden_dim,
self.hidden_dim)
self.bi = np.zeros((self.hidden_dim, 1)) self.bi = np.zeros((self.hidden_dim, 1))
self.wc = self.init_weights(self.char_size + self.hidden_dim, self.hidden_dim) self.wc = self.init_weights(self.char_size + self.hidden_dim,
self.hidden_dim)
self.bc = np.zeros((self.hidden_dim, 1)) self.bc = np.zeros((self.hidden_dim, 1))
self.wo = self.init_weights(self.char_size + self.hidden_dim, self.hidden_dim) self.wo = self.init_weights(self.char_size + self.hidden_dim,
self.hidden_dim)
self.bo = np.zeros((self.hidden_dim, 1)) self.bo = np.zeros((self.hidden_dim, 1))
self.wy = self.init_weights(self.hidden_dim, self.char_size) self.wy = self.init_weights(self.hidden_dim, self.char_size)
@@ -106,7 +115,8 @@ class LSTM:
        :param output_dim: The output dimension.
        :return: A matrix of initialized weights.
        """
        return np.random.uniform(-1, 1, (output_dim, input_dim)) * \
            np.sqrt(6 / (input_dim + output_dim))
    ##### Activation Functions #####
    def sigmoid(self, x: np.ndarray, derivative: bool = False) -> np.ndarray:
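The init_weights expression above is a Xavier/Glorot-style uniform initialization. A quick standalone check, with assumed illustrative dimensions, that the scaling keeps every weight within +/- sqrt(6 / (fan_in + fan_out)):

import numpy as np

fan_in, fan_out = 90, 25                # e.g. char_size + hidden_dim, hidden_dim
bound = np.sqrt(6 / (fan_in + fan_out))
w = np.random.uniform(-1, 1, (fan_out, fan_in)) * bound
print(np.abs(w).max() <= bound)         # True: every weight lies inside the bound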
@@ -171,15 +181,22 @@ class LSTM:
        outputs = []
        for t in range(len(inputs)):
            self.concat_inputs[t] = np.concatenate(
                (self.hidden_states[t - 1], inputs[t]))

            self.forget_gates[t] = self.sigmoid(
                np.dot(self.wf, self.concat_inputs[t]) + self.bf)
            self.input_gates[t] = self.sigmoid(
                np.dot(self.wi, self.concat_inputs[t]) + self.bi)
            self.candidate_gates[t] = self.tanh(
                np.dot(self.wc, self.concat_inputs[t]) + self.bc)
            self.output_gates[t] = self.sigmoid(
                np.dot(self.wo, self.concat_inputs[t]) + self.bo)

            self.cell_states[t] = self.forget_gates[t] * self.cell_states[t - 1] + \
                self.input_gates[t] * self.candidate_gates[t]
            self.hidden_states[t] = self.output_gates[t] * \
                self.tanh(self.cell_states[t])

            outputs.append(np.dot(self.wy, self.hidden_states[t]) + self.by)
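For reference, a self-contained numpy sketch of a single time step of the loop above, with made-up dimensions, to make the gate shapes explicit (all names are local stand-ins for the class attributes):

import numpy as np

char_size, hidden_dim = 40, 25
rng = np.random.default_rng(0)

x_t = rng.random((char_size, 1))          # a one-hot input would go here
h_prev = np.zeros((hidden_dim, 1))
c_prev = np.zeros((hidden_dim, 1))
z = np.concatenate((h_prev, x_t))         # shape (hidden_dim + char_size, 1)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# One weight matrix per gate, each (hidden_dim, hidden_dim + char_size)
wf, wi, wc, wo = (rng.uniform(-0.1, 0.1, (hidden_dim, hidden_dim + char_size))
                  for _ in range(4))
bf = bi = bc = bo = np.zeros((hidden_dim, 1))

f = sigmoid(wf @ z + bf)                  # forget gate
i = sigmoid(wi @ z + bi)                  # input gate
g = np.tanh(wc @ z + bc)                  # candidate cell state
o = sigmoid(wo @ z + bo)                  # output gate

c_t = f * c_prev + i * g                  # new cell state, (hidden_dim, 1)
h_t = o * np.tanh(c_t)                    # new hidden state, (hidden_dim, 1)
print(c_t.shape, h_t.shape)               # (25, 1) (25, 1)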
@@ -198,7 +215,8 @@ class LSTM:
        d_wo, d_bo = 0, 0
        d_wy, d_by = 0, 0

        dh_next, dc_next = np.zeros_like(self.hidden_states[0]), \
            np.zeros_like(self.cell_states[0])

        for t in reversed(range(len(inputs))):
            error = errors[t]
@@ -210,110 +228,96 @@ class LSTM:
            d_hs = np.dot(self.wy.T, error) + dh_next

            # Output Gate Weights and Biases Errors
            d_o = self.tanh(self.cell_states[t]) * d_hs * \
                self.sigmoid(self.output_gates[t], derivative=True)
            d_wo += np.dot(d_o, inputs[t].T)
            d_bo += d_o

            # Cell State Error
            d_cs = self.tanh(self.tanh(self.cell_states[t]),
                             derivative=True) * self.output_gates[t] * d_hs + dc_next

            # Forget Gate Weights and Biases Errors
            d_f = d_cs * self.cell_states[t - 1] * \
                self.sigmoid(self.forget_gates[t], derivative=True)
            d_wf += np.dot(d_f, inputs[t].T)
            d_bf += d_f

            # Input Gate Weights and Biases Errors
            d_i = d_cs * self.candidate_gates[t] * \
                self.sigmoid(self.input_gates[t], derivative=True)
            d_wi += np.dot(d_i, inputs[t].T)
            d_bi += d_i

            # Candidate Gate Weights and Biases Errors
            d_c = d_cs * self.input_gates[t] * \
                self.tanh(self.candidate_gates[t], derivative=True)
            d_wc += np.dot(d_c, inputs[t].T)
            d_bc += d_c

            # Update the next hidden and cell state errors; only the hidden
            # part of the concatenated-input error flows back through time
            d_z = np.dot(self.wf.T, d_f) + np.dot(self.wi.T, d_i) + \
                np.dot(self.wc.T, d_c) + np.dot(self.wo.T, d_o)
            dh_next = d_z[:self.hidden_dim, :]
            dc_next = d_cs * self.forget_gates[t]

        # Apply the accumulated gradients to the weights and biases
        for param, grad in zip([self.wf, self.wi, self.wc, self.wo, self.wy],
                               [d_wf, d_wi, d_wc, d_wo, d_wy]):
            param -= self.lr * grad
        for param, grad in zip([self.bf, self.bi, self.bc, self.bo, self.by],
                               [d_bf, d_bi, d_bc, d_bo, d_by]):
            param -= self.lr * grad
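Note that the derivative=True branches in the backward pass are applied to values that have already been activated, so they compute s * (1 - s) and 1 - y ** 2. A small standalone numeric check of those identities:

import numpy as np

x = np.linspace(-2, 2, 5)
s = 1 / (1 + np.exp(-x))                              # sigmoid(x)
y = np.tanh(x)

eps = 1e-6
num_ds = ((1 / (1 + np.exp(-(x + eps)))) - s) / eps   # finite difference
num_dy = (np.tanh(x + eps) - y) / eps

print(np.allclose(s * (1 - s), num_ds, atol=1e-4))    # True
print(np.allclose(1 - y ** 2, num_dy, atol=1e-4))     # True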
    def train(self) -> None:
        """
        Train the LSTM network on the input data for a specified number of epochs.
        """
        inputs = [self.one_hot_encode(char) for char in self.train_X]
        targets = [self.one_hot_encode(char) for char in self.train_y]

        for _ in tqdm(range(self.epochs)):
            # Forward pass
            outputs = self.forward(inputs)

            # Error at each time step: softmax(prediction) minus the one-hot target
            errors = [self.softmax(output) - target
                      for output, target in zip(outputs, targets)]

            # Backward pass and weight updates (gradients are taken with
            # respect to the concatenated hidden-state/input vectors)
            self.backward(errors, self.concat_inputs)
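The error term relies on the standard identity that the gradient of softmax cross-entropy with respect to the logits is softmax(logits) - one_hot(target); a standalone finite-difference check:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

rng = np.random.default_rng(1)
logits = rng.random(6)
target = np.zeros(6)
target[2] = 1.0                             # one-hot target class

loss = lambda z: -np.log(softmax(z)[2])     # cross-entropy for class 2
analytic = softmax(logits) - target

eps = 1e-6
numeric = np.array([(loss(logits + eps * np.eye(6)[k]) - loss(logits)) / eps
                    for k in range(6)])
print(np.allclose(analytic, numeric, atol=1e-4))   # True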
    def predict(self, inputs: list) -> str:
        """
        Predict the next character in the sequence.

        :param inputs: The input data as a list of one-hot encoded vectors.
        :return: The predicted character.
        """
        output = self.forward(inputs)[-1]
        return self.idx_to_char[np.argmax(self.softmax(output))]
    def test(self) -> None:
        """
        Test the trained LSTM network on the input data and print the accuracy.
        """
        inputs = [self.one_hot_encode(char) for char in self.train_X]
        correct_predictions = sum(
            self.idx_to_char[np.argmax(self.softmax(output))] == target
            for output, target in zip(self.forward(inputs), self.train_y))

        accuracy = (correct_predictions / len(self.train_y)) * 100
        print(f'Accuracy: {accuracy:.2f}%')
if __name__ == "__main__":
    # Define the input data and hyperparameters
    data = "LSTM Neural Networks are designed to handle sequences of data."
    hidden_dim = 50
    epochs = 1000
    lr = 0.01

    # Initialize and train the LSTM network
    lstm = LSTM(data, hidden_dim, epochs, lr)
    lstm.train()

    # Test the LSTM network and print the accuracy
    lstm.test()