From f2246ce7fd539d94fd9299bd2fe42469dafab03f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 21:03:23 +0300 Subject: [PATCH] Enable ruff ICN001 rule (#11329) * Enable ruff ICN001 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/hill_cipher.py | 38 ++++----- fractals/julia_sets.py | 54 ++++++------ fractals/koch_snowflake.py | 34 ++++---- graphics/bezier_curve.py | 2 +- machine_learning/gradient_descent.py | 4 +- neural_network/input_data.py | 32 +++---- .../two_hidden_layers_neural_network.py | 84 +++++++++---------- pyproject.toml | 1 - 8 files changed, 121 insertions(+), 128 deletions(-) diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index ea337a72d..33b2529f0 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -38,7 +38,7 @@ https://www.youtube.com/watch?v=4RhLNDqcjpA import string -import numpy +import numpy as np from maths.greatest_common_divisor import greatest_common_divisor @@ -49,11 +49,11 @@ class HillCipher: # i.e. a total of 36 characters # take x and return x % len(key_string) - modulus = numpy.vectorize(lambda x: x % 36) + modulus = np.vectorize(lambda x: x % 36) - to_int = numpy.vectorize(round) + to_int = np.vectorize(round) - def __init__(self, encrypt_key: numpy.ndarray) -> None: + def __init__(self, encrypt_key: np.ndarray) -> None: """ encrypt_key is an NxN numpy array """ @@ -63,7 +63,7 @@ class HillCipher: def replace_letters(self, letter: str) -> int: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_letters('T') 19 >>> hill_cipher.replace_letters('0') @@ -73,7 +73,7 @@ class HillCipher: def replace_digits(self, num: int) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_digits(19) 'T' >>> hill_cipher.replace_digits(26) @@ -83,10 +83,10 @@ class HillCipher: def check_determinant(self) -> None: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.check_determinant() """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -101,7 +101,7 @@ class HillCipher: def process_text(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.process_text('Testing Hill Cipher') 'TESTINGHILLCIPHERR' >>> hill_cipher.process_text('hello') @@ -117,7 +117,7 @@ class HillCipher: def encrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.encrypt('testing hill cipher') 'WHXYJOLM9C6XT085LL' >>> hill_cipher.encrypt('hello') @@ -129,7 +129,7 @@ class HillCipher: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[ 0 ] @@ -140,14 +140,14 @@ class HillCipher: return encrypted - def make_decrypt_key(self) -> numpy.ndarray: + def 
make_decrypt_key(self) -> np.ndarray: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.make_decrypt_key() array([[ 6, 25], [ 5, 26]]) """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -158,16 +158,14 @@ class HillCipher: break inv_key = ( - det_inv - * numpy.linalg.det(self.encrypt_key) - * numpy.linalg.inv(self.encrypt_key) + det_inv * np.linalg.det(self.encrypt_key) * np.linalg.inv(self.encrypt_key) ) return self.to_int(self.modulus(inv_key)) def decrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL') 'TESTINGHILLCIPHERR' >>> hill_cipher.decrypt('85FF00') @@ -180,7 +178,7 @@ class HillCipher: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0] decrypted_batch = "".join( self.replace_digits(num) for num in batch_decrypted @@ -199,7 +197,7 @@ def main() -> None: row = [int(x) for x in input().split()] hill_matrix.append(row) - hc = HillCipher(numpy.array(hill_matrix)) + hc = HillCipher(np.array(hill_matrix)) print("Would you like to encrypt or decrypt some text? (1 or 2)") option = input("\n1. Encrypt\n2. Decrypt\n") diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 482e1eddf..1eef4573b 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -25,8 +25,8 @@ import warnings from collections.abc import Callable from typing import Any -import numpy -from matplotlib import pyplot +import matplotlib.pyplot as plt +import numpy as np c_cauliflower = 0.25 + 0.0j c_polynomial_1 = -0.4 + 0.6j @@ -37,22 +37,20 @@ window_size = 2.0 nb_pixels = 666 -def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray: +def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. >>> eval_exponential(0, 0) 1.0 - >>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15 + >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 True >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 True """ - return numpy.exp(z_values) + c_parameter + return np.exp(z_values) + c_parameter -def eval_quadratic_polynomial( - c_parameter: complex, z_values: numpy.ndarray -) -> numpy.ndarray: +def eval_quadratic_polynomial(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ >>> eval_quadratic_polynomial(0, 2) 4 @@ -66,7 +64,7 @@ def eval_quadratic_polynomial( return z_values * z_values + c_parameter -def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: +def prepare_grid(window_size: float, nb_pixels: int) -> np.ndarray: """ Create a grid of complex values of size nb_pixels*nb_pixels with real and imaginary parts ranging from -window_size to window_size (inclusive). 
@@ -77,20 +75,20 @@ def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: [ 0.-1.j, 0.+0.j, 0.+1.j], [ 1.-1.j, 1.+0.j, 1.+1.j]]) """ - x = numpy.linspace(-window_size, window_size, nb_pixels) + x = np.linspace(-window_size, window_size, nb_pixels) x = x.reshape((nb_pixels, 1)) - y = numpy.linspace(-window_size, window_size, nb_pixels) + y = np.linspace(-window_size, window_size, nb_pixels) y = y.reshape((1, nb_pixels)) return x + 1.0j * y def iterate_function( - eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray], + eval_function: Callable[[Any, np.ndarray], np.ndarray], function_params: Any, nb_iterations: int, - z_0: numpy.ndarray, + z_0: np.ndarray, infinity: float | None = None, -) -> numpy.ndarray: +) -> np.ndarray: """ Iterate the function "eval_function" exactly nb_iterations times. The first argument of the function is a parameter which is contained in @@ -98,22 +96,22 @@ def iterate_function( values to iterate from. This function returns the final iterates. - >>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape + >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[0]) + ... np.array([0,1,2]))[0]) 0j - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[1]) + ... np.array([0,1,2]))[1]) (1+0j) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[2]) + ... np.array([0,1,2]))[2]) (256+0j) """ @@ -121,8 +119,8 @@ def iterate_function( for _ in range(nb_iterations): z_n = eval_function(function_params, z_n) if infinity is not None: - numpy.nan_to_num(z_n, copy=False, nan=infinity) - z_n[abs(z_n) == numpy.inf] = infinity + np.nan_to_num(z_n, copy=False, nan=infinity) + z_n[abs(z_n) == np.inf] = infinity return z_n @@ -130,21 +128,21 @@ def show_results( function_label: str, function_params: Any, escape_radius: float, - z_final: numpy.ndarray, + z_final: np.ndarray, ) -> None: """ Plots of whether the absolute value of z_final is greater than the value of escape_radius. Adds the function_label and function_params to the title. 
- >>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) + >>> show_results('80', 0, 1, np.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) """ abs_z_final = (abs(z_final)).transpose() abs_z_final[:, :] = abs_z_final[::-1, :] - pyplot.matshow(abs_z_final < escape_radius) - pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$") - pyplot.show() + plt.matshow(abs_z_final < escape_radius) + plt.title(f"Julia set of ${function_label}$, $c={function_params}$") + plt.show() def ignore_overflow_warnings() -> None: diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index 30cd4b39c..724b78f41 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -22,25 +22,25 @@ Requirements (pip): from __future__ import annotations -import matplotlib.pyplot as plt # type: ignore -import numpy +import matplotlib.pyplot as plt +import numpy as np # initial triangle of Koch snowflake -VECTOR_1 = numpy.array([0, 0]) -VECTOR_2 = numpy.array([0.5, 0.8660254]) -VECTOR_3 = numpy.array([1, 0]) +VECTOR_1 = np.array([0, 0]) +VECTOR_2 = np.array([0.5, 0.8660254]) +VECTOR_3 = np.array([1, 0]) INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] # uncomment for simple Koch curve instead of Koch snowflake # INITIAL_VECTORS = [VECTOR_1, VECTOR_3] -def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]: +def iterate(initial_vectors: list[np.ndarray], steps: int) -> list[np.ndarray]: """ Go through the number of iterations determined by the argument "steps". Be careful with high values (above 5) since the time to calculate increases exponentially. - >>> iterate([numpy.array([0, 0]), numpy.array([1, 0])], 1) + >>> iterate([np.array([0, 0]), np.array([1, 0])], 1) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. ]), array([1, 0])] """ @@ -50,13 +50,13 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar return vectors -def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: +def iteration_step(vectors: list[np.ndarray]) -> list[np.ndarray]: """ Loops through each pair of adjacent vectors. Each line between two adjacent vectors is divided into 4 segments by adding 3 additional vectors in-between the original two vectors. The vector in the middle is constructed through a 60 degree rotation so it is bent outwards. - >>> iteration_step([numpy.array([0, 0]), numpy.array([1, 0])]) + >>> iteration_step([np.array([0, 0]), np.array([1, 0])]) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. 
]), array([1, 0])] """ @@ -74,22 +74,22 @@ def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: return new_vectors -def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray: +def rotate(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray: """ Standard rotation of a 2D vector with a rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix ) - >>> rotate(numpy.array([1, 0]), 60) + >>> rotate(np.array([1, 0]), 60) array([0.5 , 0.8660254]) - >>> rotate(numpy.array([1, 0]), 90) + >>> rotate(np.array([1, 0]), 90) array([6.123234e-17, 1.000000e+00]) """ - theta = numpy.radians(angle_in_degrees) - c, s = numpy.cos(theta), numpy.sin(theta) - rotation_matrix = numpy.array(((c, -s), (s, c))) - return numpy.dot(rotation_matrix, vector) + theta = np.radians(angle_in_degrees) + c, s = np.cos(theta), np.sin(theta) + rotation_matrix = np.array(((c, -s), (s, c))) + return np.dot(rotation_matrix, vector) -def plot(vectors: list[numpy.ndarray]) -> None: +def plot(vectors: list[np.ndarray]) -> None: """ Utility function to plot the vectors using matplotlib.pyplot No doctest was implemented since this function does not have a return value diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 7c22329ad..6eeb89da6 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -78,7 +78,7 @@ class BezierCurve: step_size: defines the step(s) at which to evaluate the Bezier curve. The smaller the step size, the finer the curve produced. """ - from matplotlib import pyplot as plt # type: ignore + from matplotlib import pyplot as plt to_plot_x: list[float] = [] # x coordinates of points to plot to_plot_y: list[float] = [] # y coordinates of points to plot diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index db38b3c95..95463faf5 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -3,7 +3,7 @@ Implementation of gradient descent algorithm for minimizing cost of a linear hyp function. 
""" -import numpy +import numpy as np # List of input, output pairs train_data = ( @@ -116,7 +116,7 @@ def run_gradient_descent(): temp_parameter_vector[i] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) - if numpy.allclose( + if np.allclose( parameter_vector, temp_parameter_vector, atol=absolute_error_limit, diff --git a/neural_network/input_data.py b/neural_network/input_data.py index f7ae86b48..9d4195487 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -22,7 +22,7 @@ import os import typing import urllib -import numpy +import numpy as np from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated @@ -39,8 +39,8 @@ DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/" def _read32(bytestream): - dt = numpy.dtype(numpy.uint32).newbyteorder(">") - return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] + dt = np.dtype(np.uint32).newbyteorder(">") + return np.frombuffer(bytestream.read(4), dtype=dt)[0] @deprecated(None, "Please use tf.data to implement this functionality.") @@ -68,7 +68,7 @@ def _extract_images(f): rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) - data = numpy.frombuffer(buf, dtype=numpy.uint8) + data = np.frombuffer(buf, dtype=np.uint8) data = data.reshape(num_images, rows, cols, 1) return data @@ -77,8 +77,8 @@ def _extract_images(f): def _dense_to_one_hot(labels_dense, num_classes): """Convert class labels from scalars to one-hot vectors.""" num_labels = labels_dense.shape[0] - index_offset = numpy.arange(num_labels) * num_classes - labels_one_hot = numpy.zeros((num_labels, num_classes)) + index_offset = np.arange(num_labels) * num_classes + labels_one_hot = np.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot @@ -107,7 +107,7 @@ def _extract_labels(f, one_hot=False, num_classes=10): ) num_items = _read32(bytestream) buf = bytestream.read(num_items) - labels = numpy.frombuffer(buf, dtype=numpy.uint8) + labels = np.frombuffer(buf, dtype=np.uint8) if one_hot: return _dense_to_one_hot(labels, num_classes) return labels @@ -153,7 +153,7 @@ class _DataSet: """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned - numpy.random.seed(seed1 if seed is None else seed2) + np.random.seed(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) @@ -175,8 +175,8 @@ class _DataSet: ) if dtype == dtypes.float32: # Convert from [0, 255] -> [0.0, 1.0]. 
- images = images.astype(numpy.float32) - images = numpy.multiply(images, 1.0 / 255.0) + images = images.astype(np.float32) + images = np.multiply(images, 1.0 / 255.0) self._images = images self._labels = labels self._epochs_completed = 0 @@ -210,8 +210,8 @@ class _DataSet: start = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: - perm0 = numpy.arange(self._num_examples) - numpy.random.shuffle(perm0) + perm0 = np.arange(self._num_examples) + np.random.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -224,8 +224,8 @@ class _DataSet: labels_rest_part = self._labels[start : self._num_examples] # Shuffle the data if shuffle: - perm = numpy.arange(self._num_examples) - numpy.random.shuffle(perm) + perm = np.arange(self._num_examples) + np.random.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch @@ -235,8 +235,8 @@ class _DataSet: images_new_part = self._images[start:end] labels_new_part = self._labels[start:end] return ( - numpy.concatenate((images_rest_part, images_new_part), axis=0), - numpy.concatenate((labels_rest_part, labels_new_part), axis=0), + np.concatenate((images_rest_part, images_new_part), axis=0), + np.concatenate((labels_rest_part, labels_new_part), axis=0), ) else: self._index_in_epoch += batch_size diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index 7b374a93d..dea7e2342 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -5,11 +5,11 @@ References: - https://en.wikipedia.org/wiki/Feedforward_neural_network (Feedforward) """ -import numpy +import numpy as np class TwoHiddenLayerNeuralNetwork: - def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None: + def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None: """ This function initializes the TwoHiddenLayerNeuralNetwork class with random weights for every layer and initializes predicted output with zeroes. @@ -28,30 +28,28 @@ class TwoHiddenLayerNeuralNetwork: # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = numpy.random.rand( + self.input_layer_and_first_hidden_layer_weights = np.random.rand( self.input_array.shape[1], 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand( - 4, 3 - ) + self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) # Real output values provided. self.output_array = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. - self.predicted_output = numpy.zeros(output_array.shape) + self.predicted_output = np.zeros(output_array.shape) - def feedforward(self) -> numpy.ndarray: + def feedforward(self) -> np.ndarray: """ The information moves in only one direction i.e. 
forward from the input nodes, through the two hidden nodes and to the output nodes. @@ -60,24 +58,24 @@ class TwoHiddenLayerNeuralNetwork: Return layer_between_second_hidden_layer_and_output (i.e the last layer of the neural network). - >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) - >>> output_val = numpy.array(([0], [0], [0]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) + >>> output_val = np.array(([0], [0], [0]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() - >>> array_sum = numpy.sum(res) - >>> numpy.isnan(array_sum) + >>> array_sum = np.sum(res) + >>> np.isnan(array_sum) False """ # Layer_between_input_and_first_hidden_layer is the layer connecting the # input nodes with the first hidden layer nodes. self.layer_between_input_and_first_hidden_layer = sigmoid( - numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights) + np.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( - numpy.dot( + np.dot( self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) @@ -86,7 +84,7 @@ class TwoHiddenLayerNeuralNetwork: # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. self.layer_between_second_hidden_layer_and_output = sigmoid( - numpy.dot( + np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) @@ -100,8 +98,8 @@ class TwoHiddenLayerNeuralNetwork: error rate obtained in the previous epoch (i.e., iteration). Updation is done using derivative of sogmoid activation function. 
- >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) - >>> output_val = numpy.array(([0], [0], [0]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) + >>> output_val = np.array(([0], [0], [0]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() >>> nn.back_propagation() @@ -110,15 +108,15 @@ class TwoHiddenLayerNeuralNetwork: False """ - updated_second_hidden_layer_and_output_layer_weights = numpy.dot( + updated_second_hidden_layer_and_output_layer_weights = np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T, 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), ) - updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot( + updated_first_hidden_layer_and_second_hidden_layer_weights = np.dot( self.layer_between_input_and_first_hidden_layer.T, - numpy.dot( + np.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), @@ -128,10 +126,10 @@ class TwoHiddenLayerNeuralNetwork: self.layer_between_first_hidden_layer_and_second_hidden_layer ), ) - updated_input_layer_and_first_hidden_layer_weights = numpy.dot( + updated_input_layer_and_first_hidden_layer_weights = np.dot( self.input_array.T, - numpy.dot( - numpy.dot( + np.dot( + np.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), @@ -155,7 +153,7 @@ class TwoHiddenLayerNeuralNetwork: updated_second_hidden_layer_and_output_layer_weights ) - def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None: + def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None: """ Performs the feedforwarding and back propagation process for the given number of iterations. @@ -166,8 +164,8 @@ class TwoHiddenLayerNeuralNetwork: give_loss : boolean value, If True then prints loss for each iteration, If False then nothing is printed - >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) - >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = np.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> first_iteration_weights = nn.feedforward() >>> nn.back_propagation() @@ -179,10 +177,10 @@ class TwoHiddenLayerNeuralNetwork: self.output = self.feedforward() self.back_propagation() if give_loss: - loss = numpy.mean(numpy.square(output - self.feedforward())) + loss = np.mean(np.square(output - self.feedforward())) print(f"Iteration {iteration} Loss: {loss}") - def predict(self, input_arr: numpy.ndarray) -> int: + def predict(self, input_arr: np.ndarray) -> int: """ Predict's the output for the given input values using the trained neural network. @@ -192,8 +190,8 @@ class TwoHiddenLayerNeuralNetwork: than the threshold value else returns 0, as the real output values are in binary. 
- >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) - >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = np.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> nn.train(output_val, 1000, False) >>> nn.predict([0, 1, 0]) in (0, 1) @@ -204,18 +202,18 @@ class TwoHiddenLayerNeuralNetwork: self.array = input_arr self.layer_between_input_and_first_hidden_layer = sigmoid( - numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) + np.dot(self.array, self.input_layer_and_first_hidden_layer_weights) ) self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( - numpy.dot( + np.dot( self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) ) self.layer_between_second_hidden_layer_and_output = sigmoid( - numpy.dot( + np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) @@ -224,26 +222,26 @@ class TwoHiddenLayerNeuralNetwork: return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0]) -def sigmoid(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid(value: np.ndarray) -> np.ndarray: """ Applies sigmoid activation function. return normalized values - >>> sigmoid(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[0.73105858, 0.5 , 0.88079708], [0.73105858, 0.5 , 0.5 ]]) """ - return 1 / (1 + numpy.exp(-value)) + return 1 / (1 + np.exp(-value)) -def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid_derivative(value: np.ndarray) -> np.ndarray: """ Provides the derivative value of the sigmoid function. returns derivative of the sigmoid value - >>> sigmoid_derivative(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid_derivative(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[ 0., 0., -2.], [ 0., 0., 0.]]) """ @@ -264,7 +262,7 @@ def example() -> int: True """ # Input values. - test_input = numpy.array( + test_input = np.array( ( [0, 0, 0], [0, 0, 1], @@ -275,11 +273,11 @@ def example() -> int: [1, 1, 0], [1, 1, 1], ), - dtype=numpy.float64, + dtype=np.float64, ) # True output values for the given input values. - output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64) + output = np.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=np.float64) # Calling neural network class. neural_network = TwoHiddenLayerNeuralNetwork( @@ -290,7 +288,7 @@ def example() -> int: # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=output, iterations=10, give_loss=False) - return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64)) + return neural_network.predict(np.array(([1, 1, 1]), dtype=np.float64)) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index b9f3115df..22da7cb77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME "INP001", # File `x/y/z.py` is part of an implicit namespace package. 
Add an `__init__.py`. -- FIX ME "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME
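For readers unfamiliar with the rule being enabled: ICN001 ("unconventional-import-alias", from flake8-import-conventions) flags imports of well-known libraries that do not use their community-standard aliases, which is exactly what the mechanical `numpy` -> `np` and `pyplot` -> `plt` renames above satisfy. The following is a minimal illustrative sketch of that convention, not part of the patch itself; the variable names and plotted values are arbitrary.

    # Conventional aliases expected by ruff's ICN001 rule
    import matplotlib.pyplot as plt  # instead of `from matplotlib import pyplot`
    import numpy as np               # instead of `import numpy`

    # Call sites then use the short aliases, as in the modules changed above
    xs = np.linspace(-2.0, 2.0, 5)   # five evenly spaced sample points
    plt.plot(xs, np.exp(xs))         # plot e^x over those points
    plt.show()

With the rule enabled (the `"ICN001"` entry removed from `lint.ignore` in pyproject.toml), any future `import numpy` or `from matplotlib import pyplot` in the repository will be reported by ruff.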