From c93659d7ce65e3717f06333e3d049ebaa888e597 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 29 May 2023 17:37:54 -0700 Subject: [PATCH 01/43] Fix type error in `strassen_matrix_multiplication.py` (#8784) * Fix type error in strassen_matrix_multiplication.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ...ion.py.BROKEN => strassen_matrix_multiplication.py} | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) rename divide_and_conquer/{strassen_matrix_multiplication.py.BROKEN => strassen_matrix_multiplication.py} (97%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 11ff93c91..231b0e2f1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,6 +294,7 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) + * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN b/divide_and_conquer/strassen_matrix_multiplication.py similarity index 97% rename from divide_and_conquer/strassen_matrix_multiplication.py.BROKEN rename to divide_and_conquer/strassen_matrix_multiplication.py index 2ca91c63b..cbfc7e565 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -112,17 +112,19 @@ def strassen(matrix1: list, matrix2: list) -> list: [[139, 163], [121, 134], [100, 121]] """ if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]: - raise Exception( - "Unable to multiply these matrices, please check the dimensions. 
\n" - f"Matrix A:{matrix1} \nMatrix B:{matrix2}" + msg = ( + "Unable to multiply these matrices, please check the dimensions.\n" + f"Matrix A: {matrix1}\n" + f"Matrix B: {matrix2}" ) + raise Exception(msg) dimension1 = matrix_dimensions(matrix1) dimension2 = matrix_dimensions(matrix2) if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(dimension1, dimension2) + maximum = max(*dimension1, *dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 From 4a27b544303e6bab90ed57b72fa3acf3d785429e Mon Sep 17 00:00:00 2001 From: Sundaram Kumar Jha Date: Wed, 31 May 2023 06:26:59 +0530 Subject: [PATCH 02/43] Update permutations.py (#8102) --- data_structures/arrays/permutations.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py index eb3f26517..4558bd8d4 100644 --- a/data_structures/arrays/permutations.py +++ b/data_structures/arrays/permutations.py @@ -1,7 +1,6 @@ def permute(nums: list[int]) -> list[list[int]]: """ Return all permutations. - >>> from itertools import permutations >>> numbers= [1,2,3] >>> all(list(nums) in permute(numbers) for nums in permutations(numbers)) @@ -20,7 +19,32 @@ def permute(nums: list[int]) -> list[list[int]]: return result +def permute2(nums): + """ + Return all permutations of the given list. 
+ + >>> permute2([1, 2, 3]) + [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]] + """ + + def backtrack(start): + if start == len(nums) - 1: + output.append(nums[:]) + else: + for i in range(start, len(nums)): + nums[start], nums[i] = nums[i], nums[start] + backtrack(start + 1) + nums[start], nums[i] = nums[i], nums[start] # backtrack + + output = [] + backtrack(0) + return output + + if __name__ == "__main__": import doctest + # use res to print the data in permute2 function + res = permute2([1, 2, 3]) + print(res) doctest.testmod() From e871540e37b834673f9e6650b8e2281d7d36a8c3 Mon Sep 17 00:00:00 2001 From: Rudransh Bhardwaj <115872354+rudransh61@users.noreply.github.com> Date: Wed, 31 May 2023 20:33:02 +0530 Subject: [PATCH 03/43] Added rank of matrix in linear algebra (#8687) * Added rank of matrix in linear algebra * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Corrected name of function * Corrected Rank_of_Matrix.py * Completed rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * delete to rename Rank_of_Matrix.py * created rank_of_matrix * added more doctests in rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed some issues in rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added moreeee doctestsss in rank_of_mtrix.py and fixed some bugss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update rank_of_matrix.py * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Caeden Perelli-Harris --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- linear_algebra/src/rank_of_matrix.py | 89 ++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 linear_algebra/src/rank_of_matrix.py diff --git a/linear_algebra/src/rank_of_matrix.py b/linear_algebra/src/rank_of_matrix.py new file mode 100644 index 000000000..7ff3c1699 --- /dev/null +++ b/linear_algebra/src/rank_of_matrix.py @@ -0,0 +1,89 @@ +""" +Calculate the rank of a matrix. + +See: https://en.wikipedia.org/wiki/Rank_(linear_algebra) +""" + + +def rank_of_matrix(matrix: list[list[int | float]]) -> int: + """ + Finds the rank of a matrix. + Args: + matrix: The matrix as a list of lists. + Returns: + The rank of the matrix. + Example: + >>> matrix1 = [[1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9]] + >>> rank_of_matrix(matrix1) + 2 + >>> matrix2 = [[1, 0, 0], + ... [0, 1, 0], + ... [0, 0, 0]] + >>> rank_of_matrix(matrix2) + 2 + >>> matrix3 = [[1, 2, 3, 4], + ... [5, 6, 7, 8], + ... [9, 10, 11, 12]] + >>> rank_of_matrix(matrix3) + 2 + >>> rank_of_matrix([[2,3,-1,-1], + ... [1,-1,-2,4], + ... [3,1,3,-2], + ... [6,3,0,-7]]) + 4 + >>> rank_of_matrix([[2,1,-3,-6], + ... [3,-3,1,2], + ... [1,1,1,2]]) + 3 + >>> rank_of_matrix([[2,-1,0], + ... [1,3,4], + ... [4,1,-3]]) + 3 + >>> rank_of_matrix([[3,2,1], + ... 
[-6,-4,-2]]) + 1 + >>> rank_of_matrix([[],[]]) + 0 + >>> rank_of_matrix([[1]]) + 1 + >>> rank_of_matrix([[]]) + 0 + """ + + rows = len(matrix) + columns = len(matrix[0]) + rank = min(rows, columns) + + for row in range(rank): + # Check if diagonal element is not zero + if matrix[row][row] != 0: + # Eliminate all the elements below the diagonal + for col in range(row + 1, rows): + multiplier = matrix[col][row] / matrix[row][row] + for i in range(row, columns): + matrix[col][i] -= multiplier * matrix[row][i] + else: + # Find a non-zero diagonal element to swap rows + reduce = True + for i in range(row + 1, rows): + if matrix[i][row] != 0: + matrix[row], matrix[i] = matrix[i], matrix[row] + reduce = False + break + if reduce: + rank -= 1 + for i in range(rows): + matrix[i][row] = matrix[i][rank] + + # Reduce the row pointer by one to stay on the same row + row -= 1 + + return rank + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4621b0bb4f5d3fff2fa4f0e53d6cb862fe002c60 Mon Sep 17 00:00:00 2001 From: nith2001 <75632283+nith2001@users.noreply.github.com> Date: Wed, 31 May 2023 13:06:12 -0700 Subject: [PATCH 04/43] Improved Graph Implementations (#8730) * Improved Graph Implementations Provides new implementation for graph_list.py and graph_matrix.py along with pytest suites for each. 
Fixes #8709 * Graph implementation style fixes, corrections, and refactored tests * Helpful docs about graph implementation * Refactored code to separate files and applied enumerate() * Renamed files and refactored code to fail fast * Error handling style fix * Fixed f-string code quality issue * Last f-string fix * Added return types to test functions and more style fixes * Added more function return types * Added more function return types pt2 * Fixed error messages --- graphs/graph_adjacency_list.py | 589 ++++++++++++++++++++++++++++++ graphs/graph_adjacency_matrix.py | 608 +++++++++++++++++++++++++++++++ graphs/graph_matrix.py | 24 -- graphs/tests/__init__.py | 0 4 files changed, 1197 insertions(+), 24 deletions(-) create mode 100644 graphs/graph_adjacency_list.py create mode 100644 graphs/graph_adjacency_matrix.py delete mode 100644 graphs/graph_matrix.py create mode 100644 graphs/tests/__init__.py diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py new file mode 100644 index 000000000..76f34f845 --- /dev/null +++ b/graphs/graph_adjacency_list.py @@ -0,0 +1,589 @@ +#!/usr/bin/env python3 +""" +Author: Vikram Nithyanandam + +Description: +The following implementation is a robust unweighted Graph data structure +implemented using an adjacency list. This vertices and edges of this graph can be +effectively initialized and modified while storing your chosen generic +value in each vertex. 
+ +Adjacency List: https://en.wikipedia.org/wiki/Adjacency_list + +Potential Future Ideas: +- Add a flag to set edge weights on and set edge weights +- Make edge weights and vertex values customizable to store whatever the client wants +- Support multigraph functionality if the client wants it +""" +from __future__ import annotations + +import random +import unittest +from pprint import pformat +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class GraphAdjacencyList(Generic[T]): + def __init__( + self, vertices: list[T], edges: list[list[T]], directed: bool = True + ) -> None: + """ + Parameters: + - vertices: (list[T]) The list of vertex names the client wants to + pass in. Default is empty. + - edges: (list[list[T]]) The list of edges the client wants to + pass in. Each edge is a 2-element list. Default is empty. + - directed: (bool) Indicates if graph is directed or undirected. + Default is True. + """ + self.adj_list: dict[T, list[T]] = {} # dictionary of lists of T + self.directed = directed + + # Falsey checks + edges = edges or [] + vertices = vertices or [] + + for vertex in vertices: + self.add_vertex(vertex) + + for edge in edges: + if len(edge) != 2: + msg = f"Invalid input: {edge} is the wrong length." + raise ValueError(msg) + self.add_edge(edge[0], edge[1]) + + def add_vertex(self, vertex: T) -> None: + """ + Adds a vertex to the graph. If the given vertex already exists, + a ValueError will be thrown. + """ + if self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} is already in the graph." + raise ValueError(msg) + self.adj_list[vertex] = [] + + def add_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Creates an edge from source vertex to destination vertex. If any + given vertex doesn't exist or the edge already exists, a ValueError + will be thrown. 
+ """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge already exists between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # add the destination vertex to the list associated with the source vertex + # and vice versa if not directed + self.adj_list[source_vertex].append(destination_vertex) + if not self.directed: + self.adj_list[destination_vertex].append(source_vertex) + + def remove_vertex(self, vertex: T) -> None: + """ + Removes the given vertex from the graph and deletes all incoming and + outgoing edges from the given vertex as well. If the given vertex + does not exist, a ValueError will be thrown. + """ + if not self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} does not exist in this graph." + raise ValueError(msg) + + if not self.directed: + # If not directed, find all neighboring vertices and delete all references + # of edges connecting to the given vertex + for neighbor in self.adj_list[vertex]: + self.adj_list[neighbor].remove(vertex) + else: + # If directed, search all neighbors of all vertices and delete all + # references of edges connecting to the given vertex + for edge_list in self.adj_list.values(): + if vertex in edge_list: + edge_list.remove(vertex) + + # Finally, delete the given vertex and all of its outgoing edge references + self.adj_list.pop(vertex) + + def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Removes the edge between the two vertices. If any given vertex + doesn't exist or the edge does not exist, a ValueError will be thrown. 
+ """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if not self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge does NOT exist between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # remove the destination vertex from the list associated with the source + # vertex and vice versa if not directed + self.adj_list[source_vertex].remove(destination_vertex) + if not self.directed: + self.adj_list[destination_vertex].remove(source_vertex) + + def contains_vertex(self, vertex: T) -> bool: + """ + Returns True if the graph contains the vertex, False otherwise. + """ + return vertex in self.adj_list + + def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: + """ + Returns True if the graph contains the edge from the source_vertex to the + destination_vertex, False otherwise. If any given vertex doesn't exist, a + ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} " + f"or {destination_vertex} does not exist." + ) + raise ValueError(msg) + + return destination_vertex in self.adj_list[source_vertex] + + def clear_graph(self) -> None: + """ + Clears all vertices and edges. 
+ """ + self.adj_list = {} + + def __repr__(self) -> str: + return pformat(self.adj_list) + + +class TestGraphAdjacencyList(unittest.TestCase): + def __assert_graph_edge_exists_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + edge: list[int], + ) -> None: + self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_edge_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + edge: list[int], + ) -> None: + self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_vertex_exists_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + vertex: int, + ) -> None: + self.assertTrue(undirected_graph.contains_vertex(vertex)) + self.assertTrue(directed_graph.contains_vertex(vertex)) + + def __assert_graph_vertex_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + vertex: int, + ) -> None: + self.assertFalse(undirected_graph.contains_vertex(vertex)) + self.assertFalse(directed_graph.contains_vertex(vertex)) + + def __generate_random_edges( + self, vertices: list[int], edge_pick_count: int + ) -> list[list[int]]: + self.assertTrue(edge_pick_count <= len(vertices)) + + random_source_vertices: list[int] = random.sample( + vertices[0 : int(len(vertices) / 2)], edge_pick_count + ) + random_destination_vertices: list[int] = random.sample( + vertices[int(len(vertices) / 2) :], edge_pick_count + ) + random_edges: list[list[int]] = [] + + for source in random_source_vertices: + for dest in random_destination_vertices: + random_edges.append([source, dest]) + + return 
random_edges + + def __generate_graphs( + self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int + ) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]: + if max_val - min_val + 1 < vertex_count: + raise ValueError( + "Will result in duplicate vertices. Either increase range " + "between min_val and max_val or decrease vertex count." + ) + + # generate graph input + random_vertices: list[int] = random.sample( + range(min_val, max_val + 1), vertex_count + ) + random_edges: list[list[int]] = self.__generate_random_edges( + random_vertices, edge_pick_count + ) + + # build graphs + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=random_edges, directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=random_edges, directed=True + ) + + return undirected_graph, directed_graph, random_vertices, random_edges + + def test_init_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # test graph initialization with vertices and edges + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + self.assertFalse(undirected_graph.directed) + self.assertTrue(directed_graph.directed) + + def test_contains_vertex(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # Build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # Test contains_vertex + for num in range(101): + self.assertEqual( + num in random_vertices, undirected_graph.contains_vertex(num) + ) + self.assertEqual( + num in random_vertices, 
directed_graph.contains_vertex(num) + ) + + def test_add_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build empty graphs + undirected_graph: GraphAdjacencyList = GraphAdjacencyList( + vertices=[], edges=[], directed=False + ) + directed_graph: GraphAdjacencyList = GraphAdjacencyList( + vertices=[], edges=[], directed=True + ) + + # run add_vertex + for num in random_vertices: + undirected_graph.add_vertex(num) + + for num in random_vertices: + directed_graph.add_vertex(num) + + # test add_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + def test_remove_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # test remove_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + undirected_graph.remove_vertex(num) + directed_graph.remove_vertex(num) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, num + ) + + def test_add_and_remove_vertices_repeatedly(self) -> None: + random_vertices1: list[int] = random.sample(range(51), 20) + random_vertices2: list[int] = random.sample(range(51, 101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices1, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices1, edges=[], directed=True + ) + + # test adding and removing vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.add_vertex(random_vertices2[i]) + directed_graph.add_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_exists_check( + undirected_graph, 
directed_graph, random_vertices2[i] + ) + + undirected_graph.remove_vertex(random_vertices1[i]) + directed_graph.remove_vertex(random_vertices1[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices1[i] + ) + + # remove all vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.remove_vertex(random_vertices2[i]) + directed_graph.remove_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + def test_contains_edge(self) -> None: + # generate graphs and graph input + vertex_count = 20 + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(vertex_count, 0, 100, 4) + + # generate all possible edges for testing + all_possible_edges: list[list[int]] = [] + for i in range(vertex_count - 1): + for j in range(i + 1, vertex_count): + all_possible_edges.append([random_vertices[i], random_vertices[j]]) + all_possible_edges.append([random_vertices[j], random_vertices[i]]) + + # test contains_edge function + for edge in all_possible_edges: + if edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + elif [edge[1], edge[0]] in random_edges: + # since this edge exists for undirected but the reverse + # may not exist for directed + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, [edge[1], edge[0]] + ) + else: + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_edge(self) -> None: + # generate graph input + random_vertices: list[int] = random.sample(range(101), 15) + random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + 
vertices=random_vertices, edges=[], directed=True + ) + + # run and test add_edge + for edge in random_edges: + undirected_graph.add_edge(edge[0], edge[1]) + directed_graph.add_edge(edge[0], edge[1]) + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + def test_remove_edge(self) -> None: + # generate graph input and graphs + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # run and test remove_edge + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + undirected_graph.remove_edge(edge[0], edge[1]) + directed_graph.remove_edge(edge[0], edge[1]) + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_and_remove_edges_repeatedly(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # make some more edge options! 
+ more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for i, _ in enumerate(random_edges): + undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, more_random_edges[i] + ) + + undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, random_edges[i] + ) + + def test_add_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.add_vertex(vertex) + with self.assertRaises(ValueError): + directed_graph.add_vertex(vertex) + + def test_remove_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for i in range(101): + if i not in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.remove_vertex(i) + with self.assertRaises(ValueError): + directed_graph.remove_vertex(i) + + def test_add_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for edge in random_edges: + with self.assertRaises(ValueError): + undirected_graph.add_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + 
directed_graph.add_edge(edge[0], edge[1]) + + def test_remove_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for edge in more_random_edges: + with self.assertRaises(ValueError): + undirected_graph.remove_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.remove_edge(edge[0], edge[1]) + + def test_contains_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.contains_edge(vertex, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(vertex, 102) + + with self.assertRaises(ValueError): + undirected_graph.contains_edge(103, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(103, 102) + + +if __name__ == "__main__": + unittest.main() diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py new file mode 100644 index 000000000..4d2e02f73 --- /dev/null +++ b/graphs/graph_adjacency_matrix.py @@ -0,0 +1,608 @@ +#!/usr/bin/env python3 +""" +Author: Vikram Nithyanandam + +Description: +The following implementation is a robust unweighted Graph data structure +implemented using an adjacency matrix. This vertices and edges of this graph can be +effectively initialized and modified while storing your chosen generic +value in each vertex. 
+ +Adjacency Matrix: https://mathworld.wolfram.com/AdjacencyMatrix.html + +Potential Future Ideas: +- Add a flag to set edge weights on and set edge weights +- Make edge weights and vertex values customizable to store whatever the client wants +- Support multigraph functionality if the client wants it +""" +from __future__ import annotations + +import random +import unittest +from pprint import pformat +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class GraphAdjacencyMatrix(Generic[T]): + def __init__( + self, vertices: list[T], edges: list[list[T]], directed: bool = True + ) -> None: + """ + Parameters: + - vertices: (list[T]) The list of vertex names the client wants to + pass in. Default is empty. + - edges: (list[list[T]]) The list of edges the client wants to + pass in. Each edge is a 2-element list. Default is empty. + - directed: (bool) Indicates if graph is directed or undirected. + Default is True. + """ + self.directed = directed + self.vertex_to_index: dict[T, int] = {} + self.adj_matrix: list[list[int]] = [] + + # Falsey checks + edges = edges or [] + vertices = vertices or [] + + for vertex in vertices: + self.add_vertex(vertex) + + for edge in edges: + if len(edge) != 2: + msg = f"Invalid input: {edge} must have length 2." + raise ValueError(msg) + self.add_edge(edge[0], edge[1]) + + def add_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Creates an edge from source vertex to destination vertex. If any + given vertex doesn't exist or the edge already exists, a ValueError + will be thrown. 
+ """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge already exists between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # Get the indices of the corresponding vertices and set their edge value to 1. + u: int = self.vertex_to_index[source_vertex] + v: int = self.vertex_to_index[destination_vertex] + self.adj_matrix[u][v] = 1 + if not self.directed: + self.adj_matrix[v][u] = 1 + + def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Removes the edge between the two vertices. If any given vertex + doesn't exist or the edge does not exist, a ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if not self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge does NOT exist between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # Get the indices of the corresponding vertices and set their edge value to 0. + u: int = self.vertex_to_index[source_vertex] + v: int = self.vertex_to_index[destination_vertex] + self.adj_matrix[u][v] = 0 + if not self.directed: + self.adj_matrix[v][u] = 0 + + def add_vertex(self, vertex: T) -> None: + """ + Adds a vertex to the graph. If the given vertex already exists, + a ValueError will be thrown. + """ + if self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} already exists in this graph." 
+ raise ValueError(msg) + + # build column for vertex + for row in self.adj_matrix: + row.append(0) + + # build row for vertex and update other data structures + self.adj_matrix.append([0] * (len(self.adj_matrix) + 1)) + self.vertex_to_index[vertex] = len(self.adj_matrix) - 1 + + def remove_vertex(self, vertex: T) -> None: + """ + Removes the given vertex from the graph and deletes all incoming and + outgoing edges from the given vertex as well. If the given vertex + does not exist, a ValueError will be thrown. + """ + if not self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} does not exist in this graph." + raise ValueError(msg) + + # first slide up the rows by deleting the row corresponding to + # the vertex being deleted. + start_index = self.vertex_to_index[vertex] + self.adj_matrix.pop(start_index) + + # next, slide the columns to the left by deleting the values in + # the column corresponding to the vertex being deleted + for lst in self.adj_matrix: + lst.pop(start_index) + + # final clean up + self.vertex_to_index.pop(vertex) + + # decrement indices for vertices shifted by the deleted vertex in the adj matrix + for vertex in self.vertex_to_index: + if self.vertex_to_index[vertex] >= start_index: + self.vertex_to_index[vertex] = self.vertex_to_index[vertex] - 1 + + def contains_vertex(self, vertex: T) -> bool: + """ + Returns True if the graph contains the vertex, False otherwise. + """ + return vertex in self.vertex_to_index + + def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: + """ + Returns True if the graph contains the edge from the source_vertex to the + destination_vertex, False otherwise. If any given vertex doesn't exist, a + ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} " + f"or {destination_vertex} does not exist." 
+ ) + raise ValueError(msg) + + u = self.vertex_to_index[source_vertex] + v = self.vertex_to_index[destination_vertex] + return self.adj_matrix[u][v] == 1 + + def clear_graph(self) -> None: + """ + Clears all vertices and edges. + """ + self.vertex_to_index = {} + self.adj_matrix = [] + + def __repr__(self) -> str: + first = "Adj Matrix:\n" + pformat(self.adj_matrix) + second = "\nVertex to index mapping:\n" + pformat(self.vertex_to_index) + return first + second + + +class TestGraphMatrix(unittest.TestCase): + def __assert_graph_edge_exists_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + edge: list[int], + ) -> None: + self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_edge_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + edge: list[int], + ) -> None: + self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_vertex_exists_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + vertex: int, + ) -> None: + self.assertTrue(undirected_graph.contains_vertex(vertex)) + self.assertTrue(directed_graph.contains_vertex(vertex)) + + def __assert_graph_vertex_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + vertex: int, + ) -> None: + self.assertFalse(undirected_graph.contains_vertex(vertex)) + self.assertFalse(directed_graph.contains_vertex(vertex)) + + def __generate_random_edges( + self, vertices: list[int], edge_pick_count: int + ) -> list[list[int]]: + self.assertTrue(edge_pick_count <= len(vertices)) + + 
random_source_vertices: list[int] = random.sample( + vertices[0 : int(len(vertices) / 2)], edge_pick_count + ) + random_destination_vertices: list[int] = random.sample( + vertices[int(len(vertices) / 2) :], edge_pick_count + ) + random_edges: list[list[int]] = [] + + for source in random_source_vertices: + for dest in random_destination_vertices: + random_edges.append([source, dest]) + + return random_edges + + def __generate_graphs( + self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int + ) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]: + if max_val - min_val + 1 < vertex_count: + raise ValueError( + "Will result in duplicate vertices. Either increase " + "range between min_val and max_val or decrease vertex count" + ) + + # generate graph input + random_vertices: list[int] = random.sample( + range(min_val, max_val + 1), vertex_count + ) + random_edges: list[list[int]] = self.__generate_random_edges( + random_vertices, edge_pick_count + ) + + # build graphs + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=random_edges, directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=random_edges, directed=True + ) + + return undirected_graph, directed_graph, random_vertices, random_edges + + def test_init_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # test graph initialization with vertices and edges + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + self.assertFalse(undirected_graph.directed) + self.assertTrue(directed_graph.directed) + + def test_contains_vertex(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # Build graphs WITHOUT edges 
+ undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # Test contains_vertex + for num in range(101): + self.assertEqual( + num in random_vertices, undirected_graph.contains_vertex(num) + ) + self.assertEqual( + num in random_vertices, directed_graph.contains_vertex(num) + ) + + def test_add_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build empty graphs + undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( + vertices=[], edges=[], directed=False + ) + directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( + vertices=[], edges=[], directed=True + ) + + # run add_vertex + for num in random_vertices: + undirected_graph.add_vertex(num) + + for num in random_vertices: + directed_graph.add_vertex(num) + + # test add_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + def test_remove_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # test remove_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + undirected_graph.remove_vertex(num) + directed_graph.remove_vertex(num) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, num + ) + + def test_add_and_remove_vertices_repeatedly(self) -> None: + random_vertices1: list[int] = random.sample(range(51), 20) + random_vertices2: list[int] = random.sample(range(51, 101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + 
vertices=random_vertices1, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices1, edges=[], directed=True + ) + + # test adding and removing vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.add_vertex(random_vertices2[i]) + directed_graph.add_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + undirected_graph.remove_vertex(random_vertices1[i]) + directed_graph.remove_vertex(random_vertices1[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices1[i] + ) + + # remove all vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.remove_vertex(random_vertices2[i]) + directed_graph.remove_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + def test_contains_edge(self) -> None: + # generate graphs and graph input + vertex_count = 20 + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(vertex_count, 0, 100, 4) + + # generate all possible edges for testing + all_possible_edges: list[list[int]] = [] + for i in range(vertex_count - 1): + for j in range(i + 1, vertex_count): + all_possible_edges.append([random_vertices[i], random_vertices[j]]) + all_possible_edges.append([random_vertices[j], random_vertices[i]]) + + # test contains_edge function + for edge in all_possible_edges: + if edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + elif [edge[1], edge[0]] in random_edges: + # since this edge exists for undirected but the reverse may + # not exist for directed + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, [edge[1], edge[0]] + ) + else: + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) 
+ + def test_add_edge(self) -> None: + # generate graph input + random_vertices: list[int] = random.sample(range(101), 15) + random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # run and test add_edge + for edge in random_edges: + undirected_graph.add_edge(edge[0], edge[1]) + directed_graph.add_edge(edge[0], edge[1]) + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + def test_remove_edge(self) -> None: + # generate graph input and graphs + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # run and test remove_edge + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + undirected_graph.remove_edge(edge[0], edge[1]) + directed_graph.remove_edge(edge[0], edge[1]) + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_and_remove_edges_repeatedly(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # make some more edge options! 
+ more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for i, _ in enumerate(random_edges): + undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, more_random_edges[i] + ) + + undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, random_edges[i] + ) + + def test_add_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.add_vertex(vertex) + with self.assertRaises(ValueError): + directed_graph.add_vertex(vertex) + + def test_remove_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for i in range(101): + if i not in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.remove_vertex(i) + with self.assertRaises(ValueError): + directed_graph.remove_vertex(i) + + def test_add_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for edge in random_edges: + with self.assertRaises(ValueError): + undirected_graph.add_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + 
directed_graph.add_edge(edge[0], edge[1]) + + def test_remove_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for edge in more_random_edges: + with self.assertRaises(ValueError): + undirected_graph.remove_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.remove_edge(edge[0], edge[1]) + + def test_contains_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.contains_edge(vertex, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(vertex, 102) + + with self.assertRaises(ValueError): + undirected_graph.contains_edge(103, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(103, 102) + + +if __name__ == "__main__": + unittest.main() diff --git a/graphs/graph_matrix.py b/graphs/graph_matrix.py deleted file mode 100644 index 4adc6c0bb..000000000 --- a/graphs/graph_matrix.py +++ /dev/null @@ -1,24 +0,0 @@ -class Graph: - def __init__(self, vertex): - self.vertex = vertex - self.graph = [[0] * vertex for i in range(vertex)] - - def add_edge(self, u, v): - self.graph[u - 1][v - 1] = 1 - self.graph[v - 1][u - 1] = 1 - - def show(self): - for i in self.graph: - for j in i: - print(j, end=" ") - print(" ") - - -g = Graph(100) - -g.add_edge(1, 4) -g.add_edge(4, 2) -g.add_edge(4, 5) -g.add_edge(2, 5) -g.add_edge(5, 3) -g.show() diff --git 
a/graphs/tests/__init__.py b/graphs/tests/__init__.py new file mode 100644 index 000000000..e69de29bb From 3a9e5fa5ecea0df54ed3ffdcb74f46171199f552 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Fri, 2 Jun 2023 17:14:25 +1200 Subject: [PATCH 05/43] Create a Simultaneous Equation Solver Algorithm (#8773) * Added simultaneous_linear_equation_solver.py * Removed Augment class, replaced with recursive functions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed edge cases * Update settings.json --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .vscode/settings.json | 5 + maths/simultaneous_linear_equation_solver.py | 142 +++++++++++++++++++ 2 files changed, 147 insertions(+) create mode 100644 .vscode/settings.json create mode 100644 maths/simultaneous_linear_equation_solver.py diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..ef16fa1aa --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "githubPullRequests.ignoredPullRequestBranches": [ + "master" + ] +} diff --git a/maths/simultaneous_linear_equation_solver.py b/maths/simultaneous_linear_equation_solver.py new file mode 100644 index 000000000..1287b2002 --- /dev/null +++ b/maths/simultaneous_linear_equation_solver.py @@ -0,0 +1,142 @@ +""" +https://en.wikipedia.org/wiki/Augmented_matrix + +This algorithm solves simultaneous linear equations of the form +λa + λb + λc + λd + ... = γ as [λ, λ, λ, λ, ..., γ] +Where λ & γ are individual coefficients, the no. of equations = no. 
of coefficients - 1 + +Note in order to work there must exist 1 equation where all instances of λ and γ != 0 +""" + + +def simplify(current_set: list[list]) -> list[list]: + """ + >>> simplify([[1, 2, 3], [4, 5, 6]]) + [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]] + >>> simplify([[5, 2, 5], [5, 1, 10]]) + [[1.0, 0.4, 1.0], [0.0, 0.2, -1.0]] + """ + # Divide each row by magnitude of first term --> creates 'unit' matrix + duplicate_set = current_set.copy() + for row_index, row in enumerate(duplicate_set): + magnitude = row[0] + for column_index, column in enumerate(row): + if magnitude == 0: + current_set[row_index][column_index] = column + continue + current_set[row_index][column_index] = column / magnitude + # Subtract to cancel term + first_row = current_set[0] + final_set = [first_row] + current_set = current_set[1::] + for row in current_set: + temp_row = [] + # If first term is 0, it is already in form we want, so we preserve it + if row[0] == 0: + final_set.append(row) + continue + for column_index in range(len(row)): + temp_row.append(first_row[column_index] - row[column_index]) + final_set.append(temp_row) + # Create next recursion iteration set + if len(final_set[0]) != 3: + current_first_row = final_set[0] + current_first_column = [] + next_iteration = [] + for row in final_set[1::]: + current_first_column.append(row[0]) + next_iteration.append(row[1::]) + resultant = simplify(next_iteration) + for i in range(len(resultant)): + resultant[i].insert(0, current_first_column[i]) + resultant.insert(0, current_first_row) + final_set = resultant + return final_set + + +def solve_simultaneous(equations: list[list]) -> list: + """ + >>> solve_simultaneous([[1, 2, 3],[4, 5, 6]]) + [-1.0, 2.0] + >>> solve_simultaneous([[0, -3, 1, 7],[3, 2, -1, 11],[5, 1, -2, 12]]) + [6.4, 1.2, 10.6] + >>> solve_simultaneous([]) + Traceback (most recent call last): + ... 
+ IndexError: solve_simultaneous() requires n lists of length n+1 + >>> solve_simultaneous([[1, 2, 3],[1, 2]]) + Traceback (most recent call last): + ... + IndexError: solve_simultaneous() requires n lists of length n+1 + >>> solve_simultaneous([[1, 2, 3],["a", 7, 8]]) + Traceback (most recent call last): + ... + ValueError: solve_simultaneous() requires lists of integers + >>> solve_simultaneous([[0, 2, 3],[4, 0, 6]]) + Traceback (most recent call last): + ... + ValueError: solve_simultaneous() requires at least 1 full equation + """ + if len(equations) == 0: + raise IndexError("solve_simultaneous() requires n lists of length n+1") + _length = len(equations) + 1 + if any(len(item) != _length for item in equations): + raise IndexError("solve_simultaneous() requires n lists of length n+1") + for row in equations: + if any(not isinstance(column, (int, float)) for column in row): + raise ValueError("solve_simultaneous() requires lists of integers") + if len(equations) == 1: + return [equations[0][-1] / equations[0][0]] + data_set = equations.copy() + if any(0 in row for row in data_set): + temp_data = data_set.copy() + full_row = [] + for row_index, row in enumerate(temp_data): + if 0 not in row: + full_row = data_set.pop(row_index) + break + if not full_row: + raise ValueError("solve_simultaneous() requires at least 1 full equation") + data_set.insert(0, full_row) + useable_form = data_set.copy() + simplified = simplify(useable_form) + simplified = simplified[::-1] + solutions: list = [] + for row in simplified: + current_solution = row[-1] + if not solutions: + if row[-2] == 0: + solutions.append(0) + continue + solutions.append(current_solution / row[-2]) + continue + temp_row = row.copy()[: len(row) - 1 :] + while temp_row[0] == 0: + temp_row.pop(0) + if len(temp_row) == 0: + solutions.append(0) + continue + temp_row = temp_row[1::] + temp_row = temp_row[::-1] + for column_index, column in enumerate(temp_row): + current_solution -= column * solutions[column_index] 
+ solutions.append(current_solution) + final = [] + for item in solutions: + final.append(float(round(item, 5))) + return final[::-1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + eq = [ + [2, 1, 1, 1, 1, 4], + [1, 2, 1, 1, 1, 5], + [1, 1, 2, 1, 1, 6], + [1, 1, 1, 2, 1, 7], + [1, 1, 1, 1, 2, 8], + ] + print(solve_simultaneous(eq)) + print(solve_simultaneous([[4, 2]])) From 80d95fccc390d366a9f617d8628a546a7be7b2a3 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 3 Jun 2023 17:16:33 +0100 Subject: [PATCH 06/43] Pytest locally fails due to API_KEY env variable (#8738) * fix: Pytest locally fails due to API_KEY env variable (#8737) * chore: Fix ruff errors --- web_programming/currency_converter.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 69f2a2c4d..3bbcafa8f 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -8,13 +8,7 @@ import os import requests URL_BASE = "https://www.amdoren.com/api/currency.php" -TESTING = os.getenv("CI", "") -API_KEY = os.getenv("AMDOREN_API_KEY", "") -if not API_KEY and not TESTING: - raise KeyError( - "API key must be provided in the 'AMDOREN_API_KEY' environment variable." 
- ) # Currency and their description list_of_currencies = """ @@ -175,20 +169,31 @@ ZMW Zambian Kwacha def convert_currency( - from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY + from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = "" ) -> str: """https://www.amdoren.com/currency-api/""" + # Instead of manually generating parameters params = locals() + # from is a reserved keyword params["from"] = params.pop("from_") res = requests.get(URL_BASE, params=params).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] if __name__ == "__main__": + TESTING = os.getenv("CI", "") + API_KEY = os.getenv("AMDOREN_API_KEY", "") + + if not API_KEY and not TESTING: + raise KeyError( + "API key must be provided in the 'AMDOREN_API_KEY' environment variable." + ) + print( convert_currency( input("Enter from currency: ").strip(), input("Enter to currency: ").strip(), float(input("Enter the amount: ").strip()), + API_KEY, ) ) From fa12b9a286bf42d250b30a772e8f226dc14953f4 Mon Sep 17 00:00:00 2001 From: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Date: Wed, 7 Jun 2023 23:47:27 +0200 Subject: [PATCH 07/43] Speed of sound (#8803) * Create TestShiva * Delete TestShiva * Add speed of sound * Update physics/speed_of_sound.py Co-authored-by: Christian Clauss * Update physics/speed_of_sound.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update speed_of_sound.py * Update speed_of_sound.py --------- Co-authored-by: jlhuhn <134317018+jlhuhn@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/speed_of_sound.py | 52 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 physics/speed_of_sound.py diff --git a/physics/speed_of_sound.py b/physics/speed_of_sound.py new 
file mode 100644 index 000000000..a4658366a --- /dev/null +++ b/physics/speed_of_sound.py @@ -0,0 +1,52 @@ +""" +Title : Calculating the speed of sound + +Description : + The speed of sound (c) is the speed that a sound wave travels + per unit time (m/s). During propagation, the sound wave propagates + through an elastic medium. Its SI unit is meter per second (m/s). + + Only longitudinal waves can propagate in liquids and gases, unlike in + solids, where they can also travel as transverse waves. The following algo- + rithm calculates the speed of sound in a fluid depending on the bulk + modulus and the density of the fluid. + + Equation for calculating the speed of sound in a fluid: + c_fluid = (K_s/p)**0.5 + + c_fluid: speed of sound in fluid + K_s: isentropic bulk modulus + p: density of fluid + + + +Source : https://en.wikipedia.org/wiki/Speed_of_sound +""" + + +def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float: + """ + This method calculates the speed of sound in a fluid - + This is calculated from the other two provided values + Examples: + Example 1 --> Water 20°C: bulk_modulus= 2.15GPa, density=998kg/m³ + Example 2 --> Mercury 20°C: bulk_modulus= 28.5GPa, density=13600kg/m³ + + >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15*10**9, density=998) + 1467.7563207952705 + >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5*10**9, density=13600) + 1447.614670861731 + """ + + if density <= 0: + raise ValueError("Impossible fluid density") + if bulk_modulus <= 0: + raise ValueError("Impossible bulk modulus") + + return (bulk_modulus / density) ** 0.5 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7775de0ef779a28cec7d9f28af97a89b2bc29d7e Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 8 Jun 2023 13:40:38 +0100 Subject: [PATCH 08/43] Create number container system algorithm (#8808) * feat: Create number container system algorithm * updating DIRECTORY.md * chore: Fix failing tests * Update other/number_container_system.py 
Co-authored-by: Christian Clauss * Update other/number_container_system.py Co-authored-by: Christian Clauss * Update other/number_container_system.py Co-authored-by: Christian Clauss * chore: Add more tests * chore: Create binary_search_insert failing test * type: Update typehints to accept str, list and range --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 +- other/number_container_system.py | 180 +++++++++++++++++++++++++++++++ 2 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 other/number_container_system.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 231b0e2f1..6dac4a9a5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -419,8 +419,9 @@ * [Frequent Pattern Graph Miner](graphs/frequent_pattern_graph_miner.py) * [G Topological Sort](graphs/g_topological_sort.py) * [Gale Shapley Bigraph](graphs/gale_shapley_bigraph.py) + * [Graph Adjacency List](graphs/graph_adjacency_list.py) + * [Graph Adjacency Matrix](graphs/graph_adjacency_matrix.py) * [Graph List](graphs/graph_list.py) - * [Graph Matrix](graphs/graph_matrix.py) * [Graphs Floyd Warshall](graphs/graphs_floyd_warshall.py) * [Greedy Best First](graphs/greedy_best_first.py) * [Greedy Min Vertex Cover](graphs/greedy_min_vertex_cover.py) @@ -479,6 +480,7 @@ * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) + * [Rank Of Matrix](linear_algebra/src/rank_of_matrix.py) * [Rayleigh Quotient](linear_algebra/src/rayleigh_quotient.py) * [Schur Complement](linear_algebra/src/schur_complement.py) * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py) @@ -651,6 +653,7 @@ * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py) * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) + * [Simultaneous Linear Equation Solver](maths/simultaneous_linear_equation_solver.py) * [Sin](maths/sin.py) * 
[Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) @@ -726,6 +729,7 @@ * [Maximum Subarray](other/maximum_subarray.py) * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) + * [Number Container System](other/number_container_system.py) * [Password](other/password.py) * [Quine](other/quine.py) * [Scoring Algorithm](other/scoring_algorithm.py) diff --git a/other/number_container_system.py b/other/number_container_system.py new file mode 100644 index 000000000..f547bc8a2 --- /dev/null +++ b/other/number_container_system.py @@ -0,0 +1,180 @@ +""" +A number container system that uses binary search to delete and insert values into +arrays with O(n logn) write times and O(1) read times. + +This container system holds integers at indexes. + +Further explained in this leetcode problem +> https://leetcode.com/problems/minimum-cost-tree-from-leaf-values +""" + + +class NumberContainer: + def __init__(self) -> None: + # numbermap keys are the number and its values are lists of indexes sorted + # in ascending order + self.numbermap: dict[int, list[int]] = {} + # indexmap keys are an index and it's values are the number at that index + self.indexmap: dict[int, int] = {} + + def binary_search_delete(self, array: list | str | range, item: int) -> list[int]: + """ + Removes the item from the sorted array and returns + the new array. 
+ + >>> NumberContainer().binary_search_delete([1,2,3], 2) + [1, 3] + >>> NumberContainer().binary_search_delete([0, 0, 0], 0) + [0, 0] + >>> NumberContainer().binary_search_delete([-1, -1, -1], -1) + [-1, -1] + >>> NumberContainer().binary_search_delete([-1, 0], 0) + [-1] + >>> NumberContainer().binary_search_delete([-1, 0], -1) + [0] + >>> NumberContainer().binary_search_delete(range(7), 3) + [0, 1, 2, 4, 5, 6] + >>> NumberContainer().binary_search_delete([1.1, 2.2, 3.3], 2.2) + [1.1, 3.3] + >>> NumberContainer().binary_search_delete("abcde", "c") + ['a', 'b', 'd', 'e'] + >>> NumberContainer().binary_search_delete([0, -1, 2, 4], 0) + Traceback (most recent call last): + ... + ValueError: Either the item is not in the array or the array was unsorted + >>> NumberContainer().binary_search_delete([2, 0, 4, -1, 11], -1) + Traceback (most recent call last): + ... + ValueError: Either the item is not in the array or the array was unsorted + >>> NumberContainer().binary_search_delete(125, 1) + Traceback (most recent call last): + ... + TypeError: binary_search_delete() only accepts either a list, range or str + """ + if isinstance(array, (range, str)): + array = list(array) + elif not isinstance(array, list): + raise TypeError( + "binary_search_delete() only accepts either a list, range or str" + ) + + low = 0 + high = len(array) - 1 + + while low <= high: + mid = (low + high) // 2 + if array[mid] == item: + array.pop(mid) + return array + elif array[mid] < item: + low = mid + 1 + else: + high = mid - 1 + raise ValueError( + "Either the item is not in the array or the array was unsorted" + ) + + def binary_search_insert(self, array: list | str | range, index: int) -> list[int]: + """ + Inserts the index into the sorted array + at the correct position. 
+ + >>> NumberContainer().binary_search_insert([1,2,3], 2) + [1, 2, 2, 3] + >>> NumberContainer().binary_search_insert([0,1,3], 2) + [0, 1, 2, 3] + >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 103], 51) + [-5, -3, 0, 0, 11, 51, 103] + >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 100, 103], 101) + [-5, -3, 0, 0, 11, 100, 101, 103] + >>> NumberContainer().binary_search_insert(range(10), 4) + [0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9] + >>> NumberContainer().binary_search_insert("abd", "c") + ['a', 'b', 'c', 'd'] + >>> NumberContainer().binary_search_insert(131, 23) + Traceback (most recent call last): + ... + TypeError: binary_search_insert() only accepts either a list, range or str + """ + if isinstance(array, (range, str)): + array = list(array) + elif not isinstance(array, list): + raise TypeError( + "binary_search_insert() only accepts either a list, range or str" + ) + + low = 0 + high = len(array) - 1 + + while low <= high: + mid = (low + high) // 2 + if array[mid] == index: + # If the item already exists in the array, + # insert it after the existing item + array.insert(mid + 1, index) + return array + elif array[mid] < index: + low = mid + 1 + else: + high = mid - 1 + + # If the item doesn't exist in the array, insert it at the appropriate position + array.insert(low, index) + return array + + def change(self, index: int, number: int) -> None: + """ + Changes (sets) the index as number + + >>> cont = NumberContainer() + >>> cont.change(0, 10) + >>> cont.change(0, 20) + >>> cont.change(-13, 20) + >>> cont.change(-100030, 20032903290) + """ + # Remove previous index + if index in self.indexmap: + n = self.indexmap[index] + if len(self.numbermap[n]) == 1: + del self.numbermap[n] + else: + self.numbermap[n] = self.binary_search_delete(self.numbermap[n], index) + + # Set new index + self.indexmap[index] = number + + # Number not seen before or empty so insert number value + if number not in self.numbermap: + self.numbermap[number] = 
[index] + + # Here we need to perform a binary search insertion in order to insert + # The item in the correct place + else: + self.numbermap[number] = self.binary_search_insert( + self.numbermap[number], index + ) + + def find(self, number: int) -> int: + """ + Returns the smallest index where the number is. + + >>> cont = NumberContainer() + >>> cont.find(10) + -1 + >>> cont.change(0, 10) + >>> cont.find(10) + 0 + >>> cont.change(0, 20) + >>> cont.find(10) + -1 + >>> cont.find(20) + 0 + """ + # Simply return the 0th index (smallest) of the indexes found (or -1) + return self.numbermap.get(number, [-1])[0] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9c9da8ebf1d35ae40ac5438c05cc273f7c6d4473 Mon Sep 17 00:00:00 2001 From: Jan Wojciechowski <96974442+yanvoi@users.noreply.github.com> Date: Fri, 9 Jun 2023 11:06:37 +0200 Subject: [PATCH 09/43] Improve readability of ciphers/mixed_keyword_cypher.py (#8626) * refactored the code * the code will now pass the test * looked more into it and fixed the logic * made the code easier to read, added comments and fixed the logic * got rid of redundant code + plaintext can contain chars that are not in the alphabet * fixed the reduntant conversion of ascii_uppercase to a list * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * keyword and plaintext won't have default values * ran the ruff command * Update linear_discriminant_analysis.py and rsa_cipher.py (#8680) * Update rsa_cipher.py by replacing %s with {} * Update rsa_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update machine_learning/linear_discriminant_analysis.py Co-authored-by: Christian Clauss * Update linear_discriminant_analysis.py * updated --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss * fixed some difficulties * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added comments, made printing mapping optional, added 1 test * shortened the line that was too long * Update ciphers/mixed_keyword_cypher.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- ciphers/mixed_keyword_cypher.py | 100 +++++++++++++++++--------------- 1 file changed, 53 insertions(+), 47 deletions(-) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 93a0e3acb..b984808fc 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -1,7 +1,11 @@ -def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: - """ +from string import ascii_uppercase - For key:hello + +def mixed_keyword( + keyword: str, plaintext: str, verbose: bool = False, alphabet: str = ascii_uppercase +) -> str: + """ + For keyword: hello H E L O A B C D @@ -12,58 +16,60 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: Y Z and map vertically - >>> mixed_keyword("college", "UNIVERSITY") # doctest: +NORMALIZE_WHITESPACE + >>> mixed_keyword("college", "UNIVERSITY", True) # doctest: +NORMALIZE_WHITESPACE {'A': 'C', 'B': 'A', 'C': 'I', 'D': 
'P', 'E': 'U', 'F': 'Z', 'G': 'O', 'H': 'B', 'I': 'J', 'J': 'Q', 'K': 'V', 'L': 'L', 'M': 'D', 'N': 'K', 'O': 'R', 'P': 'W', 'Q': 'E', 'R': 'F', 'S': 'M', 'T': 'S', 'U': 'X', 'V': 'G', 'W': 'H', 'X': 'N', 'Y': 'T', 'Z': 'Y'} 'XKJGUFMJST' + + >>> mixed_keyword("college", "UNIVERSITY", False) # doctest: +NORMALIZE_WHITESPACE + 'XKJGUFMJST' """ - key = key.upper() - pt = pt.upper() - temp = [] - for i in key: - if i not in temp: - temp.append(i) - len_temp = len(temp) - # print(temp) - alpha = [] - modalpha = [] - for j in range(65, 91): - t = chr(j) - alpha.append(t) - if t not in temp: - temp.append(t) - # print(temp) - r = int(26 / 4) - # print(r) - k = 0 - for _ in range(r): - s = [] - for _ in range(len_temp): - s.append(temp[k]) - if k >= 25: + keyword = keyword.upper() + plaintext = plaintext.upper() + alphabet_set = set(alphabet) + + # create a list of unique characters in the keyword - their order matters + # it determines how we will map plaintext characters to the ciphertext + unique_chars = [] + for char in keyword: + if char in alphabet_set and char not in unique_chars: + unique_chars.append(char) + # the number of those unique characters will determine the number of rows + num_unique_chars_in_keyword = len(unique_chars) + + # create a shifted version of the alphabet + shifted_alphabet = unique_chars + [ + char for char in alphabet if char not in unique_chars + ] + + # create a modified alphabet by splitting the shifted alphabet into rows + modified_alphabet = [ + shifted_alphabet[k : k + num_unique_chars_in_keyword] + for k in range(0, 26, num_unique_chars_in_keyword) + ] + + # map the alphabet characters to the modified alphabet characters + # going 'vertically' through the modified alphabet - consider columns first + mapping = {} + letter_index = 0 + for column in range(num_unique_chars_in_keyword): + for row in modified_alphabet: + # if current row (the last one) is too short, break out of loop + if len(row) <= column: break - k += 1 - 
modalpha.append(s) - # print(modalpha) - d = {} - j = 0 - k = 0 - for j in range(len_temp): - for m in modalpha: - if not len(m) - 1 >= j: - break - d[alpha[k]] = m[j] - if not k < 25: - break - k += 1 - print(d) - cypher = "" - for i in pt: - cypher += d[i] - return cypher + + # map current letter to letter in modified alphabet + mapping[alphabet[letter_index]] = row[column] + letter_index += 1 + + if verbose: + print(mapping) + # create the encrypted text by mapping the plaintext to the modified alphabet + return "".join(mapping[char] if char in mapping else char for char in plaintext) if __name__ == "__main__": + # example use print(mixed_keyword("college", "UNIVERSITY")) From daa0c8f3d340485ce295570e6d76b38891e371bd Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 10 Jun 2023 13:21:49 +0100 Subject: [PATCH 10/43] Create count negative numbers in matrix algorithm (#8813) * updating DIRECTORY.md * feat: Count negative numbers in sorted matrix * updating DIRECTORY.md * chore: Fix pre-commit * refactor: Combine functions into iteration * style: Reformat reference * feat: Add timings of each implementation * chore: Fix problems with algorithms-keeper bot * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * test: Remove doctest from benchmark function * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * refactor: Use sum instead of large iteration * refactor: Use len not sum * Update 
count_negative_numbers_in_sorted_matrix.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 2 + ...count_negative_numbers_in_sorted_matrix.py | 151 ++++++++++++++++++ 2 files changed, 153 insertions(+) create mode 100644 matrix/count_negative_numbers_in_sorted_matrix.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6dac4a9a5..8511c261a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -679,6 +679,7 @@ ## Matrix * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Count Negative Numbers In Sorted Matrix](matrix/count_negative_numbers_in_sorted_matrix.py) * [Count Paths](matrix/count_paths.py) * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) @@ -753,6 +754,7 @@ * [Potential Energy](physics/potential_energy.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) + * [Speed Of Sound](physics/speed_of_sound.py) ## Project Euler * Problem 001 diff --git a/matrix/count_negative_numbers_in_sorted_matrix.py b/matrix/count_negative_numbers_in_sorted_matrix.py new file mode 100644 index 000000000..2799ff3b4 --- /dev/null +++ b/matrix/count_negative_numbers_in_sorted_matrix.py @@ -0,0 +1,151 @@ +""" +Given an matrix of numbers in which all rows and all columns are sorted in decreasing +order, return the number of negative numbers in grid. 
+ +Reference: https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix +""" + + +def generate_large_matrix() -> list[list[int]]: + """ + >>> generate_large_matrix() # doctest: +ELLIPSIS + [[1000, ..., -999], [999, ..., -1001], ..., [2, ..., -1998]] + """ + return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)] + + +grid = generate_large_matrix() +test_grids = ( + [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], + [[3, 2], [1, 0]], + [[7, 7, 6]], + [[7, 7, 6], [-1, -2, -3]], + grid, +) + + +def validate_grid(grid: list[list[int]]) -> None: + """ + Validate that the rows and columns of the grid is sorted in decreasing order. + >>> for grid in test_grids: + ... validate_grid(grid) + """ + assert all(row == sorted(row, reverse=True) for row in grid) + assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid)) + + +def find_negative_index(array: list[int]) -> int: + """ + Find the smallest negative index + + >>> find_negative_index([0,0,0,0]) + 4 + >>> find_negative_index([4,3,2,-1]) + 3 + >>> find_negative_index([1,0,-1,-10]) + 2 + >>> find_negative_index([0,0,0,-1]) + 3 + >>> find_negative_index([11,8,7,-3,-5,-9]) + 3 + >>> find_negative_index([-1,-1,-2,-3]) + 0 + >>> find_negative_index([5,1,0]) + 3 + >>> find_negative_index([-5,-5,-5]) + 0 + >>> find_negative_index([0]) + 1 + >>> find_negative_index([]) + 0 + """ + left = 0 + right = len(array) - 1 + + # Edge cases such as no values or all numbers are negative. + if not array or array[0] < 0: + return 0 + + while right + 1 > left: + mid = (left + right) // 2 + num = array[mid] + + # Num must be negative and the index must be greater than or equal to 0. + if num < 0 and array[mid - 1] >= 0: + return mid + + if num >= 0: + left = mid + 1 + else: + right = mid - 1 + # No negative numbers so return the last index of the array + 1 which is the length. 
+ return len(array) + + +def count_negatives_binary_search(grid: list[list[int]]) -> int: + """ + An O(m logn) solution that uses binary search in order to find the boundary between + positive and negative numbers + + >>> [count_negatives_binary_search(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + total = 0 + bound = len(grid[0]) + + for i in range(len(grid)): + bound = find_negative_index(grid[i][:bound]) + total += bound + return (len(grid) * len(grid[0])) - total + + +def count_negatives_brute_force(grid: list[list[int]]) -> int: + """ + This solution is O(n^2) because it iterates through every column and row. + + >>> [count_negatives_brute_force(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + return len([number for row in grid for number in row if number < 0]) + + +def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int: + """ + Similar to the brute force solution above but uses break in order to reduce the + number of iterations. + + >>> [count_negatives_brute_force_with_break(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + total = 0 + for row in grid: + for i, number in enumerate(row): + if number < 0: + total += len(row) - i + break + return total + + +def benchmark() -> None: + """Benchmark our functions next to each other""" + from timeit import timeit + + print("Running benchmarks") + setup = ( + "from __main__ import count_negatives_binary_search, " + "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" + ) + for func in ( + "count_negatives_binary_search", # took 0.7727 seconds + "count_negatives_brute_force_with_break", # took 4.6505 seconds + "count_negatives_brute_force", # took 12.8160 seconds + ): + time = timeit(f"{func}(grid=grid)", setup=setup, number=500) + print(f"{func}() took {time:0.4f} seconds") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + benchmark() From 46379861257d43bb7140d261094bf17dc414950f Mon Sep 17 00:00:00 2001 From: 
"pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 00:09:33 +0200 Subject: [PATCH 11/43] [pre-commit.ci] pre-commit autoupdate (#8817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.270 → v0.0.272](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.270...v0.0.272) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4c70ae219..1d4b73681 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.270 + rev: v0.0.272 hooks: - id: ruff From e6f89a6b89941ffed911e96362be3611a45420e7 Mon Sep 17 00:00:00 2001 From: Ilkin Mengusoglu <113149540+imengus@users.noreply.github.com> Date: Sun, 18 Jun 2023 17:00:02 +0100 Subject: [PATCH 12/43] Simplex algorithm (#8825) * feat: added simplex.py * added docstrings * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff fix Co-authored by: CaedenPH * removed README to add in separate PR * Update linear_programming/simplex.py Co-authored-by: Tianyi Zheng * Update linear_programming/simplex.py Co-authored-by: Tianyi Zheng * fix class docstring * add comments --------- Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Co-authored-by: Tianyi Zheng --- linear_programming/simplex.py | 311 ++++++++++++++++++++++++++++++++++ 1 file changed, 311 insertions(+) create mode 100644 linear_programming/simplex.py diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py new file mode 100644 index 000000000..ba64add40 --- /dev/null +++ b/linear_programming/simplex.py @@ -0,0 +1,311 @@ +""" +Python implementation of the simplex algorithm for solving linear programs in +tabular form with +- `>=`, `<=`, and `=` constraints and +- each variable `x1, x2, ...>= 0`. + +See https://gist.github.com/imengus/f9619a568f7da5bc74eaf20169a24d98 for how to +convert linear programs to simplex tableaus, and the steps taken in the simplex +algorithm. + +Resources: +https://en.wikipedia.org/wiki/Simplex_algorithm +https://tinyurl.com/simplex4beginners +""" +from typing import Any + +import numpy as np + + +class Tableau: + """Operate on simplex tableaus + + >>> t = Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2) + Traceback (most recent call last): + ... + ValueError: RHS must be > 0 + """ + + def __init__(self, tableau: np.ndarray, n_vars: int) -> None: + # Check if RHS is negative + if np.any(tableau[:, -1], where=tableau[:, -1] < 0): + raise ValueError("RHS must be > 0") + + self.tableau = tableau + self.n_rows, _ = tableau.shape + + # Number of decision variables x1, x2, x3... 
+ self.n_vars = n_vars + + # Number of artificial variables to be minimised + self.n_art_vars = len(np.where(tableau[self.n_vars : -1] == -1)[0]) + + # 2 if there are >= or == constraints (nonstandard), 1 otherwise (std) + self.n_stages = (self.n_art_vars > 0) + 1 + + # Number of slack variables added to make inequalities into equalities + self.n_slack = self.n_rows - self.n_stages + + # Objectives for each stage + self.objectives = ["max"] + + # In two stage simplex, first minimise then maximise + if self.n_art_vars: + self.objectives.append("min") + + self.col_titles = [""] + + # Index of current pivot row and column + self.row_idx = None + self.col_idx = None + + # Does objective row only contain (non)-negative values? + self.stop_iter = False + + @staticmethod + def generate_col_titles(*args: int) -> list[str]: + """Generate column titles for tableau of specific dimensions + + >>> Tableau.generate_col_titles(2, 3, 1) + ['x1', 'x2', 's1', 's2', 's3', 'a1', 'RHS'] + + >>> Tableau.generate_col_titles() + Traceback (most recent call last): + ... + ValueError: Must provide n_vars, n_slack, and n_art_vars + >>> Tableau.generate_col_titles(-2, 3, 1) + Traceback (most recent call last): + ... + ValueError: All arguments must be non-negative integers + """ + if len(args) != 3: + raise ValueError("Must provide n_vars, n_slack, and n_art_vars") + + if not all(x >= 0 and isinstance(x, int) for x in args): + raise ValueError("All arguments must be non-negative integers") + + # decision | slack | artificial + string_starts = ["x", "s", "a"] + titles = [] + for i in range(3): + for j in range(args[i]): + titles.append(string_starts[i] + str(j + 1)) + titles.append("RHS") + return titles + + def find_pivot(self, tableau: np.ndarray) -> tuple[Any, Any]: + """Finds the pivot row and column. 
+ >>> t = Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), 2) + >>> t.find_pivot(t.tableau) + (1, 0) + """ + objective = self.objectives[-1] + + # Find entries of highest magnitude in objective rows + sign = (objective == "min") - (objective == "max") + col_idx = np.argmax(sign * tableau[0, : self.n_vars]) + + # Choice is only valid if below 0 for maximise, and above for minimise + if sign * self.tableau[0, col_idx] <= 0: + self.stop_iter = True + return 0, 0 + + # Pivot row is chosen as having the lowest quotient when elements of + # the pivot column divide the right-hand side + + # Slice excluding the objective rows + s = slice(self.n_stages, self.n_rows) + + # RHS + dividend = tableau[s, -1] + + # Elements of pivot column within slice + divisor = tableau[s, col_idx] + + # Array filled with nans + nans = np.full(self.n_rows - self.n_stages, np.nan) + + # If element in pivot column is greater than zeron_stages, return + # quotient or nan otherwise + quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0) + + # Arg of minimum quotient excluding the nan values. n_stages is added + # to compensate for earlier exclusion of objective columns + row_idx = np.nanargmin(quotients) + self.n_stages + return row_idx, col_idx + + def pivot(self, tableau: np.ndarray, row_idx: int, col_idx: int) -> np.ndarray: + """Pivots on value on the intersection of pivot row and column. + + >>> t = Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), 2) + >>> t.pivot(t.tableau, 1, 0).tolist() + ... 
# doctest: +NORMALIZE_WHITESPACE + [[0.0, 3.0, 2.0, 0.0, 8.0], + [1.0, 3.0, 1.0, 0.0, 4.0], + [0.0, -8.0, -3.0, 1.0, -8.0]] + """ + # Avoid changes to original tableau + piv_row = tableau[row_idx].copy() + + piv_val = piv_row[col_idx] + + # Entry becomes 1 + piv_row *= 1 / piv_val + + # Variable in pivot column becomes basic, ie the only non-zero entry + for idx, coeff in enumerate(tableau[:, col_idx]): + tableau[idx] += -coeff * piv_row + tableau[row_idx] = piv_row + return tableau + + def change_stage(self, tableau: np.ndarray) -> np.ndarray: + """Exits first phase of the two-stage method by deleting artificial + rows and columns, or completes the algorithm if exiting the standard + case. + + >>> t = Tableau(np.array([ + ... [3, 3, -1, -1, 0, 0, 4], + ... [2, 1, 0, 0, 0, 0, 0.], + ... [1, 2, -1, 0, 1, 0, 2], + ... [2, 1, 0, -1, 0, 1, 2] + ... ]), 2) + >>> t.change_stage(t.tableau).tolist() + ... # doctest: +NORMALIZE_WHITESPACE + [[2.0, 1.0, 0.0, 0.0, 0.0, 0.0], + [1.0, 2.0, -1.0, 0.0, 1.0, 2.0], + [2.0, 1.0, 0.0, -1.0, 0.0, 2.0]] + """ + # Objective of original objective row remains + self.objectives.pop() + + if not self.objectives: + return tableau + + # Slice containing ids for artificial columns + s = slice(-self.n_art_vars - 1, -1) + + # Delete the artificial variable columns + tableau = np.delete(tableau, s, axis=1) + + # Delete the objective row of the first stage + tableau = np.delete(tableau, 0, axis=0) + + self.n_stages = 1 + self.n_rows -= 1 + self.n_art_vars = 0 + self.stop_iter = False + return tableau + + def run_simplex(self) -> dict[Any, Any]: + """Operate on tableau until objective function cannot be + improved further. + + # Standard linear program: + Max: x1 + x2 + ST: x1 + 3x2 <= 4 + 3x1 + x2 <= 4 + >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), + ... 2).run_simplex() + {'P': 2.0, 'x1': 1.0, 'x2': 1.0} + + # Optimal tableau input: + >>> Tableau(np.array([ + ... [0, 0, 0.25, 0.25, 2], + ... [0, 1, 0.375, -0.125, 1], + ... 
[1, 0, -0.125, 0.375, 1] + ... ]), 2).run_simplex() + {'P': 2.0, 'x1': 1.0, 'x2': 1.0} + + # Non-standard: >= constraints + Max: 2x1 + 3x2 + x3 + ST: x1 + x2 + x3 <= 40 + 2x1 + x2 - x3 >= 10 + - x2 + x3 >= 10 + >>> Tableau(np.array([ + ... [2, 0, 0, 0, -1, -1, 0, 0, 20], + ... [-2, -3, -1, 0, 0, 0, 0, 0, 0], + ... [1, 1, 1, 1, 0, 0, 0, 0, 40], + ... [2, 1, -1, 0, -1, 0, 1, 0, 10], + ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] + ... ]), 3).run_simplex() + {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} + + # Non standard: minimisation and equalities + Min: x1 + x2 + ST: 2x1 + x2 = 12 + 6x1 + 5x2 = 40 + >>> Tableau(np.array([ + ... [8, 6, 0, -1, 0, -1, 0, 0, 52], + ... [1, 1, 0, 0, 0, 0, 0, 0, 0], + ... [2, 1, 1, 0, 0, 0, 0, 0, 12], + ... [2, 1, 0, -1, 0, 0, 1, 0, 12], + ... [6, 5, 0, 0, 1, 0, 0, 0, 40], + ... [6, 5, 0, 0, 0, -1, 0, 1, 40.] + ... ]), 2).run_simplex() + {'P': 7.0, 'x1': 5.0, 'x2': 2.0} + """ + # Stop simplex algorithm from cycling. + for _ in range(100): + # Completion of each stage removes an objective. If both stages + # are complete, then no objectives are left + if not self.objectives: + self.col_titles = self.generate_col_titles( + self.n_vars, self.n_slack, self.n_art_vars + ) + + # Find the values of each variable at optimal solution + return self.interpret_tableau(self.tableau, self.col_titles) + + row_idx, col_idx = self.find_pivot(self.tableau) + + # If there are no more negative values in objective row + if self.stop_iter: + # Delete artificial variable columns and rows. Update attributes + self.tableau = self.change_stage(self.tableau) + else: + self.tableau = self.pivot(self.tableau, row_idx, col_idx) + return {} + + def interpret_tableau( + self, tableau: np.ndarray, col_titles: list[str] + ) -> dict[str, float]: + """Given the final tableau, add the corresponding values of the basic + decision variables to the `output_dict` + >>> tableau = np.array([ + ... [0,0,0.875,0.375,5], + ... [0,1,0.375,-0.125,1], + ... [1,0,-0.125,0.375,1] + ... 
]) + >>> t = Tableau(tableau, 2) + >>> t.interpret_tableau(tableau, ["x1", "x2", "s1", "s2", "RHS"]) + {'P': 5.0, 'x1': 1.0, 'x2': 1.0} + """ + # P = RHS of final tableau + output_dict = {"P": abs(tableau[0, -1])} + + for i in range(self.n_vars): + # Gives ids of nonzero entries in the ith column + nonzero = np.nonzero(tableau[:, i]) + n_nonzero = len(nonzero[0]) + + # First entry in the nonzero ids + nonzero_rowidx = nonzero[0][0] + nonzero_val = tableau[nonzero_rowidx, i] + + # If there is only one nonzero value in column, which is one + if n_nonzero == nonzero_val == 1: + rhs_val = tableau[nonzero_rowidx, -1] + output_dict[col_titles[i]] = rhs_val + + # Check for basic variables + for title in col_titles: + # Don't add RHS or slack variables to output dict + if title[0] not in "R-s-a": + output_dict.setdefault(title, 0) + return output_dict + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b0f871032e78dd1d2f2214acbaae2fac88fa55b0 Mon Sep 17 00:00:00 2001 From: Frank-1998 <77809242+Frank-1998@users.noreply.github.com> Date: Sun, 18 Jun 2023 10:30:06 -0600 Subject: [PATCH 13/43] Fix removing the root node in binary_search_tree.py removes the whole tree (#8752) * fix issue #8715 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/binary_tree/binary_search_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index cd88cc10e..c72195424 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -40,7 +40,7 @@ class BinarySearchTree: else: node.parent.left = new_children else: - self.root = None + self.root = new_children def is_right(self, node: Node) -> bool: if node.parent and 
node.parent.right: From ea6c6056cf2215358834710bf89422310f831178 Mon Sep 17 00:00:00 2001 From: Turro <42980188+smturro2@users.noreply.github.com> Date: Mon, 19 Jun 2023 06:46:29 -0500 Subject: [PATCH 14/43] Added apr_interest function to financial (#6025) * Added apr_interest function to financial * Update interest.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/interest.py * float --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- financial/interest.py | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/financial/interest.py b/financial/interest.py index c69c73045..33d02e27c 100644 --- a/financial/interest.py +++ b/financial/interest.py @@ -4,7 +4,7 @@ from __future__ import annotations def simple_interest( - principal: float, daily_interest_rate: float, days_between_payments: int + principal: float, daily_interest_rate: float, days_between_payments: float ) -> float: """ >>> simple_interest(18000.0, 0.06, 3) @@ -42,7 +42,7 @@ def simple_interest( def compound_interest( principal: float, nominal_annual_interest_rate_percentage: float, - number_of_compounding_periods: int, + number_of_compounding_periods: float, ) -> float: """ >>> compound_interest(10000.0, 0.05, 3) @@ -77,6 +77,43 @@ def compound_interest( ) +def apr_interest( + principal: float, + nominal_annual_percentage_rate: float, + number_of_years: float, +) -> float: + """ + >>> apr_interest(10000.0, 0.05, 3) + 1618.223072263547 + >>> apr_interest(10000.0, 0.05, 1) + 512.6749646744732 + >>> apr_interest(0.5, 0.05, 3) + 0.08091115361317736 + >>> apr_interest(10000.0, 0.06, -4) + Traceback (most recent call last): + ... + ValueError: number_of_years must be > 0 + >>> apr_interest(10000.0, -3.5, 3.0) + Traceback (most recent call last): + ... 
+ ValueError: nominal_annual_percentage_rate must be >= 0 + >>> apr_interest(-5500.0, 0.01, 5) + Traceback (most recent call last): + ... + ValueError: principal must be > 0 + """ + if number_of_years <= 0: + raise ValueError("number_of_years must be > 0") + if nominal_annual_percentage_rate < 0: + raise ValueError("nominal_annual_percentage_rate must be >= 0") + if principal <= 0: + raise ValueError("principal must be > 0") + + return compound_interest( + principal, nominal_annual_percentage_rate / 365, number_of_years * 365 + ) + + if __name__ == "__main__": import doctest From 0dee4a402c85981af0c2d4c53af27a69a7eb91bf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:56:14 +0200 Subject: [PATCH 15/43] [pre-commit.ci] pre-commit autoupdate (#8827) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/codespell-project/codespell: v2.2.4 → v2.2.5](https://github.com/codespell-project/codespell/compare/v2.2.4...v2.2.5) - [github.com/tox-dev/pyproject-fmt: 0.11.2 → 0.12.0](https://github.com/tox-dev/pyproject-fmt/compare/0.11.2...0.12.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d4b73681..591fd7819 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,14 +26,14 @@ repos: - id: black - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 + rev: v2.2.5 hooks: - id: codespell additional_dependencies: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.11.2" + rev: "0.12.0" hooks: - id: pyproject-fmt diff --git 
a/DIRECTORY.md b/DIRECTORY.md index 8511c261a..6ec8d5111 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -486,6 +486,9 @@ * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py) * [Transformations 2D](linear_algebra/src/transformations_2d.py) +## Linear Programming + * [Simplex](linear_programming/simplex.py) + ## Machine Learning * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) From 07e68128883b84fb7e342c6bce88863a05fbbf62 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 20 Jun 2023 18:03:16 +0200 Subject: [PATCH 16/43] Update .pre-commit-config.yaml (#8828) * Update .pre-commit-config.yaml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- pyproject.toml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a52619668..1dcce044a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,3 @@ -[tool.pytest.ini_options] -markers = [ - "mat_ops: mark a test as utilizing matrix operations.", -] -addopts = [ - "--durations=10", - "--doctest-modules", - "--showlocals", -] - -[tool.coverage.report] -omit = [".env/*"] -sort = "Cover" - -[tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" -skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" - [tool.ruff] ignore = [ # `ruff rule S101` for a description of that rule "ARG001", # Unused function argument `amount` -- FIX ME? 
@@ -131,3 +113,21 @@ max-args = 10 # default: 5 max-branches = 20 # default: 12 max-returns = 8 # default: 6 max-statements = 88 # default: 50 + +[tool.pytest.ini_options] +markers = [ + "mat_ops: mark a test as utilizing matrix operations.", +] +addopts = [ + "--durations=10", + "--doctest-modules", + "--showlocals", +] + +[tool.coverage.report] +omit = [".env/*"] +sort = "Cover" + +[tool.codespell] +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" +skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From 5b0890bd833eb85c58fae9afc4984d520e7e2ad6 Mon Sep 17 00:00:00 2001 From: "Linus M. Henkel" <86628476+linushenkel@users.noreply.github.com> Date: Thu, 22 Jun 2023 13:49:09 +0200 Subject: [PATCH 17/43] Dijkstra algorithm with binary grid (#8802) * Create TestShiva * Delete TestShiva * Implementation of the Dijkstra-Algorithm in a binary grid * Update double_ended_queue.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update least_common_multiple.py * Update sol1.py * Update pyproject.toml * Update pyproject.toml * https://github.com/astral-sh/ruff-pre-commit v0.0.274 --------- Co-authored-by: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Co-authored-by: jlhuhn <134317018+jlhuhn@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +- data_structures/queue/double_ended_queue.py | 4 +- graphs/dijkstra_binary_grid.py | 89 +++++++++++++++++++++ maths/least_common_multiple.py | 6 +- project_euler/problem_054/sol1.py | 18 ++--- pyproject.toml | 1 + 6 files changed, 106 insertions(+), 16 deletions(-) create mode 100644 graphs/dijkstra_binary_grid.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 
591fd7819..3d4cc4084 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,8 +15,8 @@ repos: hooks: - id: auto-walrus - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.272 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.274 hooks: - id: ruff diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 637b7f62f..2472371b4 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -32,7 +32,7 @@ class Deque: the number of nodes """ - __slots__ = ["_front", "_back", "_len"] + __slots__ = ("_front", "_back", "_len") @dataclass class _Node: @@ -54,7 +54,7 @@ class Deque: the current node of the iteration. """ - __slots__ = ["_cur"] + __slots__ = "_cur" def __init__(self, cur: Deque._Node | None) -> None: self._cur = cur diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py new file mode 100644 index 000000000..c23d82343 --- /dev/null +++ b/graphs/dijkstra_binary_grid.py @@ -0,0 +1,89 @@ +""" +This script implements the Dijkstra algorithm on a binary grid. +The grid consists of 0s and 1s, where 1 represents +a walkable node and 0 represents an obstacle. +The algorithm finds the shortest path from a start node to a destination node. +Diagonal movement can be allowed or disallowed. +""" + +from heapq import heappop, heappush + +import numpy as np + + +def dijkstra( + grid: np.ndarray, + source: tuple[int, int], + destination: tuple[int, int], + allow_diagonal: bool, +) -> tuple[float | int, list[tuple[int, int]]]: + """ + Implements Dijkstra's algorithm on a binary grid. + + Args: + grid (np.ndarray): A 2D numpy array representing the grid. + 1 represents a walkable node and 0 represents an obstacle. + source (Tuple[int, int]): A tuple representing the start node. + destination (Tuple[int, int]): A tuple representing the + destination node. 
+ allow_diagonal (bool): A boolean determining whether + diagonal movements are allowed. + + Returns: + Tuple[Union[float, int], List[Tuple[int, int]]]: + The shortest distance from the start node to the destination node + and the shortest path as a list of nodes. + + >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), False) + (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]) + + >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), True) + (2.0, [(0, 0), (1, 1), (2, 2)]) + + >>> dijkstra(np.array([[1, 1, 1], [0, 0, 1], [0, 1, 1]]), (0, 0), (2, 2), False) + (4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]) + """ + rows, cols = grid.shape + dx = [-1, 1, 0, 0] + dy = [0, 0, -1, 1] + if allow_diagonal: + dx += [-1, -1, 1, 1] + dy += [-1, 1, -1, 1] + + queue, visited = [(0, source)], set() + matrix = np.full((rows, cols), np.inf) + matrix[source] = 0 + predecessors = np.empty((rows, cols), dtype=object) + predecessors[source] = None + + while queue: + (dist, (x, y)) = heappop(queue) + if (x, y) in visited: + continue + visited.add((x, y)) + + if (x, y) == destination: + path = [] + while (x, y) != source: + path.append((x, y)) + x, y = predecessors[x, y] + path.append(source) # add the source manually + path.reverse() + return matrix[destination], path + + for i in range(len(dx)): + nx, ny = x + dx[i], y + dy[i] + if 0 <= nx < rows and 0 <= ny < cols: + next_node = grid[nx][ny] + if next_node == 1 and matrix[nx, ny] > dist + 1: + heappush(queue, (dist + 1, (nx, ny))) + matrix[nx, ny] = dist + 1 + predecessors[nx, ny] = (x, y) + + return np.inf, [] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 621d93720..10cc63ac7 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -67,7 +67,7 @@ def benchmark(): class TestLeastCommonMultiple(unittest.TestCase): - test_inputs = [ + test_inputs = ( (10, 20), 
(13, 15), (4, 31), @@ -77,8 +77,8 @@ class TestLeastCommonMultiple(unittest.TestCase): (12, 25), (10, 25), (6, 9), - ] - expected_results = [20, 195, 124, 210, 1462, 60, 300, 50, 18] + ) + expected_results = (20, 195, 124, 210, 1462, 60, 300, 50, 18) def test_lcm_function(self): for i, (first_num, second_num) in enumerate(self.test_inputs): diff --git a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 74409f32c..86dfa5edd 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -47,18 +47,18 @@ import os class PokerHand: """Create an object representing a Poker Hand based on an input of a - string which represents the best 5 card combination from the player's hand + string which represents the best 5-card combination from the player's hand and board cards. Attributes: (read-only) - hand: string representing the hand consisting of five cards + hand: a string representing the hand consisting of five cards Methods: compare_with(opponent): takes in player's hand (self) and opponent's hand (opponent) and compares both hands according to the rules of Texas Hold'em. Returns one of 3 strings (Win, Loss, Tie) based on whether - player's hand is better than opponent's hand. + player's hand is better than the opponent's hand. hand_name(): Returns a string made up of two parts: hand name and high card. 
@@ -66,11 +66,11 @@ class PokerHand: Supported operators: Rich comparison operators: <, >, <=, >=, ==, != - Supported builtin methods and functions: + Supported built-in methods and functions: list.sort(), sorted() """ - _HAND_NAME = [ + _HAND_NAME = ( "High card", "One pair", "Two pairs", @@ -81,10 +81,10 @@ class PokerHand: "Four of a kind", "Straight flush", "Royal flush", - ] + ) - _CARD_NAME = [ - "", # placeholder as lists are zero indexed + _CARD_NAME = ( + "", # placeholder as tuples are zero-indexed "One", "Two", "Three", @@ -99,7 +99,7 @@ class PokerHand: "Queen", "King", "Ace", - ] + ) def __init__(self, hand: str) -> None: """ diff --git a/pyproject.toml b/pyproject.toml index 1dcce044a..4f21a9519 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -103,6 +103,7 @@ max-complexity = 17 # default: 10 "machine_learning/linear_discriminant_analysis.py" = ["ARG005"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] "matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"other/l*u_cache.py" = ["RUF012"] "physics/newtons_second_law_of_motion.py" = ["BLE001"] "project_euler/problem_099/sol1.py" = ["SIM115"] "sorts/external_sort.py" = ["SIM115"] From 5ffe601c86a9b44691a4dce37480c6d904102d49 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 22 Jun 2023 05:24:34 -0700 Subject: [PATCH 18/43] Fix `mypy` errors in `maths/sigmoid_linear_unit.py` (#8786) * updating DIRECTORY.md * Fix mypy errors in sigmoid_linear_unit.py * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/sigmoid_linear_unit.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/maths/sigmoid_linear_unit.py b/maths/sigmoid_linear_unit.py index a8ada10dd..0ee09bf82 100644 --- a/maths/sigmoid_linear_unit.py +++ b/maths/sigmoid_linear_unit.py @@ -17,7 +17,7 @@ This script is inspired by a corresponding research paper. 
import numpy as np -def sigmoid(vector: np.array) -> np.array: +def sigmoid(vector: np.ndarray) -> np.ndarray: """ Mathematical function sigmoid takes a vector x of K real numbers as input and returns 1/ (1 + e^-x). @@ -29,17 +29,15 @@ def sigmoid(vector: np.array) -> np.array: return 1 / (1 + np.exp(-vector)) -def sigmoid_linear_unit(vector: np.array) -> np.array: +def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray: """ Implements the Sigmoid Linear Unit (SiLU) or swish function Parameters: - vector (np.array): A numpy array consisting of real - values. + vector (np.ndarray): A numpy array consisting of real values Returns: - swish_vec (np.array): The input numpy array, after applying - swish. + swish_vec (np.ndarray): The input numpy array, after applying swish Examples: >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])) From f54a9668103e560f20b50559fb54ac38a74d1fe8 Mon Sep 17 00:00:00 2001 From: Jan-Lukas Huhn <134317018+jlhuhn@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:31:48 +0200 Subject: [PATCH 19/43] Energy conversions (#8801) * Create TestShiva * Delete TestShiva * Create energy_conversions.py * Update conversions/energy_conversions.py Co-authored-by: Caeden Perelli-Harris --------- Co-authored-by: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- conversions/energy_conversions.py | 114 ++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 conversions/energy_conversions.py diff --git a/conversions/energy_conversions.py b/conversions/energy_conversions.py new file mode 100644 index 000000000..51de6b313 --- /dev/null +++ b/conversions/energy_conversions.py @@ -0,0 +1,114 @@ +""" +Conversion of energy units. + +Available units: joule, kilojoule, megajoule, gigajoule,\ + wattsecond, watthour, kilowatthour, newtonmeter, calorie_nutr,\ + kilocalorie_nutr, electronvolt, britishthermalunit_it, footpound + +USAGE : +-> Import this file into their respective project. 
+-> Use the function energy_conversion() for conversion of energy units. +-> Parameters : + -> from_type : From which type you want to convert + -> to_type : To which type you want to convert + -> value : the value which you want to convert + +REFERENCES : +-> Wikipedia reference: https://en.wikipedia.org/wiki/Units_of_energy +-> Wikipedia reference: https://en.wikipedia.org/wiki/Joule +-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilowatt-hour +-> Wikipedia reference: https://en.wikipedia.org/wiki/Newton-metre +-> Wikipedia reference: https://en.wikipedia.org/wiki/Calorie +-> Wikipedia reference: https://en.wikipedia.org/wiki/Electronvolt +-> Wikipedia reference: https://en.wikipedia.org/wiki/British_thermal_unit +-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot-pound_(energy) +-> Unit converter reference: https://www.unitconverters.net/energy-converter.html +""" + +ENERGY_CONVERSION: dict[str, float] = { + "joule": 1.0, + "kilojoule": 1_000, + "megajoule": 1_000_000, + "gigajoule": 1_000_000_000, + "wattsecond": 1.0, + "watthour": 3_600, + "kilowatthour": 3_600_000, + "newtonmeter": 1.0, + "calorie_nutr": 4_186.8, + "kilocalorie_nutr": 4_186_800.00, + "electronvolt": 1.602_176_634e-19, + "britishthermalunit_it": 1_055.055_85, + "footpound": 1.355_818, +} + + +def energy_conversion(from_type: str, to_type: str, value: float) -> float: + """ + Conversion of energy units. 
+ >>> energy_conversion("joule", "joule", 1) + 1.0 + >>> energy_conversion("joule", "kilojoule", 1) + 0.001 + >>> energy_conversion("joule", "megajoule", 1) + 1e-06 + >>> energy_conversion("joule", "gigajoule", 1) + 1e-09 + >>> energy_conversion("joule", "wattsecond", 1) + 1.0 + >>> energy_conversion("joule", "watthour", 1) + 0.0002777777777777778 + >>> energy_conversion("joule", "kilowatthour", 1) + 2.7777777777777776e-07 + >>> energy_conversion("joule", "newtonmeter", 1) + 1.0 + >>> energy_conversion("joule", "calorie_nutr", 1) + 0.00023884589662749592 + >>> energy_conversion("joule", "kilocalorie_nutr", 1) + 2.388458966274959e-07 + >>> energy_conversion("joule", "electronvolt", 1) + 6.241509074460763e+18 + >>> energy_conversion("joule", "britishthermalunit_it", 1) + 0.0009478171226670134 + >>> energy_conversion("joule", "footpound", 1) + 0.7375621211696556 + >>> energy_conversion("joule", "megajoule", 1000) + 0.001 + >>> energy_conversion("calorie_nutr", "kilocalorie_nutr", 1000) + 1.0 + >>> energy_conversion("kilowatthour", "joule", 10) + 36000000.0 + >>> energy_conversion("britishthermalunit_it", "footpound", 1) + 778.1692306784539 + >>> energy_conversion("watthour", "joule", "a") # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for /: 'str' and 'float' + >>> energy_conversion("wrongunit", "joule", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: 'wrongunit', 'joule' + Valid values are: joule, ... footpound + >>> energy_conversion("joule", "wrongunit", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: 'joule', 'wrongunit' + Valid values are: joule, ... footpound + >>> energy_conversion("123", "abc", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... 
+ ValueError: Incorrect 'from_type' or 'to_type' value: '123', 'abc' + Valid values are: joule, ... footpound + """ + if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: + msg = ( + f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" + f"Valid values are: {', '.join(ENERGY_CONVERSION)}" + ) + raise ValueError(msg) + return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 331585f3f866e210e23d11700b09a8770a1c2490 Mon Sep 17 00:00:00 2001 From: Himanshu Tomar Date: Fri, 23 Jun 2023 13:56:05 +0530 Subject: [PATCH 20/43] Algorithm: Calculating Product Sum from a Special Array with Nested Structures (#8761) * Added minimum waiting time problem solution using greedy algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff --fix * Add type hints * Added two more doc test * Removed unnecessary comments * updated type hints * Updated the code as per the code review * Added recursive algo to calculate product sum from an array * Added recursive algo to calculate product sum from an array * Update doc string * Added doctest for product_sum function * Updated the code and added more doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added more test coverage for product_sum method * Update product_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + data_structures/arrays/product_sum.py | 98 +++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) create mode 100644 data_structures/arrays/product_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6ec8d5111..83389dab1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -166,6 +166,7 @@ * Arrays * [Permutations](data_structures/arrays/permutations.py) * 
[Prefix Sum](data_structures/arrays/prefix_sum.py) + * [Product Sum Array](data_structures/arrays/product_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) diff --git a/data_structures/arrays/product_sum.py b/data_structures/arrays/product_sum.py new file mode 100644 index 000000000..4fb906f36 --- /dev/null +++ b/data_structures/arrays/product_sum.py @@ -0,0 +1,98 @@ +""" +Calculate the Product Sum from a Special Array. +reference: https://dev.to/sfrasica/algorithms-product-sum-from-an-array-dc6 + +Python doctests can be run with the following command: +python -m doctest -v product_sum.py + +Calculate the product sum of a "special" array which can contain integers or nested +arrays. The product sum is obtained by adding all elements and multiplying by their +respective depths. + +For example, in the array [x, y], the product sum is (x + y). In the array [x, [y, z]], +the product sum is x + 2 * (y + z). In the array [x, [y, [z]]], +the product sum is x + 2 * (y + 3z). + +Example Input: +[5, 2, [-7, 1], 3, [6, [-13, 8], 4]] +Output: 12 + +""" + + +def product_sum(arr: list[int | list], depth: int) -> int: + """ + Recursively calculates the product sum of an array. + + The product sum of an array is defined as the sum of its elements multiplied by + their respective depths. If an element is a list, its product sum is calculated + recursively by multiplying the sum of its elements with its depth plus one. + + Args: + arr: The array of integers and nested lists. + depth: The current depth level. + + Returns: + int: The product sum of the array. 
+ + Examples: + >>> product_sum([1, 2, 3], 1) + 6 + >>> product_sum([-1, 2, [-3, 4]], 2) + 8 + >>> product_sum([1, 2, 3], -1) + -6 + >>> product_sum([1, 2, 3], 0) + 0 + >>> product_sum([1, 2, 3], 7) + 42 + >>> product_sum((1, 2, 3), 7) + 42 + >>> product_sum({1, 2, 3}, 7) + 42 + >>> product_sum([1, -1], 1) + 0 + >>> product_sum([1, -2], 1) + -1 + >>> product_sum([-3.5, [1, [0.5]]], 1) + 1.5 + + """ + total_sum = 0 + for ele in arr: + total_sum += product_sum(ele, depth + 1) if isinstance(ele, list) else ele + return total_sum * depth + + +def product_sum_array(array: list[int | list]) -> int: + """ + Calculates the product sum of an array. + + Args: + array (List[Union[int, List]]): The array of integers and nested lists. + + Returns: + int: The product sum of the array. + + Examples: + >>> product_sum_array([1, 2, 3]) + 6 + >>> product_sum_array([1, [2, 3]]) + 11 + >>> product_sum_array([1, [2, [3, 4]]]) + 47 + >>> product_sum_array([0]) + 0 + >>> product_sum_array([-3.5, [1, [0.5]]]) + 1.5 + >>> product_sum_array([1, -2]) + -1 + + """ + return product_sum(array, 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 267a8b72f97762383e7c313ed20df859115e2815 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 23 Jun 2023 06:56:58 -0700 Subject: [PATCH 21/43] Clarify how to add issue numbers in PR template and CONTRIBUTING.md (#8833) * updating DIRECTORY.md * Clarify wording in PR template * Clarify CONTRIBUTING.md wording about adding issue numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add suggested change from review to CONTRIBUTING.md Co-authored-by: Christian Clauss * Incorporate review edit to CONTRIBUTING.md Co-authored-by: Christian Clauss --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- 
.github/pull_request_template.md | 2 +- CONTRIBUTING.md | 7 ++++++- DIRECTORY.md | 2 ++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b3ba8baf9..1f9797fae 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. -* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`. +* [ ] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER". diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2bb0c2e39..618cca868 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,7 +25,12 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. -Please help us keep our issue list small by adding fixes: #{$ISSUE_NO} to the commit message of pull requests that resolve open issues. 
GitHub will use this tag to auto-close the issue when the PR is merged. +Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues. +For example, if your pull request fixes issue #10, then please add the following to its description: +``` +Fixes #10 +``` +GitHub will use this tag to [auto-close the issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) if and when the PR is merged. #### What is an Algorithm? diff --git a/DIRECTORY.md b/DIRECTORY.md index 83389dab1..1414aacf9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -146,6 +146,7 @@ * [Decimal To Binary Recursion](conversions/decimal_to_binary_recursion.py) * [Decimal To Hexadecimal](conversions/decimal_to_hexadecimal.py) * [Decimal To Octal](conversions/decimal_to_octal.py) + * [Energy Conversions](conversions/energy_conversions.py) * [Excel Title To Column](conversions/excel_title_to_column.py) * [Hex To Bin](conversions/hex_to_bin.py) * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py) @@ -411,6 +412,7 @@ * [Dijkstra 2](graphs/dijkstra_2.py) * [Dijkstra Algorithm](graphs/dijkstra_algorithm.py) * [Dijkstra Alternate](graphs/dijkstra_alternate.py) + * [Dijkstra Binary Grid](graphs/dijkstra_binary_grid.py) * [Dinic](graphs/dinic.py) * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) From 3bfa89dacf877b1d7a62b14f82d54e8de99a838e Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 25 Jun 2023 18:28:01 +0200 Subject: [PATCH 22/43] GitHub Actions build: Add more tests (#8837) * GitHub Actions build: Add more tests Re-enable some tests that were disabled in #6591. 
Fixes #8818 * updating DIRECTORY.md * TODO: Re-enable quantum tests * fails: pytest quantum/bb84.py quantum/q_fourier_transform.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 7 +++---- DIRECTORY.md | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6b9cc890b..5229edaf8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,11 +22,10 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - # See: #6591 for re-enabling tests on Python v3.11 + # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=computer_vision/cnn_classification.py - --ignore=machine_learning/lstm/lstm_prediction.py - --ignore=quantum/ + --ignore=quantum/bb84.py + --ignore=quantum/q_fourier_transform.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered diff --git a/DIRECTORY.md b/DIRECTORY.md index 1414aacf9..0c21b9537 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -167,7 +167,7 @@ * Arrays * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) - * [Product Sum Array](data_structures/arrays/product_sum.py) + * [Product Sum](data_structures/arrays/product_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) From d764eec655c1c51f5ef3490d27ea72430191a000 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 26 Jun 2023 05:24:50 +0200 Subject: [PATCH 23/43] Fix failing pytest quantum/bb84.py (#8838) * Fix failing pytest quantum/bb84.py * Update bb84.py test results to match current qiskit --- .github/workflows/build.yml | 1 - quantum/bb84.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git 
a/.github/workflows/build.yml b/.github/workflows/build.yml index 5229edaf8..fc8cb6369 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,6 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/bb84.py --ignore=quantum/q_fourier_transform.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py diff --git a/quantum/bb84.py b/quantum/bb84.py index 60d64371f..e90a11c2a 100644 --- a/quantum/bb84.py +++ b/quantum/bb84.py @@ -64,10 +64,10 @@ def bb84(key_len: int = 8, seed: int | None = None) -> str: key: The key generated using BB84 protocol. >>> bb84(16, seed=0) - '1101101100010000' + '0111110111010010' >>> bb84(8, seed=0) - '01011011' + '10110001' """ # Set up the random number generator. rng = np.random.default_rng(seed=seed) From 62dcbea943e8cc4ea4d83eff115c4e6f6a4808af Mon Sep 17 00:00:00 2001 From: duongoku Date: Mon, 26 Jun 2023 14:39:18 +0700 Subject: [PATCH 24/43] Add power sum problem (#8832) * Add powersum problem * Add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add more doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add more doctests * Improve paramater name * Fix line too long * Remove global variables * Apply suggestions from code review * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/power_sum.py | 93 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 backtracking/power_sum.py diff --git a/backtracking/power_sum.py b/backtracking/power_sum.py new file mode 100644 index 000000000..fcf1429f8 --- /dev/null +++ b/backtracking/power_sum.py @@ -0,0 +1,93 @@ +""" +Problem source: https://www.hackerrank.com/challenges/the-power-sum/problem +Find the number of ways 
that a given integer X, can be expressed as the sum +of the Nth powers of unique, natural numbers. For example, if X=13 and N=2. +We have to find all combinations of unique squares adding up to 13. +The only solution is 2^2+3^2. Constraints: 1<=X<=1000, 2<=N<=10. +""" + +from math import pow + + +def backtrack( + needed_sum: int, + power: int, + current_number: int, + current_sum: int, + solutions_count: int, +) -> tuple[int, int]: + """ + >>> backtrack(13, 2, 1, 0, 0) + (0, 1) + >>> backtrack(100, 2, 1, 0, 0) + (0, 3) + >>> backtrack(100, 3, 1, 0, 0) + (0, 1) + >>> backtrack(800, 2, 1, 0, 0) + (0, 561) + >>> backtrack(1000, 10, 1, 0, 0) + (0, 0) + >>> backtrack(400, 2, 1, 0, 0) + (0, 55) + >>> backtrack(50, 1, 1, 0, 0) + (0, 3658) + """ + if current_sum == needed_sum: + # If the sum of the powers is equal to needed_sum, then we have a solution. + solutions_count += 1 + return current_sum, solutions_count + + i_to_n = int(pow(current_number, power)) + if current_sum + i_to_n <= needed_sum: + # If the sum of the powers is less than needed_sum, then continue adding powers. + current_sum += i_to_n + current_sum, solutions_count = backtrack( + needed_sum, power, current_number + 1, current_sum, solutions_count + ) + current_sum -= i_to_n + if i_to_n < needed_sum: + # If the power of i is less than needed_sum, then try with the next power. + current_sum, solutions_count = backtrack( + needed_sum, power, current_number + 1, current_sum, solutions_count + ) + return current_sum, solutions_count + + +def solve(needed_sum: int, power: int) -> int: + """ + >>> solve(13, 2) + 1 + >>> solve(100, 2) + 3 + >>> solve(100, 3) + 1 + >>> solve(800, 2) + 561 + >>> solve(1000, 10) + 0 + >>> solve(400, 2) + 55 + >>> solve(50, 1) + Traceback (most recent call last): + ... + ValueError: Invalid input + needed_sum must be between 1 and 1000, power between 2 and 10. + >>> solve(-10, 5) + Traceback (most recent call last): + ... 
+ ValueError: Invalid input + needed_sum must be between 1 and 1000, power between 2 and 10. + """ + if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): + raise ValueError( + "Invalid input\n" + "needed_sum must be between 1 and 1000, power between 2 and 10." + ) + + return backtrack(needed_sum, power, 1, 0, 0)[1] # Return the solutions_count + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 69f20033e55ae62c337e2fb2146aea5fabf3e5a0 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 26 Jun 2023 02:15:31 -0700 Subject: [PATCH 25/43] Remove duplicate implementation of Collatz sequence (#8836) * updating DIRECTORY.md * Remove duplicate implementation of Collatz sequence * updating DIRECTORY.md * Add suggestions from PR review --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - maths/3n_plus_1.py | 151 -------------------------------------- maths/collatz_sequence.py | 69 +++++++++++------ 3 files changed, 46 insertions(+), 175 deletions(-) delete mode 100644 maths/3n_plus_1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 0c21b9537..1e0e450bc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -522,7 +522,6 @@ * [Xgboost Regressor](machine_learning/xgboost_regressor.py) ## Maths - * [3N Plus 1](maths/3n_plus_1.py) * [Abs](maths/abs.py) * [Add](maths/add.py) * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py deleted file mode 100644 index f9f6dfeb9..000000000 --- a/maths/3n_plus_1.py +++ /dev/null @@ -1,151 +0,0 @@ -from __future__ import annotations - - -def n31(a: int) -> tuple[list[int], int]: - """ - Returns the Collatz sequence and its length of any positive integer. 
- >>> n31(4) - ([4, 2, 1], 3) - """ - - if not isinstance(a, int): - msg = f"Must be int, not {type(a).__name__}" - raise TypeError(msg) - if a < 1: - msg = f"Given integer must be positive, not {a}" - raise ValueError(msg) - - path = [a] - while a != 1: - if a % 2 == 0: - a //= 2 - else: - a = 3 * a + 1 - path.append(a) - return path, len(path) - - -def test_n31(): - """ - >>> test_n31() - """ - assert n31(4) == ([4, 2, 1], 3) - assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15) - assert n31(31) == ( - [ - 31, - 94, - 47, - 142, - 71, - 214, - 107, - 322, - 161, - 484, - 242, - 121, - 364, - 182, - 91, - 274, - 137, - 412, - 206, - 103, - 310, - 155, - 466, - 233, - 700, - 350, - 175, - 526, - 263, - 790, - 395, - 1186, - 593, - 1780, - 890, - 445, - 1336, - 668, - 334, - 167, - 502, - 251, - 754, - 377, - 1132, - 566, - 283, - 850, - 425, - 1276, - 638, - 319, - 958, - 479, - 1438, - 719, - 2158, - 1079, - 3238, - 1619, - 4858, - 2429, - 7288, - 3644, - 1822, - 911, - 2734, - 1367, - 4102, - 2051, - 6154, - 3077, - 9232, - 4616, - 2308, - 1154, - 577, - 1732, - 866, - 433, - 1300, - 650, - 325, - 976, - 488, - 244, - 122, - 61, - 184, - 92, - 46, - 23, - 70, - 35, - 106, - 53, - 160, - 80, - 40, - 20, - 10, - 5, - 16, - 8, - 4, - 2, - 1, - ], - 107, - ) - - -if __name__ == "__main__": - num = 4 - path, length = n31(num) - print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}") diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py index 7b3636de6..4f3aa5582 100644 --- a/maths/collatz_sequence.py +++ b/maths/collatz_sequence.py @@ -1,43 +1,66 @@ +""" +The Collatz conjecture is a famous unsolved problem in mathematics. Given a starting +positive integer, define the following sequence: +- If the current term n is even, then the next term is n/2. +- If the current term n is odd, then the next term is 3n + 1. +The conjecture claims that this sequence will always reach 1 for any starting number. 
+ +Other names for this problem include the 3n + 1 problem, the Ulam conjecture, Kakutani's +problem, the Thwaites conjecture, Hasse's algorithm, the Syracuse problem, and the +hailstone sequence. + +Reference: https://en.wikipedia.org/wiki/Collatz_conjecture +""" + from __future__ import annotations +from collections.abc import Generator -def collatz_sequence(n: int) -> list[int]: + +def collatz_sequence(n: int) -> Generator[int, None, None]: """ - Collatz conjecture: start with any positive integer n. The next term is - obtained as follows: - If n term is even, the next term is: n / 2 . - If n is odd, the next term is: 3 * n + 1. - - The conjecture states the sequence will always reach 1 for any starting value n. - Example: - >>> collatz_sequence(2.1) + Generate the Collatz sequence starting at n. + >>> tuple(collatz_sequence(2.1)) Traceback (most recent call last): ... - Exception: Sequence only defined for natural numbers - >>> collatz_sequence(0) + Exception: Sequence only defined for positive integers + >>> tuple(collatz_sequence(0)) Traceback (most recent call last): ... 
- Exception: Sequence only defined for natural numbers - >>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE - [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, - 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1] + Exception: Sequence only defined for positive integers + >>> tuple(collatz_sequence(4)) + (4, 2, 1) + >>> tuple(collatz_sequence(11)) + (11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1) + >>> tuple(collatz_sequence(31)) # doctest: +NORMALIZE_WHITESPACE + (31, 94, 47, 142, 71, 214, 107, 322, 161, 484, 242, 121, 364, 182, 91, 274, 137, + 412, 206, 103, 310, 155, 466, 233, 700, 350, 175, 526, 263, 790, 395, 1186, 593, + 1780, 890, 445, 1336, 668, 334, 167, 502, 251, 754, 377, 1132, 566, 283, 850, 425, + 1276, 638, 319, 958, 479, 1438, 719, 2158, 1079, 3238, 1619, 4858, 2429, 7288, 3644, + 1822, 911, 2734, 1367, 4102, 2051, 6154, 3077, 9232, 4616, 2308, 1154, 577, 1732, + 866, 433, 1300, 650, 325, 976, 488, 244, 122, 61, 184, 92, 46, 23, 70, 35, 106, 53, + 160, 80, 40, 20, 10, 5, 16, 8, 4, 2, 1) + >>> tuple(collatz_sequence(43)) # doctest: +NORMALIZE_WHITESPACE + (43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26, + 13, 40, 20, 10, 5, 16, 8, 4, 2, 1) """ - if not isinstance(n, int) or n < 1: - raise Exception("Sequence only defined for natural numbers") + raise Exception("Sequence only defined for positive integers") - sequence = [n] + yield n while n != 1: - n = 3 * n + 1 if n & 1 else n // 2 - sequence.append(n) - return sequence + if n % 2 == 0: + n //= 2 + else: + n = 3 * n + 1 + yield n def main(): n = 43 - sequence = collatz_sequence(n) + sequence = tuple(collatz_sequence(n)) print(sequence) - print(f"collatz sequence from {n} took {len(sequence)} steps.") + print(f"Collatz sequence from {n} took {len(sequence)} steps.") if __name__ == "__main__": From 929d3d9219020d2978d5560e3b931df69a6f2d50 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 07:23:54 +0200 Subject: [PATCH 26/43] [pre-commit.ci] pre-commit autoupdate (#8842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.274 → v0.0.275](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.274...v0.0.275) - [github.com/tox-dev/pyproject-fmt: 0.12.0 → 0.12.1](https://github.com/tox-dev/pyproject-fmt/compare/0.12.0...0.12.1) - [github.com/pre-commit/mirrors-mypy: v1.3.0 → v1.4.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.3.0...v1.4.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3d4cc4084..1d92d2ff3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.274 + rev: v0.0.275 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.12.0" + rev: "0.12.1" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.3.0 + rev: v1.4.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 1e0e450bc..d25d665ef 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -29,6 +29,7 @@ * [Minmax](backtracking/minmax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) + * [Power Sum](backtracking/power_sum.py) * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of 
Subsets](backtracking/sum_of_subsets.py) From c9ee6ed1887fadd25c1c43c31ed55a99b2be5f24 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 00:20:35 +0200 Subject: [PATCH 27/43] [pre-commit.ci] pre-commit autoupdate (#8853) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.275 → v0.0.276](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.275...v0.0.276) * Update double_ended_queue.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update double_ended_queue.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- data_structures/queue/double_ended_queue.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d92d2ff3..42ebeed14 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.275 + rev: v0.0.276 hooks: - id: ruff diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 2472371b4..44dc863b9 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -54,7 +54,7 @@ class Deque: the current node of the iteration. 
""" - __slots__ = "_cur" + __slots__ = ("_cur",) def __init__(self, cur: Deque._Node | None) -> None: self._cur = cur From a0eec90466beeb3b6ce0f7afd905f96454e9b14c Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 11 Jul 2023 02:44:12 -0700 Subject: [PATCH 28/43] Consolidate duplicate implementations of max subarray (#8849) * Remove max subarray sum duplicate implementations * updating DIRECTORY.md * Rename max_sum_contiguous_subsequence.py * Fix typo in dynamic_programming/max_subarray_sum.py * Remove duplicate divide and conquer max subarray * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 8 +- divide_and_conquer/max_subarray.py | 112 ++++++++++++++++++ divide_and_conquer/max_subarray_sum.py | 78 ------------ dynamic_programming/max_sub_array.py | 93 --------------- dynamic_programming/max_subarray_sum.py | 60 ++++++++++ .../max_sum_contiguous_subsequence.py | 20 ---- maths/kadanes.py | 63 ---------- maths/largest_subarray_sum.py | 21 ---- other/maximum_subarray.py | 32 ----- 9 files changed, 174 insertions(+), 313 deletions(-) create mode 100644 divide_and_conquer/max_subarray.py delete mode 100644 divide_and_conquer/max_subarray_sum.py delete mode 100644 dynamic_programming/max_sub_array.py create mode 100644 dynamic_programming/max_subarray_sum.py delete mode 100644 dynamic_programming/max_sum_contiguous_subsequence.py delete mode 100644 maths/kadanes.py delete mode 100644 maths/largest_subarray_sum.py delete mode 100644 other/maximum_subarray.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d25d665ef..77938f450 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -293,7 +293,7 @@ * [Inversions](divide_and_conquer/inversions.py) * [Kth Order Statistic](divide_and_conquer/kth_order_statistic.py) * [Max Difference Pair](divide_and_conquer/max_difference_pair.py) - * [Max Subarray Sum](divide_and_conquer/max_subarray_sum.py) + * [Max Subarray](divide_and_conquer/max_subarray.py) * 
[Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) @@ -324,8 +324,7 @@ * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) * [Max Product Subarray](dynamic_programming/max_product_subarray.py) - * [Max Sub Array](dynamic_programming/max_sub_array.py) - * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) + * [Max Subarray Sum](dynamic_programming/max_subarray_sum.py) * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) @@ -591,12 +590,10 @@ * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) - * [Kadanes](maths/kadanes.py) * [Karatsuba](maths/karatsuba.py) * [Krishnamurthy Number](maths/krishnamurthy_number.py) * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) * [Largest Of Very Large Numbers](maths/largest_of_very_large_numbers.py) - * [Largest Subarray Sum](maths/largest_subarray_sum.py) * [Least Common Multiple](maths/least_common_multiple.py) * [Line Length](maths/line_length.py) * [Liouville Lambda](maths/liouville_lambda.py) @@ -733,7 +730,6 @@ * [Linear Congruential Generator](other/linear_congruential_generator.py) * [Lru Cache](other/lru_cache.py) * [Magicdiamondpattern](other/magicdiamondpattern.py) - * [Maximum Subarray](other/maximum_subarray.py) * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Number Container System](other/number_container_system.py) diff --git a/divide_and_conquer/max_subarray.py b/divide_and_conquer/max_subarray.py new file mode 100644 index 000000000..851ef621a --- /dev/null +++ b/divide_and_conquer/max_subarray.py @@ -0,0 
+1,112 @@ +""" +The maximum subarray problem is the task of finding the continuous subarray that has the +maximum sum within a given array of numbers. For example, given the array +[-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum is +[4, -1, 2, 1], which has a sum of 6. + +This divide-and-conquer algorithm finds the maximum subarray in O(n log n) time. +""" +from __future__ import annotations + +import time +from collections.abc import Sequence +from random import randint + +from matplotlib import pyplot as plt + + +def max_subarray( + arr: Sequence[float], low: int, high: int +) -> tuple[int | None, int | None, float]: + """ + Solves the maximum subarray problem using divide and conquer. + :param arr: the given array of numbers + :param low: the start index + :param high: the end index + :return: the start index of the maximum subarray, the end index of the + maximum subarray, and the maximum subarray sum + + >>> nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] + >>> max_subarray(nums, 0, len(nums) - 1) + (3, 6, 6) + >>> nums = [2, 8, 9] + >>> max_subarray(nums, 0, len(nums) - 1) + (0, 2, 19) + >>> nums = [0, 0] + >>> max_subarray(nums, 0, len(nums) - 1) + (0, 0, 0) + >>> nums = [-1.0, 0.0, 1.0] + >>> max_subarray(nums, 0, len(nums) - 1) + (2, 2, 1.0) + >>> nums = [-2, -3, -1, -4, -6] + >>> max_subarray(nums, 0, len(nums) - 1) + (2, 2, -1) + >>> max_subarray([], 0, 0) + (None, None, 0) + """ + if not arr: + return None, None, 0 + if low == high: + return low, high, arr[low] + + mid = (low + high) // 2 + left_low, left_high, left_sum = max_subarray(arr, low, mid) + right_low, right_high, right_sum = max_subarray(arr, mid + 1, high) + cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high) + if left_sum >= right_sum and left_sum >= cross_sum: + return left_low, left_high, left_sum + elif right_sum >= left_sum and right_sum >= cross_sum: + return right_low, right_high, right_sum + return cross_left, cross_right, cross_sum + + +def 
max_cross_sum( + arr: Sequence[float], low: int, mid: int, high: int +) -> tuple[int, int, float]: + left_sum, max_left = float("-inf"), -1 + right_sum, max_right = float("-inf"), -1 + + summ: int | float = 0 + for i in range(mid, low - 1, -1): + summ += arr[i] + if summ > left_sum: + left_sum = summ + max_left = i + + summ = 0 + for i in range(mid + 1, high + 1): + summ += arr[i] + if summ > right_sum: + right_sum = summ + max_right = i + + return max_left, max_right, (left_sum + right_sum) + + +def time_max_subarray(input_size: int) -> float: + arr = [randint(1, input_size) for _ in range(input_size)] + start = time.time() + max_subarray(arr, 0, input_size - 1) + end = time.time() + return end - start + + +def plot_runtimes() -> None: + input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] + runtimes = [time_max_subarray(input_size) for input_size in input_sizes] + print("No of Inputs\t\tTime Taken") + for input_size, runtime in zip(input_sizes, runtimes): + print(input_size, "\t\t", runtime) + plt.plot(input_sizes, runtimes) + plt.xlabel("Number of Inputs") + plt.ylabel("Time taken in seconds") + plt.show() + + +if __name__ == "__main__": + """ + A random simulation of this algorithm. + """ + from doctest import testmod + + testmod() diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py deleted file mode 100644 index f23e81719..000000000 --- a/divide_and_conquer/max_subarray_sum.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Given a array of length n, max_subarray_sum() finds -the maximum of sum of contiguous sub-array using divide and conquer method. 
- -Time complexity : O(n log n) - -Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION -(section : 4, sub-section : 4.1, page : 70) - -""" - - -def max_sum_from_start(array): - """This function finds the maximum contiguous sum of array from 0 index - - Parameters : - array (list[int]) : given array - - Returns : - max_sum (int) : maximum contiguous sum of array from 0 index - - """ - array_sum = 0 - max_sum = float("-inf") - for num in array: - array_sum += num - if array_sum > max_sum: - max_sum = array_sum - return max_sum - - -def max_cross_array_sum(array, left, mid, right): - """This function finds the maximum contiguous sum of left and right arrays - - Parameters : - array, left, mid, right (list[int], int, int, int) - - Returns : - (int) : maximum of sum of contiguous sum of left and right arrays - - """ - - max_sum_of_left = max_sum_from_start(array[left : mid + 1][::-1]) - max_sum_of_right = max_sum_from_start(array[mid + 1 : right + 1]) - return max_sum_of_left + max_sum_of_right - - -def max_subarray_sum(array, left, right): - """Maximum contiguous sub-array sum, using divide and conquer method - - Parameters : - array, left, right (list[int], int, int) : - given array, current left index and current right index - - Returns : - int : maximum of sum of contiguous sub-array - - """ - - # base case: array has only one element - if left == right: - return array[right] - - # Recursion - mid = (left + right) // 2 - left_half_sum = max_subarray_sum(array, left, mid) - right_half_sum = max_subarray_sum(array, mid + 1, right) - cross_sum = max_cross_array_sum(array, left, mid, right) - return max(left_half_sum, right_half_sum, cross_sum) - - -if __name__ == "__main__": - array = [-2, -5, 6, -2, -3, 1, 5, -6] - array_length = len(array) - print( - "Maximum sum of contiguous subarray:", - max_subarray_sum(array, 0, array_length - 1), - ) diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py deleted file mode 100644 index 
07717fba4..000000000 --- a/dynamic_programming/max_sub_array.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -author : Mayank Kumar Jha (mk9440) -""" -from __future__ import annotations - - -def find_max_sub_array(a, low, high): - if low == high: - return low, high, a[low] - else: - mid = (low + high) // 2 - left_low, left_high, left_sum = find_max_sub_array(a, low, mid) - right_low, right_high, right_sum = find_max_sub_array(a, mid + 1, high) - cross_left, cross_right, cross_sum = find_max_cross_sum(a, low, mid, high) - if left_sum >= right_sum and left_sum >= cross_sum: - return left_low, left_high, left_sum - elif right_sum >= left_sum and right_sum >= cross_sum: - return right_low, right_high, right_sum - else: - return cross_left, cross_right, cross_sum - - -def find_max_cross_sum(a, low, mid, high): - left_sum, max_left = -999999999, -1 - right_sum, max_right = -999999999, -1 - summ = 0 - for i in range(mid, low - 1, -1): - summ += a[i] - if summ > left_sum: - left_sum = summ - max_left = i - summ = 0 - for i in range(mid + 1, high + 1): - summ += a[i] - if summ > right_sum: - right_sum = summ - max_right = i - return max_left, max_right, (left_sum + right_sum) - - -def max_sub_array(nums: list[int]) -> int: - """ - Finds the contiguous subarray which has the largest sum and return its sum. - - >>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) - 6 - - An empty (sub)array has sum 0. - >>> max_sub_array([]) - 0 - - If all elements are negative, the largest subarray would be the empty array, - having the sum 0. - >>> max_sub_array([-1, -2, -3]) - 0 - >>> max_sub_array([5, -2, -3]) - 5 - >>> max_sub_array([31, -41, 59, 26, -53, 58, 97, -93, -23, 84]) - 187 - """ - best = 0 - current = 0 - for i in nums: - current += i - current = max(current, 0) - best = max(best, current) - return best - - -if __name__ == "__main__": - """ - A random simulation of this algorithm. 
- """ - import time - from random import randint - - from matplotlib import pyplot as plt - - inputs = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] - tim = [] - for i in inputs: - li = [randint(1, i) for j in range(i)] - strt = time.time() - (find_max_sub_array(li, 0, len(li) - 1)) - end = time.time() - tim.append(end - strt) - print("No of Inputs Time Taken") - for i in range(len(inputs)): - print(inputs[i], "\t\t", tim[i]) - plt.plot(inputs, tim) - plt.xlabel("Number of Inputs") - plt.ylabel("Time taken in seconds ") - plt.show() diff --git a/dynamic_programming/max_subarray_sum.py b/dynamic_programming/max_subarray_sum.py new file mode 100644 index 000000000..c76943472 --- /dev/null +++ b/dynamic_programming/max_subarray_sum.py @@ -0,0 +1,60 @@ +""" +The maximum subarray sum problem is the task of finding the maximum sum that can be +obtained from a contiguous subarray within a given array of numbers. For example, given +the array [-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum +is [4, -1, 2, 1], so the maximum subarray sum is 6. + +Kadane's algorithm is a simple dynamic programming algorithm that solves the maximum +subarray sum problem in O(n) time and O(1) space. + +Reference: https://en.wikipedia.org/wiki/Maximum_subarray_problem +""" +from collections.abc import Sequence + + +def max_subarray_sum( + arr: Sequence[float], allow_empty_subarrays: bool = False +) -> float: + """ + Solves the maximum subarray sum problem using Kadane's algorithm. 
+ :param arr: the given array of numbers + :param allow_empty_subarrays: if True, then the algorithm considers empty subarrays + + >>> max_subarray_sum([2, 8, 9]) + 19 + >>> max_subarray_sum([0, 0]) + 0 + >>> max_subarray_sum([-1.0, 0.0, 1.0]) + 1.0 + >>> max_subarray_sum([1, 2, 3, 4, -2]) + 10 + >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) + 6 + >>> max_subarray_sum([2, 3, -9, 8, -2]) + 8 + >>> max_subarray_sum([-2, -3, -1, -4, -6]) + -1 + >>> max_subarray_sum([-2, -3, -1, -4, -6], allow_empty_subarrays=True) + 0 + >>> max_subarray_sum([]) + 0 + """ + if not arr: + return 0 + + max_sum = 0 if allow_empty_subarrays else float("-inf") + curr_sum = 0.0 + for num in arr: + curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num) + max_sum = max(max_sum, curr_sum) + + return max_sum + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] + print(f"{max_subarray_sum(nums) = }") diff --git a/dynamic_programming/max_sum_contiguous_subsequence.py b/dynamic_programming/max_sum_contiguous_subsequence.py deleted file mode 100644 index bac592370..000000000 --- a/dynamic_programming/max_sum_contiguous_subsequence.py +++ /dev/null @@ -1,20 +0,0 @@ -def max_subarray_sum(nums: list) -> int: - """ - >>> max_subarray_sum([6 , 9, -1, 3, -7, -5, 10]) - 17 - """ - if not nums: - return 0 - n = len(nums) - - res, s, s_pre = nums[0], nums[0], nums[0] - for i in range(1, n): - s = max(nums[i], s_pre + nums[i]) - s_pre = s - res = max(res, s) - return res - - -if __name__ == "__main__": - nums = [6, 9, -1, 3, -7, -5, 10] - print(max_subarray_sum(nums)) diff --git a/maths/kadanes.py b/maths/kadanes.py deleted file mode 100644 index c2ea53a6c..000000000 --- a/maths/kadanes.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Kadane's algorithm to get maximum subarray sum -https://medium.com/@rsinghal757/kadanes-algorithm-dynamic-programming-how-and-why-does-it-work-3fd8849ed73d 
-https://en.wikipedia.org/wiki/Maximum_subarray_problem -""" -test_data: tuple = ([-2, -8, -9], [2, 8, 9], [-1, 0, 1], [0, 0], []) - - -def negative_exist(arr: list) -> int: - """ - >>> negative_exist([-2,-8,-9]) - -2 - >>> [negative_exist(arr) for arr in test_data] - [-2, 0, 0, 0, 0] - """ - arr = arr or [0] - max_number = arr[0] - for i in arr: - if i >= 0: - return 0 - elif max_number <= i: - max_number = i - return max_number - - -def kadanes(arr: list) -> int: - """ - If negative_exist() returns 0 than this function will execute - else it will return the value return by negative_exist function - - For example: arr = [2, 3, -9, 8, -2] - Initially we set value of max_sum to 0 and max_till_element to 0 than when - max_sum is less than max_till particular element it will assign that value to - max_sum and when value of max_till_sum is less than 0 it will assign 0 to i - and after that whole process, return the max_sum - So the output for above arr is 8 - - >>> kadanes([2, 3, -9, 8, -2]) - 8 - >>> [kadanes(arr) for arr in test_data] - [-2, 19, 1, 0, 0] - """ - max_sum = negative_exist(arr) - if max_sum < 0: - return max_sum - - max_sum = 0 - max_till_element = 0 - - for i in arr: - max_till_element += i - max_sum = max(max_sum, max_till_element) - max_till_element = max(max_till_element, 0) - return max_sum - - -if __name__ == "__main__": - try: - print("Enter integer values sepatated by spaces") - arr = [int(x) for x in input().split()] - print(f"Maximum subarray sum of {arr} is {kadanes(arr)}") - except ValueError: - print("Please enter integer values.") diff --git a/maths/largest_subarray_sum.py b/maths/largest_subarray_sum.py deleted file mode 100644 index 90f92c712..000000000 --- a/maths/largest_subarray_sum.py +++ /dev/null @@ -1,21 +0,0 @@ -from sys import maxsize - - -def max_sub_array_sum(a: list, size: int = 0): - """ - >>> max_sub_array_sum([-13, -3, -25, -20, -3, -16, -23, -12, -5, -22, -15, -4, -7]) - -3 - """ - size = size or len(a) - max_so_far = 
-maxsize - 1 - max_ending_here = 0 - for i in range(0, size): - max_ending_here = max_ending_here + a[i] - max_so_far = max(max_so_far, max_ending_here) - max_ending_here = max(max_ending_here, 0) - return max_so_far - - -if __name__ == "__main__": - a = [-13, -3, -25, -20, 1, -16, -23, -12, -5, -22, -15, -4, -7] - print(("Maximum contiguous sum is", max_sub_array_sum(a, len(a)))) diff --git a/other/maximum_subarray.py b/other/maximum_subarray.py deleted file mode 100644 index 1c8c8cabc..000000000 --- a/other/maximum_subarray.py +++ /dev/null @@ -1,32 +0,0 @@ -from collections.abc import Sequence - - -def max_subarray_sum(nums: Sequence[int]) -> int: - """Return the maximum possible sum amongst all non - empty subarrays. - - Raises: - ValueError: when nums is empty. - - >>> max_subarray_sum([1,2,3,4,-2]) - 10 - >>> max_subarray_sum([-2,1,-3,4,-1,2,1,-5,4]) - 6 - """ - if not nums: - raise ValueError("Input sequence should not be empty") - - curr_max = ans = nums[0] - nums_len = len(nums) - - for i in range(1, nums_len): - num = nums[i] - curr_max = max(curr_max + num, num) - ans = max(curr_max, ans) - - return ans - - -if __name__ == "__main__": - n = int(input("Enter number of elements : ").strip()) - array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] - print(max_subarray_sum(array)) From 44b1bcc7c7e0f15385530bf54c59ad4eb86fef0b Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 11 Jul 2023 10:51:21 +0100 Subject: [PATCH 29/43] Fix failing tests from ruff/newton_raphson (ignore S307 "possibly insecure function") (#8862) * chore: Fix failing tests (ignore S307 "possibly insecure function") * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: Move noqa back to right line --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- arithmetic_analysis/newton_raphson.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff 
--git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index aee2f07e5..1b90ad417 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -25,9 +25,11 @@ def newton_raphson( """ x = a while True: - x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func))))) + x = Decimal(x) - ( + Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307 + ) # This number dictates the accuracy of the answer - if abs(eval(func)) < precision: + if abs(eval(func)) < precision: # noqa: S307 return float(x) From f614ed72170011d2d439f7901e1c8daa7deac8c4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Jul 2023 11:55:32 +0200 Subject: [PATCH 30/43] [pre-commit.ci] pre-commit autoupdate (#8860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.276 → v0.0.277](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.276...v0.0.277) - [github.com/tox-dev/pyproject-fmt: 0.12.1 → 0.13.0](https://github.com/tox-dev/pyproject-fmt/compare/0.12.1...0.13.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 42ebeed14..bf30703bd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.276 + rev: v0.0.277 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.12.1" + rev: "0.13.0" hooks: - id: pyproject-fmt From 5aefc00f0f1c692ce772ddbc616d7cd91233236b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 
09:58:22 +0530 Subject: [PATCH 31/43] [pre-commit.ci] pre-commit autoupdate (#8872) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.277 → v0.0.278](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.277...v0.0.278) - [github.com/psf/black: 23.3.0 → 23.7.0](https://github.com/psf/black/compare/23.3.0...23.7.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bf30703bd..13b955dd3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.277 + rev: v0.0.278 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.3.0 + rev: 23.7.0 hooks: - id: black From 93fb169627ea9fe43436a312fdfa751818808180 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 22 Jul 2023 13:05:10 +0300 Subject: [PATCH 32/43] [Upgrade Ruff] Fix all errors raised from ruff (#8879) * chore: Fix tests * chore: Fix failing ruff * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * chore: Fix ruff errors * chore: Fix ruff errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update cellular_automata/game_of_life.py Co-authored-by: Christian Clauss * chore: Update ruff version in pre-commit * chore: Fix ruff errors * Update edmonds_karp_multiple_source_and_sink.py * Update factorial.py * Update 
primelib.py * Update min_cost_string_conversion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- cellular_automata/game_of_life.py | 2 +- data_structures/binary_tree/red_black_tree.py | 2 +- data_structures/trie/radix_tree.py | 4 ++-- divide_and_conquer/convex_hull.py | 2 +- ...directed_and_undirected_(weighted)_graph.py | 18 +++++++++--------- .../edmonds_karp_multiple_source_and_sink.py | 2 +- maths/factorial.py | 2 +- maths/primelib.py | 2 +- other/davisb_putnamb_logemannb_loveland.py | 2 +- project_euler/problem_009/sol3.py | 16 ++++++++++------ quantum/ripple_adder_classic.py | 2 +- strings/min_cost_string_conversion.py | 2 +- web_programming/convert_number_to_words.py | 4 +--- 14 files changed, 32 insertions(+), 30 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 13b955dd3..5adf12cc7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.278 + rev: v0.0.280 hooks: - id: ruff diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 3382af7b5..b69afdce0 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -98,7 +98,7 @@ def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool: if pt: if alive < 2: state = False - elif alive == 2 or alive == 3: + elif alive in {2, 3}: state = True elif alive > 3: state = False diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 3ebc8d639..4ebe0e927 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -152,7 +152,7 @@ class RedBlackTree: self.grandparent.color = 1 self.grandparent._insert_repair() - def remove(self, label: int) -> RedBlackTree: + def remove(self, 
label: int) -> RedBlackTree: # noqa: PLR0912 """Remove label from this tree.""" if self.label == label: if self.left and self.right: diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index 66890346e..cf2f25c29 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -156,7 +156,7 @@ class RadixNode: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes) == 1 and not self.is_leaf: - merging_node = list(self.nodes.values())[0] + merging_node = next(iter(self.nodes.values())) self.is_leaf = merging_node.is_leaf self.prefix += merging_node.prefix self.nodes = merging_node.nodes @@ -165,7 +165,7 @@ class RadixNode: incoming_node.is_leaf = False # If there is 1 edge, we merge it with its child else: - merging_node = list(incoming_node.nodes.values())[0] + merging_node = next(iter(incoming_node.nodes.values())) incoming_node.is_leaf = merging_node.is_leaf incoming_node.prefix += merging_node.prefix incoming_node.nodes = merging_node.nodes diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 1ad933417..1d1bf301d 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -266,7 +266,7 @@ def convex_hull_bf(points: list[Point]) -> list[Point]: points_left_of_ij = points_right_of_ij = False ij_part_of_convex_hull = True for k in range(n): - if k != i and k != j: + if k not in {i, j}: det_k = _det(points[i], points[j], points[k]) if det_k > 0: diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py index b29485031..8ca645fda 100644 --- a/graphs/directed_and_undirected_(weighted)_graph.py +++ b/graphs/directed_and_undirected_(weighted)_graph.py @@ -39,7 +39,7 @@ class DirectedGraph: stack = [] visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) ss = s @@ -87,7 +87,7 @@ class 
DirectedGraph: d = deque() visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) d.append(s) visited.append(s) while d: @@ -114,7 +114,7 @@ class DirectedGraph: stack = [] visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) ss = s @@ -146,7 +146,7 @@ class DirectedGraph: def cycle_nodes(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 @@ -199,7 +199,7 @@ class DirectedGraph: def has_cycle(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 @@ -305,7 +305,7 @@ class Graph: stack = [] visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) ss = s @@ -353,7 +353,7 @@ class Graph: d = deque() visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) d.append(s) visited.append(s) while d: @@ -371,7 +371,7 @@ class Graph: def cycle_nodes(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 @@ -424,7 +424,7 @@ class Graph: def has_cycle(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 diff --git a/graphs/edmonds_karp_multiple_source_and_sink.py b/graphs/edmonds_karp_multiple_source_and_sink.py index d06108041..5c774f4b8 100644 --- a/graphs/edmonds_karp_multiple_source_and_sink.py +++ b/graphs/edmonds_karp_multiple_source_and_sink.py @@ -113,7 +113,7 @@ class PushRelabelExecutor(MaximumFlowAlgorithmExecutor): vertices_list = [ i for i in range(self.verticies_count) - if i != self.source_index and i != self.sink_index + if i not in {self.source_index, self.sink_index} ] # move through list diff --git a/maths/factorial.py b/maths/factorial.py index bbf0efc01..18cacdef9 100644 --- 
a/maths/factorial.py +++ b/maths/factorial.py @@ -55,7 +55,7 @@ def factorial_recursive(n: int) -> int: raise ValueError("factorial() only accepts integral values") if n < 0: raise ValueError("factorial() not defined for negative values") - return 1 if n == 0 or n == 1 else n * factorial(n - 1) + return 1 if n in {0, 1} else n * factorial(n - 1) if __name__ == "__main__": diff --git a/maths/primelib.py b/maths/primelib.py index 81d573706..28b5aee9d 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -154,7 +154,7 @@ def prime_factorization(number): quotient = number - if number == 0 or number == 1: + if number in {0, 1}: ans.append(number) # if 'number' not prime then builds the prime factorization of 'number' diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index a1bea5b39..f5fb103ba 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -253,7 +253,7 @@ def find_unit_clauses( unit_symbols = [] for clause in clauses: if len(clause) == 1: - unit_symbols.append(list(clause.literals.keys())[0]) + unit_symbols.append(next(iter(clause.literals.keys()))) else: f_count, n_count = 0, 0 for literal, value in clause.literals.items(): diff --git a/project_euler/problem_009/sol3.py b/project_euler/problem_009/sol3.py index d299f821d..37340d306 100644 --- a/project_euler/problem_009/sol3.py +++ b/project_euler/problem_009/sol3.py @@ -28,12 +28,16 @@ def solution() -> int: 31875000 """ - return [ - a * b * (1000 - a - b) - for a in range(1, 999) - for b in range(a, 999) - if (a * a + b * b == (1000 - a - b) ** 2) - ][0] + return next( + iter( + [ + a * b * (1000 - a - b) + for a in range(1, 999) + for b in range(a, 999) + if (a * a + b * b == (1000 - a - b) ** 2) + ] + ) + ) if __name__ == "__main__": diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index b604395bc..2284141cc 100644 --- a/quantum/ripple_adder_classic.py +++ 
b/quantum/ripple_adder_classic.py @@ -107,7 +107,7 @@ def ripple_adder( res = qiskit.execute(circuit, backend, shots=1).result() # The result is in binary. Convert it back to int - return int(list(res.get_counts())[0], 2) + return int(next(iter(res.get_counts())), 2) if __name__ == "__main__": diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 089c2532f..0fad0b88c 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -61,7 +61,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: if i == 0 and j == 0: return [] else: - if ops[i][j][0] == "C" or ops[i][j][0] == "R": + if ops[i][j][0] in {"C", "R"}: seq = assemble_transformation(ops, i - 1, j - 1) seq.append(ops[i][j]) return seq diff --git a/web_programming/convert_number_to_words.py b/web_programming/convert_number_to_words.py index 1e293df96..dac9e3e38 100644 --- a/web_programming/convert_number_to_words.py +++ b/web_programming/convert_number_to_words.py @@ -90,9 +90,7 @@ def convert(number: int) -> str: else: addition = "" if counter in placevalue: - if current == 0 and ((temp_num % 100) // 10) == 0: - addition = "" - else: + if current != 0 and ((temp_num % 100) // 10) != 0: addition = placevalue[counter] if ((temp_num % 100) // 10) == 1: words = teens[current] + addition + words From f7531d9874e0dd3682bf0ed7ae408927e1fae472 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 22 Jul 2023 03:11:04 -0700 Subject: [PATCH 33/43] Add note in `CONTRIBUTING.md` about not asking to be assigned to issues (#8871) * Add note in CONTRIBUTING.md about not asking to be assigned to issues Add a paragraph to CONTRIBUTING.md explicitly asking contributors to not ask to be assigned to issues * Update CONTRIBUTING.md * Update CONTRIBUTING.md --------- Co-authored-by: Christian Clauss --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 
618cca868..4a1bb6527 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,6 +25,8 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. +If you are interested in resolving an [open issue](https://github.com/TheAlgorithms/Python/issues), simply make a pull request with your proposed fix. __We do not assign issues in this repo__ so please do not ask for permission to work on an issue. + Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues. For example, if your pull request fixes issue #10, then please add the following to its description: ``` From 9e08c7726dee5b18585a76e54c71922ca96c0b3a Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 22 Jul 2023 13:34:19 +0300 Subject: [PATCH 34/43] Small docstring time complexity fix in number_container _system (#8875) * fix: Write time is O(log n) not O(n log n) * chore: Update pre-commit ruff version * revert: Undo previous commit --- other/number_container_system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/number_container_system.py b/other/number_container_system.py index f547bc8a2..6c95dd0a3 100644 --- a/other/number_container_system.py +++ b/other/number_container_system.py @@ -1,6 +1,6 @@ """ A number container system that uses binary search to delete and insert values into -arrays with O(n logn) write times and O(1) read times. 
+arrays with O(log n) write times and O(1) read times. This container system holds integers at indexes. From a03b739d23b59890b59d2d2288ebaa56e3be47ce Mon Sep 17 00:00:00 2001 From: Sangmin Jeon Date: Mon, 24 Jul 2023 18:29:05 +0900 Subject: [PATCH 35/43] Fix `radix_tree.py` insertion fail in ["*X", "*XX"] cases (#8870) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix insertion fail in ["*X", "*XX"] cases Consider a word, and a copy of that word, but with the last letter repeating twice. (e.g., ["ABC", "ABCC"]) When adding the second word's last letter, it only compares the previous word's prefix—the last letter of the word already in the Radix Tree: 'C'—and the letter to be added—the last letter of the word we're currently adding: 'C'. So it wrongly passes the "Case 1" check, marks the current node as a leaf node when it already was, then returns when there's still one more letter to add. The issue arises because `prefix` includes the letter of the node itself. (e.g., `nodes: {'C' : RadixNode()}, is_leaf: True, prefix: 'C'`) It can be easily fixed by simply adding the `is_leaf` check, asking if there are more letters to be added. 
- Test Case: `"A AA AAA AAAA"` - Fixed correct output: ``` Words: ['A', 'AA', 'AAA', 'AAAA'] Tree: - A (leaf) -- A (leaf) --- A (leaf) ---- A (leaf) ``` - Current incorrect output: ``` Words: ['A', 'AA', 'AAA', 'AAAA'] Tree: - A (leaf) -- AA (leaf) --- A (leaf) ``` *N.B.* This passed test cases for [Croatian Open Competition in Informatics 2012/2013 Contest #3 Task 5 HERKABE](https://hsin.hr/coci/archive/2012_2013/) * Add a doctest for previous fix * improve doctest readability --- data_structures/trie/radix_tree.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index cf2f25c29..fadc50cb4 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -54,10 +54,17 @@ class RadixNode: word (str): word to insert >>> RadixNode("myprefix").insert("mystring") + + >>> root = RadixNode() + >>> root.insert_many(['myprefix', 'myprefixA', 'myprefixAA']) + >>> root.print_tree() + - myprefix (leaf) + -- A (leaf) + --- A (leaf) """ # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf - if self.prefix == word: + if self.prefix == word and not self.is_leaf: self.is_leaf = True # Case 2: The node has no edges that have a prefix to the word From b77e6adf3abba674eb83ab7c0182bd6c89c08891 Mon Sep 17 00:00:00 2001 From: HManiac74 <63391783+HManiac74@users.noreply.github.com> Date: Tue, 25 Jul 2023 22:23:20 +0200 Subject: [PATCH 36/43] Add Docker devcontainer configuration files (#8887) * Added Docker container configuration files * Update Dockerfile Copy and install requirements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated Docker devcontainer configuration * Update requierements.txt * Update Dockerfile * Update Dockerfile * Update .devcontainer/devcontainer.json Co-authored-by: Christian Clauss * Update Dockerfile * Update Dockerfile. 
Add linebreak --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .devcontainer/Dockerfile | 6 +++++ .devcontainer/devcontainer.json | 42 +++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 000000000..27b25c09b --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,6 @@ +# https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md +ARG VARIANT=3.11-bookworm +FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} +COPY requirements.txt /tmp/pip-tmp/ +RUN python3 -m pip install --upgrade pip \ + && python3 -m pip install --no-cache-dir install ruff -r /tmp/pip-tmp/requirements.txt diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000..c5a855b25 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,42 @@ +{ + "name": "Python 3", + "build": { + "dockerfile": "Dockerfile", + "context": "..", + "args": { + // Update 'VARIANT' to pick a Python version: 3, 3.10, 3.9, 3.8, 3.7, 3.6 + // Append -bullseye or -buster to pin to an OS version. + // Use -bullseye variants on local on arm64/Apple Silicon. + "VARIANT": "3.11-bookworm", + } + }, + + // Configure tool-specific properties. + "customizations": { + // Configure properties specific to VS Code. + "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "python.defaultInterpreterPath": "/usr/local/bin/python", + "python.linting.enabled": true, + "python.formatting.blackPath": "/usr/local/py-utils/bin/black", + "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy" + }, + + // Add the IDs of extensions you want installed when the container is created. 
+ "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance" + ] + } + }, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + // "postCreateCommand": "pip3 install --user -r requirements.txt", + + // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. + "remoteUser": "vscode" +} From dbaff345724040b270b3097cb02759f36ce0ef46 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 28 Jul 2023 18:53:09 +0200 Subject: [PATCH 37/43] Fix ruff rules ISC flake8-implicit-str-concat (#8892) --- ciphers/diffie_hellman.py | 244 ++++++++++++------------- compression/burrows_wheeler.py | 2 +- neural_network/input_data.py | 4 +- pyproject.toml | 2 +- strings/is_srilankan_phone_number.py | 4 +- web_programming/world_covid19_stats.py | 5 +- 6 files changed, 128 insertions(+), 133 deletions(-) diff --git a/ciphers/diffie_hellman.py b/ciphers/diffie_hellman.py index cd40a6b9c..aec7fb3ea 100644 --- a/ciphers/diffie_hellman.py +++ b/ciphers/diffie_hellman.py @@ -10,13 +10,13 @@ primes = { 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + 
"670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -25,16 +25,16 @@ primes = { 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -43,21 +43,21 @@ primes = { 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" - + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" - + 
"ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" - + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" - + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -66,27 +66,27 @@ primes = { 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" - + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" - + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" - + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" - + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" - + 
"88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" - + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" - + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" - + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" - + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" - + "FFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -95,33 +95,33 @@ primes = { 17: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" - + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" - + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" - + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" - + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" - + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" - + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" - + 
"3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" - + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" - + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" - + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" - + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" - + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" - + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" - + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" - + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" - + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" - + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" - + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" - + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" - + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" - + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" - + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" - + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" - + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" - + "6DCC4024FFFFFFFFFFFFFFFF", + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + 
"E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -130,48 +130,48 @@ primes = { 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" - + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" - + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" - + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" - + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" - + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" - + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" - + 
"287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" - + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" - + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" - + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" - + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" - + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" - + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" - + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" - + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" - + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" - + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" - + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" - + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" - + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" - + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" - + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" - + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" - + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" - + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" - + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" - + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" - + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" - + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" - + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" - + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + 
"F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 0916b8a65..52bb045d9 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -150,7 +150,7 @@ def reverse_bwt(bwt_string: str, idx_original_string: int) -> str: raise ValueError("The parameter idx_original_string must not be lower than 0.") if idx_original_string >= len(bwt_string): 
raise ValueError( - "The parameter idx_original_string must be lower than" " len(bwt_string)." + "The parameter idx_original_string must be lower than len(bwt_string)." ) ordered_rotations = [""] * len(bwt_string) diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 94c018ece..a58e64907 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -263,9 +263,7 @@ def _maybe_download(filename, work_directory, source_url): return filepath -@deprecated( - None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" -) +@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')") def read_data_sets( train_dir, fake_data=False, diff --git a/pyproject.toml b/pyproject.toml index 4f21a9519..f9091fb85 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ select = [ # https://beta.ruff.rs/docs/rules "ICN", # flake8-import-conventions "INP", # flake8-no-pep420 "INT", # flake8-gettext + "ISC", # flake8-implicit-str-concat "N", # pep8-naming "NPY", # NumPy-specific rules "PGH", # pygrep-hooks @@ -72,7 +73,6 @@ select = [ # https://beta.ruff.rs/docs/rules # "DJ", # flake8-django # "ERA", # eradicate -- DO NOT FIX # "FBT", # flake8-boolean-trap # FIX ME - # "ISC", # flake8-implicit-str-concat # FIX ME # "PD", # pandas-vet # "PT", # flake8-pytest-style # "PTH", # flake8-use-pathlib # FIX ME diff --git a/strings/is_srilankan_phone_number.py b/strings/is_srilankan_phone_number.py index 7bded93f7..6456f85e1 100644 --- a/strings/is_srilankan_phone_number.py +++ b/strings/is_srilankan_phone_number.py @@ -22,9 +22,7 @@ def is_sri_lankan_phone_number(phone: str) -> bool: False """ - pattern = re.compile( - r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" - ) + pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$") return bool(re.search(pattern, phone)) diff --git a/web_programming/world_covid19_stats.py b/web_programming/world_covid19_stats.py 
index 1dd1ff6d1..ca81abdc4 100644 --- a/web_programming/world_covid19_stats.py +++ b/web_programming/world_covid19_stats.py @@ -22,6 +22,5 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") if __name__ == "__main__": - print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n") - for key, value in world_covid19_stats().items(): - print(f"{key}\n{value}\n") + print("\033[1m COVID-19 Status of the World \033[0m\n") + print("\n".join(f"{key}\n{value}" for key, value in world_covid19_stats().items())) From 46454e204cc587d1ef044e4b1a11050c30aab4f6 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 28 Jul 2023 18:54:45 +0200 Subject: [PATCH 38/43] [skip-ci] In .devcontainer/Dockerfile: pipx install pre-commit ruff (#8893) [skip-ci] In .devcontainer/Dockerfile: pipx install pre-commit ruff --- .devcontainer/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 27b25c09b..b5a5347c6 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,4 +3,6 @@ ARG VARIANT=3.11-bookworm FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} COPY requirements.txt /tmp/pip-tmp/ RUN python3 -m pip install --upgrade pip \ - && python3 -m pip install --no-cache-dir install ruff -r /tmp/pip-tmp/requirements.txt + && python3 -m pip install --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ + && pipx install pre-commit ruff \ + && pre-commit install From 4a83e3f0b1b2a3b414134c3498e57c0fea3b9fcf Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Fri, 28 Jul 2023 21:12:31 +0300 Subject: [PATCH 39/43] Fix failing build due to missing requirement (#8900) * feat(cellular_automata): Create wa-tor algorithm * updating DIRECTORY.md * chore(quality): Implement algo-keeper bot changes * build: Fix broken ci * git rm cellular_automata/wa_tor.py * updating DIRECTORY.md --------- Co-authored-by: github-actions 
<${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index acfbc823e..2702523d5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,6 +9,7 @@ pandas pillow projectq qiskit +qiskit-aer requests rich scikit-fuzzy From e406801f9e3967ff0533dfe8cb98a3249db48d33 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 28 Jul 2023 11:17:46 -0700 Subject: [PATCH 40/43] Reimplement polynomial_regression.py (#8889) * Reimplement polynomial_regression.py Rename machine_learning/polymonial_regression.py to machine_learning/polynomial_regression.py Reimplement machine_learning/polynomial_regression.py using numpy because the old original implementation was just a how-to on doing polynomial regression using sklearn Add detailed function documentation, doctests, and algorithm explanation * updating DIRECTORY.md * Fix matrix formatting in docstrings * Try to fix failing doctest * Debugging failing doctest * Fix failing doctest attempt 2 * Remove unnecessary return value descriptions in docstrings * Readd placeholder doctest for main function * Fix typo in algorithm description --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- machine_learning/polymonial_regression.py | 44 ----- machine_learning/polynomial_regression.py | 213 ++++++++++++++++++++++ 3 files changed, 214 insertions(+), 45 deletions(-) delete mode 100644 machine_learning/polymonial_regression.py create mode 100644 machine_learning/polynomial_regression.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 77938f450..133a1ab01 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -511,7 +511,7 @@ * Lstm * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) - * [Polymonial Regression](machine_learning/polymonial_regression.py) + * [Polynomial 
Regression](machine_learning/polynomial_regression.py) * [Scoring Functions](machine_learning/scoring_functions.py) * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) diff --git a/machine_learning/polymonial_regression.py b/machine_learning/polymonial_regression.py deleted file mode 100644 index 487fb8145..000000000 --- a/machine_learning/polymonial_regression.py +++ /dev/null @@ -1,44 +0,0 @@ -import pandas as pd -from matplotlib import pyplot as plt -from sklearn.linear_model import LinearRegression - -# Splitting the dataset into the Training set and Test set -from sklearn.model_selection import train_test_split - -# Fitting Polynomial Regression to the dataset -from sklearn.preprocessing import PolynomialFeatures - -# Importing the dataset -dataset = pd.read_csv( - "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" - "position_salaries.csv" -) -X = dataset.iloc[:, 1:2].values -y = dataset.iloc[:, 2].values - - -X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) - - -poly_reg = PolynomialFeatures(degree=4) -X_poly = poly_reg.fit_transform(X) -pol_reg = LinearRegression() -pol_reg.fit(X_poly, y) - - -# Visualizing the Polymonial Regression results -def viz_polymonial(): - plt.scatter(X, y, color="red") - plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue") - plt.title("Truth or Bluff (Linear Regression)") - plt.xlabel("Position level") - plt.ylabel("Salary") - plt.show() - - -if __name__ == "__main__": - viz_polymonial() - - # Predicting a new result with Polymonial Regression - pol_reg.predict(poly_reg.fit_transform([[5.5]])) - # output should be 132148.43750003 diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py new file mode 100644 index 000000000..5bafea96f --- /dev/null +++ b/machine_learning/polynomial_regression.py @@ -0,0 +1,213 @@ +""" 
+Polynomial regression is a type of regression analysis that models the relationship +between a predictor x and the response y as an mth-degree polynomial: + +y = β₀ + β₁x + β₂x² + ... + βₘxᵐ + ε + +By treating x, x², ..., xᵐ as distinct variables, we see that polynomial regression is a +special case of multiple linear regression. Therefore, we can use ordinary least squares +(OLS) estimation to estimate the vector of model parameters β = (β₀, β₁, β₂, ..., βₘ) +for polynomial regression: + +β = (XᵀX)⁻¹Xᵀy = X⁺y + +where X is the design matrix, y is the response vector, and X⁺ denotes the Moore–Penrose +pseudoinverse of X. In the case of polynomial regression, the design matrix is + + |1 x₁ x₁² ⋯ x₁ᵐ| +X = |1 x₂ x₂² ⋯ x₂ᵐ| + |⋮ ⋮ ⋮ ⋱ ⋮ | + |1 xₙ xₙ² ⋯ xₙᵐ| + +In OLS estimation, inverting XᵀX to compute X⁺ can be very numerically unstable. This +implementation sidesteps this need to invert XᵀX by computing X⁺ using singular value +decomposition (SVD): + +β = VΣ⁺Uᵀy + +where UΣVᵀ is an SVD of X. + +References: + - https://en.wikipedia.org/wiki/Polynomial_regression + - https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse + - https://en.wikipedia.org/wiki/Numerical_methods_for_linear_least_squares + - https://en.wikipedia.org/wiki/Singular_value_decomposition +""" + +import matplotlib.pyplot as plt +import numpy as np + + +class PolynomialRegression: + __slots__ = "degree", "params" + + def __init__(self, degree: int) -> None: + """ + @raises ValueError: if the polynomial degree is negative + """ + if degree < 0: + raise ValueError("Polynomial degree must be non-negative") + + self.degree = degree + self.params = None + + @staticmethod + def _design_matrix(data: np.ndarray, degree: int) -> np.ndarray: + """ + Constructs a polynomial regression design matrix for the given input data. 
For + input data x = (x₁, x₂, ..., xₙ) and polynomial degree m, the design matrix is + the Vandermonde matrix + + |1 x₁ x₁² ⋯ x₁ᵐ| + X = |1 x₂ x₂² ⋯ x₂ᵐ| + |⋮ ⋮ ⋮ ⋱ ⋮ | + |1 xₙ xₙ² ⋯ xₙᵐ| + + Reference: https://en.wikipedia.org/wiki/Vandermonde_matrix + + @param data: the input predictor values x, either for model fitting or for + prediction + @param degree: the polynomial degree m + @returns: the Vandermonde matrix X (see above) + @raises ValueError: if input data is not N x 1 + + >>> x = np.array([0, 1, 2]) + >>> PolynomialRegression._design_matrix(x, degree=0) + array([[1], + [1], + [1]]) + >>> PolynomialRegression._design_matrix(x, degree=1) + array([[1, 0], + [1, 1], + [1, 2]]) + >>> PolynomialRegression._design_matrix(x, degree=2) + array([[1, 0, 0], + [1, 1, 1], + [1, 2, 4]]) + >>> PolynomialRegression._design_matrix(x, degree=3) + array([[1, 0, 0, 0], + [1, 1, 1, 1], + [1, 2, 4, 8]]) + >>> PolynomialRegression._design_matrix(np.array([[0, 0], [0 , 0]]), degree=3) + Traceback (most recent call last): + ... + ValueError: Data must have dimensions N x 1 + """ + rows, *remaining = data.shape + if remaining: + raise ValueError("Data must have dimensions N x 1") + + return np.vander(data, N=degree + 1, increasing=True) + + def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None: + """ + Computes the polynomial regression model parameters using ordinary least squares + (OLS) estimation: + + β = (XᵀX)⁻¹Xᵀy = X⁺y + + where X⁺ denotes the Moore–Penrose pseudoinverse of the design matrix X. This + function computes X⁺ using singular value decomposition (SVD). 
+ + References: + - https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse + - https://en.wikipedia.org/wiki/Singular_value_decomposition + - https://en.wikipedia.org/wiki/Multicollinearity + + @param x_train: the predictor values x for model fitting + @param y_train: the response values y for model fitting + @raises ArithmeticError: if X isn't full rank, then XᵀX is singular and β + doesn't exist + + >>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + >>> y = x**3 - 2 * x**2 + 3 * x - 5 + >>> poly_reg = PolynomialRegression(degree=3) + >>> poly_reg.fit(x, y) + >>> poly_reg.params + array([-5., 3., -2., 1.]) + >>> poly_reg = PolynomialRegression(degree=20) + >>> poly_reg.fit(x, y) + Traceback (most recent call last): + ... + ArithmeticError: Design matrix is not full rank, can't compute coefficients + + Make sure errors don't grow too large: + >>> coefs = np.array([-250, 50, -2, 36, 20, -12, 10, 2, -1, -15, 1]) + >>> y = PolynomialRegression._design_matrix(x, len(coefs) - 1) @ coefs + >>> poly_reg = PolynomialRegression(degree=len(coefs) - 1) + >>> poly_reg.fit(x, y) + >>> np.allclose(poly_reg.params, coefs, atol=10e-3) + True + """ + X = PolynomialRegression._design_matrix(x_train, self.degree) # noqa: N806 + _, cols = X.shape + if np.linalg.matrix_rank(X) < cols: + raise ArithmeticError( + "Design matrix is not full rank, can't compute coefficients" + ) + + # np.linalg.pinv() computes the Moore–Penrose pseudoinverse using SVD + self.params = np.linalg.pinv(X) @ y_train + + def predict(self, data: np.ndarray) -> np.ndarray: + """ + Computes the predicted response values y for the given input data by + constructing the design matrix X and evaluating y = Xβ. 
+ + @param data: the predictor values x for prediction + @returns: the predicted response values y = Xβ + @raises ArithmeticError: if this function is called before the model + parameters are fit + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> y = x**3 - 2 * x**2 + 3 * x - 5 + >>> poly_reg = PolynomialRegression(degree=3) + >>> poly_reg.fit(x, y) + >>> poly_reg.predict(np.array([-1])) + array([-11.]) + >>> poly_reg.predict(np.array([-2])) + array([-27.]) + >>> poly_reg.predict(np.array([6])) + array([157.]) + >>> PolynomialRegression(degree=3).predict(x) + Traceback (most recent call last): + ... + ArithmeticError: Predictor hasn't been fit yet + """ + if self.params is None: + raise ArithmeticError("Predictor hasn't been fit yet") + + return PolynomialRegression._design_matrix(data, self.degree) @ self.params + + +def main() -> None: + """ + Fit a polynomial regression model to predict fuel efficiency using seaborn's mpg + dataset + + >>> pass # Placeholder, function is only for demo purposes + """ + import seaborn as sns + + mpg_data = sns.load_dataset("mpg") + + poly_reg = PolynomialRegression(degree=2) + poly_reg.fit(mpg_data.weight, mpg_data.mpg) + + weight_sorted = np.sort(mpg_data.weight) + predictions = poly_reg.predict(weight_sorted) + + plt.scatter(mpg_data.weight, mpg_data.mpg, color="gray", alpha=0.5) + plt.plot(weight_sorted, predictions, color="red", linewidth=3) + plt.title("Predicting Fuel Efficiency Using Polynomial Regression") + plt.xlabel("Weight (lbs)") + plt.ylabel("Fuel Efficiency (mpg)") + plt.show() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + main() From a0b642cfe58c215b8ead3f2a40655e144e07aacc Mon Sep 17 00:00:00 2001 From: Alex Bernhardt <54606095+FatAnorexic@users.noreply.github.com> Date: Fri, 28 Jul 2023 14:30:05 -0400 Subject: [PATCH 41/43] Physics/basic orbital capture (#8857) * Added file basic_orbital_capture * updating DIRECTORY.md * added second source * [pre-commit.ci] auto fixes from pre-commit.com 
hooks for more information, see https://pre-commit.ci * fixed spelling errors * accepted changes * updating DIRECTORY.md * corrected spelling error * Added file basic_orbital_capture * added second source * fixed spelling errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * applied changes * reviewed and checked file * added doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed redundant constnant * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added scipy imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added doctests to capture_radii and scipy const * fixed conflicts * finalizing file. Added tests * Update physics/basic_orbital_capture.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + physics/basic_orbital_capture.py | 178 +++++++++++++++++++++++++++++++ 2 files changed, 179 insertions(+) create mode 100644 physics/basic_orbital_capture.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 133a1ab01..29514579c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -741,6 +741,7 @@ ## Physics * [Archimedes Principle](physics/archimedes_principle.py) + * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) * [Grahams Law](physics/grahams_law.py) diff --git a/physics/basic_orbital_capture.py b/physics/basic_orbital_capture.py new file mode 100644 index 000000000..eeb45e602 --- /dev/null +++ b/physics/basic_orbital_capture.py @@ -0,0 +1,178 @@ +from math import pow, sqrt + +from scipy.constants import G, c, pi + +""" +These two functions will return the 
radii of impact for a target object +of mass M and radius R as well as it's effective cross sectional area σ(sigma). +That is to say any projectile with velocity v passing within σ, will impact the +target object with mass M. The derivation of which is given at the bottom +of this file. + +The derivation shows that a projectile does not need to aim directly at the target +body in order to hit it, as R_capture>R_target. Astronomers refer to the effective +cross section for capture as σ=π*R_capture**2. + +This algorithm does not account for an N-body problem. + +""" + + +def capture_radii( + target_body_radius: float, target_body_mass: float, projectile_velocity: float +) -> float: + """ + Input Params: + ------------- + target_body_radius: Radius of the central body SI units: meters | m + target_body_mass: Mass of the central body SI units: kilograms | kg + projectile_velocity: Velocity of object moving toward central body + SI units: meters/second | m/s + Returns: + -------- + >>> capture_radii(6.957e8, 1.99e30, 25000.0) + 17209590691.0 + >>> capture_radii(-6.957e8, 1.99e30, 25000.0) + Traceback (most recent call last): + ... + ValueError: Radius cannot be less than 0 + >>> capture_radii(6.957e8, -1.99e30, 25000.0) + Traceback (most recent call last): + ... + ValueError: Mass cannot be less than 0 + >>> capture_radii(6.957e8, 1.99e30, c+1) + Traceback (most recent call last): + ... 
+ ValueError: Cannot go beyond speed of light + + Returned SI units: + ------------------ + meters | m + """ + + if target_body_mass < 0: + raise ValueError("Mass cannot be less than 0") + if target_body_radius < 0: + raise ValueError("Radius cannot be less than 0") + if projectile_velocity > c: + raise ValueError("Cannot go beyond speed of light") + + escape_velocity_squared = (2 * G * target_body_mass) / target_body_radius + capture_radius = target_body_radius * sqrt( + 1 + escape_velocity_squared / pow(projectile_velocity, 2) + ) + return round(capture_radius, 0) + + +def capture_area(capture_radius: float) -> float: + """ + Input Param: + ------------ + capture_radius: The radius of orbital capture and impact for a central body of + mass M and a projectile moving towards it with velocity v + SI units: meters | m + Returns: + -------- + >>> capture_area(17209590691) + 9.304455331329126e+20 + >>> capture_area(-1) + Traceback (most recent call last): + ... + ValueError: Cannot have a capture radius less than 0 + + Returned SI units: + ------------------ + meters*meters | m**2 + """ + + if capture_radius < 0: + raise ValueError("Cannot have a capture radius less than 0") + sigma = pi * pow(capture_radius, 2) + return round(sigma, 0) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + +""" +Derivation: + +Let: Mt=target mass, Rt=target radius, v=projectile_velocity, + r_0=radius of projectile at instant 0 to CM of target + v_p=v at closest approach, + r_p=radius from projectile to target CM at closest approach, + R_capture= radius of impact for projectile with velocity v + +(1)At time=0 the projectile's energy falling from infinity| E=K+U=0.5*m*(v**2)+0 + + E_initial=0.5*m*(v**2) + +(2)at time=0 the angular momentum of the projectile relative to CM target| + L_initial=m*r_0*v*sin(Θ)->m*r_0*v*(R_capture/r_0)->m*v*R_capture + + L_i=m*v*R_capture + +(3)The energy of the projectile at closest approach will be its kinetic energy + at closest 
approach plus gravitational potential energy(-(GMm)/R)| + E_p=K_p+U_p->E_p=0.5*m*(v_p**2)-(G*Mt*m)/r_p + + E_p=0.0.5*m*(v_p**2)-(G*Mt*m)/r_p + +(4)The angular momentum of the projectile relative to the target at closest + approach will be L_p=m*r_p*v_p*sin(Θ), however relative to the target Θ=90° + sin(90°)=1| + + L_p=m*r_p*v_p +(5)Using conservation of angular momentum and energy, we can write a quadratic + equation that solves for r_p| + + (a) + Ei=Ep-> 0.5*m*(v**2)=0.5*m*(v_p**2)-(G*Mt*m)/r_p-> v**2=v_p**2-(2*G*Mt)/r_p + + (b) + Li=Lp-> m*v*R_capture=m*r_p*v_p-> v*R_capture=r_p*v_p-> v_p=(v*R_capture)/r_p + + (c) b plugs int a| + v**2=((v*R_capture)/r_p)**2-(2*G*Mt)/r_p-> + + v**2-(v**2)*(R_c**2)/(r_p**2)+(2*G*Mt)/r_p=0-> + + (v**2)*(r_p**2)+2*G*Mt*r_p-(v**2)*(R_c**2)=0 + + (d) Using the quadratic formula, we'll solve for r_p then rearrange to solve to + R_capture + + r_p=(-2*G*Mt ± sqrt(4*G^2*Mt^2+ 4(v^4*R_c^2)))/(2*v^2)-> + + r_p=(-G*Mt ± sqrt(G^2*Mt+v^4*R_c^2))/v^2-> + + r_p<0 is something we can ignore, as it has no physical meaning for our purposes.-> + + r_p=(-G*Mt)/v^2 + sqrt(G^2*Mt^2/v^4 + R_c^2) + + (e)We are trying to solve for R_c. 
We are looking for impact, so we want r_p=Rt + + Rt + G*Mt/v^2 = sqrt(G^2*Mt^2/v^4 + R_c^2)-> + + (Rt + G*Mt/v^2)^2 = G^2*Mt^2/v^4 + R_c^2-> + + Rt^2 + 2*G*Mt*Rt/v^2 + G^2*Mt^2/v^4 = G^2*Mt^2/v^4 + R_c^2-> + + Rt**2 + 2*G*Mt*Rt/v**2 = R_c**2-> + + Rt**2 * (1 + 2*G*Mt/Rt *1/v**2) = R_c**2-> + + escape velocity = sqrt(2GM/R)= v_escape**2=2GM/R-> + + Rt**2 * (1 + v_esc**2/v**2) = R_c**2-> + +(6) + R_capture = Rt * sqrt(1 + v_esc**2/v**2) + +Source: Problem Set 3 #8 c.Fall_2017|Honors Astronomy|Professor Rachel Bezanson + +Source #2: http://www.nssc.ac.cn/wxzygx/weixin/201607/P020160718380095698873.pdf + 8.8 Planetary Rendezvous: Pg.368 +""" From 0ef930697632a1f05dbbd956c4ccab0473025f5b Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 28 Jul 2023 13:08:40 -0700 Subject: [PATCH 42/43] Disable quantum/quantum_random.py (attempt 2) (#8902) * Disable quantum/quantum_random.py Temporarily disable quantum/quantum_random.py because it produces an illegal instruction error that causes all builds to fail * updating DIRECTORY.md * Disable quantum/quantum_random.py attempt 2 --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - quantum/{quantum_random.py => quantum_random.py.DISABLED.txt} | 0 2 files changed, 1 deletion(-) rename quantum/{quantum_random.py => quantum_random.py.DISABLED.txt} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 29514579c..af150b129 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1063,7 +1063,6 @@ * [Q Fourier Transform](quantum/q_fourier_transform.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) - * [Quantum Random](quantum/quantum_random.py) * [Quantum Teleportation](quantum/quantum_teleportation.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/quantum/quantum_random.py b/quantum/quantum_random.py.DISABLED.txt similarity index 100% rename from 
quantum/quantum_random.py rename to quantum/quantum_random.py.DISABLED.txt From 2cfef0913a36e967d828881386ae78457cf65f33 Mon Sep 17 00:00:00 2001 From: Colin Leroy-Mira Date: Sat, 29 Jul 2023 19:03:43 +0200 Subject: [PATCH 43/43] Fix greyscale computation and inverted coords (#8905) * Fix greyscale computation and inverted coords * Fix test * Add test cases * Add reference to the greyscaling formula --------- Co-authored-by: Colin Leroy-Mira --- digital_image_processing/dithering/burkes.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 0804104ab..35aedc16d 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -39,9 +39,18 @@ class Burkes: def get_greyscale(cls, blue: int, green: int, red: int) -> float: """ >>> Burkes.get_greyscale(3, 4, 5) - 3.753 + 4.185 + >>> Burkes.get_greyscale(0, 0, 0) + 0.0 + >>> Burkes.get_greyscale(255, 255, 255) + 255.0 """ - return 0.114 * blue + 0.587 * green + 0.2126 * red + """ + Formula from https://en.wikipedia.org/wiki/HSL_and_HSV + cf Lightness section, and Fig 13c. + We use the first of four possible. + """ + return 0.114 * blue + 0.587 * green + 0.299 * red def process(self) -> None: for y in range(self.height): @@ -49,10 +58,10 @@ class Burkes: greyscale = int(self.get_greyscale(*self.input_img[y][x])) if self.threshold > greyscale + self.error_table[y][x]: self.output_img[y][x] = (0, 0, 0) - current_error = greyscale + self.error_table[x][y] + current_error = greyscale + self.error_table[y][x] else: self.output_img[y][x] = (255, 255, 255) - current_error = greyscale + self.error_table[x][y] - 255 + current_error = greyscale + self.error_table[y][x] - 255 """ Burkes error propagation (`*` is current pixel):